From b67da2627990341b3fa72a00d58fc9fd80aba9dc Mon Sep 17 00:00:00 2001
From: Aleksandr Bezobchuk
Date: Tue, 13 Oct 2020 16:26:39 -0400
Subject: [PATCH 1/7] Merge PR #10095: Integrated Storage Cloud Auto-Join

---
 .circleci/config.yml | 2 +-
 .circleci/config/jobs/test-go.yml | 2 +-
 .gitignore | 6 +-
 api/sys_raft.go | 1 +
 command/operator_raft_join.go | 24 +-
 go.mod | 4 +-
 go.sum | 182 +-
 http/sys_raft.go | 2 +
 physical/raft/raft.go | 13 +-
 vault/external_tests/raft/raft_test.go | 51 +
 vault/raft.go | 68 +-
 .../github.com/Azure/azure-sdk-for-go/LICENSE | 2 +- .../2019-07-01/compute/availabilitysets.go | 34 +- .../compute/mgmt/2019-07-01/compute/client.go | 3 +- .../2019-07-01/compute/containerservices.go | 24 +- .../2019-07-01/compute/dedicatedhostgroups.go | 30 +- .../mgmt/2019-07-01/compute/dedicatedhosts.go | 25 +- .../2019-07-01/compute/diskencryptionsets.go | 28 +- .../compute/mgmt/2019-07-01/compute/disks.go | 35 +- .../mgmt/2019-07-01/compute/galleries.go | 102 +- .../2019-07-01/compute/galleryapplications.go | 103 +- .../compute/galleryapplicationversions.go | 105 +- .../mgmt/2019-07-01/compute/galleryimages.go | 101 +- .../compute/galleryimageversions.go | 110 +- .../compute/mgmt/2019-07-01/compute/images.go | 27 +- .../mgmt/2019-07-01/compute/loganalytics.go | 11 +- .../compute/mgmt/2019-07-01/compute/models.go | 606 +- .../mgmt/2019-07-01/compute/operations.go | 7 +- .../compute/proximityplacementgroups.go | 39 +- .../mgmt/2019-07-01/compute/resourceskus.go | 9 +- .../mgmt/2019-07-01/compute/snapshots.go | 35 +- .../compute/mgmt/2019-07-01/compute/usage.go | 7 +- .../mgmt/2019-07-01/compute/version.go | 2 +- .../compute/virtualmachineextensionimages.go | 15 +- .../compute/virtualmachineextensions.go | 24 +- .../compute/virtualmachineimages.go | 36 +- .../compute/virtualmachineruncommands.go | 12 +- .../2019-07-01/compute/virtualmachines.go | 96 +- .../virtualmachinescalesetextensions.go | 102 +- .../virtualmachinescalesetrollingupgrades.go | 19 +- .../compute/virtualmachinescalesets.go | 102 +- .../virtualmachinescalesetvmextensions.go | 23 +- .../compute/virtualmachinescalesetvms.go | 67 +- .../2019-07-01/compute/virtualmachinesizes.go | 8 +- .../graphrbac/1.6/graphrbac/applications.go | 59 +- .../graphrbac/1.6/graphrbac/client.go | 3 +- .../1.6/graphrbac/deletedapplications.go | 20 +- .../graphrbac/1.6/graphrbac/domains.go | 11 +- .../graphrbac/1.6/graphrbac/groups.go | 59 +- .../graphrbac/1.6/graphrbac/models.go | 37 +- .../1.6/graphrbac/oauth2permissiongrant.go | 20 +- .../graphrbac/1.6/graphrbac/objects.go | 11 +- .../1.6/graphrbac/serviceprincipals.go | 48 +- .../graphrbac/1.6/graphrbac/signedinuser.go | 15 +- .../services/graphrbac/1.6/graphrbac/users.go | 45 +- .../graphrbac/1.6/graphrbac/version.go | 2 +- .../services/keyvault/v7.0/keyvault/client.go | 266 +- .../services/keyvault/v7.0/keyvault/models.go | 2 +- .../2015-06-15/network/applicationgateways.go | 637 + .../network/mgmt/2015-06-15/network/client.go | 134 + .../expressroutecircuitauthorizations.go | 391 + .../network/expressroutecircuitpeerings.go | 389 + .../network/expressroutecircuits.go | 831 + .../network/expressrouteserviceproviders.go | 151 + .../2015-06-15/network/interfacesgroup.go | 804 + .../mgmt/2015-06-15/network/loadbalancers.go | 492 + .../network/localnetworkgateways.go | 381 + .../network/mgmt/2015-06-15/network/models.go | 9163 +++ .../2015-06-15/network/publicipaddresses.go | 505 + .../network/mgmt/2015-06-15/network/routes.go | 388 + .../mgmt/2015-06-15/network/routetables.go | 492 +
.../mgmt/2015-06-15/network/securitygroups.go | 492 + .../mgmt/2015-06-15/network/securityrules.go | 398 + .../mgmt/2015-06-15/network/subnets.go | 392 + .../network/mgmt/2015-06-15/network/usages.go | 160 + .../mgmt/2015-06-15/network/version.go | 30 + .../virtualnetworkgatewayconnections.go | 620 + .../network/virtualnetworkgateways.go | 539 + .../2015-06-15/network/virtualnetworks.go | 492 + .../authorization/classicadministrators.go | 8 +- .../authorization/client.go | 3 +- .../authorization/globaladministrator.go | 8 +- .../authorization/permissions.go | 11 +- .../provideroperationsmetadata.go | 12 +- .../authorization/roleassignments.go | 43 +- .../authorization/roledefinitions.go | 23 +- .../authorization/version.go | 2 +- .../Azure/azure-sdk-for-go/version/version.go | 2 +- .../github.com/Azure/go-autorest/.gitignore | 32 + .../github.com/Azure/go-autorest/CHANGELOG.md | 1004 + .../github.com/Azure/go-autorest/GNUmakefile | 23 + .../github.com/Azure/go-autorest/Gopkg.lock | 324 + .../github.com/Azure/go-autorest/Gopkg.toml | 59 + vendor/github.com/Azure/go-autorest/LICENSE | 191 + vendor/github.com/Azure/go-autorest/README.md | 165 + .../Azure/go-autorest/autorest/adal/go.mod | 10 +- .../Azure/go-autorest/autorest/adal/go.sum | 29 +- .../autorest/adal/go_mod_tidy_hack.go | 4 +- .../go-autorest/autorest/authorization.go | 7 +- .../go-autorest/autorest/azure/auth/auth.go | 16 +- .../go-autorest/autorest/azure/auth/go.mod | 9 +- .../go-autorest/autorest/azure/auth/go.sum | 46 +- .../autorest/azure/auth/go_mod_tidy_hack.go | 4 +- .../go-autorest/autorest/azure/cli/go.mod | 6 +- .../go-autorest/autorest/azure/cli/go.sum | 34 +- .../autorest/azure/cli/go_mod_tidy_hack.go | 4 +- .../autorest/azure/environments.go | 4 +- .../Azure/go-autorest/autorest/date/go.mod | 2 +- .../Azure/go-autorest/autorest/date/go.sum | 18 +- .../autorest/date/go_mod_tidy_hack.go | 4 +- .../Azure/go-autorest/autorest/go.mod | 11 +- .../Azure/go-autorest/autorest/go.sum | 35 +- .../go-autorest/autorest/go_mod_tidy_hack.go | 24 + .../Azure/go-autorest/autorest/to/go.mod | 2 +- .../Azure/go-autorest/autorest/to/go.sum | 19 +- .../autorest/to/go_mod_tidy_hack.go | 4 +- .../go-autorest/autorest/validation/go.mod | 2 +- .../go-autorest/autorest/validation/go.sum | 19 +- .../autorest/validation/go_mod_tidy_hack.go | 4 +- .../Azure/go-autorest/autorest/version.go | 2 +- .../Azure/go-autorest/azure-pipelines.yml | 105 + vendor/github.com/Azure/go-autorest/doc.go | 18 + .../Azure/go-autorest/logger/go.mod | 2 + .../Azure/go-autorest/logger/go.sum | 2 + .../go-autorest/logger/go_mod_tidy_hack.go | 24 + .../Azure/go-autorest/tracing/go.mod | 2 + .../Azure/go-autorest/tracing/go.sum | 2 + .../go-autorest/tracing/go_mod_tidy_hack.go | 24 + .../denverdino/aliyungo/LICENSE.txt | 191 + .../denverdino/aliyungo/common/client.go | 357 + .../denverdino/aliyungo/common/endpoint.go | 118 + .../denverdino/aliyungo/common/endpoints.xml | 1349 + .../denverdino/aliyungo/common/regions.go | 34 + .../denverdino/aliyungo/common/request.go | 101 + .../denverdino/aliyungo/common/types.go | 89 + .../denverdino/aliyungo/common/version.go | 3 + .../denverdino/aliyungo/ecs/client.go | 74 + .../denverdino/aliyungo/ecs/disks.go | 340 + .../denverdino/aliyungo/ecs/forward_entry.go | 112 + .../denverdino/aliyungo/ecs/images.go | 323 + .../denverdino/aliyungo/ecs/instance_types.go | 82 + .../denverdino/aliyungo/ecs/instances.go | 681 + .../denverdino/aliyungo/ecs/monitoring.go | 136 + .../denverdino/aliyungo/ecs/nat_gateway.go | 210 + 
.../denverdino/aliyungo/ecs/networks.go | 258 + .../denverdino/aliyungo/ecs/regions.go | 34 + .../denverdino/aliyungo/ecs/route_tables.go | 185 + .../aliyungo/ecs/router_interface.go | 227 + .../aliyungo/ecs/security_groups.go | 282 + .../denverdino/aliyungo/ecs/snapshots.go | 140 + .../denverdino/aliyungo/ecs/snat_entry.go | 103 + .../denverdino/aliyungo/ecs/ssh_key_pair.go | 153 + .../denverdino/aliyungo/ecs/tags.go | 128 + .../denverdino/aliyungo/ecs/vpcs.go | 160 + .../denverdino/aliyungo/ecs/vrouters.go | 77 + .../denverdino/aliyungo/ecs/vswitches.go | 164 + .../denverdino/aliyungo/ecs/zones.go | 103 + .../denverdino/aliyungo/util/attempt.go | 76 + .../denverdino/aliyungo/util/encoding.go | 314 + .../denverdino/aliyungo/util/iso6801.go | 80 + .../denverdino/aliyungo/util/signature.go | 40 + .../denverdino/aliyungo/util/util.go | 147 + .../github.com/digitalocean/godo/.gitignore | 1 + .../github.com/digitalocean/godo/.travis.yml | 8 + .../github.com/digitalocean/godo/CHANGELOG.md | 90 + .../digitalocean/godo/CONTRIBUTING.md | 24 + .../github.com/digitalocean/godo/LICENSE.txt | 55 + vendor/github.com/digitalocean/godo/README.md | 150 + .../github.com/digitalocean/godo/account.go | 60 + vendor/github.com/digitalocean/godo/action.go | 104 + vendor/github.com/digitalocean/godo/cdn.go | 214 + .../digitalocean/godo/certificates.go | 126 + vendor/github.com/digitalocean/godo/doc.go | 2 + .../github.com/digitalocean/godo/domains.go | 337 + .../digitalocean/godo/droplet_actions.go | 329 + .../github.com/digitalocean/godo/droplets.go | 570 + vendor/github.com/digitalocean/godo/errors.go | 24 + .../github.com/digitalocean/godo/firewalls.go | 267 + .../digitalocean/godo/floating_ips.go | 139 + .../digitalocean/godo/floating_ips_actions.go | 109 + vendor/github.com/digitalocean/godo/godo.go | 400 + .../digitalocean/godo/image_actions.go | 102 + vendor/github.com/digitalocean/godo/images.go | 241 + vendor/github.com/digitalocean/godo/keys.go | 226 + .../digitalocean/godo/kubernetes.go | 432 + vendor/github.com/digitalocean/godo/links.go | 83 + .../digitalocean/godo/load_balancers.go | 311 + .../github.com/digitalocean/godo/projects.go | 302 + .../github.com/digitalocean/godo/regions.go | 64 + vendor/github.com/digitalocean/godo/sizes.go | 68 + .../github.com/digitalocean/godo/snapshots.go | 140 + .../github.com/digitalocean/godo/storage.go | 251 + .../digitalocean/godo/storage_actions.go | 129 + .../github.com/digitalocean/godo/strings.go | 102 + vendor/github.com/digitalocean/godo/tags.go | 231 + .../github.com/digitalocean/godo/timestamp.go | 35 + vendor/github.com/google/gofuzz/README.md | 2 +- vendor/github.com/google/gofuzz/fuzz.go | 35 +- .../gophercloud/gophercloud/.gitignore | 3 + .../gophercloud/gophercloud/.travis.yml | 25 + .../gophercloud/gophercloud/.zuul.yaml | 114 + .../gophercloud/gophercloud/CHANGELOG.md | 0 .../gophercloud/gophercloud/LICENSE | 191 + .../gophercloud/gophercloud/README.md | 159 + .../gophercloud/gophercloud/auth_options.go | 437 + .../gophercloud/gophercloud/auth_result.go | 52 + .../github.com/gophercloud/gophercloud/doc.go | 110 + .../gophercloud/endpoint_search.go | 76 + .../gophercloud/gophercloud/errors.go | 471 + .../github.com/gophercloud/gophercloud/go.mod | 7 + .../github.com/gophercloud/gophercloud/go.sum | 8 + .../gophercloud/openstack/auth_env.go | 125 + .../gophercloud/openstack/client.go | 438 + .../openstack/compute/v2/flavors/doc.go | 137 + .../openstack/compute/v2/flavors/requests.go | 357 + .../openstack/compute/v2/flavors/results.go | 252 + 
.../openstack/compute/v2/flavors/urls.go | 49 + .../openstack/compute/v2/images/doc.go | 32 + .../openstack/compute/v2/images/requests.go | 109 + .../openstack/compute/v2/images/results.go | 95 + .../openstack/compute/v2/images/urls.go | 15 + .../openstack/compute/v2/servers/doc.go | 115 + .../openstack/compute/v2/servers/errors.go | 71 + .../compute/v2/servers/microversions.go | 11 + .../openstack/compute/v2/servers/requests.go | 812 + .../openstack/compute/v2/servers/results.go | 414 + .../openstack/compute/v2/servers/urls.go | 51 + .../openstack/compute/v2/servers/util.go | 21 + .../gophercloud/gophercloud/openstack/doc.go | 14 + .../openstack/endpoint_location.go | 107 + .../gophercloud/openstack/errors.go | 71 + .../openstack/identity/v2/tenants/doc.go | 65 + .../openstack/identity/v2/tenants/requests.go | 116 + .../openstack/identity/v2/tenants/results.go | 91 + .../openstack/identity/v2/tenants/urls.go | 23 + .../openstack/identity/v2/tokens/doc.go | 46 + .../openstack/identity/v2/tokens/requests.go | 103 + .../openstack/identity/v2/tokens/results.go | 174 + .../openstack/identity/v2/tokens/urls.go | 13 + .../openstack/identity/v3/tokens/doc.go | 108 + .../openstack/identity/v3/tokens/requests.go | 162 + .../openstack/identity/v3/tokens/results.go | 178 + .../openstack/identity/v3/tokens/urls.go | 7 + .../openstack/utils/base_endpoint.go | 28 + .../openstack/utils/choose_version.go | 111 + .../gophercloud/pagination/http.go | 60 + .../gophercloud/pagination/linked.go | 92 + .../gophercloud/pagination/marker.go | 58 + .../gophercloud/pagination/pager.go | 251 + .../gophercloud/gophercloud/pagination/pkg.go | 4 + .../gophercloud/pagination/single.go | 33 + .../gophercloud/gophercloud/params.go | 491 + .../gophercloud/provider_client.go | 501 + .../gophercloud/gophercloud/results.go | 448 + .../gophercloud/gophercloud/service_client.go | 154 + .../gophercloud/gophercloud/util.go | 102 + .../hashicorp/go-discover/.gitignore | 6 + .../hashicorp/go-discover/.travis.yml | 10 + .../github.com/hashicorp/go-discover/LICENSE | 354 + .../hashicorp/go-discover/README.md | 305 + .../hashicorp/go-discover/config.go | 229 + .../hashicorp/go-discover/discover.go | 186 + .../github.com/hashicorp/go-discover/go.mod | 45 + .../github.com/hashicorp/go-discover/go.sum | 317 + .../provider/aliyun/aliyun_discover.go | 103 + .../go-discover/provider/aws/aws_discover.go | 164 + .../provider/azure/azure_discover.go | 228 + .../digitalocean/digitalocean_discover.go | 121 + .../go-discover/provider/gce/gce_discover.go | 200 + .../provider/linode/linode_discover.go | 142 + .../provider/mdns/mdns_provider.go | 119 + .../go-discover/provider/os/os_discover.go | 223 + .../provider/packet/packet_discover.go | 147 + .../provider/scaleway/scaleway_discover.go | 81 + .../provider/softlayer/softlayer_discover.go | 67 + .../tencentcloud/tencentcloud_discover.go | 134 + .../provider/triton/triton_discover.go | 90 + .../provider/vsphere/vsphere_discover.go | 417 + vendor/github.com/hashicorp/mdns/.gitignore | 23 + vendor/github.com/hashicorp/mdns/LICENSE | 20 + vendor/github.com/hashicorp/mdns/README.md | 37 + vendor/github.com/hashicorp/mdns/client.go | 365 + vendor/github.com/hashicorp/mdns/go.mod | 9 + vendor/github.com/hashicorp/mdns/go.sum | 10 + vendor/github.com/hashicorp/mdns/server.go | 288 + vendor/github.com/hashicorp/mdns/zone.go | 307 + vendor/github.com/hashicorp/vault/api/go.mod | 2 +- .../hashicorp/vault/api/sys_raft.go | 1 + vendor/github.com/hashicorp/vic/LICENSE | 24135 +++++++ 
.../vic/pkg/vsphere/tags/categories.go | 212 + .../vic/pkg/vsphere/tags/rest_client.go | 278 + .../vic/pkg/vsphere/tags/tag_association.go | 135 + .../hashicorp/vic/pkg/vsphere/tags/tags.go | 255 + .../joyent/triton-go/compute/client.go | 90 + .../joyent/triton-go/compute/datacenters.go | 101 + .../joyent/triton-go/compute/images.go | 268 + .../joyent/triton-go/compute/instances.go | 1230 + .../joyent/triton-go/compute/packages.go | 128 + .../joyent/triton-go/compute/ping.go | 58 + .../joyent/triton-go/compute/services.go | 72 + .../joyent/triton-go/compute/snapshots.go | 164 + .../joyent/triton-go/compute/volumes.go | 217 + vendor/github.com/linode/linodego/.gitignore | 19 + vendor/github.com/linode/linodego/.travis.yml | 22 + .../github.com/linode/linodego/API_SUPPORT.md | 379 + .../github.com/linode/linodego/CHANGELOG.md | 239 + vendor/github.com/linode/linodego/Gopkg.lock | 111 + vendor/github.com/linode/linodego/Gopkg.toml | 29 + vendor/github.com/linode/linodego/LICENSE | 21 + vendor/github.com/linode/linodego/Makefile | 43 + vendor/github.com/linode/linodego/README.md | 178 + vendor/github.com/linode/linodego/account.go | 45 + .../linode/linodego/account_events.go | 277 + .../linode/linodego/account_invoices.go | 125 + .../linode/linodego/account_notifications.go | 101 + .../linode/linodego/account_users.go | 164 + vendor/github.com/linode/linodego/client.go | 257 + .../linode/linodego/domain_records.go | 195 + vendor/github.com/linode/linodego/domains.go | 295 + vendor/github.com/linode/linodego/env.sample | 2 + vendor/github.com/linode/linodego/errors.go | 109 + vendor/github.com/linode/linodego/images.go | 168 + .../linode/linodego/instance_configs.go | 246 + .../linode/linodego/instance_disks.go | 251 + .../linode/linodego/instance_ips.go | 106 + .../linode/linodego/instance_snapshots.go | 188 + .../linode/linodego/instance_volumes.go | 38 + .../github.com/linode/linodego/instances.go | 454 + vendor/github.com/linode/linodego/kernels.go | 61 + vendor/github.com/linode/linodego/longview.go | 67 + .../linode/linodego/longview_subscriptions.go | 70 + vendor/github.com/linode/linodego/managed.go | 1 + .../github.com/linode/linodego/network_ips.go | 90 + .../linode/linodego/network_pools.go | 50 + .../linode/linodego/network_ranges.go | 50 + .../linode/linodego/nodebalancer.go | 199 + .../linodego/nodebalancer_config_nodes.go | 186 + .../linode/linodego/nodebalancer_configs.go | 334 + .../github.com/linode/linodego/pagination.go | 430 + vendor/github.com/linode/linodego/profile.go | 117 + .../linode/linodego/profile_sshkeys.go | 160 + .../linode/linodego/profile_tokens.go | 195 + vendor/github.com/linode/linodego/regions.go | 64 + .../github.com/linode/linodego/resources.go | 162 + .../linode/linodego/stackscripts.go | 208 + vendor/github.com/linode/linodego/support.go | 88 + vendor/github.com/linode/linodego/tags.go | 219 + vendor/github.com/linode/linodego/types.go | 90 + vendor/github.com/linode/linodego/util.go | 15 + vendor/github.com/linode/linodego/volumes.go | 299 + vendor/github.com/linode/linodego/waitfor.go | 273 + vendor/github.com/miekg/dns/.codecov.yml | 8 + vendor/github.com/miekg/dns/.gitignore | 4 + vendor/github.com/miekg/dns/.travis.yml | 19 + vendor/github.com/miekg/dns/AUTHORS | 1 + vendor/github.com/miekg/dns/CONTRIBUTORS | 10 + vendor/github.com/miekg/dns/COPYRIGHT | 9 + vendor/github.com/miekg/dns/Gopkg.lock | 33 + vendor/github.com/miekg/dns/Gopkg.toml | 38 + vendor/github.com/miekg/dns/LICENSE | 32 + vendor/github.com/miekg/dns/Makefile.fuzz | 33 + 
vendor/github.com/miekg/dns/Makefile.release | 52 + vendor/github.com/miekg/dns/README.md | 174 + vendor/github.com/miekg/dns/acceptfunc.go | 54 + vendor/github.com/miekg/dns/client.go | 415 + vendor/github.com/miekg/dns/clientconfig.go | 135 + vendor/github.com/miekg/dns/dane.go | 43 + vendor/github.com/miekg/dns/defaults.go | 378 + vendor/github.com/miekg/dns/dns.go | 134 + vendor/github.com/miekg/dns/dnssec.go | 794 + vendor/github.com/miekg/dns/dnssec_keygen.go | 140 + vendor/github.com/miekg/dns/dnssec_keyscan.go | 322 + vendor/github.com/miekg/dns/dnssec_privkey.go | 94 + vendor/github.com/miekg/dns/doc.go | 269 + vendor/github.com/miekg/dns/duplicate.go | 38 + vendor/github.com/miekg/dns/edns.go | 659 + vendor/github.com/miekg/dns/format.go | 93 + vendor/github.com/miekg/dns/fuzz.go | 23 + vendor/github.com/miekg/dns/generate.go | 242 + vendor/github.com/miekg/dns/labels.go | 188 + vendor/github.com/miekg/dns/listen_go111.go | 44 + .../github.com/miekg/dns/listen_go_not111.go | 23 + vendor/github.com/miekg/dns/msg.go | 1228 + vendor/github.com/miekg/dns/msg_helpers.go | 671 + vendor/github.com/miekg/dns/msg_truncate.go | 111 + vendor/github.com/miekg/dns/nsecx.go | 95 + vendor/github.com/miekg/dns/privaterr.go | 114 + vendor/github.com/miekg/dns/reverse.go | 52 + vendor/github.com/miekg/dns/sanitize.go | 86 + vendor/github.com/miekg/dns/scan.go | 1397 + vendor/github.com/miekg/dns/scan_rr.go | 1698 + vendor/github.com/miekg/dns/serve_mux.go | 147 + vendor/github.com/miekg/dns/server.go | 764 + vendor/github.com/miekg/dns/sig0.go | 209 + vendor/github.com/miekg/dns/singleinflight.go | 61 + vendor/github.com/miekg/dns/smimea.go | 44 + vendor/github.com/miekg/dns/tlsa.go | 44 + vendor/github.com/miekg/dns/tsig.go | 389 + vendor/github.com/miekg/dns/types.go | 1413 + vendor/github.com/miekg/dns/udp.go | 102 + vendor/github.com/miekg/dns/udp_windows.go | 35 + vendor/github.com/miekg/dns/update.go | 110 + vendor/github.com/miekg/dns/version.go | 15 + vendor/github.com/miekg/dns/xfr.go | 263 + vendor/github.com/miekg/dns/zduplicate.go | 1140 + vendor/github.com/miekg/dns/zmsg.go | 2722 + vendor/github.com/miekg/dns/ztypes.go | 881 + .../nicolai86/scaleway-sdk/LICENSE.md | 20 + .../nicolai86/scaleway-sdk/README.md | 9 + .../github.com/nicolai86/scaleway-sdk/api.go | 302 + .../nicolai86/scaleway-sdk/availability.go | 50 + .../nicolai86/scaleway-sdk/bootscript.go | 83 + .../nicolai86/scaleway-sdk/container.go | 70 + .../nicolai86/scaleway-sdk/dashboard.go | 42 + .../nicolai86/scaleway-sdk/image.go | 469 + .../github.com/nicolai86/scaleway-sdk/ip.go | 209 + .../nicolai86/scaleway-sdk/organization.go | 39 + .../nicolai86/scaleway-sdk/permissions.go | 39 + .../nicolai86/scaleway-sdk/quota.go | 36 + .../nicolai86/scaleway-sdk/security_group.go | 146 + .../scaleway-sdk/security_group_rule.go | 140 + .../nicolai86/scaleway-sdk/server.go | 324 + .../nicolai86/scaleway-sdk/snapshot.go | 138 + .../nicolai86/scaleway-sdk/tasks.go | 82 + .../nicolai86/scaleway-sdk/tokens.go | 135 + .../github.com/nicolai86/scaleway-sdk/user.go | 101 + .../nicolai86/scaleway-sdk/user_data.go | 140 + .../nicolai86/scaleway-sdk/volume.go | 183 + .../github.com/packethost/packngo/.drone.yml | 28 + .../github.com/packethost/packngo/.gitignore | 28 + .../packethost/packngo/CHANGELOG.md | 54 + .../github.com/packethost/packngo/LICENSE.txt | 56 + .../github.com/packethost/packngo/README.md | 72 + .../packethost/packngo/billing_address.go | 7 + .../github.com/packethost/packngo/devices.go | 257 + 
vendor/github.com/packethost/packngo/email.go | 37 + .../packethost/packngo/facilities.go | 52 + vendor/github.com/packethost/packngo/ip.go | 194 + .../packethost/packngo/operatingsystems.go | 41 + .../packethost/packngo/organizations.go | 147 + .../github.com/packethost/packngo/packngo.go | 306 + .../packethost/packngo/payment_methods.go | 72 + vendor/github.com/packethost/packngo/plans.go | 117 + vendor/github.com/packethost/packngo/ports.go | 225 + .../github.com/packethost/packngo/projects.go | 138 + vendor/github.com/packethost/packngo/rate.go | 12 + .../packethost/packngo/spotmarket.go | 39 + .../github.com/packethost/packngo/sshkeys.go | 138 + .../packethost/packngo/timestamp.go | 35 + vendor/github.com/packethost/packngo/user.go | 64 + vendor/github.com/packethost/packngo/utils.go | 91 + .../packethost/packngo/virtualnetworks.go | 81 + .../github.com/packethost/packngo/volumes.go | 239 + vendor/github.com/renier/xmlrpc/.gitignore | 2 + vendor/github.com/renier/xmlrpc/LICENSE | 19 + vendor/github.com/renier/xmlrpc/README.md | 79 + vendor/github.com/renier/xmlrpc/client.go | 181 + vendor/github.com/renier/xmlrpc/decoder.go | 547 + vendor/github.com/renier/xmlrpc/encoder.go | 204 + vendor/github.com/renier/xmlrpc/request.go | 57 + vendor/github.com/renier/xmlrpc/response.go | 57 + .../github.com/renier/xmlrpc/test_server.rb | 25 + vendor/github.com/renier/xmlrpc/xmlrpc.go | 28 + .../github.com/softlayer/softlayer-go/LICENSE | 202 + .../softlayer/softlayer-go/config/config.go | 145 + .../softlayer/softlayer-go/datatypes/abuse.go | 32 + .../softlayer-go/datatypes/account.go | 2986 + .../softlayer-go/datatypes/auxiliary.go | 342 + .../softlayer-go/datatypes/billing.go | 2674 + .../softlayer/softlayer-go/datatypes/brand.go | 262 + .../softlayer-go/datatypes/business.go | 43 + .../softlayer-go/datatypes/catalyst.go | 211 + .../softlayer-go/datatypes/compliance.go | 35 + .../softlayer-go/datatypes/configuration.go | 553 + .../softlayer-go/datatypes/container.go | 5826 ++ .../softlayer/softlayer-go/datatypes/dns.go | 430 + .../softlayer/softlayer-go/datatypes/email.go | 63 + .../softlayer-go/datatypes/entity.go | 25 + .../softlayer/softlayer-go/datatypes/event.go | 71 + .../softlayer-go/datatypes/flexiblecredit.go | 110 + .../softlayer-go/datatypes/hardware.go | 1938 + .../softlayer-go/datatypes/layout.go | 245 + .../softlayer/softlayer-go/datatypes/legal.go | 58 + .../softlayer-go/datatypes/locale.go | 101 + .../softlayer-go/datatypes/location.go | 443 + .../softlayer-go/datatypes/marketplace.go | 195 + .../softlayer-go/datatypes/mcafee.go | 319 + .../softlayer-go/datatypes/metric.go | 207 + .../softlayer-go/datatypes/monitoring.go | 248 + .../softlayer-go/datatypes/network.go | 6136 ++ .../softlayer-go/datatypes/notification.go | 676 + .../softlayer-go/datatypes/policy.go | 29 + .../softlayer-go/datatypes/product.go | 1895 + .../softlayer-go/datatypes/provisioning.go | 303 + .../softlayer-go/datatypes/resource.go | 413 + .../softlayer/softlayer-go/datatypes/sales.go | 62 + .../softlayer/softlayer-go/datatypes/scale.go | 559 + .../softlayer-go/datatypes/search.go | 26 + .../softlayer-go/datatypes/security.go | 294 + .../softlayer-go/datatypes/service.go | 55 + .../softlayer-go/datatypes/softlayer.go | 104 + .../softlayer-go/datatypes/software.go | 768 + .../softlayer-go/datatypes/survey.go | 150 + .../softlayer/softlayer-go/datatypes/tag.go | 157 + .../softlayer-go/datatypes/ticket.go | 733 + .../softlayer/softlayer-go/datatypes/user.go | 1450 + .../softlayer-go/datatypes/utility.go | 46 + 
.../softlayer-go/datatypes/virtual.go | 1379 + .../softlayer/softlayer-go/filter/filters.go | 305 + .../softlayer-go/services/account.go | 5459 ++ .../softlayer-go/services/auxiliary.go | 729 + .../softlayer-go/services/billing.go | 2794 + .../softlayer/softlayer-go/services/brand.go | 375 + .../softlayer-go/services/business.go | 122 + .../softlayer-go/services/catalyst.go | 207 + .../softlayer-go/services/compliance.go | 82 + .../softlayer-go/services/configuration.go | 799 + .../softlayer/softlayer-go/services/dns.go | 1094 + .../softlayer/softlayer-go/services/email.go | 158 + .../softlayer/softlayer-go/services/event.go | 103 + .../softlayer-go/services/flexiblecredit.go | 100 + .../softlayer-go/services/hardware.go | 10188 +++ .../softlayer/softlayer-go/services/layout.go | 512 + .../softlayer/softlayer-go/services/locale.go | 225 + .../softlayer-go/services/location.go | 884 + .../softlayer-go/services/marketplace.go | 148 + .../softlayer/softlayer-go/services/metric.go | 234 + .../softlayer-go/services/monitoring.go | 685 + .../softlayer-go/services/network.go | 15408 +++++ .../softlayer-go/services/notification.go | 892 + .../softlayer-go/services/product.go | 2077 + .../softlayer-go/services/provisioning.go | 564 + .../softlayer-go/services/resource.go | 419 + .../softlayer/softlayer-go/services/sales.go | 112 + .../softlayer/softlayer-go/services/scale.go | 1668 + .../softlayer/softlayer-go/services/search.go | 122 + .../softlayer-go/services/security.go | 456 + .../softlayer-go/services/software.go | 797 + .../softlayer/softlayer-go/services/survey.go | 112 + .../softlayer/softlayer-go/services/tag.go | 123 + .../softlayer/softlayer-go/services/ticket.go | 982 + .../softlayer/softlayer-go/services/user.go | 4388 ++ .../softlayer-go/services/utility.go | 88 + .../softlayer-go/services/virtual.go | 3167 + .../softlayer/softlayer-go/session/rest.go | 314 + .../softlayer/softlayer-go/session/session.go | 359 + .../softlayer/softlayer-go/session/xmlrpc.go | 253 + .../softlayer/softlayer-go/sl/errors.go | 49 + .../softlayer/softlayer-go/sl/helpers.go | 175 + .../softlayer/softlayer-go/sl/options.go | 27 + .../softlayer/softlayer-go/sl/version.go | 47 + .../tencentcloud/tencentcloud-sdk-go/LICENSE | 201 + .../tencentcloud/common/client.go | 270 + .../tencentcloud/common/credentials.go | 58 + .../tencentcloud/common/errors/errors.go | 35 + .../tencentcloud/common/http/request.go | 233 + .../tencentcloud/common/http/response.go | 81 + .../common/profile/client_profile.go | 23 + .../common/profile/http_profile.go | 19 + .../tencentcloud/common/sign.go | 94 + .../tencentcloud/common/types.go | 47 + .../tencentcloud/cvm/v20170312/client.go | 1855 + .../tencentcloud/cvm/v20170312/models.go | 3853 ++ vendor/github.com/vmware/govmomi/.gitignore | 2 + .../github.com/vmware/govmomi/.goreleaser.yml | 57 + vendor/github.com/vmware/govmomi/.mailmap | 20 + vendor/github.com/vmware/govmomi/.travis.yml | 30 + vendor/github.com/vmware/govmomi/CHANGELOG.md | 313 + .../github.com/vmware/govmomi/CONTRIBUTING.md | 101 + vendor/github.com/vmware/govmomi/CONTRIBUTORS | 83 + vendor/github.com/vmware/govmomi/Dockerfile | 4 + vendor/github.com/vmware/govmomi/Gopkg.lock | 44 + vendor/github.com/vmware/govmomi/Gopkg.toml | 19 + vendor/github.com/vmware/govmomi/LICENSE.txt | 202 + vendor/github.com/vmware/govmomi/Makefile | 29 + vendor/github.com/vmware/govmomi/README.md | 86 + vendor/github.com/vmware/govmomi/client.go | 136 + vendor/github.com/vmware/govmomi/find/doc.go | 37 + 
.../github.com/vmware/govmomi/find/error.go | 64 + .../github.com/vmware/govmomi/find/finder.go | 1042 + .../vmware/govmomi/find/recurser.go | 253 + .../github.com/vmware/govmomi/list/lister.go | 563 + vendor/github.com/vmware/govmomi/list/path.go | 44 + vendor/github.com/vmware/govmomi/nfc/lease.go | 238 + .../vmware/govmomi/nfc/lease_updater.go | 146 + .../govmomi/object/authorization_manager.go | 174 + .../object/authorization_manager_internal.go | 86 + .../object/cluster_compute_resource.go | 70 + .../vmware/govmomi/object/common.go | 132 + .../vmware/govmomi/object/compute_resource.go | 111 + .../govmomi/object/custom_fields_manager.go | 146 + .../object/customization_spec_manager.go | 166 + .../vmware/govmomi/object/datacenter.go | 129 + .../vmware/govmomi/object/datastore.go | 435 + .../vmware/govmomi/object/datastore_file.go | 414 + .../govmomi/object/datastore_file_manager.go | 228 + .../vmware/govmomi/object/datastore_path.go | 71 + .../vmware/govmomi/object/diagnostic_log.go | 76 + .../govmomi/object/diagnostic_manager.go | 104 + .../object/distributed_virtual_portgroup.go | 80 + .../object/distributed_virtual_switch.go | 80 + .../govmomi/object/extension_manager.go | 113 + .../vmware/govmomi/object/file_manager.go | 126 + .../vmware/govmomi/object/folder.go | 227 + .../govmomi/object/history_collector.go | 72 + .../govmomi/object/host_account_manager.go | 65 + .../govmomi/object/host_certificate_info.go | 250 + .../object/host_certificate_manager.go | 162 + .../govmomi/object/host_config_manager.go | 196 + .../govmomi/object/host_datastore_browser.go | 65 + .../govmomi/object/host_datastore_system.go | 119 + .../govmomi/object/host_date_time_system.go | 69 + .../govmomi/object/host_firewall_system.go | 181 + .../govmomi/object/host_network_system.go | 358 + .../govmomi/object/host_service_system.go | 88 + .../govmomi/object/host_storage_system.go | 174 + .../vmware/govmomi/object/host_system.go | 153 + .../object/host_virtual_nic_manager.go | 93 + .../object/host_vsan_internal_system.go | 117 + .../vmware/govmomi/object/host_vsan_system.go | 88 + .../govmomi/object/namespace_manager.go | 76 + .../vmware/govmomi/object/network.go | 56 + .../govmomi/object/network_reference.go | 31 + .../vmware/govmomi/object/opaque_network.go | 57 + .../vmware/govmomi/object/option_manager.go | 59 + .../vmware/govmomi/object/resource_pool.go | 138 + .../vmware/govmomi/object/search_index.go | 163 + .../vmware/govmomi/object/storage_pod.go | 34 + .../object/storage_resource_manager.go | 179 + .../github.com/vmware/govmomi/object/task.go | 62 + .../github.com/vmware/govmomi/object/types.go | 67 + .../vmware/govmomi/object/virtual_app.go | 105 + .../govmomi/object/virtual_device_list.go | 935 + .../govmomi/object/virtual_disk_manager.go | 227 + .../object/virtual_disk_manager_internal.go | 166 + .../vmware/govmomi/object/virtual_machine.go | 801 + .../vmware_distributed_virtual_switch.go | 21 + .../vmware/govmomi/property/collector.go | 205 + .../vmware/govmomi/property/filter.go | 139 + .../vmware/govmomi/property/wait.go | 115 + .../vmware/govmomi/session/keep_alive.go | 127 + .../vmware/govmomi/session/manager.go | 267 + .../github.com/vmware/govmomi/task/error.go | 32 + vendor/github.com/vmware/govmomi/task/wait.go | 132 + .../github.com/vmware/govmomi/vim25/client.go | 146 + .../vmware/govmomi/vim25/debug/debug.go | 81 + vendor/github.com/vmware/govmomi/vim25/doc.go | 29 + .../vmware/govmomi/vim25/methods/methods.go | 17284 +++++ .../govmomi/vim25/methods/service_content.go | 57 + 
.../vmware/govmomi/vim25/mo/ancestors.go | 137 + .../vmware/govmomi/vim25/mo/entity.go | 24 + .../vmware/govmomi/vim25/mo/extra.go | 61 + .../github.com/vmware/govmomi/vim25/mo/mo.go | 1801 + .../vmware/govmomi/vim25/mo/reference.go | 26 + .../vmware/govmomi/vim25/mo/registry.go | 21 + .../vmware/govmomi/vim25/mo/retrieve.go | 174 + .../vmware/govmomi/vim25/mo/type_info.go | 247 + .../govmomi/vim25/progress/aggregator.go | 73 + .../vmware/govmomi/vim25/progress/doc.go | 32 + .../vmware/govmomi/vim25/progress/prefix.go | 54 + .../vmware/govmomi/vim25/progress/reader.go | 185 + .../vmware/govmomi/vim25/progress/report.go | 26 + .../vmware/govmomi/vim25/progress/scale.go | 76 + .../vmware/govmomi/vim25/progress/sinker.go | 33 + .../vmware/govmomi/vim25/progress/tee.go | 41 + .../github.com/vmware/govmomi/vim25/retry.go | 105 + .../vmware/govmomi/vim25/soap/client.go | 775 + .../vmware/govmomi/vim25/soap/debug.go | 149 + .../vmware/govmomi/vim25/soap/error.go | 115 + .../vmware/govmomi/vim25/soap/soap.go | 49 + .../vmware/govmomi/vim25/types/base.go | 19 + .../vmware/govmomi/vim25/types/enum.go | 4824 ++ .../vmware/govmomi/vim25/types/fault.go | 32 + .../vmware/govmomi/vim25/types/helpers.go | 95 + .../vmware/govmomi/vim25/types/if.go | 3449 + .../vmware/govmomi/vim25/types/registry.go | 43 + .../vmware/govmomi/vim25/types/types.go | 55433 ++++++++++++++++ .../vmware/govmomi/vim25/xml/LICENSE | 27 + .../vmware/govmomi/vim25/xml/extras.go | 99 + .../vmware/govmomi/vim25/xml/marshal.go | 949 + .../vmware/govmomi/vim25/xml/read.go | 781 + .../vmware/govmomi/vim25/xml/typeinfo.go | 366 + .../vmware/govmomi/vim25/xml/xml.go | 1939 + vendor/golang.org/x/net/bpf/asm.go | 41 + vendor/golang.org/x/net/bpf/constants.go | 222 + vendor/golang.org/x/net/bpf/doc.go | 82 + vendor/golang.org/x/net/bpf/instructions.go | 726 + vendor/golang.org/x/net/bpf/setter.go | 10 + vendor/golang.org/x/net/bpf/vm.go | 150 + .../golang.org/x/net/bpf/vm_instructions.go | 182 + .../golang.org/x/net/internal/iana/const.go | 223 + .../x/net/internal/socket/cmsghdr.go | 11 + .../x/net/internal/socket/cmsghdr_bsd.go | 13 + .../internal/socket/cmsghdr_linux_32bit.go | 14 + .../internal/socket/cmsghdr_linux_64bit.go | 14 + .../internal/socket/cmsghdr_solaris_64bit.go | 14 + .../x/net/internal/socket/cmsghdr_stub.go | 17 + .../golang.org/x/net/internal/socket/empty.s | 7 + .../x/net/internal/socket/error_unix.go | 31 + .../x/net/internal/socket/error_windows.go | 26 + .../x/net/internal/socket/iovec_32bit.go | 19 + .../x/net/internal/socket/iovec_64bit.go | 19 + .../internal/socket/iovec_solaris_64bit.go | 19 + .../x/net/internal/socket/iovec_stub.go | 11 + .../x/net/internal/socket/mmsghdr_stub.go | 21 + .../x/net/internal/socket/mmsghdr_unix.go | 42 + .../x/net/internal/socket/msghdr_bsd.go | 39 + .../x/net/internal/socket/msghdr_bsdvar.go | 16 + .../x/net/internal/socket/msghdr_linux.go | 36 + .../net/internal/socket/msghdr_linux_32bit.go | 24 + .../net/internal/socket/msghdr_linux_64bit.go | 24 + .../x/net/internal/socket/msghdr_openbsd.go | 14 + .../internal/socket/msghdr_solaris_64bit.go | 36 + .../x/net/internal/socket/msghdr_stub.go | 14 + .../x/net/internal/socket/norace.go | 12 + .../golang.org/x/net/internal/socket/race.go | 37 + .../x/net/internal/socket/rawconn.go | 64 + .../x/net/internal/socket/rawconn_mmsg.go | 79 + .../x/net/internal/socket/rawconn_msg.go | 78 + .../x/net/internal/socket/rawconn_nommsg.go | 15 + .../x/net/internal/socket/rawconn_nomsg.go | 15 + .../x/net/internal/socket/socket.go | 288 + 
.../golang.org/x/net/internal/socket/sys.go | 33 + .../x/net/internal/socket/sys_bsd.go | 15 + .../x/net/internal/socket/sys_bsdvar.go | 23 + .../x/net/internal/socket/sys_const_unix.go | 17 + .../x/net/internal/socket/sys_darwin.go | 7 + .../x/net/internal/socket/sys_dragonfly.go | 32 + .../net/internal/socket/sys_go1_11_darwin.go | 33 + .../x/net/internal/socket/sys_linkname.go | 42 + .../x/net/internal/socket/sys_linux.go | 27 + .../x/net/internal/socket/sys_linux_386.go | 55 + .../x/net/internal/socket/sys_linux_386.s | 11 + .../x/net/internal/socket/sys_linux_amd64.go | 10 + .../x/net/internal/socket/sys_linux_arm.go | 10 + .../x/net/internal/socket/sys_linux_arm64.go | 10 + .../x/net/internal/socket/sys_linux_mips.go | 10 + .../x/net/internal/socket/sys_linux_mips64.go | 10 + .../net/internal/socket/sys_linux_mips64le.go | 10 + .../x/net/internal/socket/sys_linux_mipsle.go | 10 + .../x/net/internal/socket/sys_linux_ppc64.go | 10 + .../net/internal/socket/sys_linux_ppc64le.go | 10 + .../net/internal/socket/sys_linux_riscv64.go | 12 + .../x/net/internal/socket/sys_linux_s390x.go | 55 + .../x/net/internal/socket/sys_linux_s390x.s | 11 + .../x/net/internal/socket/sys_netbsd.go | 25 + .../x/net/internal/socket/sys_posix.go | 183 + .../x/net/internal/socket/sys_solaris.go | 70 + .../x/net/internal/socket/sys_solaris_amd64.s | 11 + .../x/net/internal/socket/sys_stub.go | 63 + .../x/net/internal/socket/sys_unix.go | 33 + .../x/net/internal/socket/sys_windows.go | 71 + .../x/net/internal/socket/zsys_aix_ppc64.go | 60 + .../x/net/internal/socket/zsys_darwin_386.go | 51 + .../net/internal/socket/zsys_darwin_amd64.go | 53 + .../x/net/internal/socket/zsys_darwin_arm.go | 51 + .../net/internal/socket/zsys_darwin_arm64.go | 53 + .../internal/socket/zsys_dragonfly_amd64.go | 53 + .../x/net/internal/socket/zsys_freebsd_386.go | 51 + .../net/internal/socket/zsys_freebsd_amd64.go | 53 + .../x/net/internal/socket/zsys_freebsd_arm.go | 51 + .../net/internal/socket/zsys_freebsd_arm64.go | 53 + .../x/net/internal/socket/zsys_linux_386.go | 54 + .../x/net/internal/socket/zsys_linux_amd64.go | 57 + .../x/net/internal/socket/zsys_linux_arm.go | 55 + .../x/net/internal/socket/zsys_linux_arm64.go | 58 + .../x/net/internal/socket/zsys_linux_mips.go | 55 + .../net/internal/socket/zsys_linux_mips64.go | 58 + .../internal/socket/zsys_linux_mips64le.go | 58 + .../net/internal/socket/zsys_linux_mipsle.go | 55 + .../x/net/internal/socket/zsys_linux_ppc64.go | 58 + .../net/internal/socket/zsys_linux_ppc64le.go | 58 + .../net/internal/socket/zsys_linux_riscv64.go | 59 + .../x/net/internal/socket/zsys_linux_s390x.go | 58 + .../x/net/internal/socket/zsys_netbsd_386.go | 57 + .../net/internal/socket/zsys_netbsd_amd64.go | 60 + .../x/net/internal/socket/zsys_netbsd_arm.go | 57 + .../net/internal/socket/zsys_netbsd_arm64.go | 59 + .../x/net/internal/socket/zsys_openbsd_386.go | 51 + .../net/internal/socket/zsys_openbsd_amd64.go | 53 + .../x/net/internal/socket/zsys_openbsd_arm.go | 51 + .../net/internal/socket/zsys_openbsd_arm64.go | 53 + .../net/internal/socket/zsys_solaris_amd64.go | 52 + vendor/golang.org/x/net/ipv4/batch.go | 194 + vendor/golang.org/x/net/ipv4/control.go | 144 + vendor/golang.org/x/net/ipv4/control_bsd.go | 41 + .../golang.org/x/net/ipv4/control_pktinfo.go | 39 + vendor/golang.org/x/net/ipv4/control_stub.go | 13 + vendor/golang.org/x/net/ipv4/control_unix.go | 73 + .../golang.org/x/net/ipv4/control_windows.go | 12 + vendor/golang.org/x/net/ipv4/dgramopt.go | 264 + vendor/golang.org/x/net/ipv4/doc.go | 
244 + vendor/golang.org/x/net/ipv4/endpoint.go | 186 + vendor/golang.org/x/net/ipv4/genericopt.go | 55 + vendor/golang.org/x/net/ipv4/header.go | 172 + vendor/golang.org/x/net/ipv4/helper.go | 77 + vendor/golang.org/x/net/ipv4/iana.go | 38 + vendor/golang.org/x/net/ipv4/icmp.go | 57 + vendor/golang.org/x/net/ipv4/icmp_linux.go | 25 + vendor/golang.org/x/net/ipv4/icmp_stub.go | 25 + vendor/golang.org/x/net/ipv4/packet.go | 117 + vendor/golang.org/x/net/ipv4/payload.go | 23 + vendor/golang.org/x/net/ipv4/payload_cmsg.go | 84 + .../golang.org/x/net/ipv4/payload_nocmsg.go | 39 + vendor/golang.org/x/net/ipv4/sockopt.go | 44 + vendor/golang.org/x/net/ipv4/sockopt_posix.go | 71 + vendor/golang.org/x/net/ipv4/sockopt_stub.go | 42 + vendor/golang.org/x/net/ipv4/sys_aix.go | 38 + vendor/golang.org/x/net/ipv4/sys_asmreq.go | 122 + .../golang.org/x/net/ipv4/sys_asmreq_stub.go | 25 + vendor/golang.org/x/net/ipv4/sys_asmreqn.go | 42 + .../golang.org/x/net/ipv4/sys_asmreqn_stub.go | 21 + vendor/golang.org/x/net/ipv4/sys_bpf.go | 24 + vendor/golang.org/x/net/ipv4/sys_bpf_stub.go | 16 + vendor/golang.org/x/net/ipv4/sys_bsd.go | 37 + vendor/golang.org/x/net/ipv4/sys_darwin.go | 65 + vendor/golang.org/x/net/ipv4/sys_dragonfly.go | 35 + vendor/golang.org/x/net/ipv4/sys_freebsd.go | 76 + vendor/golang.org/x/net/ipv4/sys_linux.go | 60 + vendor/golang.org/x/net/ipv4/sys_solaris.go | 57 + vendor/golang.org/x/net/ipv4/sys_ssmreq.go | 52 + .../golang.org/x/net/ipv4/sys_ssmreq_stub.go | 21 + vendor/golang.org/x/net/ipv4/sys_stub.go | 13 + vendor/golang.org/x/net/ipv4/sys_windows.go | 67 + .../golang.org/x/net/ipv4/zsys_aix_ppc64.go | 33 + vendor/golang.org/x/net/ipv4/zsys_darwin.go | 99 + .../golang.org/x/net/ipv4/zsys_dragonfly.go | 31 + .../golang.org/x/net/ipv4/zsys_freebsd_386.go | 93 + .../x/net/ipv4/zsys_freebsd_amd64.go | 95 + .../golang.org/x/net/ipv4/zsys_freebsd_arm.go | 95 + .../x/net/ipv4/zsys_freebsd_arm64.go | 93 + .../golang.org/x/net/ipv4/zsys_linux_386.go | 130 + .../golang.org/x/net/ipv4/zsys_linux_amd64.go | 132 + .../golang.org/x/net/ipv4/zsys_linux_arm.go | 130 + .../golang.org/x/net/ipv4/zsys_linux_arm64.go | 132 + .../golang.org/x/net/ipv4/zsys_linux_mips.go | 130 + .../x/net/ipv4/zsys_linux_mips64.go | 132 + .../x/net/ipv4/zsys_linux_mips64le.go | 132 + .../x/net/ipv4/zsys_linux_mipsle.go | 130 + .../golang.org/x/net/ipv4/zsys_linux_ppc.go | 130 + .../golang.org/x/net/ipv4/zsys_linux_ppc64.go | 132 + .../x/net/ipv4/zsys_linux_ppc64le.go | 132 + .../x/net/ipv4/zsys_linux_riscv64.go | 134 + .../golang.org/x/net/ipv4/zsys_linux_s390x.go | 132 + vendor/golang.org/x/net/ipv4/zsys_netbsd.go | 30 + vendor/golang.org/x/net/ipv4/zsys_openbsd.go | 30 + vendor/golang.org/x/net/ipv4/zsys_solaris.go | 100 + vendor/golang.org/x/net/ipv6/batch.go | 116 + vendor/golang.org/x/net/ipv6/control.go | 187 + .../x/net/ipv6/control_rfc2292_unix.go | 48 + .../x/net/ipv6/control_rfc3542_unix.go | 94 + vendor/golang.org/x/net/ipv6/control_stub.go | 13 + vendor/golang.org/x/net/ipv6/control_unix.go | 55 + .../golang.org/x/net/ipv6/control_windows.go | 12 + vendor/golang.org/x/net/ipv6/dgramopt.go | 301 + vendor/golang.org/x/net/ipv6/doc.go | 243 + vendor/golang.org/x/net/ipv6/endpoint.go | 127 + vendor/golang.org/x/net/ipv6/genericopt.go | 56 + vendor/golang.org/x/net/ipv6/header.go | 55 + vendor/golang.org/x/net/ipv6/helper.go | 58 + vendor/golang.org/x/net/ipv6/iana.go | 86 + vendor/golang.org/x/net/ipv6/icmp.go | 60 + vendor/golang.org/x/net/ipv6/icmp_bsd.go | 29 + vendor/golang.org/x/net/ipv6/icmp_linux.go | 27 + 
vendor/golang.org/x/net/ipv6/icmp_solaris.go | 27 + vendor/golang.org/x/net/ipv6/icmp_stub.go | 23 + vendor/golang.org/x/net/ipv6/icmp_windows.go | 22 + vendor/golang.org/x/net/ipv6/payload.go | 23 + vendor/golang.org/x/net/ipv6/payload_cmsg.go | 70 + .../golang.org/x/net/ipv6/payload_nocmsg.go | 38 + vendor/golang.org/x/net/ipv6/sockopt.go | 43 + vendor/golang.org/x/net/ipv6/sockopt_posix.go | 89 + vendor/golang.org/x/net/ipv6/sockopt_stub.go | 46 + vendor/golang.org/x/net/ipv6/sys_aix.go | 77 + vendor/golang.org/x/net/ipv6/sys_asmreq.go | 24 + .../golang.org/x/net/ipv6/sys_asmreq_stub.go | 17 + vendor/golang.org/x/net/ipv6/sys_bpf.go | 24 + vendor/golang.org/x/net/ipv6/sys_bpf_stub.go | 16 + vendor/golang.org/x/net/ipv6/sys_bsd.go | 57 + vendor/golang.org/x/net/ipv6/sys_darwin.go | 78 + vendor/golang.org/x/net/ipv6/sys_freebsd.go | 92 + vendor/golang.org/x/net/ipv6/sys_linux.go | 75 + vendor/golang.org/x/net/ipv6/sys_solaris.go | 74 + vendor/golang.org/x/net/ipv6/sys_ssmreq.go | 54 + .../golang.org/x/net/ipv6/sys_ssmreq_stub.go | 21 + vendor/golang.org/x/net/ipv6/sys_stub.go | 13 + vendor/golang.org/x/net/ipv6/sys_windows.go | 75 + .../golang.org/x/net/ipv6/zsys_aix_ppc64.go | 103 + vendor/golang.org/x/net/ipv6/zsys_darwin.go | 131 + .../golang.org/x/net/ipv6/zsys_dragonfly.go | 88 + .../golang.org/x/net/ipv6/zsys_freebsd_386.go | 122 + .../x/net/ipv6/zsys_freebsd_amd64.go | 124 + .../golang.org/x/net/ipv6/zsys_freebsd_arm.go | 124 + .../x/net/ipv6/zsys_freebsd_arm64.go | 122 + .../golang.org/x/net/ipv6/zsys_linux_386.go | 152 + .../golang.org/x/net/ipv6/zsys_linux_amd64.go | 154 + .../golang.org/x/net/ipv6/zsys_linux_arm.go | 152 + .../golang.org/x/net/ipv6/zsys_linux_arm64.go | 154 + .../golang.org/x/net/ipv6/zsys_linux_mips.go | 152 + .../x/net/ipv6/zsys_linux_mips64.go | 154 + .../x/net/ipv6/zsys_linux_mips64le.go | 154 + .../x/net/ipv6/zsys_linux_mipsle.go | 152 + .../golang.org/x/net/ipv6/zsys_linux_ppc.go | 152 + .../golang.org/x/net/ipv6/zsys_linux_ppc64.go | 154 + .../x/net/ipv6/zsys_linux_ppc64le.go | 154 + .../x/net/ipv6/zsys_linux_riscv64.go | 156 + .../golang.org/x/net/ipv6/zsys_linux_s390x.go | 154 + vendor/golang.org/x/net/ipv6/zsys_netbsd.go | 84 + vendor/golang.org/x/net/ipv6/zsys_openbsd.go | 93 + vendor/golang.org/x/net/ipv6/zsys_solaris.go | 131 + vendor/golang.org/x/net/publicsuffix/list.go | 181 + vendor/golang.org/x/net/publicsuffix/table.go | 10150 +++ .../x/text/encoding/charmap/charmap.go | 249 + .../x/text/encoding/charmap/tables.go | 7410 +++ vendor/gopkg.in/resty.v1/.gitignore | 28 + vendor/gopkg.in/resty.v1/.travis.yml | 28 + vendor/gopkg.in/resty.v1/BUILD.bazel | 36 + vendor/gopkg.in/resty.v1/LICENSE | 21 + vendor/gopkg.in/resty.v1/README.md | 741 + vendor/gopkg.in/resty.v1/WORKSPACE | 27 + vendor/gopkg.in/resty.v1/client.go | 926 + vendor/gopkg.in/resty.v1/default.go | 327 + vendor/gopkg.in/resty.v1/go.mod | 3 + vendor/gopkg.in/resty.v1/middleware.go | 469 + vendor/gopkg.in/resty.v1/redirect.go | 99 + vendor/gopkg.in/resty.v1/request.go | 586 + vendor/gopkg.in/resty.v1/request16.go | 63 + vendor/gopkg.in/resty.v1/request17.go | 96 + vendor/gopkg.in/resty.v1/response.go | 150 + vendor/gopkg.in/resty.v1/resty.go | 9 + vendor/gopkg.in/resty.v1/retry.go | 118 + vendor/gopkg.in/resty.v1/util.go | 281 + .../api/authentication/v1/generated.pb.go | 1204 +- vendor/k8s.io/api/authentication/v1/types.go | 2 +- .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 1 - .../apimachinery/pkg/api/errors/errors.go | 35 +- .../apimachinery/pkg/api/resource/OWNERS | 2 - 
.../pkg/api/resource/generated.pb.go | 48 +- .../pkg/api/resource/generated.proto | 2 +- .../apimachinery/pkg/api/resource/math.go | 12 +- .../apimachinery/pkg/api/resource/quantity.go | 35 +- .../pkg/api/resource/quantity_proto.go | 34 +- .../apimachinery/pkg/apis/meta/v1/OWNERS | 1 - .../pkg/apis/meta/v1/controller_ref.go | 19 +- .../pkg/apis/meta/v1/conversion.go | 148 +- .../apimachinery/pkg/apis/meta/v1/deepcopy.go | 46 + .../apimachinery/pkg/apis/meta/v1/doc.go | 1 + .../apimachinery/pkg/apis/meta/v1/duration.go | 5 + .../pkg/apis/meta/v1/generated.pb.go | 5789 +- .../pkg/apis/meta/v1/generated.proto | 170 +- .../apimachinery/pkg/apis/meta/v1/helpers.go | 29 +- .../apimachinery/pkg/apis/meta/v1/meta.go | 10 +- .../pkg/apis/meta/v1/micro_time.go | 31 +- .../pkg/apis/meta/v1/micro_time_proto.go | 8 + .../apimachinery/pkg/apis/meta/v1/register.go | 67 +- .../apimachinery/pkg/apis/meta/v1/time.go | 20 +- .../pkg/apis/meta/v1/time_proto.go | 12 +- .../apimachinery/pkg/apis/meta/v1/types.go | 311 +- .../meta/v1/types_swagger_doc_generated.go | 160 +- .../apis/meta/v1/zz_generated.conversion.go | 523 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 241 +- .../apimachinery/pkg/conversion/converter.go | 155 +- .../pkg/conversion/queryparams/convert.go | 4 - .../k8s.io/apimachinery/pkg/labels/labels.go | 2 +- .../apimachinery/pkg/labels/selector.go | 49 +- .../k8s.io/apimachinery/pkg/runtime/codec.go | 92 +- .../apimachinery/pkg/runtime/conversion.go | 129 +- .../apimachinery/pkg/runtime/converter.go | 124 +- .../apimachinery/pkg/runtime/embedded.go | 15 +- .../k8s.io/apimachinery/pkg/runtime/error.go | 29 + .../apimachinery/pkg/runtime/generated.pb.go | 408 +- .../apimachinery/pkg/runtime/interfaces.go | 88 + .../k8s.io/apimachinery/pkg/runtime/mapper.go | 98 + .../apimachinery/pkg/runtime/negotiate.go | 146 + .../apimachinery/pkg/runtime/register.go | 30 - .../pkg/runtime/schema/generated.pb.go | 22 +- .../pkg/runtime/schema/group_version.go | 14 + .../k8s.io/apimachinery/pkg/runtime/scheme.go | 45 +- .../k8s.io/apimachinery/pkg/runtime/types.go | 15 +- .../apimachinery/pkg/runtime/types_proto.go | 90 +- .../pkg/runtime/zz_generated.deepcopy.go | 33 - .../apimachinery/pkg/util/errors/errors.go | 32 +- .../pkg/util/intstr/generated.pb.go | 222 +- .../apimachinery/pkg/util/intstr/intstr.go | 9 +- .../k8s.io/apimachinery/pkg/util/json/json.go | 28 +- .../pkg/util/naming/from_stack.go | 2 +- .../k8s.io/apimachinery/pkg/util/net/http.go | 75 +- .../apimachinery/pkg/util/net/interface.go | 73 +- .../k8s.io/apimachinery/pkg/util/net/util.go | 17 + .../apimachinery/pkg/util/runtime/runtime.go | 17 +- .../k8s.io/apimachinery/pkg/util/sets/byte.go | 6 +- .../k8s.io/apimachinery/pkg/util/sets/int.go | 6 +- .../apimachinery/pkg/util/sets/int32.go | 205 + .../apimachinery/pkg/util/sets/int64.go | 6 +- .../apimachinery/pkg/util/sets/string.go | 6 +- .../pkg/util/validation/field/errors.go | 15 +- .../pkg/util/validation/validation.go | 84 +- .../apimachinery/pkg/watch/streamwatcher.go | 2 +- vendor/k8s.io/apimachinery/pkg/watch/watch.go | 15 +- vendor/k8s.io/klog/.travis.yml | 3 +- vendor/k8s.io/klog/README.md | 24 +- vendor/k8s.io/klog/SECURITY_CONTACTS | 4 +- vendor/k8s.io/klog/go.mod | 5 + vendor/k8s.io/klog/go.sum | 2 + vendor/k8s.io/klog/klog.go | 173 +- vendor/k8s.io/klog/klog_file.go | 2 +- vendor/modules.txt | 120 +- .../structured-merge-diff/v3/LICENSE | 201 + .../v3/value/allocator.go | 203 + .../structured-merge-diff/v3/value/doc.go | 21 + .../structured-merge-diff/v3/value/fields.go | 97 + 
 .../v3/value/jsontagutil.go | 91 + .../structured-merge-diff/v3/value/list.go | 139 + .../v3/value/listreflect.go | 98 + .../v3/value/listunstructured.go | 74 + .../structured-merge-diff/v3/value/map.go | 270 + .../v3/value/mapreflect.go | 209 + .../v3/value/mapunstructured.go | 190 + .../v3/value/reflectcache.go | 463 + .../structured-merge-diff/v3/value/scalar.go | 50 + .../v3/value/structreflect.go | 208 + .../structured-merge-diff/v3/value/value.go | 347 + .../v3/value/valuereflect.go | 294 + .../v3/value/valueunstructured.go | 178 +
 1024 files changed, 357142 insertions(+), 4949 deletions(-)
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/applicationgateways.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressroutecircuitauthorizations.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressroutecircuitpeerings.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressroutecircuits.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressrouteserviceproviders.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/interfacesgroup.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/loadbalancers.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/localnetworkgateways.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/publicipaddresses.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/routes.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/routetables.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/securitygroups.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/securityrules.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/subnets.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/usages.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/virtualnetworkgatewayconnections.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/virtualnetworkgateways.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/virtualnetworks.go create mode 100644 vendor/github.com/Azure/go-autorest/.gitignore create mode 100644 vendor/github.com/Azure/go-autorest/CHANGELOG.md create mode 100644 vendor/github.com/Azure/go-autorest/GNUmakefile create mode 100644 vendor/github.com/Azure/go-autorest/Gopkg.lock create mode 100644 vendor/github.com/Azure/go-autorest/Gopkg.toml create mode 100644 vendor/github.com/Azure/go-autorest/LICENSE create mode 
100644 vendor/github.com/Azure/go-autorest/README.md create mode 100644 vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/azure-pipelines.yml create mode 100644 vendor/github.com/Azure/go-autorest/doc.go create mode 100644 vendor/github.com/Azure/go-autorest/logger/go.sum create mode 100644 vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go create mode 100644 vendor/github.com/Azure/go-autorest/tracing/go.sum create mode 100644 vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go create mode 100644 vendor/github.com/denverdino/aliyungo/LICENSE.txt create mode 100644 vendor/github.com/denverdino/aliyungo/common/client.go create mode 100644 vendor/github.com/denverdino/aliyungo/common/endpoint.go create mode 100644 vendor/github.com/denverdino/aliyungo/common/endpoints.xml create mode 100644 vendor/github.com/denverdino/aliyungo/common/regions.go create mode 100644 vendor/github.com/denverdino/aliyungo/common/request.go create mode 100644 vendor/github.com/denverdino/aliyungo/common/types.go create mode 100644 vendor/github.com/denverdino/aliyungo/common/version.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/client.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/disks.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/forward_entry.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/images.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/instance_types.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/instances.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/monitoring.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/nat_gateway.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/networks.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/regions.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/route_tables.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/router_interface.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/security_groups.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/snapshots.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/snat_entry.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/ssh_key_pair.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/tags.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/vpcs.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/vrouters.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/vswitches.go create mode 100644 vendor/github.com/denverdino/aliyungo/ecs/zones.go create mode 100644 vendor/github.com/denverdino/aliyungo/util/attempt.go create mode 100644 vendor/github.com/denverdino/aliyungo/util/encoding.go create mode 100644 vendor/github.com/denverdino/aliyungo/util/iso6801.go create mode 100644 vendor/github.com/denverdino/aliyungo/util/signature.go create mode 100644 vendor/github.com/denverdino/aliyungo/util/util.go create mode 100644 vendor/github.com/digitalocean/godo/.gitignore create mode 100644 vendor/github.com/digitalocean/godo/.travis.yml create mode 100644 vendor/github.com/digitalocean/godo/CHANGELOG.md create mode 100644 vendor/github.com/digitalocean/godo/CONTRIBUTING.md create mode 100644 vendor/github.com/digitalocean/godo/LICENSE.txt create mode 100644 vendor/github.com/digitalocean/godo/README.md create mode 100644 
vendor/github.com/digitalocean/godo/account.go create mode 100644 vendor/github.com/digitalocean/godo/action.go create mode 100644 vendor/github.com/digitalocean/godo/cdn.go create mode 100644 vendor/github.com/digitalocean/godo/certificates.go create mode 100644 vendor/github.com/digitalocean/godo/doc.go create mode 100644 vendor/github.com/digitalocean/godo/domains.go create mode 100644 vendor/github.com/digitalocean/godo/droplet_actions.go create mode 100644 vendor/github.com/digitalocean/godo/droplets.go create mode 100644 vendor/github.com/digitalocean/godo/errors.go create mode 100644 vendor/github.com/digitalocean/godo/firewalls.go create mode 100644 vendor/github.com/digitalocean/godo/floating_ips.go create mode 100644 vendor/github.com/digitalocean/godo/floating_ips_actions.go create mode 100644 vendor/github.com/digitalocean/godo/godo.go create mode 100644 vendor/github.com/digitalocean/godo/image_actions.go create mode 100644 vendor/github.com/digitalocean/godo/images.go create mode 100644 vendor/github.com/digitalocean/godo/keys.go create mode 100644 vendor/github.com/digitalocean/godo/kubernetes.go create mode 100644 vendor/github.com/digitalocean/godo/links.go create mode 100644 vendor/github.com/digitalocean/godo/load_balancers.go create mode 100644 vendor/github.com/digitalocean/godo/projects.go create mode 100644 vendor/github.com/digitalocean/godo/regions.go create mode 100644 vendor/github.com/digitalocean/godo/sizes.go create mode 100644 vendor/github.com/digitalocean/godo/snapshots.go create mode 100644 vendor/github.com/digitalocean/godo/storage.go create mode 100644 vendor/github.com/digitalocean/godo/storage_actions.go create mode 100644 vendor/github.com/digitalocean/godo/strings.go create mode 100644 vendor/github.com/digitalocean/godo/tags.go create mode 100644 vendor/github.com/digitalocean/godo/timestamp.go create mode 100644 vendor/github.com/gophercloud/gophercloud/.gitignore create mode 100644 vendor/github.com/gophercloud/gophercloud/.travis.yml create mode 100644 vendor/github.com/gophercloud/gophercloud/.zuul.yaml create mode 100644 vendor/github.com/gophercloud/gophercloud/CHANGELOG.md create mode 100644 vendor/github.com/gophercloud/gophercloud/LICENSE create mode 100644 vendor/github.com/gophercloud/gophercloud/README.md create mode 100644 vendor/github.com/gophercloud/gophercloud/auth_options.go create mode 100644 vendor/github.com/gophercloud/gophercloud/auth_result.go create mode 100644 vendor/github.com/gophercloud/gophercloud/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/endpoint_search.go create mode 100644 vendor/github.com/gophercloud/gophercloud/errors.go create mode 100644 vendor/github.com/gophercloud/gophercloud/go.mod create mode 100644 vendor/github.com/gophercloud/gophercloud/go.sum create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/client.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go create mode 100644 
vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/microversions.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/errors.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go create mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go create mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/http.go create mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/linked.go create mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/marker.go create mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/pager.go create mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/pkg.go create mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/single.go create mode 100644 vendor/github.com/gophercloud/gophercloud/params.go create mode 100644 vendor/github.com/gophercloud/gophercloud/provider_client.go create mode 100644 vendor/github.com/gophercloud/gophercloud/results.go create mode 100644 vendor/github.com/gophercloud/gophercloud/service_client.go create mode 100644 vendor/github.com/gophercloud/gophercloud/util.go create mode 100644 vendor/github.com/hashicorp/go-discover/.gitignore create 
mode 100644 vendor/github.com/hashicorp/go-discover/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-discover/LICENSE create mode 100644 vendor/github.com/hashicorp/go-discover/README.md create mode 100644 vendor/github.com/hashicorp/go-discover/config.go create mode 100644 vendor/github.com/hashicorp/go-discover/discover.go create mode 100644 vendor/github.com/hashicorp/go-discover/go.mod create mode 100644 vendor/github.com/hashicorp/go-discover/go.sum create mode 100644 vendor/github.com/hashicorp/go-discover/provider/aliyun/aliyun_discover.go create mode 100644 vendor/github.com/hashicorp/go-discover/provider/aws/aws_discover.go create mode 100644 vendor/github.com/hashicorp/go-discover/provider/azure/azure_discover.go create mode 100644 vendor/github.com/hashicorp/go-discover/provider/digitalocean/digitalocean_discover.go create mode 100644 vendor/github.com/hashicorp/go-discover/provider/gce/gce_discover.go create mode 100644 vendor/github.com/hashicorp/go-discover/provider/linode/linode_discover.go create mode 100644 vendor/github.com/hashicorp/go-discover/provider/mdns/mdns_provider.go create mode 100644 vendor/github.com/hashicorp/go-discover/provider/os/os_discover.go create mode 100644 vendor/github.com/hashicorp/go-discover/provider/packet/packet_discover.go create mode 100644 vendor/github.com/hashicorp/go-discover/provider/scaleway/scaleway_discover.go create mode 100644 vendor/github.com/hashicorp/go-discover/provider/softlayer/softlayer_discover.go create mode 100644 vendor/github.com/hashicorp/go-discover/provider/tencentcloud/tencentcloud_discover.go create mode 100644 vendor/github.com/hashicorp/go-discover/provider/triton/triton_discover.go create mode 100644 vendor/github.com/hashicorp/go-discover/provider/vsphere/vsphere_discover.go create mode 100644 vendor/github.com/hashicorp/mdns/.gitignore create mode 100644 vendor/github.com/hashicorp/mdns/LICENSE create mode 100644 vendor/github.com/hashicorp/mdns/README.md create mode 100644 vendor/github.com/hashicorp/mdns/client.go create mode 100644 vendor/github.com/hashicorp/mdns/go.mod create mode 100644 vendor/github.com/hashicorp/mdns/go.sum create mode 100644 vendor/github.com/hashicorp/mdns/server.go create mode 100644 vendor/github.com/hashicorp/mdns/zone.go create mode 100644 vendor/github.com/hashicorp/vic/LICENSE create mode 100644 vendor/github.com/hashicorp/vic/pkg/vsphere/tags/categories.go create mode 100644 vendor/github.com/hashicorp/vic/pkg/vsphere/tags/rest_client.go create mode 100644 vendor/github.com/hashicorp/vic/pkg/vsphere/tags/tag_association.go create mode 100644 vendor/github.com/hashicorp/vic/pkg/vsphere/tags/tags.go create mode 100644 vendor/github.com/joyent/triton-go/compute/client.go create mode 100644 vendor/github.com/joyent/triton-go/compute/datacenters.go create mode 100644 vendor/github.com/joyent/triton-go/compute/images.go create mode 100644 vendor/github.com/joyent/triton-go/compute/instances.go create mode 100644 vendor/github.com/joyent/triton-go/compute/packages.go create mode 100644 vendor/github.com/joyent/triton-go/compute/ping.go create mode 100644 vendor/github.com/joyent/triton-go/compute/services.go create mode 100644 vendor/github.com/joyent/triton-go/compute/snapshots.go create mode 100644 vendor/github.com/joyent/triton-go/compute/volumes.go create mode 100644 vendor/github.com/linode/linodego/.gitignore create mode 100644 vendor/github.com/linode/linodego/.travis.yml create mode 100644 vendor/github.com/linode/linodego/API_SUPPORT.md create mode 100644 
vendor/github.com/linode/linodego/CHANGELOG.md create mode 100644 vendor/github.com/linode/linodego/Gopkg.lock create mode 100644 vendor/github.com/linode/linodego/Gopkg.toml create mode 100644 vendor/github.com/linode/linodego/LICENSE create mode 100644 vendor/github.com/linode/linodego/Makefile create mode 100644 vendor/github.com/linode/linodego/README.md create mode 100644 vendor/github.com/linode/linodego/account.go create mode 100644 vendor/github.com/linode/linodego/account_events.go create mode 100644 vendor/github.com/linode/linodego/account_invoices.go create mode 100644 vendor/github.com/linode/linodego/account_notifications.go create mode 100644 vendor/github.com/linode/linodego/account_users.go create mode 100644 vendor/github.com/linode/linodego/client.go create mode 100644 vendor/github.com/linode/linodego/domain_records.go create mode 100644 vendor/github.com/linode/linodego/domains.go create mode 100644 vendor/github.com/linode/linodego/env.sample create mode 100644 vendor/github.com/linode/linodego/errors.go create mode 100644 vendor/github.com/linode/linodego/images.go create mode 100644 vendor/github.com/linode/linodego/instance_configs.go create mode 100644 vendor/github.com/linode/linodego/instance_disks.go create mode 100644 vendor/github.com/linode/linodego/instance_ips.go create mode 100644 vendor/github.com/linode/linodego/instance_snapshots.go create mode 100644 vendor/github.com/linode/linodego/instance_volumes.go create mode 100644 vendor/github.com/linode/linodego/instances.go create mode 100644 vendor/github.com/linode/linodego/kernels.go create mode 100644 vendor/github.com/linode/linodego/longview.go create mode 100644 vendor/github.com/linode/linodego/longview_subscriptions.go create mode 100644 vendor/github.com/linode/linodego/managed.go create mode 100644 vendor/github.com/linode/linodego/network_ips.go create mode 100644 vendor/github.com/linode/linodego/network_pools.go create mode 100644 vendor/github.com/linode/linodego/network_ranges.go create mode 100644 vendor/github.com/linode/linodego/nodebalancer.go create mode 100644 vendor/github.com/linode/linodego/nodebalancer_config_nodes.go create mode 100644 vendor/github.com/linode/linodego/nodebalancer_configs.go create mode 100644 vendor/github.com/linode/linodego/pagination.go create mode 100644 vendor/github.com/linode/linodego/profile.go create mode 100644 vendor/github.com/linode/linodego/profile_sshkeys.go create mode 100644 vendor/github.com/linode/linodego/profile_tokens.go create mode 100644 vendor/github.com/linode/linodego/regions.go create mode 100644 vendor/github.com/linode/linodego/resources.go create mode 100644 vendor/github.com/linode/linodego/stackscripts.go create mode 100644 vendor/github.com/linode/linodego/support.go create mode 100644 vendor/github.com/linode/linodego/tags.go create mode 100644 vendor/github.com/linode/linodego/types.go create mode 100644 vendor/github.com/linode/linodego/util.go create mode 100644 vendor/github.com/linode/linodego/volumes.go create mode 100644 vendor/github.com/linode/linodego/waitfor.go create mode 100644 vendor/github.com/miekg/dns/.codecov.yml create mode 100644 vendor/github.com/miekg/dns/.gitignore create mode 100644 vendor/github.com/miekg/dns/.travis.yml create mode 100644 vendor/github.com/miekg/dns/AUTHORS create mode 100644 vendor/github.com/miekg/dns/CONTRIBUTORS create mode 100644 vendor/github.com/miekg/dns/COPYRIGHT create mode 100644 vendor/github.com/miekg/dns/Gopkg.lock create mode 100644 vendor/github.com/miekg/dns/Gopkg.toml 
create mode 100644 vendor/github.com/miekg/dns/LICENSE create mode 100644 vendor/github.com/miekg/dns/Makefile.fuzz create mode 100644 vendor/github.com/miekg/dns/Makefile.release create mode 100644 vendor/github.com/miekg/dns/README.md create mode 100644 vendor/github.com/miekg/dns/acceptfunc.go create mode 100644 vendor/github.com/miekg/dns/client.go create mode 100644 vendor/github.com/miekg/dns/clientconfig.go create mode 100644 vendor/github.com/miekg/dns/dane.go create mode 100644 vendor/github.com/miekg/dns/defaults.go create mode 100644 vendor/github.com/miekg/dns/dns.go create mode 100644 vendor/github.com/miekg/dns/dnssec.go create mode 100644 vendor/github.com/miekg/dns/dnssec_keygen.go create mode 100644 vendor/github.com/miekg/dns/dnssec_keyscan.go create mode 100644 vendor/github.com/miekg/dns/dnssec_privkey.go create mode 100644 vendor/github.com/miekg/dns/doc.go create mode 100644 vendor/github.com/miekg/dns/duplicate.go create mode 100644 vendor/github.com/miekg/dns/edns.go create mode 100644 vendor/github.com/miekg/dns/format.go create mode 100644 vendor/github.com/miekg/dns/fuzz.go create mode 100644 vendor/github.com/miekg/dns/generate.go create mode 100644 vendor/github.com/miekg/dns/labels.go create mode 100644 vendor/github.com/miekg/dns/listen_go111.go create mode 100644 vendor/github.com/miekg/dns/listen_go_not111.go create mode 100644 vendor/github.com/miekg/dns/msg.go create mode 100644 vendor/github.com/miekg/dns/msg_helpers.go create mode 100644 vendor/github.com/miekg/dns/msg_truncate.go create mode 100644 vendor/github.com/miekg/dns/nsecx.go create mode 100644 vendor/github.com/miekg/dns/privaterr.go create mode 100644 vendor/github.com/miekg/dns/reverse.go create mode 100644 vendor/github.com/miekg/dns/sanitize.go create mode 100644 vendor/github.com/miekg/dns/scan.go create mode 100644 vendor/github.com/miekg/dns/scan_rr.go create mode 100644 vendor/github.com/miekg/dns/serve_mux.go create mode 100644 vendor/github.com/miekg/dns/server.go create mode 100644 vendor/github.com/miekg/dns/sig0.go create mode 100644 vendor/github.com/miekg/dns/singleinflight.go create mode 100644 vendor/github.com/miekg/dns/smimea.go create mode 100644 vendor/github.com/miekg/dns/tlsa.go create mode 100644 vendor/github.com/miekg/dns/tsig.go create mode 100644 vendor/github.com/miekg/dns/types.go create mode 100644 vendor/github.com/miekg/dns/udp.go create mode 100644 vendor/github.com/miekg/dns/udp_windows.go create mode 100644 vendor/github.com/miekg/dns/update.go create mode 100644 vendor/github.com/miekg/dns/version.go create mode 100644 vendor/github.com/miekg/dns/xfr.go create mode 100644 vendor/github.com/miekg/dns/zduplicate.go create mode 100644 vendor/github.com/miekg/dns/zmsg.go create mode 100644 vendor/github.com/miekg/dns/ztypes.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/LICENSE.md create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/README.md create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/api.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/availability.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/bootscript.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/container.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/dashboard.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/image.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/ip.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/organization.go create mode 100644 
vendor/github.com/nicolai86/scaleway-sdk/permissions.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/quota.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/security_group.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/security_group_rule.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/server.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/snapshot.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/tasks.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/tokens.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/user.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/user_data.go create mode 100644 vendor/github.com/nicolai86/scaleway-sdk/volume.go create mode 100644 vendor/github.com/packethost/packngo/.drone.yml create mode 100644 vendor/github.com/packethost/packngo/.gitignore create mode 100644 vendor/github.com/packethost/packngo/CHANGELOG.md create mode 100644 vendor/github.com/packethost/packngo/LICENSE.txt create mode 100644 vendor/github.com/packethost/packngo/README.md create mode 100644 vendor/github.com/packethost/packngo/billing_address.go create mode 100644 vendor/github.com/packethost/packngo/devices.go create mode 100644 vendor/github.com/packethost/packngo/email.go create mode 100644 vendor/github.com/packethost/packngo/facilities.go create mode 100644 vendor/github.com/packethost/packngo/ip.go create mode 100644 vendor/github.com/packethost/packngo/operatingsystems.go create mode 100644 vendor/github.com/packethost/packngo/organizations.go create mode 100644 vendor/github.com/packethost/packngo/packngo.go create mode 100644 vendor/github.com/packethost/packngo/payment_methods.go create mode 100644 vendor/github.com/packethost/packngo/plans.go create mode 100644 vendor/github.com/packethost/packngo/ports.go create mode 100644 vendor/github.com/packethost/packngo/projects.go create mode 100644 vendor/github.com/packethost/packngo/rate.go create mode 100644 vendor/github.com/packethost/packngo/spotmarket.go create mode 100644 vendor/github.com/packethost/packngo/sshkeys.go create mode 100644 vendor/github.com/packethost/packngo/timestamp.go create mode 100644 vendor/github.com/packethost/packngo/user.go create mode 100644 vendor/github.com/packethost/packngo/utils.go create mode 100644 vendor/github.com/packethost/packngo/virtualnetworks.go create mode 100644 vendor/github.com/packethost/packngo/volumes.go create mode 100644 vendor/github.com/renier/xmlrpc/.gitignore create mode 100644 vendor/github.com/renier/xmlrpc/LICENSE create mode 100644 vendor/github.com/renier/xmlrpc/README.md create mode 100644 vendor/github.com/renier/xmlrpc/client.go create mode 100644 vendor/github.com/renier/xmlrpc/decoder.go create mode 100644 vendor/github.com/renier/xmlrpc/encoder.go create mode 100644 vendor/github.com/renier/xmlrpc/request.go create mode 100644 vendor/github.com/renier/xmlrpc/response.go create mode 100644 vendor/github.com/renier/xmlrpc/test_server.rb create mode 100644 vendor/github.com/renier/xmlrpc/xmlrpc.go create mode 100644 vendor/github.com/softlayer/softlayer-go/LICENSE create mode 100644 vendor/github.com/softlayer/softlayer-go/config/config.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/abuse.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/account.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/auxiliary.go create mode 100644 
vendor/github.com/softlayer/softlayer-go/datatypes/billing.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/brand.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/business.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/catalyst.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/compliance.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/configuration.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/container.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/dns.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/email.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/entity.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/event.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/flexiblecredit.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/hardware.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/layout.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/legal.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/locale.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/location.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/marketplace.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/mcafee.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/metric.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/monitoring.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/network.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/notification.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/policy.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/product.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/provisioning.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/resource.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/sales.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/scale.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/search.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/security.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/service.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/softlayer.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/software.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/survey.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/tag.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/ticket.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/user.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/utility.go create mode 100644 vendor/github.com/softlayer/softlayer-go/datatypes/virtual.go create mode 100644 vendor/github.com/softlayer/softlayer-go/filter/filters.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/account.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/auxiliary.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/billing.go 
create mode 100644 vendor/github.com/softlayer/softlayer-go/services/brand.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/business.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/catalyst.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/compliance.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/configuration.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/dns.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/email.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/event.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/flexiblecredit.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/hardware.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/layout.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/locale.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/location.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/marketplace.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/metric.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/monitoring.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/network.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/notification.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/product.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/provisioning.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/resource.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/sales.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/scale.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/search.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/security.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/software.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/survey.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/tag.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/ticket.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/user.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/utility.go create mode 100644 vendor/github.com/softlayer/softlayer-go/services/virtual.go create mode 100644 vendor/github.com/softlayer/softlayer-go/session/rest.go create mode 100644 vendor/github.com/softlayer/softlayer-go/session/session.go create mode 100644 vendor/github.com/softlayer/softlayer-go/session/xmlrpc.go create mode 100644 vendor/github.com/softlayer/softlayer-go/sl/errors.go create mode 100644 vendor/github.com/softlayer/softlayer-go/sl/helpers.go create mode 100644 vendor/github.com/softlayer/softlayer-go/sl/options.go create mode 100644 vendor/github.com/softlayer/softlayer-go/sl/version.go create mode 100644 vendor/github.com/tencentcloud/tencentcloud-sdk-go/LICENSE create mode 100644 vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/client.go create mode 100644 vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/credentials.go create mode 100644 vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors/errors.go create mode 100644 
vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http/request.go create mode 100644 vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http/response.go create mode 100644 vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile/client_profile.go create mode 100644 vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile/http_profile.go create mode 100644 vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/sign.go create mode 100644 vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/types.go create mode 100644 vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312/client.go create mode 100644 vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312/models.go create mode 100644 vendor/github.com/vmware/govmomi/.gitignore create mode 100644 vendor/github.com/vmware/govmomi/.goreleaser.yml create mode 100644 vendor/github.com/vmware/govmomi/.mailmap create mode 100644 vendor/github.com/vmware/govmomi/.travis.yml create mode 100644 vendor/github.com/vmware/govmomi/CHANGELOG.md create mode 100644 vendor/github.com/vmware/govmomi/CONTRIBUTING.md create mode 100644 vendor/github.com/vmware/govmomi/CONTRIBUTORS create mode 100644 vendor/github.com/vmware/govmomi/Dockerfile create mode 100644 vendor/github.com/vmware/govmomi/Gopkg.lock create mode 100644 vendor/github.com/vmware/govmomi/Gopkg.toml create mode 100644 vendor/github.com/vmware/govmomi/LICENSE.txt create mode 100644 vendor/github.com/vmware/govmomi/Makefile create mode 100644 vendor/github.com/vmware/govmomi/README.md create mode 100644 vendor/github.com/vmware/govmomi/client.go create mode 100644 vendor/github.com/vmware/govmomi/find/doc.go create mode 100644 vendor/github.com/vmware/govmomi/find/error.go create mode 100644 vendor/github.com/vmware/govmomi/find/finder.go create mode 100644 vendor/github.com/vmware/govmomi/find/recurser.go create mode 100644 vendor/github.com/vmware/govmomi/list/lister.go create mode 100644 vendor/github.com/vmware/govmomi/list/path.go create mode 100644 vendor/github.com/vmware/govmomi/nfc/lease.go create mode 100644 vendor/github.com/vmware/govmomi/nfc/lease_updater.go create mode 100644 vendor/github.com/vmware/govmomi/object/authorization_manager.go create mode 100644 vendor/github.com/vmware/govmomi/object/authorization_manager_internal.go create mode 100644 vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go create mode 100644 vendor/github.com/vmware/govmomi/object/common.go create mode 100644 vendor/github.com/vmware/govmomi/object/compute_resource.go create mode 100644 vendor/github.com/vmware/govmomi/object/custom_fields_manager.go create mode 100644 vendor/github.com/vmware/govmomi/object/customization_spec_manager.go create mode 100644 vendor/github.com/vmware/govmomi/object/datacenter.go create mode 100644 vendor/github.com/vmware/govmomi/object/datastore.go create mode 100644 vendor/github.com/vmware/govmomi/object/datastore_file.go create mode 100644 vendor/github.com/vmware/govmomi/object/datastore_file_manager.go create mode 100644 vendor/github.com/vmware/govmomi/object/datastore_path.go create mode 100644 vendor/github.com/vmware/govmomi/object/diagnostic_log.go create mode 100644 vendor/github.com/vmware/govmomi/object/diagnostic_manager.go create mode 100644 vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go create mode 100644 
vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go create mode 100644 vendor/github.com/vmware/govmomi/object/extension_manager.go create mode 100644 vendor/github.com/vmware/govmomi/object/file_manager.go create mode 100644 vendor/github.com/vmware/govmomi/object/folder.go create mode 100644 vendor/github.com/vmware/govmomi/object/history_collector.go create mode 100644 vendor/github.com/vmware/govmomi/object/host_account_manager.go create mode 100644 vendor/github.com/vmware/govmomi/object/host_certificate_info.go create mode 100644 vendor/github.com/vmware/govmomi/object/host_certificate_manager.go create mode 100644 vendor/github.com/vmware/govmomi/object/host_config_manager.go create mode 100644 vendor/github.com/vmware/govmomi/object/host_datastore_browser.go create mode 100644 vendor/github.com/vmware/govmomi/object/host_datastore_system.go create mode 100644 vendor/github.com/vmware/govmomi/object/host_date_time_system.go create mode 100644 vendor/github.com/vmware/govmomi/object/host_firewall_system.go create mode 100644 vendor/github.com/vmware/govmomi/object/host_network_system.go create mode 100644 vendor/github.com/vmware/govmomi/object/host_service_system.go create mode 100644 vendor/github.com/vmware/govmomi/object/host_storage_system.go create mode 100644 vendor/github.com/vmware/govmomi/object/host_system.go create mode 100644 vendor/github.com/vmware/govmomi/object/host_virtual_nic_manager.go create mode 100644 vendor/github.com/vmware/govmomi/object/host_vsan_internal_system.go create mode 100644 vendor/github.com/vmware/govmomi/object/host_vsan_system.go create mode 100644 vendor/github.com/vmware/govmomi/object/namespace_manager.go create mode 100644 vendor/github.com/vmware/govmomi/object/network.go create mode 100644 vendor/github.com/vmware/govmomi/object/network_reference.go create mode 100644 vendor/github.com/vmware/govmomi/object/opaque_network.go create mode 100644 vendor/github.com/vmware/govmomi/object/option_manager.go create mode 100644 vendor/github.com/vmware/govmomi/object/resource_pool.go create mode 100644 vendor/github.com/vmware/govmomi/object/search_index.go create mode 100644 vendor/github.com/vmware/govmomi/object/storage_pod.go create mode 100644 vendor/github.com/vmware/govmomi/object/storage_resource_manager.go create mode 100644 vendor/github.com/vmware/govmomi/object/task.go create mode 100644 vendor/github.com/vmware/govmomi/object/types.go create mode 100644 vendor/github.com/vmware/govmomi/object/virtual_app.go create mode 100644 vendor/github.com/vmware/govmomi/object/virtual_device_list.go create mode 100644 vendor/github.com/vmware/govmomi/object/virtual_disk_manager.go create mode 100644 vendor/github.com/vmware/govmomi/object/virtual_disk_manager_internal.go create mode 100644 vendor/github.com/vmware/govmomi/object/virtual_machine.go create mode 100644 vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go create mode 100644 vendor/github.com/vmware/govmomi/property/collector.go create mode 100644 vendor/github.com/vmware/govmomi/property/filter.go create mode 100644 vendor/github.com/vmware/govmomi/property/wait.go create mode 100644 vendor/github.com/vmware/govmomi/session/keep_alive.go create mode 100644 vendor/github.com/vmware/govmomi/session/manager.go create mode 100644 vendor/github.com/vmware/govmomi/task/error.go create mode 100644 vendor/github.com/vmware/govmomi/task/wait.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/client.go create mode 100644 
vendor/github.com/vmware/govmomi/vim25/debug/debug.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/doc.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/methods/methods.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/methods/service_content.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/ancestors.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/entity.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/extra.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/mo.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/reference.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/registry.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/mo/type_info.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/aggregator.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/doc.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/prefix.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/reader.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/report.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/scale.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/sinker.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/progress/tee.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/retry.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/soap/client.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/soap/debug.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/soap/error.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/soap/soap.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/base.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/enum.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/fault.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/helpers.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/if.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/registry.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/types/types.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/LICENSE create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/extras.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/marshal.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/read.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go create mode 100644 vendor/github.com/vmware/govmomi/vim25/xml/xml.go create mode 100644 vendor/golang.org/x/net/bpf/asm.go create mode 100644 vendor/golang.org/x/net/bpf/constants.go create mode 100644 vendor/golang.org/x/net/bpf/doc.go create mode 100644 vendor/golang.org/x/net/bpf/instructions.go create mode 100644 vendor/golang.org/x/net/bpf/setter.go create mode 100644 vendor/golang.org/x/net/bpf/vm.go create mode 100644 vendor/golang.org/x/net/bpf/vm_instructions.go create mode 100644 vendor/golang.org/x/net/internal/iana/const.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go create 
mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/empty.s create mode 100644 vendor/golang.org/x/net/internal/socket/error_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/error_windows.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_32bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/iovec_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_bsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/norace.go create mode 100644 vendor/golang.org/x/net/internal/socket/race.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_msg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go create mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go create mode 100644 vendor/golang.org/x/net/internal/socket/socket.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_bsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_bsdvar.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_const_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_darwin.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_dragonfly.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_go1_11_darwin.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linkname.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_386.s create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go create mode 
100644 vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s create mode 100644 vendor/golang.org/x/net/internal/socket/sys_netbsd.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_posix.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_solaris.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s create mode 100644 vendor/golang.org/x/net/internal/socket/sys_stub.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_windows.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go create mode 100644 vendor/golang.org/x/net/ipv4/batch.go create mode 100644 vendor/golang.org/x/net/ipv4/control.go create mode 100644 vendor/golang.org/x/net/ipv4/control_bsd.go create mode 100644 vendor/golang.org/x/net/ipv4/control_pktinfo.go create mode 100644 vendor/golang.org/x/net/ipv4/control_stub.go create mode 100644 
vendor/golang.org/x/net/ipv4/control_unix.go create mode 100644 vendor/golang.org/x/net/ipv4/control_windows.go create mode 100644 vendor/golang.org/x/net/ipv4/dgramopt.go create mode 100644 vendor/golang.org/x/net/ipv4/doc.go create mode 100644 vendor/golang.org/x/net/ipv4/endpoint.go create mode 100644 vendor/golang.org/x/net/ipv4/genericopt.go create mode 100644 vendor/golang.org/x/net/ipv4/header.go create mode 100644 vendor/golang.org/x/net/ipv4/helper.go create mode 100644 vendor/golang.org/x/net/ipv4/iana.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp_linux.go create mode 100644 vendor/golang.org/x/net/ipv4/icmp_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/packet.go create mode 100644 vendor/golang.org/x/net/ipv4/payload.go create mode 100644 vendor/golang.org/x/net/ipv4/payload_cmsg.go create mode 100644 vendor/golang.org/x/net/ipv4/payload_nocmsg.go create mode 100644 vendor/golang.org/x/net/ipv4/sockopt.go create mode 100644 vendor/golang.org/x/net/ipv4/sockopt_posix.go create mode 100644 vendor/golang.org/x/net/ipv4/sockopt_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_aix.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreq.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreqn.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_bpf.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_bpf_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_bsd.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_linux.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_solaris.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_ssmreq.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_stub.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_windows.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_arm64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_386.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_arm.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go create mode 100644 
vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/batch.go create mode 100644 vendor/golang.org/x/net/ipv6/control.go create mode 100644 vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go create mode 100644 vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go create mode 100644 vendor/golang.org/x/net/ipv6/control_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/control_unix.go create mode 100644 vendor/golang.org/x/net/ipv6/control_windows.go create mode 100644 vendor/golang.org/x/net/ipv6/dgramopt.go create mode 100644 vendor/golang.org/x/net/ipv6/doc.go create mode 100644 vendor/golang.org/x/net/ipv6/endpoint.go create mode 100644 vendor/golang.org/x/net/ipv6/genericopt.go create mode 100644 vendor/golang.org/x/net/ipv6/header.go create mode 100644 vendor/golang.org/x/net/ipv6/helper.go create mode 100644 vendor/golang.org/x/net/ipv6/iana.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_bsd.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_linux.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/icmp_windows.go create mode 100644 vendor/golang.org/x/net/ipv6/payload.go create mode 100644 vendor/golang.org/x/net/ipv6/payload_cmsg.go create mode 100644 vendor/golang.org/x/net/ipv6/payload_nocmsg.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt_posix.go create mode 100644 vendor/golang.org/x/net/ipv6/sockopt_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_aix.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_asmreq.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_bpf.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_bpf_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_bsd.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_freebsd.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_linux.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_solaris.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_ssmreq.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_stub.go create mode 100644 vendor/golang.org/x/net/ipv6/sys_windows.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_darwin.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_dragonfly.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_arm64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_386.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_arm.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips.go create mode 100644 
vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_netbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_openbsd.go create mode 100644 vendor/golang.org/x/net/ipv6/zsys_solaris.go create mode 100644 vendor/golang.org/x/net/publicsuffix/list.go create mode 100644 vendor/golang.org/x/net/publicsuffix/table.go create mode 100644 vendor/golang.org/x/text/encoding/charmap/charmap.go create mode 100644 vendor/golang.org/x/text/encoding/charmap/tables.go create mode 100644 vendor/gopkg.in/resty.v1/.gitignore create mode 100644 vendor/gopkg.in/resty.v1/.travis.yml create mode 100644 vendor/gopkg.in/resty.v1/BUILD.bazel create mode 100644 vendor/gopkg.in/resty.v1/LICENSE create mode 100644 vendor/gopkg.in/resty.v1/README.md create mode 100644 vendor/gopkg.in/resty.v1/WORKSPACE create mode 100644 vendor/gopkg.in/resty.v1/client.go create mode 100644 vendor/gopkg.in/resty.v1/default.go create mode 100644 vendor/gopkg.in/resty.v1/go.mod create mode 100644 vendor/gopkg.in/resty.v1/middleware.go create mode 100644 vendor/gopkg.in/resty.v1/redirect.go create mode 100644 vendor/gopkg.in/resty.v1/request.go create mode 100644 vendor/gopkg.in/resty.v1/request16.go create mode 100644 vendor/gopkg.in/resty.v1/request17.go create mode 100644 vendor/gopkg.in/resty.v1/response.go create mode 100644 vendor/gopkg.in/resty.v1/resty.go create mode 100644 vendor/gopkg.in/resty.v1/retry.go create mode 100644 vendor/gopkg.in/resty.v1/util.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/mapper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int32.go create mode 100644 vendor/k8s.io/klog/go.mod create mode 100644 vendor/k8s.io/klog/go.sum create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go create mode 100644 
vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go diff --git a/.circleci/config.yml b/.circleci/config.yml index fb3755bd71c3..108ce5bb7d0b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1812,7 +1812,7 @@ jobs: test-go: docker: - image: circleci/golang:1.14.7-stretch - resource_class: large + resource_class: xlarge working_directory: /go/src/github.com/hashicorp/vault parallelism: 8 steps: diff --git a/.circleci/config/jobs/test-go.yml b/.circleci/config/jobs/test-go.yml index 8116234a20f0..a7f215262d28 100644 --- a/.circleci/config/jobs/test-go.yml +++ b/.circleci/config/jobs/test-go.yml @@ -1,4 +1,4 @@ -executor: docker-env-large +executor: docker-env-xlarge parallelism: 8 steps: - check-branch-name diff --git a/.gitignore b/.gitignore index cd3a0c83f362..e0af2f4a27af 100644 --- a/.gitignore +++ b/.gitignore @@ -57,7 +57,8 @@ Vagrantfile dist/* -tags +# ignore ctags +./tags # Editor backups *~ @@ -105,3 +106,6 @@ website/.cache website/assets/node_modules website/assets/public website/components/node_modules + +.buildcache/ +.releaser/ diff --git a/api/sys_raft.go b/api/sys_raft.go index 1a8aa1176b63..3f7e0a3b86a0 100644 --- a/api/sys_raft.go +++ b/api/sys_raft.go @@ -16,6 +16,7 @@ type RaftJoinResponse struct { // RaftJoinRequest represents the parameters consumed by the raft join API type RaftJoinRequest struct { + AutoJoin string `json:"auto_join"` LeaderAPIAddr string `json:"leader_api_addr"` LeaderCACert string `json:"leader_ca_cert"` LeaderClientCert string `json:"leader_client_cert"` diff --git a/command/operator_raft_join.go b/command/operator_raft_join.go index f3fab7ccd3f4..1bc6fb656289 100644 --- a/command/operator_raft_join.go +++ b/command/operator_raft_join.go @@ -27,13 +27,18 @@ func (c *OperatorRaftJoinCommand) Synopsis() string { func (c *OperatorRaftJoinCommand) Help() string { helpText := ` -Usage: vault operator raft join [options] +Usage: vault operator raft join [options] Join the current node as a peer to the Raft cluster by providing the address of the Raft leader node. $ vault operator raft join "http://127.0.0.2:8200" + Join the current node as a peer to the Raft cluster by providing cloud auto-join + configuration. + + $ vault operator raft join "provider=aws region=eu-west-1 ..." + TLS certificate data can also be consumed from a file on disk by prefixing with the "@" symbol. 
For example:
@@ -106,14 +111,14 @@ func (c *OperatorRaftJoinCommand) Run(args []string) int {
 		return 1
 	}
 
-	leaderAPIAddr := ""
+	leaderInfo := ""
 
 	args = f.Args()
 	switch len(args) {
 	case 0:
 		// No-op: This is acceptable if we're using raft for HA-only
 	case 1:
-		leaderAPIAddr = strings.TrimSpace(args[0])
+		leaderInfo = strings.TrimSpace(args[0])
 	default:
 		c.UI.Error(fmt.Sprintf("Too many arguments (expected 0-1, got %d)", len(args)))
 		return 1
@@ -143,14 +148,21 @@ func (c *OperatorRaftJoinCommand) Run(args []string) int {
 		return 2
 	}
 
-	resp, err := client.Sys().RaftJoin(&api.RaftJoinRequest{
-		LeaderAPIAddr:    leaderAPIAddr,
+	joinReq := &api.RaftJoinRequest{
 		LeaderCACert:     leaderCACert,
 		LeaderClientCert: leaderClientCert,
 		LeaderClientKey:  leaderClientKey,
 		Retry:            c.flagRetry,
 		NonVoter:         c.flagNonVoter,
-	})
+	}
+
+	if strings.Contains(leaderInfo, "provider=") {
+		joinReq.AutoJoin = leaderInfo
+	} else {
+		joinReq.LeaderAPIAddr = leaderInfo
+	}
+
+	resp, err := client.Sys().RaftJoin(joinReq)
 	if err != nil {
 		c.UI.Error(fmt.Sprintf("Error joining the node to the Raft cluster: %s", err))
 		return 2
diff --git a/go.mod b/go.mod
index 2977bd1213b5..c3da27493726 100644
--- a/go.mod
+++ b/go.mod
@@ -10,7 +10,7 @@ require (
 	cloud.google.com/go/spanner v1.5.1
 	cloud.google.com/go/storage v1.6.0
 	github.com/Azure/azure-storage-blob-go v0.10.0
-	github.com/Azure/go-autorest/autorest v0.10.1
+	github.com/Azure/go-autorest/autorest v0.11.0
 	github.com/DataDog/zstd v1.4.5 // indirect
 	github.com/NYTimes/gziphandler v1.1.1
 	github.com/SAP/go-hdb v0.14.1
@@ -48,12 +48,12 @@ require (
 	github.com/gocql/gocql v0.0.0-20200624222514-34081eda590e
 	github.com/golang/protobuf v1.4.2
 	github.com/google/go-github v17.0.0+incompatible
-	github.com/google/go-metrics-stackdriver v0.2.0 // indirect
 	github.com/hashicorp/consul-template v0.25.1
 	github.com/hashicorp/consul/api v1.4.0
 	github.com/hashicorp/errwrap v1.0.0
 	github.com/hashicorp/go-bindata v3.0.8-0.20180209072458-bf7910af8997+incompatible
 	github.com/hashicorp/go-cleanhttp v0.5.1
+	github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f
 	github.com/hashicorp/go-gcp-common v0.6.0
 	github.com/hashicorp/go-hclog v0.14.1
 	github.com/hashicorp/go-kms-wrapping v0.5.16
diff --git a/go.sum b/go.sum
index 0fc196ea8934..ad49c98b7865 100644
--- a/go.sum
+++ b/go.sum
@@ -17,12 +17,15 @@ cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbf
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.6.0 h1:ajp/DjpiCHO71SyIhwb83YsUGAyWuzVvMko+9xCsJLw=
 cloud.google.com/go/bigquery v1.6.0/go.mod h1:hyFDG0qSGdHNz8Q6nDN8rYIkld0q/+5uBZaelxiDLfE=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU=
 cloud.google.com/go/pubsub v1.3.1/go.mod 
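
To make the operator change above easier to follow: the join request now carries either a leader API address or a cloud auto-join configuration, and the CLI simply routes any argument containing "provider=" into the new AutoJoin field. Below is a minimal client-side sketch of the same call, assuming the api package from this patch; the AWS tag values, and the Joined field read from the response, are illustrative assumptions rather than part of this diff.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/hashicorp/vault/api"
    )

    func main() {
    	// Build a client against the local node that should join the cluster.
    	client, err := api.NewClient(api.DefaultConfig())
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Anything containing "provider=" is treated by the CLI above as a cloud
    	// auto-join configuration; a plain URL would go into LeaderAPIAddr instead.
    	resp, err := client.Sys().RaftJoin(&api.RaftJoinRequest{
    		AutoJoin: "provider=aws region=eu-west-1 tag_key=vault tag_value=server", // illustrative values
    		Retry:    true,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("joined: %v\n", resp.Joined)
    }

The equivalent CLI invocation is the one already shown in the updated help text: vault operator raft join "provider=aws region=eu-west-1 ...".
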
h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/spanner v1.5.1 h1:dWyj10TLlaxH2No6+tXsSCaq9oWgrRbXy1N3x/bhMGU= cloud.google.com/go/spanner v1.5.1/go.mod h1:e1+8M6PF3ntV9Xr57X2Gf+UhylXXYF6gI4WRZ1kfu2A= @@ -36,45 +39,59 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= -github.com/Azure/azure-sdk-for-go v36.2.0+incompatible h1:09cv2WoH0g6jl6m2iT+R9qcIPZKhXEL0sbmLhxP895s= github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v44.0.0+incompatible h1:e82Yv2HNpS0kuyeCrV29OPKvEiqfs2/uJHic3/3iKdg= +github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.10.0 h1:evCwGreYo3XLeBV4vSxLbLiYb6e0SzsJiXQVRGsRXxs= github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= -github.com/Azure/go-autorest/autorest v0.10.1 h1:uaB8A32IZU9YKs9v50+/LWIWTDHJk2vlGzbfd7FfESI= github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.11.0 h1:tnO41Uo+/0sxTMFY/U7aKg2abek3JOnnXcuSuba74jI= +github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw= github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18= -github.com/Azure/go-autorest/autorest/azure/auth 
v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.0 h1:nSMjYIe24eBYasAIxt859TxyXef/IqoH+8/g4+LmcVs= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.0/go.mod h1:QRTvSZQpxqm8mSErhnbI+tANIBAKP7B+UIE2z4ypUO0= github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.0 h1:Ml+UCrnlKD+cJmSzrZ/RDcDw86NjkRUpnFh7V5JUhzU= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= +github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= -github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/autorest/validation v0.3.0 h1:3I9AAI63HfcLtphd9g39ruUwRI+Ca+z/f36KHPFRUss= +github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod 
h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -93,11 +110,14 @@ github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6tr github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk= github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/SAP/go-hdb v0.14.1 h1:hkw4ozGZ/i4eak7ZuGkY5e0hxiXFdNUBNhr4AvZVNFE= github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a h1:KFHLI4QGttB0i7M3qOkAo8Zn/GSsxwwCnInFqBaYtkM= @@ -107,6 +127,7 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14= github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -140,10 +161,12 @@ github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6l github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf h1:eg0MeVzsP1G42dRafH3vf+al2vQIJU0YHX+1Tw87oco= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.30.27 h1:9gPjZWVDSoQrBO2AvqrWObS6KAZByfEJxQoCYo4ZfK0= github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go-v2 
v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -152,7 +175,9 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= @@ -187,11 +212,14 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381 h1:rdRS5BT13Iae9ssvcslol66gfOOXjaLYwqerEn/cl9s= github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= @@ -234,11 +262,17 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc 
h1:VRRKCwnzqk8QCaRC4os14xoKDdbHqqlJtJA0oc1ZAjg= github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661 h1:lrWnAyy/F72MbxIxFUzKmcMCdt9Oi8RzpAxzTNQHD7o= +github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/digitalocean/godo v1.7.5 h1:JOQbAO6QT1GGjor0doT0mXefX2FgUDPOpYh2RaXA+ko= +github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20200319182547-c7ad2b866182/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= @@ -256,6 +290,7 @@ github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdf github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M= github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= @@ -264,12 +299,15 @@ github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaB github.com/elazarl/go-bindata-assetfs v1.0.1-0.20200509193318-234c15e7648f h1:AwZUiMWfYSmIiHdFJIubTSs8BFIFoMmUFbeuwBzHIPs= github.com/elazarl/go-bindata-assetfs v1.0.1-0.20200509193318-234c15e7648f/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod 
h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= @@ -279,7 +317,9 @@ github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVB github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= +github.com/frankban/quicktest v1.10.0 h1:Gfh+GAJZOAoKZsIZeZbdn2JF10kN1XHNvjsvQK8gVkE= github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= @@ -287,6 +327,7 @@ github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7 h1:D2LrfOPgGHQprIx github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56 h1:VzbudKn/nvxYKOdzgkEBS6SSreRjAgoJ+ZeS4wPFkgc= github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= @@ -306,9 +347,15 @@ github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9p github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod 
h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -347,7 +394,9 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -367,6 +416,7 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8l github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -378,11 +428,14 @@ github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4r github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-metrics-stackdriver v0.2.0 h1:rbs2sxHAPn2OtUj9JdR/Gij1YKGl0BTVD0augB+HEjE= github.com/google/go-metrics-stackdriver v0.2.0/go.mod h1:KLcPyp3dWJAFD+yHisGlJSZktIsTjb50eB72U2YZ9K0= +github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= 
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -397,26 +450,39 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY= github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.0 h1:S7P+1Hm5V/AT9cjEcUD5uDaQSX0OE577aCXgoaKpYbQ= github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 
h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= @@ -427,6 +493,7 @@ github.com/hashicorp/consul/api v1.4.0 h1:jfESivXnO5uLdH650JU/6AnjRoHrLhULq0FnC3 github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.4.0 h1:zBtCfKJZcJDBvSCkQJch4ulp59m1rATFLKwNo/LYY30= github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -435,6 +502,9 @@ github.com/hashicorp/go-bindata v3.0.8-0.20180209072458-bf7910af8997+incompatibl github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f h1:7WFMVeuJQp6BkzuTv9O52pzwtEFVUJubKYN+zez8eTI= +github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f/go.mod h1:D4eo8/CN92vm9/9UDG+ldX1/fMFa4kpl8qzyTolus8o= +github.com/hashicorp/go-gatedio v0.5.0 h1:Jm1X5yP4yCqqWj5L1TgW7iZwCVPGtVc+mro5r/XX7Tg= github.com/hashicorp/go-gatedio v0.5.0/go.mod h1:Lr3t8L6IyxD3DAeaUxGcgl2JnRUpWMCsmBl4Omu/2t4= github.com/hashicorp/go-gcp-common v0.5.0/go.mod h1:IDGUI2N/OS3PiU4qZcXJeWKPI6O/9Y8hOrbSiMcqyYw= github.com/hashicorp/go-gcp-common v0.6.0 h1:m1X+DK003bj4WEjqOnv+Csepb3zpfN/bidboUeUSj68= @@ -499,7 +569,10 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/mdns v1.0.1 h1:XFSOubp8KWB+Jd2PDyaX5xUd5bhSP/+pTDZVDMzZJM8= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.1.4 h1:gkyML/r71w3FL8gUi74Vk76avkj/9lYAY9lvg0OcoGs= github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d h1:BXqsASWhyiAiEVm6FcltF0dg8XvoookQwmpHn8lstu8= github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d/go.mod 
h1:WKCL+tLVhN1D+APwH3JiTRZoxcdwRk86bWu1LVCUPaE= @@ -558,22 +631,29 @@ github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.1.2 h1:X9eK6NSb1qafvoE github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.1.2/go.mod h1:YRW9zn9NZNitRlPYNAWRp/YEdKCF/X8aOg8IYSxFT5Y= github.com/hashicorp/vault-plugin-secrets-openldap v0.1.5 h1:jLYOv9YdaPdb7qfBrLDaHd8AxDjapBKHLviwftt7biw= github.com/hashicorp/vault-plugin-secrets-openldap v0.1.5/go.mod h1:NM+5N+URHHg8ZvlyOJuPy5McC3x0m//96uDCbM8Ygzc= +github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw= +github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huaweicloud/golangsdk v0.0.0-20200304081349-45ec0797f2a4/go.mod h1:WQBcHRNX9shz3928lWEvstQJtAtYI7ks6XlgtRT9Tcw= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4 h1:3K3KcD4S6/Y2hevi70EzUTNKOS3cryQyhUnkjE6Tz0w= github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.3.0+incompatible h1:Wa90/+qsITBAPkAZjiByeIGHFcj3Ztu+VzrrIpHjL90= github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= github.com/jarcoal/httpmock v1.0.4/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= +github.com/jarcoal/httpmock v1.0.5 h1:cHtVEcTxRSX4J0je7mWPfc9BpDpqzXSJ5HbymZmyHck= github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jcmturner/aescts v1.0.1 h1:5jhUSHbHSZjQeWFY//Lv8dpP/O3sMDOxrGV/IfCqh44= github.com/jcmturner/aescts v1.0.1/go.mod h1:k9gJoDUf1GH5r2IBtBjwjDCoLELYxOcEhitdP8RL7qQ= @@ -596,7 +676,9 @@ github.com/jefferai/jsonx v1.0.0/go.mod h1:OGmqmi2tTeI/PS+qQfBDToLHHJIy/RMp24fPo github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= github.com/jmespath/go-jmespath v0.3.0/go.mod 
h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f h1:ENpDacvnr8faw5ugQmEF1QYk+f/Y9lXFvuYmRxykago= github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f/go.mod h1:KDSfL7qe5ZfQqvlDMkVjCztbmcpp/c8M77vhQP8ZPvk= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -612,6 +694,7 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= @@ -643,8 +726,12 @@ github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/linode/linodego v0.7.1 h1:4WZmMpSA2NRwlPZcc0+4Gyn7rr99Evk9bnr0B3gXRKE= +github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -670,6 +757,7 @@ github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1 github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5 h1:uA3b4GgZMZxAJsTkd+CVQ85b7KBlD7HLpd/FfTNlGN0= github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5/go.mod h1:+pmbihVqjC3GPdfWv1V2TnRSuVvwrWLKfEP/MZVB/Wc= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.1 h1:J64v/xD7Clql+JVKSvkYojLOXu1ibnY9ZjGLwSt/89w= @@ -708,7 
+796,9 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mongodb/go-client-mongodb-atlas v0.1.2 h1:qmUme1TlQBPZupmXMnpD8DxnfGXLVGs3w+0Z17HBiSA= github.com/mongodb/go-client-mongodb-atlas v0.1.2/go.mod h1:LS8O0YLkA+sbtOb3fZLF10yY3tJM+1xATXMJ3oU35LU= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwielbut/pointy v1.1.0 h1:U5/YEfoIkaGCHv0St3CgjduqXID4FNRoyZgLM1kY9vg= github.com/mwielbut/pointy v1.1.0/go.mod h1:MvvO+uMFj9T5DMda33HlvogsFBX7pWWKAkFIn4teYwY= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -725,6 +815,8 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s= +github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= github.com/nwaples/rardecode v1.1.0 h1:vSxaY8vQhOcVr4mm5e8XllHWTiM4JF507A0Katqw7MQ= github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= @@ -737,13 +829,18 @@ github.com/okta/okta-sdk-golang/v2 v2.0.0 h1:qwl5Ezpy5a3I2WphiHolpgTtOC+YMTDIpFq github.com/okta/okta-sdk-golang/v2 v2.0.0/go.mod h1:fQubbeV8gksr8e1pmRVSE8kIj1TFqlgYqi8WsvSKmQk= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.0-20180130162743-b8a9be070da4/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod 
h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -772,9 +869,13 @@ github.com/oracle/oci-go-sdk v12.5.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35uk github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= +github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= +github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v0.0.0-20180815053127-5633e0862627/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= @@ -782,6 +883,7 @@ github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTK github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= @@ -844,6 +946,8 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/rboyer/safeio v0.2.1 h1:05xhhdRNAdS3apYm7JRjOqngf4xruaW959jmRxGDuSU= github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy7Fmig= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o= +github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= @@ -858,46 +962,59 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCL github.com/samuel/go-zookeeper 
v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y= github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06qF79/FKAJpBvFx3P8Ww4UTIMAe+lpNXDHziac= github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sethvargo/go-limiter v0.3.0 h1:yRMc+Qs2yqw6YJp6UxrO2iUs6DOSq4zcnljbB7/rMns= github.com/sethvargo/go-limiter v0.3.0/go.mod h1:C0kbSFbiriE5k2FFOe18M1YZbAR2Fiwf72uGu0CXCcU= github.com/shirou/gopsutil v2.20.6-0.20200630091542-01afd763e6c0+incompatible h1:IYOqH6sML3rQGNVEQ5foLtpDt4TeW8PIUBuI9f8itkI= github.com/shirou/gopsutil v2.20.6-0.20200630091542-01afd763e6c0+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180725160413-e900ae048470/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic= +github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= +github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= 
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/square/go-jose v2.4.1+incompatible/go.mod h1:7MxpAF/1WTVUu8Am+T5kNy+t0902CaLWM4Z745MkOa8= github.com/square/go-jose/v3 v3.0.0-20200225220504-708a9fe87ddc/go.mod h1:JbpHhNyeVc538vtj/ECJ3gPYm1VEitNjsLhm4eJQQbg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -910,9 +1027,13 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tencentcloud/tencentcloud-sdk-go v3.0.83+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4= +github.com/tencentcloud/tencentcloud-sdk-go v3.0.171+incompatible h1:K3fcS92NS8cRntIdu8Uqy2ZSePvX73nNhOkKuPGJLXQ= github.com/tencentcloud/tencentcloud-sdk-go v3.0.171+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4= +github.com/tidwall/pretty v1.0.1 h1:WE4RBSZ1x6McVVC8S/Md+Qse8YUv6HRObAx6ke00NY8= github.com/tidwall/pretty v1.0.1/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 
h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= @@ -924,12 +1045,15 @@ github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oW github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo= +github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yandex-cloud/go-genproto v0.0.0-20200722140432-762fe965ce77/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE= @@ -964,6 +1088,7 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= @@ -973,6 +1098,7 @@ golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto 
v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= @@ -985,6 +1111,7 @@ golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1023,6 +1150,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1078,6 +1206,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1089,6 +1218,7 @@ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1108,6 +1238,7 @@ golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1130,6 +1261,7 @@ golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1145,6 +1277,7 @@ golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1289,12 +1422,15 @@ google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod 
h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= @@ -1305,17 +1441,20 @@ gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/ldap.v3 v3.0.3 h1:YKRHW/2sIl05JsCtx/5ZuUueFuJyoj/6+DGXe3wp6ro= gopkg.in/ldap.v3 v3.0.3/go.mod h1:oxD7NyBuxchC+SgJDE1Q5Od05eGt29SDQVBmV+HYbzw= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/ory-am/dockertest.v3 v3.3.4 h1:oen8RiwxVNxtQ1pRoV4e4jqh6UjNsOuIZ1NXns6jdcw= gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek= +gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= @@ -1328,6 +1467,7 @@ gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1339,17 +1479,31 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.0.0-20190409092523-d687e77c8ae9 
h1:c9UEl5z8gk1DGh/g3snETZ+a52YeR9VdbX/3BQ4PHas= k8s.io/api v0.0.0-20190409092523-d687e77c8ae9/go.mod h1:FQEUn50aaytlU65qqBn/w+5ugllHwrBzKm7DzbnXdzE= -k8s.io/apimachinery v0.0.0-20190409092423-760d1845f48b h1:fVkKJL9FIpA8LSJyHVM00MP45q1WJ7+af77vcxmQP4g= +k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8= +k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= k8s.io/apimachinery v0.0.0-20190409092423-760d1845f48b/go.mod h1:FW86P8YXVLsbuplGMZeb20J3jYHscrDqw4jELaFJvRU= -k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 h1:SHucoAy7lRb+w5oC/hbXyZg+zX+Wftn6hD4tGzHCVqA= +k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA= +k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= +k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= layeh.com/radius v0.0.0-20190322222518-890bc1058917 h1:BDXFaFzUt5EIqe/4wrTc4AcYZWP6iC6Ult+jQWLh5eU= layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/http/sys_raft.go b/http/sys_raft.go index e139e195a8c9..ebe557386dd4 100644 --- a/http/sys_raft.go +++ b/http/sys_raft.go @@ -67,6 +67,7 @@ func handleSysRaftJoinPost(core *vault.Core, w http.ResponseWriter, r *http.Requ leaderInfos := []*raft.LeaderJoinInfo{ { + AutoJoin: req.AutoJoin, LeaderAPIAddr: req.LeaderAPIAddr, TLSConfig: tlsConfig, Retry: req.Retry, @@ -90,6 +91,7 @@ type JoinResponse struct { } type JoinRequest struct { + AutoJoin string `json:"auto_join"` LeaderAPIAddr string `json:"leader_api_addr"` LeaderCACert string `json:"leader_ca_cert"` LeaderClientCert string `json:"leader_client_cert"` diff --git a/physical/raft/raft.go b/physical/raft/raft.go index 06c028d2792f..6403ccf08635 100644 --- 
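For illustration of the new auto_join field on JoinRequest above, here is a minimal sketch of driving the same join through Vault's Go API client rather than raw HTTP. It assumes api.RaftJoinRequest carries matching AutoJoin and Retry fields (the one-line api/sys_raft.go change in this patch adds the former), and the AWS provider string is a placeholder only.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR points at the local node that should join the cluster.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// auto_join carries a go-discover provider string; leader_api_addr is
	// deliberately left empty, matching the mutual-exclusion check below.
	resp, err := client.Sys().RaftJoin(&api.RaftJoinRequest{
		AutoJoin: "provider=aws region=eu-west-1 tag_key=vault tag_value=raft",
		Retry:    true,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("joined:", resp.Joined)
}

The same fields can also be POSTed directly to sys/storage/raft/join, which is the handler shown in http/sys_raft.go above.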
a/physical/raft/raft.go +++ b/physical/raft/raft.go @@ -125,6 +125,11 @@ type RaftBackend struct { // LeaderJoinInfo contains information required by a node to join itself as a // follower to an existing raft cluster type LeaderJoinInfo struct { + // AutoJoin defines any cloud auto-join metadata. If supplied, Vault will + // attempt to automatically discover peers in addition to what can be provided + // via 'leader_api_addr'. + AutoJoin string `json:"auto_join"` + // LeaderAPIAddr is the address of the leader node to connect to LeaderAPIAddr string `json:"leader_api_addr"` @@ -178,11 +183,15 @@ func (b *RaftBackend) JoinConfig() ([]*LeaderJoinInfo, error) { return nil, errors.New("invalid retry_join config") } - for _, info := range leaderInfos { + for i, info := range leaderInfos { + if len(info.AutoJoin) != 0 && len(info.LeaderAPIAddr) != 0 { + return nil, errors.New("cannot provide both a leader_api_addr and auto_join") + } + info.Retry = true info.TLSConfig, err = parseTLSInfo(info) if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("failed to create tls config to communicate with leader node %q: {{err}}", info.LeaderAPIAddr), err) + return nil, errwrap.Wrapf(fmt.Sprintf("failed to create tls config to communicate with leader node (retry_join index: %d): {{err}}", i), err) } } diff --git a/vault/external_tests/raft/raft_test.go b/vault/external_tests/raft/raft_test.go index 884575e6938f..2014fe455d2c 100644 --- a/vault/external_tests/raft/raft_test.go +++ b/vault/external_tests/raft/raft_test.go @@ -23,6 +23,7 @@ import ( "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" + "github.com/stretchr/testify/require" "golang.org/x/net/http2" ) @@ -40,6 +41,56 @@ func raftCluster(t testing.TB) *vault.TestCluster { return cluster } +func TestRaft_RetryAutoJoin(t *testing.T) { + t.Parallel() + + var ( + conf vault.CoreConfig + + opts = vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler} + ) + + teststorage.RaftBackendSetup(&conf, &opts) + + opts.SetupFunc = nil + cluster := vault.NewTestCluster(t, &conf, &opts) + + cluster.Start() + defer cluster.Cleanup() + + addressProvider := &testhelpers.TestRaftServerAddressProvider{Cluster: cluster} + leaderCore := cluster.Cores[0] + atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1) + + { + testhelpers.EnsureCoreSealed(t, leaderCore) + leaderCore.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) + cluster.UnsealCore(t, leaderCore) + vault.TestWaitActive(t, leaderCore.Core) + } + + leaderInfos := []*raft.LeaderJoinInfo{ + { + AutoJoin: "provider=aws region=eu-west-1 tag_key=consul tag_value=tag access_key_id=a secret_access_key=a", + TLSConfig: leaderCore.TLSConfig, + Retry: true, + }, + } + + { + // expected to pass but not join as we're not actually discovering leader addresses + core := cluster.Cores[1] + core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) + + _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) + require.NoError(t, err) + } + + testhelpers.VerifyRaftPeers(t, cluster.Cores[0].Client, map[string]bool{ + "core-0": true, + }) +} + func TestRaft_Retry_Join(t *testing.T) { t.Parallel() var conf vault.CoreConfig diff --git a/vault/raft.go b/vault/raft.go index cd3bee3aeec7..f7f53d4e814b 100644 --- a/vault/raft.go +++ b/vault/raft.go @@ -16,6 +16,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/hashicorp/errwrap" cleanhttp 
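The retry_join stanzas parsed by JoinConfig above unmarshal into LeaderJoinInfo, and each stanza may now set either leader_api_addr or auto_join but not both. A self-contained sketch of that rule, using a local two-field mirror of the struct rather than the real type:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// leaderJoinInfo mirrors only the two LeaderJoinInfo fields involved in the
// new check; the real struct lives in physical/raft.
type leaderJoinInfo struct {
	AutoJoin      string `json:"auto_join"`
	LeaderAPIAddr string `json:"leader_api_addr"`
}

// validate reproduces the mutual-exclusion rule added to JoinConfig.
func validate(infos []leaderJoinInfo) error {
	for _, info := range infos {
		if info.AutoJoin != "" && info.LeaderAPIAddr != "" {
			return errors.New("cannot provide both a leader_api_addr and auto_join")
		}
	}
	return nil
}

func main() {
	raw := `[
		{"auto_join": "provider=aws tag_key=vault tag_value=raft", "retry": true},
		{"leader_api_addr": "https://10.0.0.2:8200", "retry": true}
	]`

	var infos []leaderJoinInfo
	if err := json.Unmarshal([]byte(raw), &infos); err != nil {
		panic(err)
	}
	fmt.Println(validate(infos)) // <nil>: each entry sets only one of the two
}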
"github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-discover" "github.com/hashicorp/go-hclog" wrapping "github.com/hashicorp/go-kms-wrapping" uuid "github.com/hashicorp/go-uuid" @@ -739,14 +740,14 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo return false, errors.New("node must be unsealed before joining") } - // Disallow leader API address to be provided if we're using raft for HA-only + // Disallow leader API address to be provided if we're using raft for HA-only. // The leader API address is obtained directly through storage. This serves // as a form of verification that this node is sharing the same physical // storage as the leader node. if isRaftHAOnly { for _, info := range leaderInfos { - if info.LeaderAPIAddr != "" { - return false, errors.New("leader API address must be unset when raft is used exclusively for HA") + if info.LeaderAPIAddr != "" || info.AutoJoin != "" { + return false, errors.New("leader API address and auto-join metadata must be unset when raft is used exclusively for HA") } } @@ -778,12 +779,17 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo leaderInfos[0].LeaderAPIAddr = adv.RedirectAddr } + disco, err := newDiscover() + if err != nil { + return false, errwrap.Wrapf("failed to create auto-join discovery: {{err}}", err) + } + join := func(retry bool) error { - joinLeader := func(leaderInfo *raft.LeaderJoinInfo) error { + joinLeader := func(leaderInfo *raft.LeaderJoinInfo, leaderAddr string) error { if leaderInfo == nil { return errors.New("raft leader information is nil") } - if len(leaderInfo.LeaderAPIAddr) == 0 { + if len(leaderAddr) == 0 { return errors.New("raft leader address not provided") } @@ -797,7 +803,7 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo return nil } - c.logger.Info("attempting to join possible raft leader node", "leader_addr", leaderInfo.LeaderAPIAddr) + c.logger.Info("attempting to join possible raft leader node", "leader_addr", leaderAddr) // Create an API client to interact with the leader node transport := cleanhttp.DefaultPooledTransport() @@ -819,13 +825,16 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo client := &http.Client{ Transport: transport, } + config := api.DefaultConfig() if config.Error != nil { return errwrap.Wrapf("failed to create api client: {{err}}", config.Error) } - config.Address = leaderInfo.LeaderAPIAddr + + config.Address = leaderAddr config.HttpClient = client config.MaxRetries = 0 + apiClient, err := api.NewClient(config) if err != nil { return errwrap.Wrapf("failed to create api client: {{err}}", err) @@ -865,6 +874,7 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo if err := proto.Unmarshal(challengeRaw, eBlob); err != nil { return errwrap.Wrapf("error decoding raft bootstrap challenge: {{err}}", err) } + raftInfo := &raftInformation{ challenge: eBlob, leaderClient: apiClient, @@ -911,11 +921,36 @@ func (c *Core) JoinRaftCluster(ctx context.Context, leaderInfos []*raft.LeaderJo // Each join try goes through all the possible leader nodes and attempts to join // them, until one of the attempt succeeds. 
for _, leaderInfo := range leaderInfos { - err = joinLeader(leaderInfo) - if err == nil { - return nil + switch { + case leaderInfo.LeaderAPIAddr != "" && leaderInfo.AutoJoin != "": + c.logger.Info("join attempt failed", "error", errors.New("cannot provide both leader address and auto-join metadata")) + + case leaderInfo.LeaderAPIAddr != "": + if err := joinLeader(leaderInfo, leaderInfo.LeaderAPIAddr); err != nil { + c.logger.Info("join attempt failed", "error", err) + } else { + // successfully joined leader + return nil + } + + case leaderInfo.AutoJoin != "": + addrs, err := disco.Addrs(leaderInfo.AutoJoin, c.logger.StandardLogger(nil)) + if err != nil { + c.logger.Info("failed to parse addresses from auto-join metadata", "error", err) + } + + for _, addr := range addrs { + if err := joinLeader(leaderInfo, addr); err != nil { + c.logger.Info("join attempt failed", "error", err) + } else { + // successfully joined leader + return nil + } + } + + default: + c.logger.Info("join attempt failed", "error", errors.New("must provide leader address or auto-join metadata")) } - c.logger.Info("join attempt failed", "error", err) } return errors.New("failed to join any raft leader node") @@ -1116,3 +1151,14 @@ type answerResp struct { Peers []raft.Peer `json:"peers"` TLSKeyring *raft.TLSKeyring `json:"tls_keyring"` } + +func newDiscover() (*discover.Discover, error) { + providers := make(map[string]discover.Provider) + for k, v := range discover.Providers { + providers[k] = v + } + + return discover.New( + discover.WithProviders(providers), + ) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE index af39a91e7033..047555ec7e58 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE +++ b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2016 Microsoft Corporation + Copyright 2020 Microsoft Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/availabilitysets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/availabilitysets.go index 92945517338f..68678dd0fd62 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/availabilitysets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/availabilitysets.go @@ -35,7 +35,9 @@ func NewAvailabilitySetsClient(subscriptionID string) AvailabilitySetsClient { return NewAvailabilitySetsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewAvailabilitySetsClientWithBaseURI creates an instance of the AvailabilitySetsClient client. +// NewAvailabilitySetsClientWithBaseURI creates an instance of the AvailabilitySetsClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). func NewAvailabilitySetsClientWithBaseURI(baseURI string, subscriptionID string) AvailabilitySetsClient { return AvailabilitySetsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -103,8 +105,7 @@ func (client AvailabilitySetsClient) CreateOrUpdatePreparer(ctx context.Context, // CreateOrUpdateSender sends the CreateOrUpdate request. 
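Before the vendored diffs that follow, a note on the discovery half of the vault/raft.go change above, which goes through newDiscover and disco.Addrs. The following standalone sketch makes the same hashicorp/go-discover calls with a placeholder AWS provider string; passing discover.Providers directly to WithProviders stands in for the map copy made by newDiscover.

package main

import (
	"fmt"
	"log"
	"os"

	discover "github.com/hashicorp/go-discover"
)

func main() {
	// Register the full provider set, as newDiscover does in vault/raft.go.
	d, err := discover.New(discover.WithProviders(discover.Providers))
	if err != nil {
		log.Fatal(err)
	}

	// The same "provider=... key=value ..." string accepted by auto_join.
	cfg := "provider=aws region=eu-west-1 tag_key=vault tag_value=raft"

	// Addrs resolves the provider string to candidate leader addresses.
	addrs, err := d.Addrs(cfg, log.New(os.Stderr, "", log.LstdFlags))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("discovered peers:", addrs)
}

Each discovered address is then handed to joinLeader in the loop above, exactly as an explicitly configured leader_api_addr would be.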
The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always @@ -112,7 +113,6 @@ func (client AvailabilitySetsClient) CreateOrUpdateSender(req *http.Request) (*h func (client AvailabilitySetsClient) CreateOrUpdateResponder(resp *http.Response) (result AvailabilitySet, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -180,8 +180,7 @@ func (client AvailabilitySetsClient) DeletePreparer(ctx context.Context, resourc // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) DeleteSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // DeleteResponder handles the response to the Delete request. The method always @@ -189,7 +188,6 @@ func (client AvailabilitySetsClient) DeleteSender(req *http.Request) (*http.Resp func (client AvailabilitySetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -256,8 +254,7 @@ func (client AvailabilitySetsClient) GetPreparer(ctx context.Context, resourceGr // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -265,7 +262,6 @@ func (client AvailabilitySetsClient) GetSender(req *http.Request) (*http.Respons func (client AvailabilitySetsClient) GetResponder(resp *http.Response) (result AvailabilitySet, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -332,8 +328,7 @@ func (client AvailabilitySetsClient) ListPreparer(ctx context.Context, resourceG // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -341,7 +336,6 @@ func (client AvailabilitySetsClient) ListSender(req *http.Request) (*http.Respon func (client AvailabilitySetsClient) ListResponder(resp *http.Response) (result AvailabilitySetListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -447,8 +441,7 @@ func (client AvailabilitySetsClient) ListAvailableSizesPreparer(ctx context.Cont // ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always @@ -456,7 +449,6 @@ func (client AvailabilitySetsClient) ListAvailableSizesSender(req *http.Request) func (client AvailabilitySetsClient) ListAvailableSizesResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -466,7 +458,7 @@ func (client AvailabilitySetsClient) ListAvailableSizesResponder(resp *http.Resp // ListBySubscription lists all availability sets in a subscription. // Parameters: -// expand - the expand expression to apply to the operation. +// expand - the expand expression to apply to the operation. Allowed values are 'instanceView'. func (client AvailabilitySetsClient) ListBySubscription(ctx context.Context, expand string) (result AvailabilitySetListResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/AvailabilitySetsClient.ListBySubscription") @@ -525,8 +517,7 @@ func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Cont // ListBySubscriptionSender sends the ListBySubscription request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always @@ -534,7 +525,6 @@ func (client AvailabilitySetsClient) ListBySubscriptionSender(req *http.Request) func (client AvailabilitySetsClient) ListBySubscriptionResponder(resp *http.Response) (result AvailabilitySetListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -642,8 +632,7 @@ func (client AvailabilitySetsClient) UpdatePreparer(ctx context.Context, resourc // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. 
func (client AvailabilitySetsClient) UpdateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // UpdateResponder handles the response to the Update request. The method always @@ -651,7 +640,6 @@ func (client AvailabilitySetsClient) UpdateSender(req *http.Request) (*http.Resp func (client AvailabilitySetsClient) UpdateResponder(resp *http.Response) (result AvailabilitySet, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/client.go index b23c9ca74268..d9d0430190ea 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/client.go @@ -41,7 +41,8 @@ func New(subscriptionID string) BaseClient { return NewWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewWithBaseURI creates an instance of the BaseClient client. +// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with +// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { return BaseClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/containerservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/containerservices.go index d3192f0de414..6a32bdeb6dd4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/containerservices.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/containerservices.go @@ -36,7 +36,9 @@ func NewContainerServicesClient(subscriptionID string) ContainerServicesClient { return NewContainerServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewContainerServicesClientWithBaseURI creates an instance of the ContainerServicesClient client. +// NewContainerServicesClientWithBaseURI creates an instance of the ContainerServicesClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). func NewContainerServicesClientWithBaseURI(baseURI string, subscriptionID string) ContainerServicesClient { return ContainerServicesClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -130,9 +132,8 @@ func (client ContainerServicesClient) CreateOrUpdatePreparer(ctx context.Context // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client ContainerServicesClient) CreateOrUpdateSender(req *http.Request) (future ContainerServicesCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -145,7 +146,6 @@ func (client ContainerServicesClient) CreateOrUpdateSender(req *http.Request) (f func (client ContainerServicesClient) CreateOrUpdateResponder(resp *http.Response) (result ContainerService, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -210,9 +210,8 @@ func (client ContainerServicesClient) DeletePreparer(ctx context.Context, resour // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client ContainerServicesClient) DeleteSender(req *http.Request) (future ContainerServicesDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -225,7 +224,6 @@ func (client ContainerServicesClient) DeleteSender(req *http.Request) (future Co func (client ContainerServicesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -294,8 +292,7 @@ func (client ContainerServicesClient) GetPreparer(ctx context.Context, resourceG // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ContainerServicesClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -303,7 +300,6 @@ func (client ContainerServicesClient) GetSender(req *http.Request) (*http.Respon func (client ContainerServicesClient) GetResponder(resp *http.Response) (result ContainerService, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -368,8 +364,7 @@ func (client ContainerServicesClient) ListPreparer(ctx context.Context) (*http.R // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ContainerServicesClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -377,7 +372,6 @@ func (client ContainerServicesClient) ListSender(req *http.Request) (*http.Respo func (client ContainerServicesClient) ListResponder(resp *http.Response) (result ContainerServiceListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -483,8 +477,7 @@ func (client ContainerServicesClient) ListByResourceGroupPreparer(ctx context.Co // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client ContainerServicesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -492,7 +485,6 @@ func (client ContainerServicesClient) ListByResourceGroupSender(req *http.Reques func (client ContainerServicesClient) ListByResourceGroupResponder(resp *http.Response) (result ContainerServiceListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhostgroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhostgroups.go index c786ffefc3f3..e103706358b3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhostgroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhostgroups.go @@ -36,7 +36,9 @@ func NewDedicatedHostGroupsClient(subscriptionID string) DedicatedHostGroupsClie return NewDedicatedHostGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewDedicatedHostGroupsClientWithBaseURI creates an instance of the DedicatedHostGroupsClient client. +// NewDedicatedHostGroupsClientWithBaseURI creates an instance of the DedicatedHostGroupsClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). 
func NewDedicatedHostGroupsClientWithBaseURI(baseURI string, subscriptionID string) DedicatedHostGroupsClient { return DedicatedHostGroupsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -63,7 +65,7 @@ func (client DedicatedHostGroupsClient) CreateOrUpdate(ctx context.Context, reso Constraints: []validation.Constraint{{Target: "parameters.DedicatedHostGroupProperties", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.DedicatedHostGroupProperties.PlatformFaultDomainCount", Name: validation.Null, Rule: true, Chain: []validation.Constraint{{Target: "parameters.DedicatedHostGroupProperties.PlatformFaultDomainCount", Name: validation.InclusiveMaximum, Rule: int64(3), Chain: nil}, - {Target: "parameters.DedicatedHostGroupProperties.PlatformFaultDomainCount", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + {Target: "parameters.DedicatedHostGroupProperties.PlatformFaultDomainCount", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}, }}}}}); err != nil { return result, validation.NewError("compute.DedicatedHostGroupsClient", "CreateOrUpdate", err.Error()) @@ -116,8 +118,7 @@ func (client DedicatedHostGroupsClient) CreateOrUpdatePreparer(ctx context.Conte // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client DedicatedHostGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always @@ -125,7 +126,6 @@ func (client DedicatedHostGroupsClient) CreateOrUpdateSender(req *http.Request) func (client DedicatedHostGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result DedicatedHostGroup, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -193,8 +193,7 @@ func (client DedicatedHostGroupsClient) DeletePreparer(ctx context.Context, reso // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client DedicatedHostGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // DeleteResponder handles the response to the Delete request. The method always @@ -202,7 +201,6 @@ func (client DedicatedHostGroupsClient) DeleteSender(req *http.Request) (*http.R func (client DedicatedHostGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -269,8 +267,7 @@ func (client DedicatedHostGroupsClient) GetPreparer(ctx context.Context, resourc // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client DedicatedHostGroupsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -278,7 +275,6 @@ func (client DedicatedHostGroupsClient) GetSender(req *http.Request) (*http.Resp func (client DedicatedHostGroupsClient) GetResponder(resp *http.Response) (result DedicatedHostGroup, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -346,8 +342,7 @@ func (client DedicatedHostGroupsClient) ListByResourceGroupPreparer(ctx context. // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client DedicatedHostGroupsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -355,7 +350,6 @@ func (client DedicatedHostGroupsClient) ListByResourceGroupSender(req *http.Requ func (client DedicatedHostGroupsClient) ListByResourceGroupResponder(resp *http.Response) (result DedicatedHostGroupListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -457,8 +451,7 @@ func (client DedicatedHostGroupsClient) ListBySubscriptionPreparer(ctx context.C // ListBySubscriptionSender sends the ListBySubscription request. The method will close the // http.Response Body if it receives an error. func (client DedicatedHostGroupsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always @@ -466,7 +459,6 @@ func (client DedicatedHostGroupsClient) ListBySubscriptionSender(req *http.Reque func (client DedicatedHostGroupsClient) ListBySubscriptionResponder(resp *http.Response) (result DedicatedHostGroupListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -574,8 +566,7 @@ func (client DedicatedHostGroupsClient) UpdatePreparer(ctx context.Context, reso // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client DedicatedHostGroupsClient) UpdateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // UpdateResponder handles the response to the Update request. 
The method always @@ -583,7 +574,6 @@ func (client DedicatedHostGroupsClient) UpdateSender(req *http.Request) (*http.R func (client DedicatedHostGroupsClient) UpdateResponder(resp *http.Response) (result DedicatedHostGroup, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhosts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhosts.go index fb6ff59522b3..9ed470c41dd4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhosts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/dedicatedhosts.go @@ -36,7 +36,8 @@ func NewDedicatedHostsClient(subscriptionID string) DedicatedHostsClient { return NewDedicatedHostsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewDedicatedHostsClientWithBaseURI creates an instance of the DedicatedHostsClient client. +// NewDedicatedHostsClientWithBaseURI creates an instance of the DedicatedHostsClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewDedicatedHostsClientWithBaseURI(baseURI string, subscriptionID string) DedicatedHostsClient { return DedicatedHostsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -63,7 +64,7 @@ func (client DedicatedHostsClient) CreateOrUpdate(ctx context.Context, resourceG Constraints: []validation.Constraint{{Target: "parameters.DedicatedHostProperties", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.DedicatedHostProperties.PlatformFaultDomain", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.DedicatedHostProperties.PlatformFaultDomain", Name: validation.InclusiveMaximum, Rule: int64(2), Chain: nil}, - {Target: "parameters.DedicatedHostProperties.PlatformFaultDomain", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}, + {Target: "parameters.DedicatedHostProperties.PlatformFaultDomain", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}, }}, }}, {Target: "parameters.Sku", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { @@ -112,9 +113,8 @@ func (client DedicatedHostsClient) CreateOrUpdatePreparer(ctx context.Context, r // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client DedicatedHostsClient) CreateOrUpdateSender(req *http.Request) (future DedicatedHostsCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -127,7 +127,6 @@ func (client DedicatedHostsClient) CreateOrUpdateSender(req *http.Request) (futu func (client DedicatedHostsClient) CreateOrUpdateResponder(resp *http.Response) (result DedicatedHost, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -191,9 +190,8 @@ func (client DedicatedHostsClient) DeletePreparer(ctx context.Context, resourceG // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client DedicatedHostsClient) DeleteSender(req *http.Request) (future DedicatedHostsDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -206,7 +204,6 @@ func (client DedicatedHostsClient) DeleteSender(req *http.Request) (future Dedic func (client DedicatedHostsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -279,8 +276,7 @@ func (client DedicatedHostsClient) GetPreparer(ctx context.Context, resourceGrou // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client DedicatedHostsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -288,7 +284,6 @@ func (client DedicatedHostsClient) GetSender(req *http.Request) (*http.Response, func (client DedicatedHostsClient) GetResponder(resp *http.Response) (result DedicatedHost, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -358,8 +353,7 @@ func (client DedicatedHostsClient) ListByHostGroupPreparer(ctx context.Context, // ListByHostGroupSender sends the ListByHostGroup request. The method will close the // http.Response Body if it receives an error. func (client DedicatedHostsClient) ListByHostGroupSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByHostGroupResponder handles the response to the ListByHostGroup request. 
The method always @@ -367,7 +361,6 @@ func (client DedicatedHostsClient) ListByHostGroupSender(req *http.Request) (*ht func (client DedicatedHostsClient) ListByHostGroupResponder(resp *http.Response) (result DedicatedHostListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -471,9 +464,8 @@ func (client DedicatedHostsClient) UpdatePreparer(ctx context.Context, resourceG // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client DedicatedHostsClient) UpdateSender(req *http.Request) (future DedicatedHostsUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -486,7 +478,6 @@ func (client DedicatedHostsClient) UpdateSender(req *http.Request) (future Dedic func (client DedicatedHostsClient) UpdateResponder(resp *http.Response) (result DedicatedHost, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/diskencryptionsets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/diskencryptionsets.go index cb93acdde874..418c4d25f10f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/diskencryptionsets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/diskencryptionsets.go @@ -36,7 +36,9 @@ func NewDiskEncryptionSetsClient(subscriptionID string) DiskEncryptionSetsClient return NewDiskEncryptionSetsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewDiskEncryptionSetsClientWithBaseURI creates an instance of the DiskEncryptionSetsClient client. +// NewDiskEncryptionSetsClientWithBaseURI creates an instance of the DiskEncryptionSetsClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). func NewDiskEncryptionSetsClientWithBaseURI(baseURI string, subscriptionID string) DiskEncryptionSetsClient { return DiskEncryptionSetsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -112,9 +114,8 @@ func (client DiskEncryptionSetsClient) CreateOrUpdatePreparer(ctx context.Contex // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client DiskEncryptionSetsClient) CreateOrUpdateSender(req *http.Request) (future DiskEncryptionSetsCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -127,7 +128,6 @@ func (client DiskEncryptionSetsClient) CreateOrUpdateSender(req *http.Request) ( func (client DiskEncryptionSetsClient) CreateOrUpdateResponder(resp *http.Response) (result DiskEncryptionSet, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -191,9 +191,8 @@ func (client DiskEncryptionSetsClient) DeletePreparer(ctx context.Context, resou // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client DiskEncryptionSetsClient) DeleteSender(req *http.Request) (future DiskEncryptionSetsDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -206,7 +205,6 @@ func (client DiskEncryptionSetsClient) DeleteSender(req *http.Request) (future D func (client DiskEncryptionSetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -275,8 +273,7 @@ func (client DiskEncryptionSetsClient) GetPreparer(ctx context.Context, resource // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client DiskEncryptionSetsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -284,7 +281,6 @@ func (client DiskEncryptionSetsClient) GetSender(req *http.Request) (*http.Respo func (client DiskEncryptionSetsClient) GetResponder(resp *http.Response) (result DiskEncryptionSet, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -348,8 +344,7 @@ func (client DiskEncryptionSetsClient) ListPreparer(ctx context.Context) (*http. // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client DiskEncryptionSetsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -357,7 +352,6 @@ func (client DiskEncryptionSetsClient) ListSender(req *http.Request) (*http.Resp func (client DiskEncryptionSetsClient) ListResponder(resp *http.Response) (result DiskEncryptionSetList, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -461,8 +455,7 @@ func (client DiskEncryptionSetsClient) ListByResourceGroupPreparer(ctx context.C // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client DiskEncryptionSetsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -470,7 +463,6 @@ func (client DiskEncryptionSetsClient) ListByResourceGroupSender(req *http.Reque func (client DiskEncryptionSetsClient) ListByResourceGroupResponder(resp *http.Response) (result DiskEncryptionSetList, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -575,9 +567,8 @@ func (client DiskEncryptionSetsClient) UpdatePreparer(ctx context.Context, resou // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client DiskEncryptionSetsClient) UpdateSender(req *http.Request) (future DiskEncryptionSetsUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -590,7 +581,6 @@ func (client DiskEncryptionSetsClient) UpdateSender(req *http.Request) (future D func (client DiskEncryptionSetsClient) UpdateResponder(resp *http.Response) (result DiskEncryptionSet, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/disks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/disks.go index 4bf336689169..1ec3525478f1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/disks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/disks.go @@ -36,7 +36,8 @@ func NewDisksClient(subscriptionID string) DisksClient { return NewDisksClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewDisksClientWithBaseURI creates an instance of the DisksClient client. +// NewDisksClientWithBaseURI creates an instance of the DisksClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
func NewDisksClientWithBaseURI(baseURI string, subscriptionID string) DisksClient { return DisksClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -114,9 +115,8 @@ func (client DisksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGr // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client DisksClient) CreateOrUpdateSender(req *http.Request) (future DisksCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -129,7 +129,6 @@ func (client DisksClient) CreateOrUpdateSender(req *http.Request) (future DisksC func (client DisksClient) CreateOrUpdateResponder(resp *http.Response) (result Disk, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -193,9 +192,8 @@ func (client DisksClient) DeletePreparer(ctx context.Context, resourceGroupName // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client DisksClient) DeleteSender(req *http.Request) (future DisksDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -208,7 +206,6 @@ func (client DisksClient) DeleteSender(req *http.Request) (future DisksDeleteFut func (client DisksClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -277,8 +274,7 @@ func (client DisksClient) GetPreparer(ctx context.Context, resourceGroupName str // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client DisksClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -286,7 +282,6 @@ func (client DisksClient) GetSender(req *http.Request) (*http.Response, error) { func (client DisksClient) GetResponder(resp *http.Response) (result Disk, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -359,9 +354,8 @@ func (client DisksClient) GrantAccessPreparer(ctx context.Context, resourceGroup // GrantAccessSender sends the GrantAccess request. The method will close the // http.Response Body if it receives an error. 
func (client DisksClient) GrantAccessSender(req *http.Request) (future DisksGrantAccessFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -374,7 +368,6 @@ func (client DisksClient) GrantAccessSender(req *http.Request) (future DisksGran func (client DisksClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -438,8 +431,7 @@ func (client DisksClient) ListPreparer(ctx context.Context) (*http.Request, erro // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client DisksClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -447,7 +439,6 @@ func (client DisksClient) ListSender(req *http.Request) (*http.Response, error) func (client DisksClient) ListResponder(resp *http.Response) (result DiskList, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -551,8 +542,7 @@ func (client DisksClient) ListByResourceGroupPreparer(ctx context.Context, resou // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client DisksClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -560,7 +550,6 @@ func (client DisksClient) ListByResourceGroupSender(req *http.Request) (*http.Re func (client DisksClient) ListByResourceGroupResponder(resp *http.Response) (result DiskList, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -661,9 +650,8 @@ func (client DisksClient) RevokeAccessPreparer(ctx context.Context, resourceGrou // RevokeAccessSender sends the RevokeAccess request. The method will close the // http.Response Body if it receives an error. func (client DisksClient) RevokeAccessSender(req *http.Request) (future DisksRevokeAccessFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -676,7 +664,6 @@ func (client DisksClient) RevokeAccessSender(req *http.Request) (future DisksRev func (client DisksClient) RevokeAccessResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -742,9 +729,8 @@ func (client DisksClient) UpdatePreparer(ctx context.Context, resourceGroupName // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client DisksClient) UpdateSender(req *http.Request) (future DisksUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -757,7 +743,6 @@ func (client DisksClient) UpdateSender(req *http.Request) (future DisksUpdateFut func (client DisksClient) UpdateResponder(resp *http.Response) (result Disk, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleries.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleries.go index 553defda18ea..f1feeb41b2d7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleries.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleries.go @@ -35,7 +35,8 @@ func NewGalleriesClient(subscriptionID string) GalleriesClient { return NewGalleriesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewGalleriesClientWithBaseURI creates an instance of the GalleriesClient client. +// NewGalleriesClientWithBaseURI creates an instance of the GalleriesClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewGalleriesClientWithBaseURI(baseURI string, subscriptionID string) GalleriesClient { return GalleriesClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -98,9 +99,8 @@ func (client GalleriesClient) CreateOrUpdatePreparer(ctx context.Context, resour // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client GalleriesClient) CreateOrUpdateSender(req *http.Request) (future GalleriesCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -113,7 +113,6 @@ func (client GalleriesClient) CreateOrUpdateSender(req *http.Request) (future Ga func (client GalleriesClient) CreateOrUpdateResponder(resp *http.Response) (result Gallery, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -175,9 +174,8 @@ func (client GalleriesClient) DeletePreparer(ctx context.Context, resourceGroupN // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client GalleriesClient) DeleteSender(req *http.Request) (future GalleriesDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -190,7 +188,6 @@ func (client GalleriesClient) DeleteSender(req *http.Request) (future GalleriesD func (client GalleriesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -257,8 +254,7 @@ func (client GalleriesClient) GetPreparer(ctx context.Context, resourceGroupName // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client GalleriesClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -266,7 +262,6 @@ func (client GalleriesClient) GetSender(req *http.Request) (*http.Response, erro func (client GalleriesClient) GetResponder(resp *http.Response) (result Gallery, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -330,8 +325,7 @@ func (client GalleriesClient) ListPreparer(ctx context.Context) (*http.Request, // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client GalleriesClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -339,7 +333,6 @@ func (client GalleriesClient) ListSender(req *http.Request) (*http.Response, err func (client GalleriesClient) ListResponder(resp *http.Response) (result GalleryList, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -443,8 +436,7 @@ func (client GalleriesClient) ListByResourceGroupPreparer(ctx context.Context, r // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client GalleriesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -452,7 +444,6 @@ func (client GalleriesClient) ListByResourceGroupSender(req *http.Request) (*htt func (client GalleriesClient) ListByResourceGroupResponder(resp *http.Response) (result GalleryList, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -496,3 +487,82 @@ func (client GalleriesClient) ListByResourceGroupComplete(ctx context.Context, r result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) return } + +// Update update a Shared Image Gallery. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery. The allowed characters are alphabets and numbers with +// dots and periods allowed in the middle. The maximum length is 80 characters. +// gallery - parameters supplied to the update Shared Image Gallery operation. +func (client GalleriesClient) Update(ctx context.Context, resourceGroupName string, galleryName string, gallery GalleryUpdate) (result GalleriesUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleriesClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, galleryName, gallery) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client GalleriesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, gallery GalleryUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}", pathParameters), + autorest.WithJSON(gallery), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client GalleriesClient) UpdateSender(req *http.Request) (future GalleriesUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client GalleriesClient) UpdateResponder(resp *http.Response) (result Gallery, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplications.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplications.go index c2c651209dee..2b300b71db44 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplications.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplications.go @@ -35,7 +35,9 @@ func NewGalleryApplicationsClient(subscriptionID string) GalleryApplicationsClie return NewGalleryApplicationsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewGalleryApplicationsClientWithBaseURI creates an instance of the GalleryApplicationsClient client. +// NewGalleryApplicationsClientWithBaseURI creates an instance of the GalleryApplicationsClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). func NewGalleryApplicationsClientWithBaseURI(baseURI string, subscriptionID string) GalleryApplicationsClient { return GalleryApplicationsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -102,9 +104,8 @@ func (client GalleryApplicationsClient) CreateOrUpdatePreparer(ctx context.Conte // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
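The Update methods being added to the gallery clients are long-running operations: UpdateSender hands back a future built from the initial response rather than the updated resource. A minimal sketch of driving GalleriesClient.Update end to end, assuming GalleriesUpdateFuture follows the same shape as the package's other generated futures (an embedded azure.Future plus a Result method), that GalleryUpdate carries the gallery properties the way the other update models do, and that the client is already authorized; the helper name and field values are illustrative:

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// updateGalleryDescription PATCHes only the gallery description and blocks
// until the long-running operation reaches a terminal state.
func updateGalleryDescription(ctx context.Context, client compute.GalleriesClient, resourceGroup, galleryName, description string) (compute.Gallery, error) {
	future, err := client.Update(ctx, resourceGroup, galleryName, compute.GalleryUpdate{
		GalleryProperties: &compute.GalleryProperties{Description: to.StringPtr(description)},
	})
	if err != nil {
		return compute.Gallery{}, err
	}
	// Poll until the PATCH reaches a terminal state.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return compute.Gallery{}, err
	}
	// Re-read the final response through UpdateResponder.
	return future.Result(client)
}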
func (client GalleryApplicationsClient) CreateOrUpdateSender(req *http.Request) (future GalleryApplicationsCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -117,7 +118,6 @@ func (client GalleryApplicationsClient) CreateOrUpdateSender(req *http.Request) func (client GalleryApplicationsClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryApplication, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -182,9 +182,8 @@ func (client GalleryApplicationsClient) DeletePreparer(ctx context.Context, reso // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client GalleryApplicationsClient) DeleteSender(req *http.Request) (future GalleryApplicationsDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -197,7 +196,6 @@ func (client GalleryApplicationsClient) DeleteSender(req *http.Request) (future func (client GalleryApplicationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -267,8 +265,7 @@ func (client GalleryApplicationsClient) GetPreparer(ctx context.Context, resourc // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client GalleryApplicationsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -276,7 +273,6 @@ func (client GalleryApplicationsClient) GetSender(req *http.Request) (*http.Resp func (client GalleryApplicationsClient) GetResponder(resp *http.Response) (result GalleryApplication, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -346,8 +342,7 @@ func (client GalleryApplicationsClient) ListByGalleryPreparer(ctx context.Contex // ListByGallerySender sends the ListByGallery request. The method will close the // http.Response Body if it receives an error. func (client GalleryApplicationsClient) ListByGallerySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByGalleryResponder handles the response to the ListByGallery request. 
The method always @@ -355,7 +350,6 @@ func (client GalleryApplicationsClient) ListByGallerySender(req *http.Request) ( func (client GalleryApplicationsClient) ListByGalleryResponder(resp *http.Response) (result GalleryApplicationList, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -399,3 +393,86 @@ func (client GalleryApplicationsClient) ListByGalleryComplete(ctx context.Contex result.page, err = client.ListByGallery(ctx, resourceGroupName, galleryName) return } + +// Update update a gallery Application Definition. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Application Gallery in which the Application Definition is to be +// updated. +// galleryApplicationName - the name of the gallery Application Definition to be updated. The allowed +// characters are alphabets and numbers with dots, dashes, and periods allowed in the middle. The maximum +// length is 80 characters. +// galleryApplication - parameters supplied to the update gallery Application operation. +func (client GalleryApplicationsClient) Update(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplicationUpdate) (result GalleryApplicationsUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationsClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplication) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client GalleryApplicationsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplicationUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryApplicationName": autorest.Encode("path", galleryApplicationName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}", pathParameters), + autorest.WithJSON(galleryApplication), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. 
+func (client GalleryApplicationsClient) UpdateSender(req *http.Request) (future GalleryApplicationsUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client GalleryApplicationsClient) UpdateResponder(resp *http.Response) (result GalleryApplication, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplicationversions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplicationversions.go index 40b2dc830e93..9e098d1ae246 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplicationversions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryapplicationversions.go @@ -36,7 +36,9 @@ func NewGalleryApplicationVersionsClient(subscriptionID string) GalleryApplicati return NewGalleryApplicationVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewGalleryApplicationVersionsClientWithBaseURI creates an instance of the GalleryApplicationVersionsClient client. +// NewGalleryApplicationVersionsClientWithBaseURI creates an instance of the GalleryApplicationVersionsClient client +// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). func NewGalleryApplicationVersionsClientWithBaseURI(baseURI string, subscriptionID string) GalleryApplicationVersionsClient { return GalleryApplicationVersionsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -119,9 +121,8 @@ func (client GalleryApplicationVersionsClient) CreateOrUpdatePreparer(ctx contex // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client GalleryApplicationVersionsClient) CreateOrUpdateSender(req *http.Request) (future GalleryApplicationVersionsCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -134,7 +135,6 @@ func (client GalleryApplicationVersionsClient) CreateOrUpdateSender(req *http.Re func (client GalleryApplicationVersionsClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryApplicationVersion, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -201,9 +201,8 @@ func (client GalleryApplicationVersionsClient) DeletePreparer(ctx context.Contex // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client GalleryApplicationVersionsClient) DeleteSender(req *http.Request) (future GalleryApplicationVersionsDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -216,7 +215,6 @@ func (client GalleryApplicationVersionsClient) DeleteSender(req *http.Request) ( func (client GalleryApplicationVersionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -292,8 +290,7 @@ func (client GalleryApplicationVersionsClient) GetPreparer(ctx context.Context, // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client GalleryApplicationVersionsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -301,7 +298,6 @@ func (client GalleryApplicationVersionsClient) GetSender(req *http.Request) (*ht func (client GalleryApplicationVersionsClient) GetResponder(resp *http.Response) (result GalleryApplicationVersion, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -373,8 +369,7 @@ func (client GalleryApplicationVersionsClient) ListByGalleryApplicationPreparer( // ListByGalleryApplicationSender sends the ListByGalleryApplication request. The method will close the // http.Response Body if it receives an error. func (client GalleryApplicationVersionsClient) ListByGalleryApplicationSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByGalleryApplicationResponder handles the response to the ListByGalleryApplication request. The method always @@ -382,7 +377,6 @@ func (client GalleryApplicationVersionsClient) ListByGalleryApplicationSender(re func (client GalleryApplicationVersionsClient) ListByGalleryApplicationResponder(resp *http.Response) (result GalleryApplicationVersionList, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -426,3 +420,88 @@ func (client GalleryApplicationVersionsClient) ListByGalleryApplicationComplete( result.page, err = client.ListByGalleryApplication(ctx, resourceGroupName, galleryName, galleryApplicationName) return } + +// Update update a gallery Application Version. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Application Gallery in which the Application Definition resides. 
+// galleryApplicationName - the name of the gallery Application Definition in which the Application Version is +// to be updated. +// galleryApplicationVersionName - the name of the gallery Application Version to be updated. Needs to follow +// semantic version name pattern: The allowed characters are digit and period. Digits must be within the range +// of a 32-bit integer. Format: .. +// galleryApplicationVersion - parameters supplied to the update gallery Application Version operation. +func (client GalleryApplicationVersionsClient) Update(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersionUpdate) (result GalleryApplicationVersionsUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryApplicationVersionsClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, galleryApplicationVersion) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client GalleryApplicationVersionsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersionUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryApplicationName": autorest.Encode("path", galleryApplicationName), + "galleryApplicationVersionName": autorest.Encode("path", galleryApplicationVersionName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}", pathParameters), + autorest.WithJSON(galleryApplicationVersion), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryApplicationVersionsClient) UpdateSender(req *http.Request) (future GalleryApplicationVersionsUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. 
The method always +// closes the http.Response Body. +func (client GalleryApplicationVersionsClient) UpdateResponder(resp *http.Response) (result GalleryApplicationVersion, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryimages.go index c4c78e5706bb..7e0af343a454 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryimages.go @@ -36,7 +36,8 @@ func NewGalleryImagesClient(subscriptionID string) GalleryImagesClient { return NewGalleryImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewGalleryImagesClientWithBaseURI creates an instance of the GalleryImagesClient client. +// NewGalleryImagesClientWithBaseURI creates an instance of the GalleryImagesClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewGalleryImagesClientWithBaseURI(baseURI string, subscriptionID string) GalleryImagesClient { return GalleryImagesClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -114,9 +115,8 @@ func (client GalleryImagesClient) CreateOrUpdatePreparer(ctx context.Context, re // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client GalleryImagesClient) CreateOrUpdateSender(req *http.Request) (future GalleryImagesCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -129,7 +129,6 @@ func (client GalleryImagesClient) CreateOrUpdateSender(req *http.Request) (futur func (client GalleryImagesClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryImage, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -193,9 +192,8 @@ func (client GalleryImagesClient) DeletePreparer(ctx context.Context, resourceGr // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client GalleryImagesClient) DeleteSender(req *http.Request) (future GalleryImagesDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -208,7 +206,6 @@ func (client GalleryImagesClient) DeleteSender(req *http.Request) (future Galler func (client GalleryImagesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -277,8 +274,7 @@ func (client GalleryImagesClient) GetPreparer(ctx context.Context, resourceGroup // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client GalleryImagesClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -286,7 +282,6 @@ func (client GalleryImagesClient) GetSender(req *http.Request) (*http.Response, func (client GalleryImagesClient) GetResponder(resp *http.Response) (result GalleryImage, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -355,8 +350,7 @@ func (client GalleryImagesClient) ListByGalleryPreparer(ctx context.Context, res // ListByGallerySender sends the ListByGallery request. The method will close the // http.Response Body if it receives an error. func (client GalleryImagesClient) ListByGallerySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByGalleryResponder handles the response to the ListByGallery request. The method always @@ -364,7 +358,6 @@ func (client GalleryImagesClient) ListByGallerySender(req *http.Request) (*http. func (client GalleryImagesClient) ListByGalleryResponder(resp *http.Response) (result GalleryImageList, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -408,3 +401,85 @@ func (client GalleryImagesClient) ListByGalleryComplete(ctx context.Context, res result.page, err = client.ListByGallery(ctx, resourceGroupName, galleryName) return } + +// Update update a gallery Image Definition. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery in which the Image Definition is to be updated. +// galleryImageName - the name of the gallery Image Definition to be updated. The allowed characters are +// alphabets and numbers with dots, dashes, and periods allowed in the middle. The maximum length is 80 +// characters. +// galleryImage - parameters supplied to the update gallery image operation. 
+func (client GalleryImagesClient) Update(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImageUpdate) (result GalleryImagesUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImagesClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImage) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client GalleryImagesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImageUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryImageName": autorest.Encode("path", galleryImageName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}", pathParameters), + autorest.WithJSON(galleryImage), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryImagesClient) UpdateSender(req *http.Request) (future GalleryImagesUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client GalleryImagesClient) UpdateResponder(resp *http.Response) (result GalleryImage, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryimageversions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryimageversions.go index 658da5864e04..fbd3ed7cec71 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryimageversions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/galleryimageversions.go @@ -36,7 +36,9 @@ func NewGalleryImageVersionsClient(subscriptionID string) GalleryImageVersionsCl return NewGalleryImageVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewGalleryImageVersionsClientWithBaseURI creates an instance of the GalleryImageVersionsClient client. +// NewGalleryImageVersionsClientWithBaseURI creates an instance of the GalleryImageVersionsClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). func NewGalleryImageVersionsClientWithBaseURI(baseURI string, subscriptionID string) GalleryImageVersionsClient { return GalleryImageVersionsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -64,11 +66,7 @@ func (client GalleryImageVersionsClient) CreateOrUpdate(ctx context.Context, res if err := validation.Validate([]validation.Validation{ {TargetValue: galleryImageVersion, Constraints: []validation.Constraint{{Target: "galleryImageVersion.GalleryImageVersionProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "galleryImageVersion.GalleryImageVersionProperties.StorageProfile", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "galleryImageVersion.GalleryImageVersionProperties.StorageProfile.Source", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "galleryImageVersion.GalleryImageVersionProperties.StorageProfile.Source.ID", Name: validation.Null, Rule: true, Chain: nil}}}, - }}, - }}}}}); err != nil { + Chain: []validation.Constraint{{Target: "galleryImageVersion.GalleryImageVersionProperties.StorageProfile", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { return result, validation.NewError("compute.GalleryImageVersionsClient", "CreateOrUpdate", err.Error()) } @@ -115,9 +113,8 @@ func (client GalleryImageVersionsClient) CreateOrUpdatePreparer(ctx context.Cont // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client GalleryImageVersionsClient) CreateOrUpdateSender(req *http.Request) (future GalleryImageVersionsCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -130,7 +127,6 @@ func (client GalleryImageVersionsClient) CreateOrUpdateSender(req *http.Request) func (client GalleryImageVersionsClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryImageVersion, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -196,9 +192,8 @@ func (client GalleryImageVersionsClient) DeletePreparer(ctx context.Context, res // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client GalleryImageVersionsClient) DeleteSender(req *http.Request) (future GalleryImageVersionsDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -211,7 +206,6 @@ func (client GalleryImageVersionsClient) DeleteSender(req *http.Request) (future func (client GalleryImageVersionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -286,8 +280,7 @@ func (client GalleryImageVersionsClient) GetPreparer(ctx context.Context, resour // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client GalleryImageVersionsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -295,7 +288,6 @@ func (client GalleryImageVersionsClient) GetSender(req *http.Request) (*http.Res func (client GalleryImageVersionsClient) GetResponder(resp *http.Response) (result GalleryImageVersion, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -367,8 +359,7 @@ func (client GalleryImageVersionsClient) ListByGalleryImagePreparer(ctx context. // ListByGalleryImageSender sends the ListByGalleryImage request. The method will close the // http.Response Body if it receives an error. func (client GalleryImageVersionsClient) ListByGalleryImageSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByGalleryImageResponder handles the response to the ListByGalleryImage request. 
The method always @@ -376,7 +367,6 @@ func (client GalleryImageVersionsClient) ListByGalleryImageSender(req *http.Requ func (client GalleryImageVersionsClient) ListByGalleryImageResponder(resp *http.Response) (result GalleryImageVersionList, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -420,3 +410,87 @@ func (client GalleryImageVersionsClient) ListByGalleryImageComplete(ctx context. result.page, err = client.ListByGalleryImage(ctx, resourceGroupName, galleryName, galleryImageName) return } + +// Update update a gallery Image Version. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery in which the Image Definition resides. +// galleryImageName - the name of the gallery Image Definition in which the Image Version is to be updated. +// galleryImageVersionName - the name of the gallery Image Version to be updated. Needs to follow semantic +// version name pattern: The allowed characters are digit and period. Digits must be within the range of a +// 32-bit integer. Format: .. +// galleryImageVersion - parameters supplied to the update gallery Image Version operation. +func (client GalleryImageVersionsClient) Update(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersionUpdate) (result GalleryImageVersionsUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GalleryImageVersionsClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, galleryImageVersion) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client GalleryImageVersionsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersionUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryImageName": autorest.Encode("path", galleryImageName), + "galleryImageVersionName": autorest.Encode("path", galleryImageVersionName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}", pathParameters), + autorest.WithJSON(galleryImageVersion), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryImageVersionsClient) UpdateSender(req *http.Request) (future GalleryImageVersionsUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client GalleryImageVersionsClient) UpdateResponder(resp *http.Response) (result GalleryImageVersion, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/images.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/images.go index dfb92b46b0f7..cc727af701f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/images.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/images.go @@ -35,7 +35,8 @@ func NewImagesClient(subscriptionID string) ImagesClient { return NewImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewImagesClientWithBaseURI creates an instance of the ImagesClient client. +// NewImagesClientWithBaseURI creates an instance of the ImagesClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewImagesClientWithBaseURI(baseURI string, subscriptionID string) ImagesClient { return ImagesClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -97,9 +98,8 @@ func (client ImagesClient) CreateOrUpdatePreparer(ctx context.Context, resourceG // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
func (client ImagesClient) CreateOrUpdateSender(req *http.Request) (future ImagesCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -112,7 +112,6 @@ func (client ImagesClient) CreateOrUpdateSender(req *http.Request) (future Image func (client ImagesClient) CreateOrUpdateResponder(resp *http.Response) (result Image, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -174,9 +173,8 @@ func (client ImagesClient) DeletePreparer(ctx context.Context, resourceGroupName // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client ImagesClient) DeleteSender(req *http.Request) (future ImagesDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -189,7 +187,6 @@ func (client ImagesClient) DeleteSender(req *http.Request) (future ImagesDeleteF func (client ImagesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -260,8 +257,7 @@ func (client ImagesClient) GetPreparer(ctx context.Context, resourceGroupName st // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ImagesClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -269,7 +265,6 @@ func (client ImagesClient) GetSender(req *http.Request) (*http.Response, error) func (client ImagesClient) GetResponder(resp *http.Response) (result Image, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -334,8 +329,7 @@ func (client ImagesClient) ListPreparer(ctx context.Context) (*http.Request, err // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ImagesClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -343,7 +337,6 @@ func (client ImagesClient) ListSender(req *http.Request) (*http.Response, error) func (client ImagesClient) ListResponder(resp *http.Response) (result ImageListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -447,8 +440,7 @@ func (client ImagesClient) ListByResourceGroupPreparer(ctx context.Context, reso // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client ImagesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -456,7 +448,6 @@ func (client ImagesClient) ListByResourceGroupSender(req *http.Request) (*http.R func (client ImagesClient) ListByResourceGroupResponder(resp *http.Response) (result ImageListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -558,9 +549,8 @@ func (client ImagesClient) UpdatePreparer(ctx context.Context, resourceGroupName // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client ImagesClient) UpdateSender(req *http.Request) (future ImagesUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -573,7 +563,6 @@ func (client ImagesClient) UpdateSender(req *http.Request) (future ImagesUpdateF func (client ImagesClient) UpdateResponder(resp *http.Response) (result Image, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/loganalytics.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/loganalytics.go index 145d834d59b8..24060d152019 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/loganalytics.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/loganalytics.go @@ -36,7 +36,8 @@ func NewLogAnalyticsClient(subscriptionID string) LogAnalyticsClient { return NewLogAnalyticsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewLogAnalyticsClientWithBaseURI creates an instance of the LogAnalyticsClient client. +// NewLogAnalyticsClientWithBaseURI creates an instance of the LogAnalyticsClient client using a custom endpoint. Use +// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
func NewLogAnalyticsClientWithBaseURI(baseURI string, subscriptionID string) LogAnalyticsClient { return LogAnalyticsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -103,9 +104,8 @@ func (client LogAnalyticsClient) ExportRequestRateByIntervalPreparer(ctx context // ExportRequestRateByIntervalSender sends the ExportRequestRateByInterval request. The method will close the // http.Response Body if it receives an error. func (client LogAnalyticsClient) ExportRequestRateByIntervalSender(req *http.Request) (future LogAnalyticsExportRequestRateByIntervalFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -118,7 +118,6 @@ func (client LogAnalyticsClient) ExportRequestRateByIntervalSender(req *http.Req func (client LogAnalyticsClient) ExportRequestRateByIntervalResponder(resp *http.Response) (result LogAnalyticsOperationResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -188,9 +187,8 @@ func (client LogAnalyticsClient) ExportThrottledRequestsPreparer(ctx context.Con // ExportThrottledRequestsSender sends the ExportThrottledRequests request. The method will close the // http.Response Body if it receives an error. func (client LogAnalyticsClient) ExportThrottledRequestsSender(req *http.Request) (future LogAnalyticsExportThrottledRequestsFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -203,7 +201,6 @@ func (client LogAnalyticsClient) ExportThrottledRequestsSender(req *http.Request func (client LogAnalyticsClient) ExportThrottledRequestsResponder(resp *http.Response) (result LogAnalyticsOperationResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/models.go index 11e0c0a1bd2c..615581d96ca6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/models.go @@ -518,9 +518,9 @@ func PossibleMaintenanceOperationResultCodeTypesValues() []MaintenanceOperationR type OperatingSystemStateTypes string const ( - // Generalized ... + // Generalized Generalized image. Needs to be provisioned during deployment time. Generalized OperatingSystemStateTypes = "Generalized" - // Specialized ... + // Specialized Specialized image. Contains already provisioned OS Disk. Specialized OperatingSystemStateTypes = "Specialized" ) @@ -970,11 +970,13 @@ const ( Low VirtualMachinePriorityTypes = "Low" // Regular ... Regular VirtualMachinePriorityTypes = "Regular" + // Spot ... 
+ Spot VirtualMachinePriorityTypes = "Spot" ) // PossibleVirtualMachinePriorityTypesValues returns an array of possible values for the VirtualMachinePriorityTypes const type. func PossibleVirtualMachinePriorityTypesValues() []VirtualMachinePriorityTypes { - return []VirtualMachinePriorityTypes{Low, Regular} + return []VirtualMachinePriorityTypes{Low, Regular, Spot} } // VirtualMachineScaleSetScaleInRules enumerates the values for virtual machine scale set scale in rules. @@ -1429,10 +1431,8 @@ type AutomaticOSUpgradeProperties struct { type AutomaticRepairsPolicy struct { // Enabled - Specifies whether automatic repairs should be enabled on the virtual machine scale set. The default value is false. Enabled *bool `json:"enabled,omitempty"` - // GracePeriod - The amount of time for which automatic repairs are suspended due to a state change on VM. The grace time starts after the state change has completed. This helps avoid premature or accidental repairs. The time duration should be specified in ISO 8601 format. The default value is 5 minutes (PT5M). + // GracePeriod - The amount of time for which automatic repairs are suspended due to a state change on VM. The grace time starts after the state change has completed. This helps avoid premature or accidental repairs. The time duration should be specified in ISO 8601 format. The minimum allowed grace period is 30 minutes (PT30M), which is also the default value. The maximum allowed grace period is 90 minutes (PT90M). GracePeriod *string `json:"gracePeriod,omitempty"` - // MaxInstanceRepairsPercent - The percentage (capacity of scaleset) of virtual machines that will be simultaneously repaired. The default value is 20%. - MaxInstanceRepairsPercent *int32 `json:"maxInstanceRepairsPercent,omitempty"` } // AvailabilitySet specifies information about the availability set that the virtual machine should be @@ -1785,10 +1785,10 @@ func (asu *AvailabilitySetUpdate) UnmarshalJSON(body []byte) error { return nil } -// BillingProfile specifies the billing related details of a low priority VM or VMSS.

Minimum +// BillingProfile specifies the billing related details of an Azure Spot VM or VMSS.

Minimum // api-version: 2019-03-01. type BillingProfile struct { - // MaxPrice - Specifies the maximum price you are willing to pay for a low priority VM/VMSS. This price is in US Dollars.

This price will be compared with the current low priority price for the VM size. Also, the prices are compared at the time of create/update of low priority VM/VMSS and the operation will only succeed if the maxPrice is greater than the current low priority price.

The maxPrice will also be used for evicting a low priority VM/VMSS if the current low priority price goes beyond the maxPrice after creation of VM/VMSS.

Possible values are:

- Any decimal value greater than zero. Example: $0.01538

-1 – indicates default price to be up-to on-demand.

You can set the maxPrice to -1 to indicate that the low priority VM/VMSS should not be evicted for price reasons. Also, the default max price is -1 if it is not provided by you.

Minimum api-version: 2019-03-01. + // MaxPrice - Specifies the maximum price you are willing to pay for an Azure Spot VM/VMSS. This price is in US Dollars.

This price will be compared with the current Azure Spot price for the VM size. Also, the prices are compared at the time of create/update of Azure Spot VM/VMSS and the operation will only succeed if the maxPrice is greater than the current Azure Spot price.

The maxPrice will also be used for evicting an Azure Spot VM/VMSS if the current Azure Spot price goes beyond the maxPrice after creation of the VM/VMSS.

Possible values are:

- Any decimal value greater than zero. Example: 0.01538

-1 – indicates default price to be up-to on-demand.

You can set the maxPrice to -1 to indicate that the Azure Spot VM/VMSS should not be evicted for price reasons. Also, the default max price is -1 if it is not provided by you.

Minimum api-version: 2019-03-01. MaxPrice *float64 `json:"maxPrice,omitempty"` } @@ -2228,7 +2228,7 @@ type ContainerServiceWindowsProfile struct { type CreationData struct { // CreateOption - This enumerates the possible sources of a disk's creation. Possible values include: 'Empty', 'Attach', 'FromImage', 'Import', 'Copy', 'Restore', 'Upload' CreateOption DiskCreateOption `json:"createOption,omitempty"` - // StorageAccountID - If createOption is Import, the Azure Resource Manager identifier of the storage account containing the blob to import as a disk. Required only if the blob is in a different subscription + // StorageAccountID - Required if createOption is Import. The Azure Resource Manager identifier of the storage account containing the blob to import as a disk. StorageAccountID *string `json:"storageAccountId,omitempty"` // ImageReference - Disk source information. ImageReference *ImageDiskReference `json:"imageReference,omitempty"` @@ -4027,6 +4027,8 @@ type DiskUpdateProperties struct { DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. DiskMBpsReadWrite *int32 `json:"diskMBpsReadWrite,omitempty"` + // Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys. + Encryption *Encryption `json:"encryption,omitempty"` } // Encryption encryption at rest settings for disk or snapshot @@ -4128,6 +4130,35 @@ func (future *GalleriesDeleteFuture) Result(client GalleriesClient) (ar autorest return } +// GalleriesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type GalleriesUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *GalleriesUpdateFuture) Result(client GalleriesClient) (g Gallery, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleriesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent { + g, err = client.UpdateResponder(g.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesUpdateFuture", "Result", g.Response.Response, "Failure responding to request") + } + } + return +} + // Gallery specifies information about the Shared Image Gallery that you want to create or update. type Gallery struct { autorest.Response `json:"-"` @@ -4543,6 +4574,88 @@ func (future *GalleryApplicationsDeleteFuture) Result(client GalleryApplications return } +// GalleryApplicationsUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryApplicationsUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *GalleryApplicationsUpdateFuture) Result(client GalleryApplicationsClient) (ga GalleryApplication, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ga.Response.Response, err = future.GetResult(sender); err == nil && ga.Response.Response.StatusCode != http.StatusNoContent { + ga, err = client.UpdateResponder(ga.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationsUpdateFuture", "Result", ga.Response.Response, "Failure responding to request") + } + } + return +} + +// GalleryApplicationUpdate specifies information about the gallery Application Definition that you want to +// update. +type GalleryApplicationUpdate struct { + *GalleryApplicationProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for GalleryApplicationUpdate. +func (gau GalleryApplicationUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if gau.GalleryApplicationProperties != nil { + objectMap["properties"] = gau.GalleryApplicationProperties + } + if gau.Tags != nil { + objectMap["tags"] = gau.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for GalleryApplicationUpdate struct. +func (gau *GalleryApplicationUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var galleryApplicationProperties GalleryApplicationProperties + err = json.Unmarshal(*v, &galleryApplicationProperties) + if err != nil { + return err + } + gau.GalleryApplicationProperties = &galleryApplicationProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + gau.Tags = tags + } + } + } + + return nil +} + // GalleryApplicationVersion specifies information about the gallery Application Version that you want to // create or update. type GalleryApplicationVersion struct { @@ -4873,6 +4986,88 @@ func (future *GalleryApplicationVersionsDeleteFuture) Result(client GalleryAppli return } +// GalleryApplicationVersionsUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryApplicationVersionsUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *GalleryApplicationVersionsUpdateFuture) Result(client GalleryApplicationVersionsClient) (gav GalleryApplicationVersion, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryApplicationVersionsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gav.Response.Response, err = future.GetResult(sender); err == nil && gav.Response.Response.StatusCode != http.StatusNoContent { + gav, err = client.UpdateResponder(gav.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryApplicationVersionsUpdateFuture", "Result", gav.Response.Response, "Failure responding to request") + } + } + return +} + +// GalleryApplicationVersionUpdate specifies information about the gallery Application Version that you +// want to update. +type GalleryApplicationVersionUpdate struct { + *GalleryApplicationVersionProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for GalleryApplicationVersionUpdate. +func (gavu GalleryApplicationVersionUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if gavu.GalleryApplicationVersionProperties != nil { + objectMap["properties"] = gavu.GalleryApplicationVersionProperties + } + if gavu.Tags != nil { + objectMap["tags"] = gavu.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for GalleryApplicationVersionUpdate struct. +func (gavu *GalleryApplicationVersionUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var galleryApplicationVersionProperties GalleryApplicationVersionProperties + err = json.Unmarshal(*v, &galleryApplicationVersionProperties) + if err != nil { + return err + } + gavu.GalleryApplicationVersionProperties = &galleryApplicationVersionProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + gavu.Tags = tags + } + } + } + + return nil +} + // GalleryArtifactPublishingProfileBase describes the basic gallery artifact publishing profile. type GalleryArtifactPublishingProfileBase struct { // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable. @@ -5260,6 +5455,87 @@ func (future *GalleryImagesDeleteFuture) Result(client GalleryImagesClient) (ar return } +// GalleryImagesUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type GalleryImagesUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *GalleryImagesUpdateFuture) Result(client GalleryImagesClient) (gi GalleryImage, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gi.Response.Response, err = future.GetResult(sender); err == nil && gi.Response.Response.StatusCode != http.StatusNoContent { + gi, err = client.UpdateResponder(gi.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesUpdateFuture", "Result", gi.Response.Response, "Failure responding to request") + } + } + return +} + +// GalleryImageUpdate specifies information about the gallery Image Definition that you want to update. +type GalleryImageUpdate struct { + *GalleryImageProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for GalleryImageUpdate. +func (giu GalleryImageUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if giu.GalleryImageProperties != nil { + objectMap["properties"] = giu.GalleryImageProperties + } + if giu.Tags != nil { + objectMap["tags"] = giu.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for GalleryImageUpdate struct. +func (giu *GalleryImageUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var galleryImageProperties GalleryImageProperties + err = json.Unmarshal(*v, &galleryImageProperties) + if err != nil { + return err + } + giu.GalleryImageProperties = &galleryImageProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + giu.Tags = tags + } + } + } + + return nil +} + // GalleryImageVersion specifies information about the gallery Image Version that you want to create or // update. type GalleryImageVersion struct { @@ -5593,6 +5869,87 @@ type GalleryImageVersionStorageProfile struct { DataDiskImages *[]GalleryDataDiskImage `json:"dataDiskImages,omitempty"` } +// GalleryImageVersionsUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryImageVersionsUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *GalleryImageVersionsUpdateFuture) Result(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if giv.Response.Response, err = future.GetResult(sender); err == nil && giv.Response.Response.StatusCode != http.StatusNoContent { + giv, err = client.UpdateResponder(giv.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsUpdateFuture", "Result", giv.Response.Response, "Failure responding to request") + } + } + return +} + +// GalleryImageVersionUpdate specifies information about the gallery Image Version that you want to update. +type GalleryImageVersionUpdate struct { + *GalleryImageVersionProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for GalleryImageVersionUpdate. +func (givu GalleryImageVersionUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if givu.GalleryImageVersionProperties != nil { + objectMap["properties"] = givu.GalleryImageVersionProperties + } + if givu.Tags != nil { + objectMap["tags"] = givu.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for GalleryImageVersionUpdate struct. +func (givu *GalleryImageVersionUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var galleryImageVersionProperties GalleryImageVersionProperties + err = json.Unmarshal(*v, &galleryImageVersionProperties) + if err != nil { + return err + } + givu.GalleryImageVersionProperties = &galleryImageVersionProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + givu.Tags = tags + } + } + } + + return nil +} + // GalleryList the List Galleries operation response. type GalleryList struct { autorest.Response `json:"-"` @@ -5757,6 +6114,58 @@ type GalleryProperties struct { ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` } +// GalleryUpdate specifies information about the Shared Image Gallery that you want to update. +type GalleryUpdate struct { + *GalleryProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for GalleryUpdate. +func (gu GalleryUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if gu.GalleryProperties != nil { + objectMap["properties"] = gu.GalleryProperties + } + if gu.Tags != nil { + objectMap["tags"] = gu.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for GalleryUpdate struct. 
+func (gu *GalleryUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var galleryProperties GalleryProperties + err = json.Unmarshal(*v, &galleryProperties) + if err != nil { + return err + } + gu.GalleryProperties = &galleryProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + gu.Tags = tags + } + } + } + + return nil +} + // GrantAccessData data used for requesting a SAS. type GrantAccessData struct { // Access - Possible values include: 'None', 'Read', 'Write' @@ -6113,7 +6522,7 @@ type ImagePurchasePlan struct { // ImageReference specifies information about the image to use. You can specify information about platform // images, marketplace images, or virtual machine images. This element is required when you want to use a // platform image, marketplace image, or virtual machine image, but is not used in other creation -// operations. +// operations. NOTE: Image reference publisher and offer can only be set when you create the scale set type ImageReference struct { // Publisher - The image publisher. Publisher *string `json:"publisher,omitempty"` @@ -6123,6 +6532,8 @@ type ImageReference struct { Sku *string `json:"sku,omitempty"` // Version - Specifies the version of the platform image or marketplace image used to create the virtual machine. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available. Version *string `json:"version,omitempty"` + // ExactVersion - READ-ONLY; Specifies in decimal numbers, the version of platform image or marketplace image used to create the virtual machine. This readonly field differs from 'version', only if the value specified in 'version' field is 'latest'. + ExactVersion *string `json:"exactVersion,omitempty"` // ID - Resource Id ID *string `json:"id,omitempty"` } @@ -6793,15 +7204,16 @@ type OSDiskImage struct { OperatingSystem OperatingSystemTypes `json:"operatingSystem,omitempty"` } -// OSProfile specifies the operating system settings for the virtual machine. +// OSProfile specifies the operating system settings for the virtual machine. Some of the settings cannot +// be changed once VM is provisioned. type OSProfile struct { // ComputerName - Specifies the host OS name of the virtual machine.

This name cannot be updated after the VM is created.

**Max-length (Windows):** 15 characters

**Max-length (Linux):** 64 characters.

For naming conventions and restrictions see [Azure infrastructure services implementation guidelines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-infrastructure-subscription-accounts-guidelines?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#1-naming-conventions). ComputerName *string `json:"computerName,omitempty"` - // AdminUsername - Specifies the name of the administrator account.

**Windows-only restriction:** Cannot end in "."

**Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5".

**Minimum-length (Linux):** 1 character

**Max-length (Linux):** 64 characters

**Max-length (Windows):** 20 characters

  • For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
  • For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) + // AdminUsername - Specifies the name of the administrator account.

    This property cannot be updated after the VM is created.

    **Windows-only restriction:** Cannot end in "."

    **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5".

    **Minimum-length (Linux):** 1 character

    **Max-length (Linux):** 64 characters

    **Max-length (Windows):** 20 characters

  • For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
  • For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) AdminUsername *string `json:"adminUsername,omitempty"` // AdminPassword - Specifies the password of the administrator account.

    **Minimum-length (Windows):** 8 characters

    **Minimum-length (Linux):** 6 characters

    **Max-length (Windows):** 123 characters

    **Max-length (Linux):** 72 characters

    **Complexity requirements:** 3 out of 4 conditions below need to be fulfilled
    Has lower characters
    Has upper characters
    Has a digit
    Has a special character (Regex match [\W_])

    **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!"

    For resetting the password, see [How to reset the Remote Desktop service or its login password in a Windows VM](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-reset-rdp?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    For resetting root password, see [Manage users, SSH, and check or repair disks on Azure Linux VMs using the VMAccess Extension](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-vmaccess-extension?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#reset-root-password) AdminPassword *string `json:"adminPassword,omitempty"` - // CustomData - Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes.

    For using cloud-init for your VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) + // CustomData - Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes.

    **Note: Do not pass any secrets or passwords in the customData property**

    This property cannot be updated after the VM is created.

    customData is passed to the VM to be saved as a file; for more information, see [Custom Data on Azure VMs](https://docs.microsoft.com/azure/virtual-machines/custom-data)

    For using cloud-init for your Linux VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) CustomData *string `json:"customData,omitempty"` // WindowsConfiguration - Specifies Windows operating system settings on the virtual machine. WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` @@ -6811,7 +7223,7 @@ type OSProfile struct { Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` // AllowExtensionOperations - Specifies whether extension operations should be allowed on the virtual machine.

    This may only be set to False when no extensions are present on the virtual machine. AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty"` - // RequireGuestProvisionSignal - Specifies whether the guest provision signal is required from the virtual machine. + // RequireGuestProvisionSignal - Specifies whether the guest provision signal is required to infer provision success of the virtual machine. RequireGuestProvisionSignal *bool `json:"requireGuestProvisionSignal,omitempty"` } @@ -7084,11 +7496,13 @@ type ProximityPlacementGroupProperties struct { // ProximityPlacementGroupType - Specifies the type of the proximity placement group.

    Possible values are:

    **Standard** : Co-locate resources within an Azure region or Availability Zone.

    **Ultra** : For future use. Possible values include: 'Standard', 'Ultra' ProximityPlacementGroupType ProximityPlacementGroupType `json:"proximityPlacementGroupType,omitempty"` // VirtualMachines - READ-ONLY; A list of references to all virtual machines in the proximity placement group. - VirtualMachines *[]SubResource `json:"virtualMachines,omitempty"` + VirtualMachines *[]SubResourceWithColocationStatus `json:"virtualMachines,omitempty"` // VirtualMachineScaleSets - READ-ONLY; A list of references to all virtual machine scale sets in the proximity placement group. - VirtualMachineScaleSets *[]SubResource `json:"virtualMachineScaleSets,omitempty"` + VirtualMachineScaleSets *[]SubResourceWithColocationStatus `json:"virtualMachineScaleSets,omitempty"` // AvailabilitySets - READ-ONLY; A list of references to all availability sets in the proximity placement group. - AvailabilitySets *[]SubResource `json:"availabilitySets,omitempty"` + AvailabilitySets *[]SubResourceWithColocationStatus `json:"availabilitySets,omitempty"` + // ColocationStatus - Describes colocation status of the Proximity Placement Group. + ColocationStatus *InstanceViewStatus `json:"colocationStatus,omitempty"` } // ProximityPlacementGroupUpdate specifies information about the proximity placement group. @@ -7835,7 +8249,9 @@ type ScheduledEventsProfile struct { TerminateNotificationProfile *TerminateNotificationProfile `json:"terminateNotificationProfile,omitempty"` } -// Sku describes a virtual machine scale set sku. +// Sku describes a virtual machine scale set sku. NOTE: If the new VM SKU is not supported on the hardware +// the scale set is currently on, you need to deallocate the VMs in the scale set before you modify the SKU +// name. type Sku struct { // Name - The sku name. Name *string `json:"name,omitempty"` @@ -8355,6 +8771,8 @@ type SnapshotUpdateProperties struct { DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` // EncryptionSettingsCollection - Encryption settings collection used be Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"` + // Encryption - Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys. + Encryption *Encryption `json:"encryption,omitempty"` } // SourceVault the vault id is an Azure Resource Manager Resource id in the form @@ -8401,6 +8819,14 @@ type SubResourceReadOnly struct { ID *string `json:"id,omitempty"` } +// SubResourceWithColocationStatus ... +type SubResourceWithColocationStatus struct { + // ColocationStatus - Describes colocation status of a resource in the Proximity Placement Group. + ColocationStatus *InstanceViewStatus `json:"colocationStatus,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + // TargetRegion describes the target region information. type TargetRegion struct { // Name - The name of the region. @@ -9512,23 +9938,23 @@ type VirtualMachineProperties struct { StorageProfile *StorageProfile `json:"storageProfile,omitempty"` // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the virtual machine. AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` - // OsProfile - Specifies the operating system settings for the virtual machine. + // OsProfile - Specifies the operating system settings used while creating the virtual machine. 
Some of the settings cannot be changed once VM is provisioned. OsProfile *OSProfile `json:"osProfile,omitempty"` // NetworkProfile - Specifies the network interfaces of the virtual machine. NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` // DiagnosticsProfile - Specifies the boot diagnostic settings state.

    Minimum api-version: 2015-06-15. DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` - // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

    For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set.

    This property cannot exist along with a non-null properties.virtualMachineScaleSet reference. + // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

    For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Currently, a VM can only be added to an availability set at creation time. The availability set to which the VM is being added should be under the same resource group as the availability set resource. An existing VM cannot be added to an availability set.

    This property cannot exist along with a non-null properties.virtualMachineScaleSet reference. AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` // VirtualMachineScaleSet - Specifies information about the virtual machine scale set that the virtual machine should be assigned to. Virtual machines specified in the same virtual machine scale set are allocated to different nodes to maximize availability. Currently, a VM can only be added to virtual machine scale set at creation time. An existing VM cannot be added to a virtual machine scale set.

    This property cannot exist along with a non-null properties.availabilitySet reference.

    Minimum api‐version: 2019‐03‐01 VirtualMachineScaleSet *SubResource `json:"virtualMachineScaleSet,omitempty"` // ProximityPlacementGroup - Specifies information about the proximity placement group that the virtual machine should be assigned to.

    Minimum api-version: 2018-04-01. ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"` - // Priority - Specifies the priority for the virtual machine.

    Minimum api-version: 2019-03-01. Possible values include: 'Regular', 'Low' + // Priority - Specifies the priority for the virtual machine.

    Minimum api-version: 2019-03-01. Possible values include: 'Regular', 'Low', 'Spot' Priority VirtualMachinePriorityTypes `json:"priority,omitempty"` - // EvictionPolicy - Specifies the eviction policy for the low priority virtual machine. Only supported value is 'Deallocate'.

    Minimum api-version: 2019-03-01. Possible values include: 'Deallocate', 'Delete' + // EvictionPolicy - Specifies the eviction policy for the Azure Spot virtual machine and Azure Spot scale set.

    For Azure Spot virtual machines, the only supported value is 'Deallocate' and the minimum api-version is 2019-03-01.

    For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2017-10-30-preview. Possible values include: 'Deallocate', 'Delete' EvictionPolicy VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"` - // BillingProfile - Specifies the billing related details of a low priority virtual machine.

    Minimum api-version: 2019-03-01. + // BillingProfile - Specifies the billing related details of an Azure Spot virtual machine.

    Minimum api-version: 2019-03-01. BillingProfile *BillingProfile `json:"billingProfile,omitempty"` // Host - Specifies information about the dedicated host that the virtual machine resides in.

    Minimum api-version: 2018-10-01. Host *SubResource `json:"host,omitempty"` @@ -9559,7 +9985,7 @@ type VirtualMachineScaleSet struct { *VirtualMachineScaleSetProperties `json:"properties,omitempty"` // Identity - The identity of the virtual machine scale set, if configured. Identity *VirtualMachineScaleSetIdentity `json:"identity,omitempty"` - // Zones - The virtual machine scale set zones. + // Zones - The virtual machine scale set zones. NOTE: Availability zones can only be set when you create the scale set. Zones *[]string `json:"zones,omitempty"` // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` @@ -9731,7 +10157,9 @@ type VirtualMachineScaleSetDataDisk struct { type VirtualMachineScaleSetExtension struct { autorest.Response `json:"-"` // Name - The name of the extension. - Name *string `json:"name,omitempty"` + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` *VirtualMachineScaleSetExtensionProperties `json:"properties,omitempty"` // ID - READ-ONLY; Resource Id ID *string `json:"id,omitempty"` @@ -9767,6 +10195,15 @@ func (vmsse *VirtualMachineScaleSetExtension) UnmarshalJSON(body []byte) error { } vmsse.Name = &name } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + vmsse.Type = &typeVar + } case "properties": if v != nil { var virtualMachineScaleSetExtensionProperties VirtualMachineScaleSetExtensionProperties @@ -10019,6 +10456,106 @@ func (future *VirtualMachineScaleSetExtensionsDeleteFuture) Result(client Virtua return } +// VirtualMachineScaleSetExtensionsUpdateFuture an abstraction for monitoring and retrieving the results of +// a long-running operation. +type VirtualMachineScaleSetExtensionsUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetExtensionsUpdateFuture) Result(client VirtualMachineScaleSetExtensionsClient) (vmsse VirtualMachineScaleSetExtension, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetExtensionsUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vmsse.Response.Response, err = future.GetResult(sender); err == nil && vmsse.Response.Response.StatusCode != http.StatusNoContent { + vmsse, err = client.UpdateResponder(vmsse.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsUpdateFuture", "Result", vmsse.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualMachineScaleSetExtensionUpdate describes a Virtual Machine Scale Set Extension. +type VirtualMachineScaleSetExtensionUpdate struct { + // Name - READ-ONLY; The name of the extension. 
+ Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + *VirtualMachineScaleSetExtensionProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineScaleSetExtensionUpdate. +func (vmsseu VirtualMachineScaleSetExtensionUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmsseu.VirtualMachineScaleSetExtensionProperties != nil { + objectMap["properties"] = vmsseu.VirtualMachineScaleSetExtensionProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualMachineScaleSetExtensionUpdate struct. +func (vmsseu *VirtualMachineScaleSetExtensionUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vmsseu.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + vmsseu.Type = &typeVar + } + case "properties": + if v != nil { + var virtualMachineScaleSetExtensionProperties VirtualMachineScaleSetExtensionProperties + err = json.Unmarshal(*v, &virtualMachineScaleSetExtensionProperties) + if err != nil { + return err + } + vmsseu.VirtualMachineScaleSetExtensionProperties = &virtualMachineScaleSetExtensionProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vmsseu.ID = &ID + } + } + } + + return nil +} + // VirtualMachineScaleSetIdentity identity for the virtual machine scale set. type VirtualMachineScaleSetIdentity struct { // PrincipalID - READ-ONLY; The principal id of virtual machine scale set identity. This property will only be provided for a system assigned identity. @@ -11591,7 +12128,8 @@ func (vmssuic *VirtualMachineScaleSetUpdateIPConfiguration) UnmarshalJSON(body [ } // VirtualMachineScaleSetUpdateIPConfigurationProperties describes a virtual machine scale set network -// profile's IP configuration properties. +// profile's IP configuration properties. NOTE: The subnet of a scale set may be modified as long as the +// original subnet and the new subnet are in the same virtual network. type VirtualMachineScaleSetUpdateIPConfigurationProperties struct { // Subnet - The subnet. Subnet *APIEntityReference `json:"subnet,omitempty"` @@ -11745,12 +12283,14 @@ type VirtualMachineScaleSetUpdateProperties struct { Overprovision *bool `json:"overprovision,omitempty"` // DoNotRunExtensionsOnOverprovisionedVMs - When Overprovision is enabled, extensions are launched only on the requested number of VMs which are finally kept. This property will hence ensure that the extensions do not run on the extra overprovisioned VMs. DoNotRunExtensionsOnOverprovisionedVMs *bool `json:"doNotRunExtensionsOnOverprovisionedVMs,omitempty"` - // SinglePlacementGroup - When true this limits the scale set to a single placement group, of max size 100 virtual machines. + // SinglePlacementGroup - When true this limits the scale set to a single placement group, of max size 100 virtual machines.NOTE: If singlePlacementGroup is true, it may be modified to false. However, if singlePlacementGroup is false, it may not be modified to true. 
SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines have the capability to support attaching managed data disks with UltraSSD_LRS storage account type. AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` // ScaleInPolicy - Specifies the scale-in policy that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in. ScaleInPolicy *ScaleInPolicy `json:"scaleInPolicy,omitempty"` + // ProximityPlacementGroup - Specifies information about the proximity placement group that the virtual machine scale set should be assigned to.

    Minimum api-version: 2018-04-01. + ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"` } // VirtualMachineScaleSetUpdatePublicIPAddressConfiguration describes a virtual machines scale set IP @@ -11839,7 +12379,7 @@ type VirtualMachineScaleSetUpdateVMProfile struct { ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"` // LicenseType - The license type, which is for bring your own license scenario. LicenseType *string `json:"licenseType,omitempty"` - // BillingProfile - Specifies the billing related details of a low priority VMSS.

    Minimum api-version: 2019-03-01. + // BillingProfile - Specifies the billing related details of an Azure Spot VMSS.

    Minimum api-version: 2019-03-01. BillingProfile *BillingProfile `json:"billingProfile,omitempty"` // ScheduledEventsProfile - Specifies Scheduled Event related configurations. ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"` @@ -12302,11 +12842,11 @@ type VirtualMachineScaleSetVMProfile struct { ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"` // LicenseType - Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system.

    Possible values are:

    Windows_Client

    Windows_Server

    If this element is included in a request for an update, the value must match the initial value. This value cannot be updated.

    For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Minimum api-version: 2015-06-15 LicenseType *string `json:"licenseType,omitempty"` - // Priority - Specifies the priority for the virtual machines in the scale set.

    Minimum api-version: 2017-10-30-preview. Possible values include: 'Regular', 'Low' + // Priority - Specifies the priority for the virtual machines in the scale set.

    Minimum api-version: 2017-10-30-preview. Possible values include: 'Regular', 'Low', 'Spot' Priority VirtualMachinePriorityTypes `json:"priority,omitempty"` - // EvictionPolicy - Specifies the eviction policy for virtual machines in a low priority scale set.

    Minimum api-version: 2017-10-30-preview. Possible values include: 'Deallocate', 'Delete' + // EvictionPolicy - Specifies the eviction policy for the Azure Spot virtual machine and Azure Spot scale set.

    For Azure Spot virtual machines, the only supported value is 'Deallocate' and the minimum api-version is 2019-03-01.

    For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2017-10-30-preview. Possible values include: 'Deallocate', 'Delete' EvictionPolicy VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"` - // BillingProfile - Specifies the billing related details of a low priority VMSS.

    Minimum api-version: 2019-03-01. + // BillingProfile - Specifies the billing related details of an Azure Spot VMSS.

    Minimum api-version: 2019-03-01. BillingProfile *BillingProfile `json:"billingProfile,omitempty"` // ScheduledEventsProfile - Specifies Scheduled Event related configurations. ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"` @@ -13110,7 +13650,7 @@ type WindowsConfiguration struct { ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"` // EnableAutomaticUpdates - Indicates whether Automatic Updates is enabled for the Windows virtual machine. Default value is true.

    For virtual machine scale sets, this property can be updated and updates will take effect on OS reprovisioning. EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty"` - // TimeZone - Specifies the time zone of the virtual machine. e.g. "Pacific Standard Time" + // TimeZone - Specifies the time zone of the virtual machine. e.g. "Pacific Standard Time".

    Possible values can be [TimeZoneInfo.Id](https://docs.microsoft.com/en-us/dotnet/api/system.timezoneinfo.id?#System_TimeZoneInfo_Id) value from time zones returned by [TimeZoneInfo.GetSystemTimeZones](https://docs.microsoft.com/en-us/dotnet/api/system.timezoneinfo.getsystemtimezones). TimeZone *string `json:"timeZone,omitempty"` // AdditionalUnattendContent - Specifies additional base-64 encoded XML formatted information that can be included in the Unattend.xml file, which is used by Windows Setup. AdditionalUnattendContent *[]AdditionalUnattendContent `json:"additionalUnattendContent,omitempty"` @@ -13126,7 +13666,7 @@ type WinRMConfiguration struct { // WinRMListener describes Protocol and thumbprint of Windows Remote Management listener type WinRMListener struct { - // Protocol - Specifies the protocol of listener.

    Possible values are:
    **http**

    **https**. Possible values include: 'HTTP', 'HTTPS' + // Protocol - Specifies the protocol of WinRM listener.

    Possible values are:
    **http**

    **https**. Possible values include: 'HTTP', 'HTTPS' Protocol ProtocolTypes `json:"protocol,omitempty"` // CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be the Base64 encoding of the following JSON Object, which is encoded in UTF-8:

    {
    "data":"",
    "dataType":"pfx",
    "password":""
    } CertificateURL *string `json:"certificateUrl,omitempty"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/operations.go index 021b16328e7d..27ed7fb64407 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/operations.go @@ -35,7 +35,8 @@ func NewOperationsClient(subscriptionID string) OperationsClient { return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client. +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -91,8 +92,7 @@ func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListResponder handles the response to the List request. The method always @@ -100,7 +100,6 @@ func (client OperationsClient) ListSender(req *http.Request) (*http.Response, er func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/proximityplacementgroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/proximityplacementgroups.go index a42d52685560..41b02851572f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/proximityplacementgroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/proximityplacementgroups.go @@ -35,7 +35,9 @@ func NewProximityPlacementGroupsClient(subscriptionID string) ProximityPlacement return NewProximityPlacementGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewProximityPlacementGroupsClientWithBaseURI creates an instance of the ProximityPlacementGroupsClient client. +// NewProximityPlacementGroupsClientWithBaseURI creates an instance of the ProximityPlacementGroupsClient client using +// a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). 
func NewProximityPlacementGroupsClientWithBaseURI(baseURI string, subscriptionID string) ProximityPlacementGroupsClient { return ProximityPlacementGroupsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -103,8 +105,7 @@ func (client ProximityPlacementGroupsClient) CreateOrUpdatePreparer(ctx context. // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client ProximityPlacementGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always @@ -112,7 +113,6 @@ func (client ProximityPlacementGroupsClient) CreateOrUpdateSender(req *http.Requ func (client ProximityPlacementGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result ProximityPlacementGroup, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -180,8 +180,7 @@ func (client ProximityPlacementGroupsClient) DeletePreparer(ctx context.Context, // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client ProximityPlacementGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // DeleteResponder handles the response to the Delete request. The method always @@ -189,7 +188,6 @@ func (client ProximityPlacementGroupsClient) DeleteSender(req *http.Request) (*h func (client ProximityPlacementGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByClosing()) result.Response = resp @@ -200,7 +198,9 @@ func (client ProximityPlacementGroupsClient) DeleteResponder(resp *http.Response // Parameters: // resourceGroupName - the name of the resource group. // proximityPlacementGroupName - the name of the proximity placement group. -func (client ProximityPlacementGroupsClient) Get(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string) (result ProximityPlacementGroup, err error) { +// includeColocationStatus - includeColocationStatus=true enables fetching the colocation status of all the +// resources in the proximity placement group. 
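For orientation, a minimal usage sketch of the Get call documented above, passing the new includeColocationStatus query parameter. The subscription ID, resource names, and the environment-based authorizer are illustrative assumptions, not part of this patch.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Hypothetical subscription ID; replace with a real one.
	client := compute.NewProximityPlacementGroupsClient("00000000-0000-0000-0000-000000000000")

	// Assumes credentials are available in the environment (service principal, managed identity, ...).
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	// Pass "true" to also fetch the colocation status of the group's resources;
	// an empty string keeps the previous behaviour (the query parameter is then omitted).
	ppg, err := client.Get(context.Background(), "example-rg", "example-ppg", "true")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*ppg.Name)
}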
+func (client ProximityPlacementGroupsClient) Get(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, includeColocationStatus string) (result ProximityPlacementGroup, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ProximityPlacementGroupsClient.Get") defer func() { @@ -211,7 +211,7 @@ func (client ProximityPlacementGroupsClient) Get(ctx context.Context, resourceGr tracing.EndSpan(ctx, sc, err) }() } - req, err := client.GetPreparer(ctx, resourceGroupName, proximityPlacementGroupName) + req, err := client.GetPreparer(ctx, resourceGroupName, proximityPlacementGroupName, includeColocationStatus) if err != nil { err = autorest.NewErrorWithError(err, "compute.ProximityPlacementGroupsClient", "Get", nil, "Failure preparing request") return @@ -233,7 +233,7 @@ func (client ProximityPlacementGroupsClient) Get(ctx context.Context, resourceGr } // GetPreparer prepares the Get request. -func (client ProximityPlacementGroupsClient) GetPreparer(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string) (*http.Request, error) { +func (client ProximityPlacementGroupsClient) GetPreparer(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, includeColocationStatus string) (*http.Request, error) { pathParameters := map[string]interface{}{ "proximityPlacementGroupName": autorest.Encode("path", proximityPlacementGroupName), "resourceGroupName": autorest.Encode("path", resourceGroupName), @@ -244,6 +244,9 @@ func (client ProximityPlacementGroupsClient) GetPreparer(ctx context.Context, re queryParameters := map[string]interface{}{ "api-version": APIVersion, } + if len(includeColocationStatus) > 0 { + queryParameters["includeColocationStatus"] = autorest.Encode("query", includeColocationStatus) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -256,8 +259,7 @@ func (client ProximityPlacementGroupsClient) GetPreparer(ctx context.Context, re // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ProximityPlacementGroupsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -265,7 +267,6 @@ func (client ProximityPlacementGroupsClient) GetSender(req *http.Request) (*http func (client ProximityPlacementGroupsClient) GetResponder(resp *http.Response) (result ProximityPlacementGroup, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -332,8 +333,7 @@ func (client ProximityPlacementGroupsClient) ListByResourceGroupPreparer(ctx con // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client ProximityPlacementGroupsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. 
The method always @@ -341,7 +341,6 @@ func (client ProximityPlacementGroupsClient) ListByResourceGroupSender(req *http func (client ProximityPlacementGroupsClient) ListByResourceGroupResponder(resp *http.Response) (result ProximityPlacementGroupListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -442,8 +441,7 @@ func (client ProximityPlacementGroupsClient) ListBySubscriptionPreparer(ctx cont // ListBySubscriptionSender sends the ListBySubscription request. The method will close the // http.Response Body if it receives an error. func (client ProximityPlacementGroupsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always @@ -451,7 +449,6 @@ func (client ProximityPlacementGroupsClient) ListBySubscriptionSender(req *http. func (client ProximityPlacementGroupsClient) ListBySubscriptionResponder(resp *http.Response) (result ProximityPlacementGroupListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -559,8 +556,7 @@ func (client ProximityPlacementGroupsClient) UpdatePreparer(ctx context.Context, // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client ProximityPlacementGroupsClient) UpdateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // UpdateResponder handles the response to the Update request. The method always @@ -568,7 +564,6 @@ func (client ProximityPlacementGroupsClient) UpdateSender(req *http.Request) (*h func (client ProximityPlacementGroupsClient) UpdateResponder(resp *http.Response) (result ProximityPlacementGroup, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/resourceskus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/resourceskus.go index adb402788c87..f98885df73f9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/resourceskus.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/resourceskus.go @@ -35,14 +35,15 @@ func NewResourceSkusClient(subscriptionID string) ResourceSkusClient { return NewResourceSkusClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewResourceSkusClientWithBaseURI creates an instance of the ResourceSkusClient client. +// NewResourceSkusClientWithBaseURI creates an instance of the ResourceSkusClient client using a custom endpoint. Use +// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
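An aside on the "custom endpoint" wording these constructors now carry: a minimal sketch, assuming an environment-based authorizer and a hypothetical subscription ID, of pointing the ResourceSkusClient at a sovereign cloud's Resource Manager endpoint and using the location-only filter noted above.

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Azure China is used as an example of a non-standard base URI;
	// azure.USGovernmentCloud and Azure Stack endpoints follow the same pattern.
	client := compute.NewResourceSkusClientWithBaseURI(
		azure.ChinaCloud.ResourceManagerEndpoint,
		"00000000-0000-0000-0000-000000000000", // hypothetical subscription ID
	)

	// Assumes credentials are available in the environment.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	// Per the comment above, only the location filter is currently supported.
	page, err := client.List(context.Background(), "location eq 'chinaeast2'")
	if err != nil {
		log.Fatal(err)
	}
	for _, sku := range page.Values() {
		_ = sku // inspect sku.Name, sku.ResourceType, sku.Locations, ...
	}
}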
func NewResourceSkusClientWithBaseURI(baseURI string, subscriptionID string) ResourceSkusClient { return ResourceSkusClient{NewWithBaseURI(baseURI, subscriptionID)} } // List gets the list of Microsoft.Compute SKUs available for your Subscription. // Parameters: -// filter - the filter to apply on the operation. +// filter - the filter to apply on the operation. Only **location** filter is supported currently. func (client ResourceSkusClient) List(ctx context.Context, filter string) (result ResourceSkusResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ResourceSkusClient.List") @@ -101,8 +102,7 @@ func (client ResourceSkusClient) ListPreparer(ctx context.Context, filter string // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ResourceSkusClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -110,7 +110,6 @@ func (client ResourceSkusClient) ListSender(req *http.Request) (*http.Response, func (client ResourceSkusClient) ListResponder(resp *http.Response) (result ResourceSkusResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/snapshots.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/snapshots.go index 3a52fe87388b..32971eba3448 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/snapshots.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/snapshots.go @@ -36,7 +36,8 @@ func NewSnapshotsClient(subscriptionID string) SnapshotsClient { return NewSnapshotsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewSnapshotsClientWithBaseURI creates an instance of the SnapshotsClient client. +// NewSnapshotsClientWithBaseURI creates an instance of the SnapshotsClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewSnapshotsClientWithBaseURI(baseURI string, subscriptionID string) SnapshotsClient { return SnapshotsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -113,9 +114,8 @@ func (client SnapshotsClient) CreateOrUpdatePreparer(ctx context.Context, resour // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (future SnapshotsCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -128,7 +128,6 @@ func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (future Sn func (client SnapshotsClient) CreateOrUpdateResponder(resp *http.Response) (result Snapshot, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -191,9 +190,8 @@ func (client SnapshotsClient) DeletePreparer(ctx context.Context, resourceGroupN // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client SnapshotsClient) DeleteSender(req *http.Request) (future SnapshotsDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -206,7 +204,6 @@ func (client SnapshotsClient) DeleteSender(req *http.Request) (future SnapshotsD func (client SnapshotsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -274,8 +271,7 @@ func (client SnapshotsClient) GetPreparer(ctx context.Context, resourceGroupName // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client SnapshotsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -283,7 +279,6 @@ func (client SnapshotsClient) GetSender(req *http.Request) (*http.Response, erro func (client SnapshotsClient) GetResponder(resp *http.Response) (result Snapshot, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -355,9 +350,8 @@ func (client SnapshotsClient) GrantAccessPreparer(ctx context.Context, resourceG // GrantAccessSender sends the GrantAccess request. The method will close the // http.Response Body if it receives an error. func (client SnapshotsClient) GrantAccessSender(req *http.Request) (future SnapshotsGrantAccessFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -370,7 +364,6 @@ func (client SnapshotsClient) GrantAccessSender(req *http.Request) (future Snaps func (client SnapshotsClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -434,8 +427,7 @@ func (client SnapshotsClient) ListPreparer(ctx context.Context) (*http.Request, // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client SnapshotsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -443,7 +435,6 @@ func (client SnapshotsClient) ListSender(req *http.Request) (*http.Response, err func (client SnapshotsClient) ListResponder(resp *http.Response) (result SnapshotList, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -547,8 +538,7 @@ func (client SnapshotsClient) ListByResourceGroupPreparer(ctx context.Context, r // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client SnapshotsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -556,7 +546,6 @@ func (client SnapshotsClient) ListByResourceGroupSender(req *http.Request) (*htt func (client SnapshotsClient) ListByResourceGroupResponder(resp *http.Response) (result SnapshotList, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -656,9 +645,8 @@ func (client SnapshotsClient) RevokeAccessPreparer(ctx context.Context, resource // RevokeAccessSender sends the RevokeAccess request. The method will close the // http.Response Body if it receives an error. func (client SnapshotsClient) RevokeAccessSender(req *http.Request) (future SnapshotsRevokeAccessFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -671,7 +659,6 @@ func (client SnapshotsClient) RevokeAccessSender(req *http.Request) (future Snap func (client SnapshotsClient) RevokeAccessResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -736,9 +723,8 @@ func (client SnapshotsClient) UpdatePreparer(ctx context.Context, resourceGroupN // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client SnapshotsClient) UpdateSender(req *http.Request) (future SnapshotsUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -751,7 +737,6 @@ func (client SnapshotsClient) UpdateSender(req *http.Request) (future SnapshotsU func (client SnapshotsClient) UpdateResponder(resp *http.Response) (result Snapshot, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/usage.go index 690136c06d54..4b32b6c5030f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/usage.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/usage.go @@ -36,7 +36,8 @@ func NewUsageClient(subscriptionID string) UsageClient { return NewUsageClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewUsageClientWithBaseURI creates an instance of the UsageClient client. +// NewUsageClientWithBaseURI creates an instance of the UsageClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClient { return UsageClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -107,8 +108,7 @@ func (client UsageClient) ListPreparer(ctx context.Context, location string) (*h // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -116,7 +116,6 @@ func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) func (client UsageClient) ListResponder(resp *http.Response) (result ListUsagesResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/version.go index 7ad07e01584a..cbadbc9459f4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/version.go @@ -21,7 +21,7 @@ import "github.com/Azure/azure-sdk-for-go/version" // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/" + version.Number + " compute/2019-07-01" + return "Azure-SDK-For-Go/" + Version() + " compute/2019-07-01" } // Version returns the semantic version (see http://semver.org) of the client. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensionimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensionimages.go index 7a670a607c82..ccc4fcd6a3d0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensionimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensionimages.go @@ -36,7 +36,8 @@ func NewVirtualMachineExtensionImagesClient(subscriptionID string) VirtualMachin } // NewVirtualMachineExtensionImagesClientWithBaseURI creates an instance of the VirtualMachineExtensionImagesClient -// client. +// client using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI +// (sovereign clouds, Azure stack). func NewVirtualMachineExtensionImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionImagesClient { return VirtualMachineExtensionImagesClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -102,8 +103,7 @@ func (client VirtualMachineExtensionImagesClient) GetPreparer(ctx context.Contex // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionImagesClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -111,7 +111,6 @@ func (client VirtualMachineExtensionImagesClient) GetSender(req *http.Request) ( func (client VirtualMachineExtensionImagesClient) GetResponder(resp *http.Response) (result VirtualMachineExtensionImage, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -178,8 +177,7 @@ func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(ctx context. // ListTypesSender sends the ListTypes request. 
The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionImagesClient) ListTypesSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListTypesResponder handles the response to the ListTypes request. The method always @@ -187,7 +185,6 @@ func (client VirtualMachineExtensionImagesClient) ListTypesSender(req *http.Requ func (client VirtualMachineExtensionImagesClient) ListTypesResponder(resp *http.Response) (result ListVirtualMachineExtensionImage, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) @@ -265,8 +262,7 @@ func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(ctx conte // ListVersionsSender sends the ListVersions request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionImagesClient) ListVersionsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListVersionsResponder handles the response to the ListVersions request. The method always @@ -274,7 +270,6 @@ func (client VirtualMachineExtensionImagesClient) ListVersionsSender(req *http.R func (client VirtualMachineExtensionImagesClient) ListVersionsResponder(resp *http.Response) (result ListVirtualMachineExtensionImage, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensions.go index a77180ddc688..e321691bd098 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineextensions.go @@ -35,7 +35,9 @@ func NewVirtualMachineExtensionsClient(subscriptionID string) VirtualMachineExte return NewVirtualMachineExtensionsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualMachineExtensionsClientWithBaseURI creates an instance of the VirtualMachineExtensionsClient client. +// NewVirtualMachineExtensionsClientWithBaseURI creates an instance of the VirtualMachineExtensionsClient client using +// a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). func NewVirtualMachineExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionsClient { return VirtualMachineExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -99,9 +101,8 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(ctx context. // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. 
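Several of the Senders refactored in this file return long-running-operation futures rather than plain responses. For orientation only, a hedged sketch of how a caller typically drives one of these futures; the exact CreateOrUpdate argument list and the extension payload below are assumptions, not taken from this patch.

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	extClient := compute.NewVirtualMachineExtensionsClient("00000000-0000-0000-0000-000000000000")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	extClient.Authorizer = authorizer

	ctx := context.Background()
	// Assumed payload shape; consult the VirtualMachineExtension model for the real fields.
	ext := compute.VirtualMachineExtension{Location: to.StringPtr("eastus")}

	// Kick off the operation; the returned future wraps the polling state.
	future, err := extClient.CreateOrUpdate(ctx, "example-rg", "example-vm", "example-ext", ext)
	if err != nil {
		log.Fatal(err)
	}
	// Block until the service reports completion, polling via the embedded autorest.Client.
	if err := future.WaitForCompletionRef(ctx, extClient.Client); err != nil {
		log.Fatal(err)
	}
	// Retrieve the final VirtualMachineExtension once the future is done.
	result, err := future.Result(extClient)
	if err != nil {
		log.Fatal(err)
	}
	_ = result
}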
func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineExtensionsCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -114,7 +115,6 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Requ func (client VirtualMachineExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineExtension, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -178,9 +178,8 @@ func (client VirtualMachineExtensionsClient) DeletePreparer(ctx context.Context, // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (future VirtualMachineExtensionsDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -193,7 +192,6 @@ func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (fu func (client VirtualMachineExtensionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -266,8 +264,7 @@ func (client VirtualMachineExtensionsClient) GetPreparer(ctx context.Context, re // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -275,7 +272,6 @@ func (client VirtualMachineExtensionsClient) GetSender(req *http.Request) (*http func (client VirtualMachineExtensionsClient) GetResponder(resp *http.Response) (result VirtualMachineExtension, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -347,8 +343,7 @@ func (client VirtualMachineExtensionsClient) ListPreparer(ctx context.Context, r // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -356,7 +351,6 @@ func (client VirtualMachineExtensionsClient) ListSender(req *http.Request) (*htt func (client VirtualMachineExtensionsClient) ListResponder(resp *http.Response) (result VirtualMachineExtensionsListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -423,9 +417,8 @@ func (client VirtualMachineExtensionsClient) UpdatePreparer(ctx context.Context, // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionsClient) UpdateSender(req *http.Request) (future VirtualMachineExtensionsUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -438,7 +431,6 @@ func (client VirtualMachineExtensionsClient) UpdateSender(req *http.Request) (fu func (client VirtualMachineExtensionsClient) UpdateResponder(resp *http.Response) (result VirtualMachineExtension, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineimages.go index e1e830c2f432..c924945fcd98 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineimages.go @@ -35,7 +35,9 @@ func NewVirtualMachineImagesClient(subscriptionID string) VirtualMachineImagesCl return NewVirtualMachineImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualMachineImagesClientWithBaseURI creates an instance of the VirtualMachineImagesClient client. +// NewVirtualMachineImagesClientWithBaseURI creates an instance of the VirtualMachineImagesClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). func NewVirtualMachineImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineImagesClient { return VirtualMachineImagesClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -106,8 +108,7 @@ func (client VirtualMachineImagesClient) GetPreparer(ctx context.Context, locati // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. 
The method always @@ -115,7 +116,6 @@ func (client VirtualMachineImagesClient) GetSender(req *http.Request) (*http.Res func (client VirtualMachineImagesClient) GetResponder(resp *http.Response) (result VirtualMachineImage, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -129,8 +129,8 @@ func (client VirtualMachineImagesClient) GetResponder(resp *http.Response) (resu // publisherName - a valid image publisher. // offer - a valid image publisher offer. // skus - a valid image SKU. -// filter - the filter to apply on the operation. -func (client VirtualMachineImagesClient) List(ctx context.Context, location string, publisherName string, offer string, skus string, filter string, top *int32, orderby string) (result ListVirtualMachineImageResource, err error) { +// expand - the expand expression to apply on the operation. +func (client VirtualMachineImagesClient) List(ctx context.Context, location string, publisherName string, offer string, skus string, expand string, top *int32, orderby string) (result ListVirtualMachineImageResource, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineImagesClient.List") defer func() { @@ -141,7 +141,7 @@ func (client VirtualMachineImagesClient) List(ctx context.Context, location stri tracing.EndSpan(ctx, sc, err) }() } - req, err := client.ListPreparer(ctx, location, publisherName, offer, skus, filter, top, orderby) + req, err := client.ListPreparer(ctx, location, publisherName, offer, skus, expand, top, orderby) if err != nil { err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", nil, "Failure preparing request") return @@ -163,7 +163,7 @@ func (client VirtualMachineImagesClient) List(ctx context.Context, location stri } // ListPreparer prepares the List request. -func (client VirtualMachineImagesClient) ListPreparer(ctx context.Context, location string, publisherName string, offer string, skus string, filter string, top *int32, orderby string) (*http.Request, error) { +func (client VirtualMachineImagesClient) ListPreparer(ctx context.Context, location string, publisherName string, offer string, skus string, expand string, top *int32, orderby string) (*http.Request, error) { pathParameters := map[string]interface{}{ "location": autorest.Encode("path", location), "offer": autorest.Encode("path", offer), @@ -176,8 +176,8 @@ func (client VirtualMachineImagesClient) ListPreparer(ctx context.Context, locat queryParameters := map[string]interface{}{ "api-version": APIVersion, } - if len(filter) > 0 { - queryParameters["$filter"] = autorest.Encode("query", filter) + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) } if top != nil { queryParameters["$top"] = autorest.Encode("query", *top) @@ -197,8 +197,7 @@ func (client VirtualMachineImagesClient) ListPreparer(ctx context.Context, locat // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -206,7 +205,6 @@ func (client VirtualMachineImagesClient) ListSender(req *http.Request) (*http.Re func (client VirtualMachineImagesClient) ListResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) @@ -274,8 +272,7 @@ func (client VirtualMachineImagesClient) ListOffersPreparer(ctx context.Context, // ListOffersSender sends the ListOffers request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListOffersSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListOffersResponder handles the response to the ListOffers request. The method always @@ -283,7 +280,6 @@ func (client VirtualMachineImagesClient) ListOffersSender(req *http.Request) (*h func (client VirtualMachineImagesClient) ListOffersResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) @@ -349,8 +345,7 @@ func (client VirtualMachineImagesClient) ListPublishersPreparer(ctx context.Cont // ListPublishersSender sends the ListPublishers request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListPublishersSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListPublishersResponder handles the response to the ListPublishers request. The method always @@ -358,7 +353,6 @@ func (client VirtualMachineImagesClient) ListPublishersSender(req *http.Request) func (client VirtualMachineImagesClient) ListPublishersResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) @@ -428,8 +422,7 @@ func (client VirtualMachineImagesClient) ListSkusPreparer(ctx context.Context, l // ListSkusSender sends the ListSkus request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListSkusSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListSkusResponder handles the response to the ListSkus request. 
The method always @@ -437,7 +430,6 @@ func (client VirtualMachineImagesClient) ListSkusSender(req *http.Request) (*htt func (client VirtualMachineImagesClient) ListSkusResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result.Value), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineruncommands.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineruncommands.go index 82be43acfd4c..db043dcb708e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineruncommands.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachineruncommands.go @@ -36,7 +36,9 @@ func NewVirtualMachineRunCommandsClient(subscriptionID string) VirtualMachineRun return NewVirtualMachineRunCommandsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualMachineRunCommandsClientWithBaseURI creates an instance of the VirtualMachineRunCommandsClient client. +// NewVirtualMachineRunCommandsClientWithBaseURI creates an instance of the VirtualMachineRunCommandsClient client +// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). func NewVirtualMachineRunCommandsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineRunCommandsClient { return VirtualMachineRunCommandsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -107,8 +109,7 @@ func (client VirtualMachineRunCommandsClient) GetPreparer(ctx context.Context, l // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineRunCommandsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -116,7 +117,6 @@ func (client VirtualMachineRunCommandsClient) GetSender(req *http.Request) (*htt func (client VirtualMachineRunCommandsClient) GetResponder(resp *http.Response) (result RunCommandDocument, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -189,8 +189,7 @@ func (client VirtualMachineRunCommandsClient) ListPreparer(ctx context.Context, // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineRunCommandsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -198,7 +197,6 @@ func (client VirtualMachineRunCommandsClient) ListSender(req *http.Request) (*ht func (client VirtualMachineRunCommandsClient) ListResponder(resp *http.Response) (result RunCommandListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachines.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachines.go index b329db181d98..f29f263fd050 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachines.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachines.go @@ -36,7 +36,8 @@ func NewVirtualMachinesClient(subscriptionID string) VirtualMachinesClient { return NewVirtualMachinesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualMachinesClientWithBaseURI creates an instance of the VirtualMachinesClient client. +// NewVirtualMachinesClientWithBaseURI creates an instance of the VirtualMachinesClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewVirtualMachinesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachinesClient { return VirtualMachinesClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -107,9 +108,8 @@ func (client VirtualMachinesClient) CapturePreparer(ctx context.Context, resourc // CaptureSender sends the Capture request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) CaptureSender(req *http.Request) (future VirtualMachinesCaptureFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -122,7 +122,6 @@ func (client VirtualMachinesClient) CaptureSender(req *http.Request) (future Vir func (client VirtualMachinesClient) CaptureResponder(resp *http.Response) (result VirtualMachineCaptureResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -185,9 +184,8 @@ func (client VirtualMachinesClient) ConvertToManagedDisksPreparer(ctx context.Co // ConvertToManagedDisksSender sends the ConvertToManagedDisks request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ConvertToManagedDisksSender(req *http.Request) (future VirtualMachinesConvertToManagedDisksFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -200,14 +198,14 @@ func (client VirtualMachinesClient) ConvertToManagedDisksSender(req *http.Reques func (client VirtualMachinesClient) ConvertToManagedDisksResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp return } -// CreateOrUpdate the operation to create or update a virtual machine. +// CreateOrUpdate the operation to create or update a virtual machine. Please note some properties can be set only +// during virtual machine creation. // Parameters: // resourceGroupName - the name of the resource group. // VMName - the name of the virtual machine. @@ -286,9 +284,8 @@ func (client VirtualMachinesClient) CreateOrUpdatePreparer(ctx context.Context, // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachinesCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -301,7 +298,6 @@ func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (fut func (client VirtualMachinesClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachine, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -364,9 +360,8 @@ func (client VirtualMachinesClient) DeallocatePreparer(ctx context.Context, reso // DeallocateSender sends the Deallocate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (future VirtualMachinesDeallocateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -379,7 +374,6 @@ func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (future func (client VirtualMachinesClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -440,9 +434,8 @@ func (client VirtualMachinesClient) DeletePreparer(ctx context.Context, resource // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) DeleteSender(req *http.Request) (future VirtualMachinesDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -455,14 +448,17 @@ func (client VirtualMachinesClient) DeleteSender(req *http.Request) (future Virt func (client VirtualMachinesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return } -// Generalize sets the state of the virtual machine to generalized. +// Generalize sets the OS state of the virtual machine to generalized. It is recommended to sysprep the virtual machine +// before performing this operation.
+// For Windows, please refer to [Create a managed image of a generalized VM in
+// Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/capture-image-resource).
    For Linux, please +// refer to [How to create an image of a virtual machine or +// VHD](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/capture-image). // Parameters: // resourceGroupName - the name of the resource group. // VMName - the name of the virtual machine. @@ -522,8 +518,7 @@ func (client VirtualMachinesClient) GeneralizePreparer(ctx context.Context, reso // GeneralizeSender sends the Generalize request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) GeneralizeSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GeneralizeResponder handles the response to the Generalize request. The method always @@ -531,7 +526,6 @@ func (client VirtualMachinesClient) GeneralizeSender(req *http.Request) (*http.R func (client VirtualMachinesClient) GeneralizeResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByClosing()) result.Response = resp @@ -602,8 +596,7 @@ func (client VirtualMachinesClient) GetPreparer(ctx context.Context, resourceGro // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -611,7 +604,6 @@ func (client VirtualMachinesClient) GetSender(req *http.Request) (*http.Response func (client VirtualMachinesClient) GetResponder(resp *http.Response) (result VirtualMachine, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -679,8 +671,7 @@ func (client VirtualMachinesClient) InstanceViewPreparer(ctx context.Context, re // InstanceViewSender sends the InstanceView request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) InstanceViewSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // InstanceViewResponder handles the response to the InstanceView request. The method always @@ -688,7 +679,6 @@ func (client VirtualMachinesClient) InstanceViewSender(req *http.Request) (*http func (client VirtualMachinesClient) InstanceViewResponder(resp *http.Response) (result VirtualMachineInstanceView, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -756,8 +746,7 @@ func (client VirtualMachinesClient) ListPreparer(ctx context.Context, resourceGr // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. 
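The constructor comments updated throughout this patch point callers at the `New...ClientWithBaseURI` variants for sovereign clouds and Azure Stack. As an illustrative sketch (not part of the vendored diff), this is roughly how such an endpoint would be targeted; the subscription ID and resource group are placeholders, and the authorizer is assumed to be configured for the same cloud via the usual AZURE_* environment variables.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Placeholders for illustration only.
	subscriptionID := "00000000-0000-0000-0000-000000000000"
	resourceGroup := "example-rg"

	// Point the generated client at a sovereign-cloud Resource Manager endpoint
	// instead of the default public-cloud base URI.
	vmClient := compute.NewVirtualMachinesClientWithBaseURI(
		azure.USGovernmentCloud.ResourceManagerEndpoint, subscriptionID)

	// Credentials come from the AZURE_* environment variables; AZURE_ENVIRONMENT
	// should name the same cloud as the base URI above.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}
	vmClient.Authorizer = authorizer

	// List VMs in a resource group through the custom endpoint.
	page, err := vmClient.List(context.Background(), resourceGroup)
	if err != nil {
		panic(err)
	}
	for _, vm := range page.Values() {
		fmt.Println(*vm.Name)
	}
}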
func (client VirtualMachinesClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -765,7 +754,6 @@ func (client VirtualMachinesClient) ListSender(req *http.Request) (*http.Respons func (client VirtualMachinesClient) ListResponder(resp *http.Response) (result VirtualMachineListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -872,8 +860,7 @@ func (client VirtualMachinesClient) ListAllPreparer(ctx context.Context, statusO // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListAllSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always @@ -881,7 +868,6 @@ func (client VirtualMachinesClient) ListAllSender(req *http.Request) (*http.Resp func (client VirtualMachinesClient) ListAllResponder(resp *http.Response) (result VirtualMachineListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -986,8 +972,7 @@ func (client VirtualMachinesClient) ListAvailableSizesPreparer(ctx context.Conte // ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always @@ -995,7 +980,6 @@ func (client VirtualMachinesClient) ListAvailableSizesSender(req *http.Request) func (client VirtualMachinesClient) ListAvailableSizesResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -1068,8 +1052,7 @@ func (client VirtualMachinesClient) ListByLocationPreparer(ctx context.Context, // ListByLocationSender sends the ListByLocation request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListByLocationSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByLocationResponder handles the response to the ListByLocation request. 
The method always @@ -1077,7 +1060,6 @@ func (client VirtualMachinesClient) ListByLocationSender(req *http.Request) (*ht func (client VirtualMachinesClient) ListByLocationResponder(resp *http.Response) (result VirtualMachineListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -1176,9 +1158,8 @@ func (client VirtualMachinesClient) PerformMaintenancePreparer(ctx context.Conte // PerformMaintenanceSender sends the PerformMaintenance request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) PerformMaintenanceSender(req *http.Request) (future VirtualMachinesPerformMaintenanceFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1191,7 +1172,6 @@ func (client VirtualMachinesClient) PerformMaintenanceSender(req *http.Request) func (client VirtualMachinesClient) PerformMaintenanceResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1261,9 +1241,8 @@ func (client VirtualMachinesClient) PowerOffPreparer(ctx context.Context, resour // PowerOffSender sends the PowerOff request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (future VirtualMachinesPowerOffFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1276,7 +1255,6 @@ func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (future Vi func (client VirtualMachinesClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1337,9 +1315,8 @@ func (client VirtualMachinesClient) ReapplyPreparer(ctx context.Context, resourc // ReapplySender sends the Reapply request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ReapplySender(req *http.Request) (future VirtualMachinesReapplyFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1352,7 +1329,6 @@ func (client VirtualMachinesClient) ReapplySender(req *http.Request) (future Vir func (client VirtualMachinesClient) ReapplyResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1413,9 +1389,8 @@ func (client VirtualMachinesClient) RedeployPreparer(ctx context.Context, resour // RedeploySender sends the Redeploy request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) RedeploySender(req *http.Request) (future VirtualMachinesRedeployFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1428,7 +1403,6 @@ func (client VirtualMachinesClient) RedeploySender(req *http.Request) (future Vi func (client VirtualMachinesClient) RedeployResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1495,9 +1469,8 @@ func (client VirtualMachinesClient) ReimagePreparer(ctx context.Context, resourc // ReimageSender sends the Reimage request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ReimageSender(req *http.Request) (future VirtualMachinesReimageFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1510,7 +1483,6 @@ func (client VirtualMachinesClient) ReimageSender(req *http.Request) (future Vir func (client VirtualMachinesClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1571,9 +1543,8 @@ func (client VirtualMachinesClient) RestartPreparer(ctx context.Context, resourc // RestartSender sends the Restart request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) RestartSender(req *http.Request) (future VirtualMachinesRestartFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1586,7 +1557,6 @@ func (client VirtualMachinesClient) RestartSender(req *http.Request) (future Vir func (client VirtualMachinesClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1656,9 +1626,8 @@ func (client VirtualMachinesClient) RunCommandPreparer(ctx context.Context, reso // RunCommandSender sends the RunCommand request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) RunCommandSender(req *http.Request) (future VirtualMachinesRunCommandFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1671,7 +1640,6 @@ func (client VirtualMachinesClient) RunCommandSender(req *http.Request) (future func (client VirtualMachinesClient) RunCommandResponder(resp *http.Response) (result RunCommandResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -1733,9 +1701,8 @@ func (client VirtualMachinesClient) StartPreparer(ctx context.Context, resourceG // StartSender sends the Start request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) StartSender(req *http.Request) (future VirtualMachinesStartFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1748,7 +1715,6 @@ func (client VirtualMachinesClient) StartSender(req *http.Request) (future Virtu func (client VirtualMachinesClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1812,9 +1778,8 @@ func (client VirtualMachinesClient) UpdatePreparer(ctx context.Context, resource // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) UpdateSender(req *http.Request) (future VirtualMachinesUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1827,7 +1792,6 @@ func (client VirtualMachinesClient) UpdateSender(req *http.Request) (future Virt func (client VirtualMachinesClient) UpdateResponder(resp *http.Response) (result VirtualMachine, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetextensions.go index 7e4d6d0a6b70..eb10cab4a7c0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetextensions.go @@ -36,7 +36,8 @@ func NewVirtualMachineScaleSetExtensionsClient(subscriptionID string) VirtualMac } // NewVirtualMachineScaleSetExtensionsClientWithBaseURI creates an instance of the -// VirtualMachineScaleSetExtensionsClient client. +// VirtualMachineScaleSetExtensionsClient client using a custom endpoint. Use this when interacting with an Azure +// cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewVirtualMachineScaleSetExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetExtensionsClient { return VirtualMachineScaleSetExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -87,6 +88,7 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdatePreparer(ctx "api-version": APIVersion, } + extensionParameters.Type = nil preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPut(), @@ -100,9 +102,8 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdatePreparer(ctx // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineScaleSetExtensionsCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -115,7 +116,6 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateSender(req *h func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineScaleSetExtension, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -179,9 +179,8 @@ func (client VirtualMachineScaleSetExtensionsClient) DeletePreparer(ctx context. // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetExtensionsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetExtensionsDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -194,7 +193,6 @@ func (client VirtualMachineScaleSetExtensionsClient) DeleteSender(req *http.Requ func (client VirtualMachineScaleSetExtensionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -267,8 +265,7 @@ func (client VirtualMachineScaleSetExtensionsClient) GetPreparer(ctx context.Con // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -276,7 +273,6 @@ func (client VirtualMachineScaleSetExtensionsClient) GetSender(req *http.Request func (client VirtualMachineScaleSetExtensionsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSetExtension, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -345,8 +341,7 @@ func (client VirtualMachineScaleSetExtensionsClient) ListPreparer(ctx context.Co // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetExtensionsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -354,7 +349,6 @@ func (client VirtualMachineScaleSetExtensionsClient) ListSender(req *http.Reques func (client VirtualMachineScaleSetExtensionsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetExtensionListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -398,3 +392,85 @@ func (client VirtualMachineScaleSetExtensionsClient) ListComplete(ctx context.Co result.page, err = client.List(ctx, resourceGroupName, VMScaleSetName) return } + +// Update the operation to update an extension. +// Parameters: +// resourceGroupName - the name of the resource group. +// VMScaleSetName - the name of the VM scale set where the extension should be updated. +// vmssExtensionName - the name of the VM scale set extension. +// extensionParameters - parameters supplied to the Update VM scale set Extension operation. 
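The Update operation added below returns a long-running-operation future. A minimal, hypothetical usage sketch follows; the resource names are placeholders and the field layout of VirtualMachineScaleSetExtensionUpdate is assumed to mirror the scale-set extension properties model in this SDK version.

package main

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func updateExtension(ctx context.Context, client compute.VirtualMachineScaleSetExtensionsClient) error {
	// Field layout assumed; only a single property is patched here.
	params := compute.VirtualMachineScaleSetExtensionUpdate{
		VirtualMachineScaleSetExtensionProperties: &compute.VirtualMachineScaleSetExtensionProperties{
			AutoUpgradeMinorVersion: to.BoolPtr(true),
		},
	}

	// Resource names are placeholders.
	future, err := client.Update(ctx, "example-rg", "example-vmss", "example-extension", params)
	if err != nil {
		return err
	}
	// Block until the long-running PATCH completes, then fetch the final resource.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	_, err = future.Result(client)
	return err
}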
+func (client VirtualMachineScaleSetExtensionsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtensionUpdate) (result VirtualMachineScaleSetExtensionsUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetExtensionsClient.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, VMScaleSetName, vmssExtensionName, extensionParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client VirtualMachineScaleSetExtensionsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtensionUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + "vmssExtensionName": autorest.Encode("path", vmssExtensionName), + } + + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + extensionParameters.Name = nil + extensionParameters.Type = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}", pathParameters), + autorest.WithJSON(extensionParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetExtensionsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetExtensionsUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetExtensionsClient) UpdateResponder(resp *http.Response) (result VirtualMachineScaleSetExtension, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetrollingupgrades.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetrollingupgrades.go index c20c01d515cf..f94b61212d4e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetrollingupgrades.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetrollingupgrades.go @@ -37,7 +37,8 @@ func NewVirtualMachineScaleSetRollingUpgradesClient(subscriptionID string) Virtu } // NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI creates an instance of the -// VirtualMachineScaleSetRollingUpgradesClient client. +// VirtualMachineScaleSetRollingUpgradesClient client using a custom endpoint. Use this when interacting with an Azure +// cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetRollingUpgradesClient { return VirtualMachineScaleSetRollingUpgradesClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -96,9 +97,8 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) CancelPreparer(ctx con // CancelSender sends the Cancel request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetRollingUpgradesClient) CancelSender(req *http.Request) (future VirtualMachineScaleSetRollingUpgradesCancelFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -111,7 +111,6 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) CancelSender(req *http func (client VirtualMachineScaleSetRollingUpgradesClient) CancelResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -178,8 +177,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestPreparer(ctx // GetLatestSender sends the GetLatest request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetLatestResponder handles the response to the GetLatest request. 
The method always @@ -187,7 +185,6 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestSender(req *h func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestResponder(resp *http.Response) (result RollingUpgradeStatusInfo, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -251,9 +248,8 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeP // StartExtensionUpgradeSender sends the StartExtensionUpgrade request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeSender(req *http.Request) (future VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -266,7 +262,6 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeS func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -328,9 +323,8 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradePreparer // StartOSUpgradeSender sends the StartOSUpgrade request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeSender(req *http.Request) (future VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -343,7 +337,6 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeSender(r func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesets.go index 54b1393fe57d..13de192137b8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesets.go @@ -36,7 +36,9 @@ func NewVirtualMachineScaleSetsClient(subscriptionID string) VirtualMachineScale return NewVirtualMachineScaleSetsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualMachineScaleSetsClientWithBaseURI creates an instance of the VirtualMachineScaleSetsClient client. 
+// NewVirtualMachineScaleSetsClientWithBaseURI creates an instance of the VirtualMachineScaleSetsClient client using a +// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, +// Azure stack). func NewVirtualMachineScaleSetsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetsClient { return VirtualMachineScaleSetsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -87,20 +89,25 @@ func (client VirtualMachineScaleSetsClient) ConvertToSinglePlacementGroupPrepare "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } + const APIVersion = "2019-07-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/convertToSinglePlacementGroup", pathParameters), - autorest.WithJSON(parameters)) + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ConvertToSinglePlacementGroupSender sends the ConvertToSinglePlacementGroup request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ConvertToSinglePlacementGroupSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ConvertToSinglePlacementGroupResponder handles the response to the ConvertToSinglePlacementGroup request. 
The method always @@ -108,7 +115,6 @@ func (client VirtualMachineScaleSetsClient) ConvertToSinglePlacementGroupSender( func (client VirtualMachineScaleSetsClient) ConvertToSinglePlacementGroupResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByClosing()) result.Response = resp @@ -138,15 +144,15 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil}, - {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.InclusiveMinimum, Rule: 5, Chain: nil}, + {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.InclusiveMinimum, Rule: int64(5), Chain: nil}, }}, {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil}, - {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.InclusiveMinimum, Rule: 5, Chain: nil}, + {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.InclusiveMinimum, Rule: int64(5), Chain: nil}, }}, {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.InclusiveMaximum, Rule: int64(100), Chain: nil}, - {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}, + {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}, }}, }}, }}, @@ -195,9 +201,8 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(ctx context.C // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineScaleSetsCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -210,7 +215,6 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Reque func (client VirtualMachineScaleSetsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -279,9 +283,8 @@ func (client VirtualMachineScaleSetsClient) DeallocatePreparer(ctx context.Conte // DeallocateSender sends the Deallocate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request) (future VirtualMachineScaleSetsDeallocateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -294,7 +297,6 @@ func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request) func (client VirtualMachineScaleSetsClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -355,9 +357,8 @@ func (client VirtualMachineScaleSetsClient) DeletePreparer(ctx context.Context, // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetsDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -370,7 +371,6 @@ func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) (fut func (client VirtualMachineScaleSetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -440,9 +440,8 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(ctx context. // DeleteInstancesSender sends the DeleteInstances request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Request) (future VirtualMachineScaleSetsDeleteInstancesFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -455,7 +454,6 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Requ func (client VirtualMachineScaleSetsClient) DeleteInstancesResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -525,8 +523,7 @@ func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUp // ForceRecoveryServiceFabricPlatformUpdateDomainWalkSender sends the ForceRecoveryServiceFabricPlatformUpdateDomainWalk request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalkSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ForceRecoveryServiceFabricPlatformUpdateDomainWalkResponder handles the response to the ForceRecoveryServiceFabricPlatformUpdateDomainWalk request. The method always @@ -534,7 +531,6 @@ func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUp func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalkResponder(resp *http.Response) (result RecoveryWalkResponse, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -602,8 +598,7 @@ func (client VirtualMachineScaleSetsClient) GetPreparer(ctx context.Context, res // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -611,7 +606,6 @@ func (client VirtualMachineScaleSetsClient) GetSender(req *http.Request) (*http. func (client VirtualMachineScaleSetsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -679,8 +673,7 @@ func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(ctx context. // GetInstanceViewSender sends the GetInstanceView request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetInstanceViewResponder handles the response to the GetInstanceView request. 
The method always @@ -688,7 +681,6 @@ func (client VirtualMachineScaleSetsClient) GetInstanceViewSender(req *http.Requ func (client VirtualMachineScaleSetsClient) GetInstanceViewResponder(resp *http.Response) (result VirtualMachineScaleSetInstanceView, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -757,8 +749,7 @@ func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistoryPreparer(ctx cont // GetOSUpgradeHistorySender sends the GetOSUpgradeHistory request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistorySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetOSUpgradeHistoryResponder handles the response to the GetOSUpgradeHistory request. The method always @@ -766,7 +757,6 @@ func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistorySender(req *http. func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistoryResponder(resp *http.Response) (result VirtualMachineScaleSetListOSUpgradeHistory, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -870,8 +860,7 @@ func (client VirtualMachineScaleSetsClient) ListPreparer(ctx context.Context, re // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -879,7 +868,6 @@ func (client VirtualMachineScaleSetsClient) ListSender(req *http.Request) (*http func (client VirtualMachineScaleSetsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -982,8 +970,7 @@ func (client VirtualMachineScaleSetsClient) ListAllPreparer(ctx context.Context) // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ListAllSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. 
The method always @@ -991,7 +978,6 @@ func (client VirtualMachineScaleSetsClient) ListAllSender(req *http.Request) (*h func (client VirtualMachineScaleSetsClient) ListAllResponder(resp *http.Response) (result VirtualMachineScaleSetListWithLinkResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -1098,8 +1084,7 @@ func (client VirtualMachineScaleSetsClient) ListSkusPreparer(ctx context.Context // ListSkusSender sends the ListSkus request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ListSkusSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListSkusResponder handles the response to the ListSkus request. The method always @@ -1107,7 +1092,6 @@ func (client VirtualMachineScaleSetsClient) ListSkusSender(req *http.Request) (* func (client VirtualMachineScaleSetsClient) ListSkusResponder(resp *http.Response) (result VirtualMachineScaleSetListSkusResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -1214,9 +1198,8 @@ func (client VirtualMachineScaleSetsClient) PerformMaintenancePreparer(ctx conte // PerformMaintenanceSender sends the PerformMaintenance request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) PerformMaintenanceSender(req *http.Request) (future VirtualMachineScaleSetsPerformMaintenanceFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1229,7 +1212,6 @@ func (client VirtualMachineScaleSetsClient) PerformMaintenanceSender(req *http.R func (client VirtualMachineScaleSetsClient) PerformMaintenanceResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1305,9 +1287,8 @@ func (client VirtualMachineScaleSetsClient) PowerOffPreparer(ctx context.Context // PowerOffSender sends the PowerOff request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (future VirtualMachineScaleSetsPowerOffFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1320,7 +1301,6 @@ func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (f func (client VirtualMachineScaleSetsClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1388,9 +1368,8 @@ func (client VirtualMachineScaleSetsClient) RedeployPreparer(ctx context.Context // RedeploySender sends the Redeploy request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) RedeploySender(req *http.Request) (future VirtualMachineScaleSetsRedeployFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1403,7 +1382,6 @@ func (client VirtualMachineScaleSetsClient) RedeploySender(req *http.Request) (f func (client VirtualMachineScaleSetsClient) RedeployResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1471,9 +1449,8 @@ func (client VirtualMachineScaleSetsClient) ReimagePreparer(ctx context.Context, // ReimageSender sends the Reimage request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ReimageSender(req *http.Request) (future VirtualMachineScaleSetsReimageFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1486,7 +1463,6 @@ func (client VirtualMachineScaleSetsClient) ReimageSender(req *http.Request) (fu func (client VirtualMachineScaleSetsClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1554,9 +1530,8 @@ func (client VirtualMachineScaleSetsClient) ReimageAllPreparer(ctx context.Conte // ReimageAllSender sends the ReimageAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ReimageAllSender(req *http.Request) (future VirtualMachineScaleSetsReimageAllFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1569,7 +1544,6 @@ func (client VirtualMachineScaleSetsClient) ReimageAllSender(req *http.Request) func (client VirtualMachineScaleSetsClient) ReimageAllResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1636,9 +1610,8 @@ func (client VirtualMachineScaleSetsClient) RestartPreparer(ctx context.Context, // RestartSender sends the Restart request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (future VirtualMachineScaleSetsRestartFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1651,7 +1624,6 @@ func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (fu func (client VirtualMachineScaleSetsClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1718,9 +1690,8 @@ func (client VirtualMachineScaleSetsClient) StartPreparer(ctx context.Context, r // StartSender sends the Start request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (future VirtualMachineScaleSetsStartFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1733,7 +1704,6 @@ func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (futu func (client VirtualMachineScaleSetsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1797,9 +1767,8 @@ func (client VirtualMachineScaleSetsClient) UpdatePreparer(ctx context.Context, // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetsUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1812,7 +1781,6 @@ func (client VirtualMachineScaleSetsClient) UpdateSender(req *http.Request) (fut func (client VirtualMachineScaleSetsClient) UpdateResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -1883,9 +1851,8 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(ctx context. // UpdateInstancesSender sends the UpdateInstances request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Request) (future VirtualMachineScaleSetsUpdateInstancesFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1898,7 +1865,6 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Requ func (client VirtualMachineScaleSetsClient) UpdateInstancesResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvmextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvmextensions.go index 3fe6bf51dd22..7b50a4704b19 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvmextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvmextensions.go @@ -37,7 +37,8 @@ func NewVirtualMachineScaleSetVMExtensionsClient(subscriptionID string) VirtualM } // NewVirtualMachineScaleSetVMExtensionsClientWithBaseURI creates an instance of the -// VirtualMachineScaleSetVMExtensionsClient client. +// VirtualMachineScaleSetVMExtensionsClient client using a custom endpoint. Use this when interacting with an Azure +// cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewVirtualMachineScaleSetVMExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetVMExtensionsClient { return VirtualMachineScaleSetVMExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -103,9 +104,8 @@ func (client VirtualMachineScaleSetVMExtensionsClient) CreateOrUpdatePreparer(ct // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMExtensionsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineScaleSetVMExtensionsCreateOrUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -118,7 +118,6 @@ func (client VirtualMachineScaleSetVMExtensionsClient) CreateOrUpdateSender(req func (client VirtualMachineScaleSetVMExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineExtension, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -184,9 +183,8 @@ func (client VirtualMachineScaleSetVMExtensionsClient) DeletePreparer(ctx contex // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMExtensionsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetVMExtensionsDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -199,7 +197,6 @@ func (client VirtualMachineScaleSetVMExtensionsClient) DeleteSender(req *http.Re func (client VirtualMachineScaleSetVMExtensionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -274,8 +271,7 @@ func (client VirtualMachineScaleSetVMExtensionsClient) GetPreparer(ctx context.C // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -283,7 +279,6 @@ func (client VirtualMachineScaleSetVMExtensionsClient) GetSender(req *http.Reque func (client VirtualMachineScaleSetVMExtensionsClient) GetResponder(resp *http.Response) (result VirtualMachineExtension, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -357,8 +352,7 @@ func (client VirtualMachineScaleSetVMExtensionsClient) ListPreparer(ctx context. // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMExtensionsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -366,7 +360,6 @@ func (client VirtualMachineScaleSetVMExtensionsClient) ListSender(req *http.Requ func (client VirtualMachineScaleSetVMExtensionsClient) ListResponder(resp *http.Response) (result VirtualMachineExtensionsListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -435,9 +428,8 @@ func (client VirtualMachineScaleSetVMExtensionsClient) UpdatePreparer(ctx contex // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMExtensionsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetVMExtensionsUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -450,7 +442,6 @@ func (client VirtualMachineScaleSetVMExtensionsClient) UpdateSender(req *http.Re func (client VirtualMachineScaleSetVMExtensionsClient) UpdateResponder(resp *http.Response) (result VirtualMachineExtension, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvms.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvms.go index 155e6eee83df..b694197be1d9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvms.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinescalesetvms.go @@ -36,7 +36,9 @@ func NewVirtualMachineScaleSetVMsClient(subscriptionID string) VirtualMachineSca return NewVirtualMachineScaleSetVMsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualMachineScaleSetVMsClientWithBaseURI creates an instance of the VirtualMachineScaleSetVMsClient client. +// NewVirtualMachineScaleSetVMsClientWithBaseURI creates an instance of the VirtualMachineScaleSetVMsClient client +// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). func NewVirtualMachineScaleSetVMsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetVMsClient { return VirtualMachineScaleSetVMsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -99,9 +101,8 @@ func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(ctx context.Con // DeallocateSender sends the Deallocate request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request) (future VirtualMachineScaleSetVMsDeallocateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -114,7 +115,6 @@ func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request func (client VirtualMachineScaleSetVMsClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -177,9 +177,8 @@ func (client VirtualMachineScaleSetVMsClient) DeletePreparer(ctx context.Context // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetVMsDeleteFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -192,7 +191,6 @@ func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (f func (client VirtualMachineScaleSetVMsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -265,8 +263,7 @@ func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, r // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -274,7 +271,6 @@ func (client VirtualMachineScaleSetVMsClient) GetSender(req *http.Request) (*htt func (client VirtualMachineScaleSetVMsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSetVM, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -344,8 +340,7 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(ctx contex // GetInstanceViewSender sends the GetInstanceView request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetInstanceViewResponder handles the response to the GetInstanceView request. 
The method always @@ -353,7 +348,6 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewSender(req *http.Re func (client VirtualMachineScaleSetVMsClient) GetInstanceViewResponder(resp *http.Response) (result VirtualMachineScaleSetVMInstanceView, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -365,9 +359,10 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewResponder(resp *htt // Parameters: // resourceGroupName - the name of the resource group. // virtualMachineScaleSetName - the name of the VM scale set. -// filter - the filter to apply to the operation. -// selectParameter - the list parameters. -// expand - the expand expression to apply to the operation. +// filter - the filter to apply to the operation. Allowed values are 'startswith(instanceView/statuses/code, +// 'PowerState') eq true', 'properties/latestModelApplied eq true', 'properties/latestModelApplied eq false'. +// selectParameter - the list parameters. Allowed values are 'instanceView', 'instanceView/statuses'. +// expand - the expand expression to apply to the operation. Allowed values are 'instanceView'. func (client VirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result VirtualMachineScaleSetVMListResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/VirtualMachineScaleSetVMsClient.List") @@ -434,8 +429,7 @@ func (client VirtualMachineScaleSetVMsClient) ListPreparer(ctx context.Context, // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -443,7 +437,6 @@ func (client VirtualMachineScaleSetVMsClient) ListSender(req *http.Request) (*ht func (client VirtualMachineScaleSetVMsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetVMListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -544,9 +537,8 @@ func (client VirtualMachineScaleSetVMsClient) PerformMaintenancePreparer(ctx con // PerformMaintenanceSender sends the PerformMaintenance request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) PerformMaintenanceSender(req *http.Request) (future VirtualMachineScaleSetVMsPerformMaintenanceFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -559,7 +551,6 @@ func (client VirtualMachineScaleSetVMsClient) PerformMaintenanceSender(req *http func (client VirtualMachineScaleSetVMsClient) PerformMaintenanceResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -631,9 +622,8 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(ctx context.Conte // PowerOffSender sends the PowerOff request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request) (future VirtualMachineScaleSetVMsPowerOffFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -646,7 +636,6 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request) func (client VirtualMachineScaleSetVMsClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -710,9 +699,8 @@ func (client VirtualMachineScaleSetVMsClient) RedeployPreparer(ctx context.Conte // RedeploySender sends the Redeploy request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) RedeploySender(req *http.Request) (future VirtualMachineScaleSetVMsRedeployFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -725,7 +713,6 @@ func (client VirtualMachineScaleSetVMsClient) RedeploySender(req *http.Request) func (client VirtualMachineScaleSetVMsClient) RedeployResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -794,9 +781,8 @@ func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(ctx context.Contex // ReimageSender sends the Reimage request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) ReimageSender(req *http.Request) (future VirtualMachineScaleSetVMsReimageFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -809,7 +795,6 @@ func (client VirtualMachineScaleSetVMsClient) ReimageSender(req *http.Request) ( func (client VirtualMachineScaleSetVMsClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -873,9 +858,8 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllPreparer(ctx context.Con // ReimageAllSender sends the ReimageAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) ReimageAllSender(req *http.Request) (future VirtualMachineScaleSetVMsReimageAllFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -888,7 +872,6 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllSender(req *http.Request func (client VirtualMachineScaleSetVMsClient) ReimageAllResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -951,9 +934,8 @@ func (client VirtualMachineScaleSetVMsClient) RestartPreparer(ctx context.Contex // RestartSender sends the Restart request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) RestartSender(req *http.Request) (future VirtualMachineScaleSetVMsRestartFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -966,7 +948,6 @@ func (client VirtualMachineScaleSetVMsClient) RestartSender(req *http.Request) ( func (client VirtualMachineScaleSetVMsClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1038,9 +1019,8 @@ func (client VirtualMachineScaleSetVMsClient) RunCommandPreparer(ctx context.Con // RunCommandSender sends the RunCommand request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) RunCommandSender(req *http.Request) (future VirtualMachineScaleSetVMsRunCommandFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) 
+ resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1053,7 +1033,6 @@ func (client VirtualMachineScaleSetVMsClient) RunCommandSender(req *http.Request func (client VirtualMachineScaleSetVMsClient) RunCommandResponder(resp *http.Response) (result RunCommandResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -1117,9 +1096,8 @@ func (client VirtualMachineScaleSetVMsClient) StartPreparer(ctx context.Context, // StartSender sends the Start request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (future VirtualMachineScaleSetVMsStartFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1132,7 +1110,6 @@ func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (fu func (client VirtualMachineScaleSetVMsClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByClosing()) result.Response = resp @@ -1223,9 +1200,8 @@ func (client VirtualMachineScaleSetVMsClient) UpdatePreparer(ctx context.Context // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetVMsUpdateFuture, err error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) var resp *http.Response - resp, err = autorest.SendWithSender(client, req, sd...) + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } @@ -1238,7 +1214,6 @@ func (client VirtualMachineScaleSetVMsClient) UpdateSender(req *http.Request) (f func (client VirtualMachineScaleSetVMsClient) UpdateResponder(resp *http.Response) (result VirtualMachineScaleSetVM, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinesizes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinesizes.go index 55335fe9a518..98abd86abd40 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinesizes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute/virtualmachinesizes.go @@ -36,7 +36,9 @@ func NewVirtualMachineSizesClient(subscriptionID string) VirtualMachineSizesClie return NewVirtualMachineSizesClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewVirtualMachineSizesClientWithBaseURI creates an instance of the VirtualMachineSizesClient client. +// NewVirtualMachineSizesClientWithBaseURI creates an instance of the VirtualMachineSizesClient client using a custom +// endpoint. 
Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). func NewVirtualMachineSizesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineSizesClient { return VirtualMachineSizesClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -106,8 +108,7 @@ func (client VirtualMachineSizesClient) ListPreparer(ctx context.Context, locati // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineSizesClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -115,7 +116,6 @@ func (client VirtualMachineSizesClient) ListSender(req *http.Request) (*http.Res func (client VirtualMachineSizesClient) ListResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/applications.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/applications.go index fab7676fa8e3..3d9b6b95d241 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/applications.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/applications.go @@ -37,7 +37,8 @@ func NewApplicationsClient(tenantID string) ApplicationsClient { return NewApplicationsClientWithBaseURI(DefaultBaseURI, tenantID) } -// NewApplicationsClientWithBaseURI creates an instance of the ApplicationsClient client. +// NewApplicationsClientWithBaseURI creates an instance of the ApplicationsClient client using a custom endpoint. Use +// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewApplicationsClientWithBaseURI(baseURI string, tenantID string) ApplicationsClient { return ApplicationsClient{NewWithBaseURI(baseURI, tenantID)} } @@ -110,8 +111,7 @@ func (client ApplicationsClient) AddOwnerPreparer(ctx context.Context, applicati // AddOwnerSender sends the AddOwner request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) AddOwnerSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // AddOwnerResponder handles the response to the AddOwner request. 
The method always @@ -119,7 +119,6 @@ func (client ApplicationsClient) AddOwnerSender(req *http.Request) (*http.Respon func (client ApplicationsClient) AddOwnerResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -191,8 +190,7 @@ func (client ApplicationsClient) CreatePreparer(ctx context.Context, parameters // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) CreateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // CreateResponder handles the response to the Create request. The method always @@ -200,7 +198,6 @@ func (client ApplicationsClient) CreateSender(req *http.Request) (*http.Response func (client ApplicationsClient) CreateResponder(resp *http.Response) (result Application, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -266,8 +263,7 @@ func (client ApplicationsClient) DeletePreparer(ctx context.Context, application // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) DeleteSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteResponder handles the response to the Delete request. The method always @@ -275,7 +271,6 @@ func (client ApplicationsClient) DeleteSender(req *http.Request) (*http.Response func (client ApplicationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -340,8 +335,7 @@ func (client ApplicationsClient) GetPreparer(ctx context.Context, applicationObj // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetResponder handles the response to the Get request. 
The method always @@ -349,7 +343,6 @@ func (client ApplicationsClient) GetSender(req *http.Request) (*http.Response, e func (client ApplicationsClient) GetResponder(resp *http.Response) (result Application, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -415,8 +408,7 @@ func (client ApplicationsClient) GetServicePrincipalsIDByAppIDPreparer(ctx conte // GetServicePrincipalsIDByAppIDSender sends the GetServicePrincipalsIDByAppID request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) GetServicePrincipalsIDByAppIDSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetServicePrincipalsIDByAppIDResponder handles the response to the GetServicePrincipalsIDByAppID request. The method always @@ -424,7 +416,6 @@ func (client ApplicationsClient) GetServicePrincipalsIDByAppIDSender(req *http.R func (client ApplicationsClient) GetServicePrincipalsIDByAppIDResponder(resp *http.Response) (result ServicePrincipalObjectResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -498,8 +489,7 @@ func (client ApplicationsClient) ListPreparer(ctx context.Context, filter string // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListResponder handles the response to the List request. The method always @@ -507,7 +497,6 @@ func (client ApplicationsClient) ListSender(req *http.Request) (*http.Response, func (client ApplicationsClient) ListResponder(resp *http.Response) (result ApplicationListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -589,8 +578,7 @@ func (client ApplicationsClient) ListKeyCredentialsPreparer(ctx context.Context, // ListKeyCredentialsSender sends the ListKeyCredentials request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) ListKeyCredentialsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListKeyCredentialsResponder handles the response to the ListKeyCredentials request. 
The method always @@ -598,7 +586,6 @@ func (client ApplicationsClient) ListKeyCredentialsSender(req *http.Request) (*h func (client ApplicationsClient) ListKeyCredentialsResponder(resp *http.Response) (result KeyCredentialListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -664,8 +651,7 @@ func (client ApplicationsClient) ListNextPreparer(ctx context.Context, nextLink // ListNextSender sends the ListNext request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) ListNextSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListNextResponder handles the response to the ListNext request. The method always @@ -673,7 +659,6 @@ func (client ApplicationsClient) ListNextSender(req *http.Request) (*http.Respon func (client ApplicationsClient) ListNextResponder(resp *http.Response) (result ApplicationListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -740,8 +725,7 @@ func (client ApplicationsClient) ListOwnersPreparer(ctx context.Context, applica // ListOwnersSender sends the ListOwners request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) ListOwnersSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListOwnersResponder handles the response to the ListOwners request. The method always @@ -749,7 +733,6 @@ func (client ApplicationsClient) ListOwnersSender(req *http.Request) (*http.Resp func (client ApplicationsClient) ListOwnersResponder(resp *http.Response) (result DirectoryObjectListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -852,8 +835,7 @@ func (client ApplicationsClient) ListPasswordCredentialsPreparer(ctx context.Con // ListPasswordCredentialsSender sends the ListPasswordCredentials request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) ListPasswordCredentialsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListPasswordCredentialsResponder handles the response to the ListPasswordCredentials request. 
The method always @@ -861,7 +843,6 @@ func (client ApplicationsClient) ListPasswordCredentialsSender(req *http.Request func (client ApplicationsClient) ListPasswordCredentialsResponder(resp *http.Response) (result PasswordCredentialListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -930,8 +911,7 @@ func (client ApplicationsClient) PatchPreparer(ctx context.Context, applicationO // PatchSender sends the Patch request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) PatchSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // PatchResponder handles the response to the Patch request. The method always @@ -939,7 +919,6 @@ func (client ApplicationsClient) PatchSender(req *http.Request) (*http.Response, func (client ApplicationsClient) PatchResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -1006,8 +985,7 @@ func (client ApplicationsClient) RemoveOwnerPreparer(ctx context.Context, applic // RemoveOwnerSender sends the RemoveOwner request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) RemoveOwnerSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // RemoveOwnerResponder handles the response to the RemoveOwner request. The method always @@ -1015,7 +993,6 @@ func (client ApplicationsClient) RemoveOwnerSender(req *http.Request) (*http.Res func (client ApplicationsClient) RemoveOwnerResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -1083,8 +1060,7 @@ func (client ApplicationsClient) UpdateKeyCredentialsPreparer(ctx context.Contex // UpdateKeyCredentialsSender sends the UpdateKeyCredentials request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) UpdateKeyCredentialsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UpdateKeyCredentialsResponder handles the response to the UpdateKeyCredentials request. 
The method always @@ -1092,7 +1068,6 @@ func (client ApplicationsClient) UpdateKeyCredentialsSender(req *http.Request) ( func (client ApplicationsClient) UpdateKeyCredentialsResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -1160,8 +1135,7 @@ func (client ApplicationsClient) UpdatePasswordCredentialsPreparer(ctx context.C // UpdatePasswordCredentialsSender sends the UpdatePasswordCredentials request. The method will close the // http.Response Body if it receives an error. func (client ApplicationsClient) UpdatePasswordCredentialsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UpdatePasswordCredentialsResponder handles the response to the UpdatePasswordCredentials request. The method always @@ -1169,7 +1143,6 @@ func (client ApplicationsClient) UpdatePasswordCredentialsSender(req *http.Reque func (client ApplicationsClient) UpdatePasswordCredentialsResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/client.go index 6f46fe18bf90..e22971a0fc5d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/client.go @@ -41,7 +41,8 @@ func New(tenantID string) BaseClient { return NewWithBaseURI(DefaultBaseURI, tenantID) } -// NewWithBaseURI creates an instance of the BaseClient client. +// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with +// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewWithBaseURI(baseURI string, tenantID string) BaseClient { return BaseClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/deletedapplications.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/deletedapplications.go index fe3b2da5dbc3..e2c146d6a659 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/deletedapplications.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/deletedapplications.go @@ -36,7 +36,9 @@ func NewDeletedApplicationsClient(tenantID string) DeletedApplicationsClient { return NewDeletedApplicationsClientWithBaseURI(DefaultBaseURI, tenantID) } -// NewDeletedApplicationsClientWithBaseURI creates an instance of the DeletedApplicationsClient client. +// NewDeletedApplicationsClientWithBaseURI creates an instance of the DeletedApplicationsClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). 
func NewDeletedApplicationsClientWithBaseURI(baseURI string, tenantID string) DeletedApplicationsClient { return DeletedApplicationsClient{NewWithBaseURI(baseURI, tenantID)} } @@ -99,8 +101,7 @@ func (client DeletedApplicationsClient) HardDeletePreparer(ctx context.Context, // HardDeleteSender sends the HardDelete request. The method will close the // http.Response Body if it receives an error. func (client DeletedApplicationsClient) HardDeleteSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // HardDeleteResponder handles the response to the HardDelete request. The method always @@ -108,7 +109,6 @@ func (client DeletedApplicationsClient) HardDeleteSender(req *http.Request) (*ht func (client DeletedApplicationsClient) HardDeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -181,8 +181,7 @@ func (client DeletedApplicationsClient) ListPreparer(ctx context.Context, filter // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client DeletedApplicationsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListResponder handles the response to the List request. The method always @@ -190,7 +189,6 @@ func (client DeletedApplicationsClient) ListSender(req *http.Request) (*http.Res func (client DeletedApplicationsClient) ListResponder(resp *http.Response) (result ApplicationListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -272,8 +270,7 @@ func (client DeletedApplicationsClient) ListNextPreparer(ctx context.Context, ne // ListNextSender sends the ListNext request. The method will close the // http.Response Body if it receives an error. func (client DeletedApplicationsClient) ListNextSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListNextResponder handles the response to the ListNext request. 
The method always @@ -281,7 +278,6 @@ func (client DeletedApplicationsClient) ListNextSender(req *http.Request) (*http func (client DeletedApplicationsClient) ListNextResponder(resp *http.Response) (result ApplicationListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -347,8 +343,7 @@ func (client DeletedApplicationsClient) RestorePreparer(ctx context.Context, obj // RestoreSender sends the Restore request. The method will close the // http.Response Body if it receives an error. func (client DeletedApplicationsClient) RestoreSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // RestoreResponder handles the response to the Restore request. The method always @@ -356,7 +351,6 @@ func (client DeletedApplicationsClient) RestoreSender(req *http.Request) (*http. func (client DeletedApplicationsClient) RestoreResponder(resp *http.Response) (result Application, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/domains.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/domains.go index 93c6cac1e063..ace3959ac60f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/domains.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/domains.go @@ -35,7 +35,8 @@ func NewDomainsClient(tenantID string) DomainsClient { return NewDomainsClientWithBaseURI(DefaultBaseURI, tenantID) } -// NewDomainsClientWithBaseURI creates an instance of the DomainsClient client. +// NewDomainsClientWithBaseURI creates an instance of the DomainsClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewDomainsClientWithBaseURI(baseURI string, tenantID string) DomainsClient { return DomainsClient{NewWithBaseURI(baseURI, tenantID)} } @@ -98,8 +99,7 @@ func (client DomainsClient) GetPreparer(ctx context.Context, domainName string) // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client DomainsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetResponder handles the response to the Get request. 
The method always @@ -107,7 +107,6 @@ func (client DomainsClient) GetSender(req *http.Request) (*http.Response, error) func (client DomainsClient) GetResponder(resp *http.Response) (result Domain, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -175,8 +174,7 @@ func (client DomainsClient) ListPreparer(ctx context.Context, filter string) (*h // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client DomainsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListResponder handles the response to the List request. The method always @@ -184,7 +182,6 @@ func (client DomainsClient) ListSender(req *http.Request) (*http.Response, error func (client DomainsClient) ListResponder(resp *http.Response) (result DomainListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/groups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/groups.go index 4ed37e94f0a8..cabe5700a08a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/groups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/groups.go @@ -37,7 +37,8 @@ func NewGroupsClient(tenantID string) GroupsClient { return NewGroupsClientWithBaseURI(DefaultBaseURI, tenantID) } -// NewGroupsClientWithBaseURI creates an instance of the GroupsClient client. +// NewGroupsClientWithBaseURI creates an instance of the GroupsClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewGroupsClientWithBaseURI(baseURI string, tenantID string) GroupsClient { return GroupsClient{NewWithBaseURI(baseURI, tenantID)} } @@ -110,8 +111,7 @@ func (client GroupsClient) AddMemberPreparer(ctx context.Context, groupObjectID // AddMemberSender sends the AddMember request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) AddMemberSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // AddMemberResponder handles the response to the AddMember request. 
The method always @@ -119,7 +119,6 @@ func (client GroupsClient) AddMemberSender(req *http.Request) (*http.Response, e func (client GroupsClient) AddMemberResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -194,8 +193,7 @@ func (client GroupsClient) AddOwnerPreparer(ctx context.Context, objectID string // AddOwnerSender sends the AddOwner request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) AddOwnerSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // AddOwnerResponder handles the response to the AddOwner request. The method always @@ -203,7 +201,6 @@ func (client GroupsClient) AddOwnerSender(req *http.Request) (*http.Response, er func (client GroupsClient) AddOwnerResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -278,8 +275,7 @@ func (client GroupsClient) CreatePreparer(ctx context.Context, parameters GroupC // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) CreateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // CreateResponder handles the response to the Create request. The method always @@ -287,7 +283,6 @@ func (client GroupsClient) CreateSender(req *http.Request) (*http.Response, erro func (client GroupsClient) CreateResponder(resp *http.Response) (result ADGroup, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -353,8 +348,7 @@ func (client GroupsClient) DeletePreparer(ctx context.Context, objectID string) // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -362,7 +356,6 @@ func (client GroupsClient) DeleteSender(req *http.Request) (*http.Response, erro func (client GroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -427,8 +420,7 @@ func (client GroupsClient) GetPreparer(ctx context.Context, objectID string) (*h // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetResponder handles the response to the Get request. The method always @@ -436,7 +428,6 @@ func (client GroupsClient) GetSender(req *http.Request) (*http.Response, error) func (client GroupsClient) GetResponder(resp *http.Response) (result ADGroup, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -508,8 +499,7 @@ func (client GroupsClient) GetGroupMembersPreparer(ctx context.Context, objectID // GetGroupMembersSender sends the GetGroupMembers request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) GetGroupMembersSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetGroupMembersResponder handles the response to the GetGroupMembers request. The method always @@ -517,7 +507,6 @@ func (client GroupsClient) GetGroupMembersSender(req *http.Request) (*http.Respo func (client GroupsClient) GetGroupMembersResponder(resp *http.Response) (result DirectoryObjectListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -599,8 +588,7 @@ func (client GroupsClient) GetGroupMembersNextPreparer(ctx context.Context, next // GetGroupMembersNextSender sends the GetGroupMembersNext request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) GetGroupMembersNextSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetGroupMembersNextResponder handles the response to the GetGroupMembersNext request. 
The method always @@ -608,7 +596,6 @@ func (client GroupsClient) GetGroupMembersNextSender(req *http.Request) (*http.R func (client GroupsClient) GetGroupMembersNextResponder(resp *http.Response) (result DirectoryObjectListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -683,8 +670,7 @@ func (client GroupsClient) GetMemberGroupsPreparer(ctx context.Context, objectID // GetMemberGroupsSender sends the GetMemberGroups request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) GetMemberGroupsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetMemberGroupsResponder handles the response to the GetMemberGroups request. The method always @@ -692,7 +678,6 @@ func (client GroupsClient) GetMemberGroupsSender(req *http.Request) (*http.Respo func (client GroupsClient) GetMemberGroupsResponder(resp *http.Response) (result GroupGetMemberGroupsResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -767,8 +752,7 @@ func (client GroupsClient) IsMemberOfPreparer(ctx context.Context, parameters Ch // IsMemberOfSender sends the IsMemberOf request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) IsMemberOfSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // IsMemberOfResponder handles the response to the IsMemberOf request. The method always @@ -776,7 +760,6 @@ func (client GroupsClient) IsMemberOfSender(req *http.Request) (*http.Response, func (client GroupsClient) IsMemberOfResponder(resp *http.Response) (result CheckGroupMembershipResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -850,8 +833,7 @@ func (client GroupsClient) ListPreparer(ctx context.Context, filter string) (*ht // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListResponder handles the response to the List request. 
The method always @@ -859,7 +841,6 @@ func (client GroupsClient) ListSender(req *http.Request) (*http.Response, error) func (client GroupsClient) ListResponder(resp *http.Response) (result GroupListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -941,8 +922,7 @@ func (client GroupsClient) ListNextPreparer(ctx context.Context, nextLink string // ListNextSender sends the ListNext request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) ListNextSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListNextResponder handles the response to the ListNext request. The method always @@ -950,7 +930,6 @@ func (client GroupsClient) ListNextSender(req *http.Request) (*http.Response, er func (client GroupsClient) ListNextResponder(resp *http.Response) (result GroupListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -1017,8 +996,7 @@ func (client GroupsClient) ListOwnersPreparer(ctx context.Context, objectID stri // ListOwnersSender sends the ListOwners request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) ListOwnersSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListOwnersResponder handles the response to the ListOwners request. The method always @@ -1026,7 +1004,6 @@ func (client GroupsClient) ListOwnersSender(req *http.Request) (*http.Response, func (client GroupsClient) ListOwnersResponder(resp *http.Response) (result DirectoryObjectListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -1131,8 +1108,7 @@ func (client GroupsClient) RemoveMemberPreparer(ctx context.Context, groupObject // RemoveMemberSender sends the RemoveMember request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) RemoveMemberSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // RemoveMemberResponder handles the response to the RemoveMember request. 
The method always @@ -1140,7 +1116,6 @@ func (client GroupsClient) RemoveMemberSender(req *http.Request) (*http.Response func (client GroupsClient) RemoveMemberResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -1207,8 +1182,7 @@ func (client GroupsClient) RemoveOwnerPreparer(ctx context.Context, objectID str // RemoveOwnerSender sends the RemoveOwner request. The method will close the // http.Response Body if it receives an error. func (client GroupsClient) RemoveOwnerSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // RemoveOwnerResponder handles the response to the RemoveOwner request. The method always @@ -1216,7 +1190,6 @@ func (client GroupsClient) RemoveOwnerSender(req *http.Request) (*http.Response, func (client GroupsClient) RemoveOwnerResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/models.go index e6b583d96f14..73881629af3a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/models.go @@ -45,6 +45,23 @@ func PossibleConsentTypeValues() []ConsentType { return []ConsentType{AllPrincipals, Principal} } +// GroupMembershipClaimTypes enumerates the values for group membership claim types. +type GroupMembershipClaimTypes string + +const ( + // All ... + All GroupMembershipClaimTypes = "All" + // None ... + None GroupMembershipClaimTypes = "None" + // SecurityGroup ... + SecurityGroup GroupMembershipClaimTypes = "SecurityGroup" +) + +// PossibleGroupMembershipClaimTypesValues returns an array of possible values for the GroupMembershipClaimTypes const type. +func PossibleGroupMembershipClaimTypesValues() []GroupMembershipClaimTypes { + return []GroupMembershipClaimTypes{All, None, SecurityGroup} +} + // ObjectType enumerates the values for object type. type ObjectType string @@ -338,8 +355,8 @@ type Application struct { DisplayName *string `json:"displayName,omitempty"` // ErrorURL - A URL provided by the author of the application to report errors when using the application. ErrorURL *string `json:"errorUrl,omitempty"` - // GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects. - GroupMembershipClaims interface{} `json:"groupMembershipClaims,omitempty"` + // GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects. Possible values include: 'None', 'SecurityGroup', 'All' + GroupMembershipClaims GroupMembershipClaimTypes `json:"groupMembershipClaims,omitempty"` // Homepage - The home page of the application. 
Homepage *string `json:"homepage,omitempty"` // IdentifierUris - A collection of URIs for the application. @@ -424,7 +441,7 @@ func (a Application) MarshalJSON() ([]byte, error) { if a.ErrorURL != nil { objectMap["errorUrl"] = a.ErrorURL } - if a.GroupMembershipClaims != nil { + if a.GroupMembershipClaims != "" { objectMap["groupMembershipClaims"] = a.GroupMembershipClaims } if a.Homepage != nil { @@ -624,7 +641,7 @@ func (a *Application) UnmarshalJSON(body []byte) error { } case "groupMembershipClaims": if v != nil { - var groupMembershipClaims interface{} + var groupMembershipClaims GroupMembershipClaimTypes err = json.Unmarshal(*v, &groupMembershipClaims) if err != nil { return err @@ -890,8 +907,8 @@ type ApplicationBase struct { AvailableToOtherTenants *bool `json:"availableToOtherTenants,omitempty"` // ErrorURL - A URL provided by the author of the application to report errors when using the application. ErrorURL *string `json:"errorUrl,omitempty"` - // GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects. - GroupMembershipClaims interface{} `json:"groupMembershipClaims,omitempty"` + // GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects. Possible values include: 'None', 'SecurityGroup', 'All' + GroupMembershipClaims GroupMembershipClaimTypes `json:"groupMembershipClaims,omitempty"` // Homepage - The home page of the application. Homepage *string `json:"homepage,omitempty"` // InformationalUrls - URLs with more information about the application. @@ -955,8 +972,8 @@ type ApplicationCreateParameters struct { AvailableToOtherTenants *bool `json:"availableToOtherTenants,omitempty"` // ErrorURL - A URL provided by the author of the application to report errors when using the application. ErrorURL *string `json:"errorUrl,omitempty"` - // GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects. - GroupMembershipClaims interface{} `json:"groupMembershipClaims,omitempty"` + // GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects. Possible values include: 'None', 'SecurityGroup', 'All' + GroupMembershipClaims GroupMembershipClaimTypes `json:"groupMembershipClaims,omitempty"` // Homepage - The home page of the application. Homepage *string `json:"homepage,omitempty"` // InformationalUrls - URLs with more information about the application. @@ -1154,8 +1171,8 @@ type ApplicationUpdateParameters struct { AvailableToOtherTenants *bool `json:"availableToOtherTenants,omitempty"` // ErrorURL - A URL provided by the author of the application to report errors when using the application. ErrorURL *string `json:"errorUrl,omitempty"` - // GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects. - GroupMembershipClaims interface{} `json:"groupMembershipClaims,omitempty"` + // GroupMembershipClaims - Configures the groups claim issued in a user or OAuth 2.0 access token that the app expects. Possible values include: 'None', 'SecurityGroup', 'All' + GroupMembershipClaims GroupMembershipClaimTypes `json:"groupMembershipClaims,omitempty"` // Homepage - The home page of the application. Homepage *string `json:"homepage,omitempty"` // InformationalUrls - URLs with more information about the application. 
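
The models.go hunks above replace the untyped interface{} GroupMembershipClaims field with the new GroupMembershipClaimTypes string enum (None, SecurityGroup, All), and MarshalJSON now checks for the empty string instead of nil. As a minimal sketch of what this means for callers, the snippet below builds an ApplicationCreateParameters value with the typed constant; it is illustrative only, not part of the patch, and it assumes the vendored graphrbac 1.6 and go-autorest "to" packages with hypothetical field values.

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Illustrative only: with the typed enum, callers pick one of the
	// generated constants instead of passing an untyped value.
	params := graphrbac.ApplicationCreateParameters{
		AvailableToOtherTenants: to.BoolPtr(false),
		Homepage:                to.StringPtr("https://example.invalid"), // hypothetical value
		GroupMembershipClaims:   graphrbac.SecurityGroup,                 // or graphrbac.None / graphrbac.All
	}

	// The zero value ("") is treated as unset: MarshalJSON only emits
	// groupMembershipClaims when the field is non-empty.
	fmt.Println(params.GroupMembershipClaims)
}
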
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/oauth2permissiongrant.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/oauth2permissiongrant.go index 04ddbb3e89c4..937987a58781 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/oauth2permissiongrant.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/oauth2permissiongrant.go @@ -36,7 +36,9 @@ func NewOAuth2PermissionGrantClient(tenantID string) OAuth2PermissionGrantClient return NewOAuth2PermissionGrantClientWithBaseURI(DefaultBaseURI, tenantID) } -// NewOAuth2PermissionGrantClientWithBaseURI creates an instance of the OAuth2PermissionGrantClient client. +// NewOAuth2PermissionGrantClientWithBaseURI creates an instance of the OAuth2PermissionGrantClient client using a +// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, +// Azure stack). func NewOAuth2PermissionGrantClientWithBaseURI(baseURI string, tenantID string) OAuth2PermissionGrantClient { return OAuth2PermissionGrantClient{NewWithBaseURI(baseURI, tenantID)} } @@ -103,8 +105,7 @@ func (client OAuth2PermissionGrantClient) CreatePreparer(ctx context.Context, bo // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client OAuth2PermissionGrantClient) CreateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // CreateResponder handles the response to the Create request. The method always @@ -112,7 +113,6 @@ func (client OAuth2PermissionGrantClient) CreateSender(req *http.Request) (*http func (client OAuth2PermissionGrantClient) CreateResponder(resp *http.Response) (result OAuth2PermissionGrant, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -178,8 +178,7 @@ func (client OAuth2PermissionGrantClient) DeletePreparer(ctx context.Context, ob // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client OAuth2PermissionGrantClient) DeleteSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -187,7 +186,6 @@ func (client OAuth2PermissionGrantClient) DeleteSender(req *http.Request) (*http func (client OAuth2PermissionGrantClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -260,8 +258,7 @@ func (client OAuth2PermissionGrantClient) ListPreparer(ctx context.Context, filt // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client OAuth2PermissionGrantClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListResponder handles the response to the List request. The method always @@ -269,7 +266,6 @@ func (client OAuth2PermissionGrantClient) ListSender(req *http.Request) (*http.R func (client OAuth2PermissionGrantClient) ListResponder(resp *http.Response) (result OAuth2PermissionGrantListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -351,8 +347,7 @@ func (client OAuth2PermissionGrantClient) ListNextPreparer(ctx context.Context, // ListNextSender sends the ListNext request. The method will close the // http.Response Body if it receives an error. func (client OAuth2PermissionGrantClient) ListNextSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListNextResponder handles the response to the ListNext request. The method always @@ -360,7 +355,6 @@ func (client OAuth2PermissionGrantClient) ListNextSender(req *http.Request) (*ht func (client OAuth2PermissionGrantClient) ListNextResponder(resp *http.Response) (result OAuth2PermissionGrantListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/objects.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/objects.go index 3f6ca5c4bfc1..266c4155350c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/objects.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/objects.go @@ -36,7 +36,8 @@ func NewObjectsClient(tenantID string) ObjectsClient { return NewObjectsClientWithBaseURI(DefaultBaseURI, tenantID) } -// NewObjectsClientWithBaseURI creates an instance of the ObjectsClient client. +// NewObjectsClientWithBaseURI creates an instance of the ObjectsClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
func NewObjectsClientWithBaseURI(baseURI string, tenantID string) ObjectsClient { return ObjectsClient{NewWithBaseURI(baseURI, tenantID)} } @@ -107,8 +108,7 @@ func (client ObjectsClient) GetObjectsByObjectIdsPreparer(ctx context.Context, p // GetObjectsByObjectIdsSender sends the GetObjectsByObjectIds request. The method will close the // http.Response Body if it receives an error. func (client ObjectsClient) GetObjectsByObjectIdsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetObjectsByObjectIdsResponder handles the response to the GetObjectsByObjectIds request. The method always @@ -116,7 +116,6 @@ func (client ObjectsClient) GetObjectsByObjectIdsSender(req *http.Request) (*htt func (client ObjectsClient) GetObjectsByObjectIdsResponder(resp *http.Response) (result DirectoryObjectListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -198,8 +197,7 @@ func (client ObjectsClient) GetObjectsByObjectIdsNextPreparer(ctx context.Contex // GetObjectsByObjectIdsNextSender sends the GetObjectsByObjectIdsNext request. The method will close the // http.Response Body if it receives an error. func (client ObjectsClient) GetObjectsByObjectIdsNextSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetObjectsByObjectIdsNextResponder handles the response to the GetObjectsByObjectIdsNext request. The method always @@ -207,7 +205,6 @@ func (client ObjectsClient) GetObjectsByObjectIdsNextSender(req *http.Request) ( func (client ObjectsClient) GetObjectsByObjectIdsNextResponder(resp *http.Response) (result DirectoryObjectListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/serviceprincipals.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/serviceprincipals.go index 45099830b53b..2778bbb3112d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/serviceprincipals.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/serviceprincipals.go @@ -37,7 +37,9 @@ func NewServicePrincipalsClient(tenantID string) ServicePrincipalsClient { return NewServicePrincipalsClientWithBaseURI(DefaultBaseURI, tenantID) } -// NewServicePrincipalsClientWithBaseURI creates an instance of the ServicePrincipalsClient client. +// NewServicePrincipalsClientWithBaseURI creates an instance of the ServicePrincipalsClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). 
func NewServicePrincipalsClientWithBaseURI(baseURI string, tenantID string) ServicePrincipalsClient { return ServicePrincipalsClient{NewWithBaseURI(baseURI, tenantID)} } @@ -107,8 +109,7 @@ func (client ServicePrincipalsClient) CreatePreparer(ctx context.Context, parame // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) CreateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // CreateResponder handles the response to the Create request. The method always @@ -116,7 +117,6 @@ func (client ServicePrincipalsClient) CreateSender(req *http.Request) (*http.Res func (client ServicePrincipalsClient) CreateResponder(resp *http.Response) (result ServicePrincipal, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -182,8 +182,7 @@ func (client ServicePrincipalsClient) DeletePreparer(ctx context.Context, object // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) DeleteSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteResponder handles the response to the Delete request. The method always @@ -191,7 +190,6 @@ func (client ServicePrincipalsClient) DeleteSender(req *http.Request) (*http.Res func (client ServicePrincipalsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -256,8 +254,7 @@ func (client ServicePrincipalsClient) GetPreparer(ctx context.Context, objectID // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetResponder handles the response to the Get request. 
The method always @@ -265,7 +262,6 @@ func (client ServicePrincipalsClient) GetSender(req *http.Request) (*http.Respon func (client ServicePrincipalsClient) GetResponder(resp *http.Response) (result ServicePrincipal, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -339,8 +335,7 @@ func (client ServicePrincipalsClient) ListPreparer(ctx context.Context, filter s // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListResponder handles the response to the List request. The method always @@ -348,7 +343,6 @@ func (client ServicePrincipalsClient) ListSender(req *http.Request) (*http.Respo func (client ServicePrincipalsClient) ListResponder(resp *http.Response) (result ServicePrincipalListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -430,8 +424,7 @@ func (client ServicePrincipalsClient) ListKeyCredentialsPreparer(ctx context.Con // ListKeyCredentialsSender sends the ListKeyCredentials request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) ListKeyCredentialsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListKeyCredentialsResponder handles the response to the ListKeyCredentials request. The method always @@ -439,7 +432,6 @@ func (client ServicePrincipalsClient) ListKeyCredentialsSender(req *http.Request func (client ServicePrincipalsClient) ListKeyCredentialsResponder(resp *http.Response) (result KeyCredentialListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -505,8 +497,7 @@ func (client ServicePrincipalsClient) ListNextPreparer(ctx context.Context, next // ListNextSender sends the ListNext request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) ListNextSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListNextResponder handles the response to the ListNext request. 
The method always @@ -514,7 +505,6 @@ func (client ServicePrincipalsClient) ListNextSender(req *http.Request) (*http.R func (client ServicePrincipalsClient) ListNextResponder(resp *http.Response) (result ServicePrincipalListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -581,8 +571,7 @@ func (client ServicePrincipalsClient) ListOwnersPreparer(ctx context.Context, ob // ListOwnersSender sends the ListOwners request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) ListOwnersSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListOwnersResponder handles the response to the ListOwners request. The method always @@ -590,7 +579,6 @@ func (client ServicePrincipalsClient) ListOwnersSender(req *http.Request) (*http func (client ServicePrincipalsClient) ListOwnersResponder(resp *http.Response) (result DirectoryObjectListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -693,8 +681,7 @@ func (client ServicePrincipalsClient) ListPasswordCredentialsPreparer(ctx contex // ListPasswordCredentialsSender sends the ListPasswordCredentials request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) ListPasswordCredentialsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListPasswordCredentialsResponder handles the response to the ListPasswordCredentials request. The method always @@ -702,7 +689,6 @@ func (client ServicePrincipalsClient) ListPasswordCredentialsSender(req *http.Re func (client ServicePrincipalsClient) ListPasswordCredentialsResponder(resp *http.Response) (result PasswordCredentialListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -771,8 +757,7 @@ func (client ServicePrincipalsClient) UpdatePreparer(ctx context.Context, object // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) UpdateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UpdateResponder handles the response to the Update request. 
The method always @@ -780,7 +765,6 @@ func (client ServicePrincipalsClient) UpdateSender(req *http.Request) (*http.Res func (client ServicePrincipalsClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -848,8 +832,7 @@ func (client ServicePrincipalsClient) UpdateKeyCredentialsPreparer(ctx context.C // UpdateKeyCredentialsSender sends the UpdateKeyCredentials request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) UpdateKeyCredentialsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UpdateKeyCredentialsResponder handles the response to the UpdateKeyCredentials request. The method always @@ -857,7 +840,6 @@ func (client ServicePrincipalsClient) UpdateKeyCredentialsSender(req *http.Reque func (client ServicePrincipalsClient) UpdateKeyCredentialsResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -925,8 +907,7 @@ func (client ServicePrincipalsClient) UpdatePasswordCredentialsPreparer(ctx cont // UpdatePasswordCredentialsSender sends the UpdatePasswordCredentials request. The method will close the // http.Response Body if it receives an error. func (client ServicePrincipalsClient) UpdatePasswordCredentialsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UpdatePasswordCredentialsResponder handles the response to the UpdatePasswordCredentials request. The method always @@ -934,7 +915,6 @@ func (client ServicePrincipalsClient) UpdatePasswordCredentialsSender(req *http. func (client ServicePrincipalsClient) UpdatePasswordCredentialsResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/signedinuser.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/signedinuser.go index 057658eefd6b..854e04d65aa6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/signedinuser.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/signedinuser.go @@ -36,7 +36,8 @@ func NewSignedInUserClient(tenantID string) SignedInUserClient { return NewSignedInUserClientWithBaseURI(DefaultBaseURI, tenantID) } -// NewSignedInUserClientWithBaseURI creates an instance of the SignedInUserClient client. 
+// NewSignedInUserClientWithBaseURI creates an instance of the SignedInUserClient client using a custom endpoint. Use +// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewSignedInUserClientWithBaseURI(baseURI string, tenantID string) SignedInUserClient { return SignedInUserClient{NewWithBaseURI(baseURI, tenantID)} } @@ -96,8 +97,7 @@ func (client SignedInUserClient) GetPreparer(ctx context.Context) (*http.Request // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client SignedInUserClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetResponder handles the response to the Get request. The method always @@ -105,7 +105,6 @@ func (client SignedInUserClient) GetSender(req *http.Request) (*http.Response, e func (client SignedInUserClient) GetResponder(resp *http.Response) (result User, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -174,8 +173,7 @@ func (client SignedInUserClient) ListOwnedObjectsPreparer(ctx context.Context) ( // ListOwnedObjectsSender sends the ListOwnedObjects request. The method will close the // http.Response Body if it receives an error. func (client SignedInUserClient) ListOwnedObjectsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListOwnedObjectsResponder handles the response to the ListOwnedObjects request. The method always @@ -183,7 +181,6 @@ func (client SignedInUserClient) ListOwnedObjectsSender(req *http.Request) (*htt func (client SignedInUserClient) ListOwnedObjectsResponder(resp *http.Response) (result DirectoryObjectListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -265,8 +262,7 @@ func (client SignedInUserClient) ListOwnedObjectsNextPreparer(ctx context.Contex // ListOwnedObjectsNextSender sends the ListOwnedObjectsNext request. The method will close the // http.Response Body if it receives an error. func (client SignedInUserClient) ListOwnedObjectsNextSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListOwnedObjectsNextResponder handles the response to the ListOwnedObjectsNext request. 
The method always @@ -274,7 +270,6 @@ func (client SignedInUserClient) ListOwnedObjectsNextSender(req *http.Request) ( func (client SignedInUserClient) ListOwnedObjectsNextResponder(resp *http.Response) (result DirectoryObjectListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/users.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/users.go index 3c688fe7ba07..63cf3b8b21fe 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/users.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/users.go @@ -37,7 +37,8 @@ func NewUsersClient(tenantID string) UsersClient { return NewUsersClientWithBaseURI(DefaultBaseURI, tenantID) } -// NewUsersClientWithBaseURI creates an instance of the UsersClient client. +// NewUsersClientWithBaseURI creates an instance of the UsersClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewUsersClientWithBaseURI(baseURI string, tenantID string) UsersClient { return UsersClient{NewWithBaseURI(baseURI, tenantID)} } @@ -112,8 +113,7 @@ func (client UsersClient) CreatePreparer(ctx context.Context, parameters UserCre // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client UsersClient) CreateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // CreateResponder handles the response to the Create request. The method always @@ -121,7 +121,6 @@ func (client UsersClient) CreateSender(req *http.Request) (*http.Response, error func (client UsersClient) CreateResponder(resp *http.Response) (result User, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -187,8 +186,7 @@ func (client UsersClient) DeletePreparer(ctx context.Context, upnOrObjectID stri // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client UsersClient) DeleteSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteResponder handles the response to the Delete request. 
The method always @@ -196,7 +194,6 @@ func (client UsersClient) DeleteSender(req *http.Request) (*http.Response, error func (client UsersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp @@ -261,8 +258,7 @@ func (client UsersClient) GetPreparer(ctx context.Context, upnOrObjectID string) // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client UsersClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetResponder handles the response to the Get request. The method always @@ -270,7 +266,6 @@ func (client UsersClient) GetSender(req *http.Request) (*http.Response, error) { func (client UsersClient) GetResponder(resp *http.Response) (result User, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -345,8 +340,7 @@ func (client UsersClient) GetMemberGroupsPreparer(ctx context.Context, objectID // GetMemberGroupsSender sends the GetMemberGroups request. The method will close the // http.Response Body if it receives an error. func (client UsersClient) GetMemberGroupsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetMemberGroupsResponder handles the response to the GetMemberGroups request. The method always @@ -354,7 +348,6 @@ func (client UsersClient) GetMemberGroupsSender(req *http.Request) (*http.Respon func (client UsersClient) GetMemberGroupsResponder(resp *http.Response) (result UserGetMemberGroupsResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -365,7 +358,8 @@ func (client UsersClient) GetMemberGroupsResponder(resp *http.Response) (result // List gets list of users for the current tenant. // Parameters: // filter - the filter to apply to the operation. -func (client UsersClient) List(ctx context.Context, filter string) (result UserListResultPage, err error) { +// expand - the expand value for the operation result. 
+func (client UsersClient) List(ctx context.Context, filter string, expand string) (result UserListResultPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/UsersClient.List") defer func() { @@ -382,7 +376,7 @@ func (client UsersClient) List(ctx context.Context, filter string) (result UserL } return client.ListNext(ctx, *lastResult.OdataNextLink) } - req, err := client.ListPreparer(ctx, filter) + req, err := client.ListPreparer(ctx, filter, expand) if err != nil { err = autorest.NewErrorWithError(err, "graphrbac.UsersClient", "List", nil, "Failure preparing request") return @@ -404,7 +398,7 @@ func (client UsersClient) List(ctx context.Context, filter string) (result UserL } // ListPreparer prepares the List request. -func (client UsersClient) ListPreparer(ctx context.Context, filter string) (*http.Request, error) { +func (client UsersClient) ListPreparer(ctx context.Context, filter string, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ "tenantID": autorest.Encode("path", client.TenantID), } @@ -416,6 +410,9 @@ func (client UsersClient) ListPreparer(ctx context.Context, filter string) (*htt if len(filter) > 0 { queryParameters["$filter"] = autorest.Encode("query", filter) } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -428,8 +425,7 @@ func (client UsersClient) ListPreparer(ctx context.Context, filter string) (*htt // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client UsersClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListResponder handles the response to the List request. The method always @@ -437,7 +433,6 @@ func (client UsersClient) ListSender(req *http.Request) (*http.Response, error) func (client UsersClient) ListResponder(resp *http.Response) (result UserListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -446,7 +441,7 @@ func (client UsersClient) ListResponder(resp *http.Response) (result UserListRes } // ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client UsersClient) ListComplete(ctx context.Context, filter string) (result UserListResultIterator, err error) { +func (client UsersClient) ListComplete(ctx context.Context, filter string, expand string) (result UserListResultIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/UsersClient.List") defer func() { @@ -457,7 +452,7 @@ func (client UsersClient) ListComplete(ctx context.Context, filter string) (resu tracing.EndSpan(ctx, sc, err) }() } - result.page, err = client.List(ctx, filter) + result.page, err = client.List(ctx, filter, expand) return } @@ -519,8 +514,7 @@ func (client UsersClient) ListNextPreparer(ctx context.Context, nextLink string) // ListNextSender sends the ListNext request. The method will close the // http.Response Body if it receives an error. 
func (client UsersClient) ListNextSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListNextResponder handles the response to the ListNext request. The method always @@ -528,7 +522,6 @@ func (client UsersClient) ListNextSender(req *http.Request) (*http.Response, err func (client UsersClient) ListNextResponder(resp *http.Response) (result UserListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -597,8 +590,7 @@ func (client UsersClient) UpdatePreparer(ctx context.Context, upnOrObjectID stri // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client UsersClient) UpdateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UpdateResponder handles the response to the Update request. The method always @@ -606,7 +598,6 @@ func (client UsersClient) UpdateSender(req *http.Request) (*http.Response, error func (client UsersClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/version.go index b8397dc96ad8..b0d5c79fd7c8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac/version.go @@ -21,7 +21,7 @@ import "github.com/Azure/azure-sdk-for-go/version" // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/" + version.Number + " graphrbac/1.6" + return "Azure-SDK-For-Go/" + Version() + " graphrbac/1.6" } // Version returns the semantic version (see http://semver.org) of the client. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/client.go index 2ca7fd7754b4..390080f9d8b6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/client.go @@ -109,8 +109,7 @@ func (client BaseClient) BackupCertificatePreparer(ctx context.Context, vaultBas // BackupCertificateSender sends the BackupCertificate request. The method will close the // http.Response Body if it receives an error. 
func (client BaseClient) BackupCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // BackupCertificateResponder handles the response to the BackupCertificate request. The method always @@ -196,8 +195,7 @@ func (client BaseClient) BackupKeyPreparer(ctx context.Context, vaultBaseURL str // BackupKeySender sends the BackupKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) BackupKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // BackupKeyResponder handles the response to the BackupKey request. The method always @@ -276,8 +274,7 @@ func (client BaseClient) BackupSecretPreparer(ctx context.Context, vaultBaseURL // BackupSecretSender sends the BackupSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) BackupSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // BackupSecretResponder handles the response to the BackupSecret request. The method always @@ -356,8 +353,7 @@ func (client BaseClient) BackupStorageAccountPreparer(ctx context.Context, vault // BackupStorageAccountSender sends the BackupStorageAccount request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) BackupStorageAccountSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // BackupStorageAccountResponder handles the response to the BackupStorageAccount request. 
The method always @@ -397,7 +393,7 @@ func (client BaseClient) CreateCertificate(ctx context.Context, vaultBaseURL str Constraints: []validation.Constraint{{Target: "parameters.CertificatePolicy", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}}}, + Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}}}, }}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "CreateCertificate", err.Error()) @@ -452,8 +448,7 @@ func (client BaseClient) CreateCertificatePreparer(ctx context.Context, vaultBas // CreateCertificateSender sends the CreateCertificate request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) CreateCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // CreateCertificateResponder handles the response to the CreateCertificate request. The method always @@ -541,8 +536,7 @@ func (client BaseClient) CreateKeyPreparer(ctx context.Context, vaultBaseURL str // CreateKeySender sends the CreateKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) CreateKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // CreateKeyResponder handles the response to the CreateKey request. The method always @@ -635,8 +629,7 @@ func (client BaseClient) DecryptPreparer(ctx context.Context, vaultBaseURL strin // DecryptSender sends the Decrypt request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) DecryptSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DecryptResponder handles the response to the Decrypt request. The method always @@ -716,8 +709,7 @@ func (client BaseClient) DeleteCertificatePreparer(ctx context.Context, vaultBas // DeleteCertificateSender sends the DeleteCertificate request. The method will close the // http.Response Body if it receives an error. 
func (client BaseClient) DeleteCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteCertificateResponder handles the response to the DeleteCertificate request. The method always @@ -791,8 +783,7 @@ func (client BaseClient) DeleteCertificateContactsPreparer(ctx context.Context, // DeleteCertificateContactsSender sends the DeleteCertificateContacts request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) DeleteCertificateContactsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteCertificateContactsResponder handles the response to the DeleteCertificateContacts request. The method always @@ -871,8 +862,7 @@ func (client BaseClient) DeleteCertificateIssuerPreparer(ctx context.Context, va // DeleteCertificateIssuerSender sends the DeleteCertificateIssuer request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) DeleteCertificateIssuerSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteCertificateIssuerResponder handles the response to the DeleteCertificateIssuer request. The method always @@ -951,8 +941,7 @@ func (client BaseClient) DeleteCertificateOperationPreparer(ctx context.Context, // DeleteCertificateOperationSender sends the DeleteCertificateOperation request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) DeleteCertificateOperationSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteCertificateOperationResponder handles the response to the DeleteCertificateOperation request. The method always @@ -1032,8 +1021,7 @@ func (client BaseClient) DeleteKeyPreparer(ctx context.Context, vaultBaseURL str // DeleteKeySender sends the DeleteKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) DeleteKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) 
+ return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteKeyResponder handles the response to the DeleteKey request. The method always @@ -1122,8 +1110,7 @@ func (client BaseClient) DeleteSasDefinitionPreparer(ctx context.Context, vaultB // DeleteSasDefinitionSender sends the DeleteSasDefinition request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) DeleteSasDefinitionSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteSasDefinitionResponder handles the response to the DeleteSasDefinition request. The method always @@ -1202,8 +1189,7 @@ func (client BaseClient) DeleteSecretPreparer(ctx context.Context, vaultBaseURL // DeleteSecretSender sends the DeleteSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) DeleteSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteSecretResponder handles the response to the DeleteSecret request. The method always @@ -1287,8 +1273,7 @@ func (client BaseClient) DeleteStorageAccountPreparer(ctx context.Context, vault // DeleteStorageAccountSender sends the DeleteStorageAccount request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) DeleteStorageAccountSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteStorageAccountResponder handles the response to the DeleteStorageAccount request. The method always @@ -1383,8 +1368,7 @@ func (client BaseClient) EncryptPreparer(ctx context.Context, vaultBaseURL strin // EncryptSender sends the Encrypt request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) EncryptSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // EncryptResponder handles the response to the Encrypt request. The method always @@ -1465,8 +1449,7 @@ func (client BaseClient) GetCertificatePreparer(ctx context.Context, vaultBaseUR // GetCertificateSender sends the GetCertificate request. The method will close the // http.Response Body if it receives an error. 
func (client BaseClient) GetCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetCertificateResponder handles the response to the GetCertificate request. The method always @@ -1540,8 +1523,7 @@ func (client BaseClient) GetCertificateContactsPreparer(ctx context.Context, vau // GetCertificateContactsSender sends the GetCertificateContacts request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetCertificateContactsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetCertificateContactsResponder handles the response to the GetCertificateContacts request. The method always @@ -1620,8 +1602,7 @@ func (client BaseClient) GetCertificateIssuerPreparer(ctx context.Context, vault // GetCertificateIssuerSender sends the GetCertificateIssuer request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetCertificateIssuerSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetCertificateIssuerResponder handles the response to the GetCertificateIssuer request. The method always @@ -1658,7 +1639,7 @@ func (client BaseClient) GetCertificateIssuers(ctx context.Context, vaultBaseURL {TargetValue: maxresults, Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "GetCertificateIssuers", err.Error()) } @@ -1710,8 +1691,7 @@ func (client BaseClient) GetCertificateIssuersPreparer(ctx context.Context, vaul // GetCertificateIssuersSender sends the GetCertificateIssuers request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetCertificateIssuersSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) 
+ return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetCertificateIssuersResponder handles the response to the GetCertificateIssuers request. The method always @@ -1827,8 +1807,7 @@ func (client BaseClient) GetCertificateOperationPreparer(ctx context.Context, va // GetCertificateOperationSender sends the GetCertificateOperation request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetCertificateOperationSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetCertificateOperationResponder handles the response to the GetCertificateOperation request. The method always @@ -1907,8 +1886,7 @@ func (client BaseClient) GetCertificatePolicyPreparer(ctx context.Context, vault // GetCertificatePolicySender sends the GetCertificatePolicy request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetCertificatePolicySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetCertificatePolicyResponder handles the response to the GetCertificatePolicy request. The method always @@ -1946,7 +1924,7 @@ func (client BaseClient) GetCertificates(ctx context.Context, vaultBaseURL strin {TargetValue: maxresults, Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "GetCertificates", err.Error()) } @@ -2001,8 +1979,7 @@ func (client BaseClient) GetCertificatesPreparer(ctx context.Context, vaultBaseU // GetCertificatesSender sends the GetCertificates request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetCertificatesSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetCertificatesResponder handles the response to the GetCertificates request. 
The method always @@ -2077,7 +2054,7 @@ func (client BaseClient) GetCertificateVersions(ctx context.Context, vaultBaseUR {TargetValue: maxresults, Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "GetCertificateVersions", err.Error()) } @@ -2133,8 +2110,7 @@ func (client BaseClient) GetCertificateVersionsPreparer(ctx context.Context, vau // GetCertificateVersionsSender sends the GetCertificateVersions request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetCertificateVersionsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetCertificateVersionsResponder handles the response to the GetCertificateVersions request. The method always @@ -2251,8 +2227,7 @@ func (client BaseClient) GetDeletedCertificatePreparer(ctx context.Context, vaul // GetDeletedCertificateSender sends the GetDeletedCertificate request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetDeletedCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetDeletedCertificateResponder handles the response to the GetDeletedCertificate request. The method always @@ -2292,7 +2267,7 @@ func (client BaseClient) GetDeletedCertificates(ctx context.Context, vaultBaseUR {TargetValue: maxresults, Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "GetDeletedCertificates", err.Error()) } @@ -2347,8 +2322,7 @@ func (client BaseClient) GetDeletedCertificatesPreparer(ctx context.Context, vau // GetDeletedCertificatesSender sends the GetDeletedCertificates request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetDeletedCertificatesSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) 
+ return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetDeletedCertificatesResponder handles the response to the GetDeletedCertificates request. The method always @@ -2465,8 +2439,7 @@ func (client BaseClient) GetDeletedKeyPreparer(ctx context.Context, vaultBaseURL // GetDeletedKeySender sends the GetDeletedKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetDeletedKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetDeletedKeyResponder handles the response to the GetDeletedKey request. The method always @@ -2505,7 +2478,7 @@ func (client BaseClient) GetDeletedKeys(ctx context.Context, vaultBaseURL string {TargetValue: maxresults, Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "GetDeletedKeys", err.Error()) } @@ -2557,8 +2530,7 @@ func (client BaseClient) GetDeletedKeysPreparer(ctx context.Context, vaultBaseUR // GetDeletedKeysSender sends the GetDeletedKeys request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetDeletedKeysSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetDeletedKeysResponder handles the response to the GetDeletedKeys request. The method always @@ -2684,8 +2656,7 @@ func (client BaseClient) GetDeletedSasDefinitionPreparer(ctx context.Context, va // GetDeletedSasDefinitionSender sends the GetDeletedSasDefinition request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetDeletedSasDefinitionSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetDeletedSasDefinitionResponder handles the response to the GetDeletedSasDefinition request. 
The method always @@ -2725,7 +2696,7 @@ func (client BaseClient) GetDeletedSasDefinitions(ctx context.Context, vaultBase {TargetValue: maxresults, Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "GetDeletedSasDefinitions", err.Error()) } @@ -2781,8 +2752,7 @@ func (client BaseClient) GetDeletedSasDefinitionsPreparer(ctx context.Context, v // GetDeletedSasDefinitionsSender sends the GetDeletedSasDefinitions request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetDeletedSasDefinitionsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetDeletedSasDefinitionsResponder handles the response to the GetDeletedSasDefinitions request. The method always @@ -2898,8 +2868,7 @@ func (client BaseClient) GetDeletedSecretPreparer(ctx context.Context, vaultBase // GetDeletedSecretSender sends the GetDeletedSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetDeletedSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetDeletedSecretResponder handles the response to the GetDeletedSecret request. The method always @@ -2936,7 +2905,7 @@ func (client BaseClient) GetDeletedSecrets(ctx context.Context, vaultBaseURL str {TargetValue: maxresults, Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "GetDeletedSecrets", err.Error()) } @@ -2988,8 +2957,7 @@ func (client BaseClient) GetDeletedSecretsPreparer(ctx context.Context, vaultBas // GetDeletedSecretsSender sends the GetDeletedSecrets request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetDeletedSecretsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) 
+ return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetDeletedSecretsResponder handles the response to the GetDeletedSecrets request. The method always @@ -3111,8 +3079,7 @@ func (client BaseClient) GetDeletedStorageAccountPreparer(ctx context.Context, v // GetDeletedStorageAccountSender sends the GetDeletedStorageAccount request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetDeletedStorageAccountResponder handles the response to the GetDeletedStorageAccount request. The method always @@ -3149,7 +3116,7 @@ func (client BaseClient) GetDeletedStorageAccounts(ctx context.Context, vaultBas {TargetValue: maxresults, Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "GetDeletedStorageAccounts", err.Error()) } @@ -3201,8 +3168,7 @@ func (client BaseClient) GetDeletedStorageAccountsPreparer(ctx context.Context, // GetDeletedStorageAccountsSender sends the GetDeletedStorageAccounts request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetDeletedStorageAccountsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetDeletedStorageAccountsResponder handles the response to the GetDeletedStorageAccounts request. The method always @@ -3320,8 +3286,7 @@ func (client BaseClient) GetKeyPreparer(ctx context.Context, vaultBaseURL string // GetKeySender sends the GetKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetKeyResponder handles the response to the GetKey request. 
The method always @@ -3360,7 +3325,7 @@ func (client BaseClient) GetKeys(ctx context.Context, vaultBaseURL string, maxre {TargetValue: maxresults, Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "GetKeys", err.Error()) } @@ -3412,8 +3377,7 @@ func (client BaseClient) GetKeysPreparer(ctx context.Context, vaultBaseURL strin // GetKeysSender sends the GetKeys request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetKeysSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetKeysResponder handles the response to the GetKeys request. The method always @@ -3488,7 +3452,7 @@ func (client BaseClient) GetKeyVersions(ctx context.Context, vaultBaseURL string {TargetValue: maxresults, Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "GetKeyVersions", err.Error()) } @@ -3544,8 +3508,7 @@ func (client BaseClient) GetKeyVersionsPreparer(ctx context.Context, vaultBaseUR // GetKeyVersionsSender sends the GetKeyVersions request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetKeyVersionsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetKeyVersionsResponder handles the response to the GetKeyVersions request. The method always @@ -3671,8 +3634,7 @@ func (client BaseClient) GetSasDefinitionPreparer(ctx context.Context, vaultBase // GetSasDefinitionSender sends the GetSasDefinition request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetSasDefinitionSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetSasDefinitionResponder handles the response to the GetSasDefinition request. 
The method always @@ -3712,7 +3674,7 @@ func (client BaseClient) GetSasDefinitions(ctx context.Context, vaultBaseURL str {TargetValue: maxresults, Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "GetSasDefinitions", err.Error()) } @@ -3768,8 +3730,7 @@ func (client BaseClient) GetSasDefinitionsPreparer(ctx context.Context, vaultBas // GetSasDefinitionsSender sends the GetSasDefinitions request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetSasDefinitionsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetSasDefinitionsResponder handles the response to the GetSasDefinitions request. The method always @@ -3887,8 +3848,7 @@ func (client BaseClient) GetSecretPreparer(ctx context.Context, vaultBaseURL str // GetSecretSender sends the GetSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetSecretResponder handles the response to the GetSecret request. The method always @@ -3926,7 +3886,7 @@ func (client BaseClient) GetSecrets(ctx context.Context, vaultBaseURL string, ma {TargetValue: maxresults, Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "GetSecrets", err.Error()) } @@ -3978,8 +3938,7 @@ func (client BaseClient) GetSecretsPreparer(ctx context.Context, vaultBaseURL st // GetSecretsSender sends the GetSecrets request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetSecretsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetSecretsResponder handles the response to the GetSecrets request. 
The method always @@ -4054,7 +4013,7 @@ func (client BaseClient) GetSecretVersions(ctx context.Context, vaultBaseURL str {TargetValue: maxresults, Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "GetSecretVersions", err.Error()) } @@ -4110,8 +4069,7 @@ func (client BaseClient) GetSecretVersionsPreparer(ctx context.Context, vaultBas // GetSecretVersionsSender sends the GetSecretVersions request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetSecretVersionsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetSecretVersionsResponder handles the response to the GetSecretVersions request. The method always @@ -4233,8 +4191,7 @@ func (client BaseClient) GetStorageAccountPreparer(ctx context.Context, vaultBas // GetStorageAccountSender sends the GetStorageAccount request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetStorageAccountSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetStorageAccountResponder handles the response to the GetStorageAccount request. The method always @@ -4271,7 +4228,7 @@ func (client BaseClient) GetStorageAccounts(ctx context.Context, vaultBaseURL st {TargetValue: maxresults, Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}, + {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "GetStorageAccounts", err.Error()) } @@ -4323,8 +4280,7 @@ func (client BaseClient) GetStorageAccountsPreparer(ctx context.Context, vaultBa // GetStorageAccountsSender sends the GetStorageAccounts request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) GetStorageAccountsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) 
+ return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetStorageAccountsResponder handles the response to the GetStorageAccounts request. The method always @@ -4403,7 +4359,7 @@ func (client BaseClient) ImportCertificate(ctx context.Context, vaultBaseURL str {Target: "parameters.CertificatePolicy", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}}}, + Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}}}, }}, }}}}}); err != nil { return result, validation.NewError("keyvault.BaseClient", "ImportCertificate", err.Error()) @@ -4458,8 +4414,7 @@ func (client BaseClient) ImportCertificatePreparer(ctx context.Context, vaultBas // ImportCertificateSender sends the ImportCertificate request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) ImportCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ImportCertificateResponder handles the response to the ImportCertificate request. The method always @@ -4550,8 +4505,7 @@ func (client BaseClient) ImportKeyPreparer(ctx context.Context, vaultBaseURL str // ImportKeySender sends the ImportKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) ImportKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ImportKeyResponder handles the response to the ImportKey request. The method always @@ -4639,8 +4593,7 @@ func (client BaseClient) MergeCertificatePreparer(ctx context.Context, vaultBase // MergeCertificateSender sends the MergeCertificate request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) MergeCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // MergeCertificateResponder handles the response to the MergeCertificate request. 
The method always @@ -4720,8 +4673,7 @@ func (client BaseClient) PurgeDeletedCertificatePreparer(ctx context.Context, va // PurgeDeletedCertificateSender sends the PurgeDeletedCertificate request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) PurgeDeletedCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // PurgeDeletedCertificateResponder handles the response to the PurgeDeletedCertificate request. The method always @@ -4800,8 +4752,7 @@ func (client BaseClient) PurgeDeletedKeyPreparer(ctx context.Context, vaultBaseU // PurgeDeletedKeySender sends the PurgeDeletedKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) PurgeDeletedKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // PurgeDeletedKeyResponder handles the response to the PurgeDeletedKey request. The method always @@ -4880,8 +4831,7 @@ func (client BaseClient) PurgeDeletedSecretPreparer(ctx context.Context, vaultBa // PurgeDeletedSecretSender sends the PurgeDeletedSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) PurgeDeletedSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // PurgeDeletedSecretResponder handles the response to the PurgeDeletedSecret request. The method always @@ -4966,8 +4916,7 @@ func (client BaseClient) PurgeDeletedStorageAccountPreparer(ctx context.Context, // PurgeDeletedStorageAccountSender sends the PurgeDeletedStorageAccount request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) PurgeDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // PurgeDeletedStorageAccountResponder handles the response to the PurgeDeletedStorageAccount request. The method always @@ -5046,8 +4995,7 @@ func (client BaseClient) RecoverDeletedCertificatePreparer(ctx context.Context, // RecoverDeletedCertificateSender sends the RecoverDeletedCertificate request. The method will close the // http.Response Body if it receives an error. 
func (client BaseClient) RecoverDeletedCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // RecoverDeletedCertificateResponder handles the response to the RecoverDeletedCertificate request. The method always @@ -5128,8 +5076,7 @@ func (client BaseClient) RecoverDeletedKeyPreparer(ctx context.Context, vaultBas // RecoverDeletedKeySender sends the RecoverDeletedKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) RecoverDeletedKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // RecoverDeletedKeyResponder handles the response to the RecoverDeletedKey request. The method always @@ -5218,8 +5165,7 @@ func (client BaseClient) RecoverDeletedSasDefinitionPreparer(ctx context.Context // RecoverDeletedSasDefinitionSender sends the RecoverDeletedSasDefinition request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) RecoverDeletedSasDefinitionSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // RecoverDeletedSasDefinitionResponder handles the response to the RecoverDeletedSasDefinition request. The method always @@ -5298,8 +5244,7 @@ func (client BaseClient) RecoverDeletedSecretPreparer(ctx context.Context, vault // RecoverDeletedSecretSender sends the RecoverDeletedSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) RecoverDeletedSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // RecoverDeletedSecretResponder handles the response to the RecoverDeletedSecret request. The method always @@ -5384,8 +5329,7 @@ func (client BaseClient) RecoverDeletedStorageAccountPreparer(ctx context.Contex // RecoverDeletedStorageAccountSender sends the RecoverDeletedStorageAccount request. The method will close the // http.Response Body if it receives an error. 
func (client BaseClient) RecoverDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // RecoverDeletedStorageAccountResponder handles the response to the RecoverDeletedStorageAccount request. The method always @@ -5475,8 +5419,7 @@ func (client BaseClient) RegenerateStorageAccountKeyPreparer(ctx context.Context // RegenerateStorageAccountKeySender sends the RegenerateStorageAccountKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) RegenerateStorageAccountKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // RegenerateStorageAccountKeyResponder handles the response to the RegenerateStorageAccountKey request. The method always @@ -5559,8 +5502,7 @@ func (client BaseClient) RestoreCertificatePreparer(ctx context.Context, vaultBa // RestoreCertificateSender sends the RestoreCertificate request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) RestoreCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // RestoreCertificateResponder handles the response to the RestoreCertificate request. The method always @@ -5650,8 +5592,7 @@ func (client BaseClient) RestoreKeyPreparer(ctx context.Context, vaultBaseURL st // RestoreKeySender sends the RestoreKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) RestoreKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // RestoreKeyResponder handles the response to the RestoreKey request. The method always @@ -5734,8 +5675,7 @@ func (client BaseClient) RestoreSecretPreparer(ctx context.Context, vaultBaseURL // RestoreSecretSender sends the RestoreSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) RestoreSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) 
+ return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // RestoreSecretResponder handles the response to the RestoreSecret request. The method always @@ -5818,8 +5758,7 @@ func (client BaseClient) RestoreStorageAccountPreparer(ctx context.Context, vaul // RestoreStorageAccountSender sends the RestoreStorageAccount request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) RestoreStorageAccountSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // RestoreStorageAccountResponder handles the response to the RestoreStorageAccount request. The method always @@ -5897,8 +5836,7 @@ func (client BaseClient) SetCertificateContactsPreparer(ctx context.Context, vau // SetCertificateContactsSender sends the SetCertificateContacts request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) SetCertificateContactsSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // SetCertificateContactsResponder handles the response to the SetCertificateContacts request. The method always @@ -5986,8 +5924,7 @@ func (client BaseClient) SetCertificateIssuerPreparer(ctx context.Context, vault // SetCertificateIssuerSender sends the SetCertificateIssuer request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) SetCertificateIssuerSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // SetCertificateIssuerResponder handles the response to the SetCertificateIssuer request. The method always @@ -6082,8 +6019,7 @@ func (client BaseClient) SetSasDefinitionPreparer(ctx context.Context, vaultBase // SetSasDefinitionSender sends the SetSasDefinition request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) SetSasDefinitionSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // SetSasDefinitionResponder handles the response to the SetSasDefinition request. 
The method always @@ -6173,8 +6109,7 @@ func (client BaseClient) SetSecretPreparer(ctx context.Context, vaultBaseURL str // SetSecretSender sends the SetSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) SetSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // SetSecretResponder handles the response to the SetSecret request. The method always @@ -6265,8 +6200,7 @@ func (client BaseClient) SetStorageAccountPreparer(ctx context.Context, vaultBas // SetStorageAccountSender sends the SetStorageAccount request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) SetStorageAccountSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // SetStorageAccountResponder handles the response to the SetStorageAccount request. The method always @@ -6356,8 +6290,7 @@ func (client BaseClient) SignPreparer(ctx context.Context, vaultBaseURL string, // SignSender sends the Sign request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) SignSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // SignResponder handles the response to the Sign request. The method always @@ -6449,8 +6382,7 @@ func (client BaseClient) UnwrapKeyPreparer(ctx context.Context, vaultBaseURL str // UnwrapKeySender sends the UnwrapKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) UnwrapKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UnwrapKeyResponder handles the response to the UnwrapKey request. The method always @@ -6534,8 +6466,7 @@ func (client BaseClient) UpdateCertificatePreparer(ctx context.Context, vaultBas // UpdateCertificateSender sends the UpdateCertificate request. The method will close the // http.Response Body if it receives an error. 
func (client BaseClient) UpdateCertificateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UpdateCertificateResponder handles the response to the UpdateCertificate request. The method always @@ -6617,8 +6548,7 @@ func (client BaseClient) UpdateCertificateIssuerPreparer(ctx context.Context, va // UpdateCertificateIssuerSender sends the UpdateCertificateIssuer request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) UpdateCertificateIssuerSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UpdateCertificateIssuerResponder handles the response to the UpdateCertificateIssuer request. The method always @@ -6700,8 +6630,7 @@ func (client BaseClient) UpdateCertificateOperationPreparer(ctx context.Context, // UpdateCertificateOperationSender sends the UpdateCertificateOperation request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) UpdateCertificateOperationSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UpdateCertificateOperationResponder handles the response to the UpdateCertificateOperation request. The method always @@ -6784,8 +6713,7 @@ func (client BaseClient) UpdateCertificatePolicyPreparer(ctx context.Context, va // UpdateCertificatePolicySender sends the UpdateCertificatePolicy request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) UpdateCertificatePolicySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UpdateCertificatePolicyResponder handles the response to the UpdateCertificatePolicy request. The method always @@ -6869,8 +6797,7 @@ func (client BaseClient) UpdateKeyPreparer(ctx context.Context, vaultBaseURL str // UpdateKeySender sends the UpdateKey request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) UpdateKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) 
+ return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UpdateKeyResponder handles the response to the UpdateKey request. The method always @@ -6962,8 +6889,7 @@ func (client BaseClient) UpdateSasDefinitionPreparer(ctx context.Context, vaultB // UpdateSasDefinitionSender sends the UpdateSasDefinition request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) UpdateSasDefinitionSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UpdateSasDefinitionResponder handles the response to the UpdateSasDefinition request. The method always @@ -7048,8 +6974,7 @@ func (client BaseClient) UpdateSecretPreparer(ctx context.Context, vaultBaseURL // UpdateSecretSender sends the UpdateSecret request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) UpdateSecretSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UpdateSecretResponder handles the response to the UpdateSecret request. The method always @@ -7137,8 +7062,7 @@ func (client BaseClient) UpdateStorageAccountPreparer(ctx context.Context, vault // UpdateStorageAccountSender sends the UpdateStorageAccount request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) UpdateStorageAccountSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // UpdateStorageAccountResponder handles the response to the UpdateStorageAccount request. The method always @@ -7231,8 +7155,7 @@ func (client BaseClient) VerifyPreparer(ctx context.Context, vaultBaseURL string // VerifySender sends the Verify request. The method will close the // http.Response Body if it receives an error. func (client BaseClient) VerifySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // VerifyResponder handles the response to the Verify request. The method always @@ -7325,8 +7248,7 @@ func (client BaseClient) WrapKeyPreparer(ctx context.Context, vaultBaseURL strin // WrapKeySender sends the WrapKey request. The method will close the // http.Response Body if it receives an error. 
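Note on the Sender rewrites above: each one is the same mechanical change, dropping the explicit autorest.GetSendDecorators/autorest.SendWithSender pair in favor of autorest.Client.Send, which applies the passed decorators itself. A minimal sketch of the call shape every rewritten Sender now reduces to, assuming an already-configured keyvault.BaseClient (the client value and request are placeholders, not part of this patch):

package example

import (
	"net/http"

	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
	"github.com/Azure/go-autorest/autorest"
)

// sendWithRetry mirrors the rewritten Senders: hand the request to Client.Send
// together with a retry decorator built from the client's RetryAttempts and
// RetryDuration settings and the package-level list of retryable status codes.
func sendWithRetry(client keyvault.BaseClient, req *http.Request) (*http.Response, error) {
	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}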
func (client BaseClient) WrapKeySender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // WrapKeyResponder handles the response to the WrapKey request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/models.go index 79fed73a9c9e..a57e7269b15e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/models.go @@ -2045,7 +2045,7 @@ type IssuerCredentials struct { type IssuerParameters struct { // Name - Name of the referenced issuer object or reserved names; for example, 'Self' or 'Unknown'. Name *string `json:"name,omitempty"` - // CertificateType - Type of certificate to be requested from the issuer provider. + // CertificateType - Certificate type as supported by the provider (optional); for example 'OV-SSL', 'EV-SSL' CertificateType *string `json:"cty,omitempty"` // CertificateTransparency - Indicates if the certificates generated under this policy should be published to certificate transparency logs. CertificateTransparency *bool `json:"cert_transparency,omitempty"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/applicationgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/applicationgateways.go new file mode 100644 index 000000000000..d02fd761316e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/applicationgateways.go @@ -0,0 +1,637 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ApplicationGatewaysClient is the network Client +type ApplicationGatewaysClient struct { + BaseClient +} + +// NewApplicationGatewaysClient creates an instance of the ApplicationGatewaysClient client. +func NewApplicationGatewaysClient(subscriptionID string) ApplicationGatewaysClient { + return NewApplicationGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewApplicationGatewaysClientWithBaseURI creates an instance of the ApplicationGatewaysClient client using a custom +// endpoint. 
Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). +func NewApplicationGatewaysClientWithBaseURI(baseURI string, subscriptionID string) ApplicationGatewaysClient { + return ApplicationGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates the specified application gateway. +// Parameters: +// resourceGroupName - the name of the resource group. +// applicationGatewayName - the name of the application gateway. +// parameters - parameters supplied to the create or update application gateway operation. +func (client ApplicationGatewaysClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway) (result ApplicationGatewaysCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewaysClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, applicationGatewayName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ApplicationGatewaysClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) CreateOrUpdateSender(req *http.Request) (future ApplicationGatewaysCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client ApplicationGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result ApplicationGateway, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified application gateway. +// Parameters: +// resourceGroupName - the name of the resource group. +// applicationGatewayName - the name of the application gateway. +func (client ApplicationGatewaysClient) Delete(ctx context.Context, resourceGroupName string, applicationGatewayName string) (result ApplicationGatewaysDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewaysClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, applicationGatewayName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ApplicationGatewaysClient) DeletePreparer(ctx context.Context, resourceGroupName string, applicationGatewayName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) DeleteSender(req *http.Request) (future ApplicationGatewaysDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified application gateway. +// Parameters: +// resourceGroupName - the name of the resource group. +// applicationGatewayName - the name of the application gateway. 
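The CreateOrUpdate and Delete calls above return futures rather than plain responses, because the underlying ARM operations are long-running. A hedged usage sketch for driving such a future to completion; the subscription ID, resource group, gateway name and parameters are caller-supplied placeholders, and environment-based credentials via the go-autorest auth helper are only one possible way to authorize the client:

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

// createGateway shows the long-running-operation pattern used by the generated
// client: start the PUT, then block on the returned future until the service
// reports a terminal state, and finally unpack the typed result.
func createGateway(ctx context.Context, subscriptionID, resourceGroup, gatewayName string, params network.ApplicationGateway) (network.ApplicationGateway, error) {
	client := network.NewApplicationGatewaysClient(subscriptionID)
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		return network.ApplicationGateway{}, err
	}
	client.Authorizer = authorizer

	future, err := client.CreateOrUpdate(ctx, resourceGroup, gatewayName, params)
	if err != nil {
		return network.ApplicationGateway{}, err
	}
	// WaitForCompletionRef polls the operation until it finishes or ctx is done.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return network.ApplicationGateway{}, err
	}
	// Result extracts the final ApplicationGateway from the completed future.
	return future.Result(client)
}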
+func (client ApplicationGatewaysClient) Get(ctx context.Context, resourceGroupName string, applicationGatewayName string) (result ApplicationGateway, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewaysClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, applicationGatewayName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ApplicationGatewaysClient) GetPreparer(ctx context.Context, resourceGroupName string, applicationGatewayName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) GetResponder(resp *http.Response) (result ApplicationGateway, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all application gateways in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. 
+func (client ApplicationGatewaysClient) List(ctx context.Context, resourceGroupName string) (result ApplicationGatewayListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewaysClient.List") + defer func() { + sc := -1 + if result.aglr.Response.Response != nil { + sc = result.aglr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.aglr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure sending request") + return + } + + result.aglr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ApplicationGatewaysClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) ListResponder(resp *http.Response) (result ApplicationGatewayListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client ApplicationGatewaysClient) listNextResults(ctx context.Context, lastResults ApplicationGatewayListResult) (result ApplicationGatewayListResult, err error) { + req, err := lastResults.applicationGatewayListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ApplicationGatewaysClient) ListComplete(ctx context.Context, resourceGroupName string) (result ApplicationGatewayListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewaysClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName) + return +} + +// ListAll gets all the application gateways in a subscription. +func (client ApplicationGatewaysClient) ListAll(ctx context.Context) (result ApplicationGatewayListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewaysClient.ListAll") + defer func() { + sc := -1 + if result.aglr.Response.Response != nil { + sc = result.aglr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listAllNextResults + req, err := client.ListAllPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", nil, "Failure preparing request") + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.aglr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure sending request") + return + } + + result.aglr, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client ApplicationGatewaysClient) ListAllPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGateways", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. 
+func (client ApplicationGatewaysClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) ListAllResponder(resp *http.Response) (result ApplicationGatewayListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listAllNextResults retrieves the next set of results, if any. +func (client ApplicationGatewaysClient) listAllNextResults(ctx context.Context, lastResults ApplicationGatewayListResult) (result ApplicationGatewayListResult, err error) { + req, err := lastResults.applicationGatewayListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "listAllNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "listAllNextResults", resp, "Failure sending next results request") + } + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "listAllNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListAllComplete enumerates all values, automatically crossing page boundaries as required. +func (client ApplicationGatewaysClient) ListAllComplete(ctx context.Context) (result ApplicationGatewayListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewaysClient.ListAll") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListAll(ctx) + return +} + +// Start starts the specified application gateway. +// Parameters: +// resourceGroupName - the name of the resource group. +// applicationGatewayName - the name of the application gateway. +func (client ApplicationGatewaysClient) Start(ctx context.Context, resourceGroupName string, applicationGatewayName string) (result ApplicationGatewaysStartFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewaysClient.Start") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.StartPreparer(ctx, resourceGroupName, applicationGatewayName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", nil, "Failure preparing request") + return + } + + result, err = client.StartSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Start", result.Response(), "Failure sending request") + return + } + + return +} + +// StartPreparer prepares the Start request. 
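ListComplete and ListAllComplete above return iterators that transparently follow nextLink pages via listNextResults/listAllNextResults. A small consumption sketch, again with a placeholder subscription ID and environment-based credentials as an assumed authorization method:

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

// listAllGateways walks every page of the subscription-wide ListAll result set.
func listAllGateways(ctx context.Context, subscriptionID string) error {
	client := network.NewApplicationGatewaysClient(subscriptionID)
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		return err
	}
	client.Authorizer = authorizer

	// ListAllComplete hides the paging: NextWithContext fetches the next page
	// on demand until NotDone reports false.
	it, err := client.ListAllComplete(ctx)
	if err != nil {
		return err
	}
	for it.NotDone() {
		gw := it.Value()
		if gw.Name != nil {
			fmt.Println(*gw.Name)
		}
		if err := it.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}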
+func (client ApplicationGatewaysClient) StartPreparer(ctx context.Context, resourceGroupName string, applicationGatewayName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) StartSender(req *http.Request) (future ApplicationGatewaysStartFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Stop stops the specified application gateway in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// applicationGatewayName - the name of the application gateway. +func (client ApplicationGatewaysClient) Stop(ctx context.Context, resourceGroupName string, applicationGatewayName string) (result ApplicationGatewaysStopFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewaysClient.Stop") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.StopPreparer(ctx, resourceGroupName, applicationGatewayName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", nil, "Failure preparing request") + return + } + + result, err = client.StopSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysClient", "Stop", result.Response(), "Failure sending request") + return + } + + return +} + +// StopPreparer prepares the Stop request. 
+func (client ApplicationGatewaysClient) StopPreparer(ctx context.Context, resourceGroupName string, applicationGatewayName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "applicationGatewayName": autorest.Encode("path", applicationGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// StopSender sends the Stop request. The method will close the +// http.Response Body if it receives an error. +func (client ApplicationGatewaysClient) StopSender(req *http.Request) (future ApplicationGatewaysStopFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// StopResponder handles the response to the Stop request. The method always +// closes the http.Response Body. +func (client ApplicationGatewaysClient) StopResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/client.go new file mode 100644 index 000000000000..c301bdc13d2c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/client.go @@ -0,0 +1,134 @@ +// Package network implements the Azure ARM Network service API version 2015-06-15. +// +// Network Client +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +const ( + // DefaultBaseURI is the default URI used for the service Network + DefaultBaseURI = "https://management.azure.com" +) + +// BaseClient is the base client for Network. 
+type BaseClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the BaseClient client. +func New(subscriptionID string) BaseClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with +// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} + +// CheckDNSNameAvailability checks whether a domain name in the cloudapp.net zone is available for use. +// Parameters: +// location - the location of the domain name. +// domainNameLabel - the domain name to be verified. It must conform to the following regular expression: +// ^[a-z][a-z0-9-]{1,61}[a-z0-9]$. +func (client BaseClient) CheckDNSNameAvailability(ctx context.Context, location string, domainNameLabel string) (result DNSNameAvailabilityResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CheckDNSNameAvailability") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CheckDNSNameAvailabilityPreparer(ctx, location, domainNameLabel) + if err != nil { + err = autorest.NewErrorWithError(err, "network.BaseClient", "CheckDNSNameAvailability", nil, "Failure preparing request") + return + } + + resp, err := client.CheckDNSNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.BaseClient", "CheckDNSNameAvailability", resp, "Failure sending request") + return + } + + result, err = client.CheckDNSNameAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.BaseClient", "CheckDNSNameAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckDNSNameAvailabilityPreparer prepares the CheckDNSNameAvailability request. +func (client BaseClient) CheckDNSNameAvailabilityPreparer(ctx context.Context, location string, domainNameLabel string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(domainNameLabel) > 0 { + queryParameters["domainNameLabel"] = autorest.Encode("query", domainNameLabel) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/CheckDnsNameAvailability", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckDNSNameAvailabilitySender sends the CheckDNSNameAvailability request. The method will close the +// http.Response Body if it receives an error. 
+func (client BaseClient) CheckDNSNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// CheckDNSNameAvailabilityResponder handles the response to the CheckDNSNameAvailability request. The method always +// closes the http.Response Body. +func (client BaseClient) CheckDNSNameAvailabilityResponder(resp *http.Response) (result DNSNameAvailabilityResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressroutecircuitauthorizations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressroutecircuitauthorizations.go new file mode 100644 index 000000000000..6be4acffc1b5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressroutecircuitauthorizations.go @@ -0,0 +1,391 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ExpressRouteCircuitAuthorizationsClient is the network Client +type ExpressRouteCircuitAuthorizationsClient struct { + BaseClient +} + +// NewExpressRouteCircuitAuthorizationsClient creates an instance of the ExpressRouteCircuitAuthorizationsClient +// client. +func NewExpressRouteCircuitAuthorizationsClient(subscriptionID string) ExpressRouteCircuitAuthorizationsClient { + return NewExpressRouteCircuitAuthorizationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewExpressRouteCircuitAuthorizationsClientWithBaseURI creates an instance of the +// ExpressRouteCircuitAuthorizationsClient client using a custom endpoint. Use this when interacting with an Azure +// cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewExpressRouteCircuitAuthorizationsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitAuthorizationsClient { + return ExpressRouteCircuitAuthorizationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an authorization in the specified express route circuit. +// Parameters: +// resourceGroupName - the name of the resource group. +// circuitName - the name of the express route circuit. +// authorizationName - the name of the authorization. +// authorizationParameters - parameters supplied to the create or update express route circuit authorization +// operation. 
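The CheckDNSNameAvailability operation defined in client.go above is a plain synchronous call on the BaseClient. A minimal sketch of invoking it, assuming the result model exposes an Available flag as in other SDK versions of this API; location, label and subscription ID are placeholders:

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

// checkDNSName asks whether a cloudapp.net label is still free in a region.
func checkDNSName(ctx context.Context, subscriptionID, location, label string) (bool, error) {
	client := network.New(subscriptionID)
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		return false, err
	}
	client.Authorizer = authorizer

	res, err := client.CheckDNSNameAvailability(ctx, location, label)
	if err != nil {
		return false, err
	}
	// Treat a missing Available field as "not available".
	return res.Available != nil && *res.Available, nil
}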
+func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization) (result ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitAuthorizationsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, circuitName, authorizationName, authorizationParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, circuitName string, authorizationName string, authorizationParameters ExpressRouteCircuitAuthorization) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationName": autorest.Encode("path", authorizationName), + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}", pathParameters), + autorest.WithJSON(authorizationParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateSender(req *http.Request) (future ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateResponder(resp *http.Response) (result ExpressRouteCircuitAuthorization, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified authorization from the specified express route circuit. 
+// Parameters: +// resourceGroupName - the name of the resource group. +// circuitName - the name of the express route circuit. +// authorizationName - the name of the authorization. +func (client ExpressRouteCircuitAuthorizationsClient) Delete(ctx context.Context, resourceGroupName string, circuitName string, authorizationName string) (result ExpressRouteCircuitAuthorizationsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitAuthorizationsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, circuitName, authorizationName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ExpressRouteCircuitAuthorizationsClient) DeletePreparer(ctx context.Context, resourceGroupName string, circuitName string, authorizationName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationName": autorest.Encode("path", authorizationName), + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitAuthorizationsClient) DeleteSender(req *http.Request) (future ExpressRouteCircuitAuthorizationsDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitAuthorizationsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified authorization from the specified express route circuit. +// Parameters: +// resourceGroupName - the name of the resource group. +// circuitName - the name of the express route circuit. +// authorizationName - the name of the authorization. 
+func (client ExpressRouteCircuitAuthorizationsClient) Get(ctx context.Context, resourceGroupName string, circuitName string, authorizationName string) (result ExpressRouteCircuitAuthorization, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitAuthorizationsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, circuitName, authorizationName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ExpressRouteCircuitAuthorizationsClient) GetPreparer(ctx context.Context, resourceGroupName string, circuitName string, authorizationName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationName": autorest.Encode("path", authorizationName), + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitAuthorizationsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitAuthorizationsClient) GetResponder(resp *http.Response) (result ExpressRouteCircuitAuthorization, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all authorizations in an express route circuit. +// Parameters: +// resourceGroupName - the name of the resource group. +// circuitName - the name of the circuit. 
+func (client ExpressRouteCircuitAuthorizationsClient) List(ctx context.Context, resourceGroupName string, circuitName string) (result AuthorizationListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitAuthorizationsClient.List") + defer func() { + sc := -1 + if result.alr.Response.Response != nil { + sc = result.alr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, circuitName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.alr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure sending request") + return + } + + result.alr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ExpressRouteCircuitAuthorizationsClient) ListPreparer(ctx context.Context, resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitAuthorizationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitAuthorizationsClient) ListResponder(resp *http.Response) (result AuthorizationListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client ExpressRouteCircuitAuthorizationsClient) listNextResults(ctx context.Context, lastResults AuthorizationListResult) (result AuthorizationListResult, err error) { + req, err := lastResults.authorizationListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ExpressRouteCircuitAuthorizationsClient) ListComplete(ctx context.Context, resourceGroupName string, circuitName string) (result AuthorizationListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitAuthorizationsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, circuitName) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressroutecircuitpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressroutecircuitpeerings.go new file mode 100644 index 000000000000..2325150e6506 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressroutecircuitpeerings.go @@ -0,0 +1,389 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ExpressRouteCircuitPeeringsClient is the network Client +type ExpressRouteCircuitPeeringsClient struct { + BaseClient +} + +// NewExpressRouteCircuitPeeringsClient creates an instance of the ExpressRouteCircuitPeeringsClient client. 
+func NewExpressRouteCircuitPeeringsClient(subscriptionID string) ExpressRouteCircuitPeeringsClient { + return NewExpressRouteCircuitPeeringsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewExpressRouteCircuitPeeringsClientWithBaseURI creates an instance of the ExpressRouteCircuitPeeringsClient client +// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). +func NewExpressRouteCircuitPeeringsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitPeeringsClient { + return ExpressRouteCircuitPeeringsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a peering in the specified express route circuits. +// Parameters: +// resourceGroupName - the name of the resource group. +// circuitName - the name of the express route circuit. +// peeringName - the name of the peering. +// peeringParameters - parameters supplied to the create or update express route circuit peering operation. +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering) (result ExpressRouteCircuitPeeringsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitPeeringsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, circuitName, peeringName, peeringParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, circuitName string, peeringName string, peeringParameters ExpressRouteCircuitPeering) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}", pathParameters), + autorest.WithJSON(peeringParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateSender(req *http.Request) (future ExpressRouteCircuitPeeringsCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateResponder(resp *http.Response) (result ExpressRouteCircuitPeering, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified peering from the specified express route circuit. +// Parameters: +// resourceGroupName - the name of the resource group. +// circuitName - the name of the express route circuit. +// peeringName - the name of the peering. +func (client ExpressRouteCircuitPeeringsClient) Delete(ctx context.Context, resourceGroupName string, circuitName string, peeringName string) (result ExpressRouteCircuitPeeringsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitPeeringsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, circuitName, peeringName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ExpressRouteCircuitPeeringsClient) DeletePreparer(ctx context.Context, resourceGroupName string, circuitName string, peeringName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "peeringName": autorest.Encode("path", peeringName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client ExpressRouteCircuitPeeringsClient) DeleteSender(req *http.Request) (future ExpressRouteCircuitPeeringsDeleteFuture, err error) {
+	var resp *http.Response
+	resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+	if err != nil {
+		return
+	}
+	future.Future, err = azure.NewFutureFromResponse(resp)
+	return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ExpressRouteCircuitPeeringsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get gets the specified peering from the specified express route circuit.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// circuitName - the name of the express route circuit.
+// peeringName - the name of the peering.
+func (client ExpressRouteCircuitPeeringsClient) Get(ctx context.Context, resourceGroupName string, circuitName string, peeringName string) (result ExpressRouteCircuitPeering, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitPeeringsClient.Get")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.GetPreparer(ctx, resourceGroupName, circuitName, peeringName)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client ExpressRouteCircuitPeeringsClient) GetPreparer(ctx context.Context, resourceGroupName string, circuitName string, peeringName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"circuitName":       autorest.Encode("path", circuitName),
+		"peeringName":       autorest.Encode("path", peeringName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2015-06-15"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ExpressRouteCircuitPeeringsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitPeeringsClient) GetResponder(resp *http.Response) (result ExpressRouteCircuitPeering, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all peerings in a specified express route circuit. +// Parameters: +// resourceGroupName - the name of the resource group. +// circuitName - the name of the express route circuit. +func (client ExpressRouteCircuitPeeringsClient) List(ctx context.Context, resourceGroupName string, circuitName string) (result ExpressRouteCircuitPeeringListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitPeeringsClient.List") + defer func() { + sc := -1 + if result.ercplr.Response.Response != nil { + sc = result.ercplr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, circuitName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.ercplr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, "Failure sending request") + return + } + + result.ercplr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ExpressRouteCircuitPeeringsClient) ListPreparer(ctx context.Context, resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitPeeringsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client ExpressRouteCircuitPeeringsClient) ListResponder(resp *http.Response) (result ExpressRouteCircuitPeeringListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client ExpressRouteCircuitPeeringsClient) listNextResults(ctx context.Context, lastResults ExpressRouteCircuitPeeringListResult) (result ExpressRouteCircuitPeeringListResult, err error) { + req, err := lastResults.expressRouteCircuitPeeringListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ExpressRouteCircuitPeeringsClient) ListComplete(ctx context.Context, resourceGroupName string, circuitName string) (result ExpressRouteCircuitPeeringListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitPeeringsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, circuitName) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressroutecircuits.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressroutecircuits.go new file mode 100644 index 000000000000..8386bb3c034a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressroutecircuits.go @@ -0,0 +1,831 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
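The List/ListComplete pairs in these clients follow the standard AutoRest paging pattern: List returns a single page type whose fn field knows how to fetch the next page, while ListComplete wraps that page in an iterator that crosses page boundaries automatically. As a rough caller-side sketch only — the package, resource names, and environment-variable credentials below are illustrative assumptions, not part of this vendored code — listing the peerings on a circuit might look like:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

// listPeerings prints the name of every peering on a circuit, letting the
// iterator fetch additional pages as needed.
func listPeerings(subscriptionID, resourceGroup, circuitName string) error {
	client := network.NewExpressRouteCircuitPeeringsClient(subscriptionID)

	// Assumes AZURE_* environment variables carry the credentials; any other
	// autorest.Authorizer could be attached instead.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		return err
	}
	client.Authorizer = authorizer

	it, err := client.ListComplete(context.Background(), resourceGroup, circuitName)
	if err != nil {
		return err
	}
	for it.NotDone() {
		if name := it.Value().Name; name != nil {
			fmt.Println(*name)
		}
		if err := it.NextWithContext(context.Background()); err != nil {
			return err
		}
	}
	return nil
}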
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ExpressRouteCircuitsClient is the network Client +type ExpressRouteCircuitsClient struct { + BaseClient +} + +// NewExpressRouteCircuitsClient creates an instance of the ExpressRouteCircuitsClient client. +func NewExpressRouteCircuitsClient(subscriptionID string) ExpressRouteCircuitsClient { + return NewExpressRouteCircuitsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewExpressRouteCircuitsClientWithBaseURI creates an instance of the ExpressRouteCircuitsClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). +func NewExpressRouteCircuitsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitsClient { + return ExpressRouteCircuitsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an express route circuit. +// Parameters: +// resourceGroupName - the name of the resource group. +// circuitName - the name of the circuit. +// parameters - parameters supplied to the create or update express route circuit operation. +func (client ExpressRouteCircuitsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, circuitName string, parameters ExpressRouteCircuit) (result ExpressRouteCircuitsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, circuitName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ExpressRouteCircuitsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, circuitName string, parameters ExpressRouteCircuit) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client ExpressRouteCircuitsClient) CreateOrUpdateSender(req *http.Request) (future ExpressRouteCircuitsCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) CreateOrUpdateResponder(resp *http.Response) (result ExpressRouteCircuit, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified express route circuit. +// Parameters: +// resourceGroupName - the name of the resource group. +// circuitName - the name of the express route circuit. +func (client ExpressRouteCircuitsClient) Delete(ctx context.Context, resourceGroupName string, circuitName string) (result ExpressRouteCircuitsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, circuitName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ExpressRouteCircuitsClient) DeletePreparer(ctx context.Context, resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) DeleteSender(req *http.Request) (future ExpressRouteCircuitsDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client ExpressRouteCircuitsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets information about the specified express route circuit. +// Parameters: +// resourceGroupName - the name of the resource group. +// circuitName - the name of express route circuit. +func (client ExpressRouteCircuitsClient) Get(ctx context.Context, resourceGroupName string, circuitName string) (result ExpressRouteCircuit, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, circuitName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ExpressRouteCircuitsClient) GetPreparer(ctx context.Context, resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) GetResponder(resp *http.Response) (result ExpressRouteCircuit, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all the express route circuits in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. 
+func (client ExpressRouteCircuitsClient) List(ctx context.Context, resourceGroupName string) (result ExpressRouteCircuitListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.List") + defer func() { + sc := -1 + if result.erclr.Response.Response != nil { + sc = result.erclr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.erclr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure sending request") + return + } + + result.erclr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ExpressRouteCircuitsClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) ListResponder(resp *http.Response) (result ExpressRouteCircuitListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client ExpressRouteCircuitsClient) listNextResults(ctx context.Context, lastResults ExpressRouteCircuitListResult) (result ExpressRouteCircuitListResult, err error) { + req, err := lastResults.expressRouteCircuitListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ExpressRouteCircuitsClient) ListComplete(ctx context.Context, resourceGroupName string) (result ExpressRouteCircuitListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName) + return +} + +// ListAll gets all the express route circuits in a subscription. +func (client ExpressRouteCircuitsClient) ListAll(ctx context.Context) (result ExpressRouteCircuitListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListAll") + defer func() { + sc := -1 + if result.erclr.Response.Response != nil { + sc = result.erclr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listAllNextResults + req, err := client.ListAllPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", nil, "Failure preparing request") + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.erclr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure sending request") + return + } + + result.erclr, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client ExpressRouteCircuitsClient) ListAllPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAllSender sends the ListAll request. 
The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) ListAllResponder(resp *http.Response) (result ExpressRouteCircuitListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listAllNextResults retrieves the next set of results, if any. +func (client ExpressRouteCircuitsClient) listAllNextResults(ctx context.Context, lastResults ExpressRouteCircuitListResult) (result ExpressRouteCircuitListResult, err error) { + req, err := lastResults.expressRouteCircuitListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listAllNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listAllNextResults", resp, "Failure sending next results request") + } + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listAllNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListAllComplete enumerates all values, automatically crossing page boundaries as required. +func (client ExpressRouteCircuitsClient) ListAllComplete(ctx context.Context) (result ExpressRouteCircuitListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListAll") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListAll(ctx) + return +} + +// ListArpTable the ListArpTable from ExpressRouteCircuit operation retrieves the currently advertised arp table +// associated with the ExpressRouteCircuits in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// circuitName - the name of the circuit. 
+func (client ExpressRouteCircuitsClient) ListArpTable(ctx context.Context, resourceGroupName string, circuitName string) (result ExpressRouteCircuitsArpTableListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListArpTable") + defer func() { + sc := -1 + if result.ercatlr.Response.Response != nil { + sc = result.ercatlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listArpTableNextResults + req, err := client.ListArpTablePreparer(ctx, resourceGroupName, circuitName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", nil, "Failure preparing request") + return + } + + resp, err := client.ListArpTableSender(req) + if err != nil { + result.ercatlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", resp, "Failure sending request") + return + } + + result.ercatlr, err = client.ListArpTableResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListArpTable", resp, "Failure responding to request") + } + + return +} + +// ListArpTablePreparer prepares the ListArpTable request. +func (client ExpressRouteCircuitsClient) ListArpTablePreparer(ctx context.Context, resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/arpTable", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListArpTableSender sends the ListArpTable request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) ListArpTableSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListArpTableResponder handles the response to the ListArpTable request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) ListArpTableResponder(resp *http.Response) (result ExpressRouteCircuitsArpTableListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listArpTableNextResults retrieves the next set of results, if any. 
+func (client ExpressRouteCircuitsClient) listArpTableNextResults(ctx context.Context, lastResults ExpressRouteCircuitsArpTableListResult) (result ExpressRouteCircuitsArpTableListResult, err error) { + req, err := lastResults.expressRouteCircuitsArpTableListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listArpTableNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListArpTableSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listArpTableNextResults", resp, "Failure sending next results request") + } + result, err = client.ListArpTableResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listArpTableNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListArpTableComplete enumerates all values, automatically crossing page boundaries as required. +func (client ExpressRouteCircuitsClient) ListArpTableComplete(ctx context.Context, resourceGroupName string, circuitName string) (result ExpressRouteCircuitsArpTableListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListArpTable") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListArpTable(ctx, resourceGroupName, circuitName) + return +} + +// ListRoutesTable the ListRoutesTable from ExpressRouteCircuit operation retrieves the currently advertised routes +// table associated with the ExpressRouteCircuits in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// circuitName - the name of the circuit. +func (client ExpressRouteCircuitsClient) ListRoutesTable(ctx context.Context, resourceGroupName string, circuitName string) (result ExpressRouteCircuitsRoutesTableListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListRoutesTable") + defer func() { + sc := -1 + if result.ercrtlr.Response.Response != nil { + sc = result.ercrtlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listRoutesTableNextResults + req, err := client.ListRoutesTablePreparer(ctx, resourceGroupName, circuitName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", nil, "Failure preparing request") + return + } + + resp, err := client.ListRoutesTableSender(req) + if err != nil { + result.ercrtlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", resp, "Failure sending request") + return + } + + result.ercrtlr, err = client.ListRoutesTableResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListRoutesTable", resp, "Failure responding to request") + } + + return +} + +// ListRoutesTablePreparer prepares the ListRoutesTable request. 
+func (client ExpressRouteCircuitsClient) ListRoutesTablePreparer(ctx context.Context, resourceGroupName string, circuitName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "circuitName": autorest.Encode("path", circuitName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/routesTable", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListRoutesTableSender sends the ListRoutesTable request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteCircuitsClient) ListRoutesTableSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListRoutesTableResponder handles the response to the ListRoutesTable request. The method always +// closes the http.Response Body. +func (client ExpressRouteCircuitsClient) ListRoutesTableResponder(resp *http.Response) (result ExpressRouteCircuitsRoutesTableListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listRoutesTableNextResults retrieves the next set of results, if any. +func (client ExpressRouteCircuitsClient) listRoutesTableNextResults(ctx context.Context, lastResults ExpressRouteCircuitsRoutesTableListResult) (result ExpressRouteCircuitsRoutesTableListResult, err error) { + req, err := lastResults.expressRouteCircuitsRoutesTableListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listRoutesTableNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListRoutesTableSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listRoutesTableNextResults", resp, "Failure sending next results request") + } + result, err = client.ListRoutesTableResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listRoutesTableNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListRoutesTableComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client ExpressRouteCircuitsClient) ListRoutesTableComplete(ctx context.Context, resourceGroupName string, circuitName string) (result ExpressRouteCircuitsRoutesTableListResultIterator, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListRoutesTable")
+		defer func() {
+			sc := -1
+			if result.Response().Response.Response != nil {
+				sc = result.page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.page, err = client.ListRoutesTable(ctx, resourceGroupName, circuitName)
+	return
+}
+
+// ListStats the ListStats ExpressRouteCircuit operation retrieves all the stats from an ExpressRouteCircuit in a
+// resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// circuitName - the name of the express route circuit.
+func (client ExpressRouteCircuitsClient) ListStats(ctx context.Context, resourceGroupName string, circuitName string) (result ExpressRouteCircuitsStatsListResultPage, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListStats")
+		defer func() {
+			sc := -1
+			if result.ercslr.Response.Response != nil {
+				sc = result.ercslr.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.fn = client.listStatsNextResults
+	req, err := client.ListStatsPreparer(ctx, resourceGroupName, circuitName)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListStats", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListStatsSender(req)
+	if err != nil {
+		result.ercslr.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListStats", resp, "Failure sending request")
+		return
+	}
+
+	result.ercslr, err = client.ListStatsResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "ListStats", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListStatsPreparer prepares the ListStats request.
+func (client ExpressRouteCircuitsClient) ListStatsPreparer(ctx context.Context, resourceGroupName string, circuitName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"circuitName":       autorest.Encode("path", circuitName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2015-06-15"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListStatsSender sends the ListStats request. The method will close the
+// http.Response Body if it receives an error.
+func (client ExpressRouteCircuitsClient) ListStatsSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListStatsResponder handles the response to the ListStats request. The method always
+// closes the http.Response Body.
+func (client ExpressRouteCircuitsClient) ListStatsResponder(resp *http.Response) (result ExpressRouteCircuitsStatsListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listStatsNextResults retrieves the next set of results, if any. +func (client ExpressRouteCircuitsClient) listStatsNextResults(ctx context.Context, lastResults ExpressRouteCircuitsStatsListResult) (result ExpressRouteCircuitsStatsListResult, err error) { + req, err := lastResults.expressRouteCircuitsStatsListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listStatsNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListStatsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listStatsNextResults", resp, "Failure sending next results request") + } + result, err = client.ListStatsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsClient", "listStatsNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListStatsComplete enumerates all values, automatically crossing page boundaries as required. +func (client ExpressRouteCircuitsClient) ListStatsComplete(ctx context.Context, resourceGroupName string, circuitName string) (result ExpressRouteCircuitsStatsListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsClient.ListStats") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListStats(ctx, resourceGroupName, circuitName) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressrouteserviceproviders.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressrouteserviceproviders.go new file mode 100644 index 000000000000..638897274734 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/expressrouteserviceproviders.go @@ -0,0 +1,151 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
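CreateOrUpdate and Delete on the circuits client are long-running operations: their Sender methods wrap the initial response in an azure.Future instead of decoding a body, and a caller typically polls that future to completion before decoding the final response with the generated Responder. A sketch under the same illustrative assumptions as the earlier example (it continues that hypothetical caller package, with placeholder names and credentials already attached to the client):

// createCircuit provisions (or updates) a circuit and blocks until the
// long-running operation reaches a terminal state.
func createCircuit(ctx context.Context, client network.ExpressRouteCircuitsClient, resourceGroup, circuitName string, params network.ExpressRouteCircuit) (network.ExpressRouteCircuit, error) {
	future, err := client.CreateOrUpdate(ctx, resourceGroup, circuitName, params)
	if err != nil {
		return network.ExpressRouteCircuit{}, err
	}
	// Poll the ARM operation until it completes (or ctx is cancelled).
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return network.ExpressRouteCircuit{}, err
	}
	// Decode the final response with the same Responder the SDK uses internally.
	return client.CreateOrUpdateResponder(future.Response())
}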
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ExpressRouteServiceProvidersClient is the network Client +type ExpressRouteServiceProvidersClient struct { + BaseClient +} + +// NewExpressRouteServiceProvidersClient creates an instance of the ExpressRouteServiceProvidersClient client. +func NewExpressRouteServiceProvidersClient(subscriptionID string) ExpressRouteServiceProvidersClient { + return NewExpressRouteServiceProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewExpressRouteServiceProvidersClientWithBaseURI creates an instance of the ExpressRouteServiceProvidersClient +// client using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI +// (sovereign clouds, Azure stack). +func NewExpressRouteServiceProvidersClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteServiceProvidersClient { + return ExpressRouteServiceProvidersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets all the available express route service providers. +func (client ExpressRouteServiceProvidersClient) List(ctx context.Context) (result ExpressRouteServiceProviderListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteServiceProvidersClient.List") + defer func() { + sc := -1 + if result.ersplr.Response.Response != nil { + sc = result.ersplr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.ersplr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure sending request") + return + } + + result.ersplr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ExpressRouteServiceProvidersClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ExpressRouteServiceProvidersClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client ExpressRouteServiceProvidersClient) ListResponder(resp *http.Response) (result ExpressRouteServiceProviderListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client ExpressRouteServiceProvidersClient) listNextResults(ctx context.Context, lastResults ExpressRouteServiceProviderListResult) (result ExpressRouteServiceProviderListResult, err error) { + req, err := lastResults.expressRouteServiceProviderListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteServiceProvidersClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ExpressRouteServiceProvidersClient) ListComplete(ctx context.Context) (result ExpressRouteServiceProviderListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteServiceProvidersClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/interfacesgroup.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/interfacesgroup.go new file mode 100644 index 000000000000..104d88befa28 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/interfacesgroup.go @@ -0,0 +1,804 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
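The `CreateOrUpdate` and `Delete` methods in the interfaces client below return futures built from `azure.NewFutureFromResponse`, so the caller chooses when to block on the long-running operation. As a hedged sketch (again, not part of the vendored file), the usual pattern in this SDK vintage is to poll with the future's `WaitForCompletionRef` helper and then read the outcome with `Result`; the subscription, resource group, and NIC names are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	ctx := context.Background()

	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	// Placeholders: subscription, resource group, and NIC name.
	client := network.NewInterfacesClient("<subscription-id>")
	client.Authorizer = authorizer

	future, err := client.Delete(ctx, "example-rg", "example-nic")
	if err != nil {
		log.Fatal(err)
	}
	// WaitForCompletionRef polls the operation until it reaches a terminal
	// state or ctx is cancelled.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
	// For Delete futures, Result returns the final raw autorest.Response.
	resp, err := future.Result(client)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delete finished with HTTP status %d", resp.StatusCode)
}
```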
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// InterfacesClient is the network Client +type InterfacesClient struct { + BaseClient +} + +// NewInterfacesClient creates an instance of the InterfacesClient client. +func NewInterfacesClient(subscriptionID string) InterfacesClient { + return NewInterfacesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewInterfacesClientWithBaseURI creates an instance of the InterfacesClient client using a custom endpoint. Use this +// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewInterfacesClientWithBaseURI(baseURI string, subscriptionID string) InterfacesClient { + return InterfacesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a network interface. +// Parameters: +// resourceGroupName - the name of the resource group. +// networkInterfaceName - the name of the network interface. +// parameters - parameters supplied to the create or update network interface operation. +func (client InterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters Interface) (result InterfacesCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, networkInterfaceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client InterfacesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters Interface) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (future InterfacesCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client InterfacesClient) CreateOrUpdateResponder(resp *http.Response) (result Interface, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified network interface. +// Parameters: +// resourceGroupName - the name of the resource group. +// networkInterfaceName - the name of the network interface. +func (client InterfacesClient) Delete(ctx context.Context, resourceGroupName string, networkInterfaceName string) (result InterfacesDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, networkInterfaceName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client InterfacesClient) DeletePreparer(ctx context.Context, resourceGroupName string, networkInterfaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) DeleteSender(req *http.Request) (future InterfacesDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client InterfacesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets information about the specified network interface. +// Parameters: +// resourceGroupName - the name of the resource group. +// networkInterfaceName - the name of the network interface. +// expand - expands referenced resources. +func (client InterfacesClient) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, expand string) (result Interface, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, networkInterfaceName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client InterfacesClient) GetPreparer(ctx context.Context, resourceGroupName string, networkInterfaceName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client InterfacesClient) GetResponder(resp *http.Response) (result Interface, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetVirtualMachineScaleSetNetworkInterface get the specified network interface in a virtual machine scale set. +// Parameters: +// resourceGroupName - the name of the resource group. 
+// virtualMachineScaleSetName - the name of the virtual machine scale set. +// virtualmachineIndex - the virtual machine index. +// networkInterfaceName - the name of the network interface. +// expand - expands referenced resources. +func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result Interface, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.GetVirtualMachineScaleSetNetworkInterface") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetVirtualMachineScaleSetNetworkInterfacePreparer(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", nil, "Failure preparing request") + return + } + + resp, err := client.GetVirtualMachineScaleSetNetworkInterfaceSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure sending request") + return + } + + result, err = client.GetVirtualMachineScaleSetNetworkInterfaceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure responding to request") + } + + return +} + +// GetVirtualMachineScaleSetNetworkInterfacePreparer prepares the GetVirtualMachineScaleSetNetworkInterface request. +func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfacePreparer(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualmachineIndex": autorest.Encode("path", virtualmachineIndex), + "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetVirtualMachineScaleSetNetworkInterfaceSender sends the GetVirtualMachineScaleSetNetworkInterface request. The method will close the +// http.Response Body if it receives an error. 
+func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetVirtualMachineScaleSetNetworkInterfaceResponder handles the response to the GetVirtualMachineScaleSetNetworkInterface request. The method always +// closes the http.Response Body. +func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceResponder(resp *http.Response) (result Interface, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all network interfaces in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +func (client InterfacesClient) List(ctx context.Context, resourceGroupName string) (result InterfaceListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.List") + defer func() { + sc := -1 + if result.ilr.Response.Response != nil { + sc = result.ilr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.ilr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure sending request") + return + } + + result.ilr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client InterfacesClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client InterfacesClient) ListResponder(resp *http.Response) (result InterfaceListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client InterfacesClient) listNextResults(ctx context.Context, lastResults InterfaceListResult) (result InterfaceListResult, err error) { + req, err := lastResults.interfaceListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client InterfacesClient) ListComplete(ctx context.Context, resourceGroupName string) (result InterfaceListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName) + return +} + +// ListAll gets all network interfaces in a subscription. +func (client InterfacesClient) ListAll(ctx context.Context) (result InterfaceListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.ListAll") + defer func() { + sc := -1 + if result.ilr.Response.Response != nil { + sc = result.ilr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listAllNextResults + req, err := client.ListAllPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", nil, "Failure preparing request") + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.ilr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure sending request") + return + } + + result.ilr, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client InterfacesClient) ListAllPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. 
+func (client InterfacesClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client InterfacesClient) ListAllResponder(resp *http.Response) (result InterfaceListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listAllNextResults retrieves the next set of results, if any. +func (client InterfacesClient) listAllNextResults(ctx context.Context, lastResults InterfaceListResult) (result InterfaceListResult, err error) { + req, err := lastResults.interfaceListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listAllNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listAllNextResults", resp, "Failure sending next results request") + } + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "listAllNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListAllComplete enumerates all values, automatically crossing page boundaries as required. +func (client InterfacesClient) ListAllComplete(ctx context.Context) (result InterfaceListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.ListAll") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListAll(ctx) + return +} + +// ListVirtualMachineScaleSetNetworkInterfaces gets all network interfaces in a virtual machine scale set. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualMachineScaleSetName - the name of the virtual machine scale set. 
+func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfaces(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string) (result InterfaceListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.ListVirtualMachineScaleSetNetworkInterfaces") + defer func() { + sc := -1 + if result.ilr.Response.Response != nil { + sc = result.ilr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listVirtualMachineScaleSetNetworkInterfacesNextResults + req, err := client.ListVirtualMachineScaleSetNetworkInterfacesPreparer(ctx, resourceGroupName, virtualMachineScaleSetName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", nil, "Failure preparing request") + return + } + + resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req) + if err != nil { + result.ilr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure sending request") + return + } + + result.ilr, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure responding to request") + } + + return +} + +// ListVirtualMachineScaleSetNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetNetworkInterfaces request. +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesPreparer(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListVirtualMachineScaleSetNetworkInterfacesSender sends the ListVirtualMachineScaleSetNetworkInterfaces request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListVirtualMachineScaleSetNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetNetworkInterfaces request. The method always +// closes the http.Response Body. 
+func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesResponder(resp *http.Response) (result InterfaceListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listVirtualMachineScaleSetNetworkInterfacesNextResults retrieves the next set of results, if any. +func (client InterfacesClient) listVirtualMachineScaleSetNetworkInterfacesNextResults(ctx context.Context, lastResults InterfaceListResult) (result InterfaceListResult, err error) { + req, err := lastResults.interfaceListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listVirtualMachineScaleSetNetworkInterfacesNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listVirtualMachineScaleSetNetworkInterfacesNextResults", resp, "Failure sending next results request") + } + result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "listVirtualMachineScaleSetNetworkInterfacesNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListVirtualMachineScaleSetNetworkInterfacesComplete enumerates all values, automatically crossing page boundaries as required. +func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesComplete(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string) (result InterfaceListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.ListVirtualMachineScaleSetNetworkInterfaces") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListVirtualMachineScaleSetNetworkInterfaces(ctx, resourceGroupName, virtualMachineScaleSetName) + return +} + +// ListVirtualMachineScaleSetVMNetworkInterfaces gets information about all network interfaces in a virtual machine in +// a virtual machine scale set. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualMachineScaleSetName - the name of the virtual machine scale set. +// virtualmachineIndex - the virtual machine index. 
+func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfaces(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (result InterfaceListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.ListVirtualMachineScaleSetVMNetworkInterfaces") + defer func() { + sc := -1 + if result.ilr.Response.Response != nil { + sc = result.ilr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listVirtualMachineScaleSetVMNetworkInterfacesNextResults + req, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", nil, "Failure preparing request") + return + } + + resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req) + if err != nil { + result.ilr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure sending request") + return + } + + result.ilr, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure responding to request") + } + + return +} + +// ListVirtualMachineScaleSetVMNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetVMNetworkInterfaces request. +func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualmachineIndex": autorest.Encode("path", virtualmachineIndex), + "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListVirtualMachineScaleSetVMNetworkInterfacesSender sends the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListVirtualMachineScaleSetVMNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method always +// closes the http.Response Body. 
+func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp *http.Response) (result InterfaceListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listVirtualMachineScaleSetVMNetworkInterfacesNextResults retrieves the next set of results, if any. +func (client InterfacesClient) listVirtualMachineScaleSetVMNetworkInterfacesNextResults(ctx context.Context, lastResults InterfaceListResult) (result InterfaceListResult, err error) { + req, err := lastResults.interfaceListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listVirtualMachineScaleSetVMNetworkInterfacesNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "listVirtualMachineScaleSetVMNetworkInterfacesNextResults", resp, "Failure sending next results request") + } + result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "listVirtualMachineScaleSetVMNetworkInterfacesNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListVirtualMachineScaleSetVMNetworkInterfacesComplete enumerates all values, automatically crossing page boundaries as required. +func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesComplete(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (result InterfaceListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/InterfacesClient.ListVirtualMachineScaleSetVMNetworkInterfaces") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListVirtualMachineScaleSetVMNetworkInterfaces(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/loadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/loadbalancers.go new file mode 100644 index 000000000000..c9a2713561f8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/loadbalancers.go @@ -0,0 +1,492 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// LoadBalancersClient is the network Client +type LoadBalancersClient struct { + BaseClient +} + +// NewLoadBalancersClient creates an instance of the LoadBalancersClient client. +func NewLoadBalancersClient(subscriptionID string) LoadBalancersClient { + return NewLoadBalancersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLoadBalancersClientWithBaseURI creates an instance of the LoadBalancersClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewLoadBalancersClientWithBaseURI(baseURI string, subscriptionID string) LoadBalancersClient { + return LoadBalancersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a load balancer. +// Parameters: +// resourceGroupName - the name of the resource group. +// loadBalancerName - the name of the load balancer. +// parameters - parameters supplied to the create or update load balancer operation. +func (client LoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters LoadBalancer) (result LoadBalancersCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancersClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, loadBalancerName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client LoadBalancersClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, loadBalancerName string, parameters LoadBalancer) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client LoadBalancersClient) CreateOrUpdateSender(req *http.Request) (future LoadBalancersCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client LoadBalancersClient) CreateOrUpdateResponder(resp *http.Response) (result LoadBalancer, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified load balancer. +// Parameters: +// resourceGroupName - the name of the resource group. +// loadBalancerName - the name of the load balancer. +func (client LoadBalancersClient) Delete(ctx context.Context, resourceGroupName string, loadBalancerName string) (result LoadBalancersDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancersClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, loadBalancerName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client LoadBalancersClient) DeletePreparer(ctx context.Context, resourceGroupName string, loadBalancerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancersClient) DeleteSender(req *http.Request) (future LoadBalancersDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client LoadBalancersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified load balancer. +// Parameters: +// resourceGroupName - the name of the resource group. +// loadBalancerName - the name of the load balancer. +// expand - expands referenced resources. +func (client LoadBalancersClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, expand string) (result LoadBalancer, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancersClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, loadBalancerName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client LoadBalancersClient) GetPreparer(ctx context.Context, resourceGroupName string, loadBalancerName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": autorest.Encode("path", loadBalancerName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancersClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client LoadBalancersClient) GetResponder(resp *http.Response) (result LoadBalancer, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all the load balancers in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. 
+func (client LoadBalancersClient) List(ctx context.Context, resourceGroupName string) (result LoadBalancerListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancersClient.List") + defer func() { + sc := -1 + if result.lblr.Response.Response != nil { + sc = result.lblr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.lblr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure sending request") + return + } + + result.lblr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client LoadBalancersClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancersClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client LoadBalancersClient) ListResponder(resp *http.Response) (result LoadBalancerListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client LoadBalancersClient) listNextResults(ctx context.Context, lastResults LoadBalancerListResult) (result LoadBalancerListResult, err error) { + req, err := lastResults.loadBalancerListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client LoadBalancersClient) ListComplete(ctx context.Context, resourceGroupName string) (result LoadBalancerListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancersClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName) + return +} + +// ListAll gets all the load balancers in a subscription. +func (client LoadBalancersClient) ListAll(ctx context.Context) (result LoadBalancerListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancersClient.ListAll") + defer func() { + sc := -1 + if result.lblr.Response.Response != nil { + sc = result.lblr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listAllNextResults + req, err := client.ListAllPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", nil, "Failure preparing request") + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.lblr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure sending request") + return + } + + result.lblr, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client LoadBalancersClient) ListAllPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. 
+func (client LoadBalancersClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client LoadBalancersClient) ListAllResponder(resp *http.Response) (result LoadBalancerListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listAllNextResults retrieves the next set of results, if any. +func (client LoadBalancersClient) listAllNextResults(ctx context.Context, lastResults LoadBalancerListResult) (result LoadBalancerListResult, err error) { + req, err := lastResults.loadBalancerListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "listAllNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LoadBalancersClient", "listAllNextResults", resp, "Failure sending next results request") + } + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersClient", "listAllNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListAllComplete enumerates all values, automatically crossing page boundaries as required. +func (client LoadBalancersClient) ListAllComplete(ctx context.Context) (result LoadBalancerListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancersClient.ListAll") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListAll(ctx) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/localnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/localnetworkgateways.go new file mode 100644 index 000000000000..3370c74e012d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/localnetworkgateways.go @@ -0,0 +1,381 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
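// -----------------------------------------------------------------------------
// Illustrative usage sketch (editorial; not part of the vendored SDK file).
// It shows how callers drive the long-running CreateOrUpdate operation defined
// below: the client returns a future value immediately, and the caller polls it
// to completion before reading the result. Assumptions: credentials come from
// auth.NewAuthorizerFromEnvironment, and LocalNetworkGatewaysCreateOrUpdateFuture
// exposes WaitForCompletionRef/Result as in other AutoRest-generated packages.
// -----------------------------------------------------------------------------

package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

// createLocalNetworkGateway issues CreateOrUpdate and blocks until the
// long-running operation reaches a terminal state, then returns the resource.
func createLocalNetworkGateway(ctx context.Context, subscriptionID, resourceGroupName, gatewayName string, params network.LocalNetworkGateway) (network.LocalNetworkGateway, error) {
	client := network.NewLocalNetworkGatewaysClient(subscriptionID)

	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		return network.LocalNetworkGateway{}, err
	}
	client.Authorizer = authorizer

	// The request is only accepted here; the future tracks the async operation.
	future, err := client.CreateOrUpdate(ctx, resourceGroupName, gatewayName, params)
	if err != nil {
		return network.LocalNetworkGateway{}, err
	}

	// Poll the operation until the service reports success or failure.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return network.LocalNetworkGateway{}, err
	}

	// Result unmarshals the final response body into a LocalNetworkGateway
	// (assumed generated helper on the future type).
	return future.Result(client)
}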
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// LocalNetworkGatewaysClient is the network Client +type LocalNetworkGatewaysClient struct { + BaseClient +} + +// NewLocalNetworkGatewaysClient creates an instance of the LocalNetworkGatewaysClient client. +func NewLocalNetworkGatewaysClient(subscriptionID string) LocalNetworkGatewaysClient { + return NewLocalNetworkGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLocalNetworkGatewaysClientWithBaseURI creates an instance of the LocalNetworkGatewaysClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). +func NewLocalNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID string) LocalNetworkGatewaysClient { + return LocalNetworkGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a local network gateway in the specified resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// localNetworkGatewayName - the name of the local network gateway. +// parameters - parameters supplied to the create or update local network gateway operation. +func (client LocalNetworkGatewaysClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway) (result LocalNetworkGatewaysCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LocalNetworkGatewaysClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, localNetworkGatewayName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client LocalNetworkGatewaysClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "localNetworkGatewayName": autorest.Encode("path", localNetworkGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. 
The method will close the +// http.Response Body if it receives an error. +func (client LocalNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (future LocalNetworkGatewaysCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client LocalNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result LocalNetworkGateway, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified local network gateway. +// Parameters: +// resourceGroupName - the name of the resource group. +// localNetworkGatewayName - the name of the local network gateway. +func (client LocalNetworkGatewaysClient) Delete(ctx context.Context, resourceGroupName string, localNetworkGatewayName string) (result LocalNetworkGatewaysDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LocalNetworkGatewaysClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, localNetworkGatewayName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client LocalNetworkGatewaysClient) DeletePreparer(ctx context.Context, resourceGroupName string, localNetworkGatewayName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "localNetworkGatewayName": autorest.Encode("path", localNetworkGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client LocalNetworkGatewaysClient) DeleteSender(req *http.Request) (future LocalNetworkGatewaysDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. 
The method always +// closes the http.Response Body. +func (client LocalNetworkGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified local network gateway in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// localNetworkGatewayName - the name of the local network gateway. +func (client LocalNetworkGatewaysClient) Get(ctx context.Context, resourceGroupName string, localNetworkGatewayName string) (result LocalNetworkGateway, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LocalNetworkGatewaysClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, localNetworkGatewayName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client LocalNetworkGatewaysClient) GetPreparer(ctx context.Context, resourceGroupName string, localNetworkGatewayName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "localNetworkGatewayName": autorest.Encode("path", localNetworkGatewayName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client LocalNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client LocalNetworkGatewaysClient) GetResponder(resp *http.Response) (result LocalNetworkGateway, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all the local network gateways in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. 
+func (client LocalNetworkGatewaysClient) List(ctx context.Context, resourceGroupName string) (result LocalNetworkGatewayListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LocalNetworkGatewaysClient.List") + defer func() { + sc := -1 + if result.lnglr.Response.Response != nil { + sc = result.lnglr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.lnglr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure sending request") + return + } + + result.lnglr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client LocalNetworkGatewaysClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client LocalNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client LocalNetworkGatewaysClient) ListResponder(resp *http.Response) (result LocalNetworkGatewayListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client LocalNetworkGatewaysClient) listNextResults(ctx context.Context, lastResults LocalNetworkGatewayListResult) (result LocalNetworkGatewayListResult, err error) { + req, err := lastResults.localNetworkGatewayListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client LocalNetworkGatewaysClient) ListComplete(ctx context.Context, resourceGroupName string) (result LocalNetworkGatewayListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LocalNetworkGatewaysClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/models.go new file mode 100644 index 000000000000..71e378c5d8c4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/models.go @@ -0,0 +1,9163 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network" + +// ApplicationGatewayCookieBasedAffinity enumerates the values for application gateway cookie based affinity. +type ApplicationGatewayCookieBasedAffinity string + +const ( + // Disabled ... + Disabled ApplicationGatewayCookieBasedAffinity = "Disabled" + // Enabled ... 
+ Enabled ApplicationGatewayCookieBasedAffinity = "Enabled" +) + +// PossibleApplicationGatewayCookieBasedAffinityValues returns an array of possible values for the ApplicationGatewayCookieBasedAffinity const type. +func PossibleApplicationGatewayCookieBasedAffinityValues() []ApplicationGatewayCookieBasedAffinity { + return []ApplicationGatewayCookieBasedAffinity{Disabled, Enabled} +} + +// ApplicationGatewayOperationalState enumerates the values for application gateway operational state. +type ApplicationGatewayOperationalState string + +const ( + // Running ... + Running ApplicationGatewayOperationalState = "Running" + // Starting ... + Starting ApplicationGatewayOperationalState = "Starting" + // Stopped ... + Stopped ApplicationGatewayOperationalState = "Stopped" + // Stopping ... + Stopping ApplicationGatewayOperationalState = "Stopping" +) + +// PossibleApplicationGatewayOperationalStateValues returns an array of possible values for the ApplicationGatewayOperationalState const type. +func PossibleApplicationGatewayOperationalStateValues() []ApplicationGatewayOperationalState { + return []ApplicationGatewayOperationalState{Running, Starting, Stopped, Stopping} +} + +// ApplicationGatewayProtocol enumerates the values for application gateway protocol. +type ApplicationGatewayProtocol string + +const ( + // HTTP ... + HTTP ApplicationGatewayProtocol = "Http" + // HTTPS ... + HTTPS ApplicationGatewayProtocol = "Https" +) + +// PossibleApplicationGatewayProtocolValues returns an array of possible values for the ApplicationGatewayProtocol const type. +func PossibleApplicationGatewayProtocolValues() []ApplicationGatewayProtocol { + return []ApplicationGatewayProtocol{HTTP, HTTPS} +} + +// ApplicationGatewayRequestRoutingRuleType enumerates the values for application gateway request routing rule +// type. +type ApplicationGatewayRequestRoutingRuleType string + +const ( + // Basic ... + Basic ApplicationGatewayRequestRoutingRuleType = "Basic" + // PathBasedRouting ... + PathBasedRouting ApplicationGatewayRequestRoutingRuleType = "PathBasedRouting" +) + +// PossibleApplicationGatewayRequestRoutingRuleTypeValues returns an array of possible values for the ApplicationGatewayRequestRoutingRuleType const type. +func PossibleApplicationGatewayRequestRoutingRuleTypeValues() []ApplicationGatewayRequestRoutingRuleType { + return []ApplicationGatewayRequestRoutingRuleType{Basic, PathBasedRouting} +} + +// ApplicationGatewaySkuName enumerates the values for application gateway sku name. +type ApplicationGatewaySkuName string + +const ( + // StandardLarge ... + StandardLarge ApplicationGatewaySkuName = "Standard_Large" + // StandardMedium ... + StandardMedium ApplicationGatewaySkuName = "Standard_Medium" + // StandardSmall ... + StandardSmall ApplicationGatewaySkuName = "Standard_Small" +) + +// PossibleApplicationGatewaySkuNameValues returns an array of possible values for the ApplicationGatewaySkuName const type. +func PossibleApplicationGatewaySkuNameValues() []ApplicationGatewaySkuName { + return []ApplicationGatewaySkuName{StandardLarge, StandardMedium, StandardSmall} +} + +// ApplicationGatewayTier enumerates the values for application gateway tier. +type ApplicationGatewayTier string + +const ( + // Standard ... + Standard ApplicationGatewayTier = "Standard" +) + +// PossibleApplicationGatewayTierValues returns an array of possible values for the ApplicationGatewayTier const type. 
+func PossibleApplicationGatewayTierValues() []ApplicationGatewayTier { + return []ApplicationGatewayTier{Standard} +} + +// AuthorizationUseStatus enumerates the values for authorization use status. +type AuthorizationUseStatus string + +const ( + // Available ... + Available AuthorizationUseStatus = "Available" + // InUse ... + InUse AuthorizationUseStatus = "InUse" +) + +// PossibleAuthorizationUseStatusValues returns an array of possible values for the AuthorizationUseStatus const type. +func PossibleAuthorizationUseStatusValues() []AuthorizationUseStatus { + return []AuthorizationUseStatus{Available, InUse} +} + +// ExpressRouteCircuitPeeringAdvertisedPublicPrefixState enumerates the values for express route circuit +// peering advertised public prefix state. +type ExpressRouteCircuitPeeringAdvertisedPublicPrefixState string + +const ( + // Configured ... + Configured ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "Configured" + // Configuring ... + Configuring ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "Configuring" + // NotConfigured ... + NotConfigured ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "NotConfigured" + // ValidationNeeded ... + ValidationNeeded ExpressRouteCircuitPeeringAdvertisedPublicPrefixState = "ValidationNeeded" +) + +// PossibleExpressRouteCircuitPeeringAdvertisedPublicPrefixStateValues returns an array of possible values for the ExpressRouteCircuitPeeringAdvertisedPublicPrefixState const type. +func PossibleExpressRouteCircuitPeeringAdvertisedPublicPrefixStateValues() []ExpressRouteCircuitPeeringAdvertisedPublicPrefixState { + return []ExpressRouteCircuitPeeringAdvertisedPublicPrefixState{Configured, Configuring, NotConfigured, ValidationNeeded} +} + +// ExpressRouteCircuitPeeringState enumerates the values for express route circuit peering state. +type ExpressRouteCircuitPeeringState string + +const ( + // ExpressRouteCircuitPeeringStateDisabled ... + ExpressRouteCircuitPeeringStateDisabled ExpressRouteCircuitPeeringState = "Disabled" + // ExpressRouteCircuitPeeringStateEnabled ... + ExpressRouteCircuitPeeringStateEnabled ExpressRouteCircuitPeeringState = "Enabled" +) + +// PossibleExpressRouteCircuitPeeringStateValues returns an array of possible values for the ExpressRouteCircuitPeeringState const type. +func PossibleExpressRouteCircuitPeeringStateValues() []ExpressRouteCircuitPeeringState { + return []ExpressRouteCircuitPeeringState{ExpressRouteCircuitPeeringStateDisabled, ExpressRouteCircuitPeeringStateEnabled} +} + +// ExpressRouteCircuitPeeringType enumerates the values for express route circuit peering type. +type ExpressRouteCircuitPeeringType string + +const ( + // AzurePrivatePeering ... + AzurePrivatePeering ExpressRouteCircuitPeeringType = "AzurePrivatePeering" + // AzurePublicPeering ... + AzurePublicPeering ExpressRouteCircuitPeeringType = "AzurePublicPeering" + // MicrosoftPeering ... + MicrosoftPeering ExpressRouteCircuitPeeringType = "MicrosoftPeering" +) + +// PossibleExpressRouteCircuitPeeringTypeValues returns an array of possible values for the ExpressRouteCircuitPeeringType const type. +func PossibleExpressRouteCircuitPeeringTypeValues() []ExpressRouteCircuitPeeringType { + return []ExpressRouteCircuitPeeringType{AzurePrivatePeering, AzurePublicPeering, MicrosoftPeering} +} + +// ExpressRouteCircuitSkuFamily enumerates the values for express route circuit sku family. +type ExpressRouteCircuitSkuFamily string + +const ( + // MeteredData ... 
+ MeteredData ExpressRouteCircuitSkuFamily = "MeteredData" + // UnlimitedData ... + UnlimitedData ExpressRouteCircuitSkuFamily = "UnlimitedData" +) + +// PossibleExpressRouteCircuitSkuFamilyValues returns an array of possible values for the ExpressRouteCircuitSkuFamily const type. +func PossibleExpressRouteCircuitSkuFamilyValues() []ExpressRouteCircuitSkuFamily { + return []ExpressRouteCircuitSkuFamily{MeteredData, UnlimitedData} +} + +// ExpressRouteCircuitSkuTier enumerates the values for express route circuit sku tier. +type ExpressRouteCircuitSkuTier string + +const ( + // ExpressRouteCircuitSkuTierPremium ... + ExpressRouteCircuitSkuTierPremium ExpressRouteCircuitSkuTier = "Premium" + // ExpressRouteCircuitSkuTierStandard ... + ExpressRouteCircuitSkuTierStandard ExpressRouteCircuitSkuTier = "Standard" +) + +// PossibleExpressRouteCircuitSkuTierValues returns an array of possible values for the ExpressRouteCircuitSkuTier const type. +func PossibleExpressRouteCircuitSkuTierValues() []ExpressRouteCircuitSkuTier { + return []ExpressRouteCircuitSkuTier{ExpressRouteCircuitSkuTierPremium, ExpressRouteCircuitSkuTierStandard} +} + +// IPAllocationMethod enumerates the values for ip allocation method. +type IPAllocationMethod string + +const ( + // Dynamic ... + Dynamic IPAllocationMethod = "Dynamic" + // Static ... + Static IPAllocationMethod = "Static" +) + +// PossibleIPAllocationMethodValues returns an array of possible values for the IPAllocationMethod const type. +func PossibleIPAllocationMethodValues() []IPAllocationMethod { + return []IPAllocationMethod{Dynamic, Static} +} + +// LoadDistribution enumerates the values for load distribution. +type LoadDistribution string + +const ( + // Default ... + Default LoadDistribution = "Default" + // SourceIP ... + SourceIP LoadDistribution = "SourceIP" + // SourceIPProtocol ... + SourceIPProtocol LoadDistribution = "SourceIPProtocol" +) + +// PossibleLoadDistributionValues returns an array of possible values for the LoadDistribution const type. +func PossibleLoadDistributionValues() []LoadDistribution { + return []LoadDistribution{Default, SourceIP, SourceIPProtocol} +} + +// OperationStatus enumerates the values for operation status. +type OperationStatus string + +const ( + // Failed ... + Failed OperationStatus = "Failed" + // InProgress ... + InProgress OperationStatus = "InProgress" + // Succeeded ... + Succeeded OperationStatus = "Succeeded" +) + +// PossibleOperationStatusValues returns an array of possible values for the OperationStatus const type. +func PossibleOperationStatusValues() []OperationStatus { + return []OperationStatus{Failed, InProgress, Succeeded} +} + +// ProbeProtocol enumerates the values for probe protocol. +type ProbeProtocol string + +const ( + // ProbeProtocolHTTP ... + ProbeProtocolHTTP ProbeProtocol = "Http" + // ProbeProtocolTCP ... + ProbeProtocolTCP ProbeProtocol = "Tcp" +) + +// PossibleProbeProtocolValues returns an array of possible values for the ProbeProtocol const type. +func PossibleProbeProtocolValues() []ProbeProtocol { + return []ProbeProtocol{ProbeProtocolHTTP, ProbeProtocolTCP} +} + +// ProcessorArchitecture enumerates the values for processor architecture. +type ProcessorArchitecture string + +const ( + // Amd64 ... + Amd64 ProcessorArchitecture = "Amd64" + // X86 ... + X86 ProcessorArchitecture = "X86" +) + +// PossibleProcessorArchitectureValues returns an array of possible values for the ProcessorArchitecture const type. 
+func PossibleProcessorArchitectureValues() []ProcessorArchitecture { + return []ProcessorArchitecture{Amd64, X86} +} + +// RouteNextHopType enumerates the values for route next hop type. +type RouteNextHopType string + +const ( + // RouteNextHopTypeInternet ... + RouteNextHopTypeInternet RouteNextHopType = "Internet" + // RouteNextHopTypeNone ... + RouteNextHopTypeNone RouteNextHopType = "None" + // RouteNextHopTypeVirtualAppliance ... + RouteNextHopTypeVirtualAppliance RouteNextHopType = "VirtualAppliance" + // RouteNextHopTypeVirtualNetworkGateway ... + RouteNextHopTypeVirtualNetworkGateway RouteNextHopType = "VirtualNetworkGateway" + // RouteNextHopTypeVnetLocal ... + RouteNextHopTypeVnetLocal RouteNextHopType = "VnetLocal" +) + +// PossibleRouteNextHopTypeValues returns an array of possible values for the RouteNextHopType const type. +func PossibleRouteNextHopTypeValues() []RouteNextHopType { + return []RouteNextHopType{RouteNextHopTypeInternet, RouteNextHopTypeNone, RouteNextHopTypeVirtualAppliance, RouteNextHopTypeVirtualNetworkGateway, RouteNextHopTypeVnetLocal} +} + +// SecurityRuleAccess enumerates the values for security rule access. +type SecurityRuleAccess string + +const ( + // Allow ... + Allow SecurityRuleAccess = "Allow" + // Deny ... + Deny SecurityRuleAccess = "Deny" +) + +// PossibleSecurityRuleAccessValues returns an array of possible values for the SecurityRuleAccess const type. +func PossibleSecurityRuleAccessValues() []SecurityRuleAccess { + return []SecurityRuleAccess{Allow, Deny} +} + +// SecurityRuleDirection enumerates the values for security rule direction. +type SecurityRuleDirection string + +const ( + // Inbound ... + Inbound SecurityRuleDirection = "Inbound" + // Outbound ... + Outbound SecurityRuleDirection = "Outbound" +) + +// PossibleSecurityRuleDirectionValues returns an array of possible values for the SecurityRuleDirection const type. +func PossibleSecurityRuleDirectionValues() []SecurityRuleDirection { + return []SecurityRuleDirection{Inbound, Outbound} +} + +// SecurityRuleProtocol enumerates the values for security rule protocol. +type SecurityRuleProtocol string + +const ( + // Asterisk ... + Asterisk SecurityRuleProtocol = "*" + // TCP ... + TCP SecurityRuleProtocol = "Tcp" + // UDP ... + UDP SecurityRuleProtocol = "Udp" +) + +// PossibleSecurityRuleProtocolValues returns an array of possible values for the SecurityRuleProtocol const type. +func PossibleSecurityRuleProtocolValues() []SecurityRuleProtocol { + return []SecurityRuleProtocol{Asterisk, TCP, UDP} +} + +// ServiceProviderProvisioningState enumerates the values for service provider provisioning state. +type ServiceProviderProvisioningState string + +const ( + // Deprovisioning ... + Deprovisioning ServiceProviderProvisioningState = "Deprovisioning" + // NotProvisioned ... + NotProvisioned ServiceProviderProvisioningState = "NotProvisioned" + // Provisioned ... + Provisioned ServiceProviderProvisioningState = "Provisioned" + // Provisioning ... + Provisioning ServiceProviderProvisioningState = "Provisioning" +) + +// PossibleServiceProviderProvisioningStateValues returns an array of possible values for the ServiceProviderProvisioningState const type. +func PossibleServiceProviderProvisioningStateValues() []ServiceProviderProvisioningState { + return []ServiceProviderProvisioningState{Deprovisioning, NotProvisioned, Provisioned, Provisioning} +} + +// TransportProtocol enumerates the values for transport protocol. +type TransportProtocol string + +const ( + // TransportProtocolTCP ... 
+ TransportProtocolTCP TransportProtocol = "Tcp" + // TransportProtocolUDP ... + TransportProtocolUDP TransportProtocol = "Udp" +) + +// PossibleTransportProtocolValues returns an array of possible values for the TransportProtocol const type. +func PossibleTransportProtocolValues() []TransportProtocol { + return []TransportProtocol{TransportProtocolTCP, TransportProtocolUDP} +} + +// VirtualNetworkGatewayConnectionStatus enumerates the values for virtual network gateway connection status. +type VirtualNetworkGatewayConnectionStatus string + +const ( + // Connected ... + Connected VirtualNetworkGatewayConnectionStatus = "Connected" + // Connecting ... + Connecting VirtualNetworkGatewayConnectionStatus = "Connecting" + // NotConnected ... + NotConnected VirtualNetworkGatewayConnectionStatus = "NotConnected" + // Unknown ... + Unknown VirtualNetworkGatewayConnectionStatus = "Unknown" +) + +// PossibleVirtualNetworkGatewayConnectionStatusValues returns an array of possible values for the VirtualNetworkGatewayConnectionStatus const type. +func PossibleVirtualNetworkGatewayConnectionStatusValues() []VirtualNetworkGatewayConnectionStatus { + return []VirtualNetworkGatewayConnectionStatus{Connected, Connecting, NotConnected, Unknown} +} + +// VirtualNetworkGatewayConnectionType enumerates the values for virtual network gateway connection type. +type VirtualNetworkGatewayConnectionType string + +const ( + // ExpressRoute ... + ExpressRoute VirtualNetworkGatewayConnectionType = "ExpressRoute" + // IPsec ... + IPsec VirtualNetworkGatewayConnectionType = "IPsec" + // Vnet2Vnet ... + Vnet2Vnet VirtualNetworkGatewayConnectionType = "Vnet2Vnet" + // VPNClient ... + VPNClient VirtualNetworkGatewayConnectionType = "VPNClient" +) + +// PossibleVirtualNetworkGatewayConnectionTypeValues returns an array of possible values for the VirtualNetworkGatewayConnectionType const type. +func PossibleVirtualNetworkGatewayConnectionTypeValues() []VirtualNetworkGatewayConnectionType { + return []VirtualNetworkGatewayConnectionType{ExpressRoute, IPsec, Vnet2Vnet, VPNClient} +} + +// VirtualNetworkGatewaySkuName enumerates the values for virtual network gateway sku name. +type VirtualNetworkGatewaySkuName string + +const ( + // VirtualNetworkGatewaySkuNameBasic ... + VirtualNetworkGatewaySkuNameBasic VirtualNetworkGatewaySkuName = "Basic" + // VirtualNetworkGatewaySkuNameHighPerformance ... + VirtualNetworkGatewaySkuNameHighPerformance VirtualNetworkGatewaySkuName = "HighPerformance" + // VirtualNetworkGatewaySkuNameStandard ... + VirtualNetworkGatewaySkuNameStandard VirtualNetworkGatewaySkuName = "Standard" +) + +// PossibleVirtualNetworkGatewaySkuNameValues returns an array of possible values for the VirtualNetworkGatewaySkuName const type. +func PossibleVirtualNetworkGatewaySkuNameValues() []VirtualNetworkGatewaySkuName { + return []VirtualNetworkGatewaySkuName{VirtualNetworkGatewaySkuNameBasic, VirtualNetworkGatewaySkuNameHighPerformance, VirtualNetworkGatewaySkuNameStandard} +} + +// VirtualNetworkGatewaySkuTier enumerates the values for virtual network gateway sku tier. +type VirtualNetworkGatewaySkuTier string + +const ( + // VirtualNetworkGatewaySkuTierBasic ... + VirtualNetworkGatewaySkuTierBasic VirtualNetworkGatewaySkuTier = "Basic" + // VirtualNetworkGatewaySkuTierHighPerformance ... + VirtualNetworkGatewaySkuTierHighPerformance VirtualNetworkGatewaySkuTier = "HighPerformance" + // VirtualNetworkGatewaySkuTierStandard ... 
+ VirtualNetworkGatewaySkuTierStandard VirtualNetworkGatewaySkuTier = "Standard" +) + +// PossibleVirtualNetworkGatewaySkuTierValues returns an array of possible values for the VirtualNetworkGatewaySkuTier const type. +func PossibleVirtualNetworkGatewaySkuTierValues() []VirtualNetworkGatewaySkuTier { + return []VirtualNetworkGatewaySkuTier{VirtualNetworkGatewaySkuTierBasic, VirtualNetworkGatewaySkuTierHighPerformance, VirtualNetworkGatewaySkuTierStandard} +} + +// VirtualNetworkGatewayType enumerates the values for virtual network gateway type. +type VirtualNetworkGatewayType string + +const ( + // VirtualNetworkGatewayTypeExpressRoute ... + VirtualNetworkGatewayTypeExpressRoute VirtualNetworkGatewayType = "ExpressRoute" + // VirtualNetworkGatewayTypeVpn ... + VirtualNetworkGatewayTypeVpn VirtualNetworkGatewayType = "Vpn" +) + +// PossibleVirtualNetworkGatewayTypeValues returns an array of possible values for the VirtualNetworkGatewayType const type. +func PossibleVirtualNetworkGatewayTypeValues() []VirtualNetworkGatewayType { + return []VirtualNetworkGatewayType{VirtualNetworkGatewayTypeExpressRoute, VirtualNetworkGatewayTypeVpn} +} + +// VpnType enumerates the values for vpn type. +type VpnType string + +const ( + // PolicyBased ... + PolicyBased VpnType = "PolicyBased" + // RouteBased ... + RouteBased VpnType = "RouteBased" +) + +// PossibleVpnTypeValues returns an array of possible values for the VpnType const type. +func PossibleVpnTypeValues() []VpnType { + return []VpnType{PolicyBased, RouteBased} +} + +// AddressSpace addressSpace contains an array of IP address ranges that can be used by subnets of the +// virtual network. +type AddressSpace struct { + // AddressPrefixes - A list of address blocks reserved for this virtual network in CIDR notation. + AddressPrefixes *[]string `json:"addressPrefixes,omitempty"` +} + +// ApplicationGateway application gateway resource +type ApplicationGateway struct { + autorest.Response `json:"-"` + *ApplicationGatewayPropertiesFormat `json:"properties,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for ApplicationGateway. +func (ag ApplicationGateway) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ag.ApplicationGatewayPropertiesFormat != nil { + objectMap["properties"] = ag.ApplicationGatewayPropertiesFormat + } + if ag.Etag != nil { + objectMap["etag"] = ag.Etag + } + if ag.ID != nil { + objectMap["id"] = ag.ID + } + if ag.Location != nil { + objectMap["location"] = ag.Location + } + if ag.Tags != nil { + objectMap["tags"] = ag.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ApplicationGateway struct. 
+func (ag *ApplicationGateway) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var applicationGatewayPropertiesFormat ApplicationGatewayPropertiesFormat + err = json.Unmarshal(*v, &applicationGatewayPropertiesFormat) + if err != nil { + return err + } + ag.ApplicationGatewayPropertiesFormat = &applicationGatewayPropertiesFormat + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + ag.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + ag.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + ag.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + ag.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + ag.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + ag.Tags = tags + } + } + } + + return nil +} + +// ApplicationGatewayBackendAddress backend address of an application gateway. +type ApplicationGatewayBackendAddress struct { + // Fqdn - Fully qualified domain name (FQDN). + Fqdn *string `json:"fqdn,omitempty"` + // IPAddress - IP address + IPAddress *string `json:"ipAddress,omitempty"` +} + +// ApplicationGatewayBackendAddressPool backend Address Pool of an application gateway. +type ApplicationGatewayBackendAddressPool struct { + *ApplicationGatewayBackendAddressPoolPropertiesFormat `json:"properties,omitempty"` + // Name - Resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for ApplicationGatewayBackendAddressPool. +func (agbap ApplicationGatewayBackendAddressPool) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if agbap.ApplicationGatewayBackendAddressPoolPropertiesFormat != nil { + objectMap["properties"] = agbap.ApplicationGatewayBackendAddressPoolPropertiesFormat + } + if agbap.Name != nil { + objectMap["name"] = agbap.Name + } + if agbap.Etag != nil { + objectMap["etag"] = agbap.Etag + } + if agbap.ID != nil { + objectMap["id"] = agbap.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ApplicationGatewayBackendAddressPool struct. 
+func (agbap *ApplicationGatewayBackendAddressPool) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var applicationGatewayBackendAddressPoolPropertiesFormat ApplicationGatewayBackendAddressPoolPropertiesFormat + err = json.Unmarshal(*v, &applicationGatewayBackendAddressPoolPropertiesFormat) + if err != nil { + return err + } + agbap.ApplicationGatewayBackendAddressPoolPropertiesFormat = &applicationGatewayBackendAddressPoolPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + agbap.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + agbap.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + agbap.ID = &ID + } + } + } + + return nil +} + +// ApplicationGatewayBackendAddressPoolPropertiesFormat properties of Backend Address Pool of an +// application gateway. +type ApplicationGatewayBackendAddressPoolPropertiesFormat struct { + // BackendIPConfigurations - Collection of references to IPs defined in network interfaces. + BackendIPConfigurations *[]InterfaceIPConfiguration `json:"backendIPConfigurations,omitempty"` + // BackendAddresses - Backend addresses + BackendAddresses *[]ApplicationGatewayBackendAddress `json:"backendAddresses,omitempty"` + // ProvisioningState - Provisioning state of the backend address pool resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayBackendHTTPSettings backend address pool settings of an application gateway. +type ApplicationGatewayBackendHTTPSettings struct { + *ApplicationGatewayBackendHTTPSettingsPropertiesFormat `json:"properties,omitempty"` + // Name - Name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for ApplicationGatewayBackendHTTPSettings. +func (agbhs ApplicationGatewayBackendHTTPSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if agbhs.ApplicationGatewayBackendHTTPSettingsPropertiesFormat != nil { + objectMap["properties"] = agbhs.ApplicationGatewayBackendHTTPSettingsPropertiesFormat + } + if agbhs.Name != nil { + objectMap["name"] = agbhs.Name + } + if agbhs.Etag != nil { + objectMap["etag"] = agbhs.Etag + } + if agbhs.ID != nil { + objectMap["id"] = agbhs.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ApplicationGatewayBackendHTTPSettings struct. 
+func (agbhs *ApplicationGatewayBackendHTTPSettings) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var applicationGatewayBackendHTTPSettingsPropertiesFormat ApplicationGatewayBackendHTTPSettingsPropertiesFormat + err = json.Unmarshal(*v, &applicationGatewayBackendHTTPSettingsPropertiesFormat) + if err != nil { + return err + } + agbhs.ApplicationGatewayBackendHTTPSettingsPropertiesFormat = &applicationGatewayBackendHTTPSettingsPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + agbhs.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + agbhs.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + agbhs.ID = &ID + } + } + } + + return nil +} + +// ApplicationGatewayBackendHTTPSettingsPropertiesFormat properties of Backend address pool settings of an +// application gateway. +type ApplicationGatewayBackendHTTPSettingsPropertiesFormat struct { + // Port - Port + Port *int32 `json:"port,omitempty"` + // Protocol - Protocol. Possible values are: 'Http' and 'Https'. Possible values include: 'HTTP', 'HTTPS' + Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` + // CookieBasedAffinity - Cookie based affinity. Possible values are: 'Enabled' and 'Disabled'. Possible values include: 'Enabled', 'Disabled' + CookieBasedAffinity ApplicationGatewayCookieBasedAffinity `json:"cookieBasedAffinity,omitempty"` + // RequestTimeout - Request timeout in seconds. Application Gateway will fail the request if response is not received within RequestTimeout. Acceptable values are from 1 second to 86400 seconds. + RequestTimeout *int32 `json:"requestTimeout,omitempty"` + // Probe - Probe resource of an application gateway. + Probe *SubResource `json:"probe,omitempty"` + // ProvisioningState - Gets or sets Provisioning state of the backend http settings resource Updating/Deleting/Failed + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayFrontendIPConfiguration frontend IP configuration of an application gateway. +type ApplicationGatewayFrontendIPConfiguration struct { + *ApplicationGatewayFrontendIPConfigurationPropertiesFormat `json:"properties,omitempty"` + // Name - Name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for ApplicationGatewayFrontendIPConfiguration. 
+func (agfic ApplicationGatewayFrontendIPConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if agfic.ApplicationGatewayFrontendIPConfigurationPropertiesFormat != nil { + objectMap["properties"] = agfic.ApplicationGatewayFrontendIPConfigurationPropertiesFormat + } + if agfic.Name != nil { + objectMap["name"] = agfic.Name + } + if agfic.Etag != nil { + objectMap["etag"] = agfic.Etag + } + if agfic.ID != nil { + objectMap["id"] = agfic.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ApplicationGatewayFrontendIPConfiguration struct. +func (agfic *ApplicationGatewayFrontendIPConfiguration) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var applicationGatewayFrontendIPConfigurationPropertiesFormat ApplicationGatewayFrontendIPConfigurationPropertiesFormat + err = json.Unmarshal(*v, &applicationGatewayFrontendIPConfigurationPropertiesFormat) + if err != nil { + return err + } + agfic.ApplicationGatewayFrontendIPConfigurationPropertiesFormat = &applicationGatewayFrontendIPConfigurationPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + agfic.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + agfic.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + agfic.ID = &ID + } + } + } + + return nil +} + +// ApplicationGatewayFrontendIPConfigurationPropertiesFormat properties of Frontend IP configuration of an +// application gateway. +type ApplicationGatewayFrontendIPConfigurationPropertiesFormat struct { + // PrivateIPAddress - PrivateIPAddress of the network interface IP Configuration. + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + // PrivateIPAllocationMethod - PrivateIP allocation method. Possible values are: 'Static' and 'Dynamic'. Possible values include: 'Static', 'Dynamic' + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + // Subnet - Reference of the subnet resource. + Subnet *SubResource `json:"subnet,omitempty"` + // PublicIPAddress - Reference of the PublicIP resource. + PublicIPAddress *SubResource `json:"publicIPAddress,omitempty"` + // ProvisioningState - Provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayFrontendPort frontend port of an application gateway. +type ApplicationGatewayFrontendPort struct { + *ApplicationGatewayFrontendPortPropertiesFormat `json:"properties,omitempty"` + // Name - Name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for ApplicationGatewayFrontendPort. 
+func (agfp ApplicationGatewayFrontendPort) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if agfp.ApplicationGatewayFrontendPortPropertiesFormat != nil { + objectMap["properties"] = agfp.ApplicationGatewayFrontendPortPropertiesFormat + } + if agfp.Name != nil { + objectMap["name"] = agfp.Name + } + if agfp.Etag != nil { + objectMap["etag"] = agfp.Etag + } + if agfp.ID != nil { + objectMap["id"] = agfp.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ApplicationGatewayFrontendPort struct. +func (agfp *ApplicationGatewayFrontendPort) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var applicationGatewayFrontendPortPropertiesFormat ApplicationGatewayFrontendPortPropertiesFormat + err = json.Unmarshal(*v, &applicationGatewayFrontendPortPropertiesFormat) + if err != nil { + return err + } + agfp.ApplicationGatewayFrontendPortPropertiesFormat = &applicationGatewayFrontendPortPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + agfp.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + agfp.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + agfp.ID = &ID + } + } + } + + return nil +} + +// ApplicationGatewayFrontendPortPropertiesFormat properties of Frontend port of an application gateway. +type ApplicationGatewayFrontendPortPropertiesFormat struct { + // Port - Frontend port + Port *int32 `json:"port,omitempty"` + // ProvisioningState - Provisioning state of the frontend port resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayHTTPListener http listener of an application gateway. +type ApplicationGatewayHTTPListener struct { + *ApplicationGatewayHTTPListenerPropertiesFormat `json:"properties,omitempty"` + // Name - Name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for ApplicationGatewayHTTPListener. +func (aghl ApplicationGatewayHTTPListener) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if aghl.ApplicationGatewayHTTPListenerPropertiesFormat != nil { + objectMap["properties"] = aghl.ApplicationGatewayHTTPListenerPropertiesFormat + } + if aghl.Name != nil { + objectMap["name"] = aghl.Name + } + if aghl.Etag != nil { + objectMap["etag"] = aghl.Etag + } + if aghl.ID != nil { + objectMap["id"] = aghl.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ApplicationGatewayHTTPListener struct. 
+func (aghl *ApplicationGatewayHTTPListener) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var applicationGatewayHTTPListenerPropertiesFormat ApplicationGatewayHTTPListenerPropertiesFormat + err = json.Unmarshal(*v, &applicationGatewayHTTPListenerPropertiesFormat) + if err != nil { + return err + } + aghl.ApplicationGatewayHTTPListenerPropertiesFormat = &applicationGatewayHTTPListenerPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + aghl.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + aghl.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + aghl.ID = &ID + } + } + } + + return nil +} + +// ApplicationGatewayHTTPListenerPropertiesFormat properties of HTTP listener of an application gateway. +type ApplicationGatewayHTTPListenerPropertiesFormat struct { + // FrontendIPConfiguration - Frontend IP configuration resource of an application gateway. + FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` + // FrontendPort - Frontend port resource of an application gateway. + FrontendPort *SubResource `json:"frontendPort,omitempty"` + // Protocol - Protocol. Possible values are: 'Http' and 'Https'. Possible values include: 'HTTP', 'HTTPS' + Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` + // HostName - Host name of HTTP listener. + HostName *string `json:"hostName,omitempty"` + // SslCertificate - SSL certificate resource of an application gateway. + SslCertificate *SubResource `json:"sslCertificate,omitempty"` + // RequireServerNameIndication - Applicable only if protocol is https. Enables SNI for multi-hosting. + RequireServerNameIndication *bool `json:"requireServerNameIndication,omitempty"` + // ProvisioningState - Provisioning state of the HTTP listener resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayIPConfiguration IP configuration of an application gateway. Currently 1 public and 1 +// private IP configuration is allowed. +type ApplicationGatewayIPConfiguration struct { + *ApplicationGatewayIPConfigurationPropertiesFormat `json:"properties,omitempty"` + // Name - Name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for ApplicationGatewayIPConfiguration. 
+func (agic ApplicationGatewayIPConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if agic.ApplicationGatewayIPConfigurationPropertiesFormat != nil { + objectMap["properties"] = agic.ApplicationGatewayIPConfigurationPropertiesFormat + } + if agic.Name != nil { + objectMap["name"] = agic.Name + } + if agic.Etag != nil { + objectMap["etag"] = agic.Etag + } + if agic.ID != nil { + objectMap["id"] = agic.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ApplicationGatewayIPConfiguration struct. +func (agic *ApplicationGatewayIPConfiguration) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var applicationGatewayIPConfigurationPropertiesFormat ApplicationGatewayIPConfigurationPropertiesFormat + err = json.Unmarshal(*v, &applicationGatewayIPConfigurationPropertiesFormat) + if err != nil { + return err + } + agic.ApplicationGatewayIPConfigurationPropertiesFormat = &applicationGatewayIPConfigurationPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + agic.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + agic.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + agic.ID = &ID + } + } + } + + return nil +} + +// ApplicationGatewayIPConfigurationPropertiesFormat properties of IP configuration of an application +// gateway. +type ApplicationGatewayIPConfigurationPropertiesFormat struct { + // Subnet - Reference of the subnet resource. A subnet from where application gateway gets its private address. + Subnet *SubResource `json:"subnet,omitempty"` + // ProvisioningState - Provisioning state of the application gateway subnet resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayListResult response for ListApplicationGateways API service call. +type ApplicationGatewayListResult struct { + autorest.Response `json:"-"` + // Value - List of an application gateways in a resource group. + Value *[]ApplicationGateway `json:"value,omitempty"` + // NextLink - URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ApplicationGatewayListResultIterator provides access to a complete listing of ApplicationGateway values. +type ApplicationGatewayListResultIterator struct { + i int + page ApplicationGatewayListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ApplicationGatewayListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewayListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. 
If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ApplicationGatewayListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ApplicationGatewayListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ApplicationGatewayListResultIterator) Response() ApplicationGatewayListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ApplicationGatewayListResultIterator) Value() ApplicationGateway { + if !iter.page.NotDone() { + return ApplicationGateway{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ApplicationGatewayListResultIterator type. +func NewApplicationGatewayListResultIterator(page ApplicationGatewayListResultPage) ApplicationGatewayListResultIterator { + return ApplicationGatewayListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (aglr ApplicationGatewayListResult) IsEmpty() bool { + return aglr.Value == nil || len(*aglr.Value) == 0 +} + +// applicationGatewayListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (aglr ApplicationGatewayListResult) applicationGatewayListResultPreparer(ctx context.Context) (*http.Request, error) { + if aglr.NextLink == nil || len(to.String(aglr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(aglr.NextLink))) +} + +// ApplicationGatewayListResultPage contains a page of ApplicationGateway values. +type ApplicationGatewayListResultPage struct { + fn func(context.Context, ApplicationGatewayListResult) (ApplicationGatewayListResult, error) + aglr ApplicationGatewayListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ApplicationGatewayListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ApplicationGatewayListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.aglr) + if err != nil { + return err + } + page.aglr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ApplicationGatewayListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ApplicationGatewayListResultPage) NotDone() bool { + return !page.aglr.IsEmpty() +} + +// Response returns the raw server response from the last page request. 
+func (page ApplicationGatewayListResultPage) Response() ApplicationGatewayListResult { + return page.aglr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ApplicationGatewayListResultPage) Values() []ApplicationGateway { + if page.aglr.IsEmpty() { + return nil + } + return *page.aglr.Value +} + +// Creates a new instance of the ApplicationGatewayListResultPage type. +func NewApplicationGatewayListResultPage(getNextPage func(context.Context, ApplicationGatewayListResult) (ApplicationGatewayListResult, error)) ApplicationGatewayListResultPage { + return ApplicationGatewayListResultPage{fn: getNextPage} +} + +// ApplicationGatewayPathRule path rule of URL path map of an application gateway. +type ApplicationGatewayPathRule struct { + *ApplicationGatewayPathRulePropertiesFormat `json:"properties,omitempty"` + // Name - Name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for ApplicationGatewayPathRule. +func (agpr ApplicationGatewayPathRule) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if agpr.ApplicationGatewayPathRulePropertiesFormat != nil { + objectMap["properties"] = agpr.ApplicationGatewayPathRulePropertiesFormat + } + if agpr.Name != nil { + objectMap["name"] = agpr.Name + } + if agpr.Etag != nil { + objectMap["etag"] = agpr.Etag + } + if agpr.ID != nil { + objectMap["id"] = agpr.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ApplicationGatewayPathRule struct. +func (agpr *ApplicationGatewayPathRule) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var applicationGatewayPathRulePropertiesFormat ApplicationGatewayPathRulePropertiesFormat + err = json.Unmarshal(*v, &applicationGatewayPathRulePropertiesFormat) + if err != nil { + return err + } + agpr.ApplicationGatewayPathRulePropertiesFormat = &applicationGatewayPathRulePropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + agpr.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + agpr.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + agpr.ID = &ID + } + } + } + + return nil +} + +// ApplicationGatewayPathRulePropertiesFormat properties of probe of an application gateway. +type ApplicationGatewayPathRulePropertiesFormat struct { + // Paths - Path rules of URL path map. + Paths *[]string `json:"paths,omitempty"` + // BackendAddressPool - Backend address pool resource of URL path map. + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + // BackendHTTPSettings - Backend http settings resource of URL path map. + BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"` + // ProvisioningState - Path rule of URL path map resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
+ ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayProbe probe of the application gateway. +type ApplicationGatewayProbe struct { + *ApplicationGatewayProbePropertiesFormat `json:"properties,omitempty"` + // Name - Name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for ApplicationGatewayProbe. +func (agp ApplicationGatewayProbe) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if agp.ApplicationGatewayProbePropertiesFormat != nil { + objectMap["properties"] = agp.ApplicationGatewayProbePropertiesFormat + } + if agp.Name != nil { + objectMap["name"] = agp.Name + } + if agp.Etag != nil { + objectMap["etag"] = agp.Etag + } + if agp.ID != nil { + objectMap["id"] = agp.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ApplicationGatewayProbe struct. +func (agp *ApplicationGatewayProbe) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var applicationGatewayProbePropertiesFormat ApplicationGatewayProbePropertiesFormat + err = json.Unmarshal(*v, &applicationGatewayProbePropertiesFormat) + if err != nil { + return err + } + agp.ApplicationGatewayProbePropertiesFormat = &applicationGatewayProbePropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + agp.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + agp.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + agp.ID = &ID + } + } + } + + return nil +} + +// ApplicationGatewayProbePropertiesFormat properties of probe of an application gateway. +type ApplicationGatewayProbePropertiesFormat struct { + // Protocol - Protocol. Possible values are: 'Http' and 'Https'. Possible values include: 'HTTP', 'HTTPS' + Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` + // Host - Host name to send the probe to. + Host *string `json:"host,omitempty"` + // Path - Relative path of probe. Valid path starts from '/'. Probe is sent to ://: + Path *string `json:"path,omitempty"` + // Interval - The probing interval in seconds. This is the time interval between two consecutive probes. Acceptable values are from 1 second to 86400 seconds. + Interval *int32 `json:"interval,omitempty"` + // Timeout - the probe timeout in seconds. Probe marked as failed if valid response is not received with this timeout period. Acceptable values are from 1 second to 86400 seconds. + Timeout *int32 `json:"timeout,omitempty"` + // UnhealthyThreshold - The probe retry count. Backend server is marked down after consecutive probe failure count reaches UnhealthyThreshold. Acceptable values are from 1 second to 20. + UnhealthyThreshold *int32 `json:"unhealthyThreshold,omitempty"` + // ProvisioningState - Provisioning state of the backend http settings resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
+ ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayPropertiesFormat properties of the application gateway. +type ApplicationGatewayPropertiesFormat struct { + // Sku - SKU of the application gateway resource. + Sku *ApplicationGatewaySku `json:"sku,omitempty"` + // OperationalState - READ-ONLY; Operational state of the application gateway resource. Possible values are: 'Stopped', 'Started', 'Running', and 'Stopping'. Possible values include: 'Stopped', 'Starting', 'Running', 'Stopping' + OperationalState ApplicationGatewayOperationalState `json:"operationalState,omitempty"` + // GatewayIPConfigurations - Gets or sets subnets of application gateway resource + GatewayIPConfigurations *[]ApplicationGatewayIPConfiguration `json:"gatewayIPConfigurations,omitempty"` + // SslCertificates - SSL certificates of the application gateway resource. + SslCertificates *[]ApplicationGatewaySslCertificate `json:"sslCertificates,omitempty"` + // FrontendIPConfigurations - Frontend IP addresses of the application gateway resource. + FrontendIPConfigurations *[]ApplicationGatewayFrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"` + // FrontendPorts - Frontend ports of the application gateway resource. + FrontendPorts *[]ApplicationGatewayFrontendPort `json:"frontendPorts,omitempty"` + // Probes - Probes of the application gateway resource. + Probes *[]ApplicationGatewayProbe `json:"probes,omitempty"` + // BackendAddressPools - Backend address pool of the application gateway resource. + BackendAddressPools *[]ApplicationGatewayBackendAddressPool `json:"backendAddressPools,omitempty"` + // BackendHTTPSettingsCollection - Backend http settings of the application gateway resource. + BackendHTTPSettingsCollection *[]ApplicationGatewayBackendHTTPSettings `json:"backendHttpSettingsCollection,omitempty"` + // HTTPListeners - Http listeners of the application gateway resource. + HTTPListeners *[]ApplicationGatewayHTTPListener `json:"httpListeners,omitempty"` + // URLPathMaps - URL path map of the application gateway resource. + URLPathMaps *[]ApplicationGatewayURLPathMap `json:"urlPathMaps,omitempty"` + // RequestRoutingRules - Request routing rules of the application gateway resource. + RequestRoutingRules *[]ApplicationGatewayRequestRoutingRule `json:"requestRoutingRules,omitempty"` + // ResourceGUID - Resource GUID property of the application gateway resource. + ResourceGUID *string `json:"resourceGuid,omitempty"` + // ProvisioningState - Provisioning state of the application gateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayRequestRoutingRule request routing rule of an application gateway. +type ApplicationGatewayRequestRoutingRule struct { + *ApplicationGatewayRequestRoutingRulePropertiesFormat `json:"properties,omitempty"` + // Name - Name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for ApplicationGatewayRequestRoutingRule. 
+func (agrrr ApplicationGatewayRequestRoutingRule) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if agrrr.ApplicationGatewayRequestRoutingRulePropertiesFormat != nil { + objectMap["properties"] = agrrr.ApplicationGatewayRequestRoutingRulePropertiesFormat + } + if agrrr.Name != nil { + objectMap["name"] = agrrr.Name + } + if agrrr.Etag != nil { + objectMap["etag"] = agrrr.Etag + } + if agrrr.ID != nil { + objectMap["id"] = agrrr.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ApplicationGatewayRequestRoutingRule struct. +func (agrrr *ApplicationGatewayRequestRoutingRule) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var applicationGatewayRequestRoutingRulePropertiesFormat ApplicationGatewayRequestRoutingRulePropertiesFormat + err = json.Unmarshal(*v, &applicationGatewayRequestRoutingRulePropertiesFormat) + if err != nil { + return err + } + agrrr.ApplicationGatewayRequestRoutingRulePropertiesFormat = &applicationGatewayRequestRoutingRulePropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + agrrr.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + agrrr.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + agrrr.ID = &ID + } + } + } + + return nil +} + +// ApplicationGatewayRequestRoutingRulePropertiesFormat properties of request routing rule of the +// application gateway. +type ApplicationGatewayRequestRoutingRulePropertiesFormat struct { + // RuleType - Rule type. Possible values are: 'Basic' and 'PathBasedRouting'. Possible values include: 'Basic', 'PathBasedRouting' + RuleType ApplicationGatewayRequestRoutingRuleType `json:"ruleType,omitempty"` + // BackendAddressPool - Backend address pool resource of the application gateway. + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + // BackendHTTPSettings - Frontend port resource of the application gateway. + BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"` + // HTTPListener - Http listener resource of the application gateway. + HTTPListener *SubResource `json:"httpListener,omitempty"` + // URLPathMap - URL path map resource of the application gateway. + URLPathMap *SubResource `json:"urlPathMap,omitempty"` + // ProvisioningState - Provisioning state of the request routing rule resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewaysCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type ApplicationGatewaysCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *ApplicationGatewaysCreateOrUpdateFuture) Result(client ApplicationGatewaysClient) (ag ApplicationGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ag.Response.Response, err = future.GetResult(sender); err == nil && ag.Response.Response.StatusCode != http.StatusNoContent { + ag, err = client.CreateOrUpdateResponder(ag.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysCreateOrUpdateFuture", "Result", ag.Response.Response, "Failure responding to request") + } + } + return +} + +// ApplicationGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type ApplicationGatewaysDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ApplicationGatewaysDeleteFuture) Result(client ApplicationGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// ApplicationGatewaySku SKU of application gateway +type ApplicationGatewaySku struct { + // Name - Name of an application gateway SKU. Possible values are: 'Standard_Small', 'Standard_Medium', 'Standard_Large', 'WAF_Medium', and 'WAF_Large'. Possible values include: 'StandardSmall', 'StandardMedium', 'StandardLarge' + Name ApplicationGatewaySkuName `json:"name,omitempty"` + // Tier - Tier of an application gateway. Possible values include: 'Standard' + Tier ApplicationGatewayTier `json:"tier,omitempty"` + // Capacity - Capacity (instance count) of an application gateway. + Capacity *int32 `json:"capacity,omitempty"` +} + +// ApplicationGatewaySslCertificate SSL certificates of an application gateway. +type ApplicationGatewaySslCertificate struct { + *ApplicationGatewaySslCertificatePropertiesFormat `json:"properties,omitempty"` + // Name - Name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for ApplicationGatewaySslCertificate. 
+func (agsc ApplicationGatewaySslCertificate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if agsc.ApplicationGatewaySslCertificatePropertiesFormat != nil { + objectMap["properties"] = agsc.ApplicationGatewaySslCertificatePropertiesFormat + } + if agsc.Name != nil { + objectMap["name"] = agsc.Name + } + if agsc.Etag != nil { + objectMap["etag"] = agsc.Etag + } + if agsc.ID != nil { + objectMap["id"] = agsc.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ApplicationGatewaySslCertificate struct. +func (agsc *ApplicationGatewaySslCertificate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var applicationGatewaySslCertificatePropertiesFormat ApplicationGatewaySslCertificatePropertiesFormat + err = json.Unmarshal(*v, &applicationGatewaySslCertificatePropertiesFormat) + if err != nil { + return err + } + agsc.ApplicationGatewaySslCertificatePropertiesFormat = &applicationGatewaySslCertificatePropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + agsc.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + agsc.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + agsc.ID = &ID + } + } + } + + return nil +} + +// ApplicationGatewaySslCertificatePropertiesFormat properties of SSL certificates of an application +// gateway. +type ApplicationGatewaySslCertificatePropertiesFormat struct { + // Data - Base-64 encoded pfx certificate. Only applicable in PUT Request. + Data *string `json:"data,omitempty"` + // Password - Password for the pfx file specified in data. Only applicable in PUT request. + Password *string `json:"password,omitempty"` + // PublicCertData - Base-64 encoded Public cert data corresponding to pfx specified in data. Only applicable in GET request. + PublicCertData *string `json:"publicCertData,omitempty"` + // ProvisioningState - Provisioning state of the SSL certificate resource Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewaysStartFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type ApplicationGatewaysStartFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ApplicationGatewaysStartFuture) Result(client ApplicationGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysStartFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysStartFuture") + return + } + ar.Response = future.Response() + return +} + +// ApplicationGatewaysStopFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type ApplicationGatewaysStopFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. 
+// If the operation has not completed it will return an error. +func (future *ApplicationGatewaysStopFuture) Result(client ApplicationGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ApplicationGatewaysStopFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ApplicationGatewaysStopFuture") + return + } + ar.Response = future.Response() + return +} + +// ApplicationGatewayURLPathMap urlPathMaps give a url path to the backend mapping information for +// PathBasedRouting. +type ApplicationGatewayURLPathMap struct { + *ApplicationGatewayURLPathMapPropertiesFormat `json:"properties,omitempty"` + // Name - Name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for ApplicationGatewayURLPathMap. +func (agupm ApplicationGatewayURLPathMap) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if agupm.ApplicationGatewayURLPathMapPropertiesFormat != nil { + objectMap["properties"] = agupm.ApplicationGatewayURLPathMapPropertiesFormat + } + if agupm.Name != nil { + objectMap["name"] = agupm.Name + } + if agupm.Etag != nil { + objectMap["etag"] = agupm.Etag + } + if agupm.ID != nil { + objectMap["id"] = agupm.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ApplicationGatewayURLPathMap struct. +func (agupm *ApplicationGatewayURLPathMap) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var applicationGatewayURLPathMapPropertiesFormat ApplicationGatewayURLPathMapPropertiesFormat + err = json.Unmarshal(*v, &applicationGatewayURLPathMapPropertiesFormat) + if err != nil { + return err + } + agupm.ApplicationGatewayURLPathMapPropertiesFormat = &applicationGatewayURLPathMapPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + agupm.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + agupm.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + agupm.ID = &ID + } + } + } + + return nil +} + +// ApplicationGatewayURLPathMapPropertiesFormat properties of UrlPathMap of the application gateway. +type ApplicationGatewayURLPathMapPropertiesFormat struct { + // DefaultBackendAddressPool - Default backend address pool resource of URL path map. + DefaultBackendAddressPool *SubResource `json:"defaultBackendAddressPool,omitempty"` + // DefaultBackendHTTPSettings - Default backend http settings resource of URL path map. + DefaultBackendHTTPSettings *SubResource `json:"defaultBackendHttpSettings,omitempty"` + // PathRules - Path rule of URL path map resource. 
+ PathRules *[]ApplicationGatewayPathRule `json:"pathRules,omitempty"` + // ProvisioningState - Provisioning state of the backend http settings resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// AuthorizationListResult response for ListAuthorizations API service call retrieves all authorizations +// that belongs to an ExpressRouteCircuit. +type AuthorizationListResult struct { + autorest.Response `json:"-"` + // Value - The authorizations in an ExpressRoute Circuit. + Value *[]ExpressRouteCircuitAuthorization `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// AuthorizationListResultIterator provides access to a complete listing of +// ExpressRouteCircuitAuthorization values. +type AuthorizationListResultIterator struct { + i int + page AuthorizationListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *AuthorizationListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AuthorizationListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *AuthorizationListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter AuthorizationListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter AuthorizationListResultIterator) Response() AuthorizationListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter AuthorizationListResultIterator) Value() ExpressRouteCircuitAuthorization { + if !iter.page.NotDone() { + return ExpressRouteCircuitAuthorization{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the AuthorizationListResultIterator type. +func NewAuthorizationListResultIterator(page AuthorizationListResultPage) AuthorizationListResultIterator { + return AuthorizationListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (alr AuthorizationListResult) IsEmpty() bool { + return alr.Value == nil || len(*alr.Value) == 0 +} + +// authorizationListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
+func (alr AuthorizationListResult) authorizationListResultPreparer(ctx context.Context) (*http.Request, error) { + if alr.NextLink == nil || len(to.String(alr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(alr.NextLink))) +} + +// AuthorizationListResultPage contains a page of ExpressRouteCircuitAuthorization values. +type AuthorizationListResultPage struct { + fn func(context.Context, AuthorizationListResult) (AuthorizationListResult, error) + alr AuthorizationListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *AuthorizationListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AuthorizationListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.alr) + if err != nil { + return err + } + page.alr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *AuthorizationListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page AuthorizationListResultPage) NotDone() bool { + return !page.alr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page AuthorizationListResultPage) Response() AuthorizationListResult { + return page.alr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page AuthorizationListResultPage) Values() []ExpressRouteCircuitAuthorization { + if page.alr.IsEmpty() { + return nil + } + return *page.alr.Value +} + +// Creates a new instance of the AuthorizationListResultPage type. +func NewAuthorizationListResultPage(getNextPage func(context.Context, AuthorizationListResult) (AuthorizationListResult, error)) AuthorizationListResultPage { + return AuthorizationListResultPage{fn: getNextPage} +} + +// AuthorizationPropertiesFormat ... +type AuthorizationPropertiesFormat struct { + // AuthorizationKey - The authorization key. + AuthorizationKey *string `json:"authorizationKey,omitempty"` + // AuthorizationUseStatus - AuthorizationUseStatus. Possible values are: 'Available' and 'InUse'. Possible values include: 'Available', 'InUse' + AuthorizationUseStatus AuthorizationUseStatus `json:"authorizationUseStatus,omitempty"` + // ProvisioningState - Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// AzureAsyncOperationResult the response body contains the status of the specified asynchronous operation, +// indicating whether it has succeeded, is in progress, or has failed. Note that this status is distinct +// from the HTTP status code returned for the Get Operation Status operation itself. If the asynchronous +// operation succeeded, the response body includes the HTTP status code for the successful request. 
If the +// asynchronous operation failed, the response body includes the HTTP status code for the failed request +// and error information regarding the failure. +type AzureAsyncOperationResult struct { + // Status - Status of the Azure async operation. Possible values are: 'InProgress', 'Succeeded', and 'Failed'. Possible values include: 'InProgress', 'Succeeded', 'Failed' + Status OperationStatus `json:"status,omitempty"` + Error *Error `json:"error,omitempty"` +} + +// BackendAddressPool pool of backend IP addresses. +type BackendAddressPool struct { + *BackendAddressPoolPropertiesFormat `json:"properties,omitempty"` + // Name - Gets name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for BackendAddressPool. +func (bap BackendAddressPool) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if bap.BackendAddressPoolPropertiesFormat != nil { + objectMap["properties"] = bap.BackendAddressPoolPropertiesFormat + } + if bap.Name != nil { + objectMap["name"] = bap.Name + } + if bap.Etag != nil { + objectMap["etag"] = bap.Etag + } + if bap.ID != nil { + objectMap["id"] = bap.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for BackendAddressPool struct. +func (bap *BackendAddressPool) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var backendAddressPoolPropertiesFormat BackendAddressPoolPropertiesFormat + err = json.Unmarshal(*v, &backendAddressPoolPropertiesFormat) + if err != nil { + return err + } + bap.BackendAddressPoolPropertiesFormat = &backendAddressPoolPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + bap.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + bap.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + bap.ID = &ID + } + } + } + + return nil +} + +// BackendAddressPoolPropertiesFormat properties of the backend address pool. +type BackendAddressPoolPropertiesFormat struct { + // BackendIPConfigurations - Gets collection of references to IP addresses defined in network interfaces. + BackendIPConfigurations *[]InterfaceIPConfiguration `json:"backendIPConfigurations,omitempty"` + // LoadBalancingRules - READ-ONLY; Gets load balancing rules that use this backend address pool. + LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` + // OutboundNatRule - Gets outbound rules that use this backend address pool. + OutboundNatRule *SubResource `json:"outboundNatRule,omitempty"` + // ProvisioningState - Get provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// BgpSettings ... 
+type BgpSettings struct { + // Asn - Gets or sets this BGP speaker's ASN + Asn *int64 `json:"asn,omitempty"` + // BgpPeeringAddress - Gets or sets the BGP peering address and BGP identifier of this BGP speaker + BgpPeeringAddress *string `json:"bgpPeeringAddress,omitempty"` + // PeerWeight - Gets or sets the weight added to routes learned from this BGP speaker + PeerWeight *int32 `json:"peerWeight,omitempty"` +} + +// ConnectionResetSharedKey ... +type ConnectionResetSharedKey struct { + autorest.Response `json:"-"` + // KeyLength - The virtual network connection reset shared key length + KeyLength *int64 `json:"keyLength,omitempty"` +} + +// ConnectionSharedKey response for GetConnectionSharedKey API service call +type ConnectionSharedKey struct { + autorest.Response `json:"-"` + // Value - The virtual network connection shared key value + Value *string `json:"value,omitempty"` +} + +// ConnectionSharedKeyResult response for CheckConnectionSharedKey API service call +type ConnectionSharedKeyResult struct { + autorest.Response `json:"-"` + // Value - The virtual network connection shared key value + Value *string `json:"value,omitempty"` +} + +// DhcpOptions dhcpOptions contains an array of DNS servers available to VMs deployed in the virtual +// network. Standard DHCP option for a subnet overrides VNET DHCP options. +type DhcpOptions struct { + // DNSServers - The list of DNS servers IP addresses. + DNSServers *[]string `json:"dnsServers,omitempty"` +} + +// DNSNameAvailabilityResult response for the CheckDnsNameAvailability API service call. +type DNSNameAvailabilityResult struct { + autorest.Response `json:"-"` + // Available - Domain availability (True/False). + Available *bool `json:"available,omitempty"` +} + +// Error error object properties +type Error struct { + Code *string `json:"code,omitempty"` + Message *string `json:"message,omitempty"` + Target *string `json:"target,omitempty"` + Details *[]ErrorDetails `json:"details,omitempty"` + InnerError *string `json:"innerError,omitempty"` +} + +// ErrorDetails error details properties +type ErrorDetails struct { + Code *string `json:"code,omitempty"` + Target *string `json:"target,omitempty"` + Message *string `json:"message,omitempty"` +} + +// ExpressRouteCircuit expressRouteCircuit resource +type ExpressRouteCircuit struct { + autorest.Response `json:"-"` + // Sku - The SKU. + Sku *ExpressRouteCircuitSku `json:"sku,omitempty"` + *ExpressRouteCircuitPropertiesFormat `json:"properties,omitempty"` + // Etag - Gets a unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for ExpressRouteCircuit. 
+func (erc ExpressRouteCircuit) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if erc.Sku != nil { + objectMap["sku"] = erc.Sku + } + if erc.ExpressRouteCircuitPropertiesFormat != nil { + objectMap["properties"] = erc.ExpressRouteCircuitPropertiesFormat + } + if erc.Etag != nil { + objectMap["etag"] = erc.Etag + } + if erc.ID != nil { + objectMap["id"] = erc.ID + } + if erc.Location != nil { + objectMap["location"] = erc.Location + } + if erc.Tags != nil { + objectMap["tags"] = erc.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ExpressRouteCircuit struct. +func (erc *ExpressRouteCircuit) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "sku": + if v != nil { + var sku ExpressRouteCircuitSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + erc.Sku = &sku + } + case "properties": + if v != nil { + var expressRouteCircuitPropertiesFormat ExpressRouteCircuitPropertiesFormat + err = json.Unmarshal(*v, &expressRouteCircuitPropertiesFormat) + if err != nil { + return err + } + erc.ExpressRouteCircuitPropertiesFormat = &expressRouteCircuitPropertiesFormat + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + erc.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + erc.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + erc.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + erc.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + erc.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + erc.Tags = tags + } + } + } + + return nil +} + +// ExpressRouteCircuitArpTable the ARP table associated with the ExpressRouteCircuit. +type ExpressRouteCircuitArpTable struct { + // IPAddress - The IP address. + IPAddress *string `json:"ipAddress,omitempty"` + // MacAddress - The MAC address. + MacAddress *string `json:"macAddress,omitempty"` +} + +// ExpressRouteCircuitAuthorization authorization in an ExpressRouteCircuit resource. +type ExpressRouteCircuitAuthorization struct { + autorest.Response `json:"-"` + *AuthorizationPropertiesFormat `json:"properties,omitempty"` + // Name - Gets name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for ExpressRouteCircuitAuthorization. 
+func (erca ExpressRouteCircuitAuthorization) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if erca.AuthorizationPropertiesFormat != nil { + objectMap["properties"] = erca.AuthorizationPropertiesFormat + } + if erca.Name != nil { + objectMap["name"] = erca.Name + } + if erca.Etag != nil { + objectMap["etag"] = erca.Etag + } + if erca.ID != nil { + objectMap["id"] = erca.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ExpressRouteCircuitAuthorization struct. +func (erca *ExpressRouteCircuitAuthorization) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var authorizationPropertiesFormat AuthorizationPropertiesFormat + err = json.Unmarshal(*v, &authorizationPropertiesFormat) + if err != nil { + return err + } + erca.AuthorizationPropertiesFormat = &authorizationPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + erca.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + erca.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + erca.ID = &ID + } + } + } + + return nil +} + +// ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture an abstraction for monitoring and retrieving the +// results of a long-running operation. +type ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture) Result(client ExpressRouteCircuitAuthorizationsClient) (erca ExpressRouteCircuitAuthorization, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erca.Response.Response, err = future.GetResult(sender); err == nil && erca.Response.Response.StatusCode != http.StatusNoContent { + erca, err = client.CreateOrUpdateResponder(erca.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsCreateOrUpdateFuture", "Result", erca.Response.Response, "Failure responding to request") + } + } + return +} + +// ExpressRouteCircuitAuthorizationsDeleteFuture an abstraction for monitoring and retrieving the results +// of a long-running operation. +type ExpressRouteCircuitAuthorizationsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *ExpressRouteCircuitAuthorizationsDeleteFuture) Result(client ExpressRouteCircuitAuthorizationsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitAuthorizationsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitAuthorizationsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// ExpressRouteCircuitListResult response for ListExpressRouteCircuit API service call. +type ExpressRouteCircuitListResult struct { + autorest.Response `json:"-"` + // Value - A list of ExpressRouteCircuits in a resource group. + Value *[]ExpressRouteCircuit `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteCircuitListResultIterator provides access to a complete listing of ExpressRouteCircuit +// values. +type ExpressRouteCircuitListResultIterator struct { + i int + page ExpressRouteCircuitListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ExpressRouteCircuitListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ExpressRouteCircuitListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ExpressRouteCircuitListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ExpressRouteCircuitListResultIterator) Response() ExpressRouteCircuitListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ExpressRouteCircuitListResultIterator) Value() ExpressRouteCircuit { + if !iter.page.NotDone() { + return ExpressRouteCircuit{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ExpressRouteCircuitListResultIterator type. +func NewExpressRouteCircuitListResultIterator(page ExpressRouteCircuitListResultPage) ExpressRouteCircuitListResultIterator { + return ExpressRouteCircuitListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (erclr ExpressRouteCircuitListResult) IsEmpty() bool { + return erclr.Value == nil || len(*erclr.Value) == 0 +} + +// expressRouteCircuitListResultPreparer prepares a request to retrieve the next set of results. 
+// It returns nil if no more results exist. +func (erclr ExpressRouteCircuitListResult) expressRouteCircuitListResultPreparer(ctx context.Context) (*http.Request, error) { + if erclr.NextLink == nil || len(to.String(erclr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(erclr.NextLink))) +} + +// ExpressRouteCircuitListResultPage contains a page of ExpressRouteCircuit values. +type ExpressRouteCircuitListResultPage struct { + fn func(context.Context, ExpressRouteCircuitListResult) (ExpressRouteCircuitListResult, error) + erclr ExpressRouteCircuitListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ExpressRouteCircuitListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.erclr) + if err != nil { + return err + } + page.erclr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ExpressRouteCircuitListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ExpressRouteCircuitListResultPage) NotDone() bool { + return !page.erclr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ExpressRouteCircuitListResultPage) Response() ExpressRouteCircuitListResult { + return page.erclr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ExpressRouteCircuitListResultPage) Values() []ExpressRouteCircuit { + if page.erclr.IsEmpty() { + return nil + } + return *page.erclr.Value +} + +// Creates a new instance of the ExpressRouteCircuitListResultPage type. +func NewExpressRouteCircuitListResultPage(getNextPage func(context.Context, ExpressRouteCircuitListResult) (ExpressRouteCircuitListResult, error)) ExpressRouteCircuitListResultPage { + return ExpressRouteCircuitListResultPage{fn: getNextPage} +} + +// ExpressRouteCircuitPeering peering in an ExpressRouteCircuit resource. +type ExpressRouteCircuitPeering struct { + autorest.Response `json:"-"` + *ExpressRouteCircuitPeeringPropertiesFormat `json:"properties,omitempty"` + // Name - Gets name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for ExpressRouteCircuitPeering. 
+func (ercp ExpressRouteCircuitPeering) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ercp.ExpressRouteCircuitPeeringPropertiesFormat != nil { + objectMap["properties"] = ercp.ExpressRouteCircuitPeeringPropertiesFormat + } + if ercp.Name != nil { + objectMap["name"] = ercp.Name + } + if ercp.Etag != nil { + objectMap["etag"] = ercp.Etag + } + if ercp.ID != nil { + objectMap["id"] = ercp.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ExpressRouteCircuitPeering struct. +func (ercp *ExpressRouteCircuitPeering) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var expressRouteCircuitPeeringPropertiesFormat ExpressRouteCircuitPeeringPropertiesFormat + err = json.Unmarshal(*v, &expressRouteCircuitPeeringPropertiesFormat) + if err != nil { + return err + } + ercp.ExpressRouteCircuitPeeringPropertiesFormat = &expressRouteCircuitPeeringPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + ercp.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + ercp.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + ercp.ID = &ID + } + } + } + + return nil +} + +// ExpressRouteCircuitPeeringConfig specifies the peering configuration. +type ExpressRouteCircuitPeeringConfig struct { + // AdvertisedPublicPrefixes - The reference of AdvertisedPublicPrefixes. + AdvertisedPublicPrefixes *[]string `json:"advertisedPublicPrefixes,omitempty"` + // AdvertisedPublicPrefixesState - AdvertisedPublicPrefixState of the Peering resource. Possible values are 'NotConfigured', 'Configuring', 'Configured', and 'ValidationNeeded'. Possible values include: 'NotConfigured', 'Configuring', 'Configured', 'ValidationNeeded' + AdvertisedPublicPrefixesState ExpressRouteCircuitPeeringAdvertisedPublicPrefixState `json:"advertisedPublicPrefixesState,omitempty"` + // CustomerASN - The CustomerASN of the peering. + CustomerASN *int32 `json:"customerASN,omitempty"` + // RoutingRegistryName - The RoutingRegistryName of the configuration. + RoutingRegistryName *string `json:"routingRegistryName,omitempty"` +} + +// ExpressRouteCircuitPeeringListResult response for ListPeering API service call retrieves all peerings +// that belong to an ExpressRouteCircuit. +type ExpressRouteCircuitPeeringListResult struct { + autorest.Response `json:"-"` + // Value - The peerings in an express route circuit. + Value *[]ExpressRouteCircuitPeering `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteCircuitPeeringListResultIterator provides access to a complete listing of +// ExpressRouteCircuitPeering values. +type ExpressRouteCircuitPeeringListResultIterator struct { + i int + page ExpressRouteCircuitPeeringListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
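// Editorial sketch, not generated code: the custom marshaler/unmarshaler above
// flatten the embedded *ExpressRouteCircuitPeeringPropertiesFormat into and out of
// a nested "properties" object, matching the wire format. The VlanID field used
// below is declared further down in this file; package and function names are
// placeholders.
package example

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network"
)

func decodePeering() error {
	raw := []byte(`{"name":"AzurePrivatePeering","properties":{"vlanId":100}}`)
	var p network.ExpressRouteCircuitPeering
	if err := json.Unmarshal(raw, &p); err != nil {
		return err
	}
	// UnmarshalJSON has populated the flattened properties pointer from "properties".
	fmt.Println(*p.Name, *p.ExpressRouteCircuitPeeringPropertiesFormat.VlanID) // AzurePrivatePeering 100
	return nil
}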
+func (iter *ExpressRouteCircuitPeeringListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitPeeringListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ExpressRouteCircuitPeeringListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ExpressRouteCircuitPeeringListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ExpressRouteCircuitPeeringListResultIterator) Response() ExpressRouteCircuitPeeringListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ExpressRouteCircuitPeeringListResultIterator) Value() ExpressRouteCircuitPeering { + if !iter.page.NotDone() { + return ExpressRouteCircuitPeering{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ExpressRouteCircuitPeeringListResultIterator type. +func NewExpressRouteCircuitPeeringListResultIterator(page ExpressRouteCircuitPeeringListResultPage) ExpressRouteCircuitPeeringListResultIterator { + return ExpressRouteCircuitPeeringListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (ercplr ExpressRouteCircuitPeeringListResult) IsEmpty() bool { + return ercplr.Value == nil || len(*ercplr.Value) == 0 +} + +// expressRouteCircuitPeeringListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (ercplr ExpressRouteCircuitPeeringListResult) expressRouteCircuitPeeringListResultPreparer(ctx context.Context) (*http.Request, error) { + if ercplr.NextLink == nil || len(to.String(ercplr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(ercplr.NextLink))) +} + +// ExpressRouteCircuitPeeringListResultPage contains a page of ExpressRouteCircuitPeering values. +type ExpressRouteCircuitPeeringListResultPage struct { + fn func(context.Context, ExpressRouteCircuitPeeringListResult) (ExpressRouteCircuitPeeringListResult, error) + ercplr ExpressRouteCircuitPeeringListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *ExpressRouteCircuitPeeringListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitPeeringListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.ercplr) + if err != nil { + return err + } + page.ercplr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ExpressRouteCircuitPeeringListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ExpressRouteCircuitPeeringListResultPage) NotDone() bool { + return !page.ercplr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ExpressRouteCircuitPeeringListResultPage) Response() ExpressRouteCircuitPeeringListResult { + return page.ercplr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ExpressRouteCircuitPeeringListResultPage) Values() []ExpressRouteCircuitPeering { + if page.ercplr.IsEmpty() { + return nil + } + return *page.ercplr.Value +} + +// Creates a new instance of the ExpressRouteCircuitPeeringListResultPage type. +func NewExpressRouteCircuitPeeringListResultPage(getNextPage func(context.Context, ExpressRouteCircuitPeeringListResult) (ExpressRouteCircuitPeeringListResult, error)) ExpressRouteCircuitPeeringListResultPage { + return ExpressRouteCircuitPeeringListResultPage{fn: getNextPage} +} + +// ExpressRouteCircuitPeeringPropertiesFormat ... +type ExpressRouteCircuitPeeringPropertiesFormat struct { + // PeeringType - The PeeringType. Possible values are: 'AzurePublicPeering', 'AzurePrivatePeering', and 'MicrosoftPeering'. Possible values include: 'AzurePublicPeering', 'AzurePrivatePeering', 'MicrosoftPeering' + PeeringType ExpressRouteCircuitPeeringType `json:"peeringType,omitempty"` + // State - The state of peering. Possible values are: 'Disabled' and 'Enabled'. Possible values include: 'ExpressRouteCircuitPeeringStateDisabled', 'ExpressRouteCircuitPeeringStateEnabled' + State ExpressRouteCircuitPeeringState `json:"state,omitempty"` + // AzureASN - The Azure ASN. + AzureASN *int32 `json:"azureASN,omitempty"` + // PeerASN - The peer ASN. + PeerASN *int32 `json:"peerASN,omitempty"` + // PrimaryPeerAddressPrefix - The primary address prefix. + PrimaryPeerAddressPrefix *string `json:"primaryPeerAddressPrefix,omitempty"` + // SecondaryPeerAddressPrefix - The secondary address prefix. + SecondaryPeerAddressPrefix *string `json:"secondaryPeerAddressPrefix,omitempty"` + // PrimaryAzurePort - The primary port. + PrimaryAzurePort *string `json:"primaryAzurePort,omitempty"` + // SecondaryAzurePort - The secondary port. + SecondaryAzurePort *string `json:"secondaryAzurePort,omitempty"` + // SharedKey - The shared key. + SharedKey *string `json:"sharedKey,omitempty"` + // VlanID - The VLAN ID. + VlanID *int32 `json:"vlanId,omitempty"` + // MicrosoftPeeringConfig - The Microsoft peering configuration. + MicrosoftPeeringConfig *ExpressRouteCircuitPeeringConfig `json:"microsoftPeeringConfig,omitempty"` + // Stats - Gets peering stats. 
+ Stats *ExpressRouteCircuitStats `json:"stats,omitempty"` + // ProvisioningState - Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ExpressRouteCircuitPeeringsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results +// of a long-running operation. +type ExpressRouteCircuitPeeringsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ExpressRouteCircuitPeeringsCreateOrUpdateFuture) Result(client ExpressRouteCircuitPeeringsClient) (ercp ExpressRouteCircuitPeering, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if ercp.Response.Response, err = future.GetResult(sender); err == nil && ercp.Response.Response.StatusCode != http.StatusNoContent { + ercp, err = client.CreateOrUpdateResponder(ercp.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsCreateOrUpdateFuture", "Result", ercp.Response.Response, "Failure responding to request") + } + } + return +} + +// ExpressRouteCircuitPeeringsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type ExpressRouteCircuitPeeringsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ExpressRouteCircuitPeeringsDeleteFuture) Result(client ExpressRouteCircuitPeeringsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitPeeringsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitPeeringsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// ExpressRouteCircuitPropertiesFormat properties of ExpressRouteCircuit. +type ExpressRouteCircuitPropertiesFormat struct { + // CircuitProvisioningState - The CircuitProvisioningState state of the resource. + CircuitProvisioningState *string `json:"circuitProvisioningState,omitempty"` + // ServiceProviderProvisioningState - The ServiceProviderProvisioningState state of the resource. Possible values are 'NotProvisioned', 'Provisioning', 'Provisioned', and 'Deprovisioning'. Possible values include: 'NotProvisioned', 'Provisioning', 'Provisioned', 'Deprovisioning' + ServiceProviderProvisioningState ServiceProviderProvisioningState `json:"serviceProviderProvisioningState,omitempty"` + // Authorizations - The list of authorizations. + Authorizations *[]ExpressRouteCircuitAuthorization `json:"authorizations,omitempty"` + // Peerings - The list of peerings. 
+ Peerings *[]ExpressRouteCircuitPeering `json:"peerings,omitempty"` + // ServiceKey - The ServiceKey. + ServiceKey *string `json:"serviceKey,omitempty"` + // ServiceProviderNotes - The ServiceProviderNotes. + ServiceProviderNotes *string `json:"serviceProviderNotes,omitempty"` + // ServiceProviderProperties - The ServiceProviderProperties. + ServiceProviderProperties *ExpressRouteCircuitServiceProviderProperties `json:"serviceProviderProperties,omitempty"` + // ProvisioningState - Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ExpressRouteCircuitRoutesTable the routes table associated with the ExpressRouteCircuit +type ExpressRouteCircuitRoutesTable struct { + // AddressPrefix - Gets AddressPrefix. + AddressPrefix *string `json:"addressPrefix,omitempty"` + // NextHopType - Gets NextHopType. Possible values include: 'RouteNextHopTypeVirtualNetworkGateway', 'RouteNextHopTypeVnetLocal', 'RouteNextHopTypeInternet', 'RouteNextHopTypeVirtualAppliance', 'RouteNextHopTypeNone' + NextHopType RouteNextHopType `json:"nextHopType,omitempty"` + // NextHopIP - Gets NextHopIP. + NextHopIP *string `json:"nextHopIP,omitempty"` + // AsPath - Gets AsPath. + AsPath *string `json:"asPath,omitempty"` +} + +// ExpressRouteCircuitsArpTableListResult response for ListArpTable associated with the Express Route +// Circuits API. +type ExpressRouteCircuitsArpTableListResult struct { + autorest.Response `json:"-"` + // Value - Gets list of the ARP table. + Value *[]ExpressRouteCircuitArpTable `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteCircuitsArpTableListResultIterator provides access to a complete listing of +// ExpressRouteCircuitArpTable values. +type ExpressRouteCircuitsArpTableListResultIterator struct { + i int + page ExpressRouteCircuitsArpTableListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ExpressRouteCircuitsArpTableListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsArpTableListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ExpressRouteCircuitsArpTableListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ExpressRouteCircuitsArpTableListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. 
+func (iter ExpressRouteCircuitsArpTableListResultIterator) Response() ExpressRouteCircuitsArpTableListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ExpressRouteCircuitsArpTableListResultIterator) Value() ExpressRouteCircuitArpTable { + if !iter.page.NotDone() { + return ExpressRouteCircuitArpTable{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ExpressRouteCircuitsArpTableListResultIterator type. +func NewExpressRouteCircuitsArpTableListResultIterator(page ExpressRouteCircuitsArpTableListResultPage) ExpressRouteCircuitsArpTableListResultIterator { + return ExpressRouteCircuitsArpTableListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (ercatlr ExpressRouteCircuitsArpTableListResult) IsEmpty() bool { + return ercatlr.Value == nil || len(*ercatlr.Value) == 0 +} + +// expressRouteCircuitsArpTableListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (ercatlr ExpressRouteCircuitsArpTableListResult) expressRouteCircuitsArpTableListResultPreparer(ctx context.Context) (*http.Request, error) { + if ercatlr.NextLink == nil || len(to.String(ercatlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(ercatlr.NextLink))) +} + +// ExpressRouteCircuitsArpTableListResultPage contains a page of ExpressRouteCircuitArpTable values. +type ExpressRouteCircuitsArpTableListResultPage struct { + fn func(context.Context, ExpressRouteCircuitsArpTableListResult) (ExpressRouteCircuitsArpTableListResult, error) + ercatlr ExpressRouteCircuitsArpTableListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ExpressRouteCircuitsArpTableListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsArpTableListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.ercatlr) + if err != nil { + return err + } + page.ercatlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ExpressRouteCircuitsArpTableListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ExpressRouteCircuitsArpTableListResultPage) NotDone() bool { + return !page.ercatlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ExpressRouteCircuitsArpTableListResultPage) Response() ExpressRouteCircuitsArpTableListResult { + return page.ercatlr +} + +// Values returns the slice of values for the current page or nil if there are no values. 
+func (page ExpressRouteCircuitsArpTableListResultPage) Values() []ExpressRouteCircuitArpTable { + if page.ercatlr.IsEmpty() { + return nil + } + return *page.ercatlr.Value +} + +// Creates a new instance of the ExpressRouteCircuitsArpTableListResultPage type. +func NewExpressRouteCircuitsArpTableListResultPage(getNextPage func(context.Context, ExpressRouteCircuitsArpTableListResult) (ExpressRouteCircuitsArpTableListResult, error)) ExpressRouteCircuitsArpTableListResultPage { + return ExpressRouteCircuitsArpTableListResultPage{fn: getNextPage} +} + +// ExpressRouteCircuitsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type ExpressRouteCircuitsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ExpressRouteCircuitsCreateOrUpdateFuture) Result(client ExpressRouteCircuitsClient) (erc ExpressRouteCircuit, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if erc.Response.Response, err = future.GetResult(sender); err == nil && erc.Response.Response.StatusCode != http.StatusNoContent { + erc, err = client.CreateOrUpdateResponder(erc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsCreateOrUpdateFuture", "Result", erc.Response.Response, "Failure responding to request") + } + } + return +} + +// ExpressRouteCircuitsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type ExpressRouteCircuitsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ExpressRouteCircuitsDeleteFuture) Result(client ExpressRouteCircuitsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.ExpressRouteCircuitsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.ExpressRouteCircuitsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// ExpressRouteCircuitServiceProviderProperties contains ServiceProviderProperties in an +// ExpressRouteCircuit. +type ExpressRouteCircuitServiceProviderProperties struct { + // ServiceProviderName - The serviceProviderName. + ServiceProviderName *string `json:"serviceProviderName,omitempty"` + // PeeringLocation - The peering location. + PeeringLocation *string `json:"peeringLocation,omitempty"` + // BandwidthInMbps - The BandwidthInMbps. + BandwidthInMbps *int32 `json:"bandwidthInMbps,omitempty"` +} + +// ExpressRouteCircuitSku contains SKU in an ExpressRouteCircuit. +type ExpressRouteCircuitSku struct { + // Name - The name of the SKU. + Name *string `json:"name,omitempty"` + // Tier - The tier of the SKU. 
Possible values are 'Standard' and 'Premium'. Possible values include: 'ExpressRouteCircuitSkuTierStandard', 'ExpressRouteCircuitSkuTierPremium' + Tier ExpressRouteCircuitSkuTier `json:"tier,omitempty"` + // Family - The family of the SKU. Possible values are: 'UnlimitedData' and 'MeteredData'. Possible values include: 'UnlimitedData', 'MeteredData' + Family ExpressRouteCircuitSkuFamily `json:"family,omitempty"` +} + +// ExpressRouteCircuitsRoutesTableListResult response for ListRoutesTable associated with the Express Route +// Circuits API. +type ExpressRouteCircuitsRoutesTableListResult struct { + autorest.Response `json:"-"` + // Value - The list of routes table. + Value *[]ExpressRouteCircuitRoutesTable `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteCircuitsRoutesTableListResultIterator provides access to a complete listing of +// ExpressRouteCircuitRoutesTable values. +type ExpressRouteCircuitsRoutesTableListResultIterator struct { + i int + page ExpressRouteCircuitsRoutesTableListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ExpressRouteCircuitsRoutesTableListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsRoutesTableListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ExpressRouteCircuitsRoutesTableListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ExpressRouteCircuitsRoutesTableListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ExpressRouteCircuitsRoutesTableListResultIterator) Response() ExpressRouteCircuitsRoutesTableListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ExpressRouteCircuitsRoutesTableListResultIterator) Value() ExpressRouteCircuitRoutesTable { + if !iter.page.NotDone() { + return ExpressRouteCircuitRoutesTable{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ExpressRouteCircuitsRoutesTableListResultIterator type. +func NewExpressRouteCircuitsRoutesTableListResultIterator(page ExpressRouteCircuitsRoutesTableListResultPage) ExpressRouteCircuitsRoutesTableListResultIterator { + return ExpressRouteCircuitsRoutesTableListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. 
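// Editorial sketch, not generated code: how a long-running-operation future such as
// the ExpressRouteCircuitsCreateOrUpdateFuture defined above is typically consumed.
// WaitForCompletionRef comes from the embedded azure.Future, and client.Client is the
// autorest.Client promoted through BaseClient in client.go; both live outside this
// file and are assumptions here, as are the package and function names.
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network"
)

func waitForCircuit(ctx context.Context, client network.ExpressRouteCircuitsClient, future network.ExpressRouteCircuitsCreateOrUpdateFuture) (network.ExpressRouteCircuit, error) {
	// Poll until the operation reaches a terminal state.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return network.ExpressRouteCircuit{}, err
	}
	// Result (defined above) decodes the final ExpressRouteCircuit, or returns an
	// AsyncOpIncompleteError if polling has not actually finished.
	return future.Result(client)
}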
+func (ercrtlr ExpressRouteCircuitsRoutesTableListResult) IsEmpty() bool { + return ercrtlr.Value == nil || len(*ercrtlr.Value) == 0 +} + +// expressRouteCircuitsRoutesTableListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (ercrtlr ExpressRouteCircuitsRoutesTableListResult) expressRouteCircuitsRoutesTableListResultPreparer(ctx context.Context) (*http.Request, error) { + if ercrtlr.NextLink == nil || len(to.String(ercrtlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(ercrtlr.NextLink))) +} + +// ExpressRouteCircuitsRoutesTableListResultPage contains a page of ExpressRouteCircuitRoutesTable values. +type ExpressRouteCircuitsRoutesTableListResultPage struct { + fn func(context.Context, ExpressRouteCircuitsRoutesTableListResult) (ExpressRouteCircuitsRoutesTableListResult, error) + ercrtlr ExpressRouteCircuitsRoutesTableListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ExpressRouteCircuitsRoutesTableListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsRoutesTableListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.ercrtlr) + if err != nil { + return err + } + page.ercrtlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ExpressRouteCircuitsRoutesTableListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ExpressRouteCircuitsRoutesTableListResultPage) NotDone() bool { + return !page.ercrtlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ExpressRouteCircuitsRoutesTableListResultPage) Response() ExpressRouteCircuitsRoutesTableListResult { + return page.ercrtlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ExpressRouteCircuitsRoutesTableListResultPage) Values() []ExpressRouteCircuitRoutesTable { + if page.ercrtlr.IsEmpty() { + return nil + } + return *page.ercrtlr.Value +} + +// Creates a new instance of the ExpressRouteCircuitsRoutesTableListResultPage type. +func NewExpressRouteCircuitsRoutesTableListResultPage(getNextPage func(context.Context, ExpressRouteCircuitsRoutesTableListResult) (ExpressRouteCircuitsRoutesTableListResult, error)) ExpressRouteCircuitsRoutesTableListResultPage { + return ExpressRouteCircuitsRoutesTableListResultPage{fn: getNextPage} +} + +// ExpressRouteCircuitsStatsListResult response for ListStats from Express Route Circuits Api service call +type ExpressRouteCircuitsStatsListResult struct { + autorest.Response `json:"-"` + // Value - Gets List of Stats + Value *[]ExpressRouteCircuitStats `json:"value,omitempty"` + // NextLink - Gets the URL to get the next set of results. 
+ NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteCircuitsStatsListResultIterator provides access to a complete listing of +// ExpressRouteCircuitStats values. +type ExpressRouteCircuitsStatsListResultIterator struct { + i int + page ExpressRouteCircuitsStatsListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ExpressRouteCircuitsStatsListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsStatsListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ExpressRouteCircuitsStatsListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ExpressRouteCircuitsStatsListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ExpressRouteCircuitsStatsListResultIterator) Response() ExpressRouteCircuitsStatsListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ExpressRouteCircuitsStatsListResultIterator) Value() ExpressRouteCircuitStats { + if !iter.page.NotDone() { + return ExpressRouteCircuitStats{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ExpressRouteCircuitsStatsListResultIterator type. +func NewExpressRouteCircuitsStatsListResultIterator(page ExpressRouteCircuitsStatsListResultPage) ExpressRouteCircuitsStatsListResultIterator { + return ExpressRouteCircuitsStatsListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (ercslr ExpressRouteCircuitsStatsListResult) IsEmpty() bool { + return ercslr.Value == nil || len(*ercslr.Value) == 0 +} + +// expressRouteCircuitsStatsListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (ercslr ExpressRouteCircuitsStatsListResult) expressRouteCircuitsStatsListResultPreparer(ctx context.Context) (*http.Request, error) { + if ercslr.NextLink == nil || len(to.String(ercslr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(ercslr.NextLink))) +} + +// ExpressRouteCircuitsStatsListResultPage contains a page of ExpressRouteCircuitStats values. +type ExpressRouteCircuitsStatsListResultPage struct { + fn func(context.Context, ExpressRouteCircuitsStatsListResult) (ExpressRouteCircuitsStatsListResult, error) + ercslr ExpressRouteCircuitsStatsListResult +} + +// NextWithContext advances to the next page of values. 
If there was an error making +// the request the page does not advance and the error is returned. +func (page *ExpressRouteCircuitsStatsListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteCircuitsStatsListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.ercslr) + if err != nil { + return err + } + page.ercslr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ExpressRouteCircuitsStatsListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ExpressRouteCircuitsStatsListResultPage) NotDone() bool { + return !page.ercslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ExpressRouteCircuitsStatsListResultPage) Response() ExpressRouteCircuitsStatsListResult { + return page.ercslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ExpressRouteCircuitsStatsListResultPage) Values() []ExpressRouteCircuitStats { + if page.ercslr.IsEmpty() { + return nil + } + return *page.ercslr.Value +} + +// Creates a new instance of the ExpressRouteCircuitsStatsListResultPage type. +func NewExpressRouteCircuitsStatsListResultPage(getNextPage func(context.Context, ExpressRouteCircuitsStatsListResult) (ExpressRouteCircuitsStatsListResult, error)) ExpressRouteCircuitsStatsListResultPage { + return ExpressRouteCircuitsStatsListResultPage{fn: getNextPage} +} + +// ExpressRouteCircuitStats contains stats associated with the peering. +type ExpressRouteCircuitStats struct { + // BytesIn - Gets BytesIn of the peering. + BytesIn *int32 `json:"bytesIn,omitempty"` + // BytesOut - Gets BytesOut of the peering. + BytesOut *int32 `json:"bytesOut,omitempty"` +} + +// ExpressRouteServiceProvider a ExpressRouteResourceProvider object. +type ExpressRouteServiceProvider struct { + *ExpressRouteServiceProviderPropertiesFormat `json:"properties,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for ExpressRouteServiceProvider. +func (ersp ExpressRouteServiceProvider) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ersp.ExpressRouteServiceProviderPropertiesFormat != nil { + objectMap["properties"] = ersp.ExpressRouteServiceProviderPropertiesFormat + } + if ersp.ID != nil { + objectMap["id"] = ersp.ID + } + if ersp.Location != nil { + objectMap["location"] = ersp.Location + } + if ersp.Tags != nil { + objectMap["tags"] = ersp.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ExpressRouteServiceProvider struct. 
+func (ersp *ExpressRouteServiceProvider) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var expressRouteServiceProviderPropertiesFormat ExpressRouteServiceProviderPropertiesFormat + err = json.Unmarshal(*v, &expressRouteServiceProviderPropertiesFormat) + if err != nil { + return err + } + ersp.ExpressRouteServiceProviderPropertiesFormat = &expressRouteServiceProviderPropertiesFormat + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + ersp.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + ersp.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + ersp.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + ersp.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + ersp.Tags = tags + } + } + } + + return nil +} + +// ExpressRouteServiceProviderBandwidthsOffered contains bandwidths offered in ExpressRouteServiceProvider +// resources. +type ExpressRouteServiceProviderBandwidthsOffered struct { + // OfferName - The OfferName. + OfferName *string `json:"offerName,omitempty"` + // ValueInMbps - The ValueInMbps. + ValueInMbps *int32 `json:"valueInMbps,omitempty"` +} + +// ExpressRouteServiceProviderListResult response for the ListExpressRouteServiceProvider API service call. +type ExpressRouteServiceProviderListResult struct { + autorest.Response `json:"-"` + // Value - A list of ExpressRouteResourceProvider resources. + Value *[]ExpressRouteServiceProvider `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ExpressRouteServiceProviderListResultIterator provides access to a complete listing of +// ExpressRouteServiceProvider values. +type ExpressRouteServiceProviderListResultIterator struct { + i int + page ExpressRouteServiceProviderListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ExpressRouteServiceProviderListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteServiceProviderListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ExpressRouteServiceProviderListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. 
+func (iter ExpressRouteServiceProviderListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ExpressRouteServiceProviderListResultIterator) Response() ExpressRouteServiceProviderListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ExpressRouteServiceProviderListResultIterator) Value() ExpressRouteServiceProvider { + if !iter.page.NotDone() { + return ExpressRouteServiceProvider{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ExpressRouteServiceProviderListResultIterator type. +func NewExpressRouteServiceProviderListResultIterator(page ExpressRouteServiceProviderListResultPage) ExpressRouteServiceProviderListResultIterator { + return ExpressRouteServiceProviderListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (ersplr ExpressRouteServiceProviderListResult) IsEmpty() bool { + return ersplr.Value == nil || len(*ersplr.Value) == 0 +} + +// expressRouteServiceProviderListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (ersplr ExpressRouteServiceProviderListResult) expressRouteServiceProviderListResultPreparer(ctx context.Context) (*http.Request, error) { + if ersplr.NextLink == nil || len(to.String(ersplr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(ersplr.NextLink))) +} + +// ExpressRouteServiceProviderListResultPage contains a page of ExpressRouteServiceProvider values. +type ExpressRouteServiceProviderListResultPage struct { + fn func(context.Context, ExpressRouteServiceProviderListResult) (ExpressRouteServiceProviderListResult, error) + ersplr ExpressRouteServiceProviderListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ExpressRouteServiceProviderListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteServiceProviderListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.ersplr) + if err != nil { + return err + } + page.ersplr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ExpressRouteServiceProviderListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ExpressRouteServiceProviderListResultPage) NotDone() bool { + return !page.ersplr.IsEmpty() +} + +// Response returns the raw server response from the last page request. 
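// Editorial sketch, not generated code: item-at-a-time traversal with the iterator
// defined above (NewExpressRouteServiceProviderListResultIterator, NotDone, Value,
// NextWithContext), as an alternative to looping over whole pages. The starting page
// is assumed to come from the service providers client elsewhere in this package;
// package and function names are placeholders.
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network"
)

func printProviders(ctx context.Context, page network.ExpressRouteServiceProviderListResultPage) error {
	for iter := network.NewExpressRouteServiceProviderListResultIterator(page); iter.NotDone(); {
		// Value returns a zero ExpressRouteServiceProvider once the iterator is exhausted,
		// but NotDone guards against that here.
		if name := iter.Value().Name; name != nil {
			fmt.Println(*name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}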
+func (page ExpressRouteServiceProviderListResultPage) Response() ExpressRouteServiceProviderListResult { + return page.ersplr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ExpressRouteServiceProviderListResultPage) Values() []ExpressRouteServiceProvider { + if page.ersplr.IsEmpty() { + return nil + } + return *page.ersplr.Value +} + +// Creates a new instance of the ExpressRouteServiceProviderListResultPage type. +func NewExpressRouteServiceProviderListResultPage(getNextPage func(context.Context, ExpressRouteServiceProviderListResult) (ExpressRouteServiceProviderListResult, error)) ExpressRouteServiceProviderListResultPage { + return ExpressRouteServiceProviderListResultPage{fn: getNextPage} +} + +// ExpressRouteServiceProviderPropertiesFormat properties of ExpressRouteServiceProvider. +type ExpressRouteServiceProviderPropertiesFormat struct { + // PeeringLocations - Get a list of peering locations. + PeeringLocations *[]string `json:"peeringLocations,omitempty"` + // BandwidthsOffered - Gets bandwidths offered. + BandwidthsOffered *[]ExpressRouteServiceProviderBandwidthsOffered `json:"bandwidthsOffered,omitempty"` + // ProvisioningState - Gets the provisioning state of the resource. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// FrontendIPConfiguration frontend IP address of the load balancer. +type FrontendIPConfiguration struct { + *FrontendIPConfigurationPropertiesFormat `json:"properties,omitempty"` + // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for FrontendIPConfiguration. +func (fic FrontendIPConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if fic.FrontendIPConfigurationPropertiesFormat != nil { + objectMap["properties"] = fic.FrontendIPConfigurationPropertiesFormat + } + if fic.Name != nil { + objectMap["name"] = fic.Name + } + if fic.Etag != nil { + objectMap["etag"] = fic.Etag + } + if fic.ID != nil { + objectMap["id"] = fic.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for FrontendIPConfiguration struct. 
+func (fic *FrontendIPConfiguration) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var frontendIPConfigurationPropertiesFormat FrontendIPConfigurationPropertiesFormat + err = json.Unmarshal(*v, &frontendIPConfigurationPropertiesFormat) + if err != nil { + return err + } + fic.FrontendIPConfigurationPropertiesFormat = &frontendIPConfigurationPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + fic.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + fic.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + fic.ID = &ID + } + } + } + + return nil +} + +// FrontendIPConfigurationPropertiesFormat properties of Frontend IP Configuration of the load balancer. +type FrontendIPConfigurationPropertiesFormat struct { + // InboundNatRules - Read only. Inbound rules URIs that use this frontend IP. + InboundNatRules *[]SubResource `json:"inboundNatRules,omitempty"` + // InboundNatPools - Read only. Inbound pools URIs that use this frontend IP. + InboundNatPools *[]SubResource `json:"inboundNatPools,omitempty"` + // OutboundNatRules - Read only. Outbound rules URIs that use this frontend IP. + OutboundNatRules *[]SubResource `json:"outboundNatRules,omitempty"` + // LoadBalancingRules - Gets load balancing rules URIs that use this frontend IP. + LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` + // PrivateIPAddress - The private IP address of the IP configuration. + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + // PrivateIPAllocationMethod - The Private IP allocation method. Possible values are: 'Static' and 'Dynamic'. Possible values include: 'Static', 'Dynamic' + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + // Subnet - The reference of the subnet resource. + Subnet *Subnet `json:"subnet,omitempty"` + // PublicIPAddress - The reference of the Public IP resource. + PublicIPAddress *PublicIPAddress `json:"publicIPAddress,omitempty"` + // ProvisioningState - Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// InboundNatPool inbound NAT pool of the load balancer. +type InboundNatPool struct { + *InboundNatPoolPropertiesFormat `json:"properties,omitempty"` + // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for InboundNatPool. 
+func (inp InboundNatPool) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if inp.InboundNatPoolPropertiesFormat != nil { + objectMap["properties"] = inp.InboundNatPoolPropertiesFormat + } + if inp.Name != nil { + objectMap["name"] = inp.Name + } + if inp.Etag != nil { + objectMap["etag"] = inp.Etag + } + if inp.ID != nil { + objectMap["id"] = inp.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for InboundNatPool struct. +func (inp *InboundNatPool) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var inboundNatPoolPropertiesFormat InboundNatPoolPropertiesFormat + err = json.Unmarshal(*v, &inboundNatPoolPropertiesFormat) + if err != nil { + return err + } + inp.InboundNatPoolPropertiesFormat = &inboundNatPoolPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + inp.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + inp.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + inp.ID = &ID + } + } + } + + return nil +} + +// InboundNatPoolPropertiesFormat properties of Inbound NAT pool. +type InboundNatPoolPropertiesFormat struct { + // FrontendIPConfiguration - A reference to frontend IP addresses. + FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` + // Protocol - The transport protocol for the endpoint. Possible values are: 'Udp' or 'Tcp'. Possible values include: 'TransportProtocolUDP', 'TransportProtocolTCP' + Protocol TransportProtocol `json:"protocol,omitempty"` + // FrontendPortRangeStart - The first port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values range between 1 and 65534. + FrontendPortRangeStart *int32 `json:"frontendPortRangeStart,omitempty"` + // FrontendPortRangeEnd - The last port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with a load balancer. Acceptable values range between 1 and 65535. + FrontendPortRangeEnd *int32 `json:"frontendPortRangeEnd,omitempty"` + // BackendPort - The port used for internal connections on the endpoint. Acceptable values are between 1 and 65535. + BackendPort *int32 `json:"backendPort,omitempty"` + // ProvisioningState - Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// InboundNatRule inbound NAT rule of the load balancer. +type InboundNatRule struct { + *InboundNatRulePropertiesFormat `json:"properties,omitempty"` + // Name - Gets name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for InboundNatRule. 
+func (inr InboundNatRule) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if inr.InboundNatRulePropertiesFormat != nil { + objectMap["properties"] = inr.InboundNatRulePropertiesFormat + } + if inr.Name != nil { + objectMap["name"] = inr.Name + } + if inr.Etag != nil { + objectMap["etag"] = inr.Etag + } + if inr.ID != nil { + objectMap["id"] = inr.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for InboundNatRule struct. +func (inr *InboundNatRule) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var inboundNatRulePropertiesFormat InboundNatRulePropertiesFormat + err = json.Unmarshal(*v, &inboundNatRulePropertiesFormat) + if err != nil { + return err + } + inr.InboundNatRulePropertiesFormat = &inboundNatRulePropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + inr.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + inr.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + inr.ID = &ID + } + } + } + + return nil +} + +// InboundNatRulePropertiesFormat properties of the inbound NAT rule. +type InboundNatRulePropertiesFormat struct { + // FrontendIPConfiguration - A reference to frontend IP addresses. + FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` + // BackendIPConfiguration - A reference to a private IP address defined on a network interface of a VM. Traffic sent to the frontend port of each of the frontend IP configurations is forwarded to the backed IP. + BackendIPConfiguration *InterfaceIPConfiguration `json:"backendIPConfiguration,omitempty"` + // Protocol - The transport protocol for the endpoint. Possible values are: 'Udp' or 'Tcp'. Possible values include: 'TransportProtocolUDP', 'TransportProtocolTCP' + Protocol TransportProtocol `json:"protocol,omitempty"` + // FrontendPort - The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values range from 1 to 65534. + FrontendPort *int32 `json:"frontendPort,omitempty"` + // BackendPort - The port used for the internal endpoint. Acceptable values range from 1 to 65535. + BackendPort *int32 `json:"backendPort,omitempty"` + // IdleTimeoutInMinutes - The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP. + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` + // EnableFloatingIP - Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint. + EnableFloatingIP *bool `json:"enableFloatingIP,omitempty"` + // ProvisioningState - Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// Interface a network interface in a resource group. 
+type Interface struct { + autorest.Response `json:"-"` + *InterfacePropertiesFormat `json:"properties,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Interface. +func (i Interface) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if i.InterfacePropertiesFormat != nil { + objectMap["properties"] = i.InterfacePropertiesFormat + } + if i.Etag != nil { + objectMap["etag"] = i.Etag + } + if i.ID != nil { + objectMap["id"] = i.ID + } + if i.Location != nil { + objectMap["location"] = i.Location + } + if i.Tags != nil { + objectMap["tags"] = i.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Interface struct. +func (i *Interface) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var interfacePropertiesFormat InterfacePropertiesFormat + err = json.Unmarshal(*v, &interfacePropertiesFormat) + if err != nil { + return err + } + i.InterfacePropertiesFormat = &interfacePropertiesFormat + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + i.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + i.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + i.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + i.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + i.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + i.Tags = tags + } + } + } + + return nil +} + +// InterfaceDNSSettings DNS settings of a network interface. +type InterfaceDNSSettings struct { + // DNSServers - List of DNS servers IP addresses. Use 'AzureProvidedDNS' to switch to azure provided DNS resolution. 'AzureProvidedDNS' value cannot be combined with other IPs, it must be the only value in dnsServers collection. + DNSServers *[]string `json:"dnsServers,omitempty"` + // AppliedDNSServers - If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers from all NICs that are part of the Availability Set. This property is what is configured on each of those VMs. + AppliedDNSServers *[]string `json:"appliedDnsServers,omitempty"` + // InternalDNSNameLabel - Relative DNS name for this NIC used for internal communications between VMs in the same virtual network. + InternalDNSNameLabel *string `json:"internalDnsNameLabel,omitempty"` + // InternalFqdn - Fully qualified DNS name supporting internal communications between VMs in the same virtual network. 
+ InternalFqdn *string `json:"internalFqdn,omitempty"` +} + +// InterfaceIPConfiguration iPConfiguration in a network interface. +type InterfaceIPConfiguration struct { + *InterfaceIPConfigurationPropertiesFormat `json:"properties,omitempty"` + // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for InterfaceIPConfiguration. +func (iic InterfaceIPConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if iic.InterfaceIPConfigurationPropertiesFormat != nil { + objectMap["properties"] = iic.InterfaceIPConfigurationPropertiesFormat + } + if iic.Name != nil { + objectMap["name"] = iic.Name + } + if iic.Etag != nil { + objectMap["etag"] = iic.Etag + } + if iic.ID != nil { + objectMap["id"] = iic.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for InterfaceIPConfiguration struct. +func (iic *InterfaceIPConfiguration) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var interfaceIPConfigurationPropertiesFormat InterfaceIPConfigurationPropertiesFormat + err = json.Unmarshal(*v, &interfaceIPConfigurationPropertiesFormat) + if err != nil { + return err + } + iic.InterfaceIPConfigurationPropertiesFormat = &interfaceIPConfigurationPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + iic.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + iic.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + iic.ID = &ID + } + } + } + + return nil +} + +// InterfaceIPConfigurationPropertiesFormat properties of IP configuration. +type InterfaceIPConfigurationPropertiesFormat struct { + // LoadBalancerBackendAddressPools - The reference of LoadBalancerBackendAddressPool resource. + LoadBalancerBackendAddressPools *[]BackendAddressPool `json:"loadBalancerBackendAddressPools,omitempty"` + // LoadBalancerInboundNatRules - A list of references of LoadBalancerInboundNatRules. + LoadBalancerInboundNatRules *[]InboundNatRule `json:"loadBalancerInboundNatRules,omitempty"` + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + // PrivateIPAllocationMethod - Defines how a private IP address is assigned. Possible values are: 'Static' and 'Dynamic'. Possible values include: 'Static', 'Dynamic' + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + Subnet *Subnet `json:"subnet,omitempty"` + // Primary - Gets whether this is a primary customer address on the network interface. + Primary *bool `json:"primary,omitempty"` + PublicIPAddress *PublicIPAddress `json:"publicIPAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// InterfaceListResult response for the ListNetworkInterface API service call. +type InterfaceListResult struct { + autorest.Response `json:"-"` + // Value - A list of network interfaces in a resource group. 
+ Value *[]Interface `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// InterfaceListResultIterator provides access to a complete listing of Interface values. +type InterfaceListResultIterator struct { + i int + page InterfaceListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *InterfaceListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/InterfaceListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *InterfaceListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter InterfaceListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter InterfaceListResultIterator) Response() InterfaceListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter InterfaceListResultIterator) Value() Interface { + if !iter.page.NotDone() { + return Interface{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the InterfaceListResultIterator type. +func NewInterfaceListResultIterator(page InterfaceListResultPage) InterfaceListResultIterator { + return InterfaceListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (ilr InterfaceListResult) IsEmpty() bool { + return ilr.Value == nil || len(*ilr.Value) == 0 +} + +// interfaceListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (ilr InterfaceListResult) interfaceListResultPreparer(ctx context.Context) (*http.Request, error) { + if ilr.NextLink == nil || len(to.String(ilr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(ilr.NextLink))) +} + +// InterfaceListResultPage contains a page of Interface values. +type InterfaceListResultPage struct { + fn func(context.Context, InterfaceListResult) (InterfaceListResult, error) + ilr InterfaceListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
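// Sketch (editorial, hedged): how the InterfaceListResult page/iterator pair above is
// walked. The page function here is a stub that serves one fixed page and then stops;
// a real one follows NextLink. Names are invented; assumed imports are "context",
// "fmt", the go-autorest "to" helpers and this network package.
func exampleInterfacePaging() {
	ctx := context.Background()

	// Stub page fetcher: first call returns the only page, second call reports no
	// further results. It exists purely to drive the iterator without a live service.
	fetch := func(ctx context.Context, cur network.InterfaceListResult) (network.InterfaceListResult, error) {
		if cur.Value == nil {
			return network.InterfaceListResult{Value: &[]network.Interface{
				{Name: to.StringPtr("nic-0")},
				{Name: to.StringPtr("nic-1")},
			}}, nil
		}
		return network.InterfaceListResult{}, nil
	}

	page := network.NewInterfaceListResultPage(fetch)
	// Load the first page, as the generated List methods do before returning.
	if err := page.NextWithContext(ctx); err != nil {
		panic(err)
	}

	iter := network.NewInterfaceListResultIterator(page)
	for iter.NotDone() {
		fmt.Println(to.String(iter.Value().Name))
		if err := iter.NextWithContext(ctx); err != nil {
			panic(err)
		}
	}
	// Output, roughly: nic-0 then nic-1.
}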
+func (page *InterfaceListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/InterfaceListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.ilr) + if err != nil { + return err + } + page.ilr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *InterfaceListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page InterfaceListResultPage) NotDone() bool { + return !page.ilr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page InterfaceListResultPage) Response() InterfaceListResult { + return page.ilr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page InterfaceListResultPage) Values() []Interface { + if page.ilr.IsEmpty() { + return nil + } + return *page.ilr.Value +} + +// Creates a new instance of the InterfaceListResultPage type. +func NewInterfaceListResultPage(getNextPage func(context.Context, InterfaceListResult) (InterfaceListResult, error)) InterfaceListResultPage { + return InterfaceListResultPage{fn: getNextPage} +} + +// InterfacePropertiesFormat networkInterface properties. +type InterfacePropertiesFormat struct { + // VirtualMachine - The reference of a virtual machine. + VirtualMachine *SubResource `json:"virtualMachine,omitempty"` + // NetworkSecurityGroup - The reference of the NetworkSecurityGroup resource. + NetworkSecurityGroup *SecurityGroup `json:"networkSecurityGroup,omitempty"` + // IPConfigurations - A list of IPConfigurations of the network interface. + IPConfigurations *[]InterfaceIPConfiguration `json:"ipConfigurations,omitempty"` + // DNSSettings - The DNS settings in network interface. + DNSSettings *InterfaceDNSSettings `json:"dnsSettings,omitempty"` + // MacAddress - The MAC address of the network interface. + MacAddress *string `json:"macAddress,omitempty"` + // Primary - Gets whether this is a primary network interface on a virtual machine. + Primary *bool `json:"primary,omitempty"` + // EnableIPForwarding - Indicates whether IP forwarding is enabled on this network interface. + EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"` + // ResourceGUID - The resource GUID property of the network interface resource. + ResourceGUID *string `json:"resourceGuid,omitempty"` + // ProvisioningState - The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// InterfacesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type InterfacesCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
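// Sketch (editorial, hedged): assembling a NIC request model from the types above — an
// InterfacePropertiesFormat holding one InterfaceIPConfiguration with a dynamic
// private IP plus DNS settings. The subscription/subnet path, labels and location are
// placeholders; Subnet is this package's subnet type (referenced by ID here), and the
// to.* pointer helpers come from github.com/Azure/go-autorest/autorest/to.
func exampleInterfaceModel() network.Interface {
	subnetID := "/subscriptions/<sub>/resourceGroups/demo-rg/providers/Microsoft.Network/virtualNetworks/demo-vnet/subnets/default"
	return network.Interface{
		Location: to.StringPtr("eastus"),
		InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
			IPConfigurations: &[]network.InterfaceIPConfiguration{{
				Name: to.StringPtr("ipconfig1"),
				InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
					PrivateIPAllocationMethod: network.Dynamic,
					Subnet:                    &network.Subnet{ID: to.StringPtr(subnetID)},
					Primary:                   to.BoolPtr(true),
				},
			}},
			DNSSettings: &network.InterfaceDNSSettings{
				InternalDNSNameLabel: to.StringPtr("demo-nic"),
			},
			EnableIPForwarding: to.BoolPtr(false),
		},
	}
}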
+func (future *InterfacesCreateOrUpdateFuture) Result(client InterfacesClient) (i Interface, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfacesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if i.Response.Response, err = future.GetResult(sender); err == nil && i.Response.Response.StatusCode != http.StatusNoContent { + i, err = client.CreateOrUpdateResponder(i.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesCreateOrUpdateFuture", "Result", i.Response.Response, "Failure responding to request") + } + } + return +} + +// InterfacesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type InterfacesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *InterfacesDeleteFuture) Result(client InterfacesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.InterfacesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// IPConfiguration iPConfiguration +type IPConfiguration struct { + *IPConfigurationPropertiesFormat `json:"properties,omitempty"` + // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for IPConfiguration. +func (ic IPConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if ic.IPConfigurationPropertiesFormat != nil { + objectMap["properties"] = ic.IPConfigurationPropertiesFormat + } + if ic.Name != nil { + objectMap["name"] = ic.Name + } + if ic.Etag != nil { + objectMap["etag"] = ic.Etag + } + if ic.ID != nil { + objectMap["id"] = ic.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for IPConfiguration struct. 
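// Sketch (editorial, hedged): how the *CreateOrUpdateFuture above is typically
// consumed. It assumes the generated InterfacesClient exposes NewInterfacesClient and
// CreateOrUpdate(ctx, resourceGroup, nicName, parameters) returning an
// InterfacesCreateOrUpdateFuture, and that credentials come from
// auth.NewAuthorizerFromEnvironment in github.com/Azure/go-autorest/autorest/azure/auth.
// The resource group and NIC name are placeholders.
func exampleCreateNIC(ctx context.Context, subscriptionID string, nic network.Interface) (network.Interface, error) {
	client := network.NewInterfacesClient(subscriptionID)
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		return network.Interface{}, err
	}
	client.Authorizer = authorizer

	future, err := client.CreateOrUpdate(ctx, "demo-rg", "demo-nic", nic)
	if err != nil {
		return network.Interface{}, err
	}
	// Poll until the service reports the operation finished, then decode the final
	// resource through the future's Result method defined above.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return network.Interface{}, err
	}
	return future.Result(client)
}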
+func (ic *IPConfiguration) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var IPConfigurationPropertiesFormat IPConfigurationPropertiesFormat + err = json.Unmarshal(*v, &IPConfigurationPropertiesFormat) + if err != nil { + return err + } + ic.IPConfigurationPropertiesFormat = &IPConfigurationPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + ic.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + ic.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + ic.ID = &ID + } + } + } + + return nil +} + +// IPConfigurationPropertiesFormat properties of IP configuration. +type IPConfigurationPropertiesFormat struct { + // PrivateIPAddress - The private IP address of the IP configuration. + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + // PrivateIPAllocationMethod - The private IP allocation method. Possible values are 'Static' and 'Dynamic'. Possible values include: 'Static', 'Dynamic' + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + // Subnet - The reference of the subnet resource. + Subnet *Subnet `json:"subnet,omitempty"` + // PublicIPAddress - The reference of the public IP resource. + PublicIPAddress *PublicIPAddress `json:"publicIPAddress,omitempty"` + // ProvisioningState - Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// LoadBalancer loadBalancer resource +type LoadBalancer struct { + autorest.Response `json:"-"` + *LoadBalancerPropertiesFormat `json:"properties,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for LoadBalancer. +func (lb LoadBalancer) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if lb.LoadBalancerPropertiesFormat != nil { + objectMap["properties"] = lb.LoadBalancerPropertiesFormat + } + if lb.Etag != nil { + objectMap["etag"] = lb.Etag + } + if lb.ID != nil { + objectMap["id"] = lb.ID + } + if lb.Location != nil { + objectMap["location"] = lb.Location + } + if lb.Tags != nil { + objectMap["tags"] = lb.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for LoadBalancer struct. 
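// Sketch (editorial, hedged): why the MarshalJSON above leaves out Name and Type — the
// READ-ONLY resource fields are populated by the service on reads but are not sent
// back on writes. Values are invented; assumed imports are "encoding/json", "fmt" and
// the go-autorest "to" helpers.
func exampleLoadBalancerReadOnlyFields() {
	lb := network.LoadBalancer{
		Name:     to.StringPtr("demo-lb"),                         // READ-ONLY: dropped by MarshalJSON
		Type:     to.StringPtr("Microsoft.Network/loadBalancers"), // READ-ONLY: dropped by MarshalJSON
		Location: to.StringPtr("eastus"),                          // writable: kept
	}
	b, err := json.Marshal(lb)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// Output, roughly: {"location":"eastus"}
}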
+func (lb *LoadBalancer) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var loadBalancerPropertiesFormat LoadBalancerPropertiesFormat + err = json.Unmarshal(*v, &loadBalancerPropertiesFormat) + if err != nil { + return err + } + lb.LoadBalancerPropertiesFormat = &loadBalancerPropertiesFormat + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + lb.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + lb.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + lb.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + lb.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + lb.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + lb.Tags = tags + } + } + } + + return nil +} + +// LoadBalancerListResult response for ListLoadBalancers API service call. +type LoadBalancerListResult struct { + autorest.Response `json:"-"` + // Value - A list of load balancers in a resource group. + Value *[]LoadBalancer `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// LoadBalancerListResultIterator provides access to a complete listing of LoadBalancer values. +type LoadBalancerListResultIterator struct { + i int + page LoadBalancerListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *LoadBalancerListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancerListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *LoadBalancerListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter LoadBalancerListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter LoadBalancerListResultIterator) Response() LoadBalancerListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter LoadBalancerListResultIterator) Value() LoadBalancer { + if !iter.page.NotDone() { + return LoadBalancer{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the LoadBalancerListResultIterator type. +func NewLoadBalancerListResultIterator(page LoadBalancerListResultPage) LoadBalancerListResultIterator { + return LoadBalancerListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (lblr LoadBalancerListResult) IsEmpty() bool { + return lblr.Value == nil || len(*lblr.Value) == 0 +} + +// loadBalancerListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (lblr LoadBalancerListResult) loadBalancerListResultPreparer(ctx context.Context) (*http.Request, error) { + if lblr.NextLink == nil || len(to.String(lblr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(lblr.NextLink))) +} + +// LoadBalancerListResultPage contains a page of LoadBalancer values. +type LoadBalancerListResultPage struct { + fn func(context.Context, LoadBalancerListResult) (LoadBalancerListResult, error) + lblr LoadBalancerListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *LoadBalancerListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancerListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.lblr) + if err != nil { + return err + } + page.lblr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *LoadBalancerListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page LoadBalancerListResultPage) NotDone() bool { + return !page.lblr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page LoadBalancerListResultPage) Response() LoadBalancerListResult { + return page.lblr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page LoadBalancerListResultPage) Values() []LoadBalancer { + if page.lblr.IsEmpty() { + return nil + } + return *page.lblr.Value +} + +// Creates a new instance of the LoadBalancerListResultPage type. +func NewLoadBalancerListResultPage(getNextPage func(context.Context, LoadBalancerListResult) (LoadBalancerListResult, error)) LoadBalancerListResultPage { + return LoadBalancerListResultPage{fn: getNextPage} +} + +// LoadBalancerPropertiesFormat properties of the load balancer. 
+type LoadBalancerPropertiesFormat struct { + // FrontendIPConfigurations - Object representing the frontend IPs to be used for the load balancer + FrontendIPConfigurations *[]FrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"` + // BackendAddressPools - Collection of backend address pools used by a load balancer + BackendAddressPools *[]BackendAddressPool `json:"backendAddressPools,omitempty"` + // LoadBalancingRules - Object collection representing the load balancing rules Gets the provisioning + LoadBalancingRules *[]LoadBalancingRule `json:"loadBalancingRules,omitempty"` + // Probes - Collection of probe objects used in the load balancer + Probes *[]Probe `json:"probes,omitempty"` + // InboundNatRules - Collection of inbound NAT Rules used by a load balancer. Defining inbound NAT rules on your load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an Inbound NAT pool. They have to reference individual inbound NAT rules. + InboundNatRules *[]InboundNatRule `json:"inboundNatRules,omitempty"` + // InboundNatPools - Defines an external port range for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound Nat rules. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an inbound NAT pool. They have to reference individual inbound NAT rules. + InboundNatPools *[]InboundNatPool `json:"inboundNatPools,omitempty"` + // OutboundNatRules - The outbound NAT rules. + OutboundNatRules *[]OutboundNatRule `json:"outboundNatRules,omitempty"` + // ResourceGUID - The resource GUID property of the load balancer resource. + ResourceGUID *string `json:"resourceGuid,omitempty"` + // ProvisioningState - Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// LoadBalancersCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type LoadBalancersCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
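// Sketch (editorial, hedged): wiring the load balancer sub-resources above together.
// Child objects are cross-referenced by SubResource ID, so the probe, pool and
// frontend IDs below are spelled out as ARM paths relative to the (placeholder) load
// balancer ID; all names are invented. A real payload would also need
// FrontendIPConfigurations, whose type is defined elsewhere in this file.
func exampleLoadBalancerProperties(lbID string) network.LoadBalancerPropertiesFormat {
	probeID := lbID + "/probes/http-probe"
	poolID := lbID + "/backendAddressPools/web-pool"
	frontendID := lbID + "/frontendIPConfigurations/frontend-1"

	return network.LoadBalancerPropertiesFormat{
		BackendAddressPools: &[]network.BackendAddressPool{{Name: to.StringPtr("web-pool")}},
		Probes: &[]network.Probe{{
			Name: to.StringPtr("http-probe"),
			ProbePropertiesFormat: &network.ProbePropertiesFormat{
				Protocol:    network.ProbeProtocolHTTP,
				Port:        to.Int32Ptr(80),
				RequestPath: to.StringPtr("/healthz"),
			},
		}},
		LoadBalancingRules: &[]network.LoadBalancingRule{{
			Name: to.StringPtr("http-rule"),
			LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{
				FrontendIPConfiguration: &network.SubResource{ID: to.StringPtr(frontendID)},
				BackendAddressPool:      &network.SubResource{ID: to.StringPtr(poolID)},
				Probe:                   &network.SubResource{ID: to.StringPtr(probeID)},
				Protocol:                network.TransportProtocolTCP,
				FrontendPort:            to.Int32Ptr(80),
				BackendPort:             to.Int32Ptr(80),
			},
		}},
	}
}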
+func (future *LoadBalancersCreateOrUpdateFuture) Result(client LoadBalancersClient) (lb LoadBalancer, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LoadBalancersCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if lb.Response.Response, err = future.GetResult(sender); err == nil && lb.Response.Response.StatusCode != http.StatusNoContent { + lb, err = client.CreateOrUpdateResponder(lb.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersCreateOrUpdateFuture", "Result", lb.Response.Response, "Failure responding to request") + } + } + return +} + +// LoadBalancersDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type LoadBalancersDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *LoadBalancersDeleteFuture) Result(client LoadBalancersClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LoadBalancersDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LoadBalancersDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// LoadBalancingRule a load balancing rule for a load balancer. +type LoadBalancingRule struct { + *LoadBalancingRulePropertiesFormat `json:"properties,omitempty"` + // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for LoadBalancingRule. +func (lbr LoadBalancingRule) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if lbr.LoadBalancingRulePropertiesFormat != nil { + objectMap["properties"] = lbr.LoadBalancingRulePropertiesFormat + } + if lbr.Name != nil { + objectMap["name"] = lbr.Name + } + if lbr.Etag != nil { + objectMap["etag"] = lbr.Etag + } + if lbr.ID != nil { + objectMap["id"] = lbr.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for LoadBalancingRule struct. 
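// Sketch (editorial, hedged): the delete-side counterpart of the create flow. It
// assumes the generated LoadBalancersClient exposes Delete(ctx, resourceGroup, name)
// returning the LoadBalancersDeleteFuture above; its Result yields only the raw
// autorest.Response, so the HTTP status code is all there is to inspect. Names are
// placeholders.
func exampleDeleteLoadBalancer(ctx context.Context, client network.LoadBalancersClient) error {
	future, err := client.Delete(ctx, "demo-rg", "demo-lb")
	if err != nil {
		return err
	}
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	resp, err := future.Result(client)
	if err != nil {
		return err
	}
	if resp.Response != nil {
		// 200/202/204 are the usual terminal codes for an ARM delete.
		fmt.Println("delete finished with status", resp.StatusCode)
	}
	return nil
}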
+func (lbr *LoadBalancingRule) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var loadBalancingRulePropertiesFormat LoadBalancingRulePropertiesFormat + err = json.Unmarshal(*v, &loadBalancingRulePropertiesFormat) + if err != nil { + return err + } + lbr.LoadBalancingRulePropertiesFormat = &loadBalancingRulePropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + lbr.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + lbr.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + lbr.ID = &ID + } + } + } + + return nil +} + +// LoadBalancingRulePropertiesFormat properties of the load balancer. +type LoadBalancingRulePropertiesFormat struct { + // FrontendIPConfiguration - A reference to frontend IP addresses. + FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` + // BackendAddressPool - A reference to a pool of DIPs. Inbound traffic is randomly load balanced across IPs in the backend IPs. + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + // Probe - The reference of the load balancer probe used by the load balancing rule. + Probe *SubResource `json:"probe,omitempty"` + // Protocol - The transport protocol for the external endpoint. Possible values are 'Udp' or 'Tcp'. Possible values include: 'TransportProtocolUDP', 'TransportProtocolTCP' + Protocol TransportProtocol `json:"protocol,omitempty"` + // LoadDistribution - The load distribution policy for this rule. Possible values are 'Default', 'SourceIP', and 'SourceIPProtocol'. Possible values include: 'Default', 'SourceIP', 'SourceIPProtocol' + LoadDistribution LoadDistribution `json:"loadDistribution,omitempty"` + // FrontendPort - The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values are between 1 and 65534. + FrontendPort *int32 `json:"frontendPort,omitempty"` + // BackendPort - The port used for internal connections on the endpoint. Acceptable values are between 1 and 65535. + BackendPort *int32 `json:"backendPort,omitempty"` + // IdleTimeoutInMinutes - The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP. + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` + // EnableFloatingIP - Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint. + EnableFloatingIP *bool `json:"enableFloatingIP,omitempty"` + // ProvisioningState - Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
+ ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// LocalNetworkGateway a common class for general resource information +type LocalNetworkGateway struct { + autorest.Response `json:"-"` + *LocalNetworkGatewayPropertiesFormat `json:"properties,omitempty"` + // Etag - Gets a unique read-only string that changes whenever the resource is updated + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for LocalNetworkGateway. +func (lng LocalNetworkGateway) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if lng.LocalNetworkGatewayPropertiesFormat != nil { + objectMap["properties"] = lng.LocalNetworkGatewayPropertiesFormat + } + if lng.Etag != nil { + objectMap["etag"] = lng.Etag + } + if lng.ID != nil { + objectMap["id"] = lng.ID + } + if lng.Location != nil { + objectMap["location"] = lng.Location + } + if lng.Tags != nil { + objectMap["tags"] = lng.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for LocalNetworkGateway struct. +func (lng *LocalNetworkGateway) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var localNetworkGatewayPropertiesFormat LocalNetworkGatewayPropertiesFormat + err = json.Unmarshal(*v, &localNetworkGatewayPropertiesFormat) + if err != nil { + return err + } + lng.LocalNetworkGatewayPropertiesFormat = &localNetworkGatewayPropertiesFormat + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + lng.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + lng.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + lng.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + lng.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + lng.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + lng.Tags = tags + } + } + } + + return nil +} + +// LocalNetworkGatewayListResult response for ListLocalNetworkGateways API service call. +type LocalNetworkGatewayListResult struct { + autorest.Response `json:"-"` + // Value - A list of local network gateways that exists in a resource group. + Value *[]LocalNetworkGateway `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// LocalNetworkGatewayListResultIterator provides access to a complete listing of LocalNetworkGateway +// values. +type LocalNetworkGatewayListResultIterator struct { + i int + page LocalNetworkGatewayListResultPage +} + +// NextWithContext advances to the next value. 
If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *LocalNetworkGatewayListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LocalNetworkGatewayListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *LocalNetworkGatewayListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter LocalNetworkGatewayListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter LocalNetworkGatewayListResultIterator) Response() LocalNetworkGatewayListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter LocalNetworkGatewayListResultIterator) Value() LocalNetworkGateway { + if !iter.page.NotDone() { + return LocalNetworkGateway{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the LocalNetworkGatewayListResultIterator type. +func NewLocalNetworkGatewayListResultIterator(page LocalNetworkGatewayListResultPage) LocalNetworkGatewayListResultIterator { + return LocalNetworkGatewayListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (lnglr LocalNetworkGatewayListResult) IsEmpty() bool { + return lnglr.Value == nil || len(*lnglr.Value) == 0 +} + +// localNetworkGatewayListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (lnglr LocalNetworkGatewayListResult) localNetworkGatewayListResultPreparer(ctx context.Context) (*http.Request, error) { + if lnglr.NextLink == nil || len(to.String(lnglr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(lnglr.NextLink))) +} + +// LocalNetworkGatewayListResultPage contains a page of LocalNetworkGateway values. +type LocalNetworkGatewayListResultPage struct { + fn func(context.Context, LocalNetworkGatewayListResult) (LocalNetworkGatewayListResult, error) + lnglr LocalNetworkGatewayListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
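// Sketch (editorial, hedged): in practice the iterator above comes from the generated
// client rather than being built by hand. This assumes LocalNetworkGatewaysClient
// exposes ListComplete(ctx, resourceGroup) returning a
// LocalNetworkGatewayListResultIterator, following the usual generated-client pattern;
// the resource group name is a placeholder.
func exampleListLocalGateways(ctx context.Context, client network.LocalNetworkGatewaysClient) error {
	iter, err := client.ListComplete(ctx, "demo-rg")
	if err != nil {
		return err
	}
	// NextWithContext follows nextLink transparently, so this loop walks every page.
	for iter.NotDone() {
		gw := iter.Value()
		fmt.Println(to.String(gw.Name), to.String(gw.ID))
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}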
+func (page *LocalNetworkGatewayListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LocalNetworkGatewayListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.lnglr) + if err != nil { + return err + } + page.lnglr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *LocalNetworkGatewayListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page LocalNetworkGatewayListResultPage) NotDone() bool { + return !page.lnglr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page LocalNetworkGatewayListResultPage) Response() LocalNetworkGatewayListResult { + return page.lnglr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page LocalNetworkGatewayListResultPage) Values() []LocalNetworkGateway { + if page.lnglr.IsEmpty() { + return nil + } + return *page.lnglr.Value +} + +// Creates a new instance of the LocalNetworkGatewayListResultPage type. +func NewLocalNetworkGatewayListResultPage(getNextPage func(context.Context, LocalNetworkGatewayListResult) (LocalNetworkGatewayListResult, error)) LocalNetworkGatewayListResultPage { + return LocalNetworkGatewayListResultPage{fn: getNextPage} +} + +// LocalNetworkGatewayPropertiesFormat localNetworkGateway properties +type LocalNetworkGatewayPropertiesFormat struct { + // LocalNetworkAddressSpace - Local network site address space. + LocalNetworkAddressSpace *AddressSpace `json:"localNetworkAddressSpace,omitempty"` + // GatewayIPAddress - IP address of local network gateway. + GatewayIPAddress *string `json:"gatewayIpAddress,omitempty"` + // BgpSettings - Local network gateway's BGP speaker settings. + BgpSettings *BgpSettings `json:"bgpSettings,omitempty"` + // ResourceGUID - The resource GUID property of the LocalNetworkGateway resource. + ResourceGUID *string `json:"resourceGuid,omitempty"` + // ProvisioningState - Gets or sets Provisioning state of the LocalNetworkGateway resource Updating/Deleting/Failed + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// LocalNetworkGatewaysCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type LocalNetworkGatewaysCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
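// Sketch (editorial, hedged): a request model for registering an on-premises site with
// the properties above. AddressSpace is assumed to carry an AddressPrefixes *[]string
// field (it is defined elsewhere in this file); the public IP and CIDR are invented.
func exampleLocalGatewayModel() network.LocalNetworkGateway {
	prefixes := []string{"10.10.0.0/16"}
	return network.LocalNetworkGateway{
		Location: to.StringPtr("eastus"),
		LocalNetworkGatewayPropertiesFormat: &network.LocalNetworkGatewayPropertiesFormat{
			// Public IP of the on-premises VPN device (placeholder, TEST-NET range).
			GatewayIPAddress:         to.StringPtr("203.0.113.10"),
			LocalNetworkAddressSpace: &network.AddressSpace{AddressPrefixes: &prefixes},
		},
	}
}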
+func (future *LocalNetworkGatewaysCreateOrUpdateFuture) Result(client LocalNetworkGatewaysClient) (lng LocalNetworkGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if lng.Response.Response, err = future.GetResult(sender); err == nil && lng.Response.Response.StatusCode != http.StatusNoContent { + lng, err = client.CreateOrUpdateResponder(lng.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysCreateOrUpdateFuture", "Result", lng.Response.Response, "Failure responding to request") + } + } + return +} + +// LocalNetworkGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type LocalNetworkGatewaysDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *LocalNetworkGatewaysDeleteFuture) Result(client LocalNetworkGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.LocalNetworkGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.LocalNetworkGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// OutboundNatRule outbound NAT pool of the load balancer. +type OutboundNatRule struct { + *OutboundNatRulePropertiesFormat `json:"properties,omitempty"` + // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for OutboundNatRule. +func (onr OutboundNatRule) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if onr.OutboundNatRulePropertiesFormat != nil { + objectMap["properties"] = onr.OutboundNatRulePropertiesFormat + } + if onr.Name != nil { + objectMap["name"] = onr.Name + } + if onr.Etag != nil { + objectMap["etag"] = onr.Etag + } + if onr.ID != nil { + objectMap["id"] = onr.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for OutboundNatRule struct. 
+func (onr *OutboundNatRule) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var outboundNatRulePropertiesFormat OutboundNatRulePropertiesFormat + err = json.Unmarshal(*v, &outboundNatRulePropertiesFormat) + if err != nil { + return err + } + onr.OutboundNatRulePropertiesFormat = &outboundNatRulePropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + onr.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + onr.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + onr.ID = &ID + } + } + } + + return nil +} + +// OutboundNatRulePropertiesFormat outbound NAT pool of the load balancer. +type OutboundNatRulePropertiesFormat struct { + // AllocatedOutboundPorts - The number of outbound ports to be used for NAT. + AllocatedOutboundPorts *int32 `json:"allocatedOutboundPorts,omitempty"` + // FrontendIPConfigurations - The Frontend IP addresses of the load balancer. + FrontendIPConfigurations *[]SubResource `json:"frontendIPConfigurations,omitempty"` + // BackendAddressPool - A reference to a pool of DIPs. Outbound traffic is randomly load balanced across IPs in the backend IPs. + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + // ProvisioningState - Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// Probe a load balancer probe. +type Probe struct { + *ProbePropertiesFormat `json:"properties,omitempty"` + // Name - Gets name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for Probe. +func (p Probe) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if p.ProbePropertiesFormat != nil { + objectMap["properties"] = p.ProbePropertiesFormat + } + if p.Name != nil { + objectMap["name"] = p.Name + } + if p.Etag != nil { + objectMap["etag"] = p.Etag + } + if p.ID != nil { + objectMap["id"] = p.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Probe struct. 
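// Sketch (editorial, hedged): the outbound NAT (SNAT) rule shape above — a fixed
// outbound port budget on a frontend IP, pointed at a backend pool via SubResource
// IDs. The IDs and port count are invented placeholders.
func exampleOutboundNatRule(frontendID, poolID string) network.OutboundNatRule {
	return network.OutboundNatRule{
		Name: to.StringPtr("snat-web"),
		OutboundNatRulePropertiesFormat: &network.OutboundNatRulePropertiesFormat{
			AllocatedOutboundPorts:   to.Int32Ptr(1024),
			FrontendIPConfigurations: &[]network.SubResource{{ID: to.StringPtr(frontendID)}},
			BackendAddressPool:       &network.SubResource{ID: to.StringPtr(poolID)},
		},
	}
}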
+func (p *Probe) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var probePropertiesFormat ProbePropertiesFormat + err = json.Unmarshal(*v, &probePropertiesFormat) + if err != nil { + return err + } + p.ProbePropertiesFormat = &probePropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + p.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + p.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + p.ID = &ID + } + } + } + + return nil +} + +// ProbePropertiesFormat ... +type ProbePropertiesFormat struct { + // LoadBalancingRules - The load balancer rules that use this probe. + LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` + // Protocol - The protocol of the end point. Possible values are: 'Http' or 'Tcp'. If 'Tcp' is specified, a received ACK is required for the probe to be successful. If 'Http' is specified, a 200 OK response from the specifies URI is required for the probe to be successful. Possible values include: 'ProbeProtocolHTTP', 'ProbeProtocolTCP' + Protocol ProbeProtocol `json:"protocol,omitempty"` + // Port - The port for communicating the probe. Possible values range from 1 to 65535, inclusive. + Port *int32 `json:"port,omitempty"` + // IntervalInSeconds - The interval, in seconds, for how frequently to probe the endpoint for health status. Typically, the interval is slightly less than half the allocated timeout period (in seconds) which allows two full probes before taking the instance out of rotation. The default value is 15, the minimum value is 5. + IntervalInSeconds *int32 `json:"intervalInSeconds,omitempty"` + // NumberOfProbes - The number of probes where if no response, will result in stopping further traffic from being delivered to the endpoint. This values allows endpoints to be taken out of rotation faster or slower than the typical times used in Azure. + NumberOfProbes *int32 `json:"numberOfProbes,omitempty"` + // RequestPath - The URI used for requesting health status from the VM. Path is required if a protocol is set to http. Otherwise, it is not allowed. There is no default value. + RequestPath *string `json:"requestPath,omitempty"` + // ProvisioningState - Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// PublicIPAddress public IP address resource. +type PublicIPAddress struct { + autorest.Response `json:"-"` + *PublicIPAddressPropertiesFormat `json:"properties,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for PublicIPAddress. 
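// Sketch (editorial, hedged): the two probe flavours described above. For HTTP probes
// RequestPath is required and a 200 response marks the backend healthy; for TCP probes
// only the handshake matters and RequestPath is omitted. With IntervalInSeconds=15 and
// NumberOfProbes=2 an instance is pulled from rotation after roughly 30 seconds of
// consecutive failures. All values are illustrative.
func exampleProbes() []network.Probe {
	httpProbe := network.Probe{
		Name: to.StringPtr("http-health"),
		ProbePropertiesFormat: &network.ProbePropertiesFormat{
			Protocol:          network.ProbeProtocolHTTP,
			Port:              to.Int32Ptr(80),
			RequestPath:       to.StringPtr("/healthz"),
			IntervalInSeconds: to.Int32Ptr(15),
			NumberOfProbes:    to.Int32Ptr(2),
		},
	}
	tcpProbe := network.Probe{
		Name: to.StringPtr("tcp-health"),
		ProbePropertiesFormat: &network.ProbePropertiesFormat{
			Protocol: network.ProbeProtocolTCP,
			Port:     to.Int32Ptr(443),
		},
	}
	return []network.Probe{httpProbe, tcpProbe}
}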
+func (pia PublicIPAddress) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if pia.PublicIPAddressPropertiesFormat != nil { + objectMap["properties"] = pia.PublicIPAddressPropertiesFormat + } + if pia.Etag != nil { + objectMap["etag"] = pia.Etag + } + if pia.ID != nil { + objectMap["id"] = pia.ID + } + if pia.Location != nil { + objectMap["location"] = pia.Location + } + if pia.Tags != nil { + objectMap["tags"] = pia.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for PublicIPAddress struct. +func (pia *PublicIPAddress) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var publicIPAddressPropertiesFormat PublicIPAddressPropertiesFormat + err = json.Unmarshal(*v, &publicIPAddressPropertiesFormat) + if err != nil { + return err + } + pia.PublicIPAddressPropertiesFormat = &publicIPAddressPropertiesFormat + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + pia.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + pia.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + pia.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + pia.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + pia.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + pia.Tags = tags + } + } + } + + return nil +} + +// PublicIPAddressDNSSettings contains FQDN of the DNS record associated with the public IP address +type PublicIPAddressDNSSettings struct { + // DomainNameLabel - Gets or sets the Domain name label.The concatenation of the domain name label and the regionalized DNS zone make up the fully qualified domain name associated with the public IP address. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system. + DomainNameLabel *string `json:"domainNameLabel,omitempty"` + // Fqdn - Gets the FQDN, Fully qualified domain name of the A DNS record associated with the public IP. This is the concatenation of the domainNameLabel and the regionalized DNS zone. + Fqdn *string `json:"fqdn,omitempty"` + // ReverseFqdn - Gets or Sets the Reverse FQDN. A user-visible, fully qualified domain name that resolves to this public IP address. If the reverseFqdn is specified, then a PTR DNS record is created pointing from the IP address in the in-addr.arpa domain to the reverse FQDN. + ReverseFqdn *string `json:"reverseFqdn,omitempty"` +} + +// PublicIPAddressesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type PublicIPAddressesCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
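// Sketch (editorial, hedged): a public IP request that uses the DNS settings above.
// PublicIPAddressPropertiesFormat is not shown in this hunk, so the
// PublicIPAllocationMethod and DNSSettings field names here are assumptions based on
// the surrounding generated types; the label and location are invented.
func examplePublicIPModel() network.PublicIPAddress {
	return network.PublicIPAddress{
		Location: to.StringPtr("eastus"),
		PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
			PublicIPAllocationMethod: network.Static,
			DNSSettings: &network.PublicIPAddressDNSSettings{
				// Produces <label>.<region>.cloudapp.azure.com once provisioned.
				DomainNameLabel: to.StringPtr("demo-vault-node"),
			},
		},
	}
}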
+func (future *PublicIPAddressesCreateOrUpdateFuture) Result(client PublicIPAddressesClient) (pia PublicIPAddress, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if pia.Response.Response, err = future.GetResult(sender); err == nil && pia.Response.Response.StatusCode != http.StatusNoContent { + pia, err = client.CreateOrUpdateResponder(pia.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesCreateOrUpdateFuture", "Result", pia.Response.Response, "Failure responding to request") + } + } + return +} + +// PublicIPAddressesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type PublicIPAddressesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *PublicIPAddressesDeleteFuture) Result(client PublicIPAddressesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.PublicIPAddressesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// PublicIPAddressListResult response for ListPublicIpAddresses API service call. +type PublicIPAddressListResult struct { + autorest.Response `json:"-"` + // Value - A list of public IP addresses that exists in a resource group. + Value *[]PublicIPAddress `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// PublicIPAddressListResultIterator provides access to a complete listing of PublicIPAddress values. +type PublicIPAddressListResultIterator struct { + i int + page PublicIPAddressListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *PublicIPAddressListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PublicIPAddressListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
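+//
+// Prefer the context-aware form when walking a listing. A minimal sketch, assuming the
+// generated PublicIPAddressesClient exposes the usual ListComplete helper returning this
+// iterator; to.String is the vendored go-autorest helper that tolerates nil pointers.
+//
+//    it, err := client.ListComplete(ctx, resourceGroupName)
+//    if err != nil {
+//        return err
+//    }
+//    for it.NotDone() {
+//        pip := it.Value()
+//        fmt.Println(to.String(pip.Name))
+//        if err = it.NextWithContext(ctx); err != nil {
+//            return err
+//        }
+//    }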
+func (iter *PublicIPAddressListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter PublicIPAddressListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter PublicIPAddressListResultIterator) Response() PublicIPAddressListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter PublicIPAddressListResultIterator) Value() PublicIPAddress { + if !iter.page.NotDone() { + return PublicIPAddress{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the PublicIPAddressListResultIterator type. +func NewPublicIPAddressListResultIterator(page PublicIPAddressListResultPage) PublicIPAddressListResultIterator { + return PublicIPAddressListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (pialr PublicIPAddressListResult) IsEmpty() bool { + return pialr.Value == nil || len(*pialr.Value) == 0 +} + +// publicIPAddressListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (pialr PublicIPAddressListResult) publicIPAddressListResultPreparer(ctx context.Context) (*http.Request, error) { + if pialr.NextLink == nil || len(to.String(pialr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(pialr.NextLink))) +} + +// PublicIPAddressListResultPage contains a page of PublicIPAddress values. +type PublicIPAddressListResultPage struct { + fn func(context.Context, PublicIPAddressListResult) (PublicIPAddressListResult, error) + pialr PublicIPAddressListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *PublicIPAddressListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PublicIPAddressListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.pialr) + if err != nil { + return err + } + page.pialr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *PublicIPAddressListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page PublicIPAddressListResultPage) NotDone() bool { + return !page.pialr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page PublicIPAddressListResultPage) Response() PublicIPAddressListResult { + return page.pialr +} + +// Values returns the slice of values for the current page or nil if there are no values. 
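+//
+// Walking the listing a page at a time rather than an item at a time (a sketch, assuming
+// the generated List call returns this page type). NotDone turns false once the final
+// NextWithContext yields an empty result, which the NextLink preparer above guarantees.
+//
+//    page, err := client.List(ctx, resourceGroupName)
+//    for err == nil && page.NotDone() {
+//        for _, pip := range page.Values() {
+//            fmt.Println(to.String(pip.ID))
+//        }
+//        err = page.NextWithContext(ctx)
+//    }
+//    if err != nil {
+//        return err
+//    }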
+func (page PublicIPAddressListResultPage) Values() []PublicIPAddress { + if page.pialr.IsEmpty() { + return nil + } + return *page.pialr.Value +} + +// Creates a new instance of the PublicIPAddressListResultPage type. +func NewPublicIPAddressListResultPage(getNextPage func(context.Context, PublicIPAddressListResult) (PublicIPAddressListResult, error)) PublicIPAddressListResultPage { + return PublicIPAddressListResultPage{fn: getNextPage} +} + +// PublicIPAddressPropertiesFormat public IP address properties. +type PublicIPAddressPropertiesFormat struct { + // PublicIPAllocationMethod - The public IP allocation method. Possible values are: 'Static' and 'Dynamic'. Possible values include: 'Static', 'Dynamic' + PublicIPAllocationMethod IPAllocationMethod `json:"publicIPAllocationMethod,omitempty"` + IPConfiguration *IPConfiguration `json:"ipConfiguration,omitempty"` + // DNSSettings - The FQDN of the DNS record associated with the public IP address. + DNSSettings *PublicIPAddressDNSSettings `json:"dnsSettings,omitempty"` + IPAddress *string `json:"ipAddress,omitempty"` + // IdleTimeoutInMinutes - The idle timeout of the public IP address. + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` + // ResourceGUID - The resource GUID property of the public IP resource. + ResourceGUID *string `json:"resourceGuid,omitempty"` + // ProvisioningState - The provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// Resource azure resource manager resource properties. +type Resource struct { + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Resource. +func (r Resource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if r.ID != nil { + objectMap["id"] = r.ID + } + if r.Location != nil { + objectMap["location"] = r.Location + } + if r.Tags != nil { + objectMap["tags"] = r.Tags + } + return json.Marshal(objectMap) +} + +// Route route resource +type Route struct { + autorest.Response `json:"-"` + *RoutePropertiesFormat `json:"properties,omitempty"` + // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for Route. +func (r Route) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if r.RoutePropertiesFormat != nil { + objectMap["properties"] = r.RoutePropertiesFormat + } + if r.Name != nil { + objectMap["name"] = r.Name + } + if r.Etag != nil { + objectMap["etag"] = r.Etag + } + if r.ID != nil { + objectMap["id"] = r.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Route struct. 
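+//
+// The unmarshaler below flattens the wire-level "properties" envelope into the embedded
+// RoutePropertiesFormat, so its fields are reachable directly on Route after decoding.
+// Illustrative round trip (not generated code):
+//
+//    payload := []byte(`{
+//        "name": "default-to-nva",
+//        "properties": {
+//            "addressPrefix":    "0.0.0.0/0",
+//            "nextHopType":      "VirtualAppliance",
+//            "nextHopIpAddress": "10.0.2.4"
+//        }
+//    }`)
+//    var r network.Route
+//    if err := json.Unmarshal(payload, &r); err != nil {
+//        return err
+//    }
+//    fmt.Println(to.String(r.AddressPrefix), r.NextHopType) // "0.0.0.0/0 VirtualAppliance"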
+func (r *Route) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var routePropertiesFormat RoutePropertiesFormat + err = json.Unmarshal(*v, &routePropertiesFormat) + if err != nil { + return err + } + r.RoutePropertiesFormat = &routePropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + r.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + r.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + r.ID = &ID + } + } + } + + return nil +} + +// RouteListResult response for the ListRoute API service call +type RouteListResult struct { + autorest.Response `json:"-"` + // Value - Gets a list of routes in a resource group. + Value *[]Route `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// RouteListResultIterator provides access to a complete listing of Route values. +type RouteListResultIterator struct { + i int + page RouteListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *RouteListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RouteListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *RouteListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter RouteListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter RouteListResultIterator) Response() RouteListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter RouteListResultIterator) Value() Route { + if !iter.page.NotDone() { + return Route{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the RouteListResultIterator type. +func NewRouteListResultIterator(page RouteListResultPage) RouteListResultIterator { + return RouteListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (rlr RouteListResult) IsEmpty() bool { + return rlr.Value == nil || len(*rlr.Value) == 0 +} + +// routeListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
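+//
+// The preparer below is consumed inside this package by the "next results" closure that
+// the generated RoutesClient passes to NewRouteListResultPage. A rough sketch of that
+// wiring, assuming the usual autorest plumbing (the real code lives in routes.go):
+//
+//    fn := func(ctx context.Context, last RouteListResult) (RouteListResult, error) {
+//        req, err := last.routeListResultPreparer(ctx)
+//        if err != nil || req == nil {
+//            return RouteListResult{}, err // nil request: no further pages
+//        }
+//        resp, err := autorest.SendWithSender(client, req,
+//            azure.DoRetryWithRegistration(client.Client))
+//        if err != nil {
+//            return RouteListResult{}, err
+//        }
+//        var next RouteListResult
+//        err = autorest.Respond(resp,
+//            azure.WithErrorUnlessStatusCode(http.StatusOK),
+//            autorest.ByUnmarshallingJSON(&next),
+//            autorest.ByClosing())
+//        next.Response = autorest.Response{Response: resp}
+//        return next, err
+//    }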
+func (rlr RouteListResult) routeListResultPreparer(ctx context.Context) (*http.Request, error) { + if rlr.NextLink == nil || len(to.String(rlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rlr.NextLink))) +} + +// RouteListResultPage contains a page of Route values. +type RouteListResultPage struct { + fn func(context.Context, RouteListResult) (RouteListResult, error) + rlr RouteListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *RouteListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RouteListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.rlr) + if err != nil { + return err + } + page.rlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *RouteListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page RouteListResultPage) NotDone() bool { + return !page.rlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page RouteListResultPage) Response() RouteListResult { + return page.rlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page RouteListResultPage) Values() []Route { + if page.rlr.IsEmpty() { + return nil + } + return *page.rlr.Value +} + +// Creates a new instance of the RouteListResultPage type. +func NewRouteListResultPage(getNextPage func(context.Context, RouteListResult) (RouteListResult, error)) RouteListResultPage { + return RouteListResultPage{fn: getNextPage} +} + +// RoutePropertiesFormat route resource +type RoutePropertiesFormat struct { + // AddressPrefix - The destination CIDR to which the route applies. + AddressPrefix *string `json:"addressPrefix,omitempty"` + // NextHopType - The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal', 'Internet', 'VirtualAppliance', and 'None'. Possible values include: 'RouteNextHopTypeVirtualNetworkGateway', 'RouteNextHopTypeVnetLocal', 'RouteNextHopTypeInternet', 'RouteNextHopTypeVirtualAppliance', 'RouteNextHopTypeNone' + NextHopType RouteNextHopType `json:"nextHopType,omitempty"` + // NextHopIPAddress - The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance. + NextHopIPAddress *string `json:"nextHopIpAddress,omitempty"` + // ProvisioningState - The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// RoutesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type RoutesCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. 
+// If the operation has not completed it will return an error. +func (future *RoutesCreateOrUpdateFuture) Result(client RoutesClient) (r Route, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RoutesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if r.Response.Response, err = future.GetResult(sender); err == nil && r.Response.Response.StatusCode != http.StatusNoContent { + r, err = client.CreateOrUpdateResponder(r.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesCreateOrUpdateFuture", "Result", r.Response.Response, "Failure responding to request") + } + } + return +} + +// RoutesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type RoutesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *RoutesDeleteFuture) Result(client RoutesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RoutesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// RouteTable route table resource. +type RouteTable struct { + autorest.Response `json:"-"` + *RouteTablePropertiesFormat `json:"properties,omitempty"` + // Etag - Gets a unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for RouteTable. +func (rt RouteTable) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if rt.RouteTablePropertiesFormat != nil { + objectMap["properties"] = rt.RouteTablePropertiesFormat + } + if rt.Etag != nil { + objectMap["etag"] = rt.Etag + } + if rt.ID != nil { + objectMap["id"] = rt.ID + } + if rt.Location != nil { + objectMap["location"] = rt.Location + } + if rt.Tags != nil { + objectMap["tags"] = rt.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for RouteTable struct. 
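+//
+// Illustrative caller-side literal (not generated code): a route table carrying one
+// user-defined route. MarshalJSON above produces the wire form that the unmarshaler
+// below reverses; NextHopIPAddress is only meaningful with the VirtualAppliance hop type.
+//
+//    rt := network.RouteTable{
+//        Location: to.StringPtr("eastus"),
+//        RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
+//            Routes: &[]network.Route{{
+//                Name: to.StringPtr("default-to-nva"),
+//                RoutePropertiesFormat: &network.RoutePropertiesFormat{
+//                    AddressPrefix:    to.StringPtr("0.0.0.0/0"),
+//                    NextHopType:      network.RouteNextHopTypeVirtualAppliance,
+//                    NextHopIPAddress: to.StringPtr("10.0.2.4"),
+//                },
+//            }},
+//        },
+//    }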
+func (rt *RouteTable) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var routeTablePropertiesFormat RouteTablePropertiesFormat + err = json.Unmarshal(*v, &routeTablePropertiesFormat) + if err != nil { + return err + } + rt.RouteTablePropertiesFormat = &routeTablePropertiesFormat + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + rt.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + rt.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + rt.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + rt.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + rt.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + rt.Tags = tags + } + } + } + + return nil +} + +// RouteTableListResult response for the ListRouteTable API service call. +type RouteTableListResult struct { + autorest.Response `json:"-"` + // Value - Gets a list of route tables in a resource group. + Value *[]RouteTable `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// RouteTableListResultIterator provides access to a complete listing of RouteTable values. +type RouteTableListResultIterator struct { + i int + page RouteTableListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *RouteTableListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RouteTableListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *RouteTableListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter RouteTableListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter RouteTableListResultIterator) Response() RouteTableListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter RouteTableListResultIterator) Value() RouteTable { + if !iter.page.NotDone() { + return RouteTable{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the RouteTableListResultIterator type. +func NewRouteTableListResultIterator(page RouteTableListResultPage) RouteTableListResultIterator { + return RouteTableListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (rtlr RouteTableListResult) IsEmpty() bool { + return rtlr.Value == nil || len(*rtlr.Value) == 0 +} + +// routeTableListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rtlr RouteTableListResult) routeTableListResultPreparer(ctx context.Context) (*http.Request, error) { + if rtlr.NextLink == nil || len(to.String(rtlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rtlr.NextLink))) +} + +// RouteTableListResultPage contains a page of RouteTable values. +type RouteTableListResultPage struct { + fn func(context.Context, RouteTableListResult) (RouteTableListResult, error) + rtlr RouteTableListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *RouteTableListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RouteTableListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.rtlr) + if err != nil { + return err + } + page.rtlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *RouteTableListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page RouteTableListResultPage) NotDone() bool { + return !page.rtlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page RouteTableListResultPage) Response() RouteTableListResult { + return page.rtlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page RouteTableListResultPage) Values() []RouteTable { + if page.rtlr.IsEmpty() { + return nil + } + return *page.rtlr.Value +} + +// Creates a new instance of the RouteTableListResultPage type. +func NewRouteTableListResultPage(getNextPage func(context.Context, RouteTableListResult) (RouteTableListResult, error)) RouteTableListResultPage { + return RouteTableListResultPage{fn: getNextPage} +} + +// RouteTablePropertiesFormat route Table resource +type RouteTablePropertiesFormat struct { + // Routes - Collection of routes contained within a route table. + Routes *[]Route `json:"routes,omitempty"` + // Subnets - A collection of references to subnets. + Subnets *[]Subnet `json:"subnets,omitempty"` + // ProvisioningState - The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. 
+ ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// RouteTablesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type RouteTablesCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *RouteTablesCreateOrUpdateFuture) Result(client RouteTablesClient) (rt RouteTable, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteTablesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if rt.Response.Response, err = future.GetResult(sender); err == nil && rt.Response.Response.StatusCode != http.StatusNoContent { + rt, err = client.CreateOrUpdateResponder(rt.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesCreateOrUpdateFuture", "Result", rt.Response.Response, "Failure responding to request") + } + } + return +} + +// RouteTablesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type RouteTablesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *RouteTablesDeleteFuture) Result(client RouteTablesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.RouteTablesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// SecurityGroup networkSecurityGroup resource. +type SecurityGroup struct { + autorest.Response `json:"-"` + *SecurityGroupPropertiesFormat `json:"properties,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for SecurityGroup. +func (sg SecurityGroup) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sg.SecurityGroupPropertiesFormat != nil { + objectMap["properties"] = sg.SecurityGroupPropertiesFormat + } + if sg.Etag != nil { + objectMap["etag"] = sg.Etag + } + if sg.ID != nil { + objectMap["id"] = sg.ID + } + if sg.Location != nil { + objectMap["location"] = sg.Location + } + if sg.Tags != nil { + objectMap["tags"] = sg.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for SecurityGroup struct. 
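+//
+// Illustrative caller-side literal (not generated code): a network security group with a
+// single inbound allow rule, using the SecurityRule and SecurityRulePropertiesFormat types
+// defined later in this file; the marshaler above and the unmarshaler below round-trip
+// values shaped like this.
+//
+//    nsg := network.SecurityGroup{
+//        Location: to.StringPtr("eastus"),
+//        SecurityGroupPropertiesFormat: &network.SecurityGroupPropertiesFormat{
+//            SecurityRules: &[]network.SecurityRule{{
+//                Name: to.StringPtr("allow-https-in"),
+//                SecurityRulePropertiesFormat: &network.SecurityRulePropertiesFormat{
+//                    Priority:                 to.Int32Ptr(100),
+//                    Direction:                network.Inbound,
+//                    Access:                   network.Allow,
+//                    Protocol:                 network.TCP,
+//                    SourceAddressPrefix:      to.StringPtr("*"),
+//                    SourcePortRange:          to.StringPtr("*"),
+//                    DestinationAddressPrefix: to.StringPtr("*"),
+//                    DestinationPortRange:     to.StringPtr("443"),
+//                },
+//            }},
+//        },
+//    }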
+func (sg *SecurityGroup) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var securityGroupPropertiesFormat SecurityGroupPropertiesFormat + err = json.Unmarshal(*v, &securityGroupPropertiesFormat) + if err != nil { + return err + } + sg.SecurityGroupPropertiesFormat = &securityGroupPropertiesFormat + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + sg.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + sg.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + sg.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + sg.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + sg.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + sg.Tags = tags + } + } + } + + return nil +} + +// SecurityGroupListResult response for ListNetworkSecurityGroups API service call. +type SecurityGroupListResult struct { + autorest.Response `json:"-"` + // Value - A list of NetworkSecurityGroup resources. + Value *[]SecurityGroup `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// SecurityGroupListResultIterator provides access to a complete listing of SecurityGroup values. +type SecurityGroupListResultIterator struct { + i int + page SecurityGroupListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *SecurityGroupListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *SecurityGroupListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SecurityGroupListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter SecurityGroupListResultIterator) Response() SecurityGroupListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter SecurityGroupListResultIterator) Value() SecurityGroup { + if !iter.page.NotDone() { + return SecurityGroup{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the SecurityGroupListResultIterator type. +func NewSecurityGroupListResultIterator(page SecurityGroupListResultPage) SecurityGroupListResultIterator { + return SecurityGroupListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (sglr SecurityGroupListResult) IsEmpty() bool { + return sglr.Value == nil || len(*sglr.Value) == 0 +} + +// securityGroupListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (sglr SecurityGroupListResult) securityGroupListResultPreparer(ctx context.Context) (*http.Request, error) { + if sglr.NextLink == nil || len(to.String(sglr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(sglr.NextLink))) +} + +// SecurityGroupListResultPage contains a page of SecurityGroup values. +type SecurityGroupListResultPage struct { + fn func(context.Context, SecurityGroupListResult) (SecurityGroupListResult, error) + sglr SecurityGroupListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *SecurityGroupListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.sglr) + if err != nil { + return err + } + page.sglr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *SecurityGroupListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page SecurityGroupListResultPage) NotDone() bool { + return !page.sglr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SecurityGroupListResultPage) Response() SecurityGroupListResult { + return page.sglr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SecurityGroupListResultPage) Values() []SecurityGroup { + if page.sglr.IsEmpty() { + return nil + } + return *page.sglr.Value +} + +// Creates a new instance of the SecurityGroupListResultPage type. +func NewSecurityGroupListResultPage(getNextPage func(context.Context, SecurityGroupListResult) (SecurityGroupListResult, error)) SecurityGroupListResultPage { + return SecurityGroupListResultPage{fn: getNextPage} +} + +// SecurityGroupPropertiesFormat network Security Group resource. +type SecurityGroupPropertiesFormat struct { + // SecurityRules - A collection of security rules of the network security group. + SecurityRules *[]SecurityRule `json:"securityRules,omitempty"` + // DefaultSecurityRules - The default security rules of network security group. 
+ DefaultSecurityRules *[]SecurityRule `json:"defaultSecurityRules,omitempty"` + // NetworkInterfaces - A collection of references to network interfaces. + NetworkInterfaces *[]Interface `json:"networkInterfaces,omitempty"` + // Subnets - A collection of references to subnets. + Subnets *[]Subnet `json:"subnets,omitempty"` + // ResourceGUID - The resource GUID property of the network security group resource. + ResourceGUID *string `json:"resourceGuid,omitempty"` + // ProvisioningState - The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// SecurityGroupsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type SecurityGroupsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SecurityGroupsCreateOrUpdateFuture) Result(client SecurityGroupsClient) (sg SecurityGroup, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sg.Response.Response, err = future.GetResult(sender); err == nil && sg.Response.Response.StatusCode != http.StatusNoContent { + sg, err = client.CreateOrUpdateResponder(sg.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsCreateOrUpdateFuture", "Result", sg.Response.Response, "Failure responding to request") + } + } + return +} + +// SecurityGroupsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SecurityGroupsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SecurityGroupsDeleteFuture) Result(client SecurityGroupsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SecurityGroupsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// SecurityRule network security rule. +type SecurityRule struct { + autorest.Response `json:"-"` + *SecurityRulePropertiesFormat `json:"properties,omitempty"` + // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for SecurityRule. 
+func (sr SecurityRule) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if sr.SecurityRulePropertiesFormat != nil { + objectMap["properties"] = sr.SecurityRulePropertiesFormat + } + if sr.Name != nil { + objectMap["name"] = sr.Name + } + if sr.Etag != nil { + objectMap["etag"] = sr.Etag + } + if sr.ID != nil { + objectMap["id"] = sr.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for SecurityRule struct. +func (sr *SecurityRule) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var securityRulePropertiesFormat SecurityRulePropertiesFormat + err = json.Unmarshal(*v, &securityRulePropertiesFormat) + if err != nil { + return err + } + sr.SecurityRulePropertiesFormat = &securityRulePropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + sr.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + sr.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + sr.ID = &ID + } + } + } + + return nil +} + +// SecurityRuleListResult response for ListSecurityRule API service call. Retrieves all security rules that +// belongs to a network security group. +type SecurityRuleListResult struct { + autorest.Response `json:"-"` + // Value - The security rules in a network security group. + Value *[]SecurityRule `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// SecurityRuleListResultIterator provides access to a complete listing of SecurityRule values. +type SecurityRuleListResultIterator struct { + i int + page SecurityRuleListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *SecurityRuleListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityRuleListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *SecurityRuleListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SecurityRuleListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. 
+func (iter SecurityRuleListResultIterator) Response() SecurityRuleListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter SecurityRuleListResultIterator) Value() SecurityRule { + if !iter.page.NotDone() { + return SecurityRule{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the SecurityRuleListResultIterator type. +func NewSecurityRuleListResultIterator(page SecurityRuleListResultPage) SecurityRuleListResultIterator { + return SecurityRuleListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (srlr SecurityRuleListResult) IsEmpty() bool { + return srlr.Value == nil || len(*srlr.Value) == 0 +} + +// securityRuleListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (srlr SecurityRuleListResult) securityRuleListResultPreparer(ctx context.Context) (*http.Request, error) { + if srlr.NextLink == nil || len(to.String(srlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(srlr.NextLink))) +} + +// SecurityRuleListResultPage contains a page of SecurityRule values. +type SecurityRuleListResultPage struct { + fn func(context.Context, SecurityRuleListResult) (SecurityRuleListResult, error) + srlr SecurityRuleListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *SecurityRuleListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityRuleListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.srlr) + if err != nil { + return err + } + page.srlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *SecurityRuleListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page SecurityRuleListResultPage) NotDone() bool { + return !page.srlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SecurityRuleListResultPage) Response() SecurityRuleListResult { + return page.srlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SecurityRuleListResultPage) Values() []SecurityRule { + if page.srlr.IsEmpty() { + return nil + } + return *page.srlr.Value +} + +// Creates a new instance of the SecurityRuleListResultPage type. +func NewSecurityRuleListResultPage(getNextPage func(context.Context, SecurityRuleListResult) (SecurityRuleListResult, error)) SecurityRuleListResultPage { + return SecurityRuleListResultPage{fn: getNextPage} +} + +// SecurityRulePropertiesFormat ... +type SecurityRulePropertiesFormat struct { + // Description - A description for this rule. Restricted to 140 chars. 
+ Description *string `json:"description,omitempty"` + // Protocol - Network protocol this rule applies to. Possible values are 'Tcp', 'Udp', and '*'. Possible values include: 'TCP', 'UDP', 'Asterisk' + Protocol SecurityRuleProtocol `json:"protocol,omitempty"` + // SourcePortRange - The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports. + SourcePortRange *string `json:"sourcePortRange,omitempty"` + // DestinationPortRange - The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports. + DestinationPortRange *string `json:"destinationPortRange,omitempty"` + // SourceAddressPrefix - The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. + SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"` + // DestinationAddressPrefix - The destination address prefix. CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. + DestinationAddressPrefix *string `json:"destinationAddressPrefix,omitempty"` + // Access - The network traffic is allowed or denied. Possible values are: 'Allow' and 'Deny'. Possible values include: 'Allow', 'Deny' + Access SecurityRuleAccess `json:"access,omitempty"` + // Priority - The priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule. + Priority *int32 `json:"priority,omitempty"` + // Direction - The direction of the rule. The direction specifies if rule will be evaluated on incoming or outgoing traffic. Possible values are: 'Inbound' and 'Outbound'. Possible values include: 'Inbound', 'Outbound' + Direction SecurityRuleDirection `json:"direction,omitempty"` + // ProvisioningState - The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// SecurityRulesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type SecurityRulesCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *SecurityRulesCreateOrUpdateFuture) Result(client SecurityRulesClient) (sr SecurityRule, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SecurityRulesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if sr.Response.Response, err = future.GetResult(sender); err == nil && sr.Response.Response.StatusCode != http.StatusNoContent { + sr, err = client.CreateOrUpdateResponder(sr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesCreateOrUpdateFuture", "Result", sr.Response.Response, "Failure responding to request") + } + } + return +} + +// SecurityRulesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SecurityRulesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SecurityRulesDeleteFuture) Result(client SecurityRulesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SecurityRulesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// String ... +type String struct { + autorest.Response `json:"-"` + Value *string `json:"value,omitempty"` +} + +// Subnet subnet in a virtual network resource. +type Subnet struct { + autorest.Response `json:"-"` + *SubnetPropertiesFormat `json:"properties,omitempty"` + // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for Subnet. +func (s Subnet) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if s.SubnetPropertiesFormat != nil { + objectMap["properties"] = s.SubnetPropertiesFormat + } + if s.Name != nil { + objectMap["name"] = s.Name + } + if s.Etag != nil { + objectMap["etag"] = s.Etag + } + if s.ID != nil { + objectMap["id"] = s.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Subnet struct. 
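+//
+// Illustrative caller-side literal (not generated code): a subnet with an address prefix
+// and a network security group association; associations are normally expressed by
+// resource ID only (the ID below is a placeholder). The unmarshaler below reverses the
+// "properties" envelope produced by MarshalJSON above.
+//
+//    subnet := network.Subnet{
+//        Name: to.StringPtr("app"),
+//        SubnetPropertiesFormat: &network.SubnetPropertiesFormat{
+//            AddressPrefix: to.StringPtr("10.0.1.0/24"),
+//            NetworkSecurityGroup: &network.SecurityGroup{
+//                ID: to.StringPtr("<nsg resource ID>"),
+//            },
+//        },
+//    }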
+func (s *Subnet) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var subnetPropertiesFormat SubnetPropertiesFormat + err = json.Unmarshal(*v, &subnetPropertiesFormat) + if err != nil { + return err + } + s.SubnetPropertiesFormat = &subnetPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + s.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + s.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + s.ID = &ID + } + } + } + + return nil +} + +// SubnetListResult response for ListSubnets API service callRetrieves all subnet that belongs to a virtual +// network +type SubnetListResult struct { + autorest.Response `json:"-"` + // Value - The subnets in a virtual network. + Value *[]Subnet `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// SubnetListResultIterator provides access to a complete listing of Subnet values. +type SubnetListResultIterator struct { + i int + page SubnetListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *SubnetListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SubnetListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *SubnetListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SubnetListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter SubnetListResultIterator) Response() SubnetListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter SubnetListResultIterator) Value() Subnet { + if !iter.page.NotDone() { + return Subnet{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the SubnetListResultIterator type. +func NewSubnetListResultIterator(page SubnetListResultPage) SubnetListResultIterator { + return SubnetListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (slr SubnetListResult) IsEmpty() bool { + return slr.Value == nil || len(*slr.Value) == 0 +} + +// subnetListResultPreparer prepares a request to retrieve the next set of results. 
+// It returns nil if no more results exist. +func (slr SubnetListResult) subnetListResultPreparer(ctx context.Context) (*http.Request, error) { + if slr.NextLink == nil || len(to.String(slr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(slr.NextLink))) +} + +// SubnetListResultPage contains a page of Subnet values. +type SubnetListResultPage struct { + fn func(context.Context, SubnetListResult) (SubnetListResult, error) + slr SubnetListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *SubnetListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SubnetListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.slr) + if err != nil { + return err + } + page.slr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *SubnetListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page SubnetListResultPage) NotDone() bool { + return !page.slr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SubnetListResultPage) Response() SubnetListResult { + return page.slr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SubnetListResultPage) Values() []Subnet { + if page.slr.IsEmpty() { + return nil + } + return *page.slr.Value +} + +// Creates a new instance of the SubnetListResultPage type. +func NewSubnetListResultPage(getNextPage func(context.Context, SubnetListResult) (SubnetListResult, error)) SubnetListResultPage { + return SubnetListResultPage{fn: getNextPage} +} + +// SubnetPropertiesFormat ... +type SubnetPropertiesFormat struct { + // AddressPrefix - The address prefix for the subnet. + AddressPrefix *string `json:"addressPrefix,omitempty"` + // NetworkSecurityGroup - The reference of the NetworkSecurityGroup resource. + NetworkSecurityGroup *SecurityGroup `json:"networkSecurityGroup,omitempty"` + // RouteTable - The reference of the RouteTable resource. + RouteTable *RouteTable `json:"routeTable,omitempty"` + // IPConfigurations - Gets an array of references to the network interface IP configurations using subnet. + IPConfigurations *[]IPConfiguration `json:"ipConfigurations,omitempty"` + // ProvisioningState - The provisioning state of the resource. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// SubnetsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SubnetsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *SubnetsCreateOrUpdateFuture) Result(client SubnetsClient) (s Subnet, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SubnetsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.CreateOrUpdateResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsCreateOrUpdateFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + +// SubnetsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type SubnetsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *SubnetsDeleteFuture) Result(client SubnetsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.SubnetsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// SubResource azure resource manager sub resource properties. +type SubResource struct { + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// Usage describes network resource usage. +type Usage struct { + // Unit - An enum describing the unit of measurement. + Unit *string `json:"unit,omitempty"` + // CurrentValue - The current value of the usage. + CurrentValue *int64 `json:"currentValue,omitempty"` + // Limit - The limit of usage. + Limit *int64 `json:"limit,omitempty"` + // Name - The name of the type of usage. + Name *UsageName `json:"name,omitempty"` +} + +// UsageName the usage names. +type UsageName struct { + // Value - A string describing the resource name. + Value *string `json:"value,omitempty"` + // LocalizedValue - A localized string describing the resource name. + LocalizedValue *string `json:"localizedValue,omitempty"` +} + +// UsagesListResult the list usages operation response. +type UsagesListResult struct { + autorest.Response `json:"-"` + // Value - The list network resource usages. + Value *[]Usage `json:"value,omitempty"` + // NextLink - URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// UsagesListResultIterator provides access to a complete listing of Usage values. +type UsagesListResultIterator struct { + i int + page UsagesListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *UsagesListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/UsagesListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *UsagesListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter UsagesListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter UsagesListResultIterator) Response() UsagesListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter UsagesListResultIterator) Value() Usage { + if !iter.page.NotDone() { + return Usage{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the UsagesListResultIterator type. +func NewUsagesListResultIterator(page UsagesListResultPage) UsagesListResultIterator { + return UsagesListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (ulr UsagesListResult) IsEmpty() bool { + return ulr.Value == nil || len(*ulr.Value) == 0 +} + +// usagesListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (ulr UsagesListResult) usagesListResultPreparer(ctx context.Context) (*http.Request, error) { + if ulr.NextLink == nil || len(to.String(ulr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(ulr.NextLink))) +} + +// UsagesListResultPage contains a page of Usage values. +type UsagesListResultPage struct { + fn func(context.Context, UsagesListResult) (UsagesListResult, error) + ulr UsagesListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *UsagesListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/UsagesListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.ulr) + if err != nil { + return err + } + page.ulr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
+func (page *UsagesListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page UsagesListResultPage) NotDone() bool { + return !page.ulr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page UsagesListResultPage) Response() UsagesListResult { + return page.ulr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page UsagesListResultPage) Values() []Usage { + if page.ulr.IsEmpty() { + return nil + } + return *page.ulr.Value +} + +// Creates a new instance of the UsagesListResultPage type. +func NewUsagesListResultPage(getNextPage func(context.Context, UsagesListResult) (UsagesListResult, error)) UsagesListResultPage { + return UsagesListResultPage{fn: getNextPage} +} + +// VirtualNetwork virtual Network resource. +type VirtualNetwork struct { + autorest.Response `json:"-"` + *VirtualNetworkPropertiesFormat `json:"properties,omitempty"` + // Etag - Gets a unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for VirtualNetwork. +func (vn VirtualNetwork) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vn.VirtualNetworkPropertiesFormat != nil { + objectMap["properties"] = vn.VirtualNetworkPropertiesFormat + } + if vn.Etag != nil { + objectMap["etag"] = vn.Etag + } + if vn.ID != nil { + objectMap["id"] = vn.ID + } + if vn.Location != nil { + objectMap["location"] = vn.Location + } + if vn.Tags != nil { + objectMap["tags"] = vn.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualNetwork struct. 
+func (vn *VirtualNetwork) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var virtualNetworkPropertiesFormat VirtualNetworkPropertiesFormat + err = json.Unmarshal(*v, &virtualNetworkPropertiesFormat) + if err != nil { + return err + } + vn.VirtualNetworkPropertiesFormat = &virtualNetworkPropertiesFormat + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + vn.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vn.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vn.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + vn.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + vn.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + vn.Tags = tags + } + } + } + + return nil +} + +// VirtualNetworkGateway a common class for general resource information +type VirtualNetworkGateway struct { + autorest.Response `json:"-"` + *VirtualNetworkGatewayPropertiesFormat `json:"properties,omitempty"` + // Etag - Gets a unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for VirtualNetworkGateway. +func (vng VirtualNetworkGateway) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vng.VirtualNetworkGatewayPropertiesFormat != nil { + objectMap["properties"] = vng.VirtualNetworkGatewayPropertiesFormat + } + if vng.Etag != nil { + objectMap["etag"] = vng.Etag + } + if vng.ID != nil { + objectMap["id"] = vng.ID + } + if vng.Location != nil { + objectMap["location"] = vng.Location + } + if vng.Tags != nil { + objectMap["tags"] = vng.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualNetworkGateway struct. 
+func (vng *VirtualNetworkGateway) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var virtualNetworkGatewayPropertiesFormat VirtualNetworkGatewayPropertiesFormat + err = json.Unmarshal(*v, &virtualNetworkGatewayPropertiesFormat) + if err != nil { + return err + } + vng.VirtualNetworkGatewayPropertiesFormat = &virtualNetworkGatewayPropertiesFormat + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + vng.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vng.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vng.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + vng.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + vng.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + vng.Tags = tags + } + } + } + + return nil +} + +// VirtualNetworkGatewayConnection a common class for general resource information +type VirtualNetworkGatewayConnection struct { + autorest.Response `json:"-"` + *VirtualNetworkGatewayConnectionPropertiesFormat `json:"properties,omitempty"` + // Etag - Gets a unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type. + Type *string `json:"type,omitempty"` + // Location - Resource location. + Location *string `json:"location,omitempty"` + // Tags - Resource tags. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for VirtualNetworkGatewayConnection. +func (vngc VirtualNetworkGatewayConnection) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vngc.VirtualNetworkGatewayConnectionPropertiesFormat != nil { + objectMap["properties"] = vngc.VirtualNetworkGatewayConnectionPropertiesFormat + } + if vngc.Etag != nil { + objectMap["etag"] = vngc.Etag + } + if vngc.ID != nil { + objectMap["id"] = vngc.ID + } + if vngc.Location != nil { + objectMap["location"] = vngc.Location + } + if vngc.Tags != nil { + objectMap["tags"] = vngc.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualNetworkGatewayConnection struct. 
+func (vngc *VirtualNetworkGatewayConnection) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var virtualNetworkGatewayConnectionPropertiesFormat VirtualNetworkGatewayConnectionPropertiesFormat + err = json.Unmarshal(*v, &virtualNetworkGatewayConnectionPropertiesFormat) + if err != nil { + return err + } + vngc.VirtualNetworkGatewayConnectionPropertiesFormat = &virtualNetworkGatewayConnectionPropertiesFormat + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + vngc.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vngc.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vngc.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + vngc.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + vngc.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + vngc.Tags = tags + } + } + } + + return nil +} + +// VirtualNetworkGatewayConnectionListResult response for the ListVirtualNetworkGatewayConnections API +// service call +type VirtualNetworkGatewayConnectionListResult struct { + autorest.Response `json:"-"` + // Value - Gets a list of VirtualNetworkGatewayConnection resources that exists in a resource group. + Value *[]VirtualNetworkGatewayConnection `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualNetworkGatewayConnectionListResultIterator provides access to a complete listing of +// VirtualNetworkGatewayConnection values. +type VirtualNetworkGatewayConnectionListResultIterator struct { + i int + page VirtualNetworkGatewayConnectionListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VirtualNetworkGatewayConnectionListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayConnectionListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *VirtualNetworkGatewayConnectionListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. 
+func (iter VirtualNetworkGatewayConnectionListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter VirtualNetworkGatewayConnectionListResultIterator) Response() VirtualNetworkGatewayConnectionListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VirtualNetworkGatewayConnectionListResultIterator) Value() VirtualNetworkGatewayConnection { + if !iter.page.NotDone() { + return VirtualNetworkGatewayConnection{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the VirtualNetworkGatewayConnectionListResultIterator type. +func NewVirtualNetworkGatewayConnectionListResultIterator(page VirtualNetworkGatewayConnectionListResultPage) VirtualNetworkGatewayConnectionListResultIterator { + return VirtualNetworkGatewayConnectionListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (vngclr VirtualNetworkGatewayConnectionListResult) IsEmpty() bool { + return vngclr.Value == nil || len(*vngclr.Value) == 0 +} + +// virtualNetworkGatewayConnectionListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (vngclr VirtualNetworkGatewayConnectionListResult) virtualNetworkGatewayConnectionListResultPreparer(ctx context.Context) (*http.Request, error) { + if vngclr.NextLink == nil || len(to.String(vngclr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vngclr.NextLink))) +} + +// VirtualNetworkGatewayConnectionListResultPage contains a page of VirtualNetworkGatewayConnection values. +type VirtualNetworkGatewayConnectionListResultPage struct { + fn func(context.Context, VirtualNetworkGatewayConnectionListResult) (VirtualNetworkGatewayConnectionListResult, error) + vngclr VirtualNetworkGatewayConnectionListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *VirtualNetworkGatewayConnectionListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayConnectionListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.vngclr) + if err != nil { + return err + } + page.vngclr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *VirtualNetworkGatewayConnectionListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualNetworkGatewayConnectionListResultPage) NotDone() bool { + return !page.vngclr.IsEmpty() +} + +// Response returns the raw server response from the last page request. 
+func (page VirtualNetworkGatewayConnectionListResultPage) Response() VirtualNetworkGatewayConnectionListResult {
+	return page.vngclr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page VirtualNetworkGatewayConnectionListResultPage) Values() []VirtualNetworkGatewayConnection {
+	if page.vngclr.IsEmpty() {
+		return nil
+	}
+	return *page.vngclr.Value
+}
+
+// Creates a new instance of the VirtualNetworkGatewayConnectionListResultPage type.
+func NewVirtualNetworkGatewayConnectionListResultPage(getNextPage func(context.Context, VirtualNetworkGatewayConnectionListResult) (VirtualNetworkGatewayConnectionListResult, error)) VirtualNetworkGatewayConnectionListResultPage {
+	return VirtualNetworkGatewayConnectionListResultPage{fn: getNextPage}
+}
+
+// VirtualNetworkGatewayConnectionPropertiesFormat virtualNetworkGatewayConnection properties
+type VirtualNetworkGatewayConnectionPropertiesFormat struct {
+	// AuthorizationKey - The authorizationKey.
+	AuthorizationKey *string `json:"authorizationKey,omitempty"`
+	VirtualNetworkGateway1 *VirtualNetworkGateway `json:"virtualNetworkGateway1,omitempty"`
+	VirtualNetworkGateway2 *VirtualNetworkGateway `json:"virtualNetworkGateway2,omitempty"`
+	LocalNetworkGateway2 *LocalNetworkGateway `json:"localNetworkGateway2,omitempty"`
+	// ConnectionType - Gateway connection type. Possible values are: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', and 'VPNClient'. Possible values include: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', 'VPNClient'
+	ConnectionType VirtualNetworkGatewayConnectionType `json:"connectionType,omitempty"`
+	// RoutingWeight - The routing weight.
+	RoutingWeight *int32 `json:"routingWeight,omitempty"`
+	// SharedKey - The IPSec shared key.
+	SharedKey *string `json:"sharedKey,omitempty"`
+	// ConnectionStatus - Virtual network Gateway connection status. Possible values are 'Unknown', 'Connecting', 'Connected' and 'NotConnected'. Possible values include: 'Unknown', 'Connecting', 'Connected', 'NotConnected'
+	ConnectionStatus VirtualNetworkGatewayConnectionStatus `json:"connectionStatus,omitempty"`
+	// EgressBytesTransferred - The egress bytes transferred in this connection.
+	EgressBytesTransferred *int64 `json:"egressBytesTransferred,omitempty"`
+	// IngressBytesTransferred - The ingress bytes transferred in this connection.
+	IngressBytesTransferred *int64 `json:"ingressBytesTransferred,omitempty"`
+	// Peer - The reference to peerings resource.
+	Peer *SubResource `json:"peer,omitempty"`
+	// EnableBgp - EnableBgp flag
+	EnableBgp *bool `json:"enableBgp,omitempty"`
+	// ResourceGUID - The resource GUID property of the VirtualNetworkGatewayConnection resource.
+	ResourceGUID *string `json:"resourceGuid,omitempty"`
+	// ProvisioningState - The provisioning state of the VirtualNetworkGatewayConnection resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// VirtualNetworkGatewayConnectionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the
+// results of a long-running operation.
+type VirtualNetworkGatewayConnectionsCreateOrUpdateFuture struct {
+	azure.Future
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future *VirtualNetworkGatewayConnectionsCreateOrUpdateFuture) Result(client VirtualNetworkGatewayConnectionsClient) (vngc VirtualNetworkGatewayConnection, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vngc.Response.Response, err = future.GetResult(sender); err == nil && vngc.Response.Response.StatusCode != http.StatusNoContent { + vngc, err = client.CreateOrUpdateResponder(vngc.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsCreateOrUpdateFuture", "Result", vngc.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualNetworkGatewayConnectionsDeleteFuture an abstraction for monitoring and retrieving the results of +// a long-running operation. +type VirtualNetworkGatewayConnectionsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualNetworkGatewayConnectionsDeleteFuture) Result(client VirtualNetworkGatewayConnectionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualNetworkGatewayConnectionsResetSharedKeyFuture an abstraction for monitoring and retrieving the +// results of a long-running operation. +type VirtualNetworkGatewayConnectionsResetSharedKeyFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *VirtualNetworkGatewayConnectionsResetSharedKeyFuture) Result(client VirtualNetworkGatewayConnectionsClient) (crsk ConnectionResetSharedKey, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if crsk.Response.Response, err = future.GetResult(sender); err == nil && crsk.Response.Response.StatusCode != http.StatusNoContent { + crsk, err = client.ResetSharedKeyResponder(crsk.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsResetSharedKeyFuture", "Result", crsk.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualNetworkGatewayConnectionsSetSharedKeyFuture an abstraction for monitoring and retrieving the +// results of a long-running operation. +type VirtualNetworkGatewayConnectionsSetSharedKeyFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualNetworkGatewayConnectionsSetSharedKeyFuture) Result(client VirtualNetworkGatewayConnectionsClient) (csk ConnectionSharedKey, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if csk.Response.Response, err = future.GetResult(sender); err == nil && csk.Response.Response.StatusCode != http.StatusNoContent { + csk, err = client.SetSharedKeyResponder(csk.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsSetSharedKeyFuture", "Result", csk.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualNetworkGatewayIPConfiguration IP configuration for virtual network gateway +type VirtualNetworkGatewayIPConfiguration struct { + *VirtualNetworkGatewayIPConfigurationPropertiesFormat `json:"properties,omitempty"` + // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for VirtualNetworkGatewayIPConfiguration. 
+func (vngic VirtualNetworkGatewayIPConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vngic.VirtualNetworkGatewayIPConfigurationPropertiesFormat != nil { + objectMap["properties"] = vngic.VirtualNetworkGatewayIPConfigurationPropertiesFormat + } + if vngic.Name != nil { + objectMap["name"] = vngic.Name + } + if vngic.Etag != nil { + objectMap["etag"] = vngic.Etag + } + if vngic.ID != nil { + objectMap["id"] = vngic.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VirtualNetworkGatewayIPConfiguration struct. +func (vngic *VirtualNetworkGatewayIPConfiguration) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var virtualNetworkGatewayIPConfigurationPropertiesFormat VirtualNetworkGatewayIPConfigurationPropertiesFormat + err = json.Unmarshal(*v, &virtualNetworkGatewayIPConfigurationPropertiesFormat) + if err != nil { + return err + } + vngic.VirtualNetworkGatewayIPConfigurationPropertiesFormat = &virtualNetworkGatewayIPConfigurationPropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vngic.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + vngic.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vngic.ID = &ID + } + } + } + + return nil +} + +// VirtualNetworkGatewayIPConfigurationPropertiesFormat properties of VirtualNetworkGatewayIPConfiguration +type VirtualNetworkGatewayIPConfigurationPropertiesFormat struct { + // PrivateIPAddress - Gets or sets the privateIPAddress of the IP Configuration + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + // PrivateIPAllocationMethod - The private IP allocation method. Possible values are: 'Static' and 'Dynamic'. Possible values include: 'Static', 'Dynamic' + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + // Subnet - The reference of the subnet resource. + Subnet *SubResource `json:"subnet,omitempty"` + // PublicIPAddress - The reference of the public IP resource. + PublicIPAddress *SubResource `json:"publicIPAddress,omitempty"` + // ProvisioningState - The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualNetworkGatewayListResult response for the ListVirtualNetworkGateways API service call. +type VirtualNetworkGatewayListResult struct { + autorest.Response `json:"-"` + // Value - Gets a list of VirtualNetworkGateway resources that exists in a resource group. + Value *[]VirtualNetworkGateway `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualNetworkGatewayListResultIterator provides access to a complete listing of VirtualNetworkGateway +// values. +type VirtualNetworkGatewayListResultIterator struct { + i int + page VirtualNetworkGatewayListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *VirtualNetworkGatewayListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *VirtualNetworkGatewayListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter VirtualNetworkGatewayListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter VirtualNetworkGatewayListResultIterator) Response() VirtualNetworkGatewayListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VirtualNetworkGatewayListResultIterator) Value() VirtualNetworkGateway { + if !iter.page.NotDone() { + return VirtualNetworkGateway{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the VirtualNetworkGatewayListResultIterator type. +func NewVirtualNetworkGatewayListResultIterator(page VirtualNetworkGatewayListResultPage) VirtualNetworkGatewayListResultIterator { + return VirtualNetworkGatewayListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (vnglr VirtualNetworkGatewayListResult) IsEmpty() bool { + return vnglr.Value == nil || len(*vnglr.Value) == 0 +} + +// virtualNetworkGatewayListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (vnglr VirtualNetworkGatewayListResult) virtualNetworkGatewayListResultPreparer(ctx context.Context) (*http.Request, error) { + if vnglr.NextLink == nil || len(to.String(vnglr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vnglr.NextLink))) +} + +// VirtualNetworkGatewayListResultPage contains a page of VirtualNetworkGateway values. +type VirtualNetworkGatewayListResultPage struct { + fn func(context.Context, VirtualNetworkGatewayListResult) (VirtualNetworkGatewayListResult, error) + vnglr VirtualNetworkGatewayListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *VirtualNetworkGatewayListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.vnglr) + if err != nil { + return err + } + page.vnglr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *VirtualNetworkGatewayListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualNetworkGatewayListResultPage) NotDone() bool { + return !page.vnglr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page VirtualNetworkGatewayListResultPage) Response() VirtualNetworkGatewayListResult { + return page.vnglr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualNetworkGatewayListResultPage) Values() []VirtualNetworkGateway { + if page.vnglr.IsEmpty() { + return nil + } + return *page.vnglr.Value +} + +// Creates a new instance of the VirtualNetworkGatewayListResultPage type. +func NewVirtualNetworkGatewayListResultPage(getNextPage func(context.Context, VirtualNetworkGatewayListResult) (VirtualNetworkGatewayListResult, error)) VirtualNetworkGatewayListResultPage { + return VirtualNetworkGatewayListResultPage{fn: getNextPage} +} + +// VirtualNetworkGatewayPropertiesFormat virtualNetworkGateway properties +type VirtualNetworkGatewayPropertiesFormat struct { + // IPConfigurations - IP configurations for virtual network gateway. + IPConfigurations *[]VirtualNetworkGatewayIPConfiguration `json:"ipConfigurations,omitempty"` + // GatewayType - The type of this virtual network gateway. Possible values are: 'Vpn' and 'ExpressRoute'. Possible values include: 'VirtualNetworkGatewayTypeVpn', 'VirtualNetworkGatewayTypeExpressRoute' + GatewayType VirtualNetworkGatewayType `json:"gatewayType,omitempty"` + // VpnType - The type of this virtual network gateway. Possible values are: 'PolicyBased' and 'RouteBased'. Possible values include: 'PolicyBased', 'RouteBased' + VpnType VpnType `json:"vpnType,omitempty"` + // EnableBgp - Whether BGP is enabled for this virtual network gateway or not. + EnableBgp *bool `json:"enableBgp,omitempty"` + // GatewayDefaultSite - The reference of the LocalNetworkGateway resource which represents local network site having default routes. Assign Null value in case of removing existing default site setting. + GatewayDefaultSite *SubResource `json:"gatewayDefaultSite,omitempty"` + // Sku - The reference of the VirtualNetworkGatewaySku resource which represents the SKU selected for Virtual network gateway. + Sku *VirtualNetworkGatewaySku `json:"sku,omitempty"` + // VpnClientConfiguration - The reference of the VpnClientConfiguration resource which represents the P2S VpnClient configurations. + VpnClientConfiguration *VpnClientConfiguration `json:"vpnClientConfiguration,omitempty"` + // BgpSettings - Virtual network gateway's BGP speaker settings. 
+ BgpSettings *BgpSettings `json:"bgpSettings,omitempty"` + // ResourceGUID - The resource GUID property of the VirtualNetworkGateway resource. + ResourceGUID *string `json:"resourceGuid,omitempty"` + // ProvisioningState - The provisioning state of the VirtualNetworkGateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualNetworkGatewaysCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualNetworkGatewaysCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualNetworkGatewaysCreateOrUpdateFuture) Result(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vng.Response.Response, err = future.GetResult(sender); err == nil && vng.Response.Response.StatusCode != http.StatusNoContent { + vng, err = client.CreateOrUpdateResponder(vng.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysCreateOrUpdateFuture", "Result", vng.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualNetworkGatewaysDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualNetworkGatewaysDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualNetworkGatewaysDeleteFuture) Result(client VirtualNetworkGatewaysClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// VirtualNetworkGatewaysGeneratevpnclientpackageFuture an abstraction for monitoring and retrieving the +// results of a long-running operation. +type VirtualNetworkGatewaysGeneratevpnclientpackageFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *VirtualNetworkGatewaysGeneratevpnclientpackageFuture) Result(client VirtualNetworkGatewaysClient) (s String, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if s.Response.Response, err = future.GetResult(sender); err == nil && s.Response.Response.StatusCode != http.StatusNoContent { + s, err = client.GeneratevpnclientpackageResponder(s.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysGeneratevpnclientpackageFuture", "Result", s.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualNetworkGatewaySku virtualNetworkGatewaySku details +type VirtualNetworkGatewaySku struct { + // Name - Gateway sku name -Basic/HighPerformance/Standard. Possible values include: 'VirtualNetworkGatewaySkuNameBasic', 'VirtualNetworkGatewaySkuNameHighPerformance', 'VirtualNetworkGatewaySkuNameStandard' + Name VirtualNetworkGatewaySkuName `json:"name,omitempty"` + // Tier - Gateway sku tier -Basic/HighPerformance/Standard. Possible values include: 'VirtualNetworkGatewaySkuTierBasic', 'VirtualNetworkGatewaySkuTierHighPerformance', 'VirtualNetworkGatewaySkuTierStandard' + Tier VirtualNetworkGatewaySkuTier `json:"tier,omitempty"` + // Capacity - The capacity + Capacity *int32 `json:"capacity,omitempty"` +} + +// VirtualNetworkGatewaysResetFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualNetworkGatewaysResetFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualNetworkGatewaysResetFuture) Result(client VirtualNetworkGatewaysClient) (vng VirtualNetworkGateway, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworkGatewaysResetFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vng.Response.Response, err = future.GetResult(sender); err == nil && vng.Response.Response.StatusCode != http.StatusNoContent { + vng, err = client.ResetResponder(vng.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysResetFuture", "Result", vng.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualNetworkListResult response for the ListVirtualNetworks API service call. +type VirtualNetworkListResult struct { + autorest.Response `json:"-"` + // Value - Gets a list of VirtualNetwork resources in a resource group. + Value *[]VirtualNetwork `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. 
+ NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualNetworkListResultIterator provides access to a complete listing of VirtualNetwork values. +type VirtualNetworkListResultIterator struct { + i int + page VirtualNetworkListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VirtualNetworkListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *VirtualNetworkListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter VirtualNetworkListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter VirtualNetworkListResultIterator) Response() VirtualNetworkListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VirtualNetworkListResultIterator) Value() VirtualNetwork { + if !iter.page.NotDone() { + return VirtualNetwork{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the VirtualNetworkListResultIterator type. +func NewVirtualNetworkListResultIterator(page VirtualNetworkListResultPage) VirtualNetworkListResultIterator { + return VirtualNetworkListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (vnlr VirtualNetworkListResult) IsEmpty() bool { + return vnlr.Value == nil || len(*vnlr.Value) == 0 +} + +// virtualNetworkListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (vnlr VirtualNetworkListResult) virtualNetworkListResultPreparer(ctx context.Context) (*http.Request, error) { + if vnlr.NextLink == nil || len(to.String(vnlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vnlr.NextLink))) +} + +// VirtualNetworkListResultPage contains a page of VirtualNetwork values. +type VirtualNetworkListResultPage struct { + fn func(context.Context, VirtualNetworkListResult) (VirtualNetworkListResult, error) + vnlr VirtualNetworkListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *VirtualNetworkListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.vnlr) + if err != nil { + return err + } + page.vnlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *VirtualNetworkListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualNetworkListResultPage) NotDone() bool { + return !page.vnlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page VirtualNetworkListResultPage) Response() VirtualNetworkListResult { + return page.vnlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualNetworkListResultPage) Values() []VirtualNetwork { + if page.vnlr.IsEmpty() { + return nil + } + return *page.vnlr.Value +} + +// Creates a new instance of the VirtualNetworkListResultPage type. +func NewVirtualNetworkListResultPage(getNextPage func(context.Context, VirtualNetworkListResult) (VirtualNetworkListResult, error)) VirtualNetworkListResultPage { + return VirtualNetworkListResultPage{fn: getNextPage} +} + +// VirtualNetworkPropertiesFormat ... +type VirtualNetworkPropertiesFormat struct { + // AddressSpace - The AddressSpace that contains an array of IP address ranges that can be used by subnets. + AddressSpace *AddressSpace `json:"addressSpace,omitempty"` + // DhcpOptions - The dhcpOptions that contains an array of DNS servers available to VMs deployed in the virtual network. + DhcpOptions *DhcpOptions `json:"dhcpOptions,omitempty"` + // Subnets - A list of subnets in a Virtual Network. + Subnets *[]Subnet `json:"subnets,omitempty"` + // ResourceGUID - The resourceGuid property of the Virtual Network resource. + ResourceGUID *string `json:"resourceGuid,omitempty"` + // ProvisioningState - The provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualNetworksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualNetworksCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *VirtualNetworksCreateOrUpdateFuture) Result(client VirtualNetworksClient) (vn VirtualNetwork, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if vn.Response.Response, err = future.GetResult(sender); err == nil && vn.Response.Response.StatusCode != http.StatusNoContent { + vn, err = client.CreateOrUpdateResponder(vn.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksCreateOrUpdateFuture", "Result", vn.Response.Response, "Failure responding to request") + } + } + return +} + +// VirtualNetworksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualNetworksDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualNetworksDeleteFuture) Result(client VirtualNetworksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("network.VirtualNetworksDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// VpnClientConfiguration vpnClientConfiguration for P2S client +type VpnClientConfiguration struct { + // VpnClientAddressPool - Gets or sets the reference of the Address space resource which represents Address space for P2S VpnClient. + VpnClientAddressPool *AddressSpace `json:"vpnClientAddressPool,omitempty"` + // VpnClientRootCertificates - VpnClientRootCertificate for Virtual network gateway. + VpnClientRootCertificates *[]VpnClientRootCertificate `json:"vpnClientRootCertificates,omitempty"` + // VpnClientRevokedCertificates - VpnClientRevokedCertificate for Virtual network gateway. + VpnClientRevokedCertificates *[]VpnClientRevokedCertificate `json:"vpnClientRevokedCertificates,omitempty"` +} + +// VpnClientParameters vpnClientParameters +type VpnClientParameters struct { + // ProcessorArchitecture - VPN client Processor Architecture. Possible values are: 'AMD64' and 'X86'. Possible values include: 'Amd64', 'X86' + ProcessorArchitecture ProcessorArchitecture `json:"ProcessorArchitecture,omitempty"` +} + +// VpnClientRevokedCertificate VPN client revoked certificate of virtual network gateway. +type VpnClientRevokedCertificate struct { + *VpnClientRevokedCertificatePropertiesFormat `json:"properties,omitempty"` + // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for VpnClientRevokedCertificate. 
+func (vcrc VpnClientRevokedCertificate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vcrc.VpnClientRevokedCertificatePropertiesFormat != nil { + objectMap["properties"] = vcrc.VpnClientRevokedCertificatePropertiesFormat + } + if vcrc.Name != nil { + objectMap["name"] = vcrc.Name + } + if vcrc.Etag != nil { + objectMap["etag"] = vcrc.Etag + } + if vcrc.ID != nil { + objectMap["id"] = vcrc.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VpnClientRevokedCertificate struct. +func (vcrc *VpnClientRevokedCertificate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var vpnClientRevokedCertificatePropertiesFormat VpnClientRevokedCertificatePropertiesFormat + err = json.Unmarshal(*v, &vpnClientRevokedCertificatePropertiesFormat) + if err != nil { + return err + } + vcrc.VpnClientRevokedCertificatePropertiesFormat = &vpnClientRevokedCertificatePropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vcrc.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + vcrc.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vcrc.ID = &ID + } + } + } + + return nil +} + +// VpnClientRevokedCertificatePropertiesFormat properties of the revoked VPN client certificate of virtual +// network gateway. +type VpnClientRevokedCertificatePropertiesFormat struct { + // Thumbprint - The revoked VPN client certificate thumbprint. + Thumbprint *string `json:"thumbprint,omitempty"` + // ProvisioningState - The provisioning state of the VPN client revoked certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VpnClientRootCertificate VPN client root certificate of virtual network gateway +type VpnClientRootCertificate struct { + *VpnClientRootCertificatePropertiesFormat `json:"properties,omitempty"` + // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. + Name *string `json:"name,omitempty"` + // Etag - A unique read-only string that changes whenever the resource is updated. + Etag *string `json:"etag,omitempty"` + // ID - Resource Identifier. + ID *string `json:"id,omitempty"` +} + +// MarshalJSON is the custom marshaler for VpnClientRootCertificate. +func (vcrc VpnClientRootCertificate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vcrc.VpnClientRootCertificatePropertiesFormat != nil { + objectMap["properties"] = vcrc.VpnClientRootCertificatePropertiesFormat + } + if vcrc.Name != nil { + objectMap["name"] = vcrc.Name + } + if vcrc.Etag != nil { + objectMap["etag"] = vcrc.Etag + } + if vcrc.ID != nil { + objectMap["id"] = vcrc.ID + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VpnClientRootCertificate struct. 
+func (vcrc *VpnClientRootCertificate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var vpnClientRootCertificatePropertiesFormat VpnClientRootCertificatePropertiesFormat + err = json.Unmarshal(*v, &vpnClientRootCertificatePropertiesFormat) + if err != nil { + return err + } + vcrc.VpnClientRootCertificatePropertiesFormat = &vpnClientRootCertificatePropertiesFormat + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vcrc.Name = &name + } + case "etag": + if v != nil { + var etag string + err = json.Unmarshal(*v, &etag) + if err != nil { + return err + } + vcrc.Etag = &etag + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vcrc.ID = &ID + } + } + } + + return nil +} + +// VpnClientRootCertificatePropertiesFormat properties of SSL certificates of application gateway +type VpnClientRootCertificatePropertiesFormat struct { + // PublicCertData - Gets or sets the certificate public data + PublicCertData *string `json:"publicCertData,omitempty"` + // ProvisioningState - The provisioning state of the VPN client root certificate resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. + ProvisioningState *string `json:"provisioningState,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/publicipaddresses.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/publicipaddresses.go new file mode 100644 index 000000000000..a7ef7fdfa7d0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/publicipaddresses.go @@ -0,0 +1,505 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// PublicIPAddressesClient is the network Client +type PublicIPAddressesClient struct { + BaseClient +} + +// NewPublicIPAddressesClient creates an instance of the PublicIPAddressesClient client. +func NewPublicIPAddressesClient(subscriptionID string) PublicIPAddressesClient { + return NewPublicIPAddressesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewPublicIPAddressesClientWithBaseURI creates an instance of the PublicIPAddressesClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). 
+func NewPublicIPAddressesClientWithBaseURI(baseURI string, subscriptionID string) PublicIPAddressesClient { + return PublicIPAddressesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a static or dynamic public IP address. +// Parameters: +// resourceGroupName - the name of the resource group. +// publicIPAddressName - the name of the public IP address. +// parameters - parameters supplied to the create or update public IP address operation. +func (client PublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress) (result PublicIPAddressesCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PublicIPAddressesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.PublicIPAddressPropertiesFormat.IPConfiguration.IPConfigurationPropertiesFormat.PublicIPAddress", Name: validation.Null, Rule: false, Chain: nil}}}, + }}, + }}}}}); err != nil { + return result, validation.NewError("network.PublicIPAddressesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, publicIPAddressName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client PublicIPAddressesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "publicIpAddressName": autorest.Encode("path", publicIPAddressName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. 
The method will close the
+// http.Response Body if it receives an error.
+func (client PublicIPAddressesClient) CreateOrUpdateSender(req *http.Request) (future PublicIPAddressesCreateOrUpdateFuture, err error) {
+	var resp *http.Response
+	resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+	if err != nil {
+		return
+	}
+	future.Future, err = azure.NewFutureFromResponse(resp)
+	return
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client PublicIPAddressesClient) CreateOrUpdateResponder(resp *http.Response) (result PublicIPAddress, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete deletes the specified public IP address.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// publicIPAddressName - the name of the public IP address.
+func (client PublicIPAddressesClient) Delete(ctx context.Context, resourceGroupName string, publicIPAddressName string) (result PublicIPAddressesDeleteFuture, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/PublicIPAddressesClient.Delete")
+		defer func() {
+			sc := -1
+			if result.Response() != nil {
+				sc = result.Response().StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.DeletePreparer(ctx, resourceGroupName, publicIPAddressName)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", nil, "Failure preparing request")
+		return
+	}
+
+	result, err = client.DeleteSender(req)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Delete", result.Response(), "Failure sending request")
+		return
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client PublicIPAddressesClient) DeletePreparer(ctx context.Context, resourceGroupName string, publicIPAddressName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"publicIpAddressName": autorest.Encode("path", publicIPAddressName),
+		"resourceGroupName":   autorest.Encode("path", resourceGroupName),
+		"subscriptionId":      autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2015-06-15"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client PublicIPAddressesClient) DeleteSender(req *http.Request) (future PublicIPAddressesDeleteFuture, err error) {
+	var resp *http.Response
+	resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
+	if err != nil {
+		return
+	}
+	future.Future, err = azure.NewFutureFromResponse(resp)
+	return
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client PublicIPAddressesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get gets the specified public IP address in a specified resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// publicIPAddressName - the name of the public IP address.
+// expand - expands referenced resources.
+func (client PublicIPAddressesClient) Get(ctx context.Context, resourceGroupName string, publicIPAddressName string, expand string) (result PublicIPAddress, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/PublicIPAddressesClient.Get")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.GetPreparer(ctx, resourceGroupName, publicIPAddressName, expand)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client PublicIPAddressesClient) GetPreparer(ctx context.Context, resourceGroupName string, publicIPAddressName string, expand string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"publicIpAddressName": autorest.Encode("path", publicIPAddressName),
+		"resourceGroupName":   autorest.Encode("path", resourceGroupName),
+		"subscriptionId":      autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2015-06-15"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if len(expand) > 0 {
+		queryParameters["$expand"] = autorest.Encode("query", expand)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client PublicIPAddressesClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client PublicIPAddressesClient) GetResponder(resp *http.Response) (result PublicIPAddress, err error) {
+	err = autorest.Respond(
+		resp,
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List gets all public IP addresses in a resource group.
+// Parameters: +// resourceGroupName - the name of the resource group. +func (client PublicIPAddressesClient) List(ctx context.Context, resourceGroupName string) (result PublicIPAddressListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PublicIPAddressesClient.List") + defer func() { + sc := -1 + if result.pialr.Response.Response != nil { + sc = result.pialr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.pialr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure sending request") + return + } + + result.pialr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client PublicIPAddressesClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client PublicIPAddressesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client PublicIPAddressesClient) ListResponder(resp *http.Response) (result PublicIPAddressListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client PublicIPAddressesClient) listNextResults(ctx context.Context, lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, err error) { + req, err := lastResults.publicIPAddressListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client PublicIPAddressesClient) ListComplete(ctx context.Context, resourceGroupName string) (result PublicIPAddressListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PublicIPAddressesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName) + return +} + +// ListAll gets all the public IP addresses in a subscription. +func (client PublicIPAddressesClient) ListAll(ctx context.Context) (result PublicIPAddressListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PublicIPAddressesClient.ListAll") + defer func() { + sc := -1 + if result.pialr.Response.Response != nil { + sc = result.pialr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listAllNextResults + req, err := client.ListAllPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", nil, "Failure preparing request") + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.pialr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", resp, "Failure sending request") + return + } + + result.pialr, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client PublicIPAddressesClient) ListAllPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. 
+func (client PublicIPAddressesClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client PublicIPAddressesClient) ListAllResponder(resp *http.Response) (result PublicIPAddressListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listAllNextResults retrieves the next set of results, if any. +func (client PublicIPAddressesClient) listAllNextResults(ctx context.Context, lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, err error) { + req, err := lastResults.publicIPAddressListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "listAllNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "listAllNextResults", resp, "Failure sending next results request") + } + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.PublicIPAddressesClient", "listAllNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListAllComplete enumerates all values, automatically crossing page boundaries as required. +func (client PublicIPAddressesClient) ListAllComplete(ctx context.Context) (result PublicIPAddressListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/PublicIPAddressesClient.ListAll") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListAll(ctx) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/routes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/routes.go new file mode 100644 index 000000000000..552e74f1ea81 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/routes.go @@ -0,0 +1,388 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
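For orientation, the generated clients in this patch all follow the same AutoRest conventions: a Preparer/Sender/Responder triple per operation, and a returned future for long-running calls. Below is a minimal sketch of driving PublicIPAddressesClient from caller code; it assumes the usual go-autorest helpers (auth.NewAuthorizerFromEnvironment, to.StringPtr, the embedded azure.Future's WaitForCompletionRef) and the Result method that AutoRest generates for PublicIPAddressesCreateOrUpdateFuture in models.go. The subscription ID, resource group, IP name, and location are placeholders, not values taken from this change.

    package example

    import (
    	"context"
    	"fmt"

    	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network"
    	"github.com/Azure/go-autorest/autorest/azure/auth"
    	"github.com/Azure/go-autorest/autorest/to"
    )

    // createPublicIP is an illustrative sketch: it creates (or updates) a public
    // IP address and blocks until the long-running operation completes.
    func createPublicIP(ctx context.Context, subscriptionID, resourceGroup, ipName string) error {
    	// Assumption: credentials come from the standard AZURE_* environment variables.
    	authorizer, err := auth.NewAuthorizerFromEnvironment()
    	if err != nil {
    		return err
    	}
    	client := network.NewPublicIPAddressesClient(subscriptionID)
    	client.Authorizer = authorizer

    	future, err := client.CreateOrUpdate(ctx, resourceGroup, ipName, network.PublicIPAddress{
    		Location: to.StringPtr("eastus"),
    	})
    	if err != nil {
    		return err
    	}
    	// The returned future embeds azure.Future; WaitForCompletionRef polls the
    	// operation until it reaches a terminal state.
    	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
    		return err
    	}
    	// Result (generated in models.go) decodes the final PublicIPAddress payload.
    	ip, err := future.Result(client)
    	if err != nil {
    		return err
    	}
    	fmt.Println("provisioned:", *ip.Name)
    	return nil
    }

Delete follows the same shape, except that its future's Result yields only an autorest.Response rather than a typed payload.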
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// RoutesClient is the network Client +type RoutesClient struct { + BaseClient +} + +// NewRoutesClient creates an instance of the RoutesClient client. +func NewRoutesClient(subscriptionID string) RoutesClient { + return NewRoutesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRoutesClientWithBaseURI creates an instance of the RoutesClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewRoutesClientWithBaseURI(baseURI string, subscriptionID string) RoutesClient { + return RoutesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a route in the specified route table. +// Parameters: +// resourceGroupName - the name of the resource group. +// routeTableName - the name of the route table. +// routeName - the name of the route. +// routeParameters - parameters supplied to the create or update route operation. +func (client RoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters Route) (result RoutesCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RoutesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, routeTableName, routeName, routeParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client RoutesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters Route) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeName": autorest.Encode("path", routeName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}", pathParameters), + autorest.WithJSON(routeParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client RoutesClient) CreateOrUpdateSender(req *http.Request) (future RoutesCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client RoutesClient) CreateOrUpdateResponder(resp *http.Response) (result Route, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified route from a route table. +// Parameters: +// resourceGroupName - the name of the resource group. +// routeTableName - the name of the route table. +// routeName - the name of the route. +func (client RoutesClient) Delete(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) (result RoutesDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RoutesClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, routeTableName, routeName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client RoutesClient) DeletePreparer(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeName": autorest.Encode("path", routeName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client RoutesClient) DeleteSender(req *http.Request) (future RoutesDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client RoutesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified route from a route table. +// Parameters: +// resourceGroupName - the name of the resource group. +// routeTableName - the name of the route table. +// routeName - the name of the route. +func (client RoutesClient) Get(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) (result Route, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RoutesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, routeTableName, routeName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RoutesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client RoutesClient) GetPreparer(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeName": autorest.Encode("path", routeName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client RoutesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client RoutesClient) GetResponder(resp *http.Response) (result Route, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all routes in a route table. +// Parameters: +// resourceGroupName - the name of the resource group. +// routeTableName - the name of the route table. 
+func (client RoutesClient) List(ctx context.Context, resourceGroupName string, routeTableName string) (result RouteListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RoutesClient.List") + defer func() { + sc := -1 + if result.rlr.Response.Response != nil { + sc = result.rlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, routeTableName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.rlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure sending request") + return + } + + result.rlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client RoutesClient) ListPreparer(ctx context.Context, resourceGroupName string, routeTableName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client RoutesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client RoutesClient) ListResponder(resp *http.Response) (result RouteListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client RoutesClient) listNextResults(ctx context.Context, lastResults RouteListResult) (result RouteListResult, err error) { + req, err := lastResults.routeListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.RoutesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RoutesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client RoutesClient) ListComplete(ctx context.Context, resourceGroupName string, routeTableName string) (result RouteListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RoutesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, routeTableName) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/routetables.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/routetables.go new file mode 100644 index 000000000000..0f9451d4f206 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/routetables.go @@ -0,0 +1,492 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// RouteTablesClient is the network Client +type RouteTablesClient struct { + BaseClient +} + +// NewRouteTablesClient creates an instance of the RouteTablesClient client. +func NewRouteTablesClient(subscriptionID string) RouteTablesClient { + return NewRouteTablesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRouteTablesClientWithBaseURI creates an instance of the RouteTablesClient client using a custom endpoint. Use +// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
+func NewRouteTablesClientWithBaseURI(baseURI string, subscriptionID string) RouteTablesClient { + return RouteTablesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or updates a route table in a specified resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// routeTableName - the name of the route table. +// parameters - parameters supplied to the create or update route table operation. +func (client RouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, parameters RouteTable) (result RouteTablesCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RouteTablesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, routeTableName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client RouteTablesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, routeTableName string, parameters RouteTable) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client RouteTablesClient) CreateOrUpdateSender(req *http.Request) (future RouteTablesCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client RouteTablesClient) CreateOrUpdateResponder(resp *http.Response) (result RouteTable, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified route table. +// Parameters: +// resourceGroupName - the name of the resource group. 
+// routeTableName - the name of the route table. +func (client RouteTablesClient) Delete(ctx context.Context, resourceGroupName string, routeTableName string) (result RouteTablesDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RouteTablesClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, routeTableName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client RouteTablesClient) DeletePreparer(ctx context.Context, resourceGroupName string, routeTableName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client RouteTablesClient) DeleteSender(req *http.Request) (future RouteTablesDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client RouteTablesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified route table. +// Parameters: +// resourceGroupName - the name of the resource group. +// routeTableName - the name of the route table. +// expand - expands referenced resources. 
+func (client RouteTablesClient) Get(ctx context.Context, resourceGroupName string, routeTableName string, expand string) (result RouteTable, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RouteTablesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, routeTableName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client RouteTablesClient) GetPreparer(ctx context.Context, resourceGroupName string, routeTableName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "routeTableName": autorest.Encode("path", routeTableName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client RouteTablesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client RouteTablesClient) GetResponder(resp *http.Response) (result RouteTable, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all route tables in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. 
+func (client RouteTablesClient) List(ctx context.Context, resourceGroupName string) (result RouteTableListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RouteTablesClient.List") + defer func() { + sc := -1 + if result.rtlr.Response.Response != nil { + sc = result.rtlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.rtlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure sending request") + return + } + + result.rtlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client RouteTablesClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client RouteTablesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client RouteTablesClient) ListResponder(resp *http.Response) (result RouteTableListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client RouteTablesClient) listNextResults(ctx context.Context, lastResults RouteTableListResult) (result RouteTableListResult, err error) { + req, err := lastResults.routeTableListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client RouteTablesClient) ListComplete(ctx context.Context, resourceGroupName string) (result RouteTableListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RouteTablesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName) + return +} + +// ListAll gets all route tables in a subscription. +func (client RouteTablesClient) ListAll(ctx context.Context) (result RouteTableListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RouteTablesClient.ListAll") + defer func() { + sc := -1 + if result.rtlr.Response.Response != nil { + sc = result.rtlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listAllNextResults + req, err := client.ListAllPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", nil, "Failure preparing request") + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.rtlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure sending request") + return + } + + result.rtlr, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client RouteTablesClient) ListAllPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. 
+func (client RouteTablesClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client RouteTablesClient) ListAllResponder(resp *http.Response) (result RouteTableListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listAllNextResults retrieves the next set of results, if any. +func (client RouteTablesClient) listAllNextResults(ctx context.Context, lastResults RouteTableListResult) (result RouteTableListResult, err error) { + req, err := lastResults.routeTableListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "listAllNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.RouteTablesClient", "listAllNextResults", resp, "Failure sending next results request") + } + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.RouteTablesClient", "listAllNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListAllComplete enumerates all values, automatically crossing page boundaries as required. +func (client RouteTablesClient) ListAllComplete(ctx context.Context) (result RouteTableListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/RouteTablesClient.ListAll") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListAll(ctx) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/securitygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/securitygroups.go new file mode 100644 index 000000000000..cfe35218c97e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/securitygroups.go @@ -0,0 +1,492 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
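Editor's note: the generated clients in this vendored package all expose the same paged-list surface. List and ListAll return a page type whose fn field is wired to listNextResults, while ListComplete wraps that page in an iterator that crosses nextLink boundaries automatically. The sketch below is an illustrative, hypothetical consumer of RouteTablesClient.ListComplete and is not part of the vendored file; the placeholder subscription and resource group IDs, the NewRouteTablesClient constructor, the iterator's NotDone/NextWithContext/Value methods, and auth.NewAuthorizerFromEnvironment are assumed from the SDK's usual conventions rather than shown in this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Hypothetical consumer of the generated RouteTablesClient; IDs are placeholders.
	client := network.NewRouteTablesClient("<subscription-id>")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	ctx := context.Background()
	// ListComplete returns an iterator that follows page boundaries via listNextResults.
	it, err := client.ListComplete(ctx, "<resource-group>")
	if err != nil {
		log.Fatal(err)
	}
	for ; it.NotDone(); err = it.NextWithContext(ctx) {
		if err != nil {
			log.Fatal(err)
		}
		if rt := it.Value(); rt.Name != nil {
			fmt.Println(*rt.Name)
		}
	}
}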
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SecurityGroupsClient is the network Client +type SecurityGroupsClient struct { + BaseClient +} + +// NewSecurityGroupsClient creates an instance of the SecurityGroupsClient client. +func NewSecurityGroupsClient(subscriptionID string) SecurityGroupsClient { + return NewSecurityGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSecurityGroupsClientWithBaseURI creates an instance of the SecurityGroupsClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewSecurityGroupsClientWithBaseURI(baseURI string, subscriptionID string) SecurityGroupsClient { + return SecurityGroupsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a network security group in the specified resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// networkSecurityGroupName - the name of the network security group. +// parameters - parameters supplied to the create or update network security group operation. +func (client SecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup) (result SecurityGroupsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, networkSecurityGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client SecurityGroupsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client SecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (future SecurityGroupsCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SecurityGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result SecurityGroup, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified network security group. +// Parameters: +// resourceGroupName - the name of the resource group. +// networkSecurityGroupName - the name of the network security group. +func (client SecurityGroupsClient) Delete(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) (result SecurityGroupsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, networkSecurityGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client SecurityGroupsClient) DeletePreparer(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityGroupsClient) DeleteSender(req *http.Request) (future SecurityGroupsDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client SecurityGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified network security group. +// Parameters: +// resourceGroupName - the name of the resource group. +// networkSecurityGroupName - the name of the network security group. +// expand - expands referenced resources. +func (client SecurityGroupsClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (result SecurityGroup, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, networkSecurityGroupName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client SecurityGroupsClient) GetPreparer(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityGroupsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SecurityGroupsClient) GetResponder(resp *http.Response) (result SecurityGroup, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all network security groups in a resource group. 
+// Parameters: +// resourceGroupName - the name of the resource group. +func (client SecurityGroupsClient) List(ctx context.Context, resourceGroupName string) (result SecurityGroupListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.List") + defer func() { + sc := -1 + if result.sglr.Response.Response != nil { + sc = result.sglr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.sglr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure sending request") + return + } + + result.sglr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client SecurityGroupsClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityGroupsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SecurityGroupsClient) ListResponder(resp *http.Response) (result SecurityGroupListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client SecurityGroupsClient) listNextResults(ctx context.Context, lastResults SecurityGroupListResult) (result SecurityGroupListResult, err error) { + req, err := lastResults.securityGroupListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client SecurityGroupsClient) ListComplete(ctx context.Context, resourceGroupName string) (result SecurityGroupListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName) + return +} + +// ListAll gets all network security groups in a subscription. +func (client SecurityGroupsClient) ListAll(ctx context.Context) (result SecurityGroupListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.ListAll") + defer func() { + sc := -1 + if result.sglr.Response.Response != nil { + sc = result.sglr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listAllNextResults + req, err := client.ListAllPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", nil, "Failure preparing request") + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.sglr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure sending request") + return + } + + result.sglr, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client SecurityGroupsClient) ListAllPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. 
+func (client SecurityGroupsClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client SecurityGroupsClient) ListAllResponder(resp *http.Response) (result SecurityGroupListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listAllNextResults retrieves the next set of results, if any. +func (client SecurityGroupsClient) listAllNextResults(ctx context.Context, lastResults SecurityGroupListResult) (result SecurityGroupListResult, err error) { + req, err := lastResults.securityGroupListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "listAllNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "listAllNextResults", resp, "Failure sending next results request") + } + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityGroupsClient", "listAllNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListAllComplete enumerates all values, automatically crossing page boundaries as required. +func (client SecurityGroupsClient) ListAllComplete(ctx context.Context) (result SecurityGroupListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityGroupsClient.ListAll") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListAll(ctx) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/securityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/securityrules.go new file mode 100644 index 000000000000..a51418c674be --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/securityrules.go @@ -0,0 +1,398 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
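Editor's note: CreateOrUpdate and Delete on these clients return generated future types rather than the final resource; the Sender only starts the ARM long-running operation and captures the initial response in azure.Future. The sketch below is an illustrative, hypothetical caller of SecurityGroupsClient.CreateOrUpdate and is not part of the vendored file; the placeholder names, the future's WaitForCompletionRef and Result helpers, and the autorest/to and auth packages are assumptions drawn from the SDK's usual conventions, not from this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Hypothetical consumer of the generated SecurityGroupsClient; IDs are placeholders.
	client := network.NewSecurityGroupsClient("<subscription-id>")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	ctx := context.Background()
	// CreateOrUpdate returns a future immediately; the PUT may still be provisioning.
	future, err := client.CreateOrUpdate(ctx, "<resource-group>", "<nsg-name>", network.SecurityGroup{
		Location: to.StringPtr("eastus"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Poll the ARM operation until it reaches a terminal state.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		log.Fatal(err)
	}
	// Result unmarshals the final SecurityGroup once the operation has completed.
	nsg, err := future.Result(client)
	if err != nil {
		log.Fatal(err)
	}
	if nsg.Name != nil {
		fmt.Println("created network security group:", *nsg.Name)
	}
}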
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SecurityRulesClient is the network Client +type SecurityRulesClient struct { + BaseClient +} + +// NewSecurityRulesClient creates an instance of the SecurityRulesClient client. +func NewSecurityRulesClient(subscriptionID string) SecurityRulesClient { + return NewSecurityRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSecurityRulesClientWithBaseURI creates an instance of the SecurityRulesClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewSecurityRulesClientWithBaseURI(baseURI string, subscriptionID string) SecurityRulesClient { + return SecurityRulesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a security rule in the specified network security group. +// Parameters: +// resourceGroupName - the name of the resource group. +// networkSecurityGroupName - the name of the network security group. +// securityRuleName - the name of the security rule. +// securityRuleParameters - parameters supplied to the create or update network security rule operation. +func (client SecurityRulesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule) (result SecurityRulesCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityRulesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: securityRuleParameters, + Constraints: []validation.Constraint{{Target: "securityRuleParameters.SecurityRulePropertiesFormat", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "securityRuleParameters.SecurityRulePropertiesFormat.SourceAddressPrefix", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "securityRuleParameters.SecurityRulePropertiesFormat.DestinationAddressPrefix", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("network.SecurityRulesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, networkSecurityGroupName, securityRuleName, securityRuleParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client SecurityRulesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "securityRuleName": autorest.Encode("path", securityRuleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}", pathParameters), + autorest.WithJSON(securityRuleParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityRulesClient) CreateOrUpdateSender(req *http.Request) (future SecurityRulesCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SecurityRulesClient) CreateOrUpdateResponder(resp *http.Response) (result SecurityRule, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified network security rule. +// Parameters: +// resourceGroupName - the name of the resource group. +// networkSecurityGroupName - the name of the network security group. +// securityRuleName - the name of the security rule. +func (client SecurityRulesClient) Delete(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (result SecurityRulesDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityRulesClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, networkSecurityGroupName, securityRuleName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client SecurityRulesClient) DeletePreparer(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "securityRuleName": autorest.Encode("path", securityRuleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityRulesClient) DeleteSender(req *http.Request) (future SecurityRulesDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SecurityRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get the specified network security rule. +// Parameters: +// resourceGroupName - the name of the resource group. +// networkSecurityGroupName - the name of the network security group. +// securityRuleName - the name of the security rule. +func (client SecurityRulesClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (result SecurityRule, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityRulesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, networkSecurityGroupName, securityRuleName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client SecurityRulesClient) GetPreparer(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "securityRuleName": autorest.Encode("path", securityRuleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityRulesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SecurityRulesClient) GetResponder(resp *http.Response) (result SecurityRule, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all security rules in a network security group. +// Parameters: +// resourceGroupName - the name of the resource group. +// networkSecurityGroupName - the name of the network security group. +func (client SecurityRulesClient) List(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) (result SecurityRuleListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityRulesClient.List") + defer func() { + sc := -1 + if result.srlr.Response.Response != nil { + sc = result.srlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, networkSecurityGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.srlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure sending request") + return + } + + result.srlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client SecurityRulesClient) ListPreparer(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": autorest.Encode("path", networkSecurityGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityRulesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SecurityRulesClient) ListResponder(resp *http.Response) (result SecurityRuleListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client SecurityRulesClient) listNextResults(ctx context.Context, lastResults SecurityRuleListResult) (result SecurityRuleListResult, err error) { + req, err := lastResults.securityRuleListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.SecurityRulesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SecurityRulesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client SecurityRulesClient) ListComplete(ctx context.Context, resourceGroupName string, networkSecurityGroupName string) (result SecurityRuleListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SecurityRulesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, networkSecurityGroupName) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/subnets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/subnets.go new file mode 100644 index 000000000000..c7ba6dda8966 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/subnets.go @@ -0,0 +1,392 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// SubnetsClient is the network Client +type SubnetsClient struct { + BaseClient +} + +// NewSubnetsClient creates an instance of the SubnetsClient client. +func NewSubnetsClient(subscriptionID string) SubnetsClient { + return NewSubnetsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSubnetsClientWithBaseURI creates an instance of the SubnetsClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewSubnetsClientWithBaseURI(baseURI string, subscriptionID string) SubnetsClient { + return SubnetsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a subnet in the specified virtual network. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkName - the name of the virtual network. +// subnetName - the name of the subnet. +// subnetParameters - parameters supplied to the create or update subnet operation. 
+func (client SubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet) (result SubnetsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SubnetsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client SubnetsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subnetName": autorest.Encode("path", subnetName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets/{subnetName}", pathParameters), + autorest.WithJSON(subnetParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client SubnetsClient) CreateOrUpdateSender(req *http.Request) (future SubnetsCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SubnetsClient) CreateOrUpdateResponder(resp *http.Response) (result Subnet, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified subnet. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkName - the name of the virtual network. +// subnetName - the name of the subnet. 
+func (client SubnetsClient) Delete(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) (result SubnetsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SubnetsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, virtualNetworkName, subnetName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client SubnetsClient) DeletePreparer(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subnetName": autorest.Encode("path", subnetName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets/{subnetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SubnetsClient) DeleteSender(req *http.Request) (future SubnetsDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SubnetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified subnet by virtual network and resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkName - the name of the virtual network. +// subnetName - the name of the subnet. +// expand - expands referenced resources. 
+func (client SubnetsClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result Subnet, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SubnetsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, virtualNetworkName, subnetName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client SubnetsClient) GetPreparer(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subnetName": autorest.Encode("path", subnetName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets/{subnetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SubnetsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SubnetsClient) GetResponder(resp *http.Response) (result Subnet, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all subnets in a virtual network. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkName - the name of the virtual network. 
+func (client SubnetsClient) List(ctx context.Context, resourceGroupName string, virtualNetworkName string) (result SubnetListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SubnetsClient.List") + defer func() { + sc := -1 + if result.slr.Response.Response != nil { + sc = result.slr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, virtualNetworkName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.slr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure sending request") + return + } + + result.slr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client SubnetsClient) ListPreparer(ctx context.Context, resourceGroupName string, virtualNetworkName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SubnetsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SubnetsClient) ListResponder(resp *http.Response) (result SubnetListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client SubnetsClient) listNextResults(ctx context.Context, lastResults SubnetListResult) (result SubnetListResult, err error) { + req, err := lastResults.subnetListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.SubnetsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.SubnetsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client SubnetsClient) ListComplete(ctx context.Context, resourceGroupName string, virtualNetworkName string) (result SubnetListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/SubnetsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, virtualNetworkName) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/usages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/usages.go new file mode 100644 index 000000000000..ac059da85394 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/usages.go @@ -0,0 +1,160 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// UsagesClient is the network Client +type UsagesClient struct { + BaseClient +} + +// NewUsagesClient creates an instance of the UsagesClient client. +func NewUsagesClient(subscriptionID string) UsagesClient { + return NewUsagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewUsagesClientWithBaseURI creates an instance of the UsagesClient client using a custom endpoint. Use this when +// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
+func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesClient { + return UsagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists compute usages for a subscription. +// Parameters: +// location - the location where resource usage is queried. +func (client UsagesClient) List(ctx context.Context, location string) (result UsagesListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/UsagesClient.List") + defer func() { + sc := -1 + if result.ulr.Response.Response != nil { + sc = result.ulr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: location, + Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("network.UsagesClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, location) + if err != nil { + err = autorest.NewErrorWithError(err, "network.UsagesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.ulr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.UsagesClient", "List", resp, "Failure sending request") + return + } + + result.ulr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.UsagesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client UsagesClient) ListPreparer(ctx context.Context, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client UsagesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client UsagesClient) ListResponder(resp *http.Response) (result UsagesListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client UsagesClient) listNextResults(ctx context.Context, lastResults UsagesListResult) (result UsagesListResult, err error) { + req, err := lastResults.usagesListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.UsagesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.UsagesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.UsagesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client UsagesClient) ListComplete(ctx context.Context, location string) (result UsagesListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/UsagesClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, location) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/version.go new file mode 100644 index 000000000000..3d647a07c844 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/version.go @@ -0,0 +1,30 @@ +package network + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + Version() + " network/2015-06-15" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return version.Number +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/virtualnetworkgatewayconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/virtualnetworkgatewayconnections.go new file mode 100644 index 000000000000..62cea73c9166 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/virtualnetworkgatewayconnections.go @@ -0,0 +1,620 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// VirtualNetworkGatewayConnectionsClient is the network Client +type VirtualNetworkGatewayConnectionsClient struct { + BaseClient +} + +// NewVirtualNetworkGatewayConnectionsClient creates an instance of the VirtualNetworkGatewayConnectionsClient client. +func NewVirtualNetworkGatewayConnectionsClient(subscriptionID string) VirtualNetworkGatewayConnectionsClient { + return NewVirtualNetworkGatewayConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworkGatewayConnectionsClientWithBaseURI creates an instance of the +// VirtualNetworkGatewayConnectionsClient client using a custom endpoint. Use this when interacting with an Azure +// cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewVirtualNetworkGatewayConnectionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkGatewayConnectionsClient { + return VirtualNetworkGatewayConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a virtual network gateway connection in the specified resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkGatewayConnectionName - the name of the virtual network gateway connection. +// parameters - parameters supplied to the create or update virtual network gateway connection operation. +func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection) (result VirtualNetworkGatewayConnectionsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayConnectionsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, virtualNetworkGatewayConnectionName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateSender(req *http.Request) (future VirtualNetworkGatewayConnectionsCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetworkGatewayConnection, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified virtual network Gateway connection. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkGatewayConnectionName - the name of the virtual network gateway connection. +func (client VirtualNetworkGatewayConnectionsClient) Delete(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string) (result VirtualNetworkGatewayConnectionsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayConnectionsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, virtualNetworkGatewayConnectionName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client VirtualNetworkGatewayConnectionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) DeleteSender(req *http.Request) (future VirtualNetworkGatewayConnectionsDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified virtual network gateway connection by resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkGatewayConnectionName - the name of the virtual network gateway connection. +func (client VirtualNetworkGatewayConnectionsClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string) (result VirtualNetworkGatewayConnection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayConnectionsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, virtualNetworkGatewayConnectionName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client VirtualNetworkGatewayConnectionsClient) GetPreparer(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) GetResponder(resp *http.Response) (result VirtualNetworkGatewayConnection, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetSharedKey the Get VirtualNetworkGatewayConnectionSharedKey operation retrieves information about the specified +// virtual network gateway connection shared key through Network resource provider. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkGatewayConnectionName - the virtual network gateway connection shared key name. +func (client VirtualNetworkGatewayConnectionsClient) GetSharedKey(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string) (result ConnectionSharedKeyResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayConnectionsClient.GetSharedKey") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetSharedKeyPreparer(ctx, resourceGroupName, virtualNetworkGatewayConnectionName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", nil, "Failure preparing request") + return + } + + resp, err := client.GetSharedKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", resp, "Failure sending request") + return + } + + result, err = client.GetSharedKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "GetSharedKey", resp, "Failure responding to request") + } + + return +} + +// GetSharedKeyPreparer prepares the GetSharedKey request. 
+func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyPreparer(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSharedKeySender sends the GetSharedKey request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetSharedKeyResponder handles the response to the GetSharedKey request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyResponder(resp *http.Response) (result ConnectionSharedKeyResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the List VirtualNetworkGatewayConnections operation retrieves all the virtual network gateways connections +// created. +// Parameters: +// resourceGroupName - the name of the resource group. +func (client VirtualNetworkGatewayConnectionsClient) List(ctx context.Context, resourceGroupName string) (result VirtualNetworkGatewayConnectionListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayConnectionsClient.List") + defer func() { + sc := -1 + if result.vngclr.Response.Response != nil { + sc = result.vngclr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.vngclr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure sending request") + return + } + + result.vngclr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client VirtualNetworkGatewayConnectionsClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) ListResponder(resp *http.Response) (result VirtualNetworkGatewayConnectionListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client VirtualNetworkGatewayConnectionsClient) listNextResults(ctx context.Context, lastResults VirtualNetworkGatewayConnectionListResult) (result VirtualNetworkGatewayConnectionListResult, err error) { + req, err := lastResults.virtualNetworkGatewayConnectionListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client VirtualNetworkGatewayConnectionsClient) ListComplete(ctx context.Context, resourceGroupName string) (result VirtualNetworkGatewayConnectionListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayConnectionsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName) + return +} + +// ResetSharedKey the VirtualNetworkGatewayConnectionResetSharedKey operation resets the virtual network gateway +// connection shared key for passed virtual network gateway connection in the specified resource group through Network +// resource provider. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkGatewayConnectionName - the virtual network gateway connection reset shared key Name. +// parameters - parameters supplied to the begin reset virtual network gateway connection shared key operation +// through network resource provider. +func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKey(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey) (result VirtualNetworkGatewayConnectionsResetSharedKeyFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayConnectionsClient.ResetSharedKey") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ResetSharedKeyPreparer(ctx, resourceGroupName, virtualNetworkGatewayConnectionName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", nil, "Failure preparing request") + return + } + + result, err = client.ResetSharedKeySender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", result.Response(), "Failure sending request") + return + } + + return +} + +// ResetSharedKeyPreparer prepares the ResetSharedKey request. +func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyPreparer(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ResetSharedKeySender sends the ResetSharedKey request. 
The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeySender(req *http.Request) (future VirtualNetworkGatewayConnectionsResetSharedKeyFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// ResetSharedKeyResponder handles the response to the ResetSharedKey request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyResponder(resp *http.Response) (result ConnectionResetSharedKey, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// SetSharedKey the Put VirtualNetworkGatewayConnectionSharedKey operation sets the virtual network gateway connection +// shared key for passed virtual network gateway connection in the specified resource group through Network resource +// provider. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkGatewayConnectionName - the virtual network gateway connection name. +// parameters - parameters supplied to the Begin Set Virtual Network Gateway connection Shared key operation +// throughNetwork resource provider. +func (client VirtualNetworkGatewayConnectionsClient) SetSharedKey(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey) (result VirtualNetworkGatewayConnectionsSetSharedKeyFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewayConnectionsClient.SetSharedKey") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.SetSharedKeyPreparer(ctx, resourceGroupName, virtualNetworkGatewayConnectionName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", nil, "Failure preparing request") + return + } + + result, err = client.SetSharedKeySender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewayConnectionsClient", "SetSharedKey", result.Response(), "Failure sending request") + return + } + + return +} + +// SetSharedKeyPreparer prepares the SetSharedKey request. 
+func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyPreparer(ctx context.Context, resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayConnectionName": autorest.Encode("path", virtualNetworkGatewayConnectionName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// SetSharedKeySender sends the SetSharedKey request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeySender(req *http.Request) (future VirtualNetworkGatewayConnectionsSetSharedKeyFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// SetSharedKeyResponder handles the response to the SetSharedKey request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyResponder(resp *http.Response) (result ConnectionSharedKey, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/virtualnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/virtualnetworkgateways.go new file mode 100644 index 000000000000..e666fb7d6b22 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/virtualnetworkgateways.go @@ -0,0 +1,539 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
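+
+// NOTE: the comment block below is an illustrative usage sketch added for
+// readability of this review; it is not part of the generated SDK. It assumes
+// environment-based credentials, a context.Context named ctx, a populated
+// VirtualNetworkGateway value named parameters, and placeholder resource names.
+//
+//   // import "github.com/Azure/go-autorest/autorest/azure/auth"
+//   authorizer, err := auth.NewAuthorizerFromEnvironment()
+//   if err != nil { /* handle error */ }
+//   gwClient := network.NewVirtualNetworkGatewaysClient("<subscription-id>")
+//   gwClient.Authorizer = authorizer
+//
+//   // CreateOrUpdate is a long-running operation: wait on the returned future,
+//   // then fetch the final resource with Get.
+//   future, err := gwClient.CreateOrUpdate(ctx, "example-rg", "example-gw", parameters)
+//   if err != nil { /* handle error */ }
+//   if err := future.WaitForCompletionRef(ctx, gwClient.Client); err != nil { /* handle error */ }
+//   gw, err := gwClient.Get(ctx, "example-rg", "example-gw")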
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// VirtualNetworkGatewaysClient is the network Client +type VirtualNetworkGatewaysClient struct { + BaseClient +} + +// NewVirtualNetworkGatewaysClient creates an instance of the VirtualNetworkGatewaysClient client. +func NewVirtualNetworkGatewaysClient(subscriptionID string) VirtualNetworkGatewaysClient { + return NewVirtualNetworkGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworkGatewaysClientWithBaseURI creates an instance of the VirtualNetworkGatewaysClient client using a +// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, +// Azure stack). +func NewVirtualNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkGatewaysClient { + return VirtualNetworkGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a virtual network gateway in the specified resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkGatewayName - the name of the virtual network gateway. +// parameters - parameters supplied to create or update virtual network gateway operation. +func (client VirtualNetworkGatewaysClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (result VirtualNetworkGatewaysCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, virtualNetworkGatewayName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client VirtualNetworkGatewaysClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (future VirtualNetworkGatewaysCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetworkGateway, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified virtual network gateway. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkGatewayName - the name of the virtual network gateway. +func (client VirtualNetworkGatewaysClient) Delete(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string) (result VirtualNetworkGatewaysDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, virtualNetworkGatewayName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client VirtualNetworkGatewaysClient) DeletePreparer(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) DeleteSender(req *http.Request) (future VirtualNetworkGatewaysDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Generatevpnclientpackage generates VPN client package for P2S client of the virtual network gateway in the specified +// resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkGatewayName - the name of the virtual network gateway. +// parameters - parameters supplied to the generate virtual network gateway VPN client package operation. +func (client VirtualNetworkGatewaysClient) Generatevpnclientpackage(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters) (result VirtualNetworkGatewaysGeneratevpnclientpackageFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.Generatevpnclientpackage") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GeneratevpnclientpackagePreparer(ctx, resourceGroupName, virtualNetworkGatewayName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", nil, "Failure preparing request") + return + } + + result, err = client.GeneratevpnclientpackageSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Generatevpnclientpackage", result.Response(), "Failure sending request") + return + } + + return +} + +// GeneratevpnclientpackagePreparer prepares the Generatevpnclientpackage request. 
+func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackagePreparer(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters VpnClientParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GeneratevpnclientpackageSender sends the Generatevpnclientpackage request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageSender(req *http.Request) (future VirtualNetworkGatewaysGeneratevpnclientpackageFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// GeneratevpnclientpackageResponder handles the response to the Generatevpnclientpackage request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageResponder(resp *http.Response) (result String, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets the specified virtual network gateway by resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkGatewayName - the name of the virtual network gateway. +func (client VirtualNetworkGatewaysClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string) (result VirtualNetworkGateway, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, virtualNetworkGatewayName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client VirtualNetworkGatewaysClient) GetPreparer(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) GetResponder(resp *http.Response) (result VirtualNetworkGateway, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all virtual network gateways by resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +func (client VirtualNetworkGatewaysClient) List(ctx context.Context, resourceGroupName string) (result VirtualNetworkGatewayListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.List") + defer func() { + sc := -1 + if result.vnglr.Response.Response != nil { + sc = result.vnglr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.vnglr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure sending request") + return + } + + result.vnglr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client VirtualNetworkGatewaysClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) ListResponder(resp *http.Response) (result VirtualNetworkGatewayListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client VirtualNetworkGatewaysClient) listNextResults(ctx context.Context, lastResults VirtualNetworkGatewayListResult) (result VirtualNetworkGatewayListResult, err error) { + req, err := lastResults.virtualNetworkGatewayListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client VirtualNetworkGatewaysClient) ListComplete(ctx context.Context, resourceGroupName string) (result VirtualNetworkGatewayListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName) + return +} + +// Reset resets the primary of the virtual network gateway in the specified resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkGatewayName - the name of the virtual network gateway. 
+// parameters - virtual network gateway vip address supplied to the begin reset of the active-active feature +// enabled gateway. +func (client VirtualNetworkGatewaysClient) Reset(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (result VirtualNetworkGatewaysResetFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworkGatewaysClient.Reset") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ResetPreparer(ctx, resourceGroupName, virtualNetworkGatewayName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", nil, "Failure preparing request") + return + } + + result, err = client.ResetSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkGatewaysClient", "Reset", result.Response(), "Failure sending request") + return + } + + return +} + +// ResetPreparer prepares the Reset request. +func (client VirtualNetworkGatewaysClient) ResetPreparer(ctx context.Context, resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkGatewayName": autorest.Encode("path", virtualNetworkGatewayName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ResetSender sends the Reset request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) ResetSender(req *http.Request) (future VirtualNetworkGatewaysResetFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// ResetResponder handles the response to the Reset request. The method always +// closes the http.Response Body. 
+func (client VirtualNetworkGatewaysClient) ResetResponder(resp *http.Response) (result VirtualNetworkGateway, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/virtualnetworks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/virtualnetworks.go new file mode 100644 index 000000000000..d911c06b6b09 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network/virtualnetworks.go @@ -0,0 +1,492 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// VirtualNetworksClient is the network Client +type VirtualNetworksClient struct { + BaseClient +} + +// NewVirtualNetworksClient creates an instance of the VirtualNetworksClient client. +func NewVirtualNetworksClient(subscriptionID string) VirtualNetworksClient { + return NewVirtualNetworksClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworksClientWithBaseURI creates an instance of the VirtualNetworksClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). +func NewVirtualNetworksClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworksClient { + return VirtualNetworksClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a virtual network in the specified resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkName - the name of the virtual network. 
+// parameters - parameters supplied to the create or update virtual network operation +func (client VirtualNetworksClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork) (result VirtualNetworksCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworksClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, virtualNetworkName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client VirtualNetworksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworksClient) CreateOrUpdateSender(req *http.Request) (future VirtualNetworksCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualNetworksClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetwork, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified virtual network. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkName - the name of the virtual network. 
+func (client VirtualNetworksClient) Delete(ctx context.Context, resourceGroupName string, virtualNetworkName string) (result VirtualNetworksDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworksClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, virtualNetworkName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client VirtualNetworksClient) DeletePreparer(ctx context.Context, resourceGroupName string, virtualNetworkName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworksClient) DeleteSender(req *http.Request) (future VirtualNetworksDeleteFuture, err error) { + var resp *http.Response + resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualNetworksClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the specified virtual network by resource group. +// Parameters: +// resourceGroupName - the name of the resource group. +// virtualNetworkName - the name of the virtual network. +// expand - expands referenced resources. 
+func (client VirtualNetworksClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, expand string) (result VirtualNetwork, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworksClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, virtualNetworkName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualNetworksClient) GetPreparer(ctx context.Context, resourceGroupName string, virtualNetworkName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworksClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualNetworksClient) GetResponder(resp *http.Response) (result VirtualNetwork, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all virtual networks in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. 
+func (client VirtualNetworksClient) List(ctx context.Context, resourceGroupName string) (result VirtualNetworkListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworksClient.List") + defer func() { + sc := -1 + if result.vnlr.Response.Response != nil { + sc = result.vnlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.vnlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure sending request") + return + } + + result.vnlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualNetworksClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworksClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualNetworksClient) ListResponder(resp *http.Response) (result VirtualNetworkListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client VirtualNetworksClient) listNextResults(ctx context.Context, lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, err error) { + req, err := lastResults.virtualNetworkListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client VirtualNetworksClient) ListComplete(ctx context.Context, resourceGroupName string) (result VirtualNetworkListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworksClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName) + return +} + +// ListAll gets all virtual networks in a subscription. +func (client VirtualNetworksClient) ListAll(ctx context.Context) (result VirtualNetworkListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworksClient.ListAll") + defer func() { + sc := -1 + if result.vnlr.Response.Response != nil { + sc = result.vnlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listAllNextResults + req, err := client.ListAllPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", nil, "Failure preparing request") + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.vnlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure sending request") + return + } + + result.vnlr, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client VirtualNetworksClient) ListAllPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-06-15" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualnetworks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualNetworksClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client VirtualNetworksClient) ListAllResponder(resp *http.Response) (result VirtualNetworkListResult, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listAllNextResults retrieves the next set of results, if any. +func (client VirtualNetworksClient) listAllNextResults(ctx context.Context, lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, err error) { + req, err := lastResults.virtualNetworkListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "listAllNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "listAllNextResults", resp, "Failure sending next results request") + } + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "listAllNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListAllComplete enumerates all values, automatically crossing page boundaries as required. +func (client VirtualNetworksClient) ListAllComplete(ctx context.Context) (result VirtualNetworkListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VirtualNetworksClient.ListAll") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListAll(ctx) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/classicadministrators.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/classicadministrators.go index c0dc1eeeb41d..966562ccdac8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/classicadministrators.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/classicadministrators.go @@ -35,7 +35,9 @@ func NewClassicAdministratorsClient(subscriptionID string) ClassicAdministrators return NewClassicAdministratorsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewClassicAdministratorsClientWithBaseURI creates an instance of the ClassicAdministratorsClient client. +// NewClassicAdministratorsClientWithBaseURI creates an instance of the ClassicAdministratorsClient client using a +// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, +// Azure stack). 
func NewClassicAdministratorsClientWithBaseURI(baseURI string, subscriptionID string) ClassicAdministratorsClient { return ClassicAdministratorsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -96,8 +98,7 @@ func (client ClassicAdministratorsClient) ListPreparer(ctx context.Context) (*ht // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ClassicAdministratorsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -105,7 +106,6 @@ func (client ClassicAdministratorsClient) ListSender(req *http.Request) (*http.R func (client ClassicAdministratorsClient) ListResponder(resp *http.Response) (result ClassicAdministratorListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/client.go index 362420bdfa36..1958a67601a6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/client.go @@ -41,7 +41,8 @@ func New(subscriptionID string) BaseClient { return NewWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewWithBaseURI creates an instance of the BaseClient client. +// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with +// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { return BaseClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/globaladministrator.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/globaladministrator.go index ec555ff00193..0bfb4187ff48 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/globaladministrator.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/globaladministrator.go @@ -35,7 +35,9 @@ func NewGlobalAdministratorClient(subscriptionID string) GlobalAdministratorClie return NewGlobalAdministratorClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewGlobalAdministratorClientWithBaseURI creates an instance of the GlobalAdministratorClient client. +// NewGlobalAdministratorClientWithBaseURI creates an instance of the GlobalAdministratorClient client using a custom +// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure +// stack). 
func NewGlobalAdministratorClientWithBaseURI(baseURI string, subscriptionID string) GlobalAdministratorClient { return GlobalAdministratorClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -91,8 +93,7 @@ func (client GlobalAdministratorClient) ElevateAccessPreparer(ctx context.Contex // ElevateAccessSender sends the ElevateAccess request. The method will close the // http.Response Body if it receives an error. func (client GlobalAdministratorClient) ElevateAccessSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ElevateAccessResponder handles the response to the ElevateAccess request. The method always @@ -100,7 +101,6 @@ func (client GlobalAdministratorClient) ElevateAccessSender(req *http.Request) ( func (client GlobalAdministratorClient) ElevateAccessResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByClosing()) result.Response = resp diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/permissions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/permissions.go index c99b1e4ae6a1..a8e6bd4826bf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/permissions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/permissions.go @@ -35,7 +35,8 @@ func NewPermissionsClient(subscriptionID string) PermissionsClient { return NewPermissionsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewPermissionsClientWithBaseURI creates an instance of the PermissionsClient client. +// NewPermissionsClientWithBaseURI creates an instance of the PermissionsClient client using a custom endpoint. Use +// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewPermissionsClientWithBaseURI(baseURI string, subscriptionID string) PermissionsClient { return PermissionsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -107,8 +108,7 @@ func (client PermissionsClient) ListForResourcePreparer(ctx context.Context, res // ListForResourceSender sends the ListForResource request. The method will close the // http.Response Body if it receives an error. func (client PermissionsClient) ListForResourceSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListForResourceResponder handles the response to the ListForResource request. The method always @@ -116,7 +116,6 @@ func (client PermissionsClient) ListForResourceSender(req *http.Request) (*http. 
func (client PermissionsClient) ListForResourceResponder(resp *http.Response) (result PermissionGetResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -220,8 +219,7 @@ func (client PermissionsClient) ListForResourceGroupPreparer(ctx context.Context // ListForResourceGroupSender sends the ListForResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client PermissionsClient) ListForResourceGroupSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListForResourceGroupResponder handles the response to the ListForResourceGroup request. The method always @@ -229,7 +227,6 @@ func (client PermissionsClient) ListForResourceGroupSender(req *http.Request) (* func (client PermissionsClient) ListForResourceGroupResponder(resp *http.Response) (result PermissionGetResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/provideroperationsmetadata.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/provideroperationsmetadata.go index 4e915dca25ae..73775eb6b343 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/provideroperationsmetadata.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/provideroperationsmetadata.go @@ -36,7 +36,9 @@ func NewProviderOperationsMetadataClient(subscriptionID string) ProviderOperatio return NewProviderOperationsMetadataClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewProviderOperationsMetadataClientWithBaseURI creates an instance of the ProviderOperationsMetadataClient client. +// NewProviderOperationsMetadataClientWithBaseURI creates an instance of the ProviderOperationsMetadataClient client +// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign +// clouds, Azure stack). func NewProviderOperationsMetadataClientWithBaseURI(baseURI string, subscriptionID string) ProviderOperationsMetadataClient { return ProviderOperationsMetadataClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -104,8 +106,7 @@ func (client ProviderOperationsMetadataClient) GetPreparer(ctx context.Context, // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ProviderOperationsMetadataClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetResponder handles the response to the Get request. 
The method always @@ -113,7 +114,6 @@ func (client ProviderOperationsMetadataClient) GetSender(req *http.Request) (*ht func (client ProviderOperationsMetadataClient) GetResponder(resp *http.Response) (result ProviderOperationsMetadata, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -180,8 +180,7 @@ func (client ProviderOperationsMetadataClient) ListPreparer(ctx context.Context, // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ProviderOperationsMetadataClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListResponder handles the response to the List request. The method always @@ -189,7 +188,6 @@ func (client ProviderOperationsMetadataClient) ListSender(req *http.Request) (*h func (client ProviderOperationsMetadataClient) ListResponder(resp *http.Response) (result ProviderOperationsMetadataListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roleassignments.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roleassignments.go index 39954b869dfa..3fa919c0134e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roleassignments.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roleassignments.go @@ -36,7 +36,8 @@ func NewRoleAssignmentsClient(subscriptionID string) RoleAssignmentsClient { return NewRoleAssignmentsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewRoleAssignmentsClientWithBaseURI creates an instance of the RoleAssignmentsClient client. +// NewRoleAssignmentsClientWithBaseURI creates an instance of the RoleAssignmentsClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewRoleAssignmentsClientWithBaseURI(baseURI string, subscriptionID string) RoleAssignmentsClient { return RoleAssignmentsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -116,8 +117,7 @@ func (client RoleAssignmentsClient) CreatePreparer(ctx context.Context, scope st // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) CreateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // CreateResponder handles the response to the Create request. 
The method always @@ -125,7 +125,6 @@ func (client RoleAssignmentsClient) CreateSender(req *http.Request) (*http.Respo func (client RoleAssignmentsClient) CreateResponder(resp *http.Response) (result RoleAssignment, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -202,8 +201,7 @@ func (client RoleAssignmentsClient) CreateByIDPreparer(ctx context.Context, role // CreateByIDSender sends the CreateByID request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) CreateByIDSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // CreateByIDResponder handles the response to the CreateByID request. The method always @@ -211,7 +209,6 @@ func (client RoleAssignmentsClient) CreateByIDSender(req *http.Request) (*http.R func (client RoleAssignmentsClient) CreateByIDResponder(resp *http.Response) (result RoleAssignment, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -278,8 +275,7 @@ func (client RoleAssignmentsClient) DeletePreparer(ctx context.Context, scope st // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) DeleteSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteResponder handles the response to the Delete request. The method always @@ -287,7 +283,6 @@ func (client RoleAssignmentsClient) DeleteSender(req *http.Request) (*http.Respo func (client RoleAssignmentsClient) DeleteResponder(resp *http.Response) (result RoleAssignment, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -352,8 +347,7 @@ func (client RoleAssignmentsClient) DeleteByIDPreparer(ctx context.Context, role // DeleteByIDSender sends the DeleteByID request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) DeleteByIDSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteByIDResponder handles the response to the DeleteByID request. 
The method always @@ -361,7 +355,6 @@ func (client RoleAssignmentsClient) DeleteByIDSender(req *http.Request) (*http.R func (client RoleAssignmentsClient) DeleteByIDResponder(resp *http.Response) (result RoleAssignment, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -428,8 +421,7 @@ func (client RoleAssignmentsClient) GetPreparer(ctx context.Context, scope strin // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetResponder handles the response to the Get request. The method always @@ -437,7 +429,6 @@ func (client RoleAssignmentsClient) GetSender(req *http.Request) (*http.Response func (client RoleAssignmentsClient) GetResponder(resp *http.Response) (result RoleAssignment, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -502,8 +493,7 @@ func (client RoleAssignmentsClient) GetByIDPreparer(ctx context.Context, roleID // GetByIDSender sends the GetByID request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) GetByIDSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetByIDResponder handles the response to the GetByID request. The method always @@ -511,7 +501,6 @@ func (client RoleAssignmentsClient) GetByIDSender(req *http.Request) (*http.Resp func (client RoleAssignmentsClient) GetByIDResponder(resp *http.Response) (result RoleAssignment, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -582,8 +571,7 @@ func (client RoleAssignmentsClient) ListPreparer(ctx context.Context, filter str // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -591,7 +579,6 @@ func (client RoleAssignmentsClient) ListSender(req *http.Request) (*http.Respons func (client RoleAssignmentsClient) ListResponder(resp *http.Response) (result RoleAssignmentListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -709,8 +696,7 @@ func (client RoleAssignmentsClient) ListForResourcePreparer(ctx context.Context, // ListForResourceSender sends the ListForResource request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) ListForResourceSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListForResourceResponder handles the response to the ListForResource request. The method always @@ -718,7 +704,6 @@ func (client RoleAssignmentsClient) ListForResourceSender(req *http.Request) (*h func (client RoleAssignmentsClient) ListForResourceResponder(resp *http.Response) (result RoleAssignmentListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -828,8 +813,7 @@ func (client RoleAssignmentsClient) ListForResourceGroupPreparer(ctx context.Con // ListForResourceGroupSender sends the ListForResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) ListForResourceGroupSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListForResourceGroupResponder handles the response to the ListForResourceGroup request. The method always @@ -837,7 +821,6 @@ func (client RoleAssignmentsClient) ListForResourceGroupSender(req *http.Request func (client RoleAssignmentsClient) ListForResourceGroupResponder(resp *http.Response) (result RoleAssignmentListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -946,8 +929,7 @@ func (client RoleAssignmentsClient) ListForScopePreparer(ctx context.Context, sc // ListForScopeSender sends the ListForScope request. The method will close the // http.Response Body if it receives an error. func (client RoleAssignmentsClient) ListForScopeSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListForScopeResponder handles the response to the ListForScope request. 
The method always @@ -955,7 +937,6 @@ func (client RoleAssignmentsClient) ListForScopeSender(req *http.Request) (*http func (client RoleAssignmentsClient) ListForScopeResponder(resp *http.Response) (result RoleAssignmentListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roledefinitions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roledefinitions.go index 4c42845fdfc7..bb17aee7b3ec 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roledefinitions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/roledefinitions.go @@ -35,7 +35,8 @@ func NewRoleDefinitionsClient(subscriptionID string) RoleDefinitionsClient { return NewRoleDefinitionsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// NewRoleDefinitionsClientWithBaseURI creates an instance of the RoleDefinitionsClient client. +// NewRoleDefinitionsClientWithBaseURI creates an instance of the RoleDefinitionsClient client using a custom endpoint. +// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewRoleDefinitionsClientWithBaseURI(baseURI string, subscriptionID string) RoleDefinitionsClient { return RoleDefinitionsClient{NewWithBaseURI(baseURI, subscriptionID)} } @@ -105,8 +106,7 @@ func (client RoleDefinitionsClient) CreateOrUpdatePreparer(ctx context.Context, // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client RoleDefinitionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always @@ -114,7 +114,6 @@ func (client RoleDefinitionsClient) CreateOrUpdateSender(req *http.Request) (*ht func (client RoleDefinitionsClient) CreateOrUpdateResponder(resp *http.Response) (result RoleDefinition, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -181,8 +180,7 @@ func (client RoleDefinitionsClient) DeletePreparer(ctx context.Context, scope st // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client RoleDefinitionsClient) DeleteSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) 
+ return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // DeleteResponder handles the response to the Delete request. The method always @@ -190,7 +188,6 @@ func (client RoleDefinitionsClient) DeleteSender(req *http.Request) (*http.Respo func (client RoleDefinitionsClient) DeleteResponder(resp *http.Response) (result RoleDefinition, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -257,8 +254,7 @@ func (client RoleDefinitionsClient) GetPreparer(ctx context.Context, scope strin // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client RoleDefinitionsClient) GetSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetResponder handles the response to the Get request. The method always @@ -266,7 +262,6 @@ func (client RoleDefinitionsClient) GetSender(req *http.Request) (*http.Response func (client RoleDefinitionsClient) GetResponder(resp *http.Response) (result RoleDefinition, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -334,8 +329,7 @@ func (client RoleDefinitionsClient) GetByIDPreparer(ctx context.Context, roleID // GetByIDSender sends the GetByID request. The method will close the // http.Response Body if it receives an error. func (client RoleDefinitionsClient) GetByIDSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // GetByIDResponder handles the response to the GetByID request. The method always @@ -343,7 +337,6 @@ func (client RoleDefinitionsClient) GetByIDSender(req *http.Request) (*http.Resp func (client RoleDefinitionsClient) GetByIDResponder(resp *http.Response) (result RoleDefinition, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) @@ -414,8 +407,7 @@ func (client RoleDefinitionsClient) ListPreparer(ctx context.Context, scope stri // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client RoleDefinitionsClient) ListSender(req *http.Request) (*http.Response, error) { - sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - return autorest.SendWithSender(client, req, sd...) + return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListResponder handles the response to the List request. 
The method always @@ -423,7 +415,6 @@ func (client RoleDefinitionsClient) ListSender(req *http.Request) (*http.Respons func (client RoleDefinitionsClient) ListResponder(resp *http.Response) (result RoleDefinitionListResult, err error) { err = autorest.Respond( resp, - client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/version.go index 2833e0b21000..f0a7e474c8c9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization/version.go @@ -21,7 +21,7 @@ import "github.com/Azure/azure-sdk-for-go/version" // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/" + version.Number + " authorization/2018-01-01-preview" + return "Azure-SDK-For-Go/" + Version() + " authorization/2018-01-01-preview" } // Version returns the semantic version (see http://semver.org) of the client. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go index 073281bb853f..2fe9aaac0303 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -18,4 +18,4 @@ package version // Changes may cause incorrect behavior and will be lost if the code is regenerated. // Number contains the semantic version of this SDK. -const Number = "v36.2.0" +const Number = "v44.0.0" diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore new file mode 100644 index 000000000000..3350aaf70648 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/.gitignore @@ -0,0 +1,32 @@ +# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore) +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.DS_Store +.idea/ +.vscode/ + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# go-autorest specific +vendor/ +autorest/azure/example/example diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md new file mode 100644 index 000000000000..d1f596bfc9b9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/CHANGELOG.md @@ -0,0 +1,1004 @@ +# CHANGELOG + +## v14.2.0 + +- Added package comment to make `github.com/Azure/go-autorest` importable. + +## v14.1.1 + +### Bug Fixes + +- Change `x-ms-authorization-auxiliary` header value separator to comma. + +## v14.1.0 + +### New Features + +- Added `azure.SetEnvironment()` that will update the global environments map with the specified values. + +## v14.0.1 + +### Bug Fixes + +- Fix race condition when refreshing token. +- Fixed some tests to work with Go 1.14. 
+ +## v14.0.0 + +## Breaking Changes + +- By default, the `DoRetryForStatusCodes` functions will no longer infinitely retry a request when the response returns an HTTP status code of 429 (StatusTooManyRequests). To opt in to the old behavior set `autorest.Count429AsRetry` to `false`. + +## New Features + +- Variable `autorest.Max429Delay` can be used to control the maximum delay between retries when a 429 is received with no `Retry-After` header. The default is zero which means there is no cap. + +## v13.4.0 + +## New Features + +- Added field `SendDecorators` to the `Client` type. This can be used to specify a custom chain of SendDecorators per client. +- Added method `Client.Send()` which includes logic for selecting the preferred chain of SendDecorators. + +## v13.3.3 + +### Bug Fixes + +- Fixed connection leak when retrying requests. +- Enabled exponential back-off with a 2-minute cap when retrying on 429. +- Fixed some cases where errors were inadvertently dropped. + +## v13.3.2 + +### Bug Fixes + +- Updated `autorest.AsStringSlice()` to convert slice elements to their string representation. + +## v13.3.1 + +- Updated external dependencies. + +### Bug Fixes + +## v13.3.0 + +### New Features + +- Added support for shared key and shared access signature token authorization. + - `autorest.NewSharedKeyAuthorizer()` and dependent types. + - `autorest.NewSASTokenAuthorizer()` and dependent types. +- Added `ServicePrincipalToken.SetCustomRefresh()` so a custom refresh function can be invoked when a token has expired. + +### Bug Fixes + +- Fixed `cli.AccessTokensPath()` to respect `AZURE_CONFIG_DIR` when set. +- Support parsing error messages in XML responses. + +## v13.2.0 + +### New Features + +- Added the following functions to replace their versions that don't take a context. + - `adal.InitiateDeviceAuthWithContext()` + - `adal.CheckForUserCompletionWithContext()` + - `adal.WaitForUserCompletionWithContext()` + +## v13.1.0 + +### New Features + +- Added support for MSI authentication on Azure App Service and Azure Functions. + +## v13.0.2 + +### Bug Fixes + +- Always retry a request even if the sender returns a non-nil error. + +## v13.0.1 + +## Bug Fixes + +- Fixed `autorest.WithQueryParameters()` so that it properly encodes multi-value query parameters. + +## v13.0.0 + +## Breaking Changes + +The `tracing` package has been rewritten to provide a common interface for consumers to wire in the tracing package of their choice. +What this means is that by default no tracing provider will be compiled into your program and setting the `AZURE_SDK_TRACING_ENABLED` +environment variable will have no effect. To enable this previous behavior you must now add the following import to your source file. +```go + import _ "github.com/Azure/go-autorest/tracing/opencensus" +``` +The APIs required by autorest-generated code have remained but some APIs have been removed and new ones added. +The following APIs and variables have been removed (the majority of them were moved to the `opencensus` package). +- tracing.Transport +- tracing.Enable() +- tracing.EnableWithAIForwarding() +- tracing.Disable() + +The following APIs and types have been added +- tracing.Tracer +- tracing.Register() + +To hook up a tracer simply call `tracing.Register()` passing in a type that satisfies the `tracing.Tracer` interface. + +## v12.4.3 + +### Bug Fixes + +- `autorest.MultiTenantServicePrincipalTokenAuthorizer` will now properly add its auxiliary bearer tokens. 
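The v13.4.0 entry above introduces the per-client `SendDecorators` field and `Client.Send()`, which is what the regenerated `*Sender` methods in this patch now call (for example `return client.Send(req, azure.DoRetryWithRegistration(client.Client))` in `VirtualNetworksClient.ListSender`). The sketch below shows how a consumer of one of the clients added here might install a custom per-client chain; the logging decorator and the assumed precedence (request-context chain, then the client's `SendDecorators`, then the defaults passed by the generated method) are illustrative assumptions, not part of this change.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network"
	"github.com/Azure/go-autorest/autorest"
)

// logRequests is an illustrative SendDecorator: it logs each outgoing request
// before handing it to the next Sender in the chain.
func logRequests() autorest.SendDecorator {
	return func(s autorest.Sender) autorest.Sender {
		return autorest.SenderFunc(func(req *http.Request) (*http.Response, error) {
			fmt.Printf("sending %s %s\n", req.Method, req.URL)
			return s.Do(req)
		})
	}
}

func main() {
	// Placeholder subscription ID; a real caller would also set an Authorizer
	// before invoking List, Get, CreateOrUpdate, etc.
	client := network.NewVirtualNetworksClient("00000000-0000-0000-0000-000000000000")

	// Per-client chain (assumed to take precedence over the default decorator
	// that the generated Sender methods pass to client.Send).
	client.SendDecorators = []autorest.SendDecorator{
		logRequests(),
		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
	}
}
```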
+ +## v12.4.2 + +### Bug Fixes + +- Improvements to the fixes made in v12.4.1. + - Remove `override` stanza from Gopkg.toml and `replace` directive from go.mod as they don't apply when being consumed as a dependency. + - Switched to latest version of `ocagent` that still depends on protobuf v1.2. + - Add indirect dependencies to the `required` clause with matching `constraint` stanzas so that `dep` dependencies match go.sum. + +## v12.4.1 + +### Bug Fixes + +- Updated OpenCensus and OCAgent versions to versions that don't depend on v1.3+ of protobuf as it was breaking kubernetes. +- Pinned opencensus-proto to a version that's compatible with our versions of OpenCensus and OCAgent. + +## v12.4.0 + +### New Features + +- Added `autorest.WithPrepareDecorators` and `autorest.GetPrepareDecorators` for adding and retrieving a custom chain of PrepareDecorators to the provided context. + +## v12.3.0 + +### New Features + +- Support for multi-tenant via x-ms-authorization-auxiliary header has been added for client credentials with + secret scenario; this basically bundles multiple OAuthConfig and ServicePrincipalToken types into corresponding + MultiTenant* types along with a new authorizer that adds the primary and auxiliary token headers to the reqest. + The authenticaion helpers have been updated to support this scenario; if environment var AZURE_AUXILIARY_TENANT_IDS + is set with a semicolon delimited list of tenants the multi-tenant codepath will kick in to create the appropriate authorizer. + See `adal.NewMultiTenantOAuthConfig`, `adal.NewMultiTenantServicePrincipalToken` and `autorest.NewMultiTenantServicePrincipalTokenAuthorizer` + along with their supporting types and methods. +- Added `autorest.WithSendDecorators` and `autorest.GetSendDecorators` for adding and retrieving a custom chain of SendDecorators to the provided context. +- Added `autorest.DoRetryForStatusCodesWithCap` and `autorest.DelayForBackoffWithCap` to enforce an upper bound on the duration between retries. + +## v12.2.0 + +### New Features + +- Added `autorest.WithXML`, `autorest.AsMerge`, `autorest.WithBytes` preparer decorators. +- Added `autorest.ByUnmarshallingBytes` response decorator. +- Added `Response.IsHTTPStatus` and `Response.HasHTTPStatus` helper methods for inspecting HTTP status code in `autorest.Response` types. + +### Bug Fixes + +- `autorest.DelayWithRetryAfter` now supports HTTP-Dates in the `Retry-After` header and is not limited to just 429 status codes. + +## v12.1.0 + +### New Features + +- Added `to.ByteSlicePtr()`. +- Added blob/queue storage resource ID to `azure.ResourceIdentifier`. + +## v12.0.0 + +### Breaking Changes + +In preparation for modules the following deprecated content has been removed. + + - async.NewFuture() + - async.Future.Done() + - async.Future.WaitForCompletion() + - async.DoPollForAsynchronous() + - The `utils` package + - validation.NewErrorWithValidationError() + - The `version` package + +## v11.9.0 + +### New Features + +- Add `ResourceIdentifiers` field to `azure.Environment` containing resource IDs for public and sovereign clouds. + +## v11.8.0 + +### New Features + +- Added `autorest.NewClientWithOptions()` to support endpoints that require free renegotiation. + +## v11.7.1 + +### Bug Fixes + +- Fix missing support for http(s) proxy when using the default sender. + +## v11.7.0 + +### New Features + +- Added methods to obtain a ServicePrincipalToken on the various credential configuration types in the `auth` package. 
+ +## v11.6.1 + +### Bug Fixes + +- Fix ACR DNS endpoint for government clouds. +- Add Cosmos DB DNS endpoints. +- Update dependencies to resolve build breaks in OpenCensus. + +## v11.6.0 + +### New Features + +- Added type `autorest.BasicAuthorizer` to support Basic authentication. + +## v11.5.2 + +### Bug Fixes + +- Fixed `GetTokenFromCLI` did not work with zsh. + +## v11.5.1 + +### Bug Fixes + +- In `Client.sender()` set the minimum TLS version on HTTP clients to 1.2. + +## v11.5.0 + +### New Features + +- The `auth` package has been refactored so that the environment and file settings are now available. +- The methods used in `auth.NewAuthorizerFromEnvironment()` are now exported so that custom authorization chains can be created. +- Added support for certificate authorization for file-based config. + +## v11.4.0 + +### New Features + +- Added `adal.AddToUserAgent()` so callers can append custom data to the user-agent header used for ADAL requests. +- Exported `adal.UserAgent()` for parity with `autorest.Client`. + +## v11.3.2 + +### Bug Fixes + +- In `Future.WaitForCompletionRef()` if the provided context has a deadline don't add the default deadline. + +## v11.3.1 + +### Bug Fixes + +- For an LRO PUT operation the final GET URL was incorrectly set to the Location polling header in some cases. + +## v11.3.0 + +### New Features + +- Added method `ServicePrincipalToken()` to `DeviceFlowConfig` type. + +## v11.2.8 + +### Bug Fixes + +- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package. + +## v11.2.7 + +### Bug Fixes + +- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`. + Note that for backward compatibility reasons, both will work until the next major version release of the package. + +## v11.2.6 + +### Bug Fixes + +- If zero bytes are read from a polling response body don't attempt to unmarshal them. + +## v11.2.5 + +### Bug Fixes + +- Removed race condition in `autorest.DoRetryForStatusCodes`. + +## v11.2.4 + +### Bug Fixes + +- Function `cli.ProfilePath` now respects environment `AZURE_CONFIG_DIR` if available. + +## v11.2.1 + +NOTE: Versions of Go prior to 1.10 have been removed from CI as they no +longer work with golint. + +### Bug Fixes + +- Method `MSIConfig.Authorizer` now supports user-assigned identities. +- The adal package now reports its own user-agent string. + +## v11.2.0 + +### New Features + +- Added `tracing` package that enables instrumentation of HTTP and API calls. + Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable` + will start instrumenting the code for metrics and traces. + Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or + calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an + App Insights Local Forwarder that is needs to be running. Note that if the + AI Local Forwarder is not running tracking will still be enabled. + By default, instrumentation is disabled. Once enabled, instrumentation can also + be programatically disabled by calling `Disable`. +- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated. + +### Bug Fixes + +- Don't use the initial request's context for LRO polling. +- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if + it is already set. 
+ +## v11.1.1 + +### Bug Fixes + +- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller. + +## v11.1.0 + +### New Features + +- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure 2.0 CLI. +- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version. + +## v11.0.1 + +### New Features + +- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication. + +## v11.0.0 + +### Breaking Changes + +- To handle differences between ADFS and AAD the following fields have had their types changed from `string` to `json.Number` + - ExpiresIn + - ExpiresOn + - NotBefore + +### New Features + +- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource. +- Setting a client's `PollingDuration` to zero will use the provided context to control a LRO's polling duration. + +## v10.15.5 + +### Bug Fixes + +- In `DoRetryForStatusCodes`, if a request's context is cancelled return the last response. + +## v10.15.4 + +### Bug Fixes + +- If a polling operation returns a failure status code return the associated error. + +## v10.15.3 + +### Bug Fixes + +- Initialize the polling URL and method for an LRO tracker on each iteration, favoring the Azure-AsyncOperation header. + +## v10.15.2 + +### Bug Fixes + +- Use fmt.Fprint when printing request/response so that any escape sequences aren't treated as format specifiers. + +## v10.15.1 + +### Bug Fixes + +- If an LRO API returns a `Failed` provisioning state in the initial response return an error at that point so the caller doesn't have to poll. +- For failed LROs without an OData v4 error include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure. + +## v10.15.0 + +### New Features + +- Add initial support for request/response logging via setting environment variables. + Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response + without their bodies. To include the bodies set the log level to `LogDebug`. + By default the logger writes to strerr, however it can also write to stdout or a file + if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file + already exists it will be truncated. + IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key + headers. Any other secrets will _not_ be redacted. + +## v10.14.0 + +### New Features + +- Added package version that contains version constants and user-agent data. + +### Bug Fixes + +- Add the user-agent to token requests. + +## v10.13.0 + +- Added support for additionalInfo in ServiceError type. + +## v10.12.0 + +### New Features + +- Added field ServicePrincipalToken.MaxMSIRefreshAttempts to configure the maximun number of attempts to refresh an MSI token. + +## v10.11.4 + +### Bug Fixes + +- If an LRO returns http.StatusOK on the initial response with no async headers return the response body from Future.GetResult(). +- If there is no "final GET URL" return an error from Future.GetResult(). + +## v10.11.3 + +### Bug Fixes + +- In IMDS retry logic, if we don't receive a response don't retry. + - Renamed the retry function so it's clear it's meant for IMDS only. +- For error response bodies that aren't OData-v4 compliant stick the raw JSON in the ServiceError.Details field so the information isn't lost. + - Also add the raw HTTP response to the DetailedResponse. 
+- Removed superfluous wrapping of response error in azure.DoRetryWithRegistration(). + +## v10.11.2 + +### Bug Fixes + +- Validation for integers handles int and int64 types. + +## v10.11.1 + +### Bug Fixes + +- Adding User information to authorization config as parsed from CLI cache. + +## v10.11.0 + +### New Features + +- Added NewServicePrincipalTokenFromManualTokenSecret for creating a new SPT using a manual token and secret +- Added method ServicePrincipalToken.MarshalTokenJSON() to marshall the inner Token + +## v10.10.0 + +### New Features + +- Most ServicePrincipalTokens can now be marshalled/unmarshall to/from JSON (ServicePrincipalCertificateSecret and ServicePrincipalMSISecret are not supported). +- Added method ServicePrincipalToken.SetRefreshCallbacks(). + +## v10.9.2 + +### Bug Fixes + +- Refreshing a refresh token obtained from a web app authorization code now works. + +## v10.9.1 + +### Bug Fixes + +- The retry logic for MSI token requests now uses exponential backoff per the guidelines. +- IsTemporaryNetworkError() will return true for errors that don't implement the net.Error interface. + +## v10.9.0 + +### Deprecated Methods + +| Old Method | New Method | +| -------------------------: | :---------------------------: | +| azure.NewFuture() | azure.NewFutureFromResponse() | +| Future.WaitForCompletion() | Future.WaitForCompletionRef() | + +### New Features + +- Added azure.NewFutureFromResponse() for creating a Future from the initial response from an async operation. +- Added Future.GetResult() for making the final GET call to retrieve the result from an async operation. + +### Bug Fixes + +- Some futures failed to return their results, this should now be fixed. + +## v10.8.2 + +### Bug Fixes + +- Add nil-gaurd to token retry logic. + +## v10.8.1 + +### Bug Fixes + +- Return a TokenRefreshError if the sender fails on the initial request. +- Don't retry on non-temporary network errors. + +## v10.8.0 + +- Added NewAuthorizerFromEnvironmentWithResource() helper function. + +## v10.7.0 + +### New Features + +- Added \*WithContext() methods to ADAL token refresh operations. + +## v10.6.2 + +- Fixed a bug on device authentication. + +## v10.6.1 + +- Added retries to MSI token get request. + +## v10.6.0 + +- Changed MSI token implementation. Now, the token endpoint is the IMDS endpoint. + +## v10.5.1 + +### Bug Fixes + +- `DeviceFlowConfig.Authorizer()` now prints the device code message when running `go test`. `-v` flag is required. + +## v10.5.0 + +### New Features + +- Added NewPollingRequestWithContext() for use with polling asynchronous operations. + +### Bug Fixes + +- Make retry logic use the request's context instead of the deprecated Cancel object. + +## v10.4.0 + +### New Features + +- Added helper for parsing Azure Resource ID's. +- Added deprecation message to utils.GetEnvVarOrExit() + +## v10.3.0 + +### New Features + +- Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid Cloud model, where one may define their own endpoints +- Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where TokenAudience endpoint can be different from ResourceManagerEndpoint + +## v10.2.0 + +### New Features + +- Added endpoints for batch management. + +## v10.1.3 + +### Bug Fixes + +- In Client.Do() invoke WithInspection() last so that it will inspect WithAuthorization(). 
+- Fixed authorization methods to invoke p.Prepare() first, aligning them with the other preparers. + +## v10.1.2 + +- Corrected comment for auth.NewAuthorizerFromFile() function. + +## v10.1.1 + +- Updated version number to match current release. + +## v10.1.0 + +### New Features + +- Expose the polling URL for futures. + +### Bug Fixes + +- Add validation.NewErrorWithValidationError back to prevent breaking changes (it is deprecated). + +## v10.0.0 + +### New Features + +- Added target and innererror fields to ServiceError to comply with OData v4 spec. +- The Done() method on futures will now return a ServiceError object when available (it used to return a partial value of such errors). +- Added helper methods for obtaining authorizers. +- Expose the polling URL for futures. + +### Bug Fixes + +- Switched from glide to dep for dependency management. +- Fixed unmarshaling of ServiceError for JSON bodies that don't conform to the OData spec. +- Fixed a race condition in token refresh. + +### Breaking Changes + +- The ServiceError.Details field type has been changed to match the OData v4 spec. +- Go v1.7 has been dropped from CI. +- API parameter validation failures will now return a unique error type validation.Error. +- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race). + +## v9.10.0 + +- Fix the Service Bus suffix in Azure public env +- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control) + +## v9.9.0 + +### New Features + +- Added EventGridKeyAuthorizer for key authorization with event grid topics. + +### Bug Fixes + +- Fixed race condition when auto-refreshing service principal tokens. + +## v9.8.1 + +### Bug Fixes + +- Added http.StatusNoContent (204) to the list of expected status codes for long-running operations. +- Updated runtime version info so it's current. + +## v9.8.0 + +### New Features + +- Added type azure.AsyncOpIncompleteError to be returned from a future's Result() method when the operation has not completed. + +## v9.7.1 + +### Bug Fixes + +- Use correct AAD and Graph endpoints for US Gov environment. + +## v9.7.0 + +### New Features + +- Added support for application/octet-stream MIME types. + +## v9.6.1 + +### Bug Fixes + +- Ensure Authorization header is added to request when polling for registration status. + +## v9.6.0 + +### New Features + +- Added support for acquiring tokens via MSI with a user assigned identity. + +## v9.5.3 + +### Bug Fixes + +- Don't remove encoding of existing URL Query parameters when calling autorest.WithQueryParameters. +- Set correct Content Type when using autorest.WithFormData. + +## v9.5.2 + +### Bug Fixes + +- Check for nil \*http.Response before dereferencing it. + +## v9.5.1 + +### Bug Fixes + +- Don't count http.StatusTooManyRequests (429) against the retry cap. +- Use retry logic when SkipResourceProviderRegistration is set to true. + +## v9.5.0 + +### New Features + +- Added support for username + password, API key, authoriazation code and cognitive services authentication. +- Added field SkipResourceProviderRegistration to clients to provide a way to skip auto-registration of RPs. +- Added utility function AsStringSlice() to convert its parameters to a string slice. + +### Bug Fixes + +- When checking for authentication failures look at the error type not the status code as it could vary. 
+ +## v9.4.2 + +### Bug Fixes + +- Validate parameters when creating credentials. +- Don't retry requests if the returned status is a 401 (http.StatusUnauthorized) as it will never succeed. + +## v9.4.1 + +### Bug Fixes + +- Update the AccessTokensPath() to read access tokens path through AZURE_ACCESS_TOKEN_FILE. If this + environment variable is not set, it will fall back to use default path set by Azure CLI. +- Use case-insensitive string comparison for polling states. + +## v9.4.0 + +### New Features + +- Added WaitForCompletion() to Future as a default polling implementation. + +### Bug Fixes + +- Method Future.Done() shouldn't update polling status for unexpected HTTP status codes. + +## v9.3.1 + +### Bug Fixes + +- DoRetryForStatusCodes will retry if sender.Do returns a non-nil error. + +## v9.3.0 + +### New Features + +- Added PollingMethod() to Future so callers know what kind of polling mechanism is used. +- Added azure.ChangeToGet() which transforms an http.Request into a GET (to be used with LROs). + +## v9.2.0 + +### New Features + +- Added support for custom Azure Stack endpoints. +- Added type azure.Future used to track the status of long-running operations. + +### Bug Fixes + +- Preserve the original error in DoRetryWithRegistration when registration fails. + +## v9.1.1 + +- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`. + +## v9.1.0 + +### New Features + +- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error. +- Support for loading Azure CLI Authentication files. +- Automatically register your subscription with the Azure Resource Provider if it hadn't been previously. + +### Bug Fixes + +- RetriableRequest can now tolerate a ReadSeekable body being read but not reset. +- Adding missing Apache Headers + +## v9.0.0 + +> **IMPORTANT:** This release was intially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We appologize for any inconvenience this causes. + +Adding MSI Endpoint Support and CLI token rehydration. + +## v8.3.1 + +Pick up bug fix in adal for MSI support. + +## v8.3.0 + +Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience. + +## v8.2.0 + +### New Features + +- Add support for bearer authentication callbacks +- Support 429 response codes that include "Retry-After" header +- Support validation constraint "Pattern" for map keys + +### Bug Fixes + +- Make RetriableRequest work with multiple versions of Go + +## v8.1.1 + +Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8. + +## v8.1.0 + +Adds RetriableRequest type for more efficient handling of retrying HTTP requests. + +## v8.0.0 + +ADAL refactored into its own package. +Support for UNIX time. + +## v7.3.1 + +- Version Testing now removed from production bits that are shipped with the library. + +## v7.3.0 + +- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations + to acknowledge that they do not need either the entire or a trailing portion + of accepts response body. In doing so, Go's http library can reuse HTTP + connections more readily. +- Adding `PrepareDecorator` to target custom BaseURLs. +- Adding ACR suffix to public cloud environment. +- Updating Glide dependencies. + +## v7.2.5 + +- Fixed the Active Directory endpoint for the China cloud. 
+- Removes UTF-8 BOM if present in response payload. +- Added telemetry. + +## v7.2.3 + +- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay + duration. + +## v7.2.2 + +- autorest/azure: added ASM and ARM VM DNS suffixes. + +## v7.2.1 + +- fixed parsing of UTC times that are not RFC3339 conformant. + +## v7.2.0 + +- autorest/validation: Reformat validation error for better error message. + +## v7.1.0 + +- preparer: Added support for multipart formdata - WithMultiPartFormdata() +- preparer: Added support for sending file in request body - WithFile +- client: Added RetryDuration parameter. +- autorest/validation: new package for validation code for Azure Go SDK. + +## v7.0.7 + +- Add trailing / to endpoint +- azure: add EnvironmentFromName + +## v7.0.6 + +- Add retry logic for 408, 500, 502, 503 and 504 status codes. +- Change url path and query encoding logic. +- Fix DelayForBackoff for proper exponential delay. +- Add CookieJar in Client. + +## v7.0.5 + +- Add check to start polling only when status is in [200,201,202]. +- Refactoring for unchecked errors. +- azure/persist changes. +- Fix 'file in use' issue in renewing token in deviceflow. +- Store header RetryAfter for subsequent requests in polling. +- Add attribute details in service error. + +## v7.0.4 + +- Better error messages for long running operation failures + +## v7.0.3 + +- Corrected DoPollForAsynchronous to properly handle the initial response + +## v7.0.2 + +- Corrected DoPollForAsynchronous to continue using the polling method first discovered + +## v7.0.1 + +- Fixed empty JSON input error in ByUnmarshallingJSON +- Fixed polling support for GET calls +- Changed format name from TimeRfc1123 to TimeRFC1123 + +## v7.0.0 + +- Added ByCopying responder with supporting TeeReadCloser +- Rewrote Azure asynchronous handling +- Reverted to only unmarshalling JSON +- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format + +The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since +`encoding/json` successfully deserializes all core types, and extended types normally provide +their custom JSON serialization handlers, the code has been reverted back to using +`json.Unmarshal`. The original change to use `json.Decode` was made to reduce duplicate +code; there is no loss of function, and there is a gain in accuracy, by reverting. + +Additionally, Azure services indicate requests to be polled by multiple means. The existing code +only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header). +The new code correctly covers all cases and aligns with the other Azure SDKs. + +## v6.1.0 + +- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values. + +## v6.0.0 + +- Completely reworked the handling of polled and asynchronous requests +- Removed unnecessary routines +- Reworked `mocks.Sender` to replay a series of `http.Response` objects +- Added `PrepareDecorators` for primitive types (e.g., bool, int32) + +Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead new +`SendDecorators` implement different styles of polled behavior. See`autorest.DoPollForStatusCodes` +and `azure.DoPollForAsynchronous` for examples. + +## v5.0.0 + +- Added new RespondDecorators unmarshalling primitive types +- Corrected application of inspection and authorization PrependDecorators + +## v4.0.0 + +- Added support for Azure long-running operations. 
+- Added cancelation support to all decorators and functions that may delay. +- Breaking: `DelayForBackoff` now accepts a channel, which may be nil. + +## v3.1.0 + +- Add support for OAuth Device Flow authorization. +- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material. +- Add helpers for persisting and restoring Tokens. +- Increased code coverage in the github.com/Azure/autorest/azure package + +## v3.0.0 + +- Breaking: `NewErrorWithError` no longer takes `statusCode int`. +- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`. +- Breaking: `Client#Send()` no longer takes `codes ...int` argument. +- Add: XML unmarshaling support with `ByUnmarshallingXML()` +- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide). + Applications using this library should either use Glide or vendor dependencies locally some other way. +- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors. +- Fix: use `net/http.DefaultClient` as base client. +- Fix: Missing inspection for polling responses added. +- Add: CopyAndDecode helpers. +- Improved `./autorest/to` with `[]string` helpers. +- Removed golint suppressions in .travis.yml. + +## v2.1.0 + +- Added `StatusCode` to `Error` for more easily obtaining the HTTP Reponse StatusCode (if any) + +## v2.0.0 + +- Changed `to.StringMapPtr` method signature to return a pointer +- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificate and private keys + +## v1.0.0 + +- Added Logging inspectors to trace http.Request / Response +- Added support for User-Agent header +- Changed WithHeader PrepareDecorator to use set vs. add +- Added JSON to error when unmarshalling fails +- Added Client#Send method +- Corrected case of "Azure" in package paths +- Added "to" helpers, Azure helpers, and improved ease-of-use +- Corrected golint issues + +## v1.0.1 + +- Added CHANGELOG.md + +## v1.1.0 + +- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT +- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate + +## v1.1.1 + +- Introduce godeps and vendor dependencies introduced in v1.1.1 diff --git a/vendor/github.com/Azure/go-autorest/GNUmakefile b/vendor/github.com/Azure/go-autorest/GNUmakefile new file mode 100644 index 000000000000..a434e73ac49d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/GNUmakefile @@ -0,0 +1,23 @@ +DIR?=./autorest/ + +default: build + +build: fmt + go install $(DIR) + +test: + go test $(DIR) || exit 1 + +vet: + @echo "go vet ." + @go vet $(DIR)... ; if [ $$? -eq 1 ]; then \ + echo ""; \ + echo "Vet found suspicious constructs. Please check the reported constructs"; \ + echo "and fix them if necessary before submitting the code for review."; \ + exit 1; \ + fi + +fmt: + gofmt -w $(DIR) + +.PHONY: build test vet fmt diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.lock b/vendor/github.com/Azure/go-autorest/Gopkg.lock new file mode 100644 index 000000000000..dc6e3e633e6d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.lock @@ -0,0 +1,324 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e" + name = "contrib.go.opencensus.io/exporter/ocagent" + packages = ["."] + pruneopts = "UT" + revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea" + version = "v0.6.0" + +[[projects]] + digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20" + name = "github.com/census-instrumentation/opencensus-proto" + packages = [ + "gen-go/agent/common/v1", + "gen-go/agent/metrics/v1", + "gen-go/agent/trace/v1", + "gen-go/metrics/v1", + "gen-go/resource/v1", + "gen-go/trace/v1", + ] + pruneopts = "UT" + revision = "d89fa54de508111353cb0b06403c00569be780d8" + version = "v0.2.1" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "UT" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965" + name = "github.com/dimchansky/utfbom" + packages = ["."] + pruneopts = "UT" + revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "UT" + revision = "611e8accdfc92c4187d399e95ce826046d4c8d73" + +[[projects]] + digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa" + name = "github.com/golang/protobuf" + packages = [ + "descriptor", + "jsonpb", + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/struct", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "UT" + revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" + version = "v1.3.2" + +[[projects]] + digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806" + name = "github.com/grpc-ecosystem/grpc-gateway" + packages = [ + "internal", + "runtime", + "utilities", + ] + pruneopts = "UT" + revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009" + version = "v1.12.1" + +[[projects]] + digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + pruneopts = "UT" + revision = "af06845cf3004701891bf4fdb884bfe4920b3727" + version = "v1.1.0" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "UT" + revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" + version = "v1.4.0" + +[[projects]] + digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + 
"plugin/ochttp/propagation/tracecontext", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "UT" + revision = "aad2c527c5defcf89b5afab7f37274304195a6b2" + version = "v0.22.2" + +[[projects]] + branch = "master" + digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae" + name = "golang.org/x/crypto" + packages = [ + "pkcs12", + "pkcs12/internal/rc2", + ] + pruneopts = "UT" + revision = "e9b2fee46413994441b28dfca259d911d963dfed" + +[[projects]] + branch = "master" + digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43" + name = "golang.org/x/lint" + packages = [ + ".", + "golint", + ] + pruneopts = "UT" + revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448" + +[[projects]] + branch = "master" + digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910" + name = "golang.org/x/net" + packages = [ + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "1ddd1de85cb0337b623b740a609d35817d516a8d" + +[[projects]] + branch = "master" + digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "UT" + revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb" + +[[projects]] + branch = "master" + digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945" + +[[projects]] + digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + branch = "master" + digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "go/gcexportdata", + "go/internal/gcimporter", + "go/types/typeutil", + ] + pruneopts = "UT" + revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42" + +[[projects]] + digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877" + name = "google.golang.org/api" + packages = ["support/bundler"] + pruneopts = "UT" + revision = "8a410c21381766a810817fd6200fce8838ecb277" + version = "v0.14.0" + +[[projects]] + branch = "master" + digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api/httpbody", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "UT" + revision = "51378566eb590fa106d1025ea12835a4416dda84" + +[[projects]] + digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301" + name = "google.golang.org/grpc" + packages = [ + ".", + "backoff", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", 
+ "internal/binarylog", + "internal/buffer", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/resolver/dns", + "internal/resolver/passthrough", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "serviceconfig", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514" + version = "v1.25.1" + +[[projects]] + digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce" + version = "v2.2.7" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "contrib.go.opencensus.io/exporter/ocagent", + "github.com/dgrijalva/jwt-go", + "github.com/dimchansky/utfbom", + "github.com/mitchellh/go-homedir", + "github.com/stretchr/testify/require", + "go.opencensus.io/plugin/ochttp", + "go.opencensus.io/plugin/ochttp/propagation/tracecontext", + "go.opencensus.io/stats/view", + "go.opencensus.io/trace", + "golang.org/x/crypto/pkcs12", + "golang.org/x/lint/golint", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.toml b/vendor/github.com/Azure/go-autorest/Gopkg.toml new file mode 100644 index 000000000000..1fc28659696c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.toml @@ -0,0 +1,59 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + +required = ["golang.org/x/lint/golint"] + +[prune] + go-tests = true + unused-packages = true + +[[constraint]] + name = "contrib.go.opencensus.io/exporter/ocagent" + version = "0.6.0" + +[[constraint]] + name = "github.com/dgrijalva/jwt-go" + version = "3.2.0" + +[[constraint]] + name = "github.com/dimchansky/utfbom" + version = "1.1.0" + +[[constraint]] + name = "github.com/mitchellh/go-homedir" + version = "1.1.0" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.3.0" + +[[constraint]] + name = "go.opencensus.io" + version = "0.22.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/crypto" diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 000000000000..b9d6a27ea92e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md new file mode 100644 index 000000000000..de1e19a44df9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/README.md @@ -0,0 +1,165 @@ +# go-autorest + +[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/Azure.go-autorest?branchName=master)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master) +[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) + +Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages. + +An authentication client tested with Azure Active Directory (AAD) is also +provided in this repo in the package +`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package +is maintained only as part of the Azure Go SDK and is not related to other +"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD). + +## Overview + +Package go-autorest implements an HTTP request pipeline suitable for use across +multiple goroutines and provides the shared routines used by packages generated +by [Autorest](https://github.com/Azure/autorest.go). + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + +```go + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) +``` + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + +```go + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) +``` + +will set the URL to: + +``` + https://microsoft.com/a/b/c +``` + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., `ByUnmarshallingJson`) is likely incorrect. + +Errors raised by autorest objects and methods will conform to the `autorest.Error` interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. 
+ +## Helpers + +### Handling Swagger Dates + +The Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct +parsing and formatting. + +### Handling Empty Values + +In JSON, missing values have different semantics than empty values. This is especially true for +services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains +only those values to modify. Missing values are to be left unchanged. Developers, then, require a +means to both specify an empty value and to leave the value out of the submitted JSON. + +The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits +empty values from the rendered JSON. Since Go defines default values for all base types (such as "" +for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package +treats default values as meaning empty, omitting them from the rendered JSON. This means that, using +the Go base types encoded through the default JSON package, it is not possible to create JSON to +clear a value at the server. + +The workaround within the Go community is to use pointers to base types in lieu of base types within +structures that map to JSON. For example, instead of a value of type `string`, the workaround uses +`*string`. While this enables distinguishing empty values from those to be unchanged, creating +pointers to a base type (notably constant, in-line values) requires additional variables. This, for +example, + +```go + s := struct { + S *string + }{ S: &"foo" } +``` +fails, while, this + +```go + v := "foo" + s := struct { + S *string + }{ S: &v } +``` +succeeds. + +To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for +Go base types which have Swagger analogs. It also provides a helper that converts between +`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value +associated with a key should be cleared. With the helpers, the previous example becomes + +```go + s := struct { + S *string + }{ S: to.StringPtr("foo") } +``` + +## Install + +```bash +go get github.com/Azure/go-autorest/autorest +go get github.com/Azure/go-autorest/autorest/azure +go get github.com/Azure/go-autorest/autorest/date +go get github.com/Azure/go-autorest/autorest/to +``` + +### Using with Go Modules +In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules. + +- autorest/adal +- autorest/azure/auth +- autorest/azure/cli +- autorest/date +- autorest/mocks +- autorest/to +- autorest/validation +- autorest +- logger +- tracing + +Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules. + +## License + +See LICENSE file. + +----- + +This project has adopted the [Microsoft Open Source Code of +Conduct](https://opensource.microsoft.com/codeofconduct/). For more information +see the [Code of Conduct +FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact +[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional +questions or comments. 
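[Editor's note: to make the README's empty-vs-missing distinction concrete, here is a small sketch (an editorial illustration, not part of the vendored files; the `Tags` type and its field names are invented) showing how the `to` pointer helpers combine with `omitempty` so a PATCH body can distinguish "clear this value" from "leave it unchanged".]

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"
)

// Tags uses pointers to base types: with *string and omitempty, a nil field
// is omitted from the JSON, while a pointer to "" explicitly sends an empty
// value to the server.
type Tags struct {
	Name  *string `json:"name,omitempty"`
	Owner *string `json:"owner,omitempty"`
}

func main() {
	// Set one field and explicitly clear another.
	patch := Tags{
		Name:  to.StringPtr("frontend"), // send "frontend"
		Owner: to.StringPtr(""),         // send "" to clear the value
	}
	b, _ := json.Marshal(patch)
	fmt.Println(string(b)) // {"name":"frontend","owner":""}

	// A nil pointer is omitted entirely, i.e. "leave unchanged".
	b, _ = json.Marshal(Tags{Name: to.StringPtr("frontend")})
	fmt.Println(string(b)) // {"name":"frontend"}

	// to.String safely dereferences, returning "" for nil.
	fmt.Println(to.String(patch.Name)) // frontend
}
```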
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod index a030eb42da8b..02a3d39ff4b2 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod @@ -3,10 +3,10 @@ module github.com/Azure/go-autorest/autorest/adal go 1.12 require ( - github.com/Azure/go-autorest/autorest v0.9.0 - github.com/Azure/go-autorest/autorest/date v0.2.0 - github.com/Azure/go-autorest/autorest/mocks v0.3.0 - github.com/Azure/go-autorest/tracing v0.5.0 + github.com/Azure/go-autorest v14.2.0+incompatible + github.com/Azure/go-autorest/autorest/date v0.3.0 + github.com/Azure/go-autorest/autorest/mocks v0.4.0 + github.com/Azure/go-autorest/tracing v0.6.0 github.com/dgrijalva/jwt-go v3.2.0+incompatible - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum index e43cf6498d0d..bbda1a9a9891 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum @@ -1,26 +1,17 @@ -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod 
h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go index 28a4bfc4c437..7551b79235de 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go @@ -16,9 +16,9 @@ package adal // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// This file, and the github.com/Azure/go-autorest import, won't actually become part of // the resultant binary. // Necessary for safely adding multi-module repo. // See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go index f43e1a6ed5a1..15138b642f2e 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -138,6 +138,11 @@ func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { } } +// TokenProvider returns OAuthTokenProvider so that it can be used for authorization outside the REST. +func (ba *BearerAuthorizer) TokenProvider() adal.OAuthTokenProvider { + return ba.tokenProvider +} + // BearerAuthorizerCallbackFunc is the authentication callback signature. 
type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) @@ -331,7 +336,7 @@ func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator { for i := range auxTokens { auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i]) } - return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; "))) + return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, ", "))) }) } } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go index 5f02026b391c..2b37e0d068c6 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go @@ -466,7 +466,7 @@ func decode(b []byte) ([]byte, error) { } func (settings FileSettings) getResourceForToken(baseURI string) (string, error) { - // Compare dafault base URI from the SDK to the endpoints from the public cloud + // Compare default base URI from the SDK to the endpoints from the public cloud // Base URI and token resource are the same string. This func finds the authentication // file field that matches the SDK base URI. The SDK defines the public cloud // endpoint as its default base URI @@ -713,8 +713,8 @@ type MSIConfig struct { ClientID string } -// Authorizer gets the authorizer from MSI. -func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) { +// ServicePrincipalToken creates a ServicePrincipalToken from MSI. +func (mc MSIConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { msiEndpoint, err := adal.GetMSIEndpoint() if err != nil { return nil, err @@ -733,5 +733,15 @@ func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) { } } + return spToken, nil +} + +// Authorizer gets the authorizer from MSI. 
+func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := mc.ServicePrincipalToken() + if err != nil { + return nil, err + } + return autorest.NewBearerAuthorizer(spToken), nil } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod index 4bebbdc242af..0bf97ae93e34 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod @@ -3,9 +3,10 @@ module github.com/Azure/go-autorest/autorest/azure/auth go 1.12 require ( - github.com/Azure/go-autorest/autorest v0.9.3 - github.com/Azure/go-autorest/autorest/adal v0.8.1 - github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 + github.com/Azure/go-autorest v14.2.0+incompatible + github.com/Azure/go-autorest/autorest v0.11.0 + github.com/Azure/go-autorest/autorest/adal v0.9.0 + github.com/Azure/go-autorest/autorest/azure/cli v0.4.0 github.com/dimchansky/utfbom v1.1.0 - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum index 9ea3fa575190..7cf95fd8109b 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum @@ -1,29 +1,19 @@ -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4= -github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= -github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I= -github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.8.1 h1:pZdL8o72rK+avFWl+p9nE8RWi1JInZrWJYlnpfXJwHk= -github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= 
-github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.0 h1:tnO41Uo+/0sxTMFY/U7aKg2abek3JOnnXcuSuba74jI= +github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.0 h1:Ml+UCrnlKD+cJmSzrZ/RDcDw86NjkRUpnFh7V5JUhzU= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= @@ -31,8 +21,8 @@ github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go index 2f09cd177aa5..38e4900ad0fb 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go_mod_tidy_hack.go @@ -16,9 +16,9 @@ package auth // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// This file, and the github.com/Azure/go-autorest import, won't actually become part of // the resultant binary. // Necessary for safely adding multi-module repo. // See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod index a58302914dc5..087f737e929f 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod @@ -3,9 +3,9 @@ module github.com/Azure/go-autorest/autorest/azure/cli go 1.12 require ( - github.com/Azure/go-autorest/autorest v0.9.0 - github.com/Azure/go-autorest/autorest/adal v0.8.0 - github.com/Azure/go-autorest/autorest/date v0.2.0 + github.com/Azure/go-autorest v14.2.0+incompatible + github.com/Azure/go-autorest/autorest/adal v0.9.0 + github.com/Azure/go-autorest/autorest/date v0.3.0 github.com/dimchansky/utfbom v1.1.0 github.com/mitchellh/go-homedir v1.1.0 ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum index 542806b94711..90d0dd23909a 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum @@ -1,23 +1,13 @@ -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.0 h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I= -github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= -github.com/Azure/go-autorest/autorest/mocks 
v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= @@ -25,5 +15,9 @@ github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go index 618bed392fc5..861ce2984e64 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go_mod_tidy_hack.go @@ -16,9 +16,9 @@ package cli // See the License for the specific language governing permissions and // limitations under the License. 
-// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// This file, and the github.com/Azure/go-autorest import, won't actually become part of // the resultant binary. // Necessary for safely adding multi-module repo. // See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index 85ed9635f801..faff93275991 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -127,7 +127,7 @@ var ( KeyVaultDNSSuffix: "vault.usgovcloudapi.net", ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", ServiceManagementVMDNSSuffix: "usgovcloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + ResourceManagerVMDNSSuffix: "cloudapp.usgovcloudapi.net", ContainerRegistryDNSSuffix: "azurecr.us", CosmosDBDNSSuffix: "documents.azure.us", TokenAudience: "https://management.usgovcloudapi.net/", @@ -160,7 +160,7 @@ var ( KeyVaultDNSSuffix: "vault.azure.cn", ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", ServiceManagementVMDNSSuffix: "chinacloudapp.cn", - ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ResourceManagerVMDNSSuffix: "cloudapp.chinacloudapi.cn", ContainerRegistryDNSSuffix: "azurecr.cn", CosmosDBDNSSuffix: "documents.azure.cn", TokenAudience: "https://management.chinacloudapi.cn/", diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod index 3adc4804c3d2..f88ecc4022d6 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod @@ -2,4 +2,4 @@ module github.com/Azure/go-autorest/autorest/date go 1.12 -require github.com/Azure/go-autorest/autorest v0.9.0 +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.sum b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum index 9e2ee7a94844..1fc56a962ee4 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/go.sum +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum @@ -1,16 +1,2 @@ -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= 
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go index 55adf930f4ab..4e0543207171 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go @@ -16,9 +16,9 @@ package date // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// This file, and the github.com/Azure/go-autorest import, won't actually become part of // the resultant binary. // Necessary for safely adding multi-module repo. // See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod index 499c56de48ae..b66c78da2ccf 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/go.mod +++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod @@ -3,9 +3,10 @@ module github.com/Azure/go-autorest/autorest go 1.12 require ( - github.com/Azure/go-autorest/autorest/adal v0.8.2 - github.com/Azure/go-autorest/autorest/mocks v0.3.0 - github.com/Azure/go-autorest/logger v0.1.0 - github.com/Azure/go-autorest/tracing v0.5.0 - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 + github.com/Azure/go-autorest v14.2.0+incompatible + github.com/Azure/go-autorest/autorest/adal v0.9.0 + github.com/Azure/go-autorest/autorest/mocks v0.4.0 + github.com/Azure/go-autorest/logger v0.2.0 + github.com/Azure/go-autorest/tracing v0.6.0 + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum index 37398d1d48a8..96d2ad0fcd86 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/go.sum +++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum @@ -1,28 +1,21 @@ -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod 
h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go new file mode 100644 index 000000000000..da65e1041eb0 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go.mod b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod index 48fd8c6e57b4..8fd041e2baef 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/to/go.mod +++ b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod @@ -2,4 +2,4 @@ module github.com/Azure/go-autorest/autorest/to go 1.12 -require github.com/Azure/go-autorest/autorest v0.9.0 +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go.sum b/vendor/github.com/Azure/go-autorest/autorest/to/go.sum index d7ee6b462311..1fc56a962ee4 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/to/go.sum +++ b/vendor/github.com/Azure/go-autorest/autorest/to/go.sum @@ -1,17 +1,2 @@ -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= 
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go index 8e8292107071..b7310f6b868f 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go @@ -16,9 +16,9 @@ package to // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// This file, and the github.com/Azure/go-autorest import, won't actually become part of // the resultant binary. // Necessary for safely adding multi-module repo. // See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod b/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod index b3f9b6a09609..a0a69e9aed53 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod +++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod @@ -3,6 +3,6 @@ module github.com/Azure/go-autorest/autorest/validation go 1.12 require ( - github.com/Azure/go-autorest/autorest v0.9.0 + github.com/Azure/go-autorest v14.2.0+incompatible github.com/stretchr/testify v1.3.0 ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum b/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum index 6b9010a736d3..6c1119aab96b 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum +++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum @@ -1,24 +1,9 @@ -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= 
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go index 2b2668581e87..cf1436291a72 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go @@ -16,9 +16,9 @@ package validation // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// This file, and the github.com/Azure/go-autorest import, won't actually become part of // the resultant binary. // Necessary for safely adding multi-module repo. 
// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index ee6e48f8baf9..28973b04d6c9 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -19,7 +19,7 @@ import ( "runtime" ) -const number = "v14.1.0" +const number = "v14.2.0" var ( userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", diff --git a/vendor/github.com/Azure/go-autorest/azure-pipelines.yml b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml new file mode 100644 index 000000000000..6fb8404fd01d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml @@ -0,0 +1,105 @@ +variables: + GOPATH: '$(system.defaultWorkingDirectory)/work' + sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)' + +jobs: + - job: 'goautorest' + displayName: 'Run go-autorest CI Checks' + + strategy: + matrix: + Linux_Go113: + vm.image: 'ubuntu-18.04' + go.version: '1.13' + Linux_Go114: + vm.image: 'ubuntu-18.04' + go.version: '1.14' + + pool: + vmImage: '$(vm.image)' + + steps: + - task: GoTool@0 + inputs: + version: '$(go.version)' + displayName: "Select Go Version" + + - script: | + set -e + mkdir -p '$(GOPATH)/bin' + mkdir -p '$(sdkPath)' + shopt -s extglob + mv !(work) '$(sdkPath)' + echo '##vso[task.prependpath]$(GOPATH)/bin' + displayName: 'Create Go Workspace' + + - script: | + set -e + curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure -v + go install ./vendor/golang.org/x/lint/golint + go get github.com/jstemmer/go-junit-report + go get github.com/axw/gocov/gocov + go get github.com/AlekSi/gocov-xml + go get -u github.com/matm/gocov-html + workingDirectory: '$(sdkPath)' + displayName: 'Install Dependencies' + + - script: | + go vet ./autorest/... + go vet ./logger/... + go vet ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Vet' + + - script: | + go build -v ./autorest/... + go build -v ./logger/... + go build -v ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Build' + + - script: | + set -e + go test -race -v -coverprofile=coverage.txt -covermode atomic ./autorest/... ./logger/... ./tracing/... 2>&1 | go-junit-report > report.xml + gocov convert coverage.txt > coverage.json + gocov-xml < coverage.json > coverage.xml + gocov-html < coverage.json > coverage.html + workingDirectory: '$(sdkPath)' + displayName: 'Run Tests' + + - script: grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Copyright Header Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + gofmt -s -l -w ./autorest/. >&2 + gofmt -s -l -w ./logger/. >&2 + gofmt -s -l -w ./tracing/. >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Format Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + golint ./autorest/... >&2 + golint ./logger/... >&2 + golint ./tracing/... 
>&2 + workingDirectory: '$(sdkPath)' + displayName: 'Linter Check' + failOnStderr: true + condition: succeededOrFailed() + + - task: PublishTestResults@2 + inputs: + testRunner: JUnit + testResultsFiles: $(sdkPath)/report.xml + failTaskOnFailedTests: true + + - task: PublishCodeCoverageResults@1 + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: $(sdkPath)/coverage.xml + additionalCodeCoverageFiles: $(sdkPath)/coverage.html diff --git a/vendor/github.com/Azure/go-autorest/doc.go b/vendor/github.com/Azure/go-autorest/doc.go new file mode 100644 index 000000000000..99ae6ca988a4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/doc.go @@ -0,0 +1,18 @@ +/* +Package go-autorest provides an HTTP request client for use with Autorest-generated API client packages. +*/ +package go_autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/Azure/go-autorest/logger/go.mod index f22ed56bcde4..bedeaee039e0 100644 --- a/vendor/github.com/Azure/go-autorest/logger/go.mod +++ b/vendor/github.com/Azure/go-autorest/logger/go.mod @@ -1,3 +1,5 @@ module github.com/Azure/go-autorest/logger go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/logger/go.sum b/vendor/github.com/Azure/go-autorest/logger/go.sum new file mode 100644 index 000000000000..1fc56a962ee4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go new file mode 100644 index 000000000000..0aa27680db9b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. 
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod index 25c34c1085a7..a2cdec78c810 100644 --- a/vendor/github.com/Azure/go-autorest/tracing/go.mod +++ b/vendor/github.com/Azure/go-autorest/tracing/go.mod @@ -1,3 +1,5 @@ module github.com/Azure/go-autorest/tracing go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.sum b/vendor/github.com/Azure/go-autorest/tracing/go.sum new file mode 100644 index 000000000000..1fc56a962ee4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go new file mode 100644 index 000000000000..e163975cd4e1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package tracing + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/denverdino/aliyungo/LICENSE.txt b/vendor/github.com/denverdino/aliyungo/LICENSE.txt new file mode 100644 index 000000000000..918297133253 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/LICENSE.txt @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015-2015 Li Yi (denverdino@gmail.com). + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/denverdino/aliyungo/common/client.go b/vendor/github.com/denverdino/aliyungo/common/client.go new file mode 100644 index 000000000000..a59789fee44e --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/common/client.go @@ -0,0 +1,357 @@ +package common + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "log" + "net/http" + "net/url" + "strings" + "time" + + "github.com/denverdino/aliyungo/util" +) + +// RemovalPolicy.N add index to array item +// RemovalPolicy=["a", "b"] => RemovalPolicy.1="a" RemovalPolicy.2="b" +type FlattenArray []string + +// string contains underline which will be replaced with dot +// SystemDisk_Category => SystemDisk.Category +type UnderlineString string + +// A Client represents a client of ECS services +type Client struct { + AccessKeyId string //Access Key Id + AccessKeySecret string //Access Key Secret + debug bool + httpClient *http.Client + endpoint string + version string + serviceCode string + regionID Region + businessInfo string + userAgent string +} + +// NewClient creates a new instance of ECS client +func (client *Client) Init(endpoint, version, accessKeyId, accessKeySecret string) { + client.AccessKeyId = accessKeyId + client.AccessKeySecret = accessKeySecret + "&" + client.debug = false + client.httpClient = &http.Client{} + client.endpoint = endpoint + client.version = version +} + +func (client *Client) NewInit(endpoint, version, accessKeyId, accessKeySecret, serviceCode string, regionID Region) { + client.Init(endpoint, version, accessKeyId, accessKeySecret) + client.serviceCode = serviceCode + client.regionID = regionID + client.setEndpointByLocation(regionID, serviceCode, accessKeyId, accessKeySecret) +} + +//NewClient using location service +func (client *Client) setEndpointByLocation(region Region, serviceCode, accessKeyId, accessKeySecret string) { + locationClient := NewLocationClient(accessKeyId, accessKeySecret) + ep := locationClient.DescribeOpenAPIEndpoint(region, serviceCode) + if ep == "" { + ep = loadEndpointFromFile(region, serviceCode) + } + + if ep != "" { + client.endpoint = ep + } +} + +// SetEndpoint sets custom endpoint +func (client *Client) SetEndpoint(endpoint string) { + client.endpoint = endpoint +} + +// SetEndpoint sets custom version +func (client *Client) SetVersion(version string) { + client.version = version +} + +func (client *Client) SetRegionID(regionID Region) { + client.regionID = regionID +} + +//SetServiceCode sets serviceCode +func (client *Client) SetServiceCode(serviceCode string) { + client.serviceCode = serviceCode +} + +// SetAccessKeyId sets new AccessKeyId +func (client *Client) SetAccessKeyId(id string) { + client.AccessKeyId = id +} + +// SetAccessKeySecret sets new AccessKeySecret +func (client *Client) SetAccessKeySecret(secret string) { + client.AccessKeySecret = secret + "&" +} + +// SetDebug sets debug mode to log the request/response message +func (client *Client) SetDebug(debug bool) { + client.debug = debug +} + +// SetBusinessInfo sets business info to log the request/response message +func (client *Client) SetBusinessInfo(businessInfo string) { + if strings.HasPrefix(businessInfo, "/") { + client.businessInfo = businessInfo + } else if businessInfo != "" { + client.businessInfo = "/" + businessInfo + } +} + +// SetUserAgent sets user agent to the request/response message +func (client *Client) SetUserAgent(userAgent string) { + client.userAgent = userAgent +} + +// Invoke sends the raw HTTP request for ECS services +func (client *Client) Invoke(action 
string, args interface{}, response interface{}) error { + + request := Request{} + request.init(client.version, action, client.AccessKeyId) + + query := util.ConvertToQueryValues(request) + util.SetQueryValues(args, &query) + + // Sign request + signature := util.CreateSignatureForRequest(ECSRequestMethod, &query, client.AccessKeySecret) + + // Generate the request URL + requestURL := client.endpoint + "?" + query.Encode() + "&Signature=" + url.QueryEscape(signature) + + httpReq, err := http.NewRequest(ECSRequestMethod, requestURL, nil) + + if err != nil { + return GetClientError(err) + } + + // TODO move to util and add build val flag + httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo) + + httpReq.Header.Set("User-Agent", httpReq.UserAgent()+ " " +client.userAgent) + + t0 := time.Now() + httpResp, err := client.httpClient.Do(httpReq) + t1 := time.Now() + if err != nil { + return GetClientError(err) + } + statusCode := httpResp.StatusCode + + if client.debug { + log.Printf("Invoke %s %s %d (%v)", ECSRequestMethod, requestURL, statusCode, t1.Sub(t0)) + } + + defer httpResp.Body.Close() + body, err := ioutil.ReadAll(httpResp.Body) + + if err != nil { + return GetClientError(err) + } + + if client.debug { + var prettyJSON bytes.Buffer + err = json.Indent(&prettyJSON, body, "", " ") + log.Println(string(prettyJSON.Bytes())) + } + + if statusCode >= 400 && statusCode <= 599 { + errorResponse := ErrorResponse{} + err = json.Unmarshal(body, &errorResponse) + ecsError := &Error{ + ErrorResponse: errorResponse, + StatusCode: statusCode, + } + return ecsError + } + + err = json.Unmarshal(body, response) + //log.Printf("%++v", response) + if err != nil { + return GetClientError(err) + } + + return nil +} + +// Invoke sends the raw HTTP request for ECS services +func (client *Client) InvokeByFlattenMethod(action string, args interface{}, response interface{}) error { + + request := Request{} + request.init(client.version, action, client.AccessKeyId) + + query := util.ConvertToQueryValues(request) + + util.SetQueryValueByFlattenMethod(args, &query) + + // Sign request + signature := util.CreateSignatureForRequest(ECSRequestMethod, &query, client.AccessKeySecret) + + // Generate the request URL + requestURL := client.endpoint + "?" 
+ query.Encode() + "&Signature=" + url.QueryEscape(signature) + + httpReq, err := http.NewRequest(ECSRequestMethod, requestURL, nil) + + if err != nil { + return GetClientError(err) + } + + // TODO move to util and add build val flag + httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo) + + httpReq.Header.Set("User-Agent", httpReq.UserAgent()+ " " +client.userAgent) + + t0 := time.Now() + httpResp, err := client.httpClient.Do(httpReq) + t1 := time.Now() + if err != nil { + return GetClientError(err) + } + statusCode := httpResp.StatusCode + + if client.debug { + log.Printf("Invoke %s %s %d (%v)", ECSRequestMethod, requestURL, statusCode, t1.Sub(t0)) + } + + defer httpResp.Body.Close() + body, err := ioutil.ReadAll(httpResp.Body) + + if err != nil { + return GetClientError(err) + } + + if client.debug { + var prettyJSON bytes.Buffer + err = json.Indent(&prettyJSON, body, "", " ") + log.Println(string(prettyJSON.Bytes())) + } + + if statusCode >= 400 && statusCode <= 599 { + errorResponse := ErrorResponse{} + err = json.Unmarshal(body, &errorResponse) + ecsError := &Error{ + ErrorResponse: errorResponse, + StatusCode: statusCode, + } + return ecsError + } + + err = json.Unmarshal(body, response) + //log.Printf("%++v", response) + if err != nil { + return GetClientError(err) + } + + return nil +} + +// Invoke sends the raw HTTP request for ECS services +//改进了一下上面那个方法,可以使用各种Http方法 +//2017.1.30 增加了一个path参数,用来拓展访问的地址 +func (client *Client) InvokeByAnyMethod(method, action, path string, args interface{}, response interface{}) error { + + request := Request{} + request.init(client.version, action, client.AccessKeyId) + + data := util.ConvertToQueryValues(request) + util.SetQueryValues(args, &data) + + // Sign request + signature := util.CreateSignatureForRequest(method, &data, client.AccessKeySecret) + + data.Add("Signature", signature) + // Generate the request URL + var ( + httpReq *http.Request + err error + ) + if method == http.MethodGet { + requestURL := client.endpoint + path + "?" 
+ data.Encode() + //fmt.Println(requestURL) + httpReq, err = http.NewRequest(method, requestURL, nil) + } else { + //fmt.Println(client.endpoint + path) + httpReq, err = http.NewRequest(method, client.endpoint+path, strings.NewReader(data.Encode())) + httpReq.Header.Set("Content-Type", "application/x-www-form-urlencoded") + } + + if err != nil { + return GetClientError(err) + } + + // TODO move to util and add build val flag + httpReq.Header.Set("X-SDK-Client", `AliyunGO/`+Version+client.businessInfo) + + httpReq.Header.Set("User-Agent", httpReq.Header.Get("User-Agent")+ " " +client.userAgent) + + t0 := time.Now() + httpResp, err := client.httpClient.Do(httpReq) + t1 := time.Now() + if err != nil { + return GetClientError(err) + } + statusCode := httpResp.StatusCode + + if client.debug { + log.Printf("Invoke %s %s %d (%v) %v", ECSRequestMethod, client.endpoint, statusCode, t1.Sub(t0), data.Encode()) + } + + defer httpResp.Body.Close() + body, err := ioutil.ReadAll(httpResp.Body) + + if err != nil { + return GetClientError(err) + } + + if client.debug { + var prettyJSON bytes.Buffer + err = json.Indent(&prettyJSON, body, "", " ") + log.Println(string(prettyJSON.Bytes())) + } + + if statusCode >= 400 && statusCode <= 599 { + errorResponse := ErrorResponse{} + err = json.Unmarshal(body, &errorResponse) + ecsError := &Error{ + ErrorResponse: errorResponse, + StatusCode: statusCode, + } + return ecsError + } + + err = json.Unmarshal(body, response) + //log.Printf("%++v", response) + if err != nil { + return GetClientError(err) + } + + return nil +} + +// GenerateClientToken generates the Client Token with random string +func (client *Client) GenerateClientToken() string { + return util.CreateRandomString() +} + +func GetClientErrorFromString(str string) error { + return &Error{ + ErrorResponse: ErrorResponse{ + Code: "AliyunGoClientFailure", + Message: str, + }, + StatusCode: -1, + } +} + +func GetClientError(err error) error { + return GetClientErrorFromString(err.Error()) +} diff --git a/vendor/github.com/denverdino/aliyungo/common/endpoint.go b/vendor/github.com/denverdino/aliyungo/common/endpoint.go new file mode 100644 index 000000000000..16bcbf9d620b --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/common/endpoint.go @@ -0,0 +1,118 @@ +package common + +import ( + "encoding/xml" + "fmt" + "io/ioutil" + "os" + "strings" +) + +const ( + // LocationDefaultEndpoint is the default API endpoint of Location services + locationDefaultEndpoint = "https://location.aliyuncs.com" + locationAPIVersion = "2015-06-12" + HTTP_PROTOCOL = "http" + HTTPS_PROTOCOL = "https" +) + +var ( + endpoints = make(map[Region]map[string]string) +) + +//init endpoints from file +func init() { + +} + +func NewLocationClient(accessKeyId, accessKeySecret string) *Client { + endpoint := os.Getenv("LOCATION_ENDPOINT") + if endpoint == "" { + endpoint = locationDefaultEndpoint + } + + client := &Client{} + client.Init(endpoint, locationAPIVersion, accessKeyId, accessKeySecret) + return client +} + +func (client *Client) DescribeEndpoint(args *DescribeEndpointArgs) (*DescribeEndpointResponse, error) { + response := &DescribeEndpointResponse{} + err := client.Invoke("DescribeEndpoint", args, response) + if err != nil { + return nil, err + } + return response, err +} + +func getProductRegionEndpoint(region Region, serviceCode string) string { + if sp, ok := endpoints[region]; ok { + if endpoint, ok := sp[serviceCode]; ok { + return endpoint + } + } + + return "" +} + +func setProductRegionEndpoint(region Region, 
serviceCode string, endpoint string) { + endpoints[region] = map[string]string{ + serviceCode: endpoint, + } +} + +func (client *Client) DescribeOpenAPIEndpoint(region Region, serviceCode string) string { + if endpoint := getProductRegionEndpoint(region, serviceCode); endpoint != "" { + return endpoint + } + + defaultProtocols := HTTP_PROTOCOL + + args := &DescribeEndpointArgs{ + Id: region, + ServiceCode: serviceCode, + Type: "openAPI", + } + + endpoint, err := client.DescribeEndpoint(args) + if err != nil || endpoint.Endpoint == "" { + return "" + } + + for _, protocol := range endpoint.Protocols.Protocols { + if strings.ToLower(protocol) == HTTPS_PROTOCOL { + defaultProtocols = HTTPS_PROTOCOL + break + } + } + + ep := fmt.Sprintf("%s://%s", defaultProtocols, endpoint.Endpoint) + + setProductRegionEndpoint(region, serviceCode, ep) + return ep +} + +func loadEndpointFromFile(region Region, serviceCode string) string { + data, err := ioutil.ReadFile("./endpoints.xml") + if err != nil { + return "" + } + + var endpoints Endpoints + err = xml.Unmarshal(data, &endpoints) + if err != nil { + return "" + } + + for _, endpoint := range endpoints.Endpoint { + if endpoint.RegionIds.RegionId == string(region) { + for _, product := range endpoint.Products.Product { + if strings.ToLower(product.ProductName) == serviceCode { + return fmt.Sprintf("%s://%s", HTTPS_PROTOCOL, product.DomainName) + } + } + } + } + + return "" +} diff --git a/vendor/github.com/denverdino/aliyungo/common/endpoints.xml b/vendor/github.com/denverdino/aliyungo/common/endpoints.xml new file mode 100644 index 000000000000..4079bcd2b508 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/common/endpoints.xml @@ -0,0 +1,1349 @@ + + + + jp-fudao-1 + + Ecsecs-cn-hangzhou.aliyuncs.com + + + + me-east-1 + + Rdsrds.me-east-1.aliyuncs.com + Ecsecs.me-east-1.aliyuncs.com + Vpcvpc.me-east-1.aliyuncs.com + Kmskms.me-east-1.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + Slbslb.me-east-1.aliyuncs.com + + + + us-east-1 + + CScs.aliyuncs.com + Pushcloudpush.aliyuncs.com + COScos.aliyuncs.com + Essess.aliyuncs.com + Ace-opsace-ops.cn-hangzhou.aliyuncs.com + Billingbilling.aliyuncs.com + Dqsdqs.aliyuncs.com + Ddsmongodb.aliyuncs.com + Emremr.aliyuncs.com + Smssms.aliyuncs.com + Jaqjaq.aliyuncs.com + HPChpc.aliyuncs.com + Locationlocation.aliyuncs.com + ChargingServicechargingservice.aliyuncs.com + Msgmsg-inner.aliyuncs.com + Commondrivercommon.driver.aliyuncs.com + R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com + Bssbss.aliyuncs.com + Workorderworkorder.aliyuncs.com + Ocsm-kvstore.aliyuncs.com + Yundunyundun-cn-hangzhou.aliyuncs.com + Ubsms-innerubsms-inner.aliyuncs.com + Dmdm.aliyuncs.com + Greengreen.aliyuncs.com + Riskrisk-cn-hangzhou.aliyuncs.com + oceanbaseoceanbase.aliyuncs.com + Mscmsc-inner.aliyuncs.com + Yundunhsmyundunhsm.aliyuncs.com + Iotiot.aliyuncs.com + jaqjaq.aliyuncs.com + Omsoms.aliyuncs.com + livelive.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + Ubsmsubsms.aliyuncs.com + Vpcvpc.aliyuncs.com + Alertalert.aliyuncs.com + Aceace.cn-hangzhou.aliyuncs.com + AMSams.aliyuncs.com + ROSros.aliyuncs.com + PTSpts.aliyuncs.com + Qualitycheckqualitycheck.aliyuncs.com + M-kvstorem-kvstore.aliyuncs.com + HighDDosyd-highddos-cn-hangzhou.aliyuncs.com + CmsSiteMonitorsitemonitor.aliyuncs.com + Rdsrds.aliyuncs.com + BatchComputebatchCompute.aliyuncs.com + CFcf.aliyuncs.com + Drdsdrds.aliyuncs.com + Acsacs.aliyun-inc.com + Httpdnshttpdns-api.aliyuncs.com + Location-innerlocation-inner.aliyuncs.com + Aasaas.aliyuncs.com + 
Stssts.aliyuncs.com + Dtsdts.aliyuncs.com + Drcdrc.aliyuncs.com + Vpc-innervpc-inner.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + Slbslb.aliyuncs.com + Crmcrm-cn-hangzhou.aliyuncs.com + Domaindomain.aliyuncs.com + Otsots-pop.aliyuncs.com + Ossoss-cn-hangzhou.aliyuncs.com + Ramram.aliyuncs.com + Salessales.cn-hangzhou.aliyuncs.com + OssAdminoss-admin.aliyuncs.com + Alidnsalidns.aliyuncs.com + Onsons.aliyuncs.com + Cdncdn.aliyuncs.com + YundunDdosinner-yundun-ddos.cn-hangzhou.aliyuncs.com + + + + ap-northeast-1 + + Rdsrds.ap-northeast-1.aliyuncs.com + Kmskms.ap-northeast-1.aliyuncs.com + Vpcvpc.ap-northeast-1.aliyuncs.com + Ecsecs.ap-northeast-1.aliyuncs.com + Cmsmetrics.ap-northeast-1.aliyuncs.com + Kvstorer-kvstore.ap-northeast-1.aliyuncs.com + Slbslb.ap-northeast-1.aliyuncs.com + + + + cn-hangzhou-bj-b01 + + Ecsecs-cn-hangzhou.aliyuncs.com + + + + cn-hongkong + + Pushcloudpush.aliyuncs.com + COScos.aliyuncs.com + Onsons.aliyuncs.com + Essess.aliyuncs.com + Ace-opsace-ops.cn-hangzhou.aliyuncs.com + Billingbilling.aliyuncs.com + Dqsdqs.aliyuncs.com + Ddsmongodb.aliyuncs.com + Emremr.aliyuncs.com + Smssms.aliyuncs.com + Jaqjaq.aliyuncs.com + CScs.aliyuncs.com + Kmskms.cn-hongkong.aliyuncs.com + Locationlocation.aliyuncs.com + Msgmsg-inner.aliyuncs.com + ChargingServicechargingservice.aliyuncs.com + R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com + Alertalert.aliyuncs.com + Mscmsc-inner.aliyuncs.com + Drcdrc.aliyuncs.com + Yundunyundun-cn-hangzhou.aliyuncs.com + Ubsms-innerubsms-inner.aliyuncs.com + Ocsm-kvstore.aliyuncs.com + Dmdm.aliyuncs.com + Riskrisk-cn-hangzhou.aliyuncs.com + oceanbaseoceanbase.aliyuncs.com + Workorderworkorder.aliyuncs.com + Yundunhsmyundunhsm.aliyuncs.com + Iotiot.aliyuncs.com + HPChpc.aliyuncs.com + jaqjaq.aliyuncs.com + Omsoms.aliyuncs.com + livelive.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + M-kvstorem-kvstore.aliyuncs.com + Vpcvpc.aliyuncs.com + BatchComputebatchCompute.aliyuncs.com + AMSams.aliyuncs.com + ROSros.aliyuncs.com + PTSpts.aliyuncs.com + Qualitycheckqualitycheck.aliyuncs.com + Bssbss.aliyuncs.com + Ubsmsubsms.aliyuncs.com + CloudAPIapigateway.cn-hongkong.aliyuncs.com + Stssts.aliyuncs.com + CmsSiteMonitorsitemonitor.aliyuncs.com + Aceace.cn-hangzhou.aliyuncs.com + Mtsmts.cn-hongkong.aliyuncs.com + Location-innerlocation-inner.aliyuncs.com + CFcf.aliyuncs.com + Acsacs.aliyun-inc.com + Httpdnshttpdns-api.aliyuncs.com + Greengreen.aliyuncs.com + Aasaas.aliyuncs.com + Alidnsalidns.aliyuncs.com + Dtsdts.aliyuncs.com + HighDDosyd-highddos-cn-hangzhou.aliyuncs.com + Vpc-innervpc-inner.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + Slbslb.aliyuncs.com + Commondrivercommon.driver.aliyuncs.com + Domaindomain.aliyuncs.com + Otsots-pop.aliyuncs.com + Cdncdn.aliyuncs.com + Ramram.aliyuncs.com + Drdsdrds.aliyuncs.com + Rdsrds.aliyuncs.com + Crmcrm-cn-hangzhou.aliyuncs.com + OssAdminoss-admin.aliyuncs.com + Salessales.cn-hangzhou.aliyuncs.com + YundunDdosinner-yundun-ddos.cn-hangzhou.aliyuncs.com + Ossoss-cn-hongkong.aliyuncs.com + + + + cn-beijing-nu16-b01 + + Ecsecs-cn-hangzhou.aliyuncs.com + + + + cn-beijing-am13-c01 + + Ecsecs-cn-hangzhou.aliyuncs.com + Vpcvpc.aliyuncs.com + + + + in-west-antgroup-1 + + Ecsecs-cn-hangzhou.aliyuncs.com + + + + cn-guizhou-gov + + Ecsecs-cn-hangzhou.aliyuncs.com + Vpcvpc.aliyuncs.com + + + + in-west-antgroup-2 + + Ecsecs-cn-hangzhou.aliyuncs.com + + + + cn-qingdao-cm9 + + CScs.aliyuncs.com + Riskrisk-cn-hangzhou.aliyuncs.com + COScos.aliyuncs.com + Essess.aliyuncs.com + Billingbilling.aliyuncs.com + 
Dqsdqs.aliyuncs.com + Ddsmongodb.aliyuncs.com + Alidnsalidns.aliyuncs.com + Smssms.aliyuncs.com + Drdsdrds.aliyuncs.com + HPChpc.aliyuncs.com + Locationlocation.aliyuncs.com + Msgmsg-inner.aliyuncs.com + ChargingServicechargingservice.aliyuncs.com + Ocsm-kvstore.aliyuncs.com + Alertalert.aliyuncs.com + Mscmsc-inner.aliyuncs.com + HighDDosyd-highddos-cn-hangzhou.aliyuncs.com + R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com + Yundunyundun-cn-hangzhou.aliyuncs.com + Ubsms-innerubsms-inner.aliyuncs.com + Dmdm.aliyuncs.com + Greengreen.aliyuncs.com + YundunDdosinner-yundun-ddos.cn-hangzhou.aliyuncs.com + oceanbaseoceanbase.aliyuncs.com + Workorderworkorder.aliyuncs.com + Yundunhsmyundunhsm.aliyuncs.com + Iotiot.aliyuncs.com + jaqjaq.aliyuncs.com + Omsoms.aliyuncs.com + livelive.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + Ubsmsubsms.aliyuncs.com + CmsSiteMonitorsitemonitor.aliyuncs.com + AMSams.aliyuncs.com + Crmcrm-cn-hangzhou.aliyuncs.com + PTSpts.aliyuncs.com + Qualitycheckqualitycheck.aliyuncs.com + Bssbss.aliyuncs.com + M-kvstorem-kvstore.aliyuncs.com + Aceace.cn-hangzhou.aliyuncs.com + Mtsmts.cn-qingdao.aliyuncs.com + CFcf.aliyuncs.com + Httpdnshttpdns-api.aliyuncs.com + Location-innerlocation-inner.aliyuncs.com + Aasaas.aliyuncs.com + Stssts.aliyuncs.com + Dtsdts.aliyuncs.com + Emremr.aliyuncs.com + Drcdrc.aliyuncs.com + Pushcloudpush.aliyuncs.com + Cmsmetrics.aliyuncs.com + Slbslb.aliyuncs.com + Commondrivercommon.driver.aliyuncs.com + Domaindomain.aliyuncs.com + Otsots-pop.aliyuncs.com + ROSros.aliyuncs.com + Ossoss-cn-hangzhou.aliyuncs.com + Ramram.aliyuncs.com + Salessales.cn-hangzhou.aliyuncs.com + Rdsrds.aliyuncs.com + OssAdminoss-admin.aliyuncs.com + Onsons.aliyuncs.com + Cdncdn.aliyuncs.com + + + + tw-snowcloud-kaohsiung + + Ecsecs-cn-hangzhou.aliyuncs.com + + + + cn-shanghai-finance-1 + + Kmskms.cn-shanghai-finance-1.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + Vpcvpc.aliyuncs.com + Rdsrds.aliyuncs.com + + + + cn-guizhou + + Ecsecs-cn-hangzhou.aliyuncs.com + Vpcvpc.aliyuncs.com + + + + cn-qingdao-finance + + Ossoss-cn-qdjbp-a.aliyuncs.com + + + + cn-beijing-gov-1 + + Ossoss-cn-haidian-a.aliyuncs.com + Rdsrds.aliyuncs.com + + + + cn-shanghai + + ARMSarms.cn-shanghai.aliyuncs.com + Riskrisk-cn-hangzhou.aliyuncs.com + COScos.aliyuncs.com + HPChpc.aliyuncs.com + Billingbilling.aliyuncs.com + Dqsdqs.aliyuncs.com + Drcdrc.aliyuncs.com + Alidnsalidns.aliyuncs.com + Smssms.aliyuncs.com + Drdsdrds.aliyuncs.com + CScs.aliyuncs.com + Kmskms.cn-shanghai.aliyuncs.com + Locationlocation.aliyuncs.com + Msgmsg-inner.aliyuncs.com + ChargingServicechargingservice.aliyuncs.com + Ocsm-kvstore.aliyuncs.com + Alertalert.aliyuncs.com + Mscmsc-inner.aliyuncs.com + R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com + Yundunyundun-cn-hangzhou.aliyuncs.com + Ubsms-innerubsms-inner.aliyuncs.com + Dmdm.aliyuncs.com + Greengreen.cn-shanghai.aliyuncs.com + Commondrivercommon.driver.aliyuncs.com + oceanbaseoceanbase.aliyuncs.com + Workorderworkorder.aliyuncs.com + Yundunhsmyundunhsm.aliyuncs.com + Iotiot.aliyuncs.com + Bssbss.aliyuncs.com + Omsoms.aliyuncs.com + Ubsmsubsms.aliyuncs.com + livelive.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + Ace-opsace-ops.cn-hangzhou.aliyuncs.com + CmsSiteMonitorsitemonitor.aliyuncs.com + BatchComputebatchCompute.aliyuncs.com + AMSams.aliyuncs.com + ROSros.aliyuncs.com + PTSpts.aliyuncs.com + Qualitycheckqualitycheck.aliyuncs.com + M-kvstorem-kvstore.aliyuncs.com + Apigatewayapigateway.cn-shanghai.aliyuncs.com + CloudAPIapigateway.cn-shanghai.aliyuncs.com + 
Stssts.aliyuncs.com + Vpcvpc.aliyuncs.com + Aceace.cn-hangzhou.aliyuncs.com + Mtsmts.cn-shanghai.aliyuncs.com + Ddsmongodb.aliyuncs.com + CFcf.aliyuncs.com + Acsacs.aliyun-inc.com + Httpdnshttpdns-api.aliyuncs.com + Pushcloudpush.aliyuncs.com + Location-innerlocation-inner.aliyuncs.com + Aasaas.aliyuncs.com + Emremr.aliyuncs.com + Dtsdts.aliyuncs.com + HighDDosyd-highddos-cn-hangzhou.aliyuncs.com + Jaqjaq.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + Slbslb.aliyuncs.com + Crmcrm-cn-hangzhou.aliyuncs.com + Domaindomain.aliyuncs.com + Otsots-pop.aliyuncs.com + jaqjaq.aliyuncs.com + Cdncdn.aliyuncs.com + Ramram.aliyuncs.com + Salessales.cn-hangzhou.aliyuncs.com + Vpc-innervpc-inner.aliyuncs.com + Rdsrds.aliyuncs.com + OssAdminoss-admin.aliyuncs.com + Onsons.aliyuncs.com + Essess.aliyuncs.com + Ossoss-cn-shanghai.aliyuncs.com + YundunDdosinner-yundun-ddos.cn-hangzhou.aliyuncs.com + vodvod.cn-shanghai.aliyuncs.com + + + + cn-shenzhen-inner + + Riskrisk-cn-hangzhou.aliyuncs.com + COScos.aliyuncs.com + Onsons.aliyuncs.com + Essess.aliyuncs.com + Billingbilling.aliyuncs.com + Dqsdqs.aliyuncs.com + Ddsmongodb.aliyuncs.com + Alidnsalidns.aliyuncs.com + Smssms.aliyuncs.com + Salessales.cn-hangzhou.aliyuncs.com + HPChpc.aliyuncs.com + Locationlocation.aliyuncs.com + Msgmsg-inner.aliyuncs.com + ChargingServicechargingservice.aliyuncs.com + Ocsm-kvstore.aliyuncs.com + jaqjaq.aliyuncs.com + Mscmsc-inner.aliyuncs.com + HighDDosyd-highddos-cn-hangzhou.aliyuncs.com + R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com + Bssbss.aliyuncs.com + Ubsms-innerubsms-inner.aliyuncs.com + Dmdm.aliyuncs.com + Commondrivercommon.driver.aliyuncs.com + oceanbaseoceanbase.aliyuncs.com + Workorderworkorder.aliyuncs.com + Yundunhsmyundunhsm.aliyuncs.com + Iotiot.aliyuncs.com + Alertalert.aliyuncs.com + Omsoms.aliyuncs.com + livelive.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + Ubsmsubsms.aliyuncs.com + CmsSiteMonitorsitemonitor.aliyuncs.com + AMSams.aliyuncs.com + Crmcrm-cn-hangzhou.aliyuncs.com + PTSpts.aliyuncs.com + Qualitycheckqualitycheck.aliyuncs.com + M-kvstorem-kvstore.aliyuncs.com + Stssts.aliyuncs.com + Aceace.cn-hangzhou.aliyuncs.com + Mtsmts.cn-shenzhen.aliyuncs.com + CFcf.aliyuncs.com + Httpdnshttpdns-api.aliyuncs.com + Greengreen.aliyuncs.com + Aasaas.aliyuncs.com + Emremr.aliyuncs.com + CScs.aliyuncs.com + Drcdrc.aliyuncs.com + Pushcloudpush.aliyuncs.com + Cmsmetrics.aliyuncs.com + Slbslb.aliyuncs.com + YundunDdosinner-yundun-ddos.cn-hangzhou.aliyuncs.com + Dtsdts.aliyuncs.com + Domaindomain.aliyuncs.com + Otsots-pop.aliyuncs.com + ROSros.aliyuncs.com + Cdncdn.aliyuncs.com + Ramram.aliyuncs.com + Drdsdrds.aliyuncs.com + Rdsrds.aliyuncs.com + OssAdminoss-admin.aliyuncs.com + Location-innerlocation-inner.aliyuncs.com + Yundunyundun-cn-hangzhou.aliyuncs.com + Ossoss-cn-hangzhou.aliyuncs.com + + + + cn-fujian + + Ecsecs-cn-hangzhou.aliyuncs.com + Rdsrds.aliyuncs.com + + + + in-mumbai-alipay + + Ecsecs-cn-hangzhou.aliyuncs.com + + + + us-west-1 + + CScs.aliyuncs.com + COScos.aliyuncs.com + Essess.aliyuncs.com + Ace-opsace-ops.cn-hangzhou.aliyuncs.com + Billingbilling.aliyuncs.com + Dqsdqs.aliyuncs.com + Ddsmongodb.aliyuncs.com + Stssts.aliyuncs.com + Smssms.aliyuncs.com + Jaqjaq.aliyuncs.com + Pushcloudpush.aliyuncs.com + Alidnsalidns.aliyuncs.com + Locationlocation.aliyuncs.com + Msgmsg-inner.aliyuncs.com + ChargingServicechargingservice.aliyuncs.com + Ocsm-kvstore.aliyuncs.com + Bssbss.aliyuncs.com + Mscmsc-inner.aliyuncs.com + R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com + 
Yundunyundun-cn-hangzhou.aliyuncs.com + Ubsms-innerubsms-inner.aliyuncs.com + Dmdm.aliyuncs.com + Greengreen.aliyuncs.com + Riskrisk-cn-hangzhou.aliyuncs.com + oceanbaseoceanbase.aliyuncs.com + Workorderworkorder.aliyuncs.com + Yundunhsmyundunhsm.aliyuncs.com + Iotiot.aliyuncs.com + Alertalert.aliyuncs.com + Omsoms.aliyuncs.com + livelive.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + M-kvstorem-kvstore.aliyuncs.com + Vpcvpc.aliyuncs.com + BatchComputebatchCompute.aliyuncs.com + Aceace.cn-hangzhou.aliyuncs.com + AMSams.aliyuncs.com + ROSros.aliyuncs.com + PTSpts.aliyuncs.com + Qualitycheckqualitycheck.aliyuncs.com + Ubsmsubsms.aliyuncs.com + HighDDosyd-highddos-cn-hangzhou.aliyuncs.com + CmsSiteMonitorsitemonitor.aliyuncs.com + Rdsrds.aliyuncs.com + Mtsmts.us-west-1.aliyuncs.com + CFcf.aliyuncs.com + Acsacs.aliyun-inc.com + Httpdnshttpdns-api.aliyuncs.com + Location-innerlocation-inner.aliyuncs.com + Aasaas.aliyuncs.com + Emremr.aliyuncs.com + HPChpc.aliyuncs.com + Drcdrc.aliyuncs.com + Vpc-innervpc-inner.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + Slbslb.aliyuncs.com + Crmcrm-cn-hangzhou.aliyuncs.com + Dtsdts.aliyuncs.com + Domaindomain.aliyuncs.com + Otsots-pop.aliyuncs.com + Commondrivercommon.driver.aliyuncs.com + jaqjaq.aliyuncs.com + Cdncdn.aliyuncs.com + Ramram.aliyuncs.com + Drdsdrds.aliyuncs.com + OssAdminoss-admin.aliyuncs.com + Salessales.cn-hangzhou.aliyuncs.com + Onsons.aliyuncs.com + Ossoss-us-west-1.aliyuncs.com + YundunDdosinner-yundun-ddos.cn-hangzhou.aliyuncs.com + + + + cn-shanghai-inner + + CScs.aliyuncs.com + COScos.aliyuncs.com + Essess.aliyuncs.com + Billingbilling.aliyuncs.com + Dqsdqs.aliyuncs.com + Ddsmongodb.aliyuncs.com + Emremr.aliyuncs.com + Smssms.aliyuncs.com + Drdsdrds.aliyuncs.com + HPChpc.aliyuncs.com + Locationlocation.aliyuncs.com + ChargingServicechargingservice.aliyuncs.com + Msgmsg-inner.aliyuncs.com + Commondrivercommon.driver.aliyuncs.com + R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com + jaqjaq.aliyuncs.com + Mscmsc-inner.aliyuncs.com + Ocsm-kvstore.aliyuncs.com + Yundunyundun-cn-hangzhou.aliyuncs.com + Ubsms-innerubsms-inner.aliyuncs.com + Dmdm.aliyuncs.com + Greengreen.aliyuncs.com + Riskrisk-cn-hangzhou.aliyuncs.com + oceanbaseoceanbase.aliyuncs.com + Workorderworkorder.aliyuncs.com + Yundunhsmyundunhsm.aliyuncs.com + Iotiot.aliyuncs.com + Bssbss.aliyuncs.com + Omsoms.aliyuncs.com + livelive.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + M-kvstorem-kvstore.aliyuncs.com + CmsSiteMonitorsitemonitor.aliyuncs.com + Alertalert.aliyuncs.com + Aceace.cn-hangzhou.aliyuncs.com + AMSams.aliyuncs.com + ROSros.aliyuncs.com + PTSpts.aliyuncs.com + Qualitycheckqualitycheck.aliyuncs.com + Ubsmsubsms.aliyuncs.com + HighDDosyd-highddos-cn-hangzhou.aliyuncs.com + Rdsrds.aliyuncs.com + Mtsmts.cn-shanghai.aliyuncs.com + CFcf.aliyuncs.com + Httpdnshttpdns-api.aliyuncs.com + Location-innerlocation-inner.aliyuncs.com + Aasaas.aliyuncs.com + Stssts.aliyuncs.com + Dtsdts.aliyuncs.com + Drcdrc.aliyuncs.com + Pushcloudpush.aliyuncs.com + Cmsmetrics.aliyuncs.com + Slbslb.aliyuncs.com + YundunDdosinner-yundun-ddos.cn-hangzhou.aliyuncs.com + Domaindomain.aliyuncs.com + Otsots-pop.aliyuncs.com + Ossoss-cn-hangzhou.aliyuncs.com + Ramram.aliyuncs.com + Salessales.cn-hangzhou.aliyuncs.com + Crmcrm-cn-hangzhou.aliyuncs.com + OssAdminoss-admin.aliyuncs.com + Alidnsalidns.aliyuncs.com + Onsons.aliyuncs.com + Cdncdn.aliyuncs.com + + + + cn-anhui-gov-1 + + Ecsecs-cn-hangzhou.aliyuncs.com + + + + cn-hangzhou-finance + + Ossoss-cn-hzjbp-b-console.aliyuncs.com + + + + 
cn-hangzhou + + ARMSarms.cn-hangzhou.aliyuncs.com + CScs.aliyuncs.com + COScos.aliyuncs.com + Essess.aliyuncs.com + Ace-opsace-ops.cn-hangzhou.aliyuncs.com + Billingbilling.aliyuncs.com + Dqsdqs.aliyuncs.com + Ddsmongodb.aliyuncs.com + Stssts.aliyuncs.com + Smssms.aliyuncs.com + Msgmsg-inner.aliyuncs.com + Jaqjaq.aliyuncs.com + Pushcloudpush.aliyuncs.com + Livelive.aliyuncs.com + Kmskms.cn-hangzhou.aliyuncs.com + Locationlocation.aliyuncs.com + Hpchpc.aliyuncs.com + ChargingServicechargingservice.aliyuncs.com + R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com + Alertalert.aliyuncs.com + Mscmsc-inner.aliyuncs.com + Drcdrc.aliyuncs.com + Yundunyundun-cn-hangzhou.aliyuncs.com + Ubsms-innerubsms-inner.aliyuncs.com + Ocsm-kvstore.aliyuncs.com + Dmdm.aliyuncs.com + Greengreen.cn-hangzhou.aliyuncs.com + Commondrivercommon.driver.aliyuncs.com + oceanbaseoceanbase.aliyuncs.com + Workorderworkorder.aliyuncs.com + Yundunhsmyundunhsm.aliyuncs.com + Iotiot.aliyuncs.com + jaqjaq.aliyuncs.com + Omsoms.aliyuncs.com + livelive.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + M-kvstorem-kvstore.aliyuncs.com + Vpcvpc.aliyuncs.com + BatchComputebatchCompute.aliyuncs.com + Domaindomain.aliyuncs.com + AMSams.aliyuncs.com + ROSros.aliyuncs.com + PTSpts.aliyuncs.com + Qualitycheckqualitycheck.aliyuncs.com + Ubsmsubsms.aliyuncs.com + Apigatewayapigateway.cn-hangzhou.aliyuncs.com + CloudAPIapigateway.cn-hangzhou.aliyuncs.com + CmsSiteMonitorsitemonitor.aliyuncs.com + Aceace.cn-hangzhou.aliyuncs.com + Mtsmts.cn-hangzhou.aliyuncs.com + Oascn-hangzhou.oas.aliyuncs.com + CFcf.aliyuncs.com + Acsacs.aliyun-inc.com + Httpdnshttpdns-api.aliyuncs.com + Location-innerlocation-inner.aliyuncs.com + Aasaas.aliyuncs.com + Alidnsalidns.aliyuncs.com + HPChpc.aliyuncs.com + Emremr.aliyuncs.com + HighDDosyd-highddos-cn-hangzhou.aliyuncs.com + Vpc-innervpc-inner.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + Slbslb.aliyuncs.com + Riskrisk-cn-hangzhou.aliyuncs.com + Dtsdts.aliyuncs.com + Bssbss.aliyuncs.com + Otsots-pop.aliyuncs.com + Cdncdn.aliyuncs.com + Ramram.aliyuncs.com + Drdsdrds.aliyuncs.com + Rdsrds.aliyuncs.com + Crmcrm-cn-hangzhou.aliyuncs.com + OssAdminoss-admin.aliyuncs.com + Salessales.cn-hangzhou.aliyuncs.com + Onsons.aliyuncs.com + Ossoss-cn-hangzhou.aliyuncs.com + YundunDdosinner-yundun-ddos.cn-hangzhou.aliyuncs.com + + + + cn-beijing-inner + + Riskrisk-cn-hangzhou.aliyuncs.com + COScos.aliyuncs.com + HPChpc.aliyuncs.com + Billingbilling.aliyuncs.com + Dqsdqs.aliyuncs.com + Ddsmongodb.aliyuncs.com + Emremr.aliyuncs.com + Smssms.aliyuncs.com + Drdsdrds.aliyuncs.com + CScs.aliyuncs.com + Locationlocation.aliyuncs.com + ChargingServicechargingservice.aliyuncs.com + Msgmsg-inner.aliyuncs.com + Essess.aliyuncs.com + R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com + Bssbss.aliyuncs.com + Workorderworkorder.aliyuncs.com + Drcdrc.aliyuncs.com + Yundunyundun-cn-hangzhou.aliyuncs.com + Ubsms-innerubsms-inner.aliyuncs.com + Ocsm-kvstore.aliyuncs.com + Dmdm.aliyuncs.com + YundunDdosinner-yundun-ddos.cn-hangzhou.aliyuncs.com + oceanbaseoceanbase.aliyuncs.com + Mscmsc-inner.aliyuncs.com + Yundunhsmyundunhsm.aliyuncs.com + Iotiot.aliyuncs.com + jaqjaq.aliyuncs.com + Omsoms.aliyuncs.com + M-kvstorem-kvstore.aliyuncs.com + livelive.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + Alertalert.aliyuncs.com + CmsSiteMonitorsitemonitor.aliyuncs.com + Aceace.cn-hangzhou.aliyuncs.com + AMSams.aliyuncs.com + Otsots-pop.aliyuncs.com + PTSpts.aliyuncs.com + Qualitycheckqualitycheck.aliyuncs.com + Ubsmsubsms.aliyuncs.com + Stssts.aliyuncs.com + 
Rdsrds.aliyuncs.com + Mtsmts.cn-beijing.aliyuncs.com + Location-innerlocation-inner.aliyuncs.com + CFcf.aliyuncs.com + Httpdnshttpdns-api.aliyuncs.com + Greengreen.aliyuncs.com + Aasaas.aliyuncs.com + Alidnsalidns.aliyuncs.com + Pushcloudpush.aliyuncs.com + HighDDosyd-highddos-cn-hangzhou.aliyuncs.com + Cmsmetrics.aliyuncs.com + Slbslb.aliyuncs.com + Commondrivercommon.driver.aliyuncs.com + Dtsdts.aliyuncs.com + Domaindomain.aliyuncs.com + ROSros.aliyuncs.com + Ossoss-cn-hangzhou.aliyuncs.com + Ramram.aliyuncs.com + Salessales.cn-hangzhou.aliyuncs.com + Crmcrm-cn-hangzhou.aliyuncs.com + OssAdminoss-admin.aliyuncs.com + Onsons.aliyuncs.com + Cdncdn.aliyuncs.com + + + + cn-haidian-cm12-c01 + + Ecsecs-cn-hangzhou.aliyuncs.com + Vpcvpc.aliyuncs.com + Rdsrds.aliyuncs.com + + + + cn-anhui-gov + + Ecsecs-cn-hangzhou.aliyuncs.com + Vpcvpc.aliyuncs.com + + + + cn-shenzhen + + ARMSarms.cn-shenzhen.aliyuncs.com + CScs.aliyuncs.com + COScos.aliyuncs.com + Onsons.aliyuncs.com + Essess.aliyuncs.com + Dqsdqs.aliyuncs.com + Ddsmongodb.aliyuncs.com + Alidnsalidns.aliyuncs.com + Smssms.aliyuncs.com + Jaqjaq.aliyuncs.com + Pushcloudpush.aliyuncs.com + Kmskms.cn-shenzhen.aliyuncs.com + Locationlocation.aliyuncs.com + Ocsm-kvstore.aliyuncs.com + Alertalert.aliyuncs.com + Drcdrc.aliyuncs.com + R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com + Yundunyundun-cn-hangzhou.aliyuncs.com + Ubsms-innerubsms-inner.aliyuncs.com + Dmdm.aliyuncs.com + Commondrivercommon.driver.aliyuncs.com + oceanbaseoceanbase.aliyuncs.com + Iotiot.aliyuncs.com + HPChpc.aliyuncs.com + Bssbss.aliyuncs.com + Omsoms.aliyuncs.com + Ubsmsubsms.aliyuncs.com + livelive.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + M-kvstorem-kvstore.aliyuncs.com + CmsSiteMonitorsitemonitor.aliyuncs.com + BatchComputebatchcompute.cn-shenzhen.aliyuncs.com + Aceace.cn-hangzhou.aliyuncs.com + ROSros.aliyuncs.com + PTSpts.aliyuncs.com + Ace-opsace-ops.cn-hangzhou.aliyuncs.com + Apigatewayapigateway.cn-shenzhen.aliyuncs.com + CloudAPIapigateway.cn-shenzhen.aliyuncs.com + Stssts.aliyuncs.com + Vpcvpc.aliyuncs.com + Rdsrds.aliyuncs.com + Mtsmts.cn-shenzhen.aliyuncs.com + Oascn-shenzhen.oas.aliyuncs.com + CFcf.aliyuncs.com + Acsacs.aliyun-inc.com + Crmcrm-cn-hangzhou.aliyuncs.com + Location-innerlocation-inner.aliyuncs.com + Aasaas.aliyuncs.com + Emremr.aliyuncs.com + Dtsdts.aliyuncs.com + HighDDosyd-highddos-cn-hangzhou.aliyuncs.com + Vpc-innervpc-inner.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + Slbslb.aliyuncs.com + Riskrisk-cn-hangzhou.aliyuncs.com + Domaindomain.aliyuncs.com + Otsots-pop.aliyuncs.com + jaqjaq.aliyuncs.com + Cdncdn.aliyuncs.com + Ramram.aliyuncs.com + Drdsdrds.aliyuncs.com + OssAdminoss-admin.aliyuncs.com + Greengreen.aliyuncs.com + Httpdnshttpdns-api.aliyuncs.com + Ossoss-cn-shenzhen.aliyuncs.com + + + + ap-southeast-2 + + Rdsrds.ap-southeast-2.aliyuncs.com + Kmskms.ap-southeast-2.aliyuncs.com + Vpcvpc.ap-southeast-2.aliyuncs.com + Ecsecs.ap-southeast-2.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + Slbslb.ap-southeast-2.aliyuncs.com + + + + cn-qingdao + + CScs.aliyuncs.com + COScos.aliyuncs.com + HPChpc.aliyuncs.com + Dqsdqs.aliyuncs.com + Ddsmongodb.aliyuncs.com + Emremr.cn-qingdao.aliyuncs.com + Smssms.aliyuncs.com + Jaqjaq.aliyuncs.com + Dtsdts.aliyuncs.com + Locationlocation.aliyuncs.com + Essess.aliyuncs.com + R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com + Alertalert.aliyuncs.com + Drcdrc.aliyuncs.com + Yundunyundun-cn-hangzhou.aliyuncs.com + Ubsms-innerubsms-inner.cn-qingdao.aliyuncs.com + Ocsm-kvstore.aliyuncs.com + 
Dmdm.aliyuncs.com + Riskrisk-cn-hangzhou.aliyuncs.com + oceanbaseoceanbase.aliyuncs.com + Iotiot.aliyuncs.com + Bssbss.aliyuncs.com + Omsoms.aliyuncs.com + Ubsmsubsms.cn-qingdao.aliyuncs.com + livelive.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + M-kvstorem-kvstore.aliyuncs.com + CmsSiteMonitorsitemonitor.aliyuncs.com + BatchComputebatchcompute.cn-qingdao.aliyuncs.com + Aceace.cn-hangzhou.aliyuncs.com + Otsots-pop.aliyuncs.com + PTSpts.aliyuncs.com + Ace-opsace-ops.cn-hangzhou.aliyuncs.com + Apigatewayapigateway.cn-qingdao.aliyuncs.com + CloudAPIapigateway.cn-qingdao.aliyuncs.com + Stssts.aliyuncs.com + Rdsrds.aliyuncs.com + Mtsmts.cn-qingdao.aliyuncs.com + Location-innerlocation-inner.aliyuncs.com + CFcf.aliyuncs.com + Acsacs.aliyun-inc.com + Httpdnshttpdns-api.aliyuncs.com + Greengreen.aliyuncs.com + Aasaas.aliyuncs.com + Alidnsalidns.aliyuncs.com + Pushcloudpush.aliyuncs.com + HighDDosyd-highddos-cn-hangzhou.aliyuncs.com + Vpc-innervpc-inner.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + Slbslb.aliyuncs.com + Commondrivercommon.driver.aliyuncs.com + Domaindomain.aliyuncs.com + ROSros.aliyuncs.com + jaqjaq.aliyuncs.com + Cdncdn.aliyuncs.com + Ramram.aliyuncs.com + Drdsdrds.aliyuncs.com + Crmcrm-cn-hangzhou.aliyuncs.com + OssAdminoss-admin.aliyuncs.com + Onsons.aliyuncs.com + Ossoss-cn-qingdao.aliyuncs.com + + + + cn-shenzhen-su18-b02 + + Ecsecs-cn-hangzhou.aliyuncs.com + + + + cn-shenzhen-su18-b03 + + Ecsecs-cn-hangzhou.aliyuncs.com + + + + cn-shenzhen-su18-b01 + + Ecsecs-cn-hangzhou.aliyuncs.com + + + + ap-southeast-antgroup-1 + + Ecsecs-cn-hangzhou.aliyuncs.com + + + + oss-cn-bjzwy + + Ossoss-cn-bjzwy.aliyuncs.com + + + + cn-henan-am12001 + + Ecsecs-cn-hangzhou.aliyuncs.com + Vpcvpc.aliyuncs.com + + + + cn-beijing + + ARMSarms.cn-beijing.aliyuncs.com + CScs.aliyuncs.com + COScos.aliyuncs.com + Jaqjaq.aliyuncs.com + Essess.aliyuncs.com + Billingbilling.aliyuncs.com + Dqsdqs.aliyuncs.com + Ddsmongodb.aliyuncs.com + Stssts.aliyuncs.com + Smssms.aliyuncs.com + Msgmsg-inner.aliyuncs.com + Salessales.cn-hangzhou.aliyuncs.com + HPChpc.aliyuncs.com + Oascn-beijing.oas.aliyuncs.com + Locationlocation.aliyuncs.com + Onsons.aliyuncs.com + ChargingServicechargingservice.aliyuncs.com + Hpchpc.aliyuncs.com + Commondrivercommon.driver.aliyuncs.com + Ocsm-kvstore.aliyuncs.com + jaqjaq.aliyuncs.com + Workorderworkorder.aliyuncs.com + R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com + Bssbss.aliyuncs.com + Ubsms-innerubsms-inner.aliyuncs.com + Dmdm.aliyuncs.com + Riskrisk-cn-hangzhou.aliyuncs.com + oceanbaseoceanbase.aliyuncs.com + Mscmsc-inner.aliyuncs.com + Yundunhsmyundunhsm.aliyuncs.com + Iotiot.aliyuncs.com + Alertalert.aliyuncs.com + Omsoms.aliyuncs.com + Ubsmsubsms.aliyuncs.com + livelive.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + Ace-opsace-ops.cn-hangzhou.aliyuncs.com + Vpcvpc.aliyuncs.com + BatchComputebatchCompute.aliyuncs.com + AMSams.aliyuncs.com + ROSros.aliyuncs.com + PTSpts.aliyuncs.com + M-kvstorem-kvstore.aliyuncs.com + Apigatewayapigateway.cn-beijing.aliyuncs.com + CloudAPIapigateway.cn-beijing.aliyuncs.com + Kmskms.cn-beijing.aliyuncs.com + HighDDosyd-highddos-cn-hangzhou.aliyuncs.com + CmsSiteMonitorsitemonitor.aliyuncs.com + Aceace.cn-hangzhou.aliyuncs.com + Mtsmts.cn-beijing.aliyuncs.com + CFcf.aliyuncs.com + Acsacs.aliyun-inc.com + Httpdnshttpdns-api.aliyuncs.com + Location-innerlocation-inner.aliyuncs.com + Aasaas.aliyuncs.com + Emremr.aliyuncs.com + Dtsdts.aliyuncs.com + Drcdrc.aliyuncs.com + Pushcloudpush.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + 
Slbslb.aliyuncs.com + Crmcrm-cn-hangzhou.aliyuncs.com + Domaindomain.aliyuncs.com + Otsots-pop.aliyuncs.com + Ossoss-cn-beijing.aliyuncs.com + Ramram.aliyuncs.com + Drdsdrds.aliyuncs.com + Vpc-innervpc-inner.aliyuncs.com + Rdsrds.aliyuncs.com + OssAdminoss-admin.aliyuncs.com + Alidnsalidns.aliyuncs.com + Greengreen.aliyuncs.com + Yundunyundun-cn-hangzhou.aliyuncs.com + Cdncdn.aliyuncs.com + YundunDdosinner-yundun-ddos.cn-hangzhou.aliyuncs.com + vodvod.cn-beijing.aliyuncs.com + + + + cn-hangzhou-d + + CScs.aliyuncs.com + COScos.aliyuncs.com + Essess.aliyuncs.com + Billingbilling.aliyuncs.com + Dqsdqs.aliyuncs.com + Ddsmongodb.aliyuncs.com + Emremr.aliyuncs.com + Smssms.aliyuncs.com + Salessales.cn-hangzhou.aliyuncs.com + Dtsdts.aliyuncs.com + Locationlocation.aliyuncs.com + Msgmsg-inner.aliyuncs.com + ChargingServicechargingservice.aliyuncs.com + R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com + Bssbss.aliyuncs.com + Mscmsc-inner.aliyuncs.com + Ocsm-kvstore.aliyuncs.com + Yundunyundun-cn-hangzhou.aliyuncs.com + Ubsms-innerubsms-inner.aliyuncs.com + Dmdm.aliyuncs.com + Riskrisk-cn-hangzhou.aliyuncs.com + oceanbaseoceanbase.aliyuncs.com + Workorderworkorder.aliyuncs.com + Alidnsalidns.aliyuncs.com + Iotiot.aliyuncs.com + HPChpc.aliyuncs.com + jaqjaq.aliyuncs.com + Omsoms.aliyuncs.com + livelive.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + M-kvstorem-kvstore.aliyuncs.com + CmsSiteMonitorsitemonitor.aliyuncs.com + Alertalert.aliyuncs.com + Aceace.cn-hangzhou.aliyuncs.com + AMSams.aliyuncs.com + Otsots-pop.aliyuncs.com + PTSpts.aliyuncs.com + Qualitycheckqualitycheck.aliyuncs.com + Ubsmsubsms.aliyuncs.com + Rdsrds.aliyuncs.com + Mtsmts.cn-hangzhou.aliyuncs.com + Location-innerlocation-inner.aliyuncs.com + CFcf.aliyuncs.com + Httpdnshttpdns-api.aliyuncs.com + Greengreen.aliyuncs.com + Aasaas.aliyuncs.com + Stssts.aliyuncs.com + Pushcloudpush.aliyuncs.com + HighDDosyd-highddos-cn-hangzhou.aliyuncs.com + Cmsmetrics.aliyuncs.com + Slbslb.aliyuncs.com + YundunDdosinner-yundun-ddos.cn-hangzhou.aliyuncs.com + Domaindomain.aliyuncs.com + Commondrivercommon.driver.aliyuncs.com + ROSros.aliyuncs.com + Cdncdn.aliyuncs.com + Ramram.aliyuncs.com + Drdsdrds.aliyuncs.com + Crmcrm-cn-hangzhou.aliyuncs.com + OssAdminoss-admin.aliyuncs.com + Onsons.aliyuncs.com + Yundunhsmyundunhsm.aliyuncs.com + Drcdrc.aliyuncs.com + Ossoss-cn-hangzhou.aliyuncs.com + + + + cn-gansu-am6 + + Ecsecs-cn-hangzhou.aliyuncs.com + Vpcvpc.aliyuncs.com + Rdsrds.aliyuncs.com + + + + cn-ningxiazhongwei + + Ecsecs-cn-hangzhou.aliyuncs.com + Vpcvpc.aliyuncs.com + + + + cn-shanghai-et2-b01 + + CScs.aliyuncs.com + Riskrisk-cn-hangzhou.aliyuncs.com + COScos.aliyuncs.com + Onsons.aliyuncs.com + Essess.aliyuncs.com + Billingbilling.aliyuncs.com + Dqsdqs.aliyuncs.com + Ddsmongodb.aliyuncs.com + Alidnsalidns.aliyuncs.com + Smssms.aliyuncs.com + Jaqjaq.aliyuncs.com + Dtsdts.aliyuncs.com + Locationlocation.aliyuncs.com + Msgmsg-inner.aliyuncs.com + ChargingServicechargingservice.aliyuncs.com + Ocsm-kvstore.aliyuncs.com + Bssbss.aliyuncs.com + Mscmsc-inner.aliyuncs.com + R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com + Yundunyundun-cn-hangzhou.aliyuncs.com + Ubsms-innerubsms-inner.aliyuncs.com + Dmdm.aliyuncs.com + Commondrivercommon.driver.aliyuncs.com + oceanbaseoceanbase.aliyuncs.com + Workorderworkorder.aliyuncs.com + Yundunhsmyundunhsm.aliyuncs.com + Iotiot.aliyuncs.com + jaqjaq.aliyuncs.com + Omsoms.aliyuncs.com + Ubsmsubsms.aliyuncs.com + livelive.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + Ace-opsace-ops.cn-hangzhou.aliyuncs.com + 
CmsSiteMonitorsitemonitor.aliyuncs.com + BatchComputebatchCompute.aliyuncs.com + Aceace.cn-hangzhou.aliyuncs.com + AMSams.aliyuncs.com + Otsots-pop.aliyuncs.com + PTSpts.aliyuncs.com + Qualitycheckqualitycheck.aliyuncs.com + M-kvstorem-kvstore.aliyuncs.com + Rdsrds.aliyuncs.com + Mtsmts.cn-hangzhou.aliyuncs.com + CFcf.aliyuncs.com + Acsacs.aliyun-inc.com + Httpdnshttpdns-api.aliyuncs.com + Location-innerlocation-inner.aliyuncs.com + Aasaas.aliyuncs.com + Stssts.aliyuncs.com + HPChpc.aliyuncs.com + Emremr.aliyuncs.com + HighDDosyd-highddos-cn-hangzhou.aliyuncs.com + Pushcloudpush.aliyuncs.com + Cmsmetrics.aliyuncs.com + Slbslb.aliyuncs.com + Crmcrm-cn-hangzhou.aliyuncs.com + Alertalert.aliyuncs.com + Domaindomain.aliyuncs.com + ROSros.aliyuncs.com + Cdncdn.aliyuncs.com + Ramram.aliyuncs.com + Drdsdrds.aliyuncs.com + Vpc-innervpc-inner.aliyuncs.com + OssAdminoss-admin.aliyuncs.com + Salessales.cn-hangzhou.aliyuncs.com + Greengreen.aliyuncs.com + Drcdrc.aliyuncs.com + Ossoss-cn-hangzhou.aliyuncs.com + YundunDdosinner-yundun-ddos.cn-hangzhou.aliyuncs.com + + + + cn-ningxia-am7-c01 + + Ecsecs-cn-hangzhou.aliyuncs.com + Vpcvpc.aliyuncs.com + + + + cn-shenzhen-finance-1 + + Kmskms.cn-shenzhen-finance-1.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + Rdsrds.aliyuncs.com + Vpcvpc.aliyuncs.com + + + + ap-southeast-1 + + CScs.aliyuncs.com + Riskrisk-cn-hangzhou.aliyuncs.com + COScos.aliyuncs.com + Essess.aliyuncs.com + Billingbilling.aliyuncs.com + Dqsdqs.aliyuncs.com + Ddsmongodb.aliyuncs.com + Alidnsalidns.aliyuncs.com + Smssms.aliyuncs.com + Drdsdrds.aliyuncs.com + Dtsdts.aliyuncs.com + Kmskms.ap-southeast-1.aliyuncs.com + Locationlocation.aliyuncs.com + Msgmsg-inner.aliyuncs.com + ChargingServicechargingservice.aliyuncs.com + R-kvstorer-kvstore-cn-hangzhou.aliyuncs.com + Alertalert.aliyuncs.com + Mscmsc-inner.aliyuncs.com + HighDDosyd-highddos-cn-hangzhou.aliyuncs.com + Yundunyundun-cn-hangzhou.aliyuncs.com + Ubsms-innerubsms-inner.aliyuncs.com + Ocsm-kvstore.aliyuncs.com + Dmdm.aliyuncs.com + Greengreen.aliyuncs.com + Commondrivercommon.driver.aliyuncs.com + oceanbaseoceanbase.aliyuncs.com + Workorderworkorder.aliyuncs.com + Yundunhsmyundunhsm.aliyuncs.com + Iotiot.aliyuncs.com + HPChpc.aliyuncs.com + jaqjaq.aliyuncs.com + Omsoms.aliyuncs.com + livelive.aliyuncs.com + Ecsecs-cn-hangzhou.aliyuncs.com + M-kvstorem-kvstore.aliyuncs.com + Vpcvpc.aliyuncs.com + BatchComputebatchCompute.aliyuncs.com + AMSams.aliyuncs.com + ROSros.aliyuncs.com + PTSpts.aliyuncs.com + Qualitycheckqualitycheck.aliyuncs.com + Bssbss.aliyuncs.com + Ubsmsubsms.aliyuncs.com + Apigatewayapigateway.ap-southeast-1.aliyuncs.com + CloudAPIapigateway.ap-southeast-1.aliyuncs.com + Stssts.aliyuncs.com + CmsSiteMonitorsitemonitor.aliyuncs.com + Aceace.cn-hangzhou.aliyuncs.com + Mtsmts.ap-southeast-1.aliyuncs.com + CFcf.aliyuncs.com + Crmcrm-cn-hangzhou.aliyuncs.com + Location-innerlocation-inner.aliyuncs.com + Aasaas.aliyuncs.com + Emremr.ap-southeast-1.aliyuncs.com + Httpdnshttpdns-api.aliyuncs.com + Drcdrc.aliyuncs.com + Pushcloudpush.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + Slbslb.aliyuncs.com + YundunDdosinner-yundun-ddos.cn-hangzhou.aliyuncs.com + Domaindomain.aliyuncs.com + Otsots-pop.aliyuncs.com + Cdncdn.aliyuncs.com + Ramram.aliyuncs.com + Salessales.cn-hangzhou.aliyuncs.com + Rdsrds.aliyuncs.com + OssAdminoss-admin.aliyuncs.com + Onsons.aliyuncs.com + Ossoss-ap-southeast-1.aliyuncs.com + + + + cn-shenzhen-st4-d01 + + Ecsecs-cn-hangzhou.aliyuncs.com + + + + eu-central-1 + + Rdsrds.eu-central-1.aliyuncs.com + 
Ecsecs.eu-central-1.aliyuncs.com + Vpcvpc.eu-central-1.aliyuncs.com + Kmskms.eu-central-1.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + Slbslb.eu-central-1.aliyuncs.com + + + + cn-zhangjiakou + + Rdsrds.cn-zhangjiakou.aliyuncs.com + Ecsecs.cn-zhangjiakou.aliyuncs.com + Vpcvpc.cn-zhangjiakou.aliyuncs.com + Cmsmetrics.cn-hangzhou.aliyuncs.com + Slbslb.cn-zhangjiakou.aliyuncs.com + + + diff --git a/vendor/github.com/denverdino/aliyungo/common/regions.go b/vendor/github.com/denverdino/aliyungo/common/regions.go new file mode 100644 index 000000000000..62e6e9d814f6 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/common/regions.go @@ -0,0 +1,34 @@ +package common + +// Region represents ECS region +type Region string + +// Constants of region definition +const ( + Hangzhou = Region("cn-hangzhou") + Qingdao = Region("cn-qingdao") + Beijing = Region("cn-beijing") + Hongkong = Region("cn-hongkong") + Shenzhen = Region("cn-shenzhen") + Shanghai = Region("cn-shanghai") + Zhangjiakou = Region("cn-zhangjiakou") + + APSouthEast1 = Region("ap-southeast-1") + APNorthEast1 = Region("ap-northeast-1") + APSouthEast2 = Region("ap-southeast-2") + + USWest1 = Region("us-west-1") + USEast1 = Region("us-east-1") + + MEEast1 = Region("me-east-1") + + EUCentral1 = Region("eu-central-1") +) + +var ValidRegions = []Region{ + Hangzhou, Qingdao, Beijing, Shenzhen, Hongkong, Shanghai, Zhangjiakou, + USWest1, USEast1, + APNorthEast1, APSouthEast1, APSouthEast2, + MEEast1, + EUCentral1, +} diff --git a/vendor/github.com/denverdino/aliyungo/common/request.go b/vendor/github.com/denverdino/aliyungo/common/request.go new file mode 100644 index 000000000000..2a883f19b204 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/common/request.go @@ -0,0 +1,101 @@ +package common + +import ( + "fmt" + "log" + "time" + + "github.com/denverdino/aliyungo/util" +) + +// Constants for Aliyun API requests +const ( + SignatureVersion = "1.0" + SignatureMethod = "HMAC-SHA1" + JSONResponseFormat = "JSON" + XMLResponseFormat = "XML" + ECSRequestMethod = "GET" +) + +type Request struct { + Format string + Version string + AccessKeyId string + Signature string + SignatureMethod string + Timestamp util.ISO6801Time + SignatureVersion string + SignatureNonce string + ResourceOwnerAccount string + Action string +} + +func (request *Request) init(version string, action string, AccessKeyId string) { + request.Format = JSONResponseFormat + request.Timestamp = util.NewISO6801Time(time.Now().UTC()) + request.Version = version + request.SignatureVersion = SignatureVersion + request.SignatureMethod = SignatureMethod + request.SignatureNonce = util.CreateRandomString() + request.Action = action + request.AccessKeyId = AccessKeyId +} + +type Response struct { + RequestId string +} + +type ErrorResponse struct { + Response + HostId string + Code string + Message string +} + +// An Error represents a custom error for Aliyun API failure response +type Error struct { + ErrorResponse + StatusCode int //Status Code of HTTP Response +} + +func (e *Error) Error() string { + return fmt.Sprintf("Aliyun API Error: RequestId: %s Status Code: %d Code: %s Message: %s", e.RequestId, e.StatusCode, e.Code, e.Message) +} + +type Pagination struct { + PageNumber int + PageSize int +} + +func (p *Pagination) SetPageSize(size int) { + p.PageSize = size +} + +func (p *Pagination) Validate() { + if p.PageNumber < 0 { + log.Printf("Invalid PageNumber: %d", p.PageNumber) + p.PageNumber = 1 + } + if p.PageSize < 0 { + log.Printf("Invalid PageSize: %d", 
p.PageSize) + p.PageSize = 10 + } else if p.PageSize > 50 { + log.Printf("Invalid PageSize: %d", p.PageSize) + p.PageSize = 50 + } +} + +// A PaginationResponse represents a response with pagination information +type PaginationResult struct { + TotalCount int + PageNumber int + PageSize int +} + +// NextPage gets the next page of the result set +func (r *PaginationResult) NextPage() *Pagination { + if r.PageNumber*r.PageSize >= r.TotalCount { + return nil + } + return &Pagination{PageNumber: r.PageNumber + 1, PageSize: r.PageSize} +} diff --git a/vendor/github.com/denverdino/aliyungo/common/types.go b/vendor/github.com/denverdino/aliyungo/common/types.go new file mode 100644 index 000000000000..a74e150e9f15 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/common/types.go @@ -0,0 +1,89 @@ +package common + +type InternetChargeType string + +const ( + PayByBandwidth = InternetChargeType("PayByBandwidth") + PayByTraffic = InternetChargeType("PayByTraffic") +) + +type InstanceChargeType string + +const ( + PrePaid = InstanceChargeType("PrePaid") + PostPaid = InstanceChargeType("PostPaid") +) + +type DescribeEndpointArgs struct { + Id Region + ServiceCode string + Type string +} + +type EndpointItem struct { + Protocols struct { + Protocols []string + } + Type string + Namespace string + Id Region + SerivceCode string + Endpoint string +} + +type DescribeEndpointResponse struct { + Response + EndpointItem +} + +type NetType string + +const ( + Internet = NetType("Internet") + Intranet = NetType("Intranet") +) + +type TimeType string + +const ( + Hour = TimeType("Hour") + Day = TimeType("Day") + Month = TimeType("Month") + Year = TimeType("Year") +) + +type NetworkType string + +const ( + Classic = NetworkType("Classic") + VPC = NetworkType("VPC") +) + +type BusinessInfo struct { + Pack string `json:"pack,omitempty"` + ActivityId string `json:"activityId,omitempty"` +} + +//xml +type Endpoints struct { + Endpoint []Endpoint `xml:"Endpoint"` +} + +type Endpoint struct { + Name string `xml:"name,attr"` + RegionIds RegionIds `xml:"RegionIds"` + Products Products `xml:"Products"` +} + +type RegionIds struct { + RegionId string `xml:"RegionId"` +} + +type Products struct { + Product []Product `xml:"Product"` +} + +type Product struct { + ProductName string `xml:"ProductName"` + DomainName string `xml:"DomainName"` +} diff --git a/vendor/github.com/denverdino/aliyungo/common/version.go b/vendor/github.com/denverdino/aliyungo/common/version.go new file mode 100644 index 000000000000..7cb3d3aff054 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/common/version.go @@ -0,0 +1,3 @@ +package common + +const Version = "0.1" diff --git a/vendor/github.com/denverdino/aliyungo/ecs/client.go b/vendor/github.com/denverdino/aliyungo/ecs/client.go new file mode 100644 index 000000000000..d70a1554eb23 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/client.go @@ -0,0 +1,74 @@ +package ecs + +import ( + "os" + + "github.com/denverdino/aliyungo/common" +) + +// Interval for checking status in WaitForXXX method +const DefaultWaitForInterval = 5 + +// Default timeout value for WaitForXXX method +const DefaultTimeout = 60 + +type Client struct { + common.Client +} + +const ( + // ECSDefaultEndpoint is the default API endpoint of ECS services + ECSDefaultEndpoint = "https://ecs-cn-hangzhou.aliyuncs.com" + ECSAPIVersion = "2014-05-26" + + ECSServiceCode = "ecs" + + VPCDefaultEndpoint = "https://vpc.aliyuncs.com" + VPCAPIVersion = "2016-04-28" + VPCServiceCode = "vpc" +) + +// NewClient 
creates a new instance of ECS client +func NewClient(accessKeyId, accessKeySecret string) *Client { + endpoint := os.Getenv("ECS_ENDPOINT") + if endpoint == "" { + endpoint = ECSDefaultEndpoint + } + return NewClientWithEndpoint(endpoint, accessKeyId, accessKeySecret) +} + +func NewECSClient(accessKeyId, accessKeySecret string, regionID common.Region) *Client { + endpoint := os.Getenv("ECS_ENDPOINT") + if endpoint == "" { + endpoint = ECSDefaultEndpoint + } + + return NewClientWithRegion(endpoint, accessKeyId, accessKeySecret, regionID) +} + +func NewClientWithRegion(endpoint string, accessKeyId, accessKeySecret string, regionID common.Region) *Client { + client := &Client{} + client.NewInit(endpoint, ECSAPIVersion, accessKeyId, accessKeySecret, ECSServiceCode, regionID) + return client +} + +func NewClientWithEndpoint(endpoint string, accessKeyId, accessKeySecret string) *Client { + client := &Client{} + client.Init(endpoint, ECSAPIVersion, accessKeyId, accessKeySecret) + return client +} + +func NewVPCClient(accessKeyId, accessKeySecret string, regionID common.Region) *Client { + endpoint := os.Getenv("VPC_ENDPOINT") + if endpoint == "" { + endpoint = VPCDefaultEndpoint + } + + return NewVPCClientWithRegion(endpoint, accessKeyId, accessKeySecret, regionID) +} + +func NewVPCClientWithRegion(endpoint string, accessKeyId, accessKeySecret string, regionID common.Region) *Client { + client := &Client{} + client.NewInit(endpoint, VPCAPIVersion, accessKeyId, accessKeySecret, VPCServiceCode, regionID) + return client +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/disks.go b/vendor/github.com/denverdino/aliyungo/ecs/disks.go new file mode 100644 index 000000000000..2cfbffc379fe --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/disks.go @@ -0,0 +1,340 @@ +package ecs + +import ( + "time" + + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/util" +) + +// Types of disks +type DiskType string + +const ( + DiskTypeAll = DiskType("all") //Default + DiskTypeAllSystem = DiskType("system") + DiskTypeAllData = DiskType("data") +) + +// Categories of disks +type DiskCategory string + +const ( + DiskCategoryAll = DiskCategory("all") //Default + DiskCategoryCloud = DiskCategory("cloud") + DiskCategoryEphemeral = DiskCategory("ephemeral") + DiskCategoryEphemeralSSD = DiskCategory("ephemeral_ssd") + DiskCategoryCloudEfficiency = DiskCategory("cloud_efficiency") + DiskCategoryCloudSSD = DiskCategory("cloud_ssd") +) + +// Status of disks +type DiskStatus string + +const ( + DiskStatusInUse = DiskStatus("In_use") + DiskStatusAvailable = DiskStatus("Available") + DiskStatusAttaching = DiskStatus("Attaching") + DiskStatusDetaching = DiskStatus("Detaching") + DiskStatusCreating = DiskStatus("Creating") + DiskStatusReIniting = DiskStatus("ReIniting") + DiskStatusAll = DiskStatus("All") //Default +) + +// Charge type of disks +type DiskChargeType string + +const ( + PrePaid = DiskChargeType("PrePaid") + PostPaid = DiskChargeType("PostPaid") +) + +// A DescribeDisksArgs defines the arguments to describe disks +type DescribeDisksArgs struct { + RegionId common.Region + ZoneId string + DiskIds []string + InstanceId string + DiskType DiskType //enum for all(default) | system | data + Category DiskCategory //enum for all(default) | cloud | ephemeral + Status DiskStatus //enum for In_use | Available | Attaching | Detaching | Creating | ReIniting | All(default) + SnapshotId string + Name string + Portable *bool //optional + DeleteWithInstance *bool //optional + 
DeleteAutoSnapshot *bool //optional + EnableAutoSnapshot *bool //optional + DiskChargeType DiskChargeType + Tag map[string]string + common.Pagination +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&diskitemtype +type DiskItemType struct { + DiskId string + RegionId common.Region + ZoneId string + DiskName string + Description string + Type DiskType + Category DiskCategory + Size int + ImageId string + SourceSnapshotId string + ProductCode string + Portable bool + Status DiskStatus + OperationLocks OperationLocksType + InstanceId string + Device string + DeleteWithInstance bool + DeleteAutoSnapshot bool + EnableAutoSnapshot bool + CreationTime util.ISO6801Time + AttachedTime util.ISO6801Time + DetachedTime util.ISO6801Time + DiskChargeType DiskChargeType +} + +type DescribeDisksResponse struct { + common.Response + common.PaginationResult + RegionId common.Region + Disks struct { + Disk []DiskItemType + } +} + +// DescribeDisks describes Disks +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/disk&describedisks +func (client *Client) DescribeDisks(args *DescribeDisksArgs) (disks []DiskItemType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeDisksWithRaw(args) + if err != nil { + return nil, nil, err + } + + return response.Disks.Disk, &response.PaginationResult, err +} + +func (client *Client) DescribeDisksWithRaw(args *DescribeDisksArgs) (response *DescribeDisksResponse, err error) { + response = &DescribeDisksResponse{} + + err = client.Invoke("DescribeDisks", args, response) + + if err != nil { + return nil, err + } + + return response, err +} + +type CreateDiskArgs struct { + RegionId common.Region + ZoneId string + DiskName string + Description string + Encrypted bool + DiskCategory DiskCategory + Size int + SnapshotId string + ClientToken string +} + +type CreateDisksResponse struct { + common.Response + DiskId string +} + +// CreateDisk creates a new disk +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/disk&createdisk +func (client *Client) CreateDisk(args *CreateDiskArgs) (diskId string, err error) { + response := CreateDisksResponse{} + err = client.Invoke("CreateDisk", args, &response) + if err != nil { + return "", err + } + return response.DiskId, err +} + +type DeleteDiskArgs struct { + DiskId string +} + +type DeleteDiskResponse struct { + common.Response +} + +// DeleteDisk deletes disk +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/disk&deletedisk +func (client *Client) DeleteDisk(diskId string) error { + args := DeleteDiskArgs{ + DiskId: diskId, + } + response := DeleteDiskResponse{} + err := client.Invoke("DeleteDisk", &args, &response) + return err +} + +type ReInitDiskArgs struct { + DiskId string +} + +type ReInitDiskResponse struct { + common.Response +} + +// ReInitDisk reinitizes disk +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/disk&reinitdisk +func (client *Client) ReInitDisk(diskId string) error { + args := ReInitDiskArgs{ + DiskId: diskId, + } + response := ReInitDiskResponse{} + err := client.Invoke("ReInitDisk", &args, &response) + return err +} + +type AttachDiskArgs struct { + InstanceId string + DiskId string + Device string + DeleteWithInstance bool +} + +type AttachDiskResponse struct { + common.Response +} + +// AttachDisk attaches disk to instance +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/disk&attachdisk +func (client *Client) AttachDisk(args *AttachDiskArgs) error { 
+ response := AttachDiskResponse{} + err := client.Invoke("AttachDisk", args, &response) + return err +} + +type DetachDiskArgs struct { + InstanceId string + DiskId string +} + +type DetachDiskResponse struct { + common.Response +} + +// DetachDisk detaches disk from instance +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/disk&detachdisk +func (client *Client) DetachDisk(instanceId string, diskId string) error { + args := DetachDiskArgs{ + InstanceId: instanceId, + DiskId: diskId, + } + response := DetachDiskResponse{} + err := client.Invoke("DetachDisk", &args, &response) + return err +} + +type ResetDiskArgs struct { + DiskId string + SnapshotId string +} + +type ResetDiskResponse struct { + common.Response +} + +// ResetDisk resets disk to original status +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/disk&resetdisk +func (client *Client) ResetDisk(diskId string, snapshotId string) error { + args := ResetDiskArgs{ + SnapshotId: snapshotId, + DiskId: diskId, + } + response := ResetDiskResponse{} + err := client.Invoke("ResetDisk", &args, &response) + return err +} + +type ModifyDiskAttributeArgs struct { + DiskId string + DiskName string + Description string + DeleteWithInstance *bool + DeleteAutoSnapshot *bool + EnableAutoSnapshot *bool +} + +type ModifyDiskAttributeResponse struct { + common.Response +} + +// ModifyDiskAttribute modifies disk attribute +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/disk&modifydiskattribute +func (client *Client) ModifyDiskAttribute(args *ModifyDiskAttributeArgs) error { + response := ModifyDiskAttributeResponse{} + err := client.Invoke("ModifyDiskAttribute", args, &response) + return err +} + +type ReplaceSystemDiskArgs struct { + InstanceId string + ImageId string + SystemDisk SystemDiskType + ClientToken string +} + +type ReplaceSystemDiskResponse struct { + common.Response + DiskId string +} + +// ReplaceSystemDisk replace system disk +// +// You can read doc at https://help.aliyun.com/document_detail/ecs/open-api/disk/replacesystemdisk.html +func (client *Client) ReplaceSystemDisk(args *ReplaceSystemDiskArgs) (diskId string, err error) { + response := ReplaceSystemDiskResponse{} + err = client.Invoke("ReplaceSystemDisk", args, &response) + if err != nil { + return "", err + } + return response.DiskId, nil +} + +// WaitForDisk waits for disk to given status +func (client *Client) WaitForDisk(regionId common.Region, diskId string, status DiskStatus, timeout int) error { + if timeout <= 0 { + timeout = DefaultTimeout + } + args := DescribeDisksArgs{ + RegionId: regionId, + DiskIds: []string{diskId}, + } + + for { + disks, _, err := client.DescribeDisks(&args) + if err != nil { + return err + } + if disks == nil || len(disks) == 0 { + return common.GetClientErrorFromString("Not found") + } + if disks[0].Status == status { + break + } + timeout = timeout - DefaultWaitForInterval + if timeout <= 0 { + return common.GetClientErrorFromString("Timeout") + } + time.Sleep(DefaultWaitForInterval * time.Second) + } + return nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/forward_entry.go b/vendor/github.com/denverdino/aliyungo/ecs/forward_entry.go new file mode 100644 index 000000000000..ad716a1a54bd --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/forward_entry.go @@ -0,0 +1,112 @@ +package ecs + +import "github.com/denverdino/aliyungo/common" + +type CreateForwardEntryArgs struct { + RegionId common.Region + ForwardTableId string + ExternalIp string + ExternalPort 
string + IpProtocol string + InternalIp string + InternalPort string +} + +type CreateForwardEntryResponse struct { + common.Response + ForwardEntryId string +} + +type DescribeForwardTableEntriesArgs struct { + RegionId common.Region + ForwardTableId string + common.Pagination +} + +type ForwardTableEntrySetType struct { + RegionId common.Region + ExternalIp string + ExternalPort string + ForwardEntryId string + ForwardTableId string + InternalIp string + InternalPort string + IpProtocol string + Status string +} + +type DescribeForwardTableEntriesResponse struct { + common.Response + common.PaginationResult + ForwardTableEntries struct { + ForwardTableEntry []ForwardTableEntrySetType + } +} + +type ModifyForwardEntryArgs struct { + RegionId common.Region + ForwardTableId string + ForwardEntryId string + ExternalIp string + IpProtocol string + ExternalPort string + InternalIp string + InternalPort string +} + +type ModifyForwardEntryResponse struct { + common.Response +} + +type DeleteForwardEntryArgs struct { + RegionId common.Region + ForwardTableId string + ForwardEntryId string +} + +type DeleteForwardEntryResponse struct { + common.Response +} + +func (client *Client) CreateForwardEntry(args *CreateForwardEntryArgs) (resp *CreateForwardEntryResponse, err error) { + response := CreateForwardEntryResponse{} + err = client.Invoke("CreateForwardEntry", args, &response) + if err != nil { + return nil, err + } + return &response, err +} + +func (client *Client) DescribeForwardTableEntries(args *DescribeForwardTableEntriesArgs) (forwardTableEntries []ForwardTableEntrySetType, + pagination *common.PaginationResult, err error) { + response, err := client.DescribeForwardTableEntriesWithRaw(args) + if err != nil { + return nil, nil, err + } + + return response.ForwardTableEntries.ForwardTableEntry, &response.PaginationResult, nil +} + +func (client *Client) DescribeForwardTableEntriesWithRaw(args *DescribeForwardTableEntriesArgs) (response *DescribeForwardTableEntriesResponse, err error) { + args.Validate() + response = &DescribeForwardTableEntriesResponse{} + + err = client.Invoke("DescribeForwardTableEntries", args, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (client *Client) ModifyForwardEntry(args *ModifyForwardEntryArgs) error { + response := ModifyForwardEntryResponse{} + return client.Invoke("ModifyForwardEntry", args, &response) +} + +func (client *Client) DeleteForwardEntry(args *DeleteForwardEntryArgs) error { + response := DeleteForwardEntryResponse{} + err := client.Invoke("DeleteForwardEntry", args, &response) + return err +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/images.go b/vendor/github.com/denverdino/aliyungo/ecs/images.go new file mode 100644 index 000000000000..54fe86defe9f --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/images.go @@ -0,0 +1,323 @@ +package ecs + +import ( + "net/url" + "strconv" + "time" + + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/util" +) + +// ImageOwnerAlias represents image owner +type ImageOwnerAlias string + +// Constants of image owner +const ( + ImageOwnerSystem = ImageOwnerAlias("system") + ImageOwnerSelf = ImageOwnerAlias("self") + ImageOwnerOthers = ImageOwnerAlias("others") + ImageOwnerMarketplace = ImageOwnerAlias("marketplace") + ImageOwnerDefault = ImageOwnerAlias("") //Return the values for system, self, and others +) + +type ImageStatus string + +const ( + ImageStatusAvailable = ImageStatus("Available") + ImageStatusUnAvailable = 
ImageStatus("UnAvailable") + ImageStatusCreating = ImageStatus("Creating") + ImageStatusCreateFailed = ImageStatus("CreateFailed") +) + +type ImageUsage string + +const ( + ImageUsageInstance = ImageUsage("instance") + ImageUsageNone = ImageUsage("none") +) + +// DescribeImagesArgs repsents arguments to describe images +type DescribeImagesArgs struct { + RegionId common.Region + ImageId string + SnapshotId string + ImageName string + Status ImageStatus + ImageOwnerAlias ImageOwnerAlias + common.Pagination +} + +type DescribeImagesResponse struct { + common.Response + common.PaginationResult + + RegionId common.Region + Images struct { + Image []ImageType + } +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&diskdevicemapping +type DiskDeviceMapping struct { + SnapshotId string + //Why Size Field is string-type. + Size string + Device string + //For import images + Format string + OSSBucket string + OSSObject string +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&imagetype +type ImageType struct { + ImageId string + ImageVersion string + Architecture string + ImageName string + Description string + Size int + ImageOwnerAlias string + OSName string + OSType string + Platform string + DiskDeviceMappings struct { + DiskDeviceMapping []DiskDeviceMapping + } + ProductCode string + IsSubscribed bool + IsSelfShared string + IsCopied bool + IsSupportIoOptimized bool + Progress string + Usage ImageUsage + Status ImageStatus + CreationTime util.ISO6801Time +} + +// DescribeImages describes images +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/image&describeimages +func (client *Client) DescribeImages(args *DescribeImagesArgs) (images []ImageType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeImagesWithRaw(args) + if err != nil { + return nil, nil, err + } + return response.Images.Image, &response.PaginationResult, nil +} + +func (client *Client) DescribeImagesWithRaw(args *DescribeImagesArgs) (response *DescribeImagesResponse, err error) { + args.Validate() + response = &DescribeImagesResponse{} + err = client.Invoke("DescribeImages", args, response) + if err != nil { + return nil, err + } + return response, nil +} + +// CreateImageArgs repsents arguments to create image +type CreateImageArgs struct { + RegionId common.Region + SnapshotId string + InstanceId string + ImageName string + ImageVersion string + Description string + ClientToken string +} + +type CreateImageResponse struct { + common.Response + + ImageId string +} + +// CreateImage creates a new image +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/image&createimage +func (client *Client) CreateImage(args *CreateImageArgs) (imageId string, err error) { + response := &CreateImageResponse{} + err = client.Invoke("CreateImage", args, &response) + if err != nil { + return "", err + } + return response.ImageId, nil +} + +type DeleteImageArgs struct { + RegionId common.Region + ImageId string +} + +type DeleteImageResponse struct { + common.Response +} + +// DeleteImage deletes Image +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/image&deleteimage +func (client *Client) DeleteImage(regionId common.Region, imageId string) error { + args := DeleteImageArgs{ + RegionId: regionId, + ImageId: imageId, + } + + response := &DeleteImageResponse{} + return client.Invoke("DeleteImage", &args, &response) +} + +// ModifyImageSharePermission repsents arguments to share image +type 
ModifyImageSharePermissionArgs struct { + RegionId common.Region + ImageId string + AddAccount []string + RemoveAccount []string +} + +// You can read doc at http://help.aliyun.com/document_detail/ecs/open-api/image/modifyimagesharepermission.html +func (client *Client) ModifyImageSharePermission(args *ModifyImageSharePermissionArgs) error { + req := url.Values{} + req.Add("RegionId", string(args.RegionId)) + req.Add("ImageId", args.ImageId) + + for i, item := range args.AddAccount { + req.Add("AddAccount."+strconv.Itoa(i+1), item) + } + for i, item := range args.RemoveAccount { + req.Add("RemoveAccount."+strconv.Itoa(i+1), item) + } + + return client.Invoke("ModifyImageSharePermission", req, &common.Response{}) +} + +type AccountType struct { + AliyunId string +} +type ImageSharePermissionResponse struct { + common.Response + ImageId string + RegionId string + Accounts struct { + Account []AccountType + } + TotalCount int + PageNumber int + PageSize int +} + +func (client *Client) DescribeImageSharePermission(args *ModifyImageSharePermissionArgs) (*ImageSharePermissionResponse, error) { + response := ImageSharePermissionResponse{} + err := client.Invoke("DescribeImageSharePermission", args, &response) + return &response, err +} + +type CopyImageArgs struct { + RegionId common.Region + ImageId string + DestinationRegionId common.Region + DestinationImageName string + DestinationDescription string + ClientToken string +} + +type CopyImageResponse struct { + common.Response + ImageId string +} + +// You can read doc at https://help.aliyun.com/document_detail/25538.html +func (client *Client) CopyImage(args *CopyImageArgs) (string, error) { + response := &CopyImageResponse{} + err := client.Invoke("CopyImage", args, &response) + if err != nil { + return "", err + } + return response.ImageId, nil +} + +// ImportImageArgs repsents arguments to import image from oss +type ImportImageArgs struct { + RegionId common.Region + ImageName string + ImageVersion string + Description string + ClientToken string + Architecture string + OSType string + Platform string + DiskDeviceMappings struct { + DiskDeviceMapping []DiskDeviceMapping + } +} + +func (client *Client) ImportImage(args *ImportImageArgs) (string, error) { + response := &CopyImageResponse{} + err := client.Invoke("ImportImage", args, &response) + if err != nil { + return "", err + } + return response.ImageId, nil +} + +type ImportImageResponse struct { + common.Response + RegionId common.Region + ImageId string + ImportTaskId string +} + +// Default timeout value for WaitForImageReady method +const ImageDefaultTimeout = 120 + +//Wait Image ready +func (client *Client) WaitForImageReady(regionId common.Region, imageId string, timeout int) error { + if timeout <= 0 { + timeout = ImageDefaultTimeout + } + for { + args := DescribeImagesArgs{ + RegionId: regionId, + ImageId: imageId, + Status: ImageStatusCreating, + } + + images, _, err := client.DescribeImages(&args) + if err != nil { + return err + } + if images == nil || len(images) == 0 { + args.Status = ImageStatusAvailable + images, _, er := client.DescribeImages(&args) + if er == nil && len(images) == 1 { + break + } else { + return common.GetClientErrorFromString("Not found") + } + } + if images[0].Progress == "100%" { + break + } + timeout = timeout - DefaultWaitForInterval + if timeout <= 0 { + return common.GetClientErrorFromString("Timeout") + } + time.Sleep(DefaultWaitForInterval * time.Second) + } + return nil +} + +type CancelCopyImageRequest struct { + regionId common.Region + 
ImageId string +} + +// You can read doc at https://help.aliyun.com/document_detail/25539.html +func (client *Client) CancelCopyImage(regionId common.Region, imageId string) error { + response := &common.Response{} + err := client.Invoke("CancelCopyImage", &CancelCopyImageRequest{regionId, imageId}, &response) + return err +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/instance_types.go b/vendor/github.com/denverdino/aliyungo/ecs/instance_types.go new file mode 100644 index 000000000000..a133806f1c35 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/instance_types.go @@ -0,0 +1,82 @@ +package ecs + +import "github.com/denverdino/aliyungo/common" + +type DescribeInstanceTypesArgs struct { + InstanceTypeFamily string +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&instancetypeitemtype +type InstanceTypeItemType struct { + InstanceTypeId string + CpuCoreCount int + MemorySize float64 + InstanceTypeFamily string +} + +type DescribeInstanceTypesResponse struct { + common.Response + InstanceTypes struct { + InstanceType []InstanceTypeItemType + } +} + +// DescribeInstanceTypes describes all instance types +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/other&describeinstancetypes +func (client *Client) DescribeInstanceTypes() (instanceTypes []InstanceTypeItemType, err error) { + response := DescribeInstanceTypesResponse{} + + err = client.Invoke("DescribeInstanceTypes", &DescribeInstanceTypesArgs{}, &response) + + if err != nil { + return []InstanceTypeItemType{}, err + } + return response.InstanceTypes.InstanceType, nil + +} + +// support user args +func (client *Client) DescribeInstanceTypesNew(args *DescribeInstanceTypesArgs) (instanceTypes []InstanceTypeItemType, err error) { + response := DescribeInstanceTypesResponse{} + + err = client.Invoke("DescribeInstanceTypes", args, &response) + + if err != nil { + return []InstanceTypeItemType{}, err + } + return response.InstanceTypes.InstanceType, nil + +} + +type DescribeInstanceTypeFamiliesArgs struct { + RegionId common.Region + Generation string +} + +type InstanceTypeFamilies struct { + InstanceTypeFamily []InstanceTypeFamily +} + +type InstanceTypeFamily struct { + InstanceTypeFamilyId string + Generation string +} + +type DescribeInstanceTypeFamiliesResponse struct { + common.Response + + InstanceTypeFamilies InstanceTypeFamilies +} + +func (client *Client) DescribeInstanceTypeFamilies(args *DescribeInstanceTypeFamiliesArgs) (*DescribeInstanceTypeFamiliesResponse, error) { + response := &DescribeInstanceTypeFamiliesResponse{} + + err := client.Invoke("DescribeInstanceTypeFamilies", args, response) + if err != nil { + return nil, err + } + + return response, nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/instances.go b/vendor/github.com/denverdino/aliyungo/ecs/instances.go new file mode 100644 index 000000000000..a10bcc053571 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/instances.go @@ -0,0 +1,681 @@ +package ecs + +import ( + "encoding/base64" + "encoding/json" + "strconv" + "time" + + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/util" +) + +// InstanceStatus represents instance status +type InstanceStatus string + +// Constants of InstanceStatus +const ( + Creating = InstanceStatus("Creating") // For backward compatibility + Pending = InstanceStatus("Pending") + Running = InstanceStatus("Running") + Starting = InstanceStatus("Starting") + + Stopped = InstanceStatus("Stopped") + Stopping = 
InstanceStatus("Stopping") + Deleted = InstanceStatus("Deleted") +) + +type LockReason string + +const ( + LockReasonFinancial = LockReason("financial") + LockReasonSecurity = LockReason("security") +) + +type LockReasonType struct { + LockReason LockReason +} + +type DescribeUserdataArgs struct { + RegionId common.Region + InstanceId string +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&instancestatusitemtype +type DescribeUserdataItemType struct { + UserData string + InstanceId string + RegionId string +} + +type DescribeUserdataResponse struct { + common.Response + DescribeUserdataItemType +} + +// DescribeInstanceStatus describes instance status +// +// You can read doc at https://intl.aliyun.com/help/doc-detail/49227.htm +func (client *Client) DescribeUserdata(args *DescribeUserdataArgs) (userData *DescribeUserdataItemType, err error) { + response := DescribeUserdataResponse{} + + err = client.Invoke("DescribeUserdata", args, &response) + + if err == nil { + return &response.DescribeUserdataItemType, nil + } + + return nil, err +} + +type DescribeInstanceStatusArgs struct { + RegionId common.Region + ZoneId string + common.Pagination +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&instancestatusitemtype +type InstanceStatusItemType struct { + InstanceId string + Status InstanceStatus +} + +type DescribeInstanceStatusResponse struct { + common.Response + common.PaginationResult + InstanceStatuses struct { + InstanceStatus []InstanceStatusItemType + } +} + +// DescribeInstanceStatus describes instance status +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&describeinstancestatus +func (client *Client) DescribeInstanceStatus(args *DescribeInstanceStatusArgs) (instanceStatuses []InstanceStatusItemType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeInstanceStatusWithRaw(args) + + if err == nil { + return response.InstanceStatuses.InstanceStatus, &response.PaginationResult, nil + } + + return nil, nil, err +} + +func (client *Client) DescribeInstanceStatusWithRaw(args *DescribeInstanceStatusArgs) (response *DescribeInstanceStatusResponse, err error) { + args.Validate() + response = &DescribeInstanceStatusResponse{} + + err = client.Invoke("DescribeInstanceStatus", args, response) + if err != nil { + return nil, err + } + + return response, nil +} + +type StopInstanceArgs struct { + InstanceId string + ForceStop bool +} + +type StopInstanceResponse struct { + common.Response +} + +// StopInstance stops instance +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&stopinstance +func (client *Client) StopInstance(instanceId string, forceStop bool) error { + args := StopInstanceArgs{ + InstanceId: instanceId, + ForceStop: forceStop, + } + response := StopInstanceResponse{} + err := client.Invoke("StopInstance", &args, &response) + return err +} + +type StartInstanceArgs struct { + InstanceId string +} + +type StartInstanceResponse struct { + common.Response +} + +// StartInstance starts instance +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&startinstance +func (client *Client) StartInstance(instanceId string) error { + args := StartInstanceArgs{InstanceId: instanceId} + response := StartInstanceResponse{} + err := client.Invoke("StartInstance", &args, &response) + return err +} + +type RebootInstanceArgs struct { + InstanceId string + ForceStop bool +} + +type RebootInstanceResponse struct { + 
common.Response +} + +// RebootInstance reboot instance +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&rebootinstance +func (client *Client) RebootInstance(instanceId string, forceStop bool) error { + request := RebootInstanceArgs{ + InstanceId: instanceId, + ForceStop: forceStop, + } + response := RebootInstanceResponse{} + err := client.Invoke("RebootInstance", &request, &response) + return err +} + +type DescribeInstanceAttributeArgs struct { + InstanceId string +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&operationlockstype +type OperationLocksType struct { + LockReason []LockReasonType //enum for financial, security +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&securitygroupidsettype +type SecurityGroupIdSetType struct { + SecurityGroupId string +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&ipaddresssettype +type IpAddressSetType struct { + IpAddress []string +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&vpcattributestype +type VpcAttributesType struct { + VpcId string + VSwitchId string + PrivateIpAddress IpAddressSetType + NatIpAddress string +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&eipaddressassociatetype +type EipAddressAssociateType struct { + AllocationId string + IpAddress string + Bandwidth int + InternetChargeType common.InternetChargeType +} + +// Experimental feature +type SpotStrategyType string + +// Constants of SpotStrategyType +const ( + NoSpot = SpotStrategyType("NoSpot") + SpotWithPriceLimit = SpotStrategyType("SpotWithPriceLimit") +) + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&instanceattributestype +type InstanceAttributesType struct { + InstanceId string + InstanceName string + Description string + ImageId string + RegionId common.Region + ZoneId string + CPU int + Memory int + ClusterId string + InstanceType string + InstanceTypeFamily string + HostName string + SerialNumber string + Status InstanceStatus + OperationLocks OperationLocksType + SecurityGroupIds struct { + SecurityGroupId []string + } + PublicIpAddress IpAddressSetType + InnerIpAddress IpAddressSetType + InstanceNetworkType string //enum Classic | Vpc + InternetMaxBandwidthIn int + InternetMaxBandwidthOut int + InternetChargeType common.InternetChargeType + CreationTime util.ISO6801Time //time.Time + VpcAttributes VpcAttributesType + EipAddress EipAddressAssociateType + IoOptimized StringOrBool + InstanceChargeType common.InstanceChargeType + ExpiredTime util.ISO6801Time + Tags struct { + Tag []TagItemType + } + SpotStrategy SpotStrategyType + KeyPairName string +} + +type DescribeInstanceAttributeResponse struct { + common.Response + InstanceAttributesType +} + +// DescribeInstanceAttribute describes instance attribute +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&describeinstanceattribute +func (client *Client) DescribeInstanceAttribute(instanceId string) (instance *InstanceAttributesType, err error) { + args := DescribeInstanceAttributeArgs{InstanceId: instanceId} + + response := DescribeInstanceAttributeResponse{} + err = client.Invoke("DescribeInstanceAttribute", &args, &response) + if err != nil { + return nil, err + } + return &response.InstanceAttributesType, err +} + +type ModifyInstanceAttributeArgs struct { + InstanceId string + InstanceName string + Description string + Password string + 
HostName string + UserData string +} + +type ModifyInstanceAttributeResponse struct { + common.Response +} + +//ModifyInstanceAttribute modify instance attrbute +// +// You can read doc at https://help.aliyun.com/document_detail/ecs/open-api/instance/modifyinstanceattribute.html +func (client *Client) ModifyInstanceAttribute(args *ModifyInstanceAttributeArgs) error { + response := ModifyInstanceAttributeResponse{} + err := client.Invoke("ModifyInstanceAttribute", args, &response) + return err +} + +// Default timeout value for WaitForInstance method +const InstanceDefaultTimeout = 120 + +// WaitForInstance waits for instance to given status +func (client *Client) WaitForInstance(instanceId string, status InstanceStatus, timeout int) error { + if timeout <= 0 { + timeout = InstanceDefaultTimeout + } + for { + instance, err := client.DescribeInstanceAttribute(instanceId) + if err != nil { + return err + } + if instance.Status == status { + //TODO + //Sleep one more time for timing issues + time.Sleep(DefaultWaitForInterval * time.Second) + break + } + timeout = timeout - DefaultWaitForInterval + if timeout <= 0 { + return common.GetClientErrorFromString("Timeout") + } + time.Sleep(DefaultWaitForInterval * time.Second) + + } + return nil +} + +// WaitForInstance waits for instance to given status +// when instance.NotFound wait until timeout +func (client *Client) WaitForInstanceAsyn(instanceId string, status InstanceStatus, timeout int) error { + if timeout <= 0 { + timeout = InstanceDefaultTimeout + } + for { + instance, err := client.DescribeInstanceAttribute(instanceId) + if err != nil { + e, _ := err.(*common.Error) + if e.Code != "InvalidInstanceId.NotFound" && e.Code != "Forbidden.InstanceNotFound" { + return err + } + } else if instance != nil && instance.Status == status { + //TODO + break + } + timeout = timeout - DefaultWaitForInterval + if timeout <= 0 { + return common.GetClientErrorFromString("Timeout") + } + time.Sleep(DefaultWaitForInterval * time.Second) + + } + return nil +} + +type DescribeInstanceVncUrlArgs struct { + RegionId common.Region + InstanceId string +} + +type DescribeInstanceVncUrlResponse struct { + common.Response + VncUrl string +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&describeinstancevncurl +func (client *Client) DescribeInstanceVncUrl(args *DescribeInstanceVncUrlArgs) (string, error) { + response := DescribeInstanceVncUrlResponse{} + + err := client.Invoke("DescribeInstanceVncUrl", args, &response) + + if err == nil { + return response.VncUrl, nil + } + + return "", err +} + +type DescribeInstancesArgs struct { + RegionId common.Region + VpcId string + VSwitchId string + ZoneId string + InstanceIds string + InstanceNetworkType string + InstanceName string + Status InstanceStatus + PrivateIpAddresses string + InnerIpAddresses string + PublicIpAddresses string + SecurityGroupId string + Tag map[string]string + InstanceType string + SpotStrategy SpotStrategyType + common.Pagination +} + +type DescribeInstancesResponse struct { + common.Response + common.PaginationResult + Instances struct { + Instance []InstanceAttributesType + } +} + +// DescribeInstances describes instances +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&describeinstances +func (client *Client) DescribeInstances(args *DescribeInstancesArgs) (instances []InstanceAttributesType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeInstancesWithRaw(args) + if err != nil { + return nil, nil, 
err + } + + return response.Instances.Instance, &response.PaginationResult, nil +} + +func (client *Client) DescribeInstancesWithRaw(args *DescribeInstancesArgs) (response *DescribeInstancesResponse, err error) { + args.Validate() + response = &DescribeInstancesResponse{} + + err = client.Invoke("DescribeInstances", args, &response) + if err != nil { + return nil, err + } + + return response, nil +} + +type DeleteInstanceArgs struct { + InstanceId string +} + +type DeleteInstanceResponse struct { + common.Response +} + +// DeleteInstance deletes instance +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&deleteinstance +func (client *Client) DeleteInstance(instanceId string) error { + args := DeleteInstanceArgs{InstanceId: instanceId} + response := DeleteInstanceResponse{} + err := client.Invoke("DeleteInstance", &args, &response) + return err +} + +type DataDiskType struct { + Size int + Category DiskCategory //Enum cloud, ephemeral, ephemeral_ssd + SnapshotId string + DiskName string + Description string + Device string + DeleteWithInstance bool +} + +type SystemDiskType struct { + Size int + Category DiskCategory //Enum cloud, ephemeral, ephemeral_ssd + DiskName string + Description string +} + +type IoOptimized string + +type StringOrBool struct { + Value bool +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (io *StringOrBool) UnmarshalJSON(value []byte) error { + if value[0] == '"' { + var str string + err := json.Unmarshal(value, &str) + if err == nil { + io.Value = (str == "true" || str == "optimized") + } + return err + } + var boolVal bool + err := json.Unmarshal(value, &boolVal) + if err == nil { + io.Value = boolVal + } + return err +} + +func (io StringOrBool) Bool() bool { + return io.Value +} + +func (io StringOrBool) String() string { + return strconv.FormatBool(io.Value) +} + +var ( + IoOptimizedNone = IoOptimized("none") + IoOptimizedOptimized = IoOptimized("optimized") +) + +type CreateInstanceArgs struct { + RegionId common.Region + ZoneId string + ImageId string + InstanceType string + SecurityGroupId string + InstanceName string + Description string + InternetChargeType common.InternetChargeType + InternetMaxBandwidthIn int + InternetMaxBandwidthOut int + HostName string + Password string + IoOptimized IoOptimized + SystemDisk SystemDiskType + DataDisk []DataDiskType + VSwitchId string + PrivateIpAddress string + ClientToken string + InstanceChargeType common.InstanceChargeType + Period int + UserData string + AutoRenew bool + AutoRenewPeriod int + SpotStrategy SpotStrategyType + KeyPairName string + RamRoleName string +} + +type CreateInstanceResponse struct { + common.Response + InstanceId string +} + +// CreateInstance creates instance +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/instance&createinstance +func (client *Client) CreateInstance(args *CreateInstanceArgs) (instanceId string, err error) { + if args.UserData != "" { + // Encode to base64 string + args.UserData = base64.StdEncoding.EncodeToString([]byte(args.UserData)) + } + response := CreateInstanceResponse{} + err = client.Invoke("CreateInstance", args, &response) + if err != nil { + return "", err + } + return response.InstanceId, err +} + +type RunInstanceArgs struct { + CreateInstanceArgs + MinAmount int + MaxAmount int + AutoReleaseTime string + NetworkType string + InnerIpAddress string + BusinessInfo string +} + +type RunInstanceResponse struct { + common.Response + InstanceIdSets InstanceIdSets +} + +type InstanceIdSets 
struct { + InstanceIdSet []string +} + +type BusinessInfo struct { + Pack string `json:"pack,omitempty"` + ActivityId string `json:"activityId,omitempty"` +} + +func (client *Client) RunInstances(args *RunInstanceArgs) (instanceIdSet []string, err error) { + if args.UserData != "" { + // Encode to base64 string + args.UserData = base64.StdEncoding.EncodeToString([]byte(args.UserData)) + } + response := RunInstanceResponse{} + err = client.Invoke("RunInstances", args, &response) + if err != nil { + return nil, err + } + return response.InstanceIdSets.InstanceIdSet, err +} + +type SecurityGroupArgs struct { + InstanceId string + SecurityGroupId string +} + +type SecurityGroupResponse struct { + common.Response +} + +//JoinSecurityGroup +// +//You can read doc at https://help.aliyun.com/document_detail/ecs/open-api/instance/joinsecuritygroup.html +func (client *Client) JoinSecurityGroup(instanceId string, securityGroupId string) error { + args := SecurityGroupArgs{InstanceId: instanceId, SecurityGroupId: securityGroupId} + response := SecurityGroupResponse{} + err := client.Invoke("JoinSecurityGroup", &args, &response) + return err +} + +//LeaveSecurityGroup +// +//You can read doc at https://help.aliyun.com/document_detail/ecs/open-api/instance/leavesecuritygroup.html +func (client *Client) LeaveSecurityGroup(instanceId string, securityGroupId string) error { + args := SecurityGroupArgs{InstanceId: instanceId, SecurityGroupId: securityGroupId} + response := SecurityGroupResponse{} + err := client.Invoke("LeaveSecurityGroup", &args, &response) + return err +} + +type AttachInstancesArgs struct { + RegionId common.Region + RamRoleName string + InstanceIds string +} + +// AttachInstanceRamRole attach instances to ram role +// +// You can read doc at https://help.aliyun.com/document_detail/54244.html?spm=5176.doc54245.6.811.zEJcS5 +func (client *Client) AttachInstanceRamRole(args *AttachInstancesArgs) (err error) { + response := common.Response{} + err = client.Invoke("AttachInstanceRamRole", args, &response) + if err != nil { + return err + } + return nil +} + +// DetachInstanceRamRole detach instances from ram role +// +// You can read doc at https://help.aliyun.com/document_detail/54245.html?spm=5176.doc54243.6.813.bt8RB3 +func (client *Client) DetachInstanceRamRole(args *AttachInstancesArgs) (err error) { + response := common.Response{} + err = client.Invoke("DetachInstanceRamRole", args, &response) + if err != nil { + return err + } + return nil +} + +type DescribeInstanceRamRoleResponse struct { + common.Response + InstanceRamRoleSets struct{ + InstanceRamRoleSet []InstanceRamRoleSetType + } +} + +type InstanceRamRoleSetType struct { + InstanceId string + RamRoleName string +} + +// DescribeInstanceRamRole +// +// You can read doc at https://help.aliyun.com/document_detail/54243.html?spm=5176.doc54245.6.812.RgNCoi +func (client *Client) DescribeInstanceRamRole(args *AttachInstancesArgs) (resp *DescribeInstanceRamRoleResponse, err error) { + response := &DescribeInstanceRamRoleResponse{} + err = client.Invoke("DescribeInstanceRamRole", args, response) + if err != nil { + return response, err + } + return response, nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/monitoring.go b/vendor/github.com/denverdino/aliyungo/ecs/monitoring.go new file mode 100644 index 000000000000..6123d28b7174 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/monitoring.go @@ -0,0 +1,136 @@ +package ecs + +import ( + "github.com/denverdino/aliyungo/common" + 
"github.com/denverdino/aliyungo/util" +) + +type DescribeInstanceMonitorDataArgs struct { + InstanceId string + StartTime util.ISO6801Time + EndTime util.ISO6801Time + Period int //Default 60s +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&instancemonitordatatype +type InstanceMonitorDataType struct { + InstanceId string + CPU int + IntranetRX int + IntranetTX int + IntranetBandwidth int + InternetRX int + InternetTX int + InternetBandwidth int + IOPSRead int + IOPSWrite int + BPSRead int + BPSWrite int + TimeStamp util.ISO6801Time +} + +type DescribeInstanceMonitorDataResponse struct { + common.Response + MonitorData struct { + InstanceMonitorData []InstanceMonitorDataType + } +} + +// DescribeInstanceMonitorData describes instance monitoring data +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/monitor&describeinstancemonitordata +func (client *Client) DescribeInstanceMonitorData(args *DescribeInstanceMonitorDataArgs) (monitorData []InstanceMonitorDataType, err error) { + if args.Period == 0 { + args.Period = 60 + } + response := DescribeInstanceMonitorDataResponse{} + err = client.Invoke("DescribeInstanceMonitorData", args, &response) + if err != nil { + return nil, err + } + return response.MonitorData.InstanceMonitorData, err +} + +type DescribeEipMonitorDataArgs struct { + AllocationId string + StartTime util.ISO6801Time + EndTime util.ISO6801Time + Period int //Default 60s +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&eipmonitordatatype +type EipMonitorDataType struct { + EipRX int + EipTX int + EipFlow int + EipBandwidth int + EipPackets int + TimeStamp util.ISO6801Time +} + +type DescribeEipMonitorDataResponse struct { + common.Response + EipMonitorDatas struct { + EipMonitorData []EipMonitorDataType + } +} + +// DescribeEipMonitorData describes EIP monitoring data +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/monitor&describeeipmonitordata +func (client *Client) DescribeEipMonitorData(args *DescribeEipMonitorDataArgs) (monitorData []EipMonitorDataType, err error) { + if args.Period == 0 { + args.Period = 60 + } + response := DescribeEipMonitorDataResponse{} + err = client.Invoke("DescribeEipMonitorData", args, &response) + if err != nil { + return nil, err + } + return response.EipMonitorDatas.EipMonitorData, err +} + +type DescribeDiskMonitorDataArgs struct { + DiskId string + StartTime util.ISO6801Time + EndTime util.ISO6801Time + Period int //Default 60s +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&diskmonitordatatype +type DiskMonitorDataType struct { + DiskId string + IOPSRead int + IOPSWrite int + IOPSTotal int + BPSRead int + BPSWrite int + BPSTotal int + TimeStamp util.ISO6801Time +} + +type DescribeDiskMonitorDataResponse struct { + common.Response + TotalCount int + MonitorData struct { + DiskMonitorData []DiskMonitorDataType + } +} + +// DescribeDiskMonitorData describes disk monitoring data +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/monitor&describediskmonitordata +func (client *Client) DescribeDiskMonitorData(args *DescribeDiskMonitorDataArgs) (monitorData []DiskMonitorDataType, totalCount int, err error) { + if args.Period == 0 { + args.Period = 60 + } + response := DescribeDiskMonitorDataResponse{} + err = client.Invoke("DescribeDiskMonitorData", args, &response) + if err != nil { + return nil, 0, err + } + return response.MonitorData.DiskMonitorData, response.TotalCount, err +} 
diff --git a/vendor/github.com/denverdino/aliyungo/ecs/nat_gateway.go b/vendor/github.com/denverdino/aliyungo/ecs/nat_gateway.go new file mode 100644 index 000000000000..10e00638e0f2 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/nat_gateway.go @@ -0,0 +1,210 @@ +package ecs + +import ( + "github.com/denverdino/aliyungo/common" +) + +type BandwidthPackageType struct { + IpCount int + Bandwidth int + Zone string +} + +type CreateNatGatewayArgs struct { + RegionId common.Region + VpcId string + Spec string + BandwidthPackage []BandwidthPackageType + Name string + Description string + ClientToken string +} + +type ForwardTableIdType struct { + ForwardTableId []string +} + +type SnatTableIdType struct { + SnatTableId []string +} + +type BandwidthPackageIdType struct { + BandwidthPackageId []string +} + +type CreateNatGatewayResponse struct { + common.Response + NatGatewayId string + ForwardTableIds ForwardTableIdType + BandwidthPackageIds BandwidthPackageIdType +} + +// CreateNatGateway creates Virtual Private Cloud +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/vpc&createvpc +func (client *Client) CreateNatGateway(args *CreateNatGatewayArgs) (resp *CreateNatGatewayResponse, err error) { + response := CreateNatGatewayResponse{} + err = client.Invoke("CreateNatGateway", args, &response) + if err != nil { + return nil, err + } + return &response, err +} + +type NatGatewaySetType struct { + BusinessStatus string + Description string + BandwidthPackageIds BandwidthPackageIdType + ForwardTableIds ForwardTableIdType + SnatTableIds SnatTableIdType + InstanceChargeType string + Name string + NatGatewayId string + RegionId common.Region + Spec string + Status string + VpcId string +} + +type DescribeNatGatewayResponse struct { + common.Response + common.PaginationResult + NatGateways struct { + NatGateway []NatGatewaySetType + } +} + +type DescribeNatGatewaysArgs struct { + RegionId common.Region + NatGatewayId string + VpcId string + common.Pagination +} + +func (client *Client) DescribeNatGateways(args *DescribeNatGatewaysArgs) (natGateways []NatGatewaySetType, + pagination *common.PaginationResult, err error) { + response, err := client.DescribeNatGatewaysWithRaw(args) + if err == nil { + return response.NatGateways.NatGateway, &response.PaginationResult, nil + } + + return nil, nil, err +} + +func (client *Client) DescribeNatGatewaysWithRaw(args *DescribeNatGatewaysArgs) (response *DescribeNatGatewayResponse, err error) { + args.Validate() + response = &DescribeNatGatewayResponse{} + + err = client.Invoke("DescribeNatGateways", args, response) + + if err == nil { + return response, nil + } + + return nil, err +} + +type ModifyNatGatewayAttributeArgs struct { + RegionId common.Region + NatGatewayId string + Name string + Description string +} + +type ModifyNatGatewayAttributeResponse struct { + common.Response +} + +func (client *Client) ModifyNatGatewayAttribute(args *ModifyNatGatewayAttributeArgs) error { + response := ModifyNatGatewayAttributeResponse{} + return client.Invoke("ModifyNatGatewayAttribute", args, &response) +} + +type ModifyNatGatewaySpecArgs struct { + RegionId common.Region + NatGatewayId string + Spec NatGatewaySpec +} + +func (client *Client) ModifyNatGatewaySpec(args *ModifyNatGatewaySpecArgs) error { + response := ModifyNatGatewayAttributeResponse{} + return client.Invoke("ModifyNatGatewaySpec", args, &response) +} + +type DeleteNatGatewayArgs struct { + RegionId common.Region + NatGatewayId string +} + +type DeleteNatGatewayResponse 
struct { + common.Response +} + +func (client *Client) DeleteNatGateway(args *DeleteNatGatewayArgs) error { + response := DeleteNatGatewayResponse{} + err := client.Invoke("DeleteNatGateway", args, &response) + return err +} + +type DescribeBandwidthPackagesArgs struct { + RegionId common.Region + BandwidthPackageId string + NatGatewayId string +} + +type PublicIpAddresseType struct { + AllocationId string + IpAddress string +} + +type DescribeBandwidthPackageType struct { + Bandwidth string + BandwidthPackageId string + IpCount string + PublicIpAddresses struct { + PublicIpAddresse []PublicIpAddresseType + } + + ZoneId string +} + +type DescribeBandwidthPackagesResponse struct { + common.Response + BandwidthPackages struct { + BandwidthPackage []DescribeBandwidthPackageType + } +} + +func (client *Client) DescribeBandwidthPackages(args *DescribeBandwidthPackagesArgs) ([]DescribeBandwidthPackageType, error) { + response := &DescribeBandwidthPackagesResponse{} + + err := client.Invoke("DescribeBandwidthPackages", args, response) + if err != nil { + return nil, err + } + + return response.BandwidthPackages.BandwidthPackage, err +} + +type DeleteBandwidthPackageArgs struct { + RegionId common.Region + BandwidthPackageId string +} + +type DeleteBandwidthPackageResponse struct { + common.Response +} + +func (client *Client) DeleteBandwidthPackage(args *DeleteBandwidthPackageArgs) error { + response := DeleteBandwidthPackageResponse{} + err := client.Invoke("DeleteBandwidthPackage", args, &response) + return err +} + +type NatGatewaySpec string + +const ( + NatGatewaySmallSpec = NatGatewaySpec("Small") + NatGatewayMiddleSpec = NatGatewaySpec("Middle") + NatGatewayLargeSpec = NatGatewaySpec("Large") +) diff --git a/vendor/github.com/denverdino/aliyungo/ecs/networks.go b/vendor/github.com/denverdino/aliyungo/ecs/networks.go new file mode 100644 index 000000000000..4c4cd7150de4 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/networks.go @@ -0,0 +1,258 @@ +// API on Network + +package ecs + +import ( + "time" + + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/util" +) + +type AllocatePublicIpAddressArgs struct { + InstanceId string +} + +type AllocatePublicIpAddressResponse struct { + common.Response + + IpAddress string +} + +// AllocatePublicIpAddress allocates Public Ip Address +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/network&allocatepublicipaddress +func (client *Client) AllocatePublicIpAddress(instanceId string) (ipAddress string, err error) { + args := AllocatePublicIpAddressArgs{ + InstanceId: instanceId, + } + response := AllocatePublicIpAddressResponse{} + err = client.Invoke("AllocatePublicIpAddress", &args, &response) + if err != nil { + return "", err + } + return response.IpAddress, nil +} + +type ModifyInstanceNetworkSpec struct { + InstanceId string + InternetMaxBandwidthOut int + InternetMaxBandwidthIn int +} + +type ModifyInstanceNetworkSpecResponse struct { + common.Response +} + +// ModifyInstanceNetworkSpec modifies instance network spec +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/network&modifyinstancenetworkspec +func (client *Client) ModifyInstanceNetworkSpec(args *ModifyInstanceNetworkSpec) error { + + response := ModifyInstanceNetworkSpecResponse{} + return client.Invoke("ModifyInstanceNetworkSpec", args, &response) +} + +type AllocateEipAddressArgs struct { + RegionId common.Region + Bandwidth int + InternetChargeType common.InternetChargeType + ClientToken string +} + +type 
AllocateEipAddressResponse struct { + common.Response + EipAddress string + AllocationId string +} + +// AllocateEipAddress allocates Eip Address +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/network&allocateeipaddress +func (client *Client) AllocateEipAddress(args *AllocateEipAddressArgs) (EipAddress string, AllocationId string, err error) { + if args.Bandwidth == 0 { + args.Bandwidth = 5 + } + response := AllocateEipAddressResponse{} + err = client.Invoke("AllocateEipAddress", args, &response) + if err != nil { + return "", "", err + } + return response.EipAddress, response.AllocationId, nil +} + +type AssociateEipAddressArgs struct { + AllocationId string + InstanceId string +} + +type AssociateEipAddressResponse struct { + common.Response +} + +// AssociateEipAddress associates EIP address to VM instance +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/network&associateeipaddress +func (client *Client) AssociateEipAddress(allocationId string, instanceId string) error { + args := AssociateEipAddressArgs{ + AllocationId: allocationId, + InstanceId: instanceId, + } + response := ModifyInstanceNetworkSpecResponse{} + return client.Invoke("AssociateEipAddress", &args, &response) +} + +// Status of disks +type EipStatus string + +const ( + EipStatusAssociating = EipStatus("Associating") + EipStatusUnassociating = EipStatus("Unassociating") + EipStatusInUse = EipStatus("InUse") + EipStatusAvailable = EipStatus("Available") +) + +type DescribeEipAddressesArgs struct { + RegionId common.Region + Status EipStatus //enum Associating | Unassociating | InUse | Available + EipAddress string + AllocationId string + common.Pagination +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&eipaddresssettype +type EipAddressSetType struct { + RegionId common.Region + IpAddress string + AllocationId string + Status EipStatus + InstanceId string + Bandwidth string // Why string + InternetChargeType common.InternetChargeType + OperationLocks OperationLocksType + AllocationTime util.ISO6801Time +} + +type DescribeEipAddressesResponse struct { + common.Response + common.PaginationResult + EipAddresses struct { + EipAddress []EipAddressSetType + } +} + +// DescribeInstanceStatus describes instance status +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/network&describeeipaddresses +func (client *Client) DescribeEipAddresses(args *DescribeEipAddressesArgs) (eipAddresses []EipAddressSetType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeEipAddressesWithRaw(args) + if err == nil { + return response.EipAddresses.EipAddress, &response.PaginationResult, nil + } + + return nil, nil, err +} + +func (client *Client) DescribeEipAddressesWithRaw(args *DescribeEipAddressesArgs) (response *DescribeEipAddressesResponse, err error) { + args.Validate() + response = &DescribeEipAddressesResponse{} + + err = client.Invoke("DescribeEipAddresses", args, response) + + if err == nil { + return response, nil + } + + return nil, err +} + +type ModifyEipAddressAttributeArgs struct { + AllocationId string + Bandwidth int +} + +type ModifyEipAddressAttributeResponse struct { + common.Response +} + +// ModifyEipAddressAttribute Modifies EIP attribute +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/network&modifyeipaddressattribute +func (client *Client) ModifyEipAddressAttribute(allocationId string, bandwidth int) error { + args := ModifyEipAddressAttributeArgs{ + AllocationId: 
allocationId, + Bandwidth: bandwidth, + } + response := ModifyEipAddressAttributeResponse{} + return client.Invoke("ModifyEipAddressAttribute", &args, &response) +} + +type UnallocateEipAddressArgs struct { + AllocationId string + InstanceId string +} + +type UnallocateEipAddressResponse struct { + common.Response +} + +// UnassociateEipAddress unallocates Eip Address from instance +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/network&unassociateeipaddress +func (client *Client) UnassociateEipAddress(allocationId string, instanceId string) error { + args := UnallocateEipAddressArgs{ + AllocationId: allocationId, + InstanceId: instanceId, + } + response := UnallocateEipAddressResponse{} + return client.Invoke("UnassociateEipAddress", &args, &response) +} + +type ReleaseEipAddressArgs struct { + AllocationId string +} + +type ReleaseEipAddressResponse struct { + common.Response +} + +// ReleaseEipAddress releases Eip address +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/network&releaseeipaddress +func (client *Client) ReleaseEipAddress(allocationId string) error { + args := ReleaseEipAddressArgs{ + AllocationId: allocationId, + } + response := ReleaseEipAddressResponse{} + return client.Invoke("ReleaseEipAddress", &args, &response) +} + +// WaitForVSwitchAvailable waits for VSwitch to given status +func (client *Client) WaitForEip(regionId common.Region, allocationId string, status EipStatus, timeout int) error { + if timeout <= 0 { + timeout = DefaultTimeout + } + args := DescribeEipAddressesArgs{ + RegionId: regionId, + AllocationId: allocationId, + } + for { + eips, _, err := client.DescribeEipAddresses(&args) + if err != nil { + return err + } + if len(eips) == 0 { + return common.GetClientErrorFromString("Not found") + } + if eips[0].Status == status { + break + } + timeout = timeout - DefaultWaitForInterval + if timeout <= 0 { + return common.GetClientErrorFromString("Timeout") + } + time.Sleep(DefaultWaitForInterval * time.Second) + } + return nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/regions.go b/vendor/github.com/denverdino/aliyungo/ecs/regions.go new file mode 100644 index 000000000000..d8ed72e8d36b --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/regions.go @@ -0,0 +1,34 @@ +package ecs + +import "github.com/denverdino/aliyungo/common" + +type DescribeRegionsArgs struct { +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype®iontype +type RegionType struct { + RegionId common.Region + LocalName string +} + +type DescribeRegionsResponse struct { + common.Response + Regions struct { + Region []RegionType + } +} + +// DescribeRegions describes regions +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/region&describeregions +func (client *Client) DescribeRegions() (regions []RegionType, err error) { + response := DescribeRegionsResponse{} + + err = client.Invoke("DescribeRegions", &DescribeRegionsArgs{}, &response) + + if err != nil { + return []RegionType{}, err + } + return response.Regions.Region, nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/route_tables.go b/vendor/github.com/denverdino/aliyungo/ecs/route_tables.go new file mode 100644 index 000000000000..cc85cb129ec8 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/route_tables.go @@ -0,0 +1,185 @@ +package ecs + +import ( + "time" + + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/util" +) + +type DescribeRouteTablesArgs struct { + VRouterId 
string + RouteTableId string + common.Pagination +} + +type RouteTableType string + +const ( + RouteTableSystem = RouteTableType("System") + RouteTableCustom = RouteTableType("Custom") +) + +type RouteEntryStatus string + +const ( + RouteEntryStatusPending = RouteEntryStatus("Pending") + RouteEntryStatusAvailable = RouteEntryStatus("Available") + RouteEntryStatusModifying = RouteEntryStatus("Modifying") +) + +type NextHopListType struct { + NextHopList struct { + NextHopItem []NextHopItemType + } +} + +type NextHopItemType struct { + NextHopType string + NextHopId string +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&routeentrysettype +type RouteEntrySetType struct { + RouteTableId string + DestinationCidrBlock string + Type RouteTableType + NextHopType string + NextHopId string + NextHopList NextHopListType + InstanceId string + Status RouteEntryStatus // enum Pending | Available | Modifying +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&routetablesettype +type RouteTableSetType struct { + VRouterId string + RouteTableId string + RouteEntrys struct { + RouteEntry []RouteEntrySetType + } + RouteTableType RouteTableType + CreationTime util.ISO6801Time +} + +type DescribeRouteTablesResponse struct { + common.Response + common.PaginationResult + RouteTables struct { + RouteTable []RouteTableSetType + } +} + +// DescribeRouteTables describes Virtual Routers +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/routertable&describeroutetables +func (client *Client) DescribeRouteTables(args *DescribeRouteTablesArgs) (routeTables []RouteTableSetType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeRouteTablesWithRaw(args) + if err == nil { + return response.RouteTables.RouteTable, &response.PaginationResult, nil + } + + return nil, nil, err +} + +func (client *Client) DescribeRouteTablesWithRaw(args *DescribeRouteTablesArgs) (response *DescribeRouteTablesResponse, err error) { + args.Validate() + response = &DescribeRouteTablesResponse{} + + err = client.Invoke("DescribeRouteTables", args, &response) + + if err == nil { + return response, nil + } + + return nil, err +} + +type NextHopType string + +const ( + NextHopIntance = NextHopType("Instance") //Default + NextHopTunnel = NextHopType("Tunnel") +) + +type CreateRouteEntryArgs struct { + RouteTableId string + DestinationCidrBlock string + NextHopType NextHopType + NextHopId string + ClientToken string +} + +type CreateRouteEntryResponse struct { + common.Response +} + +// CreateRouteEntry creates route entry +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/routertable&createrouteentry +func (client *Client) CreateRouteEntry(args *CreateRouteEntryArgs) error { + response := CreateRouteEntryResponse{} + return client.Invoke("CreateRouteEntry", args, &response) +} + +type DeleteRouteEntryArgs struct { + RouteTableId string + DestinationCidrBlock string + NextHopId string +} + +type DeleteRouteEntryResponse struct { + common.Response +} + +// DeleteRouteEntry deletes route entry +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/routertable&deleterouteentry +func (client *Client) DeleteRouteEntry(args *DeleteRouteEntryArgs) error { + response := DeleteRouteEntryResponse{} + return client.Invoke("DeleteRouteEntry", args, &response) +} + +// WaitForAllRouteEntriesAvailable waits for all route entries to Available status +func (client *Client) WaitForAllRouteEntriesAvailable(vrouterId 
string, routeTableId string, timeout int) error { + if timeout <= 0 { + timeout = DefaultTimeout + } + args := DescribeRouteTablesArgs{ + VRouterId: vrouterId, + RouteTableId: routeTableId, + } + for { + + routeTables, _, err := client.DescribeRouteTables(&args) + + if err != nil { + return err + } + if len(routeTables) == 0 { + return common.GetClientErrorFromString("Not found") + } + success := true + + loop: + for _, routeTable := range routeTables { + for _, routeEntry := range routeTable.RouteEntrys.RouteEntry { + if routeEntry.Status != RouteEntryStatusAvailable { + success = false + break loop + } + } + } + if success { + break + } + timeout = timeout - DefaultWaitForInterval + if timeout <= 0 { + return common.GetClientErrorFromString("Timeout") + } + time.Sleep(DefaultWaitForInterval * time.Second) + } + return nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/router_interface.go b/vendor/github.com/denverdino/aliyungo/ecs/router_interface.go new file mode 100644 index 000000000000..62af67860236 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/router_interface.go @@ -0,0 +1,227 @@ +package ecs + +import ( + "github.com/denverdino/aliyungo/common" +) + +type EcsCommonResponse struct { + common.Response +} +type RouterType string +type InterfaceStatus string +type Role string +type Spec string + +const ( + VRouter = RouterType("VRouter") + VBR = RouterType("VBR") + + Idl = InterfaceStatus("Idl") + Active = InterfaceStatus("Active") + Inactive = InterfaceStatus("Inactive") + + InitiatingSide = Role("InitiatingSide") + AcceptingSide = Role("AcceptingSide") + + Small1 = Spec("Small.1") + Small2 = Spec("Small.2") + Small5 = Spec("Small.5") + Middle1 = Spec("Middle.1") + Middle2 = Spec("Middle.2") + Middle5 = Spec("Middle.5") + Large1 = Spec("Large.1") + Large2 = Spec("Large.2") +) + +type CreateRouterInterfaceArgs struct { + RegionId common.Region + OppositeRegionId common.Region + RouterType RouterType + OppositeRouterType RouterType + RouterId string + OppositeRouterId string + Role Role + Spec Spec + AccessPointId string + OppositeAccessPointId string + OppositeInterfaceId string + OppositeInterfaceOwnerId string + Name string + Description string + HealthCheckSourceIp string + HealthCheckTargetIp string +} + +type CreateRouterInterfaceResponse struct { + common.Response + RouterInterfaceId string +} + +// CreateRouterInterface create Router interface +// +// You can read doc at https://help.aliyun.com/document_detail/36032.html?spm=5176.product27706.6.664.EbBsxC +func (client *Client) CreateRouterInterface(args *CreateRouterInterfaceArgs) (response *CreateRouterInterfaceResponse, err error) { + response = &CreateRouterInterfaceResponse{} + err = client.Invoke("CreateRouterInterface", args, &response) + if err != nil { + return response, err + } + return response, nil +} + +type Filter struct { + Key string + Value []string +} + +type DescribeRouterInterfacesArgs struct { + RegionId common.Region + common.Pagination + Filter []Filter +} + +type RouterInterfaceItemType struct { + ChargeType string + RouterInterfaceId string + AccessPointId string + OppositeRegionId string + OppositeAccessPointId string + Role Role + Spec Spec + Name string + Description string + RouterId string + RouterType RouterType + CreationTime string + Status string + BusinessStatus string + ConnectedTime string + OppositeInterfaceId string + OppositeInterfaceSpec string + OppositeInterfaceStatus string + OppositeInterfaceBusinessStatus string + OppositeRouterId string + 
OppositeRouterType RouterType + OppositeInterfaceOwnerId string + HealthCheckSourceIp string + HealthCheckTargetIp string +} + +type DescribeRouterInterfacesResponse struct { + RouterInterfaceSet struct { + RouterInterfaceType []RouterInterfaceItemType + } + common.PaginationResult +} + +// DescribeRouterInterfaces describe Router interfaces +// +// You can read doc at https://help.aliyun.com/document_detail/36032.html?spm=5176.product27706.6.664.EbBsxC +func (client *Client) DescribeRouterInterfaces(args *DescribeRouterInterfacesArgs) (response *DescribeRouterInterfacesResponse, err error) { + response = &DescribeRouterInterfacesResponse{} + err = client.Invoke("DescribeRouterInterfaces", args, &response) + if err != nil { + return response, err + } + return response, nil +} + +type OperateRouterInterfaceArgs struct { + RegionId common.Region + RouterInterfaceId string +} + +// ConnectRouterInterface +// +// You can read doc at https://help.aliyun.com/document_detail/36031.html?spm=5176.doc36035.6.666.wkyljN +func (client *Client) ConnectRouterInterface(args *OperateRouterInterfaceArgs) (response *EcsCommonResponse, err error) { + response = &EcsCommonResponse{} + err = client.Invoke("ConnectRouterInterface", args, &response) + if err != nil { + return response, err + } + return response, nil +} + +// ActivateRouterInterface active Router Interface +// +// You can read doc at https://help.aliyun.com/document_detail/36030.html?spm=5176.doc36031.6.667.DAuZLD +func (client *Client) ActivateRouterInterface(args *OperateRouterInterfaceArgs) (response *EcsCommonResponse, err error) { + response = &EcsCommonResponse{} + err = client.Invoke("ActivateRouterInterface", args, &response) + if err != nil { + return response, err + } + return response, nil +} + +// DeactivateRouterInterface deactivate Router Interface +// +// You can read doc at https://help.aliyun.com/document_detail/36033.html?spm=5176.doc36030.6.668.JqCWUz +func (client *Client) DeactivateRouterInterface(args *OperateRouterInterfaceArgs) (response *EcsCommonResponse, err error) { + response = &EcsCommonResponse{} + err = client.Invoke("DeactivateRouterInterface", args, &response) + if err != nil { + return response, err + } + return response, nil +} + +type ModifyRouterInterfaceSpecArgs struct { + RegionId common.Region + RouterInterfaceId string + Spec Spec +} + +type ModifyRouterInterfaceSpecResponse struct { + common.Response + Spec Spec +} + +// ModifyRouterInterfaceSpec +// +// You can read doc at https://help.aliyun.com/document_detail/36037.html?spm=5176.doc36036.6.669.McKiye +func (client *Client) ModifyRouterInterfaceSpec(args *ModifyRouterInterfaceSpecArgs) (response *ModifyRouterInterfaceSpecResponse, err error) { + response = &ModifyRouterInterfaceSpecResponse{} + err = client.Invoke("ModifyRouterInterfaceSpec", args, &response) + if err != nil { + return response, err + } + return response, nil +} + +type ModifyRouterInterfaceAttributeArgs struct { + RegionId common.Region + RouterInterfaceId string + Name string + Description string + OppositeInterfaceId string + OppositeRouterId string + OppositeInterfaceOwnerId string + HealthCheckSourceIp string + HealthCheckTargetIp string +} + +// ModifyRouterInterfaceAttribute +// +// You can read doc at https://help.aliyun.com/document_detail/36036.html?spm=5176.doc36037.6.670.Dcz3xS +func (client *Client) ModifyRouterInterfaceAttribute(args *ModifyRouterInterfaceAttributeArgs) (response *EcsCommonResponse, err error) { + response = &EcsCommonResponse{} + err = 
client.Invoke("ModifyRouterInterfaceAttribute", args, &response) + if err != nil { + return response, err + } + return response, nil +} + +// DeleteRouterInterface delete Router Interface +// +// You can read doc at https://help.aliyun.com/document_detail/36034.html?spm=5176.doc36036.6.671.y2xpNt +func (client *Client) DeleteRouterInterface(args *OperateRouterInterfaceArgs) (response *EcsCommonResponse, err error) { + response = &EcsCommonResponse{} + err = client.Invoke("DeleteRouterInterface", args, &response) + if err != nil { + return response, err + } + return response, nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/security_groups.go b/vendor/github.com/denverdino/aliyungo/ecs/security_groups.go new file mode 100644 index 000000000000..54af3a798249 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/security_groups.go @@ -0,0 +1,282 @@ +package ecs + +import ( + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/util" +) + +type NicType string + +const ( + NicTypeInternet = NicType("internet") + NicTypeIntranet = NicType("intranet") +) + +type IpProtocol string + +const ( + IpProtocolAll = IpProtocol("all") + IpProtocolTCP = IpProtocol("tcp") + IpProtocolUDP = IpProtocol("udp") + IpProtocolICMP = IpProtocol("icmp") + IpProtocolGRE = IpProtocol("gre") +) + +type PermissionPolicy string + +const ( + PermissionPolicyAccept = PermissionPolicy("accept") + PermissionPolicyDrop = PermissionPolicy("drop") +) + +type DescribeSecurityGroupAttributeArgs struct { + SecurityGroupId string + RegionId common.Region + NicType NicType //enum for internet (default) |intranet + Direction string // enum ingress egress +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&permissiontype +type PermissionType struct { + IpProtocol IpProtocol + PortRange string + SourceCidrIp string + SourceGroupId string + SourceGroupOwnerAccount string + DestCidrIp string + DestGroupId string + DestGroupOwnerAccount string + Policy PermissionPolicy + NicType NicType + Priority int + Direction string + Description string +} + +type DescribeSecurityGroupAttributeResponse struct { + common.Response + + SecurityGroupId string + SecurityGroupName string + RegionId common.Region + Description string + Permissions struct { + Permission []PermissionType + } + VpcId string +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/securitygroup&describesecuritygroupattribute +func (client *Client) DescribeSecurityGroupAttribute(args *DescribeSecurityGroupAttributeArgs) (response *DescribeSecurityGroupAttributeResponse, err error) { + response = &DescribeSecurityGroupAttributeResponse{} + err = client.Invoke("DescribeSecurityGroupAttribute", args, response) + if err != nil { + return nil, err + } + return response, nil +} + +type DescribeSecurityGroupsArgs struct { + RegionId common.Region + VpcId string + common.Pagination +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&securitygroupitemtype +type SecurityGroupItemType struct { + SecurityGroupId string + SecurityGroupName string + Description string + VpcId string + CreationTime util.ISO6801Time +} + +type DescribeSecurityGroupsResponse struct { + common.Response + common.PaginationResult + + RegionId common.Region + SecurityGroups struct { + SecurityGroup []SecurityGroupItemType + } +} + +// DescribeSecurityGroups describes security groups +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/securitygroup&describesecuritygroups +func 
(client *Client) DescribeSecurityGroups(args *DescribeSecurityGroupsArgs) (securityGroupItems []SecurityGroupItemType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeSecurityGroupsWithRaw(args) + if err != nil { + return nil, nil, err + } + + return response.SecurityGroups.SecurityGroup, &response.PaginationResult, nil +} + +func (client *Client) DescribeSecurityGroupsWithRaw(args *DescribeSecurityGroupsArgs) (response *DescribeSecurityGroupsResponse, err error) { + args.Validate() + response = &DescribeSecurityGroupsResponse{} + + err = client.Invoke("DescribeSecurityGroups", args, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +type CreateSecurityGroupArgs struct { + RegionId common.Region + SecurityGroupName string + Description string + VpcId string + ClientToken string +} + +type CreateSecurityGroupResponse struct { + common.Response + + SecurityGroupId string +} + +// CreateSecurityGroup creates security group +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/securitygroup&createsecuritygroup +func (client *Client) CreateSecurityGroup(args *CreateSecurityGroupArgs) (securityGroupId string, err error) { + response := CreateSecurityGroupResponse{} + err = client.Invoke("CreateSecurityGroup", args, &response) + if err != nil { + return "", err + } + return response.SecurityGroupId, err +} + +type DeleteSecurityGroupArgs struct { + RegionId common.Region + SecurityGroupId string +} + +type DeleteSecurityGroupResponse struct { + common.Response +} + +// DeleteSecurityGroup deletes security group +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/securitygroup&deletesecuritygroup +func (client *Client) DeleteSecurityGroup(regionId common.Region, securityGroupId string) error { + args := DeleteSecurityGroupArgs{ + RegionId: regionId, + SecurityGroupId: securityGroupId, + } + response := DeleteSecurityGroupResponse{} + err := client.Invoke("DeleteSecurityGroup", &args, &response) + return err +} + +type ModifySecurityGroupAttributeArgs struct { + RegionId common.Region + SecurityGroupId string + SecurityGroupName string + Description string +} + +type ModifySecurityGroupAttributeResponse struct { + common.Response +} + +// ModifySecurityGroupAttribute modifies attribute of security group +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/securitygroup&modifysecuritygroupattribute +func (client *Client) ModifySecurityGroupAttribute(args *ModifySecurityGroupAttributeArgs) error { + response := ModifySecurityGroupAttributeResponse{} + err := client.Invoke("ModifySecurityGroupAttribute", args, &response) + return err +} + +type AuthorizeSecurityGroupArgs struct { + SecurityGroupId string + RegionId common.Region + IpProtocol IpProtocol + PortRange string + SourceGroupId string + SourceGroupOwnerAccount string + SourceGroupOwnerID string + SourceCidrIp string // IPv4 only, default 0.0.0.0/0 + Policy PermissionPolicy // enum of accept (default) | drop + Priority int // 1 - 100, default 1 + NicType NicType // enum of internet | intranet (default) +} + +type AuthorizeSecurityGroupResponse struct { + common.Response +} + +// AuthorizeSecurityGroup authorize permissions to security group +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/securitygroup&authorizesecuritygroup +func (client *Client) AuthorizeSecurityGroup(args *AuthorizeSecurityGroupArgs) error { + response := AuthorizeSecurityGroupResponse{} + err := client.Invoke("AuthorizeSecurityGroup", 
args, &response) + return err +} + +type RevokeSecurityGroupArgs struct { + AuthorizeSecurityGroupArgs +} + +type RevokeSecurityGroupResponse struct { + common.Response +} + +// You can read doc at https://help.aliyun.com/document_detail/25557.html?spm=5176.doc25554.6.755.O6Tjz0 +func (client *Client) RevokeSecurityGroup(args *RevokeSecurityGroupArgs) error { + response := RevokeSecurityGroupResponse{} + err := client.Invoke("RevokeSecurityGroup", args, &response) + return err +} + +type AuthorizeSecurityGroupEgressArgs struct { + SecurityGroupId string + RegionId common.Region + IpProtocol IpProtocol + PortRange string + DestGroupId string + DestGroupOwnerAccount string + DestGroupOwnerId string + DestCidrIp string // IPv4 only, default 0.0.0.0/0 + Policy PermissionPolicy // enum of accept (default) | drop + Priority int // 1 - 100, default 1 + NicType NicType // enum of internet | intranet (default) +} + +type AuthorizeSecurityGroupEgressResponse struct { + common.Response +} + +// AuthorizeSecurityGroup authorize permissions to security group +// +// You can read doc at https://help.aliyun.com/document_detail/25560.html +func (client *Client) AuthorizeSecurityGroupEgress(args *AuthorizeSecurityGroupEgressArgs) error { + response := AuthorizeSecurityGroupEgressResponse{} + err := client.Invoke("AuthorizeSecurityGroupEgress", args, &response) + return err +} + +type RevokeSecurityGroupEgressArgs struct { + AuthorizeSecurityGroupEgressArgs +} + +type RevokeSecurityGroupEgressResponse struct { + common.Response +} + +// You can read doc at https://help.aliyun.com/document_detail/25561.html?spm=5176.doc25557.6.759.qcR4Az +func (client *Client) RevokeSecurityGroupEgress(args *RevokeSecurityGroupEgressArgs) error { + response := RevokeSecurityGroupEgressResponse{} + err := client.Invoke("RevokeSecurityGroupEgress", args, &response) + return err +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/snapshots.go b/vendor/github.com/denverdino/aliyungo/ecs/snapshots.go new file mode 100644 index 000000000000..f3c1b09c55e6 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/snapshots.go @@ -0,0 +1,140 @@ +package ecs + +import ( + "time" + + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/util" +) + +type DescribeSnapshotsArgs struct { + RegionId common.Region + InstanceId string + DiskId string + SnapshotIds []string //["s-xxxxxxxxx", "s-yyyyyyyyy", ..."s-zzzzzzzzz"] + common.Pagination +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&snapshottype +type SnapshotType struct { + SnapshotId string + SnapshotName string + Description string + Progress string + SourceDiskId string + SourceDiskSize int + SourceDiskType string //enum for System | Data + ProductCode string + CreationTime util.ISO6801Time +} + +type DescribeSnapshotsResponse struct { + common.Response + common.PaginationResult + Snapshots struct { + Snapshot []SnapshotType + } +} + +// DescribeSnapshots describe snapshots +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/snapshot&describesnapshots +func (client *Client) DescribeSnapshots(args *DescribeSnapshotsArgs) (snapshots []SnapshotType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeSnapshotsWithRaw(args) + if err != nil { + return nil, nil, err + } + return response.Snapshots.Snapshot, &response.PaginationResult, nil + +} + +func (client *Client) DescribeSnapshotsWithRaw(args *DescribeSnapshotsArgs) (response *DescribeSnapshotsResponse, err error) { + 
args.Validate() + response = &DescribeSnapshotsResponse{} + + err = client.Invoke("DescribeSnapshots", args, response) + + if err != nil { + return nil, err + } + return response, nil + +} + +type DeleteSnapshotArgs struct { + SnapshotId string +} + +type DeleteSnapshotResponse struct { + common.Response +} + +// DeleteSnapshot deletes snapshot +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/snapshot&deletesnapshot +func (client *Client) DeleteSnapshot(snapshotId string) error { + args := DeleteSnapshotArgs{SnapshotId: snapshotId} + response := DeleteSnapshotResponse{} + + return client.Invoke("DeleteSnapshot", &args, &response) +} + +type CreateSnapshotArgs struct { + DiskId string + SnapshotName string + Description string + ClientToken string +} + +type CreateSnapshotResponse struct { + common.Response + SnapshotId string +} + +// CreateSnapshot creates a new snapshot +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/snapshot&createsnapshot +func (client *Client) CreateSnapshot(args *CreateSnapshotArgs) (snapshotId string, err error) { + + response := CreateSnapshotResponse{} + + err = client.Invoke("CreateSnapshot", args, &response) + if err == nil { + snapshotId = response.SnapshotId + } + return snapshotId, err +} + +// Default timeout value for WaitForSnapShotReady method +const SnapshotDefaultTimeout = 120 + +// WaitForSnapShotReady waits for snapshot ready +func (client *Client) WaitForSnapShotReady(regionId common.Region, snapshotId string, timeout int) error { + if timeout <= 0 { + timeout = SnapshotDefaultTimeout + } + for { + args := DescribeSnapshotsArgs{ + RegionId: regionId, + SnapshotIds: []string{snapshotId}, + } + + snapshots, _, err := client.DescribeSnapshots(&args) + if err != nil { + return err + } + if snapshots == nil || len(snapshots) == 0 { + return common.GetClientErrorFromString("Not found") + } + if snapshots[0].Progress == "100%" { + break + } + timeout = timeout - DefaultWaitForInterval + if timeout <= 0 { + return common.GetClientErrorFromString("Timeout") + } + time.Sleep(DefaultWaitForInterval * time.Second) + } + return nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/snat_entry.go b/vendor/github.com/denverdino/aliyungo/ecs/snat_entry.go new file mode 100644 index 000000000000..d9806d769606 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/snat_entry.go @@ -0,0 +1,103 @@ +package ecs + +import "github.com/denverdino/aliyungo/common" + +type CreateSnatEntryArgs struct { + RegionId common.Region + SnatTableId string + SourceVSwitchId string + SnatIp string +} + +type CreateSnatEntryResponse struct { + common.Response + SnatEntryId string +} + +type SnatEntrySetType struct { + RegionId common.Region + SnatEntryId string + SnatIp string + SnatTableId string + SourceCIDR string + SourceVSwitchId string + Status string +} + +type DescribeSnatTableEntriesArgs struct { + RegionId common.Region + SnatTableId string + common.Pagination +} + +type DescribeSnatTableEntriesResponse struct { + common.Response + common.PaginationResult + SnatTableEntries struct { + SnatTableEntry []SnatEntrySetType + } +} + +type ModifySnatEntryArgs struct { + RegionId common.Region + SnatTableId string + SnatEntryId string + SnatIp string +} + +type ModifySnatEntryResponse struct { + common.Response +} + +type DeleteSnatEntryArgs struct { + RegionId common.Region + SnatTableId string + SnatEntryId string +} + +type DeleteSnatEntryResponse struct { + common.Response +} + +func (client *Client) CreateSnatEntry(args 
*CreateSnatEntryArgs) (resp *CreateSnatEntryResponse, err error) { + response := CreateSnatEntryResponse{} + err = client.Invoke("CreateSnatEntry", args, &response) + if err != nil { + return nil, err + } + return &response, err +} + +func (client *Client) DescribeSnatTableEntries(args *DescribeSnatTableEntriesArgs) (snatTableEntries []SnatEntrySetType, + pagination *common.PaginationResult, err error) { + response, err := client.DescribeSnatTableEntriesWithRaw(args) + if err != nil { + return nil, nil, err + } + + return response.SnatTableEntries.SnatTableEntry, &response.PaginationResult, nil +} + +func (client *Client) DescribeSnatTableEntriesWithRaw(args *DescribeSnatTableEntriesArgs) ( response *DescribeSnatTableEntriesResponse, err error) { + args.Validate() + response = &DescribeSnatTableEntriesResponse{} + + err = client.Invoke("DescribeSnatTableEntries", args, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (client *Client) ModifySnatEntry(args *ModifySnatEntryArgs) error { + response := ModifySnatEntryResponse{} + return client.Invoke("ModifySnatEntry", args, &response) +} + +func (client *Client) DeleteSnatEntry(args *DeleteSnatEntryArgs) error { + response := DeleteSnatEntryResponse{} + err := client.Invoke("DeleteSnatEntry", args, &response) + return err +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/ssh_key_pair.go b/vendor/github.com/denverdino/aliyungo/ecs/ssh_key_pair.go new file mode 100644 index 000000000000..76f1a3803488 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/ssh_key_pair.go @@ -0,0 +1,153 @@ +package ecs + +import ( + "github.com/denverdino/aliyungo/common" +) + +type CreateKeyPairArgs struct { + RegionId common.Region + KeyPairName string +} + +type CreateKeyPairResponse struct { + common.Response + KeyPairName string + KeyPairFingerPrint string + PrivateKeyBody string +} + +// CreateKeyPair creates keypair +// +// You can read doc at https://help.aliyun.com/document_detail/51771.html?spm=5176.doc51775.6.910.cedjfr +func (client *Client) CreateKeyPair(args *CreateKeyPairArgs) (resp *CreateKeyPairResponse,err error) { + response := CreateKeyPairResponse{} + err = client.Invoke("CreateKeyPair", args, &response) + if err != nil { + return nil, err + } + return &response, err +} + +type ImportKeyPairArgs struct { + RegionId common.Region + PublicKeyBody string + KeyPairName string +} + +type ImportKeyPairResponse struct { + common.Response + KeyPairName string + KeyPairFingerPrint string +} + +// ImportKeyPair import keypair +// +// You can read doc at https://help.aliyun.com/document_detail/51774.html?spm=5176.doc51771.6.911.BicQq2 +func (client *Client) ImportKeyPair(args *ImportKeyPairArgs) (resp *ImportKeyPairResponse,err error) { + response := ImportKeyPairResponse{} + err = client.Invoke("ImportKeyPair", args, &response) + if err != nil { + return nil, err + } + return &response, err +} + +type DescribeKeyPairsArgs struct { + RegionId common.Region + KeyPairFingerPrint string + KeyPairName string + common.Pagination +} + +type KeyPairItemType struct { + KeyPairName string + KeyPairFingerPrint string +} + +type DescribeKeyPairsResponse struct { + common.Response + common.PaginationResult + RegionId common.Region + KeyPairs struct { + KeyPair []KeyPairItemType + } +} + +// DescribeKeyPairs describe keypairs +// +// You can read doc at https://help.aliyun.com/document_detail/51773.html?spm=5176.doc51774.6.912.lyE0iX +func (client *Client) DescribeKeyPairs(args *DescribeKeyPairsArgs) (KeyPairs 
[]KeyPairItemType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeKeyPairsWithRaw(args) + if err != nil { + return nil, nil, err + } + + return response.KeyPairs.KeyPair, &response.PaginationResult, err +} + +func (client *Client) DescribeKeyPairsWithRaw(args *DescribeKeyPairsArgs) (response *DescribeKeyPairsResponse, err error) { + response = &DescribeKeyPairsResponse{} + + err = client.Invoke("DescribeKeyPairs", args, response) + + if err != nil { + return nil, err + } + + return response, err +} + +type AttachKeyPairArgs struct { + RegionId common.Region + KeyPairName string + InstanceIds string +} + +// AttachKeyPair keypars to instances +// +// You can read doc at https://help.aliyun.com/document_detail/51775.html?spm=5176.doc51773.6.913.igEem4 +func (client *Client) AttachKeyPair(args *AttachKeyPairArgs) (err error) { + response := common.Response{} + err = client.Invoke("AttachKeyPair", args, &response) + if err != nil { + return err + } + return nil +} + +type DetachKeyPairArgs struct { + RegionId common.Region + KeyPairName string + InstanceIds string +} + +// DetachKeyPair keyparis from instances +// +// You can read doc at https://help.aliyun.com/document_detail/51776.html?spm=5176.doc51775.6.914.DJ7Gmq +func (client *Client) DetachKeyPair(args *DetachKeyPairArgs) (err error) { + response := common.Response{} + err = client.Invoke("DetachKeyPair", args, &response) + if err != nil { + return err + } + return nil +} + +type DeleteKeyPairsArgs struct { + RegionId common.Region + KeyPairNames string +} + +// DeleteKeyPairs delete keypairs +// +// You can read doc at https://help.aliyun.com/document_detail/51772.html?spm=5176.doc51776.6.915.Qqcv2Q +func (client *Client) DeleteKeyPairs(args *DeleteKeyPairsArgs) (err error) { + response := common.Response{} + err = client.Invoke("DeleteKeyPairs", args, &response) + if err != nil { + return err + } + return nil +} + + diff --git a/vendor/github.com/denverdino/aliyungo/ecs/tags.go b/vendor/github.com/denverdino/aliyungo/ecs/tags.go new file mode 100644 index 000000000000..fb2519a1db70 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/tags.go @@ -0,0 +1,128 @@ +package ecs + +import "github.com/denverdino/aliyungo/common" + +type TagResourceType string + +const ( + TagResourceImage = TagResourceType("image") + TagResourceInstance = TagResourceType("instance") + TagResourceSnapshot = TagResourceType("snapshot") + TagResourceDisk = TagResourceType("disk") +) + +type AddTagsArgs struct { + ResourceId string + ResourceType TagResourceType //image, instance, snapshot or disk + RegionId common.Region + Tag map[string]string +} + +type AddTagsResponse struct { + common.Response +} + +// AddTags Add tags to resource +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/tags&addtags +func (client *Client) AddTags(args *AddTagsArgs) error { + response := AddTagsResponse{} + err := client.Invoke("AddTags", args, &response) + return err +} + +type RemoveTagsArgs struct { + ResourceId string + ResourceType TagResourceType //image, instance, snapshot or disk + RegionId common.Region + Tag map[string]string +} + +type RemoveTagsResponse struct { + common.Response +} + +// RemoveTags remove tags to resource +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/tags&removetags +func (client *Client) RemoveTags(args *RemoveTagsArgs) error { + response := RemoveTagsResponse{} + err := client.Invoke("RemoveTags", args, &response) + return err +} + +type ResourceItemType struct { 
+ ResourceId string + ResourceType TagResourceType + RegionId common.Region +} + +type DescribeResourceByTagsArgs struct { + ResourceType TagResourceType //image, instance, snapshot or disk + RegionId common.Region + Tag map[string]string + common.Pagination +} + +type DescribeResourceByTagsResponse struct { + common.Response + common.PaginationResult + Resources struct { + Resource []ResourceItemType + } +} + +// DescribeResourceByTags describe resource by tags +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/tags&describeresourcebytags +func (client *Client) DescribeResourceByTags(args *DescribeResourceByTagsArgs) (resources []ResourceItemType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeResourceByTagsWithRaw(args) + if err != nil { + return nil, nil, err + } + return response.Resources.Resource, &response.PaginationResult, nil +} + +func (client *Client) DescribeResourceByTagsWithRaw(args *DescribeResourceByTagsArgs) (response *DescribeResourceByTagsResponse, err error) { + args.Validate() + response = &DescribeResourceByTagsResponse{} + err = client.Invoke("DescribeResourceByTags", args, response) + if err != nil { + return nil, err + } + return response, nil +} + +type TagItemType struct { + TagKey string + TagValue string +} + +type DescribeTagsArgs struct { + RegionId common.Region + ResourceType TagResourceType //image, instance, snapshot or disk + ResourceId string + Tag map[string]string + common.Pagination +} + +type DescribeTagsResponse struct { + common.Response + common.PaginationResult + Tags struct { + Tag []TagItemType + } +} + +// DescribeResourceByTags describe resource by tags +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/tags&describeresourcebytags +func (client *Client) DescribeTags(args *DescribeTagsArgs) (tags []TagItemType, pagination *common.PaginationResult, err error) { + args.Validate() + response := DescribeTagsResponse{} + err = client.Invoke("DescribeTags", args, &response) + if err != nil { + return nil, nil, err + } + return response.Tags.Tag, &response.PaginationResult, nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/vpcs.go b/vendor/github.com/denverdino/aliyungo/ecs/vpcs.go new file mode 100644 index 000000000000..d66fe10272cc --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/vpcs.go @@ -0,0 +1,160 @@ +package ecs + +import ( + "time" + + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/util" +) + +type CreateVpcArgs struct { + RegionId common.Region + CidrBlock string //192.168.0.0/16 or 172.16.0.0/16 (default) + VpcName string + Description string + ClientToken string +} + +type CreateVpcResponse struct { + common.Response + VpcId string + VRouterId string + RouteTableId string +} + +// CreateVpc creates Virtual Private Cloud +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/vpc&createvpc +func (client *Client) CreateVpc(args *CreateVpcArgs) (resp *CreateVpcResponse, err error) { + response := CreateVpcResponse{} + err = client.Invoke("CreateVpc", args, &response) + if err != nil { + return nil, err + } + return &response, err +} + +type DeleteVpcArgs struct { + VpcId string +} + +type DeleteVpcResponse struct { + common.Response +} + +// DeleteVpc deletes Virtual Private Cloud +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/vpc&deletevpc +func (client *Client) DeleteVpc(vpcId string) error { + args := DeleteVpcArgs{ + VpcId: vpcId, + } + response := DeleteVpcResponse{} + 
return client.Invoke("DeleteVpc", &args, &response) +} + +type VpcStatus string + +const ( + VpcStatusPending = VpcStatus("Pending") + VpcStatusAvailable = VpcStatus("Available") +) + +type DescribeVpcsArgs struct { + VpcId string + RegionId common.Region + common.Pagination +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&vpcsettype +type VpcSetType struct { + VpcId string + RegionId common.Region + Status VpcStatus // enum Pending | Available + VpcName string + VSwitchIds struct { + VSwitchId []string + } + CidrBlock string + VRouterId string + Description string + IsDefault bool + CreationTime util.ISO6801Time +} + +type DescribeVpcsResponse struct { + common.Response + common.PaginationResult + Vpcs struct { + Vpc []VpcSetType + } +} + +// DescribeInstanceStatus describes instance status +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/vpc&describevpcs +func (client *Client) DescribeVpcs(args *DescribeVpcsArgs) (vpcs []VpcSetType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeVpcsWithRaw(args) + if err != nil { + return nil, nil, err + } + + return response.Vpcs.Vpc, &response.PaginationResult, nil +} + +func (client *Client) DescribeVpcsWithRaw(args *DescribeVpcsArgs) (response *DescribeVpcsResponse, err error) { + args.Validate() + response = &DescribeVpcsResponse{} + + err = client.Invoke("DescribeVpcs", args, response) + if err != nil { + return nil, err + } + + return response, err +} + +type ModifyVpcAttributeArgs struct { + VpcId string + VpcName string + Description string +} + +type ModifyVpcAttributeResponse struct { + common.Response +} + +// ModifyVpcAttribute modifies attribute of Virtual Private Cloud +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/vpc&modifyvpcattribute +func (client *Client) ModifyVpcAttribute(args *ModifyVpcAttributeArgs) error { + response := ModifyVpcAttributeResponse{} + return client.Invoke("ModifyVpcAttribute", args, &response) +} + +// WaitForInstance waits for instance to given status +func (client *Client) WaitForVpcAvailable(regionId common.Region, vpcId string, timeout int) error { + if timeout <= 0 { + timeout = DefaultTimeout + } + args := DescribeVpcsArgs{ + RegionId: regionId, + VpcId: vpcId, + } + for { + vpcs, _, err := client.DescribeVpcs(&args) + if err != nil { + return err + } + if len(vpcs) > 0 && vpcs[0].Status == VpcStatusAvailable { + break + } + timeout = timeout - DefaultWaitForInterval + if timeout <= 0 { + return common.GetClientErrorFromString("Timeout") + } + time.Sleep(DefaultWaitForInterval * time.Second) + } + return nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/vrouters.go b/vendor/github.com/denverdino/aliyungo/ecs/vrouters.go new file mode 100644 index 000000000000..04d43daa87b9 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/vrouters.go @@ -0,0 +1,77 @@ +package ecs + +import ( + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/util" +) + +type DescribeVRoutersArgs struct { + VRouterId string + RegionId common.Region + common.Pagination +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&vroutersettype +type VRouterSetType struct { + VRouterId string + RegionId common.Region + VpcId string + RouteTableIds struct { + RouteTableId []string + } + VRouterName string + Description string + CreationTime util.ISO6801Time +} + +type DescribeVRoutersResponse struct { + common.Response + common.PaginationResult + VRouters 
struct { + VRouter []VRouterSetType + } +} + +// DescribeVRouters describes Virtual Routers +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/vrouter&describevrouters +func (client *Client) DescribeVRouters(args *DescribeVRoutersArgs) (vrouters []VRouterSetType, pagination *common.PaginationResult, err error) { + response, err := client.DescribeVRoutersWithRaw(args) + if err == nil { + return response.VRouters.VRouter, &response.PaginationResult, nil + } + + return nil, nil, err +} + +func (client *Client) DescribeVRoutersWithRaw(args *DescribeVRoutersArgs) (response *DescribeVRoutersResponse, err error) { + args.Validate() + response = &DescribeVRoutersResponse{} + + err = client.Invoke("DescribeVRouters", args, response) + + if err == nil { + return response, nil + } + + return nil, err +} + +type ModifyVRouterAttributeArgs struct { + VRouterId string + VRouterName string + Description string +} + +type ModifyVRouterAttributeResponse struct { + common.Response +} + +// ModifyVRouterAttribute modifies attribute of Virtual Router +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/vrouter&modifyvrouterattribute +func (client *Client) ModifyVRouterAttribute(args *ModifyVRouterAttributeArgs) error { + response := ModifyVRouterAttributeResponse{} + return client.Invoke("ModifyVRouterAttribute", args, &response) +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/vswitches.go b/vendor/github.com/denverdino/aliyungo/ecs/vswitches.go new file mode 100644 index 000000000000..22d5ec8ea7e0 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/vswitches.go @@ -0,0 +1,164 @@ +package ecs + +import ( + "time" + + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/util" +) + +type CreateVSwitchArgs struct { + ZoneId string + CidrBlock string + VpcId string + VSwitchName string + Description string + ClientToken string +} + +type CreateVSwitchResponse struct { + common.Response + VSwitchId string +} + +// CreateVSwitch creates Virtual Switch +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/vswitch&createvswitch +func (client *Client) CreateVSwitch(args *CreateVSwitchArgs) (vswitchId string, err error) { + response := CreateVSwitchResponse{} + err = client.Invoke("CreateVSwitch", args, &response) + if err != nil { + return "", err + } + return response.VSwitchId, err +} + +type DeleteVSwitchArgs struct { + VSwitchId string +} + +type DeleteVSwitchResponse struct { + common.Response +} + +// DeleteVSwitch deletes Virtual Switch +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/vswitch&deletevswitch +func (client *Client) DeleteVSwitch(VSwitchId string) error { + args := DeleteVSwitchArgs{ + VSwitchId: VSwitchId, + } + response := DeleteVSwitchResponse{} + return client.Invoke("DeleteVSwitch", &args, &response) +} + +type DescribeVSwitchesArgs struct { + RegionId common.Region + VpcId string + VSwitchId string + ZoneId string + common.Pagination +} + +type VSwitchStatus string + +const ( + VSwitchStatusPending = VSwitchStatus("Pending") + VSwitchStatusAvailable = VSwitchStatus("Available") +) + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&vswitchsettype +type VSwitchSetType struct { + VSwitchId string + VpcId string + Status VSwitchStatus // enum Pending | Available + CidrBlock string + ZoneId string + AvailableIpAddressCount int + Description string + VSwitchName string + IsDefault bool + CreationTime util.ISO6801Time +} + +type DescribeVSwitchesResponse 
struct { + common.Response + common.PaginationResult + VSwitches struct { + VSwitch []VSwitchSetType + } +} + +// DescribeVSwitches describes Virtual Switches +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/vswitch&describevswitches +func (client *Client) DescribeVSwitches(args *DescribeVSwitchesArgs) (vswitches []VSwitchSetType, pagination *common.PaginationResult, err error) { + args.Validate() + response, err := client.DescribeVSwitchesWithRaw(args) + if err == nil { + return response.VSwitches.VSwitch, &response.PaginationResult, nil + } + + return nil, nil, err +} + +func (client *Client) DescribeVSwitchesWithRaw(args *DescribeVSwitchesArgs) (response *DescribeVSwitchesResponse, err error) { + args.Validate() + response = &DescribeVSwitchesResponse{} + + err = client.Invoke("DescribeVSwitches", args, &response) + + if err == nil { + return response, nil + } + + return nil, err +} + +type ModifyVSwitchAttributeArgs struct { + VSwitchId string + VSwitchName string + Description string +} + +type ModifyVSwitchAttributeResponse struct { + common.Response +} + +// ModifyVSwitchAttribute modifies attribute of Virtual Private Cloud +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/vswitch&modifyvswitchattribute +func (client *Client) ModifyVSwitchAttribute(args *ModifyVSwitchAttributeArgs) error { + response := ModifyVSwitchAttributeResponse{} + return client.Invoke("ModifyVSwitchAttribute", args, &response) +} + +// WaitForVSwitchAvailable waits for VSwitch to given status +func (client *Client) WaitForVSwitchAvailable(vpcId string, vswitchId string, timeout int) error { + if timeout <= 0 { + timeout = DefaultTimeout + } + args := DescribeVSwitchesArgs{ + VpcId: vpcId, + VSwitchId: vswitchId, + } + for { + vswitches, _, err := client.DescribeVSwitches(&args) + if err != nil { + return err + } + if len(vswitches) == 0 { + return common.GetClientErrorFromString("Not found") + } + if vswitches[0].Status == VSwitchStatusAvailable { + break + } + timeout = timeout - DefaultWaitForInterval + if timeout <= 0 { + return common.GetClientErrorFromString("Timeout") + } + time.Sleep(DefaultWaitForInterval * time.Second) + } + return nil +} diff --git a/vendor/github.com/denverdino/aliyungo/ecs/zones.go b/vendor/github.com/denverdino/aliyungo/ecs/zones.go new file mode 100644 index 000000000000..61ecb0759416 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/ecs/zones.go @@ -0,0 +1,103 @@ +package ecs + +import "github.com/denverdino/aliyungo/common" + +type ResourceType string + +const ( + ResourceTypeInstance = ResourceType("Instance") + ResourceTypeDisk = ResourceType("Disk") + ResourceTypeVSwitch = ResourceType("VSwitch") + ResourceTypeIOOptimizedInstance = ResourceType("IoOptimized") +) + +// The sub-item of the type AvailableResourcesType +type SupportedResourceType string + +const ( + SupportedInstanceType = SupportedResourceType("supportedInstanceType") + SupportedInstanceTypeFamily = SupportedResourceType("supportedInstanceTypeFamily") + SupportedInstanceGeneration = SupportedResourceType("supportedInstanceGeneration") + SupportedSystemDiskCategory = SupportedResourceType("supportedSystemDiskCategory") + SupportedDataDiskCategory = SupportedResourceType("supportedDataDiskCategory") + SupportedNetworkCategory = SupportedResourceType("supportedNetworkCategory") + +) +// +// You can read doc at https://help.aliyun.com/document_detail/25670.html?spm=5176.doc25640.2.1.J24zQt +type ResourcesInfoType struct { + ResourcesInfo []AvailableResourcesType +} 
+// Because the sub-item of AvailableResourcesType starts with supported and golang struct cann't refer them, this uses map to parse ResourcesInfo +type AvailableResourcesType struct { + IoOptimized bool + NetworkTypes map[SupportedResourceType][]string + InstanceGenerations map[SupportedResourceType][]string + InstanceTypeFamilies map[SupportedResourceType][]string + InstanceTypes map[SupportedResourceType][]string + SystemDiskCategories map[SupportedResourceType][]DiskCategory + DataDiskCategories map[SupportedResourceType][]DiskCategory +} + +type DescribeZonesArgs struct { + RegionId common.Region +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&availableresourcecreationtype +type AvailableResourceCreationType struct { + ResourceTypes []ResourceType //enum for Instance, Disk, VSwitch +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&availablediskcategoriestype +type AvailableDiskCategoriesType struct { + DiskCategories []DiskCategory //enum for cloud, ephemeral, ephemeral_ssd +} + +type AvailableInstanceTypesType struct { + InstanceTypes []string +} + +// +// You can read doc at http://docs.aliyun.com/#/pub/ecs/open-api/datatype&zonetype +type ZoneType struct { + ZoneId string + LocalName string + AvailableResources ResourcesInfoType + AvailableInstanceTypes AvailableInstanceTypesType + AvailableResourceCreation AvailableResourceCreationType + AvailableDiskCategories AvailableDiskCategoriesType +} + +type DescribeZonesResponse struct { + common.Response + Zones struct { + Zone []ZoneType + } +} + +// DescribeZones describes zones +func (client *Client) DescribeZones(regionId common.Region) (zones []ZoneType, err error) { + response, err := client.DescribeZonesWithRaw(regionId) + if err == nil { + return response.Zones.Zone, nil + } + + return []ZoneType{}, err +} + +func (client *Client) DescribeZonesWithRaw(regionId common.Region) (response *DescribeZonesResponse, err error) { + args := DescribeZonesArgs{ + RegionId: regionId, + } + response = &DescribeZonesResponse{} + + err = client.Invoke("DescribeZones", &args, response) + + if err == nil { + return response, nil + } + + return nil, err +} diff --git a/vendor/github.com/denverdino/aliyungo/util/attempt.go b/vendor/github.com/denverdino/aliyungo/util/attempt.go new file mode 100644 index 000000000000..2d07f03a83bf --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/util/attempt.go @@ -0,0 +1,76 @@ +package util + +import ( + "time" +) + +// AttemptStrategy is reused from the goamz package + +// AttemptStrategy represents a strategy for waiting for an action +// to complete successfully. This is an internal type used by the +// implementation of other packages. +type AttemptStrategy struct { + Total time.Duration // total duration of attempt. + Delay time.Duration // interval between each try in the burst. + Min int // minimum number of retries; overrides Total +} + +type Attempt struct { + strategy AttemptStrategy + last time.Time + end time.Time + force bool + count int +} + +// Start begins a new sequence of attempts for the given strategy. +func (s AttemptStrategy) Start() *Attempt { + now := time.Now() + return &Attempt{ + strategy: s, + last: now, + end: now.Add(s.Total), + force: true, + } +} + +// Next waits until it is time to perform the next attempt or returns +// false if it is time to stop trying. 
+func (a *Attempt) Next() bool { + now := time.Now() + sleep := a.nextSleep(now) + if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { + return false + } + a.force = false + if sleep > 0 && a.count > 0 { + time.Sleep(sleep) + now = time.Now() + } + a.count++ + a.last = now + return true +} + +func (a *Attempt) nextSleep(now time.Time) time.Duration { + sleep := a.strategy.Delay - now.Sub(a.last) + if sleep < 0 { + return 0 + } + return sleep +} + +// HasNext returns whether another attempt will be made if the current +// one fails. If it returns true, the following call to Next is +// guaranteed to return true. +func (a *Attempt) HasNext() bool { + if a.force || a.strategy.Min > a.count { + return true + } + now := time.Now() + if now.Add(a.nextSleep(now)).Before(a.end) { + a.force = true + return true + } + return false +} diff --git a/vendor/github.com/denverdino/aliyungo/util/encoding.go b/vendor/github.com/denverdino/aliyungo/util/encoding.go new file mode 100644 index 000000000000..8cb58828803b --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/util/encoding.go @@ -0,0 +1,314 @@ +package util + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +// change instance=["a", "b"] +// to instance.1="a" instance.2="b" +func FlattenFn(fieldName string, field reflect.Value, values *url.Values) { + l := field.Len() + if l > 0 { + for i := 0; i < l; i++ { + str := field.Index(i).String() + values.Set(fieldName+"."+strconv.Itoa(i+1), str) + } + } +} + +func Underline2Dot(name string) string { + return strings.Replace(name, "_", ".", -1) +} + +//ConvertToQueryValues converts the struct to url.Values +func ConvertToQueryValues(ifc interface{}) url.Values { + values := url.Values{} + SetQueryValues(ifc, &values) + return values +} + +//SetQueryValues sets the struct to existing url.Values following ECS encoding rules +func SetQueryValues(ifc interface{}, values *url.Values) { + setQueryValues(ifc, values, "") +} + +func SetQueryValueByFlattenMethod(ifc interface{}, values *url.Values) { + setQueryValuesByFlattenMethod(ifc, values, "") +} + +func setQueryValues(i interface{}, values *url.Values, prefix string) { + // add to support url.Values + mapValues, ok := i.(url.Values) + if ok { + for k, _ := range mapValues { + values.Set(k, mapValues.Get(k)) + } + return + } + + elem := reflect.ValueOf(i) + if elem.Kind() == reflect.Ptr { + elem = elem.Elem() + } + elemType := elem.Type() + for i := 0; i < elem.NumField(); i++ { + + fieldName := elemType.Field(i).Name + anonymous := elemType.Field(i).Anonymous + field := elem.Field(i) + // TODO Use Tag for validation + // tag := typ.Field(i).Tag.Get("tagname") + kind := field.Kind() + if (kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice || kind == reflect.Map || kind == reflect.Chan) && field.IsNil() { + continue + } + if kind == reflect.Ptr { + field = field.Elem() + kind = field.Kind() + } + var value string + //switch field.Interface().(type) { + switch kind { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i := field.Int() + if i != 0 { + value = strconv.FormatInt(i, 10) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + i := field.Uint() + if i != 0 { + value = strconv.FormatUint(i, 10) + } + case reflect.Float32: + value = strconv.FormatFloat(field.Float(), 'f', 4, 32) + case reflect.Float64: + value = strconv.FormatFloat(field.Float(), 'f', 4, 64) + case reflect.Bool: + value 
= strconv.FormatBool(field.Bool()) + case reflect.String: + value = field.String() + case reflect.Map: + ifc := field.Interface() + m := ifc.(map[string]string) + if m != nil { + j := 0 + for k, v := range m { + j++ + keyName := fmt.Sprintf("%s.%d.Key", fieldName, j) + values.Set(keyName, k) + valueName := fmt.Sprintf("%s.%d.Value", fieldName, j) + values.Set(valueName, v) + } + } + case reflect.Slice: + switch field.Type().Elem().Kind() { + case reflect.Uint8: + value = string(field.Bytes()) + case reflect.String: + l := field.Len() + if l > 0 { + strArray := make([]string, l) + for i := 0; i < l; i++ { + strArray[i] = field.Index(i).String() + } + bytes, err := json.Marshal(strArray) + if err == nil { + value = string(bytes) + } else { + log.Printf("Failed to convert JSON: %v", err) + } + } + default: + l := field.Len() + for j := 0; j < l; j++ { + prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1)) + ifc := field.Index(j).Interface() + //log.Printf("%s : %v", prefixName, ifc) + if ifc != nil { + setQueryValues(ifc, values, prefixName) + } + } + continue + } + + default: + switch field.Interface().(type) { + case ISO6801Time: + t := field.Interface().(ISO6801Time) + value = t.String() + case time.Time: + t := field.Interface().(time.Time) + value = GetISO8601TimeStamp(t) + default: + ifc := field.Interface() + if ifc != nil { + if anonymous { + SetQueryValues(ifc, values) + } else { + prefixName := fieldName + "." + setQueryValues(ifc, values, prefixName) + } + continue + } + } + } + if value != "" { + name := elemType.Field(i).Tag.Get("ArgName") + if name == "" { + name = fieldName + } + if prefix != "" { + name = prefix + name + } + values.Set(name, value) + } + } +} + +func setQueryValuesByFlattenMethod(i interface{}, values *url.Values, prefix string) { + // add to support url.Values + mapValues, ok := i.(url.Values) + if ok { + for k, _ := range mapValues { + values.Set(k, mapValues.Get(k)) + } + return + } + + elem := reflect.ValueOf(i) + if elem.Kind() == reflect.Ptr { + elem = elem.Elem() + } + elemType := elem.Type() + for i := 0; i < elem.NumField(); i++ { + + fieldName := elemType.Field(i).Name + anonymous := elemType.Field(i).Anonymous + field := elem.Field(i) + + // TODO Use Tag for validation + // tag := typ.Field(i).Tag.Get("tagname") + kind := field.Kind() + + if (kind == reflect.Ptr || kind == reflect.Array || kind == reflect.Slice || kind == reflect.Map || kind == reflect.Chan) && field.IsNil() { + continue + } + if kind == reflect.Ptr { + field = field.Elem() + kind = field.Kind() + } + + var value string + //switch field.Interface().(type) { + switch kind { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i := field.Int() + if i != 0 { + value = strconv.FormatInt(i, 10) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + i := field.Uint() + if i != 0 { + value = strconv.FormatUint(i, 10) + } + case reflect.Float32: + value = strconv.FormatFloat(field.Float(), 'f', 4, 32) + case reflect.Float64: + value = strconv.FormatFloat(field.Float(), 'f', 4, 64) + case reflect.Bool: + value = strconv.FormatBool(field.Bool()) + case reflect.String: + value = field.String() + case reflect.Map: + ifc := field.Interface() + m := ifc.(map[string]string) + if m != nil { + j := 0 + for k, v := range m { + j++ + keyName := fmt.Sprintf("%s.%d.Key", fieldName, j) + values.Set(keyName, k) + valueName := fmt.Sprintf("%s.%d.Value", fieldName, j) + values.Set(valueName, v) + } + } + case reflect.Slice: + if 
field.Type().Name() == "FlattenArray" { + FlattenFn(fieldName, field, values) + } else { + switch field.Type().Elem().Kind() { + case reflect.Uint8: + value = string(field.Bytes()) + case reflect.String: + l := field.Len() + if l > 0 { + strArray := make([]string, l) + for i := 0; i < l; i++ { + strArray[i] = field.Index(i).String() + } + bytes, err := json.Marshal(strArray) + if err == nil { + value = string(bytes) + } else { + log.Printf("Failed to convert JSON: %v", err) + } + } + default: + l := field.Len() + for j := 0; j < l; j++ { + prefixName := fmt.Sprintf("%s.%d.", fieldName, (j + 1)) + ifc := field.Index(j).Interface() + //log.Printf("%s : %v", prefixName, ifc) + if ifc != nil { + setQueryValuesByFlattenMethod(ifc, values, prefixName) + } + } + continue + } + } + + default: + switch field.Interface().(type) { + case ISO6801Time: + t := field.Interface().(ISO6801Time) + value = t.String() + case time.Time: + t := field.Interface().(time.Time) + value = GetISO8601TimeStamp(t) + default: + + ifc := field.Interface() + if ifc != nil { + if anonymous { + SetQueryValues(ifc, values) + } else { + prefixName := fieldName + "." + setQueryValuesByFlattenMethod(ifc, values, prefixName) + } + continue + } + } + } + if value != "" { + name := elemType.Field(i).Tag.Get("ArgName") + if name == "" { + name = fieldName + } + if prefix != "" { + name = prefix + name + } + // NOTE: here we will change name to underline style when the type is UnderlineString + if field.Type().Name() == "UnderlineString" { + name = Underline2Dot(name) + } + values.Set(name, value) + } + } +} diff --git a/vendor/github.com/denverdino/aliyungo/util/iso6801.go b/vendor/github.com/denverdino/aliyungo/util/iso6801.go new file mode 100644 index 000000000000..9c25e8f68834 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/util/iso6801.go @@ -0,0 +1,80 @@ +package util + +import ( + "fmt" + "strconv" + "time" +) + +// GetISO8601TimeStamp gets timestamp string in ISO8601 format +func GetISO8601TimeStamp(ts time.Time) string { + t := ts.UTC() + return fmt.Sprintf("%04d-%02d-%02dT%02d:%02d:%02dZ", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) +} + +const formatISO8601 = "2006-01-02T15:04:05Z" +const jsonFormatISO8601 = `"` + formatISO8601 + `"` +const formatISO8601withoutSeconds = "2006-01-02T15:04Z" +const jsonFormatISO8601withoutSeconds = `"` + formatISO8601withoutSeconds + `"` + +// A ISO6801Time represents a time in ISO8601 format +type ISO6801Time time.Time + +// New constructs a new iso8601.Time instance from an existing +// time.Time instance. This causes the nanosecond field to be set to +// 0, and its time zone set to a fixed zone with no offset from UTC +// (but it is *not* UTC itself). 
+func NewISO6801Time(t time.Time) ISO6801Time { + return ISO6801Time(time.Date( + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + 0, + time.UTC, + )) +} + +// IsDefault checks if the time is default +func (it *ISO6801Time) IsDefault() bool { + return *it == ISO6801Time{} +} + +// MarshalJSON serializes the ISO6801Time into JSON string +func (it ISO6801Time) MarshalJSON() ([]byte, error) { + return []byte(time.Time(it).Format(jsonFormatISO8601)), nil +} + +// UnmarshalJSON deserializes the ISO6801Time from JSON string +func (it *ISO6801Time) UnmarshalJSON(data []byte) error { + str := string(data) + + if str == "\"\"" || len(data) == 0 { + return nil + } + var t time.Time + var err error + if str[0] == '"' { + t, err = time.ParseInLocation(jsonFormatISO8601, str, time.UTC) + if err != nil { + t, err = time.ParseInLocation(jsonFormatISO8601withoutSeconds, str, time.UTC) + } + } else { + var i int64 + i, err = strconv.ParseInt(str, 10, 64) + if err == nil { + t = time.Unix(i/1000, i%1000) + } + } + if err == nil { + *it = ISO6801Time(t) + } + return err +} + +// String returns the time in ISO6801Time format +func (it ISO6801Time) String() string { + return time.Time(it).Format(formatISO8601) +} diff --git a/vendor/github.com/denverdino/aliyungo/util/signature.go b/vendor/github.com/denverdino/aliyungo/util/signature.go new file mode 100644 index 000000000000..a00b27c19bc8 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/util/signature.go @@ -0,0 +1,40 @@ +package util + +import ( + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "net/url" + "strings" +) + +//CreateSignature creates signature for string following Aliyun rules +func CreateSignature(stringToSignature, accessKeySecret string) string { + // Crypto by HMAC-SHA1 + hmacSha1 := hmac.New(sha1.New, []byte(accessKeySecret)) + hmacSha1.Write([]byte(stringToSignature)) + sign := hmacSha1.Sum(nil) + + // Encode to Base64 + base64Sign := base64.StdEncoding.EncodeToString(sign) + + return base64Sign +} + +func percentReplace(str string) string { + str = strings.Replace(str, "+", "%20", -1) + str = strings.Replace(str, "*", "%2A", -1) + str = strings.Replace(str, "%7E", "~", -1) + + return str +} + +// CreateSignatureForRequest creates signature for query string values +func CreateSignatureForRequest(method string, values *url.Values, accessKeySecret string) string { + + canonicalizedQueryString := percentReplace(values.Encode()) + + stringToSign := method + "&%2F&" + url.QueryEscape(canonicalizedQueryString) + + return CreateSignature(stringToSign, accessKeySecret) +} diff --git a/vendor/github.com/denverdino/aliyungo/util/util.go b/vendor/github.com/denverdino/aliyungo/util/util.go new file mode 100644 index 000000000000..dd68214e3b96 --- /dev/null +++ b/vendor/github.com/denverdino/aliyungo/util/util.go @@ -0,0 +1,147 @@ +package util + +import ( + "bytes" + srand "crypto/rand" + "encoding/binary" + "math/rand" + "net/http" + "net/url" + "sort" + "time" +) + +const dictionary = "_0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + +//CreateRandomString create random string +func CreateRandomString() string { + b := make([]byte, 32) + l := len(dictionary) + + _, err := srand.Read(b) + + if err != nil { + // fail back to insecure rand + rand.Seed(time.Now().UnixNano()) + for i := range b { + b[i] = dictionary[rand.Int()%l] + } + } else { + for i, v := range b { + b[i] = dictionary[v%byte(l)] + } + } + + return string(b) +} + +// Encode encodes the values into ``URL encoded'' form +// 
("acl&bar=baz&foo=quux") sorted by key. +func Encode(v url.Values) string { + if v == nil { + return "" + } + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := url.QueryEscape(k) + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + if v != "" { + buf.WriteString("=") + buf.WriteString(url.QueryEscape(v)) + } + } + } + return buf.String() +} + +func GetGMTime() string { + return time.Now().UTC().Format(http.TimeFormat) +} + +// + +func randUint32() uint32 { + return randUint32Slice(1)[0] +} + +func randUint32Slice(c int) []uint32 { + b := make([]byte, c*4) + + _, err := srand.Read(b) + + if err != nil { + // fail back to insecure rand + rand.Seed(time.Now().UnixNano()) + for i := range b { + b[i] = byte(rand.Int()) + } + } + + n := make([]uint32, c) + + for i := range n { + n[i] = binary.BigEndian.Uint32(b[i*4 : i*4+4]) + } + + return n +} + +func toByte(n uint32, st, ed byte) byte { + return byte(n%uint32(ed-st+1) + uint32(st)) +} + +func toDigit(n uint32) byte { + return toByte(n, '0', '9') +} + +func toLowerLetter(n uint32) byte { + return toByte(n, 'a', 'z') +} + +func toUpperLetter(n uint32) byte { + return toByte(n, 'A', 'Z') +} + +type convFunc func(uint32) byte + +var convFuncs = []convFunc{toDigit, toLowerLetter, toUpperLetter} + +// tools for generating a random ECS instance password +// from 8 to 30 char MUST contain digit upper, case letter and upper case letter +// http://docs.aliyun.com/#/pub/ecs/open-api/instance&createinstance +func GenerateRandomECSPassword() string { + + // [8, 30] + l := int(randUint32()%23 + 8) + + n := randUint32Slice(l) + + b := make([]byte, l) + + b[0] = toDigit(n[0]) + b[1] = toLowerLetter(n[1]) + b[2] = toUpperLetter(n[2]) + + for i := 3; i < l; i++ { + b[i] = convFuncs[n[i]%3](n[i]) + } + + s := make([]byte, l) + perm := rand.Perm(l) + for i, v := range perm { + s[v] = b[i] + } + + return string(s) + +} diff --git a/vendor/github.com/digitalocean/godo/.gitignore b/vendor/github.com/digitalocean/godo/.gitignore new file mode 100644 index 000000000000..48b8bf9072d8 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/.gitignore @@ -0,0 +1 @@ +vendor/ diff --git a/vendor/github.com/digitalocean/godo/.travis.yml b/vendor/github.com/digitalocean/godo/.travis.yml new file mode 100644 index 000000000000..5bbdb40f0fec --- /dev/null +++ b/vendor/github.com/digitalocean/godo/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.7 + - 1.8 + - 1.9 + - "1.10" + - tip diff --git a/vendor/github.com/digitalocean/godo/CHANGELOG.md b/vendor/github.com/digitalocean/godo/CHANGELOG.md new file mode 100644 index 000000000000..31184887a459 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/CHANGELOG.md @@ -0,0 +1,90 @@ +# Change Log + +## [v1.7.5] - 2019-03-04 + +- #207 Add support for custom subdomains for Spaces CDN [beta] - @xornivore + +## [v1.7.4] - 2019-02-08 + +- #202 Allow tagging volumes - @mchitten + +## [v1.7.3] - 2018-12-18 + +- #196 Expose tag support for creating Load Balancers. + +## [v1.7.2] - 2018-12-04 + +- #192 Exposes more options for Kubernetes clusters. + +## [v1.7.1] - 2018-11-27 + +- #190 Expose constants for the state of Kubernetes clusters. 
+ +## [v1.7.0] - 2018-11-13 + +- #188 Kubernetes support [beta] - @aybabtme + +## [v1.6.0] - 2018-10-16 + +- #185 Projects support [beta] - @mchitten + +## [v1.5.0] - 2018-10-01 + +- #181 Adding tagging images support - @hugocorbucci + +## [v1.4.2] - 2018-08-30 + +- #178 Allowing creating domain records with weight of 0 - @TFaga +- #177 Adding `VolumeLimit` to account - @lxfontes + +## [v1.4.1] - 2018-08-23 + +- #176 Fix cdn flush cache API endpoint - @sunny-b + +## [v1.4.0] - 2018-08-22 + +- #175 Add support for Spaces CDN - @sunny-b + +## [v1.3.0] - 2018-05-24 + +- #170 Add support for volume formatting - @adamwg + +## [v1.2.0] - 2018-05-08 + +- #166 Remove support for Go 1.6 - @iheanyi +- #165 Add support for Let's Encrypt Certificates - @viola + +## [v1.1.3] - 2018-03-07 + +- #156 Handle non-json errors from the API - @aknuds1 +- #158 Update droplet example to use latest instance type - @dan-v + +## [v1.1.2] - 2018-03-06 + +- #157 storage: list volumes should handle only name or only region params - @andrewsykim +- #154 docs: replace first example with fully-runnable example - @xmudrii +- #152 Handle flags & tag properties of domain record - @jaymecd + +## [v1.1.1] - 2017-09-29 + +- #151 Following user agent field recommendations - @joonas +- #148 AsRequest method to create load balancers requests - @lukegb + +## [v1.1.0] - 2017-06-06 + +### Added +- #145 Add FirewallsService for managing Firewalls with the DigitalOcean API. - @viola +- #139 Add TTL field to the Domains. - @xmudrii + +### Fixed +- #143 Fix oauth2.NoContext depreciation. - @jbowens +- #141 Fix DropletActions on tagged resources. - @xmudrii + +## [v1.0.0] - 2017-03-10 + +### Added +- #130 Add Convert to ImageActionsService. - @xmudrii +- #126 Add CertificatesService for managing certificates with the DigitalOcean API. - @viola +- #125 Add LoadBalancersService for managing load balancers with the DigitalOcean API. - @viola +- #122 Add GetVolumeByName to StorageService. - @protochron +- #113 Add context.Context to all calls. - @aybabtme diff --git a/vendor/github.com/digitalocean/godo/CONTRIBUTING.md b/vendor/github.com/digitalocean/godo/CONTRIBUTING.md new file mode 100644 index 000000000000..7e16f89db207 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/CONTRIBUTING.md @@ -0,0 +1,24 @@ +# Contributing + +If you submit a pull request, please keep the following guidelines in mind: + +1. Code should be `go fmt` compliant. +2. Types, structs and funcs should be documented. +3. Tests pass. + +## Getting set up + +Assuming your `$GOPATH` is set up according to your desires, run: + +```sh +go get github.com/digitalocean/godo +go get -u github.com/stretchr/testify/assert +``` + +## Running tests + +When working on code in this repository, tests can be run via: + +```sh +go test . +``` diff --git a/vendor/github.com/digitalocean/godo/LICENSE.txt b/vendor/github.com/digitalocean/godo/LICENSE.txt new file mode 100644 index 000000000000..43c5d2eee7d9 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/LICENSE.txt @@ -0,0 +1,55 @@ +Copyright (c) 2014-2016 The godo AUTHORS. All rights reserved. 
+ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +====================== +Portions of the client are based on code at: +https://github.com/google/go-github/ + +Copyright (c) 2013 The go-github AUTHORS. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/digitalocean/godo/README.md b/vendor/github.com/digitalocean/godo/README.md new file mode 100644 index 000000000000..ffd3988b7abc --- /dev/null +++ b/vendor/github.com/digitalocean/godo/README.md @@ -0,0 +1,150 @@ +[![Build Status](https://travis-ci.org/digitalocean/godo.svg)](https://travis-ci.org/digitalocean/godo) + +# Godo + +Godo is a Go client library for accessing the DigitalOcean V2 API. 
+ +You can view the client API docs here: [http://godoc.org/github.com/digitalocean/godo](http://godoc.org/github.com/digitalocean/godo) + +You can view DigitalOcean API docs here: [https://developers.digitalocean.com/documentation/v2/](https://developers.digitalocean.com/documentation/v2/) + + +## Usage + +```go +import "github.com/digitalocean/godo" +``` + +Create a new DigitalOcean client, then use the exposed services to +access different parts of the DigitalOcean API. + +### Authentication + +Currently, Personal Access Token (PAT) is the only method of +authenticating with the API. You can manage your tokens +at the DigitalOcean Control Panel [Applications Page](https://cloud.digitalocean.com/settings/applications). + +You can then use your token to create a new client: + +```go +package main + +import ( + "context" + "github.com/digitalocean/godo" + "golang.org/x/oauth2" +) + +const ( + pat = "mytoken" +) + +type TokenSource struct { + AccessToken string +} + +func (t *TokenSource) Token() (*oauth2.Token, error) { + token := &oauth2.Token{ + AccessToken: t.AccessToken, + } + return token, nil +} + +func main() { + tokenSource := &TokenSource{ + AccessToken: pat, + } + + oauthClient := oauth2.NewClient(context.Background(), tokenSource) + client := godo.NewClient(oauthClient) +} +``` + +## Examples + + +To create a new Droplet: + +```go +dropletName := "super-cool-droplet" + +createRequest := &godo.DropletCreateRequest{ + Name: dropletName, + Region: "nyc3", + Size: "s-1vcpu-1gb", + Image: godo.DropletCreateImage{ + Slug: "ubuntu-14-04-x64", + }, +} + +ctx := context.TODO() + +newDroplet, _, err := client.Droplets.Create(ctx, createRequest) + +if err != nil { + fmt.Printf("Something bad happened: %s\n\n", err) + return err +} +``` + +### Pagination + +If a list of items is paginated by the API, you must request pages individually. For example, to fetch all Droplets: + +```go +func DropletList(ctx context.Context, client *godo.Client) ([]godo.Droplet, error) { + // create a list to hold our droplets + list := []godo.Droplet{} + + // create options. initially, these will be blank + opt := &godo.ListOptions{} + for { + droplets, resp, err := client.Droplets.List(ctx, opt) + if err != nil { + return nil, err + } + + // append the current page's droplets to our list + for _, d := range droplets { + list = append(list, d) + } + + // if we are at the last page, break out the for loop + if resp.Links == nil || resp.Links.IsLastPage() { + break + } + + page, err := resp.Links.CurrentPage() + if err != nil { + return nil, err + } + + // set the page we want for the next request + opt.Page = page + 1 + } + + return list, nil +} +``` + +## Versioning + +Each version of the client is tagged and the version is updated accordingly. + +Since Go does not have a built-in versioning, a package management tool is +recommended - a good one that works with git tags is +[gopkg.in](http://labix.org/gopkg.in). + +To see the list of past versions, run `git tag`. + + +## Documentation + +For a comprehensive list of examples, check out the [API documentation](https://developers.digitalocean.com/documentation/v2/). + +For details on all the functionality in this library, see the [GoDoc](http://godoc.org/github.com/digitalocean/godo) documentation. + + +## Contributing + +We love pull requests! Please see the [contribution guidelines](CONTRIBUTING.md). 
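
The authentication and pagination walkthroughs above leave the client pieces in separate snippets; the following is a minimal, self-contained sketch of how they fit together. It reuses the README's token-source pattern (here as an unexported `tokenSource` type with a placeholder `"my-pat"` token, both purely illustrative names) and calls `client.Account.Get`, the `AccountService` method defined in the vendored `account.go` that follows, as a quick check that the token works.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/digitalocean/godo"
	"golang.org/x/oauth2"
)

// tokenSource mirrors the TokenSource type from the authentication
// example above; the access token value is a placeholder.
type tokenSource struct{ accessToken string }

func (t *tokenSource) Token() (*oauth2.Token, error) {
	return &oauth2.Token{AccessToken: t.accessToken}, nil
}

func main() {
	ctx := context.Background()

	// Build an authenticated client exactly as the README describes.
	oauthClient := oauth2.NewClient(ctx, &tokenSource{accessToken: "my-pat"})
	client := godo.NewClient(oauthClient)

	// Fetch account details through the AccountService defined in account.go.
	account, _, err := client.Account.Get(ctx)
	if err != nil {
		log.Fatalf("fetching account: %v", err)
	}
	fmt.Printf("account %s (droplet limit %d, status %s)\n",
		account.Email, account.DropletLimit, account.Status)
}
```

The other services in this vendor drop (domains, droplets, CDN endpoints, and so on) are reached the same way through fields on `godo.Client`, and most of their methods share the `(result, *Response, error)` return shape seen throughout the files below.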
diff --git a/vendor/github.com/digitalocean/godo/account.go b/vendor/github.com/digitalocean/godo/account.go new file mode 100644 index 000000000000..7d3e105d34f6 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/account.go @@ -0,0 +1,60 @@ +package godo + +import ( + "context" + "net/http" +) + +// AccountService is an interface for interfacing with the Account +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2/#account +type AccountService interface { + Get(context.Context) (*Account, *Response, error) +} + +// AccountServiceOp handles communication with the Account related methods of +// the DigitalOcean API. +type AccountServiceOp struct { + client *Client +} + +var _ AccountService = &AccountServiceOp{} + +// Account represents a DigitalOcean Account +type Account struct { + DropletLimit int `json:"droplet_limit,omitempty"` + FloatingIPLimit int `json:"floating_ip_limit,omitempty"` + VolumeLimit int `json:"volume_limit,omitempty"` + Email string `json:"email,omitempty"` + UUID string `json:"uuid,omitempty"` + EmailVerified bool `json:"email_verified,omitempty"` + Status string `json:"status,omitempty"` + StatusMessage string `json:"status_message,omitempty"` +} + +type accountRoot struct { + Account *Account `json:"account"` +} + +func (r Account) String() string { + return Stringify(r) +} + +// Get DigitalOcean account info +func (s *AccountServiceOp) Get(ctx context.Context) (*Account, *Response, error) { + + path := "v2/account" + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(accountRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Account, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/action.go b/vendor/github.com/digitalocean/godo/action.go new file mode 100644 index 000000000000..67ef3ab8d51d --- /dev/null +++ b/vendor/github.com/digitalocean/godo/action.go @@ -0,0 +1,104 @@ +package godo + +import ( + "context" + "fmt" + "net/http" +) + +const ( + actionsBasePath = "v2/actions" + + // ActionInProgress is an in progress action status + ActionInProgress = "in-progress" + + //ActionCompleted is a completed action status + ActionCompleted = "completed" +) + +// ActionsService handles communction with action related methods of the +// DigitalOcean API: https://developers.digitalocean.com/documentation/v2#actions +type ActionsService interface { + List(context.Context, *ListOptions) ([]Action, *Response, error) + Get(context.Context, int) (*Action, *Response, error) +} + +// ActionsServiceOp handles communition with the image action related methods of the +// DigitalOcean API. 
+type ActionsServiceOp struct { + client *Client +} + +var _ ActionsService = &ActionsServiceOp{} + +type actionsRoot struct { + Actions []Action `json:"actions"` + Links *Links `json:"links"` +} + +type actionRoot struct { + Event *Action `json:"action"` +} + +// Action represents a DigitalOcean Action +type Action struct { + ID int `json:"id"` + Status string `json:"status"` + Type string `json:"type"` + StartedAt *Timestamp `json:"started_at"` + CompletedAt *Timestamp `json:"completed_at"` + ResourceID int `json:"resource_id"` + ResourceType string `json:"resource_type"` + Region *Region `json:"region,omitempty"` + RegionSlug string `json:"region_slug,omitempty"` +} + +// List all actions +func (s *ActionsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Action, *Response, error) { + path := actionsBasePath + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(actionsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Actions, resp, err +} + +// Get an action by ID. +func (s *ActionsServiceOp) Get(ctx context.Context, id int) (*Action, *Response, error) { + if id < 1 { + return nil, nil, NewArgError("id", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d", actionsBasePath, id) + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Event, resp, err +} + +func (a Action) String() string { + return Stringify(a) +} diff --git a/vendor/github.com/digitalocean/godo/cdn.go b/vendor/github.com/digitalocean/godo/cdn.go new file mode 100644 index 000000000000..2e8f475aa73f --- /dev/null +++ b/vendor/github.com/digitalocean/godo/cdn.go @@ -0,0 +1,214 @@ +package godo + +import ( + "context" + "fmt" + "net/http" + "time" +) + +const cdnBasePath = "v2/cdn/endpoints" + +// CDNService is an interface for managing Spaces CDN with the DigitalOcean API. +type CDNService interface { + List(context.Context, *ListOptions) ([]CDN, *Response, error) + Get(context.Context, string) (*CDN, *Response, error) + Create(context.Context, *CDNCreateRequest) (*CDN, *Response, error) + UpdateTTL(context.Context, string, *CDNUpdateTTLRequest) (*CDN, *Response, error) + UpdateCustomDomain(context.Context, string, *CDNUpdateCustomDomainRequest) (*CDN, *Response, error) + FlushCache(context.Context, string, *CDNFlushCacheRequest) (*Response, error) + Delete(context.Context, string) (*Response, error) +} + +// CDNServiceOp handles communication with the CDN related methods of the +// DigitalOcean API. 
+type CDNServiceOp struct { + client *Client +} + +var _ CDNService = &CDNServiceOp{} + +// CDN represents a DigitalOcean CDN +type CDN struct { + ID string `json:"id"` + Origin string `json:"origin"` + Endpoint string `json:"endpoint"` + CreatedAt time.Time `json:"created_at"` + TTL uint32 `json:"ttl"` + CertificateID string `json:"certificate_id,omitempty"` + CustomDomain string `json:"custom_domain,omitempty"` +} + +// CDNRoot represents a response from the DigitalOcean API +type cdnRoot struct { + Endpoint *CDN `json:"endpoint"` +} + +type cdnsRoot struct { + Endpoints []CDN `json:"endpoints"` + Links *Links `json:"links"` +} + +// CDNCreateRequest represents a request to create a CDN. +type CDNCreateRequest struct { + Origin string `json:"origin"` + TTL uint32 `json:"ttl"` + CustomDomain string `json:"custom_domain,omitempty"` + CertificateID string `json:"certificate_id,omitempty"` +} + +// CDNUpdateTTLRequest represents a request to update the ttl of a CDN. +type CDNUpdateTTLRequest struct { + TTL uint32 `json:"ttl"` +} + +// CDNUpdateCustomDomainRequest represents a request to update the custom domain of a CDN. +type CDNUpdateCustomDomainRequest struct { + CustomDomain string `json:"custom_domain"` + CertificateID string `json:"certificate_id"` +} + +// CDNFlushCacheRequest represents a request to flush cache of a CDN. +type CDNFlushCacheRequest struct { + Files []string `json:"files"` +} + +// List all CDN endpoints +func (c CDNServiceOp) List(ctx context.Context, opt *ListOptions) ([]CDN, *Response, error) { + path, err := addOptions(cdnBasePath, opt) + if err != nil { + return nil, nil, err + } + + req, err := c.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(cdnsRoot) + resp, err := c.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Endpoints, resp, err +} + +// Get individual CDN. It requires a non-empty cdn id. 
+func (c CDNServiceOp) Get(ctx context.Context, id string) (*CDN, *Response, error) { + if len(id) == 0 { + return nil, nil, NewArgError("id", "cannot be an empty string") + } + + path := fmt.Sprintf("%s/%s", cdnBasePath, id) + + req, err := c.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(cdnRoot) + resp, err := c.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Endpoint, resp, err +} + +// Create a new CDN +func (c CDNServiceOp) Create(ctx context.Context, createRequest *CDNCreateRequest) (*CDN, *Response, error) { + if createRequest == nil { + return nil, nil, NewArgError("createRequest", "cannot be nil") + } + + req, err := c.client.NewRequest(ctx, http.MethodPost, cdnBasePath, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(cdnRoot) + resp, err := c.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Endpoint, resp, err +} + +// UpdateTTL updates the ttl of an individual CDN +func (c CDNServiceOp) UpdateTTL(ctx context.Context, id string, updateRequest *CDNUpdateTTLRequest) (*CDN, *Response, error) { + return c.update(ctx, id, updateRequest) +} + +// UpdateCustomDomain sets or removes the custom domain of an individual CDN +func (c CDNServiceOp) UpdateCustomDomain(ctx context.Context, id string, updateRequest *CDNUpdateCustomDomainRequest) (*CDN, *Response, error) { + return c.update(ctx, id, updateRequest) +} + +func (c CDNServiceOp) update(ctx context.Context, id string, updateRequest interface{}) (*CDN, *Response, error) { + if updateRequest == nil { + return nil, nil, NewArgError("updateRequest", "cannot be nil") + } + + if len(id) == 0 { + return nil, nil, NewArgError("id", "cannot be an empty string") + } + path := fmt.Sprintf("%s/%s", cdnBasePath, id) + + req, err := c.client.NewRequest(ctx, http.MethodPut, path, updateRequest) + if err != nil { + return nil, nil, err + } + + root := new(cdnRoot) + resp, err := c.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Endpoint, resp, err +} + +// FlushCache flushes the cache of an individual CDN. 
Requires a non-empty slice of file paths and/or wildcards +func (c CDNServiceOp) FlushCache(ctx context.Context, id string, flushCacheRequest *CDNFlushCacheRequest) (*Response, error) { + if flushCacheRequest == nil { + return nil, NewArgError("flushCacheRequest", "cannot be nil") + } + + if len(id) == 0 { + return nil, NewArgError("id", "cannot be an empty string") + } + + path := fmt.Sprintf("%s/%s/cache", cdnBasePath, id) + + req, err := c.client.NewRequest(ctx, http.MethodDelete, path, flushCacheRequest) + if err != nil { + return nil, err + } + + resp, err := c.client.Do(ctx, req, nil) + + return resp, err +} + +// Delete an individual CDN +func (c CDNServiceOp) Delete(ctx context.Context, id string) (*Response, error) { + if len(id) == 0 { + return nil, NewArgError("id", "cannot be an empty string") + } + + path := fmt.Sprintf("%s/%s", cdnBasePath, id) + + req, err := c.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + resp, err := c.client.Do(ctx, req, nil) + + return resp, err +} diff --git a/vendor/github.com/digitalocean/godo/certificates.go b/vendor/github.com/digitalocean/godo/certificates.go new file mode 100644 index 000000000000..cbc3e16ae822 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/certificates.go @@ -0,0 +1,126 @@ +package godo + +import ( + "context" + "net/http" + "path" +) + +const certificatesBasePath = "/v2/certificates" + +// CertificatesService is an interface for managing certificates with the DigitalOcean API. +// See: https://developers.digitalocean.com/documentation/v2/#certificates +type CertificatesService interface { + Get(context.Context, string) (*Certificate, *Response, error) + List(context.Context, *ListOptions) ([]Certificate, *Response, error) + Create(context.Context, *CertificateRequest) (*Certificate, *Response, error) + Delete(context.Context, string) (*Response, error) +} + +// Certificate represents a DigitalOcean certificate configuration. +type Certificate struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + DNSNames []string `json:"dns_names,omitempty"` + NotAfter string `json:"not_after,omitempty"` + SHA1Fingerprint string `json:"sha1_fingerprint,omitempty"` + Created string `json:"created_at,omitempty"` + State string `json:"state,omitempty"` + Type string `json:"type,omitempty"` +} + +// CertificateRequest represents configuration for a new certificate. +type CertificateRequest struct { + Name string `json:"name,omitempty"` + DNSNames []string `json:"dns_names,omitempty"` + PrivateKey string `json:"private_key,omitempty"` + LeafCertificate string `json:"leaf_certificate,omitempty"` + CertificateChain string `json:"certificate_chain,omitempty"` + Type string `json:"type,omitempty"` +} + +type certificateRoot struct { + Certificate *Certificate `json:"certificate"` +} + +type certificatesRoot struct { + Certificates []Certificate `json:"certificates"` + Links *Links `json:"links"` +} + +// CertificatesServiceOp handles communication with certificates methods of the DigitalOcean API. +type CertificatesServiceOp struct { + client *Client +} + +var _ CertificatesService = &CertificatesServiceOp{} + +// Get an existing certificate by its identifier. 
+func (c *CertificatesServiceOp) Get(ctx context.Context, cID string) (*Certificate, *Response, error) { + urlStr := path.Join(certificatesBasePath, cID) + + req, err := c.client.NewRequest(ctx, http.MethodGet, urlStr, nil) + if err != nil { + return nil, nil, err + } + + root := new(certificateRoot) + resp, err := c.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Certificate, resp, nil +} + +// List all certificates. +func (c *CertificatesServiceOp) List(ctx context.Context, opt *ListOptions) ([]Certificate, *Response, error) { + urlStr, err := addOptions(certificatesBasePath, opt) + if err != nil { + return nil, nil, err + } + + req, err := c.client.NewRequest(ctx, http.MethodGet, urlStr, nil) + if err != nil { + return nil, nil, err + } + + root := new(certificatesRoot) + resp, err := c.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Certificates, resp, nil +} + +// Create a new certificate with provided configuration. +func (c *CertificatesServiceOp) Create(ctx context.Context, cr *CertificateRequest) (*Certificate, *Response, error) { + req, err := c.client.NewRequest(ctx, http.MethodPost, certificatesBasePath, cr) + if err != nil { + return nil, nil, err + } + + root := new(certificateRoot) + resp, err := c.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Certificate, resp, nil +} + +// Delete a certificate by its identifier. +func (c *CertificatesServiceOp) Delete(ctx context.Context, cID string) (*Response, error) { + urlStr := path.Join(certificatesBasePath, cID) + + req, err := c.client.NewRequest(ctx, http.MethodDelete, urlStr, nil) + if err != nil { + return nil, err + } + + return c.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/digitalocean/godo/doc.go b/vendor/github.com/digitalocean/godo/doc.go new file mode 100644 index 000000000000..e660f794a2b8 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/doc.go @@ -0,0 +1,2 @@ +// Package godo is the DigtalOcean API v2 client for Go +package godo diff --git a/vendor/github.com/digitalocean/godo/domains.go b/vendor/github.com/digitalocean/godo/domains.go new file mode 100644 index 000000000000..1014e9b9588f --- /dev/null +++ b/vendor/github.com/digitalocean/godo/domains.go @@ -0,0 +1,337 @@ +package godo + +import ( + "context" + "fmt" + "net/http" +) + +const domainsBasePath = "v2/domains" + +// DomainsService is an interface for managing DNS with the DigitalOcean API. +// See: https://developers.digitalocean.com/documentation/v2#domains and +// https://developers.digitalocean.com/documentation/v2#domain-records +type DomainsService interface { + List(context.Context, *ListOptions) ([]Domain, *Response, error) + Get(context.Context, string) (*Domain, *Response, error) + Create(context.Context, *DomainCreateRequest) (*Domain, *Response, error) + Delete(context.Context, string) (*Response, error) + + Records(context.Context, string, *ListOptions) ([]DomainRecord, *Response, error) + Record(context.Context, string, int) (*DomainRecord, *Response, error) + DeleteRecord(context.Context, string, int) (*Response, error) + EditRecord(context.Context, string, int, *DomainRecordEditRequest) (*DomainRecord, *Response, error) + CreateRecord(context.Context, string, *DomainRecordEditRequest) (*DomainRecord, *Response, error) +} + +// DomainsServiceOp handles communication with the domain related methods of the +// DigitalOcean API. 
+type DomainsServiceOp struct { + client *Client +} + +var _ DomainsService = &DomainsServiceOp{} + +// Domain represents a DigitalOcean domain +type Domain struct { + Name string `json:"name"` + TTL int `json:"ttl"` + ZoneFile string `json:"zone_file"` +} + +// domainRoot represents a response from the DigitalOcean API +type domainRoot struct { + Domain *Domain `json:"domain"` +} + +type domainsRoot struct { + Domains []Domain `json:"domains"` + Links *Links `json:"links"` +} + +// DomainCreateRequest respresents a request to create a domain. +type DomainCreateRequest struct { + Name string `json:"name"` + IPAddress string `json:"ip_address,omitempty"` +} + +// DomainRecordRoot is the root of an individual Domain Record response +type domainRecordRoot struct { + DomainRecord *DomainRecord `json:"domain_record"` +} + +// DomainRecordsRoot is the root of a group of Domain Record responses +type domainRecordsRoot struct { + DomainRecords []DomainRecord `json:"domain_records"` + Links *Links `json:"links"` +} + +// DomainRecord represents a DigitalOcean DomainRecord +type DomainRecord struct { + ID int `json:"id,float64,omitempty"` + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + Data string `json:"data,omitempty"` + Priority int `json:"priority"` + Port int `json:"port,omitempty"` + TTL int `json:"ttl,omitempty"` + Weight int `json:"weight"` + Flags int `json:"flags"` + Tag string `json:"tag,omitempty"` +} + +// DomainRecordEditRequest represents a request to update a domain record. +type DomainRecordEditRequest struct { + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + Data string `json:"data,omitempty"` + Priority int `json:"priority"` + Port int `json:"port,omitempty"` + TTL int `json:"ttl,omitempty"` + Weight int `json:"weight"` + Flags int `json:"flags"` + Tag string `json:"tag,omitempty"` +} + +func (d Domain) String() string { + return Stringify(d) +} + +func (d Domain) URN() string { + return ToURN("Domain", d.Name) +} + +// List all domains. +func (s DomainsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Domain, *Response, error) { + path := domainsBasePath + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(domainsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Domains, resp, err +} + +// Get individual domain. It requires a non-empty domain name. 
+func (s *DomainsServiceOp) Get(ctx context.Context, name string) (*Domain, *Response, error) { + if len(name) < 1 { + return nil, nil, NewArgError("name", "cannot be an empty string") + } + + path := fmt.Sprintf("%s/%s", domainsBasePath, name) + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(domainRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Domain, resp, err +} + +// Create a new domain +func (s *DomainsServiceOp) Create(ctx context.Context, createRequest *DomainCreateRequest) (*Domain, *Response, error) { + if createRequest == nil { + return nil, nil, NewArgError("createRequest", "cannot be nil") + } + + path := domainsBasePath + + req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(domainRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Domain, resp, err +} + +// Delete domain +func (s *DomainsServiceOp) Delete(ctx context.Context, name string) (*Response, error) { + if len(name) < 1 { + return nil, NewArgError("name", "cannot be an empty string") + } + + path := fmt.Sprintf("%s/%s", domainsBasePath, name) + + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + + return resp, err +} + +// Converts a DomainRecord to a string. +func (d DomainRecord) String() string { + return Stringify(d) +} + +// Converts a DomainRecordEditRequest to a string. +func (d DomainRecordEditRequest) String() string { + return Stringify(d) +} + +// Records returns a slice of DomainRecords for a domain +func (s *DomainsServiceOp) Records(ctx context.Context, domain string, opt *ListOptions) ([]DomainRecord, *Response, error) { + if len(domain) < 1 { + return nil, nil, NewArgError("domain", "cannot be an empty string") + } + + path := fmt.Sprintf("%s/%s/records", domainsBasePath, domain) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(domainRecordsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.DomainRecords, resp, err +} + +// Record returns the record id from a domain +func (s *DomainsServiceOp) Record(ctx context.Context, domain string, id int) (*DomainRecord, *Response, error) { + if len(domain) < 1 { + return nil, nil, NewArgError("domain", "cannot be an empty string") + } + + if id < 1 { + return nil, nil, NewArgError("id", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%s/records/%d", domainsBasePath, domain, id) + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + record := new(domainRecordRoot) + resp, err := s.client.Do(ctx, req, record) + if err != nil { + return nil, resp, err + } + + return record.DomainRecord, resp, err +} + +// DeleteRecord deletes a record from a domain identified by id +func (s *DomainsServiceOp) DeleteRecord(ctx context.Context, domain string, id int) (*Response, error) { + if len(domain) < 1 { + return nil, NewArgError("domain", "cannot be an empty string") + } + + if id < 1 { + return nil, NewArgError("id", "cannot be less than 1") + } + + path 
:= fmt.Sprintf("%s/%s/records/%d", domainsBasePath, domain, id) + + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + + return resp, err +} + +// EditRecord edits a record using a DomainRecordEditRequest +func (s *DomainsServiceOp) EditRecord(ctx context.Context, + domain string, + id int, + editRequest *DomainRecordEditRequest, +) (*DomainRecord, *Response, error) { + if len(domain) < 1 { + return nil, nil, NewArgError("domain", "cannot be an empty string") + } + + if id < 1 { + return nil, nil, NewArgError("id", "cannot be less than 1") + } + + if editRequest == nil { + return nil, nil, NewArgError("editRequest", "cannot be nil") + } + + path := fmt.Sprintf("%s/%s/records/%d", domainsBasePath, domain, id) + + req, err := s.client.NewRequest(ctx, http.MethodPut, path, editRequest) + if err != nil { + return nil, nil, err + } + + root := new(domainRecordRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.DomainRecord, resp, err +} + +// CreateRecord creates a record using a DomainRecordEditRequest +func (s *DomainsServiceOp) CreateRecord(ctx context.Context, + domain string, + createRequest *DomainRecordEditRequest) (*DomainRecord, *Response, error) { + if len(domain) < 1 { + return nil, nil, NewArgError("domain", "cannot be empty string") + } + + if createRequest == nil { + return nil, nil, NewArgError("createRequest", "cannot be nil") + } + + path := fmt.Sprintf("%s/%s/records", domainsBasePath, domain) + req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest) + + if err != nil { + return nil, nil, err + } + + d := new(domainRecordRoot) + resp, err := s.client.Do(ctx, req, d) + if err != nil { + return nil, resp, err + } + + return d.DomainRecord, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/droplet_actions.go b/vendor/github.com/digitalocean/godo/droplet_actions.go new file mode 100644 index 000000000000..ddeacfc86249 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/droplet_actions.go @@ -0,0 +1,329 @@ +package godo + +import ( + "context" + "fmt" + "net/http" + "net/url" +) + +// ActionRequest reprents DigitalOcean Action Request +type ActionRequest map[string]interface{} + +// DropletActionsService is an interface for interfacing with the Droplet actions +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#droplet-actions +type DropletActionsService interface { + Shutdown(context.Context, int) (*Action, *Response, error) + ShutdownByTag(context.Context, string) ([]Action, *Response, error) + PowerOff(context.Context, int) (*Action, *Response, error) + PowerOffByTag(context.Context, string) ([]Action, *Response, error) + PowerOn(context.Context, int) (*Action, *Response, error) + PowerOnByTag(context.Context, string) ([]Action, *Response, error) + PowerCycle(context.Context, int) (*Action, *Response, error) + PowerCycleByTag(context.Context, string) ([]Action, *Response, error) + Reboot(context.Context, int) (*Action, *Response, error) + Restore(context.Context, int, int) (*Action, *Response, error) + Resize(context.Context, int, string, bool) (*Action, *Response, error) + Rename(context.Context, int, string) (*Action, *Response, error) + Snapshot(context.Context, int, string) (*Action, *Response, error) + SnapshotByTag(context.Context, string, string) ([]Action, *Response, error) + EnableBackups(context.Context, int) (*Action, *Response, 
error) + EnableBackupsByTag(context.Context, string) ([]Action, *Response, error) + DisableBackups(context.Context, int) (*Action, *Response, error) + DisableBackupsByTag(context.Context, string) ([]Action, *Response, error) + PasswordReset(context.Context, int) (*Action, *Response, error) + RebuildByImageID(context.Context, int, int) (*Action, *Response, error) + RebuildByImageSlug(context.Context, int, string) (*Action, *Response, error) + ChangeKernel(context.Context, int, int) (*Action, *Response, error) + EnableIPv6(context.Context, int) (*Action, *Response, error) + EnableIPv6ByTag(context.Context, string) ([]Action, *Response, error) + EnablePrivateNetworking(context.Context, int) (*Action, *Response, error) + EnablePrivateNetworkingByTag(context.Context, string) ([]Action, *Response, error) + Get(context.Context, int, int) (*Action, *Response, error) + GetByURI(context.Context, string) (*Action, *Response, error) +} + +// DropletActionsServiceOp handles communication with the Droplet action related +// methods of the DigitalOcean API. +type DropletActionsServiceOp struct { + client *Client +} + +var _ DropletActionsService = &DropletActionsServiceOp{} + +// Shutdown a Droplet +func (s *DropletActionsServiceOp) Shutdown(ctx context.Context, id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "shutdown"} + return s.doAction(ctx, id, request) +} + +// ShutdownByTag shuts down Droplets matched by a Tag. +func (s *DropletActionsServiceOp) ShutdownByTag(ctx context.Context, tag string) ([]Action, *Response, error) { + request := &ActionRequest{"type": "shutdown"} + return s.doActionByTag(ctx, tag, request) +} + +// PowerOff a Droplet +func (s *DropletActionsServiceOp) PowerOff(ctx context.Context, id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "power_off"} + return s.doAction(ctx, id, request) +} + +// PowerOffByTag powers off Droplets matched by a Tag. +func (s *DropletActionsServiceOp) PowerOffByTag(ctx context.Context, tag string) ([]Action, *Response, error) { + request := &ActionRequest{"type": "power_off"} + return s.doActionByTag(ctx, tag, request) +} + +// PowerOn a Droplet +func (s *DropletActionsServiceOp) PowerOn(ctx context.Context, id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "power_on"} + return s.doAction(ctx, id, request) +} + +// PowerOnByTag powers on Droplets matched by a Tag. +func (s *DropletActionsServiceOp) PowerOnByTag(ctx context.Context, tag string) ([]Action, *Response, error) { + request := &ActionRequest{"type": "power_on"} + return s.doActionByTag(ctx, tag, request) +} + +// PowerCycle a Droplet +func (s *DropletActionsServiceOp) PowerCycle(ctx context.Context, id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "power_cycle"} + return s.doAction(ctx, id, request) +} + +// PowerCycleByTag power cycles Droplets matched by a Tag. 
+func (s *DropletActionsServiceOp) PowerCycleByTag(ctx context.Context, tag string) ([]Action, *Response, error) { + request := &ActionRequest{"type": "power_cycle"} + return s.doActionByTag(ctx, tag, request) +} + +// Reboot a Droplet +func (s *DropletActionsServiceOp) Reboot(ctx context.Context, id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "reboot"} + return s.doAction(ctx, id, request) +} + +// Restore an image to a Droplet +func (s *DropletActionsServiceOp) Restore(ctx context.Context, id, imageID int) (*Action, *Response, error) { + requestType := "restore" + request := &ActionRequest{ + "type": requestType, + "image": float64(imageID), + } + return s.doAction(ctx, id, request) +} + +// Resize a Droplet +func (s *DropletActionsServiceOp) Resize(ctx context.Context, id int, sizeSlug string, resizeDisk bool) (*Action, *Response, error) { + requestType := "resize" + request := &ActionRequest{ + "type": requestType, + "size": sizeSlug, + "disk": resizeDisk, + } + return s.doAction(ctx, id, request) +} + +// Rename a Droplet +func (s *DropletActionsServiceOp) Rename(ctx context.Context, id int, name string) (*Action, *Response, error) { + requestType := "rename" + request := &ActionRequest{ + "type": requestType, + "name": name, + } + return s.doAction(ctx, id, request) +} + +// Snapshot a Droplet. +func (s *DropletActionsServiceOp) Snapshot(ctx context.Context, id int, name string) (*Action, *Response, error) { + requestType := "snapshot" + request := &ActionRequest{ + "type": requestType, + "name": name, + } + return s.doAction(ctx, id, request) +} + +// SnapshotByTag snapshots Droplets matched by a Tag. +func (s *DropletActionsServiceOp) SnapshotByTag(ctx context.Context, tag string, name string) ([]Action, *Response, error) { + requestType := "snapshot" + request := &ActionRequest{ + "type": requestType, + "name": name, + } + return s.doActionByTag(ctx, tag, request) +} + +// EnableBackups enables backups for a Droplet. +func (s *DropletActionsServiceOp) EnableBackups(ctx context.Context, id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "enable_backups"} + return s.doAction(ctx, id, request) +} + +// EnableBackupsByTag enables backups for Droplets matched by a Tag. +func (s *DropletActionsServiceOp) EnableBackupsByTag(ctx context.Context, tag string) ([]Action, *Response, error) { + request := &ActionRequest{"type": "enable_backups"} + return s.doActionByTag(ctx, tag, request) +} + +// DisableBackups disables backups for a Droplet. +func (s *DropletActionsServiceOp) DisableBackups(ctx context.Context, id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "disable_backups"} + return s.doAction(ctx, id, request) +} + +// DisableBackupsByTag disables backups for Droplet matched by a Tag. +func (s *DropletActionsServiceOp) DisableBackupsByTag(ctx context.Context, tag string) ([]Action, *Response, error) { + request := &ActionRequest{"type": "disable_backups"} + return s.doActionByTag(ctx, tag, request) +} + +// PasswordReset resets the password for a Droplet. +func (s *DropletActionsServiceOp) PasswordReset(ctx context.Context, id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "password_reset"} + return s.doAction(ctx, id, request) +} + +// RebuildByImageID rebuilds a Droplet from an image with a given id. 
+func (s *DropletActionsServiceOp) RebuildByImageID(ctx context.Context, id, imageID int) (*Action, *Response, error) { + request := &ActionRequest{"type": "rebuild", "image": imageID} + return s.doAction(ctx, id, request) +} + +// RebuildByImageSlug rebuilds a Droplet from an Image matched by a given Slug. +func (s *DropletActionsServiceOp) RebuildByImageSlug(ctx context.Context, id int, slug string) (*Action, *Response, error) { + request := &ActionRequest{"type": "rebuild", "image": slug} + return s.doAction(ctx, id, request) +} + +// ChangeKernel changes the kernel for a Droplet. +func (s *DropletActionsServiceOp) ChangeKernel(ctx context.Context, id, kernelID int) (*Action, *Response, error) { + request := &ActionRequest{"type": "change_kernel", "kernel": kernelID} + return s.doAction(ctx, id, request) +} + +// EnableIPv6 enables IPv6 for a Droplet. +func (s *DropletActionsServiceOp) EnableIPv6(ctx context.Context, id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "enable_ipv6"} + return s.doAction(ctx, id, request) +} + +// EnableIPv6ByTag enables IPv6 for Droplets matched by a Tag. +func (s *DropletActionsServiceOp) EnableIPv6ByTag(ctx context.Context, tag string) ([]Action, *Response, error) { + request := &ActionRequest{"type": "enable_ipv6"} + return s.doActionByTag(ctx, tag, request) +} + +// EnablePrivateNetworking enables private networking for a Droplet. +func (s *DropletActionsServiceOp) EnablePrivateNetworking(ctx context.Context, id int) (*Action, *Response, error) { + request := &ActionRequest{"type": "enable_private_networking"} + return s.doAction(ctx, id, request) +} + +// EnablePrivateNetworkingByTag enables private networking for Droplets matched by a Tag. +func (s *DropletActionsServiceOp) EnablePrivateNetworkingByTag(ctx context.Context, tag string) ([]Action, *Response, error) { + request := &ActionRequest{"type": "enable_private_networking"} + return s.doActionByTag(ctx, tag, request) +} + +func (s *DropletActionsServiceOp) doAction(ctx context.Context, id int, request *ActionRequest) (*Action, *Response, error) { + if id < 1 { + return nil, nil, NewArgError("id", "cannot be less than 1") + } + + if request == nil { + return nil, nil, NewArgError("request", "request can't be nil") + } + + path := dropletActionPath(id) + + req, err := s.client.NewRequest(ctx, http.MethodPost, path, request) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Event, resp, err +} + +func (s *DropletActionsServiceOp) doActionByTag(ctx context.Context, tag string, request *ActionRequest) ([]Action, *Response, error) { + if tag == "" { + return nil, nil, NewArgError("tag", "cannot be empty") + } + + if request == nil { + return nil, nil, NewArgError("request", "request can't be nil") + } + + path := dropletActionPathByTag(tag) + + req, err := s.client.NewRequest(ctx, http.MethodPost, path, request) + if err != nil { + return nil, nil, err + } + + root := new(actionsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Actions, resp, err +} + +// Get an action for a particular Droplet by id. 
+func (s *DropletActionsServiceOp) Get(ctx context.Context, dropletID, actionID int) (*Action, *Response, error) { + if dropletID < 1 { + return nil, nil, NewArgError("dropletID", "cannot be less than 1") + } + + if actionID < 1 { + return nil, nil, NewArgError("actionID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d", dropletActionPath(dropletID), actionID) + return s.get(ctx, path) +} + +// GetByURI gets an action for a particular Droplet by id. +func (s *DropletActionsServiceOp) GetByURI(ctx context.Context, rawurl string) (*Action, *Response, error) { + u, err := url.Parse(rawurl) + if err != nil { + return nil, nil, err + } + + return s.get(ctx, u.Path) + +} + +func (s *DropletActionsServiceOp) get(ctx context.Context, path string) (*Action, *Response, error) { + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Event, resp, err + +} + +func dropletActionPath(dropletID int) string { + return fmt.Sprintf("v2/droplets/%d/actions", dropletID) +} + +func dropletActionPathByTag(tag string) string { + return fmt.Sprintf("v2/droplets/actions?tag_name=%s", tag) +} diff --git a/vendor/github.com/digitalocean/godo/droplets.go b/vendor/github.com/digitalocean/godo/droplets.go new file mode 100644 index 000000000000..ab508f1c080e --- /dev/null +++ b/vendor/github.com/digitalocean/godo/droplets.go @@ -0,0 +1,570 @@ +package godo + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" +) + +const dropletBasePath = "v2/droplets" + +var errNoNetworks = errors.New("no networks have been defined") + +// DropletsService is an interface for interfacing with the Droplet +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#droplets +type DropletsService interface { + List(context.Context, *ListOptions) ([]Droplet, *Response, error) + ListByTag(context.Context, string, *ListOptions) ([]Droplet, *Response, error) + Get(context.Context, int) (*Droplet, *Response, error) + Create(context.Context, *DropletCreateRequest) (*Droplet, *Response, error) + CreateMultiple(context.Context, *DropletMultiCreateRequest) ([]Droplet, *Response, error) + Delete(context.Context, int) (*Response, error) + DeleteByTag(context.Context, string) (*Response, error) + Kernels(context.Context, int, *ListOptions) ([]Kernel, *Response, error) + Snapshots(context.Context, int, *ListOptions) ([]Image, *Response, error) + Backups(context.Context, int, *ListOptions) ([]Image, *Response, error) + Actions(context.Context, int, *ListOptions) ([]Action, *Response, error) + Neighbors(context.Context, int) ([]Droplet, *Response, error) +} + +// DropletsServiceOp handles communication with the Droplet related methods of the +// DigitalOcean API. 
+type DropletsServiceOp struct { + client *Client +} + +var _ DropletsService = &DropletsServiceOp{} + +// Droplet represents a DigitalOcean Droplet +type Droplet struct { + ID int `json:"id,float64,omitempty"` + Name string `json:"name,omitempty"` + Memory int `json:"memory,omitempty"` + Vcpus int `json:"vcpus,omitempty"` + Disk int `json:"disk,omitempty"` + Region *Region `json:"region,omitempty"` + Image *Image `json:"image,omitempty"` + Size *Size `json:"size,omitempty"` + SizeSlug string `json:"size_slug,omitempty"` + BackupIDs []int `json:"backup_ids,omitempty"` + NextBackupWindow *BackupWindow `json:"next_backup_window,omitempty"` + SnapshotIDs []int `json:"snapshot_ids,omitempty"` + Features []string `json:"features,omitempty"` + Locked bool `json:"locked,bool,omitempty"` + Status string `json:"status,omitempty"` + Networks *Networks `json:"networks,omitempty"` + Created string `json:"created_at,omitempty"` + Kernel *Kernel `json:"kernel,omitempty"` + Tags []string `json:"tags,omitempty"` + VolumeIDs []string `json:"volume_ids"` +} + +// PublicIPv4 returns the public IPv4 address for the Droplet. +func (d *Droplet) PublicIPv4() (string, error) { + if d.Networks == nil { + return "", errNoNetworks + } + + for _, v4 := range d.Networks.V4 { + if v4.Type == "public" { + return v4.IPAddress, nil + } + } + + return "", nil +} + +// PrivateIPv4 returns the private IPv4 address for the Droplet. +func (d *Droplet) PrivateIPv4() (string, error) { + if d.Networks == nil { + return "", errNoNetworks + } + + for _, v4 := range d.Networks.V4 { + if v4.Type == "private" { + return v4.IPAddress, nil + } + } + + return "", nil +} + +// PublicIPv6 returns the public IPv6 address for the Droplet. +func (d *Droplet) PublicIPv6() (string, error) { + if d.Networks == nil { + return "", errNoNetworks + } + + for _, v6 := range d.Networks.V6 { + if v6.Type == "public" { + return v6.IPAddress, nil + } + } + + return "", nil +} + +// Kernel object +type Kernel struct { + ID int `json:"id,float64,omitempty"` + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` +} + +// BackupWindow object +type BackupWindow struct { + Start *Timestamp `json:"start,omitempty"` + End *Timestamp `json:"end,omitempty"` +} + +// Convert Droplet to a string +func (d Droplet) String() string { + return Stringify(d) +} + +func (d Droplet) URN() string { + return ToURN("Droplet", d.ID) +} + +// DropletRoot represents a Droplet root +type dropletRoot struct { + Droplet *Droplet `json:"droplet"` + Links *Links `json:"links,omitempty"` +} + +type dropletsRoot struct { + Droplets []Droplet `json:"droplets"` + Links *Links `json:"links"` +} + +type kernelsRoot struct { + Kernels []Kernel `json:"kernels,omitempty"` + Links *Links `json:"links"` +} + +type dropletSnapshotsRoot struct { + Snapshots []Image `json:"snapshots,omitempty"` + Links *Links `json:"links"` +} + +type backupsRoot struct { + Backups []Image `json:"backups,omitempty"` + Links *Links `json:"links"` +} + +// DropletCreateImage identifies an image for the create request. It prefers slug over ID. +type DropletCreateImage struct { + ID int + Slug string +} + +// MarshalJSON returns either the slug or id of the image. It returns the id +// if the slug is empty. +func (d DropletCreateImage) MarshalJSON() ([]byte, error) { + if d.Slug != "" { + return json.Marshal(d.Slug) + } + + return json.Marshal(d.ID) +} + +// DropletCreateVolume identifies a volume to attach for the create request. 
It +// prefers Name over ID, +type DropletCreateVolume struct { + ID string + Name string +} + +// MarshalJSON returns an object with either the name or id of the volume. It +// returns the id if the name is empty. +func (d DropletCreateVolume) MarshalJSON() ([]byte, error) { + if d.Name != "" { + return json.Marshal(struct { + Name string `json:"name"` + }{Name: d.Name}) + } + + return json.Marshal(struct { + ID string `json:"id"` + }{ID: d.ID}) +} + +// DropletCreateSSHKey identifies a SSH Key for the create request. It prefers fingerprint over ID. +type DropletCreateSSHKey struct { + ID int + Fingerprint string +} + +// MarshalJSON returns either the fingerprint or id of the ssh key. It returns +// the id if the fingerprint is empty. +func (d DropletCreateSSHKey) MarshalJSON() ([]byte, error) { + if d.Fingerprint != "" { + return json.Marshal(d.Fingerprint) + } + + return json.Marshal(d.ID) +} + +// DropletCreateRequest represents a request to create a Droplet. +type DropletCreateRequest struct { + Name string `json:"name"` + Region string `json:"region"` + Size string `json:"size"` + Image DropletCreateImage `json:"image"` + SSHKeys []DropletCreateSSHKey `json:"ssh_keys"` + Backups bool `json:"backups"` + IPv6 bool `json:"ipv6"` + PrivateNetworking bool `json:"private_networking"` + Monitoring bool `json:"monitoring"` + UserData string `json:"user_data,omitempty"` + Volumes []DropletCreateVolume `json:"volumes,omitempty"` + Tags []string `json:"tags"` +} + +// DropletMultiCreateRequest is a request to create multiple Droplets. +type DropletMultiCreateRequest struct { + Names []string `json:"names"` + Region string `json:"region"` + Size string `json:"size"` + Image DropletCreateImage `json:"image"` + SSHKeys []DropletCreateSSHKey `json:"ssh_keys"` + Backups bool `json:"backups"` + IPv6 bool `json:"ipv6"` + PrivateNetworking bool `json:"private_networking"` + Monitoring bool `json:"monitoring"` + UserData string `json:"user_data,omitempty"` + Tags []string `json:"tags"` +} + +func (d DropletCreateRequest) String() string { + return Stringify(d) +} + +func (d DropletMultiCreateRequest) String() string { + return Stringify(d) +} + +// Networks represents the Droplet's Networks. +type Networks struct { + V4 []NetworkV4 `json:"v4,omitempty"` + V6 []NetworkV6 `json:"v6,omitempty"` +} + +// NetworkV4 represents a DigitalOcean IPv4 Network. +type NetworkV4 struct { + IPAddress string `json:"ip_address,omitempty"` + Netmask string `json:"netmask,omitempty"` + Gateway string `json:"gateway,omitempty"` + Type string `json:"type,omitempty"` +} + +func (n NetworkV4) String() string { + return Stringify(n) +} + +// NetworkV6 represents a DigitalOcean IPv6 network. +type NetworkV6 struct { + IPAddress string `json:"ip_address,omitempty"` + Netmask int `json:"netmask,omitempty"` + Gateway string `json:"gateway,omitempty"` + Type string `json:"type,omitempty"` +} + +func (n NetworkV6) String() string { + return Stringify(n) +} + +// Performs a list request given a path. +func (s *DropletsServiceOp) list(ctx context.Context, path string) ([]Droplet, *Response, error) { + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dropletsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Droplets, resp, err +} + +// List all Droplets. 
+func (s *DropletsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Droplet, *Response, error) { + path := dropletBasePath + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + return s.list(ctx, path) +} + +// ListByTag lists all Droplets matched by a Tag. +func (s *DropletsServiceOp) ListByTag(ctx context.Context, tag string, opt *ListOptions) ([]Droplet, *Response, error) { + path := fmt.Sprintf("%s?tag_name=%s", dropletBasePath, tag) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + return s.list(ctx, path) +} + +// Get individual Droplet. +func (s *DropletsServiceOp) Get(ctx context.Context, dropletID int) (*Droplet, *Response, error) { + if dropletID < 1 { + return nil, nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d", dropletBasePath, dropletID) + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dropletRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Droplet, resp, err +} + +// Create Droplet +func (s *DropletsServiceOp) Create(ctx context.Context, createRequest *DropletCreateRequest) (*Droplet, *Response, error) { + if createRequest == nil { + return nil, nil, NewArgError("createRequest", "cannot be nil") + } + + path := dropletBasePath + + req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(dropletRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Droplet, resp, err +} + +// CreateMultiple creates multiple Droplets. +func (s *DropletsServiceOp) CreateMultiple(ctx context.Context, createRequest *DropletMultiCreateRequest) ([]Droplet, *Response, error) { + if createRequest == nil { + return nil, nil, NewArgError("createRequest", "cannot be nil") + } + + path := dropletBasePath + + req, err := s.client.NewRequest(ctx, http.MethodPost, path, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(dropletsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Droplets, resp, err +} + +// Performs a delete request given a path +func (s *DropletsServiceOp) delete(ctx context.Context, path string) (*Response, error) { + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + + return resp, err +} + +// Delete Droplet. +func (s *DropletsServiceOp) Delete(ctx context.Context, dropletID int) (*Response, error) { + if dropletID < 1 { + return nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d", dropletBasePath, dropletID) + + return s.delete(ctx, path) +} + +// DeleteByTag deletes Droplets matched by a Tag. +func (s *DropletsServiceOp) DeleteByTag(ctx context.Context, tag string) (*Response, error) { + if tag == "" { + return nil, NewArgError("tag", "cannot be empty") + } + + path := fmt.Sprintf("%s?tag_name=%s", dropletBasePath, tag) + + return s.delete(ctx, path) +} + +// Kernels lists kernels available for a Droplet. 
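Editor's note: a usage sketch for the create and delete calls above. The region, size, and image slugs are placeholder values, the package name is arbitrary, and `client` is assumed to be an authenticated *godo.Client (client construction is covered where godo.go defines NewClient, further down in this patch).

```go
package example

import (
	"context"
	"log"

	"github.com/digitalocean/godo"
)

// createAndCleanup creates a tagged Droplet and later removes everything
// carrying that tag. Slugs and names are illustrative only.
func createAndCleanup(ctx context.Context, client *godo.Client) error {
	droplet, _, err := client.Droplets.Create(ctx, &godo.DropletCreateRequest{
		Name:   "example-web-1",
		Region: "nyc3",
		Size:   "s-1vcpu-1gb",
		Image:  godo.DropletCreateImage{Slug: "ubuntu-18-04-x64"},
		Tags:   []string{"example"},
	})
	if err != nil {
		return err
	}
	log.Printf("created droplet %d (%s)", droplet.ID, droplet.Name)

	// ListByTag pages like any other list call; nil options use the API defaults.
	tagged, _, err := client.Droplets.ListByTag(ctx, "example", nil)
	if err != nil {
		return err
	}
	log.Printf("%d droplets carry the tag", len(tagged))

	// DeleteByTag removes every droplet with the tag in a single request.
	_, err = client.Droplets.DeleteByTag(ctx, "example")
	return err
}
```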
+func (s *DropletsServiceOp) Kernels(ctx context.Context, dropletID int, opt *ListOptions) ([]Kernel, *Response, error) { + if dropletID < 1 { + return nil, nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d/kernels", dropletBasePath, dropletID) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(kernelsRoot) + resp, err := s.client.Do(ctx, req, root) + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Kernels, resp, err +} + +// Actions lists the actions for a Droplet. +func (s *DropletsServiceOp) Actions(ctx context.Context, dropletID int, opt *ListOptions) ([]Action, *Response, error) { + if dropletID < 1 { + return nil, nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d/actions", dropletBasePath, dropletID) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(actionsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Actions, resp, err +} + +// Backups lists the backups for a Droplet. +func (s *DropletsServiceOp) Backups(ctx context.Context, dropletID int, opt *ListOptions) ([]Image, *Response, error) { + if dropletID < 1 { + return nil, nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d/backups", dropletBasePath, dropletID) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(backupsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Backups, resp, err +} + +// Snapshots lists the snapshots available for a Droplet. +func (s *DropletsServiceOp) Snapshots(ctx context.Context, dropletID int, opt *ListOptions) ([]Image, *Response, error) { + if dropletID < 1 { + return nil, nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d/snapshots", dropletBasePath, dropletID) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dropletSnapshotsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Snapshots, resp, err +} + +// Neighbors lists the neighbors for a Droplet. 
+func (s *DropletsServiceOp) Neighbors(ctx context.Context, dropletID int) ([]Droplet, *Response, error) { + if dropletID < 1 { + return nil, nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d/neighbors", dropletBasePath, dropletID) + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(dropletsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Droplets, resp, err +} + +func (s *DropletsServiceOp) dropletActionStatus(ctx context.Context, uri string) (string, error) { + action, _, err := s.client.DropletActions.GetByURI(ctx, uri) + + if err != nil { + return "", err + } + + return action.Status, nil +} diff --git a/vendor/github.com/digitalocean/godo/errors.go b/vendor/github.com/digitalocean/godo/errors.go new file mode 100644 index 000000000000..a65ebd76b6ce --- /dev/null +++ b/vendor/github.com/digitalocean/godo/errors.go @@ -0,0 +1,24 @@ +package godo + +import "fmt" + +// ArgError is an error that represents an error with an input to godo. It +// identifies the argument and the cause (if possible). +type ArgError struct { + arg string + reason string +} + +var _ error = &ArgError{} + +// NewArgError creates an InputError. +func NewArgError(arg, reason string) *ArgError { + return &ArgError{ + arg: arg, + reason: reason, + } +} + +func (e *ArgError) Error() string { + return fmt.Sprintf("%s is invalid because %s", e.arg, e.reason) +} diff --git a/vendor/github.com/digitalocean/godo/firewalls.go b/vendor/github.com/digitalocean/godo/firewalls.go new file mode 100644 index 000000000000..c28cac03b816 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/firewalls.go @@ -0,0 +1,267 @@ +package godo + +import ( + "context" + "net/http" + "path" + "strconv" +) + +const firewallsBasePath = "/v2/firewalls" + +// FirewallsService is an interface for managing Firewalls with the DigitalOcean API. +// See: https://developers.digitalocean.com/documentation/documentation/v2/#firewalls +type FirewallsService interface { + Get(context.Context, string) (*Firewall, *Response, error) + Create(context.Context, *FirewallRequest) (*Firewall, *Response, error) + Update(context.Context, string, *FirewallRequest) (*Firewall, *Response, error) + Delete(context.Context, string) (*Response, error) + List(context.Context, *ListOptions) ([]Firewall, *Response, error) + ListByDroplet(context.Context, int, *ListOptions) ([]Firewall, *Response, error) + AddDroplets(context.Context, string, ...int) (*Response, error) + RemoveDroplets(context.Context, string, ...int) (*Response, error) + AddTags(context.Context, string, ...string) (*Response, error) + RemoveTags(context.Context, string, ...string) (*Response, error) + AddRules(context.Context, string, *FirewallRulesRequest) (*Response, error) + RemoveRules(context.Context, string, *FirewallRulesRequest) (*Response, error) +} + +// FirewallsServiceOp handles communication with Firewalls methods of the DigitalOcean API. +type FirewallsServiceOp struct { + client *Client +} + +// Firewall represents a DigitalOcean Firewall configuration. 
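Editor's note: godo surfaces input-validation failures (for example the dropletID < 1 checks above) as *ArgError before any HTTP request is made. A sketch of telling that case apart from transport or API errors; the function and package names are placeholders:

```go
package example

import (
	"context"
	"errors"
	"log"

	"github.com/digitalocean/godo"
)

// getDroplet illustrates distinguishing *godo.ArgError from other failures.
func getDroplet(ctx context.Context, client *godo.Client, id int) (*godo.Droplet, error) {
	droplet, _, err := client.Droplets.Get(ctx, id)
	if err != nil {
		var argErr *godo.ArgError
		if errors.As(err, &argErr) {
			// The request never left the process; the input itself was rejected.
			log.Printf("bad argument: %v", argErr)
			return nil, err
		}
		// Anything else came back from the HTTP round trip or the API itself.
		return nil, err
	}
	return droplet, nil
}
```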
+type Firewall struct { + ID string `json:"id"` + Name string `json:"name"` + Status string `json:"status"` + InboundRules []InboundRule `json:"inbound_rules"` + OutboundRules []OutboundRule `json:"outbound_rules"` + DropletIDs []int `json:"droplet_ids"` + Tags []string `json:"tags"` + Created string `json:"created_at"` + PendingChanges []PendingChange `json:"pending_changes"` +} + +// String creates a human-readable description of a Firewall. +func (fw Firewall) String() string { + return Stringify(fw) +} + +func (fw Firewall) URN() string { + return ToURN("Firewall", fw.ID) +} + +// FirewallRequest represents the configuration to be applied to an existing or a new Firewall. +type FirewallRequest struct { + Name string `json:"name"` + InboundRules []InboundRule `json:"inbound_rules"` + OutboundRules []OutboundRule `json:"outbound_rules"` + DropletIDs []int `json:"droplet_ids"` + Tags []string `json:"tags"` +} + +// FirewallRulesRequest represents rules configuration to be applied to an existing Firewall. +type FirewallRulesRequest struct { + InboundRules []InboundRule `json:"inbound_rules"` + OutboundRules []OutboundRule `json:"outbound_rules"` +} + +// InboundRule represents a DigitalOcean Firewall inbound rule. +type InboundRule struct { + Protocol string `json:"protocol,omitempty"` + PortRange string `json:"ports,omitempty"` + Sources *Sources `json:"sources"` +} + +// OutboundRule represents a DigitalOcean Firewall outbound rule. +type OutboundRule struct { + Protocol string `json:"protocol,omitempty"` + PortRange string `json:"ports,omitempty"` + Destinations *Destinations `json:"destinations"` +} + +// Sources represents a DigitalOcean Firewall InboundRule sources. +type Sources struct { + Addresses []string `json:"addresses,omitempty"` + Tags []string `json:"tags,omitempty"` + DropletIDs []int `json:"droplet_ids,omitempty"` + LoadBalancerUIDs []string `json:"load_balancer_uids,omitempty"` +} + +// PendingChange represents a DigitalOcean Firewall status details. +type PendingChange struct { + DropletID int `json:"droplet_id,omitempty"` + Removing bool `json:"removing,omitempty"` + Status string `json:"status,omitempty"` +} + +// Destinations represents a DigitalOcean Firewall OutboundRule destinations. +type Destinations struct { + Addresses []string `json:"addresses,omitempty"` + Tags []string `json:"tags,omitempty"` + DropletIDs []int `json:"droplet_ids,omitempty"` + LoadBalancerUIDs []string `json:"load_balancer_uids,omitempty"` +} + +var _ FirewallsService = &FirewallsServiceOp{} + +// Get an existing Firewall by its identifier. +func (fw *FirewallsServiceOp) Get(ctx context.Context, fID string) (*Firewall, *Response, error) { + path := path.Join(firewallsBasePath, fID) + + req, err := fw.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(firewallRoot) + resp, err := fw.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Firewall, resp, err +} + +// Create a new Firewall with a given configuration. +func (fw *FirewallsServiceOp) Create(ctx context.Context, fr *FirewallRequest) (*Firewall, *Response, error) { + req, err := fw.client.NewRequest(ctx, http.MethodPost, firewallsBasePath, fr) + if err != nil { + return nil, nil, err + } + + root := new(firewallRoot) + resp, err := fw.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Firewall, resp, err +} + +// Update an existing Firewall with new configuration. 
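Editor's note: to make the rule types above concrete, here is a hedged sketch of creating a firewall that admits SSH from one address range and allows outbound TCP everywhere. The name, CIDRs, ports, and the "all" port range are illustrative values, not a prescription of the API's accepted inputs.

```go
package example

import (
	"context"

	"github.com/digitalocean/godo"
)

// createFirewall sketches FirewallsService.Create with example rule values.
func createFirewall(ctx context.Context, client *godo.Client, dropletID int) (*godo.Firewall, error) {
	req := &godo.FirewallRequest{
		Name: "example-ssh-only",
		InboundRules: []godo.InboundRule{
			{
				Protocol:  "tcp",
				PortRange: "22",
				Sources:   &godo.Sources{Addresses: []string{"192.0.2.0/24"}},
			},
		},
		OutboundRules: []godo.OutboundRule{
			{
				Protocol:     "tcp",
				PortRange:    "all", // example value for "every port"
				Destinations: &godo.Destinations{Addresses: []string{"0.0.0.0/0", "::/0"}},
			},
		},
		DropletIDs: []int{dropletID},
	}

	fw, _, err := client.Firewalls.Create(ctx, req)
	return fw, err
}
```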
+func (fw *FirewallsServiceOp) Update(ctx context.Context, fID string, fr *FirewallRequest) (*Firewall, *Response, error) { + path := path.Join(firewallsBasePath, fID) + + req, err := fw.client.NewRequest(ctx, "PUT", path, fr) + if err != nil { + return nil, nil, err + } + + root := new(firewallRoot) + resp, err := fw.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Firewall, resp, err +} + +// Delete a Firewall by its identifier. +func (fw *FirewallsServiceOp) Delete(ctx context.Context, fID string) (*Response, error) { + path := path.Join(firewallsBasePath, fID) + return fw.createAndDoReq(ctx, http.MethodDelete, path, nil) +} + +// List Firewalls. +func (fw *FirewallsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Firewall, *Response, error) { + path, err := addOptions(firewallsBasePath, opt) + if err != nil { + return nil, nil, err + } + + return fw.listHelper(ctx, path) +} + +// ListByDroplet Firewalls. +func (fw *FirewallsServiceOp) ListByDroplet(ctx context.Context, dID int, opt *ListOptions) ([]Firewall, *Response, error) { + basePath := path.Join(dropletBasePath, strconv.Itoa(dID), "firewalls") + path, err := addOptions(basePath, opt) + if err != nil { + return nil, nil, err + } + + return fw.listHelper(ctx, path) +} + +// AddDroplets to a Firewall. +func (fw *FirewallsServiceOp) AddDroplets(ctx context.Context, fID string, dropletIDs ...int) (*Response, error) { + path := path.Join(firewallsBasePath, fID, "droplets") + return fw.createAndDoReq(ctx, http.MethodPost, path, &dropletsRequest{IDs: dropletIDs}) +} + +// RemoveDroplets from a Firewall. +func (fw *FirewallsServiceOp) RemoveDroplets(ctx context.Context, fID string, dropletIDs ...int) (*Response, error) { + path := path.Join(firewallsBasePath, fID, "droplets") + return fw.createAndDoReq(ctx, http.MethodDelete, path, &dropletsRequest{IDs: dropletIDs}) +} + +// AddTags to a Firewall. +func (fw *FirewallsServiceOp) AddTags(ctx context.Context, fID string, tags ...string) (*Response, error) { + path := path.Join(firewallsBasePath, fID, "tags") + return fw.createAndDoReq(ctx, http.MethodPost, path, &tagsRequest{Tags: tags}) +} + +// RemoveTags from a Firewall. +func (fw *FirewallsServiceOp) RemoveTags(ctx context.Context, fID string, tags ...string) (*Response, error) { + path := path.Join(firewallsBasePath, fID, "tags") + return fw.createAndDoReq(ctx, http.MethodDelete, path, &tagsRequest{Tags: tags}) +} + +// AddRules to a Firewall. +func (fw *FirewallsServiceOp) AddRules(ctx context.Context, fID string, rr *FirewallRulesRequest) (*Response, error) { + path := path.Join(firewallsBasePath, fID, "rules") + return fw.createAndDoReq(ctx, http.MethodPost, path, rr) +} + +// RemoveRules from a Firewall. 
+func (fw *FirewallsServiceOp) RemoveRules(ctx context.Context, fID string, rr *FirewallRulesRequest) (*Response, error) { + path := path.Join(firewallsBasePath, fID, "rules") + return fw.createAndDoReq(ctx, http.MethodDelete, path, rr) +} + +type dropletsRequest struct { + IDs []int `json:"droplet_ids"` +} + +type tagsRequest struct { + Tags []string `json:"tags"` +} + +type firewallRoot struct { + Firewall *Firewall `json:"firewall"` +} + +type firewallsRoot struct { + Firewalls []Firewall `json:"firewalls"` + Links *Links `json:"links"` +} + +func (fw *FirewallsServiceOp) createAndDoReq(ctx context.Context, method, path string, v interface{}) (*Response, error) { + req, err := fw.client.NewRequest(ctx, method, path, v) + if err != nil { + return nil, err + } + + return fw.client.Do(ctx, req, nil) +} + +func (fw *FirewallsServiceOp) listHelper(ctx context.Context, path string) ([]Firewall, *Response, error) { + req, err := fw.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(firewallsRoot) + resp, err := fw.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Firewalls, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/floating_ips.go b/vendor/github.com/digitalocean/godo/floating_ips.go new file mode 100644 index 000000000000..4545e90373f7 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/floating_ips.go @@ -0,0 +1,139 @@ +package godo + +import ( + "context" + "fmt" + "net/http" +) + +const floatingBasePath = "v2/floating_ips" + +// FloatingIPsService is an interface for interfacing with the floating IPs +// endpoints of the Digital Ocean API. +// See: https://developers.digitalocean.com/documentation/v2#floating-ips +type FloatingIPsService interface { + List(context.Context, *ListOptions) ([]FloatingIP, *Response, error) + Get(context.Context, string) (*FloatingIP, *Response, error) + Create(context.Context, *FloatingIPCreateRequest) (*FloatingIP, *Response, error) + Delete(context.Context, string) (*Response, error) +} + +// FloatingIPsServiceOp handles communication with the floating IPs related methods of the +// DigitalOcean API. +type FloatingIPsServiceOp struct { + client *Client +} + +var _ FloatingIPsService = &FloatingIPsServiceOp{} + +// FloatingIP represents a Digital Ocean floating IP. +type FloatingIP struct { + Region *Region `json:"region"` + Droplet *Droplet `json:"droplet"` + IP string `json:"ip"` +} + +func (f FloatingIP) String() string { + return Stringify(f) +} + +func (f FloatingIP) URN() string { + return ToURN("FloatingIP", f.IP) +} + +type floatingIPsRoot struct { + FloatingIPs []FloatingIP `json:"floating_ips"` + Links *Links `json:"links"` +} + +type floatingIPRoot struct { + FloatingIP *FloatingIP `json:"floating_ip"` + Links *Links `json:"links,omitempty"` +} + +// FloatingIPCreateRequest represents a request to create a floating IP. +// If DropletID is not empty, the floating IP will be assigned to the +// droplet. +type FloatingIPCreateRequest struct { + Region string `json:"region"` + DropletID int `json:"droplet_id,omitempty"` +} + +// List all floating IPs. 
+func (f *FloatingIPsServiceOp) List(ctx context.Context, opt *ListOptions) ([]FloatingIP, *Response, error) { + path := floatingBasePath + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := f.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(floatingIPsRoot) + resp, err := f.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.FloatingIPs, resp, err +} + +// Get an individual floating IP. +func (f *FloatingIPsServiceOp) Get(ctx context.Context, ip string) (*FloatingIP, *Response, error) { + path := fmt.Sprintf("%s/%s", floatingBasePath, ip) + + req, err := f.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(floatingIPRoot) + resp, err := f.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.FloatingIP, resp, err +} + +// Create a floating IP. If the DropletID field of the request is not empty, +// the floating IP will also be assigned to the droplet. +func (f *FloatingIPsServiceOp) Create(ctx context.Context, createRequest *FloatingIPCreateRequest) (*FloatingIP, *Response, error) { + path := floatingBasePath + + req, err := f.client.NewRequest(ctx, http.MethodPost, path, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(floatingIPRoot) + resp, err := f.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.FloatingIP, resp, err +} + +// Delete a floating IP. +func (f *FloatingIPsServiceOp) Delete(ctx context.Context, ip string) (*Response, error) { + path := fmt.Sprintf("%s/%s", floatingBasePath, ip) + + req, err := f.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + resp, err := f.client.Do(ctx, req, nil) + + return resp, err +} diff --git a/vendor/github.com/digitalocean/godo/floating_ips_actions.go b/vendor/github.com/digitalocean/godo/floating_ips_actions.go new file mode 100644 index 000000000000..74ad279f9500 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/floating_ips_actions.go @@ -0,0 +1,109 @@ +package godo + +import ( + "context" + "fmt" + "net/http" +) + +// FloatingIPActionsService is an interface for interfacing with the +// floating IPs actions endpoints of the Digital Ocean API. +// See: https://developers.digitalocean.com/documentation/v2#floating-ips-action +type FloatingIPActionsService interface { + Assign(ctx context.Context, ip string, dropletID int) (*Action, *Response, error) + Unassign(ctx context.Context, ip string) (*Action, *Response, error) + Get(ctx context.Context, ip string, actionID int) (*Action, *Response, error) + List(ctx context.Context, ip string, opt *ListOptions) ([]Action, *Response, error) +} + +// FloatingIPActionsServiceOp handles communication with the floating IPs +// action related methods of the DigitalOcean API. +type FloatingIPActionsServiceOp struct { + client *Client +} + +// Assign a floating IP to a droplet. +func (s *FloatingIPActionsServiceOp) Assign(ctx context.Context, ip string, dropletID int) (*Action, *Response, error) { + request := &ActionRequest{ + "type": "assign", + "droplet_id": dropletID, + } + return s.doAction(ctx, ip, request) +} + +// Unassign a floating IP from the droplet it is currently assigned to. 
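Editor's note: a sketch tying the two services together: reserve a floating IP in a region and attach it to an existing droplet through the actions endpoint. The region slug is a placeholder, and the returned action normally needs to be followed up (for example via FloatingIPActions.Get) until it completes.

```go
package example

import (
	"context"
	"log"

	"github.com/digitalocean/godo"
)

// attachFloatingIP reserves a floating IP and assigns it to dropletID.
func attachFloatingIP(ctx context.Context, client *godo.Client, dropletID int) (string, error) {
	fip, _, err := client.FloatingIPs.Create(ctx, &godo.FloatingIPCreateRequest{
		Region: "nyc3", // example region slug
	})
	if err != nil {
		return "", err
	}

	// Assign is asynchronous; the action's status reports its progress.
	action, _, err := client.FloatingIPActions.Assign(ctx, fip.IP, dropletID)
	if err != nil {
		return "", err
	}
	log.Printf("assign action for %s is %q", fip.IP, action.Status)
	return fip.IP, nil
}
```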
+func (s *FloatingIPActionsServiceOp) Unassign(ctx context.Context, ip string) (*Action, *Response, error) { + request := &ActionRequest{"type": "unassign"} + return s.doAction(ctx, ip, request) +} + +// Get an action for a particular floating IP by id. +func (s *FloatingIPActionsServiceOp) Get(ctx context.Context, ip string, actionID int) (*Action, *Response, error) { + path := fmt.Sprintf("%s/%d", floatingIPActionPath(ip), actionID) + return s.get(ctx, path) +} + +// List the actions for a particular floating IP. +func (s *FloatingIPActionsServiceOp) List(ctx context.Context, ip string, opt *ListOptions) ([]Action, *Response, error) { + path := floatingIPActionPath(ip) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + return s.list(ctx, path) +} + +func (s *FloatingIPActionsServiceOp) doAction(ctx context.Context, ip string, request *ActionRequest) (*Action, *Response, error) { + path := floatingIPActionPath(ip) + + req, err := s.client.NewRequest(ctx, http.MethodPost, path, request) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Event, resp, err +} + +func (s *FloatingIPActionsServiceOp) get(ctx context.Context, path string) (*Action, *Response, error) { + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Event, resp, err +} + +func (s *FloatingIPActionsServiceOp) list(ctx context.Context, path string) ([]Action, *Response, error) { + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(actionsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Actions, resp, err +} + +func floatingIPActionPath(ip string) string { + return fmt.Sprintf("%s/%s/actions", floatingBasePath, ip) +} diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go new file mode 100644 index 000000000000..95e3c7ecd239 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -0,0 +1,400 @@ +package godo + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "time" + + "github.com/google/go-querystring/query" +) + +const ( + libraryVersion = "1.7.5" + defaultBaseURL = "https://api.digitalocean.com/" + userAgent = "godo/" + libraryVersion + mediaType = "application/json" + + headerRateLimit = "RateLimit-Limit" + headerRateRemaining = "RateLimit-Remaining" + headerRateReset = "RateLimit-Reset" +) + +// Client manages communication with DigitalOcean V2 API. +type Client struct { + // HTTP client used to communicate with the DO API. + client *http.Client + + // Base URL for API requests. + BaseURL *url.URL + + // User agent for client + UserAgent string + + // Rate contains the current rate limit for the client as determined by the most recent + // API call. 
+ Rate Rate + + // Services used for communicating with the API + Account AccountService + Actions ActionsService + CDNs CDNService + Domains DomainsService + Droplets DropletsService + DropletActions DropletActionsService + Images ImagesService + ImageActions ImageActionsService + Keys KeysService + Regions RegionsService + Sizes SizesService + FloatingIPs FloatingIPsService + FloatingIPActions FloatingIPActionsService + Snapshots SnapshotsService + Storage StorageService + StorageActions StorageActionsService + Tags TagsService + LoadBalancers LoadBalancersService + Certificates CertificatesService + Firewalls FirewallsService + Projects ProjectsService + Kubernetes KubernetesService + + // Optional function called after every successful request made to the DO APIs + onRequestCompleted RequestCompletionCallback +} + +// RequestCompletionCallback defines the type of the request callback function +type RequestCompletionCallback func(*http.Request, *http.Response) + +// ListOptions specifies the optional parameters to various List methods that +// support pagination. +type ListOptions struct { + // For paginated result sets, page of results to retrieve. + Page int `url:"page,omitempty"` + + // For paginated result sets, the number of results to include per page. + PerPage int `url:"per_page,omitempty"` +} + +// Response is a DigitalOcean response. This wraps the standard http.Response returned from DigitalOcean. +type Response struct { + *http.Response + + // Links that were returned with the response. These are parsed from + // request body and not the header. + Links *Links + + // Monitoring URI + Monitor string + + Rate +} + +// An ErrorResponse reports the error caused by an API request +type ErrorResponse struct { + // HTTP response that caused this error + Response *http.Response + + // Error message + Message string `json:"message"` + + // RequestID returned from the API, useful to contact support. + RequestID string `json:"request_id"` +} + +// Rate contains the rate limit for the current client. +type Rate struct { + // The number of request per hour the client is currently limited to. + Limit int `json:"limit"` + + // The number of remaining requests the client can make this hour. + Remaining int `json:"remaining"` + + // The time at which the current rate limit will reset. + Reset Timestamp `json:"reset"` +} + +func addOptions(s string, opt interface{}) (string, error) { + v := reflect.ValueOf(opt) + + if v.Kind() == reflect.Ptr && v.IsNil() { + return s, nil + } + + origURL, err := url.Parse(s) + if err != nil { + return s, err + } + + origValues := origURL.Query() + + newValues, err := query.Values(opt) + if err != nil { + return s, err + } + + for k, v := range newValues { + origValues[k] = v + } + + origURL.RawQuery = origValues.Encode() + return origURL.String(), nil +} + +// NewClient returns a new DigitalOcean API client. 
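Editor's note: ListOptions drives pagination for every list call. A common pattern is to walk pages until a short page comes back, which only relies on the Page/PerPage fields shown above; Response.Links carries the paging URLs as an alternative. A minimal sketch (PerPage of 50 is an arbitrary choice):

```go
package example

import (
	"context"

	"github.com/digitalocean/godo"
)

// allDroplets pages through Droplets.List until the API returns a short page.
func allDroplets(ctx context.Context, client *godo.Client) ([]godo.Droplet, error) {
	opt := &godo.ListOptions{Page: 1, PerPage: 50}
	var all []godo.Droplet

	for {
		page, _, err := client.Droplets.List(ctx, opt)
		if err != nil {
			return nil, err
		}
		all = append(all, page...)

		// A page smaller than PerPage means there is nothing left to fetch.
		if len(page) < opt.PerPage {
			return all, nil
		}
		opt.Page++
	}
}
```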
+func NewClient(httpClient *http.Client) *Client { + if httpClient == nil { + httpClient = http.DefaultClient + } + + baseURL, _ := url.Parse(defaultBaseURL) + + c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent} + c.Account = &AccountServiceOp{client: c} + c.Actions = &ActionsServiceOp{client: c} + c.CDNs = &CDNServiceOp{client: c} + c.Certificates = &CertificatesServiceOp{client: c} + c.Domains = &DomainsServiceOp{client: c} + c.Droplets = &DropletsServiceOp{client: c} + c.DropletActions = &DropletActionsServiceOp{client: c} + c.Firewalls = &FirewallsServiceOp{client: c} + c.FloatingIPs = &FloatingIPsServiceOp{client: c} + c.FloatingIPActions = &FloatingIPActionsServiceOp{client: c} + c.Images = &ImagesServiceOp{client: c} + c.ImageActions = &ImageActionsServiceOp{client: c} + c.Keys = &KeysServiceOp{client: c} + c.LoadBalancers = &LoadBalancersServiceOp{client: c} + c.Projects = &ProjectsServiceOp{client: c} + c.Regions = &RegionsServiceOp{client: c} + c.Sizes = &SizesServiceOp{client: c} + c.Snapshots = &SnapshotsServiceOp{client: c} + c.Storage = &StorageServiceOp{client: c} + c.StorageActions = &StorageActionsServiceOp{client: c} + c.Tags = &TagsServiceOp{client: c} + c.Kubernetes = &KubernetesServiceOp{client: c} + + return c +} + +// ClientOpt are options for New. +type ClientOpt func(*Client) error + +// New returns a new DIgitalOcean API client instance. +func New(httpClient *http.Client, opts ...ClientOpt) (*Client, error) { + c := NewClient(httpClient) + for _, opt := range opts { + if err := opt(c); err != nil { + return nil, err + } + } + + return c, nil +} + +// SetBaseURL is a client option for setting the base URL. +func SetBaseURL(bu string) ClientOpt { + return func(c *Client) error { + u, err := url.Parse(bu) + if err != nil { + return err + } + + c.BaseURL = u + return nil + } +} + +// SetUserAgent is a client option for setting the user agent. +func SetUserAgent(ua string) ClientOpt { + return func(c *Client) error { + c.UserAgent = fmt.Sprintf("%s %s", ua, c.UserAgent) + return nil + } +} + +// NewRequest creates an API request. A relative URL can be provided in urlStr, which will be resolved to the +// BaseURL of the Client. Relative URLS should always be specified without a preceding slash. If specified, the +// value pointed to by body is JSON encoded and included in as the request body. +func (c *Client) NewRequest(ctx context.Context, method, urlStr string, body interface{}) (*http.Request, error) { + rel, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + + u := c.BaseURL.ResolveReference(rel) + + buf := new(bytes.Buffer) + if body != nil { + err = json.NewEncoder(buf).Encode(body) + if err != nil { + return nil, err + } + } + + req, err := http.NewRequest(method, u.String(), buf) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", mediaType) + req.Header.Add("Accept", mediaType) + req.Header.Add("User-Agent", c.UserAgent) + return req, nil +} + +// OnRequestCompleted sets the DO API request completion callback +func (c *Client) OnRequestCompleted(rc RequestCompletionCallback) { + c.onRequestCompleted = rc +} + +// newResponse creates a new Response for the provided http.Response +func newResponse(r *http.Response) *Response { + response := Response{Response: r} + response.populateRate() + + return &response +} + +// populateRate parses the rate related headers and populates the response Rate. 
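Editor's note: NewClient takes any *http.Client, and the usual way to make it authenticated is golang.org/x/oauth2 with a personal access token. The environment variable name, user agent string, and use of the first List page as a smoke test are all placeholder choices in this sketch.

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/digitalocean/godo"
	"golang.org/x/oauth2"
)

func main() {
	// Build an oauth2-backed http.Client from a token in the environment.
	token := os.Getenv("DIGITALOCEAN_TOKEN") // placeholder variable name
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
	oauthClient := oauth2.NewClient(context.Background(), ts)

	// New applies ClientOpts on top of NewClient; SetUserAgent prepends to the
	// default godo user agent.
	client, err := godo.New(oauthClient, godo.SetUserAgent("my-tool/0.1"))
	if err != nil {
		log.Fatal(err)
	}

	droplets, _, err := client.Droplets.List(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("account has %d droplets on the first page", len(droplets))
}
```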
+func (r *Response) populateRate() { + if limit := r.Header.Get(headerRateLimit); limit != "" { + r.Rate.Limit, _ = strconv.Atoi(limit) + } + if remaining := r.Header.Get(headerRateRemaining); remaining != "" { + r.Rate.Remaining, _ = strconv.Atoi(remaining) + } + if reset := r.Header.Get(headerRateReset); reset != "" { + if v, _ := strconv.ParseInt(reset, 10, 64); v != 0 { + r.Rate.Reset = Timestamp{time.Unix(v, 0)} + } + } +} + +// Do sends an API request and returns the API response. The API response is JSON decoded and stored in the value +// pointed to by v, or returned as an error if an API error has occurred. If v implements the io.Writer interface, +// the raw response will be written to v, without attempting to decode it. +func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Response, error) { + resp, err := DoRequestWithClient(ctx, c.client, req) + if err != nil { + return nil, err + } + if c.onRequestCompleted != nil { + c.onRequestCompleted(req, resp) + } + + defer func() { + if rerr := resp.Body.Close(); err == nil { + err = rerr + } + }() + + response := newResponse(resp) + c.Rate = response.Rate + + err = CheckResponse(resp) + if err != nil { + return response, err + } + + if v != nil { + if w, ok := v.(io.Writer); ok { + _, err = io.Copy(w, resp.Body) + if err != nil { + return nil, err + } + } else { + err = json.NewDecoder(resp.Body).Decode(v) + if err != nil { + return nil, err + } + } + } + + return response, err +} + +// DoRequest submits an HTTP request. +func DoRequest(ctx context.Context, req *http.Request) (*http.Response, error) { + return DoRequestWithClient(ctx, http.DefaultClient, req) +} + +// DoRequestWithClient submits an HTTP request using the specified client. +func DoRequestWithClient( + ctx context.Context, + client *http.Client, + req *http.Request) (*http.Response, error) { + req = req.WithContext(ctx) + return client.Do(req) +} + +func (r *ErrorResponse) Error() string { + if r.RequestID != "" { + return fmt.Sprintf("%v %v: %d (request %q) %v", + r.Response.Request.Method, r.Response.Request.URL, r.Response.StatusCode, r.RequestID, r.Message) + } + return fmt.Sprintf("%v %v: %d %v", + r.Response.Request.Method, r.Response.Request.URL, r.Response.StatusCode, r.Message) +} + +// CheckResponse checks the API response for errors, and returns them if present. A response is considered an +// error if it has a status code outside the 200 range. API error responses are expected to have either no response +// body, or a JSON response body that maps to ErrorResponse. Any other response body will be silently ignored. +func CheckResponse(r *http.Response) error { + if c := r.StatusCode; c >= 200 && c <= 299 { + return nil + } + + errorResponse := &ErrorResponse{Response: r} + data, err := ioutil.ReadAll(r.Body) + if err == nil && len(data) > 0 { + err := json.Unmarshal(data, errorResponse) + if err != nil { + errorResponse.Message = string(data) + } + } + + return errorResponse +} + +func (r Rate) String() string { + return Stringify(r) +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + p := new(string) + *p = v + return p +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. 
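Editor's note: because Do runs CheckResponse and copies the parsed Rate back onto the client, a failed call usually surfaces as a *ErrorResponse and the latest rate-limit headers are available on client.Rate. A sketch of inspecting both; the 429 handling is illustrative, not a retry implementation.

```go
package example

import (
	"context"
	"errors"
	"log"
	"net/http"

	"github.com/digitalocean/godo"
)

// reportFailure shows how an API error and the current rate limit can be read
// after a call fails.
func reportFailure(ctx context.Context, client *godo.Client, dropletID int) {
	_, _, err := client.Droplets.Get(ctx, dropletID)
	if err == nil {
		return
	}

	var apiErr *godo.ErrorResponse
	if errors.As(err, &apiErr) {
		log.Printf("API rejected the request: %s (request id %q)", apiErr.Message, apiErr.RequestID)
		if apiErr.Response.StatusCode == http.StatusTooManyRequests {
			log.Printf("rate limited: %d of %d requests remaining", client.Rate.Remaining, client.Rate.Limit)
		}
		return
	}

	// Transport-level failures (DNS, TLS, context cancellation, ...) land here.
	log.Printf("request failed before an API response: %v", err)
}
```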
+func Int(v int) *int { + p := new(int) + *p = v + return p +} + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + p := new(bool) + *p = v + return p +} + +// StreamToString converts a reader to a string +func StreamToString(stream io.Reader) string { + buf := new(bytes.Buffer) + _, _ = buf.ReadFrom(stream) + return buf.String() +} diff --git a/vendor/github.com/digitalocean/godo/image_actions.go b/vendor/github.com/digitalocean/godo/image_actions.go new file mode 100644 index 000000000000..976f7c687243 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/image_actions.go @@ -0,0 +1,102 @@ +package godo + +import ( + "context" + "fmt" + "net/http" +) + +// ImageActionsService is an interface for interfacing with the image actions +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#image-actions +type ImageActionsService interface { + Get(context.Context, int, int) (*Action, *Response, error) + Transfer(context.Context, int, *ActionRequest) (*Action, *Response, error) + Convert(context.Context, int) (*Action, *Response, error) +} + +// ImageActionsServiceOp handles communition with the image action related methods of the +// DigitalOcean API. +type ImageActionsServiceOp struct { + client *Client +} + +var _ ImageActionsService = &ImageActionsServiceOp{} + +// Transfer an image +func (i *ImageActionsServiceOp) Transfer(ctx context.Context, imageID int, transferRequest *ActionRequest) (*Action, *Response, error) { + if imageID < 1 { + return nil, nil, NewArgError("imageID", "cannot be less than 1") + } + + if transferRequest == nil { + return nil, nil, NewArgError("transferRequest", "cannot be nil") + } + + path := fmt.Sprintf("v2/images/%d/actions", imageID) + + req, err := i.client.NewRequest(ctx, http.MethodPost, path, transferRequest) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := i.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Event, resp, err +} + +// Convert an image to a snapshot +func (i *ImageActionsServiceOp) Convert(ctx context.Context, imageID int) (*Action, *Response, error) { + if imageID < 1 { + return nil, nil, NewArgError("imageID", "cannont be less than 1") + } + + path := fmt.Sprintf("v2/images/%d/actions", imageID) + + convertRequest := &ActionRequest{ + "type": "convert", + } + + req, err := i.client.NewRequest(ctx, http.MethodPost, path, convertRequest) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := i.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Event, resp, err +} + +// Get an action for a particular image by id. 
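Editor's note: ActionRequest (defined in action.go, outside this hunk) is a loose map of action parameters, which is what Transfer above expects. A hedged sketch of converting an image to a snapshot and then starting a transfer; the "type"/"region" keys and the region slug are assumptions about the action body, not taken from this patch.

```go
package example

import (
	"context"
	"log"

	"github.com/digitalocean/godo"
)

// moveImage sketches the two image actions shown above.
func moveImage(ctx context.Context, client *godo.Client, imageID int) error {
	convertAction, _, err := client.ImageActions.Convert(ctx, imageID)
	if err != nil {
		return err
	}
	log.Printf("convert action status: %s", convertAction.Status)

	transferAction, _, err := client.ImageActions.Transfer(ctx, imageID, &godo.ActionRequest{
		"type":   "transfer",
		"region": "nyc3", // example target region
	})
	if err != nil {
		return err
	}
	log.Printf("transfer action status: %s", transferAction.Status)
	return nil
}
```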
+func (i *ImageActionsServiceOp) Get(ctx context.Context, imageID, actionID int) (*Action, *Response, error) { + if imageID < 1 { + return nil, nil, NewArgError("imageID", "cannot be less than 1") + } + + if actionID < 1 { + return nil, nil, NewArgError("actionID", "cannot be less than 1") + } + + path := fmt.Sprintf("v2/images/%d/actions/%d", imageID, actionID) + + req, err := i.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := i.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Event, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/images.go b/vendor/github.com/digitalocean/godo/images.go new file mode 100644 index 000000000000..69de4c075017 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/images.go @@ -0,0 +1,241 @@ +package godo + +import ( + "context" + "fmt" + "net/http" +) + +const imageBasePath = "v2/images" + +// ImagesService is an interface for interfacing with the images +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#images +type ImagesService interface { + List(context.Context, *ListOptions) ([]Image, *Response, error) + ListDistribution(ctx context.Context, opt *ListOptions) ([]Image, *Response, error) + ListApplication(ctx context.Context, opt *ListOptions) ([]Image, *Response, error) + ListUser(ctx context.Context, opt *ListOptions) ([]Image, *Response, error) + ListByTag(ctx context.Context, tag string, opt *ListOptions) ([]Image, *Response, error) + GetByID(context.Context, int) (*Image, *Response, error) + GetBySlug(context.Context, string) (*Image, *Response, error) + Create(context.Context, *CustomImageCreateRequest) (*Image, *Response, error) + Update(context.Context, int, *ImageUpdateRequest) (*Image, *Response, error) + Delete(context.Context, int) (*Response, error) +} + +// ImagesServiceOp handles communication with the image related methods of the +// DigitalOcean API. +type ImagesServiceOp struct { + client *Client +} + +var _ ImagesService = &ImagesServiceOp{} + +// Image represents a DigitalOcean Image +type Image struct { + ID int `json:"id,float64,omitempty"` + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + Distribution string `json:"distribution,omitempty"` + Slug string `json:"slug,omitempty"` + Public bool `json:"public,omitempty"` + Regions []string `json:"regions,omitempty"` + MinDiskSize int `json:"min_disk_size,omitempty"` + SizeGigaBytes float64 `json:"size_gigabytes,omitempty"` + Created string `json:"created_at,omitempty"` + Description string `json:"description,omitempty"` + Tags []string `json:"tags,omitempty"` + Status string `json:"status,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` +} + +// ImageUpdateRequest represents a request to update an image. +type ImageUpdateRequest struct { + Name string `json:"name"` +} + +// CustomImageCreateRequest represents a request to create a custom image. 
+type CustomImageCreateRequest struct { + Name string `json:"name"` + Url string `json:"url"` + Region string `json:"region"` + Distribution string `json:"distribution,omitempty"` + Description string `json:"description,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +type imageRoot struct { + Image *Image +} + +type imagesRoot struct { + Images []Image + Links *Links `json:"links"` +} + +type listImageOptions struct { + Private bool `url:"private,omitempty"` + Type string `url:"type,omitempty"` + Tag string `url:"tag_name,omitempty"` +} + +func (i Image) String() string { + return Stringify(i) +} + +// List lists all the images available. +func (s *ImagesServiceOp) List(ctx context.Context, opt *ListOptions) ([]Image, *Response, error) { + return s.list(ctx, opt, nil) +} + +// ListDistribution lists all the distribution images. +func (s *ImagesServiceOp) ListDistribution(ctx context.Context, opt *ListOptions) ([]Image, *Response, error) { + listOpt := listImageOptions{Type: "distribution"} + return s.list(ctx, opt, &listOpt) +} + +// ListApplication lists all the application images. +func (s *ImagesServiceOp) ListApplication(ctx context.Context, opt *ListOptions) ([]Image, *Response, error) { + listOpt := listImageOptions{Type: "application"} + return s.list(ctx, opt, &listOpt) +} + +// ListUser lists all the user images. +func (s *ImagesServiceOp) ListUser(ctx context.Context, opt *ListOptions) ([]Image, *Response, error) { + listOpt := listImageOptions{Private: true} + return s.list(ctx, opt, &listOpt) +} + +// ListByTag lists all images with a specific tag applied. +func (s *ImagesServiceOp) ListByTag(ctx context.Context, tag string, opt *ListOptions) ([]Image, *Response, error) { + listOpt := listImageOptions{Tag: tag} + return s.list(ctx, opt, &listOpt) +} + +// GetByID retrieves an image by id. +func (s *ImagesServiceOp) GetByID(ctx context.Context, imageID int) (*Image, *Response, error) { + if imageID < 1 { + return nil, nil, NewArgError("imageID", "cannot be less than 1") + } + + return s.get(ctx, interface{}(imageID)) +} + +// GetBySlug retrieves an image by slug. +func (s *ImagesServiceOp) GetBySlug(ctx context.Context, slug string) (*Image, *Response, error) { + if len(slug) < 1 { + return nil, nil, NewArgError("slug", "cannot be blank") + } + + return s.get(ctx, interface{}(slug)) +} + +func (s *ImagesServiceOp) Create(ctx context.Context, createRequest *CustomImageCreateRequest) (*Image, *Response, error) { + if createRequest == nil { + return nil, nil, NewArgError("createRequest", "cannot be nil") + } + + req, err := s.client.NewRequest(ctx, http.MethodPost, imageBasePath, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(imageRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Image, resp, err +} + +// Update an image name. 
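Editor's note: a sketch of the custom-image flow defined above: import an image from a URL, then find it again via its tag. The URL, region, distribution, and tag are made-up values; note the request field is spelled Url, matching the struct definition.

```go
package example

import (
	"context"
	"log"

	"github.com/digitalocean/godo"
)

// importCustomImage sketches ImagesService.Create followed by ListByTag.
func importCustomImage(ctx context.Context, client *godo.Client) error {
	image, _, err := client.Images.Create(ctx, &godo.CustomImageCreateRequest{
		Name:         "example-base",
		Url:          "https://example.com/images/example-base.qcow2",
		Region:       "nyc3",
		Distribution: "Ubuntu",
		Description:  "golden image built by CI",
		Tags:         []string{"golden"},
	})
	if err != nil {
		return err
	}
	log.Printf("import started for image %d (status %q)", image.ID, image.Status)

	golden, _, err := client.Images.ListByTag(ctx, "golden", nil)
	if err != nil {
		return err
	}
	log.Printf("%d images tagged golden", len(golden))
	return nil
}
```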
+func (s *ImagesServiceOp) Update(ctx context.Context, imageID int, updateRequest *ImageUpdateRequest) (*Image, *Response, error) { + if imageID < 1 { + return nil, nil, NewArgError("imageID", "cannot be less than 1") + } + + if updateRequest == nil { + return nil, nil, NewArgError("updateRequest", "cannot be nil") + } + + path := fmt.Sprintf("%s/%d", imageBasePath, imageID) + req, err := s.client.NewRequest(ctx, http.MethodPut, path, updateRequest) + if err != nil { + return nil, nil, err + } + + root := new(imageRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Image, resp, err +} + +// Delete an image. +func (s *ImagesServiceOp) Delete(ctx context.Context, imageID int) (*Response, error) { + if imageID < 1 { + return nil, NewArgError("imageID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d", imageBasePath, imageID) + + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + + return resp, err +} + +// Helper method for getting an individual image +func (s *ImagesServiceOp) get(ctx context.Context, ID interface{}) (*Image, *Response, error) { + path := fmt.Sprintf("%s/%v", imageBasePath, ID) + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(imageRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Image, resp, err +} + +// Helper method for listing images +func (s *ImagesServiceOp) list(ctx context.Context, opt *ListOptions, listOpt *listImageOptions) ([]Image, *Response, error) { + path := imageBasePath + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + path, err = addOptions(path, listOpt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(imagesRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Images, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/keys.go b/vendor/github.com/digitalocean/godo/keys.go new file mode 100644 index 000000000000..9695c21f48b1 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/keys.go @@ -0,0 +1,226 @@ +package godo + +import ( + "context" + "fmt" + "net/http" +) + +const keysBasePath = "v2/account/keys" + +// KeysService is an interface for interfacing with the keys +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#keys +type KeysService interface { + List(context.Context, *ListOptions) ([]Key, *Response, error) + GetByID(context.Context, int) (*Key, *Response, error) + GetByFingerprint(context.Context, string) (*Key, *Response, error) + Create(context.Context, *KeyCreateRequest) (*Key, *Response, error) + UpdateByID(context.Context, int, *KeyUpdateRequest) (*Key, *Response, error) + UpdateByFingerprint(context.Context, string, *KeyUpdateRequest) (*Key, *Response, error) + DeleteByID(context.Context, int) (*Response, error) + DeleteByFingerprint(context.Context, string) (*Response, error) +} + +// KeysServiceOp handles communication with key related method of the +// DigitalOcean API. +type KeysServiceOp struct { + client *Client +} + +var _ KeysService = &KeysServiceOp{} + +// Key represents a DigitalOcean Key. 
+type Key struct { + ID int `json:"id,float64,omitempty"` + Name string `json:"name,omitempty"` + Fingerprint string `json:"fingerprint,omitempty"` + PublicKey string `json:"public_key,omitempty"` +} + +// KeyUpdateRequest represents a request to update a DigitalOcean key. +type KeyUpdateRequest struct { + Name string `json:"name"` +} + +type keysRoot struct { + SSHKeys []Key `json:"ssh_keys"` + Links *Links `json:"links"` +} + +type keyRoot struct { + SSHKey *Key `json:"ssh_key"` +} + +func (s Key) String() string { + return Stringify(s) +} + +// KeyCreateRequest represents a request to create a new key. +type KeyCreateRequest struct { + Name string `json:"name"` + PublicKey string `json:"public_key"` +} + +// List all keys +func (s *KeysServiceOp) List(ctx context.Context, opt *ListOptions) ([]Key, *Response, error) { + path := keysBasePath + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(keysRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.SSHKeys, resp, err +} + +// Performs a get given a path +func (s *KeysServiceOp) get(ctx context.Context, path string) (*Key, *Response, error) { + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(keyRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.SSHKey, resp, err +} + +// GetByID gets a Key by id +func (s *KeysServiceOp) GetByID(ctx context.Context, keyID int) (*Key, *Response, error) { + if keyID < 1 { + return nil, nil, NewArgError("keyID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d", keysBasePath, keyID) + return s.get(ctx, path) +} + +// GetByFingerprint gets a Key by by fingerprint +func (s *KeysServiceOp) GetByFingerprint(ctx context.Context, fingerprint string) (*Key, *Response, error) { + if len(fingerprint) < 1 { + return nil, nil, NewArgError("fingerprint", "cannot not be empty") + } + + path := fmt.Sprintf("%s/%s", keysBasePath, fingerprint) + return s.get(ctx, path) +} + +// Create a key using a KeyCreateRequest +func (s *KeysServiceOp) Create(ctx context.Context, createRequest *KeyCreateRequest) (*Key, *Response, error) { + if createRequest == nil { + return nil, nil, NewArgError("createRequest", "cannot be nil") + } + + req, err := s.client.NewRequest(ctx, http.MethodPost, keysBasePath, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(keyRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.SSHKey, resp, err +} + +// UpdateByID updates a key name by ID. 
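Editor's note: a sketch of registering a public key and reading it back by fingerprint. The key name is a placeholder, and the caller is assumed to supply the public key material (for example the contents of an id_ed25519.pub file).

```go
package example

import (
	"context"
	"log"

	"github.com/digitalocean/godo"
)

// registerKey sketches KeysService.Create followed by GetByFingerprint.
func registerKey(ctx context.Context, client *godo.Client, publicKey string) error {
	key, _, err := client.Keys.Create(ctx, &godo.KeyCreateRequest{
		Name:      "example-deploy-key",
		PublicKey: publicKey,
	})
	if err != nil {
		return err
	}

	// The API computes the fingerprint; it can be used later instead of the ID.
	fetched, _, err := client.Keys.GetByFingerprint(ctx, key.Fingerprint)
	if err != nil {
		return err
	}
	log.Printf("key %d registered with fingerprint %s", fetched.ID, fetched.Fingerprint)
	return nil
}
```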
+func (s *KeysServiceOp) UpdateByID(ctx context.Context, keyID int, updateRequest *KeyUpdateRequest) (*Key, *Response, error) { + if keyID < 1 { + return nil, nil, NewArgError("keyID", "cannot be less than 1") + } + + if updateRequest == nil { + return nil, nil, NewArgError("updateRequest", "cannot be nil") + } + + path := fmt.Sprintf("%s/%d", keysBasePath, keyID) + req, err := s.client.NewRequest(ctx, "PUT", path, updateRequest) + if err != nil { + return nil, nil, err + } + + root := new(keyRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.SSHKey, resp, err +} + +// UpdateByFingerprint updates a key name by fingerprint. +func (s *KeysServiceOp) UpdateByFingerprint(ctx context.Context, fingerprint string, updateRequest *KeyUpdateRequest) (*Key, *Response, error) { + if len(fingerprint) < 1 { + return nil, nil, NewArgError("fingerprint", "cannot be empty") + } + + if updateRequest == nil { + return nil, nil, NewArgError("updateRequest", "cannot be nil") + } + + path := fmt.Sprintf("%s/%s", keysBasePath, fingerprint) + req, err := s.client.NewRequest(ctx, "PUT", path, updateRequest) + if err != nil { + return nil, nil, err + } + + root := new(keyRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.SSHKey, resp, err +} + +// Delete key using a path +func (s *KeysServiceOp) delete(ctx context.Context, path string) (*Response, error) { + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + + return resp, err +} + +// DeleteByID deletes a key by its id +func (s *KeysServiceOp) DeleteByID(ctx context.Context, keyID int) (*Response, error) { + if keyID < 1 { + return nil, NewArgError("keyID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d", keysBasePath, keyID) + return s.delete(ctx, path) +} + +// DeleteByFingerprint deletes a key by its fingerprint +func (s *KeysServiceOp) DeleteByFingerprint(ctx context.Context, fingerprint string) (*Response, error) { + if len(fingerprint) < 1 { + return nil, NewArgError("fingerprint", "cannot be empty") + } + + path := fmt.Sprintf("%s/%s", keysBasePath, fingerprint) + return s.delete(ctx, path) +} diff --git a/vendor/github.com/digitalocean/godo/kubernetes.go b/vendor/github.com/digitalocean/godo/kubernetes.go new file mode 100644 index 000000000000..4466059cb19b --- /dev/null +++ b/vendor/github.com/digitalocean/godo/kubernetes.go @@ -0,0 +1,432 @@ +package godo + +import ( + "bytes" + "context" + "encoding" + "fmt" + "net/http" + "strings" + "time" +) + +const ( + kubernetesBasePath = "/v2/kubernetes" + kubernetesClustersPath = kubernetesBasePath + "/clusters" + kubernetesOptionsPath = kubernetesBasePath + "/options" +) + +// KubernetesService is an interface for interfacing with the kubernetes endpoints +// of the DigitalOcean API. 
+// See: https://developers.digitalocean.com/documentation/v2#kubernetes +type KubernetesService interface { + Create(context.Context, *KubernetesClusterCreateRequest) (*KubernetesCluster, *Response, error) + Get(context.Context, string) (*KubernetesCluster, *Response, error) + GetKubeConfig(context.Context, string) (*KubernetesClusterConfig, *Response, error) + List(context.Context, *ListOptions) ([]*KubernetesCluster, *Response, error) + Update(context.Context, string, *KubernetesClusterUpdateRequest) (*KubernetesCluster, *Response, error) + Delete(context.Context, string) (*Response, error) + + CreateNodePool(ctx context.Context, clusterID string, req *KubernetesNodePoolCreateRequest) (*KubernetesNodePool, *Response, error) + GetNodePool(ctx context.Context, clusterID, poolID string) (*KubernetesNodePool, *Response, error) + ListNodePools(ctx context.Context, clusterID string, opts *ListOptions) ([]*KubernetesNodePool, *Response, error) + UpdateNodePool(ctx context.Context, clusterID, poolID string, req *KubernetesNodePoolUpdateRequest) (*KubernetesNodePool, *Response, error) + RecycleNodePoolNodes(ctx context.Context, clusterID, poolID string, req *KubernetesNodePoolRecycleNodesRequest) (*Response, error) + DeleteNodePool(ctx context.Context, clusterID, poolID string) (*Response, error) + + GetOptions(context.Context) (*KubernetesOptions, *Response, error) +} + +var _ KubernetesService = &KubernetesServiceOp{} + +// KubernetesServiceOp handles communication with Kubernetes methods of the DigitalOcean API. +type KubernetesServiceOp struct { + client *Client +} + +// KubernetesClusterCreateRequest represents a request to create a Kubernetes cluster. +type KubernetesClusterCreateRequest struct { + Name string `json:"name,omitempty"` + RegionSlug string `json:"region,omitempty"` + VersionSlug string `json:"version,omitempty"` + Tags []string `json:"tags,omitempty"` + + NodePools []*KubernetesNodePoolCreateRequest `json:"node_pools,omitempty"` +} + +// KubernetesClusterUpdateRequest represents a request to update a Kubernetes cluster. +type KubernetesClusterUpdateRequest struct { + Name string `json:"name,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +// KubernetesNodePoolCreateRequest represents a request to create a node pool for a +// Kubernetes cluster. +type KubernetesNodePoolCreateRequest struct { + Name string `json:"name,omitempty"` + Size string `json:"size,omitempty"` + Count int `json:"count,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +// KubernetesNodePoolUpdateRequest represents a request to update a node pool in a +// Kubernetes cluster. +type KubernetesNodePoolUpdateRequest struct { + Name string `json:"name,omitempty"` + Count int `json:"count,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +// KubernetesNodePoolRecycleNodesRequest represents a request to recycle a set of +// nodes in a node pool. This will recycle the nodes by ID. +type KubernetesNodePoolRecycleNodesRequest struct { + Nodes []string `json:"nodes,omitempty"` +} + +// KubernetesCluster represents a Kubernetes cluster. 
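Editor's note: a sketch of KubernetesService.Create using the request types above with a single node pool. The cluster name and the region, version, and size slugs are placeholders; real slugs would come from GetOptions.

```go
package example

import (
	"context"
	"log"

	"github.com/digitalocean/godo"
)

// createCluster sketches a minimal cluster request with one node pool.
func createCluster(ctx context.Context, client *godo.Client) (*godo.KubernetesCluster, error) {
	req := &godo.KubernetesClusterCreateRequest{
		Name:        "example-cluster",
		RegionSlug:  "nyc1",
		VersionSlug: "1.13.1-do.2", // placeholder; list real slugs via GetOptions
		NodePools: []*godo.KubernetesNodePoolCreateRequest{
			{
				Name:  "default-pool",
				Size:  "s-2vcpu-4gb",
				Count: 3,
			},
		},
	}

	cluster, _, err := client.Kubernetes.Create(ctx, req)
	if err != nil {
		return nil, err
	}
	log.Printf("cluster %s provisioning in %s", cluster.ID, cluster.RegionSlug)
	return cluster, nil
}
```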
+type KubernetesCluster struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + RegionSlug string `json:"region,omitempty"` + VersionSlug string `json:"version,omitempty"` + ClusterSubnet string `json:"cluster_subnet,omitempty"` + ServiceSubnet string `json:"service_subnet,omitempty"` + IPv4 string `json:"ipv4,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + Tags []string `json:"tags,omitempty"` + + NodePools []*KubernetesNodePool `json:"node_pools,omitempty"` + + Status *KubernetesClusterStatus `json:"status,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` +} + +// Possible states for a cluster. +const ( + KubernetesClusterStatusProvisioning = KubernetesClusterStatusState("provisioning") + KubernetesClusterStatusRunning = KubernetesClusterStatusState("running") + KubernetesClusterStatusDegraded = KubernetesClusterStatusState("degraded") + KubernetesClusterStatusError = KubernetesClusterStatusState("error") + KubernetesClusterStatusDeleted = KubernetesClusterStatusState("deleted") + KubernetesClusterStatusInvalid = KubernetesClusterStatusState("invalid") +) + +// KubernetesClusterStatusState represents states for a cluster. +type KubernetesClusterStatusState string + +var _ encoding.TextUnmarshaler = (*KubernetesClusterStatusState)(nil) + +// UnmarshalText unmarshals the state. +func (s *KubernetesClusterStatusState) UnmarshalText(text []byte) error { + switch KubernetesClusterStatusState(strings.ToLower(string(text))) { + case KubernetesClusterStatusProvisioning: + *s = KubernetesClusterStatusProvisioning + case KubernetesClusterStatusRunning: + *s = KubernetesClusterStatusRunning + case KubernetesClusterStatusDegraded: + *s = KubernetesClusterStatusDegraded + case KubernetesClusterStatusError: + *s = KubernetesClusterStatusError + case KubernetesClusterStatusDeleted: + *s = KubernetesClusterStatusDeleted + case "", KubernetesClusterStatusInvalid: + *s = KubernetesClusterStatusInvalid + default: + return fmt.Errorf("unknown cluster state %q", string(text)) + } + return nil +} + +// KubernetesClusterStatus describes the status of a cluster. +type KubernetesClusterStatus struct { + State KubernetesClusterStatusState `json:"state,omitempty"` + Message string `json:"message,omitempty"` +} + +// KubernetesNodePool represents a node pool in a Kubernetes cluster. +type KubernetesNodePool struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Size string `json:"size,omitempty"` + Count int `json:"count,omitempty"` + Tags []string `json:"tags,omitempty"` + + Nodes []*KubernetesNode `json:"nodes,omitempty"` +} + +// KubernetesNode represents a Node in a node pool in a Kubernetes cluster. +type KubernetesNode struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Status *KubernetesNodeStatus `json:"status,omitempty"` + + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` +} + +// KubernetesNodeStatus represents the status of a particular Node in a Kubernetes cluster. +type KubernetesNodeStatus struct { + State string `json:"state,omitempty"` + Message string `json:"message,omitempty"` +} + +// KubernetesOptions represents options available for creating Kubernetes clusters. 
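Editor's note: clusters provision asynchronously, so callers typically poll Get until Status.State leaves the provisioning state. A sketch using the status constants above; the 15-second interval is arbitrary and production code would add backoff and a deadline.

```go
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/digitalocean/godo"
)

// waitForCluster polls until the cluster is running, fails, or the context ends.
func waitForCluster(ctx context.Context, client *godo.Client, clusterID string) error {
	ticker := time.NewTicker(15 * time.Second)
	defer ticker.Stop()

	for {
		cluster, _, err := client.Kubernetes.Get(ctx, clusterID)
		if err != nil {
			return err
		}
		if cluster.Status != nil {
			switch cluster.Status.State {
			case godo.KubernetesClusterStatusRunning:
				return nil
			case godo.KubernetesClusterStatusError:
				return fmt.Errorf("cluster %s failed: %s", clusterID, cluster.Status.Message)
			}
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}
```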
+type KubernetesOptions struct { + Versions []*KubernetesVersion `json:"versions,omitempty"` + Regions []*KubernetesRegion `json:"regions,omitempty"` + Sizes []*KubernetesNodeSize `json:"sizes,omitempty"` +} + +// KubernetesVersion is a DigitalOcean Kubernetes release. +type KubernetesVersion struct { + Slug string `json:"slug,omitempty"` + KubernetesVersion string `json:"kubernetes_version,omitempty"` +} + +// KubernetesNodeSize is a node sizes supported for Kubernetes clusters. +type KubernetesNodeSize struct { + Name string `json:"name"` + Slug string `json:"slug"` +} + +// KubernetesRegion is a region usable by Kubernetes clusters. +type KubernetesRegion struct { + Name string `json:"name"` + Slug string `json:"slug"` +} + +type kubernetesClustersRoot struct { + Clusters []*KubernetesCluster `json:"kubernetes_clusters,omitempty"` + Links *Links `json:"links,omitempty"` +} + +type kubernetesClusterRoot struct { + Cluster *KubernetesCluster `json:"kubernetes_cluster,omitempty"` +} + +type kubernetesNodePoolRoot struct { + NodePool *KubernetesNodePool `json:"node_pool,omitempty"` +} + +type kubernetesNodePoolsRoot struct { + NodePools []*KubernetesNodePool `json:"node_pools,omitempty"` + Links *Links `json:"links,omitempty"` +} + +// Get retrieves the details of a Kubernetes cluster. +func (svc *KubernetesServiceOp) Get(ctx context.Context, clusterID string) (*KubernetesCluster, *Response, error) { + path := fmt.Sprintf("%s/%s", kubernetesClustersPath, clusterID) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(kubernetesClusterRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Cluster, resp, nil +} + +// Create creates a Kubernetes cluster. +func (svc *KubernetesServiceOp) Create(ctx context.Context, create *KubernetesClusterCreateRequest) (*KubernetesCluster, *Response, error) { + path := kubernetesClustersPath + req, err := svc.client.NewRequest(ctx, http.MethodPost, path, create) + if err != nil { + return nil, nil, err + } + root := new(kubernetesClusterRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Cluster, resp, nil +} + +// Delete deletes a Kubernetes cluster. There is no way to recover a cluster +// once it has been destroyed. +func (svc *KubernetesServiceOp) Delete(ctx context.Context, clusterID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", kubernetesClustersPath, clusterID) + req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +// List returns a list of the Kubernetes clusters visible with the caller's API token. +func (svc *KubernetesServiceOp) List(ctx context.Context, opts *ListOptions) ([]*KubernetesCluster, *Response, error) { + path := kubernetesClustersPath + path, err := addOptions(path, opts) + if err != nil { + return nil, nil, err + } + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(kubernetesClustersRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Clusters, resp, nil +} + +// KubernetesClusterConfig is the content of a Kubernetes config file, which can be +// used to interact with your Kubernetes cluster using `kubectl`. 
+// See: https://kubernetes.io/docs/tasks/tools/install-kubectl/ +type KubernetesClusterConfig struct { + KubeconfigYAML []byte +} + +// GetKubeConfig returns a Kubernetes config file for the specified cluster. +func (svc *KubernetesServiceOp) GetKubeConfig(ctx context.Context, clusterID string) (*KubernetesClusterConfig, *Response, error) { + path := fmt.Sprintf("%s/%s/kubeconfig", kubernetesClustersPath, clusterID) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + configBytes := bytes.NewBuffer(nil) + resp, err := svc.client.Do(ctx, req, configBytes) + if err != nil { + return nil, resp, err + } + res := &KubernetesClusterConfig{ + KubeconfigYAML: configBytes.Bytes(), + } + return res, resp, nil +} + +// Update updates a Kubernetes cluster's properties. +func (svc *KubernetesServiceOp) Update(ctx context.Context, clusterID string, update *KubernetesClusterUpdateRequest) (*KubernetesCluster, *Response, error) { + path := fmt.Sprintf("%s/%s", kubernetesClustersPath, clusterID) + req, err := svc.client.NewRequest(ctx, http.MethodPut, path, update) + if err != nil { + return nil, nil, err + } + root := new(kubernetesClusterRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Cluster, resp, nil +} + +// CreateNodePool creates a new node pool in an existing Kubernetes cluster. +func (svc *KubernetesServiceOp) CreateNodePool(ctx context.Context, clusterID string, create *KubernetesNodePoolCreateRequest) (*KubernetesNodePool, *Response, error) { + path := fmt.Sprintf("%s/%s/node_pools", kubernetesClustersPath, clusterID) + req, err := svc.client.NewRequest(ctx, http.MethodPost, path, create) + if err != nil { + return nil, nil, err + } + root := new(kubernetesNodePoolRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.NodePool, resp, nil +} + +// GetNodePool retrieves an existing node pool in a Kubernetes cluster. +func (svc *KubernetesServiceOp) GetNodePool(ctx context.Context, clusterID, poolID string) (*KubernetesNodePool, *Response, error) { + path := fmt.Sprintf("%s/%s/node_pools/%s", kubernetesClustersPath, clusterID, poolID) + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(kubernetesNodePoolRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.NodePool, resp, nil +} + +// ListNodePools lists all the node pools found in a Kubernetes cluster. +func (svc *KubernetesServiceOp) ListNodePools(ctx context.Context, clusterID string, opts *ListOptions) ([]*KubernetesNodePool, *Response, error) { + path := fmt.Sprintf("%s/%s/node_pools", kubernetesClustersPath, clusterID) + path, err := addOptions(path, opts) + if err != nil { + return nil, nil, err + } + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(kubernetesNodePoolsRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.NodePools, resp, nil +} + +// UpdateNodePool updates the details of an existing node pool. 
+func (svc *KubernetesServiceOp) UpdateNodePool(ctx context.Context, clusterID, poolID string, update *KubernetesNodePoolUpdateRequest) (*KubernetesNodePool, *Response, error) { + path := fmt.Sprintf("%s/%s/node_pools/%s", kubernetesClustersPath, clusterID, poolID) + req, err := svc.client.NewRequest(ctx, http.MethodPut, path, update) + if err != nil { + return nil, nil, err + } + root := new(kubernetesNodePoolRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.NodePool, resp, nil +} + +// RecycleNodePoolNodes schedules nodes in a node pool for recycling. +func (svc *KubernetesServiceOp) RecycleNodePoolNodes(ctx context.Context, clusterID, poolID string, recycle *KubernetesNodePoolRecycleNodesRequest) (*Response, error) { + path := fmt.Sprintf("%s/%s/node_pools/%s/recycle", kubernetesClustersPath, clusterID, poolID) + req, err := svc.client.NewRequest(ctx, http.MethodPost, path, recycle) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +// DeleteNodePool deletes a node pool, and subsequently all the nodes in that pool. +func (svc *KubernetesServiceOp) DeleteNodePool(ctx context.Context, clusterID, poolID string) (*Response, error) { + path := fmt.Sprintf("%s/%s/node_pools/%s", kubernetesClustersPath, clusterID, poolID) + req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + resp, err := svc.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + return resp, nil +} + +type kubernetesOptionsRoot struct { + Options *KubernetesOptions `json:"options,omitempty"` + Links *Links `json:"links,omitempty"` +} + +// GetOptions returns options about the Kubernetes service, such as the versions available for +// cluster creation. 
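
// Editor's note: hypothetical usage sketch, not part of the vendored package. It combines
// GetKubeConfig and UpdateNodePool from the methods above; the file path, pool IDs and the
// client.Kubernetes accessor are assumptions.
package example

import (
	"context"
	"io/ioutil"

	"github.com/digitalocean/godo"
)

// saveKubeconfig fetches the cluster's kubeconfig and writes it to disk for use with kubectl.
func saveKubeconfig(ctx context.Context, client *godo.Client, clusterID, path string) error {
	cfg, _, err := client.Kubernetes.GetKubeConfig(ctx, clusterID)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(path, cfg.KubeconfigYAML, 0600)
}

// resizeNodePool grows or shrinks an existing node pool to the given count.
func resizeNodePool(ctx context.Context, client *godo.Client, clusterID, poolID string, count int) error {
	_, _, err := client.Kubernetes.UpdateNodePool(ctx, clusterID, poolID,
		&godo.KubernetesNodePoolUpdateRequest{Count: count})
	return err
}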
+func (svc *KubernetesServiceOp) GetOptions(ctx context.Context) (*KubernetesOptions, *Response, error) { + path := kubernetesOptionsPath + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + root := new(kubernetesOptionsRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Options, resp, nil +} diff --git a/vendor/github.com/digitalocean/godo/links.go b/vendor/github.com/digitalocean/godo/links.go new file mode 100644 index 000000000000..2098441729df --- /dev/null +++ b/vendor/github.com/digitalocean/godo/links.go @@ -0,0 +1,83 @@ +package godo + +import ( + "context" + "net/url" + "strconv" +) + +// Links manages links that are returned along with a List +type Links struct { + Pages *Pages `json:"pages,omitempty"` + Actions []LinkAction `json:"actions,omitempty"` +} + +// Pages are pages specified in Links +type Pages struct { + First string `json:"first,omitempty"` + Prev string `json:"prev,omitempty"` + Last string `json:"last,omitempty"` + Next string `json:"next,omitempty"` +} + +// LinkAction is a pointer to an action +type LinkAction struct { + ID int `json:"id,omitempty"` + Rel string `json:"rel,omitempty"` + HREF string `json:"href,omitempty"` +} + +// CurrentPage is current page of the list +func (l *Links) CurrentPage() (int, error) { + return l.Pages.current() +} + +func (p *Pages) current() (int, error) { + switch { + case p == nil: + return 1, nil + case p.Prev == "" && p.Next != "": + return 1, nil + case p.Prev != "": + prevPage, err := pageForURL(p.Prev) + if err != nil { + return 0, err + } + + return prevPage + 1, nil + } + + return 0, nil +} + +// IsLastPage returns true if the current page is the last +func (l *Links) IsLastPage() bool { + if l.Pages == nil { + return true + } + return l.Pages.isLast() +} + +func (p *Pages) isLast() bool { + return p.Last == "" +} + +func pageForURL(urlText string) (int, error) { + u, err := url.ParseRequestURI(urlText) + if err != nil { + return 0, err + } + + pageStr := u.Query().Get("page") + page, err := strconv.Atoi(pageStr) + if err != nil { + return 0, err + } + + return page, nil +} + +// Get a link action by id. +func (la *LinkAction) Get(ctx context.Context, client *Client) (*Action, *Response, error) { + return client.Actions.Get(ctx, la.ID) +} diff --git a/vendor/github.com/digitalocean/godo/load_balancers.go b/vendor/github.com/digitalocean/godo/load_balancers.go new file mode 100644 index 000000000000..b873483c7f7e --- /dev/null +++ b/vendor/github.com/digitalocean/godo/load_balancers.go @@ -0,0 +1,311 @@ +package godo + +import ( + "context" + "fmt" + "net/http" +) + +const loadBalancersBasePath = "/v2/load_balancers" +const forwardingRulesPath = "forwarding_rules" + +const dropletsPath = "droplets" + +// LoadBalancersService is an interface for managing load balancers with the DigitalOcean API. 
+// See: https://developers.digitalocean.com/documentation/v2#load-balancers +type LoadBalancersService interface { + Get(context.Context, string) (*LoadBalancer, *Response, error) + List(context.Context, *ListOptions) ([]LoadBalancer, *Response, error) + Create(context.Context, *LoadBalancerRequest) (*LoadBalancer, *Response, error) + Update(ctx context.Context, lbID string, lbr *LoadBalancerRequest) (*LoadBalancer, *Response, error) + Delete(ctx context.Context, lbID string) (*Response, error) + AddDroplets(ctx context.Context, lbID string, dropletIDs ...int) (*Response, error) + RemoveDroplets(ctx context.Context, lbID string, dropletIDs ...int) (*Response, error) + AddForwardingRules(ctx context.Context, lbID string, rules ...ForwardingRule) (*Response, error) + RemoveForwardingRules(ctx context.Context, lbID string, rules ...ForwardingRule) (*Response, error) +} + +// LoadBalancer represents a DigitalOcean load balancer configuration. +// Tags can only be provided upon the creation of a Load Balancer. +type LoadBalancer struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + IP string `json:"ip,omitempty"` + Algorithm string `json:"algorithm,omitempty"` + Status string `json:"status,omitempty"` + Created string `json:"created_at,omitempty"` + ForwardingRules []ForwardingRule `json:"forwarding_rules,omitempty"` + HealthCheck *HealthCheck `json:"health_check,omitempty"` + StickySessions *StickySessions `json:"sticky_sessions,omitempty"` + Region *Region `json:"region,omitempty"` + DropletIDs []int `json:"droplet_ids,omitempty"` + Tag string `json:"tag,omitempty"` + Tags []string `json:"tags,omitempty"` + RedirectHttpToHttps bool `json:"redirect_http_to_https,omitempty"` +} + +// String creates a human-readable description of a LoadBalancer. +func (l LoadBalancer) String() string { + return Stringify(l) +} + +func (l LoadBalancer) URN() string { + return ToURN("LoadBalancer", l.ID) +} + +// AsRequest creates a LoadBalancerRequest that can be submitted to Update with the current values of the LoadBalancer. +// Modifying the returned LoadBalancerRequest will not modify the original LoadBalancer. +func (l LoadBalancer) AsRequest() *LoadBalancerRequest { + r := LoadBalancerRequest{ + Name: l.Name, + Algorithm: l.Algorithm, + ForwardingRules: append([]ForwardingRule(nil), l.ForwardingRules...), + DropletIDs: append([]int(nil), l.DropletIDs...), + Tag: l.Tag, + RedirectHttpToHttps: l.RedirectHttpToHttps, + HealthCheck: l.HealthCheck, + } + + if l.HealthCheck != nil { + r.HealthCheck = &HealthCheck{} + *r.HealthCheck = *l.HealthCheck + } + if l.StickySessions != nil { + r.StickySessions = &StickySessions{} + *r.StickySessions = *l.StickySessions + } + if l.Region != nil { + r.Region = l.Region.Slug + } + return &r +} + +// ForwardingRule represents load balancer forwarding rules. +type ForwardingRule struct { + EntryProtocol string `json:"entry_protocol,omitempty"` + EntryPort int `json:"entry_port,omitempty"` + TargetProtocol string `json:"target_protocol,omitempty"` + TargetPort int `json:"target_port,omitempty"` + CertificateID string `json:"certificate_id,omitempty"` + TlsPassthrough bool `json:"tls_passthrough,omitempty"` +} + +// String creates a human-readable description of a ForwardingRule. +func (f ForwardingRule) String() string { + return Stringify(f) +} + +// HealthCheck represents optional load balancer health check rules. 
+type HealthCheck struct { + Protocol string `json:"protocol,omitempty"` + Port int `json:"port,omitempty"` + Path string `json:"path,omitempty"` + CheckIntervalSeconds int `json:"check_interval_seconds,omitempty"` + ResponseTimeoutSeconds int `json:"response_timeout_seconds,omitempty"` + HealthyThreshold int `json:"healthy_threshold,omitempty"` + UnhealthyThreshold int `json:"unhealthy_threshold,omitempty"` +} + +// String creates a human-readable description of a HealthCheck. +func (h HealthCheck) String() string { + return Stringify(h) +} + +// StickySessions represents optional load balancer session affinity rules. +type StickySessions struct { + Type string `json:"type,omitempty"` + CookieName string `json:"cookie_name,omitempty"` + CookieTtlSeconds int `json:"cookie_ttl_seconds,omitempty"` +} + +// String creates a human-readable description of a StickySessions instance. +func (s StickySessions) String() string { + return Stringify(s) +} + +// LoadBalancerRequest represents the configuration to be applied to an existing or a new load balancer. +type LoadBalancerRequest struct { + Name string `json:"name,omitempty"` + Algorithm string `json:"algorithm,omitempty"` + Region string `json:"region,omitempty"` + ForwardingRules []ForwardingRule `json:"forwarding_rules,omitempty"` + HealthCheck *HealthCheck `json:"health_check,omitempty"` + StickySessions *StickySessions `json:"sticky_sessions,omitempty"` + DropletIDs []int `json:"droplet_ids,omitempty"` + Tag string `json:"tag,omitempty"` + Tags []string `json:"tags,omitempty"` + RedirectHttpToHttps bool `json:"redirect_http_to_https,omitempty"` +} + +// String creates a human-readable description of a LoadBalancerRequest. +func (l LoadBalancerRequest) String() string { + return Stringify(l) +} + +type forwardingRulesRequest struct { + Rules []ForwardingRule `json:"forwarding_rules,omitempty"` +} + +func (l forwardingRulesRequest) String() string { + return Stringify(l) +} + +type dropletIDsRequest struct { + IDs []int `json:"droplet_ids,omitempty"` +} + +func (l dropletIDsRequest) String() string { + return Stringify(l) +} + +type loadBalancersRoot struct { + LoadBalancers []LoadBalancer `json:"load_balancers"` + Links *Links `json:"links"` +} + +type loadBalancerRoot struct { + LoadBalancer *LoadBalancer `json:"load_balancer"` +} + +// LoadBalancersServiceOp handles communication with load balancer-related methods of the DigitalOcean API. +type LoadBalancersServiceOp struct { + client *Client +} + +var _ LoadBalancersService = &LoadBalancersServiceOp{} + +// Get an existing load balancer by its identifier. +func (l *LoadBalancersServiceOp) Get(ctx context.Context, lbID string) (*LoadBalancer, *Response, error) { + path := fmt.Sprintf("%s/%s", loadBalancersBasePath, lbID) + + req, err := l.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(loadBalancerRoot) + resp, err := l.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.LoadBalancer, resp, err +} + +// List load balancers, with optional pagination. 
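
// Editor's note: illustrative sketch, not part of the vendored source. It builds a
// LoadBalancerRequest with one forwarding rule and a health check as defined above; the
// region, algorithm value, droplet IDs and the client.LoadBalancers accessor are assumptions.
package example

import (
	"context"

	"github.com/digitalocean/godo"
)

// createLoadBalancer provisions a load balancer that forwards HTTP traffic to two droplets
// and health-checks them on "/".
func createLoadBalancer(ctx context.Context, client *godo.Client) (*godo.LoadBalancer, error) {
	req := &godo.LoadBalancerRequest{
		Name:      "example-lb",
		Algorithm: "round_robin",
		Region:    "nyc3",
		ForwardingRules: []godo.ForwardingRule{
			{EntryProtocol: "http", EntryPort: 80, TargetProtocol: "http", TargetPort: 8080},
		},
		HealthCheck: &godo.HealthCheck{
			Protocol:               "http",
			Port:                   8080,
			Path:                   "/",
			CheckIntervalSeconds:   10,
			ResponseTimeoutSeconds: 5,
			HealthyThreshold:       3,
			UnhealthyThreshold:     3,
		},
		DropletIDs: []int{111111, 222222},
	}
	lb, _, err := client.LoadBalancers.Create(ctx, req)
	return lb, err
}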
+func (l *LoadBalancersServiceOp) List(ctx context.Context, opt *ListOptions) ([]LoadBalancer, *Response, error) { + path, err := addOptions(loadBalancersBasePath, opt) + if err != nil { + return nil, nil, err + } + + req, err := l.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(loadBalancersRoot) + resp, err := l.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.LoadBalancers, resp, err +} + +// Create a new load balancer with a given configuration. +func (l *LoadBalancersServiceOp) Create(ctx context.Context, lbr *LoadBalancerRequest) (*LoadBalancer, *Response, error) { + req, err := l.client.NewRequest(ctx, http.MethodPost, loadBalancersBasePath, lbr) + if err != nil { + return nil, nil, err + } + + root := new(loadBalancerRoot) + resp, err := l.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.LoadBalancer, resp, err +} + +// Update an existing load balancer with new configuration. +func (l *LoadBalancersServiceOp) Update(ctx context.Context, lbID string, lbr *LoadBalancerRequest) (*LoadBalancer, *Response, error) { + path := fmt.Sprintf("%s/%s", loadBalancersBasePath, lbID) + + req, err := l.client.NewRequest(ctx, "PUT", path, lbr) + if err != nil { + return nil, nil, err + } + + root := new(loadBalancerRoot) + resp, err := l.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.LoadBalancer, resp, err +} + +// Delete a load balancer by its identifier. +func (l *LoadBalancersServiceOp) Delete(ctx context.Context, ldID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", loadBalancersBasePath, ldID) + + req, err := l.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + return l.client.Do(ctx, req, nil) +} + +// AddDroplets adds droplets to a load balancer. +func (l *LoadBalancersServiceOp) AddDroplets(ctx context.Context, lbID string, dropletIDs ...int) (*Response, error) { + path := fmt.Sprintf("%s/%s/%s", loadBalancersBasePath, lbID, dropletsPath) + + req, err := l.client.NewRequest(ctx, http.MethodPost, path, &dropletIDsRequest{IDs: dropletIDs}) + if err != nil { + return nil, err + } + + return l.client.Do(ctx, req, nil) +} + +// RemoveDroplets removes droplets from a load balancer. +func (l *LoadBalancersServiceOp) RemoveDroplets(ctx context.Context, lbID string, dropletIDs ...int) (*Response, error) { + path := fmt.Sprintf("%s/%s/%s", loadBalancersBasePath, lbID, dropletsPath) + + req, err := l.client.NewRequest(ctx, http.MethodDelete, path, &dropletIDsRequest{IDs: dropletIDs}) + if err != nil { + return nil, err + } + + return l.client.Do(ctx, req, nil) +} + +// AddForwardingRules adds forwarding rules to a load balancer. +func (l *LoadBalancersServiceOp) AddForwardingRules(ctx context.Context, lbID string, rules ...ForwardingRule) (*Response, error) { + path := fmt.Sprintf("%s/%s/%s", loadBalancersBasePath, lbID, forwardingRulesPath) + + req, err := l.client.NewRequest(ctx, http.MethodPost, path, &forwardingRulesRequest{Rules: rules}) + if err != nil { + return nil, err + } + + return l.client.Do(ctx, req, nil) +} + +// RemoveForwardingRules removes forwarding rules from a load balancer. 
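
// Editor's note: illustrative sketch, not part of the vendored source. Because Update takes a
// full LoadBalancerRequest, the usual pattern is Get, copy with AsRequest (defined above),
// change one field, and PUT the whole configuration back. client.LoadBalancers is assumed to
// be the accessor wired up in godo.go outside this excerpt.
package example

import (
	"context"

	"github.com/digitalocean/godo"
)

func enableHTTPSRedirect(ctx context.Context, client *godo.Client, lbID string) (*godo.LoadBalancer, error) {
	lb, _, err := client.LoadBalancers.Get(ctx, lbID)
	if err != nil {
		return nil, err
	}
	req := lb.AsRequest()
	req.RedirectHttpToHttps = true
	updated, _, err := client.LoadBalancers.Update(ctx, lbID, req)
	return updated, err
}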
+func (l *LoadBalancersServiceOp) RemoveForwardingRules(ctx context.Context, lbID string, rules ...ForwardingRule) (*Response, error) { + path := fmt.Sprintf("%s/%s/%s", loadBalancersBasePath, lbID, forwardingRulesPath) + + req, err := l.client.NewRequest(ctx, http.MethodDelete, path, &forwardingRulesRequest{Rules: rules}) + if err != nil { + return nil, err + } + + return l.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/digitalocean/godo/projects.go b/vendor/github.com/digitalocean/godo/projects.go new file mode 100644 index 000000000000..f78308cfe3a5 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/projects.go @@ -0,0 +1,302 @@ +package godo + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "path" +) + +const ( + // DefaultProject is the ID you should use if you are working with your + // default project. + DefaultProject = "default" + + projectsBasePath = "/v2/projects" +) + +// ProjectsService is an interface for creating and managing Projects with the DigitalOcean API. +// See: https://developers.digitalocean.com/documentation/documentation/v2/#projects +type ProjectsService interface { + List(context.Context, *ListOptions) ([]Project, *Response, error) + GetDefault(context.Context) (*Project, *Response, error) + Get(context.Context, string) (*Project, *Response, error) + Create(context.Context, *CreateProjectRequest) (*Project, *Response, error) + Update(context.Context, string, *UpdateProjectRequest) (*Project, *Response, error) + Delete(context.Context, string) (*Response, error) + + ListResources(context.Context, string, *ListOptions) ([]ProjectResource, *Response, error) + AssignResources(context.Context, string, ...interface{}) ([]ProjectResource, *Response, error) +} + +// ProjectsServiceOp handles communication with Projects methods of the DigitalOcean API. +type ProjectsServiceOp struct { + client *Client +} + +// Project represents a DigitalOcean Project configuration. +type Project struct { + ID string `json:"id"` + OwnerUUID string `json:"owner_uuid"` + OwnerID uint64 `json:"owner_id"` + Name string `json:"name"` + Description string `json:"description"` + Purpose string `json:"purpose"` + Environment string `json:"environment"` + IsDefault bool `json:"is_default"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` +} + +// String creates a human-readable description of a Project. +func (p Project) String() string { + return Stringify(p) +} + +// CreateProjectRequest represents the request to create a new project. +type CreateProjectRequest struct { + Name string `json:"name"` + Description string `json:"description"` + Purpose string `json:"purpose"` + Environment string `json:"environment"` +} + +// UpdateProjectRequest represents the request to update project information. +// This type expects certain attribute types, but is built this way to allow +// nil values as well. See `updateProjectRequest` for the "real" types. +type UpdateProjectRequest struct { + Name interface{} + Description interface{} + Purpose interface{} + Environment interface{} + IsDefault interface{} +} + +type updateProjectRequest struct { + Name *string `json:"name"` + Description *string `json:"description"` + Purpose *string `json:"purpose"` + Environment *string `json:"environment"` + IsDefault *bool `json:"is_default"` +} + +// MarshalJSON takes an UpdateRequest and converts it to the "typed" request +// which is sent to the projects API. This is a PATCH request, which allows +// partial attributes, so `null` values are OK. 
+func (upr *UpdateProjectRequest) MarshalJSON() ([]byte, error) { + d := &updateProjectRequest{} + if str, ok := upr.Name.(string); ok { + d.Name = &str + } + if str, ok := upr.Description.(string); ok { + d.Description = &str + } + if str, ok := upr.Purpose.(string); ok { + d.Purpose = &str + } + if str, ok := upr.Environment.(string); ok { + d.Environment = &str + } + if val, ok := upr.IsDefault.(bool); ok { + d.IsDefault = &val + } + + return json.Marshal(d) +} + +type assignResourcesRequest struct { + Resources []string `json:"resources"` +} + +// ProjectResource is the projects API's representation of a resource. +type ProjectResource struct { + URN string `json:"urn"` + AssignedAt string `json:"assigned_at"` + Links *ProjectResourceLinks `json:"links"` + Status string `json:"status,omitempty"` +} + +// ProjetResourceLinks specify the link for more information about the resource. +type ProjectResourceLinks struct { + Self string `json:"self"` +} + +type projectsRoot struct { + Projects []Project `json:"projects"` + Links *Links `json:"links"` +} + +type projectRoot struct { + Project *Project `json:"project"` +} + +type projectResourcesRoot struct { + Resources []ProjectResource `json:"resources"` + Links *Links `json:"links,omitempty"` +} + +var _ ProjectsService = &ProjectsServiceOp{} + +// List Projects. +func (p *ProjectsServiceOp) List(ctx context.Context, opts *ListOptions) ([]Project, *Response, error) { + path, err := addOptions(projectsBasePath, opts) + if err != nil { + return nil, nil, err + } + + req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(projectsRoot) + resp, err := p.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Projects, resp, err +} + +// GetDefault project. +func (p *ProjectsServiceOp) GetDefault(ctx context.Context) (*Project, *Response, error) { + return p.getHelper(ctx, "default") +} + +// Get retrieves a single project by its ID. +func (p *ProjectsServiceOp) Get(ctx context.Context, projectID string) (*Project, *Response, error) { + return p.getHelper(ctx, projectID) +} + +// Create a new project. +func (p *ProjectsServiceOp) Create(ctx context.Context, cr *CreateProjectRequest) (*Project, *Response, error) { + req, err := p.client.NewRequest(ctx, http.MethodPost, projectsBasePath, cr) + if err != nil { + return nil, nil, err + } + + root := new(projectRoot) + resp, err := p.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Project, resp, err +} + +// Update an existing project. +func (p *ProjectsServiceOp) Update(ctx context.Context, projectID string, ur *UpdateProjectRequest) (*Project, *Response, error) { + path := path.Join(projectsBasePath, projectID) + req, err := p.client.NewRequest(ctx, http.MethodPatch, path, ur) + if err != nil { + return nil, nil, err + } + + root := new(projectRoot) + resp, err := p.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Project, resp, err +} + +// Delete an existing project. You cannot have any resources in a project +// before deleting it. See the API documentation for more details. 
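
// Editor's note: illustrative sketch, not part of the vendored source. The interface{} fields
// on UpdateProjectRequest let a caller distinguish "not set" (nil) from "set to an empty or
// false value"; per the MarshalJSON comment above, unset fields serialize as null, which the
// PATCH endpoint accepts. The client.Projects accessor is an assumption.
package example

import (
	"context"

	"github.com/digitalocean/godo"
)

// renameProject changes only the project name and leaves every other attribute untouched.
func renameProject(ctx context.Context, client *godo.Client, projectID, newName string) (*godo.Project, error) {
	update := &godo.UpdateProjectRequest{Name: newName}
	project, _, err := client.Projects.Update(ctx, projectID, update)
	return project, err
}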
+func (p *ProjectsServiceOp) Delete(ctx context.Context, projectID string) (*Response, error) { + path := path.Join(projectsBasePath, projectID) + req, err := p.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + return p.client.Do(ctx, req, nil) +} + +// ListResources lists all resources in a project. +func (p *ProjectsServiceOp) ListResources(ctx context.Context, projectID string, opts *ListOptions) ([]ProjectResource, *Response, error) { + basePath := path.Join(projectsBasePath, projectID, "resources") + path, err := addOptions(basePath, opts) + if err != nil { + return nil, nil, err + } + + req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(projectResourcesRoot) + resp, err := p.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Resources, resp, err +} + +// AssignResources assigns one or more resources to a project. AssignResources +// accepts resources in two possible formats: + +// 1. The resource type, like `&Droplet{ID: 1}` or `&FloatingIP{IP: "1.2.3.4"}` +// 2. A valid DO URN as a string, like "do:droplet:1234" +// +// There is no unassign. To move a resource to another project, just assign +// it to that other project. +func (p *ProjectsServiceOp) AssignResources(ctx context.Context, projectID string, resources ...interface{}) ([]ProjectResource, *Response, error) { + path := path.Join(projectsBasePath, projectID, "resources") + + ar := &assignResourcesRequest{ + Resources: make([]string, len(resources)), + } + + for i, resource := range resources { + switch resource := resource.(type) { + case ResourceWithURN: + ar.Resources[i] = resource.URN() + case string: + ar.Resources[i] = resource + default: + return nil, nil, fmt.Errorf("%T must either be a string or have a valid URN method", resource) + } + } + req, err := p.client.NewRequest(ctx, http.MethodPost, path, ar) + if err != nil { + return nil, nil, err + } + + root := new(projectResourcesRoot) + resp, err := p.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Resources, resp, err +} + +func (p *ProjectsServiceOp) getHelper(ctx context.Context, projectID string) (*Project, *Response, error) { + path := path.Join(projectsBasePath, projectID) + + req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(projectRoot) + resp, err := p.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Project, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/regions.go b/vendor/github.com/digitalocean/godo/regions.go new file mode 100644 index 000000000000..409959d951df --- /dev/null +++ b/vendor/github.com/digitalocean/godo/regions.go @@ -0,0 +1,64 @@ +package godo + +import ( + "context" + "net/http" +) + +// RegionsService is an interface for interfacing with the regions +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#regions +type RegionsService interface { + List(context.Context, *ListOptions) ([]Region, *Response, error) +} + +// RegionsServiceOp handles communication with the region related methods of the +// DigitalOcean API. 
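
// Editor's note: illustrative sketch, not part of the vendored source. AssignResources (above)
// accepts either string URNs or any value implementing ResourceWithURN, such as the Volume
// type defined later in this package. The droplet ID and client.Projects accessor are
// placeholders/assumptions.
package example

import (
	"context"

	"github.com/digitalocean/godo"
)

func moveDropletToProject(ctx context.Context, client *godo.Client, projectID string, dropletID int) error {
	urn := godo.ToURN("Droplet", dropletID) // produces "do:droplet:<id>"
	_, _, err := client.Projects.AssignResources(ctx, projectID, urn)
	return err
}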
+type RegionsServiceOp struct { + client *Client +} + +var _ RegionsService = &RegionsServiceOp{} + +// Region represents a DigitalOcean Region +type Region struct { + Slug string `json:"slug,omitempty"` + Name string `json:"name,omitempty"` + Sizes []string `json:"sizes,omitempty"` + Available bool `json:"available,omitempty"` + Features []string `json:"features,omitempty"` +} + +type regionsRoot struct { + Regions []Region + Links *Links `json:"links"` +} + +func (r Region) String() string { + return Stringify(r) +} + +// List all regions +func (s *RegionsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Region, *Response, error) { + path := "v2/regions" + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(regionsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Regions, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/sizes.go b/vendor/github.com/digitalocean/godo/sizes.go new file mode 100644 index 000000000000..da8207c08919 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/sizes.go @@ -0,0 +1,68 @@ +package godo + +import ( + "context" + "net/http" +) + +// SizesService is an interface for interfacing with the size +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#sizes +type SizesService interface { + List(context.Context, *ListOptions) ([]Size, *Response, error) +} + +// SizesServiceOp handles communication with the size related methods of the +// DigitalOcean API. +type SizesServiceOp struct { + client *Client +} + +var _ SizesService = &SizesServiceOp{} + +// Size represents a DigitalOcean Size +type Size struct { + Slug string `json:"slug,omitempty"` + Memory int `json:"memory,omitempty"` + Vcpus int `json:"vcpus,omitempty"` + Disk int `json:"disk,omitempty"` + PriceMonthly float64 `json:"price_monthly,omitempty"` + PriceHourly float64 `json:"price_hourly,omitempty"` + Regions []string `json:"regions,omitempty"` + Available bool `json:"available,omitempty"` + Transfer float64 `json:"transfer,omitempty"` +} + +func (s Size) String() string { + return Stringify(s) +} + +type sizesRoot struct { + Sizes []Size + Links *Links `json:"links"` +} + +// List all images +func (s *SizesServiceOp) List(ctx context.Context, opt *ListOptions) ([]Size, *Response, error) { + path := "v2/sizes" + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(sizesRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Sizes, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/snapshots.go b/vendor/github.com/digitalocean/godo/snapshots.go new file mode 100644 index 000000000000..181188db2c4e --- /dev/null +++ b/vendor/github.com/digitalocean/godo/snapshots.go @@ -0,0 +1,140 @@ +package godo + +import ( + "context" + "fmt" + "net/http" +) + +const snapshotBasePath = "v2/snapshots" + +// SnapshotsService is an interface for interfacing with the snapshots +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#snapshots +type SnapshotsService interface 
{ + List(context.Context, *ListOptions) ([]Snapshot, *Response, error) + ListVolume(context.Context, *ListOptions) ([]Snapshot, *Response, error) + ListDroplet(context.Context, *ListOptions) ([]Snapshot, *Response, error) + Get(context.Context, string) (*Snapshot, *Response, error) + Delete(context.Context, string) (*Response, error) +} + +// SnapshotsServiceOp handles communication with the snapshot related methods of the +// DigitalOcean API. +type SnapshotsServiceOp struct { + client *Client +} + +var _ SnapshotsService = &SnapshotsServiceOp{} + +// Snapshot represents a DigitalOcean Snapshot +type Snapshot struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + ResourceID string `json:"resource_id,omitempty"` + ResourceType string `json:"resource_type,omitempty"` + Regions []string `json:"regions,omitempty"` + MinDiskSize int `json:"min_disk_size,omitempty"` + SizeGigaBytes float64 `json:"size_gigabytes,omitempty"` + Created string `json:"created_at,omitempty"` +} + +type snapshotRoot struct { + Snapshot *Snapshot `json:"snapshot"` +} + +type snapshotsRoot struct { + Snapshots []Snapshot `json:"snapshots"` + Links *Links `json:"links,omitempty"` +} + +type listSnapshotOptions struct { + ResourceType string `url:"resource_type,omitempty"` +} + +func (s Snapshot) String() string { + return Stringify(s) +} + +// List lists all the snapshots available. +func (s *SnapshotsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Snapshot, *Response, error) { + return s.list(ctx, opt, nil) +} + +// ListDroplet lists all the Droplet snapshots. +func (s *SnapshotsServiceOp) ListDroplet(ctx context.Context, opt *ListOptions) ([]Snapshot, *Response, error) { + listOpt := listSnapshotOptions{ResourceType: "droplet"} + return s.list(ctx, opt, &listOpt) +} + +// ListVolume lists all the volume snapshots. +func (s *SnapshotsServiceOp) ListVolume(ctx context.Context, opt *ListOptions) ([]Snapshot, *Response, error) { + listOpt := listSnapshotOptions{ResourceType: "volume"} + return s.list(ctx, opt, &listOpt) +} + +// Get retrieves an snapshot by id. +func (s *SnapshotsServiceOp) Get(ctx context.Context, snapshotID string) (*Snapshot, *Response, error) { + return s.get(ctx, snapshotID) +} + +// Delete an snapshot. 
+func (s *SnapshotsServiceOp) Delete(ctx context.Context, snapshotID string) (*Response, error) { + path := fmt.Sprintf("%s/%s", snapshotBasePath, snapshotID) + + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + + return resp, err +} + +// Helper method for getting an individual snapshot +func (s *SnapshotsServiceOp) get(ctx context.Context, ID string) (*Snapshot, *Response, error) { + path := fmt.Sprintf("%s/%s", snapshotBasePath, ID) + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(snapshotRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Snapshot, resp, err +} + +// Helper method for listing snapshots +func (s *SnapshotsServiceOp) list(ctx context.Context, opt *ListOptions, listOpt *listSnapshotOptions) ([]Snapshot, *Response, error) { + path := snapshotBasePath + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + path, err = addOptions(path, listOpt) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(snapshotsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Snapshots, resp, err +} diff --git a/vendor/github.com/digitalocean/godo/storage.go b/vendor/github.com/digitalocean/godo/storage.go new file mode 100644 index 000000000000..a79332a79e8d --- /dev/null +++ b/vendor/github.com/digitalocean/godo/storage.go @@ -0,0 +1,251 @@ +package godo + +import ( + "context" + "fmt" + "net/http" + "time" +) + +const ( + storageBasePath = "v2" + storageAllocPath = storageBasePath + "/volumes" + storageSnapPath = storageBasePath + "/snapshots" +) + +// StorageService is an interface for interfacing with the storage +// endpoints of the Digital Ocean API. +// See: https://developers.digitalocean.com/documentation/v2#storage +type StorageService interface { + ListVolumes(context.Context, *ListVolumeParams) ([]Volume, *Response, error) + GetVolume(context.Context, string) (*Volume, *Response, error) + CreateVolume(context.Context, *VolumeCreateRequest) (*Volume, *Response, error) + DeleteVolume(context.Context, string) (*Response, error) + ListSnapshots(ctx context.Context, volumeID string, opts *ListOptions) ([]Snapshot, *Response, error) + GetSnapshot(context.Context, string) (*Snapshot, *Response, error) + CreateSnapshot(context.Context, *SnapshotCreateRequest) (*Snapshot, *Response, error) + DeleteSnapshot(context.Context, string) (*Response, error) +} + +// StorageServiceOp handles communication with the storage volumes related methods of the +// DigitalOcean API. +type StorageServiceOp struct { + client *Client +} + +// ListVolumeParams stores the options you can set for a ListVolumeCall +type ListVolumeParams struct { + Region string `json:"region"` + Name string `json:"name"` + ListOptions *ListOptions `json:"list_options,omitempty"` +} + +var _ StorageService = &StorageServiceOp{} + +// Volume represents a Digital Ocean block store volume. 
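
// Editor's note: illustrative sketch, not part of the vendored source. It uses the snapshots
// service methods defined above; ListOptions lives in godo.go outside this excerpt and is
// assumed to expose the usual Page/PerPage fields, and client.Snapshots is assumed to be the
// wired-up accessor.
package example

import (
	"context"
	"strings"

	"github.com/digitalocean/godo"
)

// deleteSnapshotsWithPrefix removes droplet snapshots whose name starts with the given prefix.
func deleteSnapshotsWithPrefix(ctx context.Context, client *godo.Client, prefix string) error {
	snapshots, _, err := client.Snapshots.ListDroplet(ctx, &godo.ListOptions{PerPage: 200})
	if err != nil {
		return err
	}
	for _, s := range snapshots {
		if !strings.HasPrefix(s.Name, prefix) {
			continue
		}
		if _, err := client.Snapshots.Delete(ctx, s.ID); err != nil {
			return err
		}
	}
	return nil
}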
+type Volume struct {
+	ID              string    `json:"id"`
+	Region          *Region   `json:"region"`
+	Name            string    `json:"name"`
+	SizeGigaBytes   int64     `json:"size_gigabytes"`
+	Description     string    `json:"description"`
+	DropletIDs      []int     `json:"droplet_ids"`
+	CreatedAt       time.Time `json:"created_at"`
+	FilesystemType  string    `json:"filesystem_type"`
+	FilesystemLabel string    `json:"filesystem_label"`
+}
+
+func (f Volume) String() string {
+	return Stringify(f)
+}
+
+func (f Volume) URN() string {
+	return ToURN("Volume", f.ID)
+}
+
+type storageVolumesRoot struct {
+	Volumes []Volume `json:"volumes"`
+	Links   *Links   `json:"links"`
+}
+
+type storageVolumeRoot struct {
+	Volume *Volume `json:"volume"`
+	Links  *Links  `json:"links,omitempty"`
+}
+
+// VolumeCreateRequest represents a request to create a block store
+// volume.
+type VolumeCreateRequest struct {
+	Region          string `json:"region"`
+	Name            string `json:"name"`
+	Description     string `json:"description"`
+	SizeGigaBytes   int64  `json:"size_gigabytes"`
+	SnapshotID      string `json:"snapshot_id"`
+	FilesystemType  string `json:"filesystem_type"`
+	FilesystemLabel string `json:"filesystem_label"`
+}
+
+// ListVolumes lists all storage volumes.
+func (svc *StorageServiceOp) ListVolumes(ctx context.Context, params *ListVolumeParams) ([]Volume, *Response, error) {
+	path := storageAllocPath
+	if params != nil {
+		if params.Region != "" && params.Name != "" {
+			path = fmt.Sprintf("%s?name=%s&region=%s", path, params.Name, params.Region)
+		} else if params.Region != "" {
+			path = fmt.Sprintf("%s?region=%s", path, params.Region)
+		} else if params.Name != "" {
+			path = fmt.Sprintf("%s?name=%s", path, params.Name)
+		}
+
+		if params.ListOptions != nil {
+			var err error
+			path, err = addOptions(path, params.ListOptions)
+			if err != nil {
+				return nil, nil, err
+			}
+		}
+	}
+
+	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(storageVolumesRoot)
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	if l := root.Links; l != nil {
+		resp.Links = l
+	}
+
+	return root.Volumes, resp, nil
+}
+
+// CreateVolume creates a storage volume. The name must be unique.
+func (svc *StorageServiceOp) CreateVolume(ctx context.Context, createRequest *VolumeCreateRequest) (*Volume, *Response, error) {
+	path := storageAllocPath
+
+	req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createRequest)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(storageVolumeRoot)
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+	return root.Volume, resp, nil
+}
+
+// GetVolume retrieves an individual storage volume.
+func (svc *StorageServiceOp) GetVolume(ctx context.Context, id string) (*Volume, *Response, error) {
+	path := fmt.Sprintf("%s/%s", storageAllocPath, id)
+
+	req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	root := new(storageVolumeRoot)
+	resp, err := svc.client.Do(ctx, req, root)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return root.Volume, resp, nil
+}
+
+// DeleteVolume deletes a storage volume.
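
// Editor's note: illustrative sketch, not part of the vendored source. It pages through the
// volumes in one region using ListVolumes above and the Links helpers from links.go;
// ListOptions (godo.go, not shown here) is assumed to expose Page and PerPage, and
// client.Storage is assumed to be the wired-up accessor.
package example

import (
	"context"

	"github.com/digitalocean/godo"
)

func listRegionVolumes(ctx context.Context, client *godo.Client, region string) ([]godo.Volume, error) {
	var all []godo.Volume
	for page := 1; ; page++ {
		params := &godo.ListVolumeParams{
			Region:      region,
			ListOptions: &godo.ListOptions{Page: page, PerPage: 200},
		}
		volumes, resp, err := client.Storage.ListVolumes(ctx, params)
		if err != nil {
			return nil, err
		}
		all = append(all, volumes...)
		if resp.Links == nil || resp.Links.IsLastPage() {
			return all, nil
		}
	}
}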
+func (svc *StorageServiceOp) DeleteVolume(ctx context.Context, id string) (*Response, error) { + path := fmt.Sprintf("%s/%s", storageAllocPath, id) + + req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + return svc.client.Do(ctx, req, nil) +} + +// SnapshotCreateRequest represents a request to create a block store +// volume. +type SnapshotCreateRequest struct { + VolumeID string `json:"volume_id"` + Name string `json:"name"` + Description string `json:"description"` +} + +// ListSnapshots lists all snapshots related to a storage volume. +func (svc *StorageServiceOp) ListSnapshots(ctx context.Context, volumeID string, opt *ListOptions) ([]Snapshot, *Response, error) { + path := fmt.Sprintf("%s/%s/snapshots", storageAllocPath, volumeID) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(snapshotsRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Snapshots, resp, nil +} + +// CreateSnapshot creates a snapshot of a storage volume. +func (svc *StorageServiceOp) CreateSnapshot(ctx context.Context, createRequest *SnapshotCreateRequest) (*Snapshot, *Response, error) { + path := fmt.Sprintf("%s/%s/snapshots", storageAllocPath, createRequest.VolumeID) + + req, err := svc.client.NewRequest(ctx, http.MethodPost, path, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(snapshotRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + return root.Snapshot, resp, nil +} + +// GetSnapshot retrieves an individual snapshot. +func (svc *StorageServiceOp) GetSnapshot(ctx context.Context, id string) (*Snapshot, *Response, error) { + path := fmt.Sprintf("%s/%s", storageSnapPath, id) + + req, err := svc.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(snapshotRoot) + resp, err := svc.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Snapshot, resp, nil +} + +// DeleteSnapshot deletes a snapshot. +func (svc *StorageServiceOp) DeleteSnapshot(ctx context.Context, id string) (*Response, error) { + path := fmt.Sprintf("%s/%s", storageSnapPath, id) + + req, err := svc.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + return svc.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/digitalocean/godo/storage_actions.go b/vendor/github.com/digitalocean/godo/storage_actions.go new file mode 100644 index 000000000000..9488975681c8 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/storage_actions.go @@ -0,0 +1,129 @@ +package godo + +import ( + "context" + "fmt" + "net/http" +) + +// StorageActionsService is an interface for interfacing with the +// storage actions endpoints of the Digital Ocean API. 
+// See: https://developers.digitalocean.com/documentation/v2#storage-actions +type StorageActionsService interface { + Attach(ctx context.Context, volumeID string, dropletID int) (*Action, *Response, error) + DetachByDropletID(ctx context.Context, volumeID string, dropletID int) (*Action, *Response, error) + Get(ctx context.Context, volumeID string, actionID int) (*Action, *Response, error) + List(ctx context.Context, volumeID string, opt *ListOptions) ([]Action, *Response, error) + Resize(ctx context.Context, volumeID string, sizeGigabytes int, regionSlug string) (*Action, *Response, error) +} + +// StorageActionsServiceOp handles communication with the storage volumes +// action related methods of the DigitalOcean API. +type StorageActionsServiceOp struct { + client *Client +} + +// StorageAttachment represents the attachement of a block storage +// volume to a specific Droplet under the device name. +type StorageAttachment struct { + DropletID int `json:"droplet_id"` +} + +// Attach a storage volume to a Droplet. +func (s *StorageActionsServiceOp) Attach(ctx context.Context, volumeID string, dropletID int) (*Action, *Response, error) { + request := &ActionRequest{ + "type": "attach", + "droplet_id": dropletID, + } + return s.doAction(ctx, volumeID, request) +} + +// DetachByDropletID a storage volume from a Droplet by Droplet ID. +func (s *StorageActionsServiceOp) DetachByDropletID(ctx context.Context, volumeID string, dropletID int) (*Action, *Response, error) { + request := &ActionRequest{ + "type": "detach", + "droplet_id": dropletID, + } + return s.doAction(ctx, volumeID, request) +} + +// Get an action for a particular storage volume by id. +func (s *StorageActionsServiceOp) Get(ctx context.Context, volumeID string, actionID int) (*Action, *Response, error) { + path := fmt.Sprintf("%s/%d", storageAllocationActionPath(volumeID), actionID) + return s.get(ctx, path) +} + +// List the actions for a particular storage volume. +func (s *StorageActionsServiceOp) List(ctx context.Context, volumeID string, opt *ListOptions) ([]Action, *Response, error) { + path := storageAllocationActionPath(volumeID) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + return s.list(ctx, path) +} + +// Resize a storage volume. 
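
// Editor's note: illustrative sketch, not part of the vendored source. It chains the Attach
// and Resize actions defined above. Both return an *godo.Action (actions.go, outside this
// excerpt) that callers typically poll via client.StorageActions.Get until it completes; that
// polling is omitted here, and client.StorageActions is an assumed accessor.
package example

import (
	"context"

	"github.com/digitalocean/godo"
)

func attachAndResize(ctx context.Context, client *godo.Client, volumeID string, dropletID, newSizeGB int, region string) error {
	if _, _, err := client.StorageActions.Attach(ctx, volumeID, dropletID); err != nil {
		return err
	}
	_, _, err := client.StorageActions.Resize(ctx, volumeID, newSizeGB, region)
	return err
}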
+func (s *StorageActionsServiceOp) Resize(ctx context.Context, volumeID string, sizeGigabytes int, regionSlug string) (*Action, *Response, error) { + request := &ActionRequest{ + "type": "resize", + "size_gigabytes": sizeGigabytes, + "region": regionSlug, + } + return s.doAction(ctx, volumeID, request) +} + +func (s *StorageActionsServiceOp) doAction(ctx context.Context, volumeID string, request *ActionRequest) (*Action, *Response, error) { + path := storageAllocationActionPath(volumeID) + + req, err := s.client.NewRequest(ctx, http.MethodPost, path, request) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Event, resp, err +} + +func (s *StorageActionsServiceOp) get(ctx context.Context, path string) (*Action, *Response, error) { + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Event, resp, err +} + +func (s *StorageActionsServiceOp) list(ctx context.Context, path string) ([]Action, *Response, error) { + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(actionsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Actions, resp, err +} + +func storageAllocationActionPath(volumeID string) string { + return fmt.Sprintf("%s/%s/actions", storageAllocPath, volumeID) +} diff --git a/vendor/github.com/digitalocean/godo/strings.go b/vendor/github.com/digitalocean/godo/strings.go new file mode 100644 index 000000000000..4d5c0ad22079 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/strings.go @@ -0,0 +1,102 @@ +package godo + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +var timestampType = reflect.TypeOf(Timestamp{}) + +type ResourceWithURN interface { + URN() string +} + +// ToURN converts the resource type and ID to a valid DO API URN. 
+func ToURN(resourceType string, id interface{}) string { + return fmt.Sprintf("%s:%s:%v", "do", strings.ToLower(resourceType), id) +} + +// Stringify attempts to create a string representation of DigitalOcean types +func Stringify(message interface{}) string { + var buf bytes.Buffer + v := reflect.ValueOf(message) + stringifyValue(&buf, v) + return buf.String() +} + +// stringifyValue was graciously cargoculted from the goprotubuf library +func stringifyValue(w io.Writer, val reflect.Value) { + if val.Kind() == reflect.Ptr && val.IsNil() { + _, _ = w.Write([]byte("")) + return + } + + v := reflect.Indirect(val) + + switch v.Kind() { + case reflect.String: + fmt.Fprintf(w, `"%s"`, v) + case reflect.Slice: + stringifySlice(w, v) + return + case reflect.Struct: + stringifyStruct(w, v) + default: + if v.CanInterface() { + fmt.Fprint(w, v.Interface()) + } + } +} + +func stringifySlice(w io.Writer, v reflect.Value) { + _, _ = w.Write([]byte{'['}) + for i := 0; i < v.Len(); i++ { + if i > 0 { + _, _ = w.Write([]byte{' '}) + } + + stringifyValue(w, v.Index(i)) + } + + _, _ = w.Write([]byte{']'}) +} + +func stringifyStruct(w io.Writer, v reflect.Value) { + if v.Type().Name() != "" { + _, _ = w.Write([]byte(v.Type().String())) + } + + // special handling of Timestamp values + if v.Type() == timestampType { + fmt.Fprintf(w, "{%s}", v.Interface()) + return + } + + _, _ = w.Write([]byte{'{'}) + + var sep bool + for i := 0; i < v.NumField(); i++ { + fv := v.Field(i) + if fv.Kind() == reflect.Ptr && fv.IsNil() { + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + continue + } + + if sep { + _, _ = w.Write([]byte(", ")) + } else { + sep = true + } + + _, _ = w.Write([]byte(v.Type().Field(i).Name)) + _, _ = w.Write([]byte{':'}) + stringifyValue(w, fv) + } + + _, _ = w.Write([]byte{'}'}) +} diff --git a/vendor/github.com/digitalocean/godo/tags.go b/vendor/github.com/digitalocean/godo/tags.go new file mode 100644 index 000000000000..295a2e9778e8 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/tags.go @@ -0,0 +1,231 @@ +package godo + +import ( + "context" + "fmt" + "net/http" +) + +const tagsBasePath = "v2/tags" + +// TagsService is an interface for interfacing with the tags +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#tags +type TagsService interface { + List(context.Context, *ListOptions) ([]Tag, *Response, error) + Get(context.Context, string) (*Tag, *Response, error) + Create(context.Context, *TagCreateRequest) (*Tag, *Response, error) + Delete(context.Context, string) (*Response, error) + + TagResources(context.Context, string, *TagResourcesRequest) (*Response, error) + UntagResources(context.Context, string, *UntagResourcesRequest) (*Response, error) +} + +// TagsServiceOp handles communication with tag related method of the +// DigitalOcean API. +type TagsServiceOp struct { + client *Client +} + +var _ TagsService = &TagsServiceOp{} + +// ResourceType represents a class of resource, currently only droplet are supported +type ResourceType string + +const ( + //DropletResourceType holds the string representing our ResourceType of Droplet. + DropletResourceType ResourceType = "droplet" + //ImageResourceType holds the string representing our ResourceType of Image. + ImageResourceType ResourceType = "image" + //VolumeResourceType holds the string representing our ResourceType of Volume. + VolumeResourceType ResourceType = "volume" + //LoadBalancerResourceType holds the string representing our ResourceType of LoadBalancer. 
+ LoadBalancerResourceType ResourceType = "load_balancer" +) + +// Resource represent a single resource for associating/disassociating with tags +type Resource struct { + ID string `json:"resource_id,omit_empty"` + Type ResourceType `json:"resource_type,omit_empty"` +} + +// TaggedResources represent the set of resources a tag is attached to +type TaggedResources struct { + Count int `json:"count"` + LastTaggedURI string `json:"last_tagged_uri,omitempty"` + Droplets *TaggedDropletsResources `json:"droplets,omitempty"` + Images *TaggedImagesResources `json:"images"` + Volumes *TaggedVolumesResources `json:"volumes"` +} + +// TaggedDropletsResources represent the droplet resources a tag is attached to +type TaggedDropletsResources struct { + Count int `json:"count,float64,omitempty"` + LastTagged *Droplet `json:"last_tagged,omitempty"` + LastTaggedURI string `json:"last_tagged_uri,omitempty"` +} + +// TaggedResourcesData represent the generic resources a tag is attached to +type TaggedResourcesData struct { + Count int `json:"count,float64,omitempty"` + LastTaggedURI string `json:"last_tagged_uri,omitempty"` +} + +// TaggedImagesResources represent the image resources a tag is attached to +type TaggedImagesResources TaggedResourcesData + +// TaggedVolumesResources represent the volume resources a tag is attached to +type TaggedVolumesResources TaggedResourcesData + +// Tag represent DigitalOcean tag +type Tag struct { + Name string `json:"name,omitempty"` + Resources *TaggedResources `json:"resources,omitempty"` +} + +//TagCreateRequest represents the JSON structure of a request of that type. +type TagCreateRequest struct { + Name string `json:"name"` +} + +// TagResourcesRequest represents the JSON structure of a request of that type. +type TagResourcesRequest struct { + Resources []Resource `json:"resources"` +} + +// UntagResourcesRequest represents the JSON structure of a request of that type. 
+type UntagResourcesRequest struct { + Resources []Resource `json:"resources"` +} + +type tagsRoot struct { + Tags []Tag `json:"tags"` + Links *Links `json:"links"` +} + +type tagRoot struct { + Tag *Tag `json:"tag"` +} + +// List all tags +func (s *TagsServiceOp) List(ctx context.Context, opt *ListOptions) ([]Tag, *Response, error) { + path := tagsBasePath + path, err := addOptions(path, opt) + + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(tagsRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Tags, resp, err +} + +// Get a single tag +func (s *TagsServiceOp) Get(ctx context.Context, name string) (*Tag, *Response, error) { + path := fmt.Sprintf("%s/%s", tagsBasePath, name) + + req, err := s.client.NewRequest(ctx, http.MethodGet, path, nil) + if err != nil { + return nil, nil, err + } + + root := new(tagRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Tag, resp, err +} + +// Create a new tag +func (s *TagsServiceOp) Create(ctx context.Context, createRequest *TagCreateRequest) (*Tag, *Response, error) { + if createRequest == nil { + return nil, nil, NewArgError("createRequest", "cannot be nil") + } + + req, err := s.client.NewRequest(ctx, http.MethodPost, tagsBasePath, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(tagRoot) + resp, err := s.client.Do(ctx, req, root) + if err != nil { + return nil, resp, err + } + + return root.Tag, resp, err +} + +// Delete an existing tag +func (s *TagsServiceOp) Delete(ctx context.Context, name string) (*Response, error) { + if name == "" { + return nil, NewArgError("name", "cannot be empty") + } + + path := fmt.Sprintf("%s/%s", tagsBasePath, name) + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + + return resp, err +} + +// TagResources associates resources with a given Tag. +func (s *TagsServiceOp) TagResources(ctx context.Context, name string, tagRequest *TagResourcesRequest) (*Response, error) { + if name == "" { + return nil, NewArgError("name", "cannot be empty") + } + + if tagRequest == nil { + return nil, NewArgError("tagRequest", "cannot be nil") + } + + path := fmt.Sprintf("%s/%s/resources", tagsBasePath, name) + req, err := s.client.NewRequest(ctx, http.MethodPost, path, tagRequest) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + + return resp, err +} + +// UntagResources dissociates resources with a given Tag. 
+func (s *TagsServiceOp) UntagResources(ctx context.Context, name string, untagRequest *UntagResourcesRequest) (*Response, error) { + if name == "" { + return nil, NewArgError("name", "cannot be empty") + } + + if untagRequest == nil { + return nil, NewArgError("tagRequest", "cannot be nil") + } + + path := fmt.Sprintf("%s/%s/resources", tagsBasePath, name) + req, err := s.client.NewRequest(ctx, http.MethodDelete, path, untagRequest) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + + return resp, err +} diff --git a/vendor/github.com/digitalocean/godo/timestamp.go b/vendor/github.com/digitalocean/godo/timestamp.go new file mode 100644 index 000000000000..37a28e5f2f2e --- /dev/null +++ b/vendor/github.com/digitalocean/godo/timestamp.go @@ -0,0 +1,35 @@ +package godo + +import ( + "strconv" + "time" +) + +// Timestamp represents a time that can be unmarshalled from a JSON string +// formatted as either an RFC3339 or Unix timestamp. All +// exported methods of time.Time can be called on Timestamp. +type Timestamp struct { + time.Time +} + +func (t Timestamp) String() string { + return t.Time.String() +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// Time is expected in RFC3339 or Unix format. +func (t *Timestamp) UnmarshalJSON(data []byte) error { + str := string(data) + i, err := strconv.ParseInt(str, 10, 64) + if err == nil { + t.Time = time.Unix(i, 0) + } else { + t.Time, err = time.Parse(`"`+time.RFC3339+`"`, str) + } + return err +} + +// Equal reports whether t and u are equal based on time.Equal +func (t Timestamp) Equal(u Timestamp) bool { + return t.Time.Equal(u.Time) +} diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md index 64869af347a5..386c2a457a8b 100644 --- a/vendor/github.com/google/gofuzz/README.md +++ b/vendor/github.com/google/gofuzz/README.md @@ -3,7 +3,7 @@ gofuzz gofuzz is a library for populating go objects with random values. -[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz) +[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.svg)](https://godoc.org/github.com/google/gofuzz) [![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz) This is useful for testing: diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go index 1dfa80a6fca9..da0a5f93800d 100644 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -20,6 +20,7 @@ import ( "fmt" "math/rand" "reflect" + "regexp" "time" ) @@ -28,13 +29,14 @@ type fuzzFuncMap map[reflect.Type]reflect.Value // Fuzzer knows how to fill any object with random fields. type Fuzzer struct { - fuzzFuncs fuzzFuncMap - defaultFuzzFuncs fuzzFuncMap - r *rand.Rand - nilChance float64 - minElements int - maxElements int - maxDepth int + fuzzFuncs fuzzFuncMap + defaultFuzzFuncs fuzzFuncMap + r *rand.Rand + nilChance float64 + minElements int + maxElements int + maxDepth int + skipFieldPatterns []*regexp.Regexp } // New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, @@ -150,6 +152,13 @@ func (f *Fuzzer) MaxDepth(d int) *Fuzzer { return f } +// Skip fields which match the supplied pattern. 
Call this multiple times if needed +// This is useful to skip XXX_ fields generated by protobuf +func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer { + f.skipFieldPatterns = append(f.skipFieldPatterns, pattern) + return f +} + // Fuzz recursively fills all of obj's fields with something random. First // this tries to find a custom fuzz function (see Funcs). If there is no // custom function this tests whether the object implements fuzz.Interface and, @@ -274,7 +283,17 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { v.Set(reflect.Zero(v.Type())) case reflect.Struct: for i := 0; i < v.NumField(); i++ { - fc.doFuzz(v.Field(i), 0) + skipField := false + fieldName := v.Type().Field(i).Name + for _, pattern := range fc.fuzzer.skipFieldPatterns { + if pattern.MatchString(fieldName) { + skipField = true + break + } + } + if !skipField { + fc.doFuzz(v.Field(i), 0) + } } case reflect.Chan: fallthrough diff --git a/vendor/github.com/gophercloud/gophercloud/.gitignore b/vendor/github.com/gophercloud/gophercloud/.gitignore new file mode 100644 index 000000000000..dd91ed205597 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/.gitignore @@ -0,0 +1,3 @@ +**/*.swp +.idea +.vscode diff --git a/vendor/github.com/gophercloud/gophercloud/.travis.yml b/vendor/github.com/gophercloud/gophercloud/.travis.yml new file mode 100644 index 000000000000..9153a00fc55f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/.travis.yml @@ -0,0 +1,25 @@ +language: go +sudo: false +install: +- GO111MODULE=off go get golang.org/x/crypto/ssh +- GO111MODULE=off go get -v -tags 'fixtures acceptance' ./... +- GO111MODULE=off go get github.com/wadey/gocovmerge +- GO111MODULE=off go get github.com/mattn/goveralls +- GO111MODULE=off go get golang.org/x/tools/cmd/goimports +go: +- "1.10" +- "1.11" +- "1.12" +- "tip" +env: + global: + - secure: "xSQsAG5wlL9emjbCdxzz/hYQsSpJ/bABO1kkbwMSISVcJ3Nk0u4ywF+LS4bgeOnwPfmFvNTOqVDu3RwEvMeWXSI76t1piCPcObutb2faKLVD/hLoAS76gYX+Z8yGWGHrSB7Do5vTPj1ERe2UljdrnsSeOXzoDwFxYRaZLX4bBOB4AyoGvRniil5QXPATiA1tsWX1VMicj8a4F8X+xeESzjt1Q5Iy31e7vkptu71bhvXCaoo5QhYwT+pLR9dN0S1b7Ro0KVvkRefmr1lUOSYd2e74h6Lc34tC1h3uYZCS4h47t7v5cOXvMNxinEj2C51RvbjvZI1RLVdkuAEJD1Iz4+Ote46nXbZ//6XRZMZz/YxQ13l7ux1PFjgEB6HAapmF5Xd8PRsgeTU9LRJxpiTJ3P5QJ3leS1va8qnziM5kYipj/Rn+V8g2ad/rgkRox9LSiR9VYZD2Pe45YCb1mTKSl2aIJnV7nkOqsShY5LNB4JZSg7xIffA+9YVDktw8dJlATjZqt7WvJJ49g6A61mIUV4C15q2JPGKTkZzDiG81NtmS7hFa7k0yaE2ELgYocbcuyUcAahhxntYTC0i23nJmEHVNiZmBO3u7EgpWe4KGVfumU+lt12tIn5b3dZRBBUk3QakKKozSK1QPHGpk/AZGrhu7H6l8to6IICKWtDcyMPQ=" + - GO111MODULE=on +before_script: +- go vet ./... 
+script: +- ./script/coverage +- ./script/unittest +- ./script/format +after_success: +- $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=cover.out diff --git a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml new file mode 100644 index 000000000000..135e3b203a8d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml @@ -0,0 +1,114 @@ +- job: + name: gophercloud-unittest + parent: golang-test + description: | + Run gophercloud unit test + run: .zuul/playbooks/gophercloud-unittest/run.yaml + nodeset: ubuntu-xenial-ut + +- job: + name: gophercloud-acceptance-test + parent: golang-test + description: | + Run gophercloud acceptance test on master branch + run: .zuul/playbooks/gophercloud-acceptance-test/run.yaml + +- job: + name: gophercloud-acceptance-test-ironic + parent: golang-test + description: | + Run gophercloud ironic acceptance test on master branch + run: .zuul/playbooks/gophercloud-acceptance-test-ironic/run.yaml + +- job: + name: gophercloud-acceptance-test-stein + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on stein branch + vars: + global_env: + OS_BRANCH: stable/stein + +- job: + name: gophercloud-acceptance-test-rocky + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on rocky branch + vars: + global_env: + OS_BRANCH: stable/rocky + +- job: + name: gophercloud-acceptance-test-queens + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on queens branch + vars: + global_env: + OS_BRANCH: stable/queens + +- job: + name: gophercloud-acceptance-test-pike + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on pike branch + vars: + global_env: + OS_BRANCH: stable/pike + +- job: + name: gophercloud-acceptance-test-ocata + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on ocata branch + vars: + global_env: + OS_BRANCH: stable/ocata + +- job: + name: gophercloud-acceptance-test-newton + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on newton branch + vars: + global_env: + OS_BRANCH: stable/newton + +- job: + name: gophercloud-acceptance-test-mitaka + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on mitaka branch + vars: + global_env: + OS_BRANCH: stable/mitaka + nodeset: ubuntu-trusty + +- project: + name: gophercloud/gophercloud + check: + jobs: + - gophercloud-unittest + - gophercloud-acceptance-test + - gophercloud-acceptance-test-ironic + recheck-mitaka: + jobs: + - gophercloud-acceptance-test-mitaka + recheck-newton: + jobs: + - gophercloud-acceptance-test-newton + recheck-ocata: + jobs: + - gophercloud-acceptance-test-ocata + recheck-pike: + jobs: + - gophercloud-acceptance-test-pike + recheck-queens: + jobs: + - gophercloud-acceptance-test-queens + recheck-rocky: + jobs: + - gophercloud-acceptance-test-rocky + recheck-stein: + jobs: + - gophercloud-acceptance-test-stein diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/vendor/github.com/gophercloud/gophercloud/LICENSE b/vendor/github.com/gophercloud/gophercloud/LICENSE new file mode 100644 index 000000000000..fbbbc9e4cbad --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/LICENSE @@ -0,0 +1,191 @@ +Copyright 2012-2013 
Rackspace, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. + +------ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/gophercloud/gophercloud/README.md b/vendor/github.com/gophercloud/gophercloud/README.md new file mode 100644 index 000000000000..ad29041d9bfe --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/README.md @@ -0,0 +1,159 @@ +# Gophercloud: an OpenStack SDK for Go +[![Build Status](https://travis-ci.org/gophercloud/gophercloud.svg?branch=master)](https://travis-ci.org/gophercloud/gophercloud) +[![Coverage Status](https://coveralls.io/repos/github/gophercloud/gophercloud/badge.svg?branch=master)](https://coveralls.io/github/gophercloud/gophercloud?branch=master) + +Gophercloud is an OpenStack Go SDK. + +## Useful links + +* [Reference documentation](http://godoc.org/github.com/gophercloud/gophercloud) +* [Effective Go](https://golang.org/doc/effective_go.html) + +## How to install + +Before installing, you need to ensure that your [GOPATH environment variable](https://golang.org/doc/code.html#GOPATH) +is pointing to an appropriate directory where you want to install Gophercloud: + +```bash +mkdir $HOME/go +export GOPATH=$HOME/go +``` + +To protect yourself against changes in your dependencies, we highly recommend choosing a +[dependency management solution](https://github.com/golang/go/wiki/PackageManagementTools) for +your projects, such as [godep](https://github.com/tools/godep). Once this is set up, you can install +Gophercloud as a dependency like so: + +```bash +go get github.com/gophercloud/gophercloud + +# Edit your code to import relevant packages from "github.com/gophercloud/gophercloud" + +godep save ./... +``` + +This will install all the source files you need into a `Godeps/_workspace` directory, which is +referenceable from your own source files when you use the `godep go` command. + +## Getting started + +### Credentials + +Because you'll be hitting an API, you will need to retrieve your OpenStack +credentials and either store them as environment variables or in your local Go +files. The first method is recommended because it decouples credential +information from source code, allowing you to push the latter to your version +control system without any security risk. + +You will need to retrieve the following: + +* username +* password +* a valid Keystone identity URL + +For users that have the OpenStack dashboard installed, there's a shortcut. If +you visit the `project/access_and_security` path in Horizon and click on the +"Download OpenStack RC File" button at the top right hand corner, you will +download a bash file that exports all of your access details to environment +variables. To execute the file, run `source admin-openrc.sh` and you will be +prompted for your password. + +### Authentication + +Once you have access to your credentials, you can begin plugging them into +Gophercloud. The next step is authentication, and this is handled by a base +"Provider" struct. 
To get one, you can either pass in your credentials +explicitly, or tell Gophercloud to use environment variables: + +```go +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/utils" +) + +// Option 1: Pass in the values yourself +opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://openstack.example.com:5000/v2.0", + Username: "{username}", + Password: "{password}", +} + +// Option 2: Use a utility function to retrieve all your environment variables +opts, err := openstack.AuthOptionsFromEnv() +``` + +Once you have the `opts` variable, you can pass it in and get back a +`ProviderClient` struct: + +```go +provider, err := openstack.AuthenticatedClient(opts) +``` + +The `ProviderClient` is the top-level client that all of your OpenStack services +derive from. The provider contains all of the authentication details that allow +your Go code to access the API - such as the base URL and token ID. + +### Provision a server + +Once we have a base Provider, we inject it as a dependency into each OpenStack +service. In order to work with the Compute API, we need a Compute service +client; which can be created like so: + +```go +client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), +}) +``` + +We then use this `client` for any Compute API operation we want. In our case, +we want to provision a new server - so we invoke the `Create` method and pass +in the flavor ID (hardware specification) and image ID (operating system) we're +interested in: + +```go +import "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + +server, err := servers.Create(client, servers.CreateOpts{ + Name: "My new server!", + FlavorRef: "flavor_id", + ImageRef: "image_id", +}).Extract() +``` + +The above code sample creates a new server with the parameters, and embodies the +new resource in the `server` variable (a +[`servers.Server`](http://godoc.org/github.com/gophercloud/gophercloud) struct). + +## Advanced Usage + +Have a look at the [FAQ](./docs/FAQ.md) for some tips on customizing the way Gophercloud works. + +## Backwards-Compatibility Guarantees + +None. Vendor it and write tests covering the parts you use. + +## Contributing + +See the [contributing guide](./.github/CONTRIBUTING.md). + +## Help and feedback + +If you're struggling with something or have spotted a potential bug, feel free +to submit an issue to our [bug tracker](https://github.com/gophercloud/gophercloud/issues). + +## Thank You + +We'd like to extend special thanks and appreciation to the following: + +### OpenLab + + + +OpenLab is providing a full CI environment to test each PR and merge for a variety of OpenStack releases. + +### VEXXHOST + + + +VEXXHOST is providing their services to assist with the development and testing of Gophercloud. diff --git a/vendor/github.com/gophercloud/gophercloud/auth_options.go b/vendor/github.com/gophercloud/gophercloud/auth_options.go new file mode 100644 index 000000000000..5ffa8d1e0a76 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/auth_options.go @@ -0,0 +1,437 @@ +package gophercloud + +/* +AuthOptions stores information needed to authenticate to an OpenStack Cloud. +You can populate one manually, or use a provider's AuthOptionsFromEnv() function +to read relevant information from the standard environment variables. 
Pass one +to a provider's AuthenticatedClient function to authenticate and obtain a +ProviderClient representing an active session on that provider. + +Its fields are the union of those recognized by each identity implementation and +provider. + +An example of manually providing authentication information: + + opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://openstack.example.com:5000/v2.0", + Username: "{username}", + Password: "{password}", + TenantID: "{tenant_id}", + } + + provider, err := openstack.AuthenticatedClient(opts) + +An example of using AuthOptionsFromEnv(), where the environment variables can +be read from a file, such as a standard openrc file: + + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) +*/ +type AuthOptions struct { + // IdentityEndpoint specifies the HTTP endpoint that is required to work with + // the Identity API of the appropriate version. While it's ultimately needed by + // all of the identity services, it will often be populated by a provider-level + // function. + // + // The IdentityEndpoint is typically referred to as the "auth_url" or + // "OS_AUTH_URL" in the information provided by the cloud operator. + IdentityEndpoint string `json:"-"` + + // Username is required if using Identity V2 API. Consult with your provider's + // control panel to discover your account's username. In Identity V3, either + // UserID or a combination of Username and DomainID or DomainName are needed. + Username string `json:"username,omitempty"` + UserID string `json:"-"` + + Password string `json:"password,omitempty"` + + // At most one of DomainID and DomainName must be provided if using Username + // with Identity V3. Otherwise, either are optional. + DomainID string `json:"-"` + DomainName string `json:"name,omitempty"` + + // The TenantID and TenantName fields are optional for the Identity V2 API. + // The same fields are known as project_id and project_name in the Identity + // V3 API, but are collected as TenantID and TenantName here in both cases. + // Some providers allow you to specify a TenantName instead of the TenantId. + // Some require both. Your provider's authentication policies will determine + // how these fields influence authentication. + // If DomainID or DomainName are provided, they will also apply to TenantName. + // It is not currently possible to authenticate with Username and a Domain + // and scope to a Project in a different Domain by using TenantName. To + // accomplish that, the ProjectID will need to be provided as the TenantID + // option. + TenantID string `json:"tenantId,omitempty"` + TenantName string `json:"tenantName,omitempty"` + + // AllowReauth should be set to true if you grant permission for Gophercloud to + // cache your credentials in memory, and to allow Gophercloud to attempt to + // re-authenticate automatically if/when your token expires. If you set it to + // false, it will not cache these settings, but re-authentication will not be + // possible. This setting defaults to false. + // + // NOTE: The reauth function will try to re-authenticate endlessly if left + // unchecked. The way to limit the number of attempts is to provide a custom + // HTTP client to the provider client and provide a transport that implements + // the RoundTripper interface and stores the number of failed retries. 
For an + // example of this, see here: + // https://github.com/rackspace/rack/blob/1.0.0/auth/clients.go#L311 + AllowReauth bool `json:"-"` + + // TokenID allows users to authenticate (possibly as another user) with an + // authentication token ID. + TokenID string `json:"-"` + + // Scope determines the scoping of the authentication request. + Scope *AuthScope `json:"-"` + + // Authentication through Application Credentials requires supplying name, project and secret + // For project we can use TenantID + ApplicationCredentialID string `json:"-"` + ApplicationCredentialName string `json:"-"` + ApplicationCredentialSecret string `json:"-"` +} + +// AuthScope allows a created token to be limited to a specific domain or project. +type AuthScope struct { + ProjectID string + ProjectName string + DomainID string + DomainName string +} + +// ToTokenV2CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder +// interface in the v2 tokens package +func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { + // Populate the request map. + authMap := make(map[string]interface{}) + + if opts.Username != "" { + if opts.Password != "" { + authMap["passwordCredentials"] = map[string]interface{}{ + "username": opts.Username, + "password": opts.Password, + } + } else { + return nil, ErrMissingInput{Argument: "Password"} + } + } else if opts.TokenID != "" { + authMap["token"] = map[string]interface{}{ + "id": opts.TokenID, + } + } else { + return nil, ErrMissingInput{Argument: "Username"} + } + + if opts.TenantID != "" { + authMap["tenantId"] = opts.TenantID + } + if opts.TenantName != "" { + authMap["tenantName"] = opts.TenantName + } + + return map[string]interface{}{"auth": authMap}, nil +} + +func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { + type domainReq struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + } + + type projectReq struct { + Domain *domainReq `json:"domain,omitempty"` + Name *string `json:"name,omitempty"` + ID *string `json:"id,omitempty"` + } + + type userReq struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Password string `json:"password,omitempty"` + Domain *domainReq `json:"domain,omitempty"` + } + + type passwordReq struct { + User userReq `json:"user"` + } + + type tokenReq struct { + ID string `json:"id"` + } + + type applicationCredentialReq struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + User *userReq `json:"user,omitempty"` + Secret *string `json:"secret,omitempty"` + } + + type identityReq struct { + Methods []string `json:"methods"` + Password *passwordReq `json:"password,omitempty"` + Token *tokenReq `json:"token,omitempty"` + ApplicationCredential *applicationCredentialReq `json:"application_credential,omitempty"` + } + + type authReq struct { + Identity identityReq `json:"identity"` + } + + type request struct { + Auth authReq `json:"auth"` + } + + // Populate the request structure based on the provided arguments. Create and return an error + // if insufficient or incompatible information is present. + var req request + + if opts.Password == "" { + if opts.TokenID != "" { + // Because we aren't using password authentication, it's an error to also provide any of the user-based authentication + // parameters. 
+ if opts.Username != "" { + return nil, ErrUsernameWithToken{} + } + if opts.UserID != "" { + return nil, ErrUserIDWithToken{} + } + if opts.DomainID != "" { + return nil, ErrDomainIDWithToken{} + } + if opts.DomainName != "" { + return nil, ErrDomainNameWithToken{} + } + + // Configure the request for Token authentication. + req.Auth.Identity.Methods = []string{"token"} + req.Auth.Identity.Token = &tokenReq{ + ID: opts.TokenID, + } + + } else if opts.ApplicationCredentialID != "" { + // Configure the request for ApplicationCredentialID authentication. + // https://github.com/openstack/keystoneauth/blob/stable/rocky/keystoneauth1/identity/v3/application_credential.py#L48-L67 + // There are three kinds of possible application_credential requests + // 1. application_credential id + secret + // 2. application_credential name + secret + user_id + // 3. application_credential name + secret + username + domain_id / domain_name + if opts.ApplicationCredentialSecret == "" { + return nil, ErrAppCredMissingSecret{} + } + req.Auth.Identity.Methods = []string{"application_credential"} + req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{ + ID: &opts.ApplicationCredentialID, + Secret: &opts.ApplicationCredentialSecret, + } + } else if opts.ApplicationCredentialName != "" { + if opts.ApplicationCredentialSecret == "" { + return nil, ErrAppCredMissingSecret{} + } + + var userRequest *userReq + + if opts.UserID != "" { + // UserID could be used without the domain information + userRequest = &userReq{ + ID: &opts.UserID, + } + } + + if userRequest == nil && opts.Username == "" { + // Make sure that Username or UserID are provided + return nil, ErrUsernameOrUserID{} + } + + if userRequest == nil && opts.DomainID != "" { + userRequest = &userReq{ + Name: &opts.Username, + Domain: &domainReq{ID: &opts.DomainID}, + } + } + + if userRequest == nil && opts.DomainName != "" { + userRequest = &userReq{ + Name: &opts.Username, + Domain: &domainReq{Name: &opts.DomainName}, + } + } + + // Make sure that DomainID or DomainName are provided among Username + if userRequest == nil { + return nil, ErrDomainIDOrDomainName{} + } + + req.Auth.Identity.Methods = []string{"application_credential"} + req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{ + Name: &opts.ApplicationCredentialName, + User: userRequest, + Secret: &opts.ApplicationCredentialSecret, + } + } else { + // If no password or token ID or ApplicationCredential are available, authentication can't continue. + return nil, ErrMissingPassword{} + } + } else { + // Password authentication. + req.Auth.Identity.Methods = []string{"password"} + + // At least one of Username and UserID must be specified. + if opts.Username == "" && opts.UserID == "" { + return nil, ErrUsernameOrUserID{} + } + + if opts.Username != "" { + // If Username is provided, UserID may not be provided. + if opts.UserID != "" { + return nil, ErrUsernameOrUserID{} + } + + // Either DomainID or DomainName must also be specified. + if opts.DomainID == "" && opts.DomainName == "" { + return nil, ErrDomainIDOrDomainName{} + } + + if opts.DomainID != "" { + if opts.DomainName != "" { + return nil, ErrDomainIDOrDomainName{} + } + + // Configure the request for Username and Password authentication with a DomainID. 
+ req.Auth.Identity.Password = &passwordReq{ + User: userReq{ + Name: &opts.Username, + Password: opts.Password, + Domain: &domainReq{ID: &opts.DomainID}, + }, + } + } + + if opts.DomainName != "" { + // Configure the request for Username and Password authentication with a DomainName. + req.Auth.Identity.Password = &passwordReq{ + User: userReq{ + Name: &opts.Username, + Password: opts.Password, + Domain: &domainReq{Name: &opts.DomainName}, + }, + } + } + } + + if opts.UserID != "" { + // If UserID is specified, neither DomainID nor DomainName may be. + if opts.DomainID != "" { + return nil, ErrDomainIDWithUserID{} + } + if opts.DomainName != "" { + return nil, ErrDomainNameWithUserID{} + } + + // Configure the request for UserID and Password authentication. + req.Auth.Identity.Password = &passwordReq{ + User: userReq{ID: &opts.UserID, Password: opts.Password}, + } + } + } + + b, err := BuildRequestBody(req, "") + if err != nil { + return nil, err + } + + if len(scope) != 0 { + b["auth"].(map[string]interface{})["scope"] = scope + } + + return b, nil +} + +func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { + // For backwards compatibility. + // If AuthOptions.Scope was not set, try to determine it. + // This works well for common scenarios. + if opts.Scope == nil { + opts.Scope = new(AuthScope) + if opts.TenantID != "" { + opts.Scope.ProjectID = opts.TenantID + } else { + if opts.TenantName != "" { + opts.Scope.ProjectName = opts.TenantName + opts.Scope.DomainID = opts.DomainID + opts.Scope.DomainName = opts.DomainName + } + } + } + + if opts.Scope.ProjectName != "" { + // ProjectName provided: either DomainID or DomainName must also be supplied. + // ProjectID may not be supplied. + if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" { + return nil, ErrScopeDomainIDOrDomainName{} + } + if opts.Scope.ProjectID != "" { + return nil, ErrScopeProjectIDOrProjectName{} + } + + if opts.Scope.DomainID != "" { + // ProjectName + DomainID + return map[string]interface{}{ + "project": map[string]interface{}{ + "name": &opts.Scope.ProjectName, + "domain": map[string]interface{}{"id": &opts.Scope.DomainID}, + }, + }, nil + } + + if opts.Scope.DomainName != "" { + // ProjectName + DomainName + return map[string]interface{}{ + "project": map[string]interface{}{ + "name": &opts.Scope.ProjectName, + "domain": map[string]interface{}{"name": &opts.Scope.DomainName}, + }, + }, nil + } + } else if opts.Scope.ProjectID != "" { + // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. + if opts.Scope.DomainID != "" { + return nil, ErrScopeProjectIDAlone{} + } + if opts.Scope.DomainName != "" { + return nil, ErrScopeProjectIDAlone{} + } + + // ProjectID + return map[string]interface{}{ + "project": map[string]interface{}{ + "id": &opts.Scope.ProjectID, + }, + }, nil + } else if opts.Scope.DomainID != "" { + // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. 
+ if opts.Scope.DomainName != "" { + return nil, ErrScopeDomainIDOrDomainName{} + } + + // DomainID + return map[string]interface{}{ + "domain": map[string]interface{}{ + "id": &opts.Scope.DomainID, + }, + }, nil + } else if opts.Scope.DomainName != "" { + // DomainName + return map[string]interface{}{ + "domain": map[string]interface{}{ + "name": &opts.Scope.DomainName, + }, + }, nil + } + + return nil, nil +} + +func (opts AuthOptions) CanReauth() bool { + return opts.AllowReauth +} diff --git a/vendor/github.com/gophercloud/gophercloud/auth_result.go b/vendor/github.com/gophercloud/gophercloud/auth_result.go new file mode 100644 index 000000000000..2e4699b978ce --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/auth_result.go @@ -0,0 +1,52 @@ +package gophercloud + +/* +AuthResult is the result from the request that was used to obtain a provider +client's Keystone token. It is returned from ProviderClient.GetAuthResult(). + +The following types satisfy this interface: + + github.com/gophercloud/gophercloud/openstack/identity/v2/tokens.CreateResult + github.com/gophercloud/gophercloud/openstack/identity/v3/tokens.CreateResult + +Usage example: + + import ( + "github.com/gophercloud/gophercloud" + tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" + ) + + func GetAuthenticatedUserID(providerClient *gophercloud.ProviderClient) (string, error) { + r := providerClient.GetAuthResult() + if r == nil { + //ProviderClient did not use openstack.Authenticate(), e.g. because token + //was set manually with ProviderClient.SetToken() + return "", errors.New("no AuthResult available") + } + switch r := r.(type) { + case tokens2.CreateResult: + u, err := r.ExtractUser() + if err != nil { + return "", err + } + return u.ID, nil + case tokens3.CreateResult: + u, err := r.ExtractUser() + if err != nil { + return "", err + } + return u.ID, nil + default: + panic(fmt.Sprintf("got unexpected AuthResult type %t", r)) + } + } + +Both implementing types share a lot of methods by name, like ExtractUser() in +this example. But those methods cannot be part of the AuthResult interface +because the return types are different (in this case, type tokens2.User vs. +type tokens3.User). +*/ +type AuthResult interface { + ExtractTokenID() (string, error) +} diff --git a/vendor/github.com/gophercloud/gophercloud/doc.go b/vendor/github.com/gophercloud/gophercloud/doc.go new file mode 100644 index 000000000000..953ca822a97a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/doc.go @@ -0,0 +1,110 @@ +/* +Package gophercloud provides a multi-vendor interface to OpenStack-compatible +clouds. The library has a three-level hierarchy: providers, services, and +resources. + +Authenticating with Providers + +Provider structs represent the cloud providers that offer and manage a +collection of services. You will generally want to create one Provider +client per OpenStack cloud. + + It is now recommended to use the `clientconfig` package found at + https://github.com/gophercloud/utils/tree/master/openstack/clientconfig + for all authentication purposes. + + The below documentation is still relevant. clientconfig simply implements + the below and presents it in an easier and more flexible way. + +Use your OpenStack credentials to create a Provider client. The +IdentityEndpoint is typically refered to as "auth_url" or "OS_AUTH_URL" in +information provided by the cloud operator. 
Additionally, the cloud may refer to +TenantID or TenantName as project_id and project_name. Credentials are +specified like so: + + opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://openstack.example.com:5000/v2.0", + Username: "{username}", + Password: "{password}", + TenantID: "{tenant_id}", + } + + provider, err := openstack.AuthenticatedClient(opts) + +You can authenticate with a token by doing: + + opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://openstack.example.com:5000/v2.0", + TokenID: "{token_id}", + TenantID: "{tenant_id}", + } + + provider, err := openstack.AuthenticatedClient(opts) + +You may also use the openstack.AuthOptionsFromEnv() helper function. This +function reads in standard environment variables frequently found in an +OpenStack `openrc` file. Again note that Gophercloud currently uses "tenant" +instead of "project". + + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) + +Service Clients + +Service structs are specific to a provider and handle all of the logic and +operations for a particular OpenStack service. Examples of services include: +Compute, Object Storage, Block Storage. In order to define one, you need to +pass in the parent provider, like so: + + opts := gophercloud.EndpointOpts{Region: "RegionOne"} + + client, err := openstack.NewComputeV2(provider, opts) + +Resources + +Resource structs are the domain models that services make use of in order +to work with and represent the state of API resources: + + server, err := servers.Get(client, "{serverId}").Extract() + +Intermediate Result structs are returned for API operations, which allow +generic access to the HTTP headers, response body, and any errors associated +with the network transaction. To turn a result into a usable resource struct, +you must call the Extract method which is chained to the response, or an +Extract function from an applicable extension: + + result := servers.Get(client, "{serverId}") + + // Attempt to extract the disk configuration from the OS-DCF disk config + // extension: + config, err := diskconfig.ExtractGet(result) + +All requests that enumerate a collection return a Pager struct that is used to +iterate through the results one page at a time. Use the EachPage method on that +Pager to handle each successive Page in a closure, then use the appropriate +extraction method from that request's package to interpret that Page as a slice +of results: + + err := servers.List(client, nil).EachPage(func (page pagination.Page) (bool, error) { + s, err := servers.ExtractServers(page) + if err != nil { + return false, err + } + + // Handle the []servers.Server slice. + + // Return "false" or an error to prematurely stop fetching new pages. + return true, nil + }) + +If you want to obtain the entire collection of pages without doing any +intermediary processing on each page, you can use the AllPages method: + + allPages, err := servers.List(client, nil).AllPages() + allServers, err := servers.ExtractServers(allPages) + +This top-level package contains utility functions and data types that are used +throughout the provider and service packages. Of particular note for end users +are the AuthOptions and EndpointOpts structs. 
+*/ +package gophercloud diff --git a/vendor/github.com/gophercloud/gophercloud/endpoint_search.go b/vendor/github.com/gophercloud/gophercloud/endpoint_search.go new file mode 100644 index 000000000000..2fbc3c97f14f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/endpoint_search.go @@ -0,0 +1,76 @@ +package gophercloud + +// Availability indicates to whom a specific service endpoint is accessible: +// the internet at large, internal networks only, or only to administrators. +// Different identity services use different terminology for these. Identity v2 +// lists them as different kinds of URLs within the service catalog ("adminURL", +// "internalURL", and "publicURL"), while v3 lists them as "Interfaces" in an +// endpoint's response. +type Availability string + +const ( + // AvailabilityAdmin indicates that an endpoint is only available to + // administrators. + AvailabilityAdmin Availability = "admin" + + // AvailabilityPublic indicates that an endpoint is available to everyone on + // the internet. + AvailabilityPublic Availability = "public" + + // AvailabilityInternal indicates that an endpoint is only available within + // the cluster's internal network. + AvailabilityInternal Availability = "internal" +) + +// EndpointOpts specifies search criteria used by queries against an +// OpenStack service catalog. The options must contain enough information to +// unambiguously identify one, and only one, endpoint within the catalog. +// +// Usually, these are passed to service client factory functions in a provider +// package, like "openstack.NewComputeV2()". +type EndpointOpts struct { + // Type [required] is the service type for the client (e.g., "compute", + // "object-store"). Generally, this will be supplied by the service client + // function, but a user-given value will be honored if provided. + Type string + + // Name [optional] is the service name for the client (e.g., "nova") as it + // appears in the service catalog. Services can have the same Type but a + // different Name, which is why both Type and Name are sometimes needed. + Name string + + // Region [required] is the geographic region in which the endpoint resides, + // generally specifying which datacenter should house your resources. + // Required only for services that span multiple regions. + Region string + + // Availability [optional] is the visibility of the endpoint to be returned. + // Valid types include the constants AvailabilityPublic, AvailabilityInternal, + // or AvailabilityAdmin from this package. + // + // Availability is not required, and defaults to AvailabilityPublic. Not all + // providers or services offer all Availability options. + Availability Availability +} + +/* +EndpointLocator is an internal function to be used by provider implementations. + +It provides an implementation that locates a single endpoint from a service +catalog for a specific ProviderClient based on user-provided EndpointOpts. The +provider then uses it to discover related ServiceClients. +*/ +type EndpointLocator func(EndpointOpts) (string, error) + +// ApplyDefaults is an internal method to be used by provider implementations. +// +// It sets EndpointOpts fields if not already set, including a default type. +// Currently, EndpointOpts.Availability defaults to the public endpoint. 
+func (eo *EndpointOpts) ApplyDefaults(t string) { + if eo.Type == "" { + eo.Type = t + } + if eo.Availability == "" { + eo.Availability = AvailabilityPublic + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/errors.go b/vendor/github.com/gophercloud/gophercloud/errors.go new file mode 100644 index 000000000000..0bcb3af7f00c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/errors.go @@ -0,0 +1,471 @@ +package gophercloud + +import ( + "fmt" + "strings" +) + +// BaseError is an error type that all other error types embed. +type BaseError struct { + DefaultErrString string + Info string +} + +func (e BaseError) Error() string { + e.DefaultErrString = "An error occurred while executing a Gophercloud request." + return e.choseErrString() +} + +func (e BaseError) choseErrString() string { + if e.Info != "" { + return e.Info + } + return e.DefaultErrString +} + +// ErrMissingInput is the error when input is required in a particular +// situation but not provided by the user +type ErrMissingInput struct { + BaseError + Argument string +} + +func (e ErrMissingInput) Error() string { + e.DefaultErrString = fmt.Sprintf("Missing input for argument [%s]", e.Argument) + return e.choseErrString() +} + +// ErrInvalidInput is an error type used for most non-HTTP Gophercloud errors. +type ErrInvalidInput struct { + ErrMissingInput + Value interface{} +} + +func (e ErrInvalidInput) Error() string { + e.DefaultErrString = fmt.Sprintf("Invalid input provided for argument [%s]: [%+v]", e.Argument, e.Value) + return e.choseErrString() +} + +// ErrMissingEnvironmentVariable is the error when environment variable is required +// in a particular situation but not provided by the user +type ErrMissingEnvironmentVariable struct { + BaseError + EnvironmentVariable string +} + +func (e ErrMissingEnvironmentVariable) Error() string { + e.DefaultErrString = fmt.Sprintf("Missing environment variable [%s]", e.EnvironmentVariable) + return e.choseErrString() +} + +// ErrMissingAnyoneOfEnvironmentVariables is the error when anyone of the environment variables +// is required in a particular situation but not provided by the user +type ErrMissingAnyoneOfEnvironmentVariables struct { + BaseError + EnvironmentVariables []string +} + +func (e ErrMissingAnyoneOfEnvironmentVariables) Error() string { + e.DefaultErrString = fmt.Sprintf( + "Missing one of the following environment variables [%s]", + strings.Join(e.EnvironmentVariables, ", "), + ) + return e.choseErrString() +} + +// ErrUnexpectedResponseCode is returned by the Request method when a response code other than +// those listed in OkCodes is encountered. +type ErrUnexpectedResponseCode struct { + BaseError + URL string + Method string + Expected []int + Actual int + Body []byte +} + +func (e ErrUnexpectedResponseCode) Error() string { + e.DefaultErrString = fmt.Sprintf( + "Expected HTTP response code %v when accessing [%s %s], but got %d instead\n%s", + e.Expected, e.Method, e.URL, e.Actual, e.Body, + ) + return e.choseErrString() +} + +// ErrDefault400 is the default error type returned on a 400 HTTP response code. +type ErrDefault400 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault401 is the default error type returned on a 401 HTTP response code. +type ErrDefault401 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault403 is the default error type returned on a 403 HTTP response code. +type ErrDefault403 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault404 is the default error type returned on a 404 HTTP response code. 
+type ErrDefault404 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault405 is the default error type returned on a 405 HTTP response code. +type ErrDefault405 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault408 is the default error type returned on a 408 HTTP response code. +type ErrDefault408 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault409 is the default error type returned on a 409 HTTP response code. +type ErrDefault409 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault429 is the default error type returned on a 429 HTTP response code. +type ErrDefault429 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault500 is the default error type returned on a 500 HTTP response code. +type ErrDefault500 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault503 is the default error type returned on a 503 HTTP response code. +type ErrDefault503 struct { + ErrUnexpectedResponseCode +} + +func (e ErrDefault400) Error() string { + e.DefaultErrString = fmt.Sprintf( + "Bad request with: [%s %s], error message: %s", + e.Method, e.URL, e.Body, + ) + return e.choseErrString() +} +func (e ErrDefault401) Error() string { + return "Authentication failed" +} +func (e ErrDefault403) Error() string { + e.DefaultErrString = fmt.Sprintf( + "Request forbidden: [%s %s], error message: %s", + e.Method, e.URL, e.Body, + ) + return e.choseErrString() +} +func (e ErrDefault404) Error() string { + return "Resource not found" +} +func (e ErrDefault405) Error() string { + return "Method not allowed" +} +func (e ErrDefault408) Error() string { + return "The server timed out waiting for the request" +} +func (e ErrDefault429) Error() string { + return "Too many requests have been sent in a given amount of time. Pause" + + " requests, wait up to one minute, and try again." +} +func (e ErrDefault500) Error() string { + return "Internal Server Error" +} +func (e ErrDefault503) Error() string { + return "The service is currently unable to handle the request due to a temporary" + + " overloading or maintenance. This is a temporary condition. Try again later." +} + +// Err400er is the interface resource error types implement to override the error message +// from a 400 error. +type Err400er interface { + Error400(ErrUnexpectedResponseCode) error +} + +// Err401er is the interface resource error types implement to override the error message +// from a 401 error. +type Err401er interface { + Error401(ErrUnexpectedResponseCode) error +} + +// Err403er is the interface resource error types implement to override the error message +// from a 403 error. +type Err403er interface { + Error403(ErrUnexpectedResponseCode) error +} + +// Err404er is the interface resource error types implement to override the error message +// from a 404 error. +type Err404er interface { + Error404(ErrUnexpectedResponseCode) error +} + +// Err405er is the interface resource error types implement to override the error message +// from a 405 error. +type Err405er interface { + Error405(ErrUnexpectedResponseCode) error +} + +// Err408er is the interface resource error types implement to override the error message +// from a 408 error. +type Err408er interface { + Error408(ErrUnexpectedResponseCode) error +} + +// Err409er is the interface resource error types implement to override the error message +// from a 409 error. +type Err409er interface { + Error409(ErrUnexpectedResponseCode) error +} + +// Err429er is the interface resource error types implement to override the error message +// from a 429 error. 
+type Err429er interface { + Error429(ErrUnexpectedResponseCode) error +} + +// Err500er is the interface resource error types implement to override the error message +// from a 500 error. +type Err500er interface { + Error500(ErrUnexpectedResponseCode) error +} + +// Err503er is the interface resource error types implement to override the error message +// from a 503 error. +type Err503er interface { + Error503(ErrUnexpectedResponseCode) error +} + +// ErrTimeOut is the error type returned when an operations times out. +type ErrTimeOut struct { + BaseError +} + +func (e ErrTimeOut) Error() string { + e.DefaultErrString = "A time out occurred" + return e.choseErrString() +} + +// ErrUnableToReauthenticate is the error type returned when reauthentication fails. +type ErrUnableToReauthenticate struct { + BaseError + ErrOriginal error +} + +func (e ErrUnableToReauthenticate) Error() string { + e.DefaultErrString = fmt.Sprintf("Unable to re-authenticate: %s", e.ErrOriginal) + return e.choseErrString() +} + +// ErrErrorAfterReauthentication is the error type returned when reauthentication +// succeeds, but an error occurs afterword (usually an HTTP error). +type ErrErrorAfterReauthentication struct { + BaseError + ErrOriginal error +} + +func (e ErrErrorAfterReauthentication) Error() string { + e.DefaultErrString = fmt.Sprintf("Successfully re-authenticated, but got error executing request: %s", e.ErrOriginal) + return e.choseErrString() +} + +// ErrServiceNotFound is returned when no service in a service catalog matches +// the provided EndpointOpts. This is generally returned by provider service +// factory methods like "NewComputeV2()" and can mean that a service is not +// enabled for your account. +type ErrServiceNotFound struct { + BaseError +} + +func (e ErrServiceNotFound) Error() string { + e.DefaultErrString = "No suitable service could be found in the service catalog." + return e.choseErrString() +} + +// ErrEndpointNotFound is returned when no available endpoints match the +// provided EndpointOpts. This is also generally returned by provider service +// factory methods, and usually indicates that a region was specified +// incorrectly. +type ErrEndpointNotFound struct { + BaseError +} + +func (e ErrEndpointNotFound) Error() string { + e.DefaultErrString = "No suitable endpoint could be found in the service catalog." + return e.choseErrString() +} + +// ErrResourceNotFound is the error when trying to retrieve a resource's +// ID by name and the resource doesn't exist. +type ErrResourceNotFound struct { + BaseError + Name string + ResourceType string +} + +func (e ErrResourceNotFound) Error() string { + e.DefaultErrString = fmt.Sprintf("Unable to find %s with name %s", e.ResourceType, e.Name) + return e.choseErrString() +} + +// ErrMultipleResourcesFound is the error when trying to retrieve a resource's +// ID by name and multiple resources have the user-provided name. 
+type ErrMultipleResourcesFound struct { + BaseError + Name string + Count int + ResourceType string +} + +func (e ErrMultipleResourcesFound) Error() string { + e.DefaultErrString = fmt.Sprintf("Found %d %ss matching %s", e.Count, e.ResourceType, e.Name) + return e.choseErrString() +} + +// ErrUnexpectedType is the error when an unexpected type is encountered +type ErrUnexpectedType struct { + BaseError + Expected string + Actual string +} + +func (e ErrUnexpectedType) Error() string { + e.DefaultErrString = fmt.Sprintf("Expected %s but got %s", e.Expected, e.Actual) + return e.choseErrString() +} + +func unacceptedAttributeErr(attribute string) string { + return fmt.Sprintf("The base Identity V3 API does not accept authentication by %s", attribute) +} + +func redundantWithTokenErr(attribute string) string { + return fmt.Sprintf("%s may not be provided when authenticating with a TokenID", attribute) +} + +func redundantWithUserID(attribute string) string { + return fmt.Sprintf("%s may not be provided when authenticating with a UserID", attribute) +} + +// ErrAPIKeyProvided indicates that an APIKey was provided but can't be used. +type ErrAPIKeyProvided struct{ BaseError } + +func (e ErrAPIKeyProvided) Error() string { + return unacceptedAttributeErr("APIKey") +} + +// ErrTenantIDProvided indicates that a TenantID was provided but can't be used. +type ErrTenantIDProvided struct{ BaseError } + +func (e ErrTenantIDProvided) Error() string { + return unacceptedAttributeErr("TenantID") +} + +// ErrTenantNameProvided indicates that a TenantName was provided but can't be used. +type ErrTenantNameProvided struct{ BaseError } + +func (e ErrTenantNameProvided) Error() string { + return unacceptedAttributeErr("TenantName") +} + +// ErrUsernameWithToken indicates that a Username was provided, but token authentication is being used instead. +type ErrUsernameWithToken struct{ BaseError } + +func (e ErrUsernameWithToken) Error() string { + return redundantWithTokenErr("Username") +} + +// ErrUserIDWithToken indicates that a UserID was provided, but token authentication is being used instead. +type ErrUserIDWithToken struct{ BaseError } + +func (e ErrUserIDWithToken) Error() string { + return redundantWithTokenErr("UserID") +} + +// ErrDomainIDWithToken indicates that a DomainID was provided, but token authentication is being used instead. +type ErrDomainIDWithToken struct{ BaseError } + +func (e ErrDomainIDWithToken) Error() string { + return redundantWithTokenErr("DomainID") +} + +// ErrDomainNameWithToken indicates that a DomainName was provided, but token authentication is being used instead.s +type ErrDomainNameWithToken struct{ BaseError } + +func (e ErrDomainNameWithToken) Error() string { + return redundantWithTokenErr("DomainName") +} + +// ErrUsernameOrUserID indicates that neither username nor userID are specified, or both are at once. +type ErrUsernameOrUserID struct{ BaseError } + +func (e ErrUsernameOrUserID) Error() string { + return "Exactly one of Username and UserID must be provided for password authentication" +} + +// ErrDomainIDWithUserID indicates that a DomainID was provided, but unnecessary because a UserID is being used. +type ErrDomainIDWithUserID struct{ BaseError } + +func (e ErrDomainIDWithUserID) Error() string { + return redundantWithUserID("DomainID") +} + +// ErrDomainNameWithUserID indicates that a DomainName was provided, but unnecessary because a UserID is being used. 
+type ErrDomainNameWithUserID struct{ BaseError } + +func (e ErrDomainNameWithUserID) Error() string { + return redundantWithUserID("DomainName") +} + +// ErrDomainIDOrDomainName indicates that a username was provided, but no domain to scope it. +// It may also indicate that both a DomainID and a DomainName were provided at once. +type ErrDomainIDOrDomainName struct{ BaseError } + +func (e ErrDomainIDOrDomainName) Error() string { + return "You must provide exactly one of DomainID or DomainName to authenticate by Username" +} + +// ErrMissingPassword indicates that no password was provided and no token is available. +type ErrMissingPassword struct{ BaseError } + +func (e ErrMissingPassword) Error() string { + return "You must provide a password to authenticate" +} + +// ErrScopeDomainIDOrDomainName indicates that a domain ID or Name was required in a Scope, but not present. +type ErrScopeDomainIDOrDomainName struct{ BaseError } + +func (e ErrScopeDomainIDOrDomainName) Error() string { + return "You must provide exactly one of DomainID or DomainName in a Scope with ProjectName" +} + +// ErrScopeProjectIDOrProjectName indicates that both a ProjectID and a ProjectName were provided in a Scope. +type ErrScopeProjectIDOrProjectName struct{ BaseError } + +func (e ErrScopeProjectIDOrProjectName) Error() string { + return "You must provide at most one of ProjectID or ProjectName in a Scope" +} + +// ErrScopeProjectIDAlone indicates that a ProjectID was provided with other constraints in a Scope. +type ErrScopeProjectIDAlone struct{ BaseError } + +func (e ErrScopeProjectIDAlone) Error() string { + return "ProjectID must be supplied alone in a Scope" +} + +// ErrScopeEmpty indicates that no credentials were provided in a Scope. +type ErrScopeEmpty struct{ BaseError } + +func (e ErrScopeEmpty) Error() string { + return "You must provide either a Project or Domain in a Scope" +} + +// ErrAppCredMissingSecret indicates that no Application Credential Secret was provided with Application Credential ID or Name +type ErrAppCredMissingSecret struct{ BaseError } + +func (e ErrAppCredMissingSecret) Error() string { + return "You must provide an Application Credential Secret" +} diff --git a/vendor/github.com/gophercloud/gophercloud/go.mod b/vendor/github.com/gophercloud/gophercloud/go.mod new file mode 100644 index 000000000000..d1ee3b472ec8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/go.mod @@ -0,0 +1,7 @@ +module github.com/gophercloud/gophercloud + +require ( + golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 + golang.org/x/sys v0.0.0-20190209173611-3b5209105503 // indirect + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/vendor/github.com/gophercloud/gophercloud/go.sum b/vendor/github.com/gophercloud/gophercloud/go.sum new file mode 100644 index 000000000000..33cb0be8aa31 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/go.sum @@ -0,0 +1,8 @@ +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go new file mode 100644 index 000000000000..0e8d90ff8262 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go @@ -0,0 +1,125 @@ +package openstack + +import ( + "os" + + "github.com/gophercloud/gophercloud" +) + +var nilOptions = gophercloud.AuthOptions{} + +/* +AuthOptionsFromEnv fills out an identity.AuthOptions structure with the +settings found on the various OpenStack OS_* environment variables. + +The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME, +OS_PASSWORD and OS_PROJECT_ID. + +Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must have settings, +or an error will result. OS_PROJECT_ID, is optional. + +OS_TENANT_ID and OS_TENANT_NAME are deprecated forms of OS_PROJECT_ID and +OS_PROJECT_NAME and the latter are expected against a v3 auth api. + +If OS_PROJECT_ID and OS_PROJECT_NAME are set, they will still be referred +as "tenant" in Gophercloud. + +If OS_PROJECT_NAME is set, it requires OS_PROJECT_ID to be set as well to +handle projects not on the default domain. + +To use this function, first set the OS_* environment variables (for example, +by sourcing an `openrc` file), then: + + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) +*/ +func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { + authURL := os.Getenv("OS_AUTH_URL") + username := os.Getenv("OS_USERNAME") + userID := os.Getenv("OS_USERID") + password := os.Getenv("OS_PASSWORD") + tenantID := os.Getenv("OS_TENANT_ID") + tenantName := os.Getenv("OS_TENANT_NAME") + domainID := os.Getenv("OS_DOMAIN_ID") + domainName := os.Getenv("OS_DOMAIN_NAME") + applicationCredentialID := os.Getenv("OS_APPLICATION_CREDENTIAL_ID") + applicationCredentialName := os.Getenv("OS_APPLICATION_CREDENTIAL_NAME") + applicationCredentialSecret := os.Getenv("OS_APPLICATION_CREDENTIAL_SECRET") + + // If OS_PROJECT_ID is set, overwrite tenantID with the value. + if v := os.Getenv("OS_PROJECT_ID"); v != "" { + tenantID = v + } + + // If OS_PROJECT_NAME is set, overwrite tenantName with the value. 
+ if v := os.Getenv("OS_PROJECT_NAME"); v != "" { + tenantName = v + } + + if authURL == "" { + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_AUTH_URL", + } + return nilOptions, err + } + + if userID == "" && username == "" { + // Empty username and userID could be ignored, when applicationCredentialID and applicationCredentialSecret are set + if applicationCredentialID == "" && applicationCredentialSecret == "" { + err := gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ + EnvironmentVariables: []string{"OS_USERID", "OS_USERNAME"}, + } + return nilOptions, err + } + } + + if password == "" && applicationCredentialID == "" && applicationCredentialName == "" { + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_PASSWORD", + } + return nilOptions, err + } + + if (applicationCredentialID != "" || applicationCredentialName != "") && applicationCredentialSecret == "" { + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_APPLICATION_CREDENTIAL_SECRET", + } + return nilOptions, err + } + + if domainID == "" && domainName == "" && tenantID == "" && tenantName != "" { + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_PROJECT_ID", + } + return nilOptions, err + } + + if applicationCredentialID == "" && applicationCredentialName != "" && applicationCredentialSecret != "" { + if userID == "" && username == "" { + return nilOptions, gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ + EnvironmentVariables: []string{"OS_USERID", "OS_USERNAME"}, + } + } + if username != "" && domainID == "" && domainName == "" { + return nilOptions, gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ + EnvironmentVariables: []string{"OS_DOMAIN_ID", "OS_DOMAIN_NAME"}, + } + } + } + + ao := gophercloud.AuthOptions{ + IdentityEndpoint: authURL, + UserID: userID, + Username: username, + Password: password, + TenantID: tenantID, + TenantName: tenantName, + DomainID: domainID, + DomainName: domainName, + ApplicationCredentialID: applicationCredentialID, + ApplicationCredentialName: applicationCredentialName, + ApplicationCredentialSecret: applicationCredentialSecret, + } + + return ao, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/client.go new file mode 100644 index 000000000000..50f239711e72 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/client.go @@ -0,0 +1,438 @@ +package openstack + +import ( + "fmt" + "reflect" + + "github.com/gophercloud/gophercloud" + tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" + "github.com/gophercloud/gophercloud/openstack/utils" +) + +const ( + // v2 represents Keystone v2. + // It should never increase beyond 2.0. + v2 = "v2.0" + + // v3 represents Keystone v3. + // The version can be anything from v3 to v3.x. + v3 = "v3" +) + +/* +NewClient prepares an unauthenticated ProviderClient instance. +Most users will probably prefer using the AuthenticatedClient function +instead. + +This is useful if you wish to explicitly control the version of the identity +service that's used for authentication explicitly, for example. 
+ +A basic example of using this would be: + + ao, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.NewClient(ao.IdentityEndpoint) + client, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{}) +*/ +func NewClient(endpoint string) (*gophercloud.ProviderClient, error) { + base, err := utils.BaseEndpoint(endpoint) + if err != nil { + return nil, err + } + + endpoint = gophercloud.NormalizeURL(endpoint) + base = gophercloud.NormalizeURL(base) + + p := new(gophercloud.ProviderClient) + p.IdentityBase = base + p.IdentityEndpoint = endpoint + p.UseTokenLock() + + return p, nil +} + +/* +AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint +specified by the options, acquires a token, and returns a Provider Client +instance that's ready to operate. + +If the full path to a versioned identity endpoint was specified (example: +http://example.com:5000/v3), that path will be used as the endpoint to query. + +If a versionless endpoint was specified (example: http://example.com:5000/), +the endpoint will be queried to determine which versions of the identity service +are available, then chooses the most recent or most supported version. + +Example: + + ao, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(ao) + client, err := openstack.NewNetworkV2(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +*/ +func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) { + client, err := NewClient(options.IdentityEndpoint) + if err != nil { + return nil, err + } + + err = Authenticate(client, options) + if err != nil { + return nil, err + } + return client, nil +} + +// Authenticate or re-authenticate against the most recent identity service +// supported at the provided endpoint. +func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { + versions := []*utils.Version{ + {ID: v2, Priority: 20, Suffix: "/v2.0/"}, + {ID: v3, Priority: 30, Suffix: "/v3/"}, + } + + chosen, endpoint, err := utils.ChooseVersion(client, versions) + if err != nil { + return err + } + + switch chosen.ID { + case v2: + return v2auth(client, endpoint, options, gophercloud.EndpointOpts{}) + case v3: + return v3auth(client, endpoint, &options, gophercloud.EndpointOpts{}) + default: + // The switch statement must be out of date from the versions list. + return fmt.Errorf("Unrecognized identity version: %s", chosen.ID) + } +} + +// AuthenticateV2 explicitly authenticates against the identity v2 endpoint. 
+func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { + return v2auth(client, "", options, eo) +} + +func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { + v2Client, err := NewIdentityV2(client, eo) + if err != nil { + return err + } + + if endpoint != "" { + v2Client.Endpoint = endpoint + } + + v2Opts := tokens2.AuthOptions{ + IdentityEndpoint: options.IdentityEndpoint, + Username: options.Username, + Password: options.Password, + TenantID: options.TenantID, + TenantName: options.TenantName, + AllowReauth: options.AllowReauth, + TokenID: options.TokenID, + } + + result := tokens2.Create(v2Client, v2Opts) + + err = client.SetTokenAndAuthResult(result) + if err != nil { + return err + } + + catalog, err := result.ExtractServiceCatalog() + if err != nil { + return err + } + + if options.AllowReauth { + // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but + // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, + // this should retry authentication only once + tac := *client + tac.SetThrowaway(true) + tac.ReauthFunc = nil + tac.SetTokenAndAuthResult(nil) + tao := options + tao.AllowReauth = false + client.ReauthFunc = func() error { + err := v2auth(&tac, endpoint, tao, eo) + if err != nil { + return err + } + client.CopyTokenFrom(&tac) + return nil + } + } + client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { + return V2EndpointURL(catalog, opts) + } + + return nil +} + +// AuthenticateV3 explicitly authenticates against the identity v3 service. +func AuthenticateV3(client *gophercloud.ProviderClient, options tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { + return v3auth(client, "", options, eo) +} + +func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { + // Override the generated service endpoint with the one returned by the version endpoint. + v3Client, err := NewIdentityV3(client, eo) + if err != nil { + return err + } + + if endpoint != "" { + v3Client.Endpoint = endpoint + } + + result := tokens3.Create(v3Client, opts) + + err = client.SetTokenAndAuthResult(result) + if err != nil { + return err + } + + catalog, err := result.ExtractServiceCatalog() + if err != nil { + return err + } + + if opts.CanReauth() { + // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but + // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, + // this should retry authentication only once + tac := *client + tac.SetThrowaway(true) + tac.ReauthFunc = nil + tac.SetTokenAndAuthResult(nil) + var tao tokens3.AuthOptionsBuilder + switch ot := opts.(type) { + case *gophercloud.AuthOptions: + o := *ot + o.AllowReauth = false + tao = &o + case *tokens3.AuthOptions: + o := *ot + o.AllowReauth = false + tao = &o + default: + tao = opts + } + client.ReauthFunc = func() error { + err := v3auth(&tac, endpoint, tao, eo) + if err != nil { + return err + } + client.CopyTokenFrom(&tac) + return nil + } + } + client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { + return V3EndpointURL(catalog, opts) + } + + return nil +} + +// NewIdentityV2 creates a ServiceClient that may be used to interact with the +// v2 identity service. 
+func NewIdentityV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + endpoint := client.IdentityBase + "v2.0/" + clientType := "identity" + var err error + if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { + eo.ApplyDefaults(clientType) + endpoint, err = client.EndpointLocator(eo) + if err != nil { + return nil, err + } + } + + return &gophercloud.ServiceClient{ + ProviderClient: client, + Endpoint: endpoint, + Type: clientType, + }, nil +} + +// NewIdentityV3 creates a ServiceClient that may be used to access the v3 +// identity service. +func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + endpoint := client.IdentityBase + "v3/" + clientType := "identity" + var err error + if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { + eo.ApplyDefaults(clientType) + endpoint, err = client.EndpointLocator(eo) + if err != nil { + return nil, err + } + } + + // Ensure endpoint still has a suffix of v3. + // This is because EndpointLocator might have found a versionless + // endpoint or the published endpoint is still /v2.0. In both + // cases, we need to fix the endpoint to point to /v3. + base, err := utils.BaseEndpoint(endpoint) + if err != nil { + return nil, err + } + + base = gophercloud.NormalizeURL(base) + + endpoint = base + "v3/" + + return &gophercloud.ServiceClient{ + ProviderClient: client, + Endpoint: endpoint, + Type: clientType, + }, nil +} + +func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) { + sc := new(gophercloud.ServiceClient) + eo.ApplyDefaults(clientType) + url, err := client.EndpointLocator(eo) + if err != nil { + return sc, err + } + sc.ProviderClient = client + sc.Endpoint = url + sc.Type = clientType + return sc, nil +} + +// NewBareMetalV1 creates a ServiceClient that may be used with the v1 +// bare metal package. +func NewBareMetalV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "baremetal") +} + +// NewBareMetalIntrospectionV1 creates a ServiceClient that may be used with the v1 +// bare metal introspection package. +func NewBareMetalIntrospectionV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "baremetal-inspector") +} + +// NewObjectStorageV1 creates a ServiceClient that may be used with the v1 +// object storage package. +func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "object-store") +} + +// NewComputeV2 creates a ServiceClient that may be used with the v2 compute +// package. +func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "compute") +} + +// NewNetworkV2 creates a ServiceClient that may be used with the v2 network +// package. +func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "network") + sc.ResourceBase = sc.Endpoint + "v2.0/" + return sc, err +} + +// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 +// block storage service. 
+func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volume") +} + +// NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 +// block storage service. +func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volumev2") +} + +// NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service. +func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volumev3") +} + +// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service. +func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "sharev2") +} + +// NewCDNV1 creates a ServiceClient that may be used to access the OpenStack v1 +// CDN service. +func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "cdn") +} + +// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 +// orchestration service. +func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "orchestration") +} + +// NewDBV1 creates a ServiceClient that may be used to access the v1 DB service. +func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "database") +} + +// NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS +// service. +func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "dns") + sc.ResourceBase = sc.Endpoint + "v2/" + return sc, err +} + +// NewImageServiceV2 creates a ServiceClient that may be used to access the v2 +// image service. +func NewImageServiceV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "image") + sc.ResourceBase = sc.Endpoint + "v2/" + return sc, err +} + +// NewLoadBalancerV2 creates a ServiceClient that may be used to access the v2 +// load balancer service. +func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "load-balancer") + sc.ResourceBase = sc.Endpoint + "v2.0/" + return sc, err +} + +// NewClusteringV1 creates a ServiceClient that may be used with the v1 clustering +// package. +func NewClusteringV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "clustering") +} + +// NewMessagingV2 creates a ServiceClient that may be used with the v2 messaging +// service. 
+func NewMessagingV2(client *gophercloud.ProviderClient, clientID string, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "messaging") + sc.MoreHeaders = map[string]string{"Client-ID": clientID} + return sc, err +} + +// NewContainerV1 creates a ServiceClient that may be used with v1 container package +func NewContainerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "container") +} + +// NewKeyManagerV1 creates a ServiceClient that may be used with the v1 key +// manager service. +func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "key-manager") + sc.ResourceBase = sc.Endpoint + "v1/" + return sc, err +} + +// NewContainerInfraV1 creates a ServiceClient that may be used with the v1 container infra management +// package. +func NewContainerInfraV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "container-infra") +} + +// NewWorkflowV2 creates a ServiceClient that may be used with the v2 workflow management package. +func NewWorkflowV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "workflowv2") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go new file mode 100644 index 000000000000..34d8764fadb2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go @@ -0,0 +1,137 @@ +/* +Package flavors provides information and interaction with the flavor API +in the OpenStack Compute service. + +A flavor is an available hardware configuration for a server. Each flavor +has a unique combination of disk space, memory capacity and priority for CPU +time. 
+ +Example to List Flavors + + listOpts := flavors.ListOpts{ + AccessType: flavors.PublicAccess, + } + + allPages, err := flavors.ListDetail(computeClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allFlavors, err := flavors.ExtractFlavors(allPages) + if err != nil { + panic(err) + } + + for _, flavor := range allFlavors { + fmt.Printf("%+v\n", flavor) + } + +Example to Create a Flavor + + createOpts := flavors.CreateOpts{ + ID: "1", + Name: "m1.tiny", + Disk: gophercloud.IntToPointer(1), + RAM: 512, + VCPUs: 1, + RxTxFactor: 1.0, + } + + flavor, err := flavors.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to List Flavor Access + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + allPages, err := flavors.ListAccesses(computeClient, flavorID).AllPages() + if err != nil { + panic(err) + } + + allAccesses, err := flavors.ExtractAccesses(allPages) + if err != nil { + panic(err) + } + + for _, access := range allAccesses { + fmt.Printf("%+v", access) + } + +Example to Grant Access to a Flavor + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + accessOpts := flavors.AddAccessOpts{ + Tenant: "15153a0979884b59b0592248ef947921", + } + + accessList, err := flavors.AddAccess(computeClient, flavor.ID, accessOpts).Extract() + if err != nil { + panic(err) + } + +Example to Remove/Revoke Access to a Flavor + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + accessOpts := flavors.RemoveAccessOpts{ + Tenant: "15153a0979884b59b0592248ef947921", + } + + accessList, err := flavors.RemoveAccess(computeClient, flavor.ID, accessOpts).Extract() + if err != nil { + panic(err) + } + +Example to Create Extra Specs for a Flavor + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + createOpts := flavors.ExtraSpecsOpts{ + "hw:cpu_policy": "CPU-POLICY", + "hw:cpu_thread_policy": "CPU-THREAD-POLICY", + } + createdExtraSpecs, err := flavors.CreateExtraSpecs(computeClient, flavorID, createOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v", createdExtraSpecs) + +Example to Get Extra Specs for a Flavor + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + extraSpecs, err := flavors.ListExtraSpecs(computeClient, flavorID).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v", extraSpecs) + +Example to Update Extra Specs for a Flavor + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + updateOpts := flavors.ExtraSpecsOpts{ + "hw:cpu_thread_policy": "CPU-THREAD-POLICY-UPDATED", + } + updatedExtraSpec, err := flavors.UpdateExtraSpec(computeClient, flavorID, updateOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v", updatedExtraSpec) + +Example to Delete an Extra Spec for a Flavor + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + err := flavors.DeleteExtraSpec(computeClient, flavorID, "hw:cpu_thread_policy").ExtractErr() + if err != nil { + panic(err) + } +*/ +package flavors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go new file mode 100644 index 000000000000..753024a18b72 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go @@ -0,0 +1,357 @@ +package flavors + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. 
+type ListOptsBuilder interface { + ToFlavorListQuery() (string, error) +} + +/* + AccessType maps to OpenStack's Flavor.is_public field. Although the is_public + field is boolean, the request options are ternary, which is why AccessType is + a string. The following values are allowed: + + The AccessType arguement is optional, and if it is not supplied, OpenStack + returns the PublicAccess flavors. +*/ +type AccessType string + +const ( + // PublicAccess returns public flavors and private flavors associated with + // that project. + PublicAccess AccessType = "true" + + // PrivateAccess (admin only) returns private flavors, across all projects. + PrivateAccess AccessType = "false" + + // AllAccess (admin only) returns public and private flavors across all + // projects. + AllAccess AccessType = "None" +) + +/* + ListOpts filters the results returned by the List() function. + For example, a flavor with a minDisk field of 10 will not be returned if you + specify MinDisk set to 20. + + Typically, software will use the last ID of the previous call to List to set + the Marker for the current call. +*/ +type ListOpts struct { + // ChangesSince, if provided, instructs List to return only those things which + // have changed since the timestamp provided. + ChangesSince string `q:"changes-since"` + + // MinDisk and MinRAM, if provided, elides flavors which do not meet your + // criteria. + MinDisk int `q:"minDisk"` + MinRAM int `q:"minRam"` + + // SortDir allows to select sort direction. + // It can be "asc" or "desc" (default). + SortDir string `q:"sort_dir"` + + // SortKey allows to sort by one of the flavors attributes. + // Default is flavorid. + SortKey string `q:"sort_key"` + + // Marker and Limit control paging. + // Marker instructs List where to start listing from. + Marker string `q:"marker"` + + // Limit instructs List to refrain from sending excessively large lists of + // flavors. + Limit int `q:"limit"` + + // AccessType, if provided, instructs List which set of flavors to return. + // If IsPublic not provided, flavors for the current project are returned. + AccessType AccessType `q:"is_public"` +} + +// ToFlavorListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToFlavorListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListDetail instructs OpenStack to provide a list of flavors. +// You may provide criteria by which List curtails its results for easier +// processing. +func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToFlavorListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return FlavorPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +type CreateOptsBuilder interface { + ToFlavorCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies parameters used for creating a flavor. +type CreateOpts struct { + // Name is the name of the flavor. + Name string `json:"name" required:"true"` + + // RAM is the memory of the flavor, measured in MB. + RAM int `json:"ram" required:"true"` + + // VCPUs is the number of vcpus for the flavor. + VCPUs int `json:"vcpus" required:"true"` + + // Disk the amount of root disk space, measured in GB. + Disk *int `json:"disk" required:"true"` + + // ID is a unique ID for the flavor. 
+ ID string `json:"id,omitempty"` + + // Swap is the amount of swap space for the flavor, measured in MB. + Swap *int `json:"swap,omitempty"` + + // RxTxFactor alters the network bandwidth of a flavor. + RxTxFactor float64 `json:"rxtx_factor,omitempty"` + + // IsPublic flags a flavor as being available to all projects or not. + IsPublic *bool `json:"os-flavor-access:is_public,omitempty"` + + // Ephemeral is the amount of ephemeral disk space, measured in GB. + Ephemeral *int `json:"OS-FLV-EXT-DATA:ephemeral,omitempty"` +} + +// ToFlavorCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToFlavorCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "flavor") +} + +// Create requests the creation of a new flavor. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToFlavorCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Get retrieves details of a single flavor. Use Extract to convert its +// result into a Flavor. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// Delete deletes the specified flavor ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// ListAccesses retrieves the tenants which have access to a flavor. +func ListAccesses(client *gophercloud.ServiceClient, id string) pagination.Pager { + url := accessURL(client, id) + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return AccessPage{pagination.SinglePageBase(r)} + }) +} + +// AddAccessOptsBuilder allows extensions to add additional parameters to the +// AddAccess requests. +type AddAccessOptsBuilder interface { + ToFlavorAddAccessMap() (map[string]interface{}, error) +} + +// AddAccessOpts represents options for adding access to a flavor. +type AddAccessOpts struct { + // Tenant is the project/tenant ID to grant access. + Tenant string `json:"tenant"` +} + +// ToFlavorAddAccessMap constructs a request body from AddAccessOpts. +func (opts AddAccessOpts) ToFlavorAddAccessMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "addTenantAccess") +} + +// AddAccess grants a tenant/project access to a flavor. +func AddAccess(client *gophercloud.ServiceClient, id string, opts AddAccessOptsBuilder) (r AddAccessResult) { + b, err := opts.ToFlavorAddAccessMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(accessActionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// RemoveAccessOptsBuilder allows extensions to add additional parameters to the +// RemoveAccess requests. +type RemoveAccessOptsBuilder interface { + ToFlavorRemoveAccessMap() (map[string]interface{}, error) +} + +// RemoveAccessOpts represents options for removing access to a flavor. +type RemoveAccessOpts struct { + // Tenant is the project/tenant ID to grant access. + Tenant string `json:"tenant"` +} + +// ToFlavorRemoveAccessMap constructs a request body from RemoveAccessOpts. 
+func (opts RemoveAccessOpts) ToFlavorRemoveAccessMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "removeTenantAccess") +} + +// RemoveAccess removes/revokes a tenant/project access to a flavor. +func RemoveAccess(client *gophercloud.ServiceClient, id string, opts RemoveAccessOptsBuilder) (r RemoveAccessResult) { + b, err := opts.ToFlavorRemoveAccessMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(accessActionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// ExtraSpecs requests all the extra-specs for the given flavor ID. +func ListExtraSpecs(client *gophercloud.ServiceClient, flavorID string) (r ListExtraSpecsResult) { + _, r.Err = client.Get(extraSpecsListURL(client, flavorID), &r.Body, nil) + return +} + +func GetExtraSpec(client *gophercloud.ServiceClient, flavorID string, key string) (r GetExtraSpecResult) { + _, r.Err = client.Get(extraSpecsGetURL(client, flavorID, key), &r.Body, nil) + return +} + +// CreateExtraSpecsOptsBuilder allows extensions to add additional parameters to the +// CreateExtraSpecs requests. +type CreateExtraSpecsOptsBuilder interface { + ToFlavorExtraSpecsCreateMap() (map[string]interface{}, error) +} + +// ExtraSpecsOpts is a map that contains key-value pairs. +type ExtraSpecsOpts map[string]string + +// ToFlavorExtraSpecsCreateMap assembles a body for a Create request based on +// the contents of ExtraSpecsOpts. +func (opts ExtraSpecsOpts) ToFlavorExtraSpecsCreateMap() (map[string]interface{}, error) { + return map[string]interface{}{"extra_specs": opts}, nil +} + +// CreateExtraSpecs will create or update the extra-specs key-value pairs for +// the specified Flavor. +func CreateExtraSpecs(client *gophercloud.ServiceClient, flavorID string, opts CreateExtraSpecsOptsBuilder) (r CreateExtraSpecsResult) { + b, err := opts.ToFlavorExtraSpecsCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(extraSpecsCreateURL(client, flavorID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// UpdateExtraSpecOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateExtraSpecOptsBuilder interface { + ToFlavorExtraSpecUpdateMap() (map[string]string, string, error) +} + +// ToFlavorExtraSpecUpdateMap assembles a body for an Update request based on +// the contents of a ExtraSpecOpts. +func (opts ExtraSpecsOpts) ToFlavorExtraSpecUpdateMap() (map[string]string, string, error) { + if len(opts) != 1 { + err := gophercloud.ErrInvalidInput{} + err.Argument = "flavors.ExtraSpecOpts" + err.Info = "Must have 1 and only one key-value pair" + return nil, "", err + } + + var key string + for k := range opts { + key = k + } + + return opts, key, nil +} + +// UpdateExtraSpec will updates the value of the specified flavor's extra spec +// for the key in opts. +func UpdateExtraSpec(client *gophercloud.ServiceClient, flavorID string, opts UpdateExtraSpecOptsBuilder) (r UpdateExtraSpecResult) { + b, key, err := opts.ToFlavorExtraSpecUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(extraSpecUpdateURL(client, flavorID, key), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// DeleteExtraSpec will delete the key-value pair with the given key for the given +// flavor ID. 
+func DeleteExtraSpec(client *gophercloud.ServiceClient, flavorID, key string) (r DeleteExtraSpecResult) { + _, r.Err = client.Delete(extraSpecDeleteURL(client, flavorID, key), &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// IDFromName is a convienience function that returns a flavor's ID given its +// name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + allPages, err := ListDetail(client, nil).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractFlavors(allPages) + if err != nil { + return "", err + } + + for _, f := range all { + if f.Name == name { + count++ + id = f.ID + } + } + + switch count { + case 0: + err := &gophercloud.ErrResourceNotFound{} + err.ResourceType = "flavor" + err.Name = name + return "", err + case 1: + return id, nil + default: + err := &gophercloud.ErrMultipleResourcesFound{} + err.ResourceType = "flavor" + err.Name = name + err.Count = count + return "", err + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go new file mode 100644 index 000000000000..92fe1b1809d4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go @@ -0,0 +1,252 @@ +package flavors + +import ( + "encoding/json" + "strconv" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// CreateResult is the response of a Get operations. Call its Extract method to +// interpret it as a Flavor. +type CreateResult struct { + commonResult +} + +// GetResult is the response of a Get operations. Call its Extract method to +// interpret it as a Flavor. +type GetResult struct { + commonResult +} + +// DeleteResult is the result from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Extract provides access to the individual Flavor returned by the Get and +// Create functions. +func (r commonResult) Extract() (*Flavor, error) { + var s struct { + Flavor *Flavor `json:"flavor"` + } + err := r.ExtractInto(&s) + return s.Flavor, err +} + +// Flavor represent (virtual) hardware configurations for server resources +// in a region. +type Flavor struct { + // ID is the flavor's unique ID. + ID string `json:"id"` + + // Disk is the amount of root disk, measured in GB. + Disk int `json:"disk"` + + // RAM is the amount of memory, measured in MB. + RAM int `json:"ram"` + + // Name is the name of the flavor. + Name string `json:"name"` + + // RxTxFactor describes bandwidth alterations of the flavor. + RxTxFactor float64 `json:"rxtx_factor"` + + // Swap is the amount of swap space, measured in MB. + Swap int `json:"-"` + + // VCPUs indicates how many (virtual) CPUs are available for this flavor. + VCPUs int `json:"vcpus"` + + // IsPublic indicates whether the flavor is public. + IsPublic bool `json:"os-flavor-access:is_public"` + + // Ephemeral is the amount of ephemeral disk space, measured in GB. 
+ Ephemeral int `json:"OS-FLV-EXT-DATA:ephemeral"` +} + +func (r *Flavor) UnmarshalJSON(b []byte) error { + type tmp Flavor + var s struct { + tmp + Swap interface{} `json:"swap"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Flavor(s.tmp) + + switch t := s.Swap.(type) { + case float64: + r.Swap = int(t) + case string: + switch t { + case "": + r.Swap = 0 + default: + swap, err := strconv.ParseFloat(t, 64) + if err != nil { + return err + } + r.Swap = int(swap) + } + } + + return nil +} + +// FlavorPage contains a single page of all flavors from a ListDetails call. +type FlavorPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines if a FlavorPage contains any results. +func (page FlavorPage) IsEmpty() (bool, error) { + flavors, err := ExtractFlavors(page) + return len(flavors) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to the +// next page of results. +func (page FlavorPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"flavors_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractFlavors provides access to the list of flavors in a page acquired +// from the ListDetail operation. +func ExtractFlavors(r pagination.Page) ([]Flavor, error) { + var s struct { + Flavors []Flavor `json:"flavors"` + } + err := (r.(FlavorPage)).ExtractInto(&s) + return s.Flavors, err +} + +// AccessPage contains a single page of all FlavorAccess entries for a flavor. +type AccessPage struct { + pagination.SinglePageBase +} + +// IsEmpty indicates whether an AccessPage is empty. +func (page AccessPage) IsEmpty() (bool, error) { + v, err := ExtractAccesses(page) + return len(v) == 0, err +} + +// ExtractAccesses interprets a page of results as a slice of FlavorAccess. +func ExtractAccesses(r pagination.Page) ([]FlavorAccess, error) { + var s struct { + FlavorAccesses []FlavorAccess `json:"flavor_access"` + } + err := (r.(AccessPage)).ExtractInto(&s) + return s.FlavorAccesses, err +} + +type accessResult struct { + gophercloud.Result +} + +// AddAccessResult is the response of an AddAccess operation. Call its +// Extract method to interpret it as a slice of FlavorAccess. +type AddAccessResult struct { + accessResult +} + +// RemoveAccessResult is the response of a RemoveAccess operation. Call its +// Extract method to interpret it as a slice of FlavorAccess. +type RemoveAccessResult struct { + accessResult +} + +// Extract provides access to the result of an access create or delete. +// The result will be all accesses that the flavor has. +func (r accessResult) Extract() ([]FlavorAccess, error) { + var s struct { + FlavorAccesses []FlavorAccess `json:"flavor_access"` + } + err := r.ExtractInto(&s) + return s.FlavorAccesses, err +} + +// FlavorAccess represents an ACL of tenant access to a specific Flavor. +type FlavorAccess struct { + // FlavorID is the unique ID of the flavor. + FlavorID string `json:"flavor_id"` + + // TenantID is the unique ID of the tenant. + TenantID string `json:"tenant_id"` +} + +// Extract interprets any extraSpecsResult as ExtraSpecs, if possible. +func (r extraSpecsResult) Extract() (map[string]string, error) { + var s struct { + ExtraSpecs map[string]string `json:"extra_specs"` + } + err := r.ExtractInto(&s) + return s.ExtraSpecs, err +} + +// extraSpecsResult contains the result of a call for (potentially) multiple +// key-value pairs. 
Call its Extract method to interpret it as a +// map[string]interface. +type extraSpecsResult struct { + gophercloud.Result +} + +// ListExtraSpecsResult contains the result of a Get operation. Call its Extract +// method to interpret it as a map[string]interface. +type ListExtraSpecsResult struct { + extraSpecsResult +} + +// CreateExtraSpecResult contains the result of a Create operation. Call its +// Extract method to interpret it as a map[string]interface. +type CreateExtraSpecsResult struct { + extraSpecsResult +} + +// extraSpecResult contains the result of a call for individual a single +// key-value pair. +type extraSpecResult struct { + gophercloud.Result +} + +// GetExtraSpecResult contains the result of a Get operation. Call its Extract +// method to interpret it as a map[string]interface. +type GetExtraSpecResult struct { + extraSpecResult +} + +// UpdateExtraSpecResult contains the result of an Update operation. Call its +// Extract method to interpret it as a map[string]interface. +type UpdateExtraSpecResult struct { + extraSpecResult +} + +// DeleteExtraSpecResult contains the result of a Delete operation. Call its +// ExtractErr method to determine if the call succeeded or failed. +type DeleteExtraSpecResult struct { + gophercloud.ErrResult +} + +// Extract interprets any extraSpecResult as an ExtraSpec, if possible. +func (r extraSpecResult) Extract() (map[string]string, error) { + var s map[string]string + err := r.ExtractInto(&s) + return s, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go new file mode 100644 index 000000000000..8620dd78ad0a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go @@ -0,0 +1,49 @@ +package flavors + +import ( + "github.com/gophercloud/gophercloud" +) + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id) +} + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("flavors", "detail") +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("flavors") +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id) +} + +func accessURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id, "os-flavor-access") +} + +func accessActionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id, "action") +} + +func extraSpecsListURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id, "os-extra_specs") +} + +func extraSpecsGetURL(client *gophercloud.ServiceClient, id, key string) string { + return client.ServiceURL("flavors", id, "os-extra_specs", key) +} + +func extraSpecsCreateURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id, "os-extra_specs") +} + +func extraSpecUpdateURL(client *gophercloud.ServiceClient, id, key string) string { + return client.ServiceURL("flavors", id, "os-extra_specs", key) +} + +func extraSpecDeleteURL(client *gophercloud.ServiceClient, id, key string) string { + return client.ServiceURL("flavors", id, "os-extra_specs", key) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go new 
file mode 100644 index 000000000000..22410a79a27c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go @@ -0,0 +1,32 @@ +/* +Package images provides information and interaction with the images through +the OpenStack Compute service. + +This API is deprecated and will be removed from a future version of the Nova +API service. + +An image is a collection of files used to create or rebuild a server. +Operators provide a number of pre-built OS images by default. You may also +create custom images from cloud servers you have launched. + +Example to List Images + + listOpts := images.ListOpts{ + Limit: 2, + } + + allPages, err := images.ListDetail(computeClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allImages, err := images.ExtractImages(allPages) + if err != nil { + panic(err) + } + + for _, image := range allImages { + fmt.Printf("%+v\n", image) + } +*/ +package images diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go new file mode 100644 index 000000000000..558b481b9e7d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go @@ -0,0 +1,109 @@ +package images + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// ListDetail request. +type ListOptsBuilder interface { + ToImageListQuery() (string, error) +} + +// ListOpts contain options filtering Images returned from a call to ListDetail. +type ListOpts struct { + // ChangesSince filters Images based on the last changed status (in date-time + // format). + ChangesSince string `q:"changes-since"` + + // Limit limits the number of Images to return. + Limit int `q:"limit"` + + // Mark is an Image UUID at which to set a marker. + Marker string `q:"marker"` + + // Name is the name of the Image. + Name string `q:"name"` + + // Server is the name of the Server (in URL format). + Server string `q:"server"` + + // Status is the current status of the Image. + Status string `q:"status"` + + // Type is the type of image (e.g. BASE, SERVER, ALL). + Type string `q:"type"` +} + +// ToImageListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToImageListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListDetail enumerates the available images. +func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listDetailURL(client) + if opts != nil { + query, err := opts.ToImageListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ImagePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get returns data about a specific image by its ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// Delete deletes the specified image ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// IDFromName is a convienience function that returns an image's ID given its +// name. 
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + allPages, err := ListDetail(client, nil).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractImages(allPages) + if err != nil { + return "", err + } + + for _, f := range all { + if f.Name == name { + count++ + id = f.ID + } + } + + switch count { + case 0: + err := &gophercloud.ErrResourceNotFound{} + err.ResourceType = "image" + err.Name = name + return "", err + case 1: + return id, nil + default: + err := &gophercloud.ErrMultipleResourcesFound{} + err.ResourceType = "image" + err.Name = name + err.Count = count + return "", err + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go new file mode 100644 index 000000000000..70d1018c7217 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go @@ -0,0 +1,95 @@ +package images + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as an Image. +type GetResult struct { + gophercloud.Result +} + +// DeleteResult is the result from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Extract interprets a GetResult as an Image. +func (r GetResult) Extract() (*Image, error) { + var s struct { + Image *Image `json:"image"` + } + err := r.ExtractInto(&s) + return s.Image, err +} + +// Image represents an Image returned by the Compute API. +type Image struct { + // ID is the unique ID of an image. + ID string + + // Created is the date when the image was created. + Created string + + // MinDisk is the minimum amount of disk a flavor must have to be able + // to create a server based on the image, measured in GB. + MinDisk int + + // MinRAM is the minimum amount of RAM a flavor must have to be able + // to create a server based on the image, measured in MB. + MinRAM int + + // Name provides a human-readable moniker for the OS image. + Name string + + // The Progress and Status fields indicate image-creation status. + Progress int + + // Status is the current status of the image. + Status string + + // Update is the date when the image was updated. + Updated string + + // Metadata provides free-form key/value pairs that further describe the + // image. + Metadata map[string]interface{} +} + +// ImagePage contains a single page of all Images returne from a ListDetail +// operation. Use ExtractImages to convert it into a slice of usable structs. +type ImagePage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if an ImagePage contains no Image results. +func (page ImagePage) IsEmpty() (bool, error) { + images, err := ExtractImages(page) + return len(images) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to the +// next page of results. +func (page ImagePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"images_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractImages converts a page of List results into a slice of usable Image +// structs. 
+func ExtractImages(r pagination.Page) ([]Image, error) { + var s struct { + Images []Image `json:"images"` + } + err := (r.(ImagePage)).ExtractInto(&s) + return s.Images, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go new file mode 100644 index 000000000000..57787fb725ea --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go @@ -0,0 +1,15 @@ +package images + +import "github.com/gophercloud/gophercloud" + +func listDetailURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("images", "detail") +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("images", id) +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("images", id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go new file mode 100644 index 000000000000..3b0ab783626a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go @@ -0,0 +1,115 @@ +/* +Package servers provides information and interaction with the server API +resource in the OpenStack Compute service. + +A server is a virtual machine instance in the compute system. In order for +one to be provisioned, a valid flavor and image are required. + +Example to List Servers + + listOpts := servers.ListOpts{ + AllTenants: true, + } + + allPages, err := servers.List(computeClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allServers, err := servers.ExtractServers(allPages) + if err != nil { + panic(err) + } + + for _, server := range allServers { + fmt.Printf("%+v\n", server) + } + +Example to Create a Server + + createOpts := servers.CreateOpts{ + Name: "server_name", + ImageRef: "image-uuid", + FlavorRef: "flavor-uuid", + } + + server, err := servers.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Server + + serverID := "d9072956-1560-487c-97f2-18bdf65ec749" + err := servers.Delete(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Force Delete a Server + + serverID := "d9072956-1560-487c-97f2-18bdf65ec749" + err := servers.ForceDelete(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Reboot a Server + + rebootOpts := servers.RebootOpts{ + Type: servers.SoftReboot, + } + + serverID := "d9072956-1560-487c-97f2-18bdf65ec749" + + err := servers.Reboot(computeClient, serverID, rebootOpts).ExtractErr() + if err != nil { + panic(err) + } + +Example to Rebuild a Server + + rebuildOpts := servers.RebuildOpts{ + Name: "new_name", + ImageID: "image-uuid", + } + + serverID := "d9072956-1560-487c-97f2-18bdf65ec749" + + server, err := servers.Rebuilt(computeClient, serverID, rebuildOpts).Extract() + if err != nil { + panic(err) + } + +Example to Resize a Server + + resizeOpts := servers.ResizeOpts{ + FlavorRef: "flavor-uuid", + } + + serverID := "d9072956-1560-487c-97f2-18bdf65ec749" + + err := servers.Resize(computeClient, serverID, resizeOpts).ExtractErr() + if err != nil { + panic(err) + } + + err = servers.ConfirmResize(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Snapshot a Server + + snapshotOpts := servers.CreateImageOpts{ + Name: "snapshot_name", + } 
+ + serverID := "d9072956-1560-487c-97f2-18bdf65ec749" + + image, err := servers.CreateImage(computeClient, serverID, snapshotOpts).ExtractImageID() + if err != nil { + panic(err) + } +*/ +package servers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go new file mode 100644 index 000000000000..c9f0e3c20b5d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go @@ -0,0 +1,71 @@ +package servers + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" +) + +// ErrNeitherImageIDNorImageNameProvided is the error when neither the image +// ID nor the image name is provided for a server operation +type ErrNeitherImageIDNorImageNameProvided struct{ gophercloud.ErrMissingInput } + +func (e ErrNeitherImageIDNorImageNameProvided) Error() string { + return "One and only one of the image ID and the image name must be provided." +} + +// ErrNeitherFlavorIDNorFlavorNameProvided is the error when neither the flavor +// ID nor the flavor name is provided for a server operation +type ErrNeitherFlavorIDNorFlavorNameProvided struct{ gophercloud.ErrMissingInput } + +func (e ErrNeitherFlavorIDNorFlavorNameProvided) Error() string { + return "One and only one of the flavor ID and the flavor name must be provided." +} + +type ErrNoClientProvidedForIDByName struct{ gophercloud.ErrMissingInput } + +func (e ErrNoClientProvidedForIDByName) Error() string { + return "A service client must be provided to find a resource ID by name." +} + +// ErrInvalidHowParameterProvided is the error when an unknown value is given +// for the `how` argument +type ErrInvalidHowParameterProvided struct{ gophercloud.ErrInvalidInput } + +// ErrNoAdminPassProvided is the error when an administrative password isn't +// provided for a server operation +type ErrNoAdminPassProvided struct{ gophercloud.ErrMissingInput } + +// ErrNoImageIDProvided is the error when an image ID isn't provided for a server +// operation +type ErrNoImageIDProvided struct{ gophercloud.ErrMissingInput } + +// ErrNoIDProvided is the error when a server ID isn't provided for a server +// operation +type ErrNoIDProvided struct{ gophercloud.ErrMissingInput } + +// ErrServer is a generic error type for servers HTTP operations. +type ErrServer struct { + gophercloud.ErrUnexpectedResponseCode + ID string +} + +func (se ErrServer) Error() string { + return fmt.Sprintf("Error while executing HTTP request for server [%s]", se.ID) +} + +// Error404 overrides the generic 404 error message. +func (se ErrServer) Error404(e gophercloud.ErrUnexpectedResponseCode) error { + se.ErrUnexpectedResponseCode = e + return &ErrServerNotFound{se} +} + +// ErrServerNotFound is the error when a 404 is received during server HTTP +// operations. +type ErrServerNotFound struct { + ErrServer +} + +func (e ErrServerNotFound) Error() string { + return fmt.Sprintf("I couldn't find server [%s]", e.ID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/microversions.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/microversions.go new file mode 100644 index 000000000000..84ec9f31d355 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/microversions.go @@ -0,0 +1,11 @@ +package servers + +// ExtractTags will extract the tags of a server. +// This requires the client to be set to microversion 2.26 or later. 
+func (r serverResult) ExtractTags() ([]string, error) { + var s struct { + Tags []string `json:"tags"` + } + err := r.ExtractInto(&s) + return s.Tags, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go new file mode 100644 index 000000000000..ee8e93b1dcbf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go @@ -0,0 +1,812 @@ +package servers + +import ( + "encoding/base64" + "encoding/json" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" + "github.com/gophercloud/gophercloud/openstack/compute/v2/images" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToServerListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the server attributes you want to see returned. Marker and Limit are used +// for pagination. +type ListOpts struct { + // ChangesSince is a time/date stamp for when the server last changed status. + ChangesSince string `q:"changes-since"` + + // Image is the name of the image in URL format. + Image string `q:"image"` + + // Flavor is the name of the flavor in URL format. + Flavor string `q:"flavor"` + + // Name of the server as a string; can be queried with regular expressions. + // Realize that ?name=bob returns both bob and bobb. If you need to match bob + // only, you can use a regular expression matching the syntax of the + // underlying database server implemented for Compute. + Name string `q:"name"` + + // Status is the value of the status of the server so that you can filter on + // "ACTIVE" for example. + Status string `q:"status"` + + // Host is the name of the host as a string. + Host string `q:"host"` + + // Marker is a UUID of the server at which you want to set a marker. + Marker string `q:"marker"` + + // Limit is an integer value for the limit of values to return. + Limit int `q:"limit"` + + // AllTenants is a bool to show all tenants. + AllTenants bool `q:"all_tenants"` + + // TenantID lists servers for a particular tenant. + // Setting "AllTenants = true" is required. + TenantID string `q:"tenant_id"` +} + +// ToServerListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToServerListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List makes a request against the API to list servers accessible to you. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listDetailURL(client) + if opts != nil { + query, err := opts.ToServerListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ServerPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToServerCreateMap() (map[string]interface{}, error) +} + +// Network is used within CreateOpts to control a new server's network +// attachments. +type Network struct { + // UUID of a network to attach to the newly provisioned server. 
+ // Required unless Port is provided. + UUID string + + // Port of a neutron network to attach to the newly provisioned server. + // Required unless UUID is provided. + Port string + + // FixedIP specifies a fixed IPv4 address to be used on this network. + FixedIP string +} + +// Personality is an array of files that are injected into the server at launch. +type Personality []*File + +// File is used within CreateOpts and RebuildOpts to inject a file into the +// server at launch. +// File implements the json.Marshaler interface, so when a Create or Rebuild +// operation is requested, json.Marshal will call File's MarshalJSON method. +type File struct { + // Path of the file. + Path string + + // Contents of the file. Maximum content size is 255 bytes. + Contents []byte +} + +// MarshalJSON marshals the escaped file, base64 encoding the contents. +func (f *File) MarshalJSON() ([]byte, error) { + file := struct { + Path string `json:"path"` + Contents string `json:"contents"` + }{ + Path: f.Path, + Contents: base64.StdEncoding.EncodeToString(f.Contents), + } + return json.Marshal(file) +} + +// CreateOpts specifies server creation parameters. +type CreateOpts struct { + // Name is the name to assign to the newly launched server. + Name string `json:"name" required:"true"` + + // ImageRef [optional; required if ImageName is not provided] is the ID or + // full URL to the image that contains the server's OS and initial state. + // Also optional if using the boot-from-volume extension. + ImageRef string `json:"imageRef"` + + // ImageName [optional; required if ImageRef is not provided] is the name of + // the image that contains the server's OS and initial state. + // Also optional if using the boot-from-volume extension. + ImageName string `json:"-"` + + // FlavorRef [optional; required if FlavorName is not provided] is the ID or + // full URL to the flavor that describes the server's specs. + FlavorRef string `json:"flavorRef"` + + // FlavorName [optional; required if FlavorRef is not provided] is the name of + // the flavor that describes the server's specs. + FlavorName string `json:"-"` + + // SecurityGroups lists the names of the security groups to which this server + // should belong. + SecurityGroups []string `json:"-"` + + // UserData contains configuration information or scripts to use upon launch. + // Create will base64-encode it for you, if it isn't already. + UserData []byte `json:"-"` + + // AvailabilityZone in which to launch the server. + AvailabilityZone string `json:"availability_zone,omitempty"` + + // Networks dictates how this server will be attached to available networks. + // By default, the server will be attached to all isolated networks for the + // tenant. + Networks []Network `json:"-"` + + // Metadata contains key-value pairs (up to 255 bytes each) to attach to the + // server. + Metadata map[string]string `json:"metadata,omitempty"` + + // Personality includes files to inject into the server at launch. + // Create will base64-encode file contents for you. + Personality Personality `json:"personality,omitempty"` + + // ConfigDrive enables metadata injection through a configuration drive. + ConfigDrive *bool `json:"config_drive,omitempty"` + + // AdminPass sets the root user password. If not set, a randomly-generated + // password will be created and returned in the response. + AdminPass string `json:"adminPass,omitempty"` + + // AccessIPv4 specifies an IPv4 address for the instance. 
+ AccessIPv4 string `json:"accessIPv4,omitempty"` + + // AccessIPv6 specifies an IPv6 address for the instance. + AccessIPv6 string `json:"accessIPv6,omitempty"` + + // Min specifies Minimum number of servers to launch. + Min int `json:"min_count,omitempty"` + + // Max specifies Maximum number of servers to launch. + Max int `json:"max_count,omitempty"` + + // ServiceClient will allow calls to be made to retrieve an image or + // flavor ID by name. + ServiceClient *gophercloud.ServiceClient `json:"-"` + + // Tags allows a server to be tagged with single-word metadata. + // Requires microversion 2.52 or later. + Tags []string `json:"tags,omitempty"` +} + +// ToServerCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToServerCreateMap() (map[string]interface{}, error) { + sc := opts.ServiceClient + opts.ServiceClient = nil + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + if opts.UserData != nil { + var userData string + if _, err := base64.StdEncoding.DecodeString(string(opts.UserData)); err != nil { + userData = base64.StdEncoding.EncodeToString(opts.UserData) + } else { + userData = string(opts.UserData) + } + b["user_data"] = &userData + } + + if len(opts.SecurityGroups) > 0 { + securityGroups := make([]map[string]interface{}, len(opts.SecurityGroups)) + for i, groupName := range opts.SecurityGroups { + securityGroups[i] = map[string]interface{}{"name": groupName} + } + b["security_groups"] = securityGroups + } + + if len(opts.Networks) > 0 { + networks := make([]map[string]interface{}, len(opts.Networks)) + for i, net := range opts.Networks { + networks[i] = make(map[string]interface{}) + if net.UUID != "" { + networks[i]["uuid"] = net.UUID + } + if net.Port != "" { + networks[i]["port"] = net.Port + } + if net.FixedIP != "" { + networks[i]["fixed_ip"] = net.FixedIP + } + } + b["networks"] = networks + } + + // If ImageRef isn't provided, check if ImageName was provided to ascertain + // the image ID. + if opts.ImageRef == "" { + if opts.ImageName != "" { + if sc == nil { + err := ErrNoClientProvidedForIDByName{} + err.Argument = "ServiceClient" + return nil, err + } + imageID, err := images.IDFromName(sc, opts.ImageName) + if err != nil { + return nil, err + } + b["imageRef"] = imageID + } + } + + // If FlavorRef isn't provided, use FlavorName to ascertain the flavor ID. + if opts.FlavorRef == "" { + if opts.FlavorName == "" { + err := ErrNeitherFlavorIDNorFlavorNameProvided{} + err.Argument = "FlavorRef/FlavorName" + return nil, err + } + if sc == nil { + err := ErrNoClientProvidedForIDByName{} + err.Argument = "ServiceClient" + return nil, err + } + flavorID, err := flavors.IDFromName(sc, opts.FlavorName) + if err != nil { + return nil, err + } + b["flavorRef"] = flavorID + } + + if opts.Min != 0 { + b["min_count"] = opts.Min + } + + if opts.Max != 0 { + b["max_count"] = opts.Max + } + + return map[string]interface{}{"server": b}, nil +} + +// Create requests a server to be provisioned to the user in the current tenant. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + reqBody, err := opts.ToServerCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(listURL(client), reqBody, &r.Body, nil) + return +} + +// Delete requests that a server previously provisioned be removed from your +// account. 
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// ForceDelete forces the deletion of a server. +func ForceDelete(client *gophercloud.ServiceClient, id string) (r ActionResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"forceDelete": ""}, nil, nil) + return +} + +// Get requests details on a single server, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional attributes to the +// Update request. +type UpdateOptsBuilder interface { + ToServerUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts specifies the base attributes that may be updated on an existing +// server. +type UpdateOpts struct { + // Name changes the displayed name of the server. + // The server host name will *not* change. + // Server names are not constrained to be unique, even within the same tenant. + Name string `json:"name,omitempty"` + + // AccessIPv4 provides a new IPv4 address for the instance. + AccessIPv4 string `json:"accessIPv4,omitempty"` + + // AccessIPv6 provides a new IPv6 address for the instance. + AccessIPv6 string `json:"accessIPv6,omitempty"` +} + +// ToServerUpdateMap formats an UpdateOpts structure into a request body. +func (opts UpdateOpts) ToServerUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "server") +} + +// Update requests that various attributes of the indicated server be changed. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToServerUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// ChangeAdminPassword alters the administrator or root password for a specified +// server. +func ChangeAdminPassword(client *gophercloud.ServiceClient, id, newPassword string) (r ActionResult) { + b := map[string]interface{}{ + "changePassword": map[string]string{ + "adminPass": newPassword, + }, + } + _, r.Err = client.Post(actionURL(client, id), b, nil, nil) + return +} + +// RebootMethod describes the mechanisms by which a server reboot can be requested. +type RebootMethod string + +// These constants determine how a server should be rebooted. +// See the Reboot() function for further details. +const ( + SoftReboot RebootMethod = "SOFT" + HardReboot RebootMethod = "HARD" + OSReboot = SoftReboot + PowerCycle = HardReboot +) + +// RebootOptsBuilder allows extensions to add additional parameters to the +// reboot request. +type RebootOptsBuilder interface { + ToServerRebootMap() (map[string]interface{}, error) +} + +// RebootOpts provides options to the reboot request. +type RebootOpts struct { + // Type is the type of reboot to perform on the server. + Type RebootMethod `json:"type" required:"true"` +} + +// ToServerRebootMap builds a body for the reboot request. +func (opts RebootOpts) ToServerRebootMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "reboot") +} + +/* + Reboot requests that a given server reboot. 
+ + Two methods exist for rebooting a server: + + HardReboot (aka PowerCycle) starts the server instance by physically cutting + power to the machine, or if a VM, terminating it at the hypervisor level. + It's done. Caput. Full stop. + Then, after a brief while, power is rtored or the VM instance restarted. + + SoftReboot (aka OSReboot) simply tells the OS to restart under its own + procedure. + E.g., in Linux, asking it to enter runlevel 6, or executing + "sudo shutdown -r now", or by asking Windows to rtart the machine. +*/ +func Reboot(client *gophercloud.ServiceClient, id string, opts RebootOptsBuilder) (r ActionResult) { + b, err := opts.ToServerRebootMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, nil, nil) + return +} + +// RebuildOptsBuilder allows extensions to provide additional parameters to the +// rebuild request. +type RebuildOptsBuilder interface { + ToServerRebuildMap() (map[string]interface{}, error) +} + +// RebuildOpts represents the configuration options used in a server rebuild +// operation. +type RebuildOpts struct { + // AdminPass is the server's admin password + AdminPass string `json:"adminPass,omitempty"` + + // ImageID is the ID of the image you want your server to be provisioned on. + ImageID string `json:"imageRef"` + + // ImageName is readable name of an image. + ImageName string `json:"-"` + + // Name to set the server to + Name string `json:"name,omitempty"` + + // AccessIPv4 [optional] provides a new IPv4 address for the instance. + AccessIPv4 string `json:"accessIPv4,omitempty"` + + // AccessIPv6 [optional] provides a new IPv6 address for the instance. + AccessIPv6 string `json:"accessIPv6,omitempty"` + + // Metadata [optional] contains key-value pairs (up to 255 bytes each) + // to attach to the server. + Metadata map[string]string `json:"metadata,omitempty"` + + // Personality [optional] includes files to inject into the server at launch. + // Rebuild will base64-encode file contents for you. + Personality Personality `json:"personality,omitempty"` + + // ServiceClient will allow calls to be made to retrieve an image or + // flavor ID by name. + ServiceClient *gophercloud.ServiceClient `json:"-"` +} + +// ToServerRebuildMap formats a RebuildOpts struct into a map for use in JSON +func (opts RebuildOpts) ToServerRebuildMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + // If ImageRef isn't provided, check if ImageName was provided to ascertain + // the image ID. + if opts.ImageID == "" { + if opts.ImageName != "" { + if opts.ServiceClient == nil { + err := ErrNoClientProvidedForIDByName{} + err.Argument = "ServiceClient" + return nil, err + } + imageID, err := images.IDFromName(opts.ServiceClient, opts.ImageName) + if err != nil { + return nil, err + } + b["imageRef"] = imageID + } + } + + return map[string]interface{}{"rebuild": b}, nil +} + +// Rebuild will reprovision the server according to the configuration options +// provided in the RebuildOpts struct. +func Rebuild(client *gophercloud.ServiceClient, id string, opts RebuildOptsBuilder) (r RebuildResult) { + b, err := opts.ToServerRebuildMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, &r.Body, nil) + return +} + +// ResizeOptsBuilder allows extensions to add additional parameters to the +// resize request. 
+type ResizeOptsBuilder interface { + ToServerResizeMap() (map[string]interface{}, error) +} + +// ResizeOpts represents the configuration options used to control a Resize +// operation. +type ResizeOpts struct { + // FlavorRef is the ID of the flavor you wish your server to become. + FlavorRef string `json:"flavorRef" required:"true"` +} + +// ToServerResizeMap formats a ResizeOpts as a map that can be used as a JSON +// request body for the Resize request. +func (opts ResizeOpts) ToServerResizeMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "resize") +} + +// Resize instructs the provider to change the flavor of the server. +// +// Note that this implies rebuilding it. +// +// Unfortunately, one cannot pass rebuild parameters to the resize function. +// When the resize completes, the server will be in VERIFY_RESIZE state. +// While in this state, you can explore the use of the new server's +// configuration. If you like it, call ConfirmResize() to commit the resize +// permanently. Otherwise, call RevertResize() to restore the old configuration. +func Resize(client *gophercloud.ServiceClient, id string, opts ResizeOptsBuilder) (r ActionResult) { + b, err := opts.ToServerResizeMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, nil, nil) + return +} + +// ConfirmResize confirms a previous resize operation on a server. +// See Resize() for more details. +func ConfirmResize(client *gophercloud.ServiceClient, id string) (r ActionResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"confirmResize": nil}, nil, &gophercloud.RequestOpts{ + OkCodes: []int{201, 202, 204}, + }) + return +} + +// RevertResize cancels a previous resize operation on a server. +// See Resize() for more details. +func RevertResize(client *gophercloud.ServiceClient, id string) (r ActionResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"revertResize": nil}, nil, nil) + return +} + +// ResetMetadataOptsBuilder allows extensions to add additional parameters to +// the Reset request. +type ResetMetadataOptsBuilder interface { + ToMetadataResetMap() (map[string]interface{}, error) +} + +// MetadataOpts is a map that contains key-value pairs. +type MetadataOpts map[string]string + +// ToMetadataResetMap assembles a body for a Reset request based on the contents +// of a MetadataOpts. +func (opts MetadataOpts) ToMetadataResetMap() (map[string]interface{}, error) { + return map[string]interface{}{"metadata": opts}, nil +} + +// ToMetadataUpdateMap assembles a body for an Update request based on the +// contents of a MetadataOpts. +func (opts MetadataOpts) ToMetadataUpdateMap() (map[string]interface{}, error) { + return map[string]interface{}{"metadata": opts}, nil +} + +// ResetMetadata will create multiple new key-value pairs for the given server +// ID. +// Note: Using this operation will erase any already-existing metadata and +// create the new metadata provided. To keep any already-existing metadata, +// use the UpdateMetadatas or UpdateMetadata function. +func ResetMetadata(client *gophercloud.ServiceClient, id string, opts ResetMetadataOptsBuilder) (r ResetMetadataResult) { + b, err := opts.ToMetadataResetMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(metadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Metadata requests all the metadata for the given server ID. 
+func Metadata(client *gophercloud.ServiceClient, id string) (r GetMetadataResult) { + _, r.Err = client.Get(metadataURL(client, id), &r.Body, nil) + return +} + +// UpdateMetadataOptsBuilder allows extensions to add additional parameters to +// the Create request. +type UpdateMetadataOptsBuilder interface { + ToMetadataUpdateMap() (map[string]interface{}, error) +} + +// UpdateMetadata updates (or creates) all the metadata specified by opts for +// the given server ID. This operation does not affect already-existing metadata +// that is not specified by opts. +func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) (r UpdateMetadataResult) { + b, err := opts.ToMetadataUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(metadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// MetadatumOptsBuilder allows extensions to add additional parameters to the +// Create request. +type MetadatumOptsBuilder interface { + ToMetadatumCreateMap() (map[string]interface{}, string, error) +} + +// MetadatumOpts is a map of length one that contains a key-value pair. +type MetadatumOpts map[string]string + +// ToMetadatumCreateMap assembles a body for a Create request based on the +// contents of a MetadataumOpts. +func (opts MetadatumOpts) ToMetadatumCreateMap() (map[string]interface{}, string, error) { + if len(opts) != 1 { + err := gophercloud.ErrInvalidInput{} + err.Argument = "servers.MetadatumOpts" + err.Info = "Must have 1 and only 1 key-value pair" + return nil, "", err + } + metadatum := map[string]interface{}{"meta": opts} + var key string + for k := range metadatum["meta"].(MetadatumOpts) { + key = k + } + return metadatum, key, nil +} + +// CreateMetadatum will create or update the key-value pair with the given key +// for the given server ID. +func CreateMetadatum(client *gophercloud.ServiceClient, id string, opts MetadatumOptsBuilder) (r CreateMetadatumResult) { + b, key, err := opts.ToMetadatumCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(metadatumURL(client, id, key), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Metadatum requests the key-value pair with the given key for the given +// server ID. +func Metadatum(client *gophercloud.ServiceClient, id, key string) (r GetMetadatumResult) { + _, r.Err = client.Get(metadatumURL(client, id, key), &r.Body, nil) + return +} + +// DeleteMetadatum will delete the key-value pair with the given key for the +// given server ID. +func DeleteMetadatum(client *gophercloud.ServiceClient, id, key string) (r DeleteMetadatumResult) { + _, r.Err = client.Delete(metadatumURL(client, id, key), nil) + return +} + +// ListAddresses makes a request against the API to list the servers IP +// addresses. +func ListAddresses(client *gophercloud.ServiceClient, id string) pagination.Pager { + return pagination.NewPager(client, listAddressesURL(client, id), func(r pagination.PageResult) pagination.Page { + return AddressPage{pagination.SinglePageBase(r)} + }) +} + +// ListAddressesByNetwork makes a request against the API to list the servers IP +// addresses for the given network. 
+func ListAddressesByNetwork(client *gophercloud.ServiceClient, id, network string) pagination.Pager { + return pagination.NewPager(client, listAddressesByNetworkURL(client, id, network), func(r pagination.PageResult) pagination.Page { + return NetworkAddressPage{pagination.SinglePageBase(r)} + }) +} + +// CreateImageOptsBuilder allows extensions to add additional parameters to the +// CreateImage request. +type CreateImageOptsBuilder interface { + ToServerCreateImageMap() (map[string]interface{}, error) +} + +// CreateImageOpts provides options to pass to the CreateImage request. +type CreateImageOpts struct { + // Name of the image/snapshot. + Name string `json:"name" required:"true"` + + // Metadata contains key-value pairs (up to 255 bytes each) to attach to + // the created image. + Metadata map[string]string `json:"metadata,omitempty"` +} + +// ToServerCreateImageMap formats a CreateImageOpts structure into a request +// body. +func (opts CreateImageOpts) ToServerCreateImageMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "createImage") +} + +// CreateImage makes a request against the nova API to schedule an image to be +// created of the server +func CreateImage(client *gophercloud.ServiceClient, id string, opts CreateImageOptsBuilder) (r CreateImageResult) { + b, err := opts.ToServerCreateImageMap() + if err != nil { + r.Err = err + return + } + resp, err := client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + r.Err = err + r.Header = resp.Header + return +} + +// IDFromName is a convienience function that returns a server's ID given its +// name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + allPages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractServers(allPages) + if err != nil { + return "", err + } + + for _, f := range all { + if f.Name == name { + count++ + id = f.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "server"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "server"} + } +} + +// GetPassword makes a request against the nova API to get the encrypted +// administrative password. +func GetPassword(client *gophercloud.ServiceClient, serverId string) (r GetPasswordResult) { + _, r.Err = client.Get(passwordURL(client, serverId), &r.Body, nil) + return +} + +// ShowConsoleOutputOptsBuilder is the interface types must satisfy in order to be +// used as ShowConsoleOutput options +type ShowConsoleOutputOptsBuilder interface { + ToServerShowConsoleOutputMap() (map[string]interface{}, error) +} + +// ShowConsoleOutputOpts satisfies the ShowConsoleOutputOptsBuilder +type ShowConsoleOutputOpts struct { + // The number of lines to fetch from the end of console log. + // All lines will be returned if this is not specified. + Length int `json:"length,omitempty"` +} + +// ToServerShowConsoleOutputMap formats a ShowConsoleOutputOpts structure into a request body. 
+func (opts ShowConsoleOutputOpts) ToServerShowConsoleOutputMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-getConsoleOutput") +} + +// ShowConsoleOutput makes a request against the nova API to get console log from the server +func ShowConsoleOutput(client *gophercloud.ServiceClient, id string, opts ShowConsoleOutputOptsBuilder) (r ShowConsoleOutputResult) { + b, err := opts.ToServerShowConsoleOutputMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go new file mode 100644 index 000000000000..f973d1ea0e1b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go @@ -0,0 +1,414 @@ +package servers + +import ( + "crypto/rsa" + "encoding/base64" + "encoding/json" + "fmt" + "net/url" + "path" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type serverResult struct { + gophercloud.Result +} + +// Extract interprets any serverResult as a Server, if possible. +func (r serverResult) Extract() (*Server, error) { + var s Server + err := r.ExtractInto(&s) + return &s, err +} + +func (r serverResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "server") +} + +func ExtractServersInto(r pagination.Page, v interface{}) error { + return r.(ServerPage).Result.ExtractIntoSlicePtr(v, "servers") +} + +// CreateResult is the response from a Create operation. Call its Extract +// method to interpret it as a Server. +type CreateResult struct { + serverResult +} + +// GetResult is the response from a Get operation. Call its Extract +// method to interpret it as a Server. +type GetResult struct { + serverResult +} + +// UpdateResult is the response from an Update operation. Call its Extract +// method to interpret it as a Server. +type UpdateResult struct { + serverResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// RebuildResult is the response from a Rebuild operation. Call its Extract +// method to interpret it as a Server. +type RebuildResult struct { + serverResult +} + +// ActionResult represents the result of server action operations, like reboot. +// Call its ExtractErr method to determine if the action succeeded or failed. +type ActionResult struct { + gophercloud.ErrResult +} + +// CreateImageResult is the response from a CreateImage operation. Call its +// ExtractImageID method to retrieve the ID of the newly created image. +type CreateImageResult struct { + gophercloud.Result +} + +// ShowConsoleOutputResult represents the result of console output from a server +type ShowConsoleOutputResult struct { + gophercloud.Result +} + +// Extract will return the console output from a ShowConsoleOutput request. +func (r ShowConsoleOutputResult) Extract() (string, error) { + var s struct { + Output string `json:"output"` + } + + err := r.ExtractInto(&s) + return s.Output, err +} + +// GetPasswordResult represent the result of a get os-server-password operation. +// Call its ExtractPassword method to retrieve the password. 
+type GetPasswordResult struct { + gophercloud.Result +} + +// ExtractPassword gets the encrypted password. +// If privateKey != nil the password is decrypted with the private key. +// If privateKey == nil the encrypted password is returned and can be decrypted +// with: +// echo '' | base64 -D | openssl rsautl -decrypt -inkey +func (r GetPasswordResult) ExtractPassword(privateKey *rsa.PrivateKey) (string, error) { + var s struct { + Password string `json:"password"` + } + err := r.ExtractInto(&s) + if err == nil && privateKey != nil && s.Password != "" { + return decryptPassword(s.Password, privateKey) + } + return s.Password, err +} + +func decryptPassword(encryptedPassword string, privateKey *rsa.PrivateKey) (string, error) { + b64EncryptedPassword := make([]byte, base64.StdEncoding.DecodedLen(len(encryptedPassword))) + + n, err := base64.StdEncoding.Decode(b64EncryptedPassword, []byte(encryptedPassword)) + if err != nil { + return "", fmt.Errorf("Failed to base64 decode encrypted password: %s", err) + } + password, err := rsa.DecryptPKCS1v15(nil, privateKey, b64EncryptedPassword[0:n]) + if err != nil { + return "", fmt.Errorf("Failed to decrypt password: %s", err) + } + + return string(password), nil +} + +// ExtractImageID gets the ID of the newly created server image from the header. +func (r CreateImageResult) ExtractImageID() (string, error) { + if r.Err != nil { + return "", r.Err + } + // Get the image id from the header + u, err := url.ParseRequestURI(r.Header.Get("Location")) + if err != nil { + return "", err + } + imageID := path.Base(u.Path) + if imageID == "." || imageID == "/" { + return "", fmt.Errorf("Failed to parse the ID of newly created image: %s", u) + } + return imageID, nil +} + +// Server represents a server/instance in the OpenStack cloud. +type Server struct { + // ID uniquely identifies this server amongst all other servers, + // including those not accessible to the current tenant. + ID string `json:"id"` + + // TenantID identifies the tenant owning this server resource. + TenantID string `json:"tenant_id"` + + // UserID uniquely identifies the user account owning the tenant. + UserID string `json:"user_id"` + + // Name contains the human-readable name for the server. + Name string `json:"name"` + + // Updated and Created contain ISO-8601 timestamps of when the state of the + // server last changed, and when it was created. + Updated time.Time `json:"updated"` + Created time.Time `json:"created"` + + // HostID is the host where the server is located in the cloud. + HostID string `json:"hostid"` + + // Status contains the current operational status of the server, + // such as IN_PROGRESS or ACTIVE. + Status string `json:"status"` + + // Progress ranges from 0..100. + // A request made against the server completes only once Progress reaches 100. + Progress int `json:"progress"` + + // AccessIPv4 and AccessIPv6 contain the IP addresses of the server, + // suitable for remote access for administration. + AccessIPv4 string `json:"accessIPv4"` + AccessIPv6 string `json:"accessIPv6"` + + // Image refers to a JSON object, which itself indicates the OS image used to + // deploy the server. + Image map[string]interface{} `json:"-"` + + // Flavor refers to a JSON object, which itself indicates the hardware + // configuration of the deployed server. + Flavor map[string]interface{} `json:"flavor"` + + // Addresses includes a list of all IP addresses assigned to the server, + // keyed by pool. 
+ Addresses map[string]interface{} `json:"addresses"` + + // Metadata includes a list of all user-specified key-value pairs attached + // to the server. + Metadata map[string]string `json:"metadata"` + + // Links includes HTTP references to the itself, useful for passing along to + // other APIs that might want a server reference. + Links []interface{} `json:"links"` + + // KeyName indicates which public key was injected into the server on launch. + KeyName string `json:"key_name"` + + // AdminPass will generally be empty (""). However, it will contain the + // administrative password chosen when provisioning a new server without a + // set AdminPass setting in the first place. + // Note that this is the ONLY time this field will be valid. + AdminPass string `json:"adminPass"` + + // SecurityGroups includes the security groups that this instance has applied + // to it. + SecurityGroups []map[string]interface{} `json:"security_groups"` + + // Fault contains failure information about a server. + Fault Fault `json:"fault"` +} + +type Fault struct { + Code int `json:"code"` + Created time.Time `json:"created"` + Details string `json:"details"` + Message string `json:"message"` +} + +func (r *Server) UnmarshalJSON(b []byte) error { + type tmp Server + var s struct { + tmp + Image interface{} `json:"image"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Server(s.tmp) + + switch t := s.Image.(type) { + case map[string]interface{}: + r.Image = t + case string: + switch t { + case "": + r.Image = nil + } + } + + return err +} + +// ServerPage abstracts the raw results of making a List() request against +// the API. As OpenStack extensions may freely alter the response bodies of +// structures returned to the client, you may only safely access the data +// provided through the ExtractServers call. +type ServerPage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if a page contains no Server results. +func (r ServerPage) IsEmpty() (bool, error) { + s, err := ExtractServers(r) + return len(s) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to the +// next page of results. +func (r ServerPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"servers_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractServers interprets the results of a single page from a List() call, +// producing a slice of Server entities. +func ExtractServers(r pagination.Page) ([]Server, error) { + var s []Server + err := ExtractServersInto(r, &s) + return s, err +} + +// MetadataResult contains the result of a call for (potentially) multiple +// key-value pairs. Call its Extract method to interpret it as a +// map[string]interface. +type MetadataResult struct { + gophercloud.Result +} + +// GetMetadataResult contains the result of a Get operation. Call its Extract +// method to interpret it as a map[string]interface. +type GetMetadataResult struct { + MetadataResult +} + +// ResetMetadataResult contains the result of a Reset operation. Call its +// Extract method to interpret it as a map[string]interface. +type ResetMetadataResult struct { + MetadataResult +} + +// UpdateMetadataResult contains the result of an Update operation. Call its +// Extract method to interpret it as a map[string]interface. 
+type UpdateMetadataResult struct { + MetadataResult +} + +// MetadatumResult contains the result of a call for individual a single +// key-value pair. +type MetadatumResult struct { + gophercloud.Result +} + +// GetMetadatumResult contains the result of a Get operation. Call its Extract +// method to interpret it as a map[string]interface. +type GetMetadatumResult struct { + MetadatumResult +} + +// CreateMetadatumResult contains the result of a Create operation. Call its +// Extract method to interpret it as a map[string]interface. +type CreateMetadatumResult struct { + MetadatumResult +} + +// DeleteMetadatumResult contains the result of a Delete operation. Call its +// ExtractErr method to determine if the call succeeded or failed. +type DeleteMetadatumResult struct { + gophercloud.ErrResult +} + +// Extract interprets any MetadataResult as a Metadata, if possible. +func (r MetadataResult) Extract() (map[string]string, error) { + var s struct { + Metadata map[string]string `json:"metadata"` + } + err := r.ExtractInto(&s) + return s.Metadata, err +} + +// Extract interprets any MetadatumResult as a Metadatum, if possible. +func (r MetadatumResult) Extract() (map[string]string, error) { + var s struct { + Metadatum map[string]string `json:"meta"` + } + err := r.ExtractInto(&s) + return s.Metadatum, err +} + +// Address represents an IP address. +type Address struct { + Version int `json:"version"` + Address string `json:"addr"` +} + +// AddressPage abstracts the raw results of making a ListAddresses() request +// against the API. As OpenStack extensions may freely alter the response bodies +// of structures returned to the client, you may only safely access the data +// provided through the ExtractAddresses call. +type AddressPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if an AddressPage contains no networks. +func (r AddressPage) IsEmpty() (bool, error) { + addresses, err := ExtractAddresses(r) + return len(addresses) == 0, err +} + +// ExtractAddresses interprets the results of a single page from a +// ListAddresses() call, producing a map of addresses. +func ExtractAddresses(r pagination.Page) (map[string][]Address, error) { + var s struct { + Addresses map[string][]Address `json:"addresses"` + } + err := (r.(AddressPage)).ExtractInto(&s) + return s.Addresses, err +} + +// NetworkAddressPage abstracts the raw results of making a +// ListAddressesByNetwork() request against the API. +// As OpenStack extensions may freely alter the response bodies of structures +// returned to the client, you may only safely access the data provided through +// the ExtractAddresses call. +type NetworkAddressPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a NetworkAddressPage contains no addresses. +func (r NetworkAddressPage) IsEmpty() (bool, error) { + addresses, err := ExtractNetworkAddresses(r) + return len(addresses) == 0, err +} + +// ExtractNetworkAddresses interprets the results of a single page from a +// ListAddressesByNetwork() call, producing a slice of addresses. 
+func ExtractNetworkAddresses(r pagination.Page) ([]Address, error) { + var s map[string][]Address + err := (r.(NetworkAddressPage)).ExtractInto(&s) + if err != nil { + return nil, err + } + + var key string + for k := range s { + key = k + } + + return s[key], err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go new file mode 100644 index 000000000000..e892e8d92597 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go @@ -0,0 +1,51 @@ +package servers + +import "github.com/gophercloud/gophercloud" + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("servers") +} + +func listURL(client *gophercloud.ServiceClient) string { + return createURL(client) +} + +func listDetailURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("servers", "detail") +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id) +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return deleteURL(client, id) +} + +func updateURL(client *gophercloud.ServiceClient, id string) string { + return deleteURL(client, id) +} + +func actionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "action") +} + +func metadatumURL(client *gophercloud.ServiceClient, id, key string) string { + return client.ServiceURL("servers", id, "metadata", key) +} + +func metadataURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "metadata") +} + +func listAddressesURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "ips") +} + +func listAddressesByNetworkURL(client *gophercloud.ServiceClient, id, network string) string { + return client.ServiceURL("servers", id, "ips", network) +} + +func passwordURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "os-server-password") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go new file mode 100644 index 000000000000..cadef054506a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go @@ -0,0 +1,21 @@ +package servers + +import "github.com/gophercloud/gophercloud" + +// WaitForStatus will continually poll a server until it successfully +// transitions to a specified status. It will do this for at most the number +// of seconds specified. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/doc.go new file mode 100644 index 000000000000..cedf1f4d3a38 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/doc.go @@ -0,0 +1,14 @@ +/* +Package openstack contains resources for the individual OpenStack projects +supported in Gophercloud. 
It also includes functions to authenticate to an +OpenStack cloud and for provisioning various service-level clients. + +Example of Creating a Service Client + + ao, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(ao) + client, err := openstack.NewNetworkV2(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +*/ +package openstack diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go new file mode 100644 index 000000000000..12c8aebcf77c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go @@ -0,0 +1,107 @@ +package openstack + +import ( + "github.com/gophercloud/gophercloud" + tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" +) + +/* +V2EndpointURL discovers the endpoint URL for a specific service from a +ServiceCatalog acquired during the v2 identity service. + +The specified EndpointOpts are used to identify a unique, unambiguous endpoint +to return. It's an error both when multiple endpoints match the provided +criteria and when none do. The minimum that can be specified is a Type, but you +will also often need to specify a Name and/or a Region depending on what's +available on your OpenStack deployment. +*/ +func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + // Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided. + var endpoints = make([]tokens2.Endpoint, 0, 1) + for _, entry := range catalog.Entries { + if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { + for _, endpoint := range entry.Endpoints { + if opts.Region == "" || endpoint.Region == opts.Region { + endpoints = append(endpoints, endpoint) + } + } + } + } + + // Report an error if the options were ambiguous. + if len(endpoints) > 1 { + err := &ErrMultipleMatchingEndpointsV2{} + err.Endpoints = endpoints + return "", err + } + + // Extract the appropriate URL from the matching Endpoint. + for _, endpoint := range endpoints { + switch opts.Availability { + case gophercloud.AvailabilityPublic: + return gophercloud.NormalizeURL(endpoint.PublicURL), nil + case gophercloud.AvailabilityInternal: + return gophercloud.NormalizeURL(endpoint.InternalURL), nil + case gophercloud.AvailabilityAdmin: + return gophercloud.NormalizeURL(endpoint.AdminURL), nil + default: + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } + } + + // Report an error if there were no matching endpoints. + err := &gophercloud.ErrEndpointNotFound{} + return "", err +} + +/* +V3EndpointURL discovers the endpoint URL for a specific service from a Catalog +acquired during the v3 identity service. + +The specified EndpointOpts are used to identify a unique, unambiguous endpoint +to return. It's an error both when multiple endpoints match the provided +criteria and when none do. The minimum that can be specified is a Type, but you +will also often need to specify a Name and/or a Region depending on what's +available on your OpenStack deployment. 
+*/ +func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + // Extract Endpoints from the catalog entries that match the requested Type, Interface, + // Name if provided, and Region if provided. + var endpoints = make([]tokens3.Endpoint, 0, 1) + for _, entry := range catalog.Entries { + if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { + for _, endpoint := range entry.Endpoints { + if opts.Availability != gophercloud.AvailabilityAdmin && + opts.Availability != gophercloud.AvailabilityPublic && + opts.Availability != gophercloud.AvailabilityInternal { + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } + if (opts.Availability == gophercloud.Availability(endpoint.Interface)) && + (opts.Region == "" || endpoint.Region == opts.Region || endpoint.RegionID == opts.Region) { + endpoints = append(endpoints, endpoint) + } + } + } + } + + // Report an error if the options were ambiguous. + if len(endpoints) > 1 { + return "", ErrMultipleMatchingEndpointsV3{Endpoints: endpoints} + } + + // Extract the URL from the matching Endpoint. + for _, endpoint := range endpoints { + return gophercloud.NormalizeURL(endpoint.URL), nil + } + + // Report an error if there were no matching endpoints. + err := &gophercloud.ErrEndpointNotFound{} + return "", err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/errors.go new file mode 100644 index 000000000000..df410b1c6114 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/errors.go @@ -0,0 +1,71 @@ +package openstack + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" + tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" +) + +// ErrEndpointNotFound is the error when no suitable endpoint can be found +// in the user's catalog +type ErrEndpointNotFound struct{ gophercloud.BaseError } + +func (e ErrEndpointNotFound) Error() string { + return "No suitable endpoint could be found in the service catalog." 
+} + +// ErrInvalidAvailabilityProvided is the error when an invalid endpoint +// availability is provided +type ErrInvalidAvailabilityProvided struct{ gophercloud.ErrInvalidInput } + +func (e ErrInvalidAvailabilityProvided) Error() string { + return fmt.Sprintf("Unexpected availability in endpoint query: %s", e.Value) +} + +// ErrMultipleMatchingEndpointsV2 is the error when more than one endpoint +// for the given options is found in the v2 catalog +type ErrMultipleMatchingEndpointsV2 struct { + gophercloud.BaseError + Endpoints []tokens2.Endpoint +} + +func (e ErrMultipleMatchingEndpointsV2) Error() string { + return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) +} + +// ErrMultipleMatchingEndpointsV3 is the error when more than one endpoint +// for the given options is found in the v3 catalog +type ErrMultipleMatchingEndpointsV3 struct { + gophercloud.BaseError + Endpoints []tokens3.Endpoint +} + +func (e ErrMultipleMatchingEndpointsV3) Error() string { + return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) +} + +// ErrNoAuthURL is the error when the OS_AUTH_URL environment variable is not +// found +type ErrNoAuthURL struct{ gophercloud.ErrInvalidInput } + +func (e ErrNoAuthURL) Error() string { + return "Environment variable OS_AUTH_URL needs to be set." +} + +// ErrNoUsername is the error when the OS_USERNAME environment variable is not +// found +type ErrNoUsername struct{ gophercloud.ErrInvalidInput } + +func (e ErrNoUsername) Error() string { + return "Environment variable OS_USERNAME needs to be set." +} + +// ErrNoPassword is the error when the OS_PASSWORD environment variable is not +// found +type ErrNoPassword struct{ gophercloud.ErrInvalidInput } + +func (e ErrNoPassword) Error() string { + return "Environment variable OS_PASSWORD needs to be set." +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go new file mode 100644 index 000000000000..45623369e18e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go @@ -0,0 +1,65 @@ +/* +Package tenants provides information and interaction with the +tenants API resource for the OpenStack Identity service. + +See http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 +and http://developer.openstack.org/api-ref-identity-v2.html#admin-tenants +for more information. 
+ +Example to List Tenants + + listOpts := tenants.ListOpts{ + Limit: 2, + } + + allPages, err := tenants.List(identityClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allTenants, err := tenants.ExtractTenants(allPages) + if err != nil { + panic(err) + } + + for _, tenant := range allTenants { + fmt.Printf("%+v\n", tenant) + } + +Example to Create a Tenant + + createOpts := tenants.CreateOpts{ + Name: "tenant_name", + Description: "this is a tenant", + Enabled: gophercloud.Enabled, + } + + tenant, err := tenants.Create(identityClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Tenant + + tenantID := "e6db6ed6277c461a853458589063b295" + + updateOpts := tenants.UpdateOpts{ + Description: "this is a new description", + Enabled: gophercloud.Disabled, + } + + tenant, err := tenants.Update(identityClient, tenantID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Tenant + + tenantID := "e6db6ed6277c461a853458589063b295" + + err := tenants.Delete(identitYClient, tenantID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package tenants diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go new file mode 100644 index 000000000000..f21a58f10c86 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go @@ -0,0 +1,116 @@ +package tenants + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts filters the Tenants that are returned by the List call. +type ListOpts struct { + // Marker is the ID of the last Tenant on the previous page. + Marker string `q:"marker"` + + // Limit specifies the page size. + Limit int `q:"limit"` +} + +// List enumerates the Tenants to which the current token has access. +func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager { + url := listURL(client) + if opts != nil { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return pagination.Pager{Err: err} + } + url += q.String() + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return TenantPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOpts represents the options needed when creating new tenant. +type CreateOpts struct { + // Name is the name of the tenant. + Name string `json:"name" required:"true"` + + // Description is the description of the tenant. + Description string `json:"description,omitempty"` + + // Enabled sets the tenant status to enabled or disabled. + Enabled *bool `json:"enabled,omitempty"` +} + +// CreateOptsBuilder enables extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToTenantCreateMap() (map[string]interface{}, error) +} + +// ToTenantCreateMap assembles a request body based on the contents of +// a CreateOpts. +func (opts CreateOpts) ToTenantCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "tenant") +} + +// Create is the operation responsible for creating new tenant. 
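+//
+// A rough, illustrative sketch of the body that Create sends (built by
+// ToTenantCreateMap above); the option values are placeholders and the map
+// shown is only approximate:
+//
+//   opts := tenants.CreateOpts{Name: "dev", Description: "dev tenant"}
+//   b, _ := opts.ToTenantCreateMap()
+//   // b is roughly:
+//   // map[string]interface{}{"tenant": map[string]interface{}{
+//   //     "name": "dev", "description": "dev tenant"}}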
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToTenantCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Get requests details on a single tenant by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToTenantUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts specifies the base attributes that may be updated on an existing +// tenant. +type UpdateOpts struct { + // Name is the name of the tenant. + Name string `json:"name,omitempty"` + + // Description is the description of the tenant. + Description *string `json:"description,omitempty"` + + // Enabled sets the tenant status to enabled or disabled. + Enabled *bool `json:"enabled,omitempty"` +} + +// ToTenantUpdateMap formats an UpdateOpts structure into a request body. +func (opts UpdateOpts) ToTenantUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "tenant") +} + +// Update is the operation responsible for updating exist tenants by their TenantID. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToTenantUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateURL(client, id), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete is the operation responsible for permanently deleting a tenant. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go new file mode 100644 index 000000000000..bb6c2c6b08a5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go @@ -0,0 +1,91 @@ +package tenants + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Tenant is a grouping of users in the identity service. +type Tenant struct { + // ID is a unique identifier for this tenant. + ID string `json:"id"` + + // Name is a friendlier user-facing name for this tenant. + Name string `json:"name"` + + // Description is a human-readable explanation of this Tenant's purpose. + Description string `json:"description"` + + // Enabled indicates whether or not a tenant is active. + Enabled bool `json:"enabled"` +} + +// TenantPage is a single page of Tenant results. +type TenantPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a page of Tenants contains any results. +func (r TenantPage) IsEmpty() (bool, error) { + tenants, err := ExtractTenants(r) + return len(tenants) == 0, err +} + +// NextPageURL extracts the "next" link from the tenants_links section of the result. 
+func (r TenantPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"tenants_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractTenants returns a slice of Tenants contained in a single page of +// results. +func ExtractTenants(r pagination.Page) ([]Tenant, error) { + var s struct { + Tenants []Tenant `json:"tenants"` + } + err := (r.(TenantPage)).ExtractInto(&s) + return s.Tenants, err +} + +type tenantResult struct { + gophercloud.Result +} + +// Extract interprets any tenantResults as a Tenant. +func (r tenantResult) Extract() (*Tenant, error) { + var s struct { + Tenant *Tenant `json:"tenant"` + } + err := r.ExtractInto(&s) + return s.Tenant, err +} + +// GetResult is the response from a Get request. Call its Extract method to +// interpret it as a Tenant. +type GetResult struct { + tenantResult +} + +// CreateResult is the response from a Create request. Call its Extract method +// to interpret it as a Tenant. +type CreateResult struct { + tenantResult +} + +// DeleteResult is the response from a Get request. Call its ExtractErr method +// to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult is the response from a Update request. Call its Extract method +// to interpret it as a Tenant. +type UpdateResult struct { + tenantResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go new file mode 100644 index 000000000000..0f026690790c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go @@ -0,0 +1,23 @@ +package tenants + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("tenants") +} + +func getURL(client *gophercloud.ServiceClient, tenantID string) string { + return client.ServiceURL("tenants", tenantID) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("tenants") +} + +func deleteURL(client *gophercloud.ServiceClient, tenantID string) string { + return client.ServiceURL("tenants", tenantID) +} + +func updateURL(client *gophercloud.ServiceClient, tenantID string) string { + return client.ServiceURL("tenants", tenantID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go new file mode 100644 index 000000000000..5375eea87267 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go @@ -0,0 +1,46 @@ +/* +Package tokens provides information and interaction with the token API +resource for the OpenStack Identity service. 
+ +For more information, see: +http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 + +Example to Create an Unscoped Token from a Password + + authOpts := gophercloud.AuthOptions{ + Username: "user", + Password: "pass" + } + + token, err := tokens.Create(identityClient, authOpts).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Tenant ID and Password + + authOpts := gophercloud.AuthOptions{ + Username: "user", + Password: "password", + TenantID: "fc394f2ab2df4114bde39905f800dc57" + } + + token, err := tokens.Create(identityClient, authOpts).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Tenant Name and Password + + authOpts := gophercloud.AuthOptions{ + Username: "user", + Password: "password", + TenantName: "tenantname" + } + + token, err := tokens.Create(identityClient, authOpts).ExtractToken() + if err != nil { + panic(err) + } +*/ +package tokens diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go new file mode 100644 index 000000000000..ab32368cc6ef --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go @@ -0,0 +1,103 @@ +package tokens + +import "github.com/gophercloud/gophercloud" + +// PasswordCredentialsV2 represents the required options to authenticate +// with a username and password. +type PasswordCredentialsV2 struct { + Username string `json:"username" required:"true"` + Password string `json:"password" required:"true"` +} + +// TokenCredentialsV2 represents the required options to authenticate +// with a token. +type TokenCredentialsV2 struct { + ID string `json:"id,omitempty" required:"true"` +} + +// AuthOptionsV2 wraps a gophercloud AuthOptions in order to adhere to the +// AuthOptionsBuilder interface. +type AuthOptionsV2 struct { + PasswordCredentials *PasswordCredentialsV2 `json:"passwordCredentials,omitempty" xor:"TokenCredentials"` + + // The TenantID and TenantName fields are optional for the Identity V2 API. + // Some providers allow you to specify a TenantName instead of the TenantId. + // Some require both. Your provider's authentication policies will determine + // how these fields influence authentication. + TenantID string `json:"tenantId,omitempty"` + TenantName string `json:"tenantName,omitempty"` + + // TokenCredentials allows users to authenticate (possibly as another user) + // with an authentication token ID. + TokenCredentials *TokenCredentialsV2 `json:"token,omitempty" xor:"PasswordCredentials"` +} + +// AuthOptionsBuilder allows extensions to add additional parameters to the +// token create request. +type AuthOptionsBuilder interface { + // ToTokenCreateMap assembles the Create request body, returning an error + // if parameters are missing or inconsistent. + ToTokenV2CreateMap() (map[string]interface{}, error) +} + +// AuthOptions are the valid options for Openstack Identity v2 authentication. +// For field descriptions, see gophercloud.AuthOptions. +type AuthOptions struct { + IdentityEndpoint string `json:"-"` + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + TenantID string `json:"tenantId,omitempty"` + TenantName string `json:"tenantName,omitempty"` + AllowReauth bool `json:"-"` + TokenID string +} + +// ToTokenV2CreateMap builds a token request body from the given AuthOptions. 
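+//
+// A rough sketch of the body produced for password authentication (the
+// credential values here are placeholders, and the JSON shown is only
+// approximate):
+//
+//   opts := tokens.AuthOptions{Username: "user", Password: "secret", TenantName: "demo"}
+//   b, _ := opts.ToTokenV2CreateMap()
+//   // b is roughly:
+//   // {"auth": {"passwordCredentials": {"username": "user", "password": "secret"},
+//   //           "tenantName": "demo"}}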
+func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { + v2Opts := AuthOptionsV2{ + TenantID: opts.TenantID, + TenantName: opts.TenantName, + } + + if opts.Password != "" { + v2Opts.PasswordCredentials = &PasswordCredentialsV2{ + Username: opts.Username, + Password: opts.Password, + } + } else { + v2Opts.TokenCredentials = &TokenCredentialsV2{ + ID: opts.TokenID, + } + } + + b, err := gophercloud.BuildRequestBody(v2Opts, "auth") + if err != nil { + return nil, err + } + return b, nil +} + +// Create authenticates to the identity service and attempts to acquire a Token. +// Generally, rather than interact with this call directly, end users should +// call openstack.AuthenticatedClient(), which abstracts all of the gory details +// about navigating service catalogs and such. +func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) (r CreateResult) { + b, err := auth.ToTokenV2CreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(CreateURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, + MoreHeaders: map[string]string{"X-Auth-Token": ""}, + }) + return +} + +// Get validates and retrieves information for user's token. +func Get(client *gophercloud.ServiceClient, token string) (r GetResult) { + _, r.Err = client.Get(GetURL(client, token), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go new file mode 100644 index 000000000000..ee5da37f465d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go @@ -0,0 +1,174 @@ +package tokens + +import ( + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" +) + +// Token provides only the most basic information related to an authentication +// token. +type Token struct { + // ID provides the primary means of identifying a user to the OpenStack API. + // OpenStack defines this field as an opaque value, so do not depend on its + // content. It is safe, however, to compare for equality. + ID string + + // ExpiresAt provides a timestamp in ISO 8601 format, indicating when the + // authentication token becomes invalid. After this point in time, future + // API requests made using this authentication token will respond with + // errors. Either the caller will need to reauthenticate manually, or more + // preferably, the caller should exploit automatic re-authentication. + // See the AuthOptions structure for more details. + ExpiresAt time.Time + + // Tenant provides information about the tenant to which this token grants + // access. + Tenant tenants.Tenant +} + +// Role is a role for a user. +type Role struct { + Name string `json:"name"` +} + +// User is an OpenStack user. +type User struct { + ID string `json:"id"` + Name string `json:"name"` + UserName string `json:"username"` + Roles []Role `json:"roles"` +} + +// Endpoint represents a single API endpoint offered by a service. +// It provides the public and internal URLs, if supported, along with a region +// specifier, again if provided. +// +// The significance of the Region field will depend upon your provider. 
+// +// In addition, the interface offered by the service will have version +// information associated with it through the VersionId, VersionInfo, and +// VersionList fields, if provided or supported. +// +// In all cases, fields which aren't supported by the provider and service +// combined will assume a zero-value (""). +type Endpoint struct { + TenantID string `json:"tenantId"` + PublicURL string `json:"publicURL"` + InternalURL string `json:"internalURL"` + AdminURL string `json:"adminURL"` + Region string `json:"region"` + VersionID string `json:"versionId"` + VersionInfo string `json:"versionInfo"` + VersionList string `json:"versionList"` +} + +// CatalogEntry provides a type-safe interface to an Identity API V2 service +// catalog listing. +// +// Each class of service, such as cloud DNS or block storage services, will have +// a single CatalogEntry representing it. +// +// Note: when looking for the desired service, try, whenever possible, to key +// off the type field. Otherwise, you'll tie the representation of the service +// to a specific provider. +type CatalogEntry struct { + // Name will contain the provider-specified name for the service. + Name string `json:"name"` + + // Type will contain a type string if OpenStack defines a type for the + // service. Otherwise, for provider-specific services, the provider may assign + // their own type strings. + Type string `json:"type"` + + // Endpoints will let the caller iterate over all the different endpoints that + // may exist for the service. + Endpoints []Endpoint `json:"endpoints"` +} + +// ServiceCatalog provides a view into the service catalog from a previous, +// successful authentication. +type ServiceCatalog struct { + Entries []CatalogEntry +} + +// CreateResult is the response from a Create request. Use ExtractToken() to +// interpret it as a Token, or ExtractServiceCatalog() to interpret it as a +// service catalog. +type CreateResult struct { + gophercloud.Result +} + +// GetResult is the deferred response from a Get call, which is the same with a +// Created token. Use ExtractUser() to interpret it as a User. +type GetResult struct { + CreateResult +} + +// ExtractToken returns the just-created Token from a CreateResult. +func (r CreateResult) ExtractToken() (*Token, error) { + var s struct { + Access struct { + Token struct { + Expires string `json:"expires"` + ID string `json:"id"` + Tenant tenants.Tenant `json:"tenant"` + } `json:"token"` + } `json:"access"` + } + + err := r.ExtractInto(&s) + if err != nil { + return nil, err + } + + expiresTs, err := time.Parse(gophercloud.RFC3339Milli, s.Access.Token.Expires) + if err != nil { + return nil, err + } + + return &Token{ + ID: s.Access.Token.ID, + ExpiresAt: expiresTs, + Tenant: s.Access.Token.Tenant, + }, nil +} + +// ExtractTokenID implements the gophercloud.AuthResult interface. The returned +// string is the same as the ID field of the Token struct returned from +// ExtractToken(). +func (r CreateResult) ExtractTokenID() (string, error) { + var s struct { + Access struct { + Token struct { + ID string `json:"id"` + } `json:"token"` + } `json:"access"` + } + err := r.ExtractInto(&s) + return s.Access.Token.ID, err +} + +// ExtractServiceCatalog returns the ServiceCatalog that was generated along +// with the user's Token. 
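+//
+// A rough usage sketch (identityClient and authOpts are assumed to already
+// exist); the extracted catalog is the structure that endpoint lookup helpers
+// such as openstack.V2EndpointURL consume:
+//
+//   result := tokens.Create(identityClient, authOpts)
+//   catalog, err := result.ExtractServiceCatalog()
+//   if err == nil {
+//       for _, entry := range catalog.Entries {
+//           fmt.Printf("%s (%s): %d endpoints\n", entry.Name, entry.Type, len(entry.Endpoints))
+//       }
+//   }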
+func (r CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) { + var s struct { + Access struct { + Entries []CatalogEntry `json:"serviceCatalog"` + } `json:"access"` + } + err := r.ExtractInto(&s) + return &ServiceCatalog{Entries: s.Access.Entries}, err +} + +// ExtractUser returns the User from a GetResult. +func (r GetResult) ExtractUser() (*User, error) { + var s struct { + Access struct { + User User `json:"user"` + } `json:"access"` + } + err := r.ExtractInto(&s) + return &s.Access.User, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go new file mode 100644 index 000000000000..ee0a28f2004c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go @@ -0,0 +1,13 @@ +package tokens + +import "github.com/gophercloud/gophercloud" + +// CreateURL generates the URL used to create new Tokens. +func CreateURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("tokens") +} + +// GetURL generates the URL used to Validate Tokens. +func GetURL(client *gophercloud.ServiceClient, token string) string { + return client.ServiceURL("tokens", token) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go new file mode 100644 index 000000000000..966e128f1281 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go @@ -0,0 +1,108 @@ +/* +Package tokens provides information and interaction with the token API +resource for the OpenStack Identity service. + +For more information, see: +http://developer.openstack.org/api-ref-identity-v3.html#tokens-v3 + +Example to Create a Token From a Username and Password + + authOptions := tokens.AuthOptions{ + UserID: "username", + Password: "password", + } + + token, err := tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token From a Username, Password, and Domain + + authOptions := tokens.AuthOptions{ + UserID: "username", + Password: "password", + DomainID: "default", + } + + token, err := tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + + authOptions = tokens.AuthOptions{ + UserID: "username", + Password: "password", + DomainName: "default", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token From a Token + + authOptions := tokens.AuthOptions{ + TokenID: "token_id", + } + + token, err := tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Username and Password with Project ID Scope + + scope := tokens.Scope{ + ProjectID: "0fe36e73809d46aeae6705c39077b1b3", + } + + authOptions := tokens.AuthOptions{ + Scope: &scope, + UserID: "username", + Password: "password", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Username and Password with Domain ID Scope + + scope := tokens.Scope{ + DomainID: "default", + } + + authOptions := tokens.AuthOptions{ + Scope: &scope, + UserID: "username", + Password: "password", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create 
a Token from a Username and Password with Project Name Scope + + scope := tokens.Scope{ + ProjectName: "project_name", + DomainID: "default", + } + + authOptions := tokens.AuthOptions{ + Scope: &scope, + UserID: "username", + Password: "password", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +*/ +package tokens diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go new file mode 100644 index 000000000000..e4d766b2327e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go @@ -0,0 +1,162 @@ +package tokens + +import "github.com/gophercloud/gophercloud" + +// Scope allows a created token to be limited to a specific domain or project. +type Scope struct { + ProjectID string + ProjectName string + DomainID string + DomainName string +} + +// AuthOptionsBuilder provides the ability for extensions to add additional +// parameters to AuthOptions. Extensions must satisfy all required methods. +type AuthOptionsBuilder interface { + // ToTokenV3CreateMap assembles the Create request body, returning an error + // if parameters are missing or inconsistent. + ToTokenV3CreateMap(map[string]interface{}) (map[string]interface{}, error) + ToTokenV3ScopeMap() (map[string]interface{}, error) + CanReauth() bool +} + +// AuthOptions represents options for authenticating a user. +type AuthOptions struct { + // IdentityEndpoint specifies the HTTP endpoint that is required to work with + // the Identity API of the appropriate version. While it's ultimately needed + // by all of the identity services, it will often be populated by a + // provider-level function. + IdentityEndpoint string `json:"-"` + + // Username is required if using Identity V2 API. Consult with your provider's + // control panel to discover your account's username. In Identity V3, either + // UserID or a combination of Username and DomainID or DomainName are needed. + Username string `json:"username,omitempty"` + UserID string `json:"id,omitempty"` + + Password string `json:"password,omitempty"` + + // At most one of DomainID and DomainName must be provided if using Username + // with Identity V3. Otherwise, either are optional. + DomainID string `json:"-"` + DomainName string `json:"name,omitempty"` + + // AllowReauth should be set to true if you grant permission for Gophercloud + // to cache your credentials in memory, and to allow Gophercloud to attempt + // to re-authenticate automatically if/when your token expires. If you set + // it to false, it will not cache these settings, but re-authentication will + // not be possible. This setting defaults to false. + AllowReauth bool `json:"-"` + + // TokenID allows users to authenticate (possibly as another user) with an + // authentication token ID. + TokenID string `json:"-"` + + // Authentication through Application Credentials requires supplying name, project and secret + // For project we can use TenantID + ApplicationCredentialID string `json:"-"` + ApplicationCredentialName string `json:"-"` + ApplicationCredentialSecret string `json:"-"` + + Scope Scope `json:"-"` +} + +// ToTokenV3CreateMap builds a request body from AuthOptions. 
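+//
+// A rough sketch of how the two builders compose, mirroring what Create does
+// further down; the credential values are placeholders:
+//
+//   opts := tokens.AuthOptions{
+//       UserID:   "user-id",
+//       Password: "secret",
+//       Scope:    tokens.Scope{ProjectID: "project-id"},
+//   }
+//   scope, _ := opts.ToTokenV3ScopeMap()
+//   body, _ := opts.ToTokenV3CreateMap(scope)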
+func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { + gophercloudAuthOpts := gophercloud.AuthOptions{ + Username: opts.Username, + UserID: opts.UserID, + Password: opts.Password, + DomainID: opts.DomainID, + DomainName: opts.DomainName, + AllowReauth: opts.AllowReauth, + TokenID: opts.TokenID, + ApplicationCredentialID: opts.ApplicationCredentialID, + ApplicationCredentialName: opts.ApplicationCredentialName, + ApplicationCredentialSecret: opts.ApplicationCredentialSecret, + } + + return gophercloudAuthOpts.ToTokenV3CreateMap(scope) +} + +// ToTokenV3CreateMap builds a scope request body from AuthOptions. +func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { + scope := gophercloud.AuthScope(opts.Scope) + + gophercloudAuthOpts := gophercloud.AuthOptions{ + Scope: &scope, + DomainID: opts.DomainID, + DomainName: opts.DomainName, + } + + return gophercloudAuthOpts.ToTokenV3ScopeMap() +} + +func (opts *AuthOptions) CanReauth() bool { + return opts.AllowReauth +} + +func subjectTokenHeaders(c *gophercloud.ServiceClient, subjectToken string) map[string]string { + return map[string]string{ + "X-Subject-Token": subjectToken, + } +} + +// Create authenticates and either generates a new token, or changes the Scope +// of an existing token. +func Create(c *gophercloud.ServiceClient, opts AuthOptionsBuilder) (r CreateResult) { + scope, err := opts.ToTokenV3ScopeMap() + if err != nil { + r.Err = err + return + } + + b, err := opts.ToTokenV3CreateMap(scope) + if err != nil { + r.Err = err + return + } + + resp, err := c.Post(tokenURL(c), b, &r.Body, &gophercloud.RequestOpts{ + MoreHeaders: map[string]string{"X-Auth-Token": ""}, + }) + r.Err = err + if resp != nil { + r.Header = resp.Header + } + return +} + +// Get validates and retrieves information about another token. +func Get(c *gophercloud.ServiceClient, token string) (r GetResult) { + resp, err := c.Get(tokenURL(c), &r.Body, &gophercloud.RequestOpts{ + MoreHeaders: subjectTokenHeaders(c, token), + OkCodes: []int{200, 203}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// Validate determines if a specified token is valid or not. +func Validate(c *gophercloud.ServiceClient, token string) (bool, error) { + resp, err := c.Head(tokenURL(c), &gophercloud.RequestOpts{ + MoreHeaders: subjectTokenHeaders(c, token), + OkCodes: []int{200, 204, 404}, + }) + if err != nil { + return false, err + } + + return resp.StatusCode == 200 || resp.StatusCode == 204, nil +} + +// Revoke immediately makes specified token invalid. +func Revoke(c *gophercloud.ServiceClient, token string) (r RevokeResult) { + _, r.Err = c.Delete(tokenURL(c), &gophercloud.RequestOpts{ + MoreHeaders: subjectTokenHeaders(c, token), + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go new file mode 100644 index 000000000000..6f26c96bcdc9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go @@ -0,0 +1,178 @@ +package tokens + +import ( + "time" + + "github.com/gophercloud/gophercloud" +) + +// Endpoint represents a single API endpoint offered by a service. +// It matches either a public, internal or admin URL. +// If supported, it contains a region specifier, again if provided. +// The significance of the Region field will depend upon your provider. 
+type Endpoint struct { + ID string `json:"id"` + Region string `json:"region"` + RegionID string `json:"region_id"` + Interface string `json:"interface"` + URL string `json:"url"` +} + +// CatalogEntry provides a type-safe interface to an Identity API V3 service +// catalog listing. Each class of service, such as cloud DNS or block storage +// services, could have multiple CatalogEntry representing it (one by interface +// type, e.g public, admin or internal). +// +// Note: when looking for the desired service, try, whenever possible, to key +// off the type field. Otherwise, you'll tie the representation of the service +// to a specific provider. +type CatalogEntry struct { + // Service ID + ID string `json:"id"` + + // Name will contain the provider-specified name for the service. + Name string `json:"name"` + + // Type will contain a type string if OpenStack defines a type for the + // service. Otherwise, for provider-specific services, the provider may + // assign their own type strings. + Type string `json:"type"` + + // Endpoints will let the caller iterate over all the different endpoints that + // may exist for the service. + Endpoints []Endpoint `json:"endpoints"` +} + +// ServiceCatalog provides a view into the service catalog from a previous, +// successful authentication. +type ServiceCatalog struct { + Entries []CatalogEntry `json:"catalog"` +} + +// Domain provides information about the domain to which this token grants +// access. +type Domain struct { + ID string `json:"id"` + Name string `json:"name"` +} + +// User represents a user resource that exists in the Identity Service. +type User struct { + Domain Domain `json:"domain"` + ID string `json:"id"` + Name string `json:"name"` +} + +// Role provides information about roles to which User is authorized. +type Role struct { + ID string `json:"id"` + Name string `json:"name"` +} + +// Project provides information about project to which User is authorized. +type Project struct { + Domain Domain `json:"domain"` + ID string `json:"id"` + Name string `json:"name"` +} + +// commonResult is the response from a request. A commonResult has various +// methods which can be used to extract different details about the result. +type commonResult struct { + gophercloud.Result +} + +// Extract is a shortcut for ExtractToken. +// This function is deprecated and still present for backward compatibility. +func (r commonResult) Extract() (*Token, error) { + return r.ExtractToken() +} + +// ExtractToken interprets a commonResult as a Token. +func (r commonResult) ExtractToken() (*Token, error) { + var s Token + err := r.ExtractInto(&s) + if err != nil { + return nil, err + } + + // Parse the token itself from the stored headers. + s.ID = r.Header.Get("X-Subject-Token") + + return &s, err +} + +// ExtractTokenID implements the gophercloud.AuthResult interface. The returned +// string is the same as the ID field of the Token struct returned from +// ExtractToken(). +func (r CreateResult) ExtractTokenID() (string, error) { + return r.Header.Get("X-Subject-Token"), r.Err +} + +// ExtractServiceCatalog returns the ServiceCatalog that was generated along +// with the user's Token. +func (r commonResult) ExtractServiceCatalog() (*ServiceCatalog, error) { + var s ServiceCatalog + err := r.ExtractInto(&s) + return &s, err +} + +// ExtractUser returns the User that is the owner of the Token. 
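+//
+// A rough usage sketch: after authenticating, the same result also carries the
+// user, roles, and project (identityClient and authOptions are assumed to
+// already exist):
+//
+//   result := tokens.Create(identityClient, authOptions)
+//   user, _ := result.ExtractUser()
+//   roles, _ := result.ExtractRoles()
+//   project, _ := result.ExtractProject()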
+func (r commonResult) ExtractUser() (*User, error) { + var s struct { + User *User `json:"user"` + } + err := r.ExtractInto(&s) + return s.User, err +} + +// ExtractRoles returns Roles to which User is authorized. +func (r commonResult) ExtractRoles() ([]Role, error) { + var s struct { + Roles []Role `json:"roles"` + } + err := r.ExtractInto(&s) + return s.Roles, err +} + +// ExtractProject returns Project to which User is authorized. +func (r commonResult) ExtractProject() (*Project, error) { + var s struct { + Project *Project `json:"project"` + } + err := r.ExtractInto(&s) + return s.Project, err +} + +// CreateResult is the response from a Create request. Use ExtractToken() +// to interpret it as a Token, or ExtractServiceCatalog() to interpret it +// as a service catalog. +type CreateResult struct { + commonResult +} + +// GetResult is the response from a Get request. Use ExtractToken() +// to interpret it as a Token, or ExtractServiceCatalog() to interpret it +// as a service catalog. +type GetResult struct { + commonResult +} + +// RevokeResult is response from a Revoke request. +type RevokeResult struct { + commonResult +} + +// Token is a string that grants a user access to a controlled set of services +// in an OpenStack provider. Each Token is valid for a set length of time. +type Token struct { + // ID is the issued token. + ID string `json:"id"` + + // ExpiresAt is the timestamp at which this token will no longer be accepted. + ExpiresAt time.Time `json:"expires_at"` +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.ExtractIntoStructPtr(v, "token") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go new file mode 100644 index 000000000000..2f864a31c8bf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go @@ -0,0 +1,7 @@ +package tokens + +import "github.com/gophercloud/gophercloud" + +func tokenURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("auth", "tokens") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go new file mode 100644 index 000000000000..40080f7af203 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go @@ -0,0 +1,28 @@ +package utils + +import ( + "net/url" + "regexp" + "strings" +) + +// BaseEndpoint will return a URL without the /vX.Y +// portion of the URL. +func BaseEndpoint(endpoint string) (string, error) { + u, err := url.Parse(endpoint) + if err != nil { + return "", err + } + + u.RawQuery, u.Fragment = "", "" + + path := u.Path + versionRe := regexp.MustCompile("v[0-9.]+/?") + + if version := versionRe.FindString(path); version != "" { + versionIndex := strings.Index(path, version) + u.Path = path[:versionIndex] + } + + return u.String(), nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go new file mode 100644 index 000000000000..27da19f91a8d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go @@ -0,0 +1,111 @@ +package utils + +import ( + "fmt" + "strings" + + "github.com/gophercloud/gophercloud" +) + +// Version is a supported API version, corresponding to a vN package within the appropriate service. 
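+//
+// A rough usage sketch from a calling package: Version values describe the
+// identity API versions a caller recognizes and are passed to ChooseVersion
+// below (the IDs, priorities, and suffixes here are illustrative, not
+// canonical):
+//
+//   v2 := &utils.Version{ID: "v2.0", Priority: 20, Suffix: "/v2.0/"}
+//   v3 := &utils.Version{ID: "v3", Priority: 30, Suffix: "/v3/"}
+//   version, endpoint, err := utils.ChooseVersion(providerClient, []*utils.Version{v2, v3})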
+type Version struct { + ID string + Suffix string + Priority int +} + +var goodStatus = map[string]bool{ + "current": true, + "supported": true, + "stable": true, +} + +// ChooseVersion queries the base endpoint of an API to choose the most recent non-experimental alternative from a service's +// published versions. +// It returns the highest-Priority Version among the alternatives that are provided, as well as its corresponding endpoint. +func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (*Version, string, error) { + type linkResp struct { + Href string `json:"href"` + Rel string `json:"rel"` + } + + type valueResp struct { + ID string `json:"id"` + Status string `json:"status"` + Links []linkResp `json:"links"` + } + + type versionsResp struct { + Values []valueResp `json:"values"` + } + + type response struct { + Versions versionsResp `json:"versions"` + } + + normalize := func(endpoint string) string { + if !strings.HasSuffix(endpoint, "/") { + return endpoint + "/" + } + return endpoint + } + identityEndpoint := normalize(client.IdentityEndpoint) + + // If a full endpoint is specified, check version suffixes for a match first. + for _, v := range recognized { + if strings.HasSuffix(identityEndpoint, v.Suffix) { + return v, identityEndpoint, nil + } + } + + var resp response + _, err := client.Request("GET", client.IdentityBase, &gophercloud.RequestOpts{ + JSONResponse: &resp, + OkCodes: []int{200, 300}, + }) + + if err != nil { + return nil, "", err + } + + var highest *Version + var endpoint string + + for _, value := range resp.Versions.Values { + href := "" + for _, link := range value.Links { + if link.Rel == "self" { + href = normalize(link.Href) + } + } + + for _, version := range recognized { + if strings.Contains(value.ID, version.ID) { + // Prefer a version that exactly matches the provided endpoint. + if href == identityEndpoint { + if href == "" { + return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase) + } + return version, href, nil + } + + // Otherwise, find the highest-priority version with a whitelisted status. + if goodStatus[strings.ToLower(value.Status)] { + if highest == nil || version.Priority > highest.Priority { + highest = version + endpoint = href + } + } + } + } + } + + if highest == nil { + return nil, "", fmt.Errorf("No supported version available from endpoint %s", client.IdentityBase) + } + if endpoint == "" { + return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", highest.ID, client.IdentityBase) + } + + return highest, endpoint, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/http.go b/vendor/github.com/gophercloud/gophercloud/pagination/http.go new file mode 100644 index 000000000000..757295c423ad --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/http.go @@ -0,0 +1,60 @@ +package pagination + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/gophercloud/gophercloud" +) + +// PageResult stores the HTTP response that returned the current page of results. +type PageResult struct { + gophercloud.Result + url.URL +} + +// PageResultFrom parses an HTTP response as JSON and returns a PageResult containing the +// results, interpreting it as JSON if the content type indicates. 
+func PageResultFrom(resp *http.Response) (PageResult, error) { + var parsedBody interface{} + + defer resp.Body.Close() + rawBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return PageResult{}, err + } + + if strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") { + err = json.Unmarshal(rawBody, &parsedBody) + if err != nil { + return PageResult{}, err + } + } else { + parsedBody = rawBody + } + + return PageResultFromParsed(resp, parsedBody), err +} + +// PageResultFromParsed constructs a PageResult from an HTTP response that has already had its +// body parsed as JSON (and closed). +func PageResultFromParsed(resp *http.Response, body interface{}) PageResult { + return PageResult{ + Result: gophercloud.Result{ + Body: body, + Header: resp.Header, + }, + URL: *resp.Request.URL, + } +} + +// Request performs an HTTP request and extracts the http.Response from the result. +func Request(client *gophercloud.ServiceClient, headers map[string]string, url string) (*http.Response, error) { + return client.Get(url, nil, &gophercloud.RequestOpts{ + MoreHeaders: headers, + OkCodes: []int{200, 204, 300}, + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/linked.go b/vendor/github.com/gophercloud/gophercloud/pagination/linked.go new file mode 100644 index 000000000000..3656fb7f8f47 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/linked.go @@ -0,0 +1,92 @@ +package pagination + +import ( + "fmt" + "reflect" + + "github.com/gophercloud/gophercloud" +) + +// LinkedPageBase may be embedded to implement a page that provides navigational "Next" and "Previous" links within its result. +type LinkedPageBase struct { + PageResult + + // LinkPath lists the keys that should be traversed within a response to arrive at the "next" pointer. + // If any link along the path is missing, an empty URL will be returned. + // If any link results in an unexpected value type, an error will be returned. + // When left as "nil", []string{"links", "next"} will be used as a default. + LinkPath []string +} + +// NextPageURL extracts the pagination structure from a JSON response and returns the "next" link, if one is present. +// It assumes that the links are available in a "links" element of the top-level response object. +// If this is not the case, override NextPageURL on your result type. +func (current LinkedPageBase) NextPageURL() (string, error) { + var path []string + var key string + + if current.LinkPath == nil { + path = []string{"links", "next"} + } else { + path = current.LinkPath + } + + submap, ok := current.Body.(map[string]interface{}) + if !ok { + err := gophercloud.ErrUnexpectedType{} + err.Expected = "map[string]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) + return "", err + } + + for { + key, path = path[0], path[1:len(path)] + + value, ok := submap[key] + if !ok { + return "", nil + } + + if len(path) > 0 { + submap, ok = value.(map[string]interface{}) + if !ok { + err := gophercloud.ErrUnexpectedType{} + err.Expected = "map[string]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value)) + return "", err + } + } else { + if value == nil { + // Actual null element. 
+ return "", nil + } + + url, ok := value.(string) + if !ok { + err := gophercloud.ErrUnexpectedType{} + err.Expected = "string" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value)) + return "", err + } + + return url, nil + } + } +} + +// IsEmpty satisifies the IsEmpty method of the Page interface +func (current LinkedPageBase) IsEmpty() (bool, error) { + if b, ok := current.Body.([]interface{}); ok { + return len(b) == 0, nil + } + err := gophercloud.ErrUnexpectedType{} + err.Expected = "[]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) + return true, err +} + +// GetBody returns the linked page's body. This method is needed to satisfy the +// Page interface. +func (current LinkedPageBase) GetBody() interface{} { + return current.Body +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/marker.go b/vendor/github.com/gophercloud/gophercloud/pagination/marker.go new file mode 100644 index 000000000000..52e53bae8503 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/marker.go @@ -0,0 +1,58 @@ +package pagination + +import ( + "fmt" + "reflect" + + "github.com/gophercloud/gophercloud" +) + +// MarkerPage is a stricter Page interface that describes additional functionality required for use with NewMarkerPager. +// For convenience, embed the MarkedPageBase struct. +type MarkerPage interface { + Page + + // LastMarker returns the last "marker" value on this page. + LastMarker() (string, error) +} + +// MarkerPageBase is a page in a collection that's paginated by "limit" and "marker" query parameters. +type MarkerPageBase struct { + PageResult + + // Owner is a reference to the embedding struct. + Owner MarkerPage +} + +// NextPageURL generates the URL for the page of results after this one. +func (current MarkerPageBase) NextPageURL() (string, error) { + currentURL := current.URL + + mark, err := current.Owner.LastMarker() + if err != nil { + return "", err + } + + q := currentURL.Query() + q.Set("marker", mark) + currentURL.RawQuery = q.Encode() + + return currentURL.String(), nil +} + +// IsEmpty satisifies the IsEmpty method of the Page interface +func (current MarkerPageBase) IsEmpty() (bool, error) { + if b, ok := current.Body.([]interface{}); ok { + return len(b) == 0, nil + } + err := gophercloud.ErrUnexpectedType{} + err.Expected = "[]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) + return true, err +} + +// GetBody returns the linked page's body. This method is needed to satisfy the +// Page interface. +func (current MarkerPageBase) GetBody() interface{} { + return current.Body +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go new file mode 100644 index 000000000000..42c0b2dbe5b9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go @@ -0,0 +1,251 @@ +package pagination + +import ( + "errors" + "fmt" + "net/http" + "reflect" + "strings" + + "github.com/gophercloud/gophercloud" +) + +var ( + // ErrPageNotAvailable is returned from a Pager when a next or previous page is requested, but does not exist. + ErrPageNotAvailable = errors.New("The requested page does not exist.") +) + +// Page must be satisfied by the result type of any resource collection. +// It allows clients to interact with the resource uniformly, regardless of whether or not or how it's paginated. 
+// Generally, rather than implementing this interface directly, implementors should embed one of the concrete PageBase structs, +// instead. +// Depending on the pagination strategy of a particular resource, there may be an additional subinterface that the result type +// will need to implement. +type Page interface { + // NextPageURL generates the URL for the page of data that follows this collection. + // Return "" if no such page exists. + NextPageURL() (string, error) + + // IsEmpty returns true if this Page has no items in it. + IsEmpty() (bool, error) + + // GetBody returns the Page Body. This is used in the `AllPages` method. + GetBody() interface{} +} + +// Pager knows how to advance through a specific resource collection, one page at a time. +type Pager struct { + client *gophercloud.ServiceClient + + initialURL string + + createPage func(r PageResult) Page + + firstPage Page + + Err error + + // Headers supplies additional HTTP headers to populate on each paged request. + Headers map[string]string +} + +// NewPager constructs a manually-configured pager. +// Supply the URL for the first page, a function that requests a specific page given a URL, and a function that counts a page. +func NewPager(client *gophercloud.ServiceClient, initialURL string, createPage func(r PageResult) Page) Pager { + return Pager{ + client: client, + initialURL: initialURL, + createPage: createPage, + } +} + +// WithPageCreator returns a new Pager that substitutes a different page creation function. This is +// useful for overriding List functions in delegation. +func (p Pager) WithPageCreator(createPage func(r PageResult) Page) Pager { + return Pager{ + client: p.client, + initialURL: p.initialURL, + createPage: createPage, + } +} + +func (p Pager) fetchNextPage(url string) (Page, error) { + resp, err := Request(p.client, p.Headers, url) + if err != nil { + return nil, err + } + + remembered, err := PageResultFrom(resp) + if err != nil { + return nil, err + } + + return p.createPage(remembered), nil +} + +// EachPage iterates over each page returned by a Pager, yielding one at a time to a handler function. +// Return "false" from the handler to prematurely stop iterating. +func (p Pager) EachPage(handler func(Page) (bool, error)) error { + if p.Err != nil { + return p.Err + } + currentURL := p.initialURL + for { + var currentPage Page + + // if first page has already been fetched, no need to fetch it again + if p.firstPage != nil { + currentPage = p.firstPage + p.firstPage = nil + } else { + var err error + currentPage, err = p.fetchNextPage(currentURL) + if err != nil { + return err + } + } + + empty, err := currentPage.IsEmpty() + if err != nil { + return err + } + if empty { + return nil + } + + ok, err := handler(currentPage) + if err != nil { + return err + } + if !ok { + return nil + } + + currentURL, err = currentPage.NextPageURL() + if err != nil { + return err + } + if currentURL == "" { + return nil + } + } +} + +// AllPages returns all the pages from a `List` operation in a single page, +// allowing the user to retrieve all the pages at once. +func (p Pager) AllPages() (Page, error) { + // pagesSlice holds all the pages until they get converted into as Page Body. + var pagesSlice []interface{} + // body will contain the final concatenated Page body. + var body reflect.Value + + // Grab a first page to ascertain the page body type. 
+ firstPage, err := p.fetchNextPage(p.initialURL) + if err != nil { + return nil, err + } + // Store the page type so we can use reflection to create a new mega-page of + // that type. + pageType := reflect.TypeOf(firstPage) + + // if it's a single page, just return the firstPage (first page) + if _, found := pageType.FieldByName("SinglePageBase"); found { + return firstPage, nil + } + + // store the first page to avoid getting it twice + p.firstPage = firstPage + + // Switch on the page body type. Recognized types are `map[string]interface{}`, + // `[]byte`, and `[]interface{}`. + switch pb := firstPage.GetBody().(type) { + case map[string]interface{}: + // key is the map key for the page body if the body type is `map[string]interface{}`. + var key string + // Iterate over the pages to concatenate the bodies. + err = p.EachPage(func(page Page) (bool, error) { + b := page.GetBody().(map[string]interface{}) + for k, v := range b { + // If it's a linked page, we don't want the `links`, we want the other one. + if !strings.HasSuffix(k, "links") { + // check the field's type. we only want []interface{} (which is really []map[string]interface{}) + switch vt := v.(type) { + case []interface{}: + key = k + pagesSlice = append(pagesSlice, vt...) + } + } + } + return true, nil + }) + if err != nil { + return nil, err + } + // Set body to value of type `map[string]interface{}` + body = reflect.MakeMap(reflect.MapOf(reflect.TypeOf(key), reflect.TypeOf(pagesSlice))) + body.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(pagesSlice)) + case []byte: + // Iterate over the pages to concatenate the bodies. + err = p.EachPage(func(page Page) (bool, error) { + b := page.GetBody().([]byte) + pagesSlice = append(pagesSlice, b) + // seperate pages with a comma + pagesSlice = append(pagesSlice, []byte{10}) + return true, nil + }) + if err != nil { + return nil, err + } + if len(pagesSlice) > 0 { + // Remove the trailing comma. + pagesSlice = pagesSlice[:len(pagesSlice)-1] + } + var b []byte + // Combine the slice of slices in to a single slice. + for _, slice := range pagesSlice { + b = append(b, slice.([]byte)...) + } + // Set body to value of type `bytes`. + body = reflect.New(reflect.TypeOf(b)).Elem() + body.SetBytes(b) + case []interface{}: + // Iterate over the pages to concatenate the bodies. + err = p.EachPage(func(page Page) (bool, error) { + b := page.GetBody().([]interface{}) + pagesSlice = append(pagesSlice, b...) + return true, nil + }) + if err != nil { + return nil, err + } + // Set body to value of type `[]interface{}` + body = reflect.MakeSlice(reflect.TypeOf(pagesSlice), len(pagesSlice), len(pagesSlice)) + for i, s := range pagesSlice { + body.Index(i).Set(reflect.ValueOf(s)) + } + default: + err := gophercloud.ErrUnexpectedType{} + err.Expected = "map[string]interface{}/[]byte/[]interface{}" + err.Actual = fmt.Sprintf("%T", pb) + return nil, err + } + + // Each `Extract*` function is expecting a specific type of page coming back, + // otherwise the type assertion in those functions will fail. pageType is needed + // to create a type in this method that has the same type that the `Extract*` + // function is expecting and set the Body of that object to the concatenated + // pages. + page := reflect.New(pageType) + // Set the page body to be the concatenated pages. + page.Elem().FieldByName("Body").Set(body) + // Set any additional headers that were pass along. The `objectstorage` pacakge, + // for example, passes a Content-Type header. 
+ h := make(http.Header) + for k, v := range p.Headers { + h.Add(k, v) + } + page.Elem().FieldByName("Header").Set(reflect.ValueOf(h)) + // Type assert the page to a Page interface so that the type assertion in the + // `Extract*` methods will work. + return page.Elem().Interface().(Page), err +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go b/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go new file mode 100644 index 000000000000..912daea36426 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go @@ -0,0 +1,4 @@ +/* +Package pagination contains utilities and convenience structs that implement common pagination idioms within OpenStack APIs. +*/ +package pagination diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/single.go b/vendor/github.com/gophercloud/gophercloud/pagination/single.go new file mode 100644 index 000000000000..4251d6491efe --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/single.go @@ -0,0 +1,33 @@ +package pagination + +import ( + "fmt" + "reflect" + + "github.com/gophercloud/gophercloud" +) + +// SinglePageBase may be embedded in a Page that contains all of the results from an operation at once. +type SinglePageBase PageResult + +// NextPageURL always returns "" to indicate that there are no more pages to return. +func (current SinglePageBase) NextPageURL() (string, error) { + return "", nil +} + +// IsEmpty satisifies the IsEmpty method of the Page interface +func (current SinglePageBase) IsEmpty() (bool, error) { + if b, ok := current.Body.([]interface{}); ok { + return len(b) == 0, nil + } + err := gophercloud.ErrUnexpectedType{} + err.Expected = "[]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) + return true, err +} + +// GetBody returns the single page's body. This method is needed to satisfy the +// Page interface. +func (current SinglePageBase) GetBody() interface{} { + return current.Body +} diff --git a/vendor/github.com/gophercloud/gophercloud/params.go b/vendor/github.com/gophercloud/gophercloud/params.go new file mode 100644 index 000000000000..b9986660cbda --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/params.go @@ -0,0 +1,491 @@ +package gophercloud + +import ( + "encoding/json" + "fmt" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +/* +BuildRequestBody builds a map[string]interface from the given `struct`. If +parent is not an empty string, the final map[string]interface returned will +encapsulate the built one. For example: + + disk := 1 + createOpts := flavors.CreateOpts{ + ID: "1", + Name: "m1.tiny", + Disk: &disk, + RAM: 512, + VCPUs: 1, + RxTxFactor: 1.0, + } + + body, err := gophercloud.BuildRequestBody(createOpts, "flavor") + +The above example can be run as-is, however it is recommended to look at how +BuildRequestBody is used within Gophercloud to more fully understand how it +fits within the request process as a whole rather than use it directly as shown +above. 
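+
+When a parent key is supplied, as in the example above, the returned map nests
+the encoded options under that key, e.g. map[string]interface{}{"flavor": ...},
+which is the shape typically passed as the JSONBody of a RequestOpts.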
+*/ +func BuildRequestBody(opts interface{}, parent string) (map[string]interface{}, error) { + optsValue := reflect.ValueOf(opts) + if optsValue.Kind() == reflect.Ptr { + optsValue = optsValue.Elem() + } + + optsType := reflect.TypeOf(opts) + if optsType.Kind() == reflect.Ptr { + optsType = optsType.Elem() + } + + optsMap := make(map[string]interface{}) + if optsValue.Kind() == reflect.Struct { + //fmt.Printf("optsValue.Kind() is a reflect.Struct: %+v\n", optsValue.Kind()) + for i := 0; i < optsValue.NumField(); i++ { + v := optsValue.Field(i) + f := optsType.Field(i) + + if f.Name != strings.Title(f.Name) { + //fmt.Printf("Skipping field: %s...\n", f.Name) + continue + } + + //fmt.Printf("Starting on field: %s...\n", f.Name) + + zero := isZero(v) + //fmt.Printf("v is zero?: %v\n", zero) + + // if the field has a required tag that's set to "true" + if requiredTag := f.Tag.Get("required"); requiredTag == "true" { + //fmt.Printf("Checking required field [%s]:\n\tv: %+v\n\tisZero:%v\n", f.Name, v.Interface(), zero) + // if the field's value is zero, return a missing-argument error + if zero { + // if the field has a 'required' tag, it can't have a zero-value + err := ErrMissingInput{} + err.Argument = f.Name + return nil, err + } + } + + if xorTag := f.Tag.Get("xor"); xorTag != "" { + //fmt.Printf("Checking `xor` tag for field [%s] with value %+v:\n\txorTag: %s\n", f.Name, v, xorTag) + xorField := optsValue.FieldByName(xorTag) + var xorFieldIsZero bool + if reflect.ValueOf(xorField.Interface()) == reflect.Zero(xorField.Type()) { + xorFieldIsZero = true + } else { + if xorField.Kind() == reflect.Ptr { + xorField = xorField.Elem() + } + xorFieldIsZero = isZero(xorField) + } + if !(zero != xorFieldIsZero) { + err := ErrMissingInput{} + err.Argument = fmt.Sprintf("%s/%s", f.Name, xorTag) + err.Info = fmt.Sprintf("Exactly one of %s and %s must be provided", f.Name, xorTag) + return nil, err + } + } + + if orTag := f.Tag.Get("or"); orTag != "" { + //fmt.Printf("Checking `or` tag for field with:\n\tname: %+v\n\torTag:%s\n", f.Name, orTag) + //fmt.Printf("field is zero?: %v\n", zero) + if zero { + orField := optsValue.FieldByName(orTag) + var orFieldIsZero bool + if reflect.ValueOf(orField.Interface()) == reflect.Zero(orField.Type()) { + orFieldIsZero = true + } else { + if orField.Kind() == reflect.Ptr { + orField = orField.Elem() + } + orFieldIsZero = isZero(orField) + } + if orFieldIsZero { + err := ErrMissingInput{} + err.Argument = fmt.Sprintf("%s/%s", f.Name, orTag) + err.Info = fmt.Sprintf("At least one of %s and %s must be provided", f.Name, orTag) + return nil, err + } + } + } + + jsonTag := f.Tag.Get("json") + if jsonTag == "-" { + continue + } + + if v.Kind() == reflect.Slice || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Slice) { + sliceValue := v + if sliceValue.Kind() == reflect.Ptr { + sliceValue = sliceValue.Elem() + } + + for i := 0; i < sliceValue.Len(); i++ { + element := sliceValue.Index(i) + if element.Kind() == reflect.Struct || (element.Kind() == reflect.Ptr && element.Elem().Kind() == reflect.Struct) { + _, err := BuildRequestBody(element.Interface(), "") + if err != nil { + return nil, err + } + } + } + } + if v.Kind() == reflect.Struct || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct) { + if zero { + //fmt.Printf("value before change: %+v\n", optsValue.Field(i)) + if jsonTag != "" { + jsonTagPieces := strings.Split(jsonTag, ",") + if len(jsonTagPieces) > 1 && jsonTagPieces[1] == "omitempty" { + if v.CanSet() { + if !v.IsNil() { + if v.Kind() == 
reflect.Ptr { + v.Set(reflect.Zero(v.Type())) + } + } + //fmt.Printf("value after change: %+v\n", optsValue.Field(i)) + } + } + } + continue + } + + //fmt.Printf("Calling BuildRequestBody with:\n\tv: %+v\n\tf.Name:%s\n", v.Interface(), f.Name) + _, err := BuildRequestBody(v.Interface(), f.Name) + if err != nil { + return nil, err + } + } + } + + //fmt.Printf("opts: %+v \n", opts) + + b, err := json.Marshal(opts) + if err != nil { + return nil, err + } + + //fmt.Printf("string(b): %s\n", string(b)) + + err = json.Unmarshal(b, &optsMap) + if err != nil { + return nil, err + } + + //fmt.Printf("optsMap: %+v\n", optsMap) + + if parent != "" { + optsMap = map[string]interface{}{parent: optsMap} + } + //fmt.Printf("optsMap after parent added: %+v\n", optsMap) + return optsMap, nil + } + // Return an error if the underlying type of 'opts' isn't a struct. + return nil, fmt.Errorf("Options type is not a struct.") +} + +// EnabledState is a convenience type, mostly used in Create and Update +// operations. Because the zero value of a bool is FALSE, we need to use a +// pointer instead to indicate zero-ness. +type EnabledState *bool + +// Convenience vars for EnabledState values. +var ( + iTrue = true + iFalse = false + + Enabled EnabledState = &iTrue + Disabled EnabledState = &iFalse +) + +// IPVersion is a type for the possible IP address versions. Valid instances +// are IPv4 and IPv6 +type IPVersion int + +const ( + // IPv4 is used for IP version 4 addresses + IPv4 IPVersion = 4 + // IPv6 is used for IP version 6 addresses + IPv6 IPVersion = 6 +) + +// IntToPointer is a function for converting integers into integer pointers. +// This is useful when passing in options to operations. +func IntToPointer(i int) *int { + return &i +} + +/* +MaybeString is an internal function to be used by request methods in individual +resource packages. + +It takes a string that might be a zero value and returns either a pointer to its +address or nil. This is useful for allowing users to conveniently omit values +from an options struct by leaving them zeroed, but still pass nil to the JSON +serializer so they'll be omitted from the request body. +*/ +func MaybeString(original string) *string { + if original != "" { + return &original + } + return nil +} + +/* +MaybeInt is an internal function to be used by request methods in individual +resource packages. + +Like MaybeString, it accepts an int that may or may not be a zero value, and +returns either a pointer to its address or nil. It's intended to hint that the +JSON serializer should omit its field. 
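+
+For example:
+
+	gophercloud.MaybeInt(0) // returns nil
+	gophercloud.MaybeInt(5) // returns a *int pointing at 5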
+*/ +func MaybeInt(original int) *int { + if original != 0 { + return &original + } + return nil +} + +/* +func isUnderlyingStructZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Ptr: + return isUnderlyingStructZero(v.Elem()) + default: + return isZero(v) + } +} +*/ + +var t time.Time + +func isZero(v reflect.Value) bool { + //fmt.Printf("\n\nchecking isZero for value: %+v\n", v) + switch v.Kind() { + case reflect.Ptr: + if v.IsNil() { + return true + } + return false + case reflect.Func, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + z := true + for i := 0; i < v.Len(); i++ { + z = z && isZero(v.Index(i)) + } + return z + case reflect.Struct: + if v.Type() == reflect.TypeOf(t) { + if v.Interface().(time.Time).IsZero() { + return true + } + return false + } + z := true + for i := 0; i < v.NumField(); i++ { + z = z && isZero(v.Field(i)) + } + return z + } + // Compare other types directly: + z := reflect.Zero(v.Type()) + //fmt.Printf("zero type for value: %+v\n\n\n", z) + return v.Interface() == z.Interface() +} + +/* +BuildQueryString is an internal function to be used by request methods in +individual resource packages. + +It accepts a tagged structure and expands it into a URL struct. Field names are +converted into query parameters based on a "q" tag. For example: + + type struct Something { + Bar string `q:"x_bar"` + Baz int `q:"lorem_ipsum"` + } + + instance := Something{ + Bar: "AAA", + Baz: "BBB", + } + +will be converted into "?x_bar=AAA&lorem_ipsum=BBB". + +The struct's fields may be strings, integers, or boolean values. Fields left at +their type's zero value will be omitted from the query. +*/ +func BuildQueryString(opts interface{}) (*url.URL, error) { + optsValue := reflect.ValueOf(opts) + if optsValue.Kind() == reflect.Ptr { + optsValue = optsValue.Elem() + } + + optsType := reflect.TypeOf(opts) + if optsType.Kind() == reflect.Ptr { + optsType = optsType.Elem() + } + + params := url.Values{} + + if optsValue.Kind() == reflect.Struct { + for i := 0; i < optsValue.NumField(); i++ { + v := optsValue.Field(i) + f := optsType.Field(i) + qTag := f.Tag.Get("q") + + // if the field has a 'q' tag, it goes in the query string + if qTag != "" { + tags := strings.Split(qTag, ",") + + // if the field is set, add it to the slice of query pieces + if !isZero(v) { + loop: + switch v.Kind() { + case reflect.Ptr: + v = v.Elem() + goto loop + case reflect.String: + params.Add(tags[0], v.String()) + case reflect.Int: + params.Add(tags[0], strconv.FormatInt(v.Int(), 10)) + case reflect.Bool: + params.Add(tags[0], strconv.FormatBool(v.Bool())) + case reflect.Slice: + switch v.Type().Elem() { + case reflect.TypeOf(0): + for i := 0; i < v.Len(); i++ { + params.Add(tags[0], strconv.FormatInt(v.Index(i).Int(), 10)) + } + default: + for i := 0; i < v.Len(); i++ { + params.Add(tags[0], v.Index(i).String()) + } + } + case reflect.Map: + if v.Type().Key().Kind() == reflect.String && v.Type().Elem().Kind() == reflect.String { + var s []string + for _, k := range v.MapKeys() { + value := v.MapIndex(k).String() + s = append(s, fmt.Sprintf("'%s':'%s'", k.String(), value)) + } + params.Add(tags[0], fmt.Sprintf("{%s}", strings.Join(s, ", "))) + } + } + } else { + // if the field has a 'required' tag, it can't have a zero-value + if requiredTag := f.Tag.Get("required"); requiredTag == "true" { + return &url.URL{}, fmt.Errorf("Required query parameter [%s] not set.", f.Name) + } + } + } + } + + return &url.URL{RawQuery: params.Encode()}, nil + } + // Return an error if the 
underlying type of 'opts' isn't a struct. + return nil, fmt.Errorf("Options type is not a struct.") +} + +/* +BuildHeaders is an internal function to be used by request methods in +individual resource packages. + +It accepts an arbitrary tagged structure and produces a string map that's +suitable for use as the HTTP headers of an outgoing request. Field names are +mapped to header names based in "h" tags. + + type struct Something { + Bar string `h:"x_bar"` + Baz int `h:"lorem_ipsum"` + } + + instance := Something{ + Bar: "AAA", + Baz: "BBB", + } + +will be converted into: + + map[string]string{ + "x_bar": "AAA", + "lorem_ipsum": "BBB", + } + +Untagged fields and fields left at their zero values are skipped. Integers, +booleans and string values are supported. +*/ +func BuildHeaders(opts interface{}) (map[string]string, error) { + optsValue := reflect.ValueOf(opts) + if optsValue.Kind() == reflect.Ptr { + optsValue = optsValue.Elem() + } + + optsType := reflect.TypeOf(opts) + if optsType.Kind() == reflect.Ptr { + optsType = optsType.Elem() + } + + optsMap := make(map[string]string) + if optsValue.Kind() == reflect.Struct { + for i := 0; i < optsValue.NumField(); i++ { + v := optsValue.Field(i) + f := optsType.Field(i) + hTag := f.Tag.Get("h") + + // if the field has a 'h' tag, it goes in the header + if hTag != "" { + tags := strings.Split(hTag, ",") + + // if the field is set, add it to the slice of query pieces + if !isZero(v) { + switch v.Kind() { + case reflect.String: + optsMap[tags[0]] = v.String() + case reflect.Int: + optsMap[tags[0]] = strconv.FormatInt(v.Int(), 10) + case reflect.Bool: + optsMap[tags[0]] = strconv.FormatBool(v.Bool()) + } + } else { + // if the field has a 'required' tag, it can't have a zero-value + if requiredTag := f.Tag.Get("required"); requiredTag == "true" { + return optsMap, fmt.Errorf("Required header [%s] not set.", f.Name) + } + } + } + + } + return optsMap, nil + } + // Return an error if the underlying type of 'opts' isn't a struct. + return optsMap, fmt.Errorf("Options type is not a struct.") +} + +// IDSliceToQueryString takes a slice of elements and converts them into a query +// string. For example, if name=foo and slice=[]int{20, 40, 60}, then the +// result would be `?name=20&name=40&name=60' +func IDSliceToQueryString(name string, ids []int) string { + str := "" + for k, v := range ids { + if k == 0 { + str += "?" + } else { + str += "&" + } + str += fmt.Sprintf("%s=%s", name, strconv.Itoa(v)) + } + return str +} + +// IntWithinRange returns TRUE if an integer falls within a defined range, and +// FALSE if not. +func IntWithinRange(val, min, max int) bool { + return val > min && val < max +} diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go new file mode 100644 index 000000000000..fce00462fd36 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go @@ -0,0 +1,501 @@ +package gophercloud + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "strings" + "sync" +) + +// DefaultUserAgent is the default User-Agent string set in the request header. +const DefaultUserAgent = "gophercloud/2.0.0" + +// UserAgent represents a User-Agent header. +type UserAgent struct { + // prepend is the slice of User-Agent strings to prepend to DefaultUserAgent. + // All the strings to prepend are accumulated and prepended in the Join method. 
+ prepend []string +} + +// Prepend prepends a user-defined string to the default User-Agent string. Users +// may pass in one or more strings to prepend. +func (ua *UserAgent) Prepend(s ...string) { + ua.prepend = append(s, ua.prepend...) +} + +// Join concatenates all the user-defined User-Agend strings with the default +// Gophercloud User-Agent string. +func (ua *UserAgent) Join() string { + uaSlice := append(ua.prepend, DefaultUserAgent) + return strings.Join(uaSlice, " ") +} + +// ProviderClient stores details that are required to interact with any +// services within a specific provider's API. +// +// Generally, you acquire a ProviderClient by calling the NewClient method in +// the appropriate provider's child package, providing whatever authentication +// credentials are required. +type ProviderClient struct { + // IdentityBase is the base URL used for a particular provider's identity + // service - it will be used when issuing authenticatation requests. It + // should point to the root resource of the identity service, not a specific + // identity version. + IdentityBase string + + // IdentityEndpoint is the identity endpoint. This may be a specific version + // of the identity service. If this is the case, this endpoint is used rather + // than querying versions first. + IdentityEndpoint string + + // TokenID is the ID of the most recently issued valid token. + // NOTE: Aside from within a custom ReauthFunc, this field shouldn't be set by an application. + // To safely read or write this value, call `Token` or `SetToken`, respectively + TokenID string + + // EndpointLocator describes how this provider discovers the endpoints for + // its constituent services. + EndpointLocator EndpointLocator + + // HTTPClient allows users to interject arbitrary http, https, or other transit behaviors. + HTTPClient http.Client + + // UserAgent represents the User-Agent header in the HTTP request. + UserAgent UserAgent + + // ReauthFunc is the function used to re-authenticate the user if the request + // fails with a 401 HTTP response code. This a needed because there may be multiple + // authentication functions for different Identity service versions. + ReauthFunc func() error + + // Throwaway determines whether if this client is a throw-away client. It's a copy of user's provider client + // with the token and reauth func zeroed. Such client can be used to perform reauthorization. + Throwaway bool + + // Context is the context passed to the HTTP request. + Context context.Context + + // mut is a mutex for the client. It protects read and write access to client attributes such as getting + // and setting the TokenID. + mut *sync.RWMutex + + // reauthmut is a mutex for reauthentication it attempts to ensure that only one reauthentication + // attempt happens at one time. + reauthmut *reauthlock + + authResult AuthResult +} + +// reauthlock represents a set of attributes used to help in the reauthentication process. +type reauthlock struct { + sync.RWMutex + reauthing bool + reauthingErr error + done *sync.Cond +} + +// AuthenticatedHeaders returns a map of HTTP headers that are common for all +// authenticated service requests. Blocks if Reauthenticate is in progress. 
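+// A nil map is returned for throwaway clients and when no token has been set.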
+func (client *ProviderClient) AuthenticatedHeaders() (m map[string]string) { + if client.IsThrowaway() { + return + } + if client.reauthmut != nil { + client.reauthmut.Lock() + for client.reauthmut.reauthing { + client.reauthmut.done.Wait() + } + client.reauthmut.Unlock() + } + t := client.Token() + if t == "" { + return + } + return map[string]string{"X-Auth-Token": t} +} + +// UseTokenLock creates a mutex that is used to allow safe concurrent access to the auth token. +// If the application's ProviderClient is not used concurrently, this doesn't need to be called. +func (client *ProviderClient) UseTokenLock() { + client.mut = new(sync.RWMutex) + client.reauthmut = new(reauthlock) +} + +// GetAuthResult returns the result from the request that was used to obtain a +// provider client's Keystone token. +// +// The result is nil when authentication has not yet taken place, when the token +// was set manually with SetToken(), or when a ReauthFunc was used that does not +// record the AuthResult. +func (client *ProviderClient) GetAuthResult() AuthResult { + if client.mut != nil { + client.mut.RLock() + defer client.mut.RUnlock() + } + return client.authResult +} + +// Token safely reads the value of the auth token from the ProviderClient. Applications should +// call this method to access the token instead of the TokenID field +func (client *ProviderClient) Token() string { + if client.mut != nil { + client.mut.RLock() + defer client.mut.RUnlock() + } + return client.TokenID +} + +// SetToken safely sets the value of the auth token in the ProviderClient. Applications may +// use this method in a custom ReauthFunc. +// +// WARNING: This function is deprecated. Use SetTokenAndAuthResult() instead. +func (client *ProviderClient) SetToken(t string) { + if client.mut != nil { + client.mut.Lock() + defer client.mut.Unlock() + } + client.TokenID = t + client.authResult = nil +} + +// SetTokenAndAuthResult safely sets the value of the auth token in the +// ProviderClient and also records the AuthResult that was returned from the +// token creation request. Applications may call this in a custom ReauthFunc. +func (client *ProviderClient) SetTokenAndAuthResult(r AuthResult) error { + tokenID := "" + var err error + if r != nil { + tokenID, err = r.ExtractTokenID() + if err != nil { + return err + } + } + + if client.mut != nil { + client.mut.Lock() + defer client.mut.Unlock() + } + client.TokenID = tokenID + client.authResult = r + return nil +} + +// CopyTokenFrom safely copies the token from another ProviderClient into the +// this one. +func (client *ProviderClient) CopyTokenFrom(other *ProviderClient) { + if client.mut != nil { + client.mut.Lock() + defer client.mut.Unlock() + } + if other.mut != nil && other.mut != client.mut { + other.mut.RLock() + defer other.mut.RUnlock() + } + client.TokenID = other.TokenID + client.authResult = other.authResult +} + +// IsThrowaway safely reads the value of the client Throwaway field. +func (client *ProviderClient) IsThrowaway() bool { + if client.reauthmut != nil { + client.reauthmut.RLock() + defer client.reauthmut.RUnlock() + } + return client.Throwaway +} + +// SetThrowaway safely sets the value of the client Throwaway field. +func (client *ProviderClient) SetThrowaway(v bool) { + if client.reauthmut != nil { + client.reauthmut.Lock() + defer client.reauthmut.Unlock() + } + client.Throwaway = v +} + +// Reauthenticate calls client.ReauthFunc in a thread-safe way. If this is +// called because of a 401 response, the caller may pass the previous token. 
In +// this case, the reauthentication can be skipped if another thread has already +// reauthenticated in the meantime. If no previous token is known, an empty +// string should be passed instead to force unconditional reauthentication. +func (client *ProviderClient) Reauthenticate(previousToken string) (err error) { + if client.ReauthFunc == nil { + return nil + } + + if client.reauthmut == nil { + return client.ReauthFunc() + } + + client.reauthmut.Lock() + if client.reauthmut.reauthing { + for !client.reauthmut.reauthing { + client.reauthmut.done.Wait() + } + err = client.reauthmut.reauthingErr + client.reauthmut.Unlock() + return err + } + client.reauthmut.Unlock() + + client.reauthmut.Lock() + client.reauthmut.reauthing = true + client.reauthmut.done = sync.NewCond(client.reauthmut) + client.reauthmut.reauthingErr = nil + client.reauthmut.Unlock() + + if previousToken == "" || client.TokenID == previousToken { + err = client.ReauthFunc() + } + + client.reauthmut.Lock() + client.reauthmut.reauthing = false + client.reauthmut.reauthingErr = err + client.reauthmut.done.Broadcast() + client.reauthmut.Unlock() + return +} + +// RequestOpts customizes the behavior of the provider.Request() method. +type RequestOpts struct { + // JSONBody, if provided, will be encoded as JSON and used as the body of the HTTP request. The + // content type of the request will default to "application/json" unless overridden by MoreHeaders. + // It's an error to specify both a JSONBody and a RawBody. + JSONBody interface{} + // RawBody contains an io.Reader that will be consumed by the request directly. No content-type + // will be set unless one is provided explicitly by MoreHeaders. + RawBody io.Reader + // JSONResponse, if provided, will be populated with the contents of the response body parsed as + // JSON. + JSONResponse interface{} + // OkCodes contains a list of numeric HTTP status codes that should be interpreted as success. If + // the response has a different code, an error will be returned. + OkCodes []int + // MoreHeaders specifies additional HTTP headers to be provide on the request. If a header is + // provided with a blank value (""), that header will be *omitted* instead: use this to suppress + // the default Accept header or an inferred Content-Type, for example. + MoreHeaders map[string]string + // ErrorContext specifies the resource error type to return if an error is encountered. + // This lets resources override default error messages based on the response status code. + ErrorContext error +} + +var applicationJSON = "application/json" + +// Request performs an HTTP request using the ProviderClient's current HTTPClient. An authentication +// header will automatically be provided. +func (client *ProviderClient) Request(method, url string, options *RequestOpts) (*http.Response, error) { + var body io.Reader + var contentType *string + + // Derive the content body by either encoding an arbitrary object as JSON, or by taking a provided + // io.ReadSeeker as-is. Default the content-type to application/json. + if options.JSONBody != nil { + if options.RawBody != nil { + return nil, errors.New("please provide only one of JSONBody or RawBody to gophercloud.Request()") + } + + rendered, err := json.Marshal(options.JSONBody) + if err != nil { + return nil, err + } + + body = bytes.NewReader(rendered) + contentType = &applicationJSON + } + + if options.RawBody != nil { + body = options.RawBody + } + + // Construct the http.Request. 
+ req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, err + } + if client.Context != nil { + req = req.WithContext(client.Context) + } + + // Populate the request headers. Apply options.MoreHeaders last, to give the caller the chance to + // modify or omit any header. + if contentType != nil { + req.Header.Set("Content-Type", *contentType) + } + req.Header.Set("Accept", applicationJSON) + + // Set the User-Agent header + req.Header.Set("User-Agent", client.UserAgent.Join()) + + if options.MoreHeaders != nil { + for k, v := range options.MoreHeaders { + if v != "" { + req.Header.Set(k, v) + } else { + req.Header.Del(k) + } + } + } + + // get latest token from client + for k, v := range client.AuthenticatedHeaders() { + req.Header.Set(k, v) + } + + // Set connection parameter to close the connection immediately when we've got the response + req.Close = true + + prereqtok := req.Header.Get("X-Auth-Token") + + // Issue the request. + resp, err := client.HTTPClient.Do(req) + if err != nil { + return nil, err + } + + // Allow default OkCodes if none explicitly set + okc := options.OkCodes + if okc == nil { + okc = defaultOkCodes(method) + } + + // Validate the HTTP response status. + var ok bool + for _, code := range okc { + if resp.StatusCode == code { + ok = true + break + } + } + + if !ok { + body, _ := ioutil.ReadAll(resp.Body) + resp.Body.Close() + respErr := ErrUnexpectedResponseCode{ + URL: url, + Method: method, + Expected: options.OkCodes, + Actual: resp.StatusCode, + Body: body, + } + + errType := options.ErrorContext + switch resp.StatusCode { + case http.StatusBadRequest: + err = ErrDefault400{respErr} + if error400er, ok := errType.(Err400er); ok { + err = error400er.Error400(respErr) + } + case http.StatusUnauthorized: + if client.ReauthFunc != nil { + err = client.Reauthenticate(prereqtok) + if err != nil { + e := &ErrUnableToReauthenticate{} + e.ErrOriginal = respErr + return nil, e + } + if options.RawBody != nil { + if seeker, ok := options.RawBody.(io.Seeker); ok { + seeker.Seek(0, 0) + } + } + resp, err = client.Request(method, url, options) + if err != nil { + switch err.(type) { + case *ErrUnexpectedResponseCode: + e := &ErrErrorAfterReauthentication{} + e.ErrOriginal = err.(*ErrUnexpectedResponseCode) + return nil, e + default: + e := &ErrErrorAfterReauthentication{} + e.ErrOriginal = err + return nil, e + } + } + return resp, nil + } + err = ErrDefault401{respErr} + if error401er, ok := errType.(Err401er); ok { + err = error401er.Error401(respErr) + } + case http.StatusForbidden: + err = ErrDefault403{respErr} + if error403er, ok := errType.(Err403er); ok { + err = error403er.Error403(respErr) + } + case http.StatusNotFound: + err = ErrDefault404{respErr} + if error404er, ok := errType.(Err404er); ok { + err = error404er.Error404(respErr) + } + case http.StatusMethodNotAllowed: + err = ErrDefault405{respErr} + if error405er, ok := errType.(Err405er); ok { + err = error405er.Error405(respErr) + } + case http.StatusRequestTimeout: + err = ErrDefault408{respErr} + if error408er, ok := errType.(Err408er); ok { + err = error408er.Error408(respErr) + } + case http.StatusConflict: + err = ErrDefault409{respErr} + if error409er, ok := errType.(Err409er); ok { + err = error409er.Error409(respErr) + } + case 429: + err = ErrDefault429{respErr} + if error429er, ok := errType.(Err429er); ok { + err = error429er.Error429(respErr) + } + case http.StatusInternalServerError: + err = ErrDefault500{respErr} + if error500er, ok := errType.(Err500er); ok { + err = 
error500er.Error500(respErr) + } + case http.StatusServiceUnavailable: + err = ErrDefault503{respErr} + if error503er, ok := errType.(Err503er); ok { + err = error503er.Error503(respErr) + } + } + + if err == nil { + err = respErr + } + + return resp, err + } + + // Parse the response body as JSON, if requested to do so. + if options.JSONResponse != nil { + defer resp.Body.Close() + if err := json.NewDecoder(resp.Body).Decode(options.JSONResponse); err != nil { + return nil, err + } + } + + return resp, nil +} + +func defaultOkCodes(method string) []int { + switch { + case method == "GET": + return []int{200} + case method == "POST": + return []int{201, 202} + case method == "PUT": + return []int{201, 202} + case method == "PATCH": + return []int{200, 202, 204} + case method == "DELETE": + return []int{202, 204} + } + + return []int{} +} diff --git a/vendor/github.com/gophercloud/gophercloud/results.go b/vendor/github.com/gophercloud/gophercloud/results.go new file mode 100644 index 000000000000..94a16bff0b4f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/results.go @@ -0,0 +1,448 @@ +package gophercloud + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + "time" +) + +/* +Result is an internal type to be used by individual resource packages, but its +methods will be available on a wide variety of user-facing embedding types. + +It acts as a base struct that other Result types, returned from request +functions, can embed for convenience. All Results capture basic information +from the HTTP transaction that was performed, including the response body, +HTTP headers, and any errors that happened. + +Generally, each Result type will have an Extract method that can be used to +further interpret the result's payload in a specific context. Extensions or +providers can then provide additional extraction functions to pull out +provider- or extension-specific information as well. +*/ +type Result struct { + // Body is the payload of the HTTP response from the server. In most cases, + // this will be the deserialized JSON structure. + Body interface{} + + // Header contains the HTTP header structure from the original response. + Header http.Header + + // Err is an error that occurred during the operation. It's deferred until + // extraction to make it easier to chain the Extract call. + Err error +} + +// ExtractInto allows users to provide an object into which `Extract` will extract +// the `Result.Body`. This would be useful for OpenStack providers that have +// different fields in the response object than OpenStack proper. 
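+// The value passed as `to` should be a pointer so the decoded fields are visible to the caller.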
+func (r Result) ExtractInto(to interface{}) error { + if r.Err != nil { + return r.Err + } + + if reader, ok := r.Body.(io.Reader); ok { + if readCloser, ok := reader.(io.Closer); ok { + defer readCloser.Close() + } + return json.NewDecoder(reader).Decode(to) + } + + b, err := json.Marshal(r.Body) + if err != nil { + return err + } + err = json.Unmarshal(b, to) + + return err +} + +func (r Result) extractIntoPtr(to interface{}, label string) error { + if label == "" { + return r.ExtractInto(&to) + } + + var m map[string]interface{} + err := r.ExtractInto(&m) + if err != nil { + return err + } + + b, err := json.Marshal(m[label]) + if err != nil { + return err + } + + toValue := reflect.ValueOf(to) + if toValue.Kind() == reflect.Ptr { + toValue = toValue.Elem() + } + + switch toValue.Kind() { + case reflect.Slice: + typeOfV := toValue.Type().Elem() + if typeOfV.Kind() == reflect.Struct { + if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { + newSlice := reflect.MakeSlice(reflect.SliceOf(typeOfV), 0, 0) + + if mSlice, ok := m[label].([]interface{}); ok { + for _, v := range mSlice { + // For each iteration of the slice, we create a new struct. + // This is to work around a bug where elements of a slice + // are reused and not overwritten when the same copy of the + // struct is used: + // + // https://github.com/golang/go/issues/21092 + // https://github.com/golang/go/issues/24155 + // https://play.golang.org/p/NHo3ywlPZli + newType := reflect.New(typeOfV).Elem() + + b, err := json.Marshal(v) + if err != nil { + return err + } + + // This is needed for structs with an UnmarshalJSON method. + // Technically this is just unmarshalling the response into + // a struct that is never used, but it's good enough to + // trigger the UnmarshalJSON method. + for i := 0; i < newType.NumField(); i++ { + s := newType.Field(i).Addr().Interface() + + // Unmarshal is used rather than NewDecoder to also work + // around the above-mentioned bug. + err = json.Unmarshal(b, s) + if err != nil { + return err + } + } + + newSlice = reflect.Append(newSlice, newType) + } + } + + // "to" should now be properly modeled to receive the + // JSON response body and unmarshal into all the correct + // fields of the struct or composed extension struct + // at the end of this method. + toValue.Set(newSlice) + } + } + case reflect.Struct: + typeOfV := toValue.Type() + if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { + for i := 0; i < toValue.NumField(); i++ { + toField := toValue.Field(i) + if toField.Kind() == reflect.Struct { + s := toField.Addr().Interface() + err = json.NewDecoder(bytes.NewReader(b)).Decode(s) + if err != nil { + return err + } + } + } + } + } + + err = json.Unmarshal(b, &to) + return err +} + +// ExtractIntoStructPtr will unmarshal the Result (r) into the provided +// interface{} (to). +// +// NOTE: For internal use only +// +// `to` must be a pointer to an underlying struct type +// +// If provided, `label` will be filtered out of the response +// body prior to `r` being unmarshalled into `to`. +func (r Result) ExtractIntoStructPtr(to interface{}, label string) error { + if r.Err != nil { + return r.Err + } + + t := reflect.TypeOf(to) + if k := t.Kind(); k != reflect.Ptr { + return fmt.Errorf("Expected pointer, got %v", k) + } + switch t.Elem().Kind() { + case reflect.Struct: + return r.extractIntoPtr(to, label) + default: + return fmt.Errorf("Expected pointer to struct, got: %v", t) + } +} + +// ExtractIntoSlicePtr will unmarshal the Result (r) into the provided +// interface{} (to). 
+// +// NOTE: For internal use only +// +// `to` must be a pointer to an underlying slice type +// +// If provided, `label` will be filtered out of the response +// body prior to `r` being unmarshalled into `to`. +func (r Result) ExtractIntoSlicePtr(to interface{}, label string) error { + if r.Err != nil { + return r.Err + } + + t := reflect.TypeOf(to) + if k := t.Kind(); k != reflect.Ptr { + return fmt.Errorf("Expected pointer, got %v", k) + } + switch t.Elem().Kind() { + case reflect.Slice: + return r.extractIntoPtr(to, label) + default: + return fmt.Errorf("Expected pointer to slice, got: %v", t) + } +} + +// PrettyPrintJSON creates a string containing the full response body as +// pretty-printed JSON. It's useful for capturing test fixtures and for +// debugging extraction bugs. If you include its output in an issue related to +// a buggy extraction function, we will all love you forever. +func (r Result) PrettyPrintJSON() string { + pretty, err := json.MarshalIndent(r.Body, "", " ") + if err != nil { + panic(err.Error()) + } + return string(pretty) +} + +// ErrResult is an internal type to be used by individual resource packages, but +// its methods will be available on a wide variety of user-facing embedding +// types. +// +// It represents results that only contain a potential error and +// nothing else. Usually, if the operation executed successfully, the Err field +// will be nil; otherwise it will be stocked with a relevant error. Use the +// ExtractErr method +// to cleanly pull it out. +type ErrResult struct { + Result +} + +// ExtractErr is a function that extracts error information, or nil, from a result. +func (r ErrResult) ExtractErr() error { + return r.Err +} + +/* +HeaderResult is an internal type to be used by individual resource packages, but +its methods will be available on a wide variety of user-facing embedding types. + +It represents a result that only contains an error (possibly nil) and an +http.Header. This is used, for example, by the objectstorage packages in +openstack, because most of the operations don't return response bodies, but do +have relevant information in headers. +*/ +type HeaderResult struct { + Result +} + +// ExtractInto allows users to provide an object into which `Extract` will +// extract the http.Header headers of the result. +func (r HeaderResult) ExtractInto(to interface{}) error { + if r.Err != nil { + return r.Err + } + + tmpHeaderMap := map[string]string{} + for k, v := range r.Header { + if len(v) > 0 { + tmpHeaderMap[k] = v[0] + } + } + + b, err := json.Marshal(tmpHeaderMap) + if err != nil { + return err + } + err = json.Unmarshal(b, to) + + return err +} + +// RFC3339Milli describes a common time format used by some API responses. 
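+// The layout is RFC 3339 with fractional seconds and a literal trailing Z.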
+const RFC3339Milli = "2006-01-02T15:04:05.999999Z" + +type JSONRFC3339Milli time.Time + +func (jt *JSONRFC3339Milli) UnmarshalJSON(data []byte) error { + b := bytes.NewBuffer(data) + dec := json.NewDecoder(b) + var s string + if err := dec.Decode(&s); err != nil { + return err + } + t, err := time.Parse(RFC3339Milli, s) + if err != nil { + return err + } + *jt = JSONRFC3339Milli(t) + return nil +} + +const RFC3339MilliNoZ = "2006-01-02T15:04:05.999999" + +type JSONRFC3339MilliNoZ time.Time + +func (jt *JSONRFC3339MilliNoZ) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339MilliNoZ, s) + if err != nil { + return err + } + *jt = JSONRFC3339MilliNoZ(t) + return nil +} + +type JSONRFC1123 time.Time + +func (jt *JSONRFC1123) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + return err + } + *jt = JSONRFC1123(t) + return nil +} + +type JSONUnix time.Time + +func (jt *JSONUnix) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + unix, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + t = time.Unix(unix, 0) + *jt = JSONUnix(t) + return nil +} + +// RFC3339NoZ is the time format used in Heat (Orchestration). +const RFC3339NoZ = "2006-01-02T15:04:05" + +type JSONRFC3339NoZ time.Time + +func (jt *JSONRFC3339NoZ) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339NoZ, s) + if err != nil { + return err + } + *jt = JSONRFC3339NoZ(t) + return nil +} + +// RFC3339ZNoT is the time format used in Zun (Containers Service). +const RFC3339ZNoT = "2006-01-02 15:04:05-07:00" + +type JSONRFC3339ZNoT time.Time + +func (jt *JSONRFC3339ZNoT) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339ZNoT, s) + if err != nil { + return err + } + *jt = JSONRFC3339ZNoT(t) + return nil +} + +// RFC3339ZNoTNoZ is another time format used in Zun (Containers Service). +const RFC3339ZNoTNoZ = "2006-01-02 15:04:05" + +type JSONRFC3339ZNoTNoZ time.Time + +func (jt *JSONRFC3339ZNoTNoZ) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339ZNoTNoZ, s) + if err != nil { + return err + } + *jt = JSONRFC3339ZNoTNoZ(t) + return nil +} + +/* +Link is an internal type to be used in packages of collection resources that are +paginated in a certain way. + +It's a response substructure common to many paginated collection results that is +used to point to related pages. Usually, the one we care about is the one with +Rel field set to "next". +*/ +type Link struct { + Href string `json:"href"` + Rel string `json:"rel"` +} + +/* +ExtractNextURL is an internal function useful for packages of collection +resources that are paginated in a certain way. + +It attempts to extract the "next" URL from slice of Link structs, or +"" if no such URL is present. 
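+If several links have Rel set to "next", the last such Href in the slice wins.
+
+A minimal usage sketch (the links variable is illustrative):
+
+	next, err := gophercloud.ExtractNextURL(links)
+	if err == nil && next != "" {
+		// request the page at next
+	}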
+*/ +func ExtractNextURL(links []Link) (string, error) { + var url string + + for _, l := range links { + if l.Rel == "next" { + url = l.Href + } + } + + if url == "" { + return "", nil + } + + return url, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/service_client.go b/vendor/github.com/gophercloud/gophercloud/service_client.go new file mode 100644 index 000000000000..f222f05a66de --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/service_client.go @@ -0,0 +1,154 @@ +package gophercloud + +import ( + "io" + "net/http" + "strings" +) + +// ServiceClient stores details required to interact with a specific service API implemented by a provider. +// Generally, you'll acquire these by calling the appropriate `New` method on a ProviderClient. +type ServiceClient struct { + // ProviderClient is a reference to the provider that implements this service. + *ProviderClient + + // Endpoint is the base URL of the service's API, acquired from a service catalog. + // It MUST end with a /. + Endpoint string + + // ResourceBase is the base URL shared by the resources within a service's API. It should include + // the API version and, like Endpoint, MUST end with a / if set. If not set, the Endpoint is used + // as-is, instead. + ResourceBase string + + // This is the service client type (e.g. compute, sharev2). + // NOTE: FOR INTERNAL USE ONLY. DO NOT SET. GOPHERCLOUD WILL SET THIS. + // It is only exported because it gets set in a different package. + Type string + + // The microversion of the service to use. Set this to use a particular microversion. + Microversion string + + // MoreHeaders allows users (or Gophercloud) to set service-wide headers on requests. Put another way, + // values set in this field will be set on all the HTTP requests the service client sends. + MoreHeaders map[string]string +} + +// ResourceBaseURL returns the base URL of any resources used by this service. It MUST end with a /. +func (client *ServiceClient) ResourceBaseURL() string { + if client.ResourceBase != "" { + return client.ResourceBase + } + return client.Endpoint +} + +// ServiceURL constructs a URL for a resource belonging to this provider. +func (client *ServiceClient) ServiceURL(parts ...string) string { + return client.ResourceBaseURL() + strings.Join(parts, "/") +} + +func (client *ServiceClient) initReqOpts(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) { + if v, ok := (JSONBody).(io.Reader); ok { + opts.RawBody = v + } else if JSONBody != nil { + opts.JSONBody = JSONBody + } + + if JSONResponse != nil { + opts.JSONResponse = JSONResponse + } + + if opts.MoreHeaders == nil { + opts.MoreHeaders = make(map[string]string) + } + + if client.Microversion != "" { + client.setMicroversionHeader(opts) + } +} + +// Get calls `Request` with the "GET" HTTP verb. +func (client *ServiceClient) Get(url string, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, nil, JSONResponse, opts) + return client.Request("GET", url, opts) +} + +// Post calls `Request` with the "POST" HTTP verb. +func (client *ServiceClient) Post(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, JSONBody, JSONResponse, opts) + return client.Request("POST", url, opts) +} + +// Put calls `Request` with the "PUT" HTTP verb. 
+func (client *ServiceClient) Put(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, JSONBody, JSONResponse, opts) + return client.Request("PUT", url, opts) +} + +// Patch calls `Request` with the "PATCH" HTTP verb. +func (client *ServiceClient) Patch(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, JSONBody, JSONResponse, opts) + return client.Request("PATCH", url, opts) +} + +// Delete calls `Request` with the "DELETE" HTTP verb. +func (client *ServiceClient) Delete(url string, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, nil, nil, opts) + return client.Request("DELETE", url, opts) +} + +// Head calls `Request` with the "HEAD" HTTP verb. +func (client *ServiceClient) Head(url string, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, nil, nil, opts) + return client.Request("HEAD", url, opts) +} + +func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { + switch client.Type { + case "compute": + opts.MoreHeaders["X-OpenStack-Nova-API-Version"] = client.Microversion + case "sharev2": + opts.MoreHeaders["X-OpenStack-Manila-API-Version"] = client.Microversion + case "volume": + opts.MoreHeaders["X-OpenStack-Volume-API-Version"] = client.Microversion + case "baremetal": + opts.MoreHeaders["X-OpenStack-Ironic-API-Version"] = client.Microversion + case "baremetal-introspection": + opts.MoreHeaders["X-OpenStack-Ironic-Inspector-API-Version"] = client.Microversion + } + + if client.Type != "" { + opts.MoreHeaders["OpenStack-API-Version"] = client.Type + " " + client.Microversion + } +} + +// Request carries out the HTTP operation for the service client +func (client *ServiceClient) Request(method, url string, options *RequestOpts) (*http.Response, error) { + if len(client.MoreHeaders) > 0 { + if options == nil { + options = new(RequestOpts) + } + for k, v := range client.MoreHeaders { + options.MoreHeaders[k] = v + } + } + return client.ProviderClient.Request(method, url, options) +} diff --git a/vendor/github.com/gophercloud/gophercloud/util.go b/vendor/github.com/gophercloud/gophercloud/util.go new file mode 100644 index 000000000000..68f9a5d3eca5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/util.go @@ -0,0 +1,102 @@ +package gophercloud + +import ( + "fmt" + "net/url" + "path/filepath" + "strings" + "time" +) + +// WaitFor polls a predicate function, once per second, up to a timeout limit. +// This is useful to wait for a resource to transition to a certain state. +// To handle situations when the predicate might hang indefinitely, the +// predicate will be prematurely cancelled after the timeout. +// Resource packages will wrap this in a more convenient function that's +// specific to a certain resource, but it can also be useful on its own. +func WaitFor(timeout int, predicate func() (bool, error)) error { + type WaitForResult struct { + Success bool + Error error + } + + start := time.Now().Unix() + + for { + // If a timeout is set, and that's been exceeded, shut it down. 
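+		// A negative timeout disables this check; elapsed time is measured in whole seconds.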
+ if timeout >= 0 && time.Now().Unix()-start >= int64(timeout) { + return fmt.Errorf("A timeout occurred") + } + + time.Sleep(1 * time.Second) + + var result WaitForResult + ch := make(chan bool, 1) + go func() { + defer close(ch) + satisfied, err := predicate() + result.Success = satisfied + result.Error = err + }() + + select { + case <-ch: + if result.Error != nil { + return result.Error + } + if result.Success { + return nil + } + // If the predicate has not finished by the timeout, cancel it. + case <-time.After(time.Duration(timeout) * time.Second): + return fmt.Errorf("A timeout occurred") + } + } +} + +// NormalizeURL is an internal function to be used by provider clients. +// +// It ensures that each endpoint URL has a closing `/`, as expected by +// ServiceClient's methods. +func NormalizeURL(url string) string { + if !strings.HasSuffix(url, "/") { + return url + "/" + } + return url +} + +// NormalizePathURL is used to convert rawPath to a fqdn, using basePath as +// a reference in the filesystem, if necessary. basePath is assumed to contain +// either '.' when first used, or the file:// type fqdn of the parent resource. +// e.g. myFavScript.yaml => file://opt/lib/myFavScript.yaml +func NormalizePathURL(basePath, rawPath string) (string, error) { + u, err := url.Parse(rawPath) + if err != nil { + return "", err + } + // if a scheme is defined, it must be a fqdn already + if u.Scheme != "" { + return u.String(), nil + } + // if basePath is a url, then child resources are assumed to be relative to it + bu, err := url.Parse(basePath) + if err != nil { + return "", err + } + var basePathSys, absPathSys string + if bu.Scheme != "" { + basePathSys = filepath.FromSlash(bu.Path) + absPathSys = filepath.Join(basePathSys, rawPath) + bu.Path = filepath.ToSlash(absPathSys) + return bu.String(), nil + } + + absPathSys = filepath.Join(basePath, rawPath) + u.Path = filepath.ToSlash(absPathSys) + if err != nil { + return "", err + } + u.Scheme = "file" + return u.String(), nil + +} diff --git a/vendor/github.com/hashicorp/go-discover/.gitignore b/vendor/github.com/hashicorp/go-discover/.gitignore new file mode 100644 index 000000000000..1fd0277d0052 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/.gitignore @@ -0,0 +1,6 @@ +cmd/discover/discover +.terraform/ +.terraform.tfstate.lock.info +terraform.tfstate +terraform.tfstate.backup +terraform.tfvars diff --git a/vendor/github.com/hashicorp/go-discover/.travis.yml b/vendor/github.com/hashicorp/go-discover/.travis.yml new file mode 100644 index 000000000000..abb17bec1893 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/.travis.yml @@ -0,0 +1,10 @@ +language: go +sudo: false + +go: + - stable + +env: + - GO111MODULE=on + +install: true diff --git a/vendor/github.com/hashicorp/go-discover/LICENSE b/vendor/github.com/hashicorp/go-discover/LICENSE new file mode 100644 index 000000000000..c33dcc7c928c --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. 
“Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-discover/README.md b/vendor/github.com/hashicorp/go-discover/README.md new file mode 100644 index 000000000000..8eeb44c83b55 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/README.md @@ -0,0 +1,305 @@ +# Go Discover Nodes for Cloud Providers [![Build Status](https://travis-ci.org/hashicorp/go-discover.svg?branch=master)](https://travis-ci.org/hashicorp/go-discover) [![GoDoc](https://godoc.org/github.com/hashicorp/go-discover?status.svg)](https://godoc.org/github.com/hashicorp/go-discover) + + +`go-discover` is a Go (golang) library and command line tool to discover +ip addresses of nodes in cloud environments based on meta information +like tags provided by the environment. + +The configuration for the providers is provided as a list of `key=val key=val +...` tuples. If either the key or the value contains a space (` `), a backslash +(`\`) or double quotes (`"`) then it needs to be quoted with double quotes. +Within a quoted string you can use the backslash to escape double quotes or the +backslash itself, e.g. `key=val "some key"="some value"` + +Duplicate keys are reported as error and the provider is determined through the +`provider` key. + +### Supported Providers + +The following cloud providers have implementations in the go-discover/provider +sub packages. Additional providers can be added through the +[Register](https://godoc.org/github.com/hashicorp/go-discover#Register) +function. 
+ + * Aliyun (Alibaba) Cloud [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/aliyun/aliyun_discover.go#L15-L28) + * Amazon AWS [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/aws/aws_discover.go#L19-L33) + * DigitalOcean [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/digitalocean/digitalocean_discover.go#L16-L24) + * Google Cloud [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/gce/gce_discover.go#L17-L37) + * Linode [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/linode/linode_discover.go#L30-L41) + * mDNS [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/mdns/mdns_provider.go#L19-L31) + * Microsoft Azure [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/azure/azure_discover.go#L16-L37) + * Openstack [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/os/os_discover.go#L23-L38) + * Scaleway [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/scaleway/scaleway_discover.go#L14-L22) + * SoftLayer [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/softlayer/softlayer_discover.go#L16-L25) + * TencentCloud [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/tencentcloud/tencentcloud_discover.go#L23-L37) + * Triton [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/triton/triton_discover.go#L17-L27) + * vSphere [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/vsphere/vsphere_discover.go#L148-L155) + * Packet [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/packet/packet_discover.go#L25-L35) + +The following providers are implemented in the go-discover/provider subdirectory +but aren't automatically registered. If you want to support these providers, +register them manually: + + * Kubernetes [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/k8s/k8s_discover.go#L32-L51) + +HashiCorp maintains acceptance tests that regularly allocate and run tests with +real resources to verify the behavior of several of these providers. Those +currently are: Amazon AWS, Microsoft Azure, Google Cloud, DigitalOcean, Triton, Scaleway, AliBaba Cloud, vSphere, and Packet.net. + +### Config Example + +``` +# Aliyun (Alibaba) Cloud +provider=aliyun region=... tag_key=consul tag_value=... access_key_id=... access_key_secret=... + +# Amazon AWS +provider=aws region=eu-west-1 tag_key=consul tag_value=... access_key_id=... secret_access_key=... + +# DigitalOcean +provider=digitalocean region=... tag_name=... api_token=... + +# Google Cloud +provider=gce project_name=... zone_pattern=eu-west-* tag_value=consul credentials_file=... + +# Linode +provider=linode tag_name=... region=us-east address_type=private_v4 api_token=... + +# mDNS +provider=mdns service=consul domain=local + +# Microsoft Azure +provider=azure tag_name=consul tag_value=... tenant_id=... client_id=... subscription_id=... secret_access_key=... + +# Openstack +provider=os tag_key=consul tag_value=server username=... password=... auth_url=... + +# Scaleway +provider=scaleway organization=my-org tag_name=consul-server token=... region=... + +# SoftLayer +provider=softlayer datacenter=dal06 tag_value=consul username=... api_key=... + +# TencentCloud +provider=tencentcloud region=ap-guangzhou tag_key=consul tag_value=... 
access_key_id=... access_key_secret=...
+
+# Triton
+provider=triton account=testaccount url=https://us-sw-1.api.joyentcloud.com key_id=... tag_key=consul-role tag_value=server
+
+# vSphere
+provider=vsphere category_name=consul-role tag_name=consul-server host=... user=... password=... insecure_ssl=[true|false]
+
+# Packet
+provider=packet auth_token=token project=uuid url=... address_type=...
+
+# Kubernetes
+provider=k8s label_selector="app = consul-server"
+```
+
+## Command Line Tool Usage
+
+Install the command line tool with:
+
+```
+go get -u github.com/hashicorp/go-discover/cmd/discover
+```
+
+Then run it with:
+
+```
+$ discover addrs provider=aws region=eu-west-1 ...
+```
+
+## Library Usage
+
+Install the library with:
+
+```
+go get -u github.com/hashicorp/go-discover
+```
+
+You can then either support discovery for all available providers
+or only for some of them.
+
+```go
+// support discovery for all supported providers
+d := discover.Discover{}
+
+// support discovery for AWS and GCE only
+d := discover.Discover{
+    Providers: map[string]discover.Provider{
+        "aws": discover.Providers["aws"],
+        "gce": discover.Providers["gce"],
+    },
+}
+
+// a logger is required; use ioutil.Discard instead of os.Stderr for no log output
+l := log.New(os.Stderr, "", log.LstdFlags)
+
+cfg := "provider=aws region=eu-west-1 ..."
+addrs, err := d.Addrs(cfg, l)
+```
+
+You can also add support for providers that aren't registered by default:
+
+```go
+// Imports at top of file
+import "github.com/hashicorp/go-discover/provider/k8s"
+
+// support discovery for all supported providers
+d := discover.Discover{}
+
+// support discovery for the Kubernetes provider only
+d := discover.Discover{
+    Providers: map[string]discover.Provider{
+        "k8s": &k8s.Provider{},
+    },
+}
+
+// ...
+```
+
+For complete API documentation, see
+[GoDoc](https://godoc.org/github.com/hashicorp/go-discover). The configuration
+for the supported providers is documented in the
+[providers](https://godoc.org/github.com/hashicorp/go-discover/provider)
+sub-package.
+
+## Testing
+
+**Note: Due to the `go.sum` checksum errors referenced in [#68](https://github.com/hashicorp/go-discover/issues/68),
+you will need Go 1.11.4+ to build/test go-discover.**
+
+Configuration tests can be run with Go:
+
+```
+$ go test ./...
+```
+
+By default, tests that communicate with providers do not run unless credentials
+are set for that provider. To run provider tests you must set the necessary
+environment variables.
+
+**Note: This will make real API calls to the account provided by the credentials.**
+
+```
+$ AWS_ACCESS_KEY_ID=... AWS_ACCESS_KEY_SECRET=... AWS_REGION=... go test -v ./provider/aws
+```
+
+This requires resources to exist that match those specified in tests
+(e.g. instance tags in the case of AWS). To create these resources,
+there are sets of [Terraform](https://www.terraform.io) configuration
+in the `test/tf` directory for supported providers.
+
+You must use the same account and access credentials as above. The same
+environment variables should be applicable and read by Terraform.
+
+```
+$ cd test/tf/aws
+$ export AWS_ACCESS_KEY_ID=... AWS_ACCESS_KEY_SECRET=... AWS_REGION=...
+$ terraform init
+...
+$ terraform apply
+...
+```
+
+After Terraform runs successfully, you should be able to run the tests,
+assuming you have exported the credentials into your environment:
+
+```
+$ go test -v ./provider/aws
+```
+
+To destroy the resources, run Terraform again:
+
+```
+$ cd test/tf/aws
+$ terraform destroy
+...
+
+```
+
+**Note: Creating and testing these resources should require nothing beyond
+credentials and Terraform. This keeps the tests runnable consistently in
+development and CI environments across all providers.**
+
+## Retrieving Test Credentials
+
+Below are instructions for retrieving credentials in order to run
+tests for some of the providers.
+
+### Google Cloud
+
+1. Go to https://console.cloud.google.com/
+1. IAM & Admin / Settings:
+   * Create Project, e.g. `discover`
+   * Write down the `Project ID`, e.g. `discover-xxx`
+1. Billing: Ensure that the project is linked to a billing account
+1. API Manager / Dashboard: Enable the following APIs
+   * Google Compute Engine API
+1. IAM & Admin / Service Accounts: Create Service Account
+   * Service account name: `admin`
+   * Roles:
+     * `Project/Service Account Actor`
+     * `Compute Engine/Compute Instance Admin (v1)`
+     * `Compute Engine/Compute Security Admin`
+   * Furnish a new private key: `yes`
+   * Key type: `JSON`
+1. The credentials file `discover-xxx.json` will have been downloaded
+   automatically to your machine
+1. Source the contents of the credentials file into the `GOOGLE_CREDENTIALS`
+   environment variable
+
+### Azure
+
+See also the [Terraform provider documentation](https://www.terraform.io/docs/providers/azurerm/index.html#creating-credentials).
+
+```shell
+# Install Azure CLI (https://github.com/Azure/azure-cli)
+curl -L https://aka.ms/InstallAzureCli | bash
+
+# 1. Login
+$ az login
+
+# 2. Get SubscriptionID
+$ az account list
+[
+  {
+    "cloudName": "AzureCloud",
+    "id": "subscription_id",
+    "isDefault": true,
+    "name": "Gratis versie",
+    "state": "Enabled",
+    "tenantId": "tenant_id",
+    "user": {
+      "name": "user@email.com",
+      "type": "user"
+    }
+  }
+]
+
+# 3. Switch to subscription
+$ az account set --subscription="subscription_id"
+
+# 4. Create ClientID and Secret
+$ az ad sp create-for-rbac --role="Contributor" --scopes="/subscriptions/subscription_id"
+{
+  "appId": "client_id",
+  "displayName": "azure-cli-2017-07-18-16-51-43",
+  "name": "http://azure-cli-2017-07-18-16-51-43",
+  "password": "client_secret",
+  "tenant": "tenant_id"
+}
+
+# 5. Export the Credentials for the client
+export ARM_CLIENT_ID=client_id
+export ARM_CLIENT_SECRET=client_secret
+export ARM_TENANT_ID=tenant_id
+export ARM_SUBSCRIPTION_ID=subscription_id
+
+# 6. Test the credentials
+$ az vm list-sizes --location 'West Europe'
+```
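+
+### Implementing a Provider
+
+For reference, a provider only has to satisfy the two-method `Provider`
+interface (`Addrs` and `Help`) defined by this library. The sketch below is
+illustrative only (the `null` package name and its fixed address are made up)
+and shows the minimal shape of a custom provider:
+
+```go
+// Package null is a hypothetical, minimal discover.Provider implementation.
+package null
+
+import "log"
+
+type Provider struct{}
+
+// Help returns the configuration help text shown by the command line tool.
+func (p *Provider) Help() string {
+    return `null:
+
+    provider: "null"
+`
+}
+
+// Addrs returns the discovered addresses; a real provider would query its
+// cloud API using the key/value args parsed from the config string.
+func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) {
+    l.Printf("[DEBUG] discover-null: returning a static address")
+    return []string{"127.0.0.1"}, nil
+}
+```
+
+Such a provider can then be passed to `discover.Discover` via the `Providers`
+map (or the `WithProviders` option), exactly as the Kubernetes example in the
+Library Usage section does.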
    diff --git a/vendor/github.com/hashicorp/go-discover/config.go b/vendor/github.com/hashicorp/go-discover/config.go new file mode 100644 index 000000000000..318018c75fca --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/config.go @@ -0,0 +1,229 @@ +package discover + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +// Config stores key/value pairs for the discovery +// functions to use. +type Config map[string]string + +// Parse parses a "key=val key=val ..." string into a config map. Keys +// and values which contain spaces, backslashes or double-quotes must be +// quoted with double quotes. Use the backslash to escape special +// characters within quoted strings, e.g. "some key"="some \"value\"". +func Parse(s string) (Config, error) { + return parse(s) +} + +// String formats a config map into the "key=val key=val ..." +// understood by Parse. The order of the keys is stable. +func (c Config) String() string { + // sort 'provider' to the front and keep the keys stable. + var keys []string + for k := range c { + if k != "provider" { + keys = append(keys, k) + } + } + sort.Strings(keys) + keys = append([]string{"provider"}, keys...) + + quote := func(s string) string { + if strings.ContainsAny(s, ` "\`) { + return strconv.Quote(s) + } + return s + } + + var vals []string + for _, k := range keys { + v := c[k] + if v == "" { + continue + } + vals = append(vals, quote(k)+"="+quote(v)) + } + return strings.Join(vals, " ") +} + +func parse(in string) (Config, error) { + m := Config{} + s := []rune(strings.TrimSpace(in)) + state := stateKey + key := "" + for { + // exit condition + if len(s) == 0 { + break + } + + // get the next token + item, val, n := lex(s) + s = s[n:] + // fmt.Printf("parse: state: %q item: %q val: '%s' n: %d rest: '%s'\n", state, item, val, n, string(s)) + + switch state { + + case stateKey: + switch item { + case itemText: + key = val + if _, exists := m[key]; exists { + return nil, fmt.Errorf("%s: duplicate key", key) + } + state = stateEqual + default: + if val == "" { + return nil, fmt.Errorf("%s: - equals in key's value, enclosing double-quote needed %s=\"value-with-=-symbol\"", key, key) + } + return nil, fmt.Errorf("%s: error with key=value pair %s", key, val) + } + + case stateEqual: + switch item { + case itemEqual: + state = stateVal + default: + return nil, fmt.Errorf("%s: missing '='", key) + } + + case stateVal: + switch item { + case itemText: + m[key] = val + state = stateKey + case itemError: + return nil, fmt.Errorf("%s: %s", key, val) + default: + return nil, fmt.Errorf("%s: missing value", key) + } + } + } + + //fmt.Printf("parse: state: %q rest: '%s'\n", state, string(s)) + switch state { + case stateEqual: + return nil, fmt.Errorf("%s: missing '='", key) + case stateVal: + return nil, fmt.Errorf("%s: missing value", key) + } + if len(m) == 0 { + return nil, nil + } + return m, nil +} + +type itemType string + +const ( + itemText itemType = "TEXT" + itemEqual = "EQUAL" + itemError = "ERROR" +) + +func (t itemType) String() string { + return string(t) +} + +type state string + +const ( + + // lexer states + stateStart state = "start" + stateEqual = "equal" + stateText = "text" + stateQText = "qtext" + stateQTextEnd = "qtextend" + stateQTextEsc = "qtextesc" + + // parser states + stateKey = "key" + stateVal = "val" +) + +func lex(s []rune) (itemType, string, int) { + isEqual := func(r rune) bool { return r == '=' } + isEscape := func(r rune) bool { return r == '\\' } + isQuote := func(r rune) bool { return r == '"' } + isSpace := 
func(r rune) bool { return r == ' ' } + + unquote := func(r []rune) (string, error) { + v := strings.TrimSpace(string(r)) + return strconv.Unquote(v) + } + + var quote rune + state := stateStart + for i, r := range s { + // fmt.Println("lex:", "i:", i, "r:", string(r), "state:", string(state), "head:", string(s[:i]), "tail:", string(s[i:])) + switch state { + case stateStart: + switch { + case isSpace(r): + // state = stateStart + case isEqual(r): + state = stateEqual + case isQuote(r): + quote = r + state = stateQText + default: + state = stateText + } + + case stateEqual: + return itemEqual, "", i + + case stateText: + switch { + case isEqual(r) || isSpace(r): + v := strings.TrimSpace(string(s[:i])) + return itemText, v, i + default: + // state = stateText + } + + case stateQText: + switch { + case r == quote: + state = stateQTextEnd + case isEscape(r): + state = stateQTextEsc + default: + // state = stateQText + } + + case stateQTextEsc: + state = stateQText + + case stateQTextEnd: + v, err := unquote(s[:i]) + if err != nil { + return itemError, err.Error(), i + } + return itemText, v, i + } + } + + // fmt.Println("lex:", "state:", string(state)) + switch state { + case stateEqual: + return itemEqual, "", len(s) + case stateQText: + return itemError, "unbalanced quotes", len(s) + case stateQTextEsc: + return itemError, "unterminated escape sequence", len(s) + case stateQTextEnd: + v, err := unquote(s) + if err != nil { + return itemError, err.Error(), len(s) + } + return itemText, v, len(s) + default: + return itemText, strings.TrimSpace(string(s)), len(s) + } +} diff --git a/vendor/github.com/hashicorp/go-discover/discover.go b/vendor/github.com/hashicorp/go-discover/discover.go new file mode 100644 index 000000000000..369fd4153a23 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/discover.go @@ -0,0 +1,186 @@ +// Package discover provides functions to get metadata for different +// cloud environments. +package discover + +import ( + "fmt" + "log" + "sort" + "strings" + "sync" + + "github.com/hashicorp/go-discover/provider/aliyun" + "github.com/hashicorp/go-discover/provider/aws" + "github.com/hashicorp/go-discover/provider/azure" + "github.com/hashicorp/go-discover/provider/digitalocean" + "github.com/hashicorp/go-discover/provider/gce" + "github.com/hashicorp/go-discover/provider/linode" + "github.com/hashicorp/go-discover/provider/mdns" + "github.com/hashicorp/go-discover/provider/os" + "github.com/hashicorp/go-discover/provider/packet" + "github.com/hashicorp/go-discover/provider/scaleway" + "github.com/hashicorp/go-discover/provider/softlayer" + "github.com/hashicorp/go-discover/provider/tencentcloud" + "github.com/hashicorp/go-discover/provider/triton" + "github.com/hashicorp/go-discover/provider/vsphere" +) + +// Provider has lookup functions for meta data in a +// cloud environment. +type Provider interface { + // Addrs looks up addresses in the cloud environment according to the + // configuration provided in args. + Addrs(args map[string]string, l *log.Logger) ([]string, error) + + // Help provides the configuration help for the command line client. + Help() string +} + +// ProviderWithUserAgent is a provider that declares it's user agent. Not all +// providers support this. +type ProviderWithUserAgent interface { + // SetUserAgent sets the user agent on the provider to the provided string. + SetUserAgent(s string) +} + +// Providers contains all available providers. 
+var Providers = map[string]Provider{ + "aliyun": &aliyun.Provider{}, + "aws": &aws.Provider{}, + "azure": &azure.Provider{}, + "digitalocean": &digitalocean.Provider{}, + "gce": &gce.Provider{}, + "linode": &linode.Provider{}, + "mdns": &mdns.Provider{}, + "os": &os.Provider{}, + "scaleway": &scaleway.Provider{}, + "softlayer": &softlayer.Provider{}, + "tencentcloud": &tencentcloud.Provider{}, + "triton": &triton.Provider{}, + "vsphere": &vsphere.Provider{}, + "packet": &packet.Provider{}, +} + +// Discover looks up metadata in different cloud environments. +type Discover struct { + // Providers is the list of address lookup providers. + // If nil, the default list of providers is used. + Providers map[string]Provider + + // userAgent is the string to use for requests, when supported. + userAgent string + + // once is used to initialize the actual list of providers. + once sync.Once +} + +// Option is used as an initialization option/ +type Option func(*Discover) error + +// New creates a new discover client with the given options. +func New(opts ...Option) (*Discover, error) { + d := new(Discover) + + for _, opt := range opts { + if err := opt(d); err != nil { + return nil, err + } + } + + d.once.Do(d.initProviders) + + return d, nil +} + +// WithUserAgent allows specifying a custom user agent option to send with +// requests when the underlying client library supports it. +func WithUserAgent(agent string) Option { + return func(d *Discover) error { + d.userAgent = agent + return nil + } +} + +// WithProviders allows specifying your own set of providers. +func WithProviders(m map[string]Provider) Option { + return func(d *Discover) error { + d.Providers = m + return nil + } +} + +// initProviders sets the list of providers to the +// default list of providers if none are configured. +func (d *Discover) initProviders() { + if d.Providers == nil { + d.Providers = Providers + } +} + +// Names returns the names of the configured providers. +func (d *Discover) Names() []string { + d.once.Do(d.initProviders) + + var names []string + for n := range d.Providers { + names = append(names, n) + } + sort.Strings(names) + return names +} + +var globalHelp = `The options for discovering ip addresses are provided as a + single string value in "key=value key=value ..." format where + the values are URL encoded. + + provider=aws region=eu-west-1 ... + + The options are provider specific and are listed below. +` + +// Help describes the format of the configuration string for address discovery +// and the various provider specific options. +func (d *Discover) Help() string { + d.once.Do(d.initProviders) + + h := []string{globalHelp} + for _, name := range d.Names() { + h = append(h, d.Providers[name].Help()) + } + return strings.Join(h, "\n") +} + +// Addrs discovers ip addresses of nodes that match the given filter criteria. +// The config string must have the format 'provider=xxx key=val key=val ...' +// where the keys and values are provider specific. The values are URL encoded. 
+func (d *Discover) Addrs(cfg string, l *log.Logger) ([]string, error) { + d.once.Do(d.initProviders) + + args, err := Parse(cfg) + if err != nil { + return nil, fmt.Errorf("discover: %s", err) + } + + name := args["provider"] + if name == "" { + return nil, fmt.Errorf("discover: no provider") + } + + providers := d.Providers + if providers == nil { + providers = Providers + } + + p := providers[name] + if p == nil { + return nil, fmt.Errorf("discover: unknown provider " + name) + } + l.Printf("[DEBUG] discover: Using provider %q", name) + + if typ, ok := p.(ProviderWithUserAgent); ok { + typ.SetUserAgent(d.userAgent) + return p.Addrs(args, l) + } + + return p.Addrs(args, l) +} diff --git a/vendor/github.com/hashicorp/go-discover/go.mod b/vendor/github.com/hashicorp/go-discover/go.mod new file mode 100644 index 000000000000..6eaae8ffe83c --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/go.mod @@ -0,0 +1,45 @@ +module github.com/hashicorp/go-discover + +require ( + github.com/Azure/azure-sdk-for-go v44.0.0+incompatible + github.com/Azure/go-autorest/autorest v0.11.0 + github.com/Azure/go-autorest/autorest/azure/auth v0.5.0 + github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect + github.com/Azure/go-autorest/autorest/validation v0.3.0 // indirect + github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af // indirect + github.com/aws/aws-sdk-go v1.25.41 + github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661 + github.com/digitalocean/godo v1.7.5 + github.com/dnaeon/go-vcr v1.0.1 // indirect + github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 // indirect + github.com/googleapis/gnostic v0.2.0 // indirect + github.com/gophercloud/gophercloud v0.1.0 + github.com/hashicorp/go-multierror v1.0.0 + github.com/hashicorp/mdns v1.0.1 + github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 + github.com/imdario/mergo v0.3.6 // indirect + github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da // indirect + github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62 + github.com/linode/linodego v0.7.1 + github.com/mitchellh/go-homedir v1.1.0 + github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 + github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c + github.com/pkg/errors v0.8.0 // indirect + github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 // indirect + github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect + github.com/sirupsen/logrus v1.0.6 // indirect + github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d + github.com/stretchr/testify v1.4.0 + github.com/tencentcloud/tencentcloud-sdk-go v3.0.83+incompatible + github.com/vmware/govmomi v0.18.0 + golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 + google.golang.org/api v0.4.0 + gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect + gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect + gopkg.in/resty.v1 v1.12.0 // indirect + k8s.io/api v0.18.2 + k8s.io/apimachinery v0.18.2 + k8s.io/client-go v0.18.2 +) + +go 1.12 diff --git a/vendor/github.com/hashicorp/go-discover/go.sum b/vendor/github.com/hashicorp/go-discover/go.sum new file mode 100644 index 000000000000..7c22438881b7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/go.sum @@ -0,0 +1,317 @@ +cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 
+cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +github.com/Azure/azure-sdk-for-go v44.0.0+incompatible h1:e82Yv2HNpS0kuyeCrV29OPKvEiqfs2/uJHic3/3iKdg= +github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.11.0 h1:tnO41Uo+/0sxTMFY/U7aKg2abek3JOnnXcuSuba74jI= +github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.0 h1:nSMjYIe24eBYasAIxt859TxyXef/IqoH+8/g4+LmcVs= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.0/go.mod h1:QRTvSZQpxqm8mSErhnbI+tANIBAKP7B+UIE2z4ypUO0= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.0 h1:Ml+UCrnlKD+cJmSzrZ/RDcDw86NjkRUpnFh7V5JUhzU= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.3.0 h1:3I9AAI63HfcLtphd9g39ruUwRI+Ca+z/f36KHPFRUss= +github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= 
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14= +github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= +github.com/aws/aws-sdk-go v1.25.41 h1:/hj7nZ0586wFqpwjNpzWiUTwtaMgxAZNZKHay80MdXw= +github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661 h1:lrWnAyy/F72MbxIxFUzKmcMCdt9Oi8RzpAxzTNQHD7o= +github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/digitalocean/godo v1.7.5 h1:JOQbAO6QT1GGjor0doT0mXefX2FgUDPOpYh2RaXA+ko= +github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= 
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 h1:zLTLjkaOFEFIOxY5BWLFLwh+cL8vOBW4XJ2aqLE/Tf0= +github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap 
v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/mdns v1.0.1 h1:XFSOubp8KWB+Jd2PDyaX5xUd5bhSP/+pTDZVDMzZJM8= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw= +github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da h1:FjHUJJ7oBW4G/9j1KzlHaXL09LyMVM9rupS39lncbXk= +github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62 h1:JHCT6xuyPUrbbgAPE/3dqlvUKzRHMNuTBKKUb6OeR/k= +github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/linode/linodego v0.7.1 h1:4WZmMpSA2NRwlPZcc0+4Gyn7rr99Evk9bnr0B3gXRKE= +github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/go-homedir v1.1.0 
h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s= +github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= +github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o= +github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sirupsen/logrus v1.0.6 h1:hcP1GmhGigz/O7h1WVUM5KklBp1JoNS9FggWKdj/j3s= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic= 
+github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/tencentcloud/tencentcloud-sdk-go v3.0.83+incompatible h1:8uRvJleFpqLsO77WaAh2UrasMOzd8MxXrNj20e7El+Q= +github.com/tencentcloud/tencentcloud-sdk-go v3.0.83+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4= +github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo= +github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519 h1:x6rhz8Y9CjbgQkccRGmELH6K+LJj7tOoh3XWeC1yaQM= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3 h1:eH6Eip3UpmR+yM/qI9Ijluzb1bNv/cAU/n+6l8tRSis= +golang.org/x/net 
v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text 
v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/api v0.4.0 h1:KKgc1aqhV8wDPbDzlDtpvyjZFY3vjz85FP7p4wcQUyI= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7 h1:ZUjXAXmrAyrmmCPHgCA/vChHcpsX27MZ3yBonD/z1KE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= 
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8= +k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= +k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA= +k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= +k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE= +k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/hashicorp/go-discover/provider/aliyun/aliyun_discover.go b/vendor/github.com/hashicorp/go-discover/provider/aliyun/aliyun_discover.go new file mode 100644 index 000000000000..39d2be4ccf89 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/aliyun/aliyun_discover.go @@ -0,0 +1,103 @@ +// Package aliyun provides node discovery for Aliyun. +package aliyun + +import ( + "fmt" + "io/ioutil" + "log" + + "github.com/denverdino/aliyungo/common" + "github.com/denverdino/aliyungo/ecs" +) + +type Provider struct { + userAgent string +} + +func (p *Provider) SetUserAgent(s string) { + p.userAgent = s +} + +func (p *Provider) Help() string { + return `Aliyun(Alibaba Cloud): + + provider: "aliyun" + region: The Aliyun region. 
+ tag_key: The tag key to filter on + tag_value: The tag value to filter on + access_key_id: The Aliyun access key to use + access_key_secret: The Aliyun access key secret to use + + The required RAM permission is 'ecs:DescribeInstances'. + It is recommended you make a dedicated key used only for auto-joining. +` +} + +func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) { + if args["provider"] != "aliyun" { + return nil, fmt.Errorf("discover-aliyun: invalid provider " + args["provider"]) + } + + if l == nil { + l = log.New(ioutil.Discard, "", 0) + } + + region := args["region"] + tagKey := args["tag_key"] + tagValue := args["tag_value"] + accessKeyID := args["access_key_id"] + accessKeySecret := args["access_key_secret"] + + log.Printf("[DEBUG] discover-aliyun: Using region=%s tag_key=%s tag_value=%s", region, tagKey, tagValue) + if accessKeyID == "" && accessKeySecret == "" { + log.Printf("[DEBUG] discover-aliyun: No static credentials") + } else { + log.Printf("[DEBUG] discover-aliyun: Static credentials provided") + } + + if region == "" { + l.Printf("[DEBUG] discover-aliyun: Region not provided") + return nil, fmt.Errorf("discover-aliyun: invalid region") + } + l.Printf("[INFO] discover-aliyun: Region is %s", region) + + svc := ecs.NewClient(accessKeyID, accessKeySecret) + + if p.userAgent != "" { + svc.SetUserAgent(p.userAgent) + } + + l.Printf("[INFO] discover-aliyun: Filter instances with %s=%s", tagKey, tagValue) + resp, err := svc.DescribeInstancesWithRaw(&ecs.DescribeInstancesArgs{ + RegionId: common.Region(region), + Status: ecs.Running, + Tag: map[string]string{ + tagKey: tagValue, + }}, + ) + + if err != nil { + return nil, fmt.Errorf("discover-aliyun: DescribeInstancesWithRaw failed: %s", err) + } + + l.Printf("[DEBUG] discover-aliyun: Found total %d instances", resp.TotalCount) + + var addrs []string + for _, instanceAttributesType := range resp.Instances.Instance { + switch instanceAttributesType.InstanceNetworkType { + case "classic": + for _, ipAddress := range instanceAttributesType.InnerIpAddress.IpAddress { + l.Printf("[DEBUG] discover-aliyun: Instance %s has innner ip %s ", instanceAttributesType.InstanceId, ipAddress) + addrs = append(addrs, ipAddress) + } + case "vpc": + for _, ipAddress := range instanceAttributesType.VpcAttributes.PrivateIpAddress.IpAddress { + l.Printf("[DEBUG] discover-aliyun: Instance %s has private ip %s ", instanceAttributesType.InstanceId, ipAddress) + addrs = append(addrs, ipAddress) + } + } + } + + l.Printf("[DEBUG] discover-aliyun: Found ip addresses: %v", addrs) + return addrs, nil +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/aws/aws_discover.go b/vendor/github.com/hashicorp/go-discover/provider/aws/aws_discover.go new file mode 100644 index 000000000000..d3e82f6e5b26 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/aws/aws_discover.go @@ -0,0 +1,164 @@ +// Package aws provides node discovery for Amazon AWS. +package aws + +import ( + "fmt" + "io/ioutil" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ec2" +) + +type Provider struct{} + +func (p *Provider) Help() string { + return `Amazon AWS: + + provider: "aws" + region: The AWS region. Default to region of instance. 
+ tag_key: The tag key to filter on + tag_value: The tag value to filter on + addr_type: "private_v4", "public_v4" or "public_v6". Defaults to "private_v4". + access_key_id: The AWS access key to use + secret_access_key: The AWS secret access key to use + + The only required IAM permission is 'ec2:DescribeInstances'. If the Consul agent is + running on AWS instance it is recommended you use an IAM role, otherwise it is + recommended you make a dedicated IAM user and access key used only for auto-joining. +` +} + +func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) { + if args["provider"] != "aws" { + return nil, fmt.Errorf("discover-aws: invalid provider " + args["provider"]) + } + + if l == nil { + l = log.New(ioutil.Discard, "", 0) + } + + region := args["region"] + tagKey := args["tag_key"] + tagValue := args["tag_value"] + addrType := args["addr_type"] + accessKey := args["access_key_id"] + secretKey := args["secret_access_key"] + + if addrType != "private_v4" && addrType != "public_v4" && addrType != "public_v6" { + l.Printf("[INFO] discover-aws: Address type %s is not supported. Valid values are {private_v4,public_v4,public_v6}. Falling back to 'private_v4'", addrType) + addrType = "private_v4" + } + + if addrType == "" { + l.Printf("[DEBUG] discover-aws: Address type not provided. Using 'private_v4'") + addrType = "private_v4" + } + + l.Printf("[DEBUG] discover-aws: Using region=%s tag_key=%s tag_value=%s addr_type=%s", region, tagKey, tagValue, addrType) + if accessKey == "" && secretKey == "" { + l.Printf("[DEBUG] discover-aws: No static credentials") + l.Printf("[DEBUG] discover-aws: Using environment variables, shared credentials or instance role") + } else { + l.Printf("[DEBUG] discover-aws: Static credentials provided") + } + + if region == "" { + l.Printf("[INFO] discover-aws: Region not provided. 
Looking up region in metadata...") + ec2meta := ec2metadata.New(session.New()) + identity, err := ec2meta.GetInstanceIdentityDocument() + if err != nil { + return nil, fmt.Errorf("discover-aws: GetInstanceIdentityDocument failed: %s", err) + } + region = identity.Region + } + l.Printf("[INFO] discover-aws: Region is %s", region) + + l.Printf("[DEBUG] discover-aws: Creating session...") + svc := ec2.New(session.New(), &aws.Config{ + Region: ®ion, + Credentials: credentials.NewChainCredentials( + []credentials.Provider{ + &credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: accessKey, + SecretAccessKey: secretKey, + }, + }, + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{}, + defaults.RemoteCredProvider(*(defaults.Config()), defaults.Handlers()), + }), + }) + + l.Printf("[INFO] discover-aws: Filter instances with %s=%s", tagKey, tagValue) + resp, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{ + Filters: []*ec2.Filter{ + &ec2.Filter{ + Name: aws.String("tag:" + tagKey), + Values: []*string{aws.String(tagValue)}, + }, + &ec2.Filter{ + Name: aws.String("instance-state-name"), + Values: []*string{aws.String("running")}, + }, + }, + }) + if err != nil { + return nil, fmt.Errorf("discover-aws: DescribeInstancesInput failed: %s", err) + } + + l.Printf("[DEBUG] discover-aws: Found %d reservations", len(resp.Reservations)) + var addrs []string + for _, r := range resp.Reservations { + l.Printf("[DEBUG] discover-aws: Reservation %s has %d instances", *r.ReservationId, len(r.Instances)) + for _, inst := range r.Instances { + id := *inst.InstanceId + l.Printf("[DEBUG] discover-aws: Found instance %s", id) + + switch addrType { + case "public_v6": + l.Printf("[DEBUG] discover-aws: Instance %s has %d network interfaces", id, len(inst.NetworkInterfaces)) + + for _, networkinterface := range inst.NetworkInterfaces { + l.Printf("[DEBUG] discover-aws: Checking NetworInterfaceId %s on Instance %s", *networkinterface.NetworkInterfaceId, id) + // Check if instance got any ipv6 + if networkinterface.Ipv6Addresses == nil { + l.Printf("[DEBUG] discover-aws: Instance %s has no IPv6 on NetworkInterfaceId %s", id, *networkinterface.NetworkInterfaceId) + continue + } + for _, ipv6address := range networkinterface.Ipv6Addresses { + l.Printf("[INFO] discover-aws: Instance %s has IPv6 %s on NetworkInterfaceId %s", id, *ipv6address.Ipv6Address, *networkinterface.NetworkInterfaceId) + addrs = append(addrs, *ipv6address.Ipv6Address) + } + } + + case "public_v4": + if inst.PublicIpAddress == nil { + l.Printf("[DEBUG] discover-aws: Instance %s has no public IPv4", id) + continue + } + + l.Printf("[INFO] discover-aws: Instance %s has public ip %s", id, *inst.PublicIpAddress) + addrs = append(addrs, *inst.PublicIpAddress) + + default: + // EC2-Classic don't have the PrivateIpAddress field + if inst.PrivateIpAddress == nil { + l.Printf("[DEBUG] discover-aws: Instance %s has no private ip", id) + continue + } + + l.Printf("[INFO] discover-aws: Instance %s has private ip %s", id, *inst.PrivateIpAddress) + addrs = append(addrs, *inst.PrivateIpAddress) + } + } + } + + l.Printf("[DEBUG] discover-aws: Found ip addresses: %v", addrs) + return addrs, nil +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/azure/azure_discover.go b/vendor/github.com/hashicorp/go-discover/provider/azure/azure_discover.go new file mode 100644 index 000000000000..2106b663f238 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/azure/azure_discover.go @@ -0,0 +1,228 @@ +// 
Package azure provides node discovery for Microsoft Azure. +package azure + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "os" + + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure/auth" +) + +type Provider struct { + userAgent string +} + +func (p *Provider) SetUserAgent(s string) { + p.userAgent = s +} + +func (p *Provider) Help() string { + return `Microsoft Azure: + + provider: "azure" + tenant_id: The id of the tenant + client_id: The id of the client + subscription_id: The id of the subscription + secret_access_key: The authentication credential + **NOTE** The secret_access_key value often may have an equals sign in it's value, + especially if generated from the Azure Portal. So is important to wrap in single quotes + eg. secret_acccess_key='fpOfcHQJAQBczjAxiVpeyLmX1M0M0KPBST+GU2GvEN4=' + + Variables can also be provided by environmental variables: + export ARM_SUBSCRIPTION_ID for subscription + export ARM_TENANT_ID for tenant + export ARM_CLIENT_ID for client + export ARM_CLIENT_SECRET for secret access key + + If none of those options are given, the Azure SDK is using the default environment based authentication outlined + here https://docs.microsoft.com/en-us/go/azure/azure-sdk-go-authorization#use-environment-based-authentication + This will fallback to MSI if nothing is explicitly specified. + + Use these configuration parameters when using tags: + + tag_name: The name of the tag to filter on + tag_value: The value of the tag to filter on + + Use these configuration parameters when using Virtual Machine Scale Sets: + + resource_group: The name of the resource group to filter on + vm_scale_set: The name of the virtual machine scale set to filter on + + When using tags the only permission needed is Microsoft.Network/networkInterfaces/* + + When using Virtual Machine Scale Sets the only role action needed is Microsoft.Compute/virtualMachineScaleSets/*/read. + + It is recommended you make a dedicated key used only for auto-joining. 
+` +} + +// argsOrEnv allows you to pick an environmental variable for a setting if the arg is not set +func argsOrEnv(args map[string]string, key, env string) string { + if value, ok := args[key]; ok { + return value + } + return os.Getenv(env) +} + +func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) { + var authorizer autorest.Authorizer + + if args["provider"] != "azure" { + return nil, fmt.Errorf("discover-azure: invalid provider " + args["provider"]) + } + + if l == nil { + l = log.New(ioutil.Discard, "", 0) + } + + // check for environmental variables, and use if the argument hasn't been set in config + tenantID := argsOrEnv(args, "tenant_id", "ARM_TENANT_ID") + clientID := argsOrEnv(args, "client_id", "ARM_CLIENT_ID") + subscriptionID := argsOrEnv(args, "subscription_id", "ARM_SUBSCRIPTION_ID") + secretKey := argsOrEnv(args, "secret_access_key", "ARM_CLIENT_SECRET") + + // Try to use the argument and environment provided arguments first, if this fails fall back to the Azure + // SDK provided methods + if tenantID != "" && clientID != "" && secretKey != "" { + var err error + authorizer, err = auth.NewClientCredentialsConfig(clientID, secretKey, tenantID).Authorizer() + if err != nil { + return nil, fmt.Errorf("discover-azure (ClientCredentials): %s", err) + } + } else { + var err error + authorizer, err = auth.NewAuthorizerFromEnvironment() + if err != nil { + return nil, fmt.Errorf("discover-azure (EnvironmentCredentials): %s", err) + } + } + + // Use tags if using network interfaces + tagName := args["tag_name"] + tagValue := args["tag_value"] + + // Use resourceGroup and vmScaleSet if using vm scale sets + resourceGroup := args["resource_group"] + vmScaleSet := args["vm_scale_set"] + + // Setup the client using autorest; followed the structure from Terraform + vmnet := network.NewInterfacesClient(subscriptionID) + vmnet.Sender = autorest.CreateSender(autorest.WithLogging(l)) + vmnet.Authorizer = authorizer + + if p.userAgent != "" { + vmnet.Client.UserAgent = p.userAgent + } + + if tagName != "" && tagValue != "" && resourceGroup == "" && vmScaleSet == "" { + l.Printf("[DEBUG] discover-azure: using tag method. tag_name: %s, tag_value: %s", tagName, tagValue) + return fetchAddrsWithTags(tagName, tagValue, vmnet, l) + } else if resourceGroup != "" && vmScaleSet != "" && tagName == "" && tagValue == "" { + l.Printf("[DEBUG] discover-azure: using vm scale set method. resource_group: %s, vm_scale_set: %s", resourceGroup, vmScaleSet) + return fetchAddrsWithVmScaleSet(resourceGroup, vmScaleSet, vmnet, l) + } else { + l.Printf("[ERROR] discover-azure: tag_name: %s, tag_value: %s", tagName, tagValue) + l.Printf("[ERROR] discover-azure: resource_group %s, vm_scale_set %s", resourceGroup, vmScaleSet) + return nil, fmt.Errorf("discover-azure: unclear configuration. 
use (tag name and value) or (resouce_group and vm_scale_set)") + } + +} + +func fetchAddrsWithTags(tagName string, tagValue string, vmnet network.InterfacesClient, l *log.Logger) ([]string, error) { + // Get all network interfaces across resource groups + // unless there is a compelling reason to restrict + + ctx := context.Background() + netres, err := vmnet.ListAll(ctx) + + if err != nil { + return nil, fmt.Errorf("discover-azure: %s", err) + } + + if len(netres.Values()) == 0 { + return nil, fmt.Errorf("discover-azure: no interfaces") + } + + // Choose any PrivateIPAddress with the matching tag + var addrs []string + for _, v := range netres.Values() { + var id string + if v.ID != nil { + id = *v.ID + } else { + id = "ip address id not found" + } + if v.Tags == nil { + l.Printf("[DEBUG] discover-azure: Interface %s has no tags", id) + continue + } + tv := v.Tags[tagName] // *string + if tv == nil { + l.Printf("[DEBUG] discover-azure: Interface %s did not have tag: %s", id, tagName) + continue + } + if *tv != tagValue { + l.Printf("[DEBUG] discover-azure: Interface %s tag value was: %s which did not match: %s", id, *tv, tagValue) + continue + } + if v.IPConfigurations == nil { + l.Printf("[DEBUG] discover-azure: Interface %s had no ip configuration", id) + continue + } + for _, x := range *v.IPConfigurations { + if x.PrivateIPAddress == nil { + l.Printf("[DEBUG] discover-azure: Interface %s had no private ip", id) + continue + } + iAddr := *x.PrivateIPAddress + l.Printf("[DEBUG] discover-azure: Interface %s has private ip: %s", id, iAddr) + addrs = append(addrs, iAddr) + } + } + l.Printf("[DEBUG] discover-azure: Found ip addresses: %v", addrs) + return addrs, nil +} + +func fetchAddrsWithVmScaleSet(resourceGroup string, vmScaleSet string, vmnet network.InterfacesClient, l *log.Logger) ([]string, error) { + // Get all network interfaces for a specific virtual machine scale set + ctx := context.Background() + netres, err := vmnet.ListVirtualMachineScaleSetNetworkInterfaces(ctx, resourceGroup, vmScaleSet) + if err != nil { + return nil, fmt.Errorf("discover-azure: %s", err) + } + + if len(netres.Values()) == 0 { + return nil, fmt.Errorf("discover-azure: no interfaces") + } + + // Get all of PrivateIPAddresses we can. + var addrs []string + for _, v := range netres.Values() { + var id string + if v.ID != nil { + id = *v.ID + } else { + id = "ip address id not found" + } + if v.IPConfigurations == nil { + l.Printf("[DEBUG] discover-azure: Interface %s had no ip configuration", id) + continue + } + for _, x := range *v.IPConfigurations { + if x.PrivateIPAddress == nil { + l.Printf("[DEBUG] discover-azure: Interface %s had no private ip", id) + continue + } + iAddr := *x.PrivateIPAddress + l.Printf("[DEBUG] discover-azure: Interface %s has private ip: %s", id, iAddr) + addrs = append(addrs, iAddr) + } + } + l.Printf("[DEBUG] discover-azure: Found ip addresses: %v", addrs) + return addrs, nil +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/digitalocean/digitalocean_discover.go b/vendor/github.com/hashicorp/go-discover/provider/digitalocean/digitalocean_discover.go new file mode 100644 index 000000000000..fbd07716196b --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/digitalocean/digitalocean_discover.go @@ -0,0 +1,121 @@ +// Package digitalocean provides node discovery for DigitalOcean. 
+package digitalocean + +import ( + "context" + "fmt" + "io/ioutil" + "log" + + "github.com/digitalocean/godo" + "golang.org/x/oauth2" +) + +type Provider struct { + userAgent string +} + +func (p *Provider) SetUserAgent(s string) { + p.userAgent = s +} + +func (p *Provider) Help() string { + return `DigitalOcean: + + provider: "digitalocean" + region: The DigitalOcean region to filter on + tag_name: The tag name to filter on + api_token: The DigitalOcean API token to use +` +} + +type TokenSource struct { + AccessToken string +} + +func (t *TokenSource) Token() (*oauth2.Token, error) { + token := &oauth2.Token{ + AccessToken: t.AccessToken, + } + return token, nil +} + +func listDropletsByTag(c *godo.Client, tagName string) ([]godo.Droplet, error) { + dropletList := []godo.Droplet{} + pageOpt := &godo.ListOptions{ + Page: 1, + PerPage: 200, + } + + for { + droplets, resp, err := c.Droplets.ListByTag(context.TODO(), tagName, pageOpt) + if err != nil { + return nil, err + } + + for _, d := range droplets { + dropletList = append(dropletList, d) + } + + if resp.Links == nil || resp.Links.IsLastPage() { + break + } + + page, err := resp.Links.CurrentPage() + if err != nil { + return nil, err + } + + pageOpt.Page = page + 1 + } + + return dropletList, nil +} + +func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) { + if args["provider"] != "digitalocean" { + return nil, fmt.Errorf("discover-digitalocean: invalid provider " + args["provider"]) + } + + if l == nil { + l = log.New(ioutil.Discard, "", 0) + } + + region := args["region"] + tagName := args["tag_name"] + apiToken := args["api_token"] + l.Printf("[DEBUG] discover-digitalocean: Using region=%s tag_name=%s", region, tagName) + + tokenSource := &TokenSource{ + AccessToken: apiToken, + } + + oauthClient := oauth2.NewClient(context.TODO(), tokenSource) + client := godo.NewClient(oauthClient) + if p.userAgent != "" { + client.UserAgent = p.userAgent + } + + droplets, err := listDropletsByTag(client, tagName) + if err != nil { + return nil, fmt.Errorf("discover-digitalocean: %s", err) + } + + var addrs []string + for _, d := range droplets { + if d.Region.Slug == region || region == "" { + privateIP, err := d.PrivateIPv4() + if err != nil { + return nil, fmt.Errorf("discover-digitalocean: %s", err) + } + + if privateIP != "" { + l.Printf("[INFO] discover-digitalocean: Found instance %s (%d) with private IP: %s", d.Name, d.ID, privateIP) + addrs = append(addrs, privateIP) + } + } + } + + l.Printf("[DEBUG] discover-digitalocean: Found ip addresses: %v", addrs) + return addrs, nil +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/gce/gce_discover.go b/vendor/github.com/hashicorp/go-discover/provider/gce/gce_discover.go new file mode 100644 index 000000000000..2064cb193c40 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/gce/gce_discover.go @@ -0,0 +1,200 @@ +// Package gce provides node discovery for Google Cloud. +package gce + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + compute "google.golang.org/api/compute/v1" +) + +type Provider struct { + userAgent string +} + +func (p *Provider) SetUserAgent(s string) { + p.userAgent = s +} + +func (p *Provider) Help() string { + return `Google Cloud: + + provider: "gce" + project_name: The name of the project. discovered if not set + tag_value: The tag value for filtering instances + zone_pattern: A RE2 regular expression for filtering zones, e.g. 
us-west1-.*, or us-(?west|east).* + credentials_file: The path to the credentials file. See below for more details + + The credentials for a GCE Service Account are required and are searched in + the following locations: + + 1. Use credentials from "credentials_file", if provided. + 2. Use JSON file from GOOGLE_APPLICATION_CREDENTIALS environment variable. + 3. Use JSON file in a location known to the gcloud command-line tool. + On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. + On other systems, $HOME/.config/gcloud/application_default_credentials.json. + 4. On Google Compute Engine, use credentials from the metadata + server. In this final case any provided scopes are ignored. +` +} + +func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) { + if args["provider"] != "gce" { + return nil, fmt.Errorf("discover-gce: invalid provider " + args["provider"]) + } + + if l == nil { + l = log.New(ioutil.Discard, "", 0) + } + + project := args["project_name"] + zone := args["zone_pattern"] + creds := args["credentials_file"] + tagValue := args["tag_value"] + + // determine the project name + if project == "" { + l.Println("[INFO] discover-gce: Looking up project name") + p, err := lookupProject() + if err != nil { + return nil, fmt.Errorf("discover-gce: %s", err) + } + project = p + } + l.Printf("[INFO] discover-gce: Project name is %q", project) + + // create an authenticated client + if creds != "" { + l.Printf("[INFO] discover-gce: Loading credentials from %s", creds) + } + client, err := client(creds) + if err != nil { + return nil, fmt.Errorf("discover-gce: %s", err) + } + svc, err := compute.New(client) + if err != nil { + return nil, fmt.Errorf("discover-gce: %s", err) + } + if p.userAgent != "" { + svc.UserAgent = p.userAgent + } + + // lookup the project zones to look in + if zone != "" { + l.Printf("[INFO] discover-gce: Looking up zones matching %s", zone) + } else { + l.Printf("[INFO] discover-gce: Looking up all zones") + } + zones, err := lookupZones(svc, project, zone) + if err != nil { + return nil, fmt.Errorf("discover-gce: %s", err) + } + l.Printf("[INFO] discover-gce: Found zones %v", zones) + + // lookup the instance addresses + var addrs []string + for _, zone := range zones { + a, err := lookupAddrs(svc, project, zone, tagValue) + if err != nil { + return nil, fmt.Errorf("discover-gce: %s", err) + } + l.Printf("[INFO] discover-gce: Zone %q has %v", zone, a) + addrs = append(addrs, a...) + } + return addrs, nil +} + +// client returns an authenticated HTTP client for use with GCE. +func client(path string) (*http.Client, error) { + if path == "" { + return google.DefaultClient(oauth2.NoContext, compute.ComputeScope) + } + + key, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + jwtConfig, err := google.JWTConfigFromJSON(key, compute.ComputeScope) + if err != nil { + return nil, err + } + + return jwtConfig.Client(oauth2.NoContext), nil +} + +// lookupProject retrieves the project name from the metadata of the current node. 
+func lookupProject() (string, error) { + req, err := http.NewRequest("GET", "http://metadata.google.internal/computeMetadata/v1/project/project-id", nil) + if err != nil { + return "", err + } + req.Header.Add("Metadata-Flavor", "Google") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return "", fmt.Errorf("discover-gce: invalid status code %d when fetching project id", resp.StatusCode) + } + + project, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(project), nil +} + +// lookupZones retrieves the zones of the project and filters them by pattern. +func lookupZones(svc *compute.Service, project, pattern string) ([]string, error) { + call := svc.Zones.List(project) + if pattern != "" { + call = call.Filter("name eq " + pattern) + } + + var zones []string + f := func(page *compute.ZoneList) error { + for _, v := range page.Items { + zones = append(zones, v.Name) + } + return nil + } + + if err := call.Pages(oauth2.NoContext, f); err != nil { + return nil, err + } + return zones, nil +} + +// lookupAddrs retrieves the private ip addresses of all instances in a given +// project and zone which have a matching tag value. +func lookupAddrs(svc *compute.Service, project, zone, tag string) ([]string, error) { + var addrs []string + f := func(page *compute.InstanceList) error { + for _, v := range page.Items { + if len(v.NetworkInterfaces) == 0 || v.NetworkInterfaces[0].NetworkIP == "" { + continue + } + for _, t := range v.Tags.Items { + if t == tag { + addrs = append(addrs, v.NetworkInterfaces[0].NetworkIP) + break + } + } + } + return nil + } + + call := svc.Instances.List(project, zone) + if err := call.Pages(oauth2.NoContext, f); err != nil { + return nil, err + } + return addrs, nil +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/linode/linode_discover.go b/vendor/github.com/hashicorp/go-discover/provider/linode/linode_discover.go new file mode 100644 index 000000000000..7322dab34f7a --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/linode/linode_discover.go @@ -0,0 +1,142 @@ +// Package linode provides node discovery for Linode. +package linode + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + + "github.com/linode/linodego" + "golang.org/x/oauth2" +) + +type Filter struct { + Region string `json:"region,omitempty"` + Tag string `json:"tags,omitempty"` +} + +type Provider struct { + userAgent string +} + +func (p *Provider) SetUserAgent(s string) { + p.userAgent = s +} + +func (p *Provider) Help() string { + return `Linode: + provider: "linode" + api_token: The Linode API token to use + region: The Linode region to filter on + tag_name: The tag name to filter on + address_type: "private_v4", "public_v4", "private_v6" or "public_v6". 
(default: "private_v4") + + Variables can also be provided by environment variables: + export LINODE_TOKEN for api_token +` +} + +func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) { + if args["provider"] != "linode" { + return nil, fmt.Errorf("discover-linode: invalid provider " + args["provider"]) + } + + if l == nil { + l = log.New(ioutil.Discard, "", 0) + } + + addressType := args["address_type"] + region := args["region"] + tagName := args["tag_name"] + apiToken := argsOrEnv(args, "api_token", "LINODE_TOKEN") + l.Printf("[DEBUG] discover-linode: Using address_type=%s region=%s tag_name=%s", addressType, region, tagName) + + client := getLinodeClient(p.userAgent, apiToken) + + filters := Filter{ + Region: "", + Tag: "", + } + + if region != "" { + filters.Region = region + } + if tagName != "" { + filters.Tag = tagName + } + + jsonFilters, _ := json.Marshal(filters) + filterOpt := linodego.ListOptions{Filter: string(jsonFilters)} + + linodes, err := client.ListInstances(context.Background(), &filterOpt) + if err != nil { + return nil, fmt.Errorf("discover-linode: Fetching Linode instances failed: %s", err) + } + + var addrs []string + for _, linode := range linodes { + addr, err := client.GetInstanceIPAddresses(context.Background(), linode.ID) + if err != nil { + return nil, fmt.Errorf("discover-linode: Fetching Linode IP address for instance %v failed: %s", linode.ID, err) + } + + switch addressType { + case "public_v4": + if len(addr.IPv4.Public) == 0 { + break + } + addrs = append(addrs, addr.IPv4.Public[0].Address) + case "private_v4": + if len(addr.IPv4.Private) == 0 { + break + } + addrs = append(addrs, addr.IPv4.Private[0].Address) + case "public_v6": + if addr.IPv6.SLAAC.Address == "" { + break + } + addrs = append(addrs, addr.IPv6.SLAAC.Address) + case "private_v6": + if addr.IPv6.LinkLocal.Address == "" { + break + } + addrs = append(addrs, addr.IPv6.LinkLocal.Address) + default: + if len(addr.IPv4.Private) == 0 { + break + } + addrs = append(addrs, addr.IPv4.Private[0].Address) + } + } + + return addrs, nil +} + +func getLinodeClient(userAgent, apiToken string) linodego.Client { + tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: apiToken}) + + oauth2Client := &http.Client{ + Transport: &oauth2.Transport{ + Source: tokenSource, + }, + } + + client := linodego.NewClient(oauth2Client) + + if userAgent != "" { + client.SetUserAgent(userAgent) + } + + return client +} + +func argsOrEnv(args map[string]string, key, env string) string { + if value := args[key]; value != "" { + return value + } + return os.Getenv(env) +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/mdns/mdns_provider.go b/vendor/github.com/hashicorp/go-discover/provider/mdns/mdns_provider.go new file mode 100644 index 000000000000..7445a8357c32 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/mdns/mdns_provider.go @@ -0,0 +1,119 @@ +// Package mdns provides node discovery via mDNS. +package mdns + +import ( + "fmt" + "io/ioutil" + "log" + "net" + "strconv" + "time" + + m "github.com/hashicorp/mdns" +) + +// Provider implements the Provider interface. +type Provider struct{} + +// Help returns help information for the mDNS package. +func (p *Provider) Help() string { + return `mDNS: + + provider: "mdns" + service: The mDNS service name. + domain: The mDNS discovery domain. Default "local". + timeout: The mDNS lookup timeout. Default "5s" (five seconds). 
+ v6: IPv6 will be allowed and preferred when set to "true" + and disabled when set to "false". Default "true". + v4: IPv4 will be allowed when set to "true" and disabled + when set to "false". Default "true". +` +} + +// Addrs returns discovered addresses for the mDNS package. +func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) { + var params *m.QueryParam + var ch chan *m.ServiceEntry + var v6, v4 bool + var addrs []string + var err error + + // default to null logger + if l == nil { + l = log.New(ioutil.Discard, "", 0) + } + + // init params + params = new(m.QueryParam) + + // validate and set service record + if args["service"] == "" { + return nil, fmt.Errorf("discover-mdns: Service record not provided." + + " Please specify a service record for the mDNS lookup.") + } + params.Service = args["service"] + + // validate and set domain + if args["domain"] != "" { + params.Domain = args["domain"] + } else { + params.Domain = "local" + } + + // validate and set timeout + if args["timeout"] != "" { + if params.Timeout, err = time.ParseDuration(args["timeout"]); err != nil { + return nil, fmt.Errorf("discover-mdns: Failed to parse timeout: %s", err) + } + } else { + params.Timeout = 5 * time.Second + } + + // validate and set v6 toggle + if args["v6"] != "" { + if v6, err = strconv.ParseBool(args["v6"]); err != nil { + return nil, fmt.Errorf("discover-mdns: Failed to parse v6: %s", err) + } + } else { + v6 = true + } + + // validate and set v4 toggle + if args["v4"] != "" { + if v4, err = strconv.ParseBool(args["v4"]); err != nil { + return nil, fmt.Errorf("discover-mdns: Failed to parse v4: %s", err) + } + } else { + v4 = true + } + + // init entries channel + ch = make(chan *m.ServiceEntry) + defer close(ch) + params.Entries = ch + + // build addresses + go func() { + var addr string + for e := range ch { + addr = "" // reset addr each loop + if v6 && e.AddrV6 != nil { + addr = net.JoinHostPort(e.AddrV6.String(), + strconv.Itoa(e.Port)) + } + if addr == "" && v4 && e.AddrV4 != nil { + addr = net.JoinHostPort(e.AddrV4.String(), + strconv.Itoa(e.Port)) + } + if addr != "" { + l.Printf("[DEBUG] discover-mdns: %s -> %s", + e.Host, addr) + // build address list + addrs = append(addrs, addr) + } + } + }() + + // lookup and return + return addrs, m.Query(params) +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/os/os_discover.go b/vendor/github.com/hashicorp/go-discover/provider/os/os_discover.go new file mode 100644 index 000000000000..278003579145 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/os/os_discover.go @@ -0,0 +1,223 @@ +// Package os provides node discovery for Openstack. 
+package os + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + "os" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/pagination" +) + +type Provider struct { + userAgent string +} + +func (p *Provider) SetUserAgent(s string) { + p.userAgent = s +} + +func (p *Provider) Help() string { + return `Openstack: + + provider: "os" + auth_url: The endpoint of OS identity + project_id: The id of the project (tenant id) + tag_key: The tag key to filter on + tag_value: The tag value to filter on + user_name: The user used to authenticate + password: The password of the provided user + token: The token to use + insecure: Sets if the api certificate shouldn't be check. Any value means true + + Variables can also be provided by environmental variables. +` +} + +func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) { + if args["provider"] != "os" { + return nil, fmt.Errorf("discover-os: invalid provider " + args["provider"]) + } + + if l == nil { + l = log.New(ioutil.Discard, "", 0) + } + + projectID := args["project_id"] + tagKey := args["tag_key"] + tagValue := args["tag_value"] + var err error + + if projectID == "" { // Use the one on the instance if not provided either by parameter or env + l.Printf("[INFO] discover-os: ProjectID not provided. Looking up in metadata...") + projectID, err = getProjectID() + if err != nil { + return nil, err + } + l.Printf("[INFO] discover-os: ProjectID is %s", projectID) + args["project_id"] = projectID + } + + log.Printf("[DEBUG] discover-os: Using project_id=%s tag_key=%s tag_value=%s", projectID, tagKey, tagValue) + client, err := newClient(args, l) + if err != nil { + return nil, err + } + + if p.userAgent != "" { + client.UserAgent.Prepend(p.userAgent) + } + + pager := servers.List(client, ListOpts{ListOpts: servers.ListOpts{Status: "ACTIVE"}, ProjectID: projectID}) + if err := pager.Err; err != nil { + return nil, fmt.Errorf("discover-os: ListServers failed: %s", err) + } + + var addrs []string + err = pager.EachPage(func(page pagination.Page) (bool, error) { + srvs, err := servers.ExtractServers(page) + if err != nil { + return false, err + } + for _, srv := range srvs { + for key, value := range srv.Metadata { + l.Printf("[INFO] discover-os: Filter instances with %s=%s", tagKey, tagValue) + if key == tagKey && value == tagValue { + // Loop over the server address and append any fixed one to the list + for _, v := range srv.Addresses { + if addrsInfo, ok := v.([]interface{}); ok { + for _, addrInfo := range addrsInfo { + if info, ok := addrInfo.(map[string]interface{}); ok { + if info["OS-EXT-IPS:type"] == "fixed" { + addrs = append(addrs, info["addr"].(string)) + } + } + } + } + } + } + } + } + return true, nil + }) + if err != nil { + return nil, fmt.Errorf("discover-os: ExtractServerInfo failed: %s", err) + } + + l.Printf("[DEBUG] discover-os: Found ip addresses: %v", addrs) + return addrs, nil +} + +func newClient(args map[string]string, l *log.Logger) (*gophercloud.ServiceClient, error) { + username := argsOrEnv(args, "user_name", "OS_USERNAME") + password := argsOrEnv(args, "password", "OS_PASSWORD") + token := argsOrEnv(args, "token", "OS_AUTH_TOKEN") + url := argsOrEnv(args, "auth_url", "OS_AUTH_URL") + region := argsOrEnv(args, "region", "OS_REGION_NAME") + if region == "" { + region = "RegionOne" + } + projectID := 
argsOrEnv(args, "project_id", "OS_PROJECT_ID") + insecure := argsOrEnv(args, "insecure", "OS_INSECURE") + domain_id := argsOrEnv(args, "domain_id", "OS_DOMAIN_ID") + domain_name := argsOrEnv(args, "domain_name", "OS_DOMAIN_NAME") + + if url == "" { + return nil, fmt.Errorf("discover-os: Auth url must be provided") + } + + ao := gophercloud.AuthOptions{ + DomainID: domain_id, + DomainName: domain_name, + IdentityEndpoint: url, + Username: username, + Password: password, + TokenID: token, + TenantID: projectID, + } + + client, err := openstack.NewClient(ao.IdentityEndpoint) + if err != nil { + return nil, fmt.Errorf("discover-os: Client initialization failed: %s", err) + } + + config := &tls.Config{InsecureSkipVerify: insecure != ""} + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: config, + } + transport.TLSClientConfig = config + client.HTTPClient = *http.DefaultClient + client.HTTPClient.Transport = transport + + l.Printf("[DEBUG] discover-os: Authenticating...") + if err = openstack.Authenticate(client, ao); err != nil { + return nil, fmt.Errorf("discover-os: Authentication failed: %s", err) + } + + l.Printf("[DEBUG] discover-os: Creating client...") + computeClient, err := openstack.NewComputeV2(client, gophercloud.EndpointOpts{Region: region}) + if err != nil { + return nil, fmt.Errorf("discover-os: ComputeClient initialization failed: %s", err) + } + return computeClient, nil +} + +func argsOrEnv(args map[string]string, key, env string) string { + if value := args[key]; value != "" { + return value + } + return os.Getenv(env) +} + +// ListOpts add the project to the parameters of servers.ListOpts +type ListOpts struct { + servers.ListOpts + ProjectID string `q:"project_id"` +} + +// ToServerListQuery formats a ListOpts into a query string. 
+func (opts ListOpts) ToServerListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +func getProjectID() (string, error) { + resp, err := http.Get("http://169.254.169.254/openstack/latest/meta_data.json") + if err != nil { + return "", fmt.Errorf("discover-os: Error asking metadata for project_id: %s", err) + } + data := struct { + ProjectID string `json:"project_id"` + }{} + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("discover-os: Can't read response body: %s", err) + } + resp.Body.Close() + if err = json.Unmarshal(body, &data); err != nil { + return "", fmt.Errorf("discover-os: Can't convert project_id: %s", err) + } + if data.ProjectID == "" { + return "", fmt.Errorf("discover-os: Couln't find project_id on metadata") + } + return data.ProjectID, nil +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/packet/packet_discover.go b/vendor/github.com/hashicorp/go-discover/provider/packet/packet_discover.go new file mode 100644 index 000000000000..285697057f81 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/packet/packet_discover.go @@ -0,0 +1,147 @@ +package packet + +import ( + "fmt" + "log" + "os" + "strings" + + "github.com/packethost/packngo" +) + +const baseURL = "https://api.packet.net/" + +// Provider struct +type Provider struct { + userAgent string +} + +// SetUserAgent setter +func (p *Provider) SetUserAgent(s string) { + p.userAgent = s +} + +// Help function +func (p *Provider) Help() string { + return `Packet: + provider: "packet" + project: UUID of packet project. Required + auth_token: Packet authentication token. Required + url: Packet REST URL. Optional + address_type: "private_v4", "public_v4" or "public_v6". Defaults to "private_v4". Optional + facility: Filter for specific facility (Examples: "ewr1,ams1") + tag: Filter by tag (Examples: "tag1,tag2") + + Variables can also be provided by environmental variables: + export PACKET_PROJECT for project + export PACKET_URL for url + export PACKET_AUTH_TOKEN for auth_token +` +} + +// Addrs function +func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) { + authToken := argsOrEnv(args, "auth_token", "PACKET_AUTH_TOKEN") + projectID := argsOrEnv(args, "project", "PACKET_PROJECT") + packetURL := argsOrEnv(args, "url", "PACKET_URL") + addressType := args["address_type"] + packetFacilities := args["facility"] + packetTags := args["tag"] + + if addressType != "private_v4" && addressType != "public_v4" && addressType != "public_v6" { + l.Printf("[INFO] discover-packet: Address type %s is not supported. Valid values are {private_v4,public_v4,public_v6}. 
Falling back to 'private_v4'", addressType) + addressType = "private_v4" + } + + includeFacilities := includeArgs(packetFacilities) + includeTags := includeArgs(packetTags) + + c, err := client(p.userAgent, packetURL, authToken) + if err != nil { + return nil, fmt.Errorf("discover-packet: Initializing Packet client failed: %s", err) + } + + var devices []packngo.Device + + if projectID == "" { + return nil, fmt.Errorf("discover-packet: 'project' parameter must be provider") + } + + devices, _, err = c.Devices.List(projectID, nil) + if err != nil { + return nil, fmt.Errorf("discover-packet: Fetching Packet devices failed: %s", err) + } + + var addrs []string + for _, d := range devices { + + if len(includeFacilities) > 0 && !Include(includeFacilities, d.Facility.Code) { + continue + } + + if len(includeTags) > 0 && !Any(d.Tags, func(v string) bool { return Include(includeTags, v) }) { + continue + } + + addressFamily := 4 + if addressType == "public_v6" { + addressFamily = 6 + } + for _, n := range d.Network { + + if (n.Public == (addressType == "public_v4" || addressType == "public_v6")) && n.AddressFamily == addressFamily { + addrs = append(addrs, n.Address) + } + } + } + return addrs, nil +} + +func client(useragent, url, token string) (*packngo.Client, error) { + if url == "" { + url = baseURL + } + + return packngo.NewClientWithBaseURL(useragent, token, nil, url) +} +func argsOrEnv(args map[string]string, key, env string) string { + if value := args[key]; value != "" { + return value + } + return os.Getenv(env) +} + +func includeArgs(s string) []string { + var include []string + for _, localstring := range strings.Split(s, ",") { + if len(localstring) > 0 { + include = append(include, localstring) + } + } + return include +} + +// Index returns the first index of the target string t, or -1 if no match is found. +func Index(vs []string, t string) int { + for i, v := range vs { + if v == t { + return i + } + } + return -1 +} + +// Include returns true if the target string t is in the slice. +func Include(vs []string, t string) bool { + return Index(vs, t) >= 0 +} + +//Any returns true if one of the strings in the slice satisfies the predicate f. +func Any(vs []string, f func(string) bool) bool { + for _, v := range vs { + if f(v) { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/scaleway_discover.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/scaleway_discover.go new file mode 100644 index 000000000000..043cb2fe9083 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/scaleway_discover.go @@ -0,0 +1,81 @@ +// Package scaleway provides node discovery for Scaleway. 
+package scaleway + +import ( + "fmt" + "io/ioutil" + "log" + + api "github.com/nicolai86/scaleway-sdk" +) + +type Provider struct{} + +func (p *Provider) Help() string { + return `Scaleway: + + provider: "scaleway" + organization: The Scaleway organization access key + tag_name: The tag name to filter on + token: The Scaleway API access token + region: The Scalway region +` +} + +func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) { + if args["provider"] != "scaleway" { + return nil, fmt.Errorf("discover-scaleway: invalid provider " + args["provider"]) + } + + if l == nil { + l = log.New(ioutil.Discard, "", 0) + } + + organization := args["organization"] + tagName := args["tag_name"] + token := args["token"] + region := args["region"] + + l.Printf("[INFO] discover-scaleway: Organization is %q", organization) + l.Printf("[INFO] discover-scaleway: Region is %q", region) + + // Create a new API client + api, err := api.New(organization, token, region) + if err != nil { + return nil, fmt.Errorf("discover-scaleway: %s", err) + } + + // Currently fetching all servers since the API doesn't support + // filter options. + // api.GetServers() takes the following two arguments: + // * all (bool) - lets you list all servers in any state (stopped, running etc) + // * limit (int) - limits the results to a certain number. In this case we are listing + servers, err := api.GetServers(true, 0) + if err != nil { + return nil, fmt.Errorf("discover-scaleway: %s", err) + } + + // Filter servers by tag + var addrs []string + if servers != nil { + for _, server := range servers { + if stringInSlice(tagName, server.Tags) { + l.Printf("[DEBUG] discover-scaleway: Found server (%s) - %s with private IP: %s", + server.Name, server.Hostname, server.PrivateIP) + addrs = append(addrs, server.PrivateIP) + } + } + } + + l.Printf("[DEBUG] discover-scaleway: Found ip addresses: %v", addrs) + return addrs, nil +} + +func stringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/softlayer/softlayer_discover.go b/vendor/github.com/hashicorp/go-discover/provider/softlayer/softlayer_discover.go new file mode 100644 index 000000000000..b07c70ce7058 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/softlayer/softlayer_discover.go @@ -0,0 +1,67 @@ +// Package softlayer provides node discovery for Softlayer. 
+package softlayer + +import ( + "fmt" + "io/ioutil" + "log" + + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" +) + +type Provider struct{} + +func (p *Provider) Help() string { + return `Softlayer: + + provider: "softlayer" + datacenter: The SoftLayer datacenter to filter on + tag_value: The tag value to filter on + username: The SoftLayer username to use + api_key: The SoftLayer api key to use +` +} + +func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) { + if args["provider"] != "softlayer" { + return nil, fmt.Errorf("discover-softlayer: invalid provider " + args["provider"]) + } + + if l == nil { + l = log.New(ioutil.Discard, "", 0) + } + + datacenter := args["datacenter"] + tagValue := args["tag_value"] + username := args["username"] + apiKey := args["api_key"] + + l.Printf("[INFO] discover-softlayer: Datacenter is %q", datacenter) + + // Create a session and get a service + sess := session.New(username, apiKey) + service := services.GetAccountService(sess) + + // Compose the filter + mask := "id,hostname,domain,tagReferences[tag[name]],primaryBackendIpAddress,datacenter" + filterVMs := filter.Build( + filter.Path("virtualGuests.datacenter.name").Eq(datacenter), + filter.Path("virtualGuests.tagReferences.tag.name").Eq(tagValue), + ) + + // Get the virtual machines that match the filter + vms, err := service.Mask(mask).Filter(filterVMs).GetVirtualGuests() + if err != nil { + return nil, fmt.Errorf("discover-softlayer: %s", err) + } + + var addrs []string + for _, vm := range vms { + l.Printf("[INFO] discover-softlayer: Found instance (%d) %s.%s with private IP: %s", + *vm.Id, *vm.Hostname, *vm.Domain, *vm.PrimaryBackendIpAddress) + addrs = append(addrs, *vm.PrimaryBackendIpAddress) + } + return addrs, nil +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/tencentcloud/tencentcloud_discover.go b/vendor/github.com/hashicorp/go-discover/provider/tencentcloud/tencentcloud_discover.go new file mode 100644 index 000000000000..57e195678f49 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/tencentcloud/tencentcloud_discover.go @@ -0,0 +1,134 @@ +// Package tencentcloud provides node discovery for TencentCloud. +package tencentcloud + +import ( + "fmt" + "io/ioutil" + "log" + + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile" + cvm "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312" +) + +type Provider struct { + userAgent string +} + +func (p *Provider) SetUserAgent(s string) { + p.userAgent = s +} + +func (p *Provider) Help() string { + return `TencentCloud: + + provider: "tencentcloud" + region: The TencentCloud region + tag_key: The tag key to filter on + tag_value: The tag value to filter on + address_type: "private_v4", "public_v4". (default: "private_v4") + access_key_id: The secret id of TencentCloud + access_key_secret: The secret key of TencentCloud + + This required permission to 'cvm:DescribeInstances'. + It is recommended you make a dedicated key used only for auto-joining. 
+` +} + +func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) { + if args["provider"] != "tencentcloud" { + return nil, fmt.Errorf("discover-tencentcloud: invalid provider " + args["provider"]) + } + + if l == nil { + l = log.New(ioutil.Discard, "", 0) + } + + region := args["region"] + tagKey := args["tag_key"] + tagValue := args["tag_value"] + addressType := args["address_type"] + accessKeyID := args["access_key_id"] + accessKeySecret := args["access_key_secret"] + + l.Printf("[DEBUG] discover-tencentcloud: Using region=%s, tag_key=%s, tag_value=%s", region, tagKey, tagValue) + if accessKeyID == "" { + l.Printf("[DEBUG] discover-tencentcloud: No static credentials provided") + } else { + l.Printf("[DEBUG] discover-tencentcloud: Static credentials provided") + } + + if region == "" { + l.Printf("[DEBUG] discover-tencentcloud: Region not provided") + return nil, fmt.Errorf("discover-tencentcloud: region missing") + } + l.Printf("[DEBUG] discover-tencentcloud: region is %s", region) + + if addressType == "" { + addressType = "private_v4" + } + + if addressType != "private_v4" && addressType != "public_v4" { + l.Printf("[DEBUG] discover-tencentcloud: Address type %s invalid", addressType) + return nil, fmt.Errorf("discover-tencentcloud: invalid address_type " + addressType) + } + l.Printf("[DEBUG] discover-tencentcloud: address type is %s", addressType) + + credential := common.NewCredential( + accessKeyID, + accessKeySecret, + ) + + cpf := profile.NewClientProfile() + cpf.HttpProfile.ReqMethod = "POST" + cpf.HttpProfile.ReqTimeout = 300 + cpf.Language = "en-US" + cvmClient, _ := cvm.NewClient(credential, region, cpf) + + l.Printf("[DEBUG] discover-tencentcloud: Filter instances with %s=%s", tagKey, tagValue) + request := cvm.NewDescribeInstancesRequest() + request.Filters = []*cvm.Filter{ + { + Name: stringToPointer("instance-state"), + Values: []*string{stringToPointer("RUNNING")}, + }, + { + Name: stringToPointer("tag:" + tagKey), + Values: []*string{stringToPointer(tagValue)}, + }, + } + + response, err := cvmClient.DescribeInstances(request) + if err != nil { + l.Printf("[DEBUG] discover-tencentcloud: DescribeInstances failed, %s", err) + return nil, fmt.Errorf("discover-tencentcloud: DescribeInstances failed, %s", err) + } + l.Printf("[DEBUG] discover-tencentcloud: Found %d instances", len(response.Response.InstanceSet)) + + var addrs []string + for _, v := range response.Response.InstanceSet { + switch addressType { + case "public_v4": + if len(v.PublicIpAddresses) == 0 { + l.Printf("[DEBUG] discover-tencentcloud: Instance %s has no public_v4", *v.InstanceId) + continue + } + l.Printf("[DEBUG] discover-tencentcloud: Instance %s has public_v4 %v", *v.InstanceId, *v.PublicIpAddresses[0]) + addrs = append(addrs, *v.PublicIpAddresses[0]) + case "private_v4": + if len(v.PrivateIpAddresses) == 0 { + l.Printf("[DEBUG] discover-tencentcloud: Instance %s has no private_v4", *v.InstanceId) + continue + } + l.Printf("[DEBUG] discover-tencentcloud: Instance %s has private_v4 %v", *v.InstanceId, *v.PrivateIpAddresses[0]) + addrs = append(addrs, *v.PrivateIpAddresses[0]) + } + } + + l.Printf("[DEBUG] discover-tencentcloud: Found address: %v", addrs) + return addrs, nil +} + +func stringToPointer(s string) *string { + return &s +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/triton/triton_discover.go b/vendor/github.com/hashicorp/go-discover/provider/triton/triton_discover.go new file mode 100644 index 000000000000..c65405a021c8 --- /dev/null +++ 
b/vendor/github.com/hashicorp/go-discover/provider/triton/triton_discover.go @@ -0,0 +1,90 @@ +// Package aws provides node discovery for Joyent Triton. +package triton + +import ( + "context" + "fmt" + "io/ioutil" + "log" + + "github.com/joyent/triton-go" + "github.com/joyent/triton-go/authentication" + "github.com/joyent/triton-go/compute" +) + +type Provider struct{} + +func (p *Provider) Help() string { + return `triton: + + provider: "triton" + account: The Triton account name + key_id: The Triton KeyID + url: The Triton URL + tag_key: The tag key to filter on + tag_value: The tag value to filter on +` +} + +func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) { + if args["provider"] != "triton" { + return nil, fmt.Errorf("discover-triton: invalid provider " + args["provider"]) + } + + if l == nil { + l = log.New(ioutil.Discard, "", 0) + } + + account := args["account"] + keyID := args["key_id"] + url := args["url"] + tagKey := args["tag_key"] + tagValue := args["tag_value"] + + l.Printf("[INFO] discover-triton: Account is %q", account) + l.Printf("[INFO] discover-triton: URL is %q", url) + + input := authentication.SSHAgentSignerInput{ + KeyID: keyID, + AccountName: account, + } + signer, err := authentication.NewSSHAgentSigner(input) + if err != nil { + return nil, fmt.Errorf("error Creating SSH Agent Signer: %v", err) + } + + config := &triton.ClientConfig{ + TritonURL: url, + AccountName: account, + Signers: []authentication.Signer{signer}, + } + + c, err := compute.NewClient(config) + if err != nil { + return nil, fmt.Errorf("error constructing Compute Client: %v", err) + } + + t := make(map[string]interface{}, 0) + t[tagKey] = tagValue + + listInput := &compute.ListInstancesInput{ + Tags: t, + } + instances, err := c.Instances().List(context.Background(), listInput) + if err != nil { + return nil, fmt.Errorf("error getting instance list: %v", err) + } + var addrs []string + for _, instance := range instances { + l.Printf("[DEBUG] Instance ID: %q", instance.ID) + l.Printf("[DEBUG] Instance PrimaryIP: %q", instance.PrimaryIP) + if instance.PrimaryIP == "" { + l.Printf("[DEBUG] discover-triton: Instance %s has no marked PrimaryIP", instance.ID) + continue + } else { + addrs = append(addrs, instance.PrimaryIP) + } + } + + return addrs, nil +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/vsphere/vsphere_discover.go b/vendor/github.com/hashicorp/go-discover/provider/vsphere/vsphere_discover.go new file mode 100644 index 000000000000..ab5bb6b4c3f2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/vsphere/vsphere_discover.go @@ -0,0 +1,417 @@ +// Package vsphere provides node discovery for VMware vSphere. +// +// The package performs discovery by searching vCenter for all nodes matching a +// certain tag, it then discovers all known IP addresses through VMware tools +// that are not loopback or auto-configuration addresses. +// +// This package requires at least vSphere 6.0 in order to function. +package vsphere + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "net" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/vic/pkg/vsphere/tags" + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// providerLog is the local provider logger. This should be initialized from +// the provider entry point. +var logger *log.Logger + +// setLog sets the logger. 
+func setLog(l *log.Logger) { + if l != nil { + logger = l + } else { + logger = log.New(ioutil.Discard, "", 0) + } +} + +// discoverErr prints out a friendly error heading for the top-level discovery +// errors. It should only be used in the Addrs method. +func discoverErr(format string, a ...interface{}) error { + var s string + if len(a) > 1 { + s = fmt.Sprintf(format, a...) + } else { + s = format + } + return fmt.Errorf("discover-vsphere: %s", s) +} + +// valueOrEnv provides a way of suppling configuration values through +// environment variables. Defined values always take priority. +func valueOrEnv(config map[string]string, key, env string) string { + if v := config[key]; v != "" { + return v + } + if v := os.Getenv(env); v != "" { + logger.Printf("[DEBUG] Using value of %s for configuration of %s", env, key) + return v + } + return "" +} + +// vSphereClient is a client connection manager for the vSphere provider. +type vSphereClient struct { + // The VIM/govmomi client. + VimClient *govmomi.Client + + // The specialized tags client SDK imported from vmware/vic. + TagsClient *tags.RestClient +} + +// vimURL returns a URL to pass to the VIM SOAP client. +func vimURL(server, user, password string) (*url.URL, error) { + u, err := url.Parse("https://" + server + "/sdk") + if err != nil { + return nil, fmt.Errorf("error parsing url: %s", err) + } + + u.User = url.UserPassword(user, password) + + return u, nil +} + +// newVSphereClient returns a new vSphereClient after setting up the necessary +// connections. +func newVSphereClient(ctx context.Context, host, user, password string, insecure bool) (*vSphereClient, error) { + logger.Println("[DEBUG] Connecting to vSphere client endpoints") + + client := new(vSphereClient) + + u, err := vimURL(host, user, password) + if err != nil { + return nil, fmt.Errorf("error generating SOAP endpoint url: %s", err) + } + + // Set up the VIM/govmomi client connection + client.VimClient, err = newVimSession(ctx, u, insecure) + if err != nil { + return nil, err + } + + client.TagsClient, err = newRestSession(ctx, u, insecure) + if err != nil { + return nil, err + } + + logger.Println("[DEBUG] All vSphere client endpoints connected successfully") + return client, nil +} + +// newVimSession connects the VIM SOAP API client connection. +func newVimSession(ctx context.Context, u *url.URL, insecure bool) (*govmomi.Client, error) { + logger.Printf("[DEBUG] Creating new SOAP API session on endpoint %s", u.Host) + client, err := govmomi.NewClient(ctx, u, insecure) + if err != nil { + return nil, fmt.Errorf("error setting up new vSphere SOAP client: %s", err) + } + + logger.Println("[DEBUG] SOAP API session creation successful") + return client, nil +} + +// newRestSession connects to the vSphere REST API endpoint, necessary for +// tags. +func newRestSession(ctx context.Context, u *url.URL, insecure bool) (*tags.RestClient, error) { + logger.Printf("[DEBUG] Creating new CIS REST API session on endpoint %s", u.Host) + client := tags.NewClient(u, insecure, "") + if err := client.Login(ctx); err != nil { + return nil, fmt.Errorf("error connecting to CIS REST endpoint: %s", err) + } + + logger.Println("[DEBUG] CIS REST API session creation successful") + return client, nil +} + +// Provider defines the vSphere discovery provider. +type Provider struct{} + +// Help implements the Provider interface for the vsphere package. +func (p *Provider) Help() string { + return `VMware vSphere: + + provider: "vsphere" + tag_name: The name of the tag to look up. 
+ category_name: The category of the tag to look up. + host: The host of the vSphere server to connect to. + user: The username to connect as. + password: The password of the user to connect to vSphere as. + insecure_ssl: Whether or not to skip SSL certificate validation. + timeout: Discovery context timeout (default: 10m) +` +} + +// Addrs implements the Provider interface for the vsphere package. +func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) { + if args["provider"] != "vsphere" { + return nil, discoverErr("invalid provider %s", args["provider"]) + } + + setLog(l) + + tagName := args["tag_name"] + categoryName := args["category_name"] + host := valueOrEnv(args, "host", "VSPHERE_SERVER") + user := valueOrEnv(args, "user", "VSPHERE_USER") + password := valueOrEnv(args, "password", "VSPHERE_PASSWORD") + insecure, err := strconv.ParseBool(valueOrEnv(args, "insecure_ssl", "VSPHERE_ALLOW_UNVERIFIED_SSL")) + if err != nil { + logger.Println("[DEBUG] Non-truthy/falsey value for insecure_ssl, assuming false") + } + timeout, err := time.ParseDuration(args["timeout"]) + if err != nil { + logger.Println("[DEBUG] Non-time value given for timeout, assuming 10m") + timeout = time.Minute * 10 + } + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + client, err := newVSphereClient(ctx, host, user, password, insecure) + if err != nil { + return nil, discoverErr(err.Error()) + } + + if tagName == "" || categoryName == "" { + return nil, discoverErr("both tag_name and category_name must be specified") + } + + logger.Printf("[INFO] Locating all virtual machine IP addresses with tag %q in category %q", tagName, categoryName) + + tagID, err := tagIDFromName(ctx, client.TagsClient, tagName, categoryName) + if err != nil { + return nil, discoverErr(err.Error()) + } + + addrs, err := virtualMachineIPsForTag(ctx, client, tagID) + if err != nil { + return nil, discoverErr(err.Error()) + } + + logger.Printf("[INFO] Final IP address list: %s", strings.Join(addrs, ",")) + return addrs, nil +} + +// tagIDFromName helps convert the tag and category names into the final ID +// used for discovery. +func tagIDFromName(ctx context.Context, client *tags.RestClient, name, category string) (string, error) { + logger.Printf("[DEBUG] Fetching tag ID for tag name %q and category %q", name, category) + + categoryID, err := tagCategoryByName(ctx, client, category) + if err != nil { + return "", err + } + + return tagByName(ctx, client, name, categoryID) +} + +// tagCategoryByName converts a tag category name into its ID. +func tagCategoryByName(ctx context.Context, client *tags.RestClient, name string) (string, error) { + cats, err := client.GetCategoriesByName(ctx, name) + if err != nil { + return "", fmt.Errorf("could not get category for name %q: %s", name, err) + } + + if len(cats) < 1 { + return "", fmt.Errorf("category name %q not found", name) + } + if len(cats) > 1 { + // Although GetCategoriesByName does not seem to think that tag categories + // are unique, empirical observation via the console and API show that they + // are. This error case is handled anyway. + return "", fmt.Errorf("multiple categories with name %q found", name) + } + + return cats[0].ID, nil +} + +// tagByName converts a tag name into its ID. 
+func tagByName(ctx context.Context, client *tags.RestClient, name, categoryID string) (string, error) { + tids, err := client.GetTagByNameForCategory(ctx, name, categoryID) + if err != nil { + return "", fmt.Errorf("could not get tag for name %q: %s", name, err) + } + + if len(tids) < 1 { + return "", fmt.Errorf("tag name %q not found in category ID %q", name, categoryID) + } + if len(tids) > 1 { + // This situation is very similar to the one in tagCategoryByName. The API + // docs even say that tags need to be unique in categories, yet + // GetTagByNameForCategory still returns multiple results. + return "", fmt.Errorf("multiple tags with name %q found", name) + } + + logger.Printf("[DEBUG] Tag ID is %q", tids[0].ID) + return tids[0].ID, nil +} + +// virtualMachineIPsForTag is a higher-level wrapper that calls out to +// functions to fetch all of the virtual machines matching a certain tag ID, +// and then gets all of the IP addresses for those virtual machines. +func virtualMachineIPsForTag(ctx context.Context, client *vSphereClient, id string) ([]string, error) { + vms, err := virtualMachinesForTag(ctx, client, id) + if err != nil { + return nil, err + } + + return ipAddrsForVirtualMachines(ctx, client, vms) +} + +// virtualMachinesForTag discovers all of the virtual machines that match a +// specific tag ID and returns their higher level helper objects. +func virtualMachinesForTag(ctx context.Context, client *vSphereClient, id string) ([]*object.VirtualMachine, error) { + logger.Printf("[DEBUG] Locating all virtual machines under tag ID %q", id) + + var vms []*object.VirtualMachine + + objs, err := client.TagsClient.ListAttachedObjects(ctx, id) + if err != nil { + return nil, err + } + for i, obj := range objs { + switch { + case obj.Type == nil || obj.ID == nil: + logger.Printf("[WARN] Discovered object at index %d has either no ID or type", i) + continue + case *obj.Type != "VirtualMachine": + logger.Printf("[DEBUG] Discovered object ID %q is not a virutal machine", *obj.ID) + continue + } + vm, err := virtualMachineFromMOID(ctx, client.VimClient, *obj.ID) + if err != nil { + return nil, fmt.Errorf("error locating virtual machine with ID %q: %s", *obj.ID, err) + } + vms = append(vms, vm) + } + + logger.Printf("[DEBUG] Discovered virtual machines: %s", virtualMachineNames(vms)) + return vms, nil +} + +// ipAddrsForVirtualMachines takes a set of virtual machines and returns a +// consolidated list of IP addresses for all of the VMs. +func ipAddrsForVirtualMachines(ctx context.Context, client *vSphereClient, vms []*object.VirtualMachine) ([]string, error) { + var addrs []string + for _, vm := range vms { + as, err := buildAndSelectGuestIPs(ctx, vm) + if err != nil { + return nil, err + } + addrs = append(addrs, as...) + } + return addrs, nil +} + +// virtualMachineFromMOID locates a virtual machine by its managed object +// reference ID. +func virtualMachineFromMOID(ctx context.Context, client *govmomi.Client, id string) (*object.VirtualMachine, error) { + logger.Printf("[DEBUG] Locating VM with managed object ID %q", id) + + finder := find.NewFinder(client.Client, false) + + ref := types.ManagedObjectReference{ + Type: "VirtualMachine", + Value: id, + } + + vm, err := finder.ObjectReference(ctx, ref) + if err != nil { + return nil, err + } + // Should be safe to return here. If our reference returned here and is not a + // VM, then we have bigger problems and to be honest we should be panicking + // anyway. 
+ return vm.(*object.VirtualMachine), nil +} + +// virtualMachineProperties is a convenience method that wraps fetching the +// VirtualMachine MO from its higher-level object. +// +// It takes a list of property keys to fetch. Keeping the property set small +// can sometimes result in significant performance improvements. +func virtualMachineProperties(ctx context.Context, vm *object.VirtualMachine, keys []string) (*mo.VirtualMachine, error) { + logger.Printf("[DEBUG] Fetching properties for VM %q", vm.Name()) + var props mo.VirtualMachine + if err := vm.Properties(ctx, vm.Reference(), keys, &props); err != nil { + return nil, err + } + return &props, nil +} + +// buildAndSelectGuestIPs builds a list of IP addresses known to VMware tools, +// skipping local and auto-configuration addresses. +// +// The builder is non-discriminate and is only deterministic to the order that +// it discovers addresses in VMware tools. +func buildAndSelectGuestIPs(ctx context.Context, vm *object.VirtualMachine) ([]string, error) { + logger.Printf("[DEBUG] Discovering addresses for virtual machine %q", vm.Name()) + var addrs []string + + props, err := virtualMachineProperties(ctx, vm, []string{"guest.net"}) + if err != nil { + return nil, fmt.Errorf("cannot fetch properties for VM %q: %s", vm.Name(), err) + } + + if props.Guest == nil || props.Guest.Net == nil { + logger.Printf("[WARN] No networking stack information available for %q or VMware tools not running", vm.Name()) + return nil, nil + } + + // Now fetch all IP addresses, checking at the same time to see if the IP + // address is eligible to be a primary IP address. + for _, n := range props.Guest.Net { + if n.IpConfig != nil { + for _, addr := range n.IpConfig.IpAddress { + if skipIPAddr(net.ParseIP(addr.IpAddress)) { + continue + } + addrs = append(addrs, addr.IpAddress) + } + } + } + + logger.Printf("[INFO] Discovered IP addresses for virtual machine %q: %s", vm.Name(), strings.Join(addrs, ",")) + return addrs, nil +} + +// skipIPAddr defines the set of criteria that buildAndSelectGuestIPs uses to +// check to see if it needs to skip an IP address. +func skipIPAddr(ip net.IP) bool { + switch { + case ip.IsLinkLocalMulticast(): + fallthrough + case ip.IsLinkLocalUnicast(): + fallthrough + case ip.IsLoopback(): + fallthrough + case ip.IsMulticast(): + return true + } + return false +} + +// virtualMachineNames is a helper method that returns all the names for a list +// of virtual machines, comma separated. 
+func virtualMachineNames(vms []*object.VirtualMachine) string { + var s []string + for _, vm := range vms { + s = append(s, vm.Name()) + } + return strings.Join(s, ",") +} diff --git a/vendor/github.com/hashicorp/mdns/.gitignore b/vendor/github.com/hashicorp/mdns/.gitignore new file mode 100644 index 000000000000..836562412fe8 --- /dev/null +++ b/vendor/github.com/hashicorp/mdns/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/mdns/LICENSE b/vendor/github.com/hashicorp/mdns/LICENSE new file mode 100644 index 000000000000..a5df10e675dd --- /dev/null +++ b/vendor/github.com/hashicorp/mdns/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/hashicorp/mdns/README.md b/vendor/github.com/hashicorp/mdns/README.md new file mode 100644 index 000000000000..80f3d90ab387 --- /dev/null +++ b/vendor/github.com/hashicorp/mdns/README.md @@ -0,0 +1,37 @@ +mdns +==== + +Simple mDNS client/server library in Golang. mDNS or Multicast DNS can be +used to discover services on the local network without the use of an authoritative +DNS server. This enables peer-to-peer discovery. It is important to note that many +networks restrict the use of multicasting, which prevents mDNS from functioning. +Notably, multicast cannot be used in any sort of cloud, or shared infrastructure +environment. However it works well in most office, home, or private infrastructure +environments. 
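Beyond the basic Lookup example shown below, a query can also be customized through the QueryParam type and Query function defined in client.go later in this patch. A minimal sketch, assuming only those exported names (Service, Domain, Timeout, Entries) and the ServiceEntry fields they populate:

    package main

    import (
        "fmt"
        "time"

        "github.com/hashicorp/mdns"
    )

    func main() {
        // Buffered channel so entry delivery never blocks the client.
        entries := make(chan *mdns.ServiceEntry, 8)
        go func() {
            for e := range entries {
                fmt.Printf("found %s at %v:%d\n", e.Name, e.AddrV4, e.Port)
            }
        }()

        // Customize the lookup instead of relying on Lookup's defaults.
        params := &mdns.QueryParam{
            Service: "_foobar._tcp",
            Domain:  "local",
            Timeout: 3 * time.Second,
            Entries: entries,
        }
        if err := mdns.Query(params); err != nil {
            fmt.Println("mdns query failed:", err)
        }
        close(entries)
    }
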
+ +Using the library is very simple, here is an example of publishing a service entry: + + // Setup our service export + host, _ := os.Hostname() + info := []string{"My awesome service"}, + service, _ := NewMDNSService(host, "_foobar._tcp", "", "", 8000, nil, info) + + // Create the mDNS server, defer shutdown + server, _ := mdns.NewServer(&mdns.Config{Zone: service}) + defer server.Shutdown() + + +Doing a lookup for service providers is also very simple: + + // Make a channel for results and start listening + entriesCh := make(chan *mdns.ServiceEntry, 4) + go func() { + for entry := range entriesCh { + fmt.Printf("Got new entry: %v\n", entry) + } + }() + + // Start the lookup + mdns.Lookup("_foobar._tcp", entriesCh) + close(entriesCh) + diff --git a/vendor/github.com/hashicorp/mdns/client.go b/vendor/github.com/hashicorp/mdns/client.go new file mode 100644 index 000000000000..62351231aa9a --- /dev/null +++ b/vendor/github.com/hashicorp/mdns/client.go @@ -0,0 +1,365 @@ +package mdns + +import ( + "fmt" + "log" + "net" + "strings" + "sync/atomic" + "time" + + "github.com/miekg/dns" + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +// ServiceEntry is returned after we query for a service +type ServiceEntry struct { + Name string + Host string + AddrV4 net.IP + AddrV6 net.IP + Port int + Info string + InfoFields []string + + Addr net.IP // @Deprecated + + hasTXT bool + sent bool +} + +// complete is used to check if we have all the info we need +func (s *ServiceEntry) complete() bool { + return (s.AddrV4 != nil || s.AddrV6 != nil || s.Addr != nil) && s.Port != 0 && s.hasTXT +} + +// QueryParam is used to customize how a Lookup is performed +type QueryParam struct { + Service string // Service to lookup + Domain string // Lookup domain, default "local" + Timeout time.Duration // Lookup timeout, default 1 second + Interface *net.Interface // Multicast interface to use + Entries chan<- *ServiceEntry // Entries Channel + WantUnicastResponse bool // Unicast response desired, as per 5.4 in RFC +} + +// DefaultParams is used to return a default set of QueryParam's +func DefaultParams(service string) *QueryParam { + return &QueryParam{ + Service: service, + Domain: "local", + Timeout: time.Second, + Entries: make(chan *ServiceEntry), + WantUnicastResponse: false, // TODO(reddaly): Change this default. + } +} + +// Query looks up a given service, in a domain, waiting at most +// for a timeout before finishing the query. The results are streamed +// to a channel. Sends will not block, so clients should make sure to +// either read or buffer. 
+func Query(params *QueryParam) error { + // Create a new client + client, err := newClient() + if err != nil { + return err + } + defer client.Close() + + // Set the multicast interface + if params.Interface != nil { + if err := client.setInterface(params.Interface); err != nil { + return err + } + } + + // Ensure defaults are set + if params.Domain == "" { + params.Domain = "local" + } + if params.Timeout == 0 { + params.Timeout = time.Second + } + + // Run the query + return client.query(params) +} + +// Lookup is the same as Query, however it uses all the default parameters +func Lookup(service string, entries chan<- *ServiceEntry) error { + params := DefaultParams(service) + params.Entries = entries + return Query(params) +} + +// Client provides a query interface that can be used to +// search for service providers using mDNS +type client struct { + ipv4UnicastConn *net.UDPConn + ipv6UnicastConn *net.UDPConn + + ipv4MulticastConn *net.UDPConn + ipv6MulticastConn *net.UDPConn + + closed int32 + closedCh chan struct{} // TODO(reddaly): This doesn't appear to be used. +} + +// NewClient creates a new mdns Client that can be used to query +// for records +func newClient() (*client, error) { + // TODO(reddaly): At least attempt to bind to the port required in the spec. + // Create a IPv4 listener + uconn4, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) + if err != nil { + log.Printf("[ERR] mdns: Failed to bind to udp4 port: %v", err) + } + uconn6, err := net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0}) + if err != nil { + log.Printf("[ERR] mdns: Failed to bind to udp6 port: %v", err) + } + + if uconn4 == nil && uconn6 == nil { + return nil, fmt.Errorf("failed to bind to any unicast udp port") + } + + mconn4, err := net.ListenMulticastUDP("udp4", nil, ipv4Addr) + if err != nil { + log.Printf("[ERR] mdns: Failed to bind to udp4 port: %v", err) + } + mconn6, err := net.ListenMulticastUDP("udp6", nil, ipv6Addr) + if err != nil { + log.Printf("[ERR] mdns: Failed to bind to udp6 port: %v", err) + } + + if mconn4 == nil && mconn6 == nil { + return nil, fmt.Errorf("failed to bind to any multicast udp port") + } + + c := &client{ + ipv4MulticastConn: mconn4, + ipv6MulticastConn: mconn6, + ipv4UnicastConn: uconn4, + ipv6UnicastConn: uconn6, + closedCh: make(chan struct{}), + } + return c, nil +} + +// Close is used to cleanup the client +func (c *client) Close() error { + if !atomic.CompareAndSwapInt32(&c.closed, 0, 1) { + // something else already closed it + return nil + } + + log.Printf("[INFO] mdns: Closing client %v", *c) + close(c.closedCh) + + if c.ipv4UnicastConn != nil { + c.ipv4UnicastConn.Close() + } + if c.ipv6UnicastConn != nil { + c.ipv6UnicastConn.Close() + } + if c.ipv4MulticastConn != nil { + c.ipv4MulticastConn.Close() + } + if c.ipv6MulticastConn != nil { + c.ipv6MulticastConn.Close() + } + + return nil +} + +// setInterface is used to set the query interface, uses sytem +// default if not provided +func (c *client) setInterface(iface *net.Interface) error { + p := ipv4.NewPacketConn(c.ipv4UnicastConn) + if err := p.SetMulticastInterface(iface); err != nil { + return err + } + p2 := ipv6.NewPacketConn(c.ipv6UnicastConn) + if err := p2.SetMulticastInterface(iface); err != nil { + return err + } + p = ipv4.NewPacketConn(c.ipv4MulticastConn) + if err := p.SetMulticastInterface(iface); err != nil { + return err + } + p2 = ipv6.NewPacketConn(c.ipv6MulticastConn) + if err := p2.SetMulticastInterface(iface); err != nil { + return err + } + return nil 
+} + +// query is used to perform a lookup and stream results +func (c *client) query(params *QueryParam) error { + // Create the service name + serviceAddr := fmt.Sprintf("%s.%s.", trimDot(params.Service), trimDot(params.Domain)) + + // Start listening for response packets + msgCh := make(chan *dns.Msg, 32) + go c.recv(c.ipv4UnicastConn, msgCh) + go c.recv(c.ipv6UnicastConn, msgCh) + go c.recv(c.ipv4MulticastConn, msgCh) + go c.recv(c.ipv6MulticastConn, msgCh) + + // Send the query + m := new(dns.Msg) + m.SetQuestion(serviceAddr, dns.TypePTR) + // RFC 6762, section 18.12. Repurposing of Top Bit of qclass in Question + // Section + // + // In the Question Section of a Multicast DNS query, the top bit of the qclass + // field is used to indicate that unicast responses are preferred for this + // particular question. (See Section 5.4.) + if params.WantUnicastResponse { + m.Question[0].Qclass |= 1 << 15 + } + m.RecursionDesired = false + if err := c.sendQuery(m); err != nil { + return err + } + + // Map the in-progress responses + inprogress := make(map[string]*ServiceEntry) + + // Listen until we reach the timeout + finish := time.After(params.Timeout) + for { + select { + case resp := <-msgCh: + var inp *ServiceEntry + for _, answer := range append(resp.Answer, resp.Extra...) { + // TODO(reddaly): Check that response corresponds to serviceAddr? + switch rr := answer.(type) { + case *dns.PTR: + // Create new entry for this + inp = ensureName(inprogress, rr.Ptr) + + case *dns.SRV: + // Check for a target mismatch + if rr.Target != rr.Hdr.Name { + alias(inprogress, rr.Hdr.Name, rr.Target) + } + + // Get the port + inp = ensureName(inprogress, rr.Hdr.Name) + inp.Host = rr.Target + inp.Port = int(rr.Port) + + case *dns.TXT: + // Pull out the txt + inp = ensureName(inprogress, rr.Hdr.Name) + inp.Info = strings.Join(rr.Txt, "|") + inp.InfoFields = rr.Txt + inp.hasTXT = true + + case *dns.A: + // Pull out the IP + inp = ensureName(inprogress, rr.Hdr.Name) + inp.Addr = rr.A // @Deprecated + inp.AddrV4 = rr.A + + case *dns.AAAA: + // Pull out the IP + inp = ensureName(inprogress, rr.Hdr.Name) + inp.Addr = rr.AAAA // @Deprecated + inp.AddrV6 = rr.AAAA + } + } + + if inp == nil { + continue + } + + // Check if this entry is complete + if inp.complete() { + if inp.sent { + continue + } + inp.sent = true + select { + case params.Entries <- inp: + default: + } + } else { + // Fire off a node specific query + m := new(dns.Msg) + m.SetQuestion(inp.Name, dns.TypePTR) + m.RecursionDesired = false + if err := c.sendQuery(m); err != nil { + log.Printf("[ERR] mdns: Failed to query instance %s: %v", inp.Name, err) + } + } + case <-finish: + return nil + } + } +} + +// sendQuery is used to multicast a query out +func (c *client) sendQuery(q *dns.Msg) error { + buf, err := q.Pack() + if err != nil { + return err + } + if c.ipv4UnicastConn != nil { + c.ipv4UnicastConn.WriteToUDP(buf, ipv4Addr) + } + if c.ipv6UnicastConn != nil { + c.ipv6UnicastConn.WriteToUDP(buf, ipv6Addr) + } + return nil +} + +// recv is used to receive until we get a shutdown +func (c *client) recv(l *net.UDPConn, msgCh chan *dns.Msg) { + if l == nil { + return + } + buf := make([]byte, 65536) + for atomic.LoadInt32(&c.closed) == 0 { + n, err := l.Read(buf) + + if atomic.LoadInt32(&c.closed) == 1 { + return + } + + if err != nil { + log.Printf("[ERR] mdns: Failed to read packet: %v", err) + continue + } + msg := new(dns.Msg) + if err := msg.Unpack(buf[:n]); err != nil { + log.Printf("[ERR] mdns: Failed to unpack packet: %v", err) + continue + } 
+ select { + case msgCh <- msg: + case <-c.closedCh: + return + } + } +} + +// ensureName is used to ensure the named node is in progress +func ensureName(inprogress map[string]*ServiceEntry, name string) *ServiceEntry { + if inp, ok := inprogress[name]; ok { + return inp + } + inp := &ServiceEntry{ + Name: name, + } + inprogress[name] = inp + return inp +} + +// alias is used to setup an alias between two entries +func alias(inprogress map[string]*ServiceEntry, src, dst string) { + srcEntry := ensureName(inprogress, src) + inprogress[dst] = srcEntry +} diff --git a/vendor/github.com/hashicorp/mdns/go.mod b/vendor/github.com/hashicorp/mdns/go.mod new file mode 100644 index 000000000000..dac6f0b8b78a --- /dev/null +++ b/vendor/github.com/hashicorp/mdns/go.mod @@ -0,0 +1,9 @@ +module github.com/hashicorp/mdns + +require ( + github.com/miekg/dns v1.0.14 + golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 // indirect + golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519 + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect + golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 // indirect +) diff --git a/vendor/github.com/hashicorp/mdns/go.sum b/vendor/github.com/hashicorp/mdns/go.sum new file mode 100644 index 000000000000..62896aae7a1b --- /dev/null +++ b/vendor/github.com/hashicorp/mdns/go.sum @@ -0,0 +1,10 @@ +github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519 h1:x6rhz8Y9CjbgQkccRGmELH6K+LJj7tOoh3XWeC1yaQM= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/hashicorp/mdns/server.go b/vendor/github.com/hashicorp/mdns/server.go new file mode 100644 index 000000000000..a33668da413a --- /dev/null +++ b/vendor/github.com/hashicorp/mdns/server.go @@ -0,0 +1,288 @@ +package mdns + +import ( + "fmt" + "log" + "net" + "strings" + "sync/atomic" + + "github.com/miekg/dns" +) + +const ( + ipv4mdns = "224.0.0.251" + ipv6mdns = "ff02::fb" + mdnsPort = 5353 + forceUnicastResponses = false +) + +var ( + ipv4Addr = &net.UDPAddr{ + IP: net.ParseIP(ipv4mdns), + Port: mdnsPort, + } + ipv6Addr = &net.UDPAddr{ + IP: net.ParseIP(ipv6mdns), + Port: mdnsPort, + } +) + +// Config is used to configure the mDNS server +type Config struct { + // Zone must be provided to support responding to queries + Zone Zone + + // Iface if provided binds the multicast listener to the given + // interface. If not provided, the system default multicase interface + // is used. + Iface *net.Interface + + // LogEmptyResponses indicates the server should print an informative message + // when there is an mDNS query for which the server has no response. 
+ LogEmptyResponses bool +} + +// mDNS server is used to listen for mDNS queries and respond if we +// have a matching local record +type Server struct { + config *Config + + ipv4List *net.UDPConn + ipv6List *net.UDPConn + + shutdown int32 + shutdownCh chan struct{} +} + +// NewServer is used to create a new mDNS server from a config +func NewServer(config *Config) (*Server, error) { + // Create the listeners + ipv4List, _ := net.ListenMulticastUDP("udp4", config.Iface, ipv4Addr) + ipv6List, _ := net.ListenMulticastUDP("udp6", config.Iface, ipv6Addr) + + // Check if we have any listener + if ipv4List == nil && ipv6List == nil { + return nil, fmt.Errorf("No multicast listeners could be started") + } + + s := &Server{ + config: config, + ipv4List: ipv4List, + ipv6List: ipv6List, + shutdownCh: make(chan struct{}), + } + + if ipv4List != nil { + go s.recv(s.ipv4List) + } + + if ipv6List != nil { + go s.recv(s.ipv6List) + } + + return s, nil +} + +// Shutdown is used to shutdown the listener +func (s *Server) Shutdown() error { + if !atomic.CompareAndSwapInt32(&s.shutdown, 0, 1) { + // something else already closed us + return nil + } + + close(s.shutdownCh) + + if s.ipv4List != nil { + s.ipv4List.Close() + } + if s.ipv6List != nil { + s.ipv6List.Close() + } + return nil +} + +// recv is a long running routine to receive packets from an interface +func (s *Server) recv(c *net.UDPConn) { + if c == nil { + return + } + buf := make([]byte, 65536) + for atomic.LoadInt32(&s.shutdown) == 0 { + n, from, err := c.ReadFrom(buf) + + if err != nil { + continue + } + if err := s.parsePacket(buf[:n], from); err != nil { + log.Printf("[ERR] mdns: Failed to handle query: %v", err) + } + } +} + +// parsePacket is used to parse an incoming packet +func (s *Server) parsePacket(packet []byte, from net.Addr) error { + var msg dns.Msg + if err := msg.Unpack(packet); err != nil { + log.Printf("[ERR] mdns: Failed to unpack packet: %v", err) + return err + } + return s.handleQuery(&msg, from) +} + +// handleQuery is used to handle an incoming query +func (s *Server) handleQuery(query *dns.Msg, from net.Addr) error { + if query.Opcode != dns.OpcodeQuery { + // "In both multicast query and multicast response messages, the OPCODE MUST + // be zero on transmission (only standard queries are currently supported + // over multicast). Multicast DNS messages received with an OPCODE other + // than zero MUST be silently ignored." Note: OpcodeQuery == 0 + return fmt.Errorf("mdns: received query with non-zero Opcode %v: %v", query.Opcode, *query) + } + if query.Rcode != 0 { + // "In both multicast query and multicast response messages, the Response + // Code MUST be zero on transmission. Multicast DNS messages received with + // non-zero Response Codes MUST be silently ignored." + return fmt.Errorf("mdns: received query with non-zero Rcode %v: %v", query.Rcode, *query) + } + + // TODO(reddaly): Handle "TC (Truncated) Bit": + // In query messages, if the TC bit is set, it means that additional + // Known-Answer records may be following shortly. A responder SHOULD + // record this fact, and wait for those additional Known-Answer records, + // before deciding whether to respond. If the TC bit is clear, it means + // that the querying host has no additional Known Answers. 
+ if query.Truncated { + return fmt.Errorf("[ERR] mdns: support for DNS requests with high truncated bit not implemented: %v", *query) + } + + var unicastAnswer, multicastAnswer []dns.RR + + // Handle each question + for _, q := range query.Question { + mrecs, urecs := s.handleQuestion(q) + multicastAnswer = append(multicastAnswer, mrecs...) + unicastAnswer = append(unicastAnswer, urecs...) + } + + // See section 18 of RFC 6762 for rules about DNS headers. + resp := func(unicast bool) *dns.Msg { + // 18.1: ID (Query Identifier) + // 0 for multicast response, query.Id for unicast response + id := uint16(0) + if unicast { + id = query.Id + } + + var answer []dns.RR + if unicast { + answer = unicastAnswer + } else { + answer = multicastAnswer + } + if len(answer) == 0 { + return nil + } + + return &dns.Msg{ + MsgHdr: dns.MsgHdr{ + Id: id, + + // 18.2: QR (Query/Response) Bit - must be set to 1 in response. + Response: true, + + // 18.3: OPCODE - must be zero in response (OpcodeQuery == 0) + Opcode: dns.OpcodeQuery, + + // 18.4: AA (Authoritative Answer) Bit - must be set to 1 + Authoritative: true, + + // The following fields must all be set to 0: + // 18.5: TC (TRUNCATED) Bit + // 18.6: RD (Recursion Desired) Bit + // 18.7: RA (Recursion Available) Bit + // 18.8: Z (Zero) Bit + // 18.9: AD (Authentic Data) Bit + // 18.10: CD (Checking Disabled) Bit + // 18.11: RCODE (Response Code) + }, + // 18.12 pertains to questions (handled by handleQuestion) + // 18.13 pertains to resource records (handled by handleQuestion) + + // 18.14: Name Compression - responses should be compressed (though see + // caveats in the RFC), so set the Compress bit (part of the dns library + // API, not part of the DNS packet) to true. + Compress: true, + + Answer: answer, + } + } + + if s.config.LogEmptyResponses && len(multicastAnswer) == 0 && len(unicastAnswer) == 0 { + questions := make([]string, len(query.Question)) + for i, q := range query.Question { + questions[i] = q.Name + } + log.Printf("no responses for query with questions: %s", strings.Join(questions, ", ")) + } + + if mresp := resp(false); mresp != nil { + if err := s.sendResponse(mresp, from, false); err != nil { + return fmt.Errorf("mdns: error sending multicast response: %v", err) + } + } + if uresp := resp(true); uresp != nil { + if err := s.sendResponse(uresp, from, true); err != nil { + return fmt.Errorf("mdns: error sending unicast response: %v", err) + } + } + return nil +} + +// handleQuestion is used to handle an incoming question +// +// The response to a question may be transmitted over multicast, unicast, or +// both. The return values are DNS records for each transmission type. +func (s *Server) handleQuestion(q dns.Question) (multicastRecs, unicastRecs []dns.RR) { + records := s.config.Zone.Records(q) + + if len(records) == 0 { + return nil, nil + } + + // Handle unicast and multicast responses. + // TODO(reddaly): The decision about sending over unicast vs. multicast is not + // yet fully compliant with RFC 6762. For example, the unicast bit should be + // ignored if the records in question are close to TTL expiration. For now, + // we just use the unicast bit to make the decision, as per the spec: + // RFC 6762, section 18.12. Repurposing of Top Bit of qclass in Question + // Section + // + // In the Question Section of a Multicast DNS query, the top bit of the + // qclass field is used to indicate that unicast responses are preferred + // for this particular question. (See Section 5.4.) 
+ if q.Qclass&(1<<15) != 0 || forceUnicastResponses { + return nil, records + } + return records, nil +} + +// sendResponse is used to send a response packet +func (s *Server) sendResponse(resp *dns.Msg, from net.Addr, unicast bool) error { + // TODO(reddaly): Respect the unicast argument, and allow sending responses + // over multicast. + buf, err := resp.Pack() + if err != nil { + return err + } + + // Determine the socket to send from + addr := from.(*net.UDPAddr) + if addr.IP.To4() != nil { + _, err = s.ipv4List.WriteToUDP(buf, addr) + return err + } else { + _, err = s.ipv6List.WriteToUDP(buf, addr) + return err + } +} diff --git a/vendor/github.com/hashicorp/mdns/zone.go b/vendor/github.com/hashicorp/mdns/zone.go new file mode 100644 index 000000000000..6f442c7b1019 --- /dev/null +++ b/vendor/github.com/hashicorp/mdns/zone.go @@ -0,0 +1,307 @@ +package mdns + +import ( + "fmt" + "net" + "os" + "strings" + + "github.com/miekg/dns" +) + +const ( + // defaultTTL is the default TTL value in returned DNS records in seconds. + defaultTTL = 120 +) + +// Zone is the interface used to integrate with the server and +// to serve records dynamically +type Zone interface { + // Records returns DNS records in response to a DNS question. + Records(q dns.Question) []dns.RR +} + +// MDNSService is used to export a named service by implementing a Zone +type MDNSService struct { + Instance string // Instance name (e.g. "hostService name") + Service string // Service name (e.g. "_http._tcp.") + Domain string // If blank, assumes "local" + HostName string // Host machine DNS name (e.g. "mymachine.net.") + Port int // Service Port + IPs []net.IP // IP addresses for the service's host + TXT []string // Service TXT records + + serviceAddr string // Fully qualified service address + instanceAddr string // Fully qualified instance address + enumAddr string // _services._dns-sd._udp. +} + +// validateFQDN returns an error if the passed string is not a fully qualified +// hdomain name (more specifically, a hostname). +func validateFQDN(s string) error { + if len(s) == 0 { + return fmt.Errorf("FQDN must not be blank") + } + if s[len(s)-1] != '.' { + return fmt.Errorf("FQDN must end in period: %s", s) + } + // TODO(reddaly): Perform full validation. + + return nil +} + +// NewMDNSService returns a new instance of MDNSService. +// +// If domain, hostName, or ips is set to the zero value, then a default value +// will be inferred from the operating system. +// +// TODO(reddaly): This interface may need to change to account for "unique +// record" conflict rules of the mDNS protocol. Upon startup, the server should +// check to ensure that the instance name does not conflict with other instance +// names, and, if required, select a new name. There may also be conflicting +// hostName A/AAAA records. +func NewMDNSService(instance, service, domain, hostName string, port int, ips []net.IP, txt []string) (*MDNSService, error) { + // Sanity check inputs + if instance == "" { + return nil, fmt.Errorf("missing service instance name") + } + if service == "" { + return nil, fmt.Errorf("missing service name") + } + if port == 0 { + return nil, fmt.Errorf("missing service port") + } + + // Set default domain + if domain == "" { + domain = "local." + } + if err := validateFQDN(domain); err != nil { + return nil, fmt.Errorf("domain %q is not a fully-qualified domain name: %v", domain, err) + } + + // Get host information if no host is specified. 
+ if hostName == "" { + var err error + hostName, err = os.Hostname() + if err != nil { + return nil, fmt.Errorf("could not determine host: %v", err) + } + hostName = fmt.Sprintf("%s.", hostName) + } + if err := validateFQDN(hostName); err != nil { + return nil, fmt.Errorf("hostName %q is not a fully-qualified domain name: %v", hostName, err) + } + + if len(ips) == 0 { + var err error + ips, err = net.LookupIP(hostName) + if err != nil { + // Try appending the host domain suffix and lookup again + // (required for Linux-based hosts) + tmpHostName := fmt.Sprintf("%s%s", hostName, domain) + + ips, err = net.LookupIP(tmpHostName) + + if err != nil { + return nil, fmt.Errorf("could not determine host IP addresses for %s", hostName) + } + } + } + for _, ip := range ips { + if ip.To4() == nil && ip.To16() == nil { + return nil, fmt.Errorf("invalid IP address in IPs list: %v", ip) + } + } + + return &MDNSService{ + Instance: instance, + Service: service, + Domain: domain, + HostName: hostName, + Port: port, + IPs: ips, + TXT: txt, + serviceAddr: fmt.Sprintf("%s.%s.", trimDot(service), trimDot(domain)), + instanceAddr: fmt.Sprintf("%s.%s.%s.", instance, trimDot(service), trimDot(domain)), + enumAddr: fmt.Sprintf("_services._dns-sd._udp.%s.", trimDot(domain)), + }, nil +} + +// trimDot is used to trim the dots from the start or end of a string +func trimDot(s string) string { + return strings.Trim(s, ".") +} + +// Records returns DNS records in response to a DNS question. +func (m *MDNSService) Records(q dns.Question) []dns.RR { + switch q.Name { + case m.enumAddr: + return m.serviceEnum(q) + case m.serviceAddr: + return m.serviceRecords(q) + case m.instanceAddr: + return m.instanceRecords(q) + case m.HostName: + if q.Qtype == dns.TypeA || q.Qtype == dns.TypeAAAA { + return m.instanceRecords(q) + } + fallthrough + default: + return nil + } +} + +func (m *MDNSService) serviceEnum(q dns.Question) []dns.RR { + switch q.Qtype { + case dns.TypeANY: + fallthrough + case dns.TypePTR: + rr := &dns.PTR{ + Hdr: dns.RR_Header{ + Name: q.Name, + Rrtype: dns.TypePTR, + Class: dns.ClassINET, + Ttl: defaultTTL, + }, + Ptr: m.serviceAddr, + } + return []dns.RR{rr} + default: + return nil + } +} + +// serviceRecords is called when the query matches the service name +func (m *MDNSService) serviceRecords(q dns.Question) []dns.RR { + switch q.Qtype { + case dns.TypeANY: + fallthrough + case dns.TypePTR: + // Build a PTR response for the service + rr := &dns.PTR{ + Hdr: dns.RR_Header{ + Name: q.Name, + Rrtype: dns.TypePTR, + Class: dns.ClassINET, + Ttl: defaultTTL, + }, + Ptr: m.instanceAddr, + } + servRec := []dns.RR{rr} + + // Get the instance records + instRecs := m.instanceRecords(dns.Question{ + Name: m.instanceAddr, + Qtype: dns.TypeANY, + }) + + // Return the service record with the instance records + return append(servRec, instRecs...) + default: + return nil + } +} + +// serviceRecords is called when the query matches the instance name +func (m *MDNSService) instanceRecords(q dns.Question) []dns.RR { + switch q.Qtype { + case dns.TypeANY: + // Get the SRV, which includes A and AAAA + recs := m.instanceRecords(dns.Question{ + Name: m.instanceAddr, + Qtype: dns.TypeSRV, + }) + + // Add the TXT record + recs = append(recs, m.instanceRecords(dns.Question{ + Name: m.instanceAddr, + Qtype: dns.TypeTXT, + })...) 
+ return recs + + case dns.TypeA: + var rr []dns.RR + for _, ip := range m.IPs { + if ip4 := ip.To4(); ip4 != nil { + rr = append(rr, &dns.A{ + Hdr: dns.RR_Header{ + Name: m.HostName, + Rrtype: dns.TypeA, + Class: dns.ClassINET, + Ttl: defaultTTL, + }, + A: ip4, + }) + } + } + return rr + + case dns.TypeAAAA: + var rr []dns.RR + for _, ip := range m.IPs { + if ip.To4() != nil { + // TODO(reddaly): IPv4 addresses could be encoded in IPv6 format and + // putinto AAAA records, but the current logic puts ipv4-encodable + // addresses into the A records exclusively. Perhaps this should be + // configurable? + continue + } + + if ip16 := ip.To16(); ip16 != nil { + rr = append(rr, &dns.AAAA{ + Hdr: dns.RR_Header{ + Name: m.HostName, + Rrtype: dns.TypeAAAA, + Class: dns.ClassINET, + Ttl: defaultTTL, + }, + AAAA: ip16, + }) + } + } + return rr + + case dns.TypeSRV: + // Create the SRV Record + srv := &dns.SRV{ + Hdr: dns.RR_Header{ + Name: q.Name, + Rrtype: dns.TypeSRV, + Class: dns.ClassINET, + Ttl: defaultTTL, + }, + Priority: 10, + Weight: 1, + Port: uint16(m.Port), + Target: m.HostName, + } + recs := []dns.RR{srv} + + // Add the A record + recs = append(recs, m.instanceRecords(dns.Question{ + Name: m.instanceAddr, + Qtype: dns.TypeA, + })...) + + // Add the AAAA record + recs = append(recs, m.instanceRecords(dns.Question{ + Name: m.instanceAddr, + Qtype: dns.TypeAAAA, + })...) + return recs + + case dns.TypeTXT: + txt := &dns.TXT{ + Hdr: dns.RR_Header{ + Name: q.Name, + Rrtype: dns.TypeTXT, + Class: dns.ClassINET, + Ttl: defaultTTL, + }, + Txt: m.TXT, + } + return []dns.RR{txt} + } + return nil +} diff --git a/vendor/github.com/hashicorp/vault/api/go.mod b/vendor/github.com/hashicorp/vault/api/go.mod index abc93cd9ebd8..c6cee73f22f9 100644 --- a/vendor/github.com/hashicorp/vault/api/go.mod +++ b/vendor/github.com/hashicorp/vault/api/go.mod @@ -12,7 +12,7 @@ require ( github.com/hashicorp/go-retryablehttp v0.6.6 github.com/hashicorp/go-rootcerts v1.0.2 github.com/hashicorp/hcl v1.0.0 - github.com/hashicorp/vault/sdk v0.0.0-20201001212527-2e121bafe1e4 + github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267 github.com/mitchellh/mapstructure v1.3.2 golang.org/x/net v0.0.0-20200602114024-627f9648deb9 golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 diff --git a/vendor/github.com/hashicorp/vault/api/sys_raft.go b/vendor/github.com/hashicorp/vault/api/sys_raft.go index 1a8aa1176b63..3f7e0a3b86a0 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_raft.go +++ b/vendor/github.com/hashicorp/vault/api/sys_raft.go @@ -16,6 +16,7 @@ type RaftJoinResponse struct { // RaftJoinRequest represents the parameters consumed by the raft join API type RaftJoinRequest struct { + AutoJoin string `json:"auto_join"` LeaderAPIAddr string `json:"leader_api_addr"` LeaderCACert string `json:"leader_ca_cert"` LeaderClientCert string `json:"leader_client_cert"` diff --git a/vendor/github.com/hashicorp/vic/LICENSE b/vendor/github.com/hashicorp/vic/LICENSE new file mode 100644 index 000000000000..cc9a0c7146f2 --- /dev/null +++ b/vendor/github.com/hashicorp/vic/LICENSE @@ -0,0 +1,24135 @@ +open_source_licenses.txt + +vSphere Integrated Containers Engine 1.5.2 GA + +====================================================================== + +Copyright (c) 2016-2019 VMware, Inc. All rights reserved. + +This product is licensed to you under the Apache License version 2.0 (the License). You may not use this product except in compliance with the License. 
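The API-facing change in this patch is the new AutoJoin field on api.RaftJoinRequest (api/sys_raft.go above), which carries a go-discover style configuration string consumed by the vendored providers. A minimal client-side sketch, assuming the Sys().RaftJoin helper and the Joined field on RaftJoinResponse from the api package; the provider and tag values below are illustrative placeholders only:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        // auto_join carries a go-discover configuration string; the provider
        // and tag names here are placeholders, not values from this patch.
        resp, err := client.Sys().RaftJoin(&api.RaftJoinRequest{
            AutoJoin: "provider=aws tag_key=vault tag_value=raft",
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("joined: %v\n", resp.Joined)
    }
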
+ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. For the purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. + +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. 
Unless You explicitly state otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "[]" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +====================================================================== + +vSphere Integrated Containers Engine v1.5.2 GA includes a number of +components with separate copyright notices and license terms. This +product does not necessarily use all the open source components referred +to below. Your use of the source code for these components is subject to +the terms and conditions of the following licenses. + +====================================================================== + +The following copyright statements and licenses apply to various open +source software packages (or portions thereof) that are distributed with +this VMware Product. + +The VMware Product may also include other VMware components, which may +contain additional open source software packages. One or more such +open_source_licenses.txt files may therefore accompany this VMware +Product. + +The VMware Product that includes this file does not necessarily use all +the open source software packages referred to below and may also only +use portions of a given package. + + +=============== TABLE OF CONTENTS ============================= + +The following is a listing of the open source components detailed in +this document. This list is provided for your convenience; please read +further if you wish to review the copyright notice(s) and the full text +of the license associated with each component. + +PART 1. 
VIRTUAL APPLIANCE: APPLICATION LAYER + + +SECTION 1: Apache License, V2.0 + + >>> appengine-ca59ef35f409df61fa4a5f8290ff289b37eccfb8 + >>> aws_aws-sdk-go_aws-master + >>> coreos_etcd_client-781196fa8746d6c34ace2a62898051af6dc46002 + >>> coreos_etcd_version-781196fa8746d6c34ace2a62898051af6dc46002 + >>> coreos_go-semver_semver-5e3acbb5668c4c3deb4842615c4098eb61fb6b1e + >>> docker_distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc + >>> docker_go-events-aa2e3b613fbbfdddbe055a7b9e3ce271cfd83eca + >>> docker_libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd + >>> docker_libtrust-9cbd2a1374f46905c68a4eb3694a130610adc62a + >>> go-openapi_analysis-d5a75b7d751ca3f11ad5d93cfe97405f2c3f6a47 + >>> go-openapi_errors-fc3f73a224499b047eda7191e5d22e1e9631e86f + >>> go-openapi_jsonpointer-779f45308c19820f1a69e9a4cd965f496e0da10f + >>> go-openapi_jsonreference-36d33bfe519efae5632669801b180bf1a245da3b + >>> go-openapi_loads-6bb6486231e079ea125c0f39994ed3d0c53399ed + >>> go-openapi_runtime-3b13ebb46790d871d74a6c2450fa4b1280f90854 + >>> go-openapi_spec-e072eb2390d3a03736a6eb617d30f48369160447 + >>> go-openapi_strfmt-0cb3db44c13bad3b3f567b762a66751972a310cc + >>> go-openapi_swag-96d7b9ebd181a1735a1c9ac87914f2b32fbf56c9 + >>> go-openapi_validate-035dcd74f1f61e83debe1c22950dc53556e7e4b2 + >>> go_compute_metadata-cd0da878c66091060d2e7403abd62192b3e387e0 + >>> go_internal-cd0da878c66091060d2e7403abd62192b3e387e0 + >>> golang_glog-23def4e6c14b4da8ac2ed8007337bc5eb5007998 + >>> golang_groupcache_lru-72d04f9fcdec7d3821820cc4a6f150eae553639a + >>> govmomi-v0.17.1 + >>> maruel_panicparse_stack-25bcac0d793cf4109483505a0d66e066a3a90a80 + >>> oxtoacart/bpool-4e1c5567d7c2dd59fa4c7c83d34c2f3528b025d6 + >>> syncutil_singleflight-7ce08ca145dbe0e66a127c447b80ee7914f3e4f9 + >>> vdemeester_shakers-24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 + >>> vishvananda_netlink-482f7a52b758233521878cb6c5904b6bd63f3457 + >>> vishvananda_netns-604eaf189ee867d8c147fafc28def2394e878d25 + >>> yaml_v2-4c78c975fe7c825c6d1466c42be594d1d6f3aba6 + + +SECTION 2: BSD-STYLE, MIT-STYLE, OR SIMILAR STYLE LICENSES + + >>> agl_ed25519-master + >>> airbrake_gobrake_v2-c9d51adc624b5cc4c1bf8de730a09af4878ffe2d + >>> api_compute_v1-dfa61ae24628a06502b9c2805d983b57e89399b5 + >>> api_gensupport-dfa61ae24628a06502b9c2805d983b57e89399b5 + >>> api_googleapi-dfa61ae24628a06502b9c2805d983b57e89399b5 + >>> armon_go-metrics-f303b03b91d770a11a39677f1d3b55da4002bbcb + >>> armon_go-radix-4239b77079c7b5d1243b7b4736304ce8ddb6f0f2 + >>> asaskevich_govalidator-9699ab6b38bee2e02cd3fe8b99ecf67665395c96 + >>> azure_go-ansiterm-70b2c90b260171e829f1ebd7c17f600c11858dbe + >>> boltdb_bolt-b514920f8f2e0a68f857e5a12c774f385a59aef9 + >>> cenkalti_backoff-b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3 + >>> client9_misspell-9a1fc2456ac9e8c9b4cbe9d005b6e7adac0d357f + >>> codegangsta/inject-33e0aa1cb7c019ccc3fbe049a8262a6403d30504 + >>> codegangsta_cli-master + >>> cpuguy83_go-md2man_md2man-a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa + >>> d2g_dhcp4-f0e4d29ff0231dce36e250b2ed9ff08412584bca + >>> deckarep_golang-set-1f0f4ff8d3fbef9328522993ce71c35890f67554 + >>> dustin_go-humanize-259d2a102b871d17f30e3cd9881a642961a1e486 + >>> gemnasium_logrus-airbrake-hook_v2-31e6fd4bd5a98d8ee7673d24bc54ec73c31810dd + >>> gizak_termui-798ffb9cbbe4073ef1f88e6069ca4a2c6aa6676b + >>> go-check_check-20d25e2804050c1cd24a7eea1e7a6447dd0e74ec + >>> go-martini/martini-22fa46961aabd2665cf3f1343b146d20028f5071 + >>> go-nfs-client-42fca177a36031d648551c751de1daf5f55dd4f0 + >>> 
godbus_dbus-230e4b23db2fd81c53eaa0073f76659d4849ce51 + >>> gogo_protobuf_gogoproto-6a92c871a8f5333cf106b2cdf937567208dbb2b7 + >>> gogo_protobuf_proto-6a92c871a8f5333cf106b2cdf937567208dbb2b7 + >>> gogo_protobuf_protoc-gen-gogo_descriptor-6a92c871a8f5333cf106b2cdf937567208dbb2b7 + >>> gogo_protobuf_sortkeys-6a92c871a8f5333cf106b2cdf937567208dbb2b7 + >>> gogo_protobuf_types-6a92c871a8f5333cf106b2cdf937567208dbb2b7 + >>> goji/param-d7f49fd7d1ed53fd562e737a8b5bbcc7e3d925e2 + >>> golang_lint-cb00e5669539f047b2f4c53a421a01b0c8e172c6 + >>> google_go-github_github-7a51fb928f52a196d5f31daefb8a489453ef54ff + >>> google_go-querystring_query-53e6ce116135b80d037921a7fdd5138cf32d7a8a + >>> google_uuid-f3f4b54b2fabcf1f11dcc939025bb0c109b00ed8 + >>> googleapis_gax-go-da06d194a00e19ce00d9011a13931c3f6f6887c7 + >>> gorilla_context-1c83b3eabd45b6d76072b66b746c20815fb2872d + >>> gorilla_mux-acf3be1b335c8ce30b2c8d51300984666f0ceefa + >>> gorilla_securecookie-fa5329f913702981df43dcb2a380bac429c810b5 + >>> gorilla_sessions-ca9ada44574153444b00d3fd9c8559e4cc95f896 + >>> gorilla_websocket-a91eba7f97777409bc2c443f5534d41dd20c5720 + >>> graylog2_go-gelf_gelf-master + >>> grpc-b1a2821ca5a4fd6b6e48ddfbb7d6d7584d839d21 + >>> hpcloud_tail-a30252cb686a21eb2d0b98132633053ec2f7f1e5 + >>> jessevdk_go-flags-97448c91aac742cbca3d020b3e769013a420a06f + >>> julienschmidt/httprouter-975b5c4c7c21c0e3d2764200bf2aa8e34657ae6e + >>> justinas/alice-1051eaf52fcafdd87ead59d28b065f1fcb8274ec + >>> kr_pty-f7ee69f31298ecbe5d2b349c711e2547a617d398 + >>> mailru_easyjson-99e922cf9de1bc0ab38310c277cff32c2147e747 + >>> martini-contrib/render-ec18f8345a1181146728238980606fb1d6f40e8c + >>> matryer_resync-d39c09a11215c84aab0b65e323fc47dd6e276af1 + >>> mattn_go-runewidth-14207d285c6c197daabb5c9793d63e7af9ab2d50 + >>> mdlayher/arp-7140e8e22da120cb3efc8e1096f299933624024e + >>> mdlayher/ethernet-e72cf8343052938936194daba1914fd1bb33d2fe + >>> mdlayher/raw-c83864e487652c25edc58048eafdc1dfa2f142c7 + >>> microsoft_go-winio-24a3e3d3fc7451805e09d11e11e95d9a0a4f205e + >>> microsoft_hcsshim-d327ca738085de7d617aa1df16d98fe7a64c2455 + >>> miekg_dns-ca336a1f95a6b89be9c250df26c7a41742eb4a6f + >>> mitchellh_go-wordwrap-ad45545899c7b13c020ea92b2072220eefad42b8 + >>> mitchellh_mapstructure-5a0325d7fafaac12dda6e7fb8bd222ec1b69875e + >>> mreiferson_go-httpclient-31f0106b4474f14bc441575c19d3a5fa21aa1f6c + >>> naoina_denco-9af2ba0e24214bac003821f4a501b131b2e04c75 + >>> nlopes_slack-f243c7602fdf906248fc1f165284daa4e4d2d09e + >>> nsf_termbox-go-91bae1bb5fa9ee504905ecbe7043fa30e92feaa3 + >>> opennota_urlesc-5fa9ff0392746aeae1c4b37fcc42c65afa7a9587 + >>> pkg_profile-8a808a6967b79da66deacfe508b26d398a69518f + >>> puerkitobio_purell-d69616f51cdfcd7514d6a380847a152dfc2a749d + >>> racksec_srslog-a974ba6f7fb527d2ddc73ee9c05d3e2ccc0af0dc + >>> rs/cors-8dd4211afb5d08dbb39a533b9bb9e4b486351df6 + >>> ryanuber_go-glob-256dc444b735e061061cf46c809487313d5b0065 + >>> sirupsen_logrus-d26492970760ca5d33129d2d799e34be5c4782eb + >>> smartystreets_assertions-287b4346dc4e71a038c346375a9d572453bc469b + >>> smartystreets_assertions_internal_oglematchers-287b4346dc4e71a038c346375a9d572453bc469b + >>> smartystreets_assertions_internal_oglemock-287b4346dc4e71a038c346375a9d572453bc469b + >>> smartystreets_assertions_internal_ogletest-287b4346dc4e71a038c346375a9d572453bc469b + >>> smartystreets_assertions_internal_reqtrace-287b4346dc4e71a038c346375a9d572453bc469b + >>> stretchr_objx-1a9d0bb9f541897e62256577b352fdbc1fb4fd94 + >>> tomb_v1-dd632973f1e7218eb1089048e0798ec9ae7dceb8 + >>> 
tylerb/graceful.v1-4654dfbb6ad53cb5e27f37d99b02e16c1872fbbb + >>> tylerb_graceful-d7d7a205e779a4738d38eda0671fff8bfc965f14 + >>> ugorji_go_codec-708a42d246822952f38190a8d8c4e6b16a0e600c + >>> urfave/negroni-ccc4a14984828dcb944e6f17fa4a967b18624f2b + >>> urfave_cli_v1-0bdeddeeb0f650497d603c4ad7b20cfe685682f6 + >>> x_crypto_acme-728b753d0135da6801d45a38e6f43ff55779c5c2 + >>> x_crypto_curve25519-728b753d0135da6801d45a38e6f43ff55779c5c2 + >>> x_crypto_ed25519-728b753d0135da6801d45a38e6f43ff55779c5c2 + >>> x_crypto_nacl_secretbox-728b753d0135da6801d45a38e6f43ff55779c5c2 + >>> x_crypto_poly1305-728b753d0135da6801d45a38e6f43ff55779c5c2 + >>> x_crypto_salsa20_salsa-728b753d0135da6801d45a38e6f43ff55779c5c2 + >>> x_crypto_ssh-728b753d0135da6801d45a38e6f43ff55779c5c2 + >>> x_net_context-a6577fac2d73be281a500b310739095313165611 + >>> x_net_http2-a6577fac2d73be281a500b310739095313165611 + >>> x_net_idna-a6577fac2d73be281a500b310739095313165611 + >>> x_net_internal_timeseries-a6577fac2d73be281a500b310739095313165611 + >>> x_net_lex_httplex-a6577fac2d73be281a500b310739095313165611 + >>> x_net_proxy-a6577fac2d73be281a500b310739095313165611 + >>> x_net_trace-a6577fac2d73be281a500b310739095313165611 + >>> x_net_websocket-a6577fac2d73be281a500b310739095313165611 + >>> x_oauth2-1611bb46e67abc64a71ecc5c3ae67f1cbbc2b921 + >>> x_sync_singleflight-5a06fca2c336a4b2b2fcb45702e8c47621b2aa2c + >>> x_sys_unix-99f16d856c9836c42d24e7ab64ea72916925fa97 + >>> x_sys_windows-99f16d856c9836c42d24e7ab64ea72916925fa97 + >>> x_text_internal-f28f36722d5ef2f9655ad3de1f248e3e52ad5ebd + >>> x_text_language-f28f36722d5ef2f9655ad3de1f248e3e52ad5ebd + >>> x_text_message-f28f36722d5ef2f9655ad3de1f248e3e52ad5ebd + >>> x_tools_go_ast_astutil-381149a2d6e5d8f319ccf04bfefc71e03a78b868 + >>> x_tools_go_buildutil-381149a2d6e5d8f319ccf04bfefc71e03a78b868 + >>> x_tools_go_gcexportdata-381149a2d6e5d8f319ccf04bfefc71e03a78b868 + >>> x_tools_go_gcimporter15-381149a2d6e5d8f319ccf04bfefc71e03a78b868 + >>> x_tools_go_loader-381149a2d6e5d8f319ccf04bfefc71e03a78b868 + >>> x_tools_imports-381149a2d6e5d8f319ccf04bfefc71e03a78b868 + >>> zenazn/goji-c05078ca81941f8e801bba3ddc0e6b86b7fdc893 + + +SECTION 3: Mozilla Public License, V2.0 + + >>> d2g_dhcp4client-master + >>> hashicorp_go-cleanhttp-875fb671b3ddc66f8e2f0acc33829c8cb989a38d + >>> hashicorp_memberlist-cef12ad58224d55cf26caa9e3d239c2fcb3432a2 + + +PART 2. 
VIRTUAL APPLIANCE: OPERATING SYSTEM LAYER + + +SECTION 1: Academic Free License, V2.1 + + >>> dbus-1.11.12-1.ph2 + + +SECTION 2: Apache License, V2.0 + + >>> photon-release-2.0-2.ph2 + + +SECTION 3: BSD-STYLE, MIT-STYLE, OR SIMILAR STYLE LICENSES + + >>> bzip2-1.0.6-8.ph2 + >>> curl-7.59.0-4.ph2 + >>> expat-2.2.4-1.ph2 + >>> iputils-20151218-4.ph2 + >>> krb5-1.16-2.ph2 + >>> libarchive-3.3.1-2.ph2 + >>> libcap-2.25-7.ph2 + >>> libdb-5.3.28-1.ph2 + >>> libffi-3.2.1-5.ph2 + >>> libsolv-0.6.26-3.ph2 + >>> libssh2-1.8.0-1.ph2 + >>> libtirpc-1.0.1-8.ph2 + >>> linux-pam-1.3.0-1.ph2 + >>> lsof-4.89-2.ph2 + >>> ncurses-6.0-14.ph2 + >>> openssh-7.5p1-11.ph2 + >>> openssl-1.0.2q-1.ph2 + >>> pcre-8.41-1.ph2 + >>> popt-1.16-5.ph2 + >>> shadow-4.2.1-16.ph2 + >>> sqlite-3.22.0-3.ph2 + >>> sudo-1.8.20p2-5.ph2 + >>> vim-8.0.0533-6.ph2 + >>> xz-5.2.3-2.ph2 + >>> zlib-1.2.11-1.ph2 + + +SECTION 4: GNU General Public License, V2.0 + + >>> e2fsprogs-1.43.4-2.ph2 + >>> gcc-6.3.0-7.ph2 + >>> iproute2-4.10.0-3.ph2 + >>> iptables-1.6.1-4.ph2 + >>> linux-esx-4.9.154-1.ph2 + >>> logrotate-3.11.0-3.ph2 + >>> mingetty-1.08-2.ph2 + >>> net-tools-1.60-11.ph2 + >>> procps-ng-3.3.15-2.ph2 + >>> rpm-4.13.0.2-1.ph2 + >>> tdnf-1.2.3-6.ph2 + >>> util-linux-2.32-1.ph2 + + +SECTION 5: GNU General Public License, V3.0 + + >>> bash-4.4.12-3.ph2 + >>> coreutils-8.27-3.ph2 + >>> elfutils-0.169-3.ph2 + >>> grep-3.0-4.ph2 + >>> gzip-1.8-1.ph2 + >>> haveged-1.9.1-4.ph2 + >>> readline-7.0-2.ph2 + + +SECTION 6: GNU Lesser General Public License, V2.1 + + >>> cracklib-2.9.6-8.ph2 + >>> glibc-2.26-13.ph2 + >>> hawkey-2017.1-6.ph2 + >>> kmod-24-3.ph2 + >>> libgcrypt-1.8.1-2.ph2 + >>> libgpg-error-1.27-1.ph2 + >>> systemd-233-18.ph2 + + +SECTION 7: GNU Lesser General Public License, V3.0 + + >>> gmp-6.1.2-2.ph2 + + +SECTION 8: GNU Library General Public License, V2.0 + + >>> glib-2.52.1-3.ph2 + + +SECTION 9: Mozilla Public License, V2.0 + + >>> ca-certificates-20180919-1.ph2 + >>> nspr-4.15-1.ph2 + >>> nss-3.31.1-1.ph2 + + +APPENDIX. Standard License Files + + >>> Apache License, V2.0 + + >>> GNU General Public License, V2.0 + + >>> GNU General Public License, V3.0 + + >>> GNU Library General Public License, V2.0 + + >>> GNU Lesser General Public License, V2.1 + + >>> GNU Lesser General Public License, V3.0 + + >>> Mozilla Public License, V2.0 + + >>> GNU General Public License, V1.0 + + >>> GNU Free Documentation License V1.1 + + >>> GNU Free Documentation License V1.2 + + >>> GNU Free Documentation License V1.3 + + >>> Artistic License, V1.0 + + >>> Artistic License, V2.0 + + >>> Microsoft Permissive Public License + + >>> Microsoft Limited Public License + + >>> Creative Commons Attribution License, V3.0 + + >>> Creative Commons Attribution 4.0 International + + >>> Creative Commons Attribution-ShareAlike, V4.0 + + >>> Creative Commons Attribution-ShareAlike, V3.0 + + >>> Academic Free License, V2.1 + + >>> SIL Open Font License, V1.1 + + >>> LaTeX Project Public License v1.3c + + >>> Open Software License 1.1 + + >>> Creative Commons Attribution Share Alike 2.0 Generic + + >>> LDP GENERAL PUBLIC LICENSE v1 + + + +====================PART 1. VIRTUAL APPLIANCE: APPLICATION LAYER ==================== + + +--------------- SECTION 1: Apache License, V2.0 ---------- + +Apache License, V2.0 is applicable to the following component(s). + + +>>> appengine-ca59ef35f409df61fa4a5f8290ff289b37eccfb8 + +Copyright 2015 Google Inc. All rights reserved. 
+Use of this source code is governed by the Apache 2.0 +license that can be found in the LICENSE file. + + +ADDITIONAL LICENSE INFORMATION: + +> BSD-Style + +appengine-ca59ef35f409df61fa4a5f8290ff289b37eccfb8.tar.gz\appengine-ca59ef35f409df61fa4a5f8290ff289b37eccfb8.tar\appengine-ca59ef35f409df61fa4a5f8290ff289b37eccfb8\cmd\aefix\typecheck.go + +Copyright 2011 The Go Authors. All rights reserved. +Use of this source code is governed by a BSD-style +license that can be found in the LICENSE file. + +>>> aws_aws-sdk-go_aws-master + +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +ADDITIONAL LICENSE INFORMATION: + +>BSD-2 + +aws-sdk-go-master.tar.gz\aws-sdk-go-master.tar\aws-sdk-go-master\awsmigrate\awsmigrate-renamer\vendor\golang.org\x\tools\LICENSE + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +>MIT + +aws-sdk-go-master.tar.gz\aws-sdk-go-master.tar\aws-sdk-go-master\doc-src\aws-godoc\templates\jquery.js + +jQuery v1.8.2 jquery.com | jquery.org/license + +>MIT + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE MIT. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +aws-sdk-go-master.tar.gz\aws-sdk-go-master.tar\aws-sdk-go-master\doc-src\aws-godoc\templates\jquery.treeview.js + +http://bassistance.de/jquery-plugins/jquery-plugin-treeview/ +http://docs.jquery.com/Plugins/Treeview + +Copyright (c) 2007 Jörn Zaefferer + +Dual licensed under the MIT and GPL licenses: +http://www.opensource.org/licenses/mit-license.php +http://www.gnu.org/licenses/gpl.html + +>>> coreos_etcd_client-781196fa8746d6c34ace2a62898051af6dc46002 + +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). + +Copyright 2016 The etcd Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +ADDITIONAL LICENSE INFORMATION: + +> BSD + +etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\client\cancelreq.go + +Copyright 2015 The Go Authors. All rights reserved. +Use of this source code is governed by a BSD-style +license that can be found in the LICENSE file. 
+ +> MIT + +etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\cmd\vendor\github.com\beorn7\perks\LICENSE + +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +> MIT-Style + +etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\cmd\vendor\github.com\davecgh\go-spew\spew\dump.go + +Copyright (c) 2013 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +> BSD-3 Style + +etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\cmd\vendor\github.com\gogo\protobuf\proto\text_parser.go + +Copyright (c) 2013, The GoGo Authors. All rights reserved. +http://github.com/gogo/protobuf + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +> BSD-2 Clause + +etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\cmd\vendor\github.com\gogo\protobuf\proto\text_gogo.go + +Copyright (c) 2013, The GoGo Authors. All rights reserved. +http://github.com/gogo/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +> etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\cmd\vendor\github.com\prometheus\client_golang\NOTICE + +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. 
Proud +Licensed under the Apache License, Version 2.0 + +> BSD-3 (WITH GOOGLE PATENT) + +etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\cmd\vendor\golang.org\x\crypto\LICENSE + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +> LGPL 3.0 (WITH LINKING EXCEPTION) + +etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\cmd\vendor\gopkg.in\yaml.v2\LICENSE + +Copyright (c) 2011-2014 - Canonical Inc. + +This software is licensed under the LGPLv3, included below. 
+ +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + +>>> coreos_etcd_version-781196fa8746d6c34ace2a62898051af6dc46002 + +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). + +Copyright 2016 The etcd Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +ADDITIONAL LICENSE INFORMATION: + +> BSD + +etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\client\cancelreq.go + +Copyright 2015 The Go Authors. All rights reserved. +Use of this source code is governed by a BSD-style +license that can be found in the LICENSE file. + +> MIT + +etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\cmd\vendor\github.com\beorn7\perks\LICENSE + +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +> MIT-Style + +etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\cmd\vendor\github.com\davecgh\go-spew\spew\dump.go + +Copyright (c) 2013 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +> BSD-3 Style + +etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\cmd\vendor\github.com\gogo\protobuf\proto\text_parser.go + +Copyright (c) 2013, The GoGo Authors. All rights reserved. +http://github.com/gogo/protobuf + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +> BSD-2 Clause + +etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\cmd\vendor\github.com\gogo\protobuf\proto\text_gogo.go + +Copyright (c) 2013, The GoGo Authors. All rights reserved. +http://github.com/gogo/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +> etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\cmd\vendor\github.com\prometheus\client_golang\NOTICE + +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 + +> BSD-3 (WITH GOOGLE PATENT) + +etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\cmd\vendor\golang.org\x\crypto\LICENSE + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +> LGPL 3.0 (WITH LINKING EXCEPTION) + +etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar.gz\etcd-781196fa8746d6c34ace2a62898051af6dc46002.tar\etcd-781196fa8746d6c34ace2a62898051af6dc46002\cmd\vendor\gopkg.in\yaml.v2\LICENSE + +Copyright (c) 2011-2014 - Canonical Inc. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + +>>> coreos_go-semver_semver-5e3acbb5668c4c3deb4842615c4098eb61fb6b1e + +Copyright 2013-2015 CoreOS, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http:www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +>>> docker_distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc + +Copyright 2012 Gary Burd + +Licensed under the Apache License, Version 2.0 (the "License"): you may +not use this file except in compliance with the License. You may obtain +a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +License for the specific language governing permissions and limitations +under the License. + +ADDITIONAL LICENSE INFORMATION: + + +> MIT + + +distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc.tar.gz\distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc.tar\distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc\registry\storage\driver\s3-aws\s3_v2_signer.go + + +Copyright (c) 2013 Damien Le Berrigaud and Nick Wade + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +> MIT Style + + +distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc.tar.gz\distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc.tar\distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc\vendor\github.com\bugsnag\osext\LICENSE + + +Copyright (c) 2012 Daniel Theophanes + +This software is provided 'as-is', without any express or implied +warranty. In no event will the authors be held liable for any damages +arising from the use of this software. + +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it +freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + + 3. This notice may not be removed or altered from any source + distribution. 
+ + + +> LGPL 3.0 (With linking exception) + + +distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc.tar.gz\distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc.tar\distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc\vendor\github.com\docker\goamz\LICENSE + + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + +> BSD 3 Clause + + +distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc.tar.gz\distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc.tar\distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc\vendor\golang.org\x\crypto\LICENSE + + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + + +> BSD 2 Clause + + +distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc.tar.gz\distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc.tar\distribution-28602af35aceda2f8d571bad7ca37a54cf0250bc\vendor\github.com\gorilla\handlers\LICENSE + + +Copyright (c) 2013 The Gorilla Handlers Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> docker_go-events-aa2e3b613fbbfdddbe055a7b9e3ce271cfd83eca + +License: Apache 2.0 + +>>> docker_libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd + +Copyright 2015 CoreOS, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http:www.apache.orglicensesLICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +ADDITIONAL LICENSE INFORMATION: + +> BSD 3 Clause + +libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar.gz\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd\client\mflag\LICENSE + +Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation andor other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +> MIT + + +libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar.gz\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd\Godeps\_workspace\src\github.com\armon\go-metrics\LICENSE + +The MIT License (MIT) + +Copyright (c) 2013 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, andor sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+
+
+> WTFPL (MIT Style)
+
+libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar.gz\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd\Godeps\_workspace\src\github.com\BurntSushi\toml\COPYING
+
+DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+Version 2, December 2004
+
+Copyright (C) 2004 Sam Hocevar
+
+Everyone is permitted to copy and distribute verbatim or modified
+copies of this license document, and changing it is allowed as long
+as the name is changed.
+
+DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. You just DO WHAT THE FUCK YOU WANT TO.
+
+> BSD Style
+
+libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar.gz\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd\Godeps\_workspace\src\github.com\BurntSushi\toml\type_fields.go
+
+Copyright 2010 The Go Authors. All rights reserved.
+Use of this source code is governed by a BSD-style
+license that can be found in the Go distribution.
+
+> CC-Attribution share alike 4.0
+
+
+libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar.gz\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd\Godeps\_workspace\src\github.com\docker\go-units\README.md
+
+Copyright and license
+
+Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code
+is released under the Apache 2.0 license. The README.md file, and files in the
+"docs" folder are licensed under the Creative Commons Attribution 4.0
+International License under the terms and conditions set forth in the file
+"LICENSE.docs". You may obtain a duplicate copy of the same license, titled
+CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0.
+
+> BSD2 Clause
+
+
+libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar.gz\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd\Godeps\_workspace\src\github.com\godbus\dbus\LICENSE
+
+
+Copyright (c) 2013, Georg Reinke (), Google
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +> MPL2.0 + + +libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar.gz\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd\Godeps\_workspace\src\github.com\hashicorp\consul\LICENSE + +License: MPL2.0 + + +> CC- Attribution 4.0 + +libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar.gz\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd\README.md + +Copyright and license +Code and documentation copyright 2015 Docker, inc. Code released under the Apache 2.0 license. Docs released under Creative commons. + + +> BSD 3 with IP Rights Grant + + +libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar.gz\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd.tar\libnetwork-fd27f22aaa35e3d57f88688f919d05b744f431fd\Godeps\_workspace\src\golang.org\x\net\LICENSE + + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
+
+>>> docker_libtrust-9cbd2a1374f46905c68a4eb3694a130610adc62a
+
+License : Apache 2.0
+
+>>> go-openapi_analysis-d5a75b7d751ca3f11ad5d93cfe97405f2c3f6a47
+
+Copyright 2015 go-swagger maintainers
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+>>> go-openapi_errors-fc3f73a224499b047eda7191e5d22e1e9631e86f
+
+Copyright 2015 go-swagger maintainers
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+>>> go-openapi_jsonpointer-779f45308c19820f1a69e9a4cd965f496e0da10f
+
+Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+>>> go-openapi_jsonreference-36d33bfe519efae5632669801b180bf1a245da3b
+
+Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+>>> go-openapi_loads-6bb6486231e079ea125c0f39994ed3d0c53399ed
+
+Copyright 2015 go-swagger maintainers
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+>>> go-openapi_runtime-3b13ebb46790d871d74a6c2450fa4b1280f90854
+
+Copyright 2015 go-swagger maintainers
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+ADDITIONAL LICENSE INFORMATION:
+
+> MIT
+
+runtime-3b13ebb46790d871d74a6c2450fa4b1280f90854.tar.gz\runtime-3b13ebb46790d871d74a6c2450fa4b1280f90854.tar\runtime-3b13ebb46790d871d74a6c2450fa4b1280f90854\middleware\denco\LICENSE
+
+Copyright (c) 2014 Naoya Inada
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+> CC-Attribution 4.0
+
+runtime-3b13ebb46790d871d74a6c2450fa4b1280f90854.tar.gz\runtime-3b13ebb46790d871d74a6c2450fa4b1280f90854.tar\runtime-3b13ebb46790d871d74a6c2450fa4b1280f90854\internal\testing\data.go
+
+License name: Creative Commons 4.0 International,
+url: http://creativecommons.org/licenses/by/4.0/
+
+> BSD-3 clause
+
+runtime-3b13ebb46790d871d74a6c2450fa4b1280f90854.tar.gz\runtime-3b13ebb46790d871d74a6c2450fa4b1280f90854.tar\runtime-3b13ebb46790d871d74a6c2450fa4b1280f90854\middleware\negotiate_test.go
+
+Copyright 2013 The Go Authors. All rights reserved.
+
+Use of this source code is governed by a BSD-style
+license that can be found in the LICENSE file or at
+https://developers.google.com/open-source/licenses/bsd.
+
+>>> go-openapi_spec-e072eb2390d3a03736a6eb617d30f48369160447
+
+Copyright 2015 go-swagger maintainers
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+ADDITIONAL LICENSE INFORMATION:
+
+> CC-Attribution 4.0
+
+spec-e072eb2390d3a03736a6eb617d30f48369160447.tar.gz\spec-e072eb2390d3a03736a6eb617d30f48369160447.tar\spec-e072eb2390d3a03736a6eb617d30f48369160447\fixtures\expansion\invalid-refs.json
+
+license:
+Creative Commons 4.0 International
+http://creativecommons.org/licenses/by/4.0/
+
+> MIT
+
+spec-e072eb2390d3a03736a6eb617d30f48369160447.tar.gz\spec-e072eb2390d3a03736a6eb617d30f48369160447.tar\spec-e072eb2390d3a03736a6eb617d30f48369160447\fixtures\expansion\all-the-things.json
+
+License: MIT
+
+>>> go-openapi_strfmt-0cb3db44c13bad3b3f567b762a66751972a310cc
+
+Copyright 2015 go-swagger maintainers
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+>>> go-openapi_swag-96d7b9ebd181a1735a1c9ac87914f2b32fbf56c9
+
+Copyright 2015 go-swagger maintainers
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+>>> go-openapi_validate-035dcd74f1f61e83debe1c22950dc53556e7e4b2
+
+Copyright 2015 go-swagger maintainers
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+>>> go_compute_metadata-cd0da878c66091060d2e7403abd62192b3e387e0
+
+Copyright 2015 go-swagger maintainers
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+>>> go_internal-cd0da878c66091060d2e7403abd62192b3e387e0
+
+Copyright 2016 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+>>> golang_glog-23def4e6c14b4da8ac2ed8007337bc5eb5007998
+
+Go support for leveled logs, analogous to https://code.google.com/p/google-glog
+
+Copyright 2013 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+>>> golang_groupcache_lru-72d04f9fcdec7d3821820cc4a6f150eae553639a
+
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+>>> govmomi-v0.17.1
+
+Copyright (c) 2016 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+ADDITIONAL LICENSE INFORMATION:
+
+> MIT- Style
+
+govmomi-0.17.1.tar.gz\govmomi-0.17.1.tar\govmomi-0.17.1\vendor\github.com\davecgh\go-xdr\LICENSE
+
+Copyright (c) 2012-2014 Dave Collins
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ +> BSD- 3 Clause + +govmomi-0.17.1.tar.gz\govmomi-0.17.1.tar\govmomi-0.17.1\vim25\xml\LICENSE + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> maruel_panicparse_stack-25bcac0d793cf4109483505a0d66e066a3a90a80 + +Copyright 2015 Marc-Antoine Ruel. All rights reserved. +Use of this source code is governed under the Apache License, Version 2.0 +that can be found in the LICENSE file. + +ADDITIONAL LICENSE INFORMATION: + +> MIT + +panicparse-25bcac0d793cf4109483505a0d66e066a3a90a80.tar.gz\panicparse-25bcac0d793cf4109483505a0d66e066a3a90a80.tar\panicparse-25bcac0d793cf4109483505a0d66e066a3a90a80\vendor\github.com\kr\pretty\License + +The MIT License (MIT) + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +> BSD-3 Clause + +panicparse-25bcac0d793cf4109483505a0d66e066a3a90a80.tar.gz\panicparse-25bcac0d793cf4109483505a0d66e066a3a90a80.tar\panicparse-25bcac0d793cf4109483505a0d66e066a3a90a80\vendor\github.com\pmezard\go-difflib\LICENSE + +Copyright (c) 2013, Patrick Mezard +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> oxtoacart/bpool-4e1c5567d7c2dd59fa4c7c83d34c2f3528b025d6 + +Copyright 2014 Percy Wegmann + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +>>> syncutil_singleflight-7ce08ca145dbe0e66a127c447b80ee7914f3e4f9 + +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +ADDITIONAL LICENSE INFORMATION: + +> BSD-Style + +go4-7ce08ca145dbe0e66a127c447b80ee7914f3e4f9.tar.gz\go4-7ce08ca145dbe0e66a127c447b80ee7914f3e4f9.tar\go4-7ce08ca145dbe0e66a127c447b80ee7914f3e4f9\reflectutil\swapper_unsafe_15.go + +Copyright 2016 The Go Authors. All rights reserved. +Use of this source code is governed by a BSD-style +license that can be found in the LICENSE file. + +>>> vdemeester_shakers-24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 + +License : Apache 2.0 + +>>> vishvananda_netlink-482f7a52b758233521878cb6c5904b6bd63f3457 + +License: Apache 2.0 + +>>> vishvananda_netns-604eaf189ee867d8c147fafc28def2394e878d25 + +Copyright 2014 Vishvananda Ishaya. +Copyright 2014 Docker, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +>>> yaml_v2-4c78c975fe7c825c6d1466c42be594d1d6f3aba6 + +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +ADDITIONAL LICENSE INFORMATION: + + +> MIT + +yaml-4c78c975fe7c825c6d1466c42be594d1d6f3aba6.tar.gz\yaml-4c78c975fe7c825c6d1466c42be594d1d6f3aba6.tar\yaml-4c78c975fe7c825c6d1466c42be594d1d6f3aba6\LICENSE.libyaml + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------- SECTION 2: BSD-STYLE, MIT-STYLE, OR SIMILAR STYLE LICENSES ---------- + +BSD-STYLE, MIT-STYLE, OR SIMILAR STYLE LICENSES are applicable to the following component(s). + + +>>> agl_ed25519-master + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> airbrake_gobrake_v2-c9d51adc624b5cc4c1bf8de730a09af4878ffe2d + +Copyright (c) 2014 The Gobrake Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> api_compute_v1-dfa61ae24628a06502b9c2805d983b57e89399b5 + +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> api_gensupport-dfa61ae24628a06502b9c2805d983b57e89399b5 + +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +ADDITIONAL LICENSE INFORMATION: + +> Apache 2.0 + +google-api-go-client-dfa61ae24628a06502b9c2805d983b57e89399b5.tar.gz\google-api-go-client-dfa61ae24628a06502b9c2805d983b57e89399b5.tar\google-api-go-client-dfa61ae24628a06502b9c2805d983b57e89399b5\examples\pubsub.go + +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +> BSD + +google-api-go-client-dfa61ae24628a06502b9c2805d983b57e89399b5.tar.gz\google-api-go-client-dfa61ae24628a06502b9c2805d983b57e89399b5.tar\google-api-go-client-dfa61ae24628a06502b9c2805d983b57e89399b5\examples\mapsengine.go + +Copyright 2014 The Go Authors. All rights reserved. +Use of this source code is governed by a BSD-style +license that can be found in the LICENSE file. 
+ +> MIT + +google-api-go-client-dfa61ae24628a06502b9c2805d983b57e89399b5.tar.gz\google-api-go-client-dfa61ae24628a06502b9c2805d983b57e89399b5.tar\google-api-go-client-dfa61ae24628a06502b9c2805d983b57e89399b5\googleapi\internal\uritemplates\LICENSE + +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +>>> api_googleapi-dfa61ae24628a06502b9c2805d983b57e89399b5 + +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +ADDITIONAL LICENSE INFORMATION: + +> MIT + +google-api-go-client-dfa61ae24628a06502b9c2805d983b57e89399b5.tar.gz\google-api-go-client-dfa61ae24628a06502b9c2805d983b57e89399b5(1).tar\google-api-go-client-dfa61ae24628a06502b9c2805d983b57e89399b5\googleapi\internal\uritemplates\LICENSE + +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +> Apache 2.0 + +google-api-go-client-dfa61ae24628a06502b9c2805d983b57e89399b5.tar.gz\google-api-go-client-dfa61ae24628a06502b9c2805d983b57e89399b5(1).tar\google-api-go-client-dfa61ae24628a06502b9c2805d983b57e89399b5\examples\compute.go + +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +>>> armon_go-metrics-f303b03b91d770a11a39677f1d3b55da4002bbcb + +The MIT License (MIT) + +Copyright (c) 2013 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +>>> armon_go-radix-4239b77079c7b5d1243b7b4736304ce8ddb6f0f2 + +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +>>> asaskevich_govalidator-9699ab6b38bee2e02cd3fe8b99ecf67665395c96 + +The MIT License (MIT) + +Copyright (c) 2014 Alex Saskevich + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +>>> azure_go-ansiterm-70b2c90b260171e829f1ebd7c17f600c11858dbe + +The MIT License (MIT) + +Copyright (c) 2015 Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +>>> boltdb_bolt-b514920f8f2e0a68f857e5a12c774f385a59aef9 + +The MIT License (MIT) + +Copyright (c) 2013 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +>>> cenkalti_backoff-b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3 + +The MIT License (MIT) + +Copyright (c) 2014 Cenk Alti + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +>>> client9_misspell-9a1fc2456ac9e8c9b4cbe9d005b6e7adac0d357f + +The MIT License (MIT) + +Copyright (c) 2015-2016 Nick Galbreath + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +ADDITIONAL LICENSE INFORMATION: + +> BSD-3 Clause + +misspell-9a1fc2456ac9e8c9b4cbe9d005b6e7adac0d357f.tar.gz\misspell-9a1fc2456ac9e8c9b4cbe9d005b6e7adac0d357f.tar\misspell-9a1fc2456ac9e8c9b4cbe9d005b6e7adac0d357f\stringreplacer\LICENSE + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> codegangsta/inject-33e0aa1cb7c019ccc3fbe049a8262a6403d30504 + +The MIT License (MIT) + +Copyright (c) 2013 Jeremy Saenz + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +>>> codegangsta_cli-master + +MIT License + +Copyright (c) 2016 Jeremy Saenz & Contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +>>> cpuguy83_go-md2man_md2man-a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa + +The MIT License (MIT) + +Copyright (c) 2014 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +ADDITIONAL LICENSE INFORMATION: + +> BSD-2 + +go-md2man-a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa.tar.gz\go-md2man-a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa.tar\go-md2man-a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa\vendor\github.com\russross\blackfriday\LICENSE.txt + +Copyright © 2011 Russ Ross +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials provided with +the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +>>> d2g_dhcp4-f0e4d29ff0231dce36e250b2ed9ff08412584bca + +Copyright (c) 2013 Skagerrak Software Limited. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Skagerrak Software Limited nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> deckarep_golang-set-1f0f4ff8d3fbef9328522993ce71c35890f67554 + +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +>>> dustin_go-humanize-259d2a102b871d17f30e3cd9881a642961a1e486 + +Copyright (c) 2005-2008 Dustin Sallings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + +>>> gemnasium_logrus-airbrake-hook_v2-31e6fd4bd5a98d8ee7673d24bc54ec73c31810dd + +The MIT License (MIT) + +Copyright (c) 2015 Gemnasium + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +>>> gizak_termui-798ffb9cbbe4073ef1f88e6069ca4a2c6aa6676b + +The MIT License (MIT) + +Copyright (c) 2015 Zack Guo + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +>>> go-check_check-20d25e2804050c1cd24a7eea1e7a6447dd0e74ec + +Gocheck - A rich testing framework for Go + +Copyright (c) 2010-2013 Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +ADDITIONAL LICENSE INFORMATION: + +> BSD 3 + +check-20d25e2804050c1cd24a7eea1e7a6447dd0e74ec.tar.gz\check-20d25e2804050c1cd24a7eea1e7a6447dd0e74ec.tar\check-20d25e2804050c1cd24a7eea1e7a6447dd0e74ec\benchmark.go + + Copyright (c) 2012 The Go Authors. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +>>> go-martini/martini-22fa46961aabd2665cf3f1343b146d20028f5071 + +The MIT License (MIT) + +Copyright (c) 2015 Jeremy Saenz + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +>>> go-nfs-client-42fca177a36031d648551c751de1daf5f55dd4f0 + +Go-nfs-client version 0.1 + +Copyright © 2017 VMware, Inc. All rights reserved + +The BSD-2 license (the “License”) set forth below applies to all parts of the Go-nfs-client +project. You may not use this file except in compliance with the License. + +BSD-2 License + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: +• Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +• Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +ADDITIONAL LICENSE INFORMATION: + +> Apache 2.0 + +go-nfs-client-42fca177a36031d648551c751de1daf5f55dd4f0.tar.gz\go-nfs-client-42fca177a36031d648551c751de1daf5f55dd4f0.tar\go-nfs-client-42fca177a36031d648551c751de1daf5f55dd4f0\nfs\util\log.go + + +Copyright © 2017 VMware, Inc. All Rights Reserved. +SPDX-License-Identifier: BSD-2-Clause + +Copyright 2016 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http:www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +>>> godbus_dbus-230e4b23db2fd81c53eaa0073f76659d4849ce51 + +Copyright (c) 2013, Georg Reinke (), Google +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> gogo_protobuf_gogoproto-6a92c871a8f5333cf106b2cdf937567208dbb2b7 + +Protocol Buffers for Go with Gadgets + +Copyright (c) 2013, The GoGo Authors. All rights reserved. +http:github.com/gogo/protobuf + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https:github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +ADDITIONAL LICENSE INFORMATION: + +> BSD 2 + +protobuf-6a92c871a8f5333cf106b2cdf937567208dbb2b7.tar.gz\protobuf-6a92c871a8f5333cf106b2cdf937567208dbb2b7.tar\protobuf-6a92c871a8f5333cf106b2cdf937567208dbb2b7\vanity\test\proto3.proto + + Protocol Buffers for Go with Gadgets + + Copyright (c) 2015, The GoGo Authors. All rights reserved. + http:github.com/gogo/protobuf + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> gogo_protobuf_proto-6a92c871a8f5333cf106b2cdf937567208dbb2b7 + +Protocol Buffers for Go with Gadgets + +Copyright (c) 2013, The GoGo Authors. All rights reserved. +http://github.com/gogo/protobuf + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +ADDITIONAL LICENSE INFORMATION: + +> BSD-2 + +protobuf-6a92c871a8f5333cf106b2cdf937567208dbb2b7.tar.gz\protobuf-6a92c871a8f5333cf106b2cdf937567208dbb2b7.tar\protobuf-6a92c871a8f5333cf106b2cdf937567208dbb2b7\plugin\compare\compare.go + +Protocol Buffers for Go with Gadgets + +Copyright (c) 2013, The GoGo Authors. All rights reserved. +http:github.com/gogo/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> gogo_protobuf_protoc-gen-gogo_descriptor-6a92c871a8f5333cf106b2cdf937567208dbb2b7 + +Protocol Buffers for Go with Gadgets + +Copyright (c) 2013, The GoGo Authors. All rights reserved. +http:github.com/gogo/protobuf + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https:github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +ADDITIONAL LICENSE INFORMATION: + +> BSD-2 clause + +protobuf-6a92c871a8f5333cf106b2cdf937567208dbb2b7.tar.gz\protobuf-6a92c871a8f5333cf106b2cdf937567208dbb2b7.tar\protobuf-6a92c871a8f5333cf106b2cdf937567208dbb2b7\codec\codec.go + +Copyright (c) 2015, The GoGo Authors. All rights reserved. +http:github.com/gogo/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> gogo_protobuf_sortkeys-6a92c871a8f5333cf106b2cdf937567208dbb2b7 + +Protocol Buffers for Go with Gadgets + +Copyright (c) 2013, The GoGo Authors. All rights reserved. +http://github.com/gogo/protobuf + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ADDITIONAL LICENSE INFORMATION:
+
+> BSD-2
+
+protobuf-6a92c871a8f5333cf106b2cdf937567208dbb2b7.tar.gz\protobuf-6a92c871a8f5333cf106b2cdf937567208dbb2b7.tar\protobuf-6a92c871a8f5333cf106b2cdf937567208dbb2b7\codec\codec.go
+
+Copyright (c) 2015, The GoGo Authors. All rights reserved.
+http:github.com/gogo/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+>>> gogo_protobuf_types-6a92c871a8f5333cf106b2cdf937567208dbb2b7
+
+Protocol Buffers for Go with Gadgets
+
+Copyright (c) 2013, The GoGo Authors. All rights reserved.
+http://github.com/gogo/protobuf
+
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +ADDITIONAL LICENSE INFORMATION: + +> BSD-2 + +protobuf-6a92c871a8f5333cf106b2cdf937567208dbb2b7.tar.gz\protobuf-6a92c871a8f5333cf106b2cdf937567208dbb2b7.tar\protobuf-6a92c871a8f5333cf106b2cdf937567208dbb2b7\plugin\compare\compare.go + +Protocol Buffers for Go with Gadgets + +Copyright (c) 2013, The GoGo Authors. All rights reserved. +http:github.com/gogo/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> goji/param-d7f49fd7d1ed53fd562e737a8b5bbcc7e3d925e2 + +Copyright (c) 2014, 2015 Carl Jackson (carl@avtok.com) + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +>>> golang_lint-cb00e5669539f047b2f4c53a421a01b0c8e172c6 + +Copyright (c) 2013 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> google_go-github_github-7a51fb928f52a196d5f31daefb8a489453ef54ff + +Copyright (c) 2013 The go-github AUTHORS. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------- + +Some documentation is taken from the GitHub Developer site +, which is available under the following Creative +Commons Attribution 3.0 License. This applies only to the go-github source +code and would not apply to any compiled binaries. + +THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE +COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY +COPYRIGHT AND/OR OTHER APPLICABLE LAW. 
ANY USE OF THE WORK OTHER THAN AS +AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. + +BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE +TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY +BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS +CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND +CONDITIONS. + +1. Definitions + + a. "Adaptation" means a work based upon the Work, or upon the Work and + other pre-existing works, such as a translation, adaptation, + derivative work, arrangement of music or other alterations of a + literary or artistic work, or phonogram or performance and includes + cinematographic adaptations or any other form in which the Work may be + recast, transformed, or adapted including in any form recognizably + derived from the original, except that a work that constitutes a + Collection will not be considered an Adaptation for the purpose of + this License. For the avoidance of doubt, where the Work is a musical + work, performance or phonogram, the synchronization of the Work in + timed-relation with a moving image ("synching") will be considered an + Adaptation for the purpose of this License. + b. "Collection" means a collection of literary or artistic works, such as + encyclopedias and anthologies, or performances, phonograms or + broadcasts, or other works or subject matter other than works listed + in Section 1(f) below, which, by reason of the selection and + arrangement of their contents, constitute intellectual creations, in + which the Work is included in its entirety in unmodified form along + with one or more other contributions, each constituting separate and + independent works in themselves, which together are assembled into a + collective whole. A work that constitutes a Collection will not be + considered an Adaptation (as defined above) for the purposes of this + License. + c. "Distribute" means to make available to the public the original and + copies of the Work or Adaptation, as appropriate, through sale or + other transfer of ownership. + d. "Licensor" means the individual, individuals, entity or entities that + offer(s) the Work under the terms of this License. + e. "Original Author" means, in the case of a literary or artistic work, + the individual, individuals, entity or entities who created the Work + or if no individual or entity can be identified, the publisher; and in + addition (i) in the case of a performance the actors, singers, + musicians, dancers, and other persons who act, sing, deliver, declaim, + play in, interpret or otherwise perform literary or artistic works or + expressions of folklore; (ii) in the case of a phonogram the producer + being the person or legal entity who first fixes the sounds of a + performance or other sounds; and, (iii) in the case of broadcasts, the + organization that transmits the broadcast. + f. 
"Work" means the literary and/or artistic work offered under the terms + of this License including without limitation any production in the + literary, scientific and artistic domain, whatever may be the mode or + form of its expression including digital form, such as a book, + pamphlet and other writing; a lecture, address, sermon or other work + of the same nature; a dramatic or dramatico-musical work; a + choreographic work or entertainment in dumb show; a musical + composition with or without words; a cinematographic work to which are + assimilated works expressed by a process analogous to cinematography; + a work of drawing, painting, architecture, sculpture, engraving or + lithography; a photographic work to which are assimilated works + expressed by a process analogous to photography; a work of applied + art; an illustration, map, plan, sketch or three-dimensional work + relative to geography, topography, architecture or science; a + performance; a broadcast; a phonogram; a compilation of data to the + extent it is protected as a copyrightable work; or a work performed by + a variety or circus performer to the extent it is not otherwise + considered a literary or artistic work. + g. "You" means an individual or entity exercising rights under this + License who has not previously violated the terms of this License with + respect to the Work, or who has received express permission from the + Licensor to exercise rights under this License despite a previous + violation. + h. "Publicly Perform" means to perform public recitations of the Work and + to communicate to the public those public recitations, by any means or + process, including by wire or wireless means or public digital + performances; to make available to the public Works in such a way that + members of the public may access these Works from a place and at a + place individually chosen by them; to perform the Work to the public + by any means or process and the communication to the public of the + performances of the Work, including by public digital performance; to + broadcast and rebroadcast the Work by any means including signs, + sounds or images. + i. "Reproduce" means to make copies of the Work by any means including + without limitation by sound or visual recordings and the right of + fixation and reproducing fixations of the Work, including storage of a + protected performance or phonogram in digital form or other electronic + medium. + +2. Fair Dealing Rights. Nothing in this License is intended to reduce, +limit, or restrict any uses free from copyright or rights arising from +limitations or exceptions that are provided for in connection with the +copyright protection under copyright law or other applicable laws. + +3. License Grant. Subject to the terms and conditions of this License, +Licensor hereby grants You a worldwide, royalty-free, non-exclusive, +perpetual (for the duration of the applicable copyright) license to +exercise the rights in the Work as stated below: + + a. to Reproduce the Work, to incorporate the Work into one or more + Collections, and to Reproduce the Work as incorporated in the + Collections; + b. to create and Reproduce Adaptations provided that any such Adaptation, + including any translation in any medium, takes reasonable steps to + clearly label, demarcate or otherwise identify that changes were made + to the original Work. 
For example, a translation could be marked "The + original work was translated from English to Spanish," or a + modification could indicate "The original work has been modified."; + c. to Distribute and Publicly Perform the Work including as incorporated + in Collections; and, + d. to Distribute and Publicly Perform Adaptations. + e. For the avoidance of doubt: + + i. Non-waivable Compulsory License Schemes. In those jurisdictions in + which the right to collect royalties through any statutory or + compulsory licensing scheme cannot be waived, the Licensor + reserves the exclusive right to collect such royalties for any + exercise by You of the rights granted under this License; + ii. Waivable Compulsory License Schemes. In those jurisdictions in + which the right to collect royalties through any statutory or + compulsory licensing scheme can be waived, the Licensor waives the + exclusive right to collect such royalties for any exercise by You + of the rights granted under this License; and, + iii. Voluntary License Schemes. The Licensor waives the right to + collect royalties, whether individually or, in the event that the + Licensor is a member of a collecting society that administers + voluntary licensing schemes, via that society, from any exercise + by You of the rights granted under this License. + +The above rights may be exercised in all media and formats whether now +known or hereafter devised. The above rights include the right to make +such modifications as are technically necessary to exercise the rights in +other media and formats. Subject to Section 8(f), all rights not expressly +granted by Licensor are hereby reserved. + +4. Restrictions. The license granted in Section 3 above is expressly made +subject to and limited by the following restrictions: + + a. You may Distribute or Publicly Perform the Work only under the terms + of this License. You must include a copy of, or the Uniform Resource + Identifier (URI) for, this License with every copy of the Work You + Distribute or Publicly Perform. You may not offer or impose any terms + on the Work that restrict the terms of this License or the ability of + the recipient of the Work to exercise the rights granted to that + recipient under the terms of the License. You may not sublicense the + Work. You must keep intact all notices that refer to this License and + to the disclaimer of warranties with every copy of the Work You + Distribute or Publicly Perform. When You Distribute or Publicly + Perform the Work, You may not impose any effective technological + measures on the Work that restrict the ability of a recipient of the + Work from You to exercise the rights granted to that recipient under + the terms of the License. This Section 4(a) applies to the Work as + incorporated in a Collection, but this does not require the Collection + apart from the Work itself to be made subject to the terms of this + License. If You create a Collection, upon notice from any Licensor You + must, to the extent practicable, remove from the Collection any credit + as required by Section 4(b), as requested. If You create an + Adaptation, upon notice from any Licensor You must, to the extent + practicable, remove from the Adaptation any credit as required by + Section 4(b), as requested. + b. 
If You Distribute, or Publicly Perform the Work or any Adaptations or + Collections, You must, unless a request has been made pursuant to + Section 4(a), keep intact all copyright notices for the Work and + provide, reasonable to the medium or means You are utilizing: (i) the + name of the Original Author (or pseudonym, if applicable) if supplied, + and/or if the Original Author and/or Licensor designate another party + or parties (e.g., a sponsor institute, publishing entity, journal) for + attribution ("Attribution Parties") in Licensor's copyright notice, + terms of service or by other reasonable means, the name of such party + or parties; (ii) the title of the Work if supplied; (iii) to the + extent reasonably practicable, the URI, if any, that Licensor + specifies to be associated with the Work, unless such URI does not + refer to the copyright notice or licensing information for the Work; + and (iv) , consistent with Section 3(b), in the case of an Adaptation, + a credit identifying the use of the Work in the Adaptation (e.g., + "French translation of the Work by Original Author," or "Screenplay + based on original Work by Original Author"). The credit required by + this Section 4 (b) may be implemented in any reasonable manner; + provided, however, that in the case of a Adaptation or Collection, at + a minimum such credit will appear, if a credit for all contributing + authors of the Adaptation or Collection appears, then as part of these + credits and in a manner at least as prominent as the credits for the + other contributing authors. For the avoidance of doubt, You may only + use the credit required by this Section for the purpose of attribution + in the manner set out above and, by exercising Your rights under this + License, You may not implicitly or explicitly assert or imply any + connection with, sponsorship or endorsement by the Original Author, + Licensor and/or Attribution Parties, as appropriate, of You or Your + use of the Work, without the separate, express prior written + permission of the Original Author, Licensor and/or Attribution + Parties. + c. Except as otherwise agreed in writing by the Licensor or as may be + otherwise permitted by applicable law, if You Reproduce, Distribute or + Publicly Perform the Work either by itself or as part of any + Adaptations or Collections, You must not distort, mutilate, modify or + take other derogatory action in relation to the Work which would be + prejudicial to the Original Author's honor or reputation. Licensor + agrees that in those jurisdictions (e.g. Japan), in which any exercise + of the right granted in Section 3(b) of this License (the right to + make Adaptations) would be deemed to be a distortion, mutilation, + modification or other derogatory action prejudicial to the Original + Author's honor and reputation, the Licensor will waive or not assert, + as appropriate, this Section, to the fullest extent permitted by the + applicable national law, to enable You to reasonably exercise Your + right under Section 3(b) of this License (right to make Adaptations) + but not otherwise. + +5. 
Representations, Warranties and Disclaimer + +UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR +OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY +KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, +INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, +FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF +LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, +WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION +OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. + +6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE +LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR +ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES +ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS +BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. Termination + + a. This License and the rights granted hereunder will terminate + automatically upon any breach by You of the terms of this License. + Individuals or entities who have received Adaptations or Collections + from You under this License, however, will not have their licenses + terminated provided such individuals or entities remain in full + compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will + survive any termination of this License. + b. Subject to the above terms and conditions, the license granted here is + perpetual (for the duration of the applicable copyright in the Work). + Notwithstanding the above, Licensor reserves the right to release the + Work under different license terms or to stop distributing the Work at + any time; provided, however that any such election will not serve to + withdraw this License (or any other license that has been, or is + required to be, granted under the terms of this License), and this + License will continue in full force and effect unless terminated as + stated above. + +8. Miscellaneous + + a. Each time You Distribute or Publicly Perform the Work or a Collection, + the Licensor offers to the recipient a license to the Work on the same + terms and conditions as the license granted to You under this License. + b. Each time You Distribute or Publicly Perform an Adaptation, Licensor + offers to the recipient a license to the original Work on the same + terms and conditions as the license granted to You under this License. + c. If any provision of this License is invalid or unenforceable under + applicable law, it shall not affect the validity or enforceability of + the remainder of the terms of this License, and without further action + by the parties to this agreement, such provision shall be reformed to + the minimum extent necessary to make such provision valid and + enforceable. + d. No term or provision of this License shall be deemed waived and no + breach consented to unless such waiver or consent shall be in writing + and signed by the party to be charged with such waiver or consent. + e. This License constitutes the entire agreement between the parties with + respect to the Work licensed here. There are no understandings, + agreements or representations with respect to the Work not specified + here. Licensor shall not be bound by any additional provisions that + may appear in any communication from You. This License may not be + modified without the mutual written agreement of the Licensor and You. + f. 
The rights granted under, and the subject matter referenced, in this + License were drafted utilizing the terminology of the Berne Convention + for the Protection of Literary and Artistic Works (as amended on + September 28, 1979), the Rome Convention of 1961, the WIPO Copyright + Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996 + and the Universal Copyright Convention (as revised on July 24, 1971). + These rights and subject matter take effect in the relevant + jurisdiction in which the License terms are sought to be enforced + according to the corresponding provisions of the implementation of + those treaty provisions in the applicable national law. If the + standard suite of rights granted under applicable copyright law + includes additional rights not granted under this License, such + additional rights are deemed to be included in the License; this + License is not intended to restrict the license of any rights under + applicable law. + + +Creative Commons Notice + + Creative Commons is not a party to this License, and makes no warranty + whatsoever in connection with the Work. Creative Commons will not be + liable to You or any party on any legal theory for any damages + whatsoever, including without limitation any general, special, + incidental or consequential damages arising in connection to this + license. Notwithstanding the foregoing two (2) sentences, if Creative + Commons has expressly identified itself as the Licensor hereunder, it + shall have all rights and obligations of Licensor. + + Except for the limited purpose of indicating to the public that the + Work is licensed under the CCPL, Creative Commons does not authorize + the use by either party of the trademark "Creative Commons" or any + related trademark or logo of Creative Commons without the prior + written consent of Creative Commons. Any permitted use will be in + compliance with Creative Commons' then-current trademark usage + guidelines, as may be published on its website or otherwise made + available upon request from time to time. For the avoidance of doubt, + this trademark restriction does not form part of this License. + + Creative Commons may be contacted at http://creativecommons.org/. + +>>> google_go-querystring_query-53e6ce116135b80d037921a7fdd5138cf32d7a8a + +Copyright (c) 2013 Google. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> google_uuid-f3f4b54b2fabcf1f11dcc939025bb0c109b00ed8 + +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +ADDITIONAL LICENSE INFORMATION: + +> BSD Style + +uuid-f3f4b54b2fabcf1f11dcc939025bb0c109b00ed8.tar.gz\uuid-f3f4b54b2fabcf1f11dcc939025bb0c109b00ed8.tar\uuid-f3f4b54b2fabcf1f11dcc939025bb0c109b00ed8\dce.go + +Copyright 2016 Google Inc. All rights reserved. +Use of this source code is governed by a BSD-style +license that can be found in the LICENSE file. + +>>> googleapis_gax-go-da06d194a00e19ce00d9011a13931c3f6f6887c7 + +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> gorilla_context-1c83b3eabd45b6d76072b66b746c20815fb2872d + +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> gorilla_mux-acf3be1b335c8ce30b2c8d51300984666f0ceefa + +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> gorilla_securecookie-fa5329f913702981df43dcb2a380bac429c810b5 + +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> gorilla_sessions-ca9ada44574153444b00d3fd9c8559e4cc95f896 + +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> gorilla_websocket-a91eba7f97777409bc2c443f5534d41dd20c5720 + +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> graylog2_go-gelf_gelf-master + +Copyright 2012 SocialCode + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +>>> grpc-b1a2821ca5a4fd6b6e48ddfbb7d6d7584d839d21 + +Copyright 2014, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the gRPC project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of gRPC, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of gRPC. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of gRPC or any code incorporated within this +implementation of gRPC constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of gRPC +shall terminate as of the date such litigation is filed. + +>>> hpcloud_tail-a30252cb686a21eb2d0b98132633053ec2f7f1e5 + +The MIT License (MIT) + +© Copyright 2015 Hewlett Packard Enterprise Development LP +Copyright (c) 2014 ActiveState + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +ADDITIONAL LICENSE INFORMATION: + +> BSD-3 clause + +tail-a30252cb686a21eb2d0b98132633053ec2f7f1e5.tar.gz\tail-a30252cb686a21eb2d0b98132633053ec2f7f1e5.tar\tail-a30252cb686a21eb2d0b98132633053ec2f7f1e5\vendor\gopkg.in\fsnotify.v1\LICENSE + +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> jessevdk_go-flags-97448c91aac742cbca3d020b3e769013a420a06f + +Copyright (c) 2012 Jesse van den Kieboom. All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> julienschmidt/httprouter-975b5c4c7c21c0e3d2764200bf2aa8e34657ae6e + +Copyright (c) 2013 Julien Schmidt. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +The names of the contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL JULIEN SCHMIDT BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> justinas/alice-1051eaf52fcafdd87ead59d28b065f1fcb8274ec + +The MIT License (MIT) + +Copyright (c) 2014 Justinas Stankevicius + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +>>> kr_pty-f7ee69f31298ecbe5d2b349c711e2547a617d398 + +Copyright (c) 2011 Keith Rarick + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall +be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS +OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +>>> mailru_easyjson-99e922cf9de1bc0ab38310c277cff32c2147e747 + +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +>>> martini-contrib/render-ec18f8345a1181146728238980606fb1d6f40e8c + +The MIT License (MIT) + +Copyright (c) 2013 Jeremy Saenz + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +>>> matryer_resync-d39c09a11215c84aab0b65e323fc47dd6e276af1 + +The MIT License (MIT) + +Copyright (c) 2016 Mat Ryer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +>>> mattn_go-runewidth-14207d285c6c197daabb5c9793d63e7af9ab2d50 + +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +>>> mdlayher/arp-7140e8e22da120cb3efc8e1096f299933624024e + +MIT License +=========== + +Copyright (C) 2015 Matt Layher + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +ADDITIONAL LICENSE INFORMATION: + +> BSD-3 Clause + +arp-master.zip\arp-master\client.go + +Copyright (c) 2012 The Go Authors. All rights reserved. +Source code in this file is based on src/net/interface_linux.go, +from the Go standard library. The Go license can be found here: +https://golang.org/LICENSE. + +>>> mdlayher/ethernet-e72cf8343052938936194daba1914fd1bb33d2fe + +MIT License +=========== + +Copyright (C) 2015 Matt Layher + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +>>> mdlayher/raw-c83864e487652c25edc58048eafdc1dfa2f142c7 + +MIT License +=========== + +Copyright (C) 2015 Matt Layher + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +ADDITIONAL LICENSE INFORMATION: + +> BSD- 3 Clause + +raw-master.zip\raw-master\raw.go + +Copyright (c) 2012 The Go Authors. All rights reserved. +Source code in this file is based on src/net/interface_linux.go, +from the Go standard library. The Go license can be found here: +https://golang.org/LICENSE. 
+ +>>> microsoft_go-winio-24a3e3d3fc7451805e09d11e11e95d9a0a4f205e + +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +ADDITIONAL LICENSE INFORMATION: + +> BSD-Style + +go-winio-24a3e3d3fc7451805e09d11e11e95d9a0a4f205e.tar.gz\go-winio-24a3e3d3fc7451805e09d11e11e95d9a0a4f205e.tar\go-winio-24a3e3d3fc7451805e09d11e11e95d9a0a4f205e\archive\tar\LICENSE + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +>>> microsoft_hcsshim-d327ca738085de7d617aa1df16d98fe7a64c2455 + +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +>>> miekg_dns-ca336a1f95a6b89be9c250df26c7a41742eb4a6f + +Extensions of the original work are copyright (c) 2011 Miek Gieben + +As this is fork of the official Go code the same license applies: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +>>> mitchellh_go-wordwrap-ad45545899c7b13c020ea92b2072220eefad42b8 + +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +>>> mitchellh_mapstructure-5a0325d7fafaac12dda6e7fb8bd222ec1b69875e + +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +>>> mreiferson_go-httpclient-31f0106b4474f14bc441575c19d3a5fa21aa1f6c + +The MIT License (MIT) + +Copyright (c) 2012 Matt Reiferson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +>>> naoina_denco-9af2ba0e24214bac003821f4a501b131b2e04c75 + +Copyright (c) 2014 Naoya Inada + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +>>> nlopes_slack-f243c7602fdf906248fc1f165284daa4e4d2d09e + +Copyright (c) 2015, Norberto Lopes +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> nsf_termbox-go-91bae1bb5fa9ee504905ecbe7043fa30e92feaa3 + +Copyright (C) 2012 termbox-go authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +>>> opennota_urlesc-5fa9ff0392746aeae1c4b37fcc42c65afa7a9587 + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> pkg_profile-8a808a6967b79da66deacfe508b26d398a69518f + +Copyright (c) 2013 Dave Cheney. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> puerkitobio_purell-d69616f51cdfcd7514d6a380847a152dfc2a749d + +Copyright (c) 2012, Martin Angers +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> racksec_srslog-a974ba6f7fb527d2ddc73ee9c05d3e2ccc0af0dc + +Copyright (c) 2015 Rackspace. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +>>> rs/cors-8dd4211afb5d08dbb39a533b9bb9e4b486351df6 + +Copyright (c) 2014 Olivier Poitrey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +>>> ryanuber_go-glob-256dc444b735e061061cf46c809487313d5b0065 + +The MIT License (MIT) + +Copyright (c) 2014 Ryan Uber + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +>>> sirupsen_logrus-d26492970760ca5d33129d2d799e34be5c4782eb + +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +ADDITIONAL LICENSE INFORMATION: + +>BSD – Style + +logrus-master.zip\logrus-master\terminal_notwindows.go + +Copyright 2011 The Go Authors. All rights reserved. +Use of this source code is governed by a BSD-style +license that can be found in the LICENSE file. + +>>> smartystreets_assertions-287b4346dc4e71a038c346375a9d572453bc469b + +Copyright (c) 2015 SmartyStreets, LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +NOTE: Various optional and subordinate components carry their own licensing +requirements and restrictions. Use of those components is subject to the terms +and conditions outlined the respective license of each component. + +ADDITIONAL LICENSE INFORMATION: + +> Apache 2.0 + +assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar.gz\assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar\assertions-287b4346dc4e71a038c346375a9d572453bc469b\internal\reqtrace\reqtrace.go + + Copyright 2015 Google Inc. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http:www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +> BSD 3 + +assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar.gz\assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar\assertions-287b4346dc4e71a038c346375a9d572453bc469b\internal\go-render\LICENSE + + Copyright (c) 2015 The Chromium Authors. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> smartystreets_assertions_internal_oglematchers-287b4346dc4e71a038c346375a9d572453bc469b + +Copyright (c) 2015 SmartyStreets, LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +NOTE: Various optional and subordinate components carry their own licensing +requirements and restrictions. Use of those components is subject to the terms +and conditions outlined the respective license of each component. + + +ADDITIONAL LICENSE INFORMATION: + +> Apache 2.0 + +assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar.gz\assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar\assertions-287b4346dc4e71a038c346375a9d572453bc469b\internal\reqtrace\reqtrace.go + +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http:www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +> BSD-3 + +assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar.gz\assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar\assertions-287b4346dc4e71a038c346375a9d572453bc469b\internal\go-render\LICENSE + +Copyright (c) 2015 The Chromium Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> smartystreets_assertions_internal_oglemock-287b4346dc4e71a038c346375a9d572453bc469b + +Copyright (c) 2015 SmartyStreets, LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +NOTE: Various optional and subordinate components carry their own licensing +requirements and restrictions. Use of those components is subject to the terms +and conditions outlined the respective license of each component. + + +ADDITIONAL LICENSE INFORMATION: + +> BSD-3 Clause + +assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar.gz\assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar\assertions-287b4346dc4e71a038c346375a9d572453bc469b\internal\go-render\LICENSE + + +Copyright (c) 2015 The Chromium Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +> Apache 2.0 + +assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar.gz\assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar\assertions-287b4346dc4e71a038c346375a9d572453bc469b\internal\oglematchers\all_of.go + +Copyright 2011 Aaron Jacobs. All Rights Reserved. +Author: aaronjjacobs@gmail.com (Aaron Jacobs) + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http:www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +>>> smartystreets_assertions_internal_ogletest-287b4346dc4e71a038c346375a9d572453bc469b + +Copyright (c) 2015 SmartyStreets, LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +NOTE: Various optional and subordinate components carry their own licensing +requirements and restrictions. Use of those components is subject to the terms +and conditions outlined the respective license of each component. 
+ + +ADDITIONAL LICENSE INFORMATION: + +> Apache 2.0 + +assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar.gz\assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar\assertions-287b4346dc4e71a038c346375a9d572453bc469b\internal\reqtrace\reqtrace.go + +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http:www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + +> BSD-3 + +assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar.gz\assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar\assertions-287b4346dc4e71a038c346375a9d572453bc469b\internal\go-render\LICENSE + +Copyright (c) 2015 The Chromium Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> smartystreets_assertions_internal_reqtrace-287b4346dc4e71a038c346375a9d572453bc469b + +Copyright (c) 2015 SmartyStreets, LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +NOTE: Various optional and subordinate components carry their own licensing +requirements and restrictions. Use of those components is subject to the terms +and conditions outlined the respective license of each component. + + +ADDITIONAL LICENSE INFORMATION: + +> BSD-3 + +assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar.gz\assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar\assertions-287b4346dc4e71a038c346375a9d572453bc469b\internal\go-render\LICENSE + +Copyright (c) 2015 The Chromium Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +> Apache 2.0 + +assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar.gz\assertions-287b4346dc4e71a038c346375a9d572453bc469b.tar\assertions-287b4346dc4e71a038c346375a9d572453bc469b\internal\oglematchers\transform_description.go + +Copyright 2011 Aaron Jacobs. All Rights Reserved. +Author: aaronjjacobs@gmail.com (Aaron Jacobs) + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http:www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +>>> stretchr_objx-1a9d0bb9f541897e62256577b352fdbc1fb4fd94 + +objx - by Mat Ryer and Tyler Bunnell + +The MIT License (MIT) + +Copyright (c) 2014 Stretchr, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +>>> tomb_v1-dd632973f1e7218eb1089048e0798ec9ae7dceb8 + +tomb - support for clean goroutine termination in Go. + +Copyright (c) 2010-2011 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> tylerb/graceful.v1-4654dfbb6ad53cb5e27f37d99b02e16c1872fbbb + +The MIT License (MIT) + +Copyright (c) 2014 Tyler Bunnell + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +ADDITIONAL LICENSE INFORMATION: + +> Apache 2.0 + +graceful-1.2.15.zip\graceful-1.2.15\limit_listen.go + + +Copyright 2013 The etcd Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +>>> tylerb_graceful-d7d7a205e779a4738d38eda0671fff8bfc965f14 + +The MIT License (MIT) + +Copyright (c) 2014 Tyler Bunnell + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +ADDITIONAL LICENSE INFORMATION: + + + +> Apache 2.0 + + +graceful-d7d7a205e779a4738d38eda0671fff8bfc965f14.tar.gz\graceful-d7d7a205e779a4738d38eda0671fff8bfc965f14.tar\graceful-d7d7a205e779a4738d38eda0671fff8bfc965f14\limit_listen.go + + +Copyright 2013 The etcd Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http:www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +>>> ugorji_go_codec-708a42d246822952f38190a8d8c4e6b16a0e600c + +The MIT License (MIT) + +Copyright (c) 2012-2015 Ugorji Nwoke. +All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +>>> urfave/negroni-ccc4a14984828dcb944e6f17fa4a967b18624f2b + +The MIT License (MIT) + +Copyright (c) 2014 Jeremy Saenz + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +>>> urfave_cli_v1-0bdeddeeb0f650497d603c4ad7b20cfe685682f6 + +MIT License + +Copyright (c) 2016 Jeremy Saenz & Contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +>>> x_crypto_acme-728b753d0135da6801d45a38e6f43ff55779c5c2 + +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +ADDITIONAL LICENSE INFORMATION: + +> MIT + +crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar.gz\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2\pkcs12\internal\rc2\rc2.go + +This code is licensed under the MIT license. + +>>> x_crypto_curve25519-728b753d0135da6801d45a38e6f43ff55779c5c2 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + + +ADDITIONAL LICENSE INFORMATION: + +> Public Domain + +crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar.gz\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2\curve25519\square_amd64.s + +This code was translated into a form compatible with 6a from the public +domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + + +> MIT + +crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar.gz\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2\pkcs12\internal\rc2\rc2.go + +https://www.ietf.org/rfc/rfc2268.txt +http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf + +This code is licensed under the MIT license. + +>>> x_crypto_ed25519-728b753d0135da6801d45a38e6f43ff55779c5c2 + +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +ADDITIONAL LICENSE INFORMATION: + +> MIT + +crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar.gz\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2\pkcs12\internal\rc2\rc2.go + +This code is licensed under the MIT license. + + +> PUBLIC DOMAIN + +crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar.gz\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2\twofish\twofish.go + +This code is a port of the LibTom C implementation. +See http://libtom.org/?page=features&newsitems=5&whatfile=crypt. +LibTomCrypt is free for all purposes under the public domain. +It was heavily inspired by the go blowfish package. 
+ +>>> x_crypto_nacl_secretbox-728b753d0135da6801d45a38e6f43ff55779c5c2 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +ADDITIONAL LICENSE INFORMATION: + +> MIT + +crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar.gz\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2\pkcs12\internal\rc2\rc2.go + +This code is licensed under the MIT license. + +>>> x_crypto_poly1305-728b753d0135da6801d45a38e6f43ff55779c5c2 + +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + + +ADDITIONAL LICENSE INFORMATION: + +> Public Domain + +crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar.gz\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2\curve25519\square_amd64.s + +This code was translated into a form compatible with 6a from the public +domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + + +> MIT + +crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar.gz\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2\pkcs12\internal\rc2\rc2.go + +https://www.ietf.org/rfc/rfc2268.txt +http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf + +This code is licensed under the MIT license. 
+ +>>> x_crypto_salsa20_salsa-728b753d0135da6801d45a38e6f43ff55779c5c2 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + + + +ADDITIONAL LICENSE INFORMATION: + + +> Public Domain + +crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar.gz\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2\ed25519\internal\edwards25519\const.go + + +These values are from the public domain + +>>> x_crypto_ssh-728b753d0135da6801d45a38e6f43ff55779c5c2 + +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + + +ADDITIONAL LICENSE INFORMATION: + +> Public Domain + +crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar.gz\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2\curve25519\square_amd64.s + +This code was translated into a form compatible with 6a from the public +domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + + +> MIT + +crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar.gz\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2.tar\crypto-728b753d0135da6801d45a38e6f43ff55779c5c2\pkcs12\internal\rc2\rc2.go + +https://www.ietf.org/rfc/rfc2268.txt +http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf + +This code is licensed under the MIT license. 
+ +>>> x_net_context-a6577fac2d73be281a500b310739095313165611 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + + + +ADDITIONAL LICENSE INFORMATION: + + +> BSD 2 Clause + +net-a6577fac2d73be281a500b310739095313165611.tar.gz\net-a6577fac2d73be281a500b310739095313165611.tar\net-a6577fac2d73be281a500b310739095313165611\html\testdata\webkit\README + + +The *.dat files in this directory are copied from The WebKit Open Source +Project, specifically $WEBKITROOT/LayoutTests/html5lib/resources. +WebKit is licensed under a BSD style license. +http://webkit.org/coding/bsd-license.html says: + +Copyright (C) 2009 Apple Inc. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> x_net_http2-a6577fac2d73be281a500b310739095313165611 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. 
This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + + + +ADDITIONAL LICENSE INFORMATION: + +> BSD2 + +net-a6577fac2d73be281a500b310739095313165611.tar.gz\net-a6577fac2d73be281a500b310739095313165611.tar\net-a6577fac2d73be281a500b310739095313165611\html\testdata\webkit\README + +Copyright (C) 2009 Apple Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> x_net_idna-a6577fac2d73be281a500b310739095313165611 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +ADDITIONAL LICENSE INFORMATION: + +> W3C 3-clause BSD License + +net-a6577fac2d73be281a500b310739095313165611.tar.gz\net-a6577fac2d73be281a500b310739095313165611.tar\net-a6577fac2d73be281a500b310739095313165611\html\charset\testdata\README + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE W3C 3-clause BSD License. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +These test cases come from +http://www.w3.org/International/tests/repository/html5/the-input-byte-stream/results-basics + +Distributed under both the W3C Test Suite License +(http://www.w3.org/Consortium/Legal/2008/04-testsuite-license) +and the W3C 3-clause BSD License +(http://www.w3.org/Consortium/Legal/2008/03-bsd-license). +To contribute to a W3C Test Suite, see the policies and contribution +forms (http://www.w3.org/2004/10/27-testcases). + +> BSD-2 clause + +net-a6577fac2d73be281a500b310739095313165611.tar.gz\net-a6577fac2d73be281a500b310739095313165611.tar\net-a6577fac2d73be281a500b310739095313165611\html\testdata\webkit\README + +The *.dat files in this directory are copied from The WebKit Open Source +Project, specifically $WEBKITROOT/LayoutTests/html5lib/resources. +WebKit is licensed under a BSD style license. +http://webkit.org/coding/bsd-license.html says: + +Copyright (C) 2009 Apple Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> x_net_internal_timeseries-a6577fac2d73be281a500b310739095313165611 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + + +ADDITIONAL LICENSE INFORMATION: + +> BSD-2 + +net-a6577fac2d73be281a500b310739095313165611.tar.gz\net-a6577fac2d73be281a500b310739095313165611.tar\net-a6577fac2d73be281a500b310739095313165611\html\testdata\webkit\README + +The *.dat files in this directory are copied from The WebKit Open Source +Project, specifically $WEBKITROOT/LayoutTests/html5lib/resources. +WebKit is licensed under a BSD style license. +http://webkit.org/coding/bsd-license.html says: + +Copyright (C) 2009 Apple Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> x_net_lex_httplex-a6577fac2d73be281a500b310739095313165611 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +>>> x_net_proxy-a6577fac2d73be281a500b310739095313165611 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + + + +ADDITIONAL LICENSE INFORMATION: + +> BSD-2 + +net-a6577fac2d73be281a500b310739095313165611.tar.gz\net-a6577fac2d73be281a500b310739095313165611.tar\net-a6577fac2d73be281a500b310739095313165611\html\testdata\webkit\README + +The *.dat files in this directory are copied from The WebKit Open Source +Project, specifically $WEBKITROOT/LayoutTests/html5lib/resources. +WebKit is licensed under a BSD style license. +http://webkit.org/coding/bsd-license.html says: + +Copyright (C) 2009 Apple Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> x_net_trace-a6577fac2d73be281a500b310739095313165611 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +>>> x_net_websocket-a6577fac2d73be281a500b310739095313165611 + +License: BSD-3 clause with Google Patents + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +ADDITIONAL LICENSE INFORMATION: + +> BSD-2 clause + +net-a6577fac2d73be281a500b310739095313165611.tar.gz\net-a6577fac2d73be281a500b310739095313165611.tar\net-a6577fac2d73be281a500b310739095313165611\html\testdata\webkit\README + +Copyright (C) 2009 Apple Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> x_oauth2-1611bb46e67abc64a71ecc5c3ae67f1cbbc2b921 + +Copyright (c) 2009 The oauth2 Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>>> x_sync_singleflight-5a06fca2c336a4b2b2fcb45702e8c47621b2aa2c + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +>>> x_sys_unix-99f16d856c9836c42d24e7ab64ea72916925fa97 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +>>> x_sys_windows-99f16d856c9836c42d24e7ab64ea72916925fa97 + +License: BSD-3 clause with Google Patents + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. 
+ +>>> x_text_internal-f28f36722d5ef2f9655ad3de1f248e3e52ad5ebd + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +>>> x_text_language-f28f36722d5ef2f9655ad3de1f248e3e52ad5ebd + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +>>> x_text_message-f28f36722d5ef2f9655ad3de1f248e3e52ad5ebd + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +>>> x_tools_go_ast_astutil-381149a2d6e5d8f319ccf04bfefc71e03a78b868 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + + + +ADDITIONAL LICENSE INFORMATION: + +> MIT + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\cmd\goyacc\yacc.go + + Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. + Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) + Portions Copyright © 1997-1999 Vita Nuova Limited + Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) + Portions Copyright © 2004,2006 Bruce Ellis + Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) + Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others + Portions Copyright © 2009 The Go Authors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +> MIT + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\godoc\static\jquery.treeview.js + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE MIT LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] 
+ +Copyright (c) 2007 Jörn Zaefferer + +Dual licensed under the MIT and GPL licenses: +http://www.opensource.org/licenses/mit-license.php +http://www.gnu.org/licenses/gpl.html + + +> Apache 2.0 + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\third_party\typescript\typescript.js + +Copyright (c) Microsoft Corporation. All rights reserved. +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at http://www.apache.org/licenses/LICENSE-2.0 + +THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +MERCHANTABLITY OR NON-INFRINGEMENT. + +See the Apache Version 2.0 License for specific language governing permissions +and limitations under the License. + +>>> x_tools_go_buildutil-381149a2d6e5d8f319ccf04bfefc71e03a78b868 + +License: BSD-3 clause with Google Patents + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +ADDITIONAL LICENSE INFORMATION: + +> MIT + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\third_party\moduleloader\ + +Copyright (c) 2013-2016 Guy Bedford, Luke Hoban, Addy Osmani + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +> Apache 2.0 + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\third_party\typescript\typescript.js + +Copyright (c) Microsoft Corporation. All rights reserved. +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at http://www.apache.org/licenses/LICENSE-2.0 + +THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +MERCHANTABLITY OR NON-INFRINGEMENT. + +See the Apache Version 2.0 License for specific language governing permissions +and limitations under the License. + +>>> x_tools_go_gcexportdata-381149a2d6e5d8f319ccf04bfefc71e03a78b868 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + + + +ADDITIONAL LICENSE INFORMATION: + +> MIT + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\cmd\goyacc\yacc.go + + Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. + Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) + Portions Copyright © 1997-1999 Vita Nuova Limited + Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) + Portions Copyright © 2004,2006 Bruce Ellis + Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) + Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others + Portions Copyright © 2009 The Go Authors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +> MIT + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\godoc\static\jquery.treeview.js + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE MIT LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Copyright (c) 2007 Jörn Zaefferer + +Dual licensed under the MIT and GPL licenses: +http://www.opensource.org/licenses/mit-license.php +http://www.gnu.org/licenses/gpl.html + + +> Apache 2.0 + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\third_party\typescript\typescript.js + +Copyright (c) Microsoft Corporation. All rights reserved. +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at http://www.apache.org/licenses/LICENSE-2.0 + +THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +MERCHANTABLITY OR NON-INFRINGEMENT. + +See the Apache Version 2.0 License for specific language governing permissions +and limitations under the License. + +>>> x_tools_go_gcimporter15-381149a2d6e5d8f319ccf04bfefc71e03a78b868 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +* Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + + +> MIT + + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\cmd\goyacc\yacc.go + + +Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +Portions Copyright © 1997-1999 Vita Nuova Limited +Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +Portions Copyright © 2004,2006 Bruce Ellis +Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +Portions Copyright © 2009 The Go Authors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +> CC Attribution 3.0 + + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\cmd\present\templates\dir.tmpl + + +Except as noted, +the content of this page is licensed under the +Creative Commons Attribution 3.0 License, +and code is licensed under a BSD license.
    +Terms of Service | +Privacy Policy + + + +> BSD + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\cmd\present\templates\dir.tmpl + +code is licensed under a BSD license.
    +Terms of Service | +Privacy Policy + + + +> Apache 2.0 + + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\godoc\short\short.go + + +Copyright 2015 The Go Authors. All rights reserved. +Use of this source code is governed by the Apache 2.0 +license that can be found in the LICENSE file. + ++build appengine + +Package short implements a simple URL shortener, serving an administrative +interface at /s and shortened urls from /s/key. +It is designed to run only on the instance of godoc that serves golang.org. +package short + + +> MIT + + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\godoc\static\jquery.treeview.js + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE MIT LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + + +Copyright (c) 2007 Jörn Zaefferer + +Dual licensed under the MIT and GPL licenses: +http://www.opensource.org/licenses/mit-license.php +http://www.gnu.org/licenses/gpl.html + +>>> x_tools_go_loader-381149a2d6e5d8f319ccf04bfefc71e03a78b868 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. 
This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + + + +ADDITIONAL LICENSE INFORMATION: + +> MIT + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\cmd\goyacc\yacc.go + + Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. + Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) + Portions Copyright © 1997-1999 Vita Nuova Limited + Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) + Portions Copyright © 2004,2006 Bruce Ellis + Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) + Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others + Portions Copyright © 2009 The Go Authors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +> MIT + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\godoc\static\jquery.treeview.js + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE MIT LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Copyright (c) 2007 Jörn Zaefferer + +Dual licensed under the MIT and GPL licenses: +http://www.opensource.org/licenses/mit-license.php +http://www.gnu.org/licenses/gpl.html + + +> Apache 2.0 + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\third_party\typescript\typescript.js + +Copyright (c) Microsoft Corporation. All rights reserved. +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. 
You may obtain a copy of the +License at http://www.apache.org/licenses/LICENSE-2.0 + +THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +MERCHANTABLITY OR NON-INFRINGEMENT. + +See the Apache Version 2.0 License for specific language governing permissions +and limitations under the License. + +>>> x_tools_imports-381149a2d6e5d8f319ccf04bfefc71e03a78b868 + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. 
+ +ADDITIONAL LICENSE INFORMATION: + + +> Apache 2.0 + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\third_party\typescript\typescript.js + +Copyright (c) Microsoft Corporation. All rights reserved. +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at http://www.apache.org/licenses/LICENSE-2.0 + +THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +MERCHANTABLITY OR NON-INFRINGEMENT. + +See the Apache Version 2.0 License for specific language governing permissions +and limitations under the License. + +> MIT + +tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar.gz\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868.tar\tools-381149a2d6e5d8f319ccf04bfefc71e03a78b868\third_party\moduleloader\LICENSE + +Copyright (c) 2013-2016 Guy Bedford, Luke Hoban, Addy Osmani + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +>>> zenazn/goji-c05078ca81941f8e801bba3ddc0e6b86b7fdc893 + +Copyright (c) 2014, 2015, 2016 Carl Jackson (carl@avtok.com) + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +--------------- SECTION 3: Mozilla Public License, V2.0 ---------- + +Mozilla Public License, V2.0 is applicable to the following component(s). 
+ + +>>> d2g_dhcp4client-master + +LICENSE: MPL 2.0 + +>>> hashicorp_go-cleanhttp-875fb671b3ddc66f8e2f0acc33829c8cb989a38d + +LICENSE: MPL 2.0 + +>>> hashicorp_memberlist-cef12ad58224d55cf26caa9e3d239c2fcb3432a2 + +License: MPL 2.0 + +====================PART 2. VIRTUAL APPLIANCE: OPERATING SYSTEM LAYER ==================== + + +--------------- SECTION 1: Academic Free License, V2.1 ---------- + +Academic Free License, V2.1 is applicable to the following component(s). + + +>>> dbus-1.11.12-1.ph2 + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE AFL 2.1 LICENSE. PLEASE SEE APPENDIX FOR THE FULL TEXT OF THE AFL 2.1 LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +D-Bus is licensed to you under your choice of the Academic Free +License version 2.1, or the GNU General Public License version 2 +(or, at your option any later version). + +Both licenses are included here. Some of the standalone binaries are +under the GPL only; in particular, but not limited to, +tools/dbus-cleanup-sockets.c and test/decode-gcov.c. Each source code +file is marked with the proper copyright information - if you find a +file that isn't marked please bring it to our attention. + + +ADDITIONAL LICENSE INFORMATION: + +> BSD-3 + +dbus-1.11.12.tar.gz\dbus-1.11.12.tar\dbus-1.11.12\cmake\modules\COPYING-CMAKE-SCRIPTS + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +> Sun Microsystems License (MIT-Style) + +dbus-1.11.12.tar.gz\dbus-1.11.12.tar\dbus-1.11.12\dbus\dbus-hash.c + +Copyright (c) 1991-1993 The Regents of the University of California. +Copyright (c) 1994 Sun Microsystems, Inc. + +This software is copyrighted by the Regents of the University of +California, Sun Microsystems, Inc., Scriptics Corporation, and +other parties. The following terms apply to all files associated +with the software unless explicitly disclaimed in individual files. + +The authors hereby grant permission to use, copy, modify, +distribute, and license this software and its documentation for any +purpose, provided that existing copyright notices are retained in +all copies and that this notice is included verbatim in any +distributions. 
No written agreement, license, or royalty fee is +required for any of the authorized uses. Modifications to this +software may be copyrighted by their authors and need not follow +the licensing terms described here, provided that the new terms are +clearly indicated on the first page of each file where they apply. + +IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY +PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, +OR ANY DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND +NON-INFRINGEMENT. THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, +AND THE AUTHORS AND DISTRIBUTORS HAVE NO OBLIGATION TO PROVIDE +MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + +GOVERNMENT USE: If you are acquiring this software on behalf of the +U.S. government, the Government shall have only "Restricted Rights" +in the software and related documentation as defined in the Federal +Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you +are acquiring the software on behalf of the Department of Defense, +the software shall be classified as "Commercial Computer Software" +and the Government shall have only "Restricted Rights" as defined +in Clause 252.227-7013 (c) (1) of DFARs. Notwithstanding the +foregoing, the authors grant the U.S. Government and others acting +in its behalf permission to use and distribute the software in +accordance with the terms specified in this license. + + +> MIT + +dbus-1.11.12.tar.gz\dbus-1.11.12.tar\dbus-1.11.12\dbus\dbus-server-launchd.c + +Copyright (C) 2007, Tanner Lovelace +Copyright (C) 2008, Colin Walters +Copyright (C) 2008-2009, Benjamin Reed +Copyright (C) 2009, Jonas Bähr + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + + +> GPL 2.0 + +dbus-1.11.12.tar.gz\dbus-1.11.12.tar\dbus-1.11.12\doc\introspect.dtd + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE GPL 2.0 LICENSE. PLEASE SEE APPENDIX FOR THE FULL TEXT OF THE GPL 2.0 LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +(C) 2005-02-02 David A. 
Wheeler; released under the D-Bus licenses, +GNU GPL version 2 (or greater) and AFL 1.1 (or greater) + + +> LGPL 3.0 + +dbus-1.11.12.tar.gz\dbus-1.11.12.tar\dbus-1.11.12\tools\dbus-cleanup-sockets.c + +Copyright (C) 2003 Red Hat, Inc. +Copyright (C) 2002 Michael Meeks + +Note that this file is NOT licensed under the Academic Free License, +as it is based on linc-cleanup-sockets which is LGPL. + + +> GPL 2.0 + +dbus-1.11.12.tar.gz\dbus-1.11.12.tar\dbus-1.11.12\tools\dbus-echo.c + +Copyright © 2003 Philip Blundell +Copyright © 2011 Nokia Corporation +Copyright © 2014 Collabora Ltd. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +--------------- SECTION 2: Apache License, V2.0 ---------- + +Apache License, V2.0 is applicable to the following component(s). + + +>>> photon-release-2.0-2.ph2 + +License: Apache License + +--------------- SECTION 3: BSD-STYLE, MIT-STYLE, OR SIMILAR STYLE LICENSES ---------- + +BSD-STYLE, MIT-STYLE, OR SIMILAR STYLE LICENSES are applicable to the following component(s). + + +>>> bzip2-1.0.6-8.ph2 + +This program, "bzip2", the associated library "libbzip2", and all +documentation, are copyright (C) 1996-2010 Julian R Seward. All +rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. The origin of this software must not be misrepresented; you must +not claim that you wrote the original software. If you use this +software in a product, an acknowledgment in the product +documentation would be appreciated but is not required. + +3. Altered source versions must be plainly marked as such, and must +not be misrepresented as being the original software. + +4. The name of the author may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +Julian Seward, jseward@bzip.org +bzip2/libbzip2 version 1.0.6 of 6 September 2010 + +ADDITIONAL LICENSE INFORMATION : + +> GPL 3.0 + +bzip2_1.0.6-1.debian.tar.bz2\bzip2_1.0.6-1.debian.tar\debian\copyright.in + +This package (bzip2) was created by Philippe Troin . +It is currently mantained by Anibal Monsalve Salazar . +This package is Copyright (C) 1999, 2000, 2001, 2002 Philippe Troin + and Copyright (C) 2004-2007 Anibal Monsalve Salazar. +It is licensed under the GNU General Public License which can be +found in /usr/share/common-licenses/GPL. + +It was downloaded from +http://www.bzip.org/downloads.html + +For more information about bzip2, please visit: +http://www.bzip.org/ + +>>> curl-7.59.0-4.ph2 + +COPYRIGHT AND PERMISSION NOTICE + +Copyright (c) 1996 - 2018, Daniel Stenberg, , and many +contributors, see the THANKS file. + +All rights reserved. + +Permission to use, copy, modify, and distribute this software for any purpose +with or without fee is hereby granted, provided that the above copyright +notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN +NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. + +Except as contained in this notice, the name of a copyright holder shall not +be used in advertising or otherwise to promote the sale, use or other dealings +in this Software without prior written authorization of the copyright holder. + +ADDITIONAL LICENSE INFORMATION: + +> Openssl + +curl-7.59.0-4.ph2.src.rpm\curl-7.59.0-4.ph2.src.cpio\curl-7.59.0.tar.gz\curl-7.59.0.tar\curl-7.59.0\docs\examples\curlx.c + +Copyright (c) 2003 The OpenEvidence Project. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions, the following disclaimer, +and the original OpenSSL and SSLeay Licences below. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions, the following disclaimer +and the original OpenSSL and SSLeay Licences below in +the documentation and/or other materials provided with the +distribution. + +3. All advertising materials mentioning features or use of this +software must display the following acknowledgments: +"This product includes software developed by the Openevidence Project +for use in the OpenEvidence Toolkit. (http://www.openevidence.org/)" +This product includes software developed by the OpenSSL Project +for use in the OpenSSL Toolkit (https://www.openssl.org/)" +This product includes cryptographic software written by Eric Young +(eay@cryptsoft.com). This product includes software written by Tim +Hudson (tjh@cryptsoft.com)." + +4. The names "OpenEvidence Toolkit" and "OpenEvidence Project" must not be +used to endorse or promote products derived from this software without +prior written permission. For written permission, please contact +openevidence-core@openevidence.org. + +5. 
Products derived from this software may not be called "OpenEvidence" +nor may "OpenEvidence" appear in their names without prior written +permission of the OpenEvidence Project. + +6. Redistributions of any form whatsoever must retain the following +acknowledgments: +"This product includes software developed by the OpenEvidence Project +for use in the OpenEvidence Toolkit (http://www.openevidence.org/) +This product includes software developed by the OpenSSL Project +for use in the OpenSSL Toolkit (https://www.openssl.org/)" +This product includes cryptographic software written by Eric Young +(eay@cryptsoft.com). This product includes software written by Tim +Hudson (tjh@cryptsoft.com)." + +THIS SOFTWARE IS PROVIDED BY THE OpenEvidence PROJECT ``AS IS'' AND ANY +EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenEvidence PROJECT OR +ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +This product includes software developed by the OpenSSL Project +for use in the OpenSSL Toolkit (https://www.openssl.org/) +This product includes cryptographic software written by Eric Young +(eay@cryptsoft.com). This product includes software written by Tim +Hudson (tjh@cryptsoft.com). + +> BSD-3 + +curl-7.59.0-4.ph2.src.rpm\curl-7.59.0-4.ph2.src.cpio\curl-7.59.0.tar.gz\curl-7.59.0.tar\curl-7.59.0\docs\examples\fopen.c + +Copyright (c) 2003, 2017 Simtec Electronics + +Re-implemented by Vincent Sanders with extensive +reference to original curl example code + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. The name of the author may not be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +> Public Domain + +curl-7.59.0-4.ph2.src.rpm\curl-7.59.0-4.ph2.src.cpio\curl-7.59.0.tar.gz\curl-7.59.0.tar\curl-7.59.0\lib\md4.c + +Author: +Alexander Peslyak, better known as Solar Designer + +This software was written by Alexander Peslyak in 2001. No copyright is +claimed, and the software is hereby placed in the public domain. In case +this attempt to disclaim copyright and place the software in the public +domain is deemed null and void, then the software is Copyright (c) 2001 +Alexander Peslyak and it is hereby released to the general public under the +following terms: + +Redistribution and use in source and binary forms, with or without +modification, are permitted. + +There's ABSOLUTELY NO WARRANTY, express or implied. + +>>> expat-2.2.4-1.ph2 + +Copyright (c) 1997-2000 Thai Open Source Software Center Ltd + Copyright (c) 2000-2017 Expat development team + Licensed under the MIT license: + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to permit + persons to whom the Software is furnished to do so, subject to the + following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN + NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + USE OR OTHER DEALINGS IN THE SOFTWARE. + +>>> iputils-20151218-4.ph2 + +Copyright (c) 1989 The Regents of the University of California. +All rights reserved. + +This code is derived from software contributed to Berkeley by +Mike Muuss. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. All advertising materials mentioning features or use of this software +must display the following acknowledgement: +This product includes software developed by the University of +California, Berkeley and its contributors. +4. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +ADDITIONAL LICENSE INFORMATION: + + +> GPL 2.0 + +iputils-s20151218.tar.bz2\iputils-s20151218.tar\iputils-s20151218\arping.c + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version +2 of the License, or (at your option) any later version. + +Authors: Alexey Kuznetsov, +YOSHIFUJI Hideaki yoshfuji@linux-ipv6.org + + +> MIT-Style + +iputils-s20151218.tar.bz2\iputils-s20151218.tar\iputils-s20151218\rdisc.c + + +Rdisc (this program) was developed by Sun Microsystems, Inc. and is +provided for unrestricted use provided that this legend is included on +all tape media and as a part of the software program in whole or part. +Users may copy or modify Rdisc without charge, and they may freely +distribute it. + +RDISC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE +WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR +PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE. + +Rdisc is provided with no support and without any obligation on the +part of Sun Microsystems, Inc. to assist in its use, correction, +modification or enhancement. + +SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE +INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY RDISC +OR ANY PART THEREOF. + +In no event will Sun Microsystems, Inc. be liable for any lost revenue +or profits or other special, indirect and consequential damages, even if +Sun has been advised of the possibility of such damages. + +Sun Microsystems, Inc. +2550 Garcia Avenue +Mountain View, California 94043. + +>>> krb5-1.16-2.ph2 + +Copyright (c) 2004 Sun Microsystems, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +ADDITIONAL LICENSE INFORMATION: + + +Copyright (C) 1985-2017 by the Massachusetts Institute of Technology. + +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Downloading of this software may constitute an export of cryptographic +software from the United States of America that is subject to the +United States Export Administration Regulations (EAR), 15 CFR 730-774. +Additional laws or regulations may apply. It is the responsibility of +the person or entity contemplating export to comply with all +applicable export laws and regulations, including obtaining any +required license from the U.S. government. + +The U.S. government prohibits export of encryption source code to +certain countries and individuals, including, but not limited to, the +countries of Cuba, Iran, North Korea, Sudan, Syria, and residents and +nationals of those countries. + +Documentation components of this software distribution are licensed +under a Creative Commons Attribution-ShareAlike 3.0 Unported License. +(http://creativecommons.org/licenses/by-sa/3.0/) + +Individual source code files are copyright MIT, Cygnus Support, +Novell, OpenVision Technologies, Oracle, Red Hat, Sun Microsystems, +FundsXpress, and others. + +Project Athena, Athena, Athena MUSE, Discuss, Hesiod, Kerberos, Moira, +and Zephyr are trademarks of the Massachusetts Institute of Technology +(MIT). No commercial use of these trademarks may be made without +prior written permission of MIT. + +"Commercial use" means use of a name in a product or other for-profit +manner. It does NOT prevent a commercial firm from referring to the +MIT trademarks in order to convey information (although in doing so, +recognition of their trademark status should be given). + +====================================================================== + +The following copyright and permission notice applies to the +OpenVision Kerberos Administration system located in "kadmin/create", +"kadmin/dbutil", "kadmin/passwd", "kadmin/server", "lib/kadm5", and +portions of "lib/rpc": + +Copyright, OpenVision Technologies, Inc., 1993-1996, All Rights +Reserved + +WARNING: Retrieving the OpenVision Kerberos Administration system +source code, as described below, indicates your acceptance of the +following terms. If you do not agree to the following terms, do +not retrieve the OpenVision Kerberos administration system. 
+ +You may freely use and distribute the Source Code and Object Code +compiled from it, with or without modification, but this Source +Code is provided to you "AS IS" EXCLUSIVE OF ANY WARRANTY, +INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY OR +FITNESS FOR A PARTICULAR PURPOSE, OR ANY OTHER WARRANTY, WHETHER +EXPRESS OR IMPLIED. IN NO EVENT WILL OPENVISION HAVE ANY LIABILITY +FOR ANY LOST PROFITS, LOSS OF DATA OR COSTS OF PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES, OR FOR ANY SPECIAL, INDIRECT, OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THIS AGREEMENT, INCLUDING, +WITHOUT LIMITATION, THOSE RESULTING FROM THE USE OF THE SOURCE +CODE, OR THE FAILURE OF THE SOURCE CODE TO PERFORM, OR FOR ANY +OTHER REASON. + +OpenVision retains all copyrights in the donated Source Code. +OpenVision also retains copyright to derivative works of the Source +Code, whether created by OpenVision or by a third party. The +OpenVision copyright notice must be preserved if derivative works +are made based on the donated Source Code. + +OpenVision Technologies, Inc. has donated this Kerberos +Administration system to MIT for inclusion in the standard Kerberos +5 distribution. This donation underscores our commitment to +continuing Kerberos technology development and our gratitude for +the valuable work which has been performed by MIT and the Kerberos +community. + +====================================================================== + +Portions contributed by Matt Crawford "crawdad@fnal.gov" were work +performed at Fermi National Accelerator Laboratory, which is +operated by Universities Research Association, Inc., under contract +DE-AC02-76CHO3000 with the U.S. Department of Energy. + +====================================================================== + +Portions of "src/lib/crypto" have the following copyright: + +Copyright (C) 1998 by the FundsXpress, INC. + +All rights reserved. + +Export of this software from the United States of America may +require a specific license from the United States Government. +It is the responsibility of any person or organization +contemplating export to obtain such a license before exporting. + +WITHIN THAT CONSTRAINT, permission to use, copy, modify, and +distribute this software and its documentation for any purpose and +without fee is hereby granted, provided that the above copyright +notice appear in all copies and that both that copyright notice and +this permission notice appear in supporting documentation, and that +the name of FundsXpress. not be used in advertising or publicity +pertaining to distribution of the software without specific, +written prior permission. FundsXpress makes no representations +about the suitability of this software for any purpose. It is +provided "as is" without express or implied warranty. + +THIS SOFTWARE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED +WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + +====================================================================== + +The implementation of the AES encryption algorithm in +"src/lib/crypto/builtin/aes" has the following copyright: + +Copyright (C) 2001, Dr Brian Gladman "brg@gladman.uk.net", Worcester, UK. +All rights reserved. + +LICENSE TERMS + +The free distribution and use of this software in both source and +binary form is allowed (with or without changes) provided that: + +1. 
distributions of this source code include the above copyright +notice, this list of conditions and the following disclaimer; + +2. distributions in binary form include the above copyright notice, +this list of conditions and the following disclaimer in the +documentation and/or other associated materials; + +3. the copyright holder's name is not used to endorse products +built using this software without specific written permission. + +DISCLAIMER + +This software is provided 'as is' with no explcit or implied +warranties in respect of any properties, including, but not limited +to, correctness and fitness for purpose. + +====================================================================== + +Portions contributed by Red Hat, including the pre-authentication +plug-in framework and the NSS crypto implementation, contain the +following copyright: + +Copyright (C) 2006 Red Hat, Inc. +Portions copyright (C) 2006 Massachusetts Institute of Technology +All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. + +Neither the name of Red Hat, Inc., nor the names of its +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +====================================================================== + +The bundled verto source code is subject to the following license: + +Copyright 2011 Red Hat, Inc. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +====================================================================== + +The MS-KKDCP client implementation has the following copyright: + +Copyright 2013,2014 Red Hat, Inc. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above +copyright notice, this list of conditions and the following +disclaimer. + +2. Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials +provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +====================================================================== + +The implementations of GSSAPI mechglue in GSSAPI-SPNEGO in +"src/lib/gssapi", including the following files: + +lib/gssapi/generic/gssapi_err_generic.et +lib/gssapi/mechglue/g_accept_sec_context.c +lib/gssapi/mechglue/g_acquire_cred.c +lib/gssapi/mechglue/g_canon_name.c +lib/gssapi/mechglue/g_compare_name.c +lib/gssapi/mechglue/g_context_time.c +lib/gssapi/mechglue/g_delete_sec_context.c +lib/gssapi/mechglue/g_dsp_name.c +lib/gssapi/mechglue/g_dsp_status.c +lib/gssapi/mechglue/g_dup_name.c +lib/gssapi/mechglue/g_exp_sec_context.c +lib/gssapi/mechglue/g_export_name.c +lib/gssapi/mechglue/g_glue.c +lib/gssapi/mechglue/g_imp_name.c +lib/gssapi/mechglue/g_imp_sec_context.c +lib/gssapi/mechglue/g_init_sec_context.c +lib/gssapi/mechglue/g_initialize.c +lib/gssapi/mechglue/g_inquire_context.c +lib/gssapi/mechglue/g_inquire_cred.c +lib/gssapi/mechglue/g_inquire_names.c +lib/gssapi/mechglue/g_process_context.c +lib/gssapi/mechglue/g_rel_buffer.c +lib/gssapi/mechglue/g_rel_cred.c +lib/gssapi/mechglue/g_rel_name.c +lib/gssapi/mechglue/g_rel_oid_set.c +lib/gssapi/mechglue/g_seal.c +lib/gssapi/mechglue/g_sign.c +lib/gssapi/mechglue/g_store_cred.c +lib/gssapi/mechglue/g_unseal.c +lib/gssapi/mechglue/g_userok.c +lib/gssapi/mechglue/g_utils.c +lib/gssapi/mechglue/g_verify.c +lib/gssapi/mechglue/gssd_pname_to_uid.c +lib/gssapi/mechglue/mglueP.h +lib/gssapi/mechglue/oid_ops.c +lib/gssapi/spnego/gssapiP_spnego.h +lib/gssapi/spnego/spnego_mech.c + +and the initial implementation of incremental propagation, including +the following new or changed files: + +include/iprop_hdr.h +kadmin/server/ipropd_svc.c +lib/kdb/iprop.x +lib/kdb/kdb_convert.c +lib/kdb/kdb_log.c +lib/kdb/kdb_log.h +lib/krb5/error_tables/kdb5_err.et +slave/kpropd_rpc.c +slave/kproplog.c + +are 
subject to the following license: + +Copyright (C) 2004 Sun Microsystems, Inc. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +====================================================================== + +Kerberos V5 includes documentation and software developed at the +University of California at Berkeley, which includes this copyright +notice: + +Copyright (C) 1983 Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials provided +with the distribution. + +3. Neither the name of the University nor the names of its +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +====================================================================== + +Portions contributed by Novell, Inc., including the LDAP database +backend, are subject to the following license: + +Copyright (C) 2004-2005, Novell, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. + +The copyright holder's name is not used to endorse or promote +products derived from this software without specific prior +written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +====================================================================== + +Portions funded by Sandia National Laboratory and developed by the +University of Michigan's Center for Information Technology +Integration, including the PKINIT implementation, are subject to the +following license: + +COPYRIGHT (C) 2006-2007 +THE REGENTS OF THE UNIVERSITY OF MICHIGAN +ALL RIGHTS RESERVED + +Permission is granted to use, copy, create derivative works and +redistribute this software and such derivative works for any +purpose, so long as the name of The University of Michigan is not +used in any advertising or publicity pertaining to the use of +distribution of this software without specific, written prior +authorization. If the above copyright notice or any other +identification of the University of Michigan is included in any +copy of any portion of this software, then the disclaimer below +must also be included. + +THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION FROM THE +UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY PURPOSE, AND +WITHOUT WARRANTY BY THE UNIVERSITY OF MICHIGAN OF ANY KIND, EITHER +EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. +THE REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE FOR +ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR +CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING OUT OF OR +IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN IF IT HAS BEEN OR +IS HEREAFTER ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +====================================================================== + +The pkcs11.h file included in the PKINIT code has the following +license: + +Copyright 2006 g10 Code GmbH +Copyright 2006 Andreas Jellinghaus + +This file is free software; as a special exception the author gives +unlimited permission to copy and/or distribute it, with or without +modifications, as long as this notice is preserved. + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY, to the extent permitted by law; without even +the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +PURPOSE. + +====================================================================== + +Portions contributed by Apple Inc. are subject to the following +license: + +Copyright 2004-2008 Apple Inc. All Rights Reserved. 
+ +Export of this software from the United States of America may +require a specific license from the United States Government. +It is the responsibility of any person or organization +contemplating export to obtain such a license before exporting. + +WITHIN THAT CONSTRAINT, permission to use, copy, modify, and +distribute this software and its documentation for any purpose and +without fee is hereby granted, provided that the above copyright +notice appear in all copies and that both that copyright notice and +this permission notice appear in supporting documentation, and that +the name of Apple Inc. not be used in advertising or publicity +pertaining to distribution of the software without specific, +written prior permission. Apple Inc. makes no representations +about the suitability of this software for any purpose. It is +provided "as is" without express or implied warranty. + +THIS SOFTWARE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED +WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + +====================================================================== + +The implementations of UTF-8 string handling in src/util/support and +src/lib/krb5/unicode are subject to the following copyright and +permission notice: + +The OpenLDAP Public License +Version 2.8, 17 August 2003 + +Redistribution and use of this software and associated +documentation ("Software"), with or without modification, are +permitted provided that the following conditions are met: + +1. Redistributions in source form must retain copyright statements +and notices, + +2. Redistributions in binary form must reproduce applicable +copyright statements and notices, this list of conditions, and +the following disclaimer in the documentation and/or other +materials provided with the distribution, and + +3. Redistributions must contain a verbatim copy of this document. + +The OpenLDAP Foundation may revise this license from time to time. +Each revision is distinguished by a version number. You may use +this Software under terms of this license revision or under the +terms of any subsequent revision of the license. + +THIS SOFTWARE IS PROVIDED BY THE OPENLDAP FOUNDATION AND ITS +CONTRIBUTORS "AS IS" AND ANY EXPRESSED OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE OPENLDAP FOUNDATION, ITS +CONTRIBUTORS, OR THE AUTHOR(S) OR OWNER(S) OF THE SOFTWARE BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT +OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +The names of the authors and copyright holders must not be used in +advertising or otherwise to promote the sale, use or other dealing +in this Software without specific, written prior permission. Title +to copyright in this Software shall at all times remain with +copyright holders. + +OpenLDAP is a registered trademark of the OpenLDAP Foundation. + +Copyright 1999-2003 The OpenLDAP Foundation, Redwood City, +California, USA. All Rights Reserved. 
Permission to copy and +distribute verbatim copies of this document is granted. + +====================================================================== + +Marked test programs in src/lib/krb5/krb have the following copyright: + +Copyright (C) 2006 Kungliga Tekniska Högskola +(Royal Institute of Technology, Stockholm, Sweden). +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials provided +with the distribution. + +3. Neither the name of KTH nor the names of its contributors may be +used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY KTH AND ITS CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL KTH OR ITS +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +====================================================================== + +The KCM Mach RPC definition file used on macOS has the following +copyright: + +Copyright (C) 2009 Kungliga Tekniska Högskola +(Royal Institute of Technology, Stockholm, Sweden). +All rights reserved. + +Portions Copyright (C) 2009 Apple Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above +copyright notice, this list of conditions and the following +disclaimer. + +2. Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials provided +with the distribution. + +3. Neither the name of the Institute nor the names of its +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. 
+ +====================================================================== + +Portions of the RPC implementation in src/lib/rpc and +src/include/gssrpc have the following copyright and permission notice: + +Copyright (C) 2010, Oracle America, Inc. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials provided +with the distribution. + +3. Neither the name of the "Oracle America, Inc." nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +====================================================================== + +Copyright (C) 2006,2007,2009 NTT (Nippon Telegraph and Telephone +Corporation). All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer as +the first lines of this file unmodified. + +2. Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials provided +with the distribution. + +THIS SOFTWARE IS PROVIDED BY NTT "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL NTT BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +====================================================================== + +Copyright 2000 by Carnegie Mellon University + +All Rights Reserved + +Permission to use, copy, modify, and distribute this software and +its documentation for any purpose and without fee is hereby +granted, provided that the above copyright notice appear in all +copies and that both that copyright notice and this permission +notice appear in supporting documentation, and that the name of +Carnegie Mellon University not be used in advertising or publicity +pertaining to distribution of the software without specific, +written prior permission. + +CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO +THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE +FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN +AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING +OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +====================================================================== + +Copyright (C) 2002 Naval Research Laboratory (NRL/CCS) + +Permission to use, copy, modify and distribute this software and +its documentation is hereby granted, provided that both the +copyright notice and this permission notice appear in all copies of +the software, derivative works or modified versions, and any +portions thereof. + +NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND +DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER +RESULTING FROM THE USE OF THIS SOFTWARE. + +====================================================================== + +Portions extracted from Internet RFCs have the following copyright +notice: + +Copyright (C) The Internet Society (2006). + +This document is subject to the rights, licenses and restrictions +contained in BCP 78, and except as set forth therein, the authors +retain all their rights. + +This document and the information contained herein are provided on +an "AS IS" basis and THE CONTRIBUTOR, THE ORGANIZATION HE/SHE +REPRESENTS OR IS SPONSORED BY (IF ANY), THE INTERNET SOCIETY AND +THE INTERNET ENGINEERING TASK FORCE DISCLAIM ALL WARRANTIES, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTY THAT +THE USE OF THE INFORMATION HEREIN WILL NOT INFRINGE ANY RIGHTS OR +ANY IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A +PARTICULAR PURPOSE. + +====================================================================== + +Copyright (C) 1991, 1992, 1994 by Cygnus Support. + +Permission to use, copy, modify, and distribute this software and +its documentation for any purpose and without fee is hereby +granted, provided that the above copyright notice appear in all +copies and that both that copyright notice and this permission +notice appear in supporting documentation. Cygnus Support makes no +representations about the suitability of this software for any +purpose. It is provided "as is" without express or implied +warranty. + +====================================================================== + +Copyright (C) 2006 Secure Endpoints Inc. 
+ +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +====================================================================== + +Portions of the implementation of the Fortuna-like PRNG are subject to +the following notice: + +Copyright (C) 2005 Marko Kreen +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials provided +with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +Copyright (C) 1994 by the University of Southern California + +EXPORT OF THIS SOFTWARE from the United States of America may +require a specific license from the United States Government. It +is the responsibility of any person or organization +contemplating export to obtain such a license before exporting. + +WITHIN THAT CONSTRAINT, permission to copy, modify, and distribute +this software and its documentation in source and binary forms is +hereby granted, provided that any documentation or other materials +related to such distribution or use acknowledge that the software +was developed by the University of Southern California. + +DISCLAIMER OF WARRANTY. THIS SOFTWARE IS PROVIDED "AS IS". The +University of Southern California MAKES NO REPRESENTATIONS OR +WARRANTIES, EXPRESS OR IMPLIED. By way of example, but not +limitation, the University of Southern California MAKES NO +REPRESENTATIONS OR WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY +PARTICULAR PURPOSE. 
The University of Southern California shall not +be held liable for any liability nor for any direct, indirect, or +consequential damages with respect to any claim by the user or +distributor of the ksu software. + +====================================================================== + +Copyright (C) 1995 +The President and Fellows of Harvard University + +This code is derived from software contributed to Harvard by Jeremy +Rassen. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials provided +with the distribution. + +3. All advertising materials mentioning features or use of this +software must display the following acknowledgement: + +This product includes software developed by the University of +California, Berkeley and its contributors. + +4. Neither the name of the University nor the names of its +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +====================================================================== + +Copyright (C) 2008 by the Massachusetts Institute of Technology. +Copyright 1995 by Richard P. Basch. All Rights Reserved. +Copyright 1995 by Lehman Brothers, Inc. All Rights Reserved. + +Export of this software from the United States of America may +require a specific license from the United States Government. It +is the responsibility of any person or organization +contemplating export to obtain such a license before exporting. + +WITHIN THAT CONSTRAINT, permission to use, copy, modify, and +distribute this software and its documentation for any purpose and +without fee is hereby granted, provided that the above copyright +notice appear in all copies and that both that copyright notice and +this permission notice appear in supporting documentation, and that +the name of Richard P. Basch, Lehman Brothers and M.I.T. not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. Richard P. +Basch, Lehman Brothers and M.I.T. make no representations about the +suitability of this software for any purpose. It is provided "as +is" without express or implied warranty. + +====================================================================== + +The following notice applies to "src/lib/krb5/krb/strptime.c" and +"src/include/k5-queue.h". + +Copyright (C) 1997, 1998 The NetBSD Foundation, Inc. 
+All rights reserved. + +This code was contributed to The NetBSD Foundation by Klaus Klein. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials provided +with the distribution. + +3. All advertising materials mentioning features or use of this +software must display the following acknowledgement: + +This product includes software developed by the NetBSD +Foundation, Inc. and its contributors. + +4. Neither the name of The NetBSD Foundation nor the names of its +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND +CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT +OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +====================================================================== + +The following notice applies to Unicode library files in +"src/lib/krb5/unicode": + +Copyright 1997, 1998, 1999 Computing Research Labs, +New Mexico State University + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, copy, +modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE COMPUTING RESEARCH LAB OR +NEW MEXICO STATE UNIVERSITY BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +====================================================================== + +The following notice applies to "src/util/support/strlcpy.c": + +Copyright (C) 1998 Todd C. Miller "Todd.Miller@courtesan.com" + +Permission to use, copy, modify, and distribute this software for +any purpose with or without fee is hereby granted, provided that +the above copyright notice and this permission notice appear in all +copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL +WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE +AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR +CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +====================================================================== + +The following notice applies to "src/util/profile/argv_parse.c" and +"src/util/profile/argv_parse.h": + +Copyright 1999 by Theodore Ts'o. + +Permission to use, copy, modify, and distribute this software for +any purpose with or without fee is hereby granted, provided that +the above copyright notice and this permission notice appear in all +copies. THE SOFTWARE IS PROVIDED "AS IS" AND THEODORE TS'O (THE +AUTHOR) DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN +NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION +OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. (Isn't +it sick that the U.S. culture of lawsuit-happy lawyers requires +this kind of disclaimer?) + +====================================================================== + +The following notice applies to SWIG-generated code in +"src/util/profile/profile_tcl.c": + +Copyright (C) 1999-2000, The University of Chicago + +This file may be freely redistributed without license or fee +provided this copyright message remains intact. + +====================================================================== + +The following notice applies to portiions of "src/lib/rpc" and +"src/include/gssrpc": + +Copyright (C) 2000 The Regents of the University of Michigan. All +rights reserved. + +Copyright (C) 2000 Dug Song "dugsong@UMICH.EDU". All rights +reserved, all wrongs reversed. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials provided +with the distribution. + +3. Neither the name of the University nor the names of its +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT +OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +====================================================================== + +Implementations of the MD4 algorithm are subject to the following +notice: + +Copyright (C) 1990, RSA Data Security, Inc. All rights reserved. + +License to copy and use this software is granted provided that it +is identified as the "RSA Data Security, Inc. MD4 Message Digest +Algorithm" in all material mentioning or referencing this software +or this function. + +License is also granted to make and use derivative works provided +that such works are identified as "derived from the RSA Data +Security, Inc. MD4 Message Digest Algorithm" in all material +mentioning or referencing the derived work. + +RSA Data Security, Inc. makes no representations concerning either +the merchantability of this software or the suitability of this +software for any particular purpose. It is provided "as is" +without express or implied warranty of any kind. + +These notices must be retained in any copies of any part of this +documentation and/or software. + +====================================================================== + +Implementations of the MD5 algorithm are subject to the following +notice: + +Copyright (C) 1990, RSA Data Security, Inc. All rights reserved. + +License to copy and use this software is granted provided that it +is identified as the "RSA Data Security, Inc. MD5 Message- Digest +Algorithm" in all material mentioning or referencing this software +or this function. + +License is also granted to make and use derivative works provided +that such works are identified as "derived from the RSA Data +Security, Inc. MD5 Message-Digest Algorithm" in all material +mentioning or referencing the derived work. + +RSA Data Security, Inc. makes no representations concerning either +the merchantability of this software or the suitability of this +software for any particular purpose. It is provided "as is" +without express or implied warranty of any kind. + +These notices must be retained in any copies of any part of this +documentation and/or software. + +====================================================================== + +The following notice applies to +"src/lib/crypto/crypto_tests/t_mddriver.c": + +Copyright (C) 1990-2, RSA Data Security, Inc. Created 1990. All +rights reserved. + +RSA Data Security, Inc. makes no representations concerning either +the merchantability of this software or the suitability of this +software for any particular purpose. It is provided "as is" without +express or implied warranty of any kind. + +These notices must be retained in any copies of any part of this +documentation and/or software. + +====================================================================== + +Portions of "src/lib/krb5" are subject to the following notice: + +Copyright (C) 1994 CyberSAFE Corporation. +Copyright 1990,1991,2007,2008 by the Massachusetts Institute of Technology. +All Rights Reserved. 
+ +Export of this software from the United States of America may +require a specific license from the United States Government. It +is the responsibility of any person or organization +contemplating export to obtain such a license before exporting. + +WITHIN THAT CONSTRAINT, permission to use, copy, modify, and +distribute this software and its documentation for any purpose and +without fee is hereby granted, provided that the above copyright +notice appear in all copies and that both that copyright notice and +this permission notice appear in supporting documentation, and that +the name of M.I.T. not be used in advertising or publicity +pertaining to distribution of the software without specific, +written prior permission. Furthermore if you modify this software +you must label your software as modified software and not +distribute it in such a fashion that it might be confused with the +original M.I.T. software. Neither M.I.T., the Open Computing +Security Group, nor CyberSAFE Corporation make any representations +about the suitability of this software for any purpose. It is +provided "as is" without express or implied warranty. + +====================================================================== + +Portions contributed by PADL Software are subject to the following +license: + +Copyright (c) 2011, PADL Software Pty Ltd. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials provided +with the distribution. + +3. Neither the name of PADL Software nor the names of its +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL PADL SOFTWARE +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +====================================================================== + +The bundled libev source code is subject to the following license: + +All files in libev are Copyright (C)2007,2008,2009 Marc Alexander +Lehmann. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +Alternatively, the contents of this package may be used under the +terms of the GNU General Public License ("GPL") version 2 or any +later version, in which case the provisions of the GPL are +applicable instead of the above. If you wish to allow the use of +your version of this package only under the terms of the GPL and +not to allow others to use your version of this file under the BSD +license, indicate your decision by deleting the provisions above +and replace them with the notice and other provisions required by +the GPL in this and the other files of this package. If you do not +delete the provisions above, a recipient may use your version of +this file under either the BSD or the GPL. + +====================================================================== + +Files copied from the Intel AESNI Sample Library are subject to the +following license: + +Copyright (C) 2010, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials +provided with the distribution. + +Neither the name of Intel Corporation nor the names of its +contributors may be used to endorse or promote products +derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +====================================================================== + +The following notice applies to +"src/ccapi/common/win/OldCC/autolock.hxx": + +Copyright (C) 1998 by Danilo Almeida. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials provided +with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +ADDITIONAL LICENSE INFORMATION: + +> LPPL 1.3c + +krb5-1.16-2.ph2.src.rpm\krb5-1.16-2.ph2.src.cpio\krb5-1.16.tar.gz\krb5-1.16.tar\krb5-1.16\doc\pdf\tabulary.sty + + +Copyright (C) 1995 1996 2003 2008 David Carlisle +This file may be distributed under the terms of the LPPL. +See 00readme.txt for details. + + +> BSD- 4 Clause + + +krb5-1.16-2.ph2.src.rpm\krb5-1.16-2.ph2.src.cpio\krb5-1.16.tar.gz\krb5-1.16.tar\krb5-1.16\src\lib\apputils\daemon.c + + +Copyright (c) 1990 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. All advertising materials mentioning features or use of this software +must display the following acknowledgement: +This product includes software developed by the University of +California, Berkeley and its contributors. +4. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. 
+ +> Public Domain + +krb5-1.16-2.ph2.src.rpm\krb5-1.16-2.ph2.src.cpio\krb5-1.16.tar.gz\krb5-1.16.tar\krb5-1.16\src\lib\krb5\rcache\rc_base.c + +This file of the Kerberos V5 software is derived from public-domain code +contributed by Daniel J. Bernstein, . + +>>> libarchive-3.3.1-2.ph2 + +Copyright (c) 2003-2009 +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer +in this position and unchanged. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +ADDITIONAL LICENSE INFORMATION: + +>MIT Style + +libarchive-3.3.1.tar.gz\libarchive-3.3.1.tar\libarchive-3.3.1\build\autoconf\ax_require_defined.m4 + +Copyright (c) 2014 Mike Frysinger + +Copying and distribution of this file, with or without modification, are +permitted in any medium without royalty provided the copyright notice +and this notice are preserved. This file is offered as-is, without any +warranty. + +>Apache 2.0 + +libarchive-3.3.1.tar.gz\libarchive-3.3.1.tar\libarchive-3.3.1\contrib\android\Android.mk + +Copyright (C) 2014 Trevor Drake + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +>BSD – 4Clause + +libarchive-3.3.1.tar.gz\libarchive-3.3.1.tar\libarchive-3.3.1\contrib\shar\shar.1 + +Copyright (c) 1990, 1993 +The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. 
All advertising materials mentioning features or use of this software +must display the following acknowledgement: +This product includes software developed by the University of +California, Berkeley and its contributors. +4. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +>MIT + +libarchive-3.3.1.tar.gz\libarchive-3.3.1.tar\libarchive-3.3.1\doc\mdoc2man.awk + +Copyright (c) 2003 Peter Stuge + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +>Public Domain + +libarchive-3.3.1.tar.gz\libarchive-3.3.1.tar\libarchive-3.3.1\libarchive\archive_getdate.c + +This code is in the public domain and has no copyright. + +This is a plain C recursive-descent translation of an old +public-domain YACC grammar that has been used for parsing dates in +very many open-source projects. + +Since the original authors were generous enough to donate their +work to the public domain, I feel compelled to match their +generosity. + +Tim Kientzle, February 2009. + +>BSD – 3Clause + +libarchive-3.3.1.tar.gz\libarchive-3.3.1.tar\libarchive-3.3.1\libarchive\mtree.5 + +Copyright (c) 1989, 1990, 1993 +The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +4. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +>>> libcap-2.25-7.ph2 + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE BSD-3. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Unless otherwise *explicitly* stated the following text describes the +licensed conditions under which the contents of this module release +may be distributed: + +------------------------------------------------------------------------- +Redistribution and use in source and binary forms of this module, with +or without modification, are permitted provided that the following +conditions are met: + +1. Redistributions of source code must retain any existing copyright + notice, and this entire permission notice in its entirety, + including the disclaimer of warranties. + +2. Redistributions in binary form must reproduce all prior and current + copyright notices, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +3. The name of any author may not be used to endorse or promote + products derived from this software without their specific prior + written permission. + +ALTERNATIVELY, this product may be distributed under the terms of the +GNU Library General Public License, in which case the provisions of +the GNU LGPL are required INSTEAD OF the above restrictions. (This +clause is necessary due to a potential conflict between the GNU LGPL +and the restrictions contained in a BSD-style copyright.) + +THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +>>> libdb-5.3.28-1.ph2 + +The following is the license that applies to this copy of the Berkeley DB +software. For a license to use the Berkeley DB software under conditions +other than those described here, or to purchase support for this software, +please contact Oracle at berkeleydb-info_us@oracle.com. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + +Copyright (c) 1990, 2013 Oracle and/or its affiliates. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. 
Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Redistributions in any form must be accompanied by information on +how to obtain complete source code for the DB software and any +accompanying software that uses the DB software. The source code +must either be included in the distribution or be available for no +more than the cost of distribution plus a nominal fee, and must be +freely redistributable under reasonable conditions. For an +executable file, complete source code means the source code for all +modules it contains. It does not include source code for modules or +files that typically accompany the major components of the operating +system on which the executable file runs. + +THIS SOFTWARE IS PROVIDED BY ORACLE ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR +NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL ORACLE BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Copyright (c) 1990, 1993, 1994, 1995 +The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + + +Copyright (c) 1995, 1996 +The President and Fellows of Harvard University. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. 
Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY HARVARD AND ITS CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL HARVARD OR ITS CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + +ASM: a very small and fast Java bytecode manipulation framework +Copyright (c) 2000-2005 INRIA, France Telecom +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of the copyright holders nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. + + +ADDITIONAL LICENSE INFORMATION: + +>BSD-4 + +libdb-5.3.28-1.ph2.src.rpm\libdb-5.3.28-1.ph2.src.cpio\db-5.3.28.tar.gz\db-5.3.28.tar\db-5.3.28\src\clib\atol.c + +Copyright (c) 1988, 1993 +The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. All advertising materials mentioning features or use of this software +must display the following acknowledgement: +This product includes software developed by the University of +California, Berkeley and its contributors. +4. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +>ARTISTIC 2.0 + +libdb-5.3.28-1.ph2.src.rpm\libdb-5.3.28-1.ph2.src.cpio\db-5.3.28.tar.gz\db-5.3.28.tar\db-5.3.28\src\crypto\mersenne\mt19937db.c + +This library is free software under the Artistic license: +see the file COPYING distributed together with this code. +For the verification of the code, its output sequence file +mt19937int.out is attached (2001/4/2) + +>PUBLIC DOMAIN + +libdb-5.3.28-1.ph2.src.rpm\libdb-5.3.28-1.ph2.src.cpio\db-5.3.28.tar.gz\db-5.3.28.tar\db-5.3.28\src\crypto\rijndael\rijndael-alg-fst.c + + +rijndael-alg-fst.c + +@version 3.0 (December 2000) + +Optimised ANSI C code for the Rijndael cipher (now AES) + +@author Vincent Rijmen +@author Antoon Bosselaers +@author Paulo Barreto + +This code is hereby placed in the public domain. + +THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +>PUBLIC DOMAIN + +libdb-5.3.28-1.ph2.src.rpm\libdb-5.3.28-1.ph2.src.cpio\db-5.3.28.tar.gz\db-5.3.28.tar\db-5.3.28\src\hmac\sha1.c + +SHA-1 in C +By Steve Reid +100% Public Domain +>MS-PL + + +libdb-5.3.28-1.ph2.src.rpm\libdb-5.3.28-1.ph2.src.cpio\db-5.3.28.tar.gz\db-5.3.28.tar\db-5.3.28\docs\csharp\TOC.js + + +Note : Copyright 2006-2009, Eric Woodruff, All rights reserved +Compiler: JavaScript + +This file contains the methods necessary to implement a simple tree view +for the table of content with a resizable splitter and Ajax support to +load tree nodes on demand. It also contains the script necessary to do +full-text searches. 
+ +This code is published under the Microsoft Public License (Ms-PL). A copy +of the license should be distributed with the code. It can also be found +at the project website: http://SHFB.CodePlex.com. This notice, the +author's name, and all copyright notices must remain intact in all +applications, documentation, and source files. + +>MIT + +libdb-5.3.28-1.ph2.src.rpm\libdb-5.3.28-1.ph2.src.cpio\db-5.3.28.tar.gz\db-5.3.28.tar\db-5.3.28\dist\aclocal\ltoptions.m4 + +Copyright (C) 2004, 2005, 2007, 2008, 2009 Free Software Foundation, +Inc. +Written by Gary V. Vaughan, 2004 + +This file is free software; the Free Software Foundation gives +unlimited permission to copy and/or distribute it, with or without +modifications, as long as this notice is preserved. + +>GPL 2.0 + +libdb-5.3.28-1.ph2.src.rpm\libdb-5.3.28-1.ph2.src.cpio\db-5.3.28.tar.gz\db-5.3.28.tar\db-5.3.28\lang\perl\BerkeleyDB\BerkeleyDB.pm + +[VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS SUBCOMPONENT UNDER THE TERMS OF THE GPL v2 LICENSE THE TEXT OF WHICH IS SET FORTH IN THE APPENDIX. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Copyright (c) 1997-2011 Paul Marquess. All rights reserved. +This program is free software; you can redistribute it and/or +modify it under the same terms as Perl itself. + + +>APACHE 2.0 + +libdb-5.3.28-1.ph2.src.rpm\libdb-5.3.28-1.ph2.src.cpio\db-5.3.28.tar.gz\db-5.3.28.tar\db-5.3.28\lang\php_db4\db4.cpp + +Copyright (c) 2004, 2013 Oracle and/or its affiliates. All rights reserved. + +http://www.apache.org/licenses/LICENSE-2.0.txt + +authors: George Schlossnagle george@omniti.com + +>MIT STYLE + +libdb-5.3.28-1.ph2.src.rpm\libdb-5.3.28-1.ph2.src.cpio\db-5.3.28.tar.gz\db-5.3.28.tar\db-5.3.28\lang\sql\jdbc\license.terms + +This software is copyrighted by Christian Werner and others. +The following terms apply to all files associated with the software +unless explicitly disclaimed in individual files. + +The authors hereby grant permission to use, copy, modify, distribute, +and license this software and its documentation for any purpose, provided +that existing copyright notices are retained in all copies and that this +notice is included verbatim in any distributions. No written agreement, +license, or royalty fee is required for any of the authorized uses. +Modifications to this software may be copyrighted by their authors +and need not follow the licensing terms described here, provided that +the new terms are clearly indicated on the first page of each file where +they apply. + +IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +MODIFICATIONS. + +>>> libffi-3.2.1-5.ph2 + +libffi - Copyright (c) 1996-2014 Anthony Green, Red Hat, Inc and others. +See source files for details. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +ADDITIONAL LICENSE INFORMATION: + +> GPL 2.0 + +libffi-3.2.1.tar.gz\libffi-3.2.1.tar\libffi-3.2.1\libtool-ldflags + +Copyright (C) 2005 Free Software Foundation, Inc. + +This file is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +MA 02110-1301, USA. + + +> LGPL 2.1 + +libffi-3.2.1.tar.gz\libffi-3.2.1.tar\libffi-3.2.1\msvcc.sh + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE LGPL 2.1 LICENSE. PLEASE SEE APPENDIX FOR THE FULL TEXT OF THE LGPL 2.1 LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +***** BEGIN LICENSE BLOCK ***** +Version: MPL 1.1/GPL 2.0/LGPL 2.1 + +The contents of this file are subject to the Mozilla Public License Version +1.1 (the "License"); you may not use this file except in compliance with +the License. You may obtain a copy of the License at +http://www.mozilla.org/MPL/ + +Software distributed under the License is distributed on an "AS IS" basis, +WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License +for the specific language governing rights and limitations under the +License. + +The Original Code is the MSVC wrappificator. + +The Initial Developer of the Original Code is +Timothy Wall . +Portions created by the Initial Developer are Copyright (C) 2009 +the Initial Developer. All Rights Reserved. + +Contributor(s): +Daniel Witte + +Alternatively, the contents of this file may be used under the terms of +either the GNU General Public License Version 2 or later (the "GPL"), or +the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), +in which case the provisions of the GPL or the LGPL are applicable instead +of those above. 
If you wish to allow use of your version of this file only +under the terms of either the GPL or the LGPL, and not to allow others to +use your version of this file under the terms of the MPL, indicate your +decision by deleting the provisions above and replace them with the notice +and other provisions required by the GPL or the LGPL. If you do not delete +the provisions above, a recipient may use your version of this file under +the terms of any one of the MPL, the GPL or the LGPL. + +***** END LICENSE BLOCK ***** + + +> Public Domain + +libffi-3.2.1.tar.gz\libffi-3.2.1.tar\libffi-3.2.1\src\dlmalloc.c + +This is a version (aka dlmalloc) of malloc/free/realloc written by +Doug Lea and released to the public domain, as explained at +http://creativecommons.org/licenses/publicdomain. Send questions, +comments, complaints, performance data, etc to dl@cs.oswego.edu + +>>> libsolv-0.6.26-3.ph2 + +Copyright (c) 2000-2001, Aaron D. Gifford +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of the copyright holder nor the names of contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +ADDITIONAL LICENSE INFORMATION: + +> Public Domain + +libsolv-0.6.26.tar.gz\libsolv-0.6.26.tar\libsolv-0.6.26\src\sha1.h + +this file is in the public domain + +>>> libssh2-1.8.0-1.ph2 + +Copyright (c) 2004-2007 Sara Golemon + Copyright (c) 2005,2006 Mikhail Gusarov + Copyright (c) 2006-2007 The Written Word, Inc. + Copyright (c) 2007 Eli Fant + Copyright (c) 2009-2014 Daniel Stenberg + Copyright (C) 2008, 2009 Simon Josefsson + All rights reserved. + + Redistribution and use in source and binary forms, + with or without modification, are permitted provided + that the following conditions are met: + + Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + + Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. 
+ + Neither the name of the copyright holder nor the names + of any other contributors may be used to endorse or + promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + OF SUCH DAMAGE. + +>>> libtirpc-1.0.1-8.ph2 + +Copyright (c) Copyright (c) Bull S.A. 2005 All Rights Reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. The name of the author may not be used to endorse or promote products +derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +ADDITIONAL LICENSE INFORMATION: + +>GPL 2.0 + +libtirpc-1.0.1\src\debug.h + +Copyright (C) 2014 Red Hat, Steve Dickson + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA. + +>LGPL 2.1 + +libtirpc-1.0.1\src\des_impl.c + +Copyright (C) 1992 Eric Young +Collected from libdes and modified for SECURE RPC by Martin Kuck 1994 +This file is distributed under the terms of the GNU Lesser General +Public License, version 2.1 or later - see the file COPYING.LIB for details. 
+If you did not receive a copy of the license with this program, please +see to obtain a copy. + +>Sun Microsystem (MIT-Style) + +libtirpc-1.0.1\tirpc\rpc\key_prot.h + +Sun RPC is a product of Sun Microsystems, Inc. and is provided for +unrestricted use provided that this legend is included on all tape +media and as a part of the software program in whole or part. Users +may copy or modify Sun RPC without charge, but are not authorized +to license or distribute it to anyone else except as part of a product or +program developed by the user. +SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE +WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR +PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE. +Sun RPC is provided with no support and without any obligation on the +part of Sun Microsystems, Inc. to assist in its use, correction, +modification or enhancement. +SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE +INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC +OR ANY PART THEREOF. +In no event will Sun Microsystems, Inc. be liable for any lost revenue +or profits or other special, indirect and consequential damages, even if +Sun has been advised of the possibility of such damages. +Sun Microsystems, Inc. +2550 Garcia Avenue +Mountain View, California 94043. + +>>> linux-pam-1.3.0-1.ph2 + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE BSD-3 LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Unless otherwise *explicitly* stated the following text describes the +licensed conditions under which the contents of this Linux-PAM release +may be distributed: + +------------------------------------------------------------------------- +Redistribution and use in source and binary forms of Linux-PAM, with +or without modification, are permitted provided that the following +conditions are met: + +1. Redistributions of source code must retain any existing copyright + notice, and this entire permission notice in its entirety, + including the disclaimer of warranties. + +2. Redistributions in binary form must reproduce all prior and current + copyright notices, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +3. The name of any author may not be used to endorse or promote + products derived from this software without their specific prior + written permission. + +ALTERNATIVELY, this product may be distributed under the terms of the +GNU General Public License, in which case the provisions of the GNU +GPL are required INSTEAD OF the above restrictions. (This clause is +necessary due to a potential conflict between the GNU GPL and the +restrictions contained in a BSD-style copyright.) + +THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. +------------------------------------------------------------------------- + + +ADDITIONAL LICENSE INFORMATION: + +> GPL 3.0 with Bison Parser Exception + +Linux-PAM-1.3.0.tar.bz2\Linux-PAM-1.3.0.tar\Linux-PAM-1.3.0\conf\pam_conv1\pam_conv_y.c + +Bison implementation for Yacc-like parsers in C + +Copyright (C) 1984, 1989-1990, 2000-2012 Free Software Foundation, Inc. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . + +As a special exception, you may create a larger work that contains +part or all of the Bison parser skeleton and distribute that work +under terms of your choice, so long as that work isn't itself a +parser generator using the skeleton or a modified version thereof +as a parser skeleton. Alternatively, if you modify or redistribute +the parser skeleton itself, you may (at your option) remove this +special exception, which will cause the skeleton and the resulting +Bison output files to be licensed under the GNU General Public +License without this special exception. + +This special exception was added by the Free Software Foundation in +version 2.2 of Bison. + + +> BSD + +Linux-PAM-1.3.0.tar.bz2\Linux-PAM-1.3.0.tar\Linux-PAM-1.3.0\modules\pam_access\pam_access.c + +Copyright 1995 by Wietse Venema. All rights reserved. Individual files +may be covered by other copyrights (as noted in the file itself.) + +This material was originally written and compiled by Wietse Venema at +Eindhoven University of Technology, The Netherlands, in 1990, 1991, +1992, 1993, 1994 and 1995. + +Redistribution and use in source and binary forms are permitted +provided that this entire copyright notice is duplicated in all such +copies. + +This software is provided "as is" and without any expressed or implied +warranties, including, without limitation, the implied warranties of +merchantibility and fitness for any particular purpose. + + +> LGPL 2.0 + +Linux-PAM-1.3.0.tar.bz2\Linux-PAM-1.3.0.tar\Linux-PAM-1.3.0\modules\pam_issue\pam_issue.c + +Copyright 1999 by Ben Collins +Released under the GNU LGPL version 2 or later + + +> GPL 2.0 + +Linux-PAM-1.3.0.tar.bz2\Linux-PAM-1.3.0.tar\Linux-PAM-1.3.0\modules\pam_loginuid\pam_loginuid.c + +Copyright 2005 Red Hat Inc., Durham, North Carolina. +All Rights Reserved. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Suite 500 +Boston, MA 02110-1335 USA + + +> Public Domain + +Linux-PAM-1.3.0.tar.bz2\Linux-PAM-1.3.0.tar\Linux-PAM-1.3.0\modules\pam_namespace\md5.c + +This code implements the MD5 message-digest algorithm. +The algorithm is due to Ron Rivest. This code was +written by Colin Plumb in 1993, no copyright is claimed. +This code is in the public domain; do with it what you wish. + + +> MIT + +Linux-PAM-1.3.0.tar.bz2\Linux-PAM-1.3.0.tar\Linux-PAM-1.3.0\modules\pam_namespace\argv_parse.c + +Copyright 1999 by Theodore Ts'o. + +Permission to use, copy, modify, and distribute this software for +any purpose with or without fee is hereby granted, provided that +the above copyright notice and this permission notice appear in all +copies. THE SOFTWARE IS PROVIDED "AS IS" AND THEODORE TS'O (THE +AUTHOR) DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER +RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION +OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +> BSD-3 + +Linux-PAM-1.3.0.tar.bz2\Linux-PAM-1.3.0.tar\Linux-PAM-1.3.0\modules\pam_tally2\pam_tally2.c + +Portions Copyright 2006, Red Hat, Inc. +Portions Copyright 1989 - 1993, Julianne Frances Haugh +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of Julianne F. Haugh nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY JULIE HAUGH AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL JULIE HAUGH OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + + +> Beer-Ware License (MIT-Style) + +Linux-PAM-1.3.0.tar.bz2\Linux-PAM-1.3.0.tar\Linux-PAM-1.3.0\modules\pam_unix\md5_crypt.c + +"THE BEER-WARE LICENSE" (Revision 42): + wrote this file. 
As long as you retain this notice you +can do whatever you want with this stuff. If we meet some day, and you think +this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp + +>>> lsof-4.89-2.ph2 + +Copyright 1994 Purdue Research Foundation, West Lafayette, Indiana +47907. All rights reserved. + +Written by Victor A. Abell + +This software is not subject to any license of the American Telephone +and Telegraph Company or the Regents of the University of California. + +Permission is granted to anyone to use this software for any purpose on +any computer system, and to alter it and redistribute it freely, subject +to the following restrictions: + +1. Neither the authors nor Purdue University are responsible for any +consequences of the use of this software. + +2. The origin of this software must not be misrepresented, either by +explicit claim or by omission. Credit to the authors and Purdue +University must appear in documentation and sources. + +3. Altered versions must be plainly marked as such, and must not be +misrepresented as being the original software. + +4. This notice may not be removed or altered. + +ADDITIONAL LICENSE INFORMATION: + +> BSD + +lsof_4.89.tar.gz\lsof_4.89.tar\lsof_4.89\lsof_4.89_src.tar\lsof_4.89_src\dialects\freebsd\include\procfs\pfsnode.h + +Copyright (c) 1993 Paul Kranenburg +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. All advertising materials mentioning features or use of this software +must display the following acknowledgement: +This product includes software developed by Paul Kranenburg. +4. The name of the author may not be used to endorse or promote products +derived from this software withough specific prior written permission + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +> GPL 2.0 + +lsof_4.89.tar.gz\lsof_4.89.tar\lsof_4.89\lsof_4.89_src.tar\lsof_4.89_src\scripts\sort_res.perl5 + +Copyright (c) 2004, 2005 - Fabian Frederick + +This program/include file is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License as published +by the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program/include file is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty +of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program (in the main directory of the Linux-NTFS +distribution in the file COPYING); if not, write to the Free Software +Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +>>> ncurses-6.0-14.ph2 + +Upstream source http://invisible-island.net/ncurses/ncurses-examples.html + +Current ncurses maintainer: Thomas Dickey + +------------------------------------------------------------------------------- +Files: * +Copyright: 1998-2016,2017 Free Software Foundation, Inc. +Licence: X11 + +Files: aclocal.m4 package +Copyright: 2010-2016,2017 by Thomas E. Dickey +Licence: X11 + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, distribute with modifications, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE ABOVE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR + THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + Except as contained in this notice, the name(s) of the above copyright + holders shall not be used in advertising or otherwise to promote the + sale, use or other dealings in this Software without prior written + authorization. + +------------------------------------------------------------------------------- +Files: install-sh +Copyright: 1994 X Consortium +Licence: X11 + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- + TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + Except as contained in this notice, the name of the X Consortium shall not + be used in advertising or otherwise to promote the sale, use or other deal- + ings in this Software without prior written authorization from the X Consor- + tium. + + FSF changes to this file are in the public domain. 
+ + Calling this script install-sh is preferred over install.sh, to prevent + `make' implicit rules from creating a file called install from it + when there is no Makefile. + + This script is compatible with the BSD install script, but was written + from scratch. It can only install one file at a time, a restriction + shared with many OS's install programs. + +On Debian systems, the complete text of the GNU General +Public License can be found in '/usr/share/common-licenses/GPL-2' + +-- vile: txtmode file-encoding=utf-8 + +>>> openssh-7.5p1-11.ph2 + +This file is part of the OpenSSH software. + +The licences which components of this software fall under are as +follows. First, we will summarize and say that all components +are under a BSD licence, or a licence more free than that. + +OpenSSH contains no GPL code. + +1) +Copyright (c) 1995 Tatu Ylonen , Espoo, Finland +All rights reserved + +As far as I am concerned, the code I have written for this software +can be used freely for any purpose. Any derived versions of this +software must be clearly marked as such, and if the derived work is +incompatible with the protocol description in the RFC file, it must be +called by a name other than "ssh" or "Secure Shell". + +[Tatu continues] +However, I am not implying to give any licenses to any patents or +copyrights held by third parties, and the software includes parts that +are not under my direct control. As far as I know, all included +source code is used in accordance with the relevant license agreements +and can be used freely for any purpose (the GNU license being the most +restrictive); see below for details. + +[However, none of that term is relevant at this point in time. All of +these restrictively licenced software components which he talks about +have been removed from OpenSSH, i.e., + +- RSA is no longer included, found in the OpenSSL library +- IDEA is no longer included, its use is deprecated +- DES is now external, in the OpenSSL library +- GMP is no longer used, and instead we call BN code from OpenSSL +- Zlib is now external, in a library +- The make-ssh-known-hosts script is no longer included +- TSS has been removed +- MD5 is now external, in the OpenSSL library +- RC4 support has been replaced with ARC4 support from OpenSSL +- Blowfish is now external, in the OpenSSL library + +[The licence continues] + +Note that any information and cryptographic algorithms used in this +software are publicly available on the Internet and at any major +bookstore, scientific library, and patent office worldwide. More +information can be found e.g. at "http://www.cs.hut.fi/crypto". + +The legal status of this program is some combination of all these +permissions and restrictions. Use only at your own responsibility. +You will be responsible for any legal consequences yourself; I am not +making any claims whether possessing or using this is legal or not in +your country, and I am not taking any responsibility on your behalf. + + +NO WARRANTY + +BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + +2) +The 32-bit CRC compensation attack detector in deattack.c was +contributed by CORE SDI S.A. under a BSD-style license. + +Cryptographic attack detector for ssh - source code + +Copyright (c) 1998 CORE SDI S.A., Buenos Aires, Argentina. + +All rights reserved. Redistribution and use in source and binary +forms, with or without modification, are permitted provided that +this copyright notice is retained. + +THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED +WARRANTIES ARE DISCLAIMED. IN NO EVENT SHALL CORE SDI S.A. BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY OR +CONSEQUENTIAL DAMAGES RESULTING FROM THE USE OR MISUSE OF THIS +SOFTWARE. + +Ariel Futoransky + + +3) +ssh-keyscan was contributed by David Mazieres under a BSD-style +license. + +Copyright 1995, 1996 by David Mazieres . + +Modification and redistribution in source and binary forms is +permitted provided that due credit is given to the author and the +OpenBSD project by leaving this copyright notice intact. + +4) +The Rijndael implementation by Vincent Rijmen, Antoon Bosselaers +and Paulo Barreto is in the public domain and distributed +with the following license: + +@version 3.0 (December 2000) + +Optimised ANSI C code for the Rijndael cipher (now AES) + +@author Vincent Rijmen +@author Antoon Bosselaers +@author Paulo Barreto + +This code is hereby placed in the public domain. + +THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +5) +One component of the ssh source code is under a 3-clause BSD license, +held by the University of California, since we pulled these parts from +original Berkeley code. + +Copyright (c) 1983, 1990, 1992, 1993, 1995 +The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +6) +Remaining components of the software are provided under a standard +2-term BSD licence with the following names as copyright holders: + +Markus Friedl +Theo de Raadt +Niels Provos +Dug Song +Aaron Campbell +Damien Miller +Kevin Steves +Daniel Kouril +Wesley Griffin +Per Allansson +Nils Nordman +Simon Wilkinson + +Portable OpenSSH additionally includes code from the following copyright +holders, also under the 2-term BSD license: + +Ben Lindstrom +Tim Rice +Andre Lucas +Chris Adams +Corinna Vinschen +Cray Inc. +Denis Parker +Gert Doering +Jakob Schlyter +Jason Downs +Juha Yrjölä +Michael Stone +Networks Associates Technology, Inc. +Solar Designer +Todd C. Miller +Wayne Schroeder +William Jones +Darren Tucker +Sun Microsystems +The SCO Group +Daniel Walsh +Red Hat, Inc +Simon Vallet / Genoscope + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +8) Portable OpenSSH contains the following additional licenses: + +a) md5crypt.c, md5crypt.h + +"THE BEER-WARE LICENSE" (Revision 42): + wrote this file. As long as you retain this +notice you can do whatever you want with this stuff. If we meet +some day, and you think this stuff is worth it, you can buy me a +beer in return. 
Poul-Henning Kamp + +b) snprintf replacement + +Copyright Patrick Powell 1995 +This code is based on code written by Patrick Powell +(papowell@astart.com) It may be used for any purpose as long as this +notice remains intact on all source code distributions + +c) Compatibility code (openbsd-compat) + +Apart from the previously mentioned licenses, various pieces of code +in the openbsd-compat/ subdirectory are licensed as follows: + +Some code is licensed under a 3-term BSD license, to the following +copyright holders: + +Todd C. Miller +Theo de Raadt +Damien Miller +Eric P. Allman +The Regents of the University of California +Constantin S. Svintsoff + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +Some code is licensed under an ISC-style license, to the following +copyright holders: + +Internet Software Consortium. +Todd C. Miller +Reyk Floeter +Chad Mynhier + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND TODD C. MILLER DISCLAIMS ALL +WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL TODD C. MILLER BE LIABLE +FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION +OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +Some code is licensed under a MIT-style license to the following +copyright holders: + +Free Software Foundation, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, distribute with modifications, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE ABOVE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR +THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Except as contained in this notice, the name(s) of the above copyright +holders shall not be used in advertising or otherwise to promote the +sale, use or other dealings in this Software without prior written +authorization. +------ +$OpenBSD: LICENCE,v 1.19 2004/08/30 09:18:08 markus Exp $ + +>>> openssl-1.0.2q-1.ph2 + +LICENSE ISSUES +============== + +The OpenSSL toolkit stays under a double license, i.e. both the conditions of +the OpenSSL License and the original SSLeay license apply to the toolkit. +See below for the actual license texts. Actually both licenses are BSD-style +Open Source licenses. In case of any license issues related to OpenSSL +please contact openssl-core@openssl.org. + +OpenSSL License +--------------- + +/ ==================================================================== +Copyright (c) 1998-2018 The OpenSSL Project. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. + +3. All advertising materials mentioning features or use of this +software must display the following acknowledgment: +"This product includes software developed by the OpenSSL Project +for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + +4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to +endorse or promote products derived from this software without +prior written permission. For written permission, please contact +openssl-core@openssl.org. + +5. Products derived from this software may not be called "OpenSSL" +nor may "OpenSSL" appear in their names without prior written +permission of the OpenSSL Project. + +6. Redistributions of any form whatsoever must retain the following +acknowledgment: +"This product includes software developed by the OpenSSL Project +for use in the OpenSSL Toolkit (http://www.openssl.org/)" + +THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY +EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR +ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. +==================================================================== + +This product includes cryptographic software written by Eric Young +(eay@cryptsoft.com). This product includes software written by Tim +Hudson (tjh@cryptsoft.com). + +/ + +Original SSLeay License +----------------------- + +/ Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) +All rights reserved. + +This package is an SSL implementation written +by Eric Young (eay@cryptsoft.com). +The implementation was written so as to conform with Netscapes SSL. + +This library is free for commercial and non-commercial use as long as +the following conditions are aheared to. The following conditions +apply to all code found in this distribution, be it the RC4, RSA, +lhash, DES, etc., code; not just the SSL code. The SSL documentation +included with this distribution is covered by the same copyright terms +except that the holder is Tim Hudson (tjh@cryptsoft.com). + +Copyright remains Eric Young's, and as such any Copyright notices in +the code are not to be removed. +If this package is used in a product, Eric Young should be given attribution +as the author of the parts of the library used. +This can be in the form of a textual message at program startup or +in documentation (online or textual) provided with the package. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. All advertising materials mentioning features or use of this software +must display the following acknowledgement: +"This product includes cryptographic software written by +Eric Young (eay@cryptsoft.com)" +The word 'cryptographic' can be left out if the rouines from the library +being used are not cryptographic related :-). +4. If you include any Windows specific code (or a derivative thereof) from +the apps directory (application code) you must include an acknowledgement: +"This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + +THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +The licence and distribution terms for any publically available version or +derivative of this code cannot be changed. i.e. this code cannot simply be +copied and put under another distribution licence +[including the GNU Public Licence.] + + +ADDITIONAL LICENSE INFORMATION: + +> Public Domain + +openssl-1.0.2q.tar.gz\openssl-1.0.2q.tar\openssl-1.0.2q\crypto\aes\asm\vpaes-x86.pl + +By Mike Hamburg (Stanford University), 2009 +Public domain. + +For details see http://shiftleft.org/papers/vector_aes/ and +http://crypto.stanford.edu/vpaes/. + +> OpenSSL + +openssl-1.0.2m.tar.gz\openssl-1.0.2m.tar\openssl-1.0.2m\crypto\aes\asm\bsaes-armv7.pl + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS SUBCOMPONENT UNDER THE TERMS OF THE OPENSSL LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Written by Andy Polyakov for the OpenSSL +project. The module is, however, dual licensed under OpenSSL and +CRYPTOGAMS licenses depending on where you obtain it. For further +details see http://www.openssl.org/~appro/cryptogams/. + +Specific modes and adaptation for Linux kernel by Ard Biesheuvel +. Permission to use under GPL terms is +granted. + +> BSD + +openssl-1.0.2q.tar.gz\openssl-1.0.2q.tar\openssl-1.0.2q\crypto\camellia\asm\cmll-x86_64.pl + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS SUBCOMPONENT UNDER THE TERMS OF THE BSD LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE] + +Copyright (c) 2008 Andy Polyakov + +This module may be used under the terms of either the GNU General +Public License version 2 or later, the GNU Lesser General Public +License version 2.1 or later, the Mozilla Public License version +1.1 or the BSD License. The exact terms of either license are +distributed along with this module. For further details see +http://www.openssl.org/~appro/camellia/. + +> Apache 2.0 + +openssl-1.0.2q.tar.gz\openssl-1.0.2q.tar\openssl-1.0.2q\crypto\ec\ecp_nistputil.c + +Copyright 2011 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); + +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +> MIT + +openssl-1.0.2q.tar.gz\openssl-1.0.2q.tar\openssl-1.0.2q\crypto\md5\asm\md5-ia64.S + +Copyright (c) 2005 Hewlett-Packard Development Company, L.P. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +> BSD-3 Clause + +openssl-1.0.2q.tar.gz\openssl-1.0.2q.tar\openssl-1.0.2q\crypto\x509v3\v3_pci.c + +Copyright (c) 2004 Kungliga Tekniska Högskolan +(Royal Institute of Technology, Stockholm, Sweden). +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither the name of the Institute nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +> BSD- Style + +openssl-1.0.2q.tar.gz\openssl-1.0.2q.tar\openssl-1.0.2q\demos\easy_tls\easy-tls.c + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE BSD STYLE LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Copyright 1999 Bodo Moeller. All rights reserved. + +This is free software; you can redistributed and/or modify it +unter the terms of either +- the GNU General Public License as published by the +Free Software Foundation, version 1, or (at your option) +any later version, +or +- the following license: +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that each of the following +conditions is met: + +1. 
Redistributions qualify as "freeware" or "Open Source Software" under +one of the following terms: + +(a) Redistributions are made at no charge beyond the reasonable cost of +materials and delivery. + +(b) Redistributions are accompanied by a copy of the Source Code +or by an irrevocable offer to provide a copy of the Source Code +for up to three years at the cost of materials and delivery. +Such redistributions must allow further use, modification, and +redistribution of the Source Code under substantially the same +terms as this license. + +2. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +3. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. + +4. All advertising materials mentioning features or use of this +software must display the following acknowledgment: +"This product includes software developed by Bodo Moeller." +(If available, substitute umlauted o for oe.) + +5. Redistributions of any form whatsoever must retain the following +acknowledgment: +"This product includes software developed by Bodo Moeller." + +THIS SOFTWARE IS PROVIDED BY BODO MOELLER ``AS IS'' AND ANY +EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL BODO MOELLER OR +HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +> BSD- 2 Clause + +openssl-1.0.2q.tar.gz\openssl-1.0.2q.tar\openssl-1.0.2q\crypto\seed\seed.c + +Copyright (c) 2007 KISA(Korea Information Security Agency). All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Neither the name of author nor the names of its contributors may +be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. 
+ + +> Creative Commons Attribution 4.0 + +openssl-1.0.2q.tar.gz\openssl-1.0.2q.tar\openssl-1.0.2q\ssl\heartbeat_test.c + +Creative Commons Attribution 4.0 International (CC By 4.0) +http://creativecommons.org/licenses/by/4.0/deed.en_US + +>>> pcre-8.41-1.ph2 + +PCRE LICENCE +------------ + +PCRE is a library of functions to support regular expressions whose syntax +and semantics are as close as possible to those of the Perl 5 language. + +Release 8 of PCRE is distributed under the terms of the "BSD" licence, as +specified below. The documentation for PCRE, supplied in the "doc" +directory, is distributed under the same terms as the software itself. The data +in the testdata directory is not copyrighted and is in the public domain. + +The basic library functions are written in C and are freestanding. Also +included in the distribution is a set of C++ wrapper functions, and a +just-in-time compiler that can be used to optimize pattern matching. These +are both optional features that can be omitted when the library is built. + + +THE BASIC LIBRARY FUNCTIONS +--------------------------- + +Written by: Philip Hazel +Email local part: ph10 +Email domain: cam.ac.uk + +University of Cambridge Computing Service, +Cambridge, England. + +Copyright (c) 1997-2017 University of Cambridge +All rights reserved. + + +PCRE JUST-IN-TIME COMPILATION SUPPORT +------------------------------------- + +Written by: Zoltan Herczeg +Email local part: hzmester +Emain domain: freemail.hu + +Copyright(c) 2010-2017 Zoltan Herczeg +All rights reserved. + + +STACK-LESS JUST-IN-TIME COMPILER +-------------------------------- + +Written by: Zoltan Herczeg +Email local part: hzmester +Emain domain: freemail.hu + +Copyright(c) 2009-2017 Zoltan Herczeg +All rights reserved. + + +THE C++ WRAPPER FUNCTIONS +------------------------- + +Contributed by: Google Inc. + +Copyright (c) 2007-2012, Google Inc. +All rights reserved. + + +THE "BSD" LICENCE +----------------- + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +Neither the name of the University of Cambridge nor the name of Google +Inc. nor the names of their contributors may be used to endorse or +promote products derived from this software without specific prior +written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +End + +ADDITIONAL LICENSE INFORMATION: + +> BSD 2 clause + +pcre-8.41.tar.bz2\pcre-8.41.tar\pcre-8.41\sljit\sljitConfigInternal.h + +Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are +permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of +conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list +of conditions and the following disclaimer in the documentation and/or other materials +provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT +SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +>MIT STYLE + +pcre-8.41.tar.bz2\pcre-8.41.tar\pcre-8.41\aclocal.m4 + +Copyright (C) 1996-2014 Free Software Foundation, Inc. + +Copyright (C) 1996-2014 Free Software Foundation, Inc. + +This file is free software; the Free Software Foundation +gives unlimited permission to copy and/or distribute it, +with or without modifications, as long as this notice is preserved. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY, to the extent permitted by law; without +even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. + +>>> popt-1.16-5.ph2 + +Copyright (c) 1998 Red Hat Software + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Except as contained in this notice, the name of the X Consortium shall not be +used in advertising or otherwise to promote the sale, use or other dealings +in this Software without prior written authorization from the X Consortium. 
+ +>>> shadow-4.2.1-16.ph2 + +Copyright (c) 1991 - 1994, Julianne Frances Haugh +Copyright (c) 1996 - 2000, Marek Michalkiewicz +Copyright (c) 2000 - 2006, Tomasz Kloczko +Copyright (c) 2007 - 2011, Nicolas François +All rights reserved + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1 Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer +2 Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution +3 The name of the copyright holders or contributors may not be used to +endorse or promote products derived from this software without +specific prior written permission + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED IN NO EVENT SHALL THE COPYRIGHT +HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE + + +ADDITIONAL LICENSE INFORMATION: + +> GPL 2.0 + +shadow-421tarxz\shadow-421tar\shadow-421\src\vipwc + +Copyright (c) 1997 , Guy Maor +Copyright (c) 1999 - 2000, Marek Michalkiewicz +Copyright (c) 2002 - 2006, Tomasz Kloczko +Copyright (c) 2007 - 2013, Nicolas François +All rights reserved + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version + +This program is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE See the GNU +General Public License for more details + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc, 51 Franklin Street, Fifth Floor, +Boston, MA 02110-1301, USA + + +udbachktgz + + +> BSD Style + +shadow-421tarxz\shadow-421tar\shadow-421\src\login_nopamc + +Copyright 1995 by Wietse Venema All rights reserved Individual files +may be covered by other copyrights (as noted in the file itself) + +This material was originally written and compiled by Wietse Venema at +Eindhoven University of Technology, The Netherlands, in 1990, 1991, +1992, 1993, 1994 and 1995 + +Redistribution and use in source and binary forms are permitted +provided that this entire copyright notice is duplicated in all such +copies + +This software is provided "as is" and without any expressed or implied +warranties, including, without limitation, the implied warranties of +merchantibility and fitness for any particular purpose + + +> Public Domain + +shadow-421tarxz\shadow-421tar\shadow-421\man\zh_TW\man1\newgrp1 + +Original author unknown This man page is in the public domain +Modified 
Sat Oct 9 17:46:48 1993 by faith@csuncedu +TH NEWGRP 1 "9 October 1993" "Linux 12" "Linux Programmer's Manual" + + +> GPL 3.0 + +shadow-421tarxz\shadow-421tar\shadow-421\man\tr\man1\chfn.1 + +chfn.1 -- change your finger information +(c) 1994 by salvatore valente +this program is free software. you can redistribute it and +modify it under the terms of the gnu general public license. +there is no warranty. + +> MIT-Style + +shadow-421tarxz\shadow-421tar\shadow-421\aclocalm4 + +Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, +2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, +Inc +This file is free software; the Free Software Foundation +gives unlimited permission to copy and/or distribute it, +with or without modifications, as long as this notice is preserved + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY, to the extent permitted by law; without +even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE + + +> BSD + +[PLEASE NOTE: VMWARE, INC ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE BSD LICENSE THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE] + +shadow-421tarxz\shadow-421tar\shadow-421\man\tr\man1\passwd1 + +Copyright Red Hat, Inc, 1998, 1999, 2002 + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1 Redistributions of source code must retain the above copyright +notice, and the entire permission notice in its entirety, +including the disclaimer of warranties +2 Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution +3 The name of the author may not be used to endorse or promote +products derived from this software without specific prior +written permission + +ALTERNATIVELY, this product may be distributed under the terms of +the GNU Public License, in which case the provisions of the GPL are +required INSTEAD OF the above restrictions (This clause is +necessary due to a potential bad interaction between the GPL and +the restrictions contained in a BSD-style copyright) + +THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE + +Copyright (c) Cristian Gafton, 1998, + + + +> LDP General Public License 1.0 + +shadow-421tarxz\shadow-421tar\shadow-421\man\hu\man1\su1 + +You may copy, distribute and modify under the terms of the LDP General +Public License as specified in the LICENSE file that comes with the +gnumaniak distribution + +The author kindly requests that no comments regarding the "better" +suitability or up\-to\-date notices of any info documentation alternative +is added without contacting him first + +(C) 1999 Ragnar Hojland Espinosa + +GNU su man page +man 
pages are NOT obsolete! + +TH su 1 "18 August 1999" "GNU Shell Utilities 20" + +>>> sqlite-3.22.0-3.ph2 + +The author disclaims copyright to this source code. In place of +a legal notice, here is a blessing: + +May you do good and not evil. +May you find forgiveness for yourself and forgive others. +May you share freely, never taking more than you give. + +ADDITIONAL LICENSE INFORMATION: + +> MIT-Style + +sqlite-autoconf-3220000.tar.gz\sqlite-autoconf-3220000.tar\sqlite-autoconf-3220000\aclocal.m4 + +Copyright (C) 1996-2014 Free Software Foundation, Inc. + +This file is free software; the Free Software Foundation +gives unlimited permission to copy and/or distribute it, +with or without modifications, as long as this notice is preserved. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY, to the extent permitted by law; without +even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. + +>>> sudo-1.8.20p2-5.ph2 + +Sudo is distributed under the following license: + +Copyright (c) 1994-1996, 1998-2017 +Todd C. Miller + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +Sponsored in part by the Defense Advanced Research Projects +Agency (DARPA) and Air Force Research Laboratory, Air Force +Materiel Command, USAF, under agreement number F39502-99-1-0512. + +The file redblack.c bears the following license: + +Copyright (c) 2001 Emin Martinian + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that neither the name of Emin +Martinian nor the names of any contributors are be used to endorse or +promote products derived from this software without specific prior +written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The file reallocarray.c bears the following license: + +Copyright (c) 2008 Otto Moerbeek + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +The files getcwd.c, glob.c, glob.h, snprintf.c and sudo_queue.h bear the +following license: + +Copyright (c) 1989, 1990, 1991, 1993 +The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +The file fnmatch.c bears the following license: + +Copyright (c) 2011, VMware, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +Neither the name of the VMware, Inc. nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. 
OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The file getopt_long.c bears the following license: + + +Copyright (c) 2000 The NetBSD Foundation, Inc. +All rights reserved. + +This code is derived from software contributed to The NetBSD Foundation +by Dieter Baron and Thomas Klausner. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + + +The file inet_pton.c bears the following license: + +Copyright (c) 1996 by Internet Software Consortium. + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS +ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE +CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL +DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR +PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + + +The embedded copy of zlib bears the following license: + +Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler + +This software is provided 'as-is', without any express or implied +warranty. In no event will the authors be held liable for any damages +arising from the use of this software. + +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it +freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you must not +claim that you wrote the original software. 
If you use this software +in a product, an acknowledgment in the product documentation would be +appreciated but is not required. +2. Altered source versions must be plainly marked as such, and must not be +misrepresented as being the original software. +3. This notice may not be removed or altered from any source distribution. + +Jean-loup Gailly Mark Adler +jloup@gzip.org madler@alumni.caltech.edu + +> PUBLIC DOMAIN + +sudo-1.8.20p2.tar.gz\sudo-1.8.20p2.tar\sudo-1.8.20p2\lib\util\regress\glob\globtest.c + +Public domain, 2008, Todd C. Miller Todd.Miller@courtesan.com + +>>> vim-8.0.0533-6.ph2 + +Vim is Charityware. You can use and copy it as much as you like, but you are +encouraged to make a donation to help orphans in Uganda. Please read the file +"runtime/doc/uganda.txt" for details (do ":help uganda" inside Vim). + +Summary of the license: There are no restrictions on using or distributing an +unmodified copy of Vim. Parts of Vim may also be distributed, but the license +text must always be included. For modified versions a few restrictions apply. +The license is GPL compatible, you may compile Vim with GPL libraries and +distribute it. + +ADDITIONAL LICENSE INFORMATION: + +>MIT + +vim-8.0.0533\runtime\autoload\netrwFileHandlers.vim + +Copyright (C) 1999-2012 Charles E. Campbell {{{1 +Permission is hereby granted to use and distribute this code, +with or without modifications, provided that this copyright +notice is copied with it. Like anything else that's free, +netrwFileHandlers.vim is provided *as is* and comes with no +warranty of any kind, either expressed or implied. In no +event will the copyright holder be liable for any damages +resulting from the use of this software. + +>GPL 3.0 + +vim-8.0.0533\runtime\plugin\tarPlugin.vim + +tarPlugin.vim -- a Vim plugin for browsing tarfiles +Original was copyright (c) 2002, Michael C. Toren +Modified by Charles E. Campbell +Distributed under the GNU General Public License. + +>GPL 2.0 + +vim-8.0.0533\runtime\tools\efm_perl.pl + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE GPL 2.PLEASE SEE THE APPENDIX TO REVIEW THE FULL TEXT OF THE GPL 2.0. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Copyright (c) 2001 by Joerg Ziefle +You may use and distribute this software under the same terms as Perl itself. + +>Public Domain + +vim-8.0.0533\src\tee\tee.c + +Copyright (c) 1996, Paul Slootman + +Author: Paul Slootman +(paul@wurtel.hobby.nl, paul@murphy.nl, paulS@toecompst.nl) +Modifications for MSVC: Yasuhiro Matsumoto + +This source code is released into the public domain. It is provided on an +as-is basis and no responsibility is accepted for its failure to perform +as expected. It is worth at least as much as you paid for it! + +>>> xz-5.2.3-2.ph2 + +Author: Lasse Collin +This file has been put into the public domain. +You can do whatever you want with this file. + +ADDITIONAL LICENSE INFORMATION: + +> GPL 2.0 + +xz-5.2.3.tar.xz\xz-5.2.3.tar\xz-5.2.3\src\scripts\xzmore.in + +Copyright (C) 2001, 2002, 2007 Free Software Foundation +Copyright (C) 1992, 1993 Jean-loup Gailly + +Modified for XZ Utils by Andrew Dudman and Lasse Collin. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +> MIT-Style + +xz-5.2.3.tar.xz\xz-5.2.3.tar\xz-5.2.3\m4\visibility.m4 + +Copyright (C) 2005, 2008-2010 Free Software Foundation, Inc. +This file is free software; the Free Software Foundation +gives unlimited permission to copy and/or distribute it, +with or without modifications, as long as this notice is preserved. + +> LGPL 2.1 + +xz-5.2.3.tar.xz\xz-5.2.3.tar\xz-5.2.3\lib\ getopt1.c + +Copyright (C) 1987,88,89,90,91,92,93,94,96,97,98,2004,2006 +Free Software Foundation, Inc. +This file is part of the GNU C Library. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU Lesser General Public License as published by +the Free Software Foundation; either version 2.1, or (at your option) +any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public License along +with this program; if not, write to the Free Software Foundation, +Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +> GPL 3.0 + +xz-5.2.3.tar.xz\xz-5.2.3.tar\xz-5.2.3\COPYING.GPLv3 + +License: GPL 3.0 + +>>> zlib-1.2.11-1.ph2 + +Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler + +This software is provided 'as-is', without any express or implied +warranty. In no event will the authors be held liable for any damages +arising from the use of this software. + +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it +freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you must not +claim that you wrote the original software. If you use this software +in a product, an acknowledgment in the product documentation would be +appreciated but is not required. +2. Altered source versions must be plainly marked as such, and must not be +misrepresented as being the original software. +3. This notice may not be removed or altered from any source distribution. + +Jean-loup Gailly Mark Adler +jloup@gzip.org madler@alumni.caltech.edu + +ADDITIONAL LICENSE INFORMATION: + +> GPL 2.0 + +zlib-1.2.11.tar.gz\zlib-1.2.11.tar\zlib-1.2.11\contrib\ada\zlib.ads + +Copyright (C) 20022004 Dmitriy Anisimkov + +This library is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or (at +your option) any later version. + +This library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this library; if not, write to the Free Software Foundation, +Inc., 59 Temple Place Suite 330, Boston, MA 021111307, USA. 
+ +As a special exception, if other files instantiate generics from this +unit, or you link this unit with other files to produce an executable, +this unit does not by itself cause the resulting executable to be +covered by the GNU General Public License. This exception does not +however invalidate any other reasons why the executable file might be +covered by the GNU Public License. + +> BSD-Style + +zlib-1.2.11.tar.gz\zlib-1.2.11.tar\zlib-1.2.11\contrib\amd64\ amd64-match.S + +This is free software; you can redistribute it and/or modify it +under the terms of the BSD License. Use by owners of Che Guevarra +parafernalia is prohibited, where possible, and highly discouraged +elsewhere. + +> MIT-Style (Boost Software License) + +zlib-1.2.11.tar.gz\zlib-1.2.11.tar\zlib-1.2.11\contrib\dotzlib\LICENSE_1_0.txt + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +> Public Domain + +zlib-1.2.11.tar.gz\zlib-1.2.11.tar\zlib-1.2.11\examples\ fitblk.c + +fitblk.c: example of fitting compressed output to a specified size +Not copyrighted -- provided to the public domain +Version 1.1 25 November 2004 Mark Adler + +--------------- SECTION 4: GNU General Public License, V2.0 ---------- + +GNU General Public License, V2.0 is applicable to the following component(s). + + +>>> e2fsprogs-1.43.4-2.ph2 + +Written by Theodore Ts'o, Copyright 2007, 2008, 2009. + +This file may be redistributed under the terms of the GNU Public License, version 2. + +ADDITIONAL LICENSE INFORMATION : + +> GPL 3.0 + +e2fsprogs-1.43.4.tar.gz\e2fsprogs-1.43.4.tar\e2fsprogs-1.43.4\contrib\fallocate.c + +Copyright (C) 2008 Red Hat, Inc. All rights reserved. +Written by Eric Sandeen + +cvtnum routine taken from xfsprogs, +Copyright (c) 2003-2005 Silicon Graphics, Inc. + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License as +published by the Free Software Foundation. + +This program is distributed in the hope that it would be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software Foundation, +Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + +> Public Domain + +e2fsprogs-1.43.4.tar.gz\e2fsprogs-1.43.4.tar\e2fsprogs-1.43.4\config\mkinstalldirs + +mkinstalldirs --- make directory hierarchy +Author: Noah Friedman +Created: 1993-05-16 +Public domain. + +> LGPL 2.0 + +e2fsprogs-1.43.4.tar.gz\e2fsprogs-1.43.4.tar\e2fsprogs-1.43.4\e2fsck\mtrace.c + +Copyright (C) 1991, 1992 Free Software Foundation, Inc. +Written April 2, 1991 by John Gilmore of Cygnus Support. +Based on mcheck.c by Mike Haertel. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Library General Public License as +published by the Free Software Foundation; either version 2 of the +License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Library General Public License for more details. + +You should have received a copy of the GNU Library General Public +License along with this library; see the file COPYING.LIB. If +not, write to the Free Software Foundation, Inc., 675 Mass Ave, +Cambridge, MA 02139, USA. + +> MIT-Style + +e2fsprogs-1.43.4.tar.gz\e2fsprogs-1.43.4.tar\e2fsprogs-1.43.4\aclocal.m4 + +Copyright (C) 1996-2013 Free Software Foundation, Inc. + +This file is free software; the Free Software Foundation +gives unlimited permission to copy and/or distribute it, +with or without modifications, as long as this notice is preserved. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY, to the extent permitted by law; without +even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. + +> BSD-3 Clause + +e2fsprogs-1.43.4.tar.gz\e2fsprogs-1.43.4.tar\e2fsprogs-1.43.4\lib\uuid\COPYING + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, and the entire permission notice in its entirety, + including the disclaimer of warranties. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + +THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF +WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT +OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. 
+ +> LGPL 3.0 + +e2fsprogs-1.43.4.tar.gz\e2fsprogs-1.43.4.tar\e2fsprogs-1.43.4\lib\blkid\blkid.h.in + +Copyright (C) 2001 Andreas Dilger +Copyright (C) 2003 Theodore Ts'o + +This file may be redistributed under the terms of the GNU Lesser General Public License. + +>>> gcc-6.3.0-7.ph2 + +Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING. If not, write to +the Free Software Foundation, 51 Franklin Street, Fifth Floor, +Boston MA 02110-1301, USA. + +ADDITIONAL LICENSE INFORMATION: + +> MIT-Style + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\boehm-gc\cord\cordbscs.c + +Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved. + +THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED +OR IMPLIED. ANY USE IS AT YOUR OWN RISK. + +Permission is hereby granted to use or copy this program +for any purpose, provided the above notices are retained on all copies. +Permission to modify the code and to distribute modified code is granted, +provided the above notices are retained, and a notice that the code was +modified is included with the above copyright notice. + +> GPL 3.0 + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\boehm-gc\testsuite\boehm-gc.c\c.exp + +Copyright (C) 2011 Free Software Foundation, Inc. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING3. If not see +. + +> GFDL 1.3 + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\gcc\doc\gcov.texi + +Copyright (C) 1996-2013 Free Software Foundation, Inc. +This is part of the GCC manual. +For copying conditions, see the file gcc.texi. + +Copyright @copyright{} 1996-2013 Free Software Foundation, Inc. + +Permission is granted to copy, distribute and/or modify this document +under the terms of the GNU Free Documentation License, Version 1.3 or +any later version published by the Free Software Foundation; with the +Invariant Sections being ``GNU General Public License'' and ``Funding +Free Software'', the Front-Cover texts being (a) (see below), and with +the Back-Cover Texts being (b) (see below). A copy of the license is +included in the gfdl(7) man page. + +> BSD with Patent grant + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\gcc\go\gofrontend\LICENSE + +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. +Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. + +> LGPL 2.1 + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\gcc\testsuite\gcc.dg\compat\generate-random.h + +Copyright (C) 2004 Free Software Foundation + +The GNU C Library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +The GNU C Library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. 
+ +You should have received a copy of the GNU Lesser General Public +License along with the GNU C Library; if not, write to the Free +Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +02110-1301 USA. + +> LGPL 3.0 + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\gcc\stab.def + +Table of DBX symbol codes for the GNU system. +Copyright (C) 1988-2013 Free Software Foundation, Inc. +This file is part of the GNU C Library. + +The GNU C Library is free software; you can redistribute it and/or +modify it under the terms of the GNU Library General Public License as +published by the Free Software Foundation; either version 3 of the +License, or (at your option) any later version. + +The GNU C Library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Library General Public License for more details. + +You should have received a copy of the GNU Library General Public +License along with the GNU C Library; see the file COPYING3. If +not see . + +> LGPL 2.0 + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\include\demangle.h + +Copyright 1992, 1993, 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2002, +2003, 2004, 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc. + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU Library General Public License +as published by the Free Software Foundation; either version 2, or +(at your option) any later version. + +In addition to the permissions in the GNU Library General Public +License, the Free Software Foundation gives you unlimited +permission to link the compiled version of this file into +combinations with other programs, and to distribute those +combinations without any restriction coming from the use of this +file. (The Library Public License restrictions do apply in other +respects; for example, they cover modification of the file, and +distribution when not linked into a combined executable.) + +This program is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Library General Public License for more details. + +You should have received a copy of the GNU Library General Public +License along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA +02110-1301, USA. + +> Public Domain + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\libffi\src\dlmalloc.c + +This is a version (aka dlmalloc) of malloc/free/realloc written by +Doug Lea and released to the public domain, as explained at +http://creativecommons.org/licenses/publicdomain. Send questions, +comments, complaints, performance data, etc to dl@cs.oswego.edu. + +> MIT + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\libffi\LICENSE + +Copyright (c) 1996-2014 Anthony Green, Red Hat, Inc and others. +See source files for details. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +``Software''), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +> LGPL 2.1 + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\libffi\msvcc.sh + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE LGPL 2.1 LICENSE THE TEXT OF WHICH IS SET FORTH IN THE APPENDIX. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +BEGIN LICENSE BLOCK +Version: MPL 1.1/GPL 2.0/LGPL 2.1 + +The contents of this file are subject to the Mozilla Public License Version +1.1 (the "License"); you may not use this file except in compliance with +the License. You may obtain a copy of the License at +http://www.mozilla.org/MPL/ + +Software distributed under the License is distributed on an "AS IS" basis, +WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License +for the specific language governing rights and limitations under the +License. + +The Original Code is the MSVC wrappificator. + +The Initial Developer of the Original Code is +Timothy Wall . +Portions created by the Initial Developer are Copyright (C) 2009 +the Initial Developer. All Rights Reserved. + +Contributor(s): +Daniel Witte + +Alternatively, the contents of this file may be used under the terms of +either the GNU General Public License Version 2 or later (the "GPL"), or +the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), +in which case the provisions of the GPL or the LGPL are applicable instead +of those above. If you wish to allow use of your version of this file only +under the terms of either the GPL or the LGPL, and not to allow others to +use your version of this file under the terms of the MPL, indicate your +decision by deleting the provisions above and replace them with the notice +and other provisions required by the GPL or the LGPL. If you do not delete +the provisions above, a recipient may use your version of this file under +the terms of any one of the MPL, the GPL or the LGPL. + +END LICENSE BLOCK + +> GPL 3.0 (with runtime library exception) + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\libgcc\config\aarch64\crti.S + +Machine description for AArch64 architecture. +Copyright (C) 2009-2013 Free Software Foundation, Inc. +Contributed by ARM Ltd. + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. 
+ +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +Under Section 7 of GPL version 3, you are granted additional +permissions described in the GCC Runtime Library Exception, version +3.1, as published by the Free Software Foundation. + +You should have received a copy of the GNU General Public License and +a copy of the GCC Runtime Library Exception along with this program; +see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +. + +> LGPL 2.1(with linking exception) + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\libgcc\config\c6x\sfp-machine.h + +Copyright (C) 2010-2013 Free Software Foundation, Inc. + +This files is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +In addition to the permissions in the GNU Lesser General Public +License, the Free Software Foundation gives you unlimited +permission to link the compiled version of this file into +combinations with other programs, and to distribute those +combinations without any restriction coming from the use of this +file. (The Lesser General Public License restrictions do apply in +other respects; for example, they cover modification of the file, +and distribution when not linked into a combine executable.) + +This file is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with GCC; see the file COPYING.LIB. If not see +. + +> GPL 2.0 (with linking exception) + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\libiberty\testsuite\test-expandargv.c + +Copyright (C) 2006 Free Software Foundation, Inc. +Written by Carlos O'Donell + +This file is part of the libiberty library, which is part of GCC. + +This file is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, +and to distribute those combinations without any restriction coming +from the use of this file. (The General Public License restrictions +do apply in other respects; for example, they cover modification of +the file, and distribution when not linked into a combined +executable.) + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. + +> MIT-Style + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\libjava\classpath\external\jsr166\readme + +Copyright 2002-2004 Sun Microsystems, Inc. 
All rights reserved. Use is +subject to the following license terms. + +"Sun hereby grants you a non-exclusive, worldwide, non-transferrable +license to use and distribute the Java Software technologies as part +of a larger work in source and binary forms, with or without +modification, provided that the following conditions are met: + +-Neither the name of or trademarks of Sun may be used to endorse or +promote products derived from the Java Software technology without +specific prior written permission. + +-Redistributions of source or binary code must be accompanied by the +following notice and disclaimers: + +Portions copyright Sun Microsystems, Inc. Used with kind permission. + +This software is provided AS IS, without a warranty of any kind. ALL +EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND +WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PUPOSE OR +NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN +MICROSYSTEMS, INC. AND ITS LICENSORS SHALL NOT BE LIABLE +FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF +USING, MODIFYING OR DISTRIBUTING THE SOFTWARE OR ITS +DERIVATIVES. IN NO EVENT WILL SUN MICROSYSTEMS, INC. OR +ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR +DATA, OR FOR DIRECT, INDIRECT,CONSQUENTIAL, INCIDENTAL +OR PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF +THE THEORY OR LIABILITY, ARISING OUT OF THE USE OF OR +INABILITY TO USE SOFTWARE, EVEN IF SUN MICROSYSTEMS, INC. +HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +You acknowledge that Software is not designed, licensed or intended for +use in the design, construction, operation or maintenance of any nuclear +facility." + +> MIT + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\libsanitizer\LICENSE.TXT + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE MIT LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +============================================================================== +compiler_rt License +============================================================================== +The compiler_rt library is dual licensed under both the University of Illinois +"BSD-Like" license and the MIT license. As a user of this code you may choose +to use it under either license. As a contributor, you agree to allow your code +to be used under both. + +Full text of the relevant licenses is included below. +============================================================================== +University of Illinois/NCSA +Open Source License + +Copyright (c) 2009-2012 by the contributors listed in CREDITS.TXT + +All rights reserved. + +Developed by: + +LLVM Team + +University of Illinois at Urbana-Champaign + +http://llvm.org + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimers. + +Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimers in the +documentation and/or other materials provided with the distribution. 
+ +Neither the names of the LLVM Team, University of Illinois at +Urbana-Champaign, nor the names of its contributors may be used to +endorse or promote products derived from this Software without specific +prior written permission. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE +SOFTWARE. +============================================================================== +Copyright (c) 2009-2012 by the contributors listed in CREDITS.TXT + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +============================================================================== +Copyrights and Licenses for Third Party Software Distributed with LLVM: +============================================================================== +The LLVM software contains code written by third parties. Such software will +have its own individual LICENSE.TXT file in the directory in which it appears. +This file will describe the copyrights, license, and restrictions which apply +to that code. + +The disclaimer of warranty in the University of Illinois Open Source License +applies to all code in the LLVM Distribution, and nothing in any of the +other licenses gives permission to use the names of the LLVM Team or the +University of Illinois to endorse or promote products derived from this +Software. + +The following pieces of software have additional or alternate copyrights, +licenses, and/or restrictions: + +Program Directory +------- --------- +mach_override lib/interception/mach_override + +> GFDL 1.2 + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\libstdc++-v3\doc\html\index.html + +Permission is granted to copy, distribute and/or modify this +document under the terms of the GNU Free Documentation +License, Version 1.2 or any later version published by the +Free Software Foundation; with no Invariant Sections, with no +Front-Cover Texts, and with no Back-Cover Texts. 
+ +> Project Gutenberg license + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\libgo\go\compress\testdata\Mark.Twain- +Tom.Sawyer.txt + +The Project Gutenberg EBook of The Adventures of Tom Sawyer, Complete +by Mark Twain (Samuel Clemens) + +This eBook is for the use of anyone anywhere at no cost and with +almost no restrictions whatsoever. You may copy it, give it away or +re-use it under the terms of the Project Gutenberg License included +with this eBook or online at www.gutenberg.net + +> BSD + +gcc-6.3.0.tar.bz2\gcc-6.3.0.tar\gcc-6.3.0\mkdep + +Copyright (c) 1987 Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms are permitted +provided that the above copyright notice and this paragraph are +duplicated in all such forms and that any documentation, +advertising materials, and other materials related to such +distribution and use acknowledge that the software was developed +by the University of California, Berkeley. The name of the +University may not be used to endorse or promote products derived +from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED +WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + +>>> iproute2-4.10.0-3.ph2 + +Copyright (C) 1999 Pavel Golubev +Copyright (C) 2001-2004 Lubomir Bulej + +chkconfig: 2345 11 89 +description: sets up CBQ-based traffic control + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, see . + +ADDITIONAL LICENSE INFORMATION: + +> GPL 3.0 + +iproute2-4.10.0.tar.xz\iproute2-4.10.0.tar\iproute2-4.10.0\include\linux\if_bonding.h + +(c) Copyright 1999, Thomas Davis, tadavis@lbl.gov + +This software may be used and distributed according to the terms +of the GNU Public License, incorporated herein by reference. + +> BSD- 3 Clause + +iproute2-4.10.0.tar.xz\iproute2-4.10.0.tar\iproute2-4.10.0\include\linux\can.h + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE BSD- 3 CLAUSE LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Authors: Oliver Hartkopp +Urs Thuermann +Copyright (c) 2002-2007 Volkswagen Group Electronic Research +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of Volkswagen nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. 
+ +Alternatively, provided that this notice is retained in full, this +software may be distributed under the terms of the GNU General +Public License ("GPL") version 2, in which case the provisions of the +GPL apply INSTEAD OF those given above. + +The provided data structures and external interfaces from this code +are not restricted to be used by modules with a GPL compatible license. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +> BSD- 3 Clause + +iproute2-4.10.0.tar.xz\iproute2-4.10.0.tar\iproute2-4.10.0\include\netinet\tcp.h + +Copyright (c) 1982, 1986, 1993 +The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +4. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +> Public Domain + +iproute2-4.10.0.tar.xz\iproute2-4.10.0.tar\iproute2-4.10.0\ip\routel + +Script created by: Stephen R. van den Berg , 1999/04/18 +Donated to the public domain. + +>>> iptables-1.6.1-4.ph2 + +Copyright (C) 2000-2002 Joakim Axelsson +Patrick Schaaf +Martin Josefsson +Copyright (C) 2003-2011 Jozsef Kadlecsik + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License version 2 as +published by the Free Software Foundation. 
+ +ADDITIONAL LICENSE INFORMATION: + +> GPL 3.0 + +iptables-1.6.1.tar.bz2\iptables-1.6.1.tar\iptables-1.6.1\extensions\libip6t_HL.c + +IPv6 Hop Limit Target module +Maciej Soltysiak +Based on HW's ttl target +This program is distributed under the terms of GNU GPL. + +> LGPL 2.0 + +iptables-1.6.1.tar.bz2\iptables-1.6.1.tar\iptables-1.6.1\iptables\getethertype.c + +This file was part of the NYS Library. + +The NYS Library is free software; you can redistribute it and/or +modify it under the terms of the GNU Library General Public License as +published by the Free Software Foundation; either version 2 of the +License, or (at your option) any later version. + +> Artistic 2.0 + +iptables-1.6.1.tar.bz2\iptables-1.6.1.tar\iptables-1.6.1\iptables\iptables-apply + +Copyright © Martin F. Krafft +Released under the terms of the Artistic Licence 2.0 + +> MIT + +iptables-1.6.1.tar.bz2\iptables-1.6.1.tar\iptables-1.6.1\utils\pf.os + +SYN signatures. Those signatures work for SYN packets only (duh!). + +(C) Copyright 2000-2003 by Michal Zalewski +(C) Copyright 2003 by Mike Frantzen + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +>>> linux-esx-4.9.154-1.ph2 + +Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. +Written by David Howells (dhowells@redhat.com) + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public Licence +as published by the Free Software Foundation; either version +2 of the Licence, or (at your option) any later version. + +ADDITIONAL LICENSE INFORMATION: + +> MIT + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\arch\arm\boot\dts\arm-realview-pb1176.dts + +Copyright 2014 Linaro Ltd + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +> OpenSSL + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\linux-4.9.154\arch\arm\crypto\aes-armv4.S + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE OPENSSL LICENSE.PLEASE SEE BELOW FOR THE FULL TEXT OF THE OPENSSL LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE]. + +Written by Andy Polyakov for the OpenSSL +project. The module is, however, dual licensed under OpenSSL and +CRYPTOGAMS licenses depending on where you obtain it. For further +details see http://www.openssl.org/~appro/cryptogams/. + + +> GPL 3.0 + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\linux-4.9.154\arch\arm\mach-imx\pm-imx27.c + +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License. + +> LGPL 2.0 + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\linux-4.9.154\arch\arm\mach-orion5x\dns323-setup.c + +Copyright (C) 2010 Benjamin Herrenschmidt + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU Lesser General Public License as +published by the Free Software Foundation; either version 2 of the +License, or (at your option) any later version. + +> BSD- Style + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\arch\arm\nwfpe\milieu.h + +his C header file is part of the SoftFloat IEC/IEEE Floating-point +Arithmetic Package, Release 2. + +Written by John R. Hauser. This work was made possible in part by the +International Computer Science Institute, located at Suite 600, 1947 Center +Street, Berkeley, California 94704. Funding was partially provided by the +National Science Foundation under grant MIP-9311980. The original version +of this code was written as part of a project to build a fixed-point vector +processor in collaboration with the University of California at Berkeley, +overseen by Profs. Nelson Morgan and John Wawrzynek. More information +is available through the Web page +http://www.jhauser.us/arithmetic/SoftFloat-2b/SoftFloat-source.txt + +THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort +has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT +TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO +PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY +AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. + +Derivative works are acceptable, even for commercial purposes, so long as +(1) they include prominent notice that the work is derivative, and (2) they +include prominent notice akin to these three paragraphs for those parts of +this code that are retained. + +> BSD- 3 Clause + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\arch\blackfin\Clear_BSD.txt + +Copyright (c) 2012, Analog Devices, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the +distribution. + +* Neither the name of Analog Devices, Inc. nor the names of its +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +> BSD-3 Clause + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\arch\blackfin\include\asm\bfin_pfmon.h + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE BSD- 3 CLAUSE LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Copyright 2005-2011 Analog Devices Inc. + +Licensed under the Clear BSD license or GPL-2 (or later). + +> BSD- 2 Clause + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\arch\cris\arch-v10\lib\memset.c + +Copyright (C) 1999-2005 Axis Communications. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Neither the name of Axis Communications nor the names of its +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY AXIS COMMUNICATIONS AND ITS CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AXIS +COMMUNICATIONS OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +ena_linux_1.1.3.tar.gz, linux-4.4.111.tar.xz + +> MIT- Style + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\arch\m68k\fpsp040\README + +M68040 Software Package Copyright (c) 1993, 1994 Motorola Inc. +All rights reserved. 
+ +THE SOFTWARE is provided on an "AS IS" basis and without warranty. +To the maximum extent permitted by applicable law, +MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED, +INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A +PARTICULAR PURPOSE and any warranty against infringement with +regard to the SOFTWARE (INCLUDING ANY MODIFIED VERSIONS THEREOF) +and any accompanying written materials. + +To the maximum extent permitted by applicable law, +IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER +(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS +PROFITS, BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR +OTHER PECUNIARY LOSS) ARISING OF THE USE OR INABILITY TO USE THE +SOFTWARE. Motorola assumes no responsibility for the maintenance +and support of the SOFTWARE. + +You are hereby granted a copyright license to use, modify, and +distribute the SOFTWARE so long as this entire notice is retained +without alteration in any modified and/or redistributed versions, +and that such modified versions are clearly identified as such. +No licenses are granted by implication, estoppel or otherwise +under any patents or trademarks of Motorola, Inc. + +> GPL 2.0 with Special Exception + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\arch\m68k\lib\divsi3.S + +Copyright (C) 1994, 1996, 1997, 1998 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file with other programs, and to distribute +those programs without any restriction coming from the use of this +file. (The General Public License restrictions do apply in other +respects; for example, they cover modification of the file, and +distribution when not linked into another program.) + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. + +As a special exception, if you link this library with files +compiled with GCC to produce an executable, this does not cause +the resulting executable to be covered by the GNU General Public License. +This exception does not however invalidate any other reasons why +the executable file might be covered by the GNU General Public License. + +> MIT- Style + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\arch\m68k\math-emu\fp_log.c + +Copyright (c) 1998-1999 David Huggins-Daines / Roman Zippel. + +I hereby give permission, free of charge, to copy, modify, and +redistribute this software, in source or binary form, provided that +the above copyright notice and the following disclaimer are included +in all such copies. + +THIS SOFTWARE IS PROVIDED "AS IS", WITH ABSOLUTELY NO WARRANTY, REAL +OR IMPLIED. 
+ +> Third Party + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\arch\microblaze\lib\memcpy.c + +Copyright (C) 2008-2009 Michal Simek +Copyright (C) 2008-2009 PetaLogix +Copyright (C) 2007 John Williams + +Reasonably optimised generic C-code for memcpy on Microblaze +This is generic C code to do efficient, alignment-aware memcpy. + +It is based on demo code originally Copyright 2001 by Intel Corp, taken from +http://www.embedded.com/showArticle.jhtml?articleID=19205567 + +Attempts were made, unsuccessfully, to contact the original +author of this code (Michael Morrow, Intel). Below is the original +copyright notice. + +This software has been developed by Intel Corporation. +Intel specifically disclaims all warranties, express or +implied, and all liability, including consequential and +other indirect damages, for the use of this program, including +liability for infringement of any proprietary rights, +and including the warranties of merchantability and fitness +for a particular purpose. Intel does not assume any +responsibility for and errors which may appear in this program +not any responsibility to update it. + +> BSD- 2 Clause + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\arch\mips\include\asm\netlogic\xlp-hal\bridge.h + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE BSD- 2 CLAUSE LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights +reserved. + +This software is available to you under a choice of one of two +licenses. You may choose to be licensed under the terms of the GNU +General Public License (GPL) Version 2, available from the file +COPYING in the main directory of this source tree, or the NetLogic +license below: + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +> BSD + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\arch\mips\sgi-ip22\ip22-eisa.c + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE BSD LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] 
+ +(C) 2002 Pascal Dameme +and Marc Zyngier + +This code is released under both the GPL version 2 and BSD +licenses. Either license may be used. + +This code offers a very basic support for this EISA bus present in +the SGI Indigo-2. It currently only supports PIO (forget about DMA +for the time being). This is enough for a low-end ethernet card, +but forget about your favorite SCSI card... + +> LGPL 2.1 + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\linux-4.9.97\arch\parisc\lib\memset.c + +Copyright (C) 1991, 1997 Free Software Foundation, Inc. +This file is part of the GNU C Library. + +The GNU C Library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +The GNU C Library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with the GNU C Library; if not, write to the Free +Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA +02111-1307 USA. + +> GPL 1.0 + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\arch\powerpc\xmon\ppc.h + +Copyright 1994, 1995, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 +Free Software Foundation, Inc. +Written by Ian Lance Taylor, Cygnus Support + +This file is part of GDB, GAS, and the GNU binutils. + +GDB, GAS, and the GNU binutils are free software; you can redistribute +them and/or modify them under the terms of the GNU General Public +License as published by the Free Software Foundation; either version +1, or (at your option) any later version. + +GDB, GAS, and the GNU binutils are distributed in the hope that they +will be useful, but WITHOUT ANY WARRANTY; without even the implied +warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See +the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this file; see the file COPYING. If not, write to the Free +Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. + +> BSD- 3 Clause + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\crypto\drbg.c + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE BSD- 3 Clause LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Copyright Stephan Mueller , 2014 + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, and the entire permission notice in its entirety, +including the disclaimer of warranties. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. The name of the author may not be used to endorse or promote +products derived from this software without specific prior +written permission. 
+ +ALTERNATIVELY, this product may be distributed under the terms of +the GNU General Public License, in which case the provisions of the GPL are +required INSTEAD OF the above restrictions. (This clause is +necessary due to a potential bad interaction between the GPL and +the restrictions contained in a BSD-style copyright.) + +THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF +WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT +OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +> GFDL 1.1 + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\Documentation\DocBook\media_api.tmpl + +Copyright 2009-2012 +LinuxTV Developers + +Permission is granted to copy, distribute and/or modify +this document under the terms of the GNU Free Documentation License, +Version 1.1 or any later version published by the Free Software +Foundation. A copy of the license is included in the chapter entitled +"GNU Free Documentation License". + +> Open Software License1.1 + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\Documentation\DocBook\libata.tmpl + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE OPEN SOFTWARE LICENSE1.1 LICENSE]. PLEASE SEE APPENDIX FOR THE FULL TEXT OF THE OPEN SOFTWARE LICENSE1.1 LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE] + +Copyright + +The contents of this file are subject to the Open Software License version 1.1 that can be found at +"http://fedoraproject.org/wiki/Licensing:OSL1.1" and is included herein by reference. + +Alternatively, the contents of this file may be used under the terms +of the GNU General Public License version 2 (the "GPL") as distributed +in the kernel source COPYING file, in which case the provisions of +the GPL are applicable instead of the above. If you wish to allow +the use of your version of this file only under the terms of the +GPL and not to allow others to use your version of this file under +the OSL, indicate your decision by deleting the provisions above and +replace them with the notice and other provisions required by the GPL. +If you do not delete the provisions above, a recipient may use your +version of this file under either the OSL or the GPL. + +> CC-Attribution-ShareAlike 2.0 + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\Documentation\PCI\pci.txt + +A more complete resource is the third edition of "Linux Device Drivers" +by Jonathan Corbet, Alessandro Rubini, and Greg Kroah-Hartman. +LDD3 is available for free (under Creative Commons License) from: + +http://lwn.net/Kernel/LDD3/ + +However, keep in mind that all documents are subject to "bit rot". +Refer to the source code if things are not working as described here. + +Please send questions/comments/patches about Linux PCI API to the +"Linux PCI" mailing list. 
+ +> BSD- Style + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\Documentation\scsi\dpti.txt + +Redistribution and use in source form, with or without modification, are +permitted provided that redistributions of source code must retain the +above copyright notice, this list of conditions and the following disclaimer. + +This software is provided `as is' by Adaptec and +any express or implied warranties, including, but not limited to, the +implied warranties of merchantability and fitness for a particular purpose, +are disclaimed. In no event shall Adaptec be +liable for any direct, indirect, incidental, special, exemplary or +consequential damages (including, but not limited to, procurement of +substitute goods or services; loss of use, data, or profits; or business +interruptions) however caused and on any theory of liability, whether in +contract, strict liability, or tort (including negligence or otherwise) +arising in any way out of the use of this driver software, even if advised +of the possibility of such damage. + +> GFDL 1.2 + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\Documentation\trace\ftrace.txt + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE GFDL 1.2 LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Copyright 2008 Red Hat Inc. +Author: Steven Rostedt +License: The GNU Free Documentation License, Version 1.2 +(dual licensed under the GPL v2) + +> MS-LPL + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\Documentation\usb\linux-cdc-acm.inf + +Based on INF template which was: +Copyright (c) 2000 Microsoft Corporation +Copyright (c) 2007 Microchip Technology Inc. +likely to be covered by the MLPL as found at: + + +> Public Domain + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\Documentation\vDSO\parse_vdso.c + +Written by Andrew Lutomirski, 2011-2014. + +This code is meant to be linked in to various programs that run on Linux. +As such, it is available with as few restrictions as possible. This file +is licensed under the Creative Commons Zero License, version 1.0, +available at http://creativecommons.org/publicdomain/zero/1.0/legalcode + +> GPL 2.0 + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\drivers\ide\ide-cs.c + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE GPL 2.0 LICENSE. PLEASE SEE APPENDIX FOR THE FULL TEXT OF THE GPL 2.0 LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +The contents of this file are subject to the Mozilla Public +License Version 1.1 (the "License"); you may not use this file +except in compliance with the License. You may obtain a copy of +the License at http://www.mozilla.org/MPL/ + +Software distributed under the License is distributed on an "AS +IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or +implied. See the License for the specific language governing +rights and limitations under the License. + +The initial developer of the original code is David A. Hinds +. Portions created by David A. Hinds +are Copyright (C) 1999 David A. Hinds. All Rights Reserved. 
+ +Alternatively, the contents of this file may be used under the +terms of the GNU General Public License version 2 (the "GPL"), in +which case the provisions of the GPL are applicable instead of the +above. If you wish to allow the use of your version of this file +only under the terms of the GPL and not to allow others to use +your version of this file under the MPL, indicate your decision +by deleting the provisions above and replace them with the notice +and other provisions required by the GPL. If you do not delete +the provisions above, a recipient may use your version of this +file under either the MPL or the GPL. + +> Apache 2.0 + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\drivers\staging\android\ashmem.h + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE APACHE 2.0 LICENSE. PLEASE SEE APPENDIX FOR THE FULL TEXT OF THE APACHE 2.0 LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Copyright 2008 Google Inc. +Author: Robert Love + +This file is dual licensed. It may be redistributed and/or modified +under the terms of the Apache 2.0 License OR version 2 of the GNU +General Public License. + +> BSD- 2 Clause + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\drivers\input\keyboard\hil_kbd.c + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE BSD- 2 Clause LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Copyright (c) 2001 Brian S. Julin +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions, and the following disclaimer, +without modification. +2. The name of the author may not be used to endorse or promote products +derived from this software without specific prior written permission. + +Alternatively, this software may be distributed under the terms of the +GNU General Public License ("GPL"). + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + +> BSD- 4 Clause + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\drivers\net\ppp\bsd_comp.c + +Copyright (c) 1985, 1986 The Regents of the University of California. +All rights reserved. + +This code is derived from software contributed to Berkeley by +James A. Woods, derived from original work by Spencer Thomas +and Joseph Orost. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. All advertising materials mentioning features or use of this software +must display the following acknowledgement: +This product includes software developed by the University of +California, Berkeley and its contributors. +4. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +> GPL 2.0 with Special Exception + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\fs\jffs2\LICENCE + +Copyright © 2001-2007 Red Hat, Inc. and others + +JFFS2 is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 2 or (at your option) any later +version. + +JFFS2 is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License along +with JFFS2; if not, write to the Free Software Foundation, Inc., +59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + +As a special exception, if other files instantiate templates or use +macros or inline functions from these files, or you compile these +files and link them with other works to produce a work based on these +files, these files do not by themselves cause the resulting work to be +covered by the GNU General Public License. However the source code for +these files must still be made available in accordance with section (3) +of the GNU General Public License. + +This exception does not invalidate any other reasons why a work based on +this file might be covered by the GNU General Public License. + +> MIT + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\include\dt-bindings\clock\sun4i-a10-pll2.h + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE MIT LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Copyright 2015 Maxime Ripard + +Maxime Ripard + +This file is dual-licensed: you can use it either under the terms +of the GPL or the X11 license, at your option. Note that this dual +licensing only applies to this file, and not this project as a +whole. 
+ +a) This file is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the +License, or (at your option) any later version. + +This file is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +Or, alternatively, + +b) Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +> LGPL 3.0 + +linux-esx-4.9.154-1.ph2.src.rpm\linux-esx-4.9.154-1.ph2.src.cpio\linux-4.9.154.tar.xz\linux-4.9.154.tar\include\uapi\linux\dm-ioctl.h + +Copyright (C) 2001 - 2003 Sistina Software (UK) Limited. +Copyright (C) 2004 - 2009 Red Hat, Inc. All rights reserved. + +This file is released under the LGPL. + +>>> logrotate-3.11.0-3.ph2 + +Copyright (c) 2006-2012 Novell, Inc. All Rights Reserved. + + +This program is free software; you can redistribute it and/or modify it under +the terms of version 2 of the GNU General Public License as published by the +Free Software Foundation. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with +this program; if not, contact Novell, Inc. + +To contact Novell about this file by physical or electronic mail, you may find +current contact information at www.novell.com. + +ADDITIONAL LICENSE INFORMATION: + +> BSD-3 + +logrotate-3.11.0-3.ph2.src-1.rpm\logrotate-3.11.0-3.ph2.src.cpio\logrotate-3.11.0.tar.gz\logrotate-3.11.0.tar\logrotate-3.11.0\queue.h + + +Copyright (c) 1991, 1993 +The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. 
Neither the name of the University nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+
+> GPL 3.0
+
+logrotate-3.11.0-3.ph2.src-1.rpm\logrotate-3.11.0-3.ph2.src.cpio\logrotate-3.11.0.tar.gz\logrotate-3.11.0.tar\logrotate-3.11.0\logrotate.c
+
+
+Copyright (C) 1995-2001 Red Hat, Inc.
+This may be freely redistributed under the terms of the GNU Public License
+
+>>> mingetty-1.08-2.ph2
+
+Copyright (C) 1996 Florian La Roche
+Copyright (C) 2002, 2003 Red Hat, Inc
+This getty can only be used as a small console getty. Look at mgetty
+for a real modem getty.
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation; either version
+2 of the License, or (at your option) any later version.
+
+>>> net-tools-1.60-11.ph2
+
+Version: $Id: ax25_gr.c,v 1.4 1999/01/05 20:53:21 philip Exp $
+
+Author: Bernd Eckenfels,
+Copyright 1999 Bernd Eckenfels, Germany
+base on Code from Jonathan Naylor
+
+This program is free software; you can redistribute it
+and/or modify it under the terms of the GNU General
+Public License as published by the Free Software
+Foundation; either version 2 of the License, or (at
+your option) any later version.
+
+>>> procps-ng-3.3.15-2.ph2
+
+Copyright (C) 2010 Karel Zak
+Copyright (C) 2010 Davidlohr Bueso
+
+This program is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public License
+as published by the Free Software Foundation; either version 2
+of the License, or (at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ + + +ADDITIONAL LICENSE INFORMATION: + +> Public Domain + +procps-ng-3.3.15.tar.xz\procps-ng-3.3.15.tar\procps-ng-3.3.15\mkinstalldirs + +mkinstalldirs --- make directory hierarchy +Author: Noah Friedman +Created: 1993-05-16 +Public domain + + +> LGPL 2.1 + +procps-ng-3.3.15.tar.xz\procps-ng-3.3.15.tar\procps-ng-3.3.15\pmap.c + +pmap.c - print process memory mapping +Copyright 2002 Albert Cahalan + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +> LGPL 2.0 + +procps-ng-3.3.15.tar.xz\procps-ng-3.3.15.tar\procps-ng-3.3.15\proc\sig.h + +Copyright 1998-2003 by Albert Cahalan; all rights resered. +This file may be used subject to the terms and conditions of the +GNU Library General Public License Version 2, or any later version +at your option, as published by the Free Software Foundation. +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Library General Public License for more details. + + +> GPL 3.0 + +procps-ng-3.3.15.tar.xz\procps-ng-3.3.15.tar\procps-ng-3.3.15\misc\git-version-gen + +Copyright (C) 2007-2011 Free Software Foundation, Inc. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . + +>>> rpm-4.13.0.2-1.ph2 + +Copyright (C) 1993, 1995, 1996, 1997 Free Software Foundation, Inc. + +NOTE: The canonical source of this file is maintained with the GNU C Library. +Bugs can be reported to bug-glibc@prep.ai.mit.edu. + +This program is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, +USA. 
+ +ADDITIONAL LICENSE INFORMATION: + +> BSD-3 + +rpm-4.13.0.2-1.ph2.src.rpm\rpm-4.13.0.2-1.ph2.src.cpio\rpm-4.13.0.2-release.tar.gz\rpm-4.13.0.2-release.tar\rpm-rpm-4.13.0.2-release\misc\fts.h + +Copyright (c) 1989, 1993 +The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +4. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +> LGPL 2.1 + +rpm-4.13.0.2-1.ph2.src.rpm\rpm-4.13.0.2-1.ph2.src.cpio\rpm-4.13.0.2-release.tar.gz\rpm-4.13.0.2-release.tar\rpm-rpm-4.13.0.2-release\misc\fnmatch.h + +Copyright (C) 1991,92,93,96,97,98,99,2001 Free Software Foundation, Inc. +This file is part of the GNU C Library. + +The GNU C Library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +The GNU C Library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with the GNU C Library; if not, write to the Free +Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA +02111-1307 USA. + +> LGPL 2.0 + +rpm-4.13.0.2-1.ph2.src.rpm\rpm-4.13.0.2-1.ph2.src.cpio\rpm-4.13.0.2-release.tar.gz\rpm-4.13.0.2-release.tar\rpm-rpm-4.13.0.2-release\misc\fnmatch.c + +Copyright (C) 1991-1993, 1996-1999, 2000 Free Software Foundation, Inc. +This file is part of the GNU C Library. + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Library General Public License as +published by the Free Software Foundation; either version 2 of the +License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Library General Public License for more details. 
+ +You should have received a copy of the GNU Library General Public +License along with this library; see the file COPYING.LIB. If not, +write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. + +> GPL 3.0 + +rpm-4.13.0.2-1.ph2.src.rpm\rpm-4.13.0.2-1.ph2.src.cpio\rpm-4.13.0.2-release.tar.gz\rpm-4.13.0.2-release.tar\rpm-rpm-4.13.0.2-release\tools\sepdebugcrcfix.c + +Copyright (C) 2013 Free Software Foundation, Inc. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . + +> Public Domain + +rpm-4.13.0.2-1.ph2.src.rpm\rpm-4.13.0.2-1.ph2.src.cpio\rpm-4.13.0.2-release.tar.gz\rpm-4.13.0.2-release.tar\rpm-rpm-4.13.0.2-release\tools\mkinstalldirs + +Original author: Noah Friedman +Created: 1993-05-16 +Public domain. + +>>> tdnf-1.2.3-6.ph2 + +Copyright (C) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the GNU General Public License v2 (the "License"); +you may not use this file except in compliance with the License. The terms +of the License are located in the COPYING file of this distribution. + +ADDITIONAL LICENSE INFORMATION: + +> LGPL2.1 + +tdnf-1.2.3.tar.gz\tdnf-1.2.3.tar\tdnf-1.2.3\tools\cli\lib\api.c + +Copyright (C) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the GNU Lesser General Public License v2.1 (the "License"); +you may not use this file except in compliance with the License. The terms +of the License are located in the COPYING file of this distribution. + +>>> util-linux-2.32-1.ph2 + +Copyright (C) 2008, Karel Zak +Copyright (C) 2008, James Youngman + +This file is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This file is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + + +Based on scriptreplay.pl by Joey Hess + + +ADDITIONAL LICENSE INFORMATION: + +> GPL 3.0 + +util-linux-2.32-1.ph2.src.rpm\util-linux-2.32-1.ph2.src.cpio\util-linux-2.32.tar.xz\util-linux-2.32.tar\util-linux-2.32\tools\git-version-gen + +Copyright (C) 2007-2011 Free Software Foundation, Inc. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . 
+ +> BSD 4 + +util-linux-2.32-1.ph2.src.rpm\util-linux-2.32-1.ph2.src.cpio\util-linux-2.32.tar.xz\util-linux-2.32.tar\util-linux-2.32\text-utils\ul.c + +Copyright (c) 1980, 1993 +The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. All advertising materials mentioning features or use of this software +must display the following acknowledgement: +This product includes software developed by the University of +California, Berkeley and its contributors. +4. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +> PUBLIC DOMAIN + +util-linux-2.32-1.ph2.src.rpm\util-linux-2.32-1.ph2.src.cpio\util-linux-2.32.tar.xz\util-linux-2.32.tar\util-linux-2.32\text-utils\line.c + +line - read one line + +Gunnar Ritter, Freiburg i. Br., Germany, December 2000. + +Public Domain. + + +> BSD + +util-linux-2.32-1.ph2.src.rpm\util-linux-2.32-1.ph2.src.cpio\util-linux-2.32.tar.xz\util-linux-2.32.tar\util-linux-2.32\text-utils\more.c + +Copyright (C) 1980 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms are permitted +provided that the above copyright notice and this paragraph are +duplicated in all such forms and that any documentation, +advertising materials, and other materials related to such +distribution and use acknowledge that the software was developed +by the University of California, Berkeley. The name of the +University may not be used to endorse or promote products derived +from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED +WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + +> MIT + +util-linux-2.32-1.ph2.src.rpm\util-linux-2.32-1.ph2.src.cpio\util-linux-2.32.tar.xz\util-linux-2.32.tar\util-linux-2.32\sys-utils\flock.1 + +Copyright 2003-2006 H. 
Peter Anvin - All Rights Reserved + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom +the Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall +be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +> BSD 3 + +util-linux-2.32-1.ph2.src.rpm\util-linux-2.32-1.ph2.src.cpio\util-linux-2.32.tar.xz\util-linux-2.32.tar\util-linux-2.32\misc-utils\uuidparse.c + +uuidparse.c --- Interpret uuid encoded information. This program +violates the UUID abstraction barrier by reaching into the +guts of a UUID. + +Based on libuuid/src/uuid_time.c +Copyright (C) 1998, 1999 Theodore Ts'o. + +All alterations (C) 2017 Sami Kerola +The 3-Clause BSD License + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, and the entire permission notice in its entirety, +including the disclaimer of warranties. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. The name of the author may not be used to endorse or promote +products derived from this software without specific prior +written permission. + +THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF +WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT +OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +> MIT STYLE + +util-linux-2.32-1.ph2.src.rpm\util-linux-2.32-1.ph2.src.cpio\util-linux-2.32.tar.xz\util-linux-2.32.tar\util-linux-2.32\m4\compiler.m4 + +Copyright (C) 2008-2011 Free Software Foundation, Inc. +This file is free software; the Free Software Foundation +gives unlimited permission to copy and/or distribute it, +with or without modifications, as long as this notice is preserved. 
+ +From Simon Josefsson +-- derivated from coreutils m4/warnings.m4 + +> LGPL 2.0 + +util-linux-2.32-1.ph2.src.rpm\util-linux-2.32-1.ph2.src.cpio\util-linux-2.32.tar.xz\util-linux-2.32.tar\util-linux-2.32\login-utils\setpwnam.c +setpwnam.c -- edit an entry in a password database. + +(c) 1994 Salvatore Valente +This file is free software; you can redistribute it and/or +modify it under the terms of the GNU Library General Public License as +published by the Free Software Foundation; either version 2 of the +License, or (at your option) any later version. + +Edited 11/10/96 (DD/MM/YY ;-) by Nicolai Langfeldt (janl@math.uio.no) +to read /etc/passwd directly so that passwd, chsh and chfn can work on +machines that run NIS (previously YP). Changes will not be made to +usernames starting with +. + +This file is distributed with no warranty. + +Usage: +1) get a struct passwd from getpwnam(). +You should assume a struct passwd has an infinite number of fields, so +you should not try to create one from scratch. +2) edit the fields you want to edit. +3) call setpwnam() with the edited struct passwd. + +A _normal user_ program should never directly manipulate etc/passwd but +/use getpwnam() and (family, as well as) setpwnam(). + +But, setpwnam was made to _edit_ the password file. For use by chfn, +chsh and passwd. _I_ _HAVE_ to read and write /etc/passwd directly. Let +those who say nay be forever silent and think about how getpwnam (and +family) works on a machine running YP. + +Added checks for failure of malloc() and removed error reporting to +stderr, this is a library function and should not print on the screen, +but return appropriate error codes. +27-Jan-97 - poe@daimi.aau.dk + +Thanks to "two guys named Ian". + +$Author: poer $ +$Revision: 1.13 $ +$Date: 1997/06/23 08:26:29 $ + +> LGPL 2.1 + +util-linux-2.32-1.ph2.src.rpm\util-linux-2.32-1.ph2.src.cpio\util-linux-2.32.tar.xz\util-linux-2.32.tar\util-linux-2.32\libsmartcols\COPYING + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later +version. + +The complete text of the license is available in the +../Documentation/licenses/COPYING.LGPLv2.1 file. + +> LGPL 3.0 + +util-linux-2.32-1.ph2.src.rpm\util-linux-2.32-1.ph2.src.cpio\util-linux-2.32.tar.xz\util-linux-2.32.tar\util-linux-2.32\libmount\python\pylibmount.c + +Python bindings for the libmount library. + +Copyright (C) 2013, Red Hat, Inc. All rights reserved. +Written by Ondrej Oprala and Karel Zak + +This file is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 3 of the License, or (at your option) any later version. + +This file is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this file; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +> GPL 3.0 WITH BISON PARSER EXCEPTION + +util-linux-2.32-1.ph2.src.rpm\util-linux-2.32-1.ph2.src.cpio\util-linux-2.32.tar.xz\util-linux-2.32.tar\util-linux-2.32\lib\parse-date.c + +Bison implementation for Yacc-like parsers in C + +Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . + +As a special exception, you may create a larger work that contains +part or all of the Bison parser skeleton and distribute that work +under terms of your choice, so long as that work isn't itself a +parser generator using the skeleton or a modified version thereof +as a parser skeleton. Alternatively, if you modify or redistribute +the parser skeleton itself, you may (at your option) remove this +special exception, which will cause the skeleton and the resulting +Bison output files to be licensed under the GNU General Public +License without this special exception. + +This special exception was added by the Free Software Foundation in +version 2.2 of Bison. + +> GPL 1.0 + +util-linux-2.32-1.ph2.src.rpm\util-linux-2.32-1.ph2.src.cpio\util-linux-2.32.tar.xz\util-linux-2.32.tar\util-linux-2.32\disk-utils\fdisk.c + +Copyright (C) 1992 A. V. Le Blanc (LeBlanc@mcc.ac.uk) +Copyright (C) 2012 Davidlohr Bueso + +Copyright (C) 2007-2013 Karel Zak + +This program is free software. You can redistribute it and/or +modify it under the terms of the GNU General Public License as +published by the Free Software Foundation: either version 1 or +(at your option) any later version. + +> MIT STYLE + +util-linux-2.32-1.ph2.src.rpm\util-linux-2.32-1.ph2.src.cpio\util-linux-2.32.tar.xz\util-linux-2.32.tar\util-linux-2.32\disk-utils\cfdisk.8 + +Copyright 1994 Kevin E. Martin (martin@cs.unc.edu) +Copyright (C) 2014 Karel Zak + +Permission is granted to make and distribute verbatim copies of this +manual provided the copyright notice and this permission notice are +preserved on all copies. + +Permission is granted to copy and distribute modified versions of this +manual under the conditions for verbatim copying, provided that the +entire resulting derived work is distributed under the terms of a +permission notice identical to this one. + +--------------- SECTION 5: GNU General Public License, V3.0 ---------- + +GNU General Public License, V3.0 is applicable to the following component(s). + + +>>> bash-4.4.12-3.ph2 + +error.c -- Functions for handling errors. + +Copyright (C) 1993-2009 Free Software Foundation, Inc. + +This file is part of GNU Bash, the Bourne Again SHell. + +Bash is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Bash is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Bash. If not, see . + + + +ADDITIONAL LICENSE INFORMATION: + + +>GPL 3.0 WITH BISON PARSER EXCEPTION + + +bash-4.4.tar.gz\bash-4.4.tar\bash-4.4\parser-built + + +A Bison parser, made by GNU Bison 3.0.4. + +Bison interface for Yacc-like parsers in C + +Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . + +As a special exception, you may create a larger work that contains +part or all of the Bison parser skeleton and distribute that work +under terms of your choice, so long as that work isn't itself a +parser generator using the skeleton or a modified version thereof +as a parser skeleton. Alternatively, if you modify or redistribute +the parser skeleton itself, you may (at your option) remove this +special exception, which will cause the skeleton and the resulting +Bison output files to be licensed under the GNU General Public +License without this special exception. + +This special exception was added by the Free Software Foundation in +version 2.2 of Bison. + + +>MIT STYLE + + +bash-4.4.tar.gz\bash-4.4.tar\bash-4.4\README + + +Copying and distribution of this file, with or without modification, +are permitted in any medium without royalty provided the copyright +notice and this notice are preserved. This file is offered as-is, +without any warranty. + + +>GFDL 1.3 + + +bash-4.4.tar.gz\bash-4.4.tar\bash-4.4\doc\bashref.texi + + +Copyright @copyright{} 1988--2016 Free Software Foundation, Inc. + +@quotation +Permission is granted to copy, distribute and/or modify this document +under the terms of the GNU Free Documentation License, Version 1.3 or +any later version published by the Free Software Foundation; with no +Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. +A copy of the license is included in the section entitled +``GNU Free Documentation License''. +@end quotation + + +>SIL OPEN FONT LICENSE 1.1 + +bash-4.4.tar.gz\bash-4.4.tar\bash-4.4\doc\bashref.ps + +Copyright: Copyright (c) 1997, 2009 American Mathematical Society +Copyright: (), with Reserved Font Name CMR9. +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is in the accompanying file OFL.txt, and is also +available with a FAQ at: http://scripts.sil.org/OFL. + + +>GPL 2.0 + + +bash-4.4.tar.gz\bash-4.4.tar\bash-4.4\examples\complete\complete-examples + + + +Copyright 2002 Chester Ramey + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +TThis program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software Foundation, +Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + +>PUBLIC DOMAIN + + +bash-4.4.tar.gz\bash-4.4.tar\bash-4.4\support\mkinstalldirs + + +Author: Noah Friedman +Created: 1993-05-16 +Public domain + + +>BSD-4 + + +bash-4.4.tar.gz\bash-4.4.tar\bash-4.4\lib\sh\inet_aton.c + + +Copyright (c) 1983, 1990, 1993 +The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. All advertising materials mentioning features or use of this software +must display the following acknowledgement: +This product includes software developed by the University of +California, Berkeley and its contributors. +4. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. +- + + + +>BSD + + +bash-4.4.tar.gz\bash-4.4.tar\bash-4.4\lib\sh\inet_aton. + + +Portions Copyright (c) 1993 by Digital Equipment Corporation. + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies, and that +the name of Digital Equipment Corporation not be used in advertising or +publicity pertaining to distribution of the document or software without +specific, written prior permission. + +THE SOFTWARE IS PROVIDED "AS IS" AND DIGITAL EQUIPMENT CORP. DISCLAIMS ALL +WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DIGITAL EQUIPMENT +CORPORATION BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL +DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR +PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. +- + +>>> coreutils-8.27-3.ph2 + +Copyright (C) 2003-2017 Free Software Foundation, Inc. 
+ +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . + +ADDITIONAL LICENSE INFORMATION: + +>GFDL 1.3 + +coreutils-8.27.tar.xz\coreutils-8.27.tar\coreutils-8.27\README + + +Copyright (C) 1998-2017 Free Software Foundation, Inc. + +Permission is granted to copy, distribute and/or modify this document +under the terms of the GNU Free Documentation License, Version 1.3 or +any later version published by the Free Software Foundation; with no +Invariant Sections, with no Front-Cover Texts, and with no Back-Cover +Texts. A copy of the license is included in the "GNU Free +Documentation License" file as part of this distribution. + +>MIT + +coreutils-8.27.tar.xz\coreutils-8.27.tar\coreutils-8.27\gnulib-tests\inet_pton.c + + +Copyright (c) 1996,1999 by Internet Software Consortium. + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS +ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE +CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL +DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR +PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + + +>BSD-3 + +coreutils-8.27.tar.xz\coreutils-8.27.tar\coreutils-8.27\lib\fts.c + +Copyright (c) 1990, 1993, 1994 +The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +4. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + + +>GPL 3.0 WITH BISON PARSER EXCEPTION + + +coreutils-8.27.tar.xz\coreutils-8.27.tar\coreutils-8.27\lib\parse-datetime.c + +/ Bison implementation for Yacc-like parsers in C + +Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . / + +/ As a special exception, you may create a larger work that contains +part or all of the Bison parser skeleton and distribute that work +under terms of your choice, so long as that work isn't itself a +parser generator using the skeleton or a modified version thereof +as a parser skeleton. Alternatively, if you modify or redistribute +the parser skeleton itself, you may (at your option) remove this +special exception, which will cause the skeleton and the resulting +Bison output files to be licensed under the GNU General Public +License without this special exception. + +This special exception was added by the Free Software Foundation in +version 2.2 of Bison. / + +>PUBLIC DOMAIN + +coreutils-8.27.tar.xz\coreutils-8.27.tar\coreutils-8.27\lib\alloca.c + +(Mostly) portable public-domain implementation -- D A Gwyn + +This implementation of the PWB library alloca function, +which is used to allocate space off the run-time stack so +that it is automatically reclaimed upon procedure exit, +was inspired by discussions with J. Q. Johnson of Cornell. +J.Otto Tennant contributed the Cray support. + +There are some preprocessor constants that can +be defined when compiling for your specific system, for +improved efficiency; however, the defaults should be okay. + +The general concept of this implementation is to keep +track of all alloca-allocated blocks, and reclaim any +that are found to be deeper in the stack than the current +invocation. This heuristic does not reclaim storage as +soon as it becomes invalid, but it will do so eventually. + +>>> elfutils-0.169-3.ph2 + +Copyright (C) 2000, 2002, 2005 Red Hat, Inc. +This file is part of elfutils. +Written by Ulrich Drepper , 2000. + +This file is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3 of the License, or +(at your option) any later version. + +elfutils is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program. If not, see . + +ADDITIONAL LICENSE INFORMATION + +> LGPL 3.0 + +elfutils-0.169.tar.bz2\elfutils-0.169.tar\elfutils-0.169\lib\color.c + +[PLEASE NOTE: VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE LGPL 3.0 LICENSE. PLEASE SEE THE APPENDIX FOR THE FULL TEXT OF THE LGPL 3.0 LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.] + +Copyright (C) 2008 Red Hat, Inc. + +This file is part of elfutils. + +This file is free software; you can redistribute it and/or modify +it under the terms of either + +the GNU Lesser General Public License as published by the Free +Software Foundation; either version 3 of the License, or (at +your option) any later version + +or + +the GNU General Public License as published by the Free +Software Foundation; either version 2 of the License, or (at +your option) any later version + +or both in parallel, as here. + +elfutils is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received copies of the GNU General Public License and +the GNU Lesser General Public License along with this program. If +not, see . + +> MIT-Style + +elfutils-0.169.tar.bz2\elfutils-0.169.tar\elfutils-0.169\aclocal.m4 + +Copyright (C) 1996-2013 Free Software Foundation, Inc. + +This file is free software; the Free Software Foundation +gives unlimited permission to copy and/or distribute it, +with or without modifications, as long as this notice is preserved. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY, to the extent permitted by law; without +even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. + + +> LGPL 2.1 + +elfutils-0.169.tar.bz2\elfutils-0.169.tar\elfutils-0.169\libelf\elf.h + +Copyright (C) 1995-2014 Free Software Foundation, Inc. +This file is part of the GNU C Library. + +The GNU C Library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +The GNU C Library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with the GNU C Library; if not, see +. + +>>> grep-3.0-4.ph2 + +Copyright (C) 2002-2017 Free Software Foundation, Inc. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . 
+ +ADDITIONAL LICENSE INFORMATION: + +> MIT- Style + +grep-3.0.tar.xz\grep-3.0.tar\grep-3.0\aclocal.m4 + +Copyright (C) 1996-2015 Free Software Foundation, Inc. + +This file is free software; the Free Software Foundation +gives unlimited permission to copy and/or distribute it, +with or without modifications, as long as this notice is preserved. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY, to the extent permitted by law; without +even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. + +> GFDL 1.3 + +grep-3.0.tar.xz\grep-3.0.tar\grep-3.0\doc\grep.info + +Copyright © 1999-2002, 2005, 2008-2017 Free Software Foundation, Inc. + +Permission is granted to copy, distribute and/or modify this +document under the terms of the GNU Free Documentation License, +Version 1.3 or any later version published by the Free Software +Foundation; with no Invariant Sections, with no Front-Cover Texts, +and with no Back-Cover Texts. A copy of the license is included in +the section entitled “GNU Free Documentation License”. + +> Public Domain + +grep-3.0.tar.xz\grep-3.0.tar\grep-3.0\lib\alloca.c + +alloca.c -- allocate automatically reclaimed memory +(Mostly) portable public-domain implementation -- D A Gwyn + +This implementation of the PWB library alloca function, +which is used to allocate space off the run-time stack so +that it is automatically reclaimed upon procedure exit, +was inspired by discussions with J. Q. Johnson of Cornell. +J.Otto Tennant contributed the Cray support. + +There are some preprocessor constants that can +be defined when compiling for your specific system, for +improved efficiency; however, the defaults should be okay. + +The general concept of this implementation is to keep +track of all alloca-allocated blocks, and reclaim any +that are found to be deeper in the stack than the current +invocation. This heuristic does not reclaim storage as +soon as it becomes invalid, but it will do so eventually. + +As a special case, alloca(0) reclaims storage without +allocating any. It is a good idea to use alloca(0) in +your main control loop, etc. to force garbage collection. + +> BSD- 3 Clause + +grep-3.0.tar.xz\grep-3.0.tar\grep-3.0\lib\fts.c + + +Copyright (c) 1990, 1993, 1994 +The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +4. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +>>> gzip-1.8-1.ph2 + +Copyright (C) 2010-2016 Free Software Foundation, Inc. + +This program is free software: you can redistribute it and/or modify it +under the terms of the GNU General Public License as published +by the Free Software Foundation; either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . + +ADDITIONAL LICENSE INFORMATION: + +> GFDL 1.3 + +gzip-1.8.tar.xz\gzip-1.8.tar\gzip-1.8\doc\gzip.info + +Copyright © 1992, 1993 Jean-loup Gailly + +Permission is granted to copy, distribute and/or modify this +document under the terms of the GNU Free Documentation License, +Version 1.3 or any later version published by the Free Software +Foundation; with no Invariant Sections, with no Front-Cover Texts, +and with no Back-Cover Texts. A copy of the license is included in +the section entitled “GNU Free Documentation License”. + +> MIT-Style + +gzip-1.8.tar.xz\gzip-1.8.tar\gzip-1.8\zless.1 + +Copyright \(co 2006-2007, 2015-2016 Free Software Foundation, Inc. + +Copyright \(co 1992, 1993 Jean-loup Gailly + +Permission is granted to make and distribute verbatim copies of +this manual provided the copyright notice and this permission notice +are preserved on all copies. + +Permission is granted to process this file through troff and print the +results, provided the printed document carries copying permission +notice identical to this one except for the removal of this paragraph +(this paragraph not being relevant to the printed manual). + +Permission is granted to copy and distribute modified versions of this +manual under the conditions for verbatim copying, provided that the entire +resulting derived work is distributed under the terms of a permission +notice identical to this one. + +Permission is granted to copy and distribute translations of this manual +into another language, under the above conditions for modified versions, +except that this permission notice may be stated in a translation approved +by the Foundation. + +> Public Domain + +gzip-1.8.tar.xz\gzip-1.8.tar\gzip-1.8\unlzh.c + +The code in this file is directly derived from the public domain 'ar002' +written by Haruhiko Okumura. + +>>> haveged-1.9.1-4.ph2 + +Copyright 2009-2011 Gary Wuertz gary@issiweb.com +Copyright 2011 BenEleventh Consulting manolson@beneleventh.com + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . + + +ADDITIONAL LICENSE INFORMATION: + +> MIT-Style + +haveged-1.9.1.tar.gz\haveged-1.9.1.tar\haveged-1.9.1\aclocal.m4 + +Copyright (C) 1996-2012 Free Software Foundation, Inc. + +This file is free software; the Free Software Foundation +gives unlimited permission to copy and/or distribute it, +with or without modifications, as long as this notice is preserved. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY, to the extent permitted by law; without +even the implied warranty of MERCHANTABILITY or FITNESS FOR A +PARTICULAR PURPOSE. + + +> LGPL 2.1 + +haveged-1.9.1.tar.gz\haveged-1.9.1.tar\haveged-1.9.1\src\oneiteration.h + +This source is an adaptation of work released as + +Copyright (C) 2006 - André Seznec - Olivier Rochecouste + +under version 2.1 of the GNU Lesser General Public License + +The original form is retained with minor variable renames for +more consistent macro itilization. See havegecollect.c for +details. + +>>> readline-7.0-2.ph2 + +Copyright (C) 1987-2015 Free Software Foundation, Inc. + +This file is part of the GNU Readline Library (Readline), a library +for reading lines of text with interactive input and history editing. + +Readline is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Readline is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Readline. If not, see . + +ADDITIONAL LICENSE INFORMATION: + +> GFDL 1.3 + +readline-7.0.tar.gz\readline-7.0.tar\readline-7.0\doc\history.info + +Copyright (C) 1988-2016 Free Software Foundation, Inc. + +Permission is granted to copy, distribute and/or modify this +document under the terms of the GNU Free Documentation License, +Version 1.3 or any later version published by the Free Software +Foundation; with no Invariant Sections, no Front-Cover Texts, and +no Back-Cover Texts. A copy of the license is included in the +section entitled "GNU Free Documentation License". + + +> GPL 2.0 + +readline-7.0.tar.gz\readline-7.0.tar\readline-7.0\examples\rlfe\extern.h + +Copyright (c) 1993-2002 +Juergen Weigert (jnweiger@immd4.informatik.uni-erlangen.de) +Michael Schroeder (mlschroe@immd4.informatik.uni-erlangen.de) +Copyright (c) 1987 Oliver Laumann + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program (see the file COPYING); if not, write to the +Free Software Foundation, Inc., +59 Temple Place - Suite 330, Boston, MA 02111-1307, USA + +rlwrap-0.30.tar.gz + +> LGPL 2.1 + +readline-7.0.tar.gz\readline-7.0.tar\readline-7.0\examples\rlwrap-0.30.tar.gz\rlwrap-0.30.tar\rlwrap-0.30\src\redblack.h + +Copyright (C) Damian Ivereigh 2000 + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU Lesser General Public License as published by +the Free Software Foundation; either version 2.1 of the License, or +(at your option) any later version. See the file COPYING for details. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU Lesser General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + + +> Public Domain + +readline-7.0.tar.gz\readline-7.0.tar\readline-7.0\support\mkinstalldirs + +Author: Noah Friedman +Created: 1993-05-16 +Public domain + +readline-7.0.tar.gz + +> MIT + +readline-7.0.tar.gz\readline-7.0.tar\readline-7.0\support\wcwidth.c + +Markus Kuhn -- 2007-05-26 (Unicode 5.0) + +Permission to use, copy, modify, and distribute this software +for any purpose and without fee is hereby granted. The author +disclaims all warranties with regard to this software. + +Latest version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c + +--------------- SECTION 6: GNU Lesser General Public License, V2.1 ---------- + +GNU Lesser General Public License, V2.1 is applicable to the following component(s). + + +>>> cracklib-2.9.6-8.ph2 + +A Python binding for cracklib. + +Parts of this code are based on work Copyright (c) 2003 by Domenico +Andreoli. + +Copyright (c) 2008, 2009, 2012 Jan Dittberner + +This file is part of cracklib. + +This library is free software; you can redistribute it and/or modify it under +the terms of the GNU Lesser General Public License as published by the Free +Software Foundation; either version 2.1 of the License, or (at your option) +any later version. + +This library is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +details. + +You should have received a copy of the GNU Lesser General Public License +along with this library; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +ADDITIONAL LICENSE INFORMATION: + +> Public Domain + +cracklib-2.9.6.tar.gz\cracklib-2.9.6.tar\cracklib-2.9.6\po\es.po + +translation of cracklib.po to +This file is put in the public domain. +, 2003 + +>>> glibc-2.26-13.ph2 + +Copyright (C) 1991-2017 Free Software Foundation, Inc. +This file is part of the GNU C Library. + +The GNU C Library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. 
+ +The GNU C Library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with the GNU C Library; if not, see +. + + +ADDITIONAL LICENSE INFORMATION: + +> + +glibc-2.26.tar.xz\glibc-2.26.tar\glibc-2.26\LICENSES + +This file contains the copying permission notices for various files in the +GNU C Library distribution that have copyright owners other than the Free +Software Foundation. These notices all require that a copy of the notice +be included in the accompanying documentation and be distributed with +binary distributions of the code, so be sure to include this file along +with any binary distributions derived from the GNU C Library. +All code incorporated from 4.4 BSD is distributed under the following +license: + +Copyright (C) 1991 Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. [This condition was removed.] +4. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. +The DNS resolver code, taken from BIND 4.9.5, is copyrighted by UC +Berkeley, by Digital Equipment Corporation and by Internet Software +Consortium. The DEC portions are under the following license: + +Portions Copyright (C) 1993 by Digital Equipment Corporation. + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies, and +that the name of Digital Equipment Corporation not be used in +advertising or publicity pertaining to distribution of the document or +software without specific, written prior permission. + +THE SOFTWARE IS PROVIDED ``AS IS'' AND DIGITAL EQUIPMENT CORP. +DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL +DIGITAL EQUIPMENT CORPORATION BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING +FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +The ISC portions are under the following license: + +Portions Copyright (c) 1996-1999 by Internet Software Consortium. + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS +ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE +CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL +DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR +PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. +The Sun RPC support (from rpcsrc-4.0) is covered by the following +license: + +Copyright (c) 2010, Oracle America, Inc. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials +provided with the distribution. + +Neither the name of the "Oracle America, Inc." nor the names of its +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +The following CMU license covers some of the support code for Mach, +derived from Mach 3.0: + +Mach Operating System +Copyright (C) 1991,1990,1989 Carnegie Mellon University +All Rights Reserved. + +Permission to use, copy, modify and distribute this software and its +documentation is hereby granted, provided that both the copyright +notice and this permission notice appear in all copies of the +software, derivative works or modified versions, and any portions +thereof, and that both notices appear in supporting documentation. + +CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS ``AS IS'' +CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR +ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ +Carnegie Mellon requests users of this software to return to + +Software Distribution Coordinator +School of Computer Science +Carnegie Mellon University +Pittsburgh PA 15213-3890 + +or Software.Distribution@CS.CMU.EDU any improvements or +extensions that they make and grant Carnegie Mellon the rights to +redistribute these changes. +The file if_ppp.h is under the following CMU license: + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY CARNEGIE MELLON UNIVERSITY AND +CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +The following license covers the files from Intel's "Highly Optimized +Mathematical Functions for Itanium" collection: + +Intel License Agreement + +Copyright (c) 2000, Intel Corporation + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +The name of Intel Corporation may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+The files inet/getnameinfo.c and sysdeps/posix/getaddrinfo.c are copyright +(C) by Craig Metz and are distributed under the following license: + +/ The Inner Net License, Version 2.00 + +The author(s) grant permission for redistribution and use in source and +binary forms, with or without modification, of the software and documentation +provided that the following conditions are met: + +0. If you receive a version of the software that is specifically labelled +as not being for redistribution (check the version message and/or README), +you are not permitted to redistribute that version of the software in any +way or form. +1. All terms of the all other applicable copyrights and licenses must be +followed. +2. Redistributions of source code must retain the authors' copyright +notice(s), this list of conditions, and the following disclaimer. + +3. Redistributions in binary form must reproduce the authors' copyright +notice(s), this list of conditions, and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +4. [The copyright holder has authorized the removal of this clause.] + +5. Neither the name(s) of the author(s) nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY ITS AUTHORS AND CONTRIBUTORS ``AS IS'' AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +If these license terms cause you a real problem, contact the author. / +The file sunrpc/des_impl.c is copyright Eric Young: + +Copyright (C) 1992 Eric Young + +Collected from libdes and modified for SECURE RPC by Martin Kuck 1994 +This file is distributed under the terms of the GNU Lesser General +Public License, version 2.1 or later - see the file COPYING.LIB for details. +If you did not receive a copy of the license with this program, please +see to obtain a copy. + +The libidn code is copyright Simon Josefsson, with portions copyright +The Internet Society, Tom Tromey and Red Hat, Inc.: + +Copyright (C) 2002, 2003, 2004, 2011 Simon Josefsson + +This file is part of GNU Libidn. + +GNU Libidn is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +GNU Libidn is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with GNU Libidn; if not, see . 
+ +The following notice applies to portions of libidn/nfkc.c: + +This file contains functions from GLIB, including gutf8.c and +gunidecomp.c, all licensed under LGPL and copyright hold by: + +Copyright (C) 1999, 2000 Tom Tromey +Copyright 2000 Red Hat, Inc. + +The following applies to portions of libidn/punycode.c and +libidn/punycode.h: + +This file is derived from RFC 3492bis written by Adam M. Costello. + +Disclaimer and license: Regarding this entire document or any +portion of it (including the pseudocode and C code), the author +makes no guarantees and is not responsible for any damage resulting +from its use. The author grants irrevocable permission to anyone +to use, modify, and distribute it in any way that does not diminish +the rights of anyone else to use, modify, and distribute it, +provided that redistributed derivative works do not contain +misleading author or version information. Derivative works need +not be licensed under similar terms. +Copyright (C) The Internet Society (2003). All Rights Reserved. + +This document and translations of it may be copied and furnished to +others, and derivative works that comment on or otherwise explain it +or assist in its implementation may be prepared, copied, published +and distributed, in whole or in part, without restriction of any +kind, provided that the above copyright notice and this paragraph are +included on all such copies and derivative works. However, this +document itself may not be modified in any way, such as by removing +the copyright notice or references to the Internet Society or other +Internet organizations, except as needed for the purpose of +developing Internet standards in which case the procedures for +copyrights defined in the Internet Standards process must be +followed, or as required to translate it into languages other than +English. + +The limited permissions granted above are perpetual and will not be +revoked by the Internet Society or its successors or assigns. + +This document and the information contained herein is provided on an +"AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING +TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING +BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION +HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF +MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. +The file inet/rcmd.c is under a UCB copyright and the following: + +Copyright (C) 1998 WIDE Project. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +3. Neither the name of the project nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +The file posix/runtests.c is copyright Tom Lord: + +Copyright 1995 by Tom Lord + +All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of the copyright holder not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +Tom Lord DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +EVENT SHALL TOM LORD BE LIABLE FOR ANY SPECIAL, INDIRECT OR +CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF +USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. + +The posix/rxspencer tests are copyright Henry Spencer: + +Copyright 1992, 1993, 1994, 1997 Henry Spencer. All rights reserved. +This software is not subject to any license of the American Telephone +and Telegraph Company or of the Regents of the University of California. + +Permission is granted to anyone to use this software for any purpose on +any computer system, and to alter it and redistribute it, subject +to the following restrictions: + +1. The author is not responsible for the consequences of use of this +software, no matter how awful, even if they arise from flaws in it. + +2. The origin of this software must not be misrepresented, either by +explicit claim or by omission. Since few users ever read sources, +credits must appear in the documentation. + +3. Altered versions must be plainly marked as such, and must not be +misrepresented as being the original software. Since few users +ever read sources, credits must appear in the documentation. + +4. This notice may not be removed or altered. + +The file posix/PCRE.tests is copyright University of Cambridge: + +Copyright (c) 1997-2003 University of Cambridge + +Permission is granted to anyone to use this software for any purpose on any +computer system, and to redistribute it freely, subject to the following +restrictions: + +1. This software is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +2. The origin of this software must not be misrepresented, either by +explicit claim or by omission. In practice, this means that if you use +PCRE in software that you distribute to others, commercially or +otherwise, you must put a sentence like this + +Regular expression support is provided by the PCRE library package, +which is open source software, written by Philip Hazel, and copyright +by the University of Cambridge, England. 
+ +somewhere reasonably visible in your documentation and in any relevant +files or online help data or similar. A reference to the ftp site for +the source, that is, to + +ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/ + +should also be given in the documentation. However, this condition is not +intended to apply to whole chains of software. If package A includes PCRE, +it must acknowledge it, but if package B is software that includes package +A, the condition is not imposed on package B (unless it uses PCRE +independently). + +3. Altered versions must be plainly marked as such, and must not be +misrepresented as being the original software. + +4. If PCRE is embedded in any software that is released under the GNU +General Purpose Licence (GPL), or Lesser General Purpose Licence (LGPL), +then the terms of that licence shall supersede any condition above with +which it is incompatible. + +Files from Sun fdlibm are copyright Sun Microsystems, Inc.: + +Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + +Developed at SunPro, a Sun Microsystems, Inc. business. +Permission to use, copy, modify, and distribute this +software is freely granted, provided that this notice +is preserved. + +Part of stdio-common/tst-printf.c is copyright C E Chew: + +(C) Copyright C E Chew + +Feel free to copy, use and distribute this software provided: + +1. you do not pretend that you wrote it +2. you leave this copyright notice intact. + +Various long double libm functions are copyright Stephen L. Moshier: + +Copyright 2001 by Stephen L. Moshier + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, see +. / + +>LGPL 2.1 WITH SPECIAL EXCEPTION + +glibc-2.26.tar.xz\glibc-2.26.tar\glibc-2.26\wcsmbs\isoc99_vswscanf.c + +/ Copyright (C) 1993-2017 Free Software Foundation, Inc. +This file is part of the GNU C Library. + +The GNU C Library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +The GNU C Library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with the GNU C Library; if not, see +. + +As a special exception, if you link the code in this file with +files compiled with a GNU compiler to produce an executable, +that does not cause the resulting executable to be covered by +the GNU Lesser General Public License. This exception does not +however invalidate any other reasons why the executable file +might be covered by the GNU Lesser General Public License. +This exception applies to code released by its copyright holders +in files containing the exception. 
+ +>BSD-3 + +glibc-2.26.tar.xz\glibc-2.26.tar\glibc-2.26\termios\sys\ttychars.h + +Copyright (c) 1982, 1986, 1990, 1993 +The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +4. Neither the name of the University nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + + +>GPL 2.0 + +glibc-2.26.tar.xz\glibc-2.26.tar\glibc-2.26\catgets\xopen-msg.awk + +Copyright (C) 2012-2017 Free Software Foundation, Inc. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, see . + +>GPL 3.0 WITH BISON PARSER EXCEPTION + +glibc-2.26.tar.xz\glibc-2.26.tar\glibc-2.26\intl\plural.c + +/ A Bison parser, made by GNU Bison 2.7. / + +/ Bison implementation for Yacc-like parsers in C + +Copyright (C) 1984, 1989-1990, 2000-2012 Free Software Foundation, Inc. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . / + +/ As a special exception, you may create a larger work that contains +part or all of the Bison parser skeleton and distribute that work +under terms of your choice, so long as that work isn't itself a +parser generator using the skeleton or a modified version thereof +as a parser skeleton. 
Alternatively, if you modify or redistribute +the parser skeleton itself, you may (at your option) remove this +special exception, which will cause the skeleton and the resulting +Bison output files to be licensed under the GNU General Public +License without this special exception. + +This special exception was added by the Free Software Foundation in +version 2.2 of Bison. / + +>GFDL 1.3 + +glibc-2.26.tar.xz\glibc-2.26.tar\glibc-2.26\manual\fdl-1.3.texi + +LICENSE: GFDL 1.3 + + +>LGPL 2.0 + + +glibc-2.26.tar.xz\glibc-2.26.tar\glibc-2.26\sysdeps\powerpc\power5+\fpu\s_modf.c + + +Copyright (C) 2013-2017 Free Software Foundation, Inc. +This file is part of the GNU C Library + +The GNU C Library is free software; you can redistribute it and/or +modify it under the terms of the GNU Library General Public License as +published by the Free Software Foundation; either version 2 of the +License, or (at your option) any later version. + +The GNU C Library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Library General Public License for more details. + +You should have received a copy of the GNU Library General Public +License along with the GNU C Library; see the file COPYING.LIB. If +not, see . + + +> PUBLIC DOMAIN + +glibc-2.26.tar.xz\glibc-2.26.tar\glibc-2.26\timezone\zone.tab + +
    +@(#)zone.tab	8.28
    +This file is in the public domain, so clarified as of
    +2009-05-17 by Arthur David Olson.
    +
    +>>> hawkey-2017.1-6.ph2
    +
    +Copyright (C) 2014 Red Hat, Inc.
    +
    +Licensed under the GNU Lesser General Public License Version 2.1
    +
    +This library is free software; you can redistribute it and/or
    +modify it under the terms of the GNU Lesser General Public
    +License as published by the Free Software Foundation; either
    +version 2.1 of the License, or (at your option) any later version.
    +
    +This library is distributed in the hope that it will be useful,
    +but WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    +Lesser General Public License for more details.
    +
    +You should have received a copy of the GNU Lesser General Public
    +License along with this library; if not, write to the Free Software
    +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
    +
    +ADDITIONAL LICENSE INFORMATION:
    +
    +> GPL 2.0
    +
    +hawkey-2017.1.tar.gz\hawkey-2017.1.tar\hawkey-hawkey-0.6.4-1\doc\changes.rst
    +
    +Copyright (C) 2014-2015  Red Hat, Inc.
    +
    +This copyrighted material is made available to anyone wishing to use,
    +modify, copy, or redistribute it subject to the terms and conditions of
    +the GNU General Public License v.2, or (at your option) any later version.
    +This program is distributed in the hope that it will be useful, but WITHOUT
    +ANY WARRANTY expressed or implied, including the implied warranties of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
    +Public License for more details.  You should have received a copy of the
    +GNU General Public License along with this program; if not, write to the
    +Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
    +02110-1301, USA.  Any Red Hat trademarks that are incorporated in the
    +source code or documentation are not subject to the GNU General Public
    +License and may only be used or replicated with the express permission of
    +Red Hat, Inc.
    +
    +>>> kmod-24-3.ph2
    +
    +Copyright (C) 2012-2013  ProFUSION embedded systems
    +
    +This program is free software; you can redistribute it and/or
    +modify it under the terms of the GNU Lesser General Public
    +License as published by the Free Software Foundation; either
    +version 2.1 of the License, or (at your option) any later version.
    +
    +This program is distributed in the hope that it will be useful,
    +but WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    +Lesser General Public License for more details.
    +
    +You should have received a copy of the GNU Lesser General Public
    +License along with this library; if not, see .
    +
    +
    +ADDITIONAL LICENSE INFORMATION:
    +
    +> GPL 2.0
    +
    +kmod-24.tar.xz\kmod-24.tar\kmod-24\tools\rmmod.c
    +
    +kmod-insert - insert a module into the kernel.
    +
    +Copyright (C) 2015 Intel Corporation. All rights reserved.
    +Copyright (C) 2011-2013  ProFUSION embedded systems
    +
    +This program is free software: you can redistribute it and/or modify
    +it under the terms of the GNU General Public License as published by
    +the Free Software Foundation, either version 2 of the License, or
    +(at your option) any later version.
    +
    +This program is distributed in the hope that it will be useful,
    +but WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    +GNU General Public License for more details.
    +
    +You should have received a copy of the GNU General Public License
    +along with this program.  If not, see .
    +
    +>>> libgcrypt-1.8.1-2.ph2
    +
    +Copyright (C) 2013 g10 Code GmbH
    +
    +This file is part of Libgcrypt.
    +
    +Libgcrypt is free software; you can redistribute it and/or modify
    +it under the terms of the GNU Lesser General Public License as
    +published by the Free Software Foundation; either version 2.1 of
    +the License, or (at your option) any later version.
    +
    +Libgcrypt is distributed in the hope that it will be useful,
    +but WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    +GNU Lesser General Public License for more details.
    +
    +You should have received a copy of the GNU Lesser General Public
    +License along with this program; if not, write to the Free Software
    +Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
    +
    +ADDITIONAL LICENSE INFORMATION:
    +
    +>
    +
    +libgcrypt-1.8.1.tar.bz2\libgcrypt-1.8.1.tar\libgcrypt-1.8.1\LICENSES
    +
    +Additional license notices for Libgcrypt.                    -- org --
    +
    +This file contains the copying permission notices for various files in
    +the Libgcrypt distribution which are not covered by the GNU Lesser
    +General Public License (LGPL) or the GNU General Public License (GPL).
    +
    +These notices all require that a copy of the notice be included
    +in the accompanying documentation and be distributed with binary
    +distributions of the code, so be sure to include this file along
    +with any binary distributions derived from the GNU C Library.
    +
    +BSD_3Clause
    +
    +For files:
    +- cipher/sha256-avx-amd64.S
    +- cipher/sha256-avx2-bmi2-amd64.S
    +- cipher/sha256-ssse3-amd64.S
    +- cipher/sha512-avx-amd64.S
    +- cipher/sha512-avx2-bmi2-amd64.S
    +- cipher/sha512-ssse3-amd64.S
    +
    ++begin_quote
    +Copyright (c) 2012, Intel Corporation
    +
    +All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +
    +Redistributions in binary form must reproduce the above copyright
    +notice, this list of conditions and the following disclaimer in the
    +documentation and/or other materials provided with the
    +distribution.
    +
    +Neither the name of the Intel Corporation nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +
    +THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
    +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
    +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
    +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
    +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
    +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
    +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    ++end_quote
    +
    +
    +For files:
    +- random/jitterentropy-base.c
    +- random/jitterentropy.h
    +- random/rndjent.c (plus common Libgcrypt copyright holders)
    +
    ++begin_quote
    +Copyright Stephan Mueller , 2013
    +
    +[PLEASE NOTE:  VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE BSD LICENSE.  THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.]
    +
    +License
    +=======
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions
    +are met:
    +1. Redistributions of source code must retain the above copyright
    +notice, and the entire permission notice in its entirety,
    +including the disclaimer of warranties.
    +2. Redistributions in binary form must reproduce the above copyright
    +notice, this list of conditions and the following disclaimer in the
    +documentation and/or other materials provided with the distribution.
    +3. The name of the author may not be used to endorse or promote
    +products derived from this software without specific prior
    +written permission.
    +
    +ALTERNATIVELY, this product may be distributed under the terms of
    +the GNU General Public License, in which case the provisions of the GPL are
    +required INSTEAD OF the above restrictions.  (This clause is
    +necessary due to a potential bad interaction between the GPL and
    +the restrictions contained in a BSD-style copyright.)
    +
    +THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
    +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
    +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
    +WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
    +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
    +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
    +OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
    +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    +USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
    +DAMAGE.
    ++end_quote
    +
    +X License
    +
    +For files:
    +- install.sh
    +
    ++begin_quote
    +Copyright (C) 1994 X Consortium
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to
    +deal in the Software without restriction, including without limitation the
    +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    +sell copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in
    +all copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
    +X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
    +AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
    +TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    +
    +Except as contained in this notice, the name of the X Consortium shall not
    +be used in advertising or otherwise to promote the sale, use or other deal-
    +ings in this Software without prior written authorization from the X Consor-
    +tium.
    ++end_quote
    +
    +Public domain
    +
    +For files:
    +- cipher/arcfour-amd64.S
    +
    ++begin_quote
    +Author: Marc Bevand 
    +Licence: I hereby disclaim the copyright on this code and place it
    +in the public domain.
    ++end_quote
    +
    +OCB license 1
    +
    +For files:
    +- cipher/cipher-ocb.c
    +
    ++begin_quote
    +OCB is covered by several patents but may be used freely by most
    +software.  See http://web.cs.ucdavis.edu/~rogaway/ocb/license.htm .
    +In particular license 1 is suitable for Libgcrypt: See
    +http://web.cs.ucdavis.edu/~rogaway/ocb/license1.pdf for the full
    +license document; it basically says:
    +
    +License 1 — License for Open-Source Software Implementations of OCB
    +(Jan 9, 2013)
    +
    +Under this license, you are authorized to make, use, and
    +distribute open-source software implementations of OCB. This
    +license terminates for you if you sue someone over their
    +open-source software implementation of OCB claiming that you have
    +a patent covering their implementation.
    +
    +
    +
    +License for Open Source Software Implementations of OCB
    +January 9, 2013
    +
    +1 Definitions
    +
    +1.1 “Licensor” means Phillip Rogaway.
    +
    +1.2 “Licensed Patents” means any patent that claims priority to United
    +States Patent Application No. 09/918,615 entitled “Method and Apparatus
    +for Facilitating Efficient Authenticated Encryption,” and any utility,
    +divisional, provisional, continuation, continuations-in-part, reexamination,
    +reissue, or foreign counterpart patents that may issue with respect to the
    +aforesaid patent application. This includes, but is not limited to, United
    +States Patent No. 7,046,802; United States Patent No. 7,200,227; United
    +States Patent No. 7,949,129; United States Patent No. 8,321,675 ; and any
    +patent that issues out of United States Patent Application No. 13/669,114.
    +
    +1.3 “Use” means any practice of any invention claimed in the Licensed Patents.
    +
    +1.4 “Software Implementation” means any practice of any invention
    +claimed in the Licensed Patents that takes the form of software executing on
    +a user-programmable, general-purpose computer or that takes the form of a
    +computer-readable medium storing such software. Software Implementation does
    +not include, for example, application-specific integrated circuits (ASICs),
    +field-programmable gate arrays (FPGAs), embedded systems, or IP cores.
    +
    +1.5 “Open Source Software” means software whose source code is published
    +and made available for inspection and use by anyone because either (a) the
    +source code is subject to a license that permits recipients to copy, modify,
    +and distribute the source code without payment of fees or royalties, or
    +(b) the source code is in the public domain, including code released for
    +public use through a CC0 waiver. All licenses certified by the Open Source
    +Initiative at opensource.org as of January 9, 2013 and all Creative Commons
    +licenses identified on the creativecommons.org website as of January 9,
    +2013, including the Public License Fallback of the CC0 waiver, satisfy these
    +requirements for the purposes of this license.
    +
    +1.6 “Open Source Software Implementation” means a Software
    +Implementation in which the software implicating the Licensed Patents is
    +Open Source Software. Open Source Software Implementation does not include
    +any Software Implementation in which the software implicating the Licensed
    +Patents is combined, so as to form a larger program, with software that is
    +not Open Source Software.
    +
    +2 License Grant
    +
    +2.1 License. Subject to your compliance with the terms of this license,
    +including the restriction set forth in Section 2.2, Licensor hereby
    +grants to you a perpetual, worldwide, non-exclusive, non-transferable,
    +non-sublicenseable, no-charge, royalty-free, irrevocable license to practice
    +any invention claimed in the Licensed Patents in any Open Source Software
    +Implementation.
    +
    +2.2 Restriction. If you or your affiliates institute patent litigation
    +(including, but not limited to, a cross-claim or counterclaim in a lawsuit)
    +against any entity alleging that any Use authorized by this license
    +infringes another patent, then any rights granted to you under this license
    +automatically terminate as of the date such litigation is filed.
    +
    +3 Disclaimer
    +YOUR USE OF THE LICENSED PATENTS IS AT YOUR OWN RISK AND UNLESS REQUIRED
    +BY APPLICABLE LAW, LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY
    +KIND CONCERNING THE LICENSED PATENTS OR ANY PRODUCT EMBODYING ANY LICENSED
    +PATENT, EXPRESS OR IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT
    +LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
    +PURPOSE, OR NONINFRINGEMENT. IN NO EVENT WILL LICENSOR BE LIABLE FOR ANY
    +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
    +ARISING FROM OR RELATED TO ANY USE OF THE LICENSED PATENTS, INCLUDING,
    +WITHOUT LIMITATION, DIRECT, INDIRECT, INCIDENTAL, CONSEQUENTIAL, PUNITIVE
    +OR SPECIAL DAMAGES, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF
    +SUCH DAMAGES PRIOR TO SUCH AN OCCURRENCE.
    ++end_quote
    +
    +
    +>MIT
    +
    +
    +Copyright 1997, 1998, 1999, 2001 Werner Koch (dd9jn)
    +Copyright 2013 g10 Code GmbH
    +
    +Permission is hereby granted, free of charge, to any person obtaining a
    +copy of this software and associated documentation files (the "Software"),
    +to deal in the Software without restriction, including without limitation
    +the rights to use, copy, modify, merge, publish, distribute, sublicense,
    +and/or sell copies of the Software, and to permit persons to whom the
    +Software is furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in
    +all copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
    +WERNER KOCH BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
    +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
    +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    +
    +>GPL 2.0
    +
    +libgcrypt-1.8.1.tar.bz2\libgcrypt-1.8.1.tar\libgcrypt-1.8.1\doc\gpl.texi
    +
    +LICENSE: GPL 2.0
    +
    +
    +>GPL 3.0
    +
    +libgcrypt-1.8.1.tar.bz2\libgcrypt-1.8.1.tar\libgcrypt-1.8.1\doc\yat2m.c
    +
    +Copyright (C) 2005, 2013, 2015, 2016 g10 Code GmbH
    +Copyright (C) 2006, 2008, 2011 Free Software Foundation, Inc.
    +
    +This program is free software; you can redistribute it and/or modify
    +it under the terms of the GNU General Public License as published by
    +the Free Software Foundation; either version 3 of the License, or
    +(at your option) any later version.
    +
    +This program is distributed in the hope that it will be useful,
    +but WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    +GNU General Public License for more details.
    +
    +You should have received a copy of the GNU General Public License
    +along with this program; if not, see .
    +
    +>BSD-3
    +
    +[PLEASE NOTE:  VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE BSD LICENSE.  THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.]
    +
    +libgcrypt-1.8.1.tar.bz2\libgcrypt-1.8.1.tar\libgcrypt-1.8.1\random\jitterentropy-base.c
    +
    +Copyright Peter Gutmann, Paul Kendall, and Chris Wedgwood 1996-1999.
    +Heavily modified for GnuPG by Werner Koch
    +
    +
    +/
    +
    +/ This module is part of the cryptlib continuously seeded pseudorandom
    +number generator.  For usage conditions, see lib_rand.c
    +
    +[Here is the notice from lib_rand.c:]
    +
    +This module and the misc/rnd.c modules represent the cryptlib
    +continuously seeded pseudorandom number generator (CSPRNG) as described in
    +my 1998 Usenix Security Symposium paper "The generation of random numbers
    +for cryptographic purposes".
    +
    +The CSPRNG code is copyright Peter Gutmann (and various others) 1996,
    +1997, 1998, 1999, all rights reserved.  Redistribution of the CSPRNG
    +modules and use in source and binary forms, with or without modification,
    +are permitted provided that the following conditions are met:
    +
    +1. Redistributions of source code must retain the above copyright notice
    +and this permission notice in its entirety.
    +
    +2. Redistributions in binary form must reproduce the copyright notice in
    +the documentation and/or other materials provided with the distribution.
    +
    +3. A copy of any bugfixes or enhancements made must be provided to the
    +author,  to allow them to be added to the
    +baseline version of the code.
    +
    +ALTERNATIVELY, the code may be distributed under the terms of the
    +GNU Lesser General Public License, version 2.1 or any later version
    +published by the Free Software Foundation, in which case the
    +provisions of the GNU LGPL are required INSTEAD OF the above
    +restrictions.
    +
    +Although not required under the terms of the LGPL, it would still be
    +nice if you could make any changes available to the author to allow
    +a consistent code base to be maintained.  /
    +
    +>BSD-3
    +
    +[PLEASE NOTE:  VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE BSD LICENSE.  THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.]
    +
    +libgcrypt-1.8.1.tar.bz2\libgcrypt-1.8.1.tar\libgcrypt-1.8.1\random\random-drbg.c
    +
    +Copyright 2014 Stephan Mueller 
    +
    +DRBG: Deterministic Random Bits Generator
    +Based on NIST Recommended DRBG from NIST SP800-90A with the following
    +properties:
    +CTR DRBG with DF with AES-128, AES-192, AES-256 cores
    +Hash DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores
    +HMAC DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores
    +with and without prediction resistance
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions
    +are met:
    +1. Redistributions of source code must retain the above copyright
    +notice, and the entire permission notice in its entirety,
    +including the disclaimer of warranties.
    +2. Redistributions in binary form must reproduce the above copyright
    +notice, this list of conditions and the following disclaimer in the
    +documentation and/or other materials provided with the distribution.
    +3. The name of the author may not be used to endorse or promote
    +products derived from this software without specific prior
    +written permission.
    +
    +ALTERNATIVELY, this product may be distributed under the terms of
    +LGPLv2+, in which case the provisions of the LGPL are
    +required INSTEAD OF the above restrictions.  (This clause is
    +necessary due to a potential bad interaction between the LGPL and
    +the restrictions contained in a BSD-style copyright.)
    +
    +THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
    +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
    +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
    +WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
    +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
    +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
    +OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
    +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    +USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
    +DAMAGE.
    +
    +>>> libgpg-error-1.27-1.ph2
    +
    +Copyright (C) 2003-2004, 2010, 2013-2016 g10 Code GmbH
    +
    +This file is part of libgpg-error.
    +
    +libgpg-error is free software; you can redistribute it and/or
    +modify it under the terms of the GNU Lesser General Public License
    +as published by the Free Software Foundation; either version 2.1 of
    +the License, or (at your option) any later version.
    +
    +libgpg-error is distributed in the hope that it will be useful, but
    +WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    +Lesser General Public License for more details.
    +
    +You should have received a copy of the GNU Lesser General Public
    +License along with this program; if not, see <http://www.gnu.org/licenses/>.
    +
    +ADDITIONAL LICENSE INFORMATION:
    +
    +>GPL 2.0
    +
    +libgpg-error-1.27.tar.bz2\libgpg-error-1.27.tar\libgpg-error-1.27\doc\gpgrt.info
    +
    +Copyright (C) 2014 g10 Code GmbH
    +
    +Permission is granted to copy, distribute and/or modify this
    +document under the terms of the GNU General Public License as
    +published by the Free Software Foundation; either version 2 of the
    +License, or (at your option) any later version.  The text of the
    +license can be found in the section entitled "GNU General Public
    +License".
    +
    +>GPL 3.0
    +
    +libgpg-error-1.27.tar.bz2\libgpg-error-1.27.tar\libgpg-error-1.27\doc\yat2m.c
    +
    +Copyright (C) 2005, 2013, 2015, 2016 g10 Code GmbH
    +Copyright (C) 2006, 2008, 2011 Free Software Foundation, Inc.
    +
    +This program is free software; you can redistribute it and/or modify
    +it under the terms of the GNU General Public License as published by
    +the Free Software Foundation; either version 3 of the License, or
    +(at your option) any later version.
    +
    +This program is distributed in the hope that it will be useful,
    +but WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    +GNU General Public License for more details.
    +
    +You should have received a copy of the GNU General Public License
    +along with this program; if not, see <http://www.gnu.org/licenses/>.
    +
    +>MIT STYLE
    +
    +libgpg-error-1.27.tar.bz2\libgpg-error-1.27.tar\libgpg-error-1.27\src\mkheader.c
    +
    +Copyright (C) 2010 Free Software Foundation, Inc.
    +Copyright (C) 2014 g10 Code GmbH
    +
    +This file is free software; as a special exception the author gives
    +unlimited permission to copy and/or distribute it, with or without
    +modifications, as long as this notice is preserved.
    +
    +This file is distributed in the hope that it will be useful, but
    +WITHOUT ANY WARRANTY, to the extent permitted by law; without even the
    +implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
    +
    +>BSD-3 Clause
    +
    +[PLEASE NOTE:  VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE BSD-3 CLAUSE LICENSE. THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.]
    +
    +libgpg-error-1.27.tar.bz2\libgpg-error-1.27.tar\libgpg-error-1.27\src\mkheader.c
    +
    +estream.c - Extended Stream I/O Library
    +Copyright (C) 2004, 2005, 2006, 2007, 2009, 2010, 2011,
    +2014, 2015, 2016, 2017 g10 Code GmbH
    +
    +This file is part of Libestream.
    +
    +Libestream is free software; you can redistribute it and/or modify
    +it under the terms of the GNU Lesser General Public License as
    +published by the Free Software Foundation; either version 2.1 of
    +the License, or (at your option) any later version.
    +
    +Libestream is distributed in the hope that it will be useful, but
    +WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    +Lesser General Public License for more details.
    +
    +You should have received a copy of the GNU Lesser General Public
    +License along with Libestream; if not, see <http://www.gnu.org/licenses/>.
    +
    +ALTERNATIVELY, Libestream may be distributed under the terms of the
    +following license, in which case the provisions of this license are
    +required INSTEAD OF the GNU General Public License. If you wish to
    +allow use of your version of this file only under the terms of the
    +GNU General Public License, and not to allow others to use your
    +version of this file under the terms of the following license,
    +indicate your decision by deleting this paragraph and the license
    +below.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions
    +are met:
    +1. Redistributions of source code must retain the above copyright
    +notice, and the entire permission notice in its entirety,
    +including the disclaimer of warranties.
    +2. Redistributions in binary form must reproduce the above copyright
    +notice, this list of conditions and the following disclaimer in the
    +documentation and/or other materials provided with the distribution.
    +3. The name of the author may not be used to endorse or promote
    +products derived from this software without specific prior
    +written permission.
    +
    +THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
    +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
    +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    +DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
    +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
    +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
    +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
    +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
    +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
    +OF THE POSSIBILITY OF SUCH DAMAGE.
    +
    +>LGPL 2.0
    +
    +libgpg-error-1.27.tar.bz2\libgpg-error-1.27.tar\libgpg-error-1.27\src\gettext.h
    +
    +Copyright (C) 1995-1998, 2000-2002 Free Software Foundation, Inc.
    +
    +This program is free software; you can redistribute it and/or modify it
    +under the terms of the GNU Library General Public License as published
    +by the Free Software Foundation; either version 2, or (at your option)
    +any later version.
    +
    +This program is distributed in the hope that it will be useful,
    +but WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    +Library General Public License for more details.
    +
    +You should have received a copy of the GNU Library General Public
    +License along with this program; if not, write to the Free Software
    +Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
    +USA.
    +
    +>>> systemd-233-18.ph2
    +
    +This file is part of systemd.
    +
    +Copyright 2008-2011 Kay Sievers
    +Copyright 2012 Lennart Poettering
    +
    +systemd is free software; you can redistribute it and/or modify it
    +under the terms of the GNU Lesser General Public License as published by
    +the Free Software Foundation; either version 2.1 of the License, or
    +(at your option) any later version.
    +
    +systemd is distributed in the hope that it will be useful, but
    +WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
    +Lesser General Public License for more details.
    +
    +You should have received a copy of the GNU Lesser General Public License
    +along with systemd; If not, see <http://www.gnu.org/licenses/>.
    +
    +ADDITIONAL LICENSE INFORMATION:
    +
    +> GPL 2.0
    +
    +systemd-233.tar.gz\systemd-233.tar\systemd-233\LICENSE.GPL2
    +
    +License: GPL 2.0
    +
    +> MIT
    +
    +systemd-233.tar.gz\systemd-233.tar\systemd-233\man\glib-event-glue.c
    +
    +Copyright 2014 Tom Gundersen
    +
    +Permission is hereby granted, free of charge, to any person
    +obtaining a copy of this software and associated documentation files
    +(the "Software"), to deal in the Software without restriction,
    +including without limitation the rights to use, copy, modify, merge,
    +publish, distribute, sublicense, and/or sell copies of the Software,
    +and to permit persons to whom the Software is furnished to do so,
    +subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be
    +included in all copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
    +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
    +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
    +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +SOFTWARE.
    +
    +> Public Domain
    +
    +systemd-233.tar.gz\systemd-233.tar\systemd-233\src\basic\MurmurHash2.c
    +
    +MurmurHash2 was written by Austin Appleby, and is placed in the public
    +domain. The author hereby disclaims copyright to this source code.
    +
    +> LGPL 2.0
    +
    +systemd-233.tar.gz\systemd-233.tar\systemd-233\src\basic\utf8.c
    +
    +Copyright (C) 1999 Tom Tromey
    +Copyright (C) 2000 Red Hat, Inc.
    +
    +This library is free software; you can redistribute it and/or
    +modify it under the terms of the GNU Library General Public
    +License as published by the Free Software Foundation; either
    +version 2 of the License, or (at your option) any later version.
    +
    +This library is distributed in the hope that it will be useful,
    +but WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    +Library General Public License for more details.
    +
    +You should have received a copy of the GNU Library General Public
    +License along with this library; if not, write to the Free Software
    +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
    +
    +--------------- SECTION 7: GNU Lesser General Public License, V3.0 ----------
    +
    +GNU Lesser General Public License, V3.0 is applicable to the following component(s).
    +
    +
    +>>> gmp-6.1.2-2.ph2
    +
    +[PLEASE NOTE:  VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE LGPL 3.0.  PLEASE SEE APPENDIX FOR THE FULL TEXT OF THE LGPL3.0.  THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.]
    +
    +Copyright 2000-2002, 2004 Free Software Foundation, Inc.
    +
    +This file is part of the GNU MP Library.
    +
    +The GNU MP Library is free software; you can redistribute it and/or modify
    +it under the terms of either:
    +
    +* the GNU Lesser General Public License as published by the Free
    +Software Foundation; either version 3 of the License, or (at your
    +option) any later version.
    +
    +or
    +
    +* the GNU General Public License as published by the Free Software
    +Foundation; either version 2 of the License, or (at your option) any
    +later version.
    +
    +or both in parallel, as here.
    +
    +The GNU MP Library is distributed in the hope that it will be useful, but
    +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
    +or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    +for more details.
    +
    +You should have received copies of the GNU General Public License and the
    +GNU Lesser General Public License along with the GNU MP Library.  If not,
    +see https://www.gnu.org/licenses/.
    +
    +ADDITIONAL LICENSE INFORMATION:
    +
    +> GPL3.0
    +
    +gmp-6.1.2.tar.xz\gmp-6.1.2.tar\gmp-6.1.2\demos\factorize.c
    +
    +Factoring with Pollard's rho method.
    +
    +Copyright 1995, 1997-2003, 2005, 2009, 2012, 2015 Free Software
    +Foundation, Inc.
    +
    +This program is free software; you can redistribute it and/or modify it under
    +the terms of the GNU General Public License as published by the Free Software
    +Foundation; either version 3 of the License, or (at your option) any later
    +version.
    +
    +This program is distributed in the hope that it will be useful, but WITHOUT ANY
    +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
    +PARTICULAR PURPOSE.  See the GNU General Public License for more details.
    +
    +You should have received a copy of the GNU General Public License along with
    +this program.  If not, see https://www.gnu.org/licenses/.
    +
    +> GPL3.0 with Bison Parser Exception
    +
    +gmp-6.1.2.tar.xz\gmp-6.1.2.tar\gmp-6.1.2\demos\calc\calc.c
    +
    +Bison implementation for Yacc-like parsers in C
    +
    +Copyright (C) 1984, 1989-1990, 2000-2013 Free Software Foundation, Inc.
    +
    +This program is free software: you can redistribute it and/or modify
    +it under the terms of the GNU General Public License as published by
    +the Free Software Foundation, either version 3 of the License, or
    +(at your option) any later version.
    +
    +This program is distributed in the hope that it will be useful,
    +but WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    +GNU General Public License for more details.
    +
    +You should have received a copy of the GNU General Public License
    +along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
    +
    +/* As a special exception, you may create a larger work that contains
    +part or all of the Bison parser skeleton and distribute that work
    +under terms of your choice, so long as that work isn't itself a
    +parser generator using the skeleton or a modified version thereof
    +as a parser skeleton.  Alternatively, if you modify or redistribute
    +the parser skeleton itself, you may (at your option) remove this
    +special exception, which will cause the skeleton and the resulting
    +Bison output files to be licensed under the GNU General Public
    +License without this special exception.
    +
    +This special exception was added by the Free Software Foundation in
    +version 2.2 of Bison.
    +
    +> GFDL 1.3
    +
    +gmp-6.1.2.tar.xz\gmp-6.1.2.tar\gmp-6.1.2\doc\gmp.info
    +
    +Copyright 1991, 1993-2016 Free Software Foundation, Inc.
    +
    +Permission is granted to copy, distribute and/or modify this document
    +under the terms of the GNU Free Documentation License, Version 1.3 or
    +any later version published by the Free Software Foundation; with no
    +Invariant Sections, with the Front-Cover Texts being "A GNU Manual", and
    +with the Back-Cover Texts being "You have freedom to copy and modify
    +this GNU Manual, like GNU software".  A copy of the license is included
    +in *note GNU Free Documentation License::.
    +
    +> GPL2.0
    +
    +gmp-6.1.2.tar.xz\gmp-6.1.2.tar\gmp-6.1.2\mini-gmp\tests\run-tests
    +
    +Copyright (C) 2000-2002, 2004, 2005, 2011, 2012, 2016  Niels Möller
    +
    +This program is free software; you can redistribute it and/or modify
    +it under the terms of the GNU General Public License as published by
    +the Free Software Foundation; either version 2 of the License, or
    +(at your option) any later version.
    +
    +This program is distributed in the hope that it will be useful,
    +but WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    +GNU General Public License for more details.
    +
    +You should have received a copy of the GNU General Public License along
    +with this program; if not, write to the Free Software Foundation, Inc.,
    +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
    +
    +> MIT Style
    +
    +gmp-6.1.2.tar.xz\gmp-6.1.2.tar\gmp-6.1.2\aclocal.m4
    +
    +Copyright (C) 1996-2014 Free Software Foundation, Inc.
    +
    +This file is free software; the Free Software Foundation
    +gives unlimited permission to copy and/or distribute it,
    +with or without modifications, as long as this notice is preserved.
    +
    +This program is distributed in the hope that it will be useful,
    +but WITHOUT ANY WARRANTY, to the extent permitted by law; without
    +even the implied warranty of MERCHANTABILITY or FITNESS FOR A
    +PARTICULAR PURPOSE.
    +
    +--------------- SECTION 8: GNU Library General Public License, V2.0 ----------
    +
    +GNU Library General Public License, V2.0 is applicable to the following component(s).
    +
    +
    +>>> glib-2.52.1-3.ph2
    +
    +Copyright © 2015 Canonical Limited
    +
    +This library is free software; you can redistribute it and/or
    +modify it under the terms of the GNU Lesser General Public
    +License as published by the Free Software Foundation; either
    +version 2 of the License, or (at your option) any later version.
    +
    +This library is distributed in the hope that it will be useful,
    +but WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    +Lesser General Public License for more details.
    +
    +You should have received a copy of the GNU Lesser General
    +Public License along with this library; if not, see <http://www.gnu.org/licenses/>.
    +
    +ADDITIONAL LICENSE INFORMATION:
    +
    +> Public Domain
    +
    +glib-2.52.1.tar.xz\glib-2.52.1.tar\glib-2.52.1\build\win32\dirent\dirent.h
    +
    +This file has no copyright assigned and is placed in the Public Domain.
    +This file is a part of the mingw-runtime package.
    +No warranty is given; refer to the file DISCLAIMER within the package.
    +
    +> LGPL 3.0
    +
    +glib-2.52.1.tar.xz\glib-2.52.1.tar\glib-2.52.1\docs\reference\glib\html\glib-resources.html
    +
    +If you develop a bugfix or enhancement for GLib, please file that in Bugzilla as well. Bugzilla allows you to attach files; please attach a patch generated by the utility, using the option to make the patch more readable. All patches must be offered under the terms of the GNU LGPL license, so be sure you are authorized to give us the patch under those terms.
    +
    +> MIT
    +
    +glib-2.52.1.tar.xz\glib-2.52.1.tar\glib-2.52.1\gio\kqueue\kqueue-sub.h
    +
    +Copyright (c) 2011, 2012 Dmitry Matveev 
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in
    +all copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    +THE SOFTWARE.
    +
    +> LGPL 2.0
    +
    +glib-2.52.1.tar.xz\glib-2.52.1.tar\glib-2.52.1\gio\xdgmime\xdgmimeparent.c
    +
    +[PLEASE NOTE:  VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE LGPL 2.0.  PLEASE SEE THE APPENDIX TO REVIEW THE FULL TEXT OF THE LGPL 2.0.  THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.]
    +
    +Copyright (C) 2004  Red Hat, Inc.
    +Copyright (C) 2004  Matthias Clasen 
    +
    +Licensed under the Academic Free License version 2.0
    +Or under the following terms:
    +
    +This library is free software; you can redistribute it and/or
    +modify it under the terms of the GNU Lesser General Public
    +License as published by the Free Software Foundation; either
    +version 2 of the License, or (at your option) any later version.
    +
    +This library is distributed in the hope that it will be useful,
    +but WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
    +Lesser General Public License for more details.
    +
    +You should have received a copy of the GNU Lesser General Public
    +License along with this library; if not, see <http://www.gnu.org/licenses/>.
    +
    +> GPL 2.0
    +
    +glib-2.52.1.tar.xz\glib-2.52.1.tar\glib-2.52.1\glib\gen-unicode-tables.pl
    +
    +Copyright (C) 1998, 1999 Tom Tromey
    +Copyright (C) 2001 Red Hat Software
    +
    +This program is free software; you can redistribute it and/or modify
    +it under the terms of the GNU General Public License as published by
    +the Free Software Foundation; either version 2, or (at your option)
    +any later version.
    +
    +This program is distributed in the hope that it will be useful,
    +but WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    +GNU General Public License for more details.
    +
    +You should have received a copy of the GNU General Public License
    +along with this program; if not, see <http://www.gnu.org/licenses/>.
    +
    +> LGPL 2.1
    +
    +glib-2.52.1.tar.xz\glib-2.52.1.tar\glib-2.52.1\gio\win32\winhttp.h
    +
    +Copyright (C) 2007 Francois Gouget
    +
    +This library is free software; you can redistribute it and/or
    +modify it under the terms of the GNU Lesser General Public
    +License as published by the Free Software Foundation; either
    +version 2.1 of the License, or (at your option) any later version.
    +
    +This library is distributed in the hope that it will be useful,
    +but WITHOUT ANY WARRANTY; without even the implied warranty of
    +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    +Lesser General Public License for more details.
    +
    +You should have received a copy of the GNU Lesser General Public
    +License along with this library; if not, write to the Free Software
    +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
    +
    +> BSD-3 CLAUSE
    +
    +glib-2.52.1.tar.xz\glib-2.52.1.tar\glib-2.52.1\glib\pcre\pcre.h
    +
    +Copyright (c) 1997-2012 University of Cambridge
    +-----------------------------------------------------------------------------
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are met:
    +
    +* Redistributions of source code must retain the above copyright notice,
    +this list of conditions and the following disclaimer.
    +
    +* Redistributions in binary form must reproduce the above copyright
    +notice, this list of conditions and the following disclaimer in the
    +documentation and/or other materials provided with the distribution.
    +
    +* Neither the name of the University of Cambridge nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
    +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
    +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
    +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
    +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
    +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    +POSSIBILITY OF SUCH DAMAGE.
    +
    +> BSD-4
    +
    +glib-2.52.1.tar.xz\glib-2.52.1.tar\glib-2.52.1\glib\valgrind.h
    +
    +   Notice that the following BSD-style license applies to this one
    +   file (valgrind.h) only.  The rest of Valgrind is licensed under the
    +   terms of the GNU General Public License, version 2, unless
    +   otherwise indicated.  See the COPYING file in the source
    +   distribution for details.
    +---------------------------------------------------------------
    +   This file is part of Valgrind, a dynamic binary instrumentation
    +   framework.
    +
    +   Copyright (C) 2000-2013 Julian Seward.  All rights reserved.
    +
    +   Redistribution and use in source and binary forms, with or without
    +   modification, are permitted provided that the following conditions
    +   are met:
    +
    +   1. Redistributions of source code must retain the above copyright
    +      notice, this list of conditions and the following disclaimer.
    +
    +   2. The origin of this software must not be misrepresented; you must 
    +      not claim that you wrote the original software.  If you use this 
    +      software in a product, an acknowledgment in the product 
    +      documentation would be appreciated but is not required.
    +
    +   3. Altered source versions must be plainly marked as such, and must
    +      not be misrepresented as being the original software.
    +
    +   4. The name of the author may not be used to endorse or promote 
    +      products derived from this software without specific prior written 
    +      permission.
    +
    +   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
    +   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    +   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    +   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
    +   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    +   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
    +   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    +   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
    +   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    +   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    +   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    +----------------------------------------------------------------
    +   Notice that the above BSD-style license applies to this one file
    +   (valgrind.h) only.  The entire rest of Valgrind is licensed under
    +   the terms of the GNU General Public License, version 2.  See the
    +   COPYING file in the source distribution for details.
    +
    +--------------- SECTION 9: Mozilla Public License, V2.0 ----------
    +
    +Mozilla Public License, V2.0 is applicable to the following component(s).
    +
    +
    +>>> ca-certificates-20180919-1.ph2
    +
    +This Source Code Form is subject to the terms of the Mozilla Public
    + License, v. 2.0. If a copy of the MPL was not distributed with this
    + file, You can obtain one at http://mozilla.org/MPL/2.0/.
    +
    +>>> nspr-4.15-1.ph2
    +
    +This Source Code Form is subject to the terms of the Mozilla Public
    +License, v. 2.0. If a copy of the MPL was not distributed with this
    +file, You can obtain one at http://mozilla.org/MPL/2.0/.
    +
    +ADDITIONAL LICENSE INFORMATION:
    +
    +> MPL 2.0
    +
    +[PLEASE NOTE:  VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE MPL 2.0.  PLEASE SEE THE APPENDIX TO REVIEW THE FULL TEXT OF THE MPL 2.0.  THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.]
    +
    +
    +nspr-4.15.tar.gz\..\stage\v4.15\src\nspr-4.15.tar\nspr-4.15\nspr\pkg\linux\sun-nspr.spec
    +
    +Under "MPL/GPL" license.
    +
    +>BEER-WARE LICENSE
    +
    +nspr-4.15.tar.gz\..\stage\v4.15\src\nspr-4.15.tar\nspr-4.15\nspr\pr\src\malloc\prmalloc.c
    +
    +"THE BEER-WARE LICENSE" (Revision 42):
    +<phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
    +can do whatever you want with this stuff. If we meet some day, and you think
    +this stuff is worth it, you can buy me a beer in return.   Poul-Henning
    +
    +>BSD-3
    +
    +nspr-4.15.tar.gz\..\stage\v4.15\src\nspr-4.15.tar\nspr-4.15\nspr\pr\src\misc\prcountr.c
    +
    +Copyright (c) 1983, 1990, 1993
    +The Regents of the University of California.  All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions
    +are met:
    +1. Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +2. Redistributions in binary form must reproduce the above copyright
    +notice, this list of conditions and the following disclaimer in the
    +documentation and/or other materials provided with the distribution.
    +4. Neither the name of the University nor the names of its contributors
    +may be used to endorse or promote products derived from this software
    +without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
    +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    +ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
    +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
    +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
    +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
    +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
    +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
    +SUCH DAMAGE.
    +
    +
    +>MIT
    +
    +
    +nspr-4.15.tar.gz\..\stage\v4.15\src\nspr-4.15.tar\nspr-4.15\nspr\pr\src\misc\prcountr.c
    +
    +Portions Copyright (c) 1993 by Digital Equipment Corporation.
    +
    +Permission to use, copy, modify, and distribute this software for any
    +purpose with or without fee is hereby granted, provided that the above
    +copyright notice and this permission notice appear in all copies, and that
    +the name of Digital Equipment Corporation not be used in advertising or
    +publicity pertaining to distribution of the document or software without
    +specific, written prior permission.
    +
    +THE SOFTWARE IS PROVIDED "AS IS" AND DIGITAL EQUIPMENT CORP. DISCLAIMS ALL
    +WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES
    +OF MERCHANTABILITY AND FITNESS.   IN NO EVENT SHALL DIGITAL EQUIPMENT
    +CORPORATION BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
    +DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
    +PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
    +ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
    +SOFTWARE.
    +
    +>>> nss-3.31.1-1.ph2
    +
    +This Source Code Form is subject to the terms of the Mozilla Public
    +License, v. 2.0. If a copy of the MPL was not distributed with this
    +file, You can obtain one at http://mozilla.org/MPL/2.0/.
    +
    +ADDITIONAL LICENSE INFORMATION:
    +
    +> BSD-2
    +
    +nss-3.31.1.tar.gz\..\stage\NSS_3_31_1_RTM\src\nss-3.31.1.tar\nss-3.31.1\nss\cmd\libpkix\pkix_pl\module\test_socket.c
    +
    +Test Socket Type
    +
    +Copyright 2004-2005 Sun Microsystems, Inc.  All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are met:
    +
    +1. Redistribution of source code must retain the above copyright notice,
    +this list of conditions and the following disclaimer.
    +
    +2. Redistribution in binary form must reproduce the above copyright
    +notice, this list of conditions and the following disclaimer in the
    +documentation and/or other materials provided with the distribution.
    +
    +Neither the name of Sun Microsystems, Inc. or the names of contributors may
    +be used to endorse or promote products derived from this software without
    +specific prior written permission.
    +
    +This software is provided "AS IS," without a warranty of any kind. ALL
    +EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING
    +ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
    +OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN")
    +AND ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE
    +AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS
    +DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST
    +REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL,
    +INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY
    +OF LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE,
    +EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
    +
    +You acknowledge that this software is not designed or intended for use in
    +the design, construction, operation or maintenance of any nuclear facility.
    +
    +
    +> MIT
    +
    +nss-3.31.1.tar.gz\..\stage\NSS_3_31_1_RTM\src\nss-3.31.1.tar\nss-3.31.1\nss\coreconf\mkdepend\def.h
    +
    +Copyright (c) 1993, 1994, 1998 The Open Group.
    +
    +Permission to use, copy, modify, distribute, and sell this software and its
    +documentation for any purpose is hereby granted without fee, provided that
    +the above copyright notice appear in all copies and that both that
    +copyright notice and this permission notice appear in supporting
    +documentation.
    +
    +The above copyright notice and this permission notice shall be included in
    +all copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
    +OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
    +AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
    +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    +
    +> BSD-3
    +
    +nss-3.31.1.tar.gz\..\stage\NSS_3_31_1_RTM\src\nss-3.31.1.tar\nss-3.31.1\nss\gtests\google_test\gtest\include\gtest\internal\gtest-death-test-internal.h
    +
    +Copyright 2005, Google Inc.
    +All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +Redistributions in binary form must reproduce the above
    +copyright notice, this list of conditions and the following disclaimer
    +in the documentation and/or other materials provided with the
    +distribution.
    +Neither the name of Google Inc. nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    +
    +>Public Domain
    +
    +nss-3.31.1.tar.gz\..\stage\NSS_3_31_1_RTM\src\nss-3.31.1.tar\nss-3.31.1\nss\lib\dbm\src\dirent.h
    +
    +A public domain implementation of BSD directory routines for
    +MS-DOS.  Written by Michael Rendell ({uunet,utai}michael@garfield),
    +August 1897
    +
    +
    +> MIT- Style
    +
    +nss-3.31.1.tar.gz\..\stage\NSS_3_31_1_RTM\src\nss-3.31.1.tar\nss-3.31.1\nss\lib\jar\jzlib.h
    +
    +Copyright (C) 1995-1996 Jean-loup Gailly and Mark Adler
    +
    +This software is provided 'as-is', without any express or implied
    +warranty.  In no event will the authors be held liable for any damages
    +arising from the use of this software.
    +
    +Permission is granted to anyone to use this software for any purpose,
    +including commercial applications, and to alter it and redistribute it
    +freely, subject to the following restrictions:
    +
    +1. The origin of this software must not be misrepresented; you must not
    +claim that you wrote the original software. If you use this software
    +in a product, an acknowledgment in the product documentation would be
    +appreciated but is not required.
    +2. Altered source versions must be plainly marked as such, and must not be
    +misrepresented as being the original software.
    +3. This notice may not be removed or altered from any source distribution.
    +
    +Jean-loup Gailly        Mark Adler
    +gzip@prep.ai.mit.edu    madler@alumni.caltech.edu
    +
    +
    +>MPL 2.0
    +
    +[PLEASE NOTE:  VMWARE, INC. ELECTS TO USE AND DISTRIBUTE THIS COMPONENT UNDER THE TERMS OF THE MPL 2.0.  PLEASE SEE THE APPENDIX TO REVIEW THE FULL TEXT OF THE MPL 2.0.  THE ORIGINAL LICENSE TERMS ARE REPRODUCED BELOW ONLY AS A REFERENCE.]
    +
    +
    +nss-3.31.1.tar.gz\..\stage\NSS_3_31_1_RTM\src\nss-3.31.1.tar\nss-3.31.1\nss\pkg\linux\sun-nss.spec
    +
    +Under "MPL/GPL" license.
    +
    +
    +=============== APPENDIX. Standard License Files ============== 
    +
    +
    +--------------- SECTION 1: Apache License, V2.0 -----------
    +
    +Apache License 
    +
    +
    +
    +Version 2.0, January 2004 
    +
    +http://www.apache.org/licenses/ 
    +
    +
    +
    +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 
    +
    +
    +
    +1. Definitions.
    +
    +
    +
    +"License" shall mean the terms and conditions for use, reproduction,
    +
    +and distribution as defined by Sections 1 through 9 of this document.
    +
    +
    +
    +"Licensor" shall mean the copyright owner or entity authorized by the
    +
    +copyright owner that is granting the License.  
    +
    +
    +
    +"Legal Entity" shall mean the union of the acting entity and all other
    +
    +entities that control, are controlled by, or are under common control
    +
    +with that entity. For the purposes of this definition, "control" means
    +
    +(i) the power, direct or indirect, to cause the direction or management
    +
    +of such entity, whether by contract or otherwise, or (ii) ownership
    +
    +of fifty percent (50%) or more of the outstanding shares, or (iii)
    +
    +beneficial ownership of such entity.
    +
    +
    +
    +"You" (or "Your") shall mean an individual or Legal Entity exercising
    +
    +permissions granted by this License.  
    +
    +
    +
    +"Source" form shall mean the preferred form for making modifications,
    +
    +including but not limited to software source code, documentation source,
    +
    +and configuration files.
    +
    +
    +
    +"Object" form shall mean any form resulting from mechanical transformation
    +
    +or translation of a Source form, including but not limited to compiled
    +
    +object code, generated documentation, and conversions to other media
    +
    +types.  
    +
    +
    +
    +"Work" shall mean the work of authorship, whether in Source or
    +
    +Object form, made available under the License, as indicated by a copyright
    +
    +notice that is included in or attached to the work (an example is provided
    +
    +in the Appendix below).  
    +
    +
    +
    +"Derivative Works" shall mean any work, whether in Source or Object form,
    +
    +that is based on (or derived from) the Work and for which the editorial
    +
    +revisions, annotations, elaborations, or other modifications represent,
    +
    +as a whole, an original work of authorship. For the purposes of this
    +
    +License, Derivative Works shall not include works that remain separable
    +
    +from, or merely link (or bind by name) to the interfaces of, the Work
    +
    +and Derivative Works thereof.
    +
    +
    +
    +"Contribution" shall mean any work of authorship, including the
    +
    +original version of the Work and any modifications or additions to
    +
    +that Work or Derivative Works thereof, that is intentionally submitted
    +
    +to Licensor for inclusion in the Work by the copyright owner or by an
    +
    +individual or Legal Entity authorized to submit on behalf of the copyright
    +
    +owner. For the purposes of this definition, "submitted" means any form of
    +
    +electronic, verbal, or written communication sent to the Licensor or its
    +
    +representatives, including but not limited to communication on electronic
    +
    +mailing lists, source code control systems, and issue tracking systems
    +
    +that are managed by, or on behalf of, the Licensor for the purpose of
    +
    +discussing and improving the Work, but excluding communication that is
    +
    +conspicuously marked or otherwise designated in writing by the copyright
    +
    +owner as "Not a Contribution."
    +
    +
    +
    +"Contributor" shall mean Licensor and any individual or Legal Entity
    +
    +on behalf of whom a Contribution has been received by Licensor and
    +
    +subsequently incorporated within the Work.
    +
    +
    +
    +2. Grant of Copyright License.
    +
    +Subject to the terms and conditions of this License, each Contributor
    +
    +hereby grants to You a perpetual, worldwide, non-exclusive, no-charge,
    +
    +royalty-free, irrevocable copyright license to reproduce, prepare
    +
    +Derivative Works of, publicly display, publicly perform, sublicense, and
    +
    +distribute the Work and such Derivative Works in Source or Object form.
    +
    +
    +
    +3. Grant of Patent License.
    +
    +Subject to the terms and conditions of this License, each Contributor
    +
    +hereby grants to You a perpetual, worldwide, non-exclusive, no-charge,
    +
    +royalty-free, irrevocable (except as stated in this section) patent
    +
    +license to make, have made, use, offer to sell, sell, import, and
    +
    +otherwise transfer the Work, where such license applies only to those
    +
    +patent claims licensable by such Contributor that are necessarily
    +
    +infringed by their Contribution(s) alone or by combination of
    +
    +their Contribution(s) with the Work to which such Contribution(s)
    +
    +was submitted. If You institute patent litigation against any entity
    +
    +(including a cross-claim or counterclaim in a lawsuit) alleging that the
    +
    +Work or a Contribution incorporated within the Work constitutes direct
    +
    +or contributory patent infringement, then any patent licenses granted
    +
    +to You under this License for that Work shall terminate as of the date
    +
    +such litigation is filed.
    +
    +
    +
    +4. Redistribution.
    +
    +You may reproduce and distribute copies of the Work or Derivative Works
    +
    +thereof in any medium, with or without modifications, and in Source or
    +
    +Object form, provided that You meet the following conditions:
    +
    +
    +
    +  a. You must give any other recipients of the Work or Derivative Works
    +
    +     a copy of this License; and
    +
    +
    +
    +  b. You must cause any modified files to carry prominent notices stating
    +
    +     that You changed the files; and
    +
    +
    +
    +  c. You must retain, in the Source form of any Derivative Works that
    +
    +     You distribute, all copyright, patent, trademark, and attribution
    +
    +     notices from the Source form of the Work, excluding those notices
    +
    +     that do not pertain to any part of the Derivative Works; and
    +
    +
    +
    +  d. If the Work includes a "NOTICE" text file as part of its
    +
    +     distribution, then any Derivative Works that You distribute must
    +
    +     include a readable copy of the attribution notices contained
    +
    +     within such NOTICE file, excluding those notices that do not
    +
    +     pertain to any part of the Derivative Works, in at least one of
    +
    +     the following places: within a NOTICE text file distributed as part
    +
    +     of the Derivative Works; within the Source form or documentation,
    +
    +     if provided along with the Derivative Works; or, within a display
    +
    +     generated by the Derivative Works, if and wherever such third-party
    +
    +     notices normally appear. The contents of the NOTICE file are for
    +
    +     informational purposes only and do not modify the License. You
    +
    +     may add Your own attribution notices within Derivative Works that
    +
    +     You distribute, alongside or as an addendum to the NOTICE text
    +
    +     from the Work, provided that such additional attribution notices
    +
    +     cannot be construed as modifying the License.  You may add Your own
    +
    +     copyright statement to Your modifications and may provide additional
    +
    +     or different license terms and conditions for use, reproduction, or
    +
    +     distribution of Your modifications, or for any such Derivative Works
    +
    +     as a whole, provided Your use, reproduction, and distribution of the
    +
    +     Work otherwise complies with the conditions stated in this License.
    +
    +
    +
    +5. Submission of Contributions.
    +
    +Unless You explicitly state otherwise, any Contribution intentionally
    +
    +submitted for inclusion in the Work by You to the Licensor shall be
    +
    +under the terms and conditions of this License, without any additional
    +
    +terms or conditions.  Notwithstanding the above, nothing herein shall
    +
    +supersede or modify the terms of any separate license agreement you may
    +
    +have executed with Licensor regarding such Contributions.
    +
    +
    +
    +6. Trademarks.
    +
    +This License does not grant permission to use the trade names, trademarks,
    +
    +service marks, or product names of the Licensor, except as required for
    +
    +reasonable and customary use in describing the origin of the Work and
    +
    +reproducing the content of the NOTICE file.
    +
    +
    +
    +7. Disclaimer of Warranty.
    +
    +Unless required by applicable law or agreed to in writing, Licensor
    +
    +provides the Work (and each Contributor provides its Contributions) on
    +
    +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
    +
    +express or implied, including, without limitation, any warranties or
    +
    +conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR
    +
    +A PARTICULAR PURPOSE. You are solely responsible for determining the
    +
    +appropriateness of using or redistributing the Work and assume any risks
    +
    +associated with Your exercise of permissions under this License.
    +
    +
    +
    +8. Limitation of Liability.
    +
    +In no event and under no legal theory, whether in tort (including
    +
    +negligence), contract, or otherwise, unless required by applicable law
    +
    +(such as deliberate and grossly negligent acts) or agreed to in writing,
    +
    +shall any Contributor be liable to You for damages, including any direct,
    +
    +indirect, special, incidental, or consequential damages of any character
    +
    +arising as a result of this License or out of the use or inability to
    +
    +use the Work (including but not limited to damages for loss of goodwill,
    +
    +work stoppage, computer failure or malfunction, or any and all other
    +
    +commercial damages or losses), even if such Contributor has been advised
    +
    +of the possibility of such damages.
    +
    +
    +
    +9. Accepting Warranty or Additional Liability.
    +
    +While redistributing the Work or Derivative Works thereof, You may
    +
    +choose to offer, and charge a fee for, acceptance of support, warranty,
    +
    +indemnity, or other liability obligations and/or rights consistent with
    +
    +this License. However, in accepting such obligations, You may act only
    +
    +on Your own behalf and on Your sole responsibility, not on behalf of
    +
    +any other Contributor, and only if You agree to indemnify, defend, and
    +
    +hold each Contributor harmless for any liability incurred by, or claims
    +
    +asserted against, such Contributor by reason of your accepting any such
    +
    +warranty or additional liability.
    +
    +
    +
    +END OF TERMS AND CONDITIONS 
    +
    +
    +
    +--------------- SECTION 2: GNU General Public License, V2.0 -----------
    +
    +		    GNU GENERAL PUBLIC LICENSE
    +		       Version 2, June 1991
    +
    + Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
    + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
    + Everyone is permitted to copy and distribute verbatim copies
    + of this license document, but changing it is not allowed.
    +
    +			    Preamble
    +
    +  The licenses for most software are designed to take away your
    +freedom to share and change it.  By contrast, the GNU General Public
    +License is intended to guarantee your freedom to share and change free
    +software--to make sure the software is free for all its users.  This
    +General Public License applies to most of the Free Software
    +Foundation's software and to any other program whose authors commit to
    +using it.  (Some other Free Software Foundation software is covered by
    +the GNU Lesser General Public License instead.)  You can apply it to
    +your programs, too.
    +
    +  When we speak of free software, we are referring to freedom, not
    +price.  Our General Public Licenses are designed to make sure that you
    +have the freedom to distribute copies of free software (and charge for
    +this service if you wish), that you receive source code or can get it
    +if you want it, that you can change the software or use pieces of it
    +in new free programs; and that you know you can do these things.
    +
    +  To protect your rights, we need to make restrictions that forbid
    +anyone to deny you these rights or to ask you to surrender the rights.
    +These restrictions translate to certain responsibilities for you if you
    +distribute copies of the software, or if you modify it.
    +
    +  For example, if you distribute copies of such a program, whether
    +gratis or for a fee, you must give the recipients all the rights that
    +you have.  You must make sure that they, too, receive or can get the
    +source code.  And you must show them these terms so they know their
    +rights.
    +
    +  We protect your rights with two steps: (1) copyright the software, and
    +(2) offer you this license which gives you legal permission to copy,
    +distribute and/or modify the software.
    +
    +  Also, for each author's protection and ours, we want to make certain
    +that everyone understands that there is no warranty for this free
    +software.  If the software is modified by someone else and passed on, we
    +want its recipients to know that what they have is not the original, so
    +that any problems introduced by others will not reflect on the original
    +authors' reputations.
    +
    +  Finally, any free program is threatened constantly by software
    +patents.  We wish to avoid the danger that redistributors of a free
    +program will individually obtain patent licenses, in effect making the
    +program proprietary.  To prevent this, we have made it clear that any
    +patent must be licensed for everyone's free use or not licensed at all.
    +
    +  The precise terms and conditions for copying, distribution and
    +modification follow.
    +
    +		    GNU GENERAL PUBLIC LICENSE
    +   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
    +
    +  0. This License applies to any program or other work which contains
    +a notice placed by the copyright holder saying it may be distributed
    +under the terms of this General Public License.  The "Program", below,
    +refers to any such program or work, and a "work based on the Program"
    +means either the Program or any derivative work under copyright law:
    +that is to say, a work containing the Program or a portion of it,
    +either verbatim or with modifications and/or translated into another
    +language.  (Hereinafter, translation is included without limitation in
    +the term "modification".)  Each licensee is addressed as "you".
    +
    +Activities other than copying, distribution and modification are not
    +covered by this License; they are outside its scope.  The act of
    +running the Program is not restricted, and the output from the Program
    +is covered only if its contents constitute a work based on the
    +Program (independent of having been made by running the Program).
    +Whether that is true depends on what the Program does.
    +
    +  1. You may copy and distribute verbatim copies of the Program's
    +source code as you receive it, in any medium, provided that you
    +conspicuously and appropriately publish on each copy an appropriate
    +copyright notice and disclaimer of warranty; keep intact all the
    +notices that refer to this License and to the absence of any warranty;
    +and give any other recipients of the Program a copy of this License
    +along with the Program.
    +
    +You may charge a fee for the physical act of transferring a copy, and
    +you may at your option offer warranty protection in exchange for a fee.
    +
    +  2. You may modify your copy or copies of the Program or any portion
    +of it, thus forming a work based on the Program, and copy and
    +distribute such modifications or work under the terms of Section 1
    +above, provided that you also meet all of these conditions:
    +
    +    a) You must cause the modified files to carry prominent notices
    +    stating that you changed the files and the date of any change.
    +
    +    b) You must cause any work that you distribute or publish, that in
    +    whole or in part contains or is derived from the Program or any
    +    part thereof, to be licensed as a whole at no charge to all third
    +    parties under the terms of this License.
    +
    +    c) If the modified program normally reads commands interactively
    +    when run, you must cause it, when started running for such
    +    interactive use in the most ordinary way, to print or display an
    +    announcement including an appropriate copyright notice and a
    +    notice that there is no warranty (or else, saying that you provide
    +    a warranty) and that users may redistribute the program under
    +    these conditions, and telling the user how to view a copy of this
    +    License.  (Exception: if the Program itself is interactive but
    +    does not normally print such an announcement, your work based on
    +    the Program is not required to print an announcement.)
    +
    +These requirements apply to the modified work as a whole.  If
    +identifiable sections of that work are not derived from the Program,
    +and can be reasonably considered independent and separate works in
    +themselves, then this License, and its terms, do not apply to those
    +sections when you distribute them as separate works.  But when you
    +distribute the same sections as part of a whole which is a work based
    +on the Program, the distribution of the whole must be on the terms of
    +this License, whose permissions for other licensees extend to the
    +entire whole, and thus to each and every part regardless of who wrote it.
    +
    +Thus, it is not the intent of this section to claim rights or contest
    +your rights to work written entirely by you; rather, the intent is to
    +exercise the right to control the distribution of derivative or
    +collective works based on the Program.
    +
    +In addition, mere aggregation of another work not based on the Program
    +with the Program (or with a work based on the Program) on a volume of
    +a storage or distribution medium does not bring the other work under
    +the scope of this License.
    +
    +  3. You may copy and distribute the Program (or a work based on it,
    +under Section 2) in object code or executable form under the terms of
    +Sections 1 and 2 above provided that you also do one of the following:
    +
    +    a) Accompany it with the complete corresponding machine-readable
    +    source code, which must be distributed under the terms of Sections
    +    1 and 2 above on a medium customarily used for software interchange; or,
    +
    +    b) Accompany it with a written offer, valid for at least three
    +    years, to give any third party, for a charge no more than your
    +    cost of physically performing source distribution, a complete
    +    machine-readable copy of the corresponding source code, to be
    +    distributed under the terms of Sections 1 and 2 above on a medium
    +    customarily used for software interchange; or,
    +
    +    c) Accompany it with the information you received as to the offer
    +    to distribute corresponding source code.  (This alternative is
    +    allowed only for noncommercial distribution and only if you
    +    received the program in object code or executable form with such
    +    an offer, in accord with Subsection b above.)
    +
    +The source code for a work means the preferred form of the work for
    +making modifications to it.  For an executable work, complete source
    +code means all the source code for all modules it contains, plus any
    +associated interface definition files, plus the scripts used to
    +control compilation and installation of the executable.  However, as a
    +special exception, the source code distributed need not include
    +anything that is normally distributed (in either source or binary
    +form) with the major components (compiler, kernel, and so on) of the
    +operating system on which the executable runs, unless that component
    +itself accompanies the executable.
    +
    +If distribution of executable or object code is made by offering
    +access to copy from a designated place, then offering equivalent
    +access to copy the source code from the same place counts as
    +distribution of the source code, even though third parties are not
    +compelled to copy the source along with the object code.
    +
    +  4. You may not copy, modify, sublicense, or distribute the Program
    +except as expressly provided under this License.  Any attempt
    +otherwise to copy, modify, sublicense or distribute the Program is
    +void, and will automatically terminate your rights under this License.
    +However, parties who have received copies, or rights, from you under
    +this License will not have their licenses terminated so long as such
    +parties remain in full compliance.
    +
    +  5. You are not required to accept this License, since you have not
    +signed it.  However, nothing else grants you permission to modify or
    +distribute the Program or its derivative works.  These actions are
    +prohibited by law if you do not accept this License.  Therefore, by
    +modifying or distributing the Program (or any work based on the
    +Program), you indicate your acceptance of this License to do so, and
    +all its terms and conditions for copying, distributing or modifying
    +the Program or works based on it.
    +
    +  6. Each time you redistribute the Program (or any work based on the
    +Program), the recipient automatically receives a license from the
    +original licensor to copy, distribute or modify the Program subject to
    +these terms and conditions.  You may not impose any further
    +restrictions on the recipients' exercise of the rights granted herein.
    +You are not responsible for enforcing compliance by third parties to
    +this License.
    +
    +  7. If, as a consequence of a court judgment or allegation of patent
    +infringement or for any other reason (not limited to patent issues),
    +conditions are imposed on you (whether by court order, agreement or
    +otherwise) that contradict the conditions of this License, they do not
    +excuse you from the conditions of this License.  If you cannot
    +distribute so as to satisfy simultaneously your obligations under this
    +License and any other pertinent obligations, then as a consequence you
    +may not distribute the Program at all.  For example, if a patent
    +license would not permit royalty-free redistribution of the Program by
    +all those who receive copies directly or indirectly through you, then
    +the only way you could satisfy both it and this License would be to
    +refrain entirely from distribution of the Program.
    +
    +If any portion of this section is held invalid or unenforceable under
    +any particular circumstance, the balance of the section is intended to
    +apply and the section as a whole is intended to apply in other
    +circumstances.
    +
    +It is not the purpose of this section to induce you to infringe any
    +patents or other property right claims or to contest validity of any
    +such claims; this section has the sole purpose of protecting the
    +integrity of the free software distribution system, which is
    +implemented by public license practices.  Many people have made
    +generous contributions to the wide range of software distributed
    +through that system in reliance on consistent application of that
    +system; it is up to the author/donor to decide if he or she is willing
    +to distribute software through any other system and a licensee cannot
    +impose that choice.
    +
    +This section is intended to make thoroughly clear what is believed to
    +be a consequence of the rest of this License.
    +
    +  8. If the distribution and/or use of the Program is restricted in
    +certain countries either by patents or by copyrighted interfaces, the
    +original copyright holder who places the Program under this License
    +may add an explicit geographical distribution limitation excluding
    +those countries, so that distribution is permitted only in or among
    +countries not thus excluded.  In such case, this License incorporates
    +the limitation as if written in the body of this License.
    +
    +  9. The Free Software Foundation may publish revised and/or new versions
    +of the General Public License from time to time.  Such new versions will
    +be similar in spirit to the present version, but may differ in detail to
    +address new problems or concerns.
    +
    +Each version is given a distinguishing version number.  If the Program
    +specifies a version number of this License which applies to it and "any
    +later version", you have the option of following the terms and conditions
    +either of that version or of any later version published by the Free
    +Software Foundation.  If the Program does not specify a version number of
    +this License, you may choose any version ever published by the Free Software
    +Foundation.
    +
    +  10. If you wish to incorporate parts of the Program into other free
    +programs whose distribution conditions are different, write to the author
    +to ask for permission.  For software which is copyrighted by the Free
    +Software Foundation, write to the Free Software Foundation; we sometimes
    +make exceptions for this.  Our decision will be guided by the two goals
    +of preserving the free status of all derivatives of our free software and
    +of promoting the sharing and reuse of software generally.
    +
    +			    NO WARRANTY
    +
    +  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
    +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
    +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
    +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
    +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
    +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
    +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
    +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
    +REPAIR OR CORRECTION.
    +
    +  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
    +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
    +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
    +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
    +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
    +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
    +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
    +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
    +POSSIBILITY OF SUCH DAMAGES.
    +
    +		     END OF TERMS AND CONDITIONS
    +
    +	    How to Apply These Terms to Your New Programs
    +
    +  If you develop a new program, and you want it to be of the greatest
    +possible use to the public, the best way to achieve this is to make it
    +free software which everyone can redistribute and change under these terms.
    +
    +  To do so, attach the following notices to the program.  It is safest
    +to attach them to the start of each source file to most effectively
    +convey the exclusion of warranty; and each file should have at least
    +the "copyright" line and a pointer to where the full notice is found.
    +
    +    <one line to give the program's name and a brief idea of what it does.>
    +    Copyright (C) <year>  <name of author>
    +
    +    This program is free software; you can redistribute it and/or modify
    +    it under the terms of the GNU General Public License as published by
    +    the Free Software Foundation; either version 2 of the License, or
    +    (at your option) any later version.
    +
    +    This program is distributed in the hope that it will be useful,
    +    but WITHOUT ANY WARRANTY; without even the implied warranty of
    +    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    +    GNU General Public License for more details.
    +
    +    You should have received a copy of the GNU General Public License along
    +    with this program; if not, write to the Free Software Foundation, Inc.,
    +    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
    +
    +Also add information on how to contact you by electronic and paper mail.
    +
    +If the program is interactive, make it output a short notice like this
    +when it starts in an interactive mode:
    +
    +    Gnomovision version 69, Copyright (C) year name of author
    +    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    +    This is free software, and you are welcome to redistribute it
    +    under certain conditions; type `show c' for details.
    +
    +The hypothetical commands `show w' and `show c' should show the appropriate
    +parts of the General Public License.  Of course, the commands you use may
    +be called something other than `show w' and `show c'; they could even be
    +mouse-clicks or menu items--whatever suits your program.
    +
    +You should also get your employer (if you work as a programmer) or your
    +school, if any, to sign a "copyright disclaimer" for the program, if
    +necessary.  Here is a sample; alter the names:
    +
    +  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
    +  `Gnomovision' (which makes passes at compilers) written by James Hacker.
    +
    +  <signature of Ty Coon>, 1 April 1989
    +  Ty Coon, President of Vice
    +
    +This General Public License does not permit incorporating your program into
    +proprietary programs.  If your program is a subroutine library, you may
    +consider it more useful to permit linking proprietary applications with the
    +library.  If this is what you want to do, use the GNU Lesser General
    +Public License instead of this License.
    +
    +
    +
    +--------------- SECTION 3: GNU General Public License, V3.0 -----------
    +
    +                    GNU GENERAL PUBLIC LICENSE
    +
    +                       Version 3, 29 June 2007
    +
    +
    +
    + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
    +
    + Everyone is permitted to copy and distribute verbatim copies
    +
    + of this license document, but changing it is not allowed.
    +
    +
    +
    +                            Preamble
    +
    +
    +
    +  The GNU General Public License is a free, copyleft license for
    +
    +software and other kinds of works.
    +
    +
    +
    +  The licenses for most software and other practical works are designed
    +
    +to take away your freedom to share and change the works.  By contrast,
    +
    +the GNU General Public License is intended to guarantee your freedom to
    +
    +share and change all versions of a program--to make sure it remains free
    +
    +software for all its users.  We, the Free Software Foundation, use the
    +
    +GNU General Public License for most of our software; it applies also to
    +
    +any other work released this way by its authors.  You can apply it to
    +
    +your programs, too.
    +
    +
    +
    +  When we speak of free software, we are referring to freedom, not
    +
    +price.  Our General Public Licenses are designed to make sure that you
    +
    +have the freedom to distribute copies of free software (and charge for
    +
    +them if you wish), that you receive source code or can get it if you
    +
    +want it, that you can change the software or use pieces of it in new
    +
    +free programs, and that you know you can do these things.
    +
    +
    +
    +  To protect your rights, we need to prevent others from denying you
    +
    +these rights or asking you to surrender the rights.  Therefore, you have
    +
    +certain responsibilities if you distribute copies of the software, or if
    +
    +you modify it: responsibilities to respect the freedom of others.
    +
    +
    +
    +  For example, if you distribute copies of such a program, whether
    +
    +gratis or for a fee, you must pass on to the recipients the same
    +
    +freedoms that you received.  You must make sure that they, too, receive
    +
    +or can get the source code.  And you must show them these terms so they
    +
    +know their rights.
    +
    +
    +
    +  Developers that use the GNU GPL protect your rights with two steps:
    +
    +(1) assert copyright on the software, and (2) offer you this License
    +
    +giving you legal permission to copy, distribute and/or modify it.
    +
    +
    +
    +  For the developers' and authors' protection, the GPL clearly explains
    +
    +that there is no warranty for this free software.  For both users' and
    +
    +authors' sake, the GPL requires that modified versions be marked as
    +
    +changed, so that their problems will not be attributed erroneously to
    +
    +authors of previous versions.
    +
    +
    +
    +  Some devices are designed to deny users access to install or run
    +
    +modified versions of the software inside them, although the manufacturer
    +
    +can do so.  This is fundamentally incompatible with the aim of
    +
    +protecting users' freedom to change the software.  The systematic
    +
    +pattern of such abuse occurs in the area of products for individuals to
    +
    +use, which is precisely where it is most unacceptable.  Therefore, we
    +
    +have designed this version of the GPL to prohibit the practice for those
    +
    +products.  If such problems arise substantially in other domains, we
    +
    +stand ready to extend this provision to those domains in future versions
    +
    +of the GPL, as needed to protect the freedom of users.
    +
    +
    +
    +  Finally, every program is threatened constantly by software patents.
    +
    +States should not allow patents to restrict development and use of
    +
    +software on general-purpose computers, but in those that do, we wish to
    +
    +avoid the special danger that patents applied to a free program could
    +
    +make it effectively proprietary.  To prevent this, the GPL assures that
    +
    +patents cannot be used to render the program non-free.
    +
    +
    +
    +  The precise terms and conditions for copying, distribution and
    +
    +modification follow.
    +
    +
    +
    +                       TERMS AND CONDITIONS
    +
    +
    +
    +  0. Definitions.
    +
    +
    +
    +  "This License" refers to version 3 of the GNU General Public License.
    +
    +
    +
    +  "Copyright" also means copyright-like laws that apply to other kinds of
    +
    +works, such as semiconductor masks.
    +
    +
    +
    +  "The Program" refers to any copyrightable work licensed under this
    +
    +License.  Each licensee is addressed as "you".  "Licensees" and
    +
    +"recipients" may be individuals or organizations.
    +
    +
    +
    +  To "modify" a work means to copy from or adapt all or part of the work
    +
    +in a fashion requiring copyright permission, other than the making of an
    +
    +exact copy.  The resulting work is called a "modified version" of the
    +
    +earlier work or a work "based on" the earlier work.
    +
    +
    +
    +  A "covered work" means either the unmodified Program or a work based
    +
    +on the Program.
    +
    +
    +
    +  To "propagate" a work means to do anything with it that, without
    +
    +permission, would make you directly or secondarily liable for
    +
    +infringement under applicable copyright law, except executing it on a
    +
    +computer or modifying a private copy.  Propagation includes copying,
    +
    +distribution (with or without modification), making available to the
    +
    +public, and in some countries other activities as well.
    +
    +
    +
    +  To "convey" a work means any kind of propagation that enables other
    +
    +parties to make or receive copies.  Mere interaction with a user through
    +
    +a computer network, with no transfer of a copy, is not conveying.
    +
    +
    +
    +  An interactive user interface displays "Appropriate Legal Notices"
    +
    +to the extent that it includes a convenient and prominently visible
    +
    +feature that (1) displays an appropriate copyright notice, and (2)
    +
    +tells the user that there is no warranty for the work (except to the
    +
    +extent that warranties are provided), that licensees may convey the
    +
    +work under this License, and how to view a copy of this License.  If
    +
    +the interface presents a list of user commands or options, such as a
    +
    +menu, a prominent item in the list meets this criterion.
    +
    +
    +
    +  1. Source Code.
    +
    +
    +
    +  The "source code" for a work means the preferred form of the work
    +
    +for making modifications to it.  "Object code" means any non-source
    +
    +form of a work.
    +
    +
    +
    +  A "Standard Interface" means an interface that either is an official
    +
    +standard defined by a recognized standards body, or, in the case of
    +
    +interfaces specified for a particular programming language, one that
    +
    +is widely used among developers working in that language.
    +
    +
    +
    +  The "System Libraries" of an executable work include anything, other
    +
    +than the work as a whole, that (a) is included in the normal form of
    +
    +packaging a Major Component, but which is not part of that Major
    +
    +Component, and (b) serves only to enable use of the work with that
    +
    +Major Component, or to implement a Standard Interface for which an
    +
    +implementation is available to the public in source code form.  A
    +
    +"Major Component", in this context, means a major essential component
    +
    +(kernel, window system, and so on) of the specific operating system
    +
    +(if any) on which the executable work runs, or a compiler used to
    +
    +produce the work, or an object code interpreter used to run it.
    +
    +
    +
    +  The "Corresponding Source" for a work in object code form means all
    +
    +the source code needed to generate, install, and (for an executable
    +
    +work) run the object code and to modify the work, including scripts to
    +
    +control those activities.  However, it does not include the work's
    +
    +System Libraries, or general-purpose tools or generally available free
    +
    +programs which are used unmodified in performing those activities but
    +
    +which are not part of the work.  For example, Corresponding Source
    +
    +includes interface definition files associated with source files for
    +
    +the work, and the source code for shared libraries and dynamically
    +
    +linked subprograms that the work is specifically designed to require,
    +
    +such as by intimate data communication or control flow between those
    +
    +subprograms and other parts of the work.
    +
    +
    +
    +  The Corresponding Source need not include anything that users
    +
    +can regenerate automatically from other parts of the Corresponding
    +
    +Source.
    +
    +
    +
    +  The Corresponding Source for a work in source code form is that
    +
    +same work.
    +
    +
    +
    +  2. Basic Permissions.
    +
    +
    +
    +  All rights granted under this License are granted for the term of
    +
    +copyright on the Program, and are irrevocable provided the stated
    +
    +conditions are met.  This License explicitly affirms your unlimited
    +
    +permission to run the unmodified Program.  The output from running a
    +
    +covered work is covered by this License only if the output, given its
    +
    +content, constitutes a covered work.  This License acknowledges your
    +
    +rights of fair use or other equivalent, as provided by copyright law.
    +
    +
    +
    +  You may make, run and propagate covered works that you do not
    +
    +convey, without conditions so long as your license otherwise remains
    +
    +in force.  You may convey covered works to others for the sole purpose
    +
    +of having them make modifications exclusively for you, or provide you
    +
    +with facilities for running those works, provided that you comply with
    +
    +the terms of this License in conveying all material for which you do
    +
    +not control copyright.  Those thus making or running the covered works
    +
    +for you must do so exclusively on your behalf, under your direction
    +
    +and control, on terms that prohibit them from making any copies of
    +
    +your copyrighted material outside their relationship with you.
    +
    +
    +
    +  Conveying under any other circumstances is permitted solely under
    +
    +the conditions stated below.  Sublicensing is not allowed; section 10
    +
    +makes it unnecessary.
    +
    +
    +
    +  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
    +
    +
    +
    +  No covered work shall be deemed part of an effective technological
    +
    +measure under any applicable law fulfilling obligations under article
    +
    +11 of the WIPO copyright treaty adopted on 20 December 1996, or
    +
    +similar laws prohibiting or restricting circumvention of such
    +
    +measures.
    +
    +
    +
    +  When you convey a covered work, you waive any legal power to forbid
    +
    +circumvention of technological measures to the extent such circumvention
    +
    +is effected by exercising rights under this License with respect to
    +
    +the covered work, and you disclaim any intention to limit operation or
    +
    +modification of the work as a means of enforcing, against the work's
    +
    +users, your or third parties' legal rights to forbid circumvention of
    +
    +technological measures.
    +
    +
    +
    +  4. Conveying Verbatim Copies.
    +
    +
    +
    +  You may convey verbatim copies of the Program's source code as you
    +
    +receive it, in any medium, provided that you conspicuously and
    +
    +appropriately publish on each copy an appropriate copyright notice;
    +
    +keep intact all notices stating that this License and any
    +
    +non-permissive terms added in accord with section 7 apply to the code;
    +
    +keep intact all notices of the absence of any warranty; and give all
    +
    +recipients a copy of this License along with the Program.
    +
    +
    +
    +  You may charge any price or no price for each copy that you convey,
    +
    +and you may offer support or warranty protection for a fee.
    +
    +
    +
    +  5. Conveying Modified Source Versions.
    +
    +
    +
    +  You may convey a work based on the Program, or the modifications to
    +
    +produce it from the Program, in the form of source code under the
    +
    +terms of section 4, provided that you also meet all of these conditions:
    +
    +
    +
    +    a) The work must carry prominent notices stating that you modified
    +
    +    it, and giving a relevant date.
    +
    +
    +
    +    b) The work must carry prominent notices stating that it is
    +
    +    released under this License and any conditions added under section
    +
    +    7.  This requirement modifies the requirement in section 4 to
    +
    +    "keep intact all notices".
    +
    +
    +
    +    c) You must license the entire work, as a whole, under this
    +
    +    License to anyone who comes into possession of a copy.  This
    +
    +    License will therefore apply, along with any applicable section 7
    +
    +    additional terms, to the whole of the work, and all its parts,
    +
    +    regardless of how they are packaged.  This License gives no
    +
    +    permission to license the work in any other way, but it does not
    +
    +    invalidate such permission if you have separately received it.
    +
    +
    +
    +    d) If the work has interactive user interfaces, each must display
    +
    +    Appropriate Legal Notices; however, if the Program has interactive
    +
    +    interfaces that do not display Appropriate Legal Notices, your
    +
    +    work need not make them do so.
    +
    +
    +
    +  A compilation of a covered work with other separate and independent
    +
    +works, which are not by their nature extensions of the covered work,
    +
    +and which are not combined with it such as to form a larger program,
    +
    +in or on a volume of a storage or distribution medium, is called an
    +
    +"aggregate" if the compilation and its resulting copyright are not
    +
    +used to limit the access or legal rights of the compilation's users
    +
    +beyond what the individual works permit.  Inclusion of a covered work
    +
    +in an aggregate does not cause this License to apply to the other
    +
    +parts of the aggregate.
    +
    +
    +
    +  6. Conveying Non-Source Forms.
    +
    +
    +
    +  You may convey a covered work in object code form under the terms
    +
    +of sections 4 and 5, provided that you also convey the
    +
    +machine-readable Corresponding Source under the terms of this License,
    +
    +in one of these ways:
    +
    +
    +
    +    a) Convey the object code in, or embodied in, a physical product
    +
    +    (including a physical distribution medium), accompanied by the
    +
    +    Corresponding Source fixed on a durable physical medium
    +
    +    customarily used for software interchange.
    +
    +
    +
    +    b) Convey the object code in, or embodied in, a physical product
    +
    +    (including a physical distribution medium), accompanied by a
    +
    +    written offer, valid for at least three years and valid for as
    +
    +    long as you offer spare parts or customer support for that product
    +
    +    model, to give anyone who possesses the object code either (1) a
    +
    +    copy of the Corresponding Source for all the software in the
    +
    +    product that is covered by this License, on a durable physical
    +
    +    medium customarily used for software interchange, for a price no
    +
    +    more than your reasonable cost of physically performing this
    +
    +    conveying of source, or (2) access to copy the
    +
    +    Corresponding Source from a network server at no charge.
    +
    +
    +
    +    c) Convey individual copies of the object code with a copy of the
    +
    +    written offer to provide the Corresponding Source.  This
    +
    +    alternative is allowed only occasionally and noncommercially, and
    +
    +    only if you received the object code with such an offer, in accord
    +
    +    with subsection 6b.
    +
    +
    +
    +    d) Convey the object code by offering access from a designated
    +
    +    place (gratis or for a charge), and offer equivalent access to the
    +
    +    Corresponding Source in the same way through the same place at no
    +
    +    further charge.  You need not require recipients to copy the
    +
    +    Corresponding Source along with the object code.  If the place to
    +
    +    copy the object code is a network server, the Corresponding Source
    +
    +    may be on a different server (operated by you or a third party)
    +
    +    that supports equivalent copying facilities, provided you maintain
    +
    +    clear directions next to the object code saying where to find the
    +
    +    Corresponding Source.  Regardless of what server hosts the
    +
    +    Corresponding Source, you remain obligated to ensure that it is
    +
    +    available for as long as needed to satisfy these requirements.
    +
    +
    +
    +    e) Convey the object code using peer-to-peer transmission, provided
    +
    +    you inform other peers where the object code and Corresponding
    +
    +    Source of the work are being offered to the general public at no
    +
    +    charge under subsection 6d.
    +
    +
    +
    +  A separable portion of the object code, whose source code is excluded
    +
    +from the Corresponding Source as a System Library, need not be
    +
    +included in conveying the object code work.
    +
    +
    +
    +  A "User Product" is either (1) a "consumer product", which means any
    +
    +tangible personal property which is normally used for personal, family,
    +
    +or household purposes, or (2) anything designed or sold for incorporation
    +
    +into a dwelling.  In determining whether a product is a consumer product,
    +
    +doubtful cases shall be resolved in favor of coverage.  For a particular
    +
    +product received by a particular user, "normally used" refers to a
    +
    +typical or common use of that class of product, regardless of the status
    +
    +of the particular user or of the way in which the particular user
    +
    +actually uses, or expects or is expected to use, the product.  A product
    +
    +is a consumer product regardless of whether the product has substantial
    +
    +commercial, industrial or non-consumer uses, unless such uses represent
    +
    +the only significant mode of use of the product.
    +
    +
    +
    +  "Installation Information" for a User Product means any methods,
    +
    +procedures, authorization keys, or other information required to install
    +
    +and execute modified versions of a covered work in that User Product from
    +
    +a modified version of its Corresponding Source.  The information must
    +
    +suffice to ensure that the continued functioning of the modified object
    +
    +code is in no case prevented or interfered with solely because
    +
    +modification has been made.
    +
    +
    +
    +  If you convey an object code work under this section in, or with, or
    +
    +specifically for use in, a User Product, and the conveying occurs as
    +
    +part of a transaction in which the right of possession and use of the
    +
    +User Product is transferred to the recipient in perpetuity or for a
    +
    +fixed term (regardless of how the transaction is characterized), the
    +
    +Corresponding Source conveyed under this section must be accompanied
    +
    +by the Installation Information.  But this requirement does not apply
    +
    +if neither you nor any third party retains the ability to install
    +
    +modified object code on the User Product (for example, the work has
    +
    +been installed in ROM).
    +
    +
    +
    +  The requirement to provide Installation Information does not include a
    +
    +requirement to continue to provide support service, warranty, or updates
    +
    +for a work that has been modified or installed by the recipient, or for
    +
    +the User Product in which it has been modified or installed.  Access to a
    +
    +network may be denied when the modification itself materially and
    +
    +adversely affects the operation of the network or violates the rules and
    +
    +protocols for communication across the network.
    +
    +
    +
    +  Corresponding Source conveyed, and Installation Information provided,
    +
    +in accord with this section must be in a format that is publicly
    +
    +documented (and with an implementation available to the public in
    +
    +source code form), and must require no special password or key for
    +
    +unpacking, reading or copying.
    +
    +
    +
    +  7. Additional Terms.
    +
    +
    +
    +  "Additional permissions" are terms that supplement the terms of this
    +
    +License by making exceptions from one or more of its conditions.
    +
    +Additional permissions that are applicable to the entire Program shall
    +
    +be treated as though they were included in this License, to the extent
    +
    +that they are valid under applicable law.  If additional permissions
    +
    +apply only to part of the Program, that part may be used separately
    +
    +under those permissions, but the entire Program remains governed by
    +
    +this License without regard to the additional permissions.
    +
    +
    +
    +  When you convey a copy of a covered work, you may at your option
    +
    +remove any additional permissions from that copy, or from any part of
    +
    +it.  (Additional permissions may be written to require their own
    +
    +removal in certain cases when you modify the work.)  You may place
    +
    +additional permissions on material, added by you to a covered work,
    +
    +for which you have or can give appropriate copyright permission.
    +
    +
    +
    +  Notwithstanding any other provision of this License, for material you
    +
    +add to a covered work, you may (if authorized by the copyright holders of
    +
    +that material) supplement the terms of this License with terms:
    +
    +
    +
    +    a) Disclaiming warranty or limiting liability differently from the
    +
    +    terms of sections 15 and 16 of this License; or
    +
    +
    +
    +    b) Requiring preservation of specified reasonable legal notices or
    +
    +    author attributions in that material or in the Appropriate Legal
    +
    +    Notices displayed by works containing it; or
    +
    +
    +
    +    c) Prohibiting misrepresentation of the origin of that material, or
    +
    +    requiring that modified versions of such material be marked in
    +
    +    reasonable ways as different from the original version; or
    +
    +
    +
    +    d) Limiting the use for publicity purposes of names of licensors or
    +
    +    authors of the material; or
    +
    +
    +
    +    e) Declining to grant rights under trademark law for use of some
    +
    +    trade names, trademarks, or service marks; or
    +
    +
    +
    +    f) Requiring indemnification of licensors and authors of that
    +
    +    material by anyone who conveys the material (or modified versions of
    +
    +    it) with contractual assumptions of liability to the recipient, for
    +
    +    any liability that these contractual assumptions directly impose on
    +
    +    those licensors and authors.
    +
    +
    +
    +  All other non-permissive additional terms are considered "further
    +
    +restrictions" within the meaning of section 10.  If the Program as you
    +
    +received it, or any part of it, contains a notice stating that it is
    +
    +governed by this License along with a term that is a further
    +
    +restriction, you may remove that term.  If a license document contains
    +
    +a further restriction but permits relicensing or conveying under this
    +
    +License, you may add to a covered work material governed by the terms
    +
    +of that license document, provided that the further restriction does
    +
    +not survive such relicensing or conveying.
    +
    +
    +
    +  If you add terms to a covered work in accord with this section, you
    +
    +must place, in the relevant source files, a statement of the
    +
    +additional terms that apply to those files, or a notice indicating
    +
    +where to find the applicable terms.
    +
    +
    +
    +  Additional terms, permissive or non-permissive, may be stated in the
    +
    +form of a separately written license, or stated as exceptions;
    +
    +the above requirements apply either way.
    +
    +
    +
    +  8. Termination.
    +
    +
    +
    +  You may not propagate or modify a covered work except as expressly
    +
    +provided under this License.  Any attempt otherwise to propagate or
    +
    +modify it is void, and will automatically terminate your rights under
    +
    +this License (including any patent licenses granted under the third
    +
    +paragraph of section 11).
    +
    +
    +
    +  However, if you cease all violation of this License, then your
    +
    +license from a particular copyright holder is reinstated (a)
    +
    +provisionally, unless and until the copyright holder explicitly and
    +
    +finally terminates your license, and (b) permanently, if the copyright
    +
    +holder fails to notify you of the violation by some reasonable means
    +
    +prior to 60 days after the cessation.
    +
    +
    +
    +  Moreover, your license from a particular copyright holder is
    +
    +reinstated permanently if the copyright holder notifies you of the
    +
    +violation by some reasonable means, this is the first time you have
    +
    +received notice of violation of this License (for any work) from that
    +
    +copyright holder, and you cure the violation prior to 30 days after
    +
    +your receipt of the notice.
    +
    +
    +
    +  Termination of your rights under this section does not terminate the
    +
    +licenses of parties who have received copies or rights from you under
    +
    +this License.  If your rights have been terminated and not permanently
    +
    +reinstated, you do not qualify to receive new licenses for the same
    +
    +material under section 10.
    +
    +
    +
    +  9. Acceptance Not Required for Having Copies.
    +
    +
    +
    +  You are not required to accept this License in order to receive or
    +
    +run a copy of the Program.  Ancillary propagation of a covered work
    +
    +occurring solely as a consequence of using peer-to-peer transmission
    +
    +to receive a copy likewise does not require acceptance.  However,
    +
    +nothing other than this License grants you permission to propagate or
    +
    +modify any covered work.  These actions infringe copyright if you do
    +
    +not accept this License.  Therefore, by modifying or propagating a
    +
    +covered work, you indicate your acceptance of this License to do so.
    +
    +
    +
    +  10. Automatic Licensing of Downstream Recipients.
    +
    +
    +
    +  Each time you convey a covered work, the recipient automatically
    +
    +receives a license from the original licensors, to run, modify and
    +
    +propagate that work, subject to this License.  You are not responsible
    +
    +for enforcing compliance by third parties with this License.
    +
    +
    +
    +  An "entity transaction" is a transaction transferring control of an
    +
    +organization, or substantially all assets of one, or subdividing an
    +
    +organization, or merging organizations.  If propagation of a covered
    +
    +work results from an entity transaction, each party to that
    +
    +transaction who receives a copy of the work also receives whatever
    +
    +licenses to the work the party's predecessor in interest had or could
    +
    +give under the previous paragraph, plus a right to possession of the
    +
    +Corresponding Source of the work from the predecessor in interest, if
    +
    +the predecessor has it or can get it with reasonable efforts.
    +
    +
    +
    +  You may not impose any further restrictions on the exercise of the
    +
    +rights granted or affirmed under this License.  For example, you may
    +
    +not impose a license fee, royalty, or other charge for exercise of
    +
    +rights granted under this License, and you may not initiate litigation
    +
    +(including a cross-claim or counterclaim in a lawsuit) alleging that
    +
    +any patent claim is infringed by making, using, selling, offering for
    +
    +sale, or importing the Program or any portion of it.
    +
    +
    +
    +  11. Patents.
    +
    +
    +
    +  A "contributor" is a copyright holder who authorizes use under this
    +
    +License of the Program or a work on which the Program is based.  The
    +
    +work thus licensed is called the contributor's "contributor version".
    +
    +
    +
    +  A contributor's "essential patent claims" are all patent claims
    +
    +owned or controlled by the contributor, whether already acquired or
    +
    +hereafter acquired, that would be infringed by some manner, permitted
    +
    +by this License, of making, using, or selling its contributor version,
    +
    +but do not include claims that would be infringed only as a
    +
    +consequence of further modification of the contributor version.  For
    +
    +purposes of this definition, "control" includes the right to grant
    +
    +patent sublicenses in a manner consistent with the requirements of
    +
    +this License.
    +
    +
    +
    +  Each contributor grants you a non-exclusive, worldwide, royalty-free
    +
    +patent license under the contributor's essential patent claims, to
    +
    +make, use, sell, offer for sale, import and otherwise run, modify and
    +
    +propagate the contents of its contributor version.
    +
    +
    +
    +  In the following three paragraphs, a "patent license" is any express
    +
    +agreement or commitment, however denominated, not to enforce a patent
    +
    +(such as an express permission to practice a patent or covenant not to
    +
    +sue for patent infringement).  To "grant" such a patent license to a
    +
    +party means to make such an agreement or commitment not to enforce a
    +
    +patent against the party.
    +
    +
    +
    +  If you convey a covered work, knowingly relying on a patent license,
    +
    +and the Corresponding Source of the work is not available for anyone
    +
    +to copy, free of charge and under the terms of this License, through a
    +
    +publicly available network server or other readily accessible means,
    +
    +then you must either (1) cause the Corresponding Source to be so
    +
    +available, or (2) arrange to deprive yourself of the benefit of the
    +
    +patent license for this particular work, or (3) arrange, in a manner
    +
    +consistent with the requirements of this License, to extend the patent
    +
    +license to downstream recipients.  "Knowingly relying" means you have
    +
    +actual knowledge that, but for the patent license, your conveying the
    +
    +covered work in a country, or your recipient's use of the covered work
    +
    +in a country, would infringe one or more identifiable patents in that
    +
    +country that you have reason to believe are valid.
    +
    +
    +
    +  If, pursuant to or in connection with a single transaction or
    +
    +arrangement, you convey, or propagate by procuring conveyance of, a
    +
    +covered work, and grant a patent license to some of the parties
    +
    +receiving the covered work authorizing them to use, propagate, modify
    +
    +or convey a specific copy of the covered work, then the patent license
    +
    +you grant is automatically extended to all recipients of the covered
    +
    +work and works based on it.
    +
    +
    +
    +  A patent license is "discriminatory" if it does not include within
    +
    +the scope of its coverage, prohibits the exercise of, or is
    +
    +conditioned on the non-exercise of one or more of the rights that are
    +
    +specifically granted under this License.  You may not convey a covered
    +
    +work if you are a party to an arrangement with a third party that is
    +
    +in the business of distributing software, under which you make payment
    +
    +to the third party based on the extent of your activity of conveying
    +
    +the work, and under which the third party grants, to any of the
    +
    +parties who would receive the covered work from you, a discriminatory
    +
    +patent license (a) in connection with copies of the covered work
    +
    +conveyed by you (or copies made from those copies), or (b) primarily
    +
    +for and in connection with specific products or compilations that
    +
    +contain the covered work, unless you entered into that arrangement,
    +
    +or that patent license was granted, prior to 28 March 2007.
    +
    +
    +
    +  Nothing in this License shall be construed as excluding or limiting
    +
    +any implied license or other defenses to infringement that may
    +
    +otherwise be available to you under applicable patent law.
    +
    +
    +
    +  12. No Surrender of Others' Freedom.
    +
    +
    +
    +  If conditions are imposed on you (whether by court order, agreement or
    +
    +otherwise) that contradict the conditions of this License, they do not
    +
    +excuse you from the conditions of this License.  If you cannot convey a
    +
    +covered work so as to satisfy simultaneously your obligations under this
    +
    +License and any other pertinent obligations, then as a consequence you may
    +
    +not convey it at all.  For example, if you agree to terms that obligate you
    +
    +to collect a royalty for further conveying from those to whom you convey
    +
    +the Program, the only way you could satisfy both those terms and this
    +
    +License would be to refrain entirely from conveying the Program.
    +
    +
    +
    +  13. Use with the GNU Affero General Public License.
    +
    +
    +
    +  Notwithstanding any other provision of this License, you have
    +
    +permission to link or combine any covered work with a work licensed
    +
    +under version 3 of the GNU Affero General Public License into a single
    +
    +combined work, and to convey the resulting work.  The terms of this
    +
    +License will continue to apply to the part which is the covered work,
    +
    +but the special requirements of the GNU Affero General Public License,
    +
    +section 13, concerning interaction through a network will apply to the
    +
    +combination as such.
    +
    +
    +
    +  14. Revised Versions of this License.
    +
    +
    +
    +  The Free Software Foundation may publish revised and/or new versions of
    +
    +the GNU General Public License from time to time.  Such new versions will
    +
    +be similar in spirit to the present version, but may differ in detail to
    +
    +address new problems or concerns.
    +
    +
    +
    +  Each version is given a distinguishing version number.  If the
    +
    +Program specifies that a certain numbered version of the GNU General
    +
    +Public License "or any later version" applies to it, you have the
    +
    +option of following the terms and conditions either of that numbered
    +
    +version or of any later version published by the Free Software
    +
    +Foundation.  If the Program does not specify a version number of the
    +
    +GNU General Public License, you may choose any version ever published
    +
    +by the Free Software Foundation.
    +
    +
    +
    +  If the Program specifies that a proxy can decide which future
    +
    +versions of the GNU General Public License can be used, that proxy's
    +
    +public statement of acceptance of a version permanently authorizes you
    +
    +to choose that version for the Program.
    +
    +
    +
    +  Later license versions may give you additional or different
    +
    +permissions.  However, no additional obligations are imposed on any
    +
    +author or copyright holder as a result of your choosing to follow a
    +
    +later version.
    +
    +
    +
    +  15. Disclaimer of Warranty.
    +
    +
    +
    +  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
    +
    +APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
    +
    +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
    +
    +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
    +
    +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
    +
    +PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
    +
    +IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
    +
    +ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
    +
    +
    +
    +  16. Limitation of Liability.
    +
    +
    +
    +  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
    +
    +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
    +
    +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
    +
    +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
    +
    +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
    +
    +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
    +
    +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
    +
    +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
    +
    +SUCH DAMAGES.
    +
    +
    +
    +  17. Interpretation of Sections 15 and 16.
    +
    +
    +
    +  If the disclaimer of warranty and limitation of liability provided
    +
    +above cannot be given local legal effect according to their terms,
    +
    +reviewing courts shall apply local law that most closely approximates
    +
    +an absolute waiver of all civil liability in connection with the
    +
    +Program, unless a warranty or assumption of liability accompanies a
    +
    +copy of the Program in return for a fee.
    +
    +
    +
    +                     END OF TERMS AND CONDITIONS
    +
    +
    +
    +            How to Apply These Terms to Your New Programs
    +
    +
    +
    +  If you develop a new program, and you want it to be of the greatest
    +
    +possible use to the public, the best way to achieve this is to make it
    +
    +free software which everyone can redistribute and change under these terms.
    +
    +
    +
    +  To do so, attach the following notices to the program.  It is safest
    +
    +to attach them to the start of each source file to most effectively
    +
    +state the exclusion of warranty; and each file should have at least
    +
    +the "copyright" line and a pointer to where the full notice is found.
    +
    +
    +
    +    <one line to give the program's name and a brief idea of what it does.>
    +
    +    Copyright (C) <year>  <name of author>
    +
    +
    +
    +    This program is free software: you can redistribute it and/or modify
    +
    +    it under the terms of the GNU General Public License as published by
    +
    +    the Free Software Foundation, either version 3 of the License, or
    +
    +    (at your option) any later version.
    +
    +
    +
    +    This program is distributed in the hope that it will be useful,
    +
    +    but WITHOUT ANY WARRANTY; without even the implied warranty of
    +
    +    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    +
    +    GNU General Public License for more details.
    +
    +
    +
    +    You should have received a copy of the GNU General Public License
    +
    +    along with this program.  If not, see <https://www.gnu.org/licenses/>.
    +
    +
    +
    +Also add information on how to contact you by electronic and paper mail.
    +
    +
    +
    +  If the program does terminal interaction, make it output a short
    +
    +notice like this when it starts in an interactive mode:
    +
    +
    +
    +    <program>  Copyright (C) <year>  <name of author>
    +
    +    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    +
    +    This is free software, and you are welcome to redistribute it
    +
    +    under certain conditions; type `show c' for details.
    +
    +
    +
    +The hypothetical commands `show w' and `show c' should show the appropriate
    +
    +parts of the General Public License.  Of course, your program's commands
    +
    +might be different; for a GUI interface, you would use an "about box".
    +
    +
    +
    +  You should also get your employer (if you work as a programmer) or school,
    +
    +if any, to sign a "copyright disclaimer" for the program, if necessary.
    +
    +For more information on this, and how to apply and follow the GNU GPL, see
    +
    +<https://www.gnu.org/licenses/>.
    +
    +
    +
    +  The GNU General Public License does not permit incorporating your program
    +
    +into proprietary programs.  If your program is a subroutine library, you
    +
    +may consider it more useful to permit linking proprietary applications with
    +
    +the library.  If this is what you want to do, use the GNU Lesser General
    +
    +Public License instead of this License.  But first, please read
    +
    +<https://www.gnu.org/licenses/why-not-lgpl.html>.
    +
    +
    +
    +--------------- SECTION 4: GNU Library General Public License, V2.0 -----------
    +
    +		  GNU LIBRARY GENERAL PUBLIC LICENSE
    +		       Version 2, June 1991
    +
    + Copyright (C) 1991 Free Software Foundation, Inc.
    + 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
    + Everyone is permitted to copy and distribute verbatim copies
    + of this license document, but changing it is not allowed.
    +
    +[This is the first released version of the library GPL.  It is
    + numbered 2 because it goes with version 2 of the ordinary GPL.]
    +
    +			    Preamble
    +
    +  The licenses for most software are designed to take away your
    +freedom to share and change it.  By contrast, the GNU General Public
    +Licenses are intended to guarantee your freedom to share and change
    +free software--to make sure the software is free for all its users.
    +
    +  This license, the Library General Public License, applies to some
    +specially designated Free Software Foundation software, and to any
    +other libraries whose authors decide to use it.  You can use it for
    +your libraries, too.
    +
    +  When we speak of free software, we are referring to freedom, not
    +price.  Our General Public Licenses are designed to make sure that you
    +have the freedom to distribute copies of free software (and charge for
    +this service if you wish), that you receive source code or can get it
    +if you want it, that you can change the software or use pieces of it
    +in new free programs; and that you know you can do these things.
    +
    +  To protect your rights, we need to make restrictions that forbid
    +anyone to deny you these rights or to ask you to surrender the rights.
    +These restrictions translate to certain responsibilities for you if
    +you distribute copies of the library, or if you modify it.
    +
    +  For example, if you distribute copies of the library, whether gratis
    +or for a fee, you must give the recipients all the rights that we gave
    +you.  You must make sure that they, too, receive or can get the source
    +code.  If you link a program with the library, you must provide
    +complete object files to the recipients so that they can relink them
    +with the library, after making changes to the library and recompiling
    +it.  And you must show them these terms so they know their rights.
    +
    +  Our method of protecting your rights has two steps: (1) copyright
    +the library, and (2) offer you this license which gives you legal
    +permission to copy, distribute and/or modify the library.
    +
    +  Also, for each distributor's protection, we want to make certain
    +that everyone understands that there is no warranty for this free
    +library.  If the library is modified by someone else and passed on, we
    +want its recipients to know that what they have is not the original
    +version, so that any problems introduced by others will not reflect on
    +the original authors' reputations.
    +
    +  Finally, any free program is threatened constantly by software
    +patents.  We wish to avoid the danger that companies distributing free
    +software will individually obtain patent licenses, thus in effect
    +transforming the program into proprietary software.  To prevent this,
    +we have made it clear that any patent must be licensed for everyone's
    +free use or not licensed at all.
    +
    +  Most GNU software, including some libraries, is covered by the ordinary
    +GNU General Public License, which was designed for utility programs.  This
    +license, the GNU Library General Public License, applies to certain
    +designated libraries.  This license is quite different from the ordinary
    +one; be sure to read it in full, and don't assume that anything in it is
    +the same as in the ordinary license.
    +
    +  The reason we have a separate public license for some libraries is that
    +they blur the distinction we usually make between modifying or adding to a
    +program and simply using it.  Linking a program with a library, without
    +changing the library, is in some sense simply using the library, and is
    +analogous to running a utility program or application program.  However, in
    +a textual and legal sense, the linked executable is a combined work, a
    +derivative of the original library, and the ordinary General Public License
    +treats it as such.
    +
    +  Because of this blurred distinction, using the ordinary General
    +Public License for libraries did not effectively promote software
    +sharing, because most developers did not use the libraries.  We
    +concluded that weaker conditions might promote sharing better.
    +
    +  However, unrestricted linking of non-free programs would deprive the
    +users of those programs of all benefit from the free status of the
    +libraries themselves.  This Library General Public License is intended to
    +permit developers of non-free programs to use free libraries, while
    +preserving your freedom as a user of such programs to change the free
    +libraries that are incorporated in them.  (We have not seen how to achieve
    +this as regards changes in header files, but we have achieved it as regards
    +changes in the actual functions of the Library.)  The hope is that this
    +will lead to faster development of free libraries.
    +
    +  The precise terms and conditions for copying, distribution and
    +modification follow.  Pay close attention to the difference between a
    +"work based on the library" and a "work that uses the library".  The
    +former contains code derived from the library, while the latter only
    +works together with the library.
    +
    +  Note that it is possible for a library to be covered by the ordinary
    +General Public License rather than by this special one.
    +
    +		  GNU LIBRARY GENERAL PUBLIC LICENSE
    +   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
    +
    +  0. This License Agreement applies to any software library which
    +contains a notice placed by the copyright holder or other authorized
    +party saying it may be distributed under the terms of this Library
    +General Public License (also called "this License").  Each licensee is
    +addressed as "you".
    +
    +  A "library" means a collection of software functions and/or data
    +prepared so as to be conveniently linked with application programs
    +(which use some of those functions and data) to form executables.
    +
    +  The "Library", below, refers to any such software library or work
    +which has been distributed under these terms.  A "work based on the
    +Library" means either the Library or any derivative work under
    +copyright law: that is to say, a work containing the Library or a
    +portion of it, either verbatim or with modifications and/or translated
    +straightforwardly into another language.  (Hereinafter, translation is
    +included without limitation in the term "modification".)
    +
    +  "Source code" for a work means the preferred form of the work for
    +making modifications to it.  For a library, complete source code means
    +all the source code for all modules it contains, plus any associated
    +interface definition files, plus the scripts used to control compilation
    +and installation of the library.
    +
    +  Activities other than copying, distribution and modification are not
    +covered by this License; they are outside its scope.  The act of
    +running a program using the Library is not restricted, and output from
    +such a program is covered only if its contents constitute a work based
    +on the Library (independent of the use of the Library in a tool for
    +writing it).  Whether that is true depends on what the Library does
    +and what the program that uses the Library does.
    +  
    +  1. You may copy and distribute verbatim copies of the Library's
    +complete source code as you receive it, in any medium, provided that
    +you conspicuously and appropriately publish on each copy an
    +appropriate copyright notice and disclaimer of warranty; keep intact
    +all the notices that refer to this License and to the absence of any
    +warranty; and distribute a copy of this License along with the
    +Library.
    +
    +  You may charge a fee for the physical act of transferring a copy,
    +and you may at your option offer warranty protection in exchange for a
    +fee.
    +
    +  2. You may modify your copy or copies of the Library or any portion
    +of it, thus forming a work based on the Library, and copy and
    +distribute such modifications or work under the terms of Section 1
    +above, provided that you also meet all of these conditions:
    +
    +    a) The modified work must itself be a software library.
    +
    +    b) You must cause the files modified to carry prominent notices
    +    stating that you changed the files and the date of any change.
    +
    +    c) You must cause the whole of the work to be licensed at no
    +    charge to all third parties under the terms of this License.
    +
    +    d) If a facility in the modified Library refers to a function or a
    +    table of data to be supplied by an application program that uses
    +    the facility, other than as an argument passed when the facility
    +    is invoked, then you must make a good faith effort to ensure that,
    +    in the event an application does not supply such function or
    +    table, the facility still operates, and performs whatever part of
    +    its purpose remains meaningful.
    +
    +    (For example, a function in a library to compute square roots has
    +    a purpose that is entirely well-defined independent of the
    +    application.  Therefore, Subsection 2d requires that any
    +    application-supplied function or table used by this function must
    +    be optional: if the application does not supply it, the square
    +    root function must still compute square roots.)
    +
    +These requirements apply to the modified work as a whole.  If
    +identifiable sections of that work are not derived from the Library,
    +and can be reasonably considered independent and separate works in
    +themselves, then this License, and its terms, do not apply to those
    +sections when you distribute them as separate works.  But when you
    +distribute the same sections as part of a whole which is a work based
    +on the Library, the distribution of the whole must be on the terms of
    +this License, whose permissions for other licensees extend to the
    +entire whole, and thus to each and every part regardless of who wrote
    +it.
    +
    +Thus, it is not the intent of this section to claim rights or contest
    +your rights to work written entirely by you; rather, the intent is to
    +exercise the right to control the distribution of derivative or
    +collective works based on the Library.
    +
    +In addition, mere aggregation of another work not based on the Library
    +with the Library (or with a work based on the Library) on a volume of
    +a storage or distribution medium does not bring the other work under
    +the scope of this License.
    +
    +  3. You may opt to apply the terms of the ordinary GNU General Public
    +License instead of this License to a given copy of the Library.  To do
    +this, you must alter all the notices that refer to this License, so
    +that they refer to the ordinary GNU General Public License, version 2,
    +instead of to this License.  (If a newer version than version 2 of the
    +ordinary GNU General Public License has appeared, then you can specify
    +that version instead if you wish.)  Do not make any other change in
    +these notices.
    +
    +  Once this change is made in a given copy, it is irreversible for
    +that copy, so the ordinary GNU General Public License applies to all
    +subsequent copies and derivative works made from that copy.
    +
    +  This option is useful when you wish to copy part of the code of
    +the Library into a program that is not a library.
    +
    +  4. You may copy and distribute the Library (or a portion or
    +derivative of it, under Section 2) in object code or executable form
    +under the terms of Sections 1 and 2 above provided that you accompany
    +it with the complete corresponding machine-readable source code, which
    +must be distributed under the terms of Sections 1 and 2 above on a
    +medium customarily used for software interchange.
    +
    +  If distribution of object code is made by offering access to copy
    +from a designated place, then offering equivalent access to copy the
    +source code from the same place satisfies the requirement to
    +distribute the source code, even though third parties are not
    +compelled to copy the source along with the object code.
    +
    +  5. A program that contains no derivative of any portion of the
    +Library, but is designed to work with the Library by being compiled or
    +linked with it, is called a "work that uses the Library".  Such a
    +work, in isolation, is not a derivative work of the Library, and
    +therefore falls outside the scope of this License.
    +
    +  However, linking a "work that uses the Library" with the Library
    +creates an executable that is a derivative of the Library (because it
    +contains portions of the Library), rather than a "work that uses the
    +library".  The executable is therefore covered by this License.
    +Section 6 states terms for distribution of such executables.
    +
    +  When a "work that uses the Library" uses material from a header file
    +that is part of the Library, the object code for the work may be a
    +derivative work of the Library even though the source code is not.
    +Whether this is true is especially significant if the work can be
    +linked without the Library, or if the work is itself a library.  The
    +threshold for this to be true is not precisely defined by law.
    +
    +  If such an object file uses only numerical parameters, data
    +structure layouts and accessors, and small macros and small inline
    +functions (ten lines or less in length), then the use of the object
    +file is unrestricted, regardless of whether it is legally a derivative
    +work.  (Executables containing this object code plus portions of the
    +Library will still fall under Section 6.)
    +
    +  Otherwise, if the work is a derivative of the Library, you may
    +distribute the object code for the work under the terms of Section 6.
    +Any executables containing that work also fall under Section 6,
    +whether or not they are linked directly with the Library itself.
    +
    +  6. As an exception to the Sections above, you may also compile or
    +link a "work that uses the Library" with the Library to produce a
    +work containing portions of the Library, and distribute that work
    +under terms of your choice, provided that the terms permit
    +modification of the work for the customer's own use and reverse
    +engineering for debugging such modifications.
    +
    +  You must give prominent notice with each copy of the work that the
    +Library is used in it and that the Library and its use are covered by
    +this License.  You must supply a copy of this License.  If the work
    +during execution displays copyright notices, you must include the
    +copyright notice for the Library among them, as well as a reference
    +directing the user to the copy of this License.  Also, you must do one
    +of these things:
    +
    +    a) Accompany the work with the complete corresponding
    +    machine-readable source code for the Library including whatever
    +    changes were used in the work (which must be distributed under
    +    Sections 1 and 2 above); and, if the work is an executable linked
    +    with the Library, with the complete machine-readable "work that
    +    uses the Library", as object code and/or source code, so that the
    +    user can modify the Library and then relink to produce a modified
    +    executable containing the modified Library.  (It is understood
    +    that the user who changes the contents of definitions files in the
    +    Library will not necessarily be able to recompile the application
    +    to use the modified definitions.)
    +
    +    b) Accompany the work with a written offer, valid for at
    +    least three years, to give the same user the materials
    +    specified in Subsection 6a, above, for a charge no more
    +    than the cost of performing this distribution.
    +
    +    c) If distribution of the work is made by offering access to copy
    +    from a designated place, offer equivalent access to copy the above
    +    specified materials from the same place.
    +
    +    d) Verify that the user has already received a copy of these
    +    materials or that you have already sent this user a copy.
    +
    +  For an executable, the required form of the "work that uses the
    +Library" must include any data and utility programs needed for
    +reproducing the executable from it.  However, as a special exception,
    +the source code distributed need not include anything that is normally
    +distributed (in either source or binary form) with the major
    +components (compiler, kernel, and so on) of the operating system on
    +which the executable runs, unless that component itself accompanies
    +the executable.
    +
    +  It may happen that this requirement contradicts the license
    +restrictions of other proprietary libraries that do not normally
    +accompany the operating system.  Such a contradiction means you cannot
    +use both them and the Library together in an executable that you
    +distribute.
    +
    +  7. You may place library facilities that are a work based on the
    +Library side-by-side in a single library together with other library
    +facilities not covered by this License, and distribute such a combined
    +library, provided that the separate distribution of the work based on
    +the Library and of the other library facilities is otherwise
    +permitted, and provided that you do these two things:
    +
    +    a) Accompany the combined library with a copy of the same work
    +    based on the Library, uncombined with any other library
    +    facilities.  This must be distributed under the terms of the
    +    Sections above.
    +
    +    b) Give prominent notice with the combined library of the fact
    +    that part of it is a work based on the Library, and explaining
    +    where to find the accompanying uncombined form of the same work.
    +
    +  8. You may not copy, modify, sublicense, link with, or distribute
    +the Library except as expressly provided under this License.  Any
    +attempt otherwise to copy, modify, sublicense, link with, or
    +distribute the Library is void, and will automatically terminate your
    +rights under this License.  However, parties who have received copies,
    +or rights, from you under this License will not have their licenses
    +terminated so long as such parties remain in full compliance.
    +
    +  9. You are not required to accept this License, since you have not
    +signed it.  However, nothing else grants you permission to modify or
    +distribute the Library or its derivative works.  These actions are
    +prohibited by law if you do not accept this License.  Therefore, by
    +modifying or distributing the Library (or any work based on the
    +Library), you indicate your acceptance of this License to do so, and
    +all its terms and conditions for copying, distributing or modifying
    +the Library or works based on it.
    +
    +  10. Each time you redistribute the Library (or any work based on the
    +Library), the recipient automatically receives a license from the
    +original licensor to copy, distribute, link with or modify the Library
    +subject to these terms and conditions.  You may not impose any further
    +restrictions on the recipients' exercise of the rights granted herein.
    +You are not responsible for enforcing compliance by third parties to
    +this License.
    +
    +  11. If, as a consequence of a court judgment or allegation of patent
    +infringement or for any other reason (not limited to patent issues),
    +conditions are imposed on you (whether by court order, agreement or
    +otherwise) that contradict the conditions of this License, they do not
    +excuse you from the conditions of this License.  If you cannot
    +distribute so as to satisfy simultaneously your obligations under this
    +License and any other pertinent obligations, then as a consequence you
    +may not distribute the Library at all.  For example, if a patent
    +license would not permit royalty-free redistribution of the Library by
    +all those who receive copies directly or indirectly through you, then
    +the only way you could satisfy both it and this License would be to
    +refrain entirely from distribution of the Library.
    +
    +If any portion of this section is held invalid or unenforceable under any
    +particular circumstance, the balance of the section is intended to apply,
    +and the section as a whole is intended to apply in other circumstances.
    +
    +It is not the purpose of this section to induce you to infringe any
    +patents or other property right claims or to contest validity of any
    +such claims; this section has the sole purpose of protecting the
    +integrity of the free software distribution system which is
    +implemented by public license practices.  Many people have made
    +generous contributions to the wide range of software distributed
    +through that system in reliance on consistent application of that
    +system; it is up to the author/donor to decide if he or she is willing
    +to distribute software through any other system and a licensee cannot
    +impose that choice.
    +
    +This section is intended to make thoroughly clear what is believed to
    +be a consequence of the rest of this License.
    +
    +  12. If the distribution and/or use of the Library is restricted in
    +certain countries either by patents or by copyrighted interfaces, the
    +original copyright holder who places the Library under this License may add
    +an explicit geographical distribution limitation excluding those countries,
    +so that distribution is permitted only in or among countries not thus
    +excluded.  In such case, this License incorporates the limitation as if
    +written in the body of this License.
    +
    +  13. The Free Software Foundation may publish revised and/or new
    +versions of the Library General Public License from time to time.
    +Such new versions will be similar in spirit to the present version,
    +but may differ in detail to address new problems or concerns.
    +
    +Each version is given a distinguishing version number.  If the Library
    +specifies a version number of this License which applies to it and
    +"any later version", you have the option of following the terms and
    +conditions either of that version or of any later version published by
    +the Free Software Foundation.  If the Library does not specify a
    +license version number, you may choose any version ever published by
    +the Free Software Foundation.
    +
    +  14. If you wish to incorporate parts of the Library into other free
    +programs whose distribution conditions are incompatible with these,
    +write to the author to ask for permission.  For software which is
    +copyrighted by the Free Software Foundation, write to the Free
    +Software Foundation; we sometimes make exceptions for this.  Our
    +decision will be guided by the two goals of preserving the free status
    +of all derivatives of our free software and of promoting the sharing
    +and reuse of software generally.
    +
    +			    NO WARRANTY
    +
    +  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
    +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
    +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
    +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
    +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
    +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
    +PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
    +LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
    +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
    +
    +  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
    +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
    +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
    +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
    +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
    +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
    +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
    +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
    +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
    +DAMAGES.
    +
    +		     END OF TERMS AND CONDITIONS
    +
    +           How to Apply These Terms to Your New Libraries
    +
    +  If you develop a new library, and you want it to be of the greatest
    +possible use to the public, we recommend making it free software that
    +everyone can redistribute and change.  You can do so by permitting
    +redistribution under these terms (or, alternatively, under the terms of the
    +ordinary General Public License).
    +
    +  To apply these terms, attach the following notices to the library.  It is
    +safest to attach them to the start of each source file to most effectively
    +convey the exclusion of warranty; and each file should have at least the
    +"copyright" line and a pointer to where the full notice is found.
    +
    +    <one line to give the library's name and a brief idea of what it does.>
    +    Copyright (C) <year>  <name of author>
    +
    +    This library is free software; you can redistribute it and/or
    +    modify it under the terms of the GNU Library General Public
    +    License as published by the Free Software Foundation; either
    +    version 2 of the License, or (at your option) any later version.
    +
    +    This library is distributed in the hope that it will be useful,
    +    but WITHOUT ANY WARRANTY; without even the implied warranty of
    +    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    +    Library General Public License for more details.
    +
    +    You should have received a copy of the GNU Library General Public
    +    License along with this library; if not, write to the Free Software
    +    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
    +
    +Also add information on how to contact you by electronic and paper mail.
    +
    +You should also get your employer (if you work as a programmer) or your
    +school, if any, to sign a "copyright disclaimer" for the library, if
    +necessary.  Here is a sample; alter the names:
    +
    +  Yoyodyne, Inc., hereby disclaims all copyright interest in the
    +  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
    +
    +  <signature of Ty Coon>, 1 April 1990
    +  Ty Coon, President of Vice
    +
    +That's all there is to it!
    +
    +
    +
    +--------------- SECTION 5: GNU Lesser General Public License, V2.1 -----------
    +
    +		  GNU LESSER GENERAL PUBLIC LICENSE
    +		       Version 2.1, February 1999
    +
    + Copyright (C) 1991, 1999 Free Software Foundation, Inc.
    + 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
    + Everyone is permitted to copy and distribute verbatim copies
    + of this license document, but changing it is not allowed.
    +
    +[This is the first released version of the Lesser GPL.  It also counts
    + as the successor of the GNU Library Public License, version 2, hence
    + the version number 2.1.]
    +
    +			    Preamble
    +
    +  The licenses for most software are designed to take away your
    +freedom to share and change it.  By contrast, the GNU General Public
    +Licenses are intended to guarantee your freedom to share and change
    +free software--to make sure the software is free for all its users.
    +
    +  This license, the Lesser General Public License, applies to some
    +specially designated software packages--typically libraries--of the
    +Free Software Foundation and other authors who decide to use it.  You
    +can use it too, but we suggest you first think carefully about whether
    +this license or the ordinary General Public License is the better
    +strategy to use in any particular case, based on the explanations below.
    +
    +  When we speak of free software, we are referring to freedom of use,
    +not price.  Our General Public Licenses are designed to make sure that
    +you have the freedom to distribute copies of free software (and charge
    +for this service if you wish); that you receive source code or can get
    +it if you want it; that you can change the software and use pieces of
    +it in new free programs; and that you are informed that you can do
    +these things.
    +
    +  To protect your rights, we need to make restrictions that forbid
    +distributors to deny you these rights or to ask you to surrender these
    +rights.  These restrictions translate to certain responsibilities for
    +you if you distribute copies of the library or if you modify it.
    +
    +  For example, if you distribute copies of the library, whether gratis
    +or for a fee, you must give the recipients all the rights that we gave
    +you.  You must make sure that they, too, receive or can get the source
    +code.  If you link other code with the library, you must provide
    +complete object files to the recipients, so that they can relink them
    +with the library after making changes to the library and recompiling
    +it.  And you must show them these terms so they know their rights.
    +
    +  We protect your rights with a two-step method: (1) we copyright the
    +library, and (2) we offer you this license, which gives you legal
    +permission to copy, distribute and/or modify the library.
    +
    +  To protect each distributor, we want to make it very clear that
    +there is no warranty for the free library.  Also, if the library is
    +modified by someone else and passed on, the recipients should know
    +that what they have is not the original version, so that the original
    +author's reputation will not be affected by problems that might be
    +introduced by others.
    +
    +  Finally, software patents pose a constant threat to the existence of
    +any free program.  We wish to make sure that a company cannot
    +effectively restrict the users of a free program by obtaining a
    +restrictive license from a patent holder.  Therefore, we insist that
    +any patent license obtained for a version of the library must be
    +consistent with the full freedom of use specified in this license.
    +
    +  Most GNU software, including some libraries, is covered by the
    +ordinary GNU General Public License.  This license, the GNU Lesser
    +General Public License, applies to certain designated libraries, and
    +is quite different from the ordinary General Public License.  We use
    +this license for certain libraries in order to permit linking those
    +libraries into non-free programs.
    +
    +  When a program is linked with a library, whether statically or using
    +a shared library, the combination of the two is legally speaking a
    +combined work, a derivative of the original library.  The ordinary
    +General Public License therefore permits such linking only if the
    +entire combination fits its criteria of freedom.  The Lesser General
    +Public License permits more lax criteria for linking other code with
    +the library.
    +
    +  We call this license the "Lesser" General Public License because it
    +does Less to protect the user's freedom than the ordinary General
    +Public License.  It also provides other free software developers Less
    +of an advantage over competing non-free programs.  These disadvantages
    +are the reason we use the ordinary General Public License for many
    +libraries.  However, the Lesser license provides advantages in certain
    +special circumstances.
    +
    +  For example, on rare occasions, there may be a special need to
    +encourage the widest possible use of a certain library, so that it becomes
    +a de-facto standard.  To achieve this, non-free programs must be
    +allowed to use the library.  A more frequent case is that a free
    +library does the same job as widely used non-free libraries.  In this
    +case, there is little to gain by limiting the free library to free
    +software only, so we use the Lesser General Public License.
    +
    +  In other cases, permission to use a particular library in non-free
    +programs enables a greater number of people to use a large body of
    +free software.  For example, permission to use the GNU C Library in
    +non-free programs enables many more people to use the whole GNU
    +operating system, as well as its variant, the GNU/Linux operating
    +system.
    +
    +  Although the Lesser General Public License is Less protective of the
    +users' freedom, it does ensure that the user of a program that is
    +linked with the Library has the freedom and the wherewithal to run
    +that program using a modified version of the Library.
    +
    +  The precise terms and conditions for copying, distribution and
    +modification follow.  Pay close attention to the difference between a
    +"work based on the library" and a "work that uses the library".  The
    +former contains code derived from the library, whereas the latter must
    +be combined with the library in order to run.
    +
    +		  GNU LESSER GENERAL PUBLIC LICENSE
    +   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
    +
    +  0. This License Agreement applies to any software library or other
    +program which contains a notice placed by the copyright holder or
    +other authorized party saying it may be distributed under the terms of
    +this Lesser General Public License (also called "this License").
    +Each licensee is addressed as "you".
    +
    +  A "library" means a collection of software functions and/or data
    +prepared so as to be conveniently linked with application programs
    +(which use some of those functions and data) to form executables.
    +
    +  The "Library", below, refers to any such software library or work
    +which has been distributed under these terms.  A "work based on the
    +Library" means either the Library or any derivative work under
    +copyright law: that is to say, a work containing the Library or a
    +portion of it, either verbatim or with modifications and/or translated
    +straightforwardly into another language.  (Hereinafter, translation is
    +included without limitation in the term "modification".)
    +
    +  "Source code" for a work means the preferred form of the work for
    +making modifications to it.  For a library, complete source code means
    +all the source code for all modules it contains, plus any associated
    +interface definition files, plus the scripts used to control compilation
    +and installation of the library.
    +
    +  Activities other than copying, distribution and modification are not
    +covered by this License; they are outside its scope.  The act of
    +running a program using the Library is not restricted, and output from
    +such a program is covered only if its contents constitute a work based
    +on the Library (independent of the use of the Library in a tool for
    +writing it).  Whether that is true depends on what the Library does
    +and what the program that uses the Library does.
    +
    +  1. You may copy and distribute verbatim copies of the Library's
    +complete source code as you receive it, in any medium, provided that
    +you conspicuously and appropriately publish on each copy an
    +appropriate copyright notice and disclaimer of warranty; keep intact
    +all the notices that refer to this License and to the absence of any
    +warranty; and distribute a copy of this License along with the
    +Library.
    +
    +  You may charge a fee for the physical act of transferring a copy,
    +and you may at your option offer warranty protection in exchange for a
    +fee.
    +
    +  2. You may modify your copy or copies of the Library or any portion
    +of it, thus forming a work based on the Library, and copy and
    +distribute such modifications or work under the terms of Section 1
    +above, provided that you also meet all of these conditions:
    +
    +    a) The modified work must itself be a software library.
    +
    +    b) You must cause the files modified to carry prominent notices
    +    stating that you changed the files and the date of any change.
    +
    +    c) You must cause the whole of the work to be licensed at no
    +    charge to all third parties under the terms of this License.
    +
    +    d) If a facility in the modified Library refers to a function or a
    +    table of data to be supplied by an application program that uses
    +    the facility, other than as an argument passed when the facility
    +    is invoked, then you must make a good faith effort to ensure that,
    +    in the event an application does not supply such function or
    +    table, the facility still operates, and performs whatever part of
    +    its purpose remains meaningful.
    +
    +    (For example, a function in a library to compute square roots has
    +    a purpose that is entirely well-defined independent of the
    +    application.  Therefore, Subsection 2d requires that any
    +    application-supplied function or table used by this function must
    +    be optional: if the application does not supply it, the square
    +    root function must still compute square roots.)
    +
    +These requirements apply to the modified work as a whole.  If
    +identifiable sections of that work are not derived from the Library,
    +and can be reasonably considered independent and separate works in
    +themselves, then this License, and its terms, do not apply to those
    +sections when you distribute them as separate works.  But when you
    +distribute the same sections as part of a whole which is a work based
    +on the Library, the distribution of the whole must be on the terms of
    +this License, whose permissions for other licensees extend to the
    +entire whole, and thus to each and every part regardless of who wrote
    +it.
    +
    +Thus, it is not the intent of this section to claim rights or contest
    +your rights to work written entirely by you; rather, the intent is to
    +exercise the right to control the distribution of derivative or
    +collective works based on the Library.
    +
    +In addition, mere aggregation of another work not based on the Library
    +with the Library (or with a work based on the Library) on a volume of
    +a storage or distribution medium does not bring the other work under
    +the scope of this License.
    +
    +  3. You may opt to apply the terms of the ordinary GNU General Public
    +License instead of this License to a given copy of the Library.  To do
    +this, you must alter all the notices that refer to this License, so
    +that they refer to the ordinary GNU General Public License, version 2,
    +instead of to this License.  (If a newer version than version 2 of the
    +ordinary GNU General Public License has appeared, then you can specify
    +that version instead if you wish.)  Do not make any other change in
    +these notices.
    +
    +  Once this change is made in a given copy, it is irreversible for
    +that copy, so the ordinary GNU General Public License applies to all
    +subsequent copies and derivative works made from that copy.
    +
    +  This option is useful when you wish to copy part of the code of
    +the Library into a program that is not a library.
    +
    +  4. You may copy and distribute the Library (or a portion or
    +derivative of it, under Section 2) in object code or executable form
    +under the terms of Sections 1 and 2 above provided that you accompany
    +it with the complete corresponding machine-readable source code, which
    +must be distributed under the terms of Sections 1 and 2 above on a
    +medium customarily used for software interchange.
    +
    +  If distribution of object code is made by offering access to copy
    +from a designated place, then offering equivalent access to copy the
    +source code from the same place satisfies the requirement to
    +distribute the source code, even though third parties are not
    +compelled to copy the source along with the object code.
    +
    +  5. A program that contains no derivative of any portion of the
    +Library, but is designed to work with the Library by being compiled or
    +linked with it, is called a "work that uses the Library".  Such a
    +work, in isolation, is not a derivative work of the Library, and
    +therefore falls outside the scope of this License.
    +
    +  However, linking a "work that uses the Library" with the Library
    +creates an executable that is a derivative of the Library (because it
    +contains portions of the Library), rather than a "work that uses the
    +library".  The executable is therefore covered by this License.
    +Section 6 states terms for distribution of such executables.
    +
    +  When a "work that uses the Library" uses material from a header file
    +that is part of the Library, the object code for the work may be a
    +derivative work of the Library even though the source code is not.
    +Whether this is true is especially significant if the work can be
    +linked without the Library, or if the work is itself a library.  The
    +threshold for this to be true is not precisely defined by law.
    +
    +  If such an object file uses only numerical parameters, data
    +structure layouts and accessors, and small macros and small inline
    +functions (ten lines or less in length), then the use of the object
    +file is unrestricted, regardless of whether it is legally a derivative
    +work.  (Executables containing this object code plus portions of the
    +Library will still fall under Section 6.)
    +
    +  Otherwise, if the work is a derivative of the Library, you may
    +distribute the object code for the work under the terms of Section 6.
    +Any executables containing that work also fall under Section 6,
    +whether or not they are linked directly with the Library itself.
    +
    +  6. As an exception to the Sections above, you may also combine or
    +link a "work that uses the Library" with the Library to produce a
    +work containing portions of the Library, and distribute that work
    +under terms of your choice, provided that the terms permit
    +modification of the work for the customer's own use and reverse
    +engineering for debugging such modifications.
    +
    +  You must give prominent notice with each copy of the work that the
    +Library is used in it and that the Library and its use are covered by
    +this License.  You must supply a copy of this License.  If the work
    +during execution displays copyright notices, you must include the
    +copyright notice for the Library among them, as well as a reference
    +directing the user to the copy of this License.  Also, you must do one
    +of these things:
    +
    +    a) Accompany the work with the complete corresponding
    +    machine-readable source code for the Library including whatever
    +    changes were used in the work (which must be distributed under
    +    Sections 1 and 2 above); and, if the work is an executable linked
    +    with the Library, with the complete machine-readable "work that
    +    uses the Library", as object code and/or source code, so that the
    +    user can modify the Library and then relink to produce a modified
    +    executable containing the modified Library.  (It is understood
    +    that the user who changes the contents of definitions files in the
    +    Library will not necessarily be able to recompile the application
    +    to use the modified definitions.)
    +
    +    b) Use a suitable shared library mechanism for linking with the
    +    Library.  A suitable mechanism is one that (1) uses at run time a
    +    copy of the library already present on the user's computer system,
    +    rather than copying library functions into the executable, and (2)
    +    will operate properly with a modified version of the library, if
    +    the user installs one, as long as the modified version is
    +    interface-compatible with the version that the work was made with.
    +
    +    c) Accompany the work with a written offer, valid for at
    +    least three years, to give the same user the materials
    +    specified in Subsection 6a, above, for a charge no more
    +    than the cost of performing this distribution.
    +
    +    d) If distribution of the work is made by offering access to copy
    +    from a designated place, offer equivalent access to copy the above
    +    specified materials from the same place.
    +
    +    e) Verify that the user has already received a copy of these
    +    materials or that you have already sent this user a copy.
    +
    +  For an executable, the required form of the "work that uses the
    +Library" must include any data and utility programs needed for
    +reproducing the executable from it.  However, as a special exception,
    +the materials to be distributed need not include anything that is
    +normally distributed (in either source or binary form) with the major
    +components (compiler, kernel, and so on) of the operating system on
    +which the executable runs, unless that component itself accompanies
    +the executable.
    +
    +  It may happen that this requirement contradicts the license
    +restrictions of other proprietary libraries that do not normally
    +accompany the operating system.  Such a contradiction means you cannot
    +use both them and the Library together in an executable that you
    +distribute.
    +
    +  7. You may place library facilities that are a work based on the
    +Library side-by-side in a single library together with other library
    +facilities not covered by this License, and distribute such a combined
    +library, provided that the separate distribution of the work based on
    +the Library and of the other library facilities is otherwise
    +permitted, and provided that you do these two things:
    +
    +    a) Accompany the combined library with a copy of the same work
    +    based on the Library, uncombined with any other library
    +    facilities.  This must be distributed under the terms of the
    +    Sections above.
    +
    +    b) Give prominent notice with the combined library of the fact
    +    that part of it is a work based on the Library, and explaining
    +    where to find the accompanying uncombined form of the same work.
    +
    +  8. You may not copy, modify, sublicense, link with, or distribute
    +the Library except as expressly provided under this License.  Any
    +attempt otherwise to copy, modify, sublicense, link with, or
    +distribute the Library is void, and will automatically terminate your
    +rights under this License.  However, parties who have received copies,
    +or rights, from you under this License will not have their licenses
    +terminated so long as such parties remain in full compliance.
    +
    +  9. You are not required to accept this License, since you have not
    +signed it.  However, nothing else grants you permission to modify or
    +distribute the Library or its derivative works.  These actions are
    +prohibited by law if you do not accept this License.  Therefore, by
    +modifying or distributing the Library (or any work based on the
    +Library), you indicate your acceptance of this License to do so, and
    +all its terms and conditions for copying, distributing or modifying
    +the Library or works based on it.
    +
    +  10. Each time you redistribute the Library (or any work based on the
    +Library), the recipient automatically receives a license from the
    +original licensor to copy, distribute, link with or modify the Library
    +subject to these terms and conditions.  You may not impose any further
    +restrictions on the recipients' exercise of the rights granted herein.
    +You are not responsible for enforcing compliance by third parties with
    +this License.
    +
    +  11. If, as a consequence of a court judgment or allegation of patent
    +infringement or for any other reason (not limited to patent issues),
    +conditions are imposed on you (whether by court order, agreement or
    +otherwise) that contradict the conditions of this License, they do not
    +excuse you from the conditions of this License.  If you cannot
    +distribute so as to satisfy simultaneously your obligations under this
    +License and any other pertinent obligations, then as a consequence you
    +may not distribute the Library at all.  For example, if a patent
    +license would not permit royalty-free redistribution of the Library by
    +all those who receive copies directly or indirectly through you, then
    +the only way you could satisfy both it and this License would be to
    +refrain entirely from distribution of the Library.
    +
    +If any portion of this section is held invalid or unenforceable under any
    +particular circumstance, the balance of the section is intended to apply,
    +and the section as a whole is intended to apply in other circumstances.
    +
    +It is not the purpose of this section to induce you to infringe any
    +patents or other property right claims or to contest validity of any
    +such claims; this section has the sole purpose of protecting the
    +integrity of the free software distribution system which is
    +implemented by public license practices.  Many people have made
    +generous contributions to the wide range of software distributed
    +through that system in reliance on consistent application of that
    +system; it is up to the author/donor to decide if he or she is willing
    +to distribute software through any other system and a licensee cannot
    +impose that choice.
    +
    +This section is intended to make thoroughly clear what is believed to
    +be a consequence of the rest of this License.
    +
    +  12. If the distribution and/or use of the Library is restricted in
    +certain countries either by patents or by copyrighted interfaces, the
    +original copyright holder who places the Library under this License may add
    +an explicit geographical distribution limitation excluding those countries,
    +so that distribution is permitted only in or among countries not thus
    +excluded.  In such case, this License incorporates the limitation as if
    +written in the body of this License.
    +
    +  13. The Free Software Foundation may publish revised and/or new
    +versions of the Lesser General Public License from time to time.
    +Such new versions will be similar in spirit to the present version,
    +but may differ in detail to address new problems or concerns.
    +
    +Each version is given a distinguishing version number.  If the Library
    +specifies a version number of this License which applies to it and
    +"any later version", you have the option of following the terms and
    +conditions either of that version or of any later version published by
    +the Free Software Foundation.  If the Library does not specify a
    +license version number, you may choose any version ever published by
    +the Free Software Foundation.
    +
    +  14. If you wish to incorporate parts of the Library into other free
    +programs whose distribution conditions are incompatible with these,
    +write to the author to ask for permission.  For software which is
    +copyrighted by the Free Software Foundation, write to the Free
    +Software Foundation; we sometimes make exceptions for this.  Our
    +decision will be guided by the two goals of preserving the free status
    +of all derivatives of our free software and of promoting the sharing
    +and reuse of software generally.
    +
    +			    NO WARRANTY
    +
    +  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
    +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
    +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
    +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
    +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
    +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
    +PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
    +LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
    +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
    +
    +  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
    +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
    +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
    +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
    +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
    +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
    +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
    +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
    +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
    +DAMAGES.
    +
    +		     END OF TERMS AND CONDITIONS
    +
    +           How to Apply These Terms to Your New Libraries
    +
    +  If you develop a new library, and you want it to be of the greatest
    +possible use to the public, we recommend making it free software that
    +everyone can redistribute and change.  You can do so by permitting
    +redistribution under these terms (or, alternatively, under the terms of the
    +ordinary General Public License).
    +
    +  To apply these terms, attach the following notices to the library.  It is
    +safest to attach them to the start of each source file to most effectively
    +convey the exclusion of warranty; and each file should have at least the
    +"copyright" line and a pointer to where the full notice is found.
    +
    +    <one line to give the library's name and a brief idea of what it does.>
    +    Copyright (C) <year>  <name of author>
    +
    +    This library is free software; you can redistribute it and/or
    +    modify it under the terms of the GNU Lesser General Public
    +    License as published by the Free Software Foundation; either
    +    version 2.1 of the License, or (at your option) any later version.
    +
    +    This library is distributed in the hope that it will be useful,
    +    but WITHOUT ANY WARRANTY; without even the implied warranty of
    +    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    +    Lesser General Public License for more details.
    +
    +    You should have received a copy of the GNU Lesser General Public
    +    License along with this library; if not, write to the Free Software
    +    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
    +
    +Also add information on how to contact you by electronic and paper mail.
    +
    +You should also get your employer (if you work as a programmer) or your
    +school, if any, to sign a "copyright disclaimer" for the library, if
    +necessary.  Here is a sample; alter the names:
    +
    +  Yoyodyne, Inc., hereby disclaims all copyright interest in the
    +  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
    +
    +  <signature of Ty Coon>, 1 April 1990
    +  Ty Coon, President of Vice
    +
    +That's all there is to it!
    +
    +
    +
    +--------------- SECTION 6: GNU Lesser General Public License, V3.0 -----------
    +
    +		   GNU LESSER GENERAL PUBLIC LICENSE
    +
    +                       Version 3, 29 June 2007
    +
    +
    +
    + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
    +
    + Everyone is permitted to copy and distribute verbatim copies
    +
    + of this license document, but changing it is not allowed.
    +
    +
    +
    +
    +
    +  This version of the GNU Lesser General Public License incorporates
    +
    +the terms and conditions of version 3 of the GNU General Public
    +
    +License, supplemented by the additional permissions listed below.
    +
    +
    +
    +  0. Additional Definitions.
    +
    +
    +
    +  As used herein, "this License" refers to version 3 of the GNU Lesser
    +
    +General Public License, and the "GNU GPL" refers to version 3 of the GNU
    +
    +General Public License.
    +
    +
    +
    +  "The Library" refers to a covered work governed by this License,
    +
    +other than an Application or a Combined Work as defined below.
    +
    +
    +
    +  An "Application" is any work that makes use of an interface provided
    +
    +by the Library, but which is not otherwise based on the Library.
    +
    +Defining a subclass of a class defined by the Library is deemed a mode
    +
    +of using an interface provided by the Library.
    +
    +
    +
    +  A "Combined Work" is a work produced by combining or linking an
    +
    +Application with the Library.  The particular version of the Library
    +
    +with which the Combined Work was made is also called the "Linked
    +
    +Version".
    +
    +
    +
    +  The "Minimal Corresponding Source" for a Combined Work means the
    +
    +Corresponding Source for the Combined Work, excluding any source code
    +
    +for portions of the Combined Work that, considered in isolation, are
    +
    +based on the Application, and not on the Linked Version.
    +
    +
    +
    +  The "Corresponding Application Code" for a Combined Work means the
    +
    +object code and/or source code for the Application, including any data
    +
    +and utility programs needed for reproducing the Combined Work from the
    +
    +Application, but excluding the System Libraries of the Combined Work.
    +
    +
    +
    +  1. Exception to Section 3 of the GNU GPL.
    +
    +
    +
    +  You may convey a covered work under sections 3 and 4 of this License
    +
    +without being bound by section 3 of the GNU GPL.
    +
    +
    +
    +  2. Conveying Modified Versions.
    +
    +
    +
    +  If you modify a copy of the Library, and, in your modifications, a
    +
    +facility refers to a function or data to be supplied by an Application
    +
    +that uses the facility (other than as an argument passed when the
    +
    +facility is invoked), then you may convey a copy of the modified
    +
    +version:
    +
    +
    +
    +   a) under this License, provided that you make a good faith effort to
    +
    +   ensure that, in the event an Application does not supply the
    +
    +   function or data, the facility still operates, and performs
    +
    +   whatever part of its purpose remains meaningful, or
    +
    +
    +
    +   b) under the GNU GPL, with none of the additional permissions of
    +
    +   this License applicable to that copy.
    +
    +
    +
    +  3. Object Code Incorporating Material from Library Header Files.
    +
    +
    +
    +  The object code form of an Application may incorporate material from
    +
    +a header file that is part of the Library.  You may convey such object
    +
    +code under terms of your choice, provided that, if the incorporated
    +
    +material is not limited to numerical parameters, data structure
    +
    +layouts and accessors, or small macros, inline functions and templates
    +
    +(ten or fewer lines in length), you do both of the following:
    +
    +
    +
    +   a) Give prominent notice with each copy of the object code that the
    +
    +   Library is used in it and that the Library and its use are
    +
    +   covered by this License.
    +
    +
    +
    +   b) Accompany the object code with a copy of the GNU GPL and this license
    +
    +   document.
    +
    +
    +
    +  4. Combined Works.
    +
    +
    +
    +  You may convey a Combined Work under terms of your choice that,
    +
    +taken together, effectively do not restrict modification of the
    +
    +portions of the Library contained in the Combined Work and reverse
    +
    +engineering for debugging such modifications, if you also do each of
    +
    +the following:
    +
    +
    +
    +   a) Give prominent notice with each copy of the Combined Work that
    +
    +   the Library is used in it and that the Library and its use are
    +
    +   covered by this License.
    +
    +
    +
    +   b) Accompany the Combined Work with a copy of the GNU GPL and this license
    +
    +   document.
    +
    +
    +
    +   c) For a Combined Work that displays copyright notices during
    +
    +   execution, include the copyright notice for the Library among
    +
    +   these notices, as well as a reference directing the user to the
    +
    +   copies of the GNU GPL and this license document.
    +
    +
    +
    +   d) Do one of the following:
    +
    +
    +
    +       0) Convey the Minimal Corresponding Source under the terms of this
    +
    +       License, and the Corresponding Application Code in a form
    +
    +       suitable for, and under terms that permit, the user to
    +
    +       recombine or relink the Application with a modified version of
    +
    +       the Linked Version to produce a modified Combined Work, in the
    +
    +       manner specified by section 6 of the GNU GPL for conveying
    +
    +       Corresponding Source.
    +
    +
    +
    +       1) Use a suitable shared library mechanism for linking with the
    +
    +       Library.  A suitable mechanism is one that (a) uses at run time
    +
    +       a copy of the Library already present on the user's computer
    +
    +       system, and (b) will operate properly with a modified version
    +
    +       of the Library that is interface-compatible with the Linked
    +
    +       Version.
    +
    +
    +
    +   e) Provide Installation Information, but only if you would otherwise
    +
    +   be required to provide such information under section 6 of the
    +
    +   GNU GPL, and only to the extent that such information is
    +
    +   necessary to install and execute a modified version of the
    +
    +   Combined Work produced by recombining or relinking the
    +
    +   Application with a modified version of the Linked Version. (If
    +
    +   you use option 4d0, the Installation Information must accompany
    +
    +   the Minimal Corresponding Source and Corresponding Application
    +
    +   Code. If you use option 4d1, you must provide the Installation
    +
    +   Information in the manner specified by section 6 of the GNU GPL
    +
    +   for conveying Corresponding Source.)
    +
    +
    +
    +  5. Combined Libraries.
    +
    +
    +
    +  You may place library facilities that are a work based on the
    +
    +Library side by side in a single library together with other library
    +
    +facilities that are not Applications and are not covered by this
    +
    +License, and convey such a combined library under terms of your
    +
    +choice, if you do both of the following:
    +
    +
    +
    +   a) Accompany the combined library with a copy of the same work based
    +
    +   on the Library, uncombined with any other library facilities,
    +
    +   conveyed under the terms of this License.
    +
    +
    +
    +   b) Give prominent notice with the combined library that part of it
    +
    +   is a work based on the Library, and explaining where to find the
    +
    +   accompanying uncombined form of the same work.
    +
    +
    +
    +  6. Revised Versions of the GNU Lesser General Public License.
    +
    +
    +
    +  The Free Software Foundation may publish revised and/or new versions
    +
    +of the GNU Lesser General Public License from time to time. Such new
    +
    +versions will be similar in spirit to the present version, but may
    +
    +differ in detail to address new problems or concerns.
    +
    +
    +
    +  Each version is given a distinguishing version number. If the
    +
    +Library as you received it specifies that a certain numbered version
    +
    +of the GNU Lesser General Public License "or any later version"
    +
    +applies to it, you have the option of following the terms and
    +
    +conditions either of that published version or of any later version
    +
    +published by the Free Software Foundation. If the Library as you
    +
    +received it does not specify a version number of the GNU Lesser
    +
    +General Public License, you may choose any version of the GNU Lesser
    +
    +General Public License ever published by the Free Software Foundation.
    +
    +
    +
    +  If the Library as you received it specifies that a proxy can decide
    +
    +whether future versions of the GNU Lesser General Public License shall
    +
    +apply, that proxy's public statement of acceptance of any version is
    +
    +permanent authorization for you to choose that version for the
    +
    +Library.
    +
    +
    +
    +--------------- SECTION 7: Mozilla Public License, V2.0 -----------
    +
    +Mozilla Public License
    +
    +Version 2.0
    +
    +
    +
    +1. Definitions
    +
    +
    +
    +1.1. “Contributor”
    +
    +means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software.
    +
    +
    +
    +1.2. “Contributor Version”
    +
    +means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor’s Contribution.
    +
    +
    +
    +1.3. “Contribution”
    +
    +means Covered Software of a particular Contributor.
    +
    +
    +
    +1.4. “Covered Software”
    +
    +means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof.
    +
    +
    +
    +1.5. “Incompatible With Secondary Licenses”
    +
    +means
    +
    +
    +
    +that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or
    +
    +
    +
    +that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License.
    +
    +
    +
    +1.6. “Executable Form”
    +
    +means any form of the work other than Source Code Form.
    +
    +
    +
    +1.7. “Larger Work”
    +
    +means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software.
    +
    +
    +
    +1.8. “License”
    +
    +means this document.
    +
    +
    +
    +1.9. “Licensable”
    +
    +means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License.
    +
    +
    +
    +1.10. “Modifications”
    +
    +means any of the following:
    +
    +
    +
    +any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or
    +
    +
    +
    +any new file in Source Code Form that contains any Covered Software.
    +
    +
    +
    +1.11. “Patent Claims” of a Contributor
    +
    +means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version.
    +
    +
    +
    +1.12. “Secondary License”
    +
    +means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses.
    +
    +
    +
    +1.13. “Source Code Form”
    +
    +means the form of the work preferred for making modifications.
    +
    +
    +
    +1.14. “You” (or “Your”)
    +
    +means an individual or a legal entity exercising rights under this License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
    +
    +
    +
    +2. License Grants and Conditions
    +
    +
    +
    +2.1. Grants
    +
    +
    +
    +Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
    +
    +
    +
    +(a) under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and
    +
    +
    +
    +(b) under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version.
    +
    +
    +
    +2.2. Effective Date
    +
    +
    +
    +The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution.
    +
    +
    +
    +2.3. Limitations on Grant Scope
    +
    +
    +
    +The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor:
    +
    +
    +
    +for any code that a Contributor has removed from Covered Software; or
    +
    +
    +
    +for infringements caused by: (i) Your and any other third party’s modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or
    +
    +
    +
    +under Patent Claims infringed by Covered Software in the absence of its Contributions.
    +
    +
    +
    +This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4).
    +
    +
    +
    +2.4. Subsequent Licenses
    +
    +
    +
    +No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3).
    +
    +
    +
    +2.5. Representation
    +
    +
    +
    +Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License.
    +
    +
    +
    +2.6. Fair Use
    +
    +
    +
    +This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents.
    +
    +
    +
    +2.7. Conditions
    +
    +
    +
    +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1.
    +
    +
    +
    +3. Responsibilities
    +
    +
    +
    +3.1. Distribution of Source Form
    +
    +
    +
    +All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients’ rights in the Source Code Form.
    +
    +
    +
    +3.2. Distribution of Executable Form
    +
    +
    +
    +If You distribute Covered Software in Executable Form then:
    +
    +
    +
    +such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and
    +
    +
    +
    +You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients’ rights in the Source Code Form under this License.
    +
    +
    +
    +3.3. Distribution of a Larger Work
    +
    +
    +
    +You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s).
    +
    +
    +
    +3.4. Notices
    +
    +
    +
    +You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies.
    +
    +
    +
    +3.5. Application of Additional Terms
    +
    +
    +
    +You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction.
    +
    +
    +
    +4. Inability to Comply Due to Statute or Regulation
    +
    +
    +
    +If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it.
    +
    +
    +
    +5. Termination
    +
    +
    +
    +5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice.
    +
    +
    +
    +5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate.
    +
    +
    +
    +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination.
    +
    +
    +
    +6. Disclaimer of Warranty
    +
    +
    +
    +Covered Software is provided under this License on an “as is” basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer.
    +
    +
    +
    +7. Limitation of Liability
    +
    +
    +
    +Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party’s negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You.
    +
    +
    +
    +8. Litigation
    +
    +
    +
    +Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party’s ability to bring cross-claims or counter-claims.
    +
    +
    +
    +9. Miscellaneous
    +
    +
    +
    +This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor.
    +
    +
    +
    +10. Versions of the License
    +
    +
    +
    +10.1. New Versions
    +
    +
    +
    +Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number.
    +
    +
    +
    +10.2. Effect of New Versions
    +
    +
    +
    +You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward.
    +
    +
    +
    +10.3. Modified Versions
    +
    +
    +
    +If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License).
    +
    +
    +
    +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
    +
    +
    +
    +If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached.
    +
    +
    +
    +Exhibit A - Source Code Form License Notice
    +
    +
    +
    +This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
    +
    +
    +
    +If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice.
    +
    +
    +
    +You may add additional accurate notices of copyright ownership.
    +
    +
    +
    +Exhibit B - “Incompatible With Secondary Licenses” Notice
    +
    +
    +
    +This Source Code Form is “Incompatible With Secondary Licenses”, as defined by the Mozilla Public License, v. 2.0.
    +
    +
    +
    +--------------- SECTION 8: GNU General Public License, V1.0 -----------
    +
    +
    +		    GNU GENERAL PUBLIC LICENSE
    +		     Version 1, February 1989
    +
    + Copyright (C) 1989 Free Software Foundation, Inc.
    + Everyone is permitted to copy and distribute verbatim copies
    + of this license document, but changing it is not allowed.
    +
    +			    Preamble
    +
    +  The license agreements of most software companies try to keep users
    +at the mercy of those companies.  By contrast, our General Public
    +License is intended to guarantee your freedom to share and change free
    +software--to make sure the software is free for all its users.  The
    +General Public License applies to the Free Software Foundation's
    +software and to any other program whose authors commit to using it.
    +You can use it for your programs, too.
    +
    +  When we speak of free software, we are referring to freedom, not
    +price.  Specifically, the General Public License is designed to make
    +sure that you have the freedom to give away or sell copies of free
    +software, that you receive source code or can get it if you want it,
    +that you can change the software or use pieces of it in new free
    +programs; and that you know you can do these things.
    +
    +  To protect your rights, we need to make restrictions that forbid
    +anyone to deny you these rights or to ask you to surrender the rights.
    +These restrictions translate to certain responsibilities for you if you
    +distribute copies of the software, or if you modify it.
    +
    +  For example, if you distribute copies of a such a program, whether
    +gratis or for a fee, you must give the recipients all the rights that
    +you have.  You must make sure that they, too, receive or can get the
    +source code.  And you must tell them their rights.
    +
    +  We protect your rights with two steps: (1) copyright the software, and
    +(2) offer you this license which gives you legal permission to copy,
    +distribute and/or modify the software.
    +
    +  Also, for each author's protection and ours, we want to make certain
    +that everyone understands that there is no warranty for this free
    +software.  If the software is modified by someone else and passed on, we
    +want its recipients to know that what they have is not the original, so
    +that any problems introduced by others will not reflect on the original
    +authors' reputations.
    +
    +  The precise terms and conditions for copying, distribution and
    +modification follow.
    +
    +		    GNU GENERAL PUBLIC LICENSE
    +   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
    +
    +  0. This License Agreement applies to any program or other work which
    +contains a notice placed by the copyright holder saying it may be
    +distributed under the terms of this General Public License.  The
    +"Program", below, refers to any such program or work, and a "work based
    +on the Program" means either the Program or any work containing the
    +Program or a portion of it, either verbatim or with modifications.  Each
    +licensee is addressed as "you".
    +
    +  1. You may copy and distribute verbatim copies of the Program's source
    +code as you receive it, in any medium, provided that you conspicuously and
    +appropriately publish on each copy an appropriate copyright notice and
    +disclaimer of warranty; keep intact all the notices that refer to this
    +General Public License and to the absence of any warranty; and give any
    +other recipients of the Program a copy of this General Public License
    +along with the Program.  You may charge a fee for the physical act of
    +transferring a copy.
    +
    +  2. You may modify your copy or copies of the Program or any portion of
    +it, and copy and distribute such modifications under the terms of Paragraph
    +1 above, provided that you also do the following:
    +
    +    a) cause the modified files to carry prominent notices stating that
    +    you changed the files and the date of any change; and
    +
    +    b) cause the whole of any work that you distribute or publish, that
    +    in whole or in part contains the Program or any part thereof, either
    +    with or without modifications, to be licensed at no charge to all
    +    third parties under the terms of this General Public License (except
    +    that you may choose to grant warranty protection to some or all
    +    third parties, at your option).
    +
    +    c) If the modified program normally reads commands interactively when
    +    run, you must cause it, when started running for such interactive use
    +    in the simplest and most usual way, to print or display an
    +    announcement including an appropriate copyright notice and a notice
    +    that there is no warranty (or else, saying that you provide a
    +    warranty) and that users may redistribute the program under these
    +    conditions, and telling the user how to view a copy of this General
    +    Public License.
    +
    +    d) You may charge a fee for the physical act of transferring a
    +    copy, and you may at your option offer warranty protection in
    +    exchange for a fee.
    +
    +Mere aggregation of another independent work with the Program (or its
    +derivative) on a volume of a storage or distribution medium does not bring
    +the other work under the scope of these terms.
    +
    +  3. You may copy and distribute the Program (or a portion or derivative of
    +it, under Paragraph 2) in object code or executable form under the terms of
    +Paragraphs 1 and 2 above provided that you also do one of the following:
    +
    +    a) accompany it with the complete corresponding machine-readable
    +    source code, which must be distributed under the terms of
    +    Paragraphs 1 and 2 above; or,
    +
    +    b) accompany it with a written offer, valid for at least three
    +    years, to give any third party free (except for a nominal charge
    +    for the cost of distribution) a complete machine-readable copy of the
    +    corresponding source code, to be distributed under the terms of
    +    Paragraphs 1 and 2 above; or,
    +
    +    c) accompany it with the information you received as to where the
    +    corresponding source code may be obtained.  (This alternative is
    +    allowed only for noncommercial distribution and only if you
    +    received the program in object code or executable form alone.)
    +
    +Source code for a work means the preferred form of the work for making
    +modifications to it.  For an executable file, complete source code means
    +all the source code for all modules it contains; but, as a special
    +exception, it need not include source code for modules which are standard
    +libraries that accompany the operating system on which the executable
    +file runs, or for standard header files or definitions files that
    +accompany that operating system.
    +
    +  4. You may not copy, modify, sublicense, distribute or transfer the
    +Program except as expressly provided under this General Public License.
    +Any attempt otherwise to copy, modify, sublicense, distribute or transfer
    +the Program is void, and will automatically terminate your rights to use
    +the Program under this License.  However, parties who have received
    +copies, or rights to use copies, from you under this General Public
    +License will not have their licenses terminated so long as such parties
    +remain in full compliance.
    +
    +  5. By copying, distributing or modifying the Program (or any work based
    +on the Program) you indicate your acceptance of this license to do so,
    +and all its terms and conditions.
    +
    +  6. Each time you redistribute the Program (or any work based on the
    +Program), the recipient automatically receives a license from the original
    +licensor to copy, distribute or modify the Program subject to these
    +terms and conditions.  You may not impose any further restrictions on the
    +recipients' exercise of the rights granted herein.
    +
    +  7. The Free Software Foundation may publish revised and/or new versions
    +of the General Public License from time to time.  Such new versions will
    +be similar in spirit to the present version, but may differ in detail to
    +address new problems or concerns.
    +
    +Each version is given a distinguishing version number.  If the Program
    +specifies a version number of the license which applies to it and "any
    +later version", you have the option of following the terms and conditions
    +either of that version or of any later version published by the Free
    +Software Foundation.  If the Program does not specify a version number of
    +the license, you may choose any version ever published by the Free Software
    +Foundation.
    +
    +  8. If you wish to incorporate parts of the Program into other free
    +programs whose distribution conditions are different, write to the author
    +to ask for permission.  For software which is copyrighted by the Free
    +Software Foundation, write to the Free Software Foundation; we sometimes
    +make exceptions for this.  Our decision will be guided by the two goals
    +of preserving the free status of all derivatives of our free software and
    +of promoting the sharing and reuse of software generally.
    +
    +			    NO WARRANTY
    +
    +  9. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
    +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
    +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
    +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
    +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
    +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
    +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
    +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
    +REPAIR OR CORRECTION.
    +
    +  10. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
    +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
    +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
    +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
    +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
    +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
    +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
    +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
    +POSSIBILITY OF SUCH DAMAGES.
    +
    +		     END OF TERMS AND CONDITIONS
    +
    +	Appendix: How to Apply These Terms to Your New Programs
    +
    +  If you develop a new program, and you want it to be of the greatest
    +possible use to humanity, the best way to achieve this is to make it
    +free software which everyone can redistribute and change under these
    +terms.
    +
    +  To do so, attach the following notices to the program.  It is safest to
    +attach them to the start of each source file to most effectively convey
    +the exclusion of warranty; and each file should have at least the
    +"copyright" line and a pointer to where the full notice is found.
    +
    +    <one line to give the program's name and a brief idea of what it does.>
    +    Copyright (C) 19yy  <name of author>
    +
    +    This program is free software; you can redistribute it and/or modify
    +    it under the terms of the GNU General Public License as published by
    +    the Free Software Foundation; either version 1, or (at your option)
    +    any later version.
    +
    +    This program is distributed in the hope that it will be useful,
    +    but WITHOUT ANY WARRANTY; without even the implied warranty of
    +    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    +    GNU General Public License for more details.
    +
    +    You should have received a copy of the GNU General Public License
    +    along with this program; if not, write to the Free Software
    +    Foundation, Inc.
    +
    +Also add information on how to contact you by electronic and paper mail.
    +
    +If the program is interactive, make it output a short notice like this
    +when it starts in an interactive mode:
    +
    +    Gnomovision version 69, Copyright (C) 19xx name of author
    +    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    +    This is free software, and you are welcome to redistribute it
    +    under certain conditions; type `show c' for details.
    +
    +The hypothetical commands `show w' and `show c' should show the
    +appropriate parts of the General Public License.  Of course, the
    +commands you use may be called something other than `show w' and `show
    +c'; they could even be mouse-clicks or menu items--whatever suits your
    +program.
    +
    +You should also get your employer (if you work as a programmer) or your
    +school, if any, to sign a "copyright disclaimer" for the program, if
    +necessary.  Here a sample; alter the names:
    +
    +  Yoyodyne, Inc., hereby disclaims all copyright interest in the
    +  program `Gnomovision' (a program to direct compilers to make passes
    +  at assemblers) written by James Hacker.
    +
    +  <signature of Ty Coon>, 1 April 1989
    +  Ty Coon, President of Vice
    +
    +That's all there is to it!
    +
    +
    +
    +--------------- SECTION 9: GNU Free Documentation License V1.1 -----------
    +
    +
    +		GNU Free Documentation License
    +		   Version 1.1, March 2000
    +
    + Copyright (C) 2000  Free Software Foundation, Inc.
    +     51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
    + Everyone is permitted to copy and distribute verbatim copies
    + of this license document, but changing it is not allowed.
    +
    +
    +0. PREAMBLE
    +
    +The purpose of this License is to make a manual, textbook, or other
    +written document "free" in the sense of freedom: to assure everyone
    +the effective freedom to copy and redistribute it, with or without
    +modifying it, either commercially or noncommercially.  Secondarily,
    +this License preserves for the author and publisher a way to get
    +credit for their work, while not being considered responsible for
    +modifications made by others.
    +
    +This License is a kind of "copyleft", which means that derivative
    +works of the document must themselves be free in the same sense.  It
    +complements the GNU General Public License, which is a copyleft
    +license designed for free software.
    +
    +We have designed this License in order to use it for manuals for free
    +software, because free software needs free documentation: a free
    +program should come with manuals providing the same freedoms that the
    +software does.  But this License is not limited to software manuals;
    +it can be used for any textual work, regardless of subject matter or
    +whether it is published as a printed book.  We recommend this License
    +principally for works whose purpose is instruction or reference.
    +
    +
    +1. APPLICABILITY AND DEFINITIONS
    +
    +This License applies to any manual or other work that contains a
    +notice placed by the copyright holder saying it can be distributed
    +under the terms of this License.  The "Document", below, refers to any
    +such manual or work.  Any member of the public is a licensee, and is
    +addressed as "you".
    +
    +A "Modified Version" of the Document means any work containing the
    +Document or a portion of it, either copied verbatim, or with
    +modifications and/or translated into another language.
    +
    +A "Secondary Section" is a named appendix or a front-matter section of
    +the Document that deals exclusively with the relationship of the
    +publishers or authors of the Document to the Document's overall subject
    +(or to related matters) and contains nothing that could fall directly
    +within that overall subject.  (For example, if the Document is in part a
    +textbook of mathematics, a Secondary Section may not explain any
    +mathematics.)  The relationship could be a matter of historical
    +connection with the subject or with related matters, or of legal,
    +commercial, philosophical, ethical or political position regarding
    +them.
    +
    +The "Invariant Sections" are certain Secondary Sections whose titles
    +are designated, as being those of Invariant Sections, in the notice
    +that says that the Document is released under this License.
    +
    +The "Cover Texts" are certain short passages of text that are listed,
    +as Front-Cover Texts or Back-Cover Texts, in the notice that says that
    +the Document is released under this License.
    +
    +A "Transparent" copy of the Document means a machine-readable copy,
    +represented in a format whose specification is available to the
    +general public, whose contents can be viewed and edited directly and
    +straightforwardly with generic text editors or (for images composed of
    +pixels) generic paint programs or (for drawings) some widely available
    +drawing editor, and that is suitable for input to text formatters or
    +for automatic translation to a variety of formats suitable for input
    +to text formatters.  A copy made in an otherwise Transparent file
    +format whose markup has been designed to thwart or discourage
    +subsequent modification by readers is not Transparent.  A copy that is
    +not "Transparent" is called "Opaque".
    +
    +Examples of suitable formats for Transparent copies include plain
    +ASCII without markup, Texinfo input format, LaTeX input format, SGML
    +or XML using a publicly available DTD, and standard-conforming simple
    +HTML designed for human modification.  Opaque formats include
    +PostScript, PDF, proprietary formats that can be read and edited only
    +by proprietary word processors, SGML or XML for which the DTD and/or
    +processing tools are not generally available, and the
    +machine-generated HTML produced by some word processors for output
    +purposes only.
    +
    +The "Title Page" means, for a printed book, the title page itself,
    +plus such following pages as are needed to hold, legibly, the material
    +this License requires to appear in the title page.  For works in
    +formats which do not have any title page as such, "Title Page" means
    +the text near the most prominent appearance of the work's title,
    +preceding the beginning of the body of the text.
    +
    +
    +2. VERBATIM COPYING
    +
    +You may copy and distribute the Document in any medium, either
    +commercially or noncommercially, provided that this License, the
    +copyright notices, and the license notice saying this License applies
    +to the Document are reproduced in all copies, and that you add no other
    +conditions whatsoever to those of this License.  You may not use
    +technical measures to obstruct or control the reading or further
    +copying of the copies you make or distribute.  However, you may accept
    +compensation in exchange for copies.  If you distribute a large enough
    +number of copies you must also follow the conditions in section 3.
    +
    +You may also lend copies, under the same conditions stated above, and
    +you may publicly display copies.
    +
    +
    +3. COPYING IN QUANTITY
    +
    +If you publish printed copies of the Document numbering more than 100,
    +and the Document's license notice requires Cover Texts, you must enclose
    +the copies in covers that carry, clearly and legibly, all these Cover
    +Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on
    +the back cover.  Both covers must also clearly and legibly identify
    +you as the publisher of these copies.  The front cover must present
    +the full title with all words of the title equally prominent and
    +visible.  You may add other material on the covers in addition.
    +Copying with changes limited to the covers, as long as they preserve
    +the title of the Document and satisfy these conditions, can be treated
    +as verbatim copying in other respects.
    +
    +If the required texts for either cover are too voluminous to fit
    +legibly, you should put the first ones listed (as many as fit
    +reasonably) on the actual cover, and continue the rest onto adjacent
    +pages.
    +
    +If you publish or distribute Opaque copies of the Document numbering
    +more than 100, you must either include a machine-readable Transparent
    +copy along with each Opaque copy, or state in or with each Opaque copy
    +a publicly-accessible computer-network location containing a complete
    +Transparent copy of the Document, free of added material, which the
    +general network-using public has access to download anonymously at no
    +charge using public-standard network protocols.  If you use the latter
    +option, you must take reasonably prudent steps, when you begin
    +distribution of Opaque copies in quantity, to ensure that this
    +Transparent copy will remain thus accessible at the stated location
    +until at least one year after the last time you distribute an Opaque
    +copy (directly or through your agents or retailers) of that edition to
    +the public.
    +
    +It is requested, but not required, that you contact the authors of the
    +Document well before redistributing any large number of copies, to give
    +them a chance to provide you with an updated version of the Document.
    +
    +
    +4. MODIFICATIONS
    +
    +You may copy and distribute a Modified Version of the Document under
    +the conditions of sections 2 and 3 above, provided that you release
    +the Modified Version under precisely this License, with the Modified
    +Version filling the role of the Document, thus licensing distribution
    +and modification of the Modified Version to whoever possesses a copy
    +of it.  In addition, you must do these things in the Modified Version:
    +
    +A. Use in the Title Page (and on the covers, if any) a title distinct
    +   from that of the Document, and from those of previous versions
    +   (which should, if there were any, be listed in the History section
    +   of the Document).  You may use the same title as a previous version
    +   if the original publisher of that version gives permission.
    +B. List on the Title Page, as authors, one or more persons or entities
    +   responsible for authorship of the modifications in the Modified
    +   Version, together with at least five of the principal authors of the
    +   Document (all of its principal authors, if it has less than five).
    +C. State on the Title page the name of the publisher of the
    +   Modified Version, as the publisher.
    +D. Preserve all the copyright notices of the Document.
    +E. Add an appropriate copyright notice for your modifications
    +   adjacent to the other copyright notices.
    +F. Include, immediately after the copyright notices, a license notice
    +   giving the public permission to use the Modified Version under the
    +   terms of this License, in the form shown in the Addendum below.
    +G. Preserve in that license notice the full lists of Invariant Sections
    +   and required Cover Texts given in the Document's license notice.
    +H. Include an unaltered copy of this License.
    +I. Preserve the section entitled "History", and its title, and add to
    +   it an item stating at least the title, year, new authors, and
    +   publisher of the Modified Version as given on the Title Page.  If
    +   there is no section entitled "History" in the Document, create one
    +   stating the title, year, authors, and publisher of the Document as
    +   given on its Title Page, then add an item describing the Modified
    +   Version as stated in the previous sentence.
    +J. Preserve the network location, if any, given in the Document for
    +   public access to a Transparent copy of the Document, and likewise
    +   the network locations given in the Document for previous versions
    +   it was based on.  These may be placed in the "History" section.
    +   You may omit a network location for a work that was published at
    +   least four years before the Document itself, or if the original
    +   publisher of the version it refers to gives permission.
    +K. In any section entitled "Acknowledgements" or "Dedications",
    +   preserve the section's title, and preserve in the section all the
    +   substance and tone of each of the contributor acknowledgements
    +   and/or dedications given therein.
    +L. Preserve all the Invariant Sections of the Document,
    +   unaltered in their text and in their titles.  Section numbers
    +   or the equivalent are not considered part of the section titles.
    +M. Delete any section entitled "Endorsements".  Such a section
    +   may not be included in the Modified Version.
    +N. Do not retitle any existing section as "Endorsements"
    +   or to conflict in title with any Invariant Section.
    +
    +If the Modified Version includes new front-matter sections or
    +appendices that qualify as Secondary Sections and contain no material
    +copied from the Document, you may at your option designate some or all
    +of these sections as invariant.  To do this, add their titles to the
    +list of Invariant Sections in the Modified Version's license notice.
    +These titles must be distinct from any other section titles.
    +
    +You may add a section entitled "Endorsements", provided it contains
    +nothing but endorsements of your Modified Version by various
    +parties--for example, statements of peer review or that the text has
    +been approved by an organization as the authoritative definition of a
    +standard.
    +
    +You may add a passage of up to five words as a Front-Cover Text, and a
    +passage of up to 25 words as a Back-Cover Text, to the end of the list
    +of Cover Texts in the Modified Version.  Only one passage of
    +Front-Cover Text and one of Back-Cover Text may be added by (or
    +through arrangements made by) any one entity.  If the Document already
    +includes a cover text for the same cover, previously added by you or
    +by arrangement made by the same entity you are acting on behalf of,
    +you may not add another; but you may replace the old one, on explicit
    +permission from the previous publisher that added the old one.
    +
    +The author(s) and publisher(s) of the Document do not by this License
    +give permission to use their names for publicity for or to assert or
    +imply endorsement of any Modified Version.
    +
    +
    +5. COMBINING DOCUMENTS
    +
    +You may combine the Document with other documents released under this
    +License, under the terms defined in section 4 above for modified
    +versions, provided that you include in the combination all of the
    +Invariant Sections of all of the original documents, unmodified, and
    +list them all as Invariant Sections of your combined work in its
    +license notice.
    +
    +The combined work need only contain one copy of this License, and
    +multiple identical Invariant Sections may be replaced with a single
    +copy.  If there are multiple Invariant Sections with the same name but
    +different contents, make the title of each such section unique by
    +adding at the end of it, in parentheses, the name of the original
    +author or publisher of that section if known, or else a unique number.
    +Make the same adjustment to the section titles in the list of
    +Invariant Sections in the license notice of the combined work.
    +
    +In the combination, you must combine any sections entitled "History"
    +in the various original documents, forming one section entitled
    +"History"; likewise combine any sections entitled "Acknowledgements",
    +and any sections entitled "Dedications".  You must delete all sections
    +entitled "Endorsements."
    +
    +
    +6. COLLECTIONS OF DOCUMENTS
    +
    +You may make a collection consisting of the Document and other documents
    +released under this License, and replace the individual copies of this
    +License in the various documents with a single copy that is included in
    +the collection, provided that you follow the rules of this License for
    +verbatim copying of each of the documents in all other respects.
    +
    +You may extract a single document from such a collection, and distribute
    +it individually under this License, provided you insert a copy of this
    +License into the extracted document, and follow this License in all
    +other respects regarding verbatim copying of that document.
    +
    +
    +7. AGGREGATION WITH INDEPENDENT WORKS
    +
    +A compilation of the Document or its derivatives with other separate
    +and independent documents or works, in or on a volume of a storage or
    +distribution medium, does not as a whole count as a Modified Version
    +of the Document, provided no compilation copyright is claimed for the
    +compilation.  Such a compilation is called an "aggregate", and this
    +License does not apply to the other self-contained works thus compiled
    +with the Document, on account of their being thus compiled, if they
    +are not themselves derivative works of the Document.
    +
    +If the Cover Text requirement of section 3 is applicable to these
    +copies of the Document, then if the Document is less than one quarter
    +of the entire aggregate, the Document's Cover Texts may be placed on
    +covers that surround only the Document within the aggregate.
    +Otherwise they must appear on covers around the whole aggregate.
    +
    +
    +8. TRANSLATION
    +
    +Translation is considered a kind of modification, so you may
    +distribute translations of the Document under the terms of section 4.
    +Replacing Invariant Sections with translations requires special
    +permission from their copyright holders, but you may include
    +translations of some or all Invariant Sections in addition to the
    +original versions of these Invariant Sections.  You may include a
    +translation of this License provided that you also include the
    +original English version of this License.  In case of a disagreement
    +between the translation and the original English version of this
    +License, the original English version will prevail.
    +
    +
    +9. TERMINATION
    +
    +You may not copy, modify, sublicense, or distribute the Document except
    +as expressly provided for under this License.  Any other attempt to
    +copy, modify, sublicense or distribute the Document is void, and will
    +automatically terminate your rights under this License.  However,
    +parties who have received copies, or rights, from you under this
    +License will not have their licenses terminated so long as such
    +parties remain in full compliance.
    +
    +
    +10. FUTURE REVISIONS OF THIS LICENSE
    +
    +The Free Software Foundation may publish new, revised versions
    +of the GNU Free Documentation License from time to time.  Such new
    +versions will be similar in spirit to the present version, but may
    +differ in detail to address new problems or concerns.  See
    +http://www.gnu.org/copyleft/.
    +
    +Each version of the License is given a distinguishing version number.
    +If the Document specifies that a particular numbered version of this
    +License "or any later version" applies to it, you have the option of
    +following the terms and conditions either of that specified version or
    +of any later version that has been published (not as a draft) by the
    +Free Software Foundation.  If the Document does not specify a version
    +number of this License, you may choose any version ever published (not
    +as a draft) by the Free Software Foundation.
    +
    +
    +ADDENDUM: How to use this License for your documents
    +
    +To use this License in a document you have written, include a copy of
    +the License in the document and put the following copyright and
    +license notices just after the title page:
    +
    +      Copyright (c)  YEAR  YOUR NAME.
    +      Permission is granted to copy, distribute and/or modify this document
    +      under the terms of the GNU Free Documentation License, Version 1.1
    +      or any later version published by the Free Software Foundation;
    +      with the Invariant Sections being LIST THEIR TITLES, with the
    +      Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST.
    +      A copy of the license is included in the section entitled "GNU
    +      Free Documentation License".
    +
    +If you have no Invariant Sections, write "with no Invariant Sections"
    +instead of saying which ones are invariant.  If you have no
    +Front-Cover Texts, write "no Front-Cover Texts" instead of
    +"Front-Cover Texts being LIST"; likewise for Back-Cover Texts.
    +
    +If your document contains nontrivial examples of program code, we
    +recommend releasing these examples in parallel under your choice of
    +free software license, such as the GNU General Public License,
    +to permit their use in free software.
    +
    +
    +
    +--------------- SECTION 10: GNU Free Documentation License V1.2 -----------
    +
    +
    +                GNU Free Documentation License
    +                  Version 1.2, November 2002
    +
    +
    + Copyright (C) 2000,2001,2002  Free Software Foundation, Inc.
    +     51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
    + Everyone is permitted to copy and distribute verbatim copies
    + of this license document, but changing it is not allowed.
    +
    +
    +0. PREAMBLE
    +
    +The purpose of this License is to make a manual, textbook, or other
    +functional and useful document "free" in the sense of freedom: to
    +assure everyone the effective freedom to copy and redistribute it,
    +with or without modifying it, either commercially or noncommercially.
    +Secondarily, this License preserves for the author and publisher a way
    +to get credit for their work, while not being considered responsible
    +for modifications made by others.
    +
    +This License is a kind of "copyleft", which means that derivative
    +works of the document must themselves be free in the same sense.  It
    +complements the GNU General Public License, which is a copyleft
    +license designed for free software.
    +
    +We have designed this License in order to use it for manuals for free
    +software, because free software needs free documentation: a free
    +program should come with manuals providing the same freedoms that the
    +software does.  But this License is not limited to software manuals;
    +it can be used for any textual work, regardless of subject matter or
    +whether it is published as a printed book.  We recommend this License
    +principally for works whose purpose is instruction or reference.
    +
    +
    +1. APPLICABILITY AND DEFINITIONS
    +
    +This License applies to any manual or other work, in any medium, that
    +contains a notice placed by the copyright holder saying it can be
    +distributed under the terms of this License.  Such a notice grants a
    +world-wide, royalty-free license, unlimited in duration, to use that
    +work under the conditions stated herein.  The "Document", below,
    +refers to any such manual or work.  Any member of the public is a
    +licensee, and is addressed as "you".  You accept the license if you
    +copy, modify or distribute the work in a way requiring permission
    +under copyright law.
    +
    +A "Modified Version" of the Document means any work containing the
    +Document or a portion of it, either copied verbatim, or with
    +modifications and/or translated into another language.
    +
    +A "Secondary Section" is a named appendix or a front-matter section of
    +the Document that deals exclusively with the relationship of the
    +publishers or authors of the Document to the Document's overall subject
    +(or to related matters) and contains nothing that could fall directly
    +within that overall subject.  (Thus, if the Document is in part a
    +textbook of mathematics, a Secondary Section may not explain any
    +mathematics.)  The relationship could be a matter of historical
    +connection with the subject or with related matters, or of legal,
    +commercial, philosophical, ethical or political position regarding
    +them.
    +
    +The "Invariant Sections" are certain Secondary Sections whose titles
    +are designated, as being those of Invariant Sections, in the notice
    +that says that the Document is released under this License.  If a
    +section does not fit the above definition of Secondary then it is not
    +allowed to be designated as Invariant.  The Document may contain zero
    +Invariant Sections.  If the Document does not identify any Invariant
    +Sections then there are none.
    +
    +The "Cover Texts" are certain short passages of text that are listed,
    +as Front-Cover Texts or Back-Cover Texts, in the notice that says that
    +the Document is released under this License.  A Front-Cover Text may
    +be at most 5 words, and a Back-Cover Text may be at most 25 words.
    +
    +A "Transparent" copy of the Document means a machine-readable copy,
    +represented in a format whose specification is available to the
    +general public, that is suitable for revising the document
    +straightforwardly with generic text editors or (for images composed of
    +pixels) generic paint programs or (for drawings) some widely available
    +drawing editor, and that is suitable for input to text formatters or
    +for automatic translation to a variety of formats suitable for input
    +to text formatters.  A copy made in an otherwise Transparent file
    +format whose markup, or absence of markup, has been arranged to thwart
    +or discourage subsequent modification by readers is not Transparent.
    +An image format is not Transparent if used for any substantial amount
    +of text.  A copy that is not "Transparent" is called "Opaque".
    +
    +Examples of suitable formats for Transparent copies include plain
    +ASCII without markup, Texinfo input format, LaTeX input format, SGML
    +or XML using a publicly available DTD, and standard-conforming simple
    +HTML, PostScript or PDF designed for human modification.  Examples of
    +transparent image formats include PNG, XCF and JPG.  Opaque formats
    +include proprietary formats that can be read and edited only by
    +proprietary word processors, SGML or XML for which the DTD and/or
    +processing tools are not generally available, and the
    +machine-generated HTML, PostScript or PDF produced by some word
    +processors for output purposes only.
    +
    +The "Title Page" means, for a printed book, the title page itself,
    +plus such following pages as are needed to hold, legibly, the material
    +this License requires to appear in the title page.  For works in
    +formats which do not have any title page as such, "Title Page" means
    +the text near the most prominent appearance of the work's title,
    +preceding the beginning of the body of the text.
    +
    +A section "Entitled XYZ" means a named subunit of the Document whose
    +title either is precisely XYZ or contains XYZ in parentheses following
    +text that translates XYZ in another language.  (Here XYZ stands for a
    +specific section name mentioned below, such as "Acknowledgements",
    +"Dedications", "Endorsements", or "History".)  To "Preserve the Title"
    +of such a section when you modify the Document means that it remains a
    +section "Entitled XYZ" according to this definition.
    +
    +The Document may include Warranty Disclaimers next to the notice which
    +states that this License applies to the Document.  These Warranty
    +Disclaimers are considered to be included by reference in this
    +License, but only as regards disclaiming warranties: any other
    +implication that these Warranty Disclaimers may have is void and has
    +no effect on the meaning of this License.
    +
    +
    +2. VERBATIM COPYING
    +
    +You may copy and distribute the Document in any medium, either
    +commercially or noncommercially, provided that this License, the
    +copyright notices, and the license notice saying this License applies
    +to the Document are reproduced in all copies, and that you add no other
    +conditions whatsoever to those of this License.  You may not use
    +technical measures to obstruct or control the reading or further
    +copying of the copies you make or distribute.  However, you may accept
    +compensation in exchange for copies.  If you distribute a large enough
    +number of copies you must also follow the conditions in section 3.
    +
    +You may also lend copies, under the same conditions stated above, and
    +you may publicly display copies.
    +
    +
    +3. COPYING IN QUANTITY
    +
    +If you publish printed copies (or copies in media that commonly have
    +printed covers) of the Document, numbering more than 100, and the
    +Document's license notice requires Cover Texts, you must enclose the
    +copies in covers that carry, clearly and legibly, all these Cover
    +Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on
    +the back cover.  Both covers must also clearly and legibly identify
    +you as the publisher of these copies.  The front cover must present
    +the full title with all words of the title equally prominent and
    +visible.  You may add other material on the covers in addition.
    +Copying with changes limited to the covers, as long as they preserve
    +the title of the Document and satisfy these conditions, can be treated
    +as verbatim copying in other respects.
    +
    +If the required texts for either cover are too voluminous to fit
    +legibly, you should put the first ones listed (as many as fit
    +reasonably) on the actual cover, and continue the rest onto adjacent
    +pages.
    +
    +If you publish or distribute Opaque copies of the Document numbering
    +more than 100, you must either include a machine-readable Transparent
    +copy along with each Opaque copy, or state in or with each Opaque copy
    +a computer-network location from which the general network-using
    +public has access to download using public-standard network protocols
    +a complete Transparent copy of the Document, free of added material.
    +If you use the latter option, you must take reasonably prudent steps,
    +when you begin distribution of Opaque copies in quantity, to ensure
    +that this Transparent copy will remain thus accessible at the stated
    +location until at least one year after the last time you distribute an
    +Opaque copy (directly or through your agents or retailers) of that
    +edition to the public.
    +
    +It is requested, but not required, that you contact the authors of the
    +Document well before redistributing any large number of copies, to give
    +them a chance to provide you with an updated version of the Document.
    +
    +
    +4. MODIFICATIONS
    +
    +You may copy and distribute a Modified Version of the Document under
    +the conditions of sections 2 and 3 above, provided that you release
    +the Modified Version under precisely this License, with the Modified
    +Version filling the role of the Document, thus licensing distribution
    +and modification of the Modified Version to whoever possesses a copy
    +of it.  In addition, you must do these things in the Modified Version:
    +
    +A. Use in the Title Page (and on the covers, if any) a title distinct
    +   from that of the Document, and from those of previous versions
    +   (which should, if there were any, be listed in the History section
    +   of the Document).  You may use the same title as a previous version
    +   if the original publisher of that version gives permission.
    +B. List on the Title Page, as authors, one or more persons or entities
    +   responsible for authorship of the modifications in the Modified
    +   Version, together with at least five of the principal authors of the
    +   Document (all of its principal authors, if it has fewer than five),
    +   unless they release you from this requirement.
    +C. State on the Title page the name of the publisher of the
    +   Modified Version, as the publisher.
    +D. Preserve all the copyright notices of the Document.
    +E. Add an appropriate copyright notice for your modifications
    +   adjacent to the other copyright notices.
    +F. Include, immediately after the copyright notices, a license notice
    +   giving the public permission to use the Modified Version under the
    +   terms of this License, in the form shown in the Addendum below.
    +G. Preserve in that license notice the full lists of Invariant Sections
    +   and required Cover Texts given in the Document's license notice.
    +H. Include an unaltered copy of this License.
    +I. Preserve the section Entitled "History", Preserve its Title, and add
    +   to it an item stating at least the title, year, new authors, and
    +   publisher of the Modified Version as given on the Title Page.  If
    +   there is no section Entitled "History" in the Document, create one
    +   stating the title, year, authors, and publisher of the Document as
    +   given on its Title Page, then add an item describing the Modified
    +   Version as stated in the previous sentence.
    +J. Preserve the network location, if any, given in the Document for
    +   public access to a Transparent copy of the Document, and likewise
    +   the network locations given in the Document for previous versions
    +   it was based on.  These may be placed in the "History" section.
    +   You may omit a network location for a work that was published at
    +   least four years before the Document itself, or if the original
    +   publisher of the version it refers to gives permission.
    +K. For any section Entitled "Acknowledgements" or "Dedications",
    +   Preserve the Title of the section, and preserve in the section all
    +   the substance and tone of each of the contributor acknowledgements
    +   and/or dedications given therein.
    +L. Preserve all the Invariant Sections of the Document,
    +   unaltered in their text and in their titles.  Section numbers
    +   or the equivalent are not considered part of the section titles.
    +M. Delete any section Entitled "Endorsements".  Such a section
    +   may not be included in the Modified Version.
    +N. Do not retitle any existing section to be Entitled "Endorsements"
    +   or to conflict in title with any Invariant Section.
    +O. Preserve any Warranty Disclaimers.
    +
    +If the Modified Version includes new front-matter sections or
    +appendices that qualify as Secondary Sections and contain no material
    +copied from the Document, you may at your option designate some or all
    +of these sections as invariant.  To do this, add their titles to the
    +list of Invariant Sections in the Modified Version's license notice.
    +These titles must be distinct from any other section titles.
    +
    +You may add a section Entitled "Endorsements", provided it contains
    +nothing but endorsements of your Modified Version by various
    +parties--for example, statements of peer review or that the text has
    +been approved by an organization as the authoritative definition of a
    +standard.
    +
    +You may add a passage of up to five words as a Front-Cover Text, and a
    +passage of up to 25 words as a Back-Cover Text, to the end of the list
    +of Cover Texts in the Modified Version.  Only one passage of
    +Front-Cover Text and one of Back-Cover Text may be added by (or
    +through arrangements made by) any one entity.  If the Document already
    +includes a cover text for the same cover, previously added by you or
    +by arrangement made by the same entity you are acting on behalf of,
    +you may not add another; but you may replace the old one, on explicit
    +permission from the previous publisher that added the old one.
    +
    +The author(s) and publisher(s) of the Document do not by this License
    +give permission to use their names for publicity for or to assert or
    +imply endorsement of any Modified Version.
    +
    +
    +5. COMBINING DOCUMENTS
    +
    +You may combine the Document with other documents released under this
    +License, under the terms defined in section 4 above for modified
    +versions, provided that you include in the combination all of the
    +Invariant Sections of all of the original documents, unmodified, and
    +list them all as Invariant Sections of your combined work in its
    +license notice, and that you preserve all their Warranty Disclaimers.
    +
    +The combined work need only contain one copy of this License, and
    +multiple identical Invariant Sections may be replaced with a single
    +copy.  If there are multiple Invariant Sections with the same name but
    +different contents, make the title of each such section unique by
    +adding at the end of it, in parentheses, the name of the original
    +author or publisher of that section if known, or else a unique number.
    +Make the same adjustment to the section titles in the list of
    +Invariant Sections in the license notice of the combined work.
    +
    +In the combination, you must combine any sections Entitled "History"
    +in the various original documents, forming one section Entitled
    +"History"; likewise combine any sections Entitled "Acknowledgements",
    +and any sections Entitled "Dedications".  You must delete all sections
    +Entitled "Endorsements".
    +
    +
    +6. COLLECTIONS OF DOCUMENTS
    +
    +You may make a collection consisting of the Document and other documents
    +released under this License, and replace the individual copies of this
    +License in the various documents with a single copy that is included in
    +the collection, provided that you follow the rules of this License for
    +verbatim copying of each of the documents in all other respects.
    +
    +You may extract a single document from such a collection, and distribute
    +it individually under this License, provided you insert a copy of this
    +License into the extracted document, and follow this License in all
    +other respects regarding verbatim copying of that document.
    +
    +
    +7. AGGREGATION WITH INDEPENDENT WORKS
    +
    +A compilation of the Document or its derivatives with other separate
    +and independent documents or works, in or on a volume of a storage or
    +distribution medium, is called an "aggregate" if the copyright
    +resulting from the compilation is not used to limit the legal rights
    +of the compilation's users beyond what the individual works permit.
    +When the Document is included in an aggregate, this License does not
    +apply to the other works in the aggregate which are not themselves
    +derivative works of the Document.
    +
    +If the Cover Text requirement of section 3 is applicable to these
    +copies of the Document, then if the Document is less than one half of
    +the entire aggregate, the Document's Cover Texts may be placed on
    +covers that bracket the Document within the aggregate, or the
    +electronic equivalent of covers if the Document is in electronic form.
    +Otherwise they must appear on printed covers that bracket the whole
    +aggregate.
    +
    +
    +8. TRANSLATION
    +
    +Translation is considered a kind of modification, so you may
    +distribute translations of the Document under the terms of section 4.
    +Replacing Invariant Sections with translations requires special
    +permission from their copyright holders, but you may include
    +translations of some or all Invariant Sections in addition to the
    +original versions of these Invariant Sections.  You may include a
    +translation of this License, and all the license notices in the
    +Document, and any Warranty Disclaimers, provided that you also include
    +the original English version of this License and the original versions
    +of those notices and disclaimers.  In case of a disagreement between
    +the translation and the original version of this License or a notice
    +or disclaimer, the original version will prevail.
    +
    +If a section in the Document is Entitled "Acknowledgements",
    +"Dedications", or "History", the requirement (section 4) to Preserve
    +its Title (section 1) will typically require changing the actual
    +title.
    +
    +
    +9. TERMINATION
    +
    +You may not copy, modify, sublicense, or distribute the Document except
    +as expressly provided for under this License.  Any other attempt to
    +copy, modify, sublicense or distribute the Document is void, and will
    +automatically terminate your rights under this License.  However,
    +parties who have received copies, or rights, from you under this
    +License will not have their licenses terminated so long as such
    +parties remain in full compliance.
    +
    +
    +10. FUTURE REVISIONS OF THIS LICENSE
    +
    +The Free Software Foundation may publish new, revised versions
    +of the GNU Free Documentation License from time to time.  Such new
    +versions will be similar in spirit to the present version, but may
    +differ in detail to address new problems or concerns.  See
    +http://www.gnu.org/copyleft/.
    +
    +Each version of the License is given a distinguishing version number.
    +If the Document specifies that a particular numbered version of this
    +License "or any later version" applies to it, you have the option of
    +following the terms and conditions either of that specified version or
    +of any later version that has been published (not as a draft) by the
    +Free Software Foundation.  If the Document does not specify a version
    +number of this License, you may choose any version ever published (not
    +as a draft) by the Free Software Foundation.
    +
    +
    +ADDENDUM: How to use this License for your documents
    +
    +To use this License in a document you have written, include a copy of
    +the License in the document and put the following copyright and
    +license notices just after the title page:
    +
    +    Copyright (c)  YEAR  YOUR NAME.
    +    Permission is granted to copy, distribute and/or modify this document
    +    under the terms of the GNU Free Documentation License, Version 1.2
    +    or any later version published by the Free Software Foundation;
    +    with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
    +    A copy of the license is included in the section entitled "GNU
    +    Free Documentation License".
    +
    +If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts,
    +replace the "with...Texts." line with this:
    +
    +    with the Invariant Sections being LIST THEIR TITLES, with the
    +    Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST.
    +
    +If you have Invariant Sections without Cover Texts, or some other
    +combination of the three, merge those two alternatives to suit the
    +situation.
    +
    +If your document contains nontrivial examples of program code, we
    +recommend releasing these examples in parallel under your choice of
    +free software license, such as the GNU General Public License,
    +to permit their use in free software.
    +
    +
    +
    +--------------- SECTION 11: GNU Free Documentation License V1.3 -----------
    +
    +
    +GNU Free Documentation License
    +
    +Version 1.3, 3 November 2008
    +
    +Copyright (c) 2000, 2001, 2002, 2007, 2008 Free Software Foundation, Inc. 
    +
    +Everyone is permitted to copy and distribute verbatim copies of this
    +license document, but changing it is not allowed.
    +
    +0. PREAMBLE
    +
    +The purpose of this License is to make a manual, textbook, or other
    +functional and useful document "free" in the sense of freedom: to assure
    +everyone the effective freedom to copy and redistribute it, with or
    +without modifying it, either commercially or noncommercially. Secondarily,
    +this License preserves for the author and publisher a way to get credit
    +for their work, while not being considered responsible for modifications
    +made by others.
    +
    +This License is a kind of "copyleft", which means that derivative works
    +of the document must themselves be free in the same sense. It complements
    +the GNU General Public License, which is a copyleft license designed
    +for free software.
    +
    +We have designed this License in order to use it for manuals for free
    +software, because free software needs free documentation: a free program
    +should come with manuals providing the same freedoms that the software
    +does. But this License is not limited to software manuals; it can be
    +used for any textual work, regardless of subject matter or whether it
    +is published as a printed book. We recommend this License principally
    +for works whose purpose is instruction or reference.
    +
    +1. APPLICABILITY AND DEFINITIONS
    +
    +This License applies to any manual or other work, in any medium,
    +that contains a notice placed by the copyright holder saying it can
    +be distributed under the terms of this License. Such a notice grants
    +a world-wide, royalty-free license, unlimited in duration, to use that
    +work under the conditions stated herein. The "Document", below, refers
    +to any such manual or work. Any member of the public is a licensee,
    +and is addressed as "you". You accept the license if you copy, modify
    +or distribute the work in a way requiring permission under copyright law.
    +
    +A "Modified Version" of the Document means any work containing the
    +Document or a portion of it, either copied verbatim, or with modifications
    +and/or translated into another language.
    +
    +A "Secondary Section" is a named appendix or a front-matter section
    +of the Document that deals exclusively with the relationship of the
    +publishers or authors of the Document to the Document's overall subject
    +(or to related matters) and contains nothing that could fall directly
    +within that overall subject. (Thus, if the Document is in part a textbook
    +of mathematics, a Secondary Section may not explain any mathematics.) The
    +relationship could be a matter of historical connection with the subject
    +or with related matters, or of legal, commercial, philosophical, ethical
    +or political position regarding them.
    +
    +The "Invariant Sections" are certain Secondary Sections whose titles are
    +designated, as being those of Invariant Sections, in the notice that
    +says that the Document is released under this License. If a section
    +does not fit the above definition of Secondary then it is not allowed
    +to be designated as Invariant. The Document may contain zero Invariant
    +Sections. If the Document does not identify any Invariant Sections then
    +there are none.
    +
    +The "Cover Texts" are certain short passages of text that are listed,
    +as Front-Cover Texts or Back-Cover Texts, in the notice that says that
    +the Document is released under this License. A Front-Cover Text may be
    +at most 5 words, and a Back-Cover Text may be at most 25 words.
    +
    +A "Transparent" copy of the Document means a machine-readable copy,
    +represented in a format whose specification is available to the general
    +public, that is suitable for revising the document straightforwardly with
    +generic text editors or (for images composed of pixels) generic paint
    +programs or (for drawings) some widely available drawing editor, and that
    +is suitable for input to text formatters or for automatic translation to a
    +variety of formats suitable for input to text formatters. A copy made in
    +an otherwise Transparent file format whose markup, or absence of markup,
    +has been arranged to thwart or discourage subsequent modification by
    +readers is not Transparent. An image format is not Transparent if used
    +for any substantial amount of text. A copy that is not "Transparent"
    +is called "Opaque".
    +
    +Examples of suitable formats for Transparent copies include plain ASCII
    +without markup, Texinfo input format, LaTeX input format, SGML or XML
    +using a publicly available DTD, and standard-conforming simple HTML,
    +PostScript or PDF designed for human modification. Examples of transparent
    +image formats include PNG, XCF and JPG. Opaque formats include proprietary
    +formats that can be read and edited only by proprietary word processors,
    +SGML or XML for which the DTD and/or processing tools are not generally
    +available, and the machine-generated HTML, PostScript or PDF produced
    +by some word processors for output purposes only.
    +
    +The "Title Page" means, for a printed book, the title page itself,
    +plus such following pages as are needed to hold, legibly, the material
    +this License requires to appear in the title page. For works in formats
    +which do not have any title page as such, "Title Page" means the text
    +near the most prominent appearance of the work's title, preceding the
    +beginning of the body of the text.
    +
    +The "publisher" means any person or entity that distributes copies of
    +the Document to the public.
    +
    +A section "Entitled XYZ" means a named subunit of the Document whose
    +title either is precisely XYZ or contains XYZ in parentheses following
    +text that translates XYZ in another language. (Here XYZ stands for
    +a specific section name mentioned below, such as "Acknowledgements",
    +"Dedications", "Endorsements", or "History".) To "Preserve the Title"
    +of such a section when you modify the Document means that it remains a
    +section "Entitled XYZ" according to this definition.
    +
    +The Document may include Warranty Disclaimers next to the notice
    +which states that this License applies to the Document. These Warranty
    +Disclaimers are considered to be included by reference in this License,
    +but only as regards disclaiming warranties: any other implication that
    +these Warranty Disclaimers may have is void and has no effect on the
    +meaning of this License.
    +
    +2. VERBATIM COPYING
    +
    +You may copy and distribute the Document in any medium, either
    +commercially or noncommercially, provided that this License, the
    +copyright notices, and the license notice saying this License applies
    +to the Document are reproduced in all copies, and that you add no other
    +conditions whatsoever to those of this License. You may not use technical
    +measures to obstruct or control the reading or further copying of the
    +copies you make or distribute. However, you may accept compensation in
    +exchange for copies. If you distribute a large enough number of copies
    +you must also follow the conditions in section 3.
    +
    +You may also lend copies, under the same conditions stated above, and
    +you may publicly display copies.
    +
    +3. COPYING IN QUANTITY
    +
    +If you publish printed copies (or copies in media that commonly have
    +printed covers) of the Document, numbering more than 100, and the
    +Document's license notice requires Cover Texts, you must enclose the
    +copies in covers that carry, clearly and legibly, all these Cover Texts:
    +Front-Cover Texts on the front cover, and Back-Cover Texts on the back
    +cover. Both covers must also clearly and legibly identify you as the
    +publisher of these copies. The front cover must present the full title
    +with all words of the title equally prominent and visible. You may add
    +other material on the covers in addition. Copying with changes limited to
    +the covers, as long as they preserve the title of the Document and satisfy
    +these conditions, can be treated as verbatim copying in other respects.
    +
    +If the required texts for either cover are too voluminous to fit legibly,
    +you should put the first ones listed (as many as fit reasonably) on the
    +actual cover, and continue the rest onto adjacent pages.
    +
    +If you publish or distribute Opaque copies of the Document numbering
    +more than 100, you must either include a machine-readable Transparent
    +copy along with each Opaque copy, or state in or with each Opaque copy
    +a computer-network location from which the general network-using public
    +has access to download using public-standard network protocols a complete
    +Transparent copy of the Document, free of added material. If you use the
    +latter option, you must take reasonably prudent steps, when you begin
    +distribution of Opaque copies in quantity, to ensure that this Transparent
    +copy will remain thus accessible at the stated location until at least
    +one year after the last time you distribute an Opaque copy (directly or
    +through your agents or retailers) of that edition to the public.
    +
    +It is requested, but not required, that you contact the authors of the
    +Document well before redistributing any large number of copies, to give
    +them a chance to provide you with an updated version of the Document.
    +
    +4. MODIFICATIONS
    +
    +You may copy and distribute a Modified Version of the Document under
    +the conditions of sections 2 and 3 above, provided that you release
    +the Modified Version under precisely this License, with the Modified
    +Version filling the role of the Document, thus licensing distribution
    +and modification of the Modified Version to whoever possesses a copy of
    +it. In addition, you must do these things in the Modified Version:
    +
    +    * A. Use in the Title Page (and on the covers, if any) a title
    +    distinct from that of the Document, and from those of previous
    +    versions (which should, if there were any, be listed in the History
    +    section of the Document). You may use the same title as a previous
    +    version if the original publisher of that version gives permission.
    +
    +    * B. List on the Title Page, as authors, one or more persons or
    +    entities responsible for authorship of the modifications in the
    +    Modified Version, together with at least five of the principal authors
    +    of the Document (all of its principal authors, if it has fewer than
    +    five), unless they release you from this requirement.
    +
    +    * C. State on the Title page the name of the publisher of the Modified
    +    Version, as the publisher.
    +
    +    * D. Preserve all the copyright notices of the Document.
    +
    +    * E. Add an appropriate copyright notice for your modifications
    +    adjacent to the other copyright notices.
    +
    +    * F. Include, immediately after the copyright notices, a license
    +    notice giving the public permission to use the Modified Version under
    +    the terms of this License, in the form shown in the Addendum below.
    +
    +    * G. Preserve in that license notice the full lists of Invariant
    +    Sections and required Cover Texts given in the Document's license
    +    notice.
    +
    +    * H. Include an unaltered copy of this License.
    +
    +    * I. Preserve the section Entitled "History", Preserve its Title,
    +    and add to it an item stating at least the title, year, new authors,
    +    and publisher of the Modified Version as given on the Title Page. If
    +    there is no section Entitled "History" in the Document, create one
    +    stating the title, year, authors, and publisher of the Document as
    +    given on its Title Page, then add an item describing the Modified
    +    Version as stated in the previous sentence.
    +
    +    * J. Preserve the network location, if any, given in the Document for
    +    public access to a Transparent copy of the Document, and likewise
    +    the network locations given in the Document for previous versions
    +    it was based on. These may be placed in the "History" section. You
    +    may omit a network location for a work that was published at least
    +    four years before the Document itself, or if the original publisher
    +    of the version it refers to gives permission.
    +
    +    * K. For any section Entitled "Acknowledgements" or "Dedications",
    +    Preserve the Title of the section, and preserve in the section all
    +    the substance and tone of each of the contributor acknowledgements
    +    and/or dedications given therein.
    +
    +    * L. Preserve all the Invariant Sections of the Document, unaltered
    +    in their text and in their titles. Section numbers or the equivalent
    +    are not considered part of the section titles.
    +
    +    * M. Delete any section Entitled "Endorsements". Such a section may
    +    not be included in the Modified Version.
    +
    +    * N. Do not retitle any existing section to be Entitled "Endorsements"
    +    or to conflict in title with any Invariant Section.
    +
    +    * O. Preserve any Warranty Disclaimers.
    +
    +If the Modified Version includes new front-matter sections or appendices
    +that qualify as Secondary Sections and contain no material copied
    +from the Document, you may at your option designate some or all of
    +these sections as invariant. To do this, add their titles to the list
    +of Invariant Sections in the Modified Version's license notice. These
    +titles must be distinct from any other section titles.
    +
    +You may add a section Entitled "Endorsements", provided it contains
    +nothing but endorsements of your Modified Version by various parties, for
    +example, statements of peer review or that the text has been approved
    +by an organization as the authoritative definition of a standard.
    +
    +You may add a passage of up to five words as a Front-Cover Text, and a
    +passage of up to 25 words as a Back-Cover Text, to the end of the list
    +of Cover Texts in the Modified Version. Only one passage of Front-Cover
    +Text and one of Back-Cover Text may be added by (or through arrangements
    +made by) any one entity. If the Document already includes a cover text
    +for the same cover, previously added by you or by arrangement made by
    +the same entity you are acting on behalf of, you may not add another;
    +but you may replace the old one, on explicit permission from the previous
    +publisher that added the old one.
    +
    +The author(s) and publisher(s) of the Document do not by this License
    +give permission to use their names for publicity for or to assert or
    +imply endorsement of any Modified Version.
    +
    +5. COMBINING DOCUMENTS
    +
    +You may combine the Document with other documents released under this
    +License, under the terms defined in section 4 above for modified versions,
    +provided that you include in the combination all of the Invariant
    +Sections of all of the original documents, unmodified, and list them
    +all as Invariant Sections of your combined work in its license notice,
    +and that you preserve all their Warranty Disclaimers.
    +
    +The combined work need only contain one copy of this License, and multiple
    +identical Invariant Sections may be replaced with a single copy. If there
    +are multiple Invariant Sections with the same name but different contents,
    +make the title of each such section unique by adding at the end of it,
    +in parentheses, the name of the original author or publisher of that
    +section if known, or else a unique number. Make the same adjustment
    +to the section titles in the list of Invariant Sections in the license
    +notice of the combined work.
    +
    +In the combination, you must combine any sections Entitled "History" in
    +the various original documents, forming one section Entitled "History";
    +likewise combine any sections Entitled "Acknowledgements", and any
    +sections Entitled "Dedications". You must delete all sections Entitled
    +"Endorsements".
    +
    +6. COLLECTIONS OF DOCUMENTS
    +
    +You may make a collection consisting of the Document and other documents
    +released under this License, and replace the individual copies of this
    +License in the various documents with a single copy that is included in
    +the collection, provided that you follow the rules of this License for
    +verbatim copying of each of the documents in all other respects.
    +
    +You may extract a single document from such a collection, and distribute
    +it individually under this License, provided you insert a copy of this
    +License into the extracted document, and follow this License in all
    +other respects regarding verbatim copying of that document.
    +
    +7. AGGREGATION WITH INDEPENDENT WORKS
    +
    +A compilation of the Document or its derivatives with other separate
    +and independent documents or works, in or on a volume of a storage
    +or distribution medium, is called an "aggregate" if the copyright
    +resulting from the compilation is not used to limit the legal rights of
    +the compilation's users beyond what the individual works permit. When
    +the Document is included in an aggregate, this License does not apply
    +to the other works in the aggregate which are not themselves derivative
    +works of the Document.
    +
    +If the Cover Text requirement of section 3 is applicable to these copies
    +of the Document, then if the Document is less than one half of the
    +entire aggregate, the Document's Cover Texts may be placed on covers that
    +bracket the Document within the aggregate, or the electronic equivalent
    +of covers if the Document is in electronic form. Otherwise they must
    +appear on printed covers that bracket the whole aggregate.
    +
    +8. TRANSLATION
    +
    +Translation is considered a kind of modification, so you may distribute
    +translations of the Document under the terms of section 4. Replacing
    +Invariant Sections with translations requires special permission from
    +their copyright holders, but you may include translations of some or all
    +Invariant Sections in addition to the original versions of these Invariant
    +Sections. You may include a translation of this License, and all the
    +license notices in the Document, and any Warranty Disclaimers, provided
    +that you also include the original English version of this License and
    +the original versions of those notices and disclaimers. In case of a
    +disagreement between the translation and the original version of this
    +License or a notice or disclaimer, the original version will prevail.
    +
    +If a section in the Document is Entitled "Acknowledgements",
    +"Dedications", or "History", the requirement (section 4) to Preserve
    +its Title (section 1) will typically require changing the actual title.
    +
    +9. TERMINATION
    +
    +You may not copy, modify, sublicense, or distribute the Document except
    +as expressly provided under this License. Any attempt otherwise to copy,
    +modify, sublicense, or distribute it is void, and will automatically
    +terminate your rights under this License.
    +
    +However, if you cease all violation of this License, then your license
    +from a particular copyright holder is reinstated (a) provisionally,
    +unless and until the copyright holder explicitly and finally terminates
    +your license, and (b) permanently, if the copyright holder fails to
    +notify you of the violation by some reasonable means prior to 60 days
    +after the cessation.
    +
    +Moreover, your license from a particular copyright holder is reinstated
    +permanently if the copyright holder notifies you of the violation by
    +some reasonable means, this is the first time you have received notice of
    +violation of this License (for any work) from that copyright holder, and
    +you cure the violation prior to 30 days after your receipt of the notice.
    +
    +Termination of your rights under this section does not terminate the
    +licenses of parties who have received copies or rights from you under
    +this License. If your rights have been terminated and not permanently
    +reinstated, receipt of a copy of some or all of the same material does
    +not give you any rights to use it.
    +
    +10. FUTURE REVISIONS OF THIS LICENSE
    +
    +The Free Software Foundation may publish new, revised versions of the
    +GNU Free Documentation License from time to time. Such new versions will
    +be similar in spirit to the present version, but may differ in detail
    +to address new problems or concerns. See http://www.gnu.org/copyleft/.
    +
    +Each version of the License is given a distinguishing version number. If
    +the Document specifies that a particular numbered version of this License
    +"or any later version" applies to it, you have the option of following
    +the terms and conditions either of that specified version or of any later
    +version that has been published (not as a draft) by the Free Software
    +Foundation. If the Document does not specify a version number of this
    +License, you may choose any version ever published (not as a draft) by
    +the Free Software Foundation. If the Document specifies that a proxy can
    +decide which future versions of this License can be used, that proxy's
    +public statement of acceptance of a version permanently authorizes you
    +to choose that version for the Document.
    +
    +11. RELICENSING
    +
    +"Massive Multiauthor Collaboration Site" (or "MMC Site") means any
    +World Wide Web server that publishes copyrightable works and also
    +provides prominent facilities for anybody to edit those works. A public
    +wiki that anybody can edit is an example of such a server. A "Massive
    +Multiauthor Collaboration" (or "MMC") contained in the site means any
    +set of copyrightable works thus published on the MMC site.
    +
    +"CC-BY-SA" means the Creative Commons Attribution-Share Alike 3.0 license
    +published by Creative Commons Corporation, a not-for-profit corporation
    +with a principal place of business in San Francisco, California, as
    +well as future copyleft versions of that license published by that same
    +organization.
    +
    +"Incorporate" means to publish or republish a Document, in whole or in
    +part, as part of another Document.
    +
    +An MMC is "eligible for relicensing" if it is licensed under this License,
    +and if all works that were first published under this License somewhere
    +other than this MMC, and subsequently incorporated in whole or in part
    +into the MMC, (1) had no cover texts or invariant sections, and (2)
    +were thus incorporated prior to November 1, 2008.
    +
    +The operator of an MMC Site may republish an MMC contained in the site
    +under CC-BY-SA on the same site at any time before August 1, 2009,
    +provided the MMC is eligible for relicensing.
    +
    +ADDENDUM: How to use this License for your documents
    +
    +To use this License in a document you have written, include a copy of
    +the License in the document and put the following copyright and license
    +notices just after the title page:
    +
    +    Copyright (C)  YEAR  YOUR NAME.
    +    Permission is granted to copy, distribute and/or modify this document
    +    under the terms of the GNU Free Documentation License, Version 1.3
    +    or any later version published by the Free Software Foundation;
    +    with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
    +    A copy of the license is included in the section entitled "GNU
    +    Free Documentation License".
    +
    +If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts,
    +replace the "with ... Texts." line with this:
    +
    +    with the Invariant Sections being LIST THEIR TITLES, with the
    +    Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST.
    +
    +If you have Invariant Sections without Cover Texts, or some other
    +combination of the three, merge those two alternatives to suit the
    +situation.
    +
    +If your document contains nontrivial examples of program code, we
    +recommend releasing these examples in parallel under your choice of
    +free software license, such as the GNU General Public License, to permit
    +their use in free software.
    +
    +
    +
    +--------------- SECTION 12: Artistic License, V1.0 -----------
    +
    +
    +The Artistic License
    +
    +Preamble
    +
    +The intent of this document is to state the conditions under which a Package may be copied, such that the Copyright Holder maintains some semblance of artistic control over the development of the package, while giving the users of the package the right to use and distribute the Package in a more-or-less customary fashion, plus the right to make reasonable modifications.
    +
    +Definitions:
    +
    +"Package" refers to the collection of files distributed by the Copyright Holder, and derivatives of that collection of files created through textual modification.
    +
    +"Standard Version" refers to such a Package if it has not been modified, or has been modified in accordance with the wishes of the Copyright Holder.
    +
    +"Copyright Holder" is whoever is named in the copyright or copyrights for the package.
    +
    +"You" is you, if you're thinking about copying or distributing this Package.
    +
    +"Reasonable copying fee" is whatever you can justify on the basis of media cost, duplication charges, time of people involved, and so on. (You will not be required to justify it to the Copyright Holder, but only to the computing community at large as a market that must bear the fee.)
    +
    +"Freely Available" means that no fee is charged for the item itself, though there may be fees involved in handling the item. It also means that recipients of the item may redistribute it under the same conditions they received it.
    +
    +1. You may make and give away verbatim copies of the source form of the Standard Version of this Package without restriction, provided that you duplicate all of the original copyright notices and associated disclaimers.
    +
    +2. You may apply bug fixes, portability fixes and other modifications derived from the Public Domain or from the Copyright Holder. A Package modified in such a way shall still be considered the Standard Version.
    +
    +3. You may otherwise modify your copy of this Package in any way, provided that you insert a prominent notice in each changed file stating how and when you changed that file, and provided that you do at least ONE of the following:
    +
    +a) place your modifications in the Public Domain or otherwise make them Freely Available, such as by posting said modifications to Usenet or an equivalent medium, or placing the modifications on a major archive site such as ftp.uu.net, or by allowing the Copyright Holder to include your modifications in the Standard Version of the Package.
    +
    +b) use the modified Package only within your corporation or organization.
    +
    +c) rename any non-standard executables so the names do not conflict with standard executables, which must also be provided, and provide a separate manual page for each non-standard executable that clearly documents how it differs from the Standard Version.
    +
    +d) make other distribution arrangements with the Copyright Holder.
    +
    +4. You may distribute the programs of this Package in object code or executable form, provided that you do at least ONE of the following:
    +
    +a) distribute a Standard Version of the executables and library files, together with instructions (in the manual page or equivalent) on where to get the Standard Version.
    +
    +b) accompany the distribution with the machine-readable source of the Package with your modifications.
    +
    +c) accompany any non-standard executables with their corresponding Standard Version executables, giving the non-standard executables non-standard names, and clearly documenting the differences in manual pages (or equivalent), together with instructions on where to get the Standard Version.
    +
    +d) make other distribution arrangements with the Copyright Holder.
    +
    +5. You may charge a reasonable copying fee for any distribution of this Package. You may charge any fee you choose for support of this Package. You may not charge a fee for this Package itself. However, you may distribute this Package in aggregate with other (possibly commercial) programs as part of a larger (possibly commercial) software distribution provided that you do not advertise this Package as a product of your own.
    +
    +6. The scripts and library files supplied as input to or produced as output from the programs of this Package do not automatically fall under the copyright of this Package, but belong to whomever generated them, and may be sold commercially, and may be aggregated with this Package.
    +
    +7. C or perl subroutines supplied by you and linked into this Package shall not be considered part of this Package.
    +
    +8. The name of the Copyright Holder may not be used to endorse or promote products derived from this software without specific prior written permission.
    +
    +9. THIS PACKAGE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
    +
    +The End
    +
    +
    +
    +--------------- SECTION 13: Artistic License, V2.0 -----------
    +
    +
    +Artistic License 2.0
    +
    +Copyright (c) 2000-2006, The Perl Foundation.
    +
    +Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
    +
    +Preamble
    +This license establishes the terms under which a given free software Package may be copied, modified, distributed, and/or redistributed. The intent is that the Copyright Holder maintains some artistic control over the development of that Package while still keeping the Package available as open source and free software.
    +
    +You are always permitted to make arrangements wholly outside of this license directly with the Copyright Holder of a given Package. If the terms of this license do not permit the full use that you propose to make of the Package, you should contact the Copyright Holder and seek a different licensing arrangement.
    +
    +Definitions
    +
    +"Copyright Holder" means the individual(s) or organization(s) named in the copyright notice for the entire Package.
    +
    +"Contributor" means any party that has contributed code or other material to the Package, in accordance with the Copyright Holder's procedures.
    +
    +"You" and "your" means any person who would like to copy, distribute, or modify the Package.
    +
    +"Package" means the collection of files distributed by the Copyright Holder, and derivatives of that collection and/or of those files. A given Package may consist of either the Standard Version, or a Modified Version.
    +
    +"Distribute" means providing a copy of the Package or making it accessible to anyone else, or in the case of a company or organization, to others outside of your company or organization.
    +
    +"Distributor Fee" means any fee that you charge for Distributing this Package or providing support for this Package to another party. It does not mean licensing fees.
    +
    +"Standard Version" refers to the Package if it has not been modified, or has been modified only in ways explicitly requested by the Copyright Holder.
    +
    +"Modified Version" means the Package, if it has been changed, and such changes were not explicitly requested by the Copyright Holder.
    +
    +"Original License" means this Artistic License as Distributed with the Standard Version of the Package, in its current version or as it may be modified by The Perl Foundation in the future.
    +
    +"Source" form means the source code, documentation source, and configuration files for the Package.
    +
    +"Compiled" form means the compiled bytecode, object code, binary, or any other form resulting from mechanical transformation or translation of the Source form.
    +Permission for Use and Modification Without Distribution
    +
    +(1) You are permitted to use the Standard Version and create and use Modified Versions for any purpose without restriction, provided that you do not Distribute the Modified Version.
    +
    +Permissions for Redistribution of the Standard Version
    +
    +(2) You may Distribute verbatim copies of the Source form of the Standard Version of this Package in any medium without restriction, either gratis or for a Distributor Fee, provided that you duplicate all of the original copyright notices and associated disclaimers. At your discretion, such verbatim copies may or may not include a Compiled form of the Package.
    +
    +(3) You may apply any bug fixes, portability changes, and other modifications made available from the Copyright Holder. The resulting Package will still be considered the Standard Version, and as such will be subject to the Original License.
    +
    +Distribution of Modified Versions of the Package as Source
    +
    +(4) You may Distribute your Modified Version as Source (either gratis or for a Distributor Fee, and with or without a Compiled form of the Modified Version) provided that you clearly document how it differs from the Standard Version, including, but not limited to, documenting any non-standard features, executables, or modules, and provided that you do at least ONE of 
    +the following:
    +
    +(a) make the Modified Version available to the Copyright Holder of the Standard Version, under the Original License, so that the Copyright Holder may include your modifications in the Standard Version. 
    +
    +(b) ensure that installation of your Modified Version does not prevent the user installing or running the Standard Version. In addition, the Modified Version must bear a name that is different from the name of the Standard Version. 
    +
    +(c) allow anyone who receives a copy of the Modified Version to make the Source form of the Modified Version available to others under 
    +
    +(i) the Original License or 
    +(ii) a license that permits the licensee to freely copy, modify and redistribute the Modified Version using the same licensing terms that apply to the copy that the licensee received, and requires that the Source form of the Modified Version, and of any works derived from it, be made freely available in that license fees are prohibited but Distributor Fees are allowed.
    +
    +Distribution of Compiled Forms of the Standard Version or Modified Versions 
    +without the Source
    +
    +(5) You may Distribute Compiled forms of the Standard Version without the Source, provided that you include complete instructions on how to get the Source of the Standard Version. Such instructions must be valid at the time of your distribution. If these instructions, at any time while you are carrying out such distribution, become invalid, you must provide new instructions on demand or cease further distribution. If you provide valid instructions or cease distribution within thirty days after you become aware that the instructions are invalid, then you do not forfeit any of your rights under this license.
    +
    +(6) You may Distribute a Modified Version in Compiled form without the Source, provided that you comply with Section 4 with respect to the Source of the Modified Version.
    +
    +Aggregating or Linking the Package
    +
    +(7) You may aggregate the Package (either the Standard Version or Modified Version) with other packages and Distribute the resulting aggregation provided that you do not charge a licensing fee for the Package. Distributor Fees are permitted, and licensing fees for other components in the aggregation are permitted. The terms of this license apply to the use and Distribution of the Standard or Modified Versions as included in the aggregation.
    +
    +(8) You are permitted to link Modified and Standard Versions with other works, to embed the Package in a larger work of your own, or to build stand-alone binary or bytecode versions of applications that include the Package, and Distribute the result without restriction, provided the result does not expose a direct interface to the Package.
    +
    +Items That are Not Considered Part of a Modified Version
    +
    +(9) Works (including, but not limited to, modules and scripts) that merely extend or make use of the Package, do not, by themselves, cause the Package to be a Modified Version. In addition, such works are not considered parts of the Package itself, and are not subject to the terms of this license.
    +
    +General Provisions
    +
    +(10) Any use, modification, and distribution of the Standard or Modified Versions is governed by this Artistic License. By using, modifying or distributing the Package, you accept this license. Do not use, modify, or distribute the Package, if you do not accept this license.
    +
    +(11) If your Modified Version has been derived from a Modified Version made by someone other than you, you are nevertheless required to ensure that your Modified Version complies with the requirements of this license.
    +
    +(12) This license does not grant you the right to use any trademark, service mark, tradename, or logo of the Copyright Holder.
    +
    +(13) This license includes the non-exclusive, worldwide, free-of-charge patent license to make, have made, use, offer to sell, sell, import and otherwise transfer the Package with respect to any patent claims licensable by the Copyright Holder that are necessarily infringed by the Package. 
    +
    +If you institute patent litigation (including a cross-claim or counterclaim) against any party alleging that the Package constitutes direct or contributory patent infringement, then this Artistic License to you shall terminate on the date that such litigation is filed.
    +
    +(14) Disclaimer of Warranty: THE PACKAGE IS PROVIDED BY THE COPYRIGHT HOLDER 
    +AND CONTRIBUTORS "AS IS' AND WITHOUT ANY EXPRESS OR IMPLIED 
    +WARRANTIES. THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 
    +PARTICULAR PURPOSE, OR NON-INFRINGEMENT ARE DISCLAIMED TO THE EXTENT 
    +PERMITTED BY YOUR LOCAL LAW. UNLESS REQUIRED BY LAW, NO COPYRIGHT 
    +HOLDER OR CONTRIBUTOR WILL BE LIABLE FOR ANY DIRECT, INDIRECT, 
    +INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING IN ANY WAY OUT OF THE 
    +USE OF THE PACKAGE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    +
    +
    +
    +--------------- SECTION 14: Microsoft Permissive Public License -----------
    +
    +
    +Microsoft Public License (Ms-PL)
    +
    +This license governs use of the accompanying software. If you use the
    +software, you accept this license. If you do not accept the license,
    +do not use the software.
    +
    +1. Definitions
    +
    +The terms "reproduce," "reproduction," "derivative works," and
    +"distribution" have the same meaning here as under U.S. copyright law.
    +
    +A "contribution" is the original software, or any additions or changes
    +to the software.
    +
    +A "contributor" is any person that distributes its contribution under
    +this license.
    +
    +"Licensed patents" are a contributor's patent claims that read directly
    +on its contribution.
    +
    +2. Grant of Rights
    +
    +(A) Copyright Grant- Subject to the terms of this license, including
    +the license conditions and limitations in section 3, each contributor
    +grants you a non-exclusive, worldwide, royalty-free copyright license to
    +reproduce its contribution, prepare derivative works of its contribution,
    +and distribute its contribution or any derivative works that you create.
    +
    +(B) Patent Grant- Subject to the terms of this license, including the
    +license conditions and limitations in section 3, each contributor grants
    +you a non-exclusive, worldwide, royalty-free license under its licensed
    +patents to make, have made, use, sell, offer for sale, import, and/or
    +otherwise dispose of its contribution in the software or derivative
    +works of the contribution in the software.
    +
    +3. Conditions and Limitations
    +
    +(A) No Trademark License- This license does not grant you rights to use
    +any contributors' name, logo, or trademarks.
    +
    +(B) If you bring a patent claim against any contributor over patents
    +that you claim are infringed by the software, your patent license from
    +such contributor to the software ends automatically.
    +
    +(C) If you distribute any portion of the software, you must retain all
    +copyright, patent, trademark, and attribution notices that are present
    +in the software.
    +
    +(D) If you distribute any portion of the software in source code form,
    +you may do so only under this license by including a complete copy of
    +this license with your distribution. If you distribute any portion of
    +the software in compiled or object code form, you may only do so under
    +a license that complies with this license.
    +
    +(E) The software is licensed "as-is." You bear the risk of using it. The
    +contributors give no express warranties, guarantees or conditions. You
    +may have additional consumer rights under your local laws which this
    +license cannot change. To the extent permitted under your local laws,
    +the contributors exclude the implied warranties of merchantability,
    +fitness for a particular purpose and non-infringement.
    +
    +
    +
    +--------------- SECTION 15: Microsoft Limited Public License -----------
    +
    +
    +Microsoft Limited Public License (Ms-LPL)
    +Published: October 12, 2006
    +
    +This license governs use of the accompanying software. If you use the
    +software, you accept this license. If you do not accept the license,
    +do not use the software.
    +
    +1. Definitions
    +
    +The terms "reproduce," "reproduction," "derivative works," and
    +"distribution" have the same meaning here as under U.S. copyright law.
    +
    +A "contribution" is the original software, or any additions or changes
    +to the software.
    +
    +A "contributor" is any person that distributes its contribution under
    +this license.
    +
    +"Licensed patents" are a contributor's patent claims that read directly
    +on its contribution.
    +
    +
    +2. Grant of Rights
    +
    +(A) Copyright Grant- Subject to the terms of this license, including
    +the license conditions and limitations in section 3, each contributor
    +grants you a non-exclusive, worldwide, royalty-free copyright license to
    +reproduce its contribution, prepare derivative works of its contribution,
    +and distribute its contribution or any derivative works that you create.
    +
    +(B) Patent Grant- Subject to the terms of this license, including the
    +license conditions and limitations in section 3, each contributor grants
    +you a non-exclusive, worldwide, royalty-free license under its licensed
    +patents to make, have made, use, sell, offer for sale, import, and/or
    +otherwise dispose of its contribution in the software or derivative
    +works of the contribution in the software.
    +
    +3. Conditions and Limitations
    +
    +(A) No Trademark License- This license does not grant you rights to use
    +any contributors' name, logo, or trademarks.
    +
    +(B) If you bring a patent claim against any contributor over patents
    +that you claim are infringed by the software, your patent license from
    +such contributor to the software ends automatically.
    +
    +(C) If you distribute any portion of the software, you must retain all
    +copyright, patent, trademark, and attribution notices that are present
    +in the software.
    +
    +(D) If you distribute any portion of the software in source code form,
    +you may do so only under this license by including a complete copy of
    +this license with your distribution. If you distribute any portion of
    +the software in compiled or object code form, you may only do so under
    +a license that complies with this license.
    +
    +(E) The software is licensed "as-is." You bear the risk of using it. The
    +contributors give no express warranties, guarantees or conditions. You
    +may have additional consumer rights under your local laws which this
    +license cannot change. To the extent permitted under your local laws,
    +the contributors exclude the implied warranties of merchantability,
    +fitness for a particular purpose and non-infringement.
    +
    +(F) Platform Limitation- The licenses granted in sections 2(A) & 2(B)
    +extend only to the software or derivative works that you create that
    +run on a Microsoft Windows operating system product.
    +
    +
    +
    +--------------- SECTION 16: Creative Commons Attribution License, V3.0 -----------
    +
    +
    +Creative Commons Attribution 3.0 Unported
    +
    +CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL
    +SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN ATTORNEY-CLIENT
    +RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS"
    +BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE INFORMATION
    +PROVIDED, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM ITS USE.
    +
    +License
    +
    +THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE
    +COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY
    +COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN
    +AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
    +
    +BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE
    +TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE
    +MAY BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS
    +CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND
    +CONDITIONS.
    +
    +1. Definitions
    +
    +   a. "Adaptation" means a work based upon the Work, or upon the Work
    +   and other pre-existing works, such as a translation, adaptation,
    +   derivative work, arrangement of music or other alterations of a
    +   literary or artistic work, or phonogram or performance and includes
    +   cinematographic adaptations or any other form in which the Work may
    +   be recast, transformed, or adapted including in any form recognizably
    +   derived from the original, except that a work that constitutes a
    +   Collection will not be considered an Adaptation for the purpose of
    +   this License. For the avoidance of doubt, where the Work is a musical
    +   work, performance or phonogram, the synchronization of the Work in
    +   timed-relation with a moving image ("synching") will be considered
    +   an Adaptation for the purpose of this License.
    +
    +   b. "Collection" means a collection of literary or artistic works,
    +   such as encyclopedias and anthologies, or performances, phonograms or
    +   broadcasts, or other works or subject matter other than works listed in
    +   Section 1(f) below, which, by reason of the selection and arrangement
    +   of their contents, constitute intellectual creations, in which the
    +   Work is included in its entirety in unmodified form along with one or
    +   more other contributions, each constituting separate and independent
    +   works in themselves, which together are assembled into a collective
    +   whole. A work that constitutes a Collection will not be considered
    +   an Adaptation (as defined above) for the purposes of this License.
    +
    +   c. "Distribute" means to make available to the public the original
    +   and copies of the Work or Adaptation, as appropriate, through sale
    +   or other transfer of ownership.
    +
    +   d. "Licensor" means the individual, individuals, entity or entities
    +   that offer(s) the Work under the terms of this License.
    +
    +   e. "Original Author" means, in the case of a literary or artistic
    +   work, the individual, individuals, entity or entities who created the
    +   Work or if no individual or entity can be identified, the publisher;
    +   and in addition (i) in the case of a performance the actors, singers,
    +   musicians, dancers, and other persons who act, sing, deliver, declaim,
    +   play in, interpret or otherwise perform literary or artistic works or
    +   expressions of folklore; (ii) in the case of a phonogram the producer
    +   being the person or legal entity who first fixes the sounds of a
    +   performance or other sounds; and, (iii) in the case of broadcasts,
    +   the organization that transmits the broadcast.
    +
    +   f. "Work" means the literary and/or artistic work offered under the
    +   terms of this License including without limitation any production
    +   in the literary, scientific and artistic domain, whatever may be
    +   the mode or form of its expression including digital form, such
    +   as a book, pamphlet and other writing; a lecture, address, sermon
    +   or other work of the same nature; a dramatic or dramatico-musical
    +   work; a choreographic work or entertainment in dumb show; a musical
    +   composition with or without words; a cinematographic work to which are
    +   assimilated works expressed by a process analogous to cinematography;
    +   a work of drawing, painting, architecture, sculpture, engraving
    +   or lithography; a photographic work to which are assimilated works
    +   expressed by a process analogous to photography; a work of applied art;
    +   an illustration, map, plan, sketch or three-dimensional work relative
    +   to geography, topography, architecture or science; a performance;
    +   a broadcast; a phonogram; a compilation of data to the extent it is
    +   protected as a copyrightable work; or a work performed by a variety
    +   or circus performer to the extent it is not otherwise considered a
    +   literary or artistic work.
    +
    +   g. "You" means an individual or entity exercising rights under this
    +   License who has not previously violated the terms of this License
    +   with respect to the Work, or who has received express permission
    +   from the Licensor to exercise rights under this License despite a
    +   previous violation.
    +
    +   h. "Publicly Perform" means to perform public recitations of the
    +   Work and to communicate to the public those public recitations, by
    +   any means or process, including by wire or wireless means or public
    +   digital performances; to make available to the public Works in such
    +   a way that members of the public may access these Works from a place
    +   and at a place individually chosen by them; to perform the Work to the
    +   public by any means or process and the communication to the public of
    +   the performances of the Work, including by public digital performance;
    +   to broadcast and rebroadcast the Work by any means including signs,
    +   sounds or images.
    +
    +   i. "Reproduce" means to make copies of the Work by any means including
    +   without limitation by sound or visual recordings and the right of
    +   fixation and reproducing fixations of the Work, including storage
    +   of a protected performance or phonogram in digital form or other
    +   electronic medium.
    +
    +2. Fair Dealing Rights. Nothing in this License is intended to reduce,
    +limit, or restrict any uses free from copyright or rights arising from
    +limitations or exceptions that are provided for in connection with the
    +copyright protection under copyright law or other applicable laws.
    +
    +3. License Grant. Subject to the terms and conditions of this License,
    +Licensor hereby grants You a worldwide, royalty-free, non-exclusive,
    +perpetual (for the duration of the applicable copyright) license to
    +exercise the rights in the Work as stated below:
    +
    +   a. to Reproduce the Work, to incorporate the Work into one or
    +   more Collections, and to Reproduce the Work as incorporated in the
    +   Collections;
    +
    +   b. to create and Reproduce Adaptations provided that any such
    +   Adaptation, including any translation in any medium, takes reasonable
    +   steps to clearly label, demarcate or otherwise identify that changes
    +   were made to the original Work. For example, a translation could be
    +   marked "The original work was translated from English to Spanish," or
    +   a modification could indicate "The original work has been modified.";
    +
    +   c. to Distribute and Publicly Perform the Work including as
    +   incorporated in Collections; and,
    +
    +   d. to Distribute and Publicly Perform Adaptations.
    +
    +   e. For the avoidance of doubt:
    +
    +      i. Non-waivable Compulsory License Schemes. In those jurisdictions
    +      in which the right to collect royalties through any statutory or
    +      compulsory licensing scheme cannot be waived, the Licensor reserves
    +      the exclusive right to collect such royalties for any exercise by
    +      You of the rights granted under this License;
    +
    +      ii. Waivable Compulsory License Schemes. In those jurisdictions
    +      in which the right to collect royalties through any statutory or
    +      compulsory licensing scheme can be waived, the Licensor waives the
    +      exclusive right to collect such royalties for any exercise by You
    +      of the rights granted under this License; and,
    +
    +      iii. Voluntary License Schemes. The Licensor waives the right
    +      to collect royalties, whether individually or, in the event that
    +      the Licensor is a member of a collecting society that administers
    +      voluntary licensing schemes, via that society, from any exercise
    +      by You of the rights granted under this License.  The above rights
    +      may be exercised in all media and formats whether now known or
    +      hereafter devised. The above rights include the right to make such
    +      modifications as are technically necessary to exercise the rights
    +      in other media and formats. Subject to Section 8(f), all rights
    +      not expressly granted by Licensor are hereby reserved.
    +
    +4. Restrictions. The license granted in Section 3 above is expressly
    +made subject to and limited by the following restrictions:
    +
    +   a. You may Distribute or Publicly Perform the Work only under the
    +   terms of this License.  You must include a copy of, or the Uniform
    +   Resource Identifier (URI) for, this License with every copy of the
    +   Work You Distribute or Publicly Perform. You may not offer or impose
    +   any terms on the Work that restrict the terms of this License or the
    +   ability of the recipient of the Work to exercise the rights granted to
    +   that recipient under the terms of the License. You may not sublicense
    +   the Work. You must keep intact all notices that refer to this License
    +   and to the disclaimer of warranties with every copy of the Work You
    +   Distribute or Publicly Perform. When You Distribute or Publicly Perform
    +   the Work, You may not impose any effective technological measures on
    +   the Work that restrict the ability of a recipient of the Work from
    +   You to exercise the rights granted to that recipient under the terms
    +   of the License. This Section 4(a) applies to the Work as incorporated
    +   in a Collection, but this does not require the Collection apart from
    +   the Work itself to be made subject to the terms of this License. If
    +   You create a Collection, upon notice from any Licensor You must,
    +   to the extent practicable, remove from the Collection any credit as
    +   required by Section 4(b), as requested. If You create an Adaptation,
    +   upon notice from any Licensor You must, to the extent practicable,
    +   remove from the Adaptation any credit as required by Section 4(b),
    +   as requested.
    +
    +   b. If You Distribute, or Publicly Perform the Work or any Adaptations
    +   or Collections, You must, unless a request has been made pursuant
    +   to Section 4(a), keep intact all copyright notices for the Work and
    +   provide, reasonable to the medium or means You are utilizing: (i) the
    +   name of the Original Author (or pseudonym, if applicable) if supplied,
    +   and/or if the Original Author and/or Licensor designate another party
    +   or parties (e.g., a sponsor institute, publishing entity, journal)
    +   for attribution ("Attribution Parties") in Licensor's copyright
    +   notice, terms of service or by other reasonable means, the name of
    +   such party or parties; (ii) the title of the Work if supplied; (iii)
    +   to the extent reasonably practicable, the URI, if any, that Licensor
    +   specifies to be associated with the Work, unless such URI does not
    +   refer to the copyright notice or licensing information for the Work;
    +   and (iv) , consistent with Section 3(b), in the case of an Adaptation,
    +   a credit identifying the use of the Work in the Adaptation (e.g.,
    +   "French translation of the Work by Original Author," or "Screenplay
    +   based on original Work by Original Author"). The credit required
    +   by this Section 4 (b) may be implemented in any reasonable manner;
    +   provided, however, that in the case of a Adaptation or Collection, at
    +   a minimum such credit will appear, if a credit for all contributing
    +   authors of the Adaptation or Collection appears, then as part of
    +   these credits and in a manner at least as prominent as the credits
    +   for the other contributing authors. For the avoidance of doubt, You
    +   may only use the credit required by this Section for the purpose of
    +   attribution in the manner set out above and, by exercising Your rights
    +   under this License, You may not implicitly or explicitly assert or
    +   imply any connection with, sponsorship or endorsement by the Original
    +   Author, Licensor and/or Attribution Parties, as appropriate, of You
    +   or Your use of the Work, without the separate, express prior written
    +   permission of the Original Author, Licensor and/or Attribution Parties.
    +
    +   c. Except as otherwise agreed in writing by the Licensor or as may be
    +   otherwise permitted by applicable law, if You Reproduce, Distribute
    +   or Publicly Perform the Work either by itself or as part of any
    +   Adaptations or Collections, You must not distort, mutilate, modify
    +   or take other derogatory action in relation to the Work which would
    +   be prejudicial to the Original Author's honor or reputation. Licensor
    +   agrees that in those jurisdictions (e.g. Japan), in which any exercise
    +   of the right granted in Section 3(b) of this License (the right to
    +   make Adaptations) would be deemed to be a distortion, mutilation,
    +   modification or other derogatory action prejudicial to the Original
    +   Author's honor and reputation, the Licensor will waive or not assert,
    +   as appropriate, this Section, to the fullest extent permitted by the
    +   applicable national law, to enable You to reasonably exercise Your
    +   right under Section 3(b) of this License (right to make Adaptations)
    +   but not otherwise.
    +
    +5. Representations, Warranties and Disclaimer
    +UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR
    +OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY
    +KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE,
    +INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY,
    +FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF
    +LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS,
    +WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION
    +OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.
    +
    +6. Limitation on Liability.
    +EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR
    +BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL,
    +CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS
    +LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF
    +THE POSSIBILITY OF SUCH DAMAGES.
    +
    +7. Termination
    +
    +   a. This License and the rights granted hereunder will terminate
    +   automatically upon any breach by You of the terms of this
    +   License. Individuals or entities who have received Adaptations or
    +   Collections from You under this License, however, will not have their
    +   licenses terminated provided such individuals or entities remain in
    +   full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8
    +   will survive any termination of this License.
    +
    +   b. Subject to the above terms and conditions, the license granted
    +   here is perpetual (for the duration of the applicable copyright in
    +   the Work). Notwithstanding the above, Licensor reserves the right to
    +   release the Work under different license terms or to stop distributing
    +   the Work at any time; provided, however that any such election will not
    +   serve to withdraw this License (or any other license that has been,
    +   or is required to be, granted under the terms of this License), and
    +   this License will continue in full force and effect unless terminated
    +   as stated above.
    +
    +8. Miscellaneous
    +
    +   a. Each time You Distribute or Publicly Perform the Work or a
    +   Collection, the Licensor offers to the recipient a license to the
    +   Work on the same terms and conditions as the license granted to You
    +   under this License.
    +
    +   b. Each time You Distribute or Publicly Perform an Adaptation, Licensor
    +   offers to the recipient a license to the original Work on the same
    +   terms and conditions as the license granted to You under this License.
    +
    +   c. If any provision of this License is invalid or unenforceable under
    +   applicable law, it shall not affect the validity or enforceability
    +   of the remainder of the terms of this License, and without further
    +   action by the parties to this agreement, such provision shall be
    +   reformed to the minimum extent necessary to make such provision valid
    +   and enforceable.
    +
    +   d. No term or provision of this License shall be deemed waived and no
    +   breach consented to unless such waiver or consent shall be in writing
    +   and signed by the party to be charged with such waiver or consent.
    +
    +   e. This License constitutes the entire agreement between the parties
    +   with respect to the Work licensed here. There are no understandings,
    +   agreements or representations with respect to the Work not specified
    +   here. Licensor shall not be bound by any additional provisions that
    +   may appear in any communication from You. This License may not be
    +   modified without the mutual written agreement of the Licensor and You.
    +
    +   f. The rights granted under, and the subject matter referenced, in this
    +   License were drafted utilizing the terminology of the Berne Convention
    +   for the Protection of Literary and Artistic Works (as amended on
    +   September 28, 1979), the Rome Convention of 1961, the WIPO Copyright
    +   Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996 and
    +   the Universal Copyright Convention (as revised on July 24, 1971). These
    +   rights and subject matter take effect in the relevant jurisdiction
    +   in which the License terms are sought to be enforced according to
    +   the corresponding provisions of the implementation of those treaty
    +   provisions in the applicable national law. If the standard suite of
    +   rights granted under applicable copyright law includes additional
    +   rights not granted under this License, such additional rights are
    +   deemed to be included in the License; this License is not intended
    +   to restrict the license of any rights under applicable law.
    +
    +
    +
    +--------------- SECTION 17: Creative Commons Attribution 4.0 International -----------
    +
    +
    +Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.
    +
    +Using Creative Commons Public Licenses
    +
    +Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses.
    +
    +Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. More considerations for licensors.
    +
    +Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More considerations for the public.
    +
    +Creative Commons Attribution 4.0 International Public License
    +
    +By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.
    +
    +Section 1 – Definitions.
    +
    +Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.
    +Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.
    +Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.
    +Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.
    +Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.
    +Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License.
    +Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.
    +Licensor means the individual(s) or entity(ies) granting rights under this Public License.
    +Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.
    +Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.
    +You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning.
    +
    +Section 2 – Scope.
    +
    +License grant.
    +Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:
    +reproduce and Share the Licensed Material, in whole or in part; and
    +produce, reproduce, and Share Adapted Material.
    +Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.
    +Term. The term of this Public License is specified in Section 6(a).
    +Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.
    +Downstream recipients.
    +Offer from the Licensor – Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.
    +No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.
    +No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).
    +
    +Other rights.
    +Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.
    +Patent and trademark rights are not licensed under this Public License.
    +To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties.
    +
    +Section 3 – License Conditions.
    +
    +Your exercise of the Licensed Rights is expressly made subject to the following conditions.
    +
    +Attribution.
    +
    +If You Share the Licensed Material (including in modified form), You must:
    +retain the following if it is supplied by the Licensor with the Licensed Material:
    +identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);
    +a copyright notice;
    +a notice that refers to this Public License;
    +a notice that refers to the disclaimer of warranties;
    +a URI or hyperlink to the Licensed Material to the extent reasonably practicable;
    +indicate if You modified the Licensed Material and retain an indication of any previous modifications; and
    +indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.
    +You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.
    +If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.
    +If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License.
    +
    +Section 4 – Sui Generis Database Rights.
    +
    +Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:
    +
    +for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database;
    +if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and
    +You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.
    +
    +For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.
    +
    +Section 5 – Disclaimer of Warranties and Limitation of Liability.
    +
    +Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.
    +To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.
    +
    +The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.
    +
    +Section 6 – Term and Termination.
    +
    +This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.
    +
    +Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:
    +automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or
    +upon express reinstatement by the Licensor.
    +For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.
    +For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.
    +Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
    +
    +Section 7 – Other Terms and Conditions.
    +
    +The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.
    +Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.
    +
    +Section 8 – Interpretation.
    +
    +For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.
    +To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.
    +No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.
    +Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.
    +
    +Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses.
    +
    +Creative Commons may be contacted at creativecommons.org.
    +
    +
    +
    +--------------- SECTION 18: Creative Commons Attribution-ShareAlike, V4.0 -----------
    +
    +
    +Creative Commons Attribution-ShareAlike 4.0 International Public License
    +
    +Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.
    +
    +Using Creative Commons Public Licenses
    +
    +Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses.
    +
    +    Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. More considerations for licensors.
    +
    +    Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More considerations for the public.
    +
    +Creative Commons Attribution-ShareAlike 4.0 International Public License
    +
    +By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.
    +
    +Section 1 – Definitions.
    +
    +    Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.
    +    Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.
    +    BY-SA Compatible License means a license listed at creativecommons.org/compatiblelicenses, approved by Creative Commons as essentially the equivalent of this Public License.
    +    Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.
    +    Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.
    +    Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.
    +    License Elements means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution and ShareAlike.
    +    Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License.
    +    Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.
    +    Licensor means the individual(s) or entity(ies) granting rights under this Public License.
    +    Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.
    +    Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.
    +    You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning.
    +
    +Section 2 – Scope.
    +
    +    License grant.
    +        Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:
    +            reproduce and Share the Licensed Material, in whole or in part; and
    +            produce, reproduce, and Share Adapted Material.
    +        Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.
    +        Term. The term of this Public License is specified in Section 6(a).
    +        Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.
    +        Downstream recipients.
    +            Offer from the Licensor – Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.
    +            Additional offer from the Licensor – Adapted Material. Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter’s License You apply.
    +            No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.
    +        No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).
    +
    +    Other rights.
    +        Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.
    +        Patent and trademark rights are not licensed under this Public License.
    +        To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties.
    +
    +Section 3 – License Conditions.
    +
    +Your exercise of the Licensed Rights is expressly made subject to the following conditions.
    +
    +    Attribution.
    +
    +        If You Share the Licensed Material (including in modified form), You must:
    +            retain the following if it is supplied by the Licensor with the Licensed Material:
    +                identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);
    +                a copyright notice;
    +                a notice that refers to this Public License;
    +                a notice that refers to the disclaimer of warranties;
    +                a URI or hyperlink to the Licensed Material to the extent reasonably practicable;
    +            indicate if You modified the Licensed Material and retain an indication of any previous modifications; and
    +            indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.
    +        You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.
    +        If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.
    +    ShareAlike.
    +
    +    In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply.
    +        The Adapter’s License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-SA Compatible License.
    +        You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material.
    +        You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply.
    +
    +Section 4 – Sui Generis Database Rights.
    +
    +Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:
    +
    +    for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database;
    +    if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and
    +    You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.
    +
    +For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.
    +
    +Section 5 – Disclaimer of Warranties and Limitation of Liability.
    +
    +    Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.
    +    To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.
    +
    +    The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.
    +
    +Section 6 – Term and Termination.
    +
    +    This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.
    +
    +    Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:
    +        automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or
    +        upon express reinstatement by the Licensor.
    +    For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.
    +    For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.
    +    Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
    +
    +Section 7 – Other Terms and Conditions.
    +
    +    The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.
    +    Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.
    +
    +Section 8 – Interpretation.
    +
    +    For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.
    +    To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.
    +    No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.
    +    Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.
    +
    +Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” The text of the Creative Commons public licenses is dedicated to the public domain under the CC0 Public Domain Dedication. Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses.
    +
    +Creative Commons may be contacted at creativecommons.org.
    +
    +
    +
    +--------------- SECTION 19: Creative Commons Attribution-ShareAlike, V3.0 -----------
    +
    +
    +Creative Commons Attribution-ShareAlike 3.0 Unported
    +
    +CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL
    +SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN ATTORNEY-CLIENT
    +RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS"
    +BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE INFORMATION
    +PROVIDED, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM ITS USE.
    +
    +License
    +THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE
    +COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED
    +BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER
    +THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
    +BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE
    +TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE
    +MAY BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS
    +CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND
    +CONDITIONS.
    +
    +1. Definitions
    +
    +   a. "Adaptation" means a work based upon the Work, or upon the Work
    +   and other pre-existing works, such as a translation, adaptation,
    +   derivative work, arrangement of music or other alterations of a
    +   literary or artistic work, or phonogram or performance and includes
    +   cinematographic adaptations or any other form in which the Work may
    +   be recast, transformed, or adapted including in any form recognizably
    +   derived from the original, except that a work that constitutes a
    +   Collection will not be considered an Adaptation for the purpose of
    +   this License. For the avoidance of doubt, where the Work is a musical
    +   work, performance or phonogram, the synchronization of the Work in
    +   timed-relation with a moving image ("synching") will be considered
    +   an Adaptation for the purpose of this License.
    +
    +   b. "Collection" means a collection of literary or artistic works,
    +   such as encyclopedias and anthologies, or performances, phonograms or
    +   broadcasts, or other works or subject matter other than works listed in
    +   Section 1(f) below, which, by reason of the selection and arrangement
    +   of their contents, constitute intellectual creations, in which the
    +   Work is included in its entirety in unmodified form along with one or
    +   more other contributions, each constituting separate and independent
    +   works in themselves, which together are assembled into a collective
    +   whole. A work that constitutes a Collection will not be considered
    +   an Adaptation (as defined below) for the purposes of this License.
    +
    +   c. "Creative Commons Compatible License" means a license that is
    +   listed at http://creativecommons.org/compatiblelicenses that has
    +   been approved by Creative Commons as being essentially equivalent
    +   to this License, including, at a minimum, because that license: (i)
    +   contains terms that have the same purpose, meaning and effect as the
    +   License Elements of this License; and, (ii) explicitly permits the
    +   relicensing of adaptations of works made available under that license
    +   under this License or a Creative Commons jurisdiction license with
    +   the same License Elements as this License.
    +
    +   d. "Distribute" means to make available to the public the original
    +   and copies of the Work or Adaptation, as appropriate, through sale
    +   or other transfer of ownership.
    +
    +   e. "License Elements" means the following high-level license attributes
    +   as selected by Licensor and indicated in the title of this License:
    +   Attribution, ShareAlike.
    +
    +   f. "Licensor" means the individual, individuals, entity or entities
    +   that offer(s) the Work under the terms of this License.
    +
    +   g. "Original Author" means, in the case of a literary or artistic
    +   work, the individual, individuals, entity or entities who created the
    +   Work or if no individual or entity can be identified, the publisher;
    +   and in addition (i) in the case of a performance the actors, singers,
    +   musicians, dancers, and other persons who act, sing, deliver, declaim,
    +   play in, interpret or otherwise perform literary or artistic works or
    +   expressions of folklore; (ii) in the case of a phonogram the producer
    +   being the person or legal entity who first fixes the sounds of a
    +   performance or other sounds; and, (iii) in the case of broadcasts,
    +   the organization that transmits the broadcast.
    +
    +   h. "Work" means the literary and/or artistic work offered under the
    +   terms of this License including without limitation any production
    +   in the literary, scientific and artistic domain, whatever may be
    +   the mode or form of its expression including digital form, such
    +   as a book, pamphlet and other writing; a lecture, address, sermon
    +   or other work of the same nature; a dramatic or dramatico-musical
    +   work; a choreographic work or entertainment in dumb show; a musical
    +   composition with or without words; a cinematographic work to which are
    +   assimilated works expressed by a process analogous to cinematography;
    +   a work of drawing, painting, architecture, sculpture, engraving
    +   or lithography; a photographic work to which are assimilated works
    +   expressed by a process analogous to photography; a work of applied art;
    +   an illustration, map, plan, sketch or three-dimensional work relative
    +   to geography, topography, architecture or science; a performance;
    +   a broadcast; a phonogram; a compilation of data to the extent it is
    +   protected as a copyrightable work; or a work performed by a variety
    +   or circus performer to the extent it is not otherwise considered a
    +   literary or artistic work.
    +
    +   i. "You" means an individual or entity exercising rights under this
    +   License who has not previously violated the terms of this License
    +   with respect to the Work, or who has received express permission
    +   from the Licensor to exercise rights under this License despite a
    +   previous violation.
    +
    +   j. "Publicly Perform" means to perform public recitations of the
    +   Work and to communicate to the public those public recitations, by
    +   any means or process, including by wire or wireless means or public
    +   digital performances; to make available to the public Works in such
    +   a way that members of the public may access these Works from a place
    +   and at a time individually chosen by them; to perform the Work to the
    +   public by any means or process and the communication to the public of
    +   the performances of the Work, including by public digital performance;
    +   to broadcast and rebroadcast the Work by any means including signs,
    +   sounds or images.
    +
    +   k. "Reproduce" means to make copies of the Work by any means including
    +   without limitation by sound or visual recordings and the right of
    +   fixation and reproducing fixations of the Work, including storage
    +   of a protected performance or phonogram in digital form or other
    +   electronic medium.
    +
    +2. Fair Dealing Rights.
    +Nothing in this License is intended to reduce, limit, or restrict any
    +uses free from copyright or rights arising from limitations or exceptions
    +that are provided for in connection with the copyright protection under
    +copyright law or other applicable laws.
    +
    +3. License Grant.
    +Subject to the terms and conditions of this License, Licensor hereby
    +grants You a worldwide, royalty-free, non-exclusive, perpetual (for the
    +duration of the applicable copyright) license to exercise the rights in
    +the Work as stated below:
    +
    +   a. to Reproduce the Work, to incorporate the Work into one or
    +   more Collections, and to Reproduce the Work as incorporated in the
    +   Collections;
    +
    +   b. to create and Reproduce Adaptations provided that any such
    +   Adaptation, including any translation in any medium, takes reasonable
    +   steps to clearly label, demarcate or otherwise identify that changes
    +   were made to the original Work. For example, a translation could be
    +   marked "The original work was translated from English to Spanish," or
    +   a modification could indicate "The original work has been modified.";
    +
    +   c. to Distribute and Publicly Perform the Work including as
    +   incorporated in Collections; and,
    +
    +   d. to Distribute and Publicly Perform Adaptations.
    +
    +   e. For the avoidance of doubt:
    +
    +      i. Non-waivable Compulsory License Schemes. In those jurisdictions
    +      in which the right to collect royalties through any statutory or
    +      compulsory licensing scheme cannot be waived, the Licensor reserves
    +      the exclusive right to collect such royalties for any exercise by
    +      You of the rights granted under this License;
    +
    +      ii. Waivable Compulsory License Schemes. In those jurisdictions
    +      in which the right to collect royalties through any statutory or
    +      compulsory licensing scheme can be waived, the Licensor waives the
    +      exclusive right to collect such royalties for any exercise by You
    +      of the rights granted under this License; and,
    +
    +      iii. Voluntary License Schemes. The Licensor waives the right
    +      to collect royalties, whether individually or, in the event that
    +      the Licensor is a member of a collecting society that administers
    +      voluntary licensing schemes, via that society, from any exercise
    +      by You of the rights granted under this License.  The above rights
    +      may be exercised in all media and formats whether now known or
    +      hereafter devised. The above rights include the right to make such
    +      modifications as are technically necessary to exercise the rights
    +      in other media and formats. Subject to Section 8(f), all rights
    +      not expressly granted by Licensor are hereby reserved.
    +
    +4. Restrictions.
    +The license granted in Section 3 above is expressly made subject to and
    +limited by the following restrictions:
    +
    +   a. You may Distribute or Publicly Perform the Work only under the
    +   terms of this License.  You must include a copy of, or the Uniform
    +   Resource Identifier (URI) for, this License with every copy of the
    +   Work You Distribute or Publicly Perform. You may not offer or impose
    +   any terms on the Work that restrict the terms of this License or the
    +   ability of the recipient of the Work to exercise the rights granted to
    +   that recipient under the terms of the License. You may not sublicense
    +   the Work. You must keep intact all notices that refer to this License
    +   and to the disclaimer of warranties with every copy of the Work You
    +   Distribute or Publicly Perform. When You Distribute or Publicly Perform
    +   the Work, You may not impose any effective technological measures on
    +   the Work that restrict the ability of a recipient of the Work from
    +   You to exercise the rights granted to that recipient under the terms
    +   of the License. This Section 4(a) applies to the Work as incorporated
    +   in a Collection, but this does not require the Collection apart from
    +   the Work itself to be made subject to the terms of this License. If
    +   You create a Collection, upon notice from any Licensor You must,
    +   to the extent practicable, remove from the Collection any credit as
    +   required by Section 4(c), as requested. If You create an Adaptation,
    +   upon notice from any Licensor You must, to the extent practicable,
    +   remove from the Adaptation any credit as required by Section 4(c),
    +   as requested.
    +
    +   b. You may Distribute or Publicly Perform an Adaptation only under the
    +   terms of: (i) this License; (ii) a later version of this License with
    +   the same License Elements as this License; (iii) a Creative Commons
    +   jurisdiction license (either this or a later license version) that
    +   contains the same License Elements as this License (e.g., Attribution-
    +   ShareAlike 3.0 US); (iv) a Creative Commons Compatible License. If
    +   you license the Adaptation under one of the licenses mentioned in
    +   (iv), you must comply with the terms of that license. If you license
    +   the Adaptation under the terms of any of the licenses mentioned in
    +   (i), (ii) or (iii) (the "Applicable License"), you must comply with
    +   the terms of the Applicable License generally and the following
    +   provisions: (I) You must include a copy of, or the URI for, the
    +   Applicable License with every copy of each Adaptation You Distribute
    +   or Publicly Perform; (II) You may not offer or impose any terms on
    +   the Adaptation that restrict the terms of the Applicable License or
    +   the ability of the recipient of the Adaptation to exercise the rights
    +   granted to that recipient under the terms of the Applicable License;
    +   (III) You must keep intact all notices that refer to the Applicable
    +   License and to the disclaimer of warranties with every copy of the
    +   Work as included in the Adaptation You Distribute or Publicly Perform;
    +   (IV) when You Distribute or Publicly Perform the Adaptation, You may
    +   not impose any effective technological measures on the Adaptation
    +   that restrict the ability of a recipient of the Adaptation from You
    +   to exercise the rights granted to that recipient under the terms of
    +   the Applicable License. This Section 4(b) applies to the Adaptation as
    +   incorporated in a Collection, but this does not require the Collection
    +   apart from the Adaptation itself to be made subject to the terms of
    +   the Applicable License.
    +
    +   c. If You Distribute, or Publicly Perform the Work or any Adaptations
    +   or Collections, You must, unless a request has been made pursuant
    +   to Section 4(a), keep intact all copyright notices for the Work and
    +   provide, reasonable to the medium or means You are utilizing: (i) the
    +   name of the Original Author (or pseudonym, if applicable) if supplied,
    +   and/or if the Original Author and/or Licensor designate another party
    +   or parties (e.g., a sponsor institute, publishing entity, journal)
    +   for attribution ("Attribution Parties") in Licensor's copyright
    +   notice, terms of service or by other reasonable means, the name of
    +   such party or parties; (ii) the title of the Work if supplied; (iii)
    +   to the extent reasonably practicable, the URI, if any, that Licensor
    +   specifies to be associated with the Work, unless such URI does not
    +   refer to the copyright notice or licensing information for the Work;
    +   and (iv), consistent with Section 3(b), in the case of an Adaptation,
    +   a credit identifying the use of the Work in the Adaptation (e.g.,
    +   "French translation of the Work by Original Author," or "Screenplay
    +   based on original Work by Original Author"). The credit required
    +   by this Section 4(c) may be implemented in any reasonable manner;
    +   provided, however, that in the case of an Adaptation or Collection, at
    +   a minimum such credit will appear, if a credit for all contributing
    +   authors of the Adaptation or Collection appears, then as part of
    +   these credits and in a manner at least as prominent as the credits
    +   for the other contributing authors. For the avoidance of doubt, You
    +   may only use the credit required by this Section for the purpose of
    +   attribution in the manner set out above and, by exercising Your rights
    +   under this License, You may not implicitly or explicitly assert or
    +   imply any connection with, sponsorship or endorsement by the Original
    +   Author, Licensor and/or Attribution Parties, as appropriate, of You
    +   or Your use of the Work, without the separate, express prior written
    +   permission of the Original Author, Licensor and/or Attribution Parties.
    +
    +   d. Except as otherwise agreed in writing by the Licensor or as may be
    +   otherwise permitted by applicable law, if You Reproduce, Distribute
    +   or Publicly Perform the Work either by itself or as part of any
    +   Adaptations or Collections, You must not distort, mutilate, modify
    +   or take other derogatory action in relation to the Work which would
    +   be prejudicial to the Original Author's honor or reputation. Licensor
    +   agrees that in those jurisdictions (e.g. Japan), in which any exercise
    +   of the right granted in Section 3(b) of this License (the right to
    +   make Adaptations) would be deemed to be a distortion, mutilation,
    +   modification or other derogatory action prejudicial to the Original
    +   Author's honor and reputation, the Licensor will waive or not assert,
    +   as appropriate, this Section, to the fullest extent permitted by the
    +   applicable national law, to enable You to reasonably exercise Your
    +   right under Section 3(b) of this License (right to make Adaptations)
    +   but not otherwise.
    +
    +5. Representations, Warranties and Disclaimer
    +UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR
    +OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY
    +KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE,
    +INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF
    +LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS,
    +WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION
    +OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.
    +
    +6. Limitation on Liability.
    +EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR
    +BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL,
    +CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS
    +LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF
    +THE POSSIBILITY OF SUCH DAMAGES.
    +
    +7. Termination
    +
    +   a. This License and the rights granted hereunder will terminate
    +   automatically upon any breach by You of the terms of this
    +   License. Individuals or entities who have received Adaptations or
    +   Collections from You under this License, however, will not have their
    +   licenses terminated provided such individuals or entities remain in
    +   full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8
    +   will survive any termination of this License.
    +
    +   b. Subject to the above terms and conditions, the license granted
    +   here is perpetual (for the duration of the applicable copyright in
    +   the Work). Notwithstanding the above, Licensor reserves the right to
    +   release the Work under different license terms or to stop distributing
    +   the Work at any time; provided, however that any such election will not
    +   serve to withdraw this License (or any other license that has been,
    +   or is required to be, granted under the terms of this License), and
    +   this License will continue in full force and effect unless terminated
    +   as stated above.
    +
    +8. Miscellaneous
    +
    +   a. Each time You Distribute or Publicly Perform the Work or a
    +   Collection, the Licensor offers to the recipient a license to the
    +   Work on the same terms and conditions as the license granted to You
    +   under this License.
    +
    +   b. Each time You Distribute or Publicly Perform an Adaptation, Licensor
    +   offers to the recipient a license to the original Work on the same
    +   terms and conditions as the license granted to You under this License.
    +
    +   c. If any provision of this License is invalid or unenforceable under
    +   applicable law, it shall not affect the validity or enforceability
    +   of the remainder of the terms of this License, and without further
    +   action by the parties to this agreement, such provision shall be
    +   reformed to the minimum extent necessary to make such provision valid
    +   and enforceable.
    +
    +   d. No term or provision of this License shall be deemed waived and no
    +   breach consented to unless such waiver or consent shall be in writing
    +   and signed by the party to be charged with such waiver or consent.
    +
    +   e. This License constitutes the entire agreement between the parties
    +   with respect to the Work licensed here. There are no understandings,
    +   agreements or representations with respect to the Work not specified
    +   here. Licensor shall not be bound by any additional provisions that
    +   may appear in any communication from You. This License may not be
    +   modified without the mutual written agreement of the Licensor and You.
    +
    +   f. The rights granted under, and the subject matter referenced, in this
    +   License were drafted utilizing the terminology of the Berne Convention
    +   for the Protection of Literary and Artistic Works (as amended on
    +   September 28, 1979), the Rome Convention of 1961, the WIPO Copyright
    +   Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996 and
    +   the Universal Copyright Convention (as revised on July 24, 1971). These
    +   rights and subject matter take effect in the relevant jurisdiction
    +   in which the License terms are sought to be enforced according to
    +   the corresponding provisions of the implementation of those treaty
    +   provisions in the applicable national law. If the standard suite of
    +   rights granted under applicable copyright law includes additional
    +   rights not granted under this License, such additional rights are
    +   deemed to be included in the License; this License is not intended
    +   to restrict the license of any rights under applicable law.
    +
    +
    +
    +--------------- SECTION 20: Academic Free License, V2.1 -----------
    +
    +The Academic Free License v. 2.1
    +
    +This Academic Free License (the "License") applies to any original work of 
    +authorship (the "Original Work") whose owner (the "Licensor") has placed the 
    +following notice immediately following the copyright notice for the Original 
    +Work:
    +
    +Licensed under the Academic Free License version 2.1
    +
    +1) Grant of Copyright License.
    +Licensor hereby grants You a world-wide, royalty-free, non-exclusive,
    +perpetual, sublicenseable license to do the following:
    +
    +  a) to reproduce the Original Work in copies;
    +
    +  b) to prepare derivative works ("Derivative Works") based upon the
    +     Original Work;
    +
    +  c) to distribute copies of the Original Work and Derivative Works to
    +     the public;
    +
    +  d) to perform the Original Work publicly; and
    +
    +  e) to display the Original Work publicly.
    +
    +2) Grant of Patent License.
    +Licensor hereby grants You a world-wide, royalty-free, non-exclusive,
    +perpetual, sublicenseable license, under patent claims owned or controlled
    +by the Licensor that are embodied in the Original Work as furnished by
    +the Licensor, to make, use, sell and offer for sale the Original Work
    +and Derivative Works.
    +
    +3) Grant of Source Code License.
    +The term "Source Code" means the preferred form of the Original Work for
    +making modifications to it and all available documentation describing
    +how to modify the Original Work. Licensor hereby agrees to provide a
    +machine-readable copy of the Source Code of the Original Work along with
    +each copy of the Original Work that Licensor distributes.  Licensor
    +reserves the right to satisfy this obligation by placing a machine-
    +readable copy of the Source Code in an information repository reasonably
    +calculated to permit inexpensive and convenient access by You for as long
    +as Licensor continues to distribute the Original Work, and by publishing
    +the address of that information repository in a notice immediately
    +following the copyright notice that applies to the Original Work.
    +
    +4) Exclusions From License Grant.
    +Neither the names of Licensor, nor the names of any contributors to the
    +Original Work, nor any of their trademarks or service marks, may be used
    +to endorse or promote products derived from this Original Work without
    +express prior written permission of the Licensor.  Nothing in this
    +License shall be deemed to grant any rights to trademarks, copyrights,
    +patents, trade secrets or any other intellectual property of Licensor
    +except as expressly stated herein. No patent license is granted to make,
    +use, sell or offer to sell embodiments of any patent claims other than
    +the licensed claims defined in Section 2. No right is granted to the
    +trademarks of Licensor even if such marks are included in the Original
    +Work.  Nothing in this License shall be interpreted to prohibit Licensor
    +from licensing under different terms from this License any Original Work
    +that Licensor otherwise would have a right to license.
    +
    +5) This section intentionally omitted.
    +
    +6) Attribution Rights.
    +You must retain, in the Source Code of any Derivative Works that You
    +create, all copyright, patent or trademark notices from the Source
    +Code of the Original Work, as well as any notices of licensing and any
    +descriptive text identified therein as an "Attribution Notice." You must
    +cause the Source Code for any Derivative Works that You create to carry
    +a prominent Attribution Notice reasonably calculated to inform recipients
    +that You have modified the Original Work.
    +
    +7) Warranty of Provenance and Disclaimer of Warranty.
    +Licensor warrants that the copyright in and to the Original Work and the
    +patent rights granted herein by Licensor are owned by the Licensor or are
    +sublicensed to You under the terms of this License with the permission
    +of the contributor(s) of those copyrights and patent rights. Except as
    +expressly stated in the immediately preceding sentence, the Original
    +Work is provided under this License on an "AS IS" BASIS and WITHOUT
    +WARRANTY, either express or implied, including, without limitation,
    +the warranties of NON-INFRINGEMENT, MERCHANTABILITY or FITNESS FOR A
    +PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY OF THE ORIGINAL
    +WORK IS WITH YOU. This DISCLAIMER OF WARRANTY constitutes an essential
    +part of this License. No license to Original Work is granted hereunder
    +except under this disclaimer.
    +
    +8) Limitation of Liability.
    +Under no circumstances and under no legal theory, whether in tort
    +(including negligence), contract, or otherwise, shall the Licensor be
    +liable to any person for any direct, indirect, special, incidental, or
    +consequential damages of any character arising as a result of this License
    +or the use of the Original Work including, without limitation, damages
    +for loss of goodwill, work stoppage, computer failure or malfunction,
    +or any and all other commercial damages or losses. This limitation of
    +liability shall not apply to liability for death or personal injury
    +resulting from Licensor's negligence to the extent applicable law
    +prohibits such limitation. Some jurisdictions do not allow the exclusion
    +or limitation of incidental or consequential damages, so this exclusion
    +and limitation may not apply to You.
    +
    +9) Acceptance and Termination.
    +If You distribute copies of the Original Work or a Derivative Work,
    +You must make a reasonable effort under the circumstances to obtain the
    +express assent of recipients to the terms of this License. Nothing else
    +but this License (or another written agreement between Licensor and You)
    +grants You permission to create Derivative Works based upon the Original
    +Work or to exercise any of the rights granted in Section 1 herein, and
    +any attempt to do so except under the terms of this License (or another
    +written agreement between Licensor and You) is expressly prohibited
    +by U.S. copyright law, the equivalent laws of other countries, and by
    +international treaty. Therefore, by exercising any of the rights granted
    +to You in Section 1 herein, You indicate Your acceptance of this License
    +and all of its terms and conditions.
    +
    +10) Termination for Patent Action.
    +This License shall terminate automatically and You may no longer exercise
    +any of the rights granted to You by this License as of the date You
    +commence an action, including a cross-claim or counterclaim, against
    +Licensor or any licensee alleging that the Original Work infringes
    +a patent. This termination provision shall not apply for an action
    +alleging patent infringement by combinations of the Original Work with
    +other software or hardware.
    +
    +11) Jurisdiction, Venue and Governing Law.
    +Any action or suit relating to this License may be brought only in
    +the courts of a jurisdiction wherein the Licensor resides or in which
    +Licensor conducts its primary business, and under the laws of that
    +jurisdiction excluding its conflict-of-law provisions. The application
    +of the United Nations Convention on Contracts for the International Sale
    +of Goods is expressly excluded. Any use of the Original Work outside
    +the scope of this License or after its termination shall be subject
    +to the requirements and penalties of the U.S. Copyright Act, 17 U.S.C.
    +§ 101 et seq., the equivalent laws of other countries, and international
    +treaty. This section shall survive the termination of this License.
    +
    +12) Attorneys Fees.
    +In any action to enforce the terms of this License or seeking damages
    +relating thereto, the prevailing party shall be entitled to recover its
    +costs and expenses, including, without limitation, reasonable attorneys'
    +fees and costs incurred in connection with such action, including any
    +appeal of such action. This section shall survive the termination of
    +this License.
    +
    +13) Miscellaneous.
    +This License represents the complete agreement concerning the
    +subject matter hereof. If any provision of this License is held to
    +be unenforceable, such provision shall be reformed only to the extent
    +necessary to make it enforceable.
    +
    +14) Definition of "You" in This License.
    +"You" throughout this License, whether in upper or lower case, means an
    +individual or a legal entity exercising rights under, and complying with
    +all of the terms of, this License.  For legal entities, "You" includes
    +any entity that controls, is controlled by, or is under common control
    +with you. For purposes of this definition, "control" means (i) the power,
    +direct or indirect, to cause the direction or management of such entity,
    +whether by contract or otherwise, or (ii) ownership of fifty percent
    +(50%) or more of the outstanding shares, or (iii) beneficial ownership
    +of such entity.
    +
    +15) Right to Use.
    +You may use the Original Work in all ways not otherwise restricted or
    +conditioned by this License or by law, and Licensor promises not to
    +interfere with or be responsible for such uses by You.
    +
    +This license is Copyright (C) 2003-2004 Lawrence E. Rosen. All rights
    +reserved. Permission is hereby granted to copy and distribute this
    +license without modification. This license may not be modified without
    +the express written permission of its copyright owner.
    +
    +
    +
    +--------------- SECTION 21: SIL Open Font License, V1.1 -----------
    +
    +
    +SIL OPEN FONT LICENSE
    +
    +Version 1.1 - 26 February 2007
    +PREAMBLE
    +
    +The goals of the Open Font License (OFL) are to stimulate worldwide
    +development of collaborative font projects, to support the font creation
    +efforts of academic and linguistic communities, and to provide a free and
    +open framework in which fonts may be shared and improved in partnership
    +with others.
    +
    +The OFL allows the licensed fonts to be used, studied, modified and
    +redistributed freely as long as they are not sold by themselves. The
    +fonts, including any derivative works, can be bundled, embedded,
    +redistributed and/or sold with any software provided that any reserved
    +names are not used by derivative works. The fonts and derivatives,
    +however, cannot be released under any other type of license. The
    +requirement for fonts to remain under this license does not apply
    +to any document created using the fonts or their derivatives.
    +DEFINITIONS
    +
    +"Font Software" refers to the set of files released by the Copyright
    +Holder(s) under this license and clearly marked as such. This may
    +include source files, build scripts and documentation.
    +
    +"Reserved Font Name" refers to any names specified as such after the
    +copyright statement(s).
    +
    +"Original Version" refers to the collection of Font Software components as
    +distributed by the Copyright Holder(s).
    +
    +"Modified Version" refers to any derivative made by adding to, deleting,
    +or substituting - in part or in whole - any of the components of the
    +Original Version, by changing formats or by porting the Font Software to a
    +new environment.
    +
    +"Author" refers to any designer, engineer, programmer, technical
    +writer or other person who contributed to the Font Software.
    +PERMISSION & CONDITIONS
    +
    +Permission is hereby granted, free of charge, to any person obtaining
    +a copy of the Font Software, to use, study, copy, merge, embed, modify,
    +redistribute, and sell modified and unmodified copies of the Font
    +Software, subject to the following conditions:
    +
    +1) Neither the Font Software nor any of its individual components,
    +in Original or Modified Versions, may be sold by itself.
    +
    +2) Original or Modified Versions of the Font Software may be bundled,
    +redistributed and/or sold with any software, provided that each copy
    +contains the above copyright notice and this license. These can be
    +included either as stand-alone text files, human-readable headers or
    +in the appropriate machine-readable metadata fields within text or
    +binary files as long as those fields can be easily viewed by the user.
    +
    +3) No Modified Version of the Font Software may use the Reserved Font
    +Name(s) unless explicit written permission is granted by the corresponding
    +Copyright Holder. This restriction only applies to the primary font name as
    +presented to the users.
    +
    +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
    +Software shall not be used to promote, endorse or advertise any
    +Modified Version, except to acknowledge the contribution(s) of the
    +Copyright Holder(s) and the Author(s) or with their explicit written
    +permission.
    +
    +5) The Font Software, modified or unmodified, in part or in whole,
    +must be distributed entirely under this license, and must not be
    +distributed under any other license. The requirement for fonts to
    +remain under this license does not apply to any document created
    +using the Font Software.
    +TERMINATION
    +
    +This license becomes null and void if any of the above conditions are
    +not met.
    +DISCLAIMER
    +
    +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
    +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
    +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
    +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
    +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
    +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
    +OTHER DEALINGS IN THE FONT SOFTWARE.
    +
    +
    +
    +--------------- SECTION 22: LaTeX Project Public License v1.3c -----------
    +
    +
    +The LaTeX Project Public License 
    +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 
    +LPPL Version 1.3c 2008-05-04
    +Copyright 1999 2002-2008 LaTeX3 Project
    +Everyone is allowed to distribute verbatim copies of this license document, but modification of it is not allowed.
    +PREAMBLE 
    +======== 
    +The LaTeX Project Public License (LPPL) is the primary license under which the LaTeX kernel and the base LaTeX packages are distributed.
    +You may use this license for any work of which you hold the copyright and which you wish to distribute. This license may be particularly suitable if your work is TeX-related (such as a LaTeX package), but it is written in such a way that you can use it even if your work is unrelated to TeX.
    +The section `WHETHER AND HOW TO DISTRIBUTE WORKS UNDER THIS LICENSE', below, gives instructions, examples, and recommendations for authors who are considering distributing their works under this license.
    +This license gives conditions under which a work may be distributed and modified, as well as conditions under which modified versions of that work may be distributed.
    +We, the LaTeX3 Project, believe that the conditions below give you the freedom to make and distribute modified versions of your work that conform with whatever technical specifications you wish while maintaining the availability, integrity, and reliability of that work. If you do not see how to achieve your goal while meeting these conditions, then read the document `cfgguide.tex' and `modguide.tex' in the base LaTeX distribution for suggestions.
    +DEFINITIONS 
    +=========== 
    +In this license document the following terms are used:
    +`Work' Any work being distributed under this License. `Derived Work' Any work that under any applicable law is derived from the Work.
    +`Modification' Any procedure that produces a Derived Work under any applicable law -- for example, the production of a file containing an original file associated with the Work or a significant portion of such a file, either verbatim or with modifications and/or translated into another language.
    +`Modify' To apply any procedure that produces a Derived Work under any applicable law. `Distribution' Making copies of the Work available from one person to another, in whole or in part. Distribution includes (but is not limited to) making any electronic components of the Work accessible by file transfer protocols such as FTP or HTTP or by shared file systems such as Sun's Network File System (NFS).
    +`Compiled Work' A version of the Work that has been processed into a form where it is directly usable on a computer system. This processing may include using installation facilities provided by the Work, transformations of the Work, copying of components of the Work, or other activities. Note that modification of any installation facilities provided by the Work constitutes modification of the Work.
    +`Current Maintainer' A person or persons nominated as such within the Work. If there is no such explicit nomination then it is the `Copyright Holder' under any applicable law.
    +`Base Interpreter' A program or process that is normally needed for running or interpreting a part or the whole of the Work.
    +A Base Interpreter may depend on external components but these are not considered part of the Base Interpreter provided that each external component clearly identifies itself whenever it is used interactively. Unless explicitly specified when applying the license to the Work, the only applicable Base Interpreter is a `LaTeX-Format' or in the case of files belonging to the `LaTeX-format' a program implementing the `TeX language'.
    +CONDITIONS ON DISTRIBUTION AND MODIFICATION 
    +=========================================== 
    +	1. Activities other than distribution and/or modification of the Work are not covered by this license; they are outside its scope. In particular, the act of running the Work is not restricted and no requirements are made concerning any offers of support for the Work. 
    +	2. You may distribute a complete, unmodified copy of the Work as you received it. Distribution of only part of the Work is considered modification of the Work, and no right to distribute such a Derived Work may be assumed under the terms of this clause. 
    +	3. You may distribute a Compiled Work that has been generated from a complete, unmodified copy of the Work as distributed under Clause 2 above, as long as that Compiled Work is distributed in such a way that the recipients may install the Compiled Work on their system exactly as it would have been installed if they generated a Compiled Work directly from the Work. 
    +	4. If you are the Current Maintainer of the Work, you may, without restriction, modify the Work, thus creating a Derived Work. You may also distribute the Derived Work without restriction, including Compiled Works generated from the Derived Work. Derived Works distributed in this manner by the Current Maintainer are considered to be updated versions of the Work. 
    +	5. If you are not the Current Maintainer of the Work, you may modify your copy of the Work, thus creating a Derived Work based on the Work, and compile this Derived Work, thus creating a Compiled Work based on the Derived Work. 
    +	6. If you are not the Current Maintainer of the Work, you may distribute a Derived Work provided the following conditions are met for every component of the Work unless that component clearly states in the copyright notice that it is exempt from that condition. Only the Current Maintainer is allowed to add such statements of exemption to a component of the Work. 
    +	a. If a component of this Derived Work can be a direct replacement for a component of the Work when that component is used with the Base Interpreter, then, wherever this component of the Work identifies itself to the user when used interactively with that Base Interpreter, the replacement component of this Derived Work clearly and unambiguously identifies itself as a modified version of this component to the user when used interactively with that Base Interpreter. 
    +	b. Every component of the Derived Work contains prominent notices detailing the nature of the changes to that component, or a prominent reference to another file that is distributed as part of the Derived Work and that contains a complete and accurate log of the changes. 
    +	c. No information in the Derived Work implies that any persons, including (but not limited to) the authors of the original version of the Work, provide any support, including (but not limited to) the reporting and handling of errors, to recipients of the Derived Work unless those persons have stated explicitly that they do provide such support for the Derived Work. 
    +	d. You distribute at least one of the following with the Derived Work: 
    +	1. A complete, unmodified copy of the Work; if your distribution of a modified component is made by offering access to copy the modified component from a designated place, then offering equivalent access to copy the Work from the same or some similar place meets this condition, even though third parties are not compelled to copy the Work along with the modified component; 
    +	2. Information that is sufficient to obtain a complete, unmodified copy of the Work. 
    +	7. If you are not the Current Maintainer of the Work, you may distribute a Compiled Work generated from a Derived Work, as long as the Derived Work is distributed to all recipients of the Compiled Work, and as long as the conditions of Clause 6, above, are met with regard to the Derived Work. 
    +	8. The conditions above are not intended to prohibit, and hence do not apply to, the modification, by any method, of any component so that it becomes identical to an updated version of that component of the Work as it is distributed by the Current Maintainer under Clause 4, above. 
    +	9. Distribution of the Work or any Derived Work in an alternative format, where the Work or that Derived Work (in whole or in part) is then produced by applying some process to that format, does not relax or nullify any sections of this license as they pertain to the results of applying that process. 
    +	10. 
    +	a. A Derived Work may be distributed under a different license provided that license itself honors the conditions listed in Clause 6 above, in regard to the Work, though it does not have to honor the rest of the conditions in this license. 
    +	b. If a Derived Work is distributed under a different license, that Derived Work must provide sufficient documentation as part of itself to allow each recipient of that Derived Work to honor the restrictions in Clause 6 above, concerning changes from the Work. 
    +	11. This license places no restrictions on works that are unrelated to the Work, nor does this license place any restrictions on aggregating such works with the Work by any means. 
    +	12. Nothing in this license is intended to, or may be used to, prevent complete compliance by all parties with all applicable laws. 
    +NO WARRANTY 
    +=========== 
    +There is no warranty for the Work. Except when otherwise stated in writing, the Copyright Holder provides the Work `as is', without warranty of any kind, either expressed or implied, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose. The entire risk as to the quality and performance of the Work is with you. Should the Work prove defective, you assume the cost of all necessary servicing, repair, or correction.
    +In no event unless required by applicable law or agreed to in writing will The Copyright Holder, or any author named in the components of the Work, or any other party who may distribute and/or modify the Work as permitted above, be liable to you for damages, including any general, special, incidental or consequential damages arising out of any use of the Work or out of inability to use the Work (including, but not limited to, loss of data, data being rendered inaccurate, or losses sustained by anyone as a result of any failure of the Work to operate with any other programs), even if the Copyright Holder or said author or said other party has been advised of the possibility of such damages.
    +MAINTENANCE OF THE WORK 
    +======================= 
    +The Work has the status `author-maintained' if the Copyright Holder explicitly and prominently states near the primary copyright notice in the Work that the Work can only be maintained by the Copyright Holder or simply that it is `author-maintained'.
    +The Work has the status `maintained' if there is a Current Maintainer who has indicated in the Work that they are willing to receive error reports for the Work (for example, by supplying a valid e-mail address). It is not required for the Current Maintainer to acknowledge or act upon these error reports.
    +The Work changes from status `maintained' to `unmaintained' if there is no Current Maintainer, or the person stated to be Current Maintainer of the work cannot be reached through the indicated means of communication for a period of six months, and there are no other significant signs of active maintenance.
    +You can become the Current Maintainer of the Work by agreement with any existing Current Maintainer to take over this role.
    +If the Work is unmaintained, you can become the Current Maintainer of the Work through the following steps:
    +	1. Make a reasonable attempt to trace the Current Maintainer (and the Copyright Holder, if the two differ) through the means of an Internet or similar search. 
    +	2. If this search is successful, then enquire whether the Work is still maintained. 
    +	a. If it is being maintained, then ask the Current Maintainer to update their communication data within one month. 
    +	b. If the search is unsuccessful or no action to resume active maintenance is taken by the Current Maintainer, then announce within the pertinent community your intention to take over maintenance. (If the Work is a LaTeX work, this could be done, for example, by posting to comp.text.tex.) 
    +	3a. If the Current Maintainer is reachable and agrees to pass maintenance of the Work to you, then this takes effect immediately upon announcement. 
    +	b. If the Current Maintainer is not reachable and the Copyright Holder agrees that maintenance of the Work be passed to you, then this takes effect immediately upon announcement. 
    +	4. If you make an `intention announcement' as described in 2b. above and after three months your intention is challenged neither by the Current Maintainer nor by the Copyright Holder nor by other people, then you may arrange for the Work to be changed so as to name you as the (new) Current Maintainer. 
    +	5. If the previously unreachable Current Maintainer becomes reachable once more within three months of a change completed under the terms of 3b) or 4), then that Current Maintainer must become or remain the Current Maintainer upon request provided they then update their communication data within one month. 
    +A change in the Current Maintainer does not, of itself, alter the fact that the Work is distributed under the LPPL license.
    +If you become the Current Maintainer of the Work, you should immediately provide, within the Work, a prominent and unambiguous statement of your status as Current Maintainer. You should also announce your new status to the same pertinent community as in 2b) above.
    +WHETHER AND HOW TO DISTRIBUTE WORKS UNDER THIS LICENSE 
    +====================================================== 
    +This section contains important instructions, examples, and recommendations for authors who are considering distributing their works under this license. These authors are addressed as `you' in this section.
    +Choosing This License or Another License 
    +---------------------------------------- 
    +If for any part of your work you want or need to use *distribution* conditions that differ significantly from those in this license, then do not refer to this license anywhere in your work but, instead, distribute your work under a different license. You may use the text of this license as a model for your own license, but your license should not refer to the LPPL or otherwise give the impression that your work is distributed under the LPPL.
    +The document `modguide.tex' in the base LaTeX distribution explains the motivation behind the conditions of this license. It explains, for example, why distributing LaTeX under the GNU General Public License (GPL) was considered inappropriate. Even if your work is unrelated to LaTeX, the discussion in `modguide.tex' may still be relevant, and authors intending to distribute their works under any license are encouraged to read it.
    +A Recommendation on Modification Without Distribution 
    +----------------------------------------------------- 
    +It is wise never to modify a component of the Work, even for your own personal use, without also meeting the above conditions for distributing the modified component. While you might intend that such modifications will never be distributed, often this will happen by accident -- you may forget that you have modified that component; or it may not occur to you when allowing others to access the modified version that you are thus distributing it and violating the conditions of this license in ways that could have legal implications and, worse, cause problems for the community. It is therefore usually in your best interest to keep your copy of the Work identical with the public one. Many works provide ways to control the behavior of that work without altering any of its licensed components.
    +How to Use This License 
    +----------------------- 
    +To use this license, place in each of the components of your work both an explicit copyright notice including your name and the year the work was authored and/or last substantially modified. Include also a statement that the distribution and/or modification of that component is constrained by the conditions in this license.
    +Here is an example of such a notice and statement:
    + pig.dtx 
    + Copyright 2005 M. Y. Name 
    + 
    + This work may be distributed and/or modified under the 
    + conditions of the LaTeX Project Public License, either version 1.3 
    + of this license or (at your option) any later version. 
    + The latest version of this license is in 
    + http://www.latex-project.org/lppl.txt 
    + and version 1.3 or later is part of all distributions of LaTeX 
    + version 2005/12/01 or later. 
    + 
+ This work has the LPPL maintenance status "maintained". 
    + 
    + The Current Maintainer of this work is M. Y. Name. 
    + 
    + This work consists of the files pig.dtx and pig.ins 
    + and the derived file pig.sty. 
    +Given such a notice and statement in a file, the conditions given in this license document would apply, with the `Work' referring to the three files `pig.dtx', `pig.ins', and `pig.sty' (the last being generated from `pig.dtx' using `pig.ins'), the `Base Interpreter' referring to any `LaTeX-Format', and both `Copyright Holder' and `Current Maintainer' referring to the person `M. Y. Name'.
    +If you do not want the Maintenance section of LPPL to apply to your Work, change `maintained' above into `author-maintained'. However, we recommend that you use `maintained', as the Maintenance section was added in order to ensure that your Work remains useful to the community even when you can no longer maintain and support it yourself.
    +Derived Works That Are Not Replacements 
    +--------------------------------------- 
    +Several clauses of the LPPL specify means to provide reliability and stability for the user community. They therefore concern themselves with the case that a Derived Work is intended to be used as a (compatible or incompatible) replacement of the original Work. If this is not the case (e.g., if a few lines of code are reused for a completely different task), then clauses 6b and 6d shall not apply.
    +Important Recommendations 
    +------------------------- 
    +Defining What Constitutes the Work
    +The LPPL requires that distributions of the Work contain all the files of the Work. It is therefore important that you provide a way for the licensee to determine which files constitute the Work. This could, for example, be achieved by explicitly listing all the files of the Work near the copyright notice of each file or by using a line such as:
    + This work consists of all files listed in manifest.txt.
    +in that place. In the absence of an unequivocal list it might be impossible for the licensee to determine what is considered by you to comprise the Work and, in such a case, the licensee would be entitled to make reasonable conjectures as to which files comprise the Work.
    +
    +
    +
    +--------------- SECTION 23: Open Software License 1.1 -----------
    +
    +
    +The Open Software License v. 1.1
    +This Open Software License (the "License") applies to any original work of authorship (the "Original Work") whose owner (the "Licensor") has placed the following notice immediately following the copyright notice for the Original Work:
    +Licensed under the Open Software License version 1.1
    +	1) Grant of Copyright License. Licensor hereby grants You a world-wide, royalty-free, non-exclusive, perpetual, non-sublicenseable license to do the following: 
    +	a) to reproduce the Original Work in copies; 
    +	b) to prepare derivative works ("Derivative Works") based upon the Original Work; 
    +	c) to distribute copies of the Original Work and Derivative Works to the public, with the proviso that copies of Original Work or Derivative Works that You distribute shall be licensed under the Open Software License; 
    +	d) to perform the Original Work publicly; and 
    +	e) to display the Original Work publicly. 
    +	2) Grant of Patent License. Licensor hereby grants You a world-wide, royalty-free, non-exclusive, perpetual, non-sublicenseable license, under patent claims owned or controlled by the Licensor that are embodied in the Original Work as furnished by the Licensor ("Licensed Claims") to make, use, sell and offer for sale the Original Work. Licensor hereby grants You a world-wide, royalty-free, non-exclusive, perpetual, non-sublicenseable license under the Licensed Claims to make, use, sell and offer for sale Derivative Works. 
    +	3) Grant of Source Code License. The term "Source Code" means the preferred form of the Original Work for making modifications to it and all available documentation describing how to modify the Original Work. Licensor hereby agrees to provide a machine-readable copy of the Source Code of the Original Work along with each copy of the Original Work that Licensor distributes. Licensor reserves the right to satisfy this obligation by placing a machine-readable copy of the Source Code in an information repository reasonably calculated to permit inexpensive and convenient access by You for as long as Licensor continues to distribute the Original Work, and by publishing the address of that information repository in a notice immediately following the copyright notice that applies to the Original Work. 
    +	4) Exclusions From License Grant. Nothing in this License shall be deemed to grant any rights to trademarks, copyrights, patents, trade secrets or any other intellectual property of Licensor except as expressly stated herein. No patent license is granted to make, use, sell or offer to sell embodiments of any patent claims other than the Licensed Claims defined in Section 2. No right is granted to the trademarks of Licensor even if such marks are included in the Original Work. Nothing in this License shall be interpreted to prohibit Licensor from licensing under different terms from this License any Original Work that Licensor otherwise would have a right to license. 
    +	5) External Deployment. The term "External Deployment" means the use or distribution of the Original Work or Derivative Works in any way such that the Original Work or Derivative Works may be used by anyone other than You, whether the Original Work or Derivative Works are distributed to those persons or made available as an application intended for use over a computer network. As an express condition for the grants of license hereunder, You agree that any External Deployment by You of a Derivative Work shall be deemed a distribution and shall be licensed to all under the terms of this License, as prescribed in section 1(c) herein. 
    +	6) Attribution Rights. You must retain, in the Source Code of any Derivative Works that You create, all copyright, patent or trademark notices from the Source Code of the Original Work, as well as any notices of licensing and any descriptive text identified therein as an "Attribution Notice." You must cause the Source Code for any Derivative Works that You create to carry a prominent Attribution Notice reasonably calculated to inform recipients that You have modified the Original Work. 
    +	7) Warranty and Disclaimer of Warranty. Licensor warrants that the copyright in and to the Original Work is owned by the Licensor or that the Original Work is distributed by Licensor under a valid current license from the copyright owner. Except as expressly stated in the immediately proceeding sentence, the Original Work is provided under this License on an "AS IS" BASIS and WITHOUT WARRANTY, either express or implied, including, without limitation, the warranties of NON-INFRINGEMENT, MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY OF THE ORIGINAL WORK IS WITH YOU. This DISCLAIMER OF WARRANTY constitutes an essential part of this License. No license to Original Work is granted hereunder except under this disclaimer. 
    +	8) Limitation of Liability. Under no circumstances and under no legal theory, whether in tort (including negligence), contract, or otherwise, shall the Licensor be liable to any person for any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or the use of the Original Work including, without limitation, damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses. This limitation of liability shall not apply to liability for death or personal injury resulting from Licensor's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You. 
    +	9) Acceptance and Termination. If You distribute copies of the Original Work or a Derivative Work, You must make a reasonable effort under the circumstances to obtain the express and volitional assent of recipients to the terms of this License. Nothing else but this License (or another written agreement between Licensor and You) grants You permission to create Derivative Works based upon the Original Work or to exercise any of the rights granted in Sections 1 herein, and any attempt to do so except under the terms of this License (or another written agreement between Licensor and You) is expressly prohibited by U.S. copyright law, the equivalent laws of other countries, and by international treaty. Therefore, by exercising any of the rights granted to You in Sections 1 herein, You indicate Your acceptance of this License and all of its terms and conditions. This License shall terminate immediately and you may no longer exercise any of the rights granted to You by this License upon Your failure to honor the proviso in Section 1(c) herein. 
    +	10) Mutual Termination for Patent Action. This License shall terminate automatically and You may no longer exercise any of the rights granted to You by this License if You file a lawsuit in any court alleging that any OSI Certified open source software that is licensed under any license containing this "Mutual Termination for Patent Action" clause infringes any patent claims that are essential to use that software. 
+	11) Jurisdiction, Venue and Governing Law. Any action or suit relating to this License may be brought only in the courts of a jurisdiction wherein the Licensor resides or in which Licensor conducts its primary business, and under the laws of that jurisdiction excluding its conflict-of-law provisions. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any use of the Original Work outside the scope of this License or after its termination shall be subject to the requirements and penalties of the U.S. Copyright Act, 17 U.S.C. § 101 et seq., the equivalent laws of other countries, and international treaty. This section shall survive the termination of this License. 
    +	12) Attorneys Fees. In any action to enforce the terms of this License or seeking damages relating thereto, the prevailing party shall be entitled to recover its costs and expenses, including, without limitation, reasonable attorneys' fees and costs incurred in connection with such action, including any appeal of such action. This section shall survive the termination of this License. 
    +	13) Miscellaneous. This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. 
    +	14) Definition of "You" in This License. "You" throughout this License, whether in upper or lower case, means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with you. For purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
    +	15) Right to Use. You may use the Original Work in all ways not otherwise restricted or conditioned by this License or by law, and Licensor promises not to interfere with or be responsible for such uses by You. 
    +This license is Copyright (C) 2002 Lawrence E. Rosen. All rights reserved. Permission is hereby granted to copy and distribute this license without modification. This license may not be modified without the express written permission of its copyright owner.
    +
    +
    +
    +--------------- SECTION 24: Creative Commons Attribution Share Alike 2.0 Generic -----------
    +
    +
    +Creative Commons Attribution-ShareAlike 2.0
    +CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM ITS USE.
    +License
    +THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
    +BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.
    +	1. Definitions 
    +	a. "Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with a number of other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License. 
    +	b. "Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License. For the avoidance of doubt, where the Work is a musical composition or sound recording, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered a Derivative Work for the purpose of this License. 
    +	c. "Licensor" means the individual or entity that offers the Work under the terms of this License. 
    +	d. "Original Author" means the individual or entity who created the Work. 
    +	e. "Work" means the copyrightable work of authorship offered under the terms of this License. 
    +	f. "You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation. 
    +	g. "License Elements" means the following high-level license attributes as selected by Licensor and indicated in the title of this License: Attribution, ShareAlike. 
    +	2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws. 
    +	3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below: 
    +	a. to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works; 
    +	b. to create and reproduce Derivative Works; 
    +	c. to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works; 
    +	d. to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works. 
    +	e. For the avoidance of doubt, where the work is a musical composition: 
    +	i. Performance Royalties Under Blanket Licenses. Licensor waives the exclusive right to collect, whether individually or via a performance rights society (e.g. ASCAP, BMI, SESAC), royalties for the public performance or public digital performance (e.g. webcast) of the Work. 
    +	ii. Mechanical Rights and Statutory Royalties. Licensor waives the exclusive right to collect, whether individually or via a music rights society or designated agent (e.g. Harry Fox Agency), royalties for any phonorecord You create from the Work ("cover version") and distribute, subject to the compulsory license created by 17 USC Section 115 of the US Copyright Act (or the equivalent in other jurisdictions). 
    +	f. Webcasting Rights and Statutory Royalties. For the avoidance of doubt, where the Work is a sound recording, Licensor waives the exclusive right to collect, whether individually or via a performance-rights society (e.g. SoundExchange), royalties for the public digital performance (e.g. webcast) of the Work, subject to the compulsory license created by 17 USC Section 114 of the US Copyright Act (or the equivalent in other jurisdictions). 
    +The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved.
    +	4. Restrictions. The license granted in Section 3 above is expressly made subject to and limited by the following restrictions: 
    +	a. You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. You may not distribute, publicly display, publicly perform, or publicly digitally perform the Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. The above applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any reference to such Licensor or the Original Author, as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any reference to such Licensor or the Original Author, as requested. 
    +	b. You may distribute, publicly display, publicly perform, or publicly digitally perform a Derivative Work only under the terms of this License, a later version of this License with the same License Elements as this License, or a Creative Commons iCommons license that contains the same License Elements as this License (e.g. Attribution-ShareAlike 2.0 Japan). You must include a copy of, or the Uniform Resource Identifier for, this License or other license specified in the previous sentence with every copy or phonorecord of each Derivative Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Derivative Works that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder, and You must keep intact all notices that refer to this License and to the disclaimer of warranties. You may not distribute, publicly display, publicly perform, or publicly digitally perform the Derivative Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. The above applies to the Derivative Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Derivative Work itself to be made subject to the terms of this License. 
    +	c. If you distribute, publicly display, publicly perform, or publicly digitally perform the Work or any Derivative Works or Collective Works, You must keep intact all copyright notices for the Work and give the Original Author credit reasonable to the medium or means You are utilizing by conveying the name (or pseudonym if applicable) of the Original Author if supplied; the title of the Work if supplied; to the extent reasonably practicable, the Uniform Resource Identifier, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). Such credit may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear where any other comparable authorship credit appears and in a manner at least as prominent as such other comparable authorship credit. 
    +	5. Representations, Warranties and Disclaimer 
    +UNLESS OTHERWISE AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE MATERIALS, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.
    +	6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
    +	7. Termination 
    +	a. This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Derivative Works or Collective Works from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License. 
    +	b. Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above. 
    +	8. Miscellaneous 
    +	a. Each time You distribute or publicly digitally perform the Work or a Collective Work, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License. 
    +	b. Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License. 
    +	c. If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. 
    +	d. No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent. 
    +	e. This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You. 
    +Creative Commons is not a party to this License, and makes no warranty whatsoever in connection with the Work. Creative Commons will not be liable to You or any party on any legal theory for any damages whatsoever, including without limitation any general, special, incidental or consequential damages arising in connection to this license. Notwithstanding the foregoing two (2) sentences, if Creative Commons has expressly identified itself as the Licensor hereunder, it shall have all rights and obligations of Licensor.
    +Except for the limited purpose of indicating to the public that the Work is licensed under the CCPL, neither party will use the trademark "Creative Commons" or any related trademark or logo of Creative Commons without the prior written consent of Creative Commons. Any permitted use will be in compliance with Creative Commons' then-current trademark usage guidelines, as may be published on its website or otherwise made available upon request from time to time.
    +Creative Commons may be contacted at http://creativecommons.org/.
    +
    +
    +
    +--------------- SECTION 25:  LDP GENERAL PUBLIC LICENSE v1 -----------
    +
    +
    +  LDP GENERAL PUBLIC LICENSE
    +                   Version 1, September 1998
    +
    +   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
    +
    +  0. This License applies to any document which contains a notice
    +placed by the copyright holder saying it may be distributed
    +under the terms of this LDP General Public License.
    +The "Document" below refers to any such document, and
    +"work based on the Document" means either the Document
    +or any derivative work under copyright law: that is to say,
    +a work containing the Document or part of it, either verbatim
    +or with modifications and/or translated into another language.
    +(Hereinafter, translation is included without limitation in
    +the term "modification".)  Each licensee is addressed as "you".
    +
    +Activities other than copying, distribution and modification are not
    +covered by this License; they are outside its scope.
    +
    +  1. You may copy and distribute verbatim copies of the Document's
    +source code as you receive it, in any medium, provided that you
    +appropriately publish on each copy an appropriate copyright notice
    +and disclaimer of warranty; keep intact all the notices that refer
    +to this License and to the absence of any warranty; and give any
    +other recipients of the Document a copy of this License along with
    +the Document.
    +
    +You may charge a fee for the physical act of producing or transferring
    +a copy.
    +
    +  2. You may modify your copy or copies of the Document or any portion
    +of it, thus forming a work based on the Document, and copy and
    +distribute such modifications or work under the terms of Section 1
    +above, provided that you also meet all of these conditions:
    +
    +    a) You must cause the modified files to carry notices
    +    stating that you changed the files and the date of any change.
    +
    +    b) You must cause any work that you distribute or publish, that in
    +    whole or in part contains or is derived from the Document or any
    +    part thereof, to be licensed as a whole at no charge to all third
    +    parties under the terms of this License.
    +
    +    c) You must not add notes to the Document implying that the
    +    reader had better read something produced using Texinfo.
    +
    +  3. You may copy and distribute the Document (or a work based on it,
    +under Section 2) in any form under the terms of Sections 1 and 2 above
    +provided that you also either accompany it with the complete corresponding
    +machine-readable source code, or provide an URL, valid for at least
    +three months, where this complete corresponding machine-readable source code
    +is available, and can be retrieved by any anonymous user.
    +
    +  4. You may not copy, modify, sublicense, or distribute the Document
    +except as expressly provided under this License.
    +
    +  5. Each time you redistribute the Document (or any work based on the
    +Document), the recipient automatically receives a license from the
    +original licensor to copy, distribute or modify the Document subject to
    +these terms and conditions.  You may not impose any further
    +restrictions on the recipients' exercise of the rights granted herein.
    +
    +
    +
    +======================================================================
    +
    +To the extent any open source components are licensed under the GPL
    +and/or LGPL, or other similar licenses that require the source code
    +and/or modifications to source code to be made available (as would be
    +noted above), you may obtain a copy of the source code corresponding to
    +the binaries for such open source components and modifications thereto,
    +if any, (the "Source Files"), by downloading the Source Files from
    +VMware's github site, or by sending a request, with your name and address to: 
    +VMware, Inc., 3401 Hillview Avenue, Palo Alto, CA 94304, United States of America. 
    +All such requests should clearly specify: OPEN SOURCE FILES REQUEST, Attention General Counsel. 
    +VMware shall mail a copy of the Source Files to you on a CD or equivalent physical medium. 
    +This offer to obtain a copy of the Source Files is valid for three years from the date you acquired this Software product. 
    +
    +[VIC152GAMS030419]
    \ No newline at end of file
    diff --git a/vendor/github.com/hashicorp/vic/pkg/vsphere/tags/categories.go b/vendor/github.com/hashicorp/vic/pkg/vsphere/tags/categories.go
    new file mode 100644
    index 000000000000..8d5e6fd603aa
    --- /dev/null
    +++ b/vendor/github.com/hashicorp/vic/pkg/vsphere/tags/categories.go
    @@ -0,0 +1,212 @@
    +// Copyright 2017 VMware, Inc. All Rights Reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//    http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package tags
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"strings"
    +
    +	"github.com/pkg/errors"
    +)
    +
    +const (
    +	CategoryURL      = "/com/vmware/cis/tagging/category"
    +	ErrAlreadyExists = "already_exists"
    +)
    +
    +type CategoryCreateSpec struct {
    +	CreateSpec CategoryCreate `json:"create_spec"`
    +}
    +
    +type CategoryUpdateSpec struct {
    +	UpdateSpec CategoryUpdate `json:"update_spec"`
    +}
    +
    +type CategoryCreate struct {
    +	AssociableTypes []string `json:"associable_types"`
    +	Cardinality     string   `json:"cardinality"`
    +	Description     string   `json:"description"`
    +	Name            string   `json:"name"`
    +}
    +
    +type CategoryUpdate struct {
    +	AssociableTypes []string `json:"associable_types"`
    +	Cardinality     string   `json:"cardinality"`
    +	Description     string   `json:"description"`
    +	Name            string   `json:"name"`
    +}
    +
    +type Category struct {
    +	ID              string   `json:"id"`
    +	Description     string   `json:"description"`
    +	Name            string   `json:"name"`
    +	Cardinality     string   `json:"cardinality"`
    +	AssociableTypes []string `json:"associable_types"`
    +	UsedBy          []string `json:"used_by"`
    +}
    +
    +func (c *RestClient) CreateCategoryIfNotExist(ctx context.Context, name string, description string, categoryType string, multiValue bool) (*string, error) {
    +	categories, err := c.GetCategoriesByName(ctx, name)
    +	if err != nil {
    +		return nil, errors.Wrapf(err, "failed to query category for %s", name)
    +	}
    +
    +	if categories == nil {
    +		var multiValueStr string
    +		if multiValue {
    +			multiValueStr = "MULTIPLE"
    +		} else {
    +			multiValueStr = "SINGLE"
    +		}
    +		categoryCreate := CategoryCreate{[]string{categoryType}, multiValueStr, description, name}
    +		spec := CategoryCreateSpec{categoryCreate}
    +		id, err := c.CreateCategory(ctx, &spec)
    +		if err != nil {
+			// In case two docker daemons try to create the inventory category concurrently, query the category once again.
+			// Match on the ErrAlreadyExists constant ("already_exists"), which is the value the tagging API returns.
+			if strings.Contains(err.Error(), ErrAlreadyExists) {
    +				if categories, err = c.GetCategoriesByName(ctx, name); err != nil {
    +					Logger.Debugf("Failed to get inventory category for %s", errors.WithStack(err))
    +					return nil, errors.Wrap(err, "create inventory category failed")
    +				}
    +			} else {
    +				Logger.Debugf("Failed to create inventory category for %s", errors.WithStack(err))
    +				return nil, errors.Wrap(err, "create inventory category failed")
    +			}
    +		} else {
    +			return id, nil
    +		}
    +	}
    +	if categories != nil {
    +		return &categories[0].ID, nil
    +	}
    +	// should not happen
+	Logger.Debugf("Failed to create inventory category: it already exists but could not be queried back. Please check the system")
+	return nil, errors.Errorf("Failed to create inventory category: it already exists but could not be queried back. Please check the system")
    +}
    +
    +func (c *RestClient) CreateCategory(ctx context.Context, spec *CategoryCreateSpec) (*string, error) {
    +	Logger.Debugf("Create category %v", spec)
    +	stream, _, status, err := c.call(ctx, "POST", CategoryURL, spec, nil)
    +
    +	Logger.Debugf("Get status code: %d", status)
    +	if status != http.StatusOK || err != nil {
    +		Logger.Debugf("Create category failed with status code: %d, error message: %s", status, errors.WithStack(err))
    +		return nil, errors.Wrapf(err, "Status code: %d", status)
    +	}
    +
    +	type RespValue struct {
    +		Value string
    +	}
    +
    +	var pID RespValue
    +	if err := json.NewDecoder(stream).Decode(&pID); err != nil {
    +		Logger.Debugf("Decode response body failed for: %s", errors.WithStack(err))
    +		return nil, errors.Wrap(err, "create category failed")
    +	}
    +	return &(pID.Value), nil
    +}
    +
    +func (c *RestClient) GetCategory(ctx context.Context, id string) (*Category, error) {
    +	Logger.Debugf("Get category %s", id)
    +
    +	stream, _, status, err := c.call(ctx, "GET", fmt.Sprintf("%s/id:%s", CategoryURL, id), nil, nil)
    +
    +	if status != http.StatusOK || err != nil {
+		Logger.Debugf("Get category failed with status code: %d, error message: %s", status, errors.WithStack(err))
    +		return nil, errors.Errorf("Status code: %d, error: %s", status, err)
    +	}
    +
    +	type RespValue struct {
    +		Value Category
    +	}
    +
    +	var pCategory RespValue
    +	if err := json.NewDecoder(stream).Decode(&pCategory); err != nil {
    +		Logger.Debugf("Decode response body failed for: %s", errors.WithStack(err))
    +		return nil, errors.Wrapf(err, "get category %s failed", id)
    +	}
    +	return &(pCategory.Value), nil
    +}
    +
    +func (c *RestClient) UpdateCategory(ctx context.Context, id string, spec *CategoryUpdateSpec) error {
    +	Logger.Debugf("Update category %v", spec)
    +	_, _, status, err := c.call(ctx, "PATCH", fmt.Sprintf("%s/id:%s", CategoryURL, id), spec, nil)
    +
    +	Logger.Debugf("Get status code: %d", status)
    +	if status != http.StatusOK || err != nil {
    +		Logger.Debugf("Update category failed with status code: %d, error message: %s", status, errors.WithStack(err))
    +		return errors.Wrapf(err, "Status code: %d", status)
    +	}
    +
    +	return nil
    +}
    +
    +func (c *RestClient) DeleteCategory(ctx context.Context, id string) error {
    +	Logger.Debugf("Delete category %s", id)
    +
    +	_, _, status, err := c.call(ctx, "DELETE", fmt.Sprintf("%s/id:%s", CategoryURL, id), nil, nil)
    +
    +	if status != http.StatusOK || err != nil {
+		Logger.Debugf("Delete category failed with status code: %d, error message: %s", status, errors.WithStack(err))
    +		return errors.Errorf("Status code: %d, error: %s", status, err)
    +	}
    +	return nil
    +}
    +
    +func (c *RestClient) ListCategories(ctx context.Context) ([]string, error) {
    +	Logger.Debugf("List all categories")
    +
    +	stream, _, status, err := c.call(ctx, "GET", CategoryURL, nil, nil)
    +
    +	if status != http.StatusOK || err != nil {
+		Logger.Debugf("Get categories failed with status code: %d, error message: %s", status, errors.WithStack(err))
    +		return nil, errors.Errorf("Status code: %d, error: %s", status, err)
    +	}
    +
    +	type Categories struct {
    +		Value []string
    +	}
    +
    +	var pCategories Categories
    +	if err := json.NewDecoder(stream).Decode(&pCategories); err != nil {
    +		Logger.Debugf("Decode response body failed for: %s", errors.WithStack(err))
    +		return nil, errors.Wrap(err, "list categories failed")
    +	}
    +	return pCategories.Value, nil
    +}
    +
    +func (c *RestClient) GetCategoriesByName(ctx context.Context, name string) ([]Category, error) {
    +	Logger.Debugf("Get category %s", name)
    +	categoryIds, err := c.ListCategories(ctx)
    +	if err != nil {
    +		Logger.Debugf("Get category failed for: %s", errors.WithStack(err))
    +		return nil, errors.Wrapf(err, "get categories by name %s failed", name)
    +	}
    +
    +	var categories []Category
    +	for _, cID := range categoryIds {
    +		category, err := c.GetCategory(ctx, cID)
    +		if err != nil {
+			Logger.Debugf("Get category %s failed for %s", cID, errors.WithStack(err))
+			// Skip this category rather than dereferencing a nil result below.
+			continue
+		}
+		if category.Name == name {
    +			categories = append(categories, *category)
    +		}
    +	}
    +	return categories, nil
    +}
    diff --git a/vendor/github.com/hashicorp/vic/pkg/vsphere/tags/rest_client.go b/vendor/github.com/hashicorp/vic/pkg/vsphere/tags/rest_client.go
    new file mode 100644
    index 000000000000..d2fd972d6ae0
    --- /dev/null
    +++ b/vendor/github.com/hashicorp/vic/pkg/vsphere/tags/rest_client.go
    @@ -0,0 +1,278 @@
    +// Copyright 2017 VMware, Inc. All Rights Reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//    http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package tags
    +
    +import (
    +	"bytes"
    +	"context"
    +	"encoding/json"
    +	"io"
    +	"io/ioutil"
    +	"net/http"
    +	"net/url"
    +	"strings"
    +	"sync"
    +
    +	"github.com/pkg/errors"
    +
    +	"github.com/vmware/govmomi/vim25/soap"
    +)
    +
    +const (
    +	RestPrefix          = "/rest"
    +	loginURL            = "/com/vmware/cis/session"
    +	sessionIDCookieName = "vmware-api-session-id"
    +)
    +
    +type RestClient struct {
    +	mu       sync.Mutex
    +	host     string
    +	scheme   string
    +	endpoint *url.URL
    +	user     *url.Userinfo
    +	HTTP     *http.Client
    +	cookies  []*http.Cookie
    +}
    +
    +func NewClient(u *url.URL, insecure bool, thumbprint string) *RestClient {
    +	endpoint := &url.URL{}
    +	*endpoint = *u
    +	Logger.Debugf("Create rest client")
    +	endpoint.Path = RestPrefix
    +
    +	sc := soap.NewClient(endpoint, insecure)
    +	if thumbprint != "" {
    +		sc.SetThumbprint(endpoint.Host, thumbprint)
    +	}
    +
    +	user := endpoint.User
    +	endpoint.User = nil
    +
    +	return &RestClient{
    +		endpoint: endpoint,
    +		user:     user,
    +		host:     endpoint.Host,
    +		scheme:   endpoint.Scheme,
    +		HTTP:     &sc.Client,
    +	}
    +}
    +
    +// NewClientWithSessionID creates a new REST client with a supplied session ID
    +// to re-connect to existing sessions.
    +//
    +// Note that the session is not checked for validity - to check for a valid
    +// session after creating the client, use the Valid method. If the session is
    +// no longer valid and the session needs to be re-saved, Login should be called
    +// again before calling SessionID to extract the new session ID. Clients
+// created with this function work in the exact same way as clients
    +// created with NewClient, including supporting re-login on invalid sessions on
    +// all SDK calls.
    +func NewClientWithSessionID(u *url.URL, insecure bool, thumbprint string, sessionID string) *RestClient {
    +	c := NewClient(u, insecure, thumbprint)
    +	c.setSessionID(sessionID)
    +
    +	return c
    +}
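+
+// A minimal usage sketch, not part of the vendored API itself: assuming a
+// context.Context ctx, a previously persisted savedSessionID string, and
+// placeholder connection details, NewClientWithSessionID, Valid, Login, and
+// SessionID can be combined to reuse a saved session roughly as follows:
+//
+//	u, _ := url.Parse("https://vcenter.example.com/")
+//	u.User = url.UserPassword("user", "password")
+//	c := NewClientWithSessionID(u, true, "", savedSessionID) // insecure=true, no thumbprint
+//	if !c.Valid(ctx) {
+//		// The stored session has expired; log in again and persist the new ID.
+//		if err := c.Login(ctx); err != nil {
+//			// handle the login error
+//		}
+//		savedSessionID = c.SessionID()
+//	}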
    +
    +func (c *RestClient) encodeData(data interface{}) (*bytes.Buffer, error) {
    +	params := bytes.NewBuffer(nil)
    +	if data != nil {
    +		if err := json.NewEncoder(params).Encode(data); err != nil {
    +			return nil, errors.Wrap(err, "failed to encode json data")
    +		}
    +	}
    +	return params, nil
    +}
    +
    +func (c *RestClient) call(ctx context.Context, method, path string, data interface{}, headers map[string][]string) (io.ReadCloser, http.Header, int, error) {
    +	//	Logger.Debugf("%s: %s, headers: %+v", method, path, headers)
    +	params, err := c.encodeData(data)
    +	if err != nil {
    +		return nil, nil, -1, errors.Wrap(err, "call failed")
    +	}
    +
    +	if data != nil {
    +		if headers == nil {
    +			headers = make(map[string][]string)
    +		}
    +		headers["Content-Type"] = []string{"application/json"}
    +	}
    +
    +	body, hdr, statusCode, err := c.clientRequest(ctx, method, path, params, headers)
    +	if statusCode == http.StatusUnauthorized && strings.Contains(err.Error(), "This method requires authentication") {
+		// Re-authenticate before retrying; surface the error if the login itself fails.
+		if err := c.Login(ctx); err != nil {
+			return nil, nil, statusCode, errors.Wrap(err, "re-login failed")
+		}
    +		Logger.Debugf("Rerun request after login")
    +		return c.clientRequest(ctx, method, path, params, headers)
    +	}
    +
    +	return body, hdr, statusCode, errors.Wrap(err, "call failed")
    +}
    +
    +func (c *RestClient) clientRequest(ctx context.Context, method, path string, in io.Reader, headers map[string][]string) (io.ReadCloser, http.Header, int, error) {
    +	expectedPayload := (method == "POST" || method == "PUT")
    +	if expectedPayload && in == nil {
    +		in = bytes.NewReader([]byte{})
    +	}
    +
    +	req, err := c.newRequest(method, path, in)
    +	if err != nil {
    +		return nil, nil, -1, errors.Wrap(err, "failed to create request")
    +	}
    +
    +	req = req.WithContext(ctx)
    +	c.mu.Lock()
    +	if c.cookies != nil {
    +		req.AddCookie(c.cookies[0])
    +	}
    +	c.mu.Unlock()
    +
    +	if headers != nil {
    +		for k, v := range headers {
    +			req.Header[k] = v
    +		}
    +	}
    +
    +	if expectedPayload && req.Header.Get("Content-Type") == "" {
    +		req.Header.Set("Content-Type", "application/json")
    +	}
    +	req.Header.Set("Accept", "application/json")
    +
    +	resp, err := c.HTTP.Do(req)
    +	return c.handleResponse(resp, err)
    +}
    +
    +func (c *RestClient) handleResponse(resp *http.Response, err error) (io.ReadCloser, http.Header, int, error) {
    +	statusCode := -1
    +	if resp != nil {
    +		statusCode = resp.StatusCode
    +	}
    +	if err != nil {
    +		if strings.Contains(err.Error(), "connection refused") {
    +			return nil, nil, statusCode, errors.Errorf("Cannot connect to endpoint %s. Is vCloud Suite API running on this server?", c.host)
    +		}
    +		return nil, nil, statusCode, errors.Wrap(err, "error occurred trying to connect")
    +	}
    +
    +	if statusCode < http.StatusOK || statusCode >= http.StatusBadRequest {
    +		body, err := ioutil.ReadAll(resp.Body)
    +		resp.Body.Close()
    +		if err != nil {
    +			return nil, nil, statusCode, errors.Wrap(err, "error reading response")
    +		}
    +		if len(body) == 0 {
    +			return nil, nil, statusCode, errors.Errorf("Error: request returned %s", http.StatusText(statusCode))
    +		}
    +		Logger.Debugf("Error response: %s", bytes.TrimSpace(body))
    +		return nil, nil, statusCode, errors.Errorf("Error response from vCloud Suite API: %s", bytes.TrimSpace(body))
    +	}
    +
    +	return resp.Body, resp.Header, statusCode, nil
    +}
    +
    +func (c *RestClient) Login(ctx context.Context) error {
    +	c.mu.Lock()
    +	defer c.mu.Unlock()
    +
    +	Logger.Debugf("Login to %s through rest API.", c.host)
    +
    +	request, err := c.newRequest("POST", loginURL, nil)
    +	if err != nil {
    +		return errors.Wrap(err, "login failed")
    +	}
    +	if c.user != nil {
    +		password, _ := c.user.Password()
    +		request.SetBasicAuth(c.user.Username(), password)
    +	}
    +	resp, err := c.HTTP.Do(request)
    +	if err != nil {
    +		return errors.Wrap(err, "login failed")
    +	}
    +	if resp == nil {
    +		return errors.New("response is nil in Login")
    +	}
    +	if resp.StatusCode != http.StatusOK {
    +		// #nosec: Errors unhandled.
    +		body, _ := ioutil.ReadAll(resp.Body)
    +		resp.Body.Close()
    +		return errors.Errorf("Login failed: body: %s, status: %s", bytes.TrimSpace(body), resp.Status)
    +	}
    +
    +	c.cookies = resp.Cookies()
    +
    +	Logger.Debugf("Login succeeded")
    +	return nil
    +}
    +
    +func (c *RestClient) newRequest(method, urlStr string, body io.Reader) (*http.Request, error) {
    +	return http.NewRequest(method, c.endpoint.String()+urlStr, body)
    +}
    +
    +// SessionID returns the current session ID of the REST client. An empty string
    +// means there was no session cookie currently loaded.
    +func (c *RestClient) SessionID() string {
    +	for _, cookie := range c.cookies {
    +		if cookie.Name == sessionIDCookieName {
    +			return cookie.Value
    +		}
    +	}
    +	return ""
    +}
    +
    +// setSessionID sets the session cookie with the supplied session ID.
    +//
    +// This does not necessarily mean the session is valid. The session should be
    +// checked with Valid before proceeding, and logged back in if it has expired.
    +//
    +// This function will overwrite any existing session.
    +func (c *RestClient) setSessionID(sessionID string) {
    +	Logger.Debugf("Setting existing session ID %q", sessionID)
    +	idx := -1
    +	for i, cookie := range c.cookies {
    +		if cookie.Name == sessionIDCookieName {
    +			idx = i
    +		}
    +	}
    +	sessionCookie := &http.Cookie{
    +		Name:  sessionIDCookieName,
    +		Value: sessionID,
    +		Path:  RestPrefix,
    +	}
    +	if idx > -1 {
    +		c.cookies[idx] = sessionCookie
    +	} else {
    +		c.cookies = append(c.cookies, sessionCookie)
    +	}
    +}
    +
    +// Valid checks to see if the session cookies in a REST client are still valid.
    +// This should be used when restoring a session to determine if a new login is
    +// necessary.
    +func (c *RestClient) Valid(ctx context.Context) bool {
    +	sessionID := c.SessionID()
    +	Logger.Debugf("Checking if session ID %q is still valid", sessionID)
    +
    +	_, _, statusCode, err := c.clientRequest(ctx, "POST", loginURL+"?~action=get", nil, nil)
    +	if err != nil {
    +		Logger.Debugf("Error getting current session information for ID %q - session is invalid (%d - %s)", sessionID, statusCode, err)
    +	}
    +
    +	if statusCode == http.StatusOK {
    +		Logger.Debugf("Session ID %q is valid", sessionID)
    +		return true
    +	}
    +
    +	Logger.Debugf("Session is invalid for %v (%d)", sessionID, statusCode)
    +	return false
    +}
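+
+// Illustrative session-management flow (a sketch, not part of the upstream API
+// surface; assumes an initialized *RestClient c, a context.Context ctx, and a
+// hypothetical loadSavedSessionID helper used only for illustration):
+//
+//	if saved := loadSavedSessionID(); saved != "" {
+//		c.setSessionID(saved)
+//	}
+//	if !c.Valid(ctx) {
+//		if err := c.Login(ctx); err != nil {
+//			return err
+//		}
+//	}
+//	Logger.Debugf("active session: %s", c.SessionID())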
    diff --git a/vendor/github.com/hashicorp/vic/pkg/vsphere/tags/tag_association.go b/vendor/github.com/hashicorp/vic/pkg/vsphere/tags/tag_association.go
    new file mode 100644
    index 000000000000..4eb0cd50b06a
    --- /dev/null
    +++ b/vendor/github.com/hashicorp/vic/pkg/vsphere/tags/tag_association.go
    @@ -0,0 +1,135 @@
    +// Copyright 2017 VMware, Inc. All Rights Reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//    http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package tags
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +
    +	"github.com/pkg/errors"
    +)
    +
    +const (
    +	TagAssociationURL = "/com/vmware/cis/tagging/tag-association"
    +)
    +
    +type AssociatedObject struct {
    +	ID   *string `json:"id"`
    +	Type *string `json:"type"`
    +}
    +
    +type TagAssociationSpec struct {
    +	ObjectID *AssociatedObject `json:"object_id,omitempty"`
    +	TagID    *string           `json:"tag_id,omitempty"`
    +}
    +
    +func (c *RestClient) getAssociatedObject(objID *string, objType *string) *AssociatedObject {
    +	if objID == nil && objType == nil {
    +		return nil
    +	}
    +	object := AssociatedObject{
    +		ID:   objID,
    +		Type: objType,
    +	}
    +	return &object
    +}
    +
    +func (c *RestClient) getAssociationSpec(tagID *string, objID *string, objType *string) *TagAssociationSpec {
    +	object := c.getAssociatedObject(objID, objType)
    +	spec := TagAssociationSpec{
    +		TagID:    tagID,
    +		ObjectID: object,
    +	}
    +	return &spec
    +}
    +
    +func (c *RestClient) AttachTagToObject(ctx context.Context, tagID string, objID string, objType string) error {
    +	Logger.Debugf("Attach Tag %s to object id: %s, type: %s", tagID, objID, objType)
    +
    +	spec := c.getAssociationSpec(&tagID, &objID, &objType)
    +	_, _, status, err := c.call(ctx, "POST", fmt.Sprintf("%s?~action=attach", TagAssociationURL), *spec, nil)
    +
    +	Logger.Debugf("Get status code: %d", status)
    +	if status != http.StatusOK || err != nil {
    +		Logger.Debugf("Attach tag failed with status code: %d, error message: %s", status, errors.WithStack(err))
    +		return errors.Wrapf(err, "Get unexpected status code: %d", status)
    +	}
    +	return nil
    +}
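+
+// Illustrative call (a sketch; assumes an initialized *RestClient c and a
+// context.Context ctx; the tag ID, object ID, and object type values are
+// placeholders, with the type following the vSphere managed object type name):
+//
+//	if err := c.AttachTagToObject(ctx, "tag-123", "vm-42", "VirtualMachine"); err != nil {
+//		return err
+//	}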
    +
    +func (c *RestClient) DetachTagFromObject(ctx context.Context, tagID string, objID string, objType string) error {
+	Logger.Debugf("Detach Tag %s from object id: %s, type: %s", tagID, objID, objType)
    +
    +	spec := c.getAssociationSpec(&tagID, &objID, &objType)
    +	_, _, status, err := c.call(ctx, "POST", fmt.Sprintf("%s?~action=detach", TagAssociationURL), *spec, nil)
    +
    +	Logger.Debugf("Get status code: %d", status)
    +	if status != http.StatusOK || err != nil {
    +		Logger.Debugf("Detach tag failed with status code: %d, error message: %s", status, errors.WithStack(err))
    +		return errors.Wrapf(err, "Get unexpected status code: %d", status)
    +	}
    +	return nil
    +}
    +
    +func (c *RestClient) ListAttachedTags(ctx context.Context, objID string, objType string) ([]string, error) {
    +	Logger.Debugf("List attached tags of object id: %s, type: %s", objID, objType)
    +
    +	spec := c.getAssociationSpec(nil, &objID, &objType)
    +	stream, _, status, err := c.call(ctx, "POST", fmt.Sprintf("%s?~action=list-attached-tags", TagAssociationURL), *spec, nil)
    +
    +	Logger.Debugf("Get status code: %d", status)
    +	if status != http.StatusOK || err != nil {
+		Logger.Debugf("List attached tags failed with status code: %d, error message: %s", status, errors.WithStack(err))
    +		return nil, errors.Wrapf(err, "Get unexpected status code: %d", status)
    +	}
    +
    +	type RespValue struct {
    +		Value []string
    +	}
    +
    +	var pTag RespValue
    +	if err := json.NewDecoder(stream).Decode(&pTag); err != nil {
    +		Logger.Debugf("Decode response body failed for: %s", errors.WithStack(err))
    +		return nil, errors.Wrap(err, "list attached tags failed")
    +	}
    +	return pTag.Value, nil
    +}
    +
    +func (c *RestClient) ListAttachedObjects(ctx context.Context, tagID string) ([]AssociatedObject, error) {
    +	Logger.Debugf("List attached objects of tag: %s", tagID)
    +
    +	spec := c.getAssociationSpec(&tagID, nil, nil)
    +	Logger.Debugf("List attached objects for tag %v", *spec)
    +	stream, _, status, err := c.call(ctx, "POST", fmt.Sprintf("%s?~action=list-attached-objects", TagAssociationURL), *spec, nil)
    +	Logger.Debugf("Get status code: %d", status)
    +	if status != http.StatusOK || err != nil {
    +		Logger.Debugf("List object failed with status code: %d, error message: %s", status, errors.WithStack(err))
    +		return nil, errors.Wrapf(err, "Get unexpected status code: %d", status)
    +	}
    +
    +	type RespValue struct {
    +		Value []AssociatedObject
    +	}
    +
    +	var pTag RespValue
    +	if err := json.NewDecoder(stream).Decode(&pTag); err != nil {
    +		Logger.Debugf("Decode response body failed for: %s", errors.WithStack(err))
    +		return nil, errors.Wrap(err, "list attached tags failed")
    +	}
    +	return pTag.Value, nil
    +}
    diff --git a/vendor/github.com/hashicorp/vic/pkg/vsphere/tags/tags.go b/vendor/github.com/hashicorp/vic/pkg/vsphere/tags/tags.go
    new file mode 100644
    index 000000000000..6e0c60fd9f04
    --- /dev/null
    +++ b/vendor/github.com/hashicorp/vic/pkg/vsphere/tags/tags.go
    @@ -0,0 +1,255 @@
    +// Copyright 2017 VMware, Inc. All Rights Reserved.
    +//
    +// Licensed under the Apache License, Version 2.0 (the "License");
    +// you may not use this file except in compliance with the License.
    +// You may obtain a copy of the License at
    +//
    +//    http://www.apache.org/licenses/LICENSE-2.0
    +//
    +// Unless required by applicable law or agreed to in writing, software
    +// distributed under the License is distributed on an "AS IS" BASIS,
    +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +// See the License for the specific language governing permissions and
    +// limitations under the License.
    +
    +package tags
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"io"
    +	"net/http"
    +	"regexp"
    +	"strings"
    +
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
    +)
    +
    +const (
    +	TagURL = "/com/vmware/cis/tagging/tag"
    +)
    +
    +type TagCreateSpec struct {
    +	CreateSpec TagCreate `json:"create_spec"`
    +}
    +
    +type TagCreate struct {
    +	CategoryID  string `json:"category_id"`
    +	Description string `json:"description"`
    +	Name        string `json:"name"`
    +}
    +
    +type TagUpdateSpec struct {
    +	UpdateSpec TagUpdate `json:"update_spec"`
    +}
    +
    +type TagUpdate struct {
    +	Description string `json:"description"`
    +	Name        string `json:"name"`
    +}
    +
    +type Tag struct {
    +	ID          string   `json:"id"`
    +	Description string   `json:"description"`
    +	Name        string   `json:"name"`
    +	CategoryID  string   `json:"category_id"`
    +	UsedBy      []string `json:"used_by"`
    +}
    +
    +var Logger = logrus.New()
    +
    +func (c *RestClient) CreateTagIfNotExist(ctx context.Context, name string, description string, categoryID string) (*string, error) {
    +	tagCreate := TagCreate{categoryID, description, name}
    +	spec := TagCreateSpec{tagCreate}
    +	id, err := c.CreateTag(ctx, &spec)
    +	if err == nil {
    +		return id, nil
    +	}
+	Logger.Debugf("Create tag %s failed: %s", name, errors.WithStack(err))
    +	// if already exists, query back
    +	if strings.Contains(err.Error(), ErrAlreadyExists) {
    +		tagObjs, err := c.GetTagByNameForCategory(ctx, name, categoryID)
    +		if err != nil {
    +			return nil, errors.Wrapf(err, "failed to query tag %s for category %s", name, categoryID)
    +		}
    +		if tagObjs != nil {
    +			return &tagObjs[0].ID, nil
    +		}
    +
    +		// should not happen
+		return nil, errors.New("failed to create tag: it already exists but could not be queried back; please check the system")
    +	}
    +
    +	return nil, errors.Wrap(err, "failed to create tag")
    +}
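+
+// Illustrative create-or-reuse flow (a sketch; assumes an initialized
+// *RestClient c and a context.Context ctx; the category ID is a placeholder):
+//
+//	tagID, err := c.CreateTagIfNotExist(ctx, "env-prod", "environment label", "category-123")
+//	if err != nil {
+//		return err
+//	}
+//	// later, remove the tag only if nothing is attached to it:
+//	_ = c.DeleteTagIfNoObjectAttached(ctx, *tagID)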
    +
    +func (c *RestClient) DeleteTagIfNoObjectAttached(ctx context.Context, id string) error {
    +	objs, err := c.ListAttachedObjects(ctx, id)
    +	if err != nil {
    +		return errors.Wrap(err, "failed to delete tag")
    +	}
+	if len(objs) > 0 {
+		Logger.Debugf("tag %s still has attached objects, not deleting it.", id)
    +		return nil
    +	}
    +	return c.DeleteTag(ctx, id)
    +}
    +
    +func (c *RestClient) CreateTag(ctx context.Context, spec *TagCreateSpec) (*string, error) {
    +	Logger.Debugf("Create Tag %v", spec)
    +	stream, _, status, err := c.call(ctx, "POST", TagURL, spec, nil)
    +
    +	Logger.Debugf("Get status code: %d", status)
    +	if status != http.StatusOK || err != nil {
    +		Logger.Debugf("Create tag failed with status code: %d, error message: %s", status, errors.WithStack(err))
    +		return nil, errors.Wrapf(err, "Status code: %d", status)
    +	}
    +
    +	type RespValue struct {
    +		Value string
    +	}
    +
    +	var pID RespValue
    +	if err := json.NewDecoder(stream).Decode(&pID); err != nil {
    +		Logger.Debugf("Decode response body failed for: %s", errors.WithStack(err))
    +		return nil, errors.Wrap(err, "create tag failed")
    +	}
    +	return &pID.Value, nil
    +}
    +
    +func (c *RestClient) GetTag(ctx context.Context, id string) (*Tag, error) {
    +	Logger.Debugf("Get tag %s", id)
    +
    +	stream, _, status, err := c.call(ctx, "GET", fmt.Sprintf("%s/id:%s", TagURL, id), nil, nil)
    +
    +	if status != http.StatusOK || err != nil {
+		Logger.Debugf("Get tag failed with status code: %d, error message: %s", status, errors.WithStack(err))
    +		return nil, errors.Wrapf(err, "Status code: %d", status)
    +	}
    +
    +	type RespValue struct {
    +		Value Tag
    +	}
    +
    +	var pTag RespValue
    +	if err := json.NewDecoder(stream).Decode(&pTag); err != nil {
    +		Logger.Debugf("Decode response body failed for: %s", errors.WithStack(err))
    +		return nil, errors.Wrapf(err, "failed to get tag %s", id)
    +	}
    +	return &(pTag.Value), nil
    +}
    +
    +func (c *RestClient) UpdateTag(ctx context.Context, id string, spec *TagUpdateSpec) error {
    +	Logger.Debugf("Update tag %v", spec)
    +	_, _, status, err := c.call(ctx, "PATCH", fmt.Sprintf("%s/id:%s", TagURL, id), spec, nil)
    +
    +	Logger.Debugf("Get status code: %d", status)
    +	if status != http.StatusOK || err != nil {
    +		Logger.Debugf("Update tag failed with status code: %d, error message: %s", status, errors.WithStack(err))
    +		return errors.Wrapf(err, "Status code: %d", status)
    +	}
    +
    +	return nil
    +}
    +
    +func (c *RestClient) DeleteTag(ctx context.Context, id string) error {
    +	Logger.Debugf("Delete tag %s", id)
    +
    +	_, _, status, err := c.call(ctx, "DELETE", fmt.Sprintf("%s/id:%s", TagURL, id), nil, nil)
    +
    +	if status != http.StatusOK || err != nil {
+		Logger.Debugf("Delete tag failed with status code: %d, error message: %s", status, errors.WithStack(err))
    +		return errors.Wrapf(err, "Status code: %d", status)
    +	}
    +	return nil
    +}
    +
    +func (c *RestClient) ListTags(ctx context.Context) ([]string, error) {
    +	Logger.Debugf("List all tags")
    +
    +	stream, _, status, err := c.call(ctx, "GET", TagURL, nil, nil)
    +
    +	if status != http.StatusOK || err != nil {
+		Logger.Debugf("Get tags failed with status code: %d, error message: %s", status, errors.WithStack(err))
    +		return nil, errors.Wrapf(err, "Status code: %d", status)
    +	}
    +
    +	return c.handleTagIDList(stream)
    +}
    +
    +func (c *RestClient) ListTagsForCategory(ctx context.Context, id string) ([]string, error) {
    +	Logger.Debugf("List tags for category: %s", id)
    +
    +	type PostCategory struct {
    +		CId string `json:"category_id"`
    +	}
    +	spec := PostCategory{id}
    +	stream, _, status, err := c.call(ctx, "POST", fmt.Sprintf("%s/id:%s?~action=list-tags-for-category", TagURL, id), spec, nil)
    +
    +	if status != http.StatusOK || err != nil {
+		Logger.Debugf("List tags for category failed with status code: %d, error message: %s", status, errors.WithStack(err))
    +		return nil, errors.Wrapf(err, "Status code: %d", status)
    +	}
    +
    +	return c.handleTagIDList(stream)
    +}
    +
    +func (c *RestClient) handleTagIDList(stream io.ReadCloser) ([]string, error) {
    +	type Tags struct {
    +		Value []string
    +	}
    +
    +	var pTags Tags
    +	if err := json.NewDecoder(stream).Decode(&pTags); err != nil {
    +		Logger.Debugf("Decode response body failed for: %s", errors.WithStack(err))
    +		return nil, errors.Wrap(err, "failed to decode json")
    +	}
    +	return pTags.Value, nil
    +}
    +
+// GetTagByNameForCategory returns the tags with the given name that belong to the category identified by id.
    +func (c *RestClient) GetTagByNameForCategory(ctx context.Context, name string, id string) ([]Tag, error) {
    +	Logger.Debugf("Get tag %s for category %s", name, id)
    +	tagIds, err := c.ListTagsForCategory(ctx, id)
    +	if err != nil {
    +		Logger.Debugf("Get tag failed for %s", errors.WithStack(err))
    +		return nil, errors.Wrapf(err, "get tag failed for name %s category %s", name, id)
    +	}
    +
    +	var tags []Tag
    +	for _, tID := range tagIds {
    +		tag, err := c.GetTag(ctx, tID)
    +		if err != nil {
    +			Logger.Debugf("Get tag %s failed for %s", tID, errors.WithStack(err))
    +			return nil, errors.Wrapf(err, "get tag failed for name %s category %s", name, id)
    +		}
    +		if tag.Name == name {
    +			tags = append(tags, *tag)
    +		}
    +	}
    +	return tags, nil
    +}
    +
+// GetAttachedTagsByNamePattern returns the tags attached to the given object whose names match the regular expression namePattern.
    +func (c *RestClient) GetAttachedTagsByNamePattern(ctx context.Context, namePattern string, objID string, objType string) ([]Tag, error) {
    +	tagIds, err := c.ListAttachedTags(ctx, objID, objType)
    +	if err != nil {
    +		Logger.Debugf("Get attached tags failed for %s", errors.WithStack(err))
    +		return nil, errors.Wrap(err, "get attached tags failed")
    +	}
    +
    +	var validName = regexp.MustCompile(namePattern)
    +	var tags []Tag
    +	for _, tID := range tagIds {
+		tag, err := c.GetTag(ctx, tID)
+		if err != nil {
+			Logger.Debugf("Get tag %s failed for %s", tID, errors.WithStack(err))
+			continue
+		}
    +		if validName.MatchString(tag.Name) {
    +			tags = append(tags, *tag)
    +		}
    +	}
    +	return tags, nil
    +}
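+
+// Illustrative lookup by name pattern (a sketch; assumes an initialized
+// *RestClient c and a context.Context ctx; namePattern is a Go regular
+// expression, and the object ID/type values are placeholders):
+//
+//	tags, err := c.GetAttachedTagsByNamePattern(ctx, "^env-", "vm-42", "VirtualMachine")
+//	if err != nil {
+//		return err
+//	}
+//	for _, t := range tags {
+//		Logger.Debugf("matched tag %s (%s)", t.Name, t.ID)
+//	}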
    diff --git a/vendor/github.com/joyent/triton-go/compute/client.go b/vendor/github.com/joyent/triton-go/compute/client.go
    new file mode 100644
    index 000000000000..255dabca2f2d
    --- /dev/null
    +++ b/vendor/github.com/joyent/triton-go/compute/client.go
    @@ -0,0 +1,90 @@
    +//
    +// Copyright (c) 2018, Joyent, Inc. All rights reserved.
    +//
    +// This Source Code Form is subject to the terms of the Mozilla Public
    +// License, v. 2.0. If a copy of the MPL was not distributed with this
    +// file, You can obtain one at http://mozilla.org/MPL/2.0/.
    +//
    +
    +package compute
    +
    +import (
    +	"net/http"
    +
    +	triton "github.com/joyent/triton-go"
    +	"github.com/joyent/triton-go/client"
    +)
    +
    +type ComputeClient struct {
    +	Client *client.Client
    +}
    +
    +func newComputeClient(client *client.Client) *ComputeClient {
    +	return &ComputeClient{
    +		Client: client,
    +	}
    +}
    +
    +// NewClient returns a new client for working with Compute endpoints and
    +// resources within CloudAPI
    +func NewClient(config *triton.ClientConfig) (*ComputeClient, error) {
    +	// TODO: Utilize config interface within the function itself
    +	client, err := client.New(
    +		config.TritonURL,
    +		config.MantaURL,
    +		config.AccountName,
    +		config.Signers...,
    +	)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return newComputeClient(client), nil
    +}
    +
+// SetHeader allows a consumer of the current client to set custom headers for
    +// the next backend HTTP request sent to CloudAPI
    +func (c *ComputeClient) SetHeader(header *http.Header) {
    +	c.Client.RequestHeader = header
    +}
    +
    +// Datacenters returns a Compute client used for accessing functions pertaining
    +// to DataCenter functionality in the Triton API.
    +func (c *ComputeClient) Datacenters() *DataCentersClient {
    +	return &DataCentersClient{c.Client}
    +}
    +
    +// Images returns a Compute client used for accessing functions pertaining to
    +// Images functionality in the Triton API.
    +func (c *ComputeClient) Images() *ImagesClient {
    +	return &ImagesClient{c.Client}
    +}
    +
+// Instances returns a Compute client used for accessing functions pertaining
+// to Instances functionality in the Triton API.
    +func (c *ComputeClient) Instances() *InstancesClient {
    +	return &InstancesClient{c.Client}
    +}
    +
    +// Packages returns a Compute client used for accessing functions pertaining to
    +// Packages functionality in the Triton API.
    +func (c *ComputeClient) Packages() *PackagesClient {
    +	return &PackagesClient{c.Client}
    +}
    +
    +// Services returns a Compute client used for accessing functions pertaining to
    +// Services functionality in the Triton API.
    +func (c *ComputeClient) Services() *ServicesClient {
    +	return &ServicesClient{c.Client}
    +}
    +
    +// Snapshots returns a Compute client used for accessing functions pertaining to
    +// Snapshots functionality in the Triton API.
    +func (c *ComputeClient) Snapshots() *SnapshotsClient {
    +	return &SnapshotsClient{c.Client}
    +}
    +
+// Volumes returns a Compute client used for accessing functions pertaining to
+// Volumes functionality in the Triton API.
    +func (c *ComputeClient) Volumes() *VolumesClient {
    +	return &VolumesClient{c.Client}
    +}
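+
+// Illustrative construction (a sketch; the URL and account name are
+// placeholders, and the signer is assumed to come from the triton-go
+// authentication package):
+//
+//	cfg := &triton.ClientConfig{
+//		TritonURL:   "https://us-east-1.api.joyent.com",
+//		AccountName: "my-account",
+//		Signers:     []authentication.Signer{signer},
+//	}
+//	cc, err := compute.NewClient(cfg)
+//	if err != nil {
+//		return err
+//	}
+//	instances := cc.Instances()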
    diff --git a/vendor/github.com/joyent/triton-go/compute/datacenters.go b/vendor/github.com/joyent/triton-go/compute/datacenters.go
    new file mode 100644
    index 000000000000..9f22cffbba1e
    --- /dev/null
    +++ b/vendor/github.com/joyent/triton-go/compute/datacenters.go
    @@ -0,0 +1,101 @@
    +//
    +// Copyright (c) 2018, Joyent, Inc. All rights reserved.
    +//
    +// This Source Code Form is subject to the terms of the Mozilla Public
    +// License, v. 2.0. If a copy of the MPL was not distributed with this
    +// file, You can obtain one at http://mozilla.org/MPL/2.0/.
    +//
    +
    +package compute
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"path"
    +	"sort"
    +
    +	"github.com/joyent/triton-go/client"
    +	"github.com/joyent/triton-go/errors"
    +	pkgerrors "github.com/pkg/errors"
    +)
    +
    +type DataCentersClient struct {
    +	client *client.Client
    +}
    +
    +type DataCenter struct {
    +	Name string `json:"name"`
    +	URL  string `json:"url"`
    +}
    +
    +type ListDataCentersInput struct{}
    +
    +func (c *DataCentersClient) List(ctx context.Context, _ *ListDataCentersInput) ([]*DataCenter, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "datacenters")
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to list data centers")
    +	}
    +
    +	var intermediate map[string]string
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&intermediate); err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to decode list data centers response")
    +	}
    +
    +	keys := make([]string, len(intermediate))
    +	i := 0
    +	for k := range intermediate {
    +		keys[i] = k
    +		i++
    +	}
    +	sort.Strings(keys)
    +
    +	result := make([]*DataCenter, len(intermediate))
    +	i = 0
    +	for _, key := range keys {
    +		result[i] = &DataCenter{
    +			Name: key,
    +			URL:  intermediate[key],
    +		}
    +		i++
    +	}
    +
    +	return result, nil
    +}
    +
    +type GetDataCenterInput struct {
    +	Name string
    +}
    +
    +func (c *DataCentersClient) Get(ctx context.Context, input *GetDataCenterInput) (*DataCenter, error) {
    +	dcs, err := c.List(ctx, &ListDataCentersInput{})
    +	if err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to get data center")
    +	}
    +
    +	for _, dc := range dcs {
    +		if dc.Name == input.Name {
    +			return &DataCenter{
    +				Name: input.Name,
    +				URL:  dc.URL,
    +			}, nil
    +		}
    +	}
    +
    +	return nil, &errors.APIError{
    +		StatusCode: http.StatusNotFound,
    +		Code:       "ResourceNotFound",
    +		Message:    fmt.Sprintf("data center %q not found", input.Name),
    +	}
    +}
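+
+// Illustrative lookup (a sketch; assumes cc is a *ComputeClient, ctx is a
+// context.Context, and the data center name is a placeholder):
+//
+//	dc, err := cc.Datacenters().Get(ctx, &GetDataCenterInput{Name: "us-east-1"})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(dc.URL)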
    diff --git a/vendor/github.com/joyent/triton-go/compute/images.go b/vendor/github.com/joyent/triton-go/compute/images.go
    new file mode 100644
    index 000000000000..8dcae0a89d76
    --- /dev/null
    +++ b/vendor/github.com/joyent/triton-go/compute/images.go
    @@ -0,0 +1,268 @@
    +//
    +// Copyright (c) 2018, Joyent, Inc. All rights reserved.
    +//
    +// This Source Code Form is subject to the terms of the Mozilla Public
    +// License, v. 2.0. If a copy of the MPL was not distributed with this
    +// file, You can obtain one at http://mozilla.org/MPL/2.0/.
    +//
    +
    +package compute
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"net/http"
    +	"net/url"
    +	"path"
    +	"time"
    +
    +	"github.com/joyent/triton-go/client"
    +	"github.com/pkg/errors"
    +)
    +
    +type ImagesClient struct {
    +	client *client.Client
    +}
    +
    +type ImageFile struct {
    +	Compression string `json:"compression"`
    +	SHA1        string `json:"sha1"`
    +	Size        int64  `json:"size"`
    +}
    +
    +type Image struct {
    +	ID           string                 `json:"id"`
    +	Name         string                 `json:"name"`
    +	OS           string                 `json:"os"`
    +	Description  string                 `json:"description"`
    +	Version      string                 `json:"version"`
    +	Type         string                 `json:"type"`
    +	Requirements map[string]interface{} `json:"requirements"`
    +	Homepage     string                 `json:"homepage"`
    +	Files        []*ImageFile           `json:"files"`
    +	PublishedAt  time.Time              `json:"published_at"`
    +	Owner        string                 `json:"owner"`
    +	Public       bool                   `json:"public"`
    +	State        string                 `json:"state"`
    +	Tags         map[string]string      `json:"tags"`
    +	EULA         string                 `json:"eula"`
    +	ACL          []string               `json:"acl"`
    +}
    +
    +type ListImagesInput struct {
    +	Name    string
    +	OS      string
    +	Version string
    +	Public  bool
    +	State   string
    +	Owner   string
    +	Type    string
    +}
    +
    +func (c *ImagesClient) List(ctx context.Context, input *ListImagesInput) ([]*Image, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "images")
    +
    +	query := &url.Values{}
    +	if input.Name != "" {
    +		query.Set("name", input.Name)
    +	}
    +	if input.OS != "" {
    +		query.Set("os", input.OS)
    +	}
    +	if input.Version != "" {
    +		query.Set("version", input.Version)
    +	}
    +	if input.Public {
    +		query.Set("public", "true")
    +	}
    +	if input.State != "" {
    +		query.Set("state", input.State)
    +	}
    +	if input.Owner != "" {
    +		query.Set("owner", input.Owner)
    +	}
    +	if input.Type != "" {
    +		query.Set("type", input.Type)
    +	}
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +		Query:  query,
    +	}
    +	respReader, err := c.client.ExecuteRequestURIParams(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, errors.Wrap(err, "unable to list images")
    +	}
    +
    +	var result []*Image
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, errors.Wrap(err, "unable to decode list images response")
    +	}
    +
    +	return result, nil
    +}
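+
+// Illustrative filtered listing (a sketch; assumes cc is a *ComputeClient and
+// ctx is a context.Context; only non-zero fields are sent as query parameters,
+// so an empty ListImagesInput lists everything visible to the account):
+//
+//	images, err := cc.Images().List(ctx, &ListImagesInput{
+//		OS:     "smartos",
+//		State:  "active",
+//		Public: true,
+//	})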
    +
    +type GetImageInput struct {
    +	ImageID string
    +}
    +
    +func (c *ImagesClient) Get(ctx context.Context, input *GetImageInput) (*Image, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "images", input.ImageID)
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, errors.Wrap(err, "unable to get image")
    +	}
    +
    +	var result *Image
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, errors.Wrap(err, "unable to decode get image response")
    +	}
    +
    +	return result, nil
    +}
    +
    +type DeleteImageInput struct {
    +	ImageID string
    +}
    +
    +func (c *ImagesClient) Delete(ctx context.Context, input *DeleteImageInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "images", input.ImageID)
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodDelete,
    +		Path:   fullPath,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return errors.Wrap(err, "unable to delete image")
    +	}
    +
    +	return nil
    +}
    +
    +type ExportImageInput struct {
    +	ImageID   string
    +	MantaPath string
    +}
    +
    +type MantaLocation struct {
    +	MantaURL     string `json:"manta_url"`
    +	ImagePath    string `json:"image_path"`
    +	ManifestPath string `json:"manifest_path"`
    +}
    +
    +func (c *ImagesClient) Export(ctx context.Context, input *ExportImageInput) (*MantaLocation, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "images", input.ImageID)
    +	query := &url.Values{}
    +	query.Set("action", "export")
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Query:  query,
    +	}
    +	respReader, err := c.client.ExecuteRequestURIParams(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, errors.Wrap(err, "unable to export image")
    +	}
    +
    +	var result *MantaLocation
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, errors.Wrap(err, "unable to decode export image response")
    +	}
    +
    +	return result, nil
    +}
    +
    +type CreateImageFromMachineInput struct {
    +	MachineID   string            `json:"machine"`
    +	Name        string            `json:"name"`
    +	Version     string            `json:"version,omitempty"`
    +	Description string            `json:"description,omitempty"`
    +	HomePage    string            `json:"homepage,omitempty"`
    +	EULA        string            `json:"eula,omitempty"`
    +	ACL         []string          `json:"acl,omitempty"`
    +	Tags        map[string]string `json:"tags,omitempty"`
    +}
    +
    +func (c *ImagesClient) CreateFromMachine(ctx context.Context, input *CreateImageFromMachineInput) (*Image, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "images")
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Body:   input,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, errors.Wrap(err, "unable to create machine from image")
    +	}
    +
    +	var result *Image
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, errors.Wrap(err, "unable to decode create machine from image response")
    +	}
    +
    +	return result, nil
    +}
    +
    +type UpdateImageInput struct {
    +	ImageID     string            `json:"-"`
    +	Name        string            `json:"name,omitempty"`
    +	Version     string            `json:"version,omitempty"`
    +	Description string            `json:"description,omitempty"`
    +	HomePage    string            `json:"homepage,omitempty"`
    +	EULA        string            `json:"eula,omitempty"`
    +	ACL         []string          `json:"acl,omitempty"`
    +	Tags        map[string]string `json:"tags,omitempty"`
    +}
    +
    +func (c *ImagesClient) Update(ctx context.Context, input *UpdateImageInput) (*Image, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "images", input.ImageID)
    +	query := &url.Values{}
    +	query.Set("action", "update")
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Query:  query,
    +		Body:   input,
    +	}
    +	respReader, err := c.client.ExecuteRequestURIParams(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, errors.Wrap(err, "unable to update image")
    +	}
    +
    +	var result *Image
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, errors.Wrap(err, "unable to decode update image response")
    +	}
    +
    +	return result, nil
    +}
    diff --git a/vendor/github.com/joyent/triton-go/compute/instances.go b/vendor/github.com/joyent/triton-go/compute/instances.go
    new file mode 100644
    index 000000000000..3ec78f05ce81
    --- /dev/null
    +++ b/vendor/github.com/joyent/triton-go/compute/instances.go
    @@ -0,0 +1,1230 @@
    +//
    +// Copyright 2019 Joyent, Inc.
    +//
    +// This Source Code Form is subject to the terms of the Mozilla Public
    +// License, v. 2.0. If a copy of the MPL was not distributed with this
    +// file, You can obtain one at http://mozilla.org/MPL/2.0/.
    +//
    +
    +package compute
    +
    +import (
    +	"context"
    +	"crypto/sha1"
    +	"encoding/hex"
    +	"encoding/json"
    +	"fmt"
    +	"io"
    +	"io/ioutil"
    +	"net/http"
    +	"net/url"
    +	"path"
    +	"strconv"
    +	"strings"
    +	"time"
    +
    +	"github.com/joyent/triton-go/client"
    +	"github.com/joyent/triton-go/errors"
    +	pkgerrors "github.com/pkg/errors"
    +)
    +
    +type InstancesClient struct {
    +	client *client.Client
    +}
    +
    +const (
    +	CNSTagDisable    = "triton.cns.disable"
    +	CNSTagReversePTR = "triton.cns.reverse_ptr"
    +	CNSTagServices   = "triton.cns.services"
    +)
    +
    +// InstanceCNS is a container for the CNS-specific attributes.  In the API these
+// values are embedded within an Instance's Tags attribute; however, they are
    +// exposed to the caller as their native types.
    +type InstanceCNS struct {
    +	Disable    bool
    +	ReversePTR string
    +	Services   []string
    +}
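+
+// As an illustration of the mapping described above (the exact serialization is
+// an assumption inferred from the CNSTag* constants): Disable corresponds to the
+// "triton.cns.disable" tag, ReversePTR to "triton.cns.reverse_ptr", and Services
+// to "triton.cns.services", with the services list typically rendered as a
+// comma-separated string in the raw Tags attribute.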
    +
    +type InstanceVolume struct {
    +	Name       string `json:"name,omitempty"`
    +	Type       string `json:"type,omitempty"`
    +	Mode       string `json:"mode,omitempty"`
    +	Mountpoint string `json:"mountpoint,omitempty"`
    +}
    +
    +type NetworkObject struct {
    +	IPv4UUID string   `json:"ipv4_uuid"`
    +	IPv4IPs  []string `json:"ipv4_ips,omitempty"`
    +}
    +
    +type Instance struct {
    +	ID                 string                 `json:"id"`
    +	Name               string                 `json:"name"`
    +	Type               string                 `json:"type"`
    +	Brand              string                 `json:"brand"`
    +	State              string                 `json:"state"`
    +	Image              string                 `json:"image"`
    +	Memory             int                    `json:"memory"`
    +	Disk               int                    `json:"disk"`
    +	Metadata           map[string]string      `json:"metadata"`
    +	Tags               map[string]interface{} `json:"tags"`
    +	Created            time.Time              `json:"created"`
    +	Updated            time.Time              `json:"updated"`
    +	Docker             bool                   `json:"docker"`
    +	IPs                []string               `json:"ips"`
    +	Networks           []string               `json:"networks"`
    +	PrimaryIP          string                 `json:"primaryIp"`
    +	FirewallEnabled    bool                   `json:"firewall_enabled"`
    +	ComputeNode        string                 `json:"compute_node"`
    +	Package            string                 `json:"package"`
    +	DomainNames        []string               `json:"dns_names"`
    +	DeletionProtection bool                   `json:"deletion_protection"`
    +	CNS                InstanceCNS
    +}
    +
    +// _Instance is a private facade over Instance that handles the necessary API
    +// overrides from VMAPI's machine endpoint(s).
    +type _Instance struct {
    +	Instance
    +	Tags map[string]interface{} `json:"tags"`
    +}
    +
    +type NIC struct {
    +	IP      string `json:"ip"`
    +	MAC     string `json:"mac"`
    +	Primary bool   `json:"primary"`
    +	Netmask string `json:"netmask"`
    +	Gateway string `json:"gateway"`
    +	State   string `json:"state"`
    +	Network string `json:"network"`
    +}
    +
    +type GetInstanceInput struct {
    +	ID string
    +}
    +
    +func (gmi *GetInstanceInput) Validate() error {
    +	if gmi.ID == "" {
+		return fmt.Errorf("machine ID cannot be empty")
    +	}
    +
    +	return nil
    +}
    +
    +func (c *InstancesClient) Count(ctx context.Context, input *ListInstancesInput) (int, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "machines")
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodHead,
    +		Path:   fullPath,
    +		Query:  buildQueryFilter(input),
    +	}
    +
    +	response, err := c.client.ExecuteRequestRaw(ctx, reqInputs)
    +	if err != nil {
    +		return -1, pkgerrors.Wrap(err, "unable to get machines count")
    +	}
    +
    +	if response == nil {
    +		return -1, pkgerrors.New("request to get machines count has empty response")
    +	}
    +	defer response.Body.Close()
    +
    +	var result int
    +
    +	if count := response.Header.Get("X-Resource-Count"); count != "" {
    +		value, err := strconv.Atoi(count)
    +		if err != nil {
    +			return -1, pkgerrors.Wrap(err, "unable to decode machines count response")
    +		}
    +		result = value
    +	}
    +
    +	return result, nil
    +}
    +
    +func (c *InstancesClient) Get(ctx context.Context, input *GetInstanceInput) (*Instance, error) {
    +	if err := input.Validate(); err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to get machine")
    +	}
    +
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID)
    +	reqInputs := client.RequestInput{
    +		Method:       http.MethodGet,
    +		Path:         fullPath,
    +		PreserveGone: true,
    +	}
    +	response, reqErr := c.client.ExecuteRequestRaw(ctx, reqInputs)
    +	if response == nil {
    +		return nil, pkgerrors.Wrap(reqErr, "unable to get machine")
    +	}
    +	if response.Body != nil {
    +		defer response.Body.Close()
    +	}
    +	if reqErr != nil {
    +		reqErr = pkgerrors.Wrap(reqErr, "unable to get machine")
    +
    +		// If this is not a HTTP 410 Gone error, return it immediately to the caller.  Otherwise, we'll return it alongside the instance below.
    +		if response.StatusCode != http.StatusGone {
    +			return nil, reqErr
    +		}
    +	}
    +
    +	var result *_Instance
    +	decoder := json.NewDecoder(response.Body)
    +	if err := decoder.Decode(&result); err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to parse JSON in get machine response")
    +	}
    +
    +	native, err := result.toNative()
    +	if err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to decode get machine response")
    +	}
    +
    +	// To remain compatible with the existing interface, we'll return both an error and an instance object in some cases; e.g., for HTTP 410 Gone responses for deleted instances.
    +	return native, reqErr
    +}
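+
+// Illustrative call showing the dual return described above (a sketch; assumes
+// cc is a *ComputeClient, ctx is a context.Context, and the ID is a placeholder):
+//
+//	inst, err := cc.Instances().Get(ctx, &GetInstanceInput{ID: "instance-uuid"})
+//	if err != nil && inst == nil {
+//		return err // hard failure
+//	}
+//	if err != nil {
+//		// HTTP 410 Gone: the instance existed but has been deleted; inst still
+//		// carries its last known state.
+//	}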
    +
    +type ListInstancesInput struct {
    +	Brand       string
    +	Alias       string
    +	Name        string
    +	Image       string
    +	State       string
    +	Memory      uint16
    +	Limit       uint16
    +	Offset      uint16
    +	Tags        map[string]interface{} // query by arbitrary tags prefixed with "tag."
    +	Tombstone   bool
    +	Docker      bool
    +	Credentials bool
    +}
    +
    +func buildQueryFilter(input *ListInstancesInput) *url.Values {
    +	query := &url.Values{}
    +	if input.Brand != "" {
    +		query.Set("brand", input.Brand)
    +	}
    +	if input.Name != "" {
    +		query.Set("name", input.Name)
    +	}
    +	if input.Image != "" {
    +		query.Set("image", input.Image)
    +	}
    +	if input.State != "" {
    +		query.Set("state", input.State)
    +	}
    +	if input.Memory >= 1 {
    +		query.Set("memory", fmt.Sprintf("%d", input.Memory))
    +	}
    +	if input.Limit >= 1 && input.Limit <= 1000 {
    +		query.Set("limit", fmt.Sprintf("%d", input.Limit))
    +	}
+	if input.Offset > 0 {
    +		query.Set("offset", fmt.Sprintf("%d", input.Offset))
    +	}
    +	if input.Tombstone {
    +		query.Set("tombstone", "true")
    +	}
    +	if input.Docker {
    +		query.Set("docker", "true")
    +	}
    +	if input.Credentials {
    +		query.Set("credentials", "true")
    +	}
+	for k, v := range input.Tags {
+		// Tag values are declared as interface{}; format them rather than
+		// asserting to string, which would panic on non-string values.
+		query.Set(fmt.Sprintf("tag.%s", k), fmt.Sprintf("%v", v))
+	}
    +
    +	return query
    +}
    +
    +func (c *InstancesClient) List(ctx context.Context, input *ListInstancesInput) ([]*Instance, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "machines")
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +		Query:  buildQueryFilter(input),
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to list machines")
    +	}
    +
    +	var results []*_Instance
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&results); err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to decode list machines response")
    +	}
    +
    +	machines := make([]*Instance, 0, len(results))
    +	for _, machineAPI := range results {
    +		native, err := machineAPI.toNative()
    +		if err != nil {
    +			return nil, pkgerrors.Wrap(err, "unable to decode list machines response")
    +		}
    +		machines = append(machines, native)
    +	}
    +
    +	return machines, nil
    +}
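+
+// Illustrative filtered listing (a sketch; assumes cc is a *ComputeClient and
+// ctx is a context.Context; tag keys are sent as "tag.<key>" query parameters,
+// as built in buildQueryFilter above):
+//
+//	running, err := cc.Instances().List(ctx, &ListInstancesInput{
+//		State: "running",
+//		Tags:  map[string]interface{}{"role": "web"},
+//	})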
    +
    +type CreateInstanceInput struct {
    +	Name            string
    +	NamePrefix      string
    +	Package         string
    +	Image           string
    +	Networks        []string
    +	NetworkObjects  []NetworkObject
    +	Affinity        []string
    +	LocalityStrict  bool
    +	LocalityNear    []string
    +	LocalityFar     []string
    +	Metadata        map[string]string
+	Tags            map[string]string
+	FirewallEnabled bool
    +	CNS             InstanceCNS
    +	Volumes         []InstanceVolume
    +}
    +
    +func buildInstanceName(namePrefix string) string {
    +	h := sha1.New()
    +	io.WriteString(h, namePrefix+time.Now().UTC().String())
    +	return fmt.Sprintf("%s%s", namePrefix, hex.EncodeToString(h.Sum(nil))[:8])
    +}
    +
    +func (input *CreateInstanceInput) toAPI() (map[string]interface{}, error) {
    +	const numExtraParams = 8
    +	result := make(map[string]interface{}, numExtraParams+len(input.Metadata)+len(input.Tags))
    +
    +	result["firewall_enabled"] = input.FirewallEnabled
    +
    +	if input.Name != "" {
    +		result["name"] = input.Name
    +	} else if input.NamePrefix != "" {
    +		result["name"] = buildInstanceName(input.NamePrefix)
    +	}
    +
    +	if input.Package != "" {
    +		result["package"] = input.Package
    +	}
    +
    +	if input.Image != "" {
    +		result["image"] = input.Image
    +	}
    +
    +	// If we are passed []string from input.Networks that do not conflict with networks provided by NetworkObjects, add them to the request sent to CloudAPI
    +	var networks []NetworkObject
    +
    +	if len(input.NetworkObjects) > 0 {
    +		networks = append(networks, input.NetworkObjects...)
    +	}
    +
    +	for _, netuuid := range input.Networks {
    +		found := false
    +
    +		for _, net := range networks {
    +			if net.IPv4UUID == netuuid {
    +				found = true
    +			}
    +		}
    +
    +		if !found {
    +			networks = append(networks, NetworkObject{
    +				IPv4UUID: netuuid,
    +			})
    +		}
    +	}
    +
    +	if len(networks) > 0 {
    +		result["networks"] = networks
    +	}
    +
    +	if len(input.Volumes) > 0 {
    +		result["volumes"] = input.Volumes
    +	}
    +
    +	// validate that affinity and locality are not included together
    +	hasAffinity := len(input.Affinity) > 0
    +	hasLocality := len(input.LocalityNear) > 0 || len(input.LocalityFar) > 0
    +	if hasAffinity && hasLocality {
    +		return nil, fmt.Errorf("Cannot include both Affinity and Locality")
    +	}
    +
    +	// affinity takes precedence over locality regardless
    +	if len(input.Affinity) > 0 {
    +		result["affinity"] = input.Affinity
    +	} else {
    +		locality := struct {
    +			Strict bool     `json:"strict"`
    +			Near   []string `json:"near,omitempty"`
    +			Far    []string `json:"far,omitempty"`
    +		}{
    +			Strict: input.LocalityStrict,
    +			Near:   input.LocalityNear,
    +			Far:    input.LocalityFar,
    +		}
    +		result["locality"] = locality
    +	}
    +
    +	for key, value := range input.Tags {
    +		result[fmt.Sprintf("tag.%s", key)] = value
    +	}
    +
    +	// NOTE(justinwr): CNSTagServices needs to be a tag if available. No other
    +	// CNS tags will be handled at this time.
    +	input.CNS.toTags(result)
    +	if val, found := result[CNSTagServices]; found {
    +		result["tag."+CNSTagServices] = val
    +		delete(result, CNSTagServices)
    +	}
    +
    +	for key, value := range input.Metadata {
    +		result[fmt.Sprintf("metadata.%s", key)] = value
    +	}
    +
    +	return result, nil
    +}
    +
    +func (c *InstancesClient) Create(ctx context.Context, input *CreateInstanceInput) (*Instance, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "machines")
    +	body, err := input.toAPI()
    +	if err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to prepare for machine creation")
    +	}
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Body:   body,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to create machine")
    +	}
    +
    +	var result *Instance
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to decode create machine response")
    +	}
    +
    +	return result, nil
    +}
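+
+// Illustrative creation request (a sketch; assumes cc is a *ComputeClient and
+// ctx is a context.Context; all IDs and the package name are placeholders, and
+// either Affinity or the Locality* fields may be set, not both, per toAPI above):
+//
+//	inst, err := cc.Instances().Create(ctx, &CreateInstanceInput{
+//		NamePrefix: "web-",
+//		Package:    "g4-highcpu-1G",
+//		Image:      "image-uuid",
+//		Networks:   []string{"network-uuid"},
+//		Tags:       map[string]string{"role": "web"},
+//	})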
    +
    +type DeleteInstanceInput struct {
    +	ID string
    +}
    +
    +func (c *InstancesClient) Delete(ctx context.Context, input *DeleteInstanceInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID)
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodDelete,
    +		Path:   fullPath,
    +	}
    +	response, err := c.client.ExecuteRequestRaw(ctx, reqInputs)
    +	if response == nil {
    +		return pkgerrors.Wrap(err, "unable to delete machine")
    +	}
    +	if response.Body != nil {
    +		defer response.Body.Close()
    +	}
    +	if response.StatusCode == http.StatusNotFound || response.StatusCode == http.StatusGone {
    +		return nil
    +	}
    +	if err != nil {
+		return pkgerrors.Wrap(err, "unable to delete machine")
    +	}
    +
    +	return nil
    +}
    +
    +type DeleteTagsInput struct {
    +	ID string
    +}
    +
    +func (c *InstancesClient) DeleteTags(ctx context.Context, input *DeleteTagsInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID, "tags")
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodDelete,
    +		Path:   fullPath,
    +	}
    +	response, err := c.client.ExecuteRequestRaw(ctx, reqInputs)
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to delete tags from machine")
    +	}
    +	if response == nil {
    +		return fmt.Errorf("DeleteTags request has empty response")
    +	}
    +	if response.Body != nil {
    +		defer response.Body.Close()
    +	}
    +	if response.StatusCode == http.StatusNotFound {
    +		return nil
    +	}
    +
    +	return nil
    +}
    +
    +type DeleteTagInput struct {
    +	ID  string
    +	Key string
    +}
    +
    +func (c *InstancesClient) DeleteTag(ctx context.Context, input *DeleteTagInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID, "tags", input.Key)
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodDelete,
    +		Path:   fullPath,
    +	}
    +	response, err := c.client.ExecuteRequestRaw(ctx, reqInputs)
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to delete tag from machine")
    +	}
    +	if response == nil {
    +		return fmt.Errorf("DeleteTag request has empty response")
    +	}
    +	if response.Body != nil {
    +		defer response.Body.Close()
    +	}
    +	if response.StatusCode == http.StatusNotFound {
    +		return nil
    +	}
    +
    +	return nil
    +}
    +
    +type RenameInstanceInput struct {
    +	ID   string
    +	Name string
    +}
    +
    +func (c *InstancesClient) Rename(ctx context.Context, input *RenameInstanceInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID)
    +
    +	params := &url.Values{}
    +	params.Set("action", "rename")
    +	params.Set("name", input.Name)
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Query:  params,
    +	}
    +	respReader, err := c.client.ExecuteRequestURIParams(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to rename machine")
    +	}
    +
    +	return nil
    +}
    +
    +type ReplaceTagsInput struct {
    +	ID   string
    +	Tags map[string]string
    +	CNS  InstanceCNS
    +}
    +
    +// toAPI is used to join Tags and CNS tags into the same JSON object before
    +// sending an API request to the API gateway.
    +func (input ReplaceTagsInput) toAPI() map[string]interface{} {
    +	result := map[string]interface{}{}
    +	for key, value := range input.Tags {
    +		result[key] = value
    +	}
    +	input.CNS.toTags(result)
    +	return result
    +}
    +
    +func (c *InstancesClient) ReplaceTags(ctx context.Context, input *ReplaceTagsInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID, "tags")
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPut,
    +		Path:   fullPath,
    +		Body:   input.toAPI(),
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to replace machine tags")
    +	}
    +
    +	return nil
    +}
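+
+// Illustrative call (a sketch; assumes cc is a *ComputeClient and ctx is a
+// context.Context; ReplaceTags issues a PUT to the tags endpoint, replacing the
+// existing tag set with the joined Tags and CNS values from toAPI above):
+//
+//	err := cc.Instances().ReplaceTags(ctx, &ReplaceTagsInput{
+//		ID:   "instance-uuid",
+//		Tags: map[string]string{"role": "web"},
+//		CNS:  InstanceCNS{Services: []string{"web"}},
+//	})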
    +
    +type AddTagsInput struct {
    +	ID   string
    +	Tags map[string]string
    +}
    +
    +func (c *InstancesClient) AddTags(ctx context.Context, input *AddTagsInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID, "tags")
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Body:   input.Tags,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to add tags to machine")
    +	}
    +
    +	return nil
    +}
    +
    +type GetTagInput struct {
    +	ID  string
    +	Key string
    +}
    +
    +func (c *InstancesClient) GetTag(ctx context.Context, input *GetTagInput) (string, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID, "tags", input.Key)
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if err != nil {
    +		return "", pkgerrors.Wrap(err, "unable to get tag")
    +	}
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +
    +	var result string
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
    +		return "", pkgerrors.Wrap(err, "unable to decode get tag response")
    +	}
    +
    +	return result, nil
    +}
    +
    +type ListTagsInput struct {
    +	ID string
    +}
    +
    +func (c *InstancesClient) ListTags(ctx context.Context, input *ListTagsInput) (map[string]interface{}, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID, "tags")
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to list machine tags")
    +	}
    +
    +	var result map[string]interface{}
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
+		return nil, pkgerrors.Wrap(err, "unable to decode list machine tags response")
    +	}
    +
    +	_, tags := TagsExtractMeta(result)
    +	return tags, nil
    +}
    +
    +type GetMetadataInput struct {
    +	ID  string
    +	Key string
    +}
    +
    +// GetMetadata returns a single metadata entry associated with an instance.
    +func (c *InstancesClient) GetMetadata(ctx context.Context, input *GetMetadataInput) (string, error) {
    +	if input.Key == "" {
+		return "", fmt.Errorf("missing metadata Key in input")
    +	}
    +
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID, "metadata", input.Key)
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +	}
    +	response, err := c.client.ExecuteRequestRaw(ctx, reqInputs)
    +	if err != nil {
    +		return "", pkgerrors.Wrap(err, "unable to get machine metadata")
    +	}
+	if response == nil {
+		return "", pkgerrors.New("get machine metadata request has empty response")
+	}
+	defer response.Body.Close()
+	if response.StatusCode == http.StatusNotFound || response.StatusCode == http.StatusGone {
    +		return "", &errors.APIError{
    +			StatusCode: response.StatusCode,
    +			Code:       "ResourceNotFound",
    +		}
    +	}
    +
    +	body, err := ioutil.ReadAll(response.Body)
    +	if err != nil {
    +		return "", pkgerrors.Wrap(err, "unable to decode get machine metadata response")
    +	}
    +
+	return string(body), nil
    +}
    +
    +type ListMetadataInput struct {
    +	ID          string
    +	Credentials bool
    +}
    +
    +func (c *InstancesClient) ListMetadata(ctx context.Context, input *ListMetadataInput) (map[string]string, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID, "metadata")
    +
    +	query := &url.Values{}
    +	if input.Credentials {
    +		query.Set("credentials", "true")
    +	}
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +		Query:  query,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to list machine metadata")
    +	}
    +
    +	var result map[string]string
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to decode list machine metadata response")
    +	}
    +
    +	return result, nil
    +}
    +
    +type UpdateMetadataInput struct {
    +	ID       string
    +	Metadata map[string]string
    +}
    +
    +func (c *InstancesClient) UpdateMetadata(ctx context.Context, input *UpdateMetadataInput) (map[string]string, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID, "metadata")
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Body:   input.Metadata,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to update machine metadata")
    +	}
    +
    +	var result map[string]string
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to decode update machine metadata response")
    +	}
    +
    +	return result, nil
    +}
    +
    +type DeleteMetadataInput struct {
    +	ID  string
    +	Key string
    +}
    +
    +// DeleteMetadata deletes a single metadata key from an instance
    +func (c *InstancesClient) DeleteMetadata(ctx context.Context, input *DeleteMetadataInput) error {
    +	if input.Key == "" {
+		return fmt.Errorf("missing metadata Key in input")
    +	}
    +
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID, "metadata", input.Key)
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodDelete,
    +		Path:   fullPath,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to delete machine metadata")
    +	}
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +
    +	return nil
    +}
    +
    +type DeleteAllMetadataInput struct {
    +	ID string
    +}
    +
    +// DeleteAllMetadata deletes all metadata keys from this instance
    +func (c *InstancesClient) DeleteAllMetadata(ctx context.Context, input *DeleteAllMetadataInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID, "metadata")
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodDelete,
    +		Path:   fullPath,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to delete all machine metadata")
    +	}
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +
    +	return nil
    +}
    +
    +type ResizeInstanceInput struct {
    +	ID      string
    +	Package string
    +}
    +
    +func (c *InstancesClient) Resize(ctx context.Context, input *ResizeInstanceInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID)
    +
    +	params := &url.Values{}
    +	params.Set("action", "resize")
    +	params.Set("package", input.Package)
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Query:  params,
    +	}
    +	respReader, err := c.client.ExecuteRequestURIParams(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to resize machine")
    +	}
    +
    +	return nil
    +}
    +
    +type EnableFirewallInput struct {
    +	ID string
    +}
    +
    +func (c *InstancesClient) EnableFirewall(ctx context.Context, input *EnableFirewallInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID)
    +
    +	params := &url.Values{}
    +	params.Set("action", "enable_firewall")
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Query:  params,
    +	}
    +	respReader, err := c.client.ExecuteRequestURIParams(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to enable machine firewall")
    +	}
    +
    +	return nil
    +}
    +
    +type DisableFirewallInput struct {
    +	ID string
    +}
    +
    +func (c *InstancesClient) DisableFirewall(ctx context.Context, input *DisableFirewallInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.ID)
    +
    +	params := &url.Values{}
    +	params.Set("action", "disable_firewall")
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Query:  params,
    +	}
    +	respReader, err := c.client.ExecuteRequestURIParams(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to disable machine firewall")
    +	}
    +
    +	return nil
    +}
    +
    +type ListNICsInput struct {
    +	InstanceID string
    +}
    +
    +func (c *InstancesClient) ListNICs(ctx context.Context, input *ListNICsInput) ([]*NIC, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.InstanceID, "nics")
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to list machine NICs")
    +	}
    +
    +	var result []*NIC
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to decode list machine NICs response")
    +	}
    +
    +	return result, nil
    +}
    +
    +type GetNICInput struct {
    +	InstanceID string
    +	MAC        string
    +}
    +
    +func (c *InstancesClient) GetNIC(ctx context.Context, input *GetNICInput) (*NIC, error) {
    +	mac := strings.Replace(input.MAC, ":", "", -1)
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.InstanceID, "nics", mac)
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +	}
    +	response, err := c.client.ExecuteRequestRaw(ctx, reqInputs)
    +	if err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to get machine NIC")
    +	}
    +	if response != nil {
    +		defer response.Body.Close()
    +	}
    +	switch response.StatusCode {
    +	case http.StatusNotFound:
    +		return nil, &errors.APIError{
    +			StatusCode: response.StatusCode,
    +			Code:       "ResourceNotFound",
    +		}
    +	}
    +
    +	var result *NIC
    +	decoder := json.NewDecoder(response.Body)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to decode get machine NIC response")
    +	}
    +
    +	return result, nil
    +}
    +
    +type AddNICInput struct {
    +	InstanceID    string
    +	Network       string
    +	NetworkObject NetworkObject
    +}
    +
+// toAPI is used to build up the JSON object to send to the API gateway.  It
+// also resolves the case where a user provides both a NetworkObject and a
+// Network: if both are provided, NetworkObject wins.
    +func (input AddNICInput) toAPI() map[string]interface{} {
    +	result := map[string]interface{}{}
    +
    +	var network NetworkObject
    +
    +	if input.NetworkObject.IPv4UUID != "" {
    +		network = input.NetworkObject
    +	} else {
    +		network = NetworkObject{
    +			IPv4UUID: input.Network,
    +		}
    +	}
    +
    +	result["network"] = network
    +
    +	return result
    +}
    +
+// AddNIC asynchronously adds a NIC to a given instance.  If a NIC for the
+// given network already exists, a ResourceFound error is returned.  The status
+// of the addition can be polled by calling GetNIC() and checking the returned
+// NIC until its state is "running".  Only one NIC per network may exist.
    +// Warning: this operation causes the instance to restart.
    +func (c *InstancesClient) AddNIC(ctx context.Context, input *AddNICInput) (*NIC, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.InstanceID, "nics")
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Body:   input.toAPI(),
    +	}
    +	response, err := c.client.ExecuteRequestRaw(ctx, reqInputs)
    +	if err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to add NIC to machine")
    +	}
    +	if response != nil {
    +		defer response.Body.Close()
    +	}
    +	switch response.StatusCode {
    +	case http.StatusFound:
    +		return nil, &errors.APIError{
    +			StatusCode: response.StatusCode,
    +			Code:       "ResourceFound",
    +			Message:    response.Header.Get("Location"),
    +		}
    +	}
    +
    +	var result *NIC
    +	decoder := json.NewDecoder(response.Body)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to decode add NIC to machine response")
    +	}
    +
    +	return result, nil
    +}
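+
+// Editorial sketch (not part of the upstream code) of the polling flow the
+// AddNIC comment describes. Here "instances" is an assumed *InstancesClient
+// value, and the State and MAC fields are assumptions about the NIC type
+// defined elsewhere in this package:
+//
+//	nic, err := instances.AddNIC(ctx, &AddNICInput{
+//		InstanceID: "<instance-uuid>",
+//		Network:    "<network-uuid>",
+//	})
+//	// handle err ...
+//	for nic.State != "running" {
+//		time.Sleep(3 * time.Second)
+//		nic, err = instances.GetNIC(ctx, &GetNICInput{
+//			InstanceID: "<instance-uuid>",
+//			MAC:        nic.MAC,
+//		})
+//		// handle err ...
+//	}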
    +
    +type RemoveNICInput struct {
    +	InstanceID string
    +	MAC        string
    +}
    +
    +// RemoveNIC removes a given NIC from a machine asynchronously.  The status of
    +// the removal can be polled via GetNIC().  When GetNIC() returns a 404, the NIC
    +// has been removed from the instance.  Warning: this operation causes the
    +// machine to restart.
    +func (c *InstancesClient) RemoveNIC(ctx context.Context, input *RemoveNICInput) error {
    +	mac := strings.Replace(input.MAC, ":", "", -1)
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.InstanceID, "nics", mac)
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodDelete,
    +		Path:   fullPath,
    +	}
    +	response, err := c.client.ExecuteRequestRaw(ctx, reqInputs)
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to remove NIC from machine")
    +	}
+	if response == nil {
+		// pkgerrors.Wrap returns nil when err is nil, so build a real error here.
+		return pkgerrors.New("unable to remove NIC from machine: no response")
+	}
    +	if response.Body != nil {
    +		defer response.Body.Close()
    +	}
    +	switch response.StatusCode {
    +	case http.StatusNotFound:
    +		return &errors.APIError{
    +			StatusCode: response.StatusCode,
    +			Code:       "ResourceNotFound",
    +		}
    +	}
    +
    +	return nil
    +}
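+
+// Editorial sketch (not part of the upstream code) of waiting for removal as
+// described above: poll GetNIC until it reports a 404. "instances", "id" and
+// "mac" are assumed values:
+//
+//	if err := instances.RemoveNIC(ctx, &RemoveNICInput{InstanceID: id, MAC: mac}); err != nil {
+//		// handle err
+//	}
+//	for {
+//		_, err := instances.GetNIC(ctx, &GetNICInput{InstanceID: id, MAC: mac})
+//		if apiErr, ok := err.(*errors.APIError); ok && apiErr.StatusCode == http.StatusNotFound {
+//			break // the NIC has been removed
+//		}
+//		time.Sleep(3 * time.Second)
+//	}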
    +
    +type StopInstanceInput struct {
    +	InstanceID string
    +}
    +
    +func (c *InstancesClient) Stop(ctx context.Context, input *StopInstanceInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.InstanceID)
    +
    +	params := &url.Values{}
    +	params.Set("action", "stop")
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Query:  params,
    +	}
    +	respReader, err := c.client.ExecuteRequestURIParams(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to stop machine")
    +	}
    +
    +	return nil
    +}
    +
    +type StartInstanceInput struct {
    +	InstanceID string
    +}
    +
    +func (c *InstancesClient) Start(ctx context.Context, input *StartInstanceInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.InstanceID)
    +
    +	params := &url.Values{}
    +	params.Set("action", "start")
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Query:  params,
    +	}
    +	respReader, err := c.client.ExecuteRequestURIParams(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to start machine")
    +	}
    +
    +	return nil
    +}
    +
    +type RebootInstanceInput struct {
    +	InstanceID string
    +}
    +
    +func (c *InstancesClient) Reboot(ctx context.Context, input *RebootInstanceInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.InstanceID)
    +
    +	params := &url.Values{}
    +	params.Set("action", "reboot")
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Query:  params,
    +	}
    +	respReader, err := c.client.ExecuteRequestURIParams(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to reboot machine")
    +	}
    +
    +	return nil
    +}
    +
    +type EnableDeletionProtectionInput struct {
    +	InstanceID string
    +}
    +
    +func (c *InstancesClient) EnableDeletionProtection(ctx context.Context, input *EnableDeletionProtectionInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.InstanceID)
    +
    +	params := &url.Values{}
    +	params.Set("action", "enable_deletion_protection")
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Query:  params,
    +	}
    +	respReader, err := c.client.ExecuteRequestURIParams(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to enable deletion protection")
    +	}
    +
    +	return nil
    +}
    +
    +type DisableDeletionProtectionInput struct {
    +	InstanceID string
    +}
    +
    +func (c *InstancesClient) DisableDeletionProtection(ctx context.Context, input *DisableDeletionProtectionInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.InstanceID)
    +
    +	params := &url.Values{}
    +	params.Set("action", "disable_deletion_protection")
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Query:  params,
    +	}
    +	respReader, err := c.client.ExecuteRequestURIParams(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return pkgerrors.Wrap(err, "unable to disable deletion protection")
    +	}
    +
    +	return nil
    +}
    +
    +var reservedInstanceCNSTags = map[string]struct{}{
    +	CNSTagDisable:    {},
    +	CNSTagReversePTR: {},
    +	CNSTagServices:   {},
    +}
    +
    +// TagsExtractMeta extracts all of the misc parameters from Tags and returns a
    +// clean CNS and Tags struct.
    +func TagsExtractMeta(tags map[string]interface{}) (InstanceCNS, map[string]interface{}) {
    +	nativeCNS := InstanceCNS{}
    +	nativeTags := make(map[string]interface{}, len(tags))
    +	for k, raw := range tags {
    +		if _, found := reservedInstanceCNSTags[k]; found {
    +			switch k {
    +			case CNSTagDisable:
    +				b := raw.(bool)
    +				nativeCNS.Disable = b
    +			case CNSTagReversePTR:
    +				s := raw.(string)
    +				nativeCNS.ReversePTR = s
    +			case CNSTagServices:
    +				nativeCNS.Services = strings.Split(raw.(string), ",")
    +			default:
    +				// TODO(seanc@): should assert, logic fail
    +			}
    +		} else {
    +			nativeTags[k] = raw
    +		}
    +	}
    +
    +	return nativeCNS, nativeTags
    +}
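+
+// Illustrative example (editorial, not upstream code) of the split performed
+// by TagsExtractMeta: reserved CNS keys are lifted into InstanceCNS, while
+// every other key passes through as a plain tag:
+//
+//	cns, tags := TagsExtractMeta(map[string]interface{}{
+//		CNSTagServices: "web,db",
+//		"owner":        "ops",
+//	})
+//	// cns.Services == []string{"web", "db"}
+//	// tags == map[string]interface{}{"owner": "ops"}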
    +
    +// toNative() exports a given _Instance (API representation) to its native object
    +// format.
    +func (api *_Instance) toNative() (*Instance, error) {
    +	m := Instance(api.Instance)
    +	m.CNS, m.Tags = TagsExtractMeta(api.Tags)
    +	return &m, nil
    +}
    +
+// toTags() injects its state information into a Tags map suitable for
+// submitting an API call to the vmapi machine endpoint.
    +func (cns *InstanceCNS) toTags(m map[string]interface{}) {
    +	if cns.Disable {
    +		// NOTE(justinwr): The JSON encoder and API require the CNSTagDisable
    +		// attribute to be an actual boolean, not a bool string.
    +		m[CNSTagDisable] = cns.Disable
    +	}
    +	if cns.ReversePTR != "" {
    +		m[CNSTagReversePTR] = cns.ReversePTR
    +	}
    +	if len(cns.Services) > 0 {
    +		m[CNSTagServices] = strings.Join(cns.Services, ",")
    +	}
    +}
    diff --git a/vendor/github.com/joyent/triton-go/compute/packages.go b/vendor/github.com/joyent/triton-go/compute/packages.go
    new file mode 100644
    index 000000000000..495e81747115
    --- /dev/null
    +++ b/vendor/github.com/joyent/triton-go/compute/packages.go
    @@ -0,0 +1,128 @@
    +//
    +// Copyright (c) 2018, Joyent, Inc. All rights reserved.
    +//
    +// This Source Code Form is subject to the terms of the Mozilla Public
    +// License, v. 2.0. If a copy of the MPL was not distributed with this
    +// file, You can obtain one at http://mozilla.org/MPL/2.0/.
    +//
    +
    +package compute
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +	"path"
    +
    +	"github.com/joyent/triton-go/client"
    +	"github.com/pkg/errors"
    +)
    +
    +type PackagesClient struct {
    +	client *client.Client
    +}
    +
    +type Package struct {
    +	ID          string `json:"id"`
    +	Name        string `json:"name"`
    +	Memory      int64  `json:"memory"`
    +	Disk        int64  `json:"disk"`
    +	Swap        int64  `json:"swap"`
    +	LWPs        int64  `json:"lwps"`
    +	VCPUs       int64  `json:"vcpus"`
    +	Version     string `json:"version"`
    +	Group       string `json:"group"`
    +	Description string `json:"description"`
    +	Default     bool   `json:"default"`
    +}
    +
    +type ListPackagesInput struct {
    +	Name    string `json:"name"`
    +	Memory  int64  `json:"memory"`
    +	Disk    int64  `json:"disk"`
    +	Swap    int64  `json:"swap"`
    +	LWPs    int64  `json:"lwps"`
    +	VCPUs   int64  `json:"vcpus"`
    +	Version string `json:"version"`
    +	Group   string `json:"group"`
    +}
    +
    +func (c *PackagesClient) List(ctx context.Context, input *ListPackagesInput) ([]*Package, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "packages")
    +
    +	query := &url.Values{}
    +	if input.Name != "" {
    +		query.Set("name", input.Name)
    +	}
    +	if input.Memory != 0 {
    +		query.Set("memory", fmt.Sprintf("%d", input.Memory))
    +	}
    +	if input.Disk != 0 {
    +		query.Set("disk", fmt.Sprintf("%d", input.Disk))
    +	}
    +	if input.Swap != 0 {
    +		query.Set("swap", fmt.Sprintf("%d", input.Swap))
    +	}
    +	if input.LWPs != 0 {
    +		query.Set("lwps", fmt.Sprintf("%d", input.LWPs))
    +	}
    +	if input.VCPUs != 0 {
    +		query.Set("vcpus", fmt.Sprintf("%d", input.VCPUs))
    +	}
    +	if input.Version != "" {
    +		query.Set("version", input.Version)
    +	}
    +	if input.Group != "" {
    +		query.Set("group", input.Group)
    +	}
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +		Query:  query,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, errors.Wrap(err, "unable to list packages")
    +	}
    +
    +	var result []*Package
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, errors.Wrap(err, "unable to decode list packages response")
    +	}
    +
    +	return result, nil
    +}
    +
    +type GetPackageInput struct {
    +	ID string
    +}
    +
    +func (c *PackagesClient) Get(ctx context.Context, input *GetPackageInput) (*Package, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "packages", input.ID)
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, errors.Wrap(err, "unable to get package")
    +	}
    +
    +	var result *Package
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, errors.Wrap(err, "unable to decode get package response")
    +	}
    +
    +	return result, nil
    +}
    diff --git a/vendor/github.com/joyent/triton-go/compute/ping.go b/vendor/github.com/joyent/triton-go/compute/ping.go
    new file mode 100644
    index 000000000000..f346e5c26219
    --- /dev/null
    +++ b/vendor/github.com/joyent/triton-go/compute/ping.go
    @@ -0,0 +1,58 @@
    +//
    +// Copyright (c) 2018, Joyent, Inc. All rights reserved.
    +//
    +// This Source Code Form is subject to the terms of the Mozilla Public
    +// License, v. 2.0. If a copy of the MPL was not distributed with this
    +// file, You can obtain one at http://mozilla.org/MPL/2.0/.
    +//
    +
    +package compute
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"net/http"
    +
    +	"github.com/joyent/triton-go/client"
    +	pkgerrors "github.com/pkg/errors"
    +)
    +
    +const pingEndpoint = "/--ping"
    +
    +type CloudAPI struct {
    +	Versions []string `json:"versions"`
    +}
    +
    +type PingOutput struct {
    +	Ping     string   `json:"ping"`
    +	CloudAPI CloudAPI `json:"cloudapi"`
    +}
    +
    +// Ping sends a request to the '/--ping' endpoint and returns a `pong` as well
    +// as a list of API version numbers your instance of CloudAPI is presenting.
    +func (c *ComputeClient) Ping(ctx context.Context) (*PingOutput, error) {
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   pingEndpoint,
    +	}
    +	response, err := c.Client.ExecuteRequestRaw(ctx, reqInputs)
    +	if err != nil {
    +		return nil, pkgerrors.Wrap(err, "unable to ping")
    +	}
+	if response == nil {
+		// pkgerrors.Wrap returns nil when err is nil, so build a real error here.
+		return nil, pkgerrors.New("unable to ping: no response")
+	}
    +	if response.Body != nil {
    +		defer response.Body.Close()
    +	}
    +
    +	var result *PingOutput
    +	decoder := json.NewDecoder(response.Body)
+	if err = decoder.Decode(&result); err != nil {
+		return nil, pkgerrors.Wrap(err, "unable to decode ping response")
+	}
    +
    +	return result, nil
    +}
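+
+// Editorial usage sketch (not upstream code), assuming "cc" is a *ComputeClient:
+//
+//	pong, err := cc.Ping(context.Background())
+//	if err != nil {
+//		// handle err
+//	}
+//	fmt.Println(pong.Ping, pong.CloudAPI.Versions)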
    diff --git a/vendor/github.com/joyent/triton-go/compute/services.go b/vendor/github.com/joyent/triton-go/compute/services.go
    new file mode 100644
    index 000000000000..3597da9b1281
    --- /dev/null
    +++ b/vendor/github.com/joyent/triton-go/compute/services.go
    @@ -0,0 +1,72 @@
    +//
    +// Copyright (c) 2018, Joyent, Inc. All rights reserved.
    +//
    +// This Source Code Form is subject to the terms of the Mozilla Public
    +// License, v. 2.0. If a copy of the MPL was not distributed with this
    +// file, You can obtain one at http://mozilla.org/MPL/2.0/.
    +//
    +
    +package compute
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"net/http"
    +	"path"
    +	"sort"
    +
    +	"github.com/joyent/triton-go/client"
    +	"github.com/pkg/errors"
    +)
    +
    +type ServicesClient struct {
    +	client *client.Client
    +}
    +
    +type Service struct {
    +	Name     string
    +	Endpoint string
    +}
    +
    +type ListServicesInput struct{}
    +
    +func (c *ServicesClient) List(ctx context.Context, _ *ListServicesInput) ([]*Service, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "services")
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, errors.Wrap(err, "unable to list services")
    +	}
    +
    +	var intermediate map[string]string
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&intermediate); err != nil {
    +		return nil, errors.Wrap(err, "unable to decode list services response")
    +	}
    +
    +	keys := make([]string, len(intermediate))
    +	i := 0
    +	for k := range intermediate {
    +		keys[i] = k
    +		i++
    +	}
    +	sort.Strings(keys)
    +
    +	result := make([]*Service, len(intermediate))
    +	i = 0
    +	for _, key := range keys {
    +		result[i] = &Service{
    +			Name:     key,
    +			Endpoint: intermediate[key],
    +		}
    +		i++
    +	}
    +
    +	return result, nil
    +}
    diff --git a/vendor/github.com/joyent/triton-go/compute/snapshots.go b/vendor/github.com/joyent/triton-go/compute/snapshots.go
    new file mode 100644
    index 000000000000..f30d394ba6e8
    --- /dev/null
    +++ b/vendor/github.com/joyent/triton-go/compute/snapshots.go
    @@ -0,0 +1,164 @@
    +//
    +// Copyright (c) 2018, Joyent, Inc. All rights reserved.
    +//
    +// This Source Code Form is subject to the terms of the Mozilla Public
    +// License, v. 2.0. If a copy of the MPL was not distributed with this
    +// file, You can obtain one at http://mozilla.org/MPL/2.0/.
    +//
    +
    +package compute
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"net/http"
    +	"path"
    +	"time"
    +
    +	"github.com/joyent/triton-go/client"
    +	"github.com/pkg/errors"
    +)
    +
    +type SnapshotsClient struct {
    +	client *client.Client
    +}
    +
    +type Snapshot struct {
    +	Name    string
    +	State   string
    +	Created time.Time
    +	Updated time.Time
    +}
    +
    +type ListSnapshotsInput struct {
    +	MachineID string
    +}
    +
    +func (c *SnapshotsClient) List(ctx context.Context, input *ListSnapshotsInput) ([]*Snapshot, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.MachineID, "snapshots")
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, errors.Wrap(err, "unable to list snapshots")
    +	}
    +
    +	var result []*Snapshot
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, errors.Wrap(err, "unable to decode list snapshots response")
    +	}
    +
    +	return result, nil
    +}
    +
    +type GetSnapshotInput struct {
    +	MachineID string
    +	Name      string
    +}
    +
    +func (c *SnapshotsClient) Get(ctx context.Context, input *GetSnapshotInput) (*Snapshot, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.MachineID, "snapshots", input.Name)
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, errors.Wrap(err, "unable to get snapshot")
    +	}
    +
    +	var result *Snapshot
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, errors.Wrap(err, "unable to decode get snapshot response")
    +	}
    +
    +	return result, nil
    +}
    +
    +type DeleteSnapshotInput struct {
    +	MachineID string
    +	Name      string
    +}
    +
    +func (c *SnapshotsClient) Delete(ctx context.Context, input *DeleteSnapshotInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.MachineID, "snapshots", input.Name)
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodDelete,
    +		Path:   fullPath,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return errors.Wrap(err, "unable to delete snapshot")
    +	}
    +
    +	return nil
    +}
    +
    +type StartMachineFromSnapshotInput struct {
    +	MachineID string
    +	Name      string
    +}
    +
    +func (c *SnapshotsClient) StartMachine(ctx context.Context, input *StartMachineFromSnapshotInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.MachineID, "snapshots", input.Name)
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +	}
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return errors.Wrap(err, "unable to start machine")
    +	}
    +
    +	return nil
    +}
    +
    +type CreateSnapshotInput struct {
    +	MachineID string
    +	Name      string
    +}
    +
    +func (c *SnapshotsClient) Create(ctx context.Context, input *CreateSnapshotInput) (*Snapshot, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "machines", input.MachineID, "snapshots")
    +
    +	data := make(map[string]interface{})
    +	data["name"] = input.Name
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Body:   data,
    +	}
    +
    +	respReader, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if respReader != nil {
    +		defer respReader.Close()
    +	}
    +	if err != nil {
    +		return nil, errors.Wrap(err, "unable to create snapshot")
    +	}
    +
    +	var result *Snapshot
    +	decoder := json.NewDecoder(respReader)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, errors.Wrap(err, "unable to decode create snapshot response")
    +	}
    +
    +	return result, nil
    +}
    diff --git a/vendor/github.com/joyent/triton-go/compute/volumes.go b/vendor/github.com/joyent/triton-go/compute/volumes.go
    new file mode 100644
    index 000000000000..af858061fc04
    --- /dev/null
    +++ b/vendor/github.com/joyent/triton-go/compute/volumes.go
    @@ -0,0 +1,217 @@
    +//
    +// Copyright (c) 2018, Joyent, Inc. All rights reserved.
    +//
    +// This Source Code Form is subject to the terms of the Mozilla Public
    +// License, v. 2.0. If a copy of the MPL was not distributed with this
    +// file, You can obtain one at http://mozilla.org/MPL/2.0/.
    +//
    +
    +package compute
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"net/http"
    +	"net/url"
    +	"path"
    +
    +	"github.com/joyent/triton-go/client"
    +	"github.com/pkg/errors"
    +)
    +
    +type VolumesClient struct {
    +	client *client.Client
    +}
    +
    +type Volume struct {
    +	ID             string   `json:"id"`
    +	Name           string   `json:"name"`
    +	Owner          string   `json:"owner_uuid"`
    +	Type           string   `json:"type"`
    +	FileSystemPath string   `json:"filesystem_path"`
    +	Size           int64    `json:"size"`
    +	State          string   `json:"state"`
    +	Networks       []string `json:"networks"`
    +	Refs           []string `json:"refs"`
    +}
    +
    +type ListVolumesInput struct {
    +	Name  string
    +	Size  string
    +	State string
    +	Type  string
    +}
    +
    +func (c *VolumesClient) List(ctx context.Context, input *ListVolumesInput) ([]*Volume, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "volumes")
    +
    +	query := &url.Values{}
    +	if input.Name != "" {
    +		query.Set("name", input.Name)
    +	}
    +	if input.Size != "" {
    +		query.Set("size", input.Size)
    +	}
    +	if input.State != "" {
    +		query.Set("state", input.State)
    +	}
    +	if input.Type != "" {
    +		query.Set("type", input.Type)
    +	}
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +		Query:  query,
    +	}
    +	resp, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if resp != nil {
    +		defer resp.Close()
    +	}
    +	if err != nil {
    +		return nil, errors.Wrap(err, "unable to list volumes")
    +	}
    +
    +	var result []*Volume
    +	decoder := json.NewDecoder(resp)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, errors.Wrap(err, "unable to decode list volumes response")
    +	}
    +
    +	return result, nil
    +}
    +
    +type CreateVolumeInput struct {
    +	Name     string
    +	Size     int64
    +	Networks []string
    +	Type     string
    +}
    +
    +func (input *CreateVolumeInput) toAPI() map[string]interface{} {
    +	result := make(map[string]interface{}, 0)
    +
    +	if input.Name != "" {
    +		result["name"] = input.Name
    +	}
    +
    +	if input.Size != 0 {
    +		result["size"] = input.Size
    +	}
    +
    +	if input.Type != "" {
    +		result["type"] = input.Type
    +	}
    +
    +	if len(input.Networks) > 0 {
    +		result["networks"] = input.Networks
    +	}
    +
    +	return result
    +}
    +
    +func (c *VolumesClient) Create(ctx context.Context, input *CreateVolumeInput) (*Volume, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "volumes")
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Body:   input.toAPI(),
    +	}
    +	resp, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if err != nil {
    +		return nil, errors.Wrap(err, "unable to create volume")
    +	}
    +	if resp != nil {
    +		defer resp.Close()
    +	}
    +
    +	var result *Volume
    +	decoder := json.NewDecoder(resp)
    +	if err = decoder.Decode(&result); err != nil {
    +		return nil, errors.Wrap(err, "unable to decode create volume response")
    +	}
    +
    +	return result, nil
    +}
    +
    +type DeleteVolumeInput struct {
    +	ID string
    +}
    +
    +func (c *VolumesClient) Delete(ctx context.Context, input *DeleteVolumeInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "volumes", input.ID)
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodDelete,
    +		Path:   fullPath,
    +	}
    +	resp, err := c.client.ExecuteRequestRaw(ctx, reqInputs)
    +	if err != nil {
    +		return errors.Wrap(err, "unable to delete volume")
    +	}
+	if resp == nil {
+		// errors.Wrap returns nil when err is nil, so build a real error here.
+		return errors.New("unable to delete volume: no response")
+	}
    +	if resp.Body != nil {
    +		defer resp.Body.Close()
    +	}
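+	// Treat a 404/410 (volume already deleted) the same as a successful delete.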
    +	if resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusGone {
    +		return nil
    +	}
    +
    +	return nil
    +}
    +
    +type GetVolumeInput struct {
    +	ID string
    +}
    +
    +func (c *VolumesClient) Get(ctx context.Context, input *GetVolumeInput) (*Volume, error) {
    +	fullPath := path.Join("/", c.client.AccountName, "volumes", input.ID)
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodGet,
    +		Path:   fullPath,
    +	}
    +	resp, err := c.client.ExecuteRequestRaw(ctx, reqInputs)
    +	if err != nil {
    +		return nil, errors.Wrap(err, "unable to get volume")
    +	}
+	if resp == nil {
+		// errors.Wrap returns nil when err is nil, so build a real error here.
+		return nil, errors.New("unable to get volume: no response")
+	}
    +	if resp.Body != nil {
    +		defer resp.Body.Close()
    +	}
    +
    +	var result *Volume
    +	decoder := json.NewDecoder(resp.Body)
    +	if err = decoder.Decode(&result); err != nil {
+		return nil, errors.Wrap(err, "unable to decode get volume response")
    +	}
    +
    +	return result, nil
    +}
    +
    +type UpdateVolumeInput struct {
    +	ID   string `json:"-"`
    +	Name string `json:"name"`
    +}
    +
    +func (c *VolumesClient) Update(ctx context.Context, input *UpdateVolumeInput) error {
    +	fullPath := path.Join("/", c.client.AccountName, "volumes", input.ID)
    +
    +	reqInputs := client.RequestInput{
    +		Method: http.MethodPost,
    +		Path:   fullPath,
    +		Body:   input,
    +	}
    +	resp, err := c.client.ExecuteRequest(ctx, reqInputs)
    +	if err != nil {
    +		return errors.Wrap(err, "unable to update volume")
    +	}
    +	if resp != nil {
    +		defer resp.Close()
    +	}
    +
    +	return nil
    +}
    diff --git a/vendor/github.com/linode/linodego/.gitignore b/vendor/github.com/linode/linodego/.gitignore
    new file mode 100644
    index 000000000000..d16559bdaf3f
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/.gitignore
    @@ -0,0 +1,19 @@
    +# Binaries for programs and plugins
    +*.exe
    +*.dll
    +*.so
    +*.dylib
    +
    +# Test binary, build with `go test -c`
    +*.test
    +
    +# Output of the go coverage tool, specifically when used with LiteIDE
    +*.out
    +
    +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
    +.glide/
    +
    +vendor/**/
    +.env
    +coverage.txt
    +
    diff --git a/vendor/github.com/linode/linodego/.travis.yml b/vendor/github.com/linode/linodego/.travis.yml
    new file mode 100644
    index 000000000000..83fec93e3352
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/.travis.yml
    @@ -0,0 +1,22 @@
    +language: "go"
    +
    +matrix:
    +  allow_failures:
    +  - go: tip
    +
    +go:
    +  - "1.10"
    +  - tip
    +
    +install:
    +  - go get -u gopkg.in/alecthomas/gometalinter.v2
    +  - gometalinter.v2 --install
    +
    +script:
    +  - touch .env
    +  - make test ARGS='-v -race -count=2 -coverprofile=coverage.txt -covermode=atomic ./...'
    +  - gometalinter.v2 --enable-all --disable=vetshadow --disable=gocyclo --disable=unparam --disable=nakedret --disable=lll --disable=dupl --disable=gosec --disable=gochecknoinits --disable=gochecknoglobals --disable=test --deadline=120s
    +  - gometalinter.v2 --disable-all --enable=vetshadow --enable=gocyclo --enable=unparam --enable=nakedret --enable=lll --enable=dupl --enable=gosec --enable=gochecknoinits --enable=gochecknoglobals --deadline=120s || true
    +
    +after_success:
    +  - bash <(curl -s https://codecov.io/bash)
    diff --git a/vendor/github.com/linode/linodego/API_SUPPORT.md b/vendor/github.com/linode/linodego/API_SUPPORT.md
    new file mode 100644
    index 000000000000..2fd663f4f1db
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/API_SUPPORT.md
    @@ -0,0 +1,379 @@
    +# API Support
    +
    +## Linodes
    +
    +- `/linode/instances`
    +  - [x] `GET`
    +  - [X] `POST`
    +- `/linode/instances/$id`
    +  - [x] `GET`
    +  - [X] `PUT`
    +  - [X] `DELETE`
    +- `/linode/instances/$id/boot`
    +  - [x] `POST`
    +- `/linode/instances/$id/clone`
    +  - [x] `POST`
    +- `/linode/instances/$id/mutate`
    +  - [X] `POST`
    +- `/linode/instances/$id/reboot`
    +  - [x] `POST`
    +- `/linode/instances/$id/rebuild`
    +  - [X] `POST`
    +- `/linode/instances/$id/rescue`
    +  - [X] `POST`
    +- `/linode/instances/$id/resize`
    +  - [x] `POST`
    +- `/linode/instances/$id/shutdown`
    +  - [x] `POST`
    +- `/linode/instances/$id/volumes`
    +  - [X] `GET`
    +
    +### Backups
    +
    +- `/linode/instances/$id/backups`
    +  - [X] `GET`
    +  - [ ] `POST`
    +- `/linode/instances/$id/backups/$id/restore`
    +  - [ ] `POST`
    +- `/linode/instances/$id/backups/cancel`
    +  - [ ] `POST`
    +- `/linode/instances/$id/backups/enable`
    +  - [ ] `POST`
    +
    +### Configs
    +
    +- `/linode/instances/$id/configs`
    +  - [X] `GET`
    +  - [X] `POST`
    +- `/linode/instances/$id/configs/$id`
    +  - [X] `GET`
    +  - [X] `PUT`
    +  - [X] `DELETE`
    +
    +### Disks
    +
    +- `/linode/instances/$id/disks`
    +  - [X] `GET`
    +  - [X] `POST`
    +- `/linode/instances/$id/disks/$id`
    +  - [X] `GET`
    +  - [X] `PUT`
    +  - [X] `POST`
    +  - [X] `DELETE`
    +- `/linode/instances/$id/disks/$id/password`
    +  - [X] `POST`
    +- `/linode/instances/$id/disks/$id/resize`
    +  - [X] `POST`
    +
    +### IPs
    +
    +- `/linode/instances/$id/ips`
    +  - [ ] `GET`
    +  - [ ] `POST`
    +- `/linode/instances/$id/ips/$ip_address`
    +  - [ ] `GET`
    +  - [ ] `PUT`
    +  - [ ] `DELETE`
    +- `/linode/instances/$id/ips/sharing`
    +  - [ ] `POST`
    +
    +### Kernels
    +
    +- `/linode/kernels`
    +  - [X] `GET`
    +- `/linode/kernels/$id`
    +  - [X] `GET`
    +
    +### StackScripts
    +
    +- `/linode/stackscripts`
    +  - [x] `GET`
    +  - [X] `POST`
    +- `/linode/stackscripts/$id`
    +  - [x] `GET`
    +  - [X] `PUT`
    +  - [X] `DELETE`
    +
    +### Stats
    +
    +- `/linode/instances/$id/stats`
    +  - [ ] `GET`
    +- `/linode/instances/$id/stats/$year/$month`
    +  - [ ] `GET`
    +
    +### Types
    +
    +- `/linode/types`
    +  - [X] `GET`
    +- `/linode/types/$id`
    +  - [X] `GET`
    +
    +## Domains
    +
    +- `/domains`
    +  - [X] `GET`
    +  - [X] `POST`
    +- `/domains/$id`
    +  - [X] `GET`
    +  - [X] `PUT`
    +  - [X] `DELETE`
    +- `/domains/$id/clone`
    +  - [ ] `POST`
    +- `/domains/$id/records`
    +  - [X] `GET`
    +  - [X] `POST`
    +- `/domains/$id/records/$id`
    +  - [X] `GET`
    +  - [X] `PUT`
    +  - [X] `DELETE`
    +
    +## Longview
    +
    +- `/longview/clients`
    +  - [X] `GET`
    +  - [ ] `POST`
    +- `/longview/clients/$id`
    +  - [X] `GET`
    +  - [ ] `PUT`
    +  - [ ] `DELETE`
    +
    +### Subscriptions
    +
    +- `/longview/subscriptions`
    +  - [ ] `GET`
    +- `/longview/subscriptions/$id`
    +  - [ ] `GET`
    +
    +### NodeBalancers
    +
    +- `/nodebalancers`
    +  - [X] `GET`
    +  - [X] `POST`
    +- `/nodebalancers/$id`
    +  - [X] `GET`
    +  - [X] `PUT`
    +  - [X] `DELETE`
    +
    +### NodeBalancer Configs
    +
    +- `/nodebalancers/$id/configs`
    +  - [X] `GET`
    +  - [X] `POST`
    +- `/nodebalancers/$id/configs/$id`
    +  - [X] `GET`
    +  - [X] `DELETE`
    +- `/nodebalancers/$id/configs/$id/nodes`
    +  - [X] `GET`
    +  - [X] `POST`
    +- `/nodebalancers/$id/configs/$id/nodes/$id`
    +  - [X] `GET`
    +  - [X] `PUT`
    +  - [X] `DELETE`
    +- `/nodebalancers/$id/configs/$id/rebuild`
    +  - [X] `POST`
    +
    +## Networking
    +
    +- `/networking/ip-assign`
    +  - [ ] `POST`
    +- `/networking/ips`
    +  - [X] `GET`
    +  - [ ] `POST`
    +- `/networking/ips/$address`
    +  - [X] `GET`
    +  - [ ] `PUT`
    +  - [ ] `DELETE`
    +
    +### IPv6
    +
    +- `/networking/ips`
    +  - [X] `GET`
    +- `/networking/ips/$address`
    +  - [X] `GET`
    +  - [ ] `PUT`
+- `/networking/ipv6/ranges`
+  - [X] `GET`
+- `/networking/ipv6/pools`
+  - [X] `GET`
    +
    +## Regions
    +
    +- `/regions`
    +  - [x] `GET`
    +- `/regions/$id`
    +  - [x] `GET`
    +
    +## Support
    +
    +- `/support/tickets`
    +  - [X] `GET`
    +  - [ ] `POST`
    +- `/support/tickets/$id`
    +  - [X] `GET`
    +- `/support/tickets/$id/attachments`
    +  - [ ] `POST`
    +- `/support/tickets/$id/replies`
    +  - [ ] `GET`
    +  - [ ] `POST`
    +
    +## Tags
    +
    +- `/tags/`
    +  - [X] `GET`
    +  - [X] `POST`
    +- `/tags/$id`
    +  - [X] `GET`
    +  - [X] `DELETE`
    +
    +## Account
    +
    +### Events
    +
    +- `/account/events`
    +  - [X] `GET`
    +- `/account/events/$id`
    +  - [X] `GET`
    +- `/account/events/$id/read`
    +  - [X] `POST`
    +- `/account/events/$id/seen`
    +  - [X] `POST`
    +
    +### Invoices
    +
    +- `/account/invoices/`
    +  - [X] `GET`
    +- `/account/invoices/$id`
    +  - [X] `GET`
    +- `/account/invoices/$id/items`
    +  - [X] `GET`
    +
    +### Notifications
    +
    +- `/account/notifications`
    +  - [X] `GET`
    +
    +### OAuth Clients
    +
    +- `/account/oauth-clients`
    +  - [ ] `GET`
    +  - [ ] `POST`
    +- `/account/oauth-clients/$id`
    +  - [ ] `GET`
    +  - [ ] `PUT`
    +  - [ ] `DELETE`
    +- `/account/oauth-clients/$id/reset_secret`
    +  - [ ] `POST`
    +- `/account/oauth-clients/$id/thumbnail`
    +  - [ ] `GET`
    +  - [ ] `PUT`
    +
    +### Payments
    +
    +- `/account/payments`
    +  - [ ] `GET`
    +  - [ ] `POST`
    +- `/account/payments/$id`
    +  - [ ] `GET`
    +- `/account/payments/paypal`
    +  - [ ] `GET`
    +- `/account/payments/paypal/execute`
    +  - [ ] `POST`
    +
    +### Settings
    +
    +- `/account/settings`
    +  - [ ] `GET`
    +  - [ ] `PUT`
    +
    +### Users
    +
    +- `/account/users`
    +  - [X] `GET`
    +  - [X] `POST`
    +- `/account/users/$username`
    +  - [X] `GET`
    +  - [X] `PUT`
    +  - [X] `DELETE`
    +- `/account/users/$username/grants`
    +  - [ ] `GET`
    +  - [ ] `PUT`
    +- `/account/users/$username/password`
    +  - [ ] `POST`
    +
    +## Profile
    +
    +### Personalized User Settings
    +
    +- `/profile`
    +  - [X] `GET`
    +  - [X] `PUT`
    +
    +### Granted OAuth Apps
    +
    +- `/profile/apps`
    +  - [ ] `GET`
    +- `/profile/apps/$id`
    +  - [ ] `GET`
    +  - [ ] `DELETE`
    +
    +### Grants to Linode Resources
    +
    +- `/profile/grants`
    +  - [ ] `GET`
    +
    +### SSH Keys
    +
    +- `/profile/sshkeys`
    +  - [x] `GET`
    +  - [x] `POST`
    +- `/profile/sshkeys/$id`
    +  - [x] `GET`
    +  - [x] `PUT`
    +  - [x] `DELETE`
    +  
    +### Two-Factor
    +
    +- `/profile/tfa-disable`
    +  - [ ] `POST`
    +- `/profile/tfa-enable`
    +  - [ ] `POST`
    +- `/profile/tfa-enable-confirm`
    +  - [ ] `POST`
    +
    +### Personal Access API Tokens
    +
    +- `/profile/tokens`
    +  - [X] `GET`
    +  - [X] `POST`
    +- `/profile/tokens/$id`
    +  - [X] `GET`
    +  - [X] `PUT`
    +  - [X] `DELETE`
    +
    +## Images
    +
    +- `/images`
    +  - [x] `GET`
    +- `/images/$id`
    +  - [x] `GET`
    +  - [X] `POST`
    +  - [X] `PUT`
    +  - [X] `DELETE`
    +
    +## Volumes
    +
    +- `/volumes`
    +  - [X] `GET`
    +  - [X] `POST`
    +- `/volumes/$id`
    +  - [X] `GET`
    +  - [X] `PUT`
    +  - [X] `DELETE`
    +- `/volumes/$id/attach`
    +  - [X] `POST`
    +- `/volumes/$id/clone`
    +  - [X] `POST`
    +- `/volumes/$id/detach`
    +  - [X] `POST`
    +- `/volumes/$id/resize`
    +  - [X] `POST`
    diff --git a/vendor/github.com/linode/linodego/CHANGELOG.md b/vendor/github.com/linode/linodego/CHANGELOG.md
    new file mode 100644
    index 000000000000..54c176642a07
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/CHANGELOG.md
    @@ -0,0 +1,239 @@
    +# Change Log
    +
    +## Unreleased
    +
    +### Fixes
    +
    +### Features
    +
    +
    +
+## [v0.7.1](https://github.com/linode/linodego/compare/v0.7.0..v0.7.1) (2019-02-05)
    +
    +### Features
    +
    +* add `ClassDedicated` constant (`dedicated`) for use in `LinodeType` `Class` values
    +  See the [Dedicated CPU Announcement](https://blog.linode.com/2019/02/05/introducing-linode-dedicated-cpu-instances/)
    +
    +
    +
    +## [v0.7.0](https://github.com/linode/linodego/compare/v0.6.2..v0.7.0) (2018-12-03)
    +
    +### Features
    +
    +* add `Tags` field in: `NodeBalancer`, `Domain`, `Volume`
    +* add `UpdateIPAddress` (for setting RDNS)
    +
    +### Fixes
    +
+* invalid URL for `/v4/networking/` endpoints (IPv6 Ranges and Pools) has been corrected
    +
    +
    +
    +## [v0.6.2](https://github.com/linode/linodego/compare/v0.6.1..v0.6.2) (2018-10-26)
    +
    +### Fixes
    +
    +* add missing `Account` fields: `address_1`, `address_2`, `phone`
    +
    +
    +## [v0.6.1](https://github.com/linode/linodego/compare/v0.6.0..v0.6.1) (2018-10-26)
    +
    +### Features
    +
    +* Adds support for fetching and updating basic Profile information
    +
    +
    +## [v0.6.0](https://github.com/linode/linodego/compare/v0.5.1..v0.6.0) (2018-10-25)
    +
    +### Fixes
    +
    +* Fixes Image date handling
    +* Fixes broken example code in README
    +* Fixes WaitForEventFinished when encountering events without entity
    +* Fixes ResizeInstanceDisk which was executing CloneInstanceDisk
    +* Fixes go-resty import path to gopkg.in version for future go module support
    +
    +### Features
    +
    +* Adds support for user account operations
    +* Adds support for profile tokens
    +* Adds support for Tags
    +* Adds PasswordResetInstanceDisk
    +* Adds DiskStatus constants
    +* Adds WaitForInstanceDiskStatus
    +* Adds SetPollDelay for configuring poll duration
    +
    +  * Reduced polling time to millisecond granularity
    +  * Change polling default to 3s to avoid 429 conditions
    +  * Use poll delay in waitfor functions
    +
    +
    +## [v0.5.1](https://github.com/linode/linodego/compare/v0.5.0...v0.5.1) (2018-09-10)
    +
    +### Fixes
    +
    +* Domain.Status was not imported from API responses correctly
    +
    +
    +## [v0.5.0](https://github.com/linode/linodego/compare/v0.4.0...v0.5.0) (2018-09-09)
    +
    +### Breaking Changes
    +
    +* List functions return slice of thing instead of slice of pointer to thing
    +
    +### Feature
    +
    +* add SSHKeys methods to client (also affects InstanceCreate, InstanceDiskCreate)
    +* add RebuildNodeBalancerConfig (and CreateNodeBalancerConfig with Nodes)
    +
    +### Fixes
    +
+* Event.TimeRemaining wouldn't parse all possible API values
    +* Tests no longer rely on known/special instance and volume ids
    +
    +
    +## [0.4.0](https://github.com/linode/linodego/compare/v0.3.0...0.4.0) (2018-08-27)
    +
    +### Breaking Changes
    +
    +Replaces bool, error results with error results, for:
    +
    +* instance\_snapshots.go: EnableInstanceBackups
    +* instance\_snapshots.go: CancelInstanceBackups
    +* instance\_snapshots.go: RestoreInstanceBackup
    +* instances.go: BootInstance
    +* instances.go: RebootInstance
    +* instances.go: MutateInstance
    +* instances.go: RescueInstance
    +* instances.go: ResizeInstance
    +* instances.go: ShutdownInstance
    +* volumes.go: DetachVolume
    +* volumes.go: ResizeVolume
    +
    +
    +### Docs
    +
    +* reword text about breaking changes until first tag
    +
    +### Feat
    +
    +* added MigrateInstance and InstanceResizing from 4.0.1-4.0.3 API Changelog
    +* added gometalinter to travis builds
    +* added missing function and type comments as reported by linting tools
    +* supply json values for all fields, useful for mocking responses using linodego types
    +* use context channels in WaitFor\* functions
    +* add LinodeTypeClass type (enum)
    +* add TicketStatus type (enum)
    +* update template thing and add a test template
    +
    +### Fix
    +
    +* TransferQuota was TransferQuote (and not parsed from the api correctly)
    +* stackscripts udf was not parsed correctly
    +* add InstanceCreateOptions.PrivateIP
    +* check the WaitFor timeout before sleeping to avoid extra sleep
    +* various linting warnings and unhandled err results as reported by linting tools
    +* fix GetStackscript 404 handling
    +
    +
    +
    +
    +## [0.3.0](https://github.com/linode/linodego/compare/v0.2.0...0.3.0) (2018-08-15)
    +
    +### Breaking Changes
    +
+* WaitForVolumeLinodeID returns the fetched volume for consistency with other WaitFors
    +* Moved linodego from chiefy to github.com/linode. Thanks [@chiefy](https://github.com/chiefy)!
    +
    +
    +
    +## [v0.2.0](https://github.com/linode/linodego/compare/v0.1.1...v0.2.0) (2018-08-11)
    +
    +### Breaking Changes
    +
    +* WaitFor\* should be client methods
    +  *use `client.WaitFor...` rather than `linodego.WaitFor(..., client, ...)`*
    +
    +* remove ListInstanceSnapshots (does not exist in the API)
    +  *this never worked, so shouldn't cause a problem*
    +
    +* Changes UpdateOptions and CreateOptions and similar Options parameters to values instead of pointers
    +  *these were never optional and the function never updated any values in the Options structures*
    +
    +* fixed various optional/zero Update and Create options
    +  *some values are now pointers, and vice-versa*
    +
    +  * Changes InstanceUpdateOptions to use pointers for optional fields Backups and Alerts
    +  * Changes InstanceClone's Disks and Configs to ints instead of strings
    +
    +* using new enum string aliased types where appropriate
    +  *`InstanceSnapshotStatus`, `DiskFilesystem`, `NodeMode`*
    +
    +### Feature
    +
    +* add RescueInstance and RescueInstanceOptions
    +* add CreateImage, UpdateImage, DeleteImage
    +* add EnableInstanceBackups, CancelInstanceBackups, RestoreInstanceBackup
    +* add WatchdogEnabled to InstanceUpdateOptions
    +
    +### Fix
    +
    +* return Volume from AttachVolume instead of bool
    +* add more boilerplate to template.go
    +* nodebalancers and domain records had no pagination support
    +* NodeBalancer transfer stats are not int
    +
    +### Tests
    +
    +* add fixtures and tests for NodeBalancerNodes
    +* fix nodebalancer tests to handle changes due to random labels
    +* add tests for nodebalancers and nodebalancer configs
    +* added tests for Backups flow
    +* TestListInstanceBackups fixture is hand tweaked because repeated polled events
    +  appear to get the tests stuck
    +
    +### Deps
    +
    +* update all dependencies to latest
    +
    +
    +
+## [v0.1.1](https://github.com/linode/linodego/compare/v0.1.0...v0.1.1) (2018-07-30)
    +
    +Adds more Domain handling
    +
    +### Fixed
    +
+* go-resty doesn't pass errors when content-type is not set
    +* Domain, DomainRecords, tests and fixtures
    +
    +### Added
    +
    +* add CreateDomainRecord, UpdateDomainRecord, and DeleteDomainRecord
    +
    +
    +
    +## [v0.1.0](https://github.com/linode/linodego/compare/v0.0.1...v0.1.0) (2018-07-23)
    +
    +Deals with NewClient and context for all http requests
    +
    +### Breaking Changes
    +
    +* changed `NewClient(token, *http.RoundTripper)` to `NewClient(*http.Client)`
    +* changed all `Client` `Get`, `List`, `Create`, `Update`, `Delete`, and `Wait` calls to take context as the first parameter
    +
    +### Fixed
    +
+* docs now show Examples for more functions
    +
    +### Added
    +
    +* added `Client.SetBaseURL(url string)`
    +
    +
    +## v0.0.1 (2018-07-20)
    +
    +### Changed
    +
    +* Initial tagged release
    diff --git a/vendor/github.com/linode/linodego/Gopkg.lock b/vendor/github.com/linode/linodego/Gopkg.lock
    new file mode 100644
    index 000000000000..a7ca4f01546b
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/Gopkg.lock
    @@ -0,0 +1,111 @@
    +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
    +
    +
    +[[projects]]
    +  branch = "master"
    +  digest = "1:6e1c13bc32e58ccb4afa1115a3ba4fc071d918ed897b40dfa323ffb3fcc6619d"
    +  name = "github.com/dnaeon/go-vcr"
    +  packages = [
    +    "cassette",
    +    "recorder",
    +  ]
    +  pruneopts = "UT"
    +  revision = "aafff18a5cc28fa0b2f26baf6a14472cda9b54c6"
    +
    +[[projects]]
    +  digest = "1:97df918963298c287643883209a2c3f642e6593379f97ab400c2a2e219ab647d"
    +  name = "github.com/golang/protobuf"
    +  packages = ["proto"]
    +  pruneopts = "UT"
    +  revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
    +  version = "v1.2.0"
    +
    +[[projects]]
    +  branch = "master"
    +  digest = "1:33b9d71d1dde2106309484a388eb7ba53cd1f67014e34a71f7b3dbc20bd186e5"
    +  name = "golang.org/x/net"
    +  packages = [
    +    "context",
    +    "context/ctxhttp",
    +    "idna",
    +    "publicsuffix",
    +  ]
    +  pruneopts = "UT"
    +  revision = "8a410e7b638dca158bf9e766925842f6651ff828"
    +
    +[[projects]]
    +  branch = "master"
    +  digest = "1:363b547c971a2b07474c598b6e9ebcb238d556d8a27f37b3895ad20cd50e7281"
    +  name = "golang.org/x/oauth2"
    +  packages = [
    +    ".",
    +    "internal",
    +  ]
    +  pruneopts = "UT"
    +  revision = "d2e6202438beef2727060aa7cabdd924d92ebfd9"
    +
    +[[projects]]
    +  digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
    +  name = "golang.org/x/text"
    +  packages = [
    +    "collate",
    +    "collate/build",
    +    "internal/colltab",
    +    "internal/gen",
    +    "internal/tag",
    +    "internal/triegen",
    +    "internal/ucd",
    +    "language",
    +    "secure/bidirule",
    +    "transform",
    +    "unicode/bidi",
    +    "unicode/cldr",
    +    "unicode/norm",
    +    "unicode/rangetable",
    +  ]
    +  pruneopts = "UT"
    +  revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
    +  version = "v0.3.0"
    +
    +[[projects]]
    +  digest = "1:328b5e4f197d928c444a51a75385f4b978915c0e75521f0ad6a3db976c97a7d3"
    +  name = "google.golang.org/appengine"
    +  packages = [
    +    "internal",
    +    "internal/base",
    +    "internal/datastore",
    +    "internal/log",
    +    "internal/remote_api",
    +    "internal/urlfetch",
    +    "urlfetch",
    +  ]
    +  pruneopts = "UT"
    +  revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
    +  version = "v1.1.0"
    +
    +[[projects]]
    +  digest = "1:b7fc4c3fd91df516486f53cc86f4b55a0c815782dbe852c5a19cce8e6c577aac"
    +  name = "gopkg.in/resty.v1"
    +  packages = ["."]
    +  pruneopts = "UT"
    +  revision = "d4920dcf5b7689548a6db640278a9b35a5b48ec6"
    +  version = "v1.9.1"
    +
    +[[projects]]
    +  digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202"
    +  name = "gopkg.in/yaml.v2"
    +  packages = ["."]
    +  pruneopts = "UT"
    +  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
    +  version = "v2.2.1"
    +
    +[solve-meta]
    +  analyzer-name = "dep"
    +  analyzer-version = 1
    +  input-imports = [
    +    "github.com/dnaeon/go-vcr/recorder",
    +    "golang.org/x/oauth2",
    +    "gopkg.in/resty.v1",
    +  ]
    +  solver-name = "gps-cdcl"
    +  solver-version = 1
    diff --git a/vendor/github.com/linode/linodego/Gopkg.toml b/vendor/github.com/linode/linodego/Gopkg.toml
    new file mode 100644
    index 000000000000..2765dc95c4e2
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/Gopkg.toml
    @@ -0,0 +1,29 @@
    +# Gopkg.toml example
    +#
    +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
    +# for detailed Gopkg.toml documentation.
    +#
    +# required = ["github.com/user/thing/cmd/thing"]
    +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
    +#
    +# [[constraint]]
    +#   name = "github.com/user/project"
    +#   version = "1.0.0"
    +#
    +# [[constraint]]
    +#   name = "github.com/user/project2"
    +#   branch = "dev"
    +#   source = "github.com/myfork/project2"
    +#
    +# [[override]]
    +#   name = "github.com/x/y"
    +#   version = "2.4.0"
    +#
    +# [prune]
    +#   non-go = false
    +#   go-tests = true
    +#   unused-packages = true
    +
    +[prune]
    +  go-tests = true
    +  unused-packages = true
    diff --git a/vendor/github.com/linode/linodego/LICENSE b/vendor/github.com/linode/linodego/LICENSE
    new file mode 100644
    index 000000000000..6b0e6bace741
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/LICENSE
    @@ -0,0 +1,21 @@
    +MIT License
    +
    +Copyright (c) 2017 Christopher "Chief" Najewicz
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +SOFTWARE.
    diff --git a/vendor/github.com/linode/linodego/Makefile b/vendor/github.com/linode/linodego/Makefile
    new file mode 100644
    index 000000000000..e5f533d428ad
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/Makefile
    @@ -0,0 +1,43 @@
    +include .env
    +
    +.PHONY: vendor example refresh-fixtures clean-fixtures
    +
    +.PHONY: test
    +test: vendor
    +	@LINODE_FIXTURE_MODE="play" \
    +	LINODE_TOKEN="awesometokenawesometokenawesometoken" \
    +	go test $(ARGS)
    +
    +$(GOPATH)/bin/dep:
    +	@go get -u github.com/golang/dep/cmd/dep
    +
    +vendor: $(GOPATH)/bin/dep
    +	@dep ensure
    +
    +example:
    +	@go run example/main.go
    +
    +clean-fixtures:
    +	@-rm fixtures/*.yaml
    +
    +refresh-fixtures: clean-fixtures fixtures
    +
    +.PHONY: fixtures
    +fixtures:
    +	@echo "* Running fixtures"
    +	@LINODE_TOKEN=$(LINODE_TOKEN) \
    +	LINODE_FIXTURE_MODE="record" go test $(ARGS)
+	@echo "* Sanitizing fixtures"
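+	@# The sed expressions below replace the recorded API token, timestamps, and
+	@# IPv4/IPv6 addresses in the recorded fixtures with fixed placeholder values.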
    +	@for yaml in fixtures/*yaml; do \
    +		sed -E -i "" -e "s/$(LINODE_TOKEN)/awesometokenawesometokenawesometoken/g" \
    +			-e 's/20[0-9]{2}-[01][0-9]-[0-3][0-9]T[0-2][0-9]:[0-9]{2}:[0-9]{2}/2018-01-02T03:04:05/g' \
    +			-e 's/nb-[0-9]{1,3}-[0-9]{1,3}-[0-9]{1,3}-[0-9]{1,3}\./nb-10-20-30-40./g' \
    +			-e 's/192\.168\.((1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])\.)(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])/192.168.030.040/g' \
    +			-e '/^192\.168/!s/((1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])\.){3}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])/10.20.30.40/g' \
    +			-e 's/(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))/1234::5678/g' \
    +			$$yaml; \
    +	done
    +
    +.PHONY: godoc
    +godoc:
    +	@godoc -http=:6060
    diff --git a/vendor/github.com/linode/linodego/README.md b/vendor/github.com/linode/linodego/README.md
    new file mode 100644
    index 000000000000..7d0ae2bc12f9
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/README.md
    @@ -0,0 +1,178 @@
    +# linodego
    +
    +[![Build Status](https://travis-ci.org/linode/linodego.svg?branch=master)](https://travis-ci.org/linode/linodego)
    +[![GoDoc](https://godoc.org/github.com/linode/linodego?status.svg)](https://godoc.org/github.com/linode/linodego)
    +[![Go Report Card](https://goreportcard.com/badge/github.com/linode/linodego)](https://goreportcard.com/report/github.com/linode/linodego)
    +[![codecov](https://codecov.io/gh/linode/linodego/branch/master/graph/badge.svg)](https://codecov.io/gh/linode/linodego)
    +
    +Go client for [Linode REST v4 API](https://developers.linode.com/api/v4)
    +
    +## Installation
    +
    +```sh
    +go get -u github.com/linode/linodego
    +```
    +
    +## API Support
    +
    +Check [API_SUPPORT.md](API_SUPPORT.md) for current support of the Linode `v4` API endpoints.
    +
+**Note:** This project will change and break until we release a v1.0.0 tagged version. Breaking changes in v0.x.x will be denoted with a minor version bump (v0.2.4 -> v0.3.0).
    +
    +## Documentation
    +
    +See [godoc](https://godoc.org/github.com/linode/linodego) for a complete reference.
    +
    +The API generally follows the naming patterns prescribed in the [OpenAPIv3 document for Linode APIv4](https://developers.linode.com/api/v4).
    +
    +Deviations in naming have been made to avoid using "Linode" and "Instance" redundantly or inconsistently.
    +
+A brief summary of the features offered in this API client is shown here.
    +
    +## Examples
    +
    +### General Usage
    +
    +```go
    +package main
    +
    +import (
    +	"context"
    +	"fmt"
    +
    +	"github.com/linode/linodego"
    +	"golang.org/x/oauth2"
    +
    +	"log"
    +	"net/http"
    +	"os"
    +)
    +
    +func main() {
    +  apiKey, ok := os.LookupEnv("LINODE_TOKEN")
    +  if !ok {
    +    log.Fatal("Could not find LINODE_TOKEN, please assert it is set.")
    +  }
    +  tokenSource := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: apiKey})
    +
    +  oauth2Client := &http.Client{
    +    Transport: &oauth2.Transport{
    +      Source: tokenSource,
    +    },
    +  }
    +
    +  linodeClient := linodego.NewClient(oauth2Client)
    +  linodeClient.SetDebug(true)
    +  
    +  res, err := linodeClient.GetInstance(context.Background(), 4090913)
    +  if err != nil {
    +    log.Fatal(err)
    +  }
    +  fmt.Printf("%v", res)
    +}
    +```
    +
    +### Pagination
    +
    +#### Auto-Pagination Requests
    +
    +```go
    +kernels, err := linodego.ListKernels(context.Background(), nil)
    +// len(kernels) == 218
    +```
    +
    +Or, use a page value of "0":
    +
    +```go
    +opts := NewListOptions(0,"")
    +kernels, err := linodego.ListKernels(context.Background(), opts)
    +// len(kernels) == 218
    +```
    +
    +#### Single Page
    +
    +```go
    +opts := NewListOptions(2,"")
+// or opts := ListOptions{PageOptions: &PageOptions{Page: 2}}
    +kernels, err := linodego.ListKernels(context.Background(), opts)
    +// len(kernels) == 100
    +```
    +
    +ListOptions are supplied as a pointer because the Pages and Results
    +values are set in the supplied ListOptions.
    +
    +```go
    +// opts.Results == 218
    +```
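+
+A minimal sketch of that pattern, reusing the `linodeClient` from the General Usage example above (the `Pages` and `Results` fields are filled in on the supplied options):
+
+```go
+opts := linodego.NewListOptions(0, "")
+kernels, err := linodeClient.ListKernels(context.Background(), opts)
+if err != nil {
+	log.Fatal(err)
+}
+// The API's totals are now available on the same opts value that was passed in.
+fmt.Println("pages:", opts.Pages, "results:", opts.Results, "returned:", len(kernels))
+```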
    +
    +#### Filtering
    +
    +```go
    +opts := ListOptions{Filter: "{\"mine\":true}"}
    +// or opts := NewListOptions(0, "{\"mine\":true}")
    +stackscripts, err := linodego.ListStackscripts(context.Background(), opts)
    +```
    +
    +### Error Handling
    +
    +#### Getting Single Entities
    +
    +```go
    +linode, err := linodego.GetLinode(context.Background(), 555) // any Linode ID that does not exist or is not yours
    +// linode == nil: true
    +// err.Error() == "[404] Not Found"
    +// err.Code == "404"
    +// err.Message == "Not Found"
    +```
    +
    +#### Lists
    +
    +For lists, the list is still returned as `[]`, but `err` works the same way as on the `Get` request.
    +
    +```go
    +linodes, err := linodego.ListLinodes(context.Background(), NewListOptions(0, "{\"foo\":bar}"))
    +// linodes == []
    +// err.Error() == "[400] [X-Filter] Cannot filter on foo"
    +```
    +
+Otherwise-sane requests beyond the last page do not trigger an error, just an empty result:
    +
    +```go
    +linodes, err := linodego.ListLinodes(context.Background(), NewListOptions(9999, ""))
    +// linodes == []
    +// err = nil
    +```
    +
    +### Writes
    +
+When performing a `POST` or `PUT` request, multiple field-related errors are returned as a single error, currently in a form like this:
    +
    +```go
    +// err.Error() == "[400] [field1] foo problem; [field2] bar problem; [field3] baz problem"
    +```
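+
+As a rough sketch (this simply splits the combined message text rather than using any structured field-error API, and it assumes the `linodeClient` from the General Usage example plus some `createOpts` of type `UserCreateOptions`):
+
+```go
+if _, err := linodeClient.CreateUser(context.Background(), createOpts); err != nil {
+	// Each "[field] problem" fragment in the combined message is separated by "; ";
+	// the first fragment also carries the "[400]" status prefix.
+	for _, problem := range strings.Split(err.Error(), "; ") {
+		log.Println("field error:", problem)
+	}
+}
+```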
    +
    +## Tests
    +
    +Run `make test` to run the unit tests.  This is the same as running `go test` except that `make test` will
    +execute the tests while playing back API response fixtures that were recorded during a previous development build.
    +
    +`go test` can be used without the fixtures. Copy `env.sample` to `.env` and configure your persistent test
    +settings, including an API token.
    +
    +`go test -short` can be used to run live API tests that do not require an account token.
    +
    +This will be simplified in future versions.
    +
    +To update the test fixtures, run `make fixtures`.  This will record the API responses into the `fixtures/` directory.
    +Be careful about committing any sensitive account details.  An attempt has been made to sanitize IP addresses and
    +dates, but no automated sanitization will be performed against `fixtures/*Account*.yaml`, for example.
    +
    +To prevent disrupting unaffected fixtures, target fixture generation like so: `make ARGS="-run TestListVolumes" fixtures`.
    +
    +## Discussion / Help
    +
+Join us at [#linodego](https://gophers.slack.com/messages/CAG93EB2S) on the [gophers slack](https://gophers.slack.com).
    +
    +## License
    +
    +[MIT License](LICENSE)
    diff --git a/vendor/github.com/linode/linodego/account.go b/vendor/github.com/linode/linodego/account.go
    new file mode 100644
    index 000000000000..d6092f07d709
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/account.go
    @@ -0,0 +1,45 @@
    +package linodego
    +
    +import "context"
    +
    +// Account associated with the token in use
    +type Account struct {
    +	FirstName  string      `json:"first_name"`
    +	LastName   string      `json:"last_name"`
    +	Email      string      `json:"email"`
    +	Company    string      `json:"company"`
    +	Address1   string      `json:"address_1"`
    +	Address2   string      `json:"address_2"`
    +	Balance    float32     `json:"balance"`
    +	City       string      `json:"city"`
    +	State      string      `json:"state"`
    +	Zip        string      `json:"zip"`
    +	Country    string      `json:"country"`
    +	TaxID      string      `json:"tax_id"`
    +	Phone      string      `json:"phone"`
    +	CreditCard *CreditCard `json:"credit_card"`
    +}
    +
    +// CreditCard information associated with the Account.
    +type CreditCard struct {
    +	LastFour string `json:"last_four"`
    +	Expiry   string `json:"expiry"`
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (v *Account) fixDates() *Account {
    +	return v
    +}
    +
    +// GetAccount gets the contact and billing information related to the Account
    +func (c *Client) GetAccount(ctx context.Context) (*Account, error) {
    +	e, err := c.Account.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&Account{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Account).fixDates(), nil
    +}
    diff --git a/vendor/github.com/linode/linodego/account_events.go b/vendor/github.com/linode/linodego/account_events.go
    new file mode 100644
    index 000000000000..e2365c35b6db
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/account_events.go
    @@ -0,0 +1,277 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"log"
    +	"strconv"
    +	"strings"
    +	"time"
    +)
    +
    +// Event represents an action taken on the Account.
    +type Event struct {
    +	CreatedStr string `json:"created"`
    +
    +	// The unique ID of this Event.
    +	ID int `json:"id"`
    +
    +	// Current status of the Event, Enum: "failed" "finished" "notification" "scheduled" "started"
    +	Status EventStatus `json:"status"`
    +
    +	// The action that caused this Event. New actions may be added in the future.
    +	Action EventAction `json:"action"`
    +
    +	// A percentage estimating the amount of time remaining for an Event. Returns null for notification events.
    +	PercentComplete int `json:"percent_complete"`
    +
    +	// The rate of completion of the Event. Only some Events will return rate; for example, migration and resize Events.
    +	Rate *string `json:"rate"`
    +
    +	// If this Event has been read.
    +	Read bool `json:"read"`
    +
    +	// If this Event has been seen.
    +	Seen bool `json:"seen"`
    +
    +	// The estimated time remaining until the completion of this Event. This value is only returned for in-progress events.
    +	TimeRemainingMsg json.RawMessage `json:"time_remaining"`
    +	TimeRemaining    *int            `json:"-"`
    +
    +	// The username of the User who caused the Event.
    +	Username string `json:"username"`
    +
    +	// Detailed information about the Event's entity, including ID, type, label, and URL used to access it.
    +	Entity *EventEntity `json:"entity"`
    +
    +	// When this Event was created.
    +	Created *time.Time `json:"-"`
    +}
    +
    +// EventAction constants start with Action and include all known Linode API Event Actions.
    +type EventAction string
    +
    +// EventAction constants represent the actions that cause an Event. New actions may be added in the future.
    +const (
    +	ActionBackupsEnable            EventAction = "backups_enable"
    +	ActionBackupsCancel            EventAction = "backups_cancel"
    +	ActionBackupsRestore           EventAction = "backups_restore"
    +	ActionCommunityQuestionReply   EventAction = "community_question_reply"
    +	ActionCreateCardUpdated        EventAction = "credit_card_updated"
    +	ActionDiskCreate               EventAction = "disk_create"
    +	ActionDiskDelete               EventAction = "disk_delete"
    +	ActionDiskDuplicate            EventAction = "disk_duplicate"
    +	ActionDiskImagize              EventAction = "disk_imagize"
    +	ActionDiskResize               EventAction = "disk_resize"
    +	ActionDNSRecordCreate          EventAction = "dns_record_create"
    +	ActionDNSRecordDelete          EventAction = "dns_record_delete"
    +	ActionDNSZoneCreate            EventAction = "dns_zone_create"
    +	ActionDNSZoneDelete            EventAction = "dns_zone_delete"
    +	ActionImageDelete              EventAction = "image_delete"
    +	ActionLinodeAddIP              EventAction = "linode_addip"
    +	ActionLinodeBoot               EventAction = "linode_boot"
    +	ActionLinodeClone              EventAction = "linode_clone"
    +	ActionLinodeCreate             EventAction = "linode_create"
    +	ActionLinodeDelete             EventAction = "linode_delete"
    +	ActionLinodeDeleteIP           EventAction = "linode_deleteip"
    +	ActionLinodeMigrate            EventAction = "linode_migrate"
    +	ActionLinodeMutate             EventAction = "linode_mutate"
    +	ActionLinodeReboot             EventAction = "linode_reboot"
    +	ActionLinodeRebuild            EventAction = "linode_rebuild"
    +	ActionLinodeResize             EventAction = "linode_resize"
    +	ActionLinodeShutdown           EventAction = "linode_shutdown"
    +	ActionLinodeSnapshot           EventAction = "linode_snapshot"
    +	ActionLongviewClientCreate     EventAction = "longviewclient_create"
    +	ActionLongviewClientDelete     EventAction = "longviewclient_delete"
    +	ActionManagedDisabled          EventAction = "managed_disabled"
    +	ActionManagedEnabled           EventAction = "managed_enabled"
    +	ActionManagedServiceCreate     EventAction = "managed_service_create"
    +	ActionManagedServiceDelete     EventAction = "managed_service_delete"
    +	ActionNodebalancerCreate       EventAction = "nodebalancer_create"
    +	ActionNodebalancerDelete       EventAction = "nodebalancer_delete"
    +	ActionNodebalancerConfigCreate EventAction = "nodebalancer_config_create"
    +	ActionNodebalancerConfigDelete EventAction = "nodebalancer_config_delete"
    +	ActionPasswordReset            EventAction = "password_reset"
    +	ActionPaymentSubmitted         EventAction = "payment_submitted"
    +	ActionStackScriptCreate        EventAction = "stackscript_create"
    +	ActionStackScriptDelete        EventAction = "stackscript_delete"
    +	ActionStackScriptPublicize     EventAction = "stackscript_publicize"
    +	ActionStackScriptRevise        EventAction = "stackscript_revise"
    +	ActionTFADisabled              EventAction = "tfa_disabled"
    +	ActionTFAEnabled               EventAction = "tfa_enabled"
    +	ActionTicketAttachmentUpload   EventAction = "ticket_attachment_upload"
    +	ActionTicketCreate             EventAction = "ticket_create"
    +	ActionTicketReply              EventAction = "ticket_reply"
    +	ActionVolumeAttach             EventAction = "volume_attach"
    +	ActionVolumeClone              EventAction = "volume_clone"
    +	ActionVolumeCreate             EventAction = "volume_create"
    +	ActionVolumeDelte              EventAction = "volume_delete"
    +	ActionVolumeDetach             EventAction = "volume_detach"
    +	ActionVolumeResize             EventAction = "volume_resize"
    +)
    +
    +// EntityType constants start with Entity and include Linode API Event Entity Types
    +type EntityType string
    +
+// EntityType constants are the entities an Event can be related to
    +const (
    +	EntityLinode EntityType = "linode"
    +	EntityDisk   EntityType = "disk"
    +)
    +
    +// EventStatus constants start with Event and include Linode API Event Status values
    +type EventStatus string
    +
    +// EventStatus constants reflect the current status of an Event
    +const (
    +	EventFailed       EventStatus = "failed"
    +	EventFinished     EventStatus = "finished"
    +	EventNotification EventStatus = "notification"
    +	EventScheduled    EventStatus = "scheduled"
    +	EventStarted      EventStatus = "started"
    +)
    +
    +// EventEntity provides detailed information about the Event's
    +// associated entity, including ID, Type, Label, and a URL that
    +// can be used to access it.
    +type EventEntity struct {
    +	// ID may be a string or int, it depends on the EntityType
    +	ID    interface{} `json:"id"`
    +	Label string      `json:"label"`
    +	Type  EntityType  `json:"type"`
    +	URL   string      `json:"url"`
    +}
    +
    +// EventsPagedResponse represents a paginated Events API response
    +type EventsPagedResponse struct {
    +	*PageOptions
    +	Data []Event `json:"data"`
    +}
    +
    +// endpoint gets the endpoint URL for Event
    +func (EventsPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.Events.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// endpointWithID gets the endpoint URL for a specific Event
    +func (e Event) endpointWithID(c *Client) string {
    +	endpoint, err := c.Events.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	endpoint = fmt.Sprintf("%s/%d", endpoint, e.ID)
    +	return endpoint
    +}
    +
    +// appendData appends Events when processing paginated Event responses
    +func (resp *EventsPagedResponse) appendData(r *EventsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListEvents gets a collection of Event objects representing actions taken
    +// on the Account. The Events returned depend on the token grants and the grants
    +// of the associated user.
    +func (c *Client) ListEvents(ctx context.Context, opts *ListOptions) ([]Event, error) {
    +	response := EventsPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// GetEvent gets the Event with the Event ID
    +func (c *Client) GetEvent(ctx context.Context, id int) (*Event, error) {
    +	e, err := c.Events.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +	r, err := c.R(ctx).SetResult(&Event{}).Get(e)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Event).fixDates(), nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (e *Event) fixDates() *Event {
    +	e.Created, _ = parseDates(e.CreatedStr)
    +	e.TimeRemaining = unmarshalTimeRemaining(e.TimeRemainingMsg)
    +	return e
    +}
    +
    +// MarkEventRead marks a single Event as read.
    +func (c *Client) MarkEventRead(ctx context.Context, event *Event) error {
    +	e := event.endpointWithID(c)
    +	e = fmt.Sprintf("%s/read", e)
    +
    +	_, err := coupleAPIErrors(c.R(ctx).Post(e))
    +
    +	return err
    +}
    +
    +// MarkEventsSeen marks all Events up to and including this Event by ID as seen.
    +func (c *Client) MarkEventsSeen(ctx context.Context, event *Event) error {
    +	e := event.endpointWithID(c)
    +	e = fmt.Sprintf("%s/seen", e)
    +
    +	_, err := coupleAPIErrors(c.R(ctx).Post(e))
    +
    +	return err
    +}
    +
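+// unmarshalTimeRemaining accepts the time_remaining payload as JSON null,
+// an "hh:mm:ss" string, or a plain integer number of seconds, returning nil
+// for null or unrecognized values.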
    +func unmarshalTimeRemaining(m json.RawMessage) *int {
    +	jsonBytes, err := m.MarshalJSON()
    +	if err != nil {
    +		panic(jsonBytes)
    +	}
    +
    +	if len(jsonBytes) == 4 && string(jsonBytes) == "null" {
    +		return nil
    +	}
    +
    +	var timeStr string
    +	if err := json.Unmarshal(jsonBytes, &timeStr); err == nil && len(timeStr) > 0 {
    +		if dur, err := durationToSeconds(timeStr); err != nil {
    +			panic(err)
    +		} else {
    +			return &dur
    +		}
    +	} else {
    +		var intPtr int
    +		if err := json.Unmarshal(jsonBytes, &intPtr); err == nil {
    +			return &intPtr
    +		}
    +	}
    +
    +	log.Println("[WARN] Unexpected unmarshalTimeRemaining value: ", jsonBytes)
    +	return nil
    +}
    +
    +// durationToSeconds takes a hh:mm:ss string and returns the number of seconds
    +func durationToSeconds(s string) (int, error) {
    +	multipliers := [3]int{60 * 60, 60, 1}
    +	segs := strings.Split(s, ":")
    +	if len(segs) > len(multipliers) {
    +		return 0, fmt.Errorf("too many ':' separators in time duration: %s", s)
    +	}
    +	var d int
    +	l := len(segs)
    +	for i := 0; i < l; i++ {
    +		m, err := strconv.Atoi(segs[i])
    +		if err != nil {
    +			return 0, err
    +		}
    +		d += m * multipliers[i+len(multipliers)-l]
    +	}
    +	return d, nil
    +}
    diff --git a/vendor/github.com/linode/linodego/account_invoices.go b/vendor/github.com/linode/linodego/account_invoices.go
    new file mode 100644
    index 000000000000..75ca2f73d23b
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/account_invoices.go
    @@ -0,0 +1,125 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"fmt"
    +	"time"
    +)
    +
    +// Invoice structs reflect an invoice for billable activity on the account.
    +type Invoice struct {
    +	DateStr string `json:"date"`
    +
    +	ID    int        `json:"id"`
    +	Label string     `json:"label"`
    +	Total float32    `json:"total"`
    +	Date  *time.Time `json:"-"`
    +}
    +
+// InvoiceItem structs reflect a single billable activity associated with an Invoice
    +type InvoiceItem struct {
    +	FromStr string `json:"from"`
    +	ToStr   string `json:"to"`
    +
    +	Label     string     `json:"label"`
    +	Type      string     `json:"type"`
    +	UnitPrice int        `json:"unitprice"`
    +	Quantity  int        `json:"quantity"`
    +	Amount    float32    `json:"amount"`
    +	From      *time.Time `json:"-"`
    +	To        *time.Time `json:"-"`
    +}
    +
    +// InvoicesPagedResponse represents a paginated Invoice API response
    +type InvoicesPagedResponse struct {
    +	*PageOptions
    +	Data []Invoice `json:"data"`
    +}
    +
    +// endpoint gets the endpoint URL for Invoice
    +func (InvoicesPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.Invoices.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends Invoices when processing paginated Invoice responses
    +func (resp *InvoicesPagedResponse) appendData(r *InvoicesPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListInvoices gets a paginated list of Invoices against the Account
    +func (c *Client) ListInvoices(ctx context.Context, opts *ListOptions) ([]Invoice, error) {
    +	response := InvoicesPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (v *Invoice) fixDates() *Invoice {
    +	v.Date, _ = parseDates(v.DateStr)
    +	return v
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (v *InvoiceItem) fixDates() *InvoiceItem {
    +	v.From, _ = parseDates(v.FromStr)
    +	v.To, _ = parseDates(v.ToStr)
    +	return v
    +}
    +
+// GetInvoice gets a single Invoice matching the provided ID
    +func (c *Client) GetInvoice(ctx context.Context, id int) (*Invoice, error) {
    +	e, err := c.Invoices.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	e = fmt.Sprintf("%s/%d", e, id)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&Invoice{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Invoice).fixDates(), nil
    +}
    +
    +// InvoiceItemsPagedResponse represents a paginated Invoice Item API response
    +type InvoiceItemsPagedResponse struct {
    +	*PageOptions
    +	Data []InvoiceItem `json:"data"`
    +}
    +
    +// endpointWithID gets the endpoint URL for InvoiceItems associated with a specific Invoice
    +func (InvoiceItemsPagedResponse) endpointWithID(c *Client, id int) string {
    +	endpoint, err := c.InvoiceItems.endpointWithID(id)
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends InvoiceItems when processing paginated Invoice Item responses
    +func (resp *InvoiceItemsPagedResponse) appendData(r *InvoiceItemsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListInvoiceItems gets the invoice items associated with a specific Invoice
    +func (c *Client) ListInvoiceItems(ctx context.Context, id int, opts *ListOptions) ([]InvoiceItem, error) {
    +	response := InvoiceItemsPagedResponse{}
    +	err := c.listHelperWithID(ctx, &response, id, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    diff --git a/vendor/github.com/linode/linodego/account_notifications.go b/vendor/github.com/linode/linodego/account_notifications.go
    new file mode 100644
    index 000000000000..1d2de2e62cdd
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/account_notifications.go
    @@ -0,0 +1,101 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"time"
    +)
    +
    +// Notification represents a notification on an Account
    +type Notification struct {
    +	UntilStr string `json:"until"`
    +	WhenStr  string `json:"when"`
    +
    +	Label    string               `json:"label"`
    +	Body     *string              `json:"body"`
    +	Message  string               `json:"message"`
    +	Type     NotificationType     `json:"type"`
    +	Severity NotificationSeverity `json:"severity"`
    +	Entity   *NotificationEntity  `json:"entity"`
    +	Until    *time.Time           `json:"-"`
    +	When     *time.Time           `json:"-"`
    +}
    +
    +// NotificationEntity adds detailed information about the Notification.
    +// This could refer to the ticket that triggered the notification, for example.
    +type NotificationEntity struct {
    +	ID    int    `json:"id"`
    +	Label string `json:"label"`
    +	Type  string `json:"type"`
    +	URL   string `json:"url"`
    +}
    +
    +// NotificationSeverity constants start with Notification and include all known Linode API Notification Severities.
    +type NotificationSeverity string
    +
+// NotificationSeverity constants reflect how severe a Notification is. New severities may be added in the future.
    +const (
    +	NotificationMinor    NotificationSeverity = "minor"
    +	NotificationMajor    NotificationSeverity = "major"
    +	NotificationCritical NotificationSeverity = "critical"
    +)
    +
    +// NotificationType constants start with Notification and include all known Linode API Notification Types.
    +type NotificationType string
    +
    +// NotificationType constants represent the actions that cause a Notification. New types may be added in the future.
    +const (
    +	NotificationMigrationScheduled NotificationType = "migration_scheduled"
    +	NotificationMigrationImminent  NotificationType = "migration_imminent"
    +	NotificationMigrationPending   NotificationType = "migration_pending"
    +	NotificationRebootScheduled    NotificationType = "reboot_scheduled"
    +	NotificationOutage             NotificationType = "outage"
    +	NotificationPaymentDue         NotificationType = "payment_due"
    +	NotificationTicketImportant    NotificationType = "ticket_important"
    +	NotificationTicketAbuse        NotificationType = "ticket_abuse"
    +	NotificationNotice             NotificationType = "notice"
    +	NotificationMaintenance        NotificationType = "maintenance"
    +)
    +
    +// NotificationsPagedResponse represents a paginated Notifications API response
    +type NotificationsPagedResponse struct {
    +	*PageOptions
    +	Data []Notification `json:"data"`
    +}
    +
    +// endpoint gets the endpoint URL for Notification
    +func (NotificationsPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.Notifications.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends Notifications when processing paginated Notification responses
    +func (resp *NotificationsPagedResponse) appendData(r *NotificationsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListNotifications gets a collection of Notification objects representing important,
    +// often time-sensitive items related to the Account. An account cannot interact directly with
    +// Notifications, and a Notification will disappear when the circumstances causing it
    +// have been resolved. For example, if the account has an important Ticket open, a response
    +// to the Ticket will dismiss the Notification.
    +func (c *Client) ListNotifications(ctx context.Context, opts *ListOptions) ([]Notification, error) {
    +	response := NotificationsPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (v *Notification) fixDates() *Notification {
    +	v.Until, _ = parseDates(v.UntilStr)
    +	v.When, _ = parseDates(v.WhenStr)
    +	return v
    +}
    diff --git a/vendor/github.com/linode/linodego/account_users.go b/vendor/github.com/linode/linodego/account_users.go
    new file mode 100644
    index 000000000000..a51fbe7f1942
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/account_users.go
    @@ -0,0 +1,164 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +)
    +
    +// User represents a User object
    +type User struct {
    +	Username   string   `json:"username"`
    +	Email      string   `json:"email"`
    +	Restricted bool     `json:"restricted"`
    +	SSHKeys    []string `json:"ssh_keys"`
    +}
    +
    +// UserCreateOptions fields are those accepted by CreateUser
    +type UserCreateOptions struct {
    +	Username   string `json:"username"`
    +	Email      string `json:"email"`
    +	Restricted bool   `json:"restricted,omitempty"`
    +}
    +
    +// UserUpdateOptions fields are those accepted by UpdateUser
    +type UserUpdateOptions struct {
    +	Username   string    `json:"username,omitempty"`
    +	Email      string    `json:"email,omitempty"`
    +	Restricted *bool     `json:"restricted,omitempty"`
    +	SSHKeys    *[]string `json:"ssh_keys,omitempty"`
    +}
    +
    +// GetCreateOptions converts a User to UserCreateOptions for use in CreateUser
    +func (i User) GetCreateOptions() (o UserCreateOptions) {
    +	o.Username = i.Username
    +	o.Email = i.Email
    +	o.Restricted = i.Restricted
    +	return
    +}
    +
    +// GetUpdateOptions converts a User to UserUpdateOptions for use in UpdateUser
    +func (i User) GetUpdateOptions() (o UserUpdateOptions) {
    +	o.Username = i.Username
    +	o.Email = i.Email
    +	o.Restricted = copyBool(&i.Restricted)
    +	return
    +}
    +
    +// UsersPagedResponse represents a paginated User API response
    +type UsersPagedResponse struct {
    +	*PageOptions
    +	Data []User `json:"data"`
    +}
    +
    +// endpoint gets the endpoint URL for User
    +func (UsersPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.Users.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends Users when processing paginated User responses
    +func (resp *UsersPagedResponse) appendData(r *UsersPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListUsers lists Users on the account
    +func (c *Client) ListUsers(ctx context.Context, opts *ListOptions) ([]User, error) {
    +	response := UsersPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (i *User) fixDates() *User {
    +	return i
    +}
    +
    +// GetUser gets the user with the provided ID
    +func (c *Client) GetUser(ctx context.Context, id string) (*User, error) {
    +	e, err := c.Users.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, id)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&User{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*User).fixDates(), nil
    +}
    +
    +// CreateUser creates a User.  The email address must be confirmed before the
    +// User account can be accessed.
    +func (c *Client) CreateUser(ctx context.Context, createOpts UserCreateOptions) (*User, error) {
    +	var body string
    +	e, err := c.Users.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&User{})
    +
    +	if bodyData, err := json.Marshal(createOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*User).fixDates(), nil
    +}
    +
    +// UpdateUser updates the User with the specified id
    +func (c *Client) UpdateUser(ctx context.Context, id string, updateOpts UserUpdateOptions) (*User, error) {
    +	var body string
    +	e, err := c.Users.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, id)
    +
    +	req := c.R(ctx).SetResult(&User{})
    +
    +	if bodyData, err := json.Marshal(updateOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*User).fixDates(), nil
    +}
    +
    +// DeleteUser deletes the User with the specified id
    +func (c *Client) DeleteUser(ctx context.Context, id string) error {
    +	e, err := c.Users.Endpoint()
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, id)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
    +	return err
    +}
    diff --git a/vendor/github.com/linode/linodego/client.go b/vendor/github.com/linode/linodego/client.go
    new file mode 100644
    index 000000000000..224a496decb4
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/client.go
    @@ -0,0 +1,257 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"fmt"
    +	"log"
    +	"net/http"
    +	"os"
    +	"strconv"
    +	"time"
    +
    +	"gopkg.in/resty.v1"
    +)
    +
    +const (
    +	// APIHost Linode API hostname
    +	APIHost = "api.linode.com"
    +	// APIVersion Linode API version
    +	APIVersion = "v4"
    +	// APIProto connect to API with http(s)
    +	APIProto = "https"
    +	// Version of linodego
    +	Version = "0.7.0"
    +	// APIEnvVar environment var to check for API token
    +	APIEnvVar = "LINODE_TOKEN"
    +	// APISecondsPerPoll how frequently to poll for new Events or Status in WaitFor functions
    +	APISecondsPerPoll = 3
    +	// DefaultUserAgent is the default User-Agent sent in HTTP request headers
    +	DefaultUserAgent = "linodego " + Version + " https://github.com/linode/linodego"
    +)
    +
    +var (
    +	envDebug = false
    +)
    +
    +// Client is a wrapper around the Resty client
    +type Client struct {
    +	resty     *resty.Client
    +	userAgent string
    +	resources map[string]*Resource
    +	debug     bool
    +
    +	millisecondsPerPoll time.Duration
    +
    +	Images                *Resource
    +	InstanceDisks         *Resource
    +	InstanceConfigs       *Resource
    +	InstanceSnapshots     *Resource
    +	InstanceIPs           *Resource
    +	InstanceVolumes       *Resource
    +	Instances             *Resource
    +	IPAddresses           *Resource
    +	IPv6Pools             *Resource
    +	IPv6Ranges            *Resource
    +	Regions               *Resource
    +	StackScripts          *Resource
    +	Volumes               *Resource
    +	Kernels               *Resource
    +	Types                 *Resource
    +	Domains               *Resource
    +	DomainRecords         *Resource
    +	Longview              *Resource
    +	LongviewClients       *Resource
    +	LongviewSubscriptions *Resource
    +	NodeBalancers         *Resource
    +	NodeBalancerConfigs   *Resource
    +	NodeBalancerNodes     *Resource
    +	SSHKeys               *Resource
    +	Tickets               *Resource
    +	Tokens                *Resource
    +	Token                 *Resource
    +	Account               *Resource
    +	Invoices              *Resource
    +	InvoiceItems          *Resource
    +	Events                *Resource
    +	Notifications         *Resource
    +	Profile               *Resource
    +	Managed               *Resource
    +	Tags                  *Resource
    +	Users                 *Resource
    +}
    +
    +func init() {
+	// Whether or not we will enable Resty debugging output
    +	if apiDebug, ok := os.LookupEnv("LINODE_DEBUG"); ok {
    +		if parsed, err := strconv.ParseBool(apiDebug); err == nil {
    +			envDebug = parsed
    +			log.Println("[INFO] LINODE_DEBUG being set to", envDebug)
    +		} else {
    +			log.Println("[WARN] LINODE_DEBUG should be an integer, 0 or 1")
    +		}
    +	}
    +
    +}
    +
    +// SetUserAgent sets a custom user-agent for HTTP requests
    +func (c *Client) SetUserAgent(ua string) *Client {
    +	c.userAgent = ua
    +	c.resty.SetHeader("User-Agent", c.userAgent)
    +
    +	return c
    +}
    +
    +// R wraps resty's R method
    +func (c *Client) R(ctx context.Context) *resty.Request {
    +	return c.resty.R().
    +		ExpectContentType("application/json").
    +		SetHeader("Content-Type", "application/json").
    +		SetContext(ctx).
    +		SetError(APIError{})
    +}
    +
    +// SetDebug sets the debug on resty's client
    +func (c *Client) SetDebug(debug bool) *Client {
    +	c.debug = debug
    +	c.resty.SetDebug(debug)
    +	return c
    +}
    +
    +// SetBaseURL sets the base URL of the Linode v4 API (https://api.linode.com/v4)
    +func (c *Client) SetBaseURL(url string) *Client {
    +	c.resty.SetHostURL(url)
    +	return c
    +}
    +
    +// SetPollDelay sets the number of milliseconds to wait between events or status polls.
    +// Affects all WaitFor* functions.
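+// Note that the value is stored in millisecondsPerPoll and documented as a count
+// of milliseconds, even though the parameter is typed time.Duration (NewClient
+// passes 1000 * APISecondsPerPoll as the default).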
    +func (c *Client) SetPollDelay(delay time.Duration) *Client {
    +	c.millisecondsPerPoll = delay
    +	return c
    +}
    +
    +// Resource looks up a resource by name
    +func (c Client) Resource(resourceName string) *Resource {
    +	selectedResource, ok := c.resources[resourceName]
    +	if !ok {
    +		log.Fatalf("Could not find resource named '%s', exiting.", resourceName)
    +	}
    +	return selectedResource
    +}
    +
    +// NewClient factory to create new Client struct
    +func NewClient(hc *http.Client) (client Client) {
    +	restyClient := resty.NewWithClient(hc)
    +	client.resty = restyClient
    +	client.SetUserAgent(DefaultUserAgent)
    +	client.SetBaseURL(fmt.Sprintf("%s://%s/%s", APIProto, APIHost, APIVersion))
    +	client.SetPollDelay(1000 * APISecondsPerPoll)
    +
    +	resources := map[string]*Resource{
    +		stackscriptsName:          NewResource(&client, stackscriptsName, stackscriptsEndpoint, false, Stackscript{}, StackscriptsPagedResponse{}),
    +		imagesName:                NewResource(&client, imagesName, imagesEndpoint, false, Image{}, ImagesPagedResponse{}),
    +		instancesName:             NewResource(&client, instancesName, instancesEndpoint, false, Instance{}, InstancesPagedResponse{}),
    +		instanceDisksName:         NewResource(&client, instanceDisksName, instanceDisksEndpoint, true, InstanceDisk{}, InstanceDisksPagedResponse{}),
    +		instanceConfigsName:       NewResource(&client, instanceConfigsName, instanceConfigsEndpoint, true, InstanceConfig{}, InstanceConfigsPagedResponse{}),
    +		instanceSnapshotsName:     NewResource(&client, instanceSnapshotsName, instanceSnapshotsEndpoint, true, InstanceSnapshot{}, nil),
    +		instanceIPsName:           NewResource(&client, instanceIPsName, instanceIPsEndpoint, true, InstanceIP{}, nil),                           // really?
    +		instanceVolumesName:       NewResource(&client, instanceVolumesName, instanceVolumesEndpoint, true, nil, InstanceVolumesPagedResponse{}), // really?
    +		ipaddressesName:           NewResource(&client, ipaddressesName, ipaddressesEndpoint, false, nil, IPAddressesPagedResponse{}),            // really?
    +		ipv6poolsName:             NewResource(&client, ipv6poolsName, ipv6poolsEndpoint, false, nil, IPv6PoolsPagedResponse{}),                  // really?
    +		ipv6rangesName:            NewResource(&client, ipv6rangesName, ipv6rangesEndpoint, false, IPv6Range{}, IPv6RangesPagedResponse{}),
    +		regionsName:               NewResource(&client, regionsName, regionsEndpoint, false, Region{}, RegionsPagedResponse{}),
    +		volumesName:               NewResource(&client, volumesName, volumesEndpoint, false, Volume{}, VolumesPagedResponse{}),
    +		kernelsName:               NewResource(&client, kernelsName, kernelsEndpoint, false, LinodeKernel{}, LinodeKernelsPagedResponse{}),
    +		typesName:                 NewResource(&client, typesName, typesEndpoint, false, LinodeType{}, LinodeTypesPagedResponse{}),
    +		domainsName:               NewResource(&client, domainsName, domainsEndpoint, false, Domain{}, DomainsPagedResponse{}),
    +		domainRecordsName:         NewResource(&client, domainRecordsName, domainRecordsEndpoint, true, DomainRecord{}, DomainRecordsPagedResponse{}),
    +		longviewName:              NewResource(&client, longviewName, longviewEndpoint, false, nil, nil), // really?
    +		longviewclientsName:       NewResource(&client, longviewclientsName, longviewclientsEndpoint, false, LongviewClient{}, LongviewClientsPagedResponse{}),
    +		longviewsubscriptionsName: NewResource(&client, longviewsubscriptionsName, longviewsubscriptionsEndpoint, false, LongviewSubscription{}, LongviewSubscriptionsPagedResponse{}),
    +		nodebalancersName:         NewResource(&client, nodebalancersName, nodebalancersEndpoint, false, NodeBalancer{}, NodeBalancerConfigsPagedResponse{}),
    +		nodebalancerconfigsName:   NewResource(&client, nodebalancerconfigsName, nodebalancerconfigsEndpoint, true, NodeBalancerConfig{}, NodeBalancerConfigsPagedResponse{}),
    +		nodebalancernodesName:     NewResource(&client, nodebalancernodesName, nodebalancernodesEndpoint, true, NodeBalancerNode{}, NodeBalancerNodesPagedResponse{}),
    +		notificationsName:         NewResource(&client, notificationsName, notificationsEndpoint, false, Notification{}, NotificationsPagedResponse{}),
    +		sshkeysName:               NewResource(&client, sshkeysName, sshkeysEndpoint, false, SSHKey{}, SSHKeysPagedResponse{}),
    +		ticketsName:               NewResource(&client, ticketsName, ticketsEndpoint, false, Ticket{}, TicketsPagedResponse{}),
    +		tokensName:                NewResource(&client, tokensName, tokensEndpoint, false, Token{}, TokensPagedResponse{}),
    +		accountName:               NewResource(&client, accountName, accountEndpoint, false, Account{}, nil), // really?
    +		eventsName:                NewResource(&client, eventsName, eventsEndpoint, false, Event{}, EventsPagedResponse{}),
    +		invoicesName:              NewResource(&client, invoicesName, invoicesEndpoint, false, Invoice{}, InvoicesPagedResponse{}),
    +		invoiceItemsName:          NewResource(&client, invoiceItemsName, invoiceItemsEndpoint, true, InvoiceItem{}, InvoiceItemsPagedResponse{}),
    +		profileName:               NewResource(&client, profileName, profileEndpoint, false, nil, nil), // really?
    +		managedName:               NewResource(&client, managedName, managedEndpoint, false, nil, nil), // really?
    +		tagsName:                  NewResource(&client, tagsName, tagsEndpoint, false, Tag{}, TagsPagedResponse{}),
    +		usersName:                 NewResource(&client, usersName, usersEndpoint, false, User{}, UsersPagedResponse{}),
    +	}
    +
    +	client.resources = resources
    +
    +	client.SetDebug(envDebug)
    +	client.Images = resources[imagesName]
    +	client.StackScripts = resources[stackscriptsName]
    +	client.Instances = resources[instancesName]
    +	client.Regions = resources[regionsName]
    +	client.InstanceDisks = resources[instanceDisksName]
    +	client.InstanceConfigs = resources[instanceConfigsName]
    +	client.InstanceSnapshots = resources[instanceSnapshotsName]
    +	client.InstanceIPs = resources[instanceIPsName]
    +	client.InstanceVolumes = resources[instanceVolumesName]
    +	client.IPAddresses = resources[ipaddressesName]
    +	client.IPv6Pools = resources[ipv6poolsName]
    +	client.IPv6Ranges = resources[ipv6rangesName]
    +	client.Volumes = resources[volumesName]
    +	client.Kernels = resources[kernelsName]
    +	client.Types = resources[typesName]
    +	client.Domains = resources[domainsName]
    +	client.DomainRecords = resources[domainRecordsName]
    +	client.Longview = resources[longviewName]
    +	client.LongviewSubscriptions = resources[longviewsubscriptionsName]
    +	client.NodeBalancers = resources[nodebalancersName]
    +	client.NodeBalancerConfigs = resources[nodebalancerconfigsName]
    +	client.NodeBalancerNodes = resources[nodebalancernodesName]
    +	client.Notifications = resources[notificationsName]
    +	client.SSHKeys = resources[sshkeysName]
    +	client.Tickets = resources[ticketsName]
    +	client.Tokens = resources[tokensName]
    +	client.Account = resources[accountName]
    +	client.Events = resources[eventsName]
    +	client.Invoices = resources[invoicesName]
    +	client.Profile = resources[profileName]
    +	client.Managed = resources[managedName]
    +	client.Tags = resources[tagsName]
    +	client.Users = resources[usersName]
    +	return
    +}
    +
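+// copyBool, copyInt, copyString, and copyTime return a pointer to a copy of the
+// pointed-to value, or nil for a nil input. The GetUpdateOptions helpers use them
+// so that update options do not alias fields of the source structs.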
    +func copyBool(bPtr *bool) *bool {
    +	if bPtr == nil {
    +		return nil
    +	}
    +	var t = *bPtr
    +	return &t
    +}
    +
    +func copyInt(iPtr *int) *int {
    +	if iPtr == nil {
    +		return nil
    +	}
    +	var t = *iPtr
    +	return &t
    +}
    +
    +func copyString(sPtr *string) *string {
    +	if sPtr == nil {
    +		return nil
    +	}
    +	var t = *sPtr
    +	return &t
    +}
    +
    +func copyTime(tPtr *time.Time) *time.Time {
    +	if tPtr == nil {
    +		return nil
    +	}
    +	var t = *tPtr
    +	return &t
    +}
    diff --git a/vendor/github.com/linode/linodego/domain_records.go b/vendor/github.com/linode/linodego/domain_records.go
    new file mode 100644
    index 000000000000..215ac800451b
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/domain_records.go
    @@ -0,0 +1,195 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +)
    +
    +// DomainRecord represents a DomainRecord object
    +type DomainRecord struct {
    +	ID       int              `json:"id"`
    +	Type     DomainRecordType `json:"type"`
    +	Name     string           `json:"name"`
    +	Target   string           `json:"target"`
    +	Priority int              `json:"priority"`
    +	Weight   int              `json:"weight"`
    +	Port     int              `json:"port"`
    +	Service  *string          `json:"service"`
    +	Protocol *string          `json:"protocol"`
    +	TTLSec   int              `json:"ttl_sec"`
    +	Tag      *string          `json:"tag"`
    +}
    +
    +// DomainRecordCreateOptions fields are those accepted by CreateDomainRecord
    +type DomainRecordCreateOptions struct {
    +	Type     DomainRecordType `json:"type"`
    +	Name     string           `json:"name"`
    +	Target   string           `json:"target"`
    +	Priority *int             `json:"priority,omitempty"`
    +	Weight   *int             `json:"weight,omitempty"`
    +	Port     *int             `json:"port,omitempty"`
    +	Service  *string          `json:"service,omitempty"`
    +	Protocol *string          `json:"protocol,omitempty"`
    +	TTLSec   int              `json:"ttl_sec,omitempty"` // 0 is not accepted by Linode, so can be omitted
    +	Tag      *string          `json:"tag,omitempty"`
    +}
    +
    +// DomainRecordUpdateOptions fields are those accepted by UpdateDomainRecord
    +type DomainRecordUpdateOptions struct {
    +	Type     DomainRecordType `json:"type,omitempty"`
    +	Name     string           `json:"name,omitempty"`
    +	Target   string           `json:"target,omitempty"`
    +	Priority *int             `json:"priority,omitempty"` // 0 is valid, so omit only nil values
    +	Weight   *int             `json:"weight,omitempty"`   // 0 is valid, so omit only nil values
    +	Port     *int             `json:"port,omitempty"`     // 0 is valid to spec, so omit only nil values
    +	Service  *string          `json:"service,omitempty"`
    +	Protocol *string          `json:"protocol,omitempty"`
    +	TTLSec   int              `json:"ttl_sec,omitempty"` // 0 is not accepted by Linode, so can be omitted
    +	Tag      *string          `json:"tag,omitempty"`
    +}
    +
    +// DomainRecordType constants start with RecordType and include Linode API Domain Record Types
    +type DomainRecordType string
    +
+// DomainRecordType constants are the DNS record types a DomainRecord can assign
    +const (
    +	RecordTypeA     DomainRecordType = "A"
    +	RecordTypeAAAA  DomainRecordType = "AAAA"
    +	RecordTypeNS    DomainRecordType = "NS"
    +	RecordTypeMX    DomainRecordType = "MX"
    +	RecordTypeCNAME DomainRecordType = "CNAME"
    +	RecordTypeTXT   DomainRecordType = "TXT"
    +	RecordTypeSRV   DomainRecordType = "SRV"
    +	RecordTypePTR   DomainRecordType = "PTR"
    +	RecordTypeCAA   DomainRecordType = "CAA"
    +)
    +
    +// GetUpdateOptions converts a DomainRecord to DomainRecordUpdateOptions for use in UpdateDomainRecord
    +func (d DomainRecord) GetUpdateOptions() (du DomainRecordUpdateOptions) {
    +	du.Type = d.Type
    +	du.Name = d.Name
    +	du.Target = d.Target
    +	du.Priority = copyInt(&d.Priority)
    +	du.Weight = copyInt(&d.Weight)
    +	du.Port = copyInt(&d.Port)
    +	du.Service = copyString(d.Service)
    +	du.Protocol = copyString(d.Protocol)
    +	du.TTLSec = d.TTLSec
    +	du.Tag = copyString(d.Tag)
    +	return
    +}
    +
    +// DomainRecordsPagedResponse represents a paginated DomainRecord API response
    +type DomainRecordsPagedResponse struct {
    +	*PageOptions
    +	Data []DomainRecord `json:"data"`
    +}
    +
+// endpointWithID gets the endpoint URL for DomainRecords associated with a specific Domain
    +func (DomainRecordsPagedResponse) endpointWithID(c *Client, id int) string {
    +	endpoint, err := c.DomainRecords.endpointWithID(id)
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends DomainRecords when processing paginated DomainRecord responses
    +func (resp *DomainRecordsPagedResponse) appendData(r *DomainRecordsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListDomainRecords lists DomainRecords
    +func (c *Client) ListDomainRecords(ctx context.Context, domainID int, opts *ListOptions) ([]DomainRecord, error) {
    +	response := DomainRecordsPagedResponse{}
    +	err := c.listHelperWithID(ctx, &response, domainID, opts)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (d *DomainRecord) fixDates() *DomainRecord {
    +	return d
    +}
    +
    +// GetDomainRecord gets the domainrecord with the provided ID
    +func (c *Client) GetDomainRecord(ctx context.Context, domainID int, id int) (*DomainRecord, error) {
    +	e, err := c.DomainRecords.endpointWithID(domainID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&DomainRecord{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*DomainRecord), nil
    +}
    +
    +// CreateDomainRecord creates a DomainRecord
    +func (c *Client) CreateDomainRecord(ctx context.Context, domainID int, domainrecord DomainRecordCreateOptions) (*DomainRecord, error) {
    +	var body string
    +	e, err := c.DomainRecords.endpointWithID(domainID)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&DomainRecord{})
    +
    +	bodyData, err := json.Marshal(domainrecord)
    +	if err != nil {
    +		return nil, NewError(err)
    +	}
    +	body = string(bodyData)
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*DomainRecord).fixDates(), nil
    +}
    +
    +// UpdateDomainRecord updates the DomainRecord with the specified id
    +func (c *Client) UpdateDomainRecord(ctx context.Context, domainID int, id int, domainrecord DomainRecordUpdateOptions) (*DomainRecord, error) {
    +	var body string
    +	e, err := c.DomainRecords.endpointWithID(domainID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	req := c.R(ctx).SetResult(&DomainRecord{})
    +
    +	if bodyData, err := json.Marshal(domainrecord); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*DomainRecord).fixDates(), nil
    +}
    +
    +// DeleteDomainRecord deletes the DomainRecord with the specified id
    +func (c *Client) DeleteDomainRecord(ctx context.Context, domainID int, id int) error {
    +	e, err := c.DomainRecords.endpointWithID(domainID)
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
    +	return err
    +}
    diff --git a/vendor/github.com/linode/linodego/domains.go b/vendor/github.com/linode/linodego/domains.go
    new file mode 100644
    index 000000000000..43e4a65f5eef
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/domains.go
    @@ -0,0 +1,295 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +)
    +
    +// Domain represents a Domain object
    +type Domain struct {
    +	//	This Domain's unique ID
    +	ID int `json:"id"`
    +
    +	// The domain this Domain represents. These must be unique in our system; you cannot have two Domains representing the same domain.
    +	Domain string `json:"domain"`
    +
    +	// If this Domain represents the authoritative source of information for the domain it describes, or if it is a read-only copy of a master (also called a slave).
    +	Type DomainType `json:"type"` // Enum:"master" "slave"
    +
    +	// Deprecated: The group this Domain belongs to. This is for display purposes only.
    +	Group string `json:"group"`
    +
    +	// Used to control whether this Domain is currently being rendered.
    +	Status DomainStatus `json:"status"` // Enum:"disabled" "active" "edit_mode" "has_errors"
    +
    +	// A description for this Domain. This is for display purposes only.
    +	Description string `json:"description"`
    +
    +	// Start of Authority email address. This is required for master Domains.
    +	SOAEmail string `json:"soa_email"`
    +
    +	// The interval, in seconds, at which a failed refresh should be retried.
    +	// Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
    +	RetrySec int `json:"retry_sec"`
    +
    +	// The IP addresses representing the master DNS for this Domain.
    +	MasterIPs []string `json:"master_ips"`
    +
    +	// The list of IPs that may perform a zone transfer for this Domain. This is potentially dangerous, and should be set to an empty list unless you intend to use it.
    +	AXfrIPs []string `json:"axfr_ips"`
    +
    +	// An array of tags applied to this object. Tags are for organizational purposes only.
    +	Tags []string `json:"tags"`
    +
    +	// The amount of time in seconds that may pass before this Domain is no longer authoritative. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
    +	ExpireSec int `json:"expire_sec"`
    +
    +	// The amount of time in seconds before this Domain should be refreshed. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
    +	RefreshSec int `json:"refresh_sec"`
    +
    +	// "Time to Live" - the amount of time in seconds that this Domain's records may be cached by resolvers or other domain servers. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
    +	TTLSec int `json:"ttl_sec"`
    +}
    +
    +// DomainCreateOptions fields are those accepted by CreateDomain
    +type DomainCreateOptions struct {
    +	// The domain this Domain represents. These must be unique in our system; you cannot have two Domains representing the same domain.
    +	Domain string `json:"domain"`
    +
    +	// If this Domain represents the authoritative source of information for the domain it describes, or if it is a read-only copy of a master (also called a slave).
    +	// Enum:"master" "slave"
    +	Type DomainType `json:"type"`
    +
    +	// Deprecated: The group this Domain belongs to. This is for display purposes only.
    +	Group string `json:"group,omitempty"`
    +
    +	// Used to control whether this Domain is currently being rendered.
    +	// Enum:"disabled" "active" "edit_mode" "has_errors"
    +	Status DomainStatus `json:"status,omitempty"`
    +
    +	// A description for this Domain. This is for display purposes only.
    +	Description string `json:"description,omitempty"`
    +
    +	// Start of Authority email address. This is required for master Domains.
    +	SOAEmail string `json:"soa_email,omitempty"`
    +
    +	// The interval, in seconds, at which a failed refresh should be retried.
    +	// Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
    +	RetrySec int `json:"retry_sec,omitempty"`
    +
    +	// The IP addresses representing the master DNS for this Domain.
    +	MasterIPs []string `json:"master_ips,omitempty"`
    +
    +	// The list of IPs that may perform a zone transfer for this Domain. This is potentially dangerous, and should be set to an empty list unless you intend to use it.
    +	AXfrIPs []string `json:"axfr_ips,omitempty"`
    +
    +	// An array of tags applied to this object. Tags are for organizational purposes only.
    +	Tags []string `json:"tags"`
    +
    +	// The amount of time in seconds that may pass before this Domain is no longer authoritative. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
    +	ExpireSec int `json:"expire_sec,omitempty"`
    +
    +	// The amount of time in seconds before this Domain should be refreshed. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
    +	RefreshSec int `json:"refresh_sec,omitempty"`
    +
    +	// "Time to Live" - the amount of time in seconds that this Domain's records may be cached by resolvers or other domain servers. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
    +	TTLSec int `json:"ttl_sec,omitempty"`
    +}
    +
+// DomainUpdateOptions fields are those accepted by UpdateDomain
    +type DomainUpdateOptions struct {
    +	// The domain this Domain represents. These must be unique in our system; you cannot have two Domains representing the same domain.
    +	Domain string `json:"domain,omitempty"`
    +
    +	// If this Domain represents the authoritative source of information for the domain it describes, or if it is a read-only copy of a master (also called a slave).
    +	// Enum:"master" "slave"
    +	Type DomainType `json:"type,omitempty"`
    +
    +	// Deprecated: The group this Domain belongs to. This is for display purposes only.
    +	Group string `json:"group,omitempty"`
    +
    +	// Used to control whether this Domain is currently being rendered.
    +	// Enum:"disabled" "active" "edit_mode" "has_errors"
    +	Status DomainStatus `json:"status,omitempty"`
    +
    +	// A description for this Domain. This is for display purposes only.
    +	Description string `json:"description,omitempty"`
    +
    +	// Start of Authority email address. This is required for master Domains.
    +	SOAEmail string `json:"soa_email,omitempty"`
    +
    +	// The interval, in seconds, at which a failed refresh should be retried.
    +	// Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
    +	RetrySec int `json:"retry_sec,omitempty"`
    +
    +	// The IP addresses representing the master DNS for this Domain.
    +	MasterIPs []string `json:"master_ips,omitempty"`
    +
    +	// The list of IPs that may perform a zone transfer for this Domain. This is potentially dangerous, and should be set to an empty list unless you intend to use it.
    +	AXfrIPs []string `json:"axfr_ips,omitempty"`
    +
    +	// An array of tags applied to this object. Tags are for organizational purposes only.
    +	Tags []string `json:"tags"`
    +
    +	// The amount of time in seconds that may pass before this Domain is no longer authoritative. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
    +	ExpireSec int `json:"expire_sec,omitempty"`
    +
    +	// The amount of time in seconds before this Domain should be refreshed. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
    +	RefreshSec int `json:"refresh_sec,omitempty"`
    +
    +	// "Time to Live" - the amount of time in seconds that this Domain's records may be cached by resolvers or other domain servers. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
    +	TTLSec int `json:"ttl_sec,omitempty"`
    +}
    +
    +// DomainType constants start with DomainType and include Linode API Domain Type values
    +type DomainType string
    +
    +// DomainType constants reflect the DNS zone type of a Domain
    +const (
    +	DomainTypeMaster DomainType = "master"
    +	DomainTypeSlave  DomainType = "slave"
    +)
    +
    +// DomainStatus constants start with DomainStatus and include Linode API Domain Status values
    +type DomainStatus string
    +
    +// DomainStatus constants reflect the current status of a Domain
    +const (
    +	DomainStatusDisabled  DomainStatus = "disabled"
    +	DomainStatusActive    DomainStatus = "active"
    +	DomainStatusEditMode  DomainStatus = "edit_mode"
    +	DomainStatusHasErrors DomainStatus = "has_errors"
    +)
    +
    +// GetUpdateOptions converts a Domain to DomainUpdateOptions for use in UpdateDomain
    +func (d Domain) GetUpdateOptions() (du DomainUpdateOptions) {
    +	du.Domain = d.Domain
    +	du.Type = d.Type
    +	du.Group = d.Group
    +	du.Status = d.Status
    +	du.Description = d.Description
    +	du.SOAEmail = d.SOAEmail
    +	du.RetrySec = d.RetrySec
    +	du.MasterIPs = d.MasterIPs
    +	du.AXfrIPs = d.AXfrIPs
    +	du.Tags = d.Tags
    +	du.ExpireSec = d.ExpireSec
    +	du.RefreshSec = d.RefreshSec
    +	du.TTLSec = d.TTLSec
    +	return
    +}
    +
    +// DomainsPagedResponse represents a paginated Domain API response
    +type DomainsPagedResponse struct {
    +	*PageOptions
    +	Data []Domain `json:"data"`
    +}
    +
    +// endpoint gets the endpoint URL for Domain
    +func (DomainsPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.Domains.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends Domains when processing paginated Domain responses
    +func (resp *DomainsPagedResponse) appendData(r *DomainsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListDomains lists Domains
    +func (c *Client) ListDomains(ctx context.Context, opts *ListOptions) ([]Domain, error) {
    +	response := DomainsPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
+// fixDates is a no-op for Domain; there are no timestamps to convert
    +func (d *Domain) fixDates() *Domain {
    +	return d
    +}
    +
    +// GetDomain gets the domain with the provided ID
    +func (c *Client) GetDomain(ctx context.Context, id int) (*Domain, error) {
    +	e, err := c.Domains.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&Domain{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Domain).fixDates(), nil
    +}
    +
    +// CreateDomain creates a Domain
    +func (c *Client) CreateDomain(ctx context.Context, domain DomainCreateOptions) (*Domain, error) {
    +	var body string
    +	e, err := c.Domains.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&Domain{})
    +
    +	bodyData, err := json.Marshal(domain)
    +	if err != nil {
    +		return nil, NewError(err)
    +	}
    +	body = string(bodyData)
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Domain).fixDates(), nil
    +}
    +
    +// UpdateDomain updates the Domain with the specified id
    +func (c *Client) UpdateDomain(ctx context.Context, id int, domain DomainUpdateOptions) (*Domain, error) {
    +	var body string
    +	e, err := c.Domains.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	req := c.R(ctx).SetResult(&Domain{})
    +
    +	if bodyData, err := json.Marshal(domain); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Domain).fixDates(), nil
    +}
    +
    +// DeleteDomain deletes the Domain with the specified id
    +func (c *Client) DeleteDomain(ctx context.Context, id int) error {
    +	e, err := c.Domains.Endpoint()
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
    +	return err
    +}
    diff --git a/vendor/github.com/linode/linodego/env.sample b/vendor/github.com/linode/linodego/env.sample
    new file mode 100644
    index 000000000000..b1c9d1803492
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/env.sample
    @@ -0,0 +1,2 @@
    +LINODE_TOKEN=
    +LINODE_DEBUG=0
    diff --git a/vendor/github.com/linode/linodego/errors.go b/vendor/github.com/linode/linodego/errors.go
    new file mode 100644
    index 000000000000..9ceb881a21b1
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/errors.go
    @@ -0,0 +1,109 @@
    +package linodego
    +
    +import (
    +	"fmt"
    +	"log"
    +	"net/http"
    +	"strings"
    +
    +	"gopkg.in/resty.v1"
    +)
    +
    +const (
    +	// ErrorFromString is the Code identifying Errors created by string types
    +	ErrorFromString = 1
    +	// ErrorFromError is the Code identifying Errors created by error types
    +	ErrorFromError = 2
    +	// ErrorFromStringer is the Code identifying Errors created by fmt.Stringer types
    +	ErrorFromStringer = 3
    +)
    +
    +// Error wraps the LinodeGo error with the relevant http.Response
    +type Error struct {
    +	Response *http.Response
    +	Code     int
    +	Message  string
    +}
    +
    +// APIErrorReason is an individual invalid request message returned by the Linode API
    +type APIErrorReason struct {
    +	Reason string `json:"reason"`
    +	Field  string `json:"field"`
    +}
    +
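+// Error formats an APIErrorReason, prefixing the Reason with the offending Field when one is set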
    +func (r APIErrorReason) Error() string {
    +	if len(r.Field) == 0 {
    +		return r.Reason
    +	}
    +	return fmt.Sprintf("[%s] %s", r.Field, r.Reason)
    +}
    +
    +// APIError is the error-set returned by the Linode API when presented with an invalid request
    +type APIError struct {
    +	Errors []APIErrorReason `json:"errors"`
    +}
    +
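+// coupleAPIErrors checks a resty response and error pair: transport errors and
+// API error responses are wrapped via NewError, otherwise the response is
+// returned unchanged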
    +func coupleAPIErrors(r *resty.Response, err error) (*resty.Response, error) {
    +	if err != nil {
    +		return nil, NewError(err)
    +	}
    +
    +	if r.Error() != nil {
    +		apiError, ok := r.Error().(*APIError)
+		if !ok || len(apiError.Errors) == 0 {
    +			return r, nil
    +		}
    +		return nil, NewError(r)
    +	}
    +
    +	return r, nil
    +}
    +
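+// Error joins every APIErrorReason message into a single semicolon-separated string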
    +func (e APIError) Error() string {
    +	var x []string
    +	for _, msg := range e.Errors {
    +		x = append(x, msg.Error())
    +	}
    +	return strings.Join(x, "; ")
    +}
    +
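+// Error formats the Error's Code and Message as a single string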
    +func (g Error) Error() string {
    +	return fmt.Sprintf("[%03d] %s", g.Code, g.Message)
    +}
    +
    +// NewError creates a linodego.Error with a Code identifying the source err type,
    +// - ErrorFromString   (1) from a string
    +// - ErrorFromError    (2) for an error
    +// - ErrorFromStringer (3) for a Stringer
    +// - HTTP Status Codes (100-600) for a resty.Response object
    +func NewError(err interface{}) *Error {
    +	if err == nil {
    +		return nil
    +	}
    +
    +	switch e := err.(type) {
    +	case *Error:
    +		return e
    +	case *resty.Response:
    +		apiError, ok := e.Error().(*APIError)
    +
    +		if !ok {
    +			log.Fatalln("Unexpected Resty Error Response")
    +		}
    +
    +		return &Error{
    +			Code:     e.RawResponse.StatusCode,
    +			Message:  apiError.Error(),
    +			Response: e.RawResponse,
    +		}
    +	case error:
    +		return &Error{Code: ErrorFromError, Message: e.Error()}
    +	case string:
    +		return &Error{Code: ErrorFromString, Message: e}
    +	case fmt.Stringer:
    +		return &Error{Code: ErrorFromStringer, Message: e.String()}
    +	default:
    +		log.Fatalln("Unsupported type to linodego.NewError")
    +		panic(err)
    +	}
    +}
    diff --git a/vendor/github.com/linode/linodego/images.go b/vendor/github.com/linode/linodego/images.go
    new file mode 100644
    index 000000000000..979ce2ed101a
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/images.go
    @@ -0,0 +1,168 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"time"
    +)
    +
    +// Image represents a deployable Image object for use with Linode Instances
    +type Image struct {
    +	CreatedStr  string `json:"created"`
    +	ExpiryStr   string `json:"expiry"`
    +	ID          string `json:"id"`
    +	CreatedBy   string `json:"created_by"`
    +	Label       string `json:"label"`
    +	Description string `json:"description"`
    +	Type        string `json:"type"`
    +	Vendor      string `json:"vendor"`
    +	Size        int    `json:"size"`
    +	IsPublic    bool   `json:"is_public"`
    +	Deprecated  bool   `json:"deprecated"`
    +
    +	Created *time.Time `json:"-"`
    +	Expiry  *time.Time `json:"-"`
    +}
    +
    +// ImageCreateOptions fields are those accepted by CreateImage
    +type ImageCreateOptions struct {
    +	DiskID      int    `json:"disk_id"`
    +	Label       string `json:"label"`
    +	Description string `json:"description,omitempty"`
    +}
    +
    +// ImageUpdateOptions fields are those accepted by UpdateImage
    +type ImageUpdateOptions struct {
    +	Label       string  `json:"label,omitempty"`
    +	Description *string `json:"description,omitempty"`
    +}
    +
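+// fixDates converts JSON timestamps to Go time.Time values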
    +func (i *Image) fixDates() *Image {
    +	i.Created, _ = parseDates(i.CreatedStr)
    +
    +	if len(i.ExpiryStr) > 0 {
    +		i.Expiry, _ = parseDates(i.ExpiryStr)
    +	} else {
    +		i.Expiry = nil
    +	}
    +	return i
    +}
    +
    +// GetUpdateOptions converts an Image to ImageUpdateOptions for use in UpdateImage
    +func (i Image) GetUpdateOptions() (iu ImageUpdateOptions) {
    +	iu.Label = i.Label
    +	iu.Description = copyString(&i.Description)
    +	return
    +}
    +
+// ImagesPagedResponse represents a linode API response for listing Images
    +type ImagesPagedResponse struct {
    +	*PageOptions
    +	Data []Image `json:"data"`
    +}
    +
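+// endpoint gets the endpoint URL for Image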
    +func (ImagesPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.Images.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
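+// appendData appends Images when processing paginated Image responses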
    +func (resp *ImagesPagedResponse) appendData(r *ImagesPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListImages lists Images
    +func (c *Client) ListImages(ctx context.Context, opts *ListOptions) ([]Image, error) {
    +	response := ImagesPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// GetImage gets the Image with the provided ID
    +func (c *Client) GetImage(ctx context.Context, id string) (*Image, error) {
    +	e, err := c.Images.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, id)
    +	r, err := coupleAPIErrors(c.Images.R(ctx).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Image).fixDates(), nil
    +}
    +
+// CreateImage creates an Image
    +func (c *Client) CreateImage(ctx context.Context, createOpts ImageCreateOptions) (*Image, error) {
    +	var body string
    +	e, err := c.Images.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&Image{})
    +
    +	if bodyData, err := json.Marshal(createOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Image).fixDates(), nil
    +}
    +
    +// UpdateImage updates the Image with the specified id
    +func (c *Client) UpdateImage(ctx context.Context, id string, updateOpts ImageUpdateOptions) (*Image, error) {
    +	var body string
    +	e, err := c.Images.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, id)
    +
    +	req := c.R(ctx).SetResult(&Image{})
    +
    +	if bodyData, err := json.Marshal(updateOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Image).fixDates(), nil
    +}
    +
    +// DeleteImage deletes the Image with the specified id
    +func (c *Client) DeleteImage(ctx context.Context, id string) error {
    +	e, err := c.Images.Endpoint()
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, id)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
    +	return err
    +}
    diff --git a/vendor/github.com/linode/linodego/instance_configs.go b/vendor/github.com/linode/linodego/instance_configs.go
    new file mode 100644
    index 000000000000..979a27495c51
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/instance_configs.go
    @@ -0,0 +1,246 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"time"
    +)
    +
    +// InstanceConfig represents all of the settings that control the boot and run configuration of a Linode Instance
    +type InstanceConfig struct {
    +	CreatedStr string `json:"created"`
    +	UpdatedStr string `json:"updated"`
    +
    +	ID          int                      `json:"id"`
    +	Label       string                   `json:"label"`
    +	Comments    string                   `json:"comments"`
    +	Devices     *InstanceConfigDeviceMap `json:"devices"`
    +	Helpers     *InstanceConfigHelpers   `json:"helpers"`
    +	MemoryLimit int                      `json:"memory_limit"`
    +	Kernel      string                   `json:"kernel"`
    +	InitRD      *int                     `json:"init_rd"`
    +	RootDevice  string                   `json:"root_device"`
    +	RunLevel    string                   `json:"run_level"`
    +	VirtMode    string                   `json:"virt_mode"`
    +	Created     *time.Time               `json:"-"`
    +	Updated     *time.Time               `json:"-"`
    +}
    +
    +// InstanceConfigDevice contains either the DiskID or VolumeID assigned to a Config Device
    +type InstanceConfigDevice struct {
    +	DiskID   int `json:"disk_id,omitempty"`
    +	VolumeID int `json:"volume_id,omitempty"`
    +}
    +
    +// InstanceConfigDeviceMap contains SDA-SDH InstanceConfigDevice settings
    +type InstanceConfigDeviceMap struct {
    +	SDA *InstanceConfigDevice `json:"sda,omitempty"`
    +	SDB *InstanceConfigDevice `json:"sdb,omitempty"`
    +	SDC *InstanceConfigDevice `json:"sdc,omitempty"`
    +	SDD *InstanceConfigDevice `json:"sdd,omitempty"`
    +	SDE *InstanceConfigDevice `json:"sde,omitempty"`
    +	SDF *InstanceConfigDevice `json:"sdf,omitempty"`
    +	SDG *InstanceConfigDevice `json:"sdg,omitempty"`
    +	SDH *InstanceConfigDevice `json:"sdh,omitempty"`
    +}
    +
    +// InstanceConfigHelpers are Instance Config options that control Linux distribution specific tweaks
    +type InstanceConfigHelpers struct {
    +	UpdateDBDisabled  bool `json:"updatedb_disabled"`
    +	Distro            bool `json:"distro"`
    +	ModulesDep        bool `json:"modules_dep"`
    +	Network           bool `json:"network"`
    +	DevTmpFsAutomount bool `json:"devtmpfs_automount"`
    +}
    +
    +// InstanceConfigsPagedResponse represents a paginated InstanceConfig API response
    +type InstanceConfigsPagedResponse struct {
    +	*PageOptions
    +	Data []InstanceConfig `json:"data"`
    +}
    +
    +// InstanceConfigCreateOptions are InstanceConfig settings that can be used at creation
    +type InstanceConfigCreateOptions struct {
    +	Label       string                  `json:"label,omitempty"`
    +	Comments    string                  `json:"comments,omitempty"`
    +	Devices     InstanceConfigDeviceMap `json:"devices"`
    +	Helpers     *InstanceConfigHelpers  `json:"helpers,omitempty"`
    +	MemoryLimit int                     `json:"memory_limit,omitempty"`
    +	Kernel      string                  `json:"kernel,omitempty"`
    +	InitRD      int                     `json:"init_rd,omitempty"`
    +	RootDevice  *string                 `json:"root_device,omitempty"`
    +	RunLevel    string                  `json:"run_level,omitempty"`
    +	VirtMode    string                  `json:"virt_mode,omitempty"`
    +}
    +
    +// InstanceConfigUpdateOptions are InstanceConfig settings that can be used in updates
    +type InstanceConfigUpdateOptions struct {
    +	Label    string                   `json:"label,omitempty"`
    +	Comments string                   `json:"comments"`
    +	Devices  *InstanceConfigDeviceMap `json:"devices,omitempty"`
    +	Helpers  *InstanceConfigHelpers   `json:"helpers,omitempty"`
+	// MemoryLimit of 0 means unlimited; this field is never omitted
    +	MemoryLimit int    `json:"memory_limit"`
    +	Kernel      string `json:"kernel,omitempty"`
    +	// InitRD is nullable, permit the sending of null
    +	InitRD     *int   `json:"init_rd"`
    +	RootDevice string `json:"root_device,omitempty"`
    +	RunLevel   string `json:"run_level,omitempty"`
    +	VirtMode   string `json:"virt_mode,omitempty"`
    +}
    +
+// GetCreateOptions converts an InstanceConfig to InstanceConfigCreateOptions for use in CreateInstanceConfig
    +func (i InstanceConfig) GetCreateOptions() InstanceConfigCreateOptions {
    +	initrd := 0
    +	if i.InitRD != nil {
    +		initrd = *i.InitRD
    +	}
    +	return InstanceConfigCreateOptions{
    +		Label:       i.Label,
    +		Comments:    i.Comments,
    +		Devices:     *i.Devices,
    +		Helpers:     i.Helpers,
    +		MemoryLimit: i.MemoryLimit,
    +		Kernel:      i.Kernel,
    +		InitRD:      initrd,
    +		RootDevice:  copyString(&i.RootDevice),
    +		RunLevel:    i.RunLevel,
    +		VirtMode:    i.VirtMode,
    +	}
    +}
    +
+// GetUpdateOptions converts an InstanceConfig to InstanceConfigUpdateOptions for use in UpdateInstanceConfig
    +func (i InstanceConfig) GetUpdateOptions() InstanceConfigUpdateOptions {
    +	return InstanceConfigUpdateOptions{
    +		Label:       i.Label,
    +		Comments:    i.Comments,
    +		Devices:     i.Devices,
    +		Helpers:     i.Helpers,
    +		MemoryLimit: i.MemoryLimit,
    +		Kernel:      i.Kernel,
    +		InitRD:      copyInt(i.InitRD),
    +		RootDevice:  i.RootDevice,
    +		RunLevel:    i.RunLevel,
    +		VirtMode:    i.VirtMode,
    +	}
    +}
    +
    +// endpointWithID gets the endpoint URL for InstanceConfigs of a given Instance
    +func (InstanceConfigsPagedResponse) endpointWithID(c *Client, id int) string {
    +	endpoint, err := c.InstanceConfigs.endpointWithID(id)
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends InstanceConfigs when processing paginated InstanceConfig responses
    +func (resp *InstanceConfigsPagedResponse) appendData(r *InstanceConfigsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListInstanceConfigs lists InstanceConfigs
    +func (c *Client) ListInstanceConfigs(ctx context.Context, linodeID int, opts *ListOptions) ([]InstanceConfig, error) {
    +	response := InstanceConfigsPagedResponse{}
    +	err := c.listHelperWithID(ctx, &response, linodeID, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (i *InstanceConfig) fixDates() *InstanceConfig {
    +	i.Created, _ = parseDates(i.CreatedStr)
    +	i.Updated, _ = parseDates(i.UpdatedStr)
    +	return i
    +}
    +
+// GetInstanceConfig gets the InstanceConfig with the provided ID
    +func (c *Client) GetInstanceConfig(ctx context.Context, linodeID int, configID int) (*InstanceConfig, error) {
    +	e, err := c.InstanceConfigs.endpointWithID(linodeID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, configID)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceConfig{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*InstanceConfig).fixDates(), nil
    +}
    +
    +// CreateInstanceConfig creates a new InstanceConfig for the given Instance
    +func (c *Client) CreateInstanceConfig(ctx context.Context, linodeID int, createOpts InstanceConfigCreateOptions) (*InstanceConfig, error) {
    +	var body string
    +	e, err := c.InstanceConfigs.endpointWithID(linodeID)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&InstanceConfig{})
    +
    +	if bodyData, err := json.Marshal(createOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, err
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	return r.Result().(*InstanceConfig).fixDates(), nil
    +}
    +
+// UpdateInstanceConfig updates an InstanceConfig for the given Instance
    +func (c *Client) UpdateInstanceConfig(ctx context.Context, linodeID int, configID int, updateOpts InstanceConfigUpdateOptions) (*InstanceConfig, error) {
    +	var body string
    +	e, err := c.InstanceConfigs.endpointWithID(linodeID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, configID)
    +	req := c.R(ctx).SetResult(&InstanceConfig{})
    +
    +	if bodyData, err := json.Marshal(updateOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, err
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	return r.Result().(*InstanceConfig).fixDates(), nil
    +}
    +
    +// RenameInstanceConfig renames an InstanceConfig
    +func (c *Client) RenameInstanceConfig(ctx context.Context, linodeID int, configID int, label string) (*InstanceConfig, error) {
    +	return c.UpdateInstanceConfig(ctx, linodeID, configID, InstanceConfigUpdateOptions{Label: label})
    +}
    +
    +// DeleteInstanceConfig deletes a Linode InstanceConfig
    +func (c *Client) DeleteInstanceConfig(ctx context.Context, linodeID int, configID int) error {
    +	e, err := c.InstanceConfigs.endpointWithID(linodeID)
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, configID)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
    +	return err
    +}
    diff --git a/vendor/github.com/linode/linodego/instance_disks.go b/vendor/github.com/linode/linodego/instance_disks.go
    new file mode 100644
    index 000000000000..9b43d6b8d94c
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/instance_disks.go
    @@ -0,0 +1,251 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"time"
    +)
    +
    +// InstanceDisk represents an Instance Disk object
    +type InstanceDisk struct {
    +	CreatedStr string `json:"created"`
    +	UpdatedStr string `json:"updated"`
    +
    +	ID         int            `json:"id"`
    +	Label      string         `json:"label"`
    +	Status     DiskStatus     `json:"status"`
    +	Size       int            `json:"size"`
    +	Filesystem DiskFilesystem `json:"filesystem"`
    +	Created    time.Time      `json:"-"`
    +	Updated    time.Time      `json:"-"`
    +}
    +
    +// DiskFilesystem constants start with Filesystem and include Linode API Filesystems
    +type DiskFilesystem string
    +
    +// DiskFilesystem constants represent the filesystems types an Instance Disk may use
    +const (
    +	FilesystemRaw    DiskFilesystem = "raw"
    +	FilesystemSwap   DiskFilesystem = "swap"
    +	FilesystemExt3   DiskFilesystem = "ext3"
    +	FilesystemExt4   DiskFilesystem = "ext4"
    +	FilesystemInitrd DiskFilesystem = "initrd"
    +)
    +
    +// DiskStatus constants have the prefix "Disk" and include Linode API Instance Disk Status
    +type DiskStatus string
    +
    +// DiskStatus constants represent the status values an Instance Disk may have
    +const (
    +	DiskReady    DiskStatus = "ready"
    +	DiskNotReady DiskStatus = "not ready"
    +	DiskDeleting DiskStatus = "deleting"
    +)
    +
    +// InstanceDisksPagedResponse represents a paginated InstanceDisk API response
    +type InstanceDisksPagedResponse struct {
    +	*PageOptions
    +	Data []InstanceDisk `json:"data"`
    +}
    +
    +// InstanceDiskCreateOptions are InstanceDisk settings that can be used at creation
    +type InstanceDiskCreateOptions struct {
    +	Label string `json:"label"`
    +	Size  int    `json:"size"`
    +
    +	// Image is optional, but requires RootPass if provided
    +	Image    string `json:"image,omitempty"`
    +	RootPass string `json:"root_pass,omitempty"`
    +
    +	Filesystem      string            `json:"filesystem,omitempty"`
    +	AuthorizedKeys  []string          `json:"authorized_keys,omitempty"`
    +	AuthorizedUsers []string          `json:"authorized_users,omitempty"`
    +	ReadOnly        bool              `json:"read_only,omitempty"`
    +	StackscriptID   int               `json:"stackscript_id,omitempty"`
    +	StackscriptData map[string]string `json:"stackscript_data,omitempty"`
    +}
    +
    +// InstanceDiskUpdateOptions are InstanceDisk settings that can be used in updates
    +type InstanceDiskUpdateOptions struct {
    +	Label    string `json:"label"`
    +	ReadOnly bool   `json:"read_only"`
    +}
    +
    +// endpointWithID gets the endpoint URL for InstanceDisks of a given Instance
    +func (InstanceDisksPagedResponse) endpointWithID(c *Client, id int) string {
    +	endpoint, err := c.InstanceDisks.endpointWithID(id)
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends InstanceDisks when processing paginated InstanceDisk responses
    +func (resp *InstanceDisksPagedResponse) appendData(r *InstanceDisksPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListInstanceDisks lists InstanceDisks
    +func (c *Client) ListInstanceDisks(ctx context.Context, linodeID int, opts *ListOptions) ([]InstanceDisk, error) {
    +	response := InstanceDisksPagedResponse{}
    +	err := c.listHelperWithID(ctx, &response, linodeID, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (v *InstanceDisk) fixDates() *InstanceDisk {
    +	if created, err := parseDates(v.CreatedStr); err == nil {
    +		v.Created = *created
    +	}
    +	if updated, err := parseDates(v.UpdatedStr); err == nil {
    +		v.Updated = *updated
    +	}
    +	return v
    +}
    +
+// GetInstanceDisk gets the InstanceDisk with the provided ID
    +func (c *Client) GetInstanceDisk(ctx context.Context, linodeID int, configID int) (*InstanceDisk, error) {
    +	e, err := c.InstanceDisks.endpointWithID(linodeID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, configID)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceDisk{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*InstanceDisk).fixDates(), nil
    +}
    +
    +// CreateInstanceDisk creates a new InstanceDisk for the given Instance
    +func (c *Client) CreateInstanceDisk(ctx context.Context, linodeID int, createOpts InstanceDiskCreateOptions) (*InstanceDisk, error) {
    +	var body string
    +	e, err := c.InstanceDisks.endpointWithID(linodeID)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&InstanceDisk{})
    +
    +	if bodyData, err := json.Marshal(createOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	return r.Result().(*InstanceDisk).fixDates(), nil
    +}
    +
+// UpdateInstanceDisk updates an InstanceDisk for the given Instance
    +func (c *Client) UpdateInstanceDisk(ctx context.Context, linodeID int, diskID int, updateOpts InstanceDiskUpdateOptions) (*InstanceDisk, error) {
    +	var body string
    +	e, err := c.InstanceDisks.endpointWithID(linodeID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, diskID)
    +
    +	req := c.R(ctx).SetResult(&InstanceDisk{})
    +
    +	if bodyData, err := json.Marshal(updateOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	return r.Result().(*InstanceDisk).fixDates(), nil
    +}
    +
    +// RenameInstanceDisk renames an InstanceDisk
    +func (c *Client) RenameInstanceDisk(ctx context.Context, linodeID int, diskID int, label string) (*InstanceDisk, error) {
    +	return c.UpdateInstanceDisk(ctx, linodeID, diskID, InstanceDiskUpdateOptions{Label: label})
    +}
    +
+// ResizeInstanceDisk resizes the Instance disk to the given size
    +func (c *Client) ResizeInstanceDisk(ctx context.Context, linodeID int, diskID int, size int) error {
    +	var body string
    +	e, err := c.InstanceDisks.endpointWithID(linodeID)
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d/resize", e, diskID)
    +
    +	req := c.R(ctx).SetResult(&InstanceDisk{})
    +	updateOpts := map[string]interface{}{
    +		"size": size,
    +	}
    +
    +	if bodyData, err := json.Marshal(updateOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return NewError(err)
    +	}
    +
    +	_, err = coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	return err
    +}
    +
    +// PasswordResetInstanceDisk resets the "root" account password on the Instance disk
    +func (c *Client) PasswordResetInstanceDisk(ctx context.Context, linodeID int, diskID int, password string) error {
    +	var body string
    +	e, err := c.InstanceDisks.endpointWithID(linodeID)
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d/password", e, diskID)
    +
    +	req := c.R(ctx).SetResult(&InstanceDisk{})
    +	updateOpts := map[string]interface{}{
    +		"password": password,
    +	}
    +
    +	if bodyData, err := json.Marshal(updateOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return NewError(err)
    +	}
    +
    +	_, err = coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	return err
    +}
    +
    +// DeleteInstanceDisk deletes a Linode Instance Disk
    +func (c *Client) DeleteInstanceDisk(ctx context.Context, linodeID int, diskID int) error {
    +	e, err := c.InstanceDisks.endpointWithID(linodeID)
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, diskID)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
    +	return err
    +}
    diff --git a/vendor/github.com/linode/linodego/instance_ips.go b/vendor/github.com/linode/linodego/instance_ips.go
    new file mode 100644
    index 000000000000..a30d04f16dcb
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/instance_ips.go
    @@ -0,0 +1,106 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +)
    +
    +// InstanceIPAddressResponse contains the IPv4 and IPv6 details for an Instance
    +type InstanceIPAddressResponse struct {
    +	IPv4 *InstanceIPv4Response `json:"ipv4"`
    +	IPv6 *InstanceIPv6Response `json:"ipv6"`
    +}
    +
    +// InstanceIPv4Response contains the details of all IPv4 addresses associated with an Instance
    +type InstanceIPv4Response struct {
    +	Public  []*InstanceIP `json:"public"`
    +	Private []*InstanceIP `json:"private"`
    +	Shared  []*InstanceIP `json:"shared"`
    +}
    +
    +// InstanceIP represents an Instance IP with additional DNS and networking details
    +type InstanceIP struct {
    +	Address    string `json:"address"`
    +	Gateway    string `json:"gateway"`
    +	SubnetMask string `json:"subnet_mask"`
    +	Prefix     int    `json:"prefix"`
    +	Type       string `json:"type"`
    +	Public     bool   `json:"public"`
    +	RDNS       string `json:"rdns"`
    +	LinodeID   int    `json:"linode_id"`
    +	Region     string `json:"region"`
    +}
    +
    +// InstanceIPv6Response contains the IPv6 addresses and ranges for an Instance
    +type InstanceIPv6Response struct {
    +	LinkLocal *InstanceIP  `json:"link_local"`
    +	SLAAC     *InstanceIP  `json:"slaac"`
    +	Global    []*IPv6Range `json:"global"`
    +}
    +
    +// IPv6Range represents a range of IPv6 addresses routed to a single Linode in a given Region
    +type IPv6Range struct {
    +	Range  string `json:"range"`
    +	Region string `json:"region"`
    +}
    +
    +// GetInstanceIPAddresses gets the IPAddresses for a Linode instance
    +func (c *Client) GetInstanceIPAddresses(ctx context.Context, linodeID int) (*InstanceIPAddressResponse, error) {
    +	e, err := c.InstanceIPs.endpointWithID(linodeID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceIPAddressResponse{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*InstanceIPAddressResponse), nil
    +}
    +
    +// GetInstanceIPAddress gets the IPAddress for a Linode instance matching a supplied IP address
    +func (c *Client) GetInstanceIPAddress(ctx context.Context, linodeID int, ipaddress string) (*InstanceIP, error) {
    +	e, err := c.InstanceIPs.endpointWithID(linodeID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, ipaddress)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceIP{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*InstanceIP), nil
    +}
    +
    +// AddInstanceIPAddress adds a public or private IP to a Linode instance
    +func (c *Client) AddInstanceIPAddress(ctx context.Context, linodeID int, public bool) (*InstanceIP, error) {
    +	var body string
    +	e, err := c.InstanceIPs.endpointWithID(linodeID)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&InstanceIP{})
    +
    +	instanceipRequest := struct {
    +		Type   string `json:"type"`
    +		Public bool   `json:"public"`
    +	}{"ipv4", public}
    +
    +	if bodyData, err := json.Marshal(instanceipRequest); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetHeader("Content-Type", "application/json").
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	return r.Result().(*InstanceIP), nil
    +}
    diff --git a/vendor/github.com/linode/linodego/instance_snapshots.go b/vendor/github.com/linode/linodego/instance_snapshots.go
    new file mode 100644
    index 000000000000..7933507dce14
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/instance_snapshots.go
    @@ -0,0 +1,188 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"time"
    +)
    +
+// InstanceBackupsResponse represents a Linode Instance's Backups, both automatic and snapshot
    +type InstanceBackupsResponse struct {
    +	Automatic []*InstanceSnapshot             `json:"automatic"`
    +	Snapshot  *InstanceBackupSnapshotResponse `json:"snapshot"`
    +}
    +
    +// InstanceBackupSnapshotResponse fields are those representing Instance Backup Snapshots
    +type InstanceBackupSnapshotResponse struct {
    +	Current    *InstanceSnapshot `json:"current"`
    +	InProgress *InstanceSnapshot `json:"in_progress"`
    +}
    +
    +// RestoreInstanceOptions fields are those accepted by InstanceRestore
    +type RestoreInstanceOptions struct {
    +	LinodeID  int  `json:"linode_id"`
    +	Overwrite bool `json:"overwrite"`
    +}
    +
    +// InstanceSnapshot represents a linode backup snapshot
    +type InstanceSnapshot struct {
    +	CreatedStr  string `json:"created"`
    +	UpdatedStr  string `json:"updated"`
    +	FinishedStr string `json:"finished"`
    +
    +	ID       int                     `json:"id"`
    +	Label    string                  `json:"label"`
    +	Status   InstanceSnapshotStatus  `json:"status"`
    +	Type     string                  `json:"type"`
    +	Created  *time.Time              `json:"-"`
    +	Updated  *time.Time              `json:"-"`
    +	Finished *time.Time              `json:"-"`
    +	Configs  []string                `json:"configs"`
    +	Disks    []*InstanceSnapshotDisk `json:"disks"`
    +}
    +
    +// InstanceSnapshotDisk fields represent the source disk of a Snapshot
    +type InstanceSnapshotDisk struct {
    +	Label      string `json:"label"`
    +	Size       int    `json:"size"`
    +	Filesystem string `json:"filesystem"`
    +}
    +
    +// InstanceSnapshotStatus constants start with Snapshot and include Linode API Instance Backup Snapshot status values
    +type InstanceSnapshotStatus string
    +
    +// InstanceSnapshotStatus constants reflect the current status of an Instance Snapshot
    +var (
    +	SnapshotPaused              InstanceSnapshotStatus = "paused"
    +	SnapshotPending             InstanceSnapshotStatus = "pending"
    +	SnapshotRunning             InstanceSnapshotStatus = "running"
    +	SnapshotNeedsPostProcessing InstanceSnapshotStatus = "needsPostProcessing"
    +	SnapshotSuccessful          InstanceSnapshotStatus = "successful"
    +	SnapshotFailed              InstanceSnapshotStatus = "failed"
    +	SnapshotUserAborted         InstanceSnapshotStatus = "userAborted"
    +)
    +
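+// fixDates converts JSON timestamps to Go time.Time values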
    +func (l *InstanceSnapshot) fixDates() *InstanceSnapshot {
    +	l.Created, _ = parseDates(l.CreatedStr)
    +	l.Updated, _ = parseDates(l.UpdatedStr)
    +	l.Finished, _ = parseDates(l.FinishedStr)
    +	return l
    +}
    +
    +// GetInstanceSnapshot gets the snapshot with the provided ID
    +func (c *Client) GetInstanceSnapshot(ctx context.Context, linodeID int, snapshotID int) (*InstanceSnapshot, error) {
    +	e, err := c.InstanceSnapshots.endpointWithID(linodeID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, snapshotID)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceSnapshot{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*InstanceSnapshot).fixDates(), nil
    +}
    +
+// CreateInstanceSnapshot creates or replaces the snapshot Backup of a Linode. If a previous snapshot exists for this Linode, it will be deleted.
    +func (c *Client) CreateInstanceSnapshot(ctx context.Context, linodeID int, label string) (*InstanceSnapshot, error) {
    +	o, err := json.Marshal(map[string]string{"label": label})
    +	if err != nil {
    +		return nil, err
    +	}
    +	body := string(o)
    +	e, err := c.InstanceSnapshots.endpointWithID(linodeID)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	r, err := coupleAPIErrors(c.R(ctx).
    +		SetBody(body).
    +		SetResult(&InstanceSnapshot{}).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	return r.Result().(*InstanceSnapshot).fixDates(), nil
    +}
    +
    +// GetInstanceBackups gets the Instance's available Backups.
    +// This is not called ListInstanceBackups because a single object is returned, matching the API response.
    +func (c *Client) GetInstanceBackups(ctx context.Context, linodeID int) (*InstanceBackupsResponse, error) {
    +	e, err := c.InstanceSnapshots.endpointWithID(linodeID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	r, err := coupleAPIErrors(c.R(ctx).
    +		SetResult(&InstanceBackupsResponse{}).
    +		Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*InstanceBackupsResponse).fixDates(), nil
    +}
    +
+// EnableInstanceBackups enables backups for the specified Linode.
    +func (c *Client) EnableInstanceBackups(ctx context.Context, linodeID int) error {
    +	e, err := c.InstanceSnapshots.endpointWithID(linodeID)
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/enable", e)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Post(e))
    +	return err
    +}
    +
+// CancelInstanceBackups cancels backups for the specified Linode.
    +func (c *Client) CancelInstanceBackups(ctx context.Context, linodeID int) error {
    +	e, err := c.InstanceSnapshots.endpointWithID(linodeID)
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/cancel", e)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Post(e))
    +	return err
    +}
    +
+// RestoreInstanceBackup restores a Linode's Backup to the specified Linode.
    +func (c *Client) RestoreInstanceBackup(ctx context.Context, linodeID int, backupID int, opts RestoreInstanceOptions) error {
    +	o, err := json.Marshal(opts)
    +	if err != nil {
    +		return NewError(err)
    +	}
    +	body := string(o)
    +	e, err := c.InstanceSnapshots.endpointWithID(linodeID)
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d/restore", e, backupID)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).SetBody(body).Post(e))
    +
    +	return err
    +}
    +
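+// fixDates converts JSON timestamps to Go time.Time values on the current and in-progress snapshots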
    +func (l *InstanceBackupSnapshotResponse) fixDates() *InstanceBackupSnapshotResponse {
    +	if l.Current != nil {
    +		l.Current.fixDates()
    +	}
    +	if l.InProgress != nil {
    +		l.InProgress.fixDates()
    +	}
    +	return l
    +}
    +
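+// fixDates converts JSON timestamps to Go time.Time values on the automatic and snapshot Backups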
    +func (l *InstanceBackupsResponse) fixDates() *InstanceBackupsResponse {
    +	for i := range l.Automatic {
    +		l.Automatic[i].fixDates()
    +	}
    +	if l.Snapshot != nil {
    +		l.Snapshot.fixDates()
    +	}
    +	return l
    +}
    diff --git a/vendor/github.com/linode/linodego/instance_volumes.go b/vendor/github.com/linode/linodego/instance_volumes.go
    new file mode 100644
    index 000000000000..b6c9a9ae05f1
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/instance_volumes.go
    @@ -0,0 +1,38 @@
    +package linodego
    +
    +import (
    +	"context"
    +)
    +
    +// InstanceVolumesPagedResponse represents a paginated InstanceVolume API response
    +type InstanceVolumesPagedResponse struct {
    +	*PageOptions
    +	Data []Volume `json:"data"`
    +}
    +
+// endpointWithID gets the endpoint URL for InstanceVolumes of a given Instance
    +func (InstanceVolumesPagedResponse) endpointWithID(c *Client, id int) string {
    +	endpoint, err := c.InstanceVolumes.endpointWithID(id)
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends InstanceVolumes when processing paginated InstanceVolume responses
    +func (resp *InstanceVolumesPagedResponse) appendData(r *InstanceVolumesPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListInstanceVolumes lists InstanceVolumes
    +func (c *Client) ListInstanceVolumes(ctx context.Context, linodeID int, opts *ListOptions) ([]Volume, error) {
    +	response := InstanceVolumesPagedResponse{}
    +	err := c.listHelperWithID(ctx, &response, linodeID, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    diff --git a/vendor/github.com/linode/linodego/instances.go b/vendor/github.com/linode/linodego/instances.go
    new file mode 100644
    index 000000000000..85775508a09f
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/instances.go
    @@ -0,0 +1,454 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"net"
    +	"time"
    +)
    +
    +/*
    + * https://developers.linode.com/v4/reference/endpoints/linode/instances
    + */
    +
    +// InstanceStatus constants start with Instance and include Linode API Instance Status values
    +type InstanceStatus string
    +
    +// InstanceStatus constants reflect the current status of an Instance
    +const (
    +	InstanceBooting      InstanceStatus = "booting"
    +	InstanceRunning      InstanceStatus = "running"
    +	InstanceOffline      InstanceStatus = "offline"
    +	InstanceShuttingDown InstanceStatus = "shutting_down"
    +	InstanceRebooting    InstanceStatus = "rebooting"
    +	InstanceProvisioning InstanceStatus = "provisioning"
    +	InstanceDeleting     InstanceStatus = "deleting"
    +	InstanceMigrating    InstanceStatus = "migrating"
    +	InstanceRebuilding   InstanceStatus = "rebuilding"
    +	InstanceCloning      InstanceStatus = "cloning"
    +	InstanceRestoring    InstanceStatus = "restoring"
    +	InstanceResizing     InstanceStatus = "resizing"
    +)
    +
    +// Instance represents a linode object
    +type Instance struct {
    +	CreatedStr string `json:"created"`
    +	UpdatedStr string `json:"updated"`
    +
    +	ID              int             `json:"id"`
    +	Created         *time.Time      `json:"-"`
    +	Updated         *time.Time      `json:"-"`
    +	Region          string          `json:"region"`
    +	Alerts          *InstanceAlert  `json:"alerts"`
    +	Backups         *InstanceBackup `json:"backups"`
    +	Image           string          `json:"image"`
    +	Group           string          `json:"group"`
    +	IPv4            []*net.IP       `json:"ipv4"`
    +	IPv6            string          `json:"ipv6"`
    +	Label           string          `json:"label"`
    +	Type            string          `json:"type"`
    +	Status          InstanceStatus  `json:"status"`
    +	Hypervisor      string          `json:"hypervisor"`
    +	Specs           *InstanceSpec   `json:"specs"`
    +	WatchdogEnabled bool            `json:"watchdog_enabled"`
    +	Tags            []string        `json:"tags"`
    +}
    +
    +// InstanceSpec represents a linode spec
    +type InstanceSpec struct {
    +	Disk     int `json:"disk"`
    +	Memory   int `json:"memory"`
    +	VCPUs    int `json:"vcpus"`
    +	Transfer int `json:"transfer"`
    +}
    +
    +// InstanceAlert represents a metric alert
    +type InstanceAlert struct {
    +	CPU           int `json:"cpu"`
    +	IO            int `json:"io"`
    +	NetworkIn     int `json:"network_in"`
    +	NetworkOut    int `json:"network_out"`
    +	TransferQuota int `json:"transfer_quota"`
    +}
    +
    +// InstanceBackup represents backup settings for an instance
    +type InstanceBackup struct {
    +	Enabled  bool `json:"enabled"`
    +	Schedule struct {
    +		Day    string `json:"day,omitempty"`
    +		Window string `json:"window,omitempty"`
    +	}
    +}
    +
    +// InstanceCreateOptions require only Region and Type
    +type InstanceCreateOptions struct {
    +	Region          string            `json:"region"`
    +	Type            string            `json:"type"`
    +	Label           string            `json:"label,omitempty"`
    +	Group           string            `json:"group,omitempty"`
    +	RootPass        string            `json:"root_pass,omitempty"`
    +	AuthorizedKeys  []string          `json:"authorized_keys,omitempty"`
    +	AuthorizedUsers []string          `json:"authorized_users,omitempty"`
    +	StackScriptID   int               `json:"stackscript_id,omitempty"`
    +	StackScriptData map[string]string `json:"stackscript_data,omitempty"`
    +	BackupID        int               `json:"backup_id,omitempty"`
    +	Image           string            `json:"image,omitempty"`
    +	BackupsEnabled  bool              `json:"backups_enabled,omitempty"`
    +	PrivateIP       bool              `json:"private_ip,omitempty"`
    +	Tags            []string          `json:"tags,omitempty"`
    +
    +	// Creation fields that need to be set explicitly false, "", or 0 use pointers
    +	SwapSize *int  `json:"swap_size,omitempty"`
    +	Booted   *bool `json:"booted,omitempty"`
    +}
    +
    +// InstanceUpdateOptions is an options struct used when Updating an Instance
    +type InstanceUpdateOptions struct {
    +	Label           string          `json:"label,omitempty"`
    +	Group           string          `json:"group,omitempty"`
    +	Backups         *InstanceBackup `json:"backups,omitempty"`
    +	Alerts          *InstanceAlert  `json:"alerts,omitempty"`
    +	WatchdogEnabled *bool           `json:"watchdog_enabled,omitempty"`
    +	Tags            *[]string       `json:"tags,omitempty"`
    +}
    +
    +// GetUpdateOptions converts an Instance to InstanceUpdateOptions for use in UpdateInstance
    +func (l *Instance) GetUpdateOptions() InstanceUpdateOptions {
    +	return InstanceUpdateOptions{
    +		Label:           l.Label,
    +		Group:           l.Group,
    +		Backups:         l.Backups,
    +		Alerts:          l.Alerts,
    +		WatchdogEnabled: &l.WatchdogEnabled,
    +		Tags:            &l.Tags,
    +	}
    +}
    +
    +// InstanceCloneOptions is an options struct sent when Cloning an Instance
    +type InstanceCloneOptions struct {
    +	Region string `json:"region,omitempty"`
    +	Type   string `json:"type,omitempty"`
    +
    +	// LinodeID is an optional existing instance to use as the target of the clone
    +	LinodeID       int    `json:"linode_id,omitempty"`
    +	Label          string `json:"label,omitempty"`
    +	Group          string `json:"group,omitempty"`
    +	BackupsEnabled bool   `json:"backups_enabled"`
    +	Disks          []int  `json:"disks,omitempty"`
    +	Configs        []int  `json:"configs,omitempty"`
    +}
    +
    +func (l *Instance) fixDates() *Instance {
    +	l.Created, _ = parseDates(l.CreatedStr)
    +	l.Updated, _ = parseDates(l.UpdatedStr)
    +	return l
    +}
    +
    +// InstancesPagedResponse represents a linode API response for listing
    +type InstancesPagedResponse struct {
    +	*PageOptions
    +	Data []Instance `json:"data"`
    +}
    +
    +// endpoint gets the endpoint URL for Instance
    +func (InstancesPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.Instances.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends Instances when processing paginated Instance responses
    +func (resp *InstancesPagedResponse) appendData(r *InstancesPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListInstances lists linode instances
    +func (c *Client) ListInstances(ctx context.Context, opts *ListOptions) ([]Instance, error) {
    +	response := InstancesPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// GetInstance gets the instance with the provided ID
    +func (c *Client) GetInstance(ctx context.Context, linodeID int) (*Instance, error) {
    +	e, err := c.Instances.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, linodeID)
    +	r, err := coupleAPIErrors(c.R(ctx).
    +		SetResult(Instance{}).
    +		Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Instance).fixDates(), nil
    +}
    +
    +// CreateInstance creates a Linode instance
    +func (c *Client) CreateInstance(ctx context.Context, instance InstanceCreateOptions) (*Instance, error) {
    +	var body string
    +	e, err := c.Instances.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&Instance{})
    +
    +	if bodyData, err := json.Marshal(instance); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Instance).fixDates(), nil
    +}
    +
+// UpdateInstance updates a Linode instance
    +func (c *Client) UpdateInstance(ctx context.Context, id int, instance InstanceUpdateOptions) (*Instance, error) {
    +	var body string
    +	e, err := c.Instances.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	req := c.R(ctx).SetResult(&Instance{})
    +
    +	if bodyData, err := json.Marshal(instance); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Instance).fixDates(), nil
    +}
    +
    +// RenameInstance renames an Instance
    +func (c *Client) RenameInstance(ctx context.Context, linodeID int, label string) (*Instance, error) {
    +	return c.UpdateInstance(ctx, linodeID, InstanceUpdateOptions{Label: label})
    +}
    +
    +// DeleteInstance deletes a Linode instance
    +func (c *Client) DeleteInstance(ctx context.Context, id int) error {
    +	e, err := c.Instances.Endpoint()
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
    +	return err
    +}
    +
    +// BootInstance will boot a Linode instance
    +// A configID of 0 will cause Linode to choose the last/best config
    +func (c *Client) BootInstance(ctx context.Context, id int, configID int) error {
    +	bodyStr := ""
    +
    +	if configID != 0 {
    +		bodyMap := map[string]int{"config_id": configID}
    +		bodyJSON, err := json.Marshal(bodyMap)
    +		if err != nil {
    +			return NewError(err)
    +		}
    +		bodyStr = string(bodyJSON)
    +	}
    +
    +	e, err := c.Instances.Endpoint()
    +	if err != nil {
    +		return err
    +	}
    +
    +	e = fmt.Sprintf("%s/%d/boot", e, id)
    +	_, err = coupleAPIErrors(c.R(ctx).
    +		SetBody(bodyStr).
    +		Post(e))
    +
    +	return err
    +}
    +
+// CloneInstance clones an existing Instance's Disks and Configuration profiles to another Linode Instance
    +func (c *Client) CloneInstance(ctx context.Context, id int, options InstanceCloneOptions) (*Instance, error) {
    +	var body string
    +	e, err := c.Instances.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d/clone", e, id)
    +
    +	req := c.R(ctx).SetResult(&Instance{})
    +
    +	if bodyData, err := json.Marshal(options); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	return r.Result().(*Instance).fixDates(), nil
    +}
    +
    +// RebootInstance reboots a Linode instance
    +// A configID of 0 will cause Linode to choose the last/best config
    +func (c *Client) RebootInstance(ctx context.Context, id int, configID int) error {
    +	bodyStr := "{}"
    +
    +	if configID != 0 {
    +		bodyMap := map[string]int{"config_id": configID}
    +		bodyJSON, err := json.Marshal(bodyMap)
    +		if err != nil {
    +			return NewError(err)
    +		}
    +		bodyStr = string(bodyJSON)
    +	}
    +
    +	e, err := c.Instances.Endpoint()
    +	if err != nil {
    +		return err
    +	}
    +
    +	e = fmt.Sprintf("%s/%d/reboot", e, id)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).
    +		SetBody(bodyStr).
    +		Post(e))
    +
    +	return err
    +}
    +
    +// RebuildInstanceOptions is a struct representing the options to send to the rebuild linode endpoint
    +type RebuildInstanceOptions struct {
    +	Image           string            `json:"image"`
    +	RootPass        string            `json:"root_pass"`
    +	AuthorizedKeys  []string          `json:"authorized_keys"`
    +	AuthorizedUsers []string          `json:"authorized_users"`
    +	StackscriptID   int               `json:"stackscript_id"`
    +	StackscriptData map[string]string `json:"stackscript_data"`
    +	Booted          bool              `json:"booted"`
    +}
    +
+// RebuildInstance deletes all Disks and Configs on this Linode,
    +// then deploys a new Image to this Linode with the given attributes.
    +func (c *Client) RebuildInstance(ctx context.Context, id int, opts RebuildInstanceOptions) (*Instance, error) {
    +	o, err := json.Marshal(opts)
    +	if err != nil {
    +		return nil, NewError(err)
    +	}
    +	b := string(o)
    +	e, err := c.Instances.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d/rebuild", e, id)
    +	r, err := coupleAPIErrors(c.R(ctx).
    +		SetBody(b).
    +		SetResult(&Instance{}).
    +		Post(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Instance).fixDates(), nil
    +}
    +
    +// RescueInstanceOptions fields are those accepted by RescueInstance
    +type RescueInstanceOptions struct {
    +	Devices InstanceConfigDeviceMap `json:"devices"`
    +}
    +
    +// RescueInstance reboots an instance into a safe environment for performing many system recovery and disk management tasks.
    +// Rescue Mode is based on the Finnix recovery distribution, a self-contained and bootable Linux distribution.
    +// You can also use Rescue Mode for tasks other than disaster recovery, such as formatting disks to use different filesystems,
    +// copying data between disks, and downloading files from a disk via SSH and SFTP.
    +func (c *Client) RescueInstance(ctx context.Context, id int, opts RescueInstanceOptions) error {
    +	o, err := json.Marshal(opts)
    +	if err != nil {
    +		return NewError(err)
    +	}
    +	b := string(o)
    +	e, err := c.Instances.Endpoint()
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d/rescue", e, id)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).
    +		SetBody(b).
    +		Post(e))
    +
    +	return err
    +}
    +
+// ResizeInstance resizes an instance to a new Linode type
    +func (c *Client) ResizeInstance(ctx context.Context, id int, linodeType string) error {
    +	body := fmt.Sprintf("{\"type\":\"%s\"}", linodeType)
    +
    +	e, err := c.Instances.Endpoint()
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d/resize", e, id)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).
    +		SetBody(body).
    +		Post(e))
    +
    +	return err
    +}
    +
+// ShutdownInstance shuts down an instance
    +func (c *Client) ShutdownInstance(ctx context.Context, id int) error {
    +	return c.simpleInstanceAction(ctx, "shutdown", id)
    +}
    +
+// MutateInstance upgrades a Linode to its next generation.
    +func (c *Client) MutateInstance(ctx context.Context, id int) error {
    +	return c.simpleInstanceAction(ctx, "mutate", id)
    +}
    +
+// MigrateInstance migrates an instance to another datacenter
    +func (c *Client) MigrateInstance(ctx context.Context, id int) error {
    +	return c.simpleInstanceAction(ctx, "migrate", id)
    +}
    +
    +// simpleInstanceAction is a helper for Instance actions that take no parameters
    +// and return empty responses `{}` unless they return a standard error
    +func (c *Client) simpleInstanceAction(ctx context.Context, action string, id int) error {
    +	e, err := c.Instances.Endpoint()
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d/%s", e, id, action)
    +	_, err = coupleAPIErrors(c.R(ctx).Post(e))
    +	return err
    +}
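
A minimal, illustrative sketch of how the instance helpers above might be called, assuming an already-authenticated *linodego.Client (client construction omitted); the package name, region, type, image, label, and root password are placeholders, not values taken from this patch:

package example // hypothetical package, not part of the vendored tree

import (
	"context"
	"log"

	"github.com/linode/linodego"
)

// createAndBoot creates a Linode instance and boots it with its default config.
// The client is assumed to be authenticated already.
func createAndBoot(ctx context.Context, c *linodego.Client) {
	inst, err := c.CreateInstance(ctx, linodego.InstanceCreateOptions{
		Region:   "us-east",        // placeholder region
		Type:     "g6-nanode-1",    // placeholder type
		Image:    "linode/debian9", // placeholder image
		RootPass: "placeholder-root-pass",
		Label:    "example-instance",
	})
	if err != nil {
		log.Fatal(err)
	}
	// A configID of 0 lets Linode choose the last/best config (see BootInstance above).
	if err := c.BootInstance(ctx, inst.ID, 0); err != nil {
		log.Fatal(err)
	}
	log.Printf("instance %d is %s", inst.ID, inst.Status)
}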
    diff --git a/vendor/github.com/linode/linodego/kernels.go b/vendor/github.com/linode/linodego/kernels.go
    new file mode 100644
    index 000000000000..233a38db5456
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/kernels.go
    @@ -0,0 +1,61 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"fmt"
    +)
    +
    +// LinodeKernel represents a Linode Instance kernel object
    +type LinodeKernel struct {
    +	ID           string `json:"id"`
    +	Label        string `json:"label"`
    +	Version      string `json:"version"`
    +	Architecture string `json:"architecture"`
    +	KVM          bool   `json:"kvm"`
    +	XEN          bool   `json:"xen"`
    +	PVOPS        bool   `json:"pvops"`
    +}
    +
    +// LinodeKernelsPagedResponse represents a Linode kernels API response for listing
    +type LinodeKernelsPagedResponse struct {
    +	*PageOptions
    +	Data []LinodeKernel `json:"data"`
    +}
    +
    +// ListKernels lists linode kernels
    +func (c *Client) ListKernels(ctx context.Context, opts *ListOptions) ([]LinodeKernel, error) {
    +	response := LinodeKernelsPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +func (LinodeKernelsPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.Kernels.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +func (resp *LinodeKernelsPagedResponse) appendData(r *LinodeKernelsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// GetKernel gets the kernel with the provided ID
    +func (c *Client) GetKernel(ctx context.Context, kernelID string) (*LinodeKernel, error) {
    +	e, err := c.Kernels.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, kernelID)
    +	r, err := c.R(ctx).
    +		SetResult(&LinodeKernel{}).
    +		Get(e)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*LinodeKernel), nil
    +}
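
For illustration, kernels can be listed through the pagination helpers defined later in this patch (pagination.go); a sketch, again assuming an authenticated client, with a placeholder X-Filter expression:

package example // hypothetical package, not part of the vendored tree

import (
	"context"

	"github.com/linode/linodego"
)

// listKVMKernels lists kernels matching a placeholder KVM filter.
// Page 0 leaves the page query parameter unset; see listHelper in pagination.go.
func listKVMKernels(ctx context.Context, c *linodego.Client) ([]linodego.LinodeKernel, error) {
	opts := linodego.NewListOptions(0, `{"kvm": true}`) // placeholder X-Filter
	return c.ListKernels(ctx, opts)
}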
    diff --git a/vendor/github.com/linode/linodego/longview.go b/vendor/github.com/linode/linodego/longview.go
    new file mode 100644
    index 000000000000..524af67768ff
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/longview.go
    @@ -0,0 +1,67 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"fmt"
    +)
    +
    +// LongviewClient represents a LongviewClient object
    +type LongviewClient struct {
    +	ID int `json:"id"`
    +	// UpdatedStr string `json:"updated"`
    +	// Updated *time.Time `json:"-"`
    +}
    +
    +// LongviewClientsPagedResponse represents a paginated LongviewClient API response
    +type LongviewClientsPagedResponse struct {
    +	*PageOptions
    +	Data []LongviewClient `json:"data"`
    +}
    +
    +// endpoint gets the endpoint URL for LongviewClient
    +func (LongviewClientsPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.LongviewClients.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends LongviewClients when processing paginated LongviewClient responses
    +func (resp *LongviewClientsPagedResponse) appendData(r *LongviewClientsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListLongviewClients lists LongviewClients
    +func (c *Client) ListLongviewClients(ctx context.Context, opts *ListOptions) ([]LongviewClient, error) {
    +	response := LongviewClientsPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (v *LongviewClient) fixDates() *LongviewClient {
    +	// v.Created, _ = parseDates(v.CreatedStr)
    +	// v.Updated, _ = parseDates(v.UpdatedStr)
    +	return v
    +}
    +
+// GetLongviewClient gets the Longview Client with the provided ID
    +func (c *Client) GetLongviewClient(ctx context.Context, id string) (*LongviewClient, error) {
    +	e, err := c.LongviewClients.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, id)
    +	r, err := c.R(ctx).SetResult(&LongviewClient{}).Get(e)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*LongviewClient).fixDates(), nil
    +}
    diff --git a/vendor/github.com/linode/linodego/longview_subscriptions.go b/vendor/github.com/linode/linodego/longview_subscriptions.go
    new file mode 100644
    index 000000000000..253316599a0f
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/longview_subscriptions.go
    @@ -0,0 +1,70 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"fmt"
    +)
    +
    +// LongviewSubscription represents a LongviewSubscription object
    +type LongviewSubscription struct {
    +	ID              string       `json:"id"`
    +	Label           string       `json:"label"`
    +	ClientsIncluded int          `json:"clients_included"`
    +	Price           *LinodePrice `json:"price"`
    +	// UpdatedStr string `json:"updated"`
    +	// Updated *time.Time `json:"-"`
    +}
    +
    +// LongviewSubscriptionsPagedResponse represents a paginated LongviewSubscription API response
    +type LongviewSubscriptionsPagedResponse struct {
    +	*PageOptions
    +	Data []LongviewSubscription `json:"data"`
    +}
    +
    +// endpoint gets the endpoint URL for LongviewSubscription
    +func (LongviewSubscriptionsPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.LongviewSubscriptions.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends LongviewSubscriptions when processing paginated LongviewSubscription responses
    +func (resp *LongviewSubscriptionsPagedResponse) appendData(r *LongviewSubscriptionsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListLongviewSubscriptions lists LongviewSubscriptions
    +func (c *Client) ListLongviewSubscriptions(ctx context.Context, opts *ListOptions) ([]LongviewSubscription, error) {
    +	response := LongviewSubscriptionsPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (v *LongviewSubscription) fixDates() *LongviewSubscription {
    +	// v.Created, _ = parseDates(v.CreatedStr)
    +	// v.Updated, _ = parseDates(v.UpdatedStr)
    +	return v
    +}
    +
+// GetLongviewSubscription gets the Longview Subscription with the provided ID
    +func (c *Client) GetLongviewSubscription(ctx context.Context, id string) (*LongviewSubscription, error) {
    +	e, err := c.LongviewSubscriptions.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, id)
    +	r, err := c.R(ctx).SetResult(&LongviewSubscription{}).Get(e)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*LongviewSubscription).fixDates(), nil
    +}
    diff --git a/vendor/github.com/linode/linodego/managed.go b/vendor/github.com/linode/linodego/managed.go
    new file mode 100644
    index 000000000000..152361292822
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/managed.go
    @@ -0,0 +1 @@
    +package linodego
    diff --git a/vendor/github.com/linode/linodego/network_ips.go b/vendor/github.com/linode/linodego/network_ips.go
    new file mode 100644
    index 000000000000..01dd5c1d3b1c
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/network_ips.go
    @@ -0,0 +1,90 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +)
    +
    +// IPAddressesPagedResponse represents a paginated IPAddress API response
    +type IPAddressesPagedResponse struct {
    +	*PageOptions
    +	Data []InstanceIP `json:"data"`
    +}
    +
+// IPAddressUpdateOptions fields are those accepted by UpdateIPAddress
    +type IPAddressUpdateOptions struct {
    +	// The reverse DNS assigned to this address. For public IPv4 addresses, this will be set to a default value provided by Linode if set to nil.
    +	RDNS *string `json:"rdns"`
    +}
    +
+// GetUpdateOptions converts an IPAddress to IPAddressUpdateOptions for use in UpdateIPAddress
    +func (i InstanceIP) GetUpdateOptions() (o IPAddressUpdateOptions) {
    +	o.RDNS = copyString(&i.RDNS)
    +	return
    +}
    +
    +// endpoint gets the endpoint URL for IPAddress
    +func (IPAddressesPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.IPAddresses.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends IPAddresses when processing paginated InstanceIPAddress responses
    +func (resp *IPAddressesPagedResponse) appendData(r *IPAddressesPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListIPAddresses lists IPAddresses
    +func (c *Client) ListIPAddresses(ctx context.Context, opts *ListOptions) ([]InstanceIP, error) {
    +	response := IPAddressesPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
+// GetIPAddress gets the IPAddress with the provided ID
    +func (c *Client) GetIPAddress(ctx context.Context, id string) (*InstanceIP, error) {
    +	e, err := c.IPAddresses.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, id)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceIP{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*InstanceIP), nil
    +}
    +
    +// UpdateIPAddress updates the IPAddress with the specified id
    +func (c *Client) UpdateIPAddress(ctx context.Context, id string, updateOpts IPAddressUpdateOptions) (*InstanceIP, error) {
    +	var body string
    +	e, err := c.IPAddresses.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, id)
    +
    +	req := c.R(ctx).SetResult(&InstanceIP{})
    +
    +	if bodyData, err := json.Marshal(updateOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*InstanceIP), nil
    +}
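
A sketch of the reverse-DNS update flow these helpers expose, assuming an authenticated client; the address and hostname arguments are caller-supplied placeholders:

package example // hypothetical package, not part of the vendored tree

import (
	"context"
	"log"

	"github.com/linode/linodego"
)

// setReverseDNS points the reverse DNS of an address at the given hostname.
// Passing a nil RDNS instead resets a public IPv4 address to Linode's default.
func setReverseDNS(ctx context.Context, c *linodego.Client, address, hostname string) {
	ip, err := c.UpdateIPAddress(ctx, address, linodego.IPAddressUpdateOptions{RDNS: &hostname})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s now has reverse DNS %q", address, ip.RDNS)
}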
    diff --git a/vendor/github.com/linode/linodego/network_pools.go b/vendor/github.com/linode/linodego/network_pools.go
    new file mode 100644
    index 000000000000..03085923c2af
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/network_pools.go
    @@ -0,0 +1,50 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"fmt"
    +)
    +
    +// IPv6PoolsPagedResponse represents a paginated IPv6Pool API response
    +type IPv6PoolsPagedResponse struct {
    +	*PageOptions
    +	Data []IPv6Range `json:"data"`
    +}
    +
    +// endpoint gets the endpoint URL for IPv6Pool
    +func (IPv6PoolsPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.IPv6Pools.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends IPv6Pools when processing paginated IPv6Pool responses
    +func (resp *IPv6PoolsPagedResponse) appendData(r *IPv6PoolsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListIPv6Pools lists IPv6Pools
    +func (c *Client) ListIPv6Pools(ctx context.Context, opts *ListOptions) ([]IPv6Range, error) {
    +	response := IPv6PoolsPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
+// GetIPv6Pool gets the IPv6Pool with the provided ID
    +func (c *Client) GetIPv6Pool(ctx context.Context, id string) (*IPv6Range, error) {
    +	e, err := c.IPv6Pools.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, id)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&IPv6Range{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*IPv6Range), nil
    +}
    diff --git a/vendor/github.com/linode/linodego/network_ranges.go b/vendor/github.com/linode/linodego/network_ranges.go
    new file mode 100644
    index 000000000000..0c0ba15a3773
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/network_ranges.go
    @@ -0,0 +1,50 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"fmt"
    +)
    +
    +// IPv6RangesPagedResponse represents a paginated IPv6Range API response
    +type IPv6RangesPagedResponse struct {
    +	*PageOptions
    +	Data []IPv6Range `json:"data"`
    +}
    +
    +// endpoint gets the endpoint URL for IPv6Range
    +func (IPv6RangesPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.IPv6Ranges.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends IPv6Ranges when processing paginated IPv6Range responses
    +func (resp *IPv6RangesPagedResponse) appendData(r *IPv6RangesPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListIPv6Ranges lists IPv6Ranges
    +func (c *Client) ListIPv6Ranges(ctx context.Context, opts *ListOptions) ([]IPv6Range, error) {
    +	response := IPv6RangesPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
+// GetIPv6Range gets the IPv6Range with the provided ID
    +func (c *Client) GetIPv6Range(ctx context.Context, id string) (*IPv6Range, error) {
    +	e, err := c.IPv6Ranges.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, id)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&IPv6Range{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*IPv6Range), nil
    +}
    diff --git a/vendor/github.com/linode/linodego/nodebalancer.go b/vendor/github.com/linode/linodego/nodebalancer.go
    new file mode 100644
    index 000000000000..a8d797d24046
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/nodebalancer.go
    @@ -0,0 +1,199 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"time"
    +)
    +
    +// NodeBalancer represents a NodeBalancer object
    +type NodeBalancer struct {
    +	CreatedStr string `json:"created"`
    +	UpdatedStr string `json:"updated"`
    +	// This NodeBalancer's unique ID.
    +	ID int `json:"id"`
    +	// This NodeBalancer's label. These must be unique on your Account.
    +	Label *string `json:"label"`
    +	// The Region where this NodeBalancer is located. NodeBalancers only support backends in the same Region.
    +	Region string `json:"region"`
    +	// This NodeBalancer's hostname, ending with .nodebalancer.linode.com
    +	Hostname *string `json:"hostname"`
    +	// This NodeBalancer's public IPv4 address.
    +	IPv4 *string `json:"ipv4"`
    +	// This NodeBalancer's public IPv6 address.
    +	IPv6 *string `json:"ipv6"`
    +	// Throttle connections per second (0-20). Set to 0 (zero) to disable throttling.
    +	ClientConnThrottle int `json:"client_conn_throttle"`
    +	// Information about the amount of transfer this NodeBalancer has had so far this month.
    +	Transfer NodeBalancerTransfer `json:"transfer"`
    +
    +	// An array of tags applied to this object. Tags are for organizational purposes only.
    +	Tags []string `json:"tags"`
    +
    +	Created *time.Time `json:"-"`
    +	Updated *time.Time `json:"-"`
    +}
    +
    +// NodeBalancerTransfer contains information about the amount of transfer a NodeBalancer has had in the current month
    +type NodeBalancerTransfer struct {
    +	// The total transfer, in MB, used by this NodeBalancer this month.
    +	Total *float64 `json:"total"`
+	// The total outbound transfer, in MB, used for this NodeBalancer this month.
+	Out *float64 `json:"out"`
+	// The total inbound transfer, in MB, used for this NodeBalancer this month.
+	In *float64 `json:"in"`
    +}
    +
    +// NodeBalancerCreateOptions are the options permitted for CreateNodeBalancer
    +type NodeBalancerCreateOptions struct {
    +	Label              *string                            `json:"label,omitempty"`
    +	Region             string                             `json:"region,omitempty"`
    +	ClientConnThrottle *int                               `json:"client_conn_throttle,omitempty"`
    +	Configs            []*NodeBalancerConfigCreateOptions `json:"configs,omitempty"`
    +	Tags               []string                           `json:"tags"`
    +}
    +
    +// NodeBalancerUpdateOptions are the options permitted for UpdateNodeBalancer
    +type NodeBalancerUpdateOptions struct {
    +	Label              *string   `json:"label,omitempty"`
    +	ClientConnThrottle *int      `json:"client_conn_throttle,omitempty"`
    +	Tags               *[]string `json:"tags,omitempty"`
    +}
    +
    +// GetCreateOptions converts a NodeBalancer to NodeBalancerCreateOptions for use in CreateNodeBalancer
    +func (i NodeBalancer) GetCreateOptions() NodeBalancerCreateOptions {
    +	return NodeBalancerCreateOptions{
    +		Label:              i.Label,
    +		Region:             i.Region,
    +		ClientConnThrottle: &i.ClientConnThrottle,
    +		Tags:               i.Tags,
    +	}
    +}
    +
    +// GetUpdateOptions converts a NodeBalancer to NodeBalancerUpdateOptions for use in UpdateNodeBalancer
    +func (i NodeBalancer) GetUpdateOptions() NodeBalancerUpdateOptions {
    +	return NodeBalancerUpdateOptions{
    +		Label:              i.Label,
    +		ClientConnThrottle: &i.ClientConnThrottle,
    +		Tags:               &i.Tags,
    +	}
    +}
    +
    +// NodeBalancersPagedResponse represents a paginated NodeBalancer API response
    +type NodeBalancersPagedResponse struct {
    +	*PageOptions
    +	Data []NodeBalancer `json:"data"`
    +}
    +
    +func (NodeBalancersPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.NodeBalancers.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +func (resp *NodeBalancersPagedResponse) appendData(r *NodeBalancersPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListNodeBalancers lists NodeBalancers
    +func (c *Client) ListNodeBalancers(ctx context.Context, opts *ListOptions) ([]NodeBalancer, error) {
    +	response := NodeBalancersPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (i *NodeBalancer) fixDates() *NodeBalancer {
    +	i.Created, _ = parseDates(i.CreatedStr)
    +	i.Updated, _ = parseDates(i.UpdatedStr)
    +	return i
    +}
    +
    +// GetNodeBalancer gets the NodeBalancer with the provided ID
    +func (c *Client) GetNodeBalancer(ctx context.Context, id int) (*NodeBalancer, error) {
    +	e, err := c.NodeBalancers.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +	r, err := coupleAPIErrors(c.R(ctx).
    +		SetResult(&NodeBalancer{}).
    +		Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*NodeBalancer).fixDates(), nil
    +}
    +
    +// CreateNodeBalancer creates a NodeBalancer
    +func (c *Client) CreateNodeBalancer(ctx context.Context, nodebalancer NodeBalancerCreateOptions) (*NodeBalancer, error) {
    +	var body string
    +	e, err := c.NodeBalancers.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&NodeBalancer{})
    +
    +	if bodyData, err := json.Marshal(nodebalancer); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetHeader("Content-Type", "application/json").
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*NodeBalancer).fixDates(), nil
    +}
    +
    +// UpdateNodeBalancer updates the NodeBalancer with the specified id
    +func (c *Client) UpdateNodeBalancer(ctx context.Context, id int, updateOpts NodeBalancerUpdateOptions) (*NodeBalancer, error) {
    +	var body string
    +	e, err := c.NodeBalancers.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	req := c.R(ctx).SetResult(&NodeBalancer{})
    +
    +	if bodyData, err := json.Marshal(updateOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*NodeBalancer).fixDates(), nil
    +}
    +
    +// DeleteNodeBalancer deletes the NodeBalancer with the specified id
    +func (c *Client) DeleteNodeBalancer(ctx context.Context, id int) error {
    +	e, err := c.NodeBalancers.Endpoint()
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
    +
    +	return err
    +}
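
A sketch of creating a NodeBalancer with the helpers above, assuming an authenticated client; the label, region, and throttle values are placeholders:

package example // hypothetical package, not part of the vendored tree

import (
	"context"
	"log"

	"github.com/linode/linodego"
)

// createBalancer creates a NodeBalancer with placeholder settings and returns it.
func createBalancer(ctx context.Context, c *linodego.Client) *linodego.NodeBalancer {
	label := "example-balancer"
	throttle := 5 // connections per second; 0 disables throttling
	nb, err := c.CreateNodeBalancer(ctx, linodego.NodeBalancerCreateOptions{
		Label:              &label,
		Region:             "us-east", // placeholder region
		ClientConnThrottle: &throttle,
	})
	if err != nil {
		log.Fatal(err)
	}
	return nb
}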
    diff --git a/vendor/github.com/linode/linodego/nodebalancer_config_nodes.go b/vendor/github.com/linode/linodego/nodebalancer_config_nodes.go
    new file mode 100644
    index 000000000000..e1ca55cb54ab
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/nodebalancer_config_nodes.go
    @@ -0,0 +1,186 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +)
    +
    +// NodeBalancerNode objects represent a backend that can accept traffic for a NodeBalancer Config
    +type NodeBalancerNode struct {
    +	ID             int      `json:"id"`
    +	Address        string   `json:"address"`
    +	Label          string   `json:"label"`
    +	Status         string   `json:"status"`
    +	Weight         int      `json:"weight"`
    +	Mode           NodeMode `json:"mode"`
    +	ConfigID       int      `json:"config_id"`
    +	NodeBalancerID int      `json:"nodebalancer_id"`
    +}
    +
    +// NodeMode is the mode a NodeBalancer should use when sending traffic to a NodeBalancer Node
    +type NodeMode string
    +
    +var (
    +	// ModeAccept is the NodeMode indicating a NodeBalancer Node is accepting traffic
    +	ModeAccept NodeMode = "accept"
    +
    +	// ModeReject is the NodeMode indicating a NodeBalancer Node is not receiving traffic
    +	ModeReject NodeMode = "reject"
    +
    +	// ModeDrain is the NodeMode indicating a NodeBalancer Node is not receiving new traffic, but may continue receiving traffic from pinned connections
    +	ModeDrain NodeMode = "drain"
    +)
    +
    +// NodeBalancerNodeCreateOptions fields are those accepted by CreateNodeBalancerNode
    +type NodeBalancerNodeCreateOptions struct {
    +	Address string   `json:"address"`
    +	Label   string   `json:"label"`
    +	Weight  int      `json:"weight,omitempty"`
    +	Mode    NodeMode `json:"mode,omitempty"`
    +}
    +
    +// NodeBalancerNodeUpdateOptions fields are those accepted by UpdateNodeBalancerNode
    +type NodeBalancerNodeUpdateOptions struct {
    +	Address string   `json:"address,omitempty"`
    +	Label   string   `json:"label,omitempty"`
    +	Weight  int      `json:"weight,omitempty"`
    +	Mode    NodeMode `json:"mode,omitempty"`
    +}
    +
    +// GetCreateOptions converts a NodeBalancerNode to NodeBalancerNodeCreateOptions for use in CreateNodeBalancerNode
    +func (i NodeBalancerNode) GetCreateOptions() NodeBalancerNodeCreateOptions {
    +	return NodeBalancerNodeCreateOptions{
    +		Address: i.Address,
    +		Label:   i.Label,
    +		Weight:  i.Weight,
    +		Mode:    i.Mode,
    +	}
    +}
    +
    +// GetUpdateOptions converts a NodeBalancerNode to NodeBalancerNodeUpdateOptions for use in UpdateNodeBalancerNode
    +func (i NodeBalancerNode) GetUpdateOptions() NodeBalancerNodeUpdateOptions {
    +	return NodeBalancerNodeUpdateOptions{
    +		Address: i.Address,
    +		Label:   i.Label,
    +		Weight:  i.Weight,
    +		Mode:    i.Mode,
    +	}
    +}
    +
    +// NodeBalancerNodesPagedResponse represents a paginated NodeBalancerNode API response
    +type NodeBalancerNodesPagedResponse struct {
    +	*PageOptions
    +	Data []NodeBalancerNode `json:"data"`
    +}
    +
+// endpointWithTwoIDs gets the endpoint URL for NodeBalancerNode
    +func (NodeBalancerNodesPagedResponse) endpointWithTwoIDs(c *Client, nodebalancerID int, configID int) string {
    +	endpoint, err := c.NodeBalancerNodes.endpointWithID(nodebalancerID, configID)
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends NodeBalancerNodes when processing paginated NodeBalancerNode responses
    +func (resp *NodeBalancerNodesPagedResponse) appendData(r *NodeBalancerNodesPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListNodeBalancerNodes lists NodeBalancerNodes
    +func (c *Client) ListNodeBalancerNodes(ctx context.Context, nodebalancerID int, configID int, opts *ListOptions) ([]NodeBalancerNode, error) {
    +	response := NodeBalancerNodesPagedResponse{}
    +	err := c.listHelperWithTwoIDs(ctx, &response, nodebalancerID, configID, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (i *NodeBalancerNode) fixDates() *NodeBalancerNode {
    +	return i
    +}
    +
+// GetNodeBalancerNode gets the NodeBalancerNode with the provided ID
    +func (c *Client) GetNodeBalancerNode(ctx context.Context, nodebalancerID int, configID int, nodeID int) (*NodeBalancerNode, error) {
    +	e, err := c.NodeBalancerNodes.endpointWithID(nodebalancerID, configID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, nodeID)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&NodeBalancerNode{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*NodeBalancerNode).fixDates(), nil
    +}
    +
    +// CreateNodeBalancerNode creates a NodeBalancerNode
    +func (c *Client) CreateNodeBalancerNode(ctx context.Context, nodebalancerID int, configID int, createOpts NodeBalancerNodeCreateOptions) (*NodeBalancerNode, error) {
    +	var body string
    +	e, err := c.NodeBalancerNodes.endpointWithID(nodebalancerID, configID)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&NodeBalancerNode{})
    +
    +	if bodyData, err := json.Marshal(createOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*NodeBalancerNode).fixDates(), nil
    +}
    +
    +// UpdateNodeBalancerNode updates the NodeBalancerNode with the specified id
    +func (c *Client) UpdateNodeBalancerNode(ctx context.Context, nodebalancerID int, configID int, nodeID int, updateOpts NodeBalancerNodeUpdateOptions) (*NodeBalancerNode, error) {
    +	var body string
    +	e, err := c.NodeBalancerNodes.endpointWithID(nodebalancerID, configID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, nodeID)
    +
    +	req := c.R(ctx).SetResult(&NodeBalancerNode{})
    +
    +	if bodyData, err := json.Marshal(updateOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*NodeBalancerNode).fixDates(), nil
    +}
    +
    +// DeleteNodeBalancerNode deletes the NodeBalancerNode with the specified id
    +func (c *Client) DeleteNodeBalancerNode(ctx context.Context, nodebalancerID int, configID int, nodeID int) error {
    +	e, err := c.NodeBalancerNodes.endpointWithID(nodebalancerID, configID)
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, nodeID)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
    +	return err
    +}
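
A sketch of attaching a backend node to an existing config with the helpers above, assuming an authenticated client and existing NodeBalancer/config IDs; the backend address and label are placeholders:

package example // hypothetical package, not part of the vendored tree

import (
	"context"
	"log"

	"github.com/linode/linodego"
)

// attachBackend registers a backend node on an existing NodeBalancer config.
func attachBackend(ctx context.Context, c *linodego.Client, nodebalancerID, configID int) {
	node, err := c.CreateNodeBalancerNode(ctx, nodebalancerID, configID, linodego.NodeBalancerNodeCreateOptions{
		Address: "192.168.0.10:80", // placeholder backend address
		Label:   "backend-1",
		Weight:  100,
		Mode:    linodego.ModeAccept,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("node %d attached with mode %s", node.ID, node.Mode)
}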
    diff --git a/vendor/github.com/linode/linodego/nodebalancer_configs.go b/vendor/github.com/linode/linodego/nodebalancer_configs.go
    new file mode 100644
    index 000000000000..aa87019c5827
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/nodebalancer_configs.go
    @@ -0,0 +1,334 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +)
    +
    +// NodeBalancerConfig objects allow a NodeBalancer to accept traffic on a new port
    +type NodeBalancerConfig struct {
    +	ID             int                     `json:"id"`
    +	Port           int                     `json:"port"`
    +	Protocol       ConfigProtocol          `json:"protocol"`
    +	Algorithm      ConfigAlgorithm         `json:"algorithm"`
    +	Stickiness     ConfigStickiness        `json:"stickiness"`
    +	Check          ConfigCheck             `json:"check"`
    +	CheckInterval  int                     `json:"check_interval"`
    +	CheckAttempts  int                     `json:"check_attempts"`
    +	CheckPath      string                  `json:"check_path"`
    +	CheckBody      string                  `json:"check_body"`
    +	CheckPassive   bool                    `json:"check_passive"`
    +	CheckTimeout   int                     `json:"check_timeout"`
    +	CipherSuite    ConfigCipher            `json:"cipher_suite"`
    +	NodeBalancerID int                     `json:"nodebalancer_id"`
    +	SSLCommonName  string                  `json:"ssl_commonname"`
    +	SSLFingerprint string                  `json:"ssl_fingerprint"`
    +	SSLCert        string                  `json:"ssl_cert"`
    +	SSLKey         string                  `json:"ssl_key"`
    +	NodesStatus    *NodeBalancerNodeStatus `json:"nodes_status"`
    +}
    +
    +// ConfigAlgorithm constants start with Algorithm and include Linode API NodeBalancer Config Algorithms
    +type ConfigAlgorithm string
    +
    +// ConfigAlgorithm constants reflect the NodeBalancer Config Algorithm
    +const (
    +	AlgorithmRoundRobin ConfigAlgorithm = "roundrobin"
    +	AlgorithmLeastConn  ConfigAlgorithm = "leastconn"
    +	AlgorithmSource     ConfigAlgorithm = "source"
    +)
    +
    +// ConfigStickiness constants start with Stickiness and include Linode API NodeBalancer Config Stickiness
    +type ConfigStickiness string
    +
    +// ConfigStickiness constants reflect the node stickiness method for a NodeBalancer Config
    +const (
    +	StickinessNone       ConfigStickiness = "none"
    +	StickinessTable      ConfigStickiness = "table"
    +	StickinessHTTPCookie ConfigStickiness = "http_cookie"
    +)
    +
    +// ConfigCheck constants start with Check and include Linode API NodeBalancer Config Check methods
    +type ConfigCheck string
    +
    +// ConfigCheck constants reflect the node health status checking method for a NodeBalancer Config
    +const (
    +	CheckNone       ConfigCheck = "none"
    +	CheckConnection ConfigCheck = "connection"
    +	CheckHTTP       ConfigCheck = "http"
    +	CheckHTTPBody   ConfigCheck = "http_body"
    +)
    +
    +// ConfigProtocol constants start with Protocol and include Linode API Nodebalancer Config protocols
    +type ConfigProtocol string
    +
    +// ConfigProtocol constants reflect the protocol used by a NodeBalancer Config
    +const (
    +	ProtocolHTTP  ConfigProtocol = "http"
    +	ProtocolHTTPS ConfigProtocol = "https"
    +	ProtocolTCP   ConfigProtocol = "tcp"
    +)
    +
    +// ConfigCipher constants start with Cipher and include Linode API NodeBalancer Config Cipher values
    +type ConfigCipher string
    +
    +// ConfigCipher constants reflect the preferred cipher set for a NodeBalancer Config
    +const (
    +	CipherRecommended ConfigCipher = "recommended"
    +	CipherLegacy      ConfigCipher = "legacy"
    +)
    +
    +// NodeBalancerNodeStatus represents the total number of nodes whose status is Up or Down
    +type NodeBalancerNodeStatus struct {
    +	Up   int `json:"up"`
    +	Down int `json:"down"`
    +}
    +
    +// NodeBalancerConfigCreateOptions are permitted by CreateNodeBalancerConfig
    +type NodeBalancerConfigCreateOptions struct {
    +	Port          int                             `json:"port"`
    +	Protocol      ConfigProtocol                  `json:"protocol,omitempty"`
    +	Algorithm     ConfigAlgorithm                 `json:"algorithm,omitempty"`
    +	Stickiness    ConfigStickiness                `json:"stickiness,omitempty"`
    +	Check         ConfigCheck                     `json:"check,omitempty"`
    +	CheckInterval int                             `json:"check_interval,omitempty"`
    +	CheckAttempts int                             `json:"check_attempts,omitempty"`
    +	CheckPath     string                          `json:"check_path,omitempty"`
    +	CheckBody     string                          `json:"check_body,omitempty"`
    +	CheckPassive  *bool                           `json:"check_passive,omitempty"`
    +	CheckTimeout  int                             `json:"check_timeout,omitempty"`
    +	CipherSuite   ConfigCipher                    `json:"cipher_suite,omitempty"`
    +	SSLCert       string                          `json:"ssl_cert,omitempty"`
    +	SSLKey        string                          `json:"ssl_key,omitempty"`
    +	Nodes         []NodeBalancerNodeCreateOptions `json:"nodes,omitempty"`
    +}
    +
    +// NodeBalancerConfigRebuildOptions used by RebuildNodeBalancerConfig
    +type NodeBalancerConfigRebuildOptions struct {
    +	Port          int                             `json:"port"`
    +	Protocol      ConfigProtocol                  `json:"protocol,omitempty"`
    +	Algorithm     ConfigAlgorithm                 `json:"algorithm,omitempty"`
    +	Stickiness    ConfigStickiness                `json:"stickiness,omitempty"`
    +	Check         ConfigCheck                     `json:"check,omitempty"`
    +	CheckInterval int                             `json:"check_interval,omitempty"`
    +	CheckAttempts int                             `json:"check_attempts,omitempty"`
    +	CheckPath     string                          `json:"check_path,omitempty"`
    +	CheckBody     string                          `json:"check_body,omitempty"`
    +	CheckPassive  *bool                           `json:"check_passive,omitempty"`
    +	CheckTimeout  int                             `json:"check_timeout,omitempty"`
    +	CipherSuite   ConfigCipher                    `json:"cipher_suite,omitempty"`
    +	SSLCert       string                          `json:"ssl_cert,omitempty"`
    +	SSLKey        string                          `json:"ssl_key,omitempty"`
    +	Nodes         []NodeBalancerNodeCreateOptions `json:"nodes"`
    +}
    +
    +// NodeBalancerConfigUpdateOptions are permitted by UpdateNodeBalancerConfig
    +type NodeBalancerConfigUpdateOptions NodeBalancerConfigCreateOptions
    +
    +// GetCreateOptions converts a NodeBalancerConfig to NodeBalancerConfigCreateOptions for use in CreateNodeBalancerConfig
    +func (i NodeBalancerConfig) GetCreateOptions() NodeBalancerConfigCreateOptions {
    +	return NodeBalancerConfigCreateOptions{
    +		Port:          i.Port,
    +		Protocol:      i.Protocol,
    +		Algorithm:     i.Algorithm,
    +		Stickiness:    i.Stickiness,
    +		Check:         i.Check,
    +		CheckInterval: i.CheckInterval,
    +		CheckAttempts: i.CheckAttempts,
    +		CheckTimeout:  i.CheckTimeout,
    +		CheckPath:     i.CheckPath,
    +		CheckBody:     i.CheckBody,
    +		CheckPassive:  copyBool(&i.CheckPassive),
    +		CipherSuite:   i.CipherSuite,
    +		SSLCert:       i.SSLCert,
    +		SSLKey:        i.SSLKey,
    +	}
    +}
    +
    +// GetUpdateOptions converts a NodeBalancerConfig to NodeBalancerConfigUpdateOptions for use in UpdateNodeBalancerConfig
    +func (i NodeBalancerConfig) GetUpdateOptions() NodeBalancerConfigUpdateOptions {
    +	return NodeBalancerConfigUpdateOptions{
    +		Port:          i.Port,
    +		Protocol:      i.Protocol,
    +		Algorithm:     i.Algorithm,
    +		Stickiness:    i.Stickiness,
    +		Check:         i.Check,
    +		CheckInterval: i.CheckInterval,
    +		CheckAttempts: i.CheckAttempts,
    +		CheckPath:     i.CheckPath,
    +		CheckBody:     i.CheckBody,
    +		CheckPassive:  copyBool(&i.CheckPassive),
    +		CheckTimeout:  i.CheckTimeout,
    +		CipherSuite:   i.CipherSuite,
    +		SSLCert:       i.SSLCert,
    +		SSLKey:        i.SSLKey,
    +	}
    +}
    +
    +// GetRebuildOptions converts a NodeBalancerConfig to NodeBalancerConfigRebuildOptions for use in RebuildNodeBalancerConfig
    +func (i NodeBalancerConfig) GetRebuildOptions() NodeBalancerConfigRebuildOptions {
    +	return NodeBalancerConfigRebuildOptions{
    +		Port:          i.Port,
    +		Protocol:      i.Protocol,
    +		Algorithm:     i.Algorithm,
    +		Stickiness:    i.Stickiness,
    +		Check:         i.Check,
    +		CheckInterval: i.CheckInterval,
    +		CheckAttempts: i.CheckAttempts,
    +		CheckTimeout:  i.CheckTimeout,
    +		CheckPath:     i.CheckPath,
    +		CheckBody:     i.CheckBody,
    +		CheckPassive:  copyBool(&i.CheckPassive),
    +		CipherSuite:   i.CipherSuite,
    +		SSLCert:       i.SSLCert,
    +		SSLKey:        i.SSLKey,
    +		Nodes:         make([]NodeBalancerNodeCreateOptions, 0),
    +	}
    +}
    +
    +// NodeBalancerConfigsPagedResponse represents a paginated NodeBalancerConfig API response
    +type NodeBalancerConfigsPagedResponse struct {
    +	*PageOptions
    +	Data []NodeBalancerConfig `json:"data"`
    +}
    +
    +// endpointWithID gets the endpoint URL for NodeBalancerConfig
    +func (NodeBalancerConfigsPagedResponse) endpointWithID(c *Client, id int) string {
    +	endpoint, err := c.NodeBalancerConfigs.endpointWithID(id)
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends NodeBalancerConfigs when processing paginated NodeBalancerConfig responses
    +func (resp *NodeBalancerConfigsPagedResponse) appendData(r *NodeBalancerConfigsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListNodeBalancerConfigs lists NodeBalancerConfigs
    +func (c *Client) ListNodeBalancerConfigs(ctx context.Context, nodebalancerID int, opts *ListOptions) ([]NodeBalancerConfig, error) {
    +	response := NodeBalancerConfigsPagedResponse{}
    +	err := c.listHelperWithID(ctx, &response, nodebalancerID, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (i *NodeBalancerConfig) fixDates() *NodeBalancerConfig {
    +	return i
    +}
    +
+// GetNodeBalancerConfig gets the NodeBalancerConfig with the provided ID
    +func (c *Client) GetNodeBalancerConfig(ctx context.Context, nodebalancerID int, configID int) (*NodeBalancerConfig, error) {
    +	e, err := c.NodeBalancerConfigs.endpointWithID(nodebalancerID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, configID)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&NodeBalancerConfig{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*NodeBalancerConfig).fixDates(), nil
    +}
    +
    +// CreateNodeBalancerConfig creates a NodeBalancerConfig
    +func (c *Client) CreateNodeBalancerConfig(ctx context.Context, nodebalancerID int, nodebalancerConfig NodeBalancerConfigCreateOptions) (*NodeBalancerConfig, error) {
    +	var body string
    +	e, err := c.NodeBalancerConfigs.endpointWithID(nodebalancerID)
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&NodeBalancerConfig{})
    +
    +	if bodyData, err := json.Marshal(nodebalancerConfig); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetHeader("Content-Type", "application/json").
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*NodeBalancerConfig).fixDates(), nil
    +}
    +
    +// UpdateNodeBalancerConfig updates the NodeBalancerConfig with the specified id
    +func (c *Client) UpdateNodeBalancerConfig(ctx context.Context, nodebalancerID int, configID int, updateOpts NodeBalancerConfigUpdateOptions) (*NodeBalancerConfig, error) {
    +	var body string
    +	e, err := c.NodeBalancerConfigs.endpointWithID(nodebalancerID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, configID)
    +
    +	req := c.R(ctx).SetResult(&NodeBalancerConfig{})
    +
    +	if bodyData, err := json.Marshal(updateOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*NodeBalancerConfig).fixDates(), nil
    +}
    +
    +// DeleteNodeBalancerConfig deletes the NodeBalancerConfig with the specified id
    +func (c *Client) DeleteNodeBalancerConfig(ctx context.Context, nodebalancerID int, configID int) error {
    +	e, err := c.NodeBalancerConfigs.endpointWithID(nodebalancerID)
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, configID)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
    +	return err
    +}
    +
+// RebuildNodeBalancerConfig rebuilds the NodeBalancerConfig with the specified id
    +func (c *Client) RebuildNodeBalancerConfig(ctx context.Context, nodeBalancerID int, configID int, rebuildOpts NodeBalancerConfigRebuildOptions) (*NodeBalancerConfig, error) {
    +	var body string
    +	e, err := c.NodeBalancerConfigs.endpointWithID(nodeBalancerID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d/rebuild", e, configID)
    +
    +	req := c.R(ctx).SetResult(&NodeBalancerConfig{})
    +
    +	if bodyData, err := json.Marshal(rebuildOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*NodeBalancerConfig).fixDates(), nil
    +}
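
A sketch of adding an HTTPS port configuration with the helpers above, assuming an authenticated client and an existing NodeBalancer ID; the certificate, key, and health-check path are placeholders:

package example // hypothetical package, not part of the vendored tree

import (
	"context"
	"log"

	"github.com/linode/linodego"
)

// addHTTPSPort creates a round-robin HTTPS config on port 443 with an HTTP health check.
func addHTTPSPort(ctx context.Context, c *linodego.Client, nodebalancerID int) {
	cfg, err := c.CreateNodeBalancerConfig(ctx, nodebalancerID, linodego.NodeBalancerConfigCreateOptions{
		Port:       443,
		Protocol:   linodego.ProtocolHTTPS,
		Algorithm:  linodego.AlgorithmRoundRobin,
		Stickiness: linodego.StickinessHTTPCookie,
		Check:      linodego.CheckHTTP,
		CheckPath:  "/healthz", // placeholder health-check path
		SSLCert:    "-----BEGIN CERTIFICATE-----\n...placeholder...\n-----END CERTIFICATE-----",
		SSLKey:     "-----BEGIN PRIVATE KEY-----\n...placeholder...\n-----END PRIVATE KEY-----",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("config %d listening on port %d", cfg.ID, cfg.Port)
}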
    diff --git a/vendor/github.com/linode/linodego/pagination.go b/vendor/github.com/linode/linodego/pagination.go
    new file mode 100644
    index 000000000000..bfe6de3b7d54
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/pagination.go
    @@ -0,0 +1,430 @@
    +package linodego
    +
    +/**
    + * Pagination and Filtering types and helpers
    + */
    +
    +import (
    +	"context"
    +	"fmt"
    +	"log"
    +	"strconv"
    +
    +	"gopkg.in/resty.v1"
    +)
    +
    +// PageOptions are the pagination parameters for List endpoints
    +type PageOptions struct {
    +	Page    int `url:"page,omitempty" json:"page"`
    +	Pages   int `url:"pages,omitempty" json:"pages"`
    +	Results int `url:"results,omitempty" json:"results"`
    +}
    +
    +// ListOptions are the pagination and filtering (TODO) parameters for endpoints
    +type ListOptions struct {
    +	*PageOptions
    +	Filter string
    +}
    +
+// NewListOptions simplifies construction of ListOptions using only
+// the two writable properties, Page and Filter
    +func NewListOptions(Page int, Filter string) *ListOptions {
    +	return &ListOptions{PageOptions: &PageOptions{Page: Page}, Filter: Filter}
    +
    +}
    +
    +// listHelper abstracts fetching and pagination for GET endpoints that
    +// do not require any Ids (top level endpoints).
+// When opts (or opts.Page) is nil, all pages will be fetched and
+// returned in a single (endpoint-specific) PagedResponse.
+// opts.Results and opts.Pages will be updated from the API response.
    +func (c *Client) listHelper(ctx context.Context, i interface{}, opts *ListOptions) error {
    +	req := c.R(ctx)
    +	if opts != nil && opts.PageOptions != nil && opts.Page > 0 {
    +		req.SetQueryParam("page", strconv.Itoa(opts.Page))
    +	}
    +
    +	var (
    +		err     error
    +		pages   int
    +		results int
    +		r       *resty.Response
    +	)
    +
    +	if opts != nil && len(opts.Filter) > 0 {
    +		req.SetHeader("X-Filter", opts.Filter)
    +	}
    +
    +	switch v := i.(type) {
    +	case *LinodeKernelsPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(LinodeKernelsPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*LinodeKernelsPagedResponse).Pages
    +			results = r.Result().(*LinodeKernelsPagedResponse).Results
    +			v.appendData(r.Result().(*LinodeKernelsPagedResponse))
    +		}
    +	case *LinodeTypesPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(LinodeTypesPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*LinodeTypesPagedResponse).Pages
    +			results = r.Result().(*LinodeTypesPagedResponse).Results
    +			v.appendData(r.Result().(*LinodeTypesPagedResponse))
    +		}
    +	case *ImagesPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(ImagesPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*ImagesPagedResponse).Pages
    +			results = r.Result().(*ImagesPagedResponse).Results
    +			v.appendData(r.Result().(*ImagesPagedResponse))
    +		}
    +	case *StackscriptsPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(StackscriptsPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*StackscriptsPagedResponse).Pages
    +			results = r.Result().(*StackscriptsPagedResponse).Results
    +			v.appendData(r.Result().(*StackscriptsPagedResponse))
    +		}
    +	case *InstancesPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(InstancesPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*InstancesPagedResponse).Pages
    +			results = r.Result().(*InstancesPagedResponse).Results
    +			v.appendData(r.Result().(*InstancesPagedResponse))
    +		}
    +	case *RegionsPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(RegionsPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*RegionsPagedResponse).Pages
    +			results = r.Result().(*RegionsPagedResponse).Results
    +			v.appendData(r.Result().(*RegionsPagedResponse))
    +		}
    +	case *VolumesPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(VolumesPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*VolumesPagedResponse).Pages
    +			results = r.Result().(*VolumesPagedResponse).Results
    +			v.appendData(r.Result().(*VolumesPagedResponse))
    +		}
    +	case *DomainsPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(DomainsPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			response, ok := r.Result().(*DomainsPagedResponse)
    +			if !ok {
    +				return fmt.Errorf("Response is not a *DomainsPagedResponse")
    +			}
    +			pages = response.Pages
    +			results = response.Results
    +			v.appendData(response)
    +		}
    +	case *EventsPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(EventsPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*EventsPagedResponse).Pages
    +			results = r.Result().(*EventsPagedResponse).Results
    +			v.appendData(r.Result().(*EventsPagedResponse))
    +		}
    +	case *LongviewSubscriptionsPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(LongviewSubscriptionsPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*LongviewSubscriptionsPagedResponse).Pages
    +			results = r.Result().(*LongviewSubscriptionsPagedResponse).Results
    +			v.appendData(r.Result().(*LongviewSubscriptionsPagedResponse))
    +		}
    +	case *LongviewClientsPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(LongviewClientsPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*LongviewClientsPagedResponse).Pages
    +			results = r.Result().(*LongviewClientsPagedResponse).Results
    +			v.appendData(r.Result().(*LongviewClientsPagedResponse))
    +		}
    +	case *IPAddressesPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(IPAddressesPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*IPAddressesPagedResponse).Pages
    +			results = r.Result().(*IPAddressesPagedResponse).Results
    +			v.appendData(r.Result().(*IPAddressesPagedResponse))
    +		}
    +	case *IPv6PoolsPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(IPv6PoolsPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*IPv6PoolsPagedResponse).Pages
    +			results = r.Result().(*IPv6PoolsPagedResponse).Results
    +			v.appendData(r.Result().(*IPv6PoolsPagedResponse))
    +		}
    +	case *IPv6RangesPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(IPv6RangesPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*IPv6RangesPagedResponse).Pages
    +			results = r.Result().(*IPv6RangesPagedResponse).Results
    +			v.appendData(r.Result().(*IPv6RangesPagedResponse))
    +			// @TODO consolidate this type with IPv6PoolsPagedResponse?
    +		}
    +	case *SSHKeysPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(SSHKeysPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			response, ok := r.Result().(*SSHKeysPagedResponse)
    +			if !ok {
    +				return fmt.Errorf("Response is not a *SSHKeysPagedResponse")
    +			}
    +			pages = response.Pages
    +			results = response.Results
    +			v.appendData(response)
    +		}
    +	case *TicketsPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(TicketsPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*TicketsPagedResponse).Pages
    +			results = r.Result().(*TicketsPagedResponse).Results
    +			v.appendData(r.Result().(*TicketsPagedResponse))
    +		}
    +	case *InvoicesPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(InvoicesPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*InvoicesPagedResponse).Pages
    +			results = r.Result().(*InvoicesPagedResponse).Results
    +			v.appendData(r.Result().(*InvoicesPagedResponse))
    +		}
    +	case *NotificationsPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(NotificationsPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*NotificationsPagedResponse).Pages
    +			results = r.Result().(*NotificationsPagedResponse).Results
    +			v.appendData(r.Result().(*NotificationsPagedResponse))
    +		}
    +	case *NodeBalancersPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(NodeBalancersPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*NodeBalancersPagedResponse).Pages
    +			results = r.Result().(*NodeBalancersPagedResponse).Results
    +			v.appendData(r.Result().(*NodeBalancersPagedResponse))
    +		}
    +	case *TagsPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(TagsPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*TagsPagedResponse).Pages
    +			results = r.Result().(*TagsPagedResponse).Results
    +			v.appendData(r.Result().(*TagsPagedResponse))
    +		}
    +	case *TokensPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(TokensPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*TokensPagedResponse).Pages
    +			results = r.Result().(*TokensPagedResponse).Results
    +			v.appendData(r.Result().(*TokensPagedResponse))
    +		}
    +	case *UsersPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(UsersPagedResponse{}).Get(v.endpoint(c))); err == nil {
    +			pages = r.Result().(*UsersPagedResponse).Pages
    +			results = r.Result().(*UsersPagedResponse).Results
    +			v.appendData(r.Result().(*UsersPagedResponse))
    +		}
    +	/**
    +	case AccountOauthClientsPagedResponse:
    +	case AccountPaymentsPagedResponse:
    +	case ProfileAppsPagedResponse:
    +	case ProfileWhitelistPagedResponse:
    +	case ManagedContactsPagedResponse:
    +	case ManagedCredentialsPagedResponse:
    +	case ManagedIssuesPagedResponse:
    +	case ManagedLinodeSettingsPagedResponse:
    +	case ManagedServicesPagedResponse:
    +	**/
    +	default:
+		log.Fatalf("Unknown listHelper interface{} %T used", i)
    +	}
    +
    +	if err != nil {
    +		return err
    +	}
    +
    +	if opts == nil {
    +		for page := 2; page <= pages; page = page + 1 {
    +			if err := c.listHelper(ctx, i, &ListOptions{PageOptions: &PageOptions{Page: page}}); err != nil {
    +				return err
    +			}
    +		}
    +	} else {
    +		if opts.PageOptions == nil {
    +			opts.PageOptions = &PageOptions{}
    +		}
    +
    +		if opts.Page == 0 {
    +			for page := 2; page <= pages; page = page + 1 {
    +				opts.Page = page
    +				if err := c.listHelper(ctx, i, opts); err != nil {
    +					return err
    +				}
    +			}
    +		}
    +		opts.Results = results
    +		opts.Pages = pages
    +	}
    +
    +	return nil
    +}
    +
+// listHelperWithID abstracts fetching and pagination for GET endpoints that
+// require an ID (second-level endpoints).
+// When opts is nil (or opts.Page is unset), all pages will be fetched and
+// returned in a single (endpoint-specific) PagedResponse.
+// opts.Results and opts.Pages will be updated from the API response.
    +func (c *Client) listHelperWithID(ctx context.Context, i interface{}, idRaw interface{}, opts *ListOptions) error {
    +	req := c.R(ctx)
    +	if opts != nil && opts.Page > 0 {
    +		req.SetQueryParam("page", strconv.Itoa(opts.Page))
    +	}
    +
    +	var (
    +		err     error
    +		pages   int
    +		results int
    +		r       *resty.Response
    +	)
    +
    +	id, _ := idRaw.(int)
    +
    +	if opts != nil && len(opts.Filter) > 0 {
    +		req.SetHeader("X-Filter", opts.Filter)
    +	}
    +
    +	switch v := i.(type) {
    +	case *InvoiceItemsPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(InvoiceItemsPagedResponse{}).Get(v.endpointWithID(c, id))); err == nil {
    +			pages = r.Result().(*InvoiceItemsPagedResponse).Pages
    +			results = r.Result().(*InvoiceItemsPagedResponse).Results
    +			v.appendData(r.Result().(*InvoiceItemsPagedResponse))
    +		}
    +	case *DomainRecordsPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(DomainRecordsPagedResponse{}).Get(v.endpointWithID(c, id))); err == nil {
    +			response, ok := r.Result().(*DomainRecordsPagedResponse)
    +			if !ok {
    +				return fmt.Errorf("Response is not a *DomainRecordsPagedResponse")
    +			}
    +			pages = response.Pages
    +			results = response.Results
    +			v.appendData(response)
    +		}
    +	case *InstanceConfigsPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(InstanceConfigsPagedResponse{}).Get(v.endpointWithID(c, id))); err == nil {
    +			pages = r.Result().(*InstanceConfigsPagedResponse).Pages
    +			results = r.Result().(*InstanceConfigsPagedResponse).Results
    +			v.appendData(r.Result().(*InstanceConfigsPagedResponse))
    +		}
    +	case *InstanceDisksPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(InstanceDisksPagedResponse{}).Get(v.endpointWithID(c, id))); err == nil {
    +			pages = r.Result().(*InstanceDisksPagedResponse).Pages
    +			results = r.Result().(*InstanceDisksPagedResponse).Results
    +			v.appendData(r.Result().(*InstanceDisksPagedResponse))
    +		}
    +	case *NodeBalancerConfigsPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(NodeBalancerConfigsPagedResponse{}).Get(v.endpointWithID(c, id))); err == nil {
    +			pages = r.Result().(*NodeBalancerConfigsPagedResponse).Pages
    +			results = r.Result().(*NodeBalancerConfigsPagedResponse).Results
    +			v.appendData(r.Result().(*NodeBalancerConfigsPagedResponse))
    +		}
    +	case *InstanceVolumesPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(InstanceVolumesPagedResponse{}).Get(v.endpointWithID(c, id))); err == nil {
    +			pages = r.Result().(*InstanceVolumesPagedResponse).Pages
    +			results = r.Result().(*InstanceVolumesPagedResponse).Results
    +			v.appendData(r.Result().(*InstanceVolumesPagedResponse))
    +		}
    +	case *TaggedObjectsPagedResponse:
    +		idStr := idRaw.(string)
    +
    +		if r, err = coupleAPIErrors(req.SetResult(TaggedObjectsPagedResponse{}).Get(v.endpointWithID(c, idStr))); err == nil {
    +			pages = r.Result().(*TaggedObjectsPagedResponse).Pages
    +			results = r.Result().(*TaggedObjectsPagedResponse).Results
    +			v.appendData(r.Result().(*TaggedObjectsPagedResponse))
    +		}
    +	/**
    +	case TicketAttachmentsPagedResponse:
    +		if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {
    +			return NewError(r)
    +		} else if err == nil {
    +			pages = r.Result().(*TicketAttachmentsPagedResponse).Pages
    +			results = r.Result().(*TicketAttachmentsPagedResponse).Results
    +			v.appendData(r.Result().(*TicketAttachmentsPagedResponse))
    +		}
    +	case TicketRepliesPagedResponse:
    +		if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {
    +			return NewError(r)
    +		} else if err == nil {
    +			pages = r.Result().(*TicketRepliesPagedResponse).Pages
    +			results = r.Result().(*TicketRepliesPagedResponse).Results
    +			v.appendData(r.Result().(*TicketRepliesPagedResponse))
    +		}
    +	**/
    +	default:
    +		log.Fatalf("Unknown listHelperWithID interface{} %T used", i)
    +	}
    +
    +	if err != nil {
    +		return err
    +	}
    +
    +	if opts == nil {
    +		for page := 2; page <= pages; page = page + 1 {
    +			if err := c.listHelperWithID(ctx, i, id, &ListOptions{PageOptions: &PageOptions{Page: page}}); err != nil {
    +				return err
    +			}
    +		}
    +	} else {
    +		if opts.PageOptions == nil {
    +			opts.PageOptions = &PageOptions{}
    +		}
    +		if opts.Page == 0 {
    +			for page := 2; page <= pages; page = page + 1 {
    +				opts.Page = page
    +				if err := c.listHelperWithID(ctx, i, id, opts); err != nil {
    +					return err
    +				}
    +			}
    +		}
    +		opts.Results = results
    +		opts.Pages = pages
    +	}
    +
    +	return nil
    +}
    +
+// listHelperWithTwoIDs abstracts fetching and pagination for GET endpoints that
+// require two IDs (third-level endpoints).
+// When opts is nil (or opts.Page is unset), all pages will be fetched and
+// returned in a single (endpoint-specific) PagedResponse.
+// opts.Results and opts.Pages will be updated from the API response.
    +func (c *Client) listHelperWithTwoIDs(ctx context.Context, i interface{}, firstID, secondID int, opts *ListOptions) error {
    +	req := c.R(ctx)
    +	if opts != nil && opts.Page > 0 {
    +		req.SetQueryParam("page", strconv.Itoa(opts.Page))
    +	}
    +
    +	var (
    +		err     error
    +		pages   int
    +		results int
    +		r       *resty.Response
    +	)
    +
    +	if opts != nil && len(opts.Filter) > 0 {
    +		req.SetHeader("X-Filter", opts.Filter)
    +	}
    +
    +	switch v := i.(type) {
    +	case *NodeBalancerNodesPagedResponse:
    +		if r, err = coupleAPIErrors(req.SetResult(NodeBalancerNodesPagedResponse{}).Get(v.endpointWithTwoIDs(c, firstID, secondID))); err == nil {
    +			pages = r.Result().(*NodeBalancerNodesPagedResponse).Pages
    +			results = r.Result().(*NodeBalancerNodesPagedResponse).Results
    +			v.appendData(r.Result().(*NodeBalancerNodesPagedResponse))
    +		}
    +
    +	default:
    +		log.Fatalf("Unknown listHelperWithTwoIDs interface{} %T used", i)
    +	}
    +
    +	if err != nil {
    +		return err
    +	}
    +
    +	if opts == nil {
    +		for page := 2; page <= pages; page = page + 1 {
+			if err := c.listHelperWithTwoIDs(ctx, i, firstID, secondID, &ListOptions{PageOptions: &PageOptions{Page: page}}); err != nil {
    +				return err
    +			}
    +		}
    +	} else {
    +		if opts.PageOptions == nil {
    +			opts.PageOptions = &PageOptions{}
    +		}
    +		if opts.Page == 0 {
    +			for page := 2; page <= pages; page = page + 1 {
    +				opts.Page = page
    +				if err := c.listHelperWithTwoIDs(ctx, i, firstID, secondID, opts); err != nil {
    +					return err
    +				}
    +			}
    +		}
    +		opts.Results = results
    +		opts.Pages = pages
    +	}
    +
    +	return nil
    +}
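The helpers above drive every List call in this package: a nil *ListOptions fetches all pages into one slice, a ListOptions with Page set fetches only that page, and Filter is sent verbatim as the X-Filter header. A minimal usage sketch, assuming a configured *linodego.Client and using ListSSHKeys from later in this patch; the filter JSON is a hypothetical example:

    package example

    import (
        "context"
        "fmt"
        "log"

        "github.com/linode/linodego"
    )

    func demoPagination(ctx context.Context, client *linodego.Client) {
        // A nil options value makes listHelper walk every page.
        all, err := client.ListSSHKeys(ctx, nil)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("all keys:", len(all))

        // Page 1 only, filtered server-side via the X-Filter header.
        opts := linodego.NewListOptions(1, `{"label":"deploy-key"}`)
        page, err := client.ListSSHKeys(ctx, opts)
        if err != nil {
            log.Fatal(err)
        }
        // listHelper writes the page totals back into opts.
        fmt.Printf("page 1: %d keys, %d total across %d pages\n", len(page), opts.Results, opts.Pages)
    }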
    diff --git a/vendor/github.com/linode/linodego/profile.go b/vendor/github.com/linode/linodego/profile.go
    new file mode 100644
    index 000000000000..3d29b2a5246d
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/profile.go
    @@ -0,0 +1,117 @@
    +package linodego
    +
+/*
+ - copy profile_test.go and do the same
+ - when updating Profile structs, use pointers wherever a nullable value would
+   mean something different than the wrapper supplying "" or 0
+ - add "NameOfResource" to client.go, resources.go, pagination.go
+*/
    +
    +import (
    +	"context"
    +	"encoding/json"
    +)
    +
    +// LishAuthMethod constants start with AuthMethod and include Linode API Lish Authentication Methods
    +type LishAuthMethod string
    +
    +// LishAuthMethod constants are the methods of authentication allowed when connecting via Lish
    +const (
    +	AuthMethodPasswordKeys LishAuthMethod = "password_keys"
    +	AuthMethodKeysOnly     LishAuthMethod = "keys_only"
    +	AuthMethodDisabled     LishAuthMethod = "disabled"
    +)
    +
+// ProfileReferrals represents a User's status in the Referral Program
    +type ProfileReferrals struct {
    +	Total     int     `json:"total"`
    +	Completed int     `json:"completed"`
    +	Pending   int     `json:"pending"`
    +	Credit    float64 `json:"credit"`
    +	Code      string  `json:"code"`
    +	URL       string  `json:"url"`
    +}
    +
    +// Profile represents a Profile object
    +type Profile struct {
    +	UID                int              `json:"uid"`
    +	Username           string           `json:"username"`
    +	Email              string           `json:"email"`
    +	Timezone           string           `json:"timezone"`
    +	EmailNotifications bool             `json:"email_notifications"`
    +	IPWhitelistEnabled bool             `json:"ip_whitelist_enabled"`
    +	TwoFactorAuth      bool             `json:"two_factor_auth"`
    +	Restricted         bool             `json:"restricted"`
    +	LishAuthMethod     LishAuthMethod   `json:"lish_auth_method"`
    +	Referrals          ProfileReferrals `json:"referrals"`
    +	AuthorizedKeys     []string         `json:"authorized_keys"`
    +}
    +
    +// ProfileUpdateOptions fields are those accepted by UpdateProfile
    +type ProfileUpdateOptions struct {
    +	Email              string         `json:"email,omitempty"`
    +	Timezone           string         `json:"timezone,omitempty"`
    +	EmailNotifications *bool          `json:"email_notifications,omitempty"`
    +	IPWhitelistEnabled *bool          `json:"ip_whitelist_enabled,omitempty"`
    +	LishAuthMethod     LishAuthMethod `json:"lish_auth_method,omitempty"`
    +	AuthorizedKeys     *[]string      `json:"authorized_keys,omitempty"`
    +	TwoFactorAuth      *bool          `json:"two_factor_auth,omitempty"`
    +	Restricted         *bool          `json:"restricted,omitempty"`
    +}
    +
    +// GetUpdateOptions converts a Profile to ProfileUpdateOptions for use in UpdateProfile
    +func (i Profile) GetUpdateOptions() (o ProfileUpdateOptions) {
    +	o.Email = i.Email
    +	o.Timezone = i.Timezone
    +	o.EmailNotifications = copyBool(&i.EmailNotifications)
    +	o.IPWhitelistEnabled = copyBool(&i.IPWhitelistEnabled)
    +	o.LishAuthMethod = i.LishAuthMethod
    +	authorizedKeys := make([]string, len(i.AuthorizedKeys))
    +	copy(authorizedKeys, i.AuthorizedKeys)
    +	o.AuthorizedKeys = &authorizedKeys
    +	o.TwoFactorAuth = copyBool(&i.TwoFactorAuth)
    +	o.Restricted = copyBool(&i.Restricted)
    +
    +	return
    +}
    +
+// GetProfile returns the Profile of the authenticated user
    +func (c *Client) GetProfile(ctx context.Context) (*Profile, error) {
    +	e, err := c.Profile.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&Profile{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Profile), nil
    +}
    +
+// UpdateProfile updates the authenticated user's Profile
    +func (c *Client) UpdateProfile(ctx context.Context, updateOpts ProfileUpdateOptions) (*Profile, error) {
    +	var body string
    +	e, err := c.Profile.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&Profile{})
    +
    +	if bodyData, err := json.Marshal(updateOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Profile), nil
    +}
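ProfileUpdateOptions uses pointer fields so that explicit false or empty values can still be serialized, and GetUpdateOptions copies the current Profile into that shape. A minimal sketch, assuming a configured *linodego.Client:

    package example

    import (
        "context"

        "github.com/linode/linodego"
    )

    // toggleEmailNotifications reads the current Profile, flips the
    // email_notifications flag through GetUpdateOptions, and writes it back.
    func toggleEmailNotifications(ctx context.Context, client *linodego.Client) (*linodego.Profile, error) {
        p, err := client.GetProfile(ctx)
        if err != nil {
            return nil, err
        }
        opts := p.GetUpdateOptions()
        enabled := !p.EmailNotifications
        opts.EmailNotifications = &enabled // pointer, so an explicit false is sent
        return client.UpdateProfile(ctx, opts)
    }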
    diff --git a/vendor/github.com/linode/linodego/profile_sshkeys.go b/vendor/github.com/linode/linodego/profile_sshkeys.go
    new file mode 100644
    index 000000000000..2ec4d4f71521
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/profile_sshkeys.go
    @@ -0,0 +1,160 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"time"
    +)
    +
+// SSHKey represents an SSHKey object
    +type SSHKey struct {
    +	ID         int        `json:"id"`
    +	Label      string     `json:"label"`
    +	SSHKey     string     `json:"ssh_key"`
    +	CreatedStr string     `json:"created"`
    +	Created    *time.Time `json:"-"`
    +}
    +
    +// SSHKeyCreateOptions fields are those accepted by CreateSSHKey
    +type SSHKeyCreateOptions struct {
    +	Label  string `json:"label"`
    +	SSHKey string `json:"ssh_key"`
    +}
    +
    +// SSHKeyUpdateOptions fields are those accepted by UpdateSSHKey
    +type SSHKeyUpdateOptions struct {
    +	Label string `json:"label"`
    +}
    +
+// GetCreateOptions converts an SSHKey to SSHKeyCreateOptions for use in CreateSSHKey
    +func (i SSHKey) GetCreateOptions() (o SSHKeyCreateOptions) {
    +	o.Label = i.Label
    +	o.SSHKey = i.SSHKey
    +	return
    +}
    +
+// GetUpdateOptions converts an SSHKey to SSHKeyUpdateOptions for use in UpdateSSHKey
    +func (i SSHKey) GetUpdateOptions() (o SSHKeyUpdateOptions) {
    +	o.Label = i.Label
    +	return
    +}
    +
    +// SSHKeysPagedResponse represents a paginated SSHKey API response
    +type SSHKeysPagedResponse struct {
    +	*PageOptions
    +	Data []SSHKey `json:"data"`
    +}
    +
    +// endpoint gets the endpoint URL for SSHKey
    +func (SSHKeysPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.SSHKeys.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends SSHKeys when processing paginated SSHKey responses
    +func (resp *SSHKeysPagedResponse) appendData(r *SSHKeysPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListSSHKeys lists SSHKeys
    +func (c *Client) ListSSHKeys(ctx context.Context, opts *ListOptions) ([]SSHKey, error) {
    +	response := SSHKeysPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (i *SSHKey) fixDates() *SSHKey {
    +	i.Created, _ = parseDates(i.CreatedStr)
    +	return i
    +}
    +
+// GetSSHKey gets the SSHKey with the provided ID
    +func (c *Client) GetSSHKey(ctx context.Context, id int) (*SSHKey, error) {
    +	e, err := c.SSHKeys.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&SSHKey{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*SSHKey).fixDates(), nil
    +}
    +
+// CreateSSHKey creates an SSHKey
    +func (c *Client) CreateSSHKey(ctx context.Context, createOpts SSHKeyCreateOptions) (*SSHKey, error) {
    +	var body string
    +	e, err := c.SSHKeys.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&SSHKey{})
    +
    +	if bodyData, err := json.Marshal(createOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*SSHKey).fixDates(), nil
    +}
    +
    +// UpdateSSHKey updates the SSHKey with the specified id
    +func (c *Client) UpdateSSHKey(ctx context.Context, id int, updateOpts SSHKeyUpdateOptions) (*SSHKey, error) {
    +	var body string
    +	e, err := c.SSHKeys.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	req := c.R(ctx).SetResult(&SSHKey{})
    +
    +	if bodyData, err := json.Marshal(updateOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*SSHKey).fixDates(), nil
    +}
    +
    +// DeleteSSHKey deletes the SSHKey with the specified id
    +func (c *Client) DeleteSSHKey(ctx context.Context, id int) error {
    +	e, err := c.SSHKeys.Endpoint()
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
    +	return err
+}
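A minimal sketch of the SSHKey wrappers above, assuming a configured *linodego.Client; the labels are hypothetical and pubKey is the caller's public key material:

    package example

    import (
        "context"

        "github.com/linode/linodego"
    )

    // addAndRelabelKey uploads a public key, then renames it via GetUpdateOptions.
    func addAndRelabelKey(ctx context.Context, client *linodego.Client, pubKey string) (*linodego.SSHKey, error) {
        key, err := client.CreateSSHKey(ctx, linodego.SSHKeyCreateOptions{
            Label:  "deploy-key", // hypothetical label
            SSHKey: pubKey,
        })
        if err != nil {
            return nil, err
        }
        opts := key.GetUpdateOptions()
        opts.Label = "deploy-key-rotated" // hypothetical new label
        return client.UpdateSSHKey(ctx, key.ID, opts)
    }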
    diff --git a/vendor/github.com/linode/linodego/profile_tokens.go b/vendor/github.com/linode/linodego/profile_tokens.go
    new file mode 100644
    index 000000000000..bd95dfe05b1d
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/profile_tokens.go
    @@ -0,0 +1,195 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"time"
    +)
    +
    +// Token represents a Token object
    +type Token struct {
    +	// This token's unique ID, which can be used to revoke it.
    +	ID int `json:"id"`
    +
    +	// The scopes this token was created with. These define what parts of the Account the token can be used to access. Many command-line tools, such as the Linode CLI, require tokens with access to *. Tokens with more restrictive scopes are generally more secure.
    +	Scopes string `json:"scopes"`
    +
    +	// This token's label. This is for display purposes only, but can be used to more easily track what you're using each token for. (1-100 Characters)
    +	Label string `json:"label"`
    +
    +	// The token used to access the API. When the token is created, the full token is returned here. Otherwise, only the first 16 characters are returned.
    +	Token string `json:"token"`
    +
    +	// The date and time this token was created.
    +	Created    *time.Time `json:"-"`
    +	CreatedStr string     `json:"created"`
    +
    +	// When this token will expire. Personal Access Tokens cannot be renewed, so after this time the token will be completely unusable and a new token will need to be generated. Tokens may be created with "null" as their expiry and will never expire unless revoked.
    +	Expiry    *time.Time `json:"-"`
    +	ExpiryStr string     `json:"expiry"`
    +}
    +
    +// TokenCreateOptions fields are those accepted by CreateToken
    +type TokenCreateOptions struct {
    +	// The scopes this token was created with. These define what parts of the Account the token can be used to access. Many command-line tools, such as the Linode CLI, require tokens with access to *. Tokens with more restrictive scopes are generally more secure.
    +	Scopes string `json:"scopes"`
    +
    +	// This token's label. This is for display purposes only, but can be used to more easily track what you're using each token for. (1-100 Characters)
    +	Label string `json:"label"`
    +
    +	// When this token will expire. Personal Access Tokens cannot be renewed, so after this time the token will be completely unusable and a new token will need to be generated. Tokens may be created with "null" as their expiry and will never expire unless revoked.
    +	Expiry *time.Time `json:"expiry"`
    +}
    +
    +// TokenUpdateOptions fields are those accepted by UpdateToken
    +type TokenUpdateOptions struct {
    +	// This token's label. This is for display purposes only, but can be used to more easily track what you're using each token for. (1-100 Characters)
    +	Label string `json:"label"`
    +}
    +
    +// GetCreateOptions converts a Token to TokenCreateOptions for use in CreateToken
    +func (i Token) GetCreateOptions() (o TokenCreateOptions) {
    +	o.Label = i.Label
    +	o.Expiry = copyTime(i.Expiry)
    +	o.Scopes = i.Scopes
    +	return
    +}
    +
    +// GetUpdateOptions converts a Token to TokenUpdateOptions for use in UpdateToken
    +func (i Token) GetUpdateOptions() (o TokenUpdateOptions) {
    +	o.Label = i.Label
    +	return
    +}
    +
    +// TokensPagedResponse represents a paginated Token API response
    +type TokensPagedResponse struct {
    +	*PageOptions
    +	Data []Token `json:"data"`
    +}
    +
    +// endpoint gets the endpoint URL for Token
    +func (TokensPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.Tokens.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends Tokens when processing paginated Token responses
    +func (resp *TokensPagedResponse) appendData(r *TokensPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListTokens lists Tokens
    +func (c *Client) ListTokens(ctx context.Context, opts *ListOptions) ([]Token, error) {
    +	response := TokensPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (i *Token) fixDates() *Token {
    +	i.Created, _ = parseDates(i.CreatedStr)
    +	i.Expiry, _ = parseDates(i.ExpiryStr)
    +	return i
    +}
    +
    +// GetToken gets the token with the provided ID
    +func (c *Client) GetToken(ctx context.Context, id int) (*Token, error) {
    +	e, err := c.Tokens.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&Token{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Token).fixDates(), nil
    +}
    +
    +// CreateToken creates a Token
    +func (c *Client) CreateToken(ctx context.Context, createOpts TokenCreateOptions) (*Token, error) {
    +	var body string
    +	e, err := c.Tokens.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&Token{})
    +
    +	// Format the Time as a string to meet the ISO8601 requirement
    +	createOptsFixed := struct {
    +		Label  string  `json:"label"`
    +		Scopes string  `json:"scopes"`
    +		Expiry *string `json:"expiry"`
    +	}{}
    +	createOptsFixed.Label = createOpts.Label
    +	createOptsFixed.Scopes = createOpts.Scopes
    +	if createOpts.Expiry != nil {
    +		iso8601Expiry := createOpts.Expiry.UTC().Format("2006-01-02T15:04:05")
    +		createOptsFixed.Expiry = &iso8601Expiry
    +	}
    +
    +	if bodyData, err := json.Marshal(createOptsFixed); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Token).fixDates(), nil
    +}
    +
    +// UpdateToken updates the Token with the specified id
    +func (c *Client) UpdateToken(ctx context.Context, id int, updateOpts TokenUpdateOptions) (*Token, error) {
    +	var body string
    +	e, err := c.Tokens.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	req := c.R(ctx).SetResult(&Token{})
    +
    +	if bodyData, err := json.Marshal(updateOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Token).fixDates(), nil
    +}
    +
    +// DeleteToken deletes the Token with the specified id
    +func (c *Client) DeleteToken(ctx context.Context, id int) error {
    +	e, err := c.Tokens.Endpoint()
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
    +	return err
    +}
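CreateToken reshapes TokenCreateOptions before sending it so that a non-nil Expiry is serialized as an ISO 8601 string, while a nil Expiry yields a token that never expires. A minimal sketch, assuming a configured *linodego.Client; the label and scope are hypothetical:

    package example

    import (
        "context"
        "time"

        "github.com/linode/linodego"
    )

    // createShortLivedToken requests a token that expires in 30 days; passing a
    // nil Expiry instead would create a token that never expires.
    func createShortLivedToken(ctx context.Context, client *linodego.Client) (*linodego.Token, error) {
        expiry := time.Now().AddDate(0, 0, 30)
        return client.CreateToken(ctx, linodego.TokenCreateOptions{
            Label:  "ci-token", // hypothetical label
            Scopes: "*",        // full access; narrower scopes are generally safer
            Expiry: &expiry,
        })
    }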
    diff --git a/vendor/github.com/linode/linodego/regions.go b/vendor/github.com/linode/linodego/regions.go
    new file mode 100644
    index 000000000000..b13b996ea2cd
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/regions.go
    @@ -0,0 +1,64 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"fmt"
    +)
    +
    +// Region represents a linode region object
    +type Region struct {
    +	ID      string `json:"id"`
    +	Country string `json:"country"`
    +}
    +
+// RegionsPagedResponse represents a paginated Region API response
    +type RegionsPagedResponse struct {
    +	*PageOptions
    +	Data []Region `json:"data"`
    +}
    +
    +// endpoint gets the endpoint URL for Region
    +func (RegionsPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.Regions.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends Regions when processing paginated Region responses
    +func (resp *RegionsPagedResponse) appendData(r *RegionsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListRegions lists Regions
    +func (c *Client) ListRegions(ctx context.Context, opts *ListOptions) ([]Region, error) {
    +	response := RegionsPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (v *Region) fixDates() *Region {
    +	return v
    +}
    +
+// GetRegion gets the Region with the provided ID
    +func (c *Client) GetRegion(ctx context.Context, id string) (*Region, error) {
    +	e, err := c.Regions.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, id)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&Region{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Region).fixDates(), nil
    +}
    diff --git a/vendor/github.com/linode/linodego/resources.go b/vendor/github.com/linode/linodego/resources.go
    new file mode 100644
    index 000000000000..fe0c258cf682
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/resources.go
    @@ -0,0 +1,162 @@
    +package linodego
    +
    +import (
    +	"bytes"
    +	"context"
    +	"fmt"
    +	"text/template"
    +
    +	"gopkg.in/resty.v1"
    +)
    +
    +const (
    +	stackscriptsName          = "stackscripts"
    +	imagesName                = "images"
    +	instancesName             = "instances"
    +	instanceDisksName         = "disks"
    +	instanceConfigsName       = "configs"
    +	instanceIPsName           = "ips"
    +	instanceSnapshotsName     = "snapshots"
    +	instanceVolumesName       = "instancevolumes"
    +	ipaddressesName           = "ipaddresses"
    +	ipv6poolsName             = "ipv6pools"
    +	ipv6rangesName            = "ipv6ranges"
    +	regionsName               = "regions"
    +	volumesName               = "volumes"
    +	kernelsName               = "kernels"
    +	typesName                 = "types"
    +	domainsName               = "domains"
    +	domainRecordsName         = "records"
    +	longviewName              = "longview"
    +	longviewclientsName       = "longviewclients"
    +	longviewsubscriptionsName = "longviewsubscriptions"
    +	nodebalancersName         = "nodebalancers"
    +	nodebalancerconfigsName   = "nodebalancerconfigs"
    +	nodebalancernodesName     = "nodebalancernodes"
    +	notificationsName         = "notifications"
    +	sshkeysName               = "sshkeys"
    +	ticketsName               = "tickets"
    +	tokensName                = "tokens"
    +	accountName               = "account"
    +	eventsName                = "events"
    +	invoicesName              = "invoices"
    +	invoiceItemsName          = "invoiceitems"
    +	profileName               = "profile"
    +	managedName               = "managed"
    +	tagsName                  = "tags"
    +	usersName                 = "users"
    +	// notificationsName = "notifications"
    +
    +	stackscriptsEndpoint          = "linode/stackscripts"
    +	imagesEndpoint                = "images"
    +	instancesEndpoint             = "linode/instances"
    +	instanceConfigsEndpoint       = "linode/instances/{{ .ID }}/configs"
    +	instanceDisksEndpoint         = "linode/instances/{{ .ID }}/disks"
    +	instanceSnapshotsEndpoint     = "linode/instances/{{ .ID }}/backups"
    +	instanceIPsEndpoint           = "linode/instances/{{ .ID }}/ips"
    +	instanceVolumesEndpoint       = "linode/instances/{{ .ID }}/volumes"
    +	ipaddressesEndpoint           = "networking/ips"
    +	ipv6poolsEndpoint             = "networking/ipv6/pools"
    +	ipv6rangesEndpoint            = "networking/ipv6/ranges"
    +	regionsEndpoint               = "regions"
    +	volumesEndpoint               = "volumes"
    +	kernelsEndpoint               = "linode/kernels"
    +	typesEndpoint                 = "linode/types"
    +	domainsEndpoint               = "domains"
    +	domainRecordsEndpoint         = "domains/{{ .ID }}/records"
    +	longviewEndpoint              = "longview"
    +	longviewclientsEndpoint       = "longview/clients"
    +	longviewsubscriptionsEndpoint = "longview/subscriptions"
    +	nodebalancersEndpoint         = "nodebalancers"
    +	// @TODO we can't use these nodebalancer endpoints unless we include these templated fields
    +	// The API seems inconsistent about including parent IDs in objects, (compare instance configs to nb configs)
    +	// Parent IDs would be immutable for updates and are ignored in create requests ..
    +	// Should we include these fields in CreateOpts and UpdateOpts?
    +	nodebalancerconfigsEndpoint = "nodebalancers/{{ .ID }}/configs"
    +	nodebalancernodesEndpoint   = "nodebalancers/{{ .ID }}/configs/{{ .SecondID }}/nodes"
    +	sshkeysEndpoint             = "profile/sshkeys"
    +	ticketsEndpoint             = "support/tickets"
    +	tokensEndpoint              = "profile/tokens"
    +	accountEndpoint             = "account"
    +	eventsEndpoint              = "account/events"
    +	invoicesEndpoint            = "account/invoices"
    +	invoiceItemsEndpoint        = "account/invoices/{{ .ID }}/items"
    +	profileEndpoint             = "profile"
    +	managedEndpoint             = "managed"
    +	tagsEndpoint                = "tags"
    +	usersEndpoint               = "account/users"
    +	notificationsEndpoint       = "account/notifications"
    +)
    +
    +// Resource represents a linode API resource
    +type Resource struct {
    +	name             string
    +	endpoint         string
    +	isTemplate       bool
    +	endpointTemplate *template.Template
    +	R                func(ctx context.Context) *resty.Request
    +	PR               func(ctx context.Context) *resty.Request
    +}
    +
+// NewResource is the factory to create a new Resource struct. If the endpoint is a template string, the useTemplate bool must be set.
    +func NewResource(client *Client, name string, endpoint string, useTemplate bool, singleType interface{}, pagedType interface{}) *Resource {
    +	var tmpl *template.Template
    +
    +	if useTemplate {
    +		tmpl = template.Must(template.New(name).Parse(endpoint))
    +	}
    +
    +	r := func(ctx context.Context) *resty.Request {
    +		return client.R(ctx).SetResult(singleType)
    +	}
    +
    +	pr := func(ctx context.Context) *resty.Request {
    +		return client.R(ctx).SetResult(pagedType)
    +	}
    +
    +	return &Resource{name, endpoint, useTemplate, tmpl, r, pr}
    +}
    +
    +func (r Resource) render(data ...interface{}) (string, error) {
    +	if data == nil {
+		return "", NewError("Cannot template endpoint with no data")
    +	}
    +	out := ""
    +	buf := bytes.NewBufferString(out)
    +
    +	var substitutions interface{}
    +	if len(data) == 1 {
    +		substitutions = struct{ ID interface{} }{data[0]}
    +	} else if len(data) == 2 {
    +		substitutions = struct {
    +			ID       interface{}
    +			SecondID interface{}
    +		}{data[0], data[1]}
    +	} else {
    +		return "", NewError("Too many arguments to render template (expected 1 or 2)")
    +	}
    +	if err := r.endpointTemplate.Execute(buf, substitutions); err != nil {
    +		return "", NewError(err)
    +	}
    +	return buf.String(), nil
    +}
    +
    +// endpointWithID will return the rendered endpoint string for the resource with provided id
    +func (r Resource) endpointWithID(id ...int) (string, error) {
    +	if !r.isTemplate {
    +		return r.endpoint, nil
    +	}
    +	data := make([]interface{}, len(id))
    +	for i, v := range id {
    +		data[i] = v
    +	}
    +	return r.render(data...)
    +}
    +
+// Endpoint will return the non-templated endpoint string for the resource
    +func (r Resource) Endpoint() (string, error) {
    +	if r.isTemplate {
    +		return "", NewError(fmt.Sprintf("Tried to get endpoint for %s without providing data for template", r.name))
    +	}
    +	return r.endpoint, nil
    +}
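Templated resources are how the second- and third-level endpoints above get their parent IDs spliced in: endpointWithID executes the endpoint template with the supplied IDs. A minimal in-package sketch (endpointWithID is unexported); the *Client is needed only because NewResource captures it for its request helpers, which are never invoked here:

    package linodego

    // templatedEndpointExample renders the nodebalancer-configs endpoint template
    // for NodeBalancer 123, yielding "nodebalancers/123/configs".
    func templatedEndpointExample(c *Client) (string, error) {
        res := NewResource(c, nodebalancerconfigsName, nodebalancerconfigsEndpoint, true,
            &NodeBalancerConfig{}, &NodeBalancerConfigsPagedResponse{})
        return res.endpointWithID(123)
    }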
    diff --git a/vendor/github.com/linode/linodego/stackscripts.go b/vendor/github.com/linode/linodego/stackscripts.go
    new file mode 100644
    index 000000000000..daf339b2ffc7
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/stackscripts.go
    @@ -0,0 +1,208 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"time"
    +)
    +
    +// Stackscript represents a Linode StackScript
    +type Stackscript struct {
    +	CreatedStr string `json:"created"`
    +	UpdatedStr string `json:"updated"`
    +
    +	ID                int               `json:"id"`
    +	Username          string            `json:"username"`
    +	Label             string            `json:"label"`
    +	Description       string            `json:"description"`
    +	Images            []string          `json:"images"`
    +	DeploymentsTotal  int               `json:"deployments_total"`
    +	DeploymentsActive int               `json:"deployments_active"`
    +	IsPublic          bool              `json:"is_public"`
    +	Created           *time.Time        `json:"-"`
    +	Updated           *time.Time        `json:"-"`
    +	RevNote           string            `json:"rev_note"`
    +	Script            string            `json:"script"`
    +	UserDefinedFields *[]StackscriptUDF `json:"user_defined_fields"`
    +	UserGravatarID    string            `json:"user_gravatar_id"`
    +}
    +
+// StackscriptUDF defines a single variable that is accepted by a Stackscript
    +type StackscriptUDF struct {
    +	// A human-readable label for the field that will serve as the input prompt for entering the value during deployment.
    +	Label string `json:"label"`
    +
    +	// The name of the field.
    +	Name string `json:"name"`
    +
    +	// An example value for the field.
    +	Example string `json:"example"`
    +
    +	// A list of acceptable single values for the field.
    +	OneOf string `json:"oneOf,omitempty"`
    +
    +	// A list of acceptable values for the field in any quantity, combination or order.
    +	ManyOf string `json:"manyOf,omitempty"`
    +
    +	// The default value. If not specified, this value will be used.
    +	Default string `json:"default,omitempty"`
    +}
    +
    +// StackscriptCreateOptions fields are those accepted by CreateStackscript
    +type StackscriptCreateOptions struct {
    +	Label       string   `json:"label"`
    +	Description string   `json:"description"`
    +	Images      []string `json:"images"`
    +	IsPublic    bool     `json:"is_public"`
    +	RevNote     string   `json:"rev_note"`
    +	Script      string   `json:"script"`
    +}
    +
    +// StackscriptUpdateOptions fields are those accepted by UpdateStackscript
    +type StackscriptUpdateOptions StackscriptCreateOptions
    +
    +// GetCreateOptions converts a Stackscript to StackscriptCreateOptions for use in CreateStackscript
    +func (i Stackscript) GetCreateOptions() StackscriptCreateOptions {
    +	return StackscriptCreateOptions{
    +		Label:       i.Label,
    +		Description: i.Description,
    +		Images:      i.Images,
    +		IsPublic:    i.IsPublic,
    +		RevNote:     i.RevNote,
    +		Script:      i.Script,
    +	}
    +}
    +
    +// GetUpdateOptions converts a Stackscript to StackscriptUpdateOptions for use in UpdateStackscript
    +func (i Stackscript) GetUpdateOptions() StackscriptUpdateOptions {
    +	return StackscriptUpdateOptions{
    +		Label:       i.Label,
    +		Description: i.Description,
    +		Images:      i.Images,
    +		IsPublic:    i.IsPublic,
    +		RevNote:     i.RevNote,
    +		Script:      i.Script,
    +	}
    +}
    +
    +// StackscriptsPagedResponse represents a paginated Stackscript API response
    +type StackscriptsPagedResponse struct {
    +	*PageOptions
    +	Data []Stackscript `json:"data"`
    +}
    +
    +// endpoint gets the endpoint URL for Stackscript
    +func (StackscriptsPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.StackScripts.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends Stackscripts when processing paginated Stackscript responses
    +func (resp *StackscriptsPagedResponse) appendData(r *StackscriptsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListStackscripts lists Stackscripts
    +func (c *Client) ListStackscripts(ctx context.Context, opts *ListOptions) ([]Stackscript, error) {
    +	response := StackscriptsPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (i *Stackscript) fixDates() *Stackscript {
    +	i.Created, _ = parseDates(i.CreatedStr)
    +	i.Updated, _ = parseDates(i.UpdatedStr)
    +	return i
    +}
    +
    +// GetStackscript gets the Stackscript with the provided ID
    +func (c *Client) GetStackscript(ctx context.Context, id int) (*Stackscript, error) {
    +	e, err := c.StackScripts.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +	r, err := coupleAPIErrors(c.R(ctx).
    +		SetResult(&Stackscript{}).
    +		Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Stackscript).fixDates(), nil
    +}
    +
    +// CreateStackscript creates a StackScript
    +func (c *Client) CreateStackscript(ctx context.Context, createOpts StackscriptCreateOptions) (*Stackscript, error) {
    +	var body string
    +	e, err := c.StackScripts.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&Stackscript{})
    +
    +	if bodyData, err := json.Marshal(createOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Stackscript).fixDates(), nil
    +}
    +
    +// UpdateStackscript updates the StackScript with the specified id
    +func (c *Client) UpdateStackscript(ctx context.Context, id int, updateOpts StackscriptUpdateOptions) (*Stackscript, error) {
    +	var body string
    +	e, err := c.StackScripts.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	req := c.R(ctx).SetResult(&Stackscript{})
    +
    +	if bodyData, err := json.Marshal(updateOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Stackscript).fixDates(), nil
    +}
    +
    +// DeleteStackscript deletes the StackScript with the specified id
    +func (c *Client) DeleteStackscript(ctx context.Context, id int) error {
    +	e, err := c.StackScripts.Endpoint()
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
    +	return err
    +}
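A minimal sketch of CreateStackscript, assuming a configured *linodego.Client; the label, description, image slug, and script body are hypothetical:

    package example

    import (
        "context"

        "github.com/linode/linodego"
    )

    // createBootstrapScript registers a private StackScript for the given image.
    func createBootstrapScript(ctx context.Context, client *linodego.Client) (*linodego.Stackscript, error) {
        return client.CreateStackscript(ctx, linodego.StackscriptCreateOptions{
            Label:       "bootstrap",                  // hypothetical label
            Description: "installs baseline packages", // hypothetical description
            Images:      []string{"linode/debian9"},   // hypothetical image slug
            IsPublic:    false,
            RevNote:     "initial revision",
            Script:      "#!/bin/bash\napt-get update -y\n",
        })
    }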
    diff --git a/vendor/github.com/linode/linodego/support.go b/vendor/github.com/linode/linodego/support.go
    new file mode 100644
    index 000000000000..6c72a4adf367
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/support.go
    @@ -0,0 +1,88 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"fmt"
    +	"time"
    +)
    +
    +// Ticket represents a support ticket object
    +type Ticket struct {
    +	ID          int           `json:"id"`
    +	Attachments []string      `json:"attachments"`
    +	Closed      *time.Time    `json:"-"`
    +	Description string        `json:"description"`
    +	Entity      *TicketEntity `json:"entity"`
    +	GravatarID  string        `json:"gravatar_id"`
    +	Opened      *time.Time    `json:"-"`
    +	OpenedBy    string        `json:"opened_by"`
    +	Status      TicketStatus  `json:"status"`
    +	Summary     string        `json:"summary"`
    +	Updated     *time.Time    `json:"-"`
    +	UpdatedBy   string        `json:"updated_by"`
    +}
    +
+// TicketEntity identifies the specific entity a Ticket is related to
    +type TicketEntity struct {
    +	ID    int    `json:"id"`
    +	Label string `json:"label"`
    +	Type  string `json:"type"`
    +	URL   string `json:"url"`
    +}
    +
    +// TicketStatus constants start with Ticket and include Linode API Ticket Status values
    +type TicketStatus string
    +
    +// TicketStatus constants reflect the current status of a Ticket
    +const (
    +	TicketNew    TicketStatus = "new"
    +	TicketClosed TicketStatus = "closed"
    +	TicketOpen   TicketStatus = "open"
    +)
    +
    +// TicketsPagedResponse represents a paginated ticket API response
    +type TicketsPagedResponse struct {
    +	*PageOptions
    +	Data []Ticket `json:"data"`
    +}
    +
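+// endpoint gets the endpoint URL for Ticket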
    +func (TicketsPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.Tickets.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
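+// appendData appends Tickets when processing paginated Ticket responses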
    +func (resp *TicketsPagedResponse) appendData(r *TicketsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListTickets returns a collection of Support Tickets on the Account. Support Tickets
    +// can be both tickets opened with Linode for support, as well as tickets generated by
    +// Linode regarding the Account. This collection includes all Support Tickets generated
    +// on the Account, with open tickets returned first.
    +func (c *Client) ListTickets(ctx context.Context, opts *ListOptions) ([]Ticket, error) {
    +	response := TicketsPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// GetTicket gets a Support Ticket on the Account with the specified ID
    +func (c *Client) GetTicket(ctx context.Context, id int) (*Ticket, error) {
    +	e, err := c.Tickets.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +	r, err := coupleAPIErrors(c.R(ctx).
    +		SetResult(&Ticket{}).
    +		Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Ticket), nil
    +}
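A minimal sketch of ListTickets, assuming a configured *linodego.Client; the X-Filter JSON shape is an assumption about the Linode API's filtering syntax:

    package example

    import (
        "context"
        "fmt"

        "github.com/linode/linodego"
    )

    // openTickets lists only tickets whose status is "open", filtering server-side.
    func openTickets(ctx context.Context, client *linodego.Client) ([]linodego.Ticket, error) {
        filter := fmt.Sprintf(`{"status":"%s"}`, linodego.TicketOpen)
        return client.ListTickets(ctx, linodego.NewListOptions(0, filter)) // page 0: fetch every page
    }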
    diff --git a/vendor/github.com/linode/linodego/tags.go b/vendor/github.com/linode/linodego/tags.go
    new file mode 100644
    index 000000000000..2cb44272b287
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/tags.go
    @@ -0,0 +1,219 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"errors"
    +	"fmt"
    +)
    +
    +// Tag represents a Tag object
    +type Tag struct {
    +	Label string `json:"label"`
    +}
    +
+// TaggedObject represents a Tagged Object
    +type TaggedObject struct {
    +	Type    string          `json:"type"`
    +	RawData json.RawMessage `json:"data"`
    +	Data    interface{}     `json:"-"`
    +}
    +
+// SortedObjects holds a TaggedObjectList's objects grouped by their underlying type
    +type SortedObjects struct {
    +	Instances     []Instance
    +	Domains       []Domain
    +	Volumes       []Volume
    +	NodeBalancers []NodeBalancer
    +	/*
    +		StackScripts  []Stackscript
    +	*/
    +}
    +
+// TaggedObjectList is a list of TaggedObjects, as returned by ListTaggedObjects
    +type TaggedObjectList []TaggedObject
    +
    +// TagCreateOptions fields are those accepted by CreateTag
    +type TagCreateOptions struct {
    +	Label         string `json:"label"`
    +	Linodes       []int  `json:"linodes,omitempty"`
    +	Domains       []int  `json:"domains,omitempty"`
    +	Volumes       []int  `json:"volumes,omitempty"`
    +	NodeBalancers []int  `json:"nodebalancers,omitempty"`
    +}
    +
    +// GetCreateOptions converts a Tag to TagCreateOptions for use in CreateTag
    +func (i Tag) GetCreateOptions() (o TagCreateOptions) {
    +	o.Label = i.Label
    +	return
    +}
    +
    +// TaggedObjectsPagedResponse represents a paginated Tag API response
    +type TaggedObjectsPagedResponse struct {
    +	*PageOptions
    +	Data []TaggedObject `json:"data"`
    +}
    +
    +// TagsPagedResponse represents a paginated Tag API response
    +type TagsPagedResponse struct {
    +	*PageOptions
    +	Data []Tag `json:"data"`
    +}
    +
    +// endpoint gets the endpoint URL for Tag
    +func (TagsPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.Tags.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
+// endpointWithID gets the endpoint URL for the TaggedObjects of the Tag with the provided label
    +func (TaggedObjectsPagedResponse) endpointWithID(c *Client, id string) string {
    +	endpoint, err := c.Tags.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	endpoint = fmt.Sprintf("%s/%s", endpoint, id)
    +	return endpoint
    +}
    +
    +// appendData appends Tags when processing paginated Tag responses
    +func (resp *TagsPagedResponse) appendData(r *TagsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// appendData appends TaggedObjects when processing paginated TaggedObjects responses
    +func (resp *TaggedObjectsPagedResponse) appendData(r *TaggedObjectsPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListTags lists Tags
    +func (c *Client) ListTags(ctx context.Context, opts *ListOptions) ([]Tag, error) {
    +	response := TagsPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixData stores an object of the type defined by Type in Data using RawData
    +func (i *TaggedObject) fixData() (*TaggedObject, error) {
    +	switch i.Type {
    +	case "linode":
    +		obj := Instance{}
    +		if err := json.Unmarshal(i.RawData, &obj); err != nil {
    +			return nil, err
    +		}
    +		i.Data = obj
    +	case "nodebalancer":
    +		obj := NodeBalancer{}
    +		if err := json.Unmarshal(i.RawData, &obj); err != nil {
    +			return nil, err
    +		}
    +		i.Data = obj
    +	case "domain":
    +		obj := Domain{}
    +		if err := json.Unmarshal(i.RawData, &obj); err != nil {
    +			return nil, err
    +		}
    +		i.Data = obj
    +	case "volume":
    +		obj := Volume{}
    +		if err := json.Unmarshal(i.RawData, &obj); err != nil {
    +			return nil, err
    +		}
    +		i.Data = obj
    +	}
    +
    +	return i, nil
    +}
    +
    +// ListTaggedObjects lists Tagged Objects
    +func (c *Client) ListTaggedObjects(ctx context.Context, label string, opts *ListOptions) (TaggedObjectList, error) {
    +	response := TaggedObjectsPagedResponse{}
    +	err := c.listHelperWithID(ctx, &response, label, opts)
    +	if err != nil {
    +		return nil, err
    +	}
    +	for i := range response.Data {
    +		if _, err := response.Data[i].fixData(); err != nil {
    +			return nil, err
    +		}
    +	}
    +	return response.Data, nil
    +}
    +
    +// SortedObjects converts a list of TaggedObjects into a Sorted Objects struct, for easier access
    +func (t TaggedObjectList) SortedObjects() (SortedObjects, error) {
    +	so := SortedObjects{}
    +	for _, o := range t {
    +		switch o.Type {
    +		case "linode":
    +			if instance, ok := o.Data.(Instance); ok {
    +				so.Instances = append(so.Instances, instance)
    +			} else {
    +				return so, errors.New("Expected an Instance when Type was \"linode\"")
    +			}
    +		case "domain":
    +			if domain, ok := o.Data.(Domain); ok {
    +				so.Domains = append(so.Domains, domain)
    +			} else {
    +				return so, errors.New("Expected a Domain when Type was \"domain\"")
    +			}
    +		case "volume":
    +			if volume, ok := o.Data.(Volume); ok {
    +				so.Volumes = append(so.Volumes, volume)
    +			} else {
+				return so, errors.New("Expected a Volume when Type was \"volume\"")
    +			}
    +		case "nodebalancer":
    +			if nodebalancer, ok := o.Data.(NodeBalancer); ok {
    +				so.NodeBalancers = append(so.NodeBalancers, nodebalancer)
    +			} else {
+				return so, errors.New("Expected a NodeBalancer when Type was \"nodebalancer\"")
    +			}
    +		}
    +	}
    +	return so, nil
    +}
    +
    +// CreateTag creates a Tag
    +func (c *Client) CreateTag(ctx context.Context, createOpts TagCreateOptions) (*Tag, error) {
    +	var body string
    +	e, err := c.Tags.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req := c.R(ctx).SetResult(&Tag{})
    +
    +	if bodyData, err := json.Marshal(createOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Tag), nil
    +}
    +
    +// DeleteTag deletes the Tag with the specified id
    +func (c *Client) DeleteTag(ctx context.Context, label string) error {
    +	e, err := c.Tags.Endpoint()
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, label)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
    +	return err
    +}
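
The Tag helpers above follow the same shape as the rest of the linodego client: build an options struct, call the typed method, and let coupleAPIErrors surface API failures. A minimal usage sketch, assuming NewClient/SetToken behave as in linodego releases of this vintage; the token and instance ID below are placeholders:

    package main

    import (
    	"context"
    	"fmt"
    	"log"
    	"net/http"

    	"github.com/linode/linodego"
    )

    func main() {
    	client := linodego.NewClient(http.DefaultClient)
    	client.SetToken("api-token-placeholder") // hypothetical token

    	// Create a tag attached to one (hypothetical) Linode instance.
    	tag, err := client.CreateTag(context.Background(), linodego.TagCreateOptions{
    		Label:   "production",
    		Linodes: []int{12345},
    	})
    	if err != nil {
    		log.Fatal(err)
    	}

    	// List and group the objects carrying that tag.
    	objects, err := client.ListTaggedObjects(context.Background(), tag.Label, nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	sorted, err := objects.SortedObjects()
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("%d instances tagged %q\n", len(sorted.Instances), tag.Label)
    }
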
    diff --git a/vendor/github.com/linode/linodego/types.go b/vendor/github.com/linode/linodego/types.go
    new file mode 100644
    index 000000000000..d1ffbafef952
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/types.go
    @@ -0,0 +1,90 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"fmt"
    +)
    +
    +// LinodeType represents a linode type object
    +type LinodeType struct {
    +	ID         string          `json:"id"`
    +	Disk       int             `json:"disk"`
    +	Class      LinodeTypeClass `json:"class"` // enum: nanode, standard, highmem, dedicated
    +	Price      *LinodePrice    `json:"price"`
    +	Label      string          `json:"label"`
    +	Addons     *LinodeAddons   `json:"addons"`
    +	NetworkOut int             `json:"network_out"`
    +	Memory     int             `json:"memory"`
    +	Transfer   int             `json:"transfer"`
    +	VCPUs      int             `json:"vcpus"`
    +}
    +
    +// LinodePrice represents a linode type price object
    +type LinodePrice struct {
    +	Hourly  float32 `json:"hourly"`
    +	Monthly float32 `json:"monthly"`
    +}
    +
    +// LinodeBackupsAddon represents a linode backups addon object
    +type LinodeBackupsAddon struct {
    +	Price *LinodePrice `json:"price"`
    +}
    +
    +// LinodeAddons represent the linode addons object
    +type LinodeAddons struct {
    +	Backups *LinodeBackupsAddon `json:"backups"`
    +}
    +
    +// LinodeTypeClass constants start with Class and include Linode API Instance Type Classes
    +type LinodeTypeClass string
    +
+// LinodeTypeClass constants are the Instance Type Classes that an Instance Type can be assigned
    +const (
    +	ClassNanode    LinodeTypeClass = "nanode"
    +	ClassStandard  LinodeTypeClass = "standard"
    +	ClassHighmem   LinodeTypeClass = "highmem"
    +	ClassDedicated LinodeTypeClass = "dedicated"
    +)
    +
    +// LinodeTypesPagedResponse represents a linode types API response for listing
    +type LinodeTypesPagedResponse struct {
    +	*PageOptions
    +	Data []LinodeType `json:"data"`
    +}
    +
    +func (LinodeTypesPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.Types.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +func (resp *LinodeTypesPagedResponse) appendData(r *LinodeTypesPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListTypes lists linode types
    +func (c *Client) ListTypes(ctx context.Context, opts *ListOptions) ([]LinodeType, error) {
    +	response := LinodeTypesPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// GetType gets the type with the provided ID
    +func (c *Client) GetType(ctx context.Context, typeID string) (*LinodeType, error) {
    +	e, err := c.Types.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%s", e, typeID)
    +
    +	r, err := coupleAPIErrors(c.Types.R(ctx).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*LinodeType), nil
    +}
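
For completeness, the type lookups above can be exercised the same way. A small sketch (imports and `client` are assumed to be configured as in the earlier sketch; the plan ID "g6-nanode-1" is only an example value):

    // Sketch only: inspect a plan by ID and check its class.
    func showType(ctx context.Context, client linodego.Client) error {
    	t, err := client.GetType(ctx, "g6-nanode-1") // hypothetical plan ID
    	if err != nil {
    		return err
    	}
    	if t.Class == linodego.ClassNanode {
    		fmt.Printf("%s: %d MB RAM, %d vCPUs\n", t.Label, t.Memory, t.VCPUs)
    	}
    	return nil
    }
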
    diff --git a/vendor/github.com/linode/linodego/util.go b/vendor/github.com/linode/linodego/util.go
    new file mode 100644
    index 000000000000..a37a304e4a73
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/util.go
    @@ -0,0 +1,15 @@
    +package linodego
    +
    +import "time"
    +
    +const (
    +	dateLayout = "2006-01-02T15:04:05"
    +)
    +
    +func parseDates(dateStr string) (*time.Time, error) {
    +	d, err := time.Parse(dateLayout, dateStr)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return &d, nil
    +}
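
The layout string above matches the API's timezone-less timestamps. A one-line illustration of the format it accepts (parseDates itself is unexported, so this only mirrors its behavior):

    // e.g. "2018-01-02T03:04:05" parses cleanly with this layout.
    t, err := time.Parse("2006-01-02T15:04:05", "2018-01-02T03:04:05")
    if err != nil {
    	log.Fatal(err)
    }
    fmt.Println(t.UTC()) // 2018-01-02 03:04:05 +0000 UTC
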
    diff --git a/vendor/github.com/linode/linodego/volumes.go b/vendor/github.com/linode/linodego/volumes.go
    new file mode 100644
    index 000000000000..0a528b5452d4
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/volumes.go
    @@ -0,0 +1,299 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"time"
    +)
    +
    +// VolumeStatus indicates the status of the Volume
    +type VolumeStatus string
    +
    +const (
    +	// VolumeCreating indicates the Volume is being created and is not yet available for use
    +	VolumeCreating VolumeStatus = "creating"
    +
    +	// VolumeActive indicates the Volume is online and available for use
    +	VolumeActive VolumeStatus = "active"
    +
    +	// VolumeResizing indicates the Volume is in the process of upgrading its current capacity
    +	VolumeResizing VolumeStatus = "resizing"
    +
    +	// VolumeContactSupport indicates there is a problem with the Volume. A support ticket must be opened to resolve the issue
    +	VolumeContactSupport VolumeStatus = "contact_support"
    +)
    +
    +// Volume represents a linode volume object
    +type Volume struct {
    +	CreatedStr string `json:"created"`
    +	UpdatedStr string `json:"updated"`
    +
    +	ID             int          `json:"id"`
    +	Label          string       `json:"label"`
    +	Status         VolumeStatus `json:"status"`
    +	Region         string       `json:"region"`
    +	Size           int          `json:"size"`
    +	LinodeID       *int         `json:"linode_id"`
    +	FilesystemPath string       `json:"filesystem_path"`
    +	Tags           []string     `json:"tags"`
    +	Created        time.Time    `json:"-"`
    +	Updated        time.Time    `json:"-"`
    +}
    +
    +// VolumeCreateOptions fields are those accepted by CreateVolume
    +type VolumeCreateOptions struct {
    +	Label    string `json:"label,omitempty"`
    +	Region   string `json:"region,omitempty"`
    +	LinodeID int    `json:"linode_id,omitempty"`
    +	ConfigID int    `json:"config_id,omitempty"`
    +	// The Volume's size, in GiB. Minimum size is 10GiB, maximum size is 10240GiB. A "0" value will result in the default size.
    +	Size int `json:"size,omitempty"`
    +	// An array of tags applied to this object. Tags are for organizational purposes only.
    +	Tags []string `json:"tags"`
    +}
    +
    +// VolumeUpdateOptions fields are those accepted by UpdateVolume
    +type VolumeUpdateOptions struct {
    +	Label string    `json:"label,omitempty"`
    +	Tags  *[]string `json:"tags,omitempty"`
    +}
    +
    +// VolumeAttachOptions fields are those accepted by AttachVolume
    +type VolumeAttachOptions struct {
    +	LinodeID int `json:"linode_id"`
    +	ConfigID int `json:"config_id,omitempty"`
    +}
    +
    +// VolumesPagedResponse represents a linode API response for listing of volumes
    +type VolumesPagedResponse struct {
    +	*PageOptions
    +	Data []Volume `json:"data"`
    +}
    +
    +// GetUpdateOptions converts a Volume to VolumeUpdateOptions for use in UpdateVolume
    +func (v Volume) GetUpdateOptions() (updateOpts VolumeUpdateOptions) {
    +	updateOpts.Label = v.Label
    +	updateOpts.Tags = &v.Tags
    +	return
    +}
    +
    +// GetCreateOptions converts a Volume to VolumeCreateOptions for use in CreateVolume
    +func (v Volume) GetCreateOptions() (createOpts VolumeCreateOptions) {
    +	createOpts.Label = v.Label
    +	createOpts.Tags = v.Tags
    +	createOpts.Region = v.Region
    +	createOpts.Size = v.Size
    +	if v.LinodeID != nil && *v.LinodeID > 0 {
    +		createOpts.LinodeID = *v.LinodeID
    +	}
    +	return
    +}
    +
    +// endpoint gets the endpoint URL for Volume
    +func (VolumesPagedResponse) endpoint(c *Client) string {
    +	endpoint, err := c.Volumes.Endpoint()
    +	if err != nil {
    +		panic(err)
    +	}
    +	return endpoint
    +}
    +
    +// appendData appends Volumes when processing paginated Volume responses
    +func (resp *VolumesPagedResponse) appendData(r *VolumesPagedResponse) {
    +	resp.Data = append(resp.Data, r.Data...)
    +}
    +
    +// ListVolumes lists Volumes
    +func (c *Client) ListVolumes(ctx context.Context, opts *ListOptions) ([]Volume, error) {
    +	response := VolumesPagedResponse{}
    +	err := c.listHelper(ctx, &response, opts)
    +	for i := range response.Data {
    +		response.Data[i].fixDates()
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +	return response.Data, nil
    +}
    +
    +// fixDates converts JSON timestamps to Go time.Time values
    +func (v *Volume) fixDates() *Volume {
+	if parsed, err := parseDates(v.CreatedStr); err == nil {
+		v.Created = *parsed
+	}
+	if parsed, err := parseDates(v.UpdatedStr); err == nil {
    +		v.Updated = *parsed
    +	}
    +	return v
    +}
    +
+// GetVolume gets the Volume with the provided ID
    +func (c *Client) GetVolume(ctx context.Context, id int) (*Volume, error) {
    +	e, err := c.Volumes.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +	r, err := coupleAPIErrors(c.R(ctx).SetResult(&Volume{}).Get(e))
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Volume).fixDates(), nil
    +}
    +
    +// AttachVolume attaches a volume to a Linode instance
    +func (c *Client) AttachVolume(ctx context.Context, id int, options *VolumeAttachOptions) (*Volume, error) {
    +	body := ""
    +	if bodyData, err := json.Marshal(options); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	e, err := c.Volumes.Endpoint()
    +	if err != nil {
    +		return nil, NewError(err)
    +	}
    +
    +	e = fmt.Sprintf("%s/%d/attach", e, id)
    +	resp, err := coupleAPIErrors(c.R(ctx).
    +		SetResult(&Volume{}).
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	return resp.Result().(*Volume).fixDates(), nil
    +}
    +
    +// CreateVolume creates a Linode Volume
    +func (c *Client) CreateVolume(ctx context.Context, createOpts VolumeCreateOptions) (*Volume, error) {
    +	body := ""
    +	if bodyData, err := json.Marshal(createOpts); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	e, err := c.Volumes.Endpoint()
    +	if err != nil {
    +		return nil, NewError(err)
    +	}
    +
    +	resp, err := coupleAPIErrors(c.R(ctx).
    +		SetResult(&Volume{}).
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	return resp.Result().(*Volume).fixDates(), nil
    +}
    +
    +// RenameVolume renames the label of a Linode volume
    +// DEPRECATED: use UpdateVolume
    +func (c *Client) RenameVolume(ctx context.Context, id int, label string) (*Volume, error) {
    +	updateOpts := VolumeUpdateOptions{Label: label}
    +	return c.UpdateVolume(ctx, id, updateOpts)
    +}
    +
    +// UpdateVolume updates the Volume with the specified id
    +func (c *Client) UpdateVolume(ctx context.Context, id int, volume VolumeUpdateOptions) (*Volume, error) {
    +	var body string
    +	e, err := c.Volumes.Endpoint()
    +	if err != nil {
    +		return nil, err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	req := c.R(ctx).SetResult(&Volume{})
    +
    +	if bodyData, err := json.Marshal(volume); err == nil {
    +		body = string(bodyData)
    +	} else {
    +		return nil, NewError(err)
    +	}
    +
    +	r, err := coupleAPIErrors(req.
    +		SetBody(body).
    +		Put(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +	return r.Result().(*Volume).fixDates(), nil
    +}
    +
    +// CloneVolume clones a Linode volume
    +func (c *Client) CloneVolume(ctx context.Context, id int, label string) (*Volume, error) {
    +	body := fmt.Sprintf("{\"label\":\"%s\"}", label)
    +
    +	e, err := c.Volumes.Endpoint()
    +	if err != nil {
    +		return nil, NewError(err)
    +	}
    +	e = fmt.Sprintf("%s/%d/clone", e, id)
    +
    +	resp, err := coupleAPIErrors(c.R(ctx).
    +		SetResult(&Volume{}).
    +		SetBody(body).
    +		Post(e))
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	return resp.Result().(*Volume).fixDates(), nil
    +}
    +
    +// DetachVolume detaches a Linode volume
    +func (c *Client) DetachVolume(ctx context.Context, id int) error {
    +	body := ""
    +
    +	e, err := c.Volumes.Endpoint()
    +	if err != nil {
    +		return NewError(err)
    +	}
    +
    +	e = fmt.Sprintf("%s/%d/detach", e, id)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).
    +		SetBody(body).
    +		Post(e))
    +
    +	return err
    +}
    +
+// ResizeVolume resizes the Volume with the specified id to the given size (in GiB)
    +func (c *Client) ResizeVolume(ctx context.Context, id int, size int) error {
    +	body := fmt.Sprintf("{\"size\": %d}", size)
    +
    +	e, err := c.Volumes.Endpoint()
    +	if err != nil {
    +		return NewError(err)
    +	}
    +	e = fmt.Sprintf("%s/%d/resize", e, id)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).
    +		SetBody(body).
    +		Post(e))
    +
    +	return err
    +}
    +
    +// DeleteVolume deletes the Volume with the specified id
    +func (c *Client) DeleteVolume(ctx context.Context, id int) error {
    +	e, err := c.Volumes.Endpoint()
    +	if err != nil {
    +		return err
    +	}
    +	e = fmt.Sprintf("%s/%d", e, id)
    +
    +	_, err = coupleAPIErrors(c.R(ctx).Delete(e))
    +	return err
    +}
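
The volume helpers above pair naturally with the waiters defined in waitfor.go (the next file). A hedged sketch that creates a volume and blocks until it is active (region, size, and timeout values are illustrative; `client` is again assumed to be a configured linodego.Client):

    vol, err := client.CreateVolume(context.Background(), linodego.VolumeCreateOptions{
    	Label:  "scratch-volume",
    	Region: "us-east", // illustrative region slug
    	Size:   20,        // GiB
    })
    if err != nil {
    	log.Fatal(err)
    }
    // WaitForVolumeStatus polls GetVolume until the status matches or the timeout expires.
    vol, err = client.WaitForVolumeStatus(context.Background(), vol.ID, linodego.VolumeActive, 180)
    if err != nil {
    	log.Fatal(err)
    }
    fmt.Println("volume ready at", vol.FilesystemPath)
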
    diff --git a/vendor/github.com/linode/linodego/waitfor.go b/vendor/github.com/linode/linodego/waitfor.go
    new file mode 100644
    index 000000000000..8f29a41505ba
    --- /dev/null
    +++ b/vendor/github.com/linode/linodego/waitfor.go
    @@ -0,0 +1,273 @@
    +package linodego
    +
    +import (
    +	"context"
    +	"encoding/json"
    +	"fmt"
    +	"log"
    +	"strconv"
    +	"strings"
    +	"time"
    +)
    +
    +// WaitForInstanceStatus waits for the Linode instance to reach the desired state
    +// before returning. It will timeout with an error after timeoutSeconds.
    +func (client Client) WaitForInstanceStatus(ctx context.Context, instanceID int, status InstanceStatus, timeoutSeconds int) (*Instance, error) {
    +	ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
    +	defer cancel()
    +
    +	ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
    +	defer ticker.Stop()
    +	for {
    +		select {
    +		case <-ticker.C:
    +			instance, err := client.GetInstance(ctx, instanceID)
    +			if err != nil {
    +				return instance, err
    +			}
    +			complete := (instance.Status == status)
    +
    +			if complete {
    +				return instance, nil
    +			}
    +		case <-ctx.Done():
    +			return nil, fmt.Errorf("Error waiting for Instance %d status %s: %s", instanceID, status, ctx.Err())
    +		}
    +	}
    +}
    +
    +// WaitForInstanceDiskStatus waits for the Linode instance disk to reach the desired state
    +// before returning. It will timeout with an error after timeoutSeconds.
    +func (client Client) WaitForInstanceDiskStatus(ctx context.Context, instanceID int, diskID int, status DiskStatus, timeoutSeconds int) (*InstanceDisk, error) {
    +	ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
    +	defer cancel()
    +
    +	ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
    +	defer ticker.Stop()
    +	for {
    +		select {
    +		case <-ticker.C:
    +			// GetInstanceDisk will 404 on newly created disks. use List instead.
    +			// disk, err := client.GetInstanceDisk(ctx, instanceID, diskID)
    +			disks, err := client.ListInstanceDisks(ctx, instanceID, nil)
    +			if err != nil {
    +				return nil, err
    +			}
    +			for _, disk := range disks {
    +				if disk.ID == diskID {
    +					complete := (disk.Status == status)
    +					if complete {
    +						return &disk, nil
    +					}
    +					break
    +				}
    +			}
    +
    +		case <-ctx.Done():
    +			return nil, fmt.Errorf("Error waiting for Instance %d Disk %d status %s: %s", instanceID, diskID, status, ctx.Err())
    +		}
    +	}
    +}
    +
    +// WaitForVolumeStatus waits for the Volume to reach the desired state
    +// before returning. It will timeout with an error after timeoutSeconds.
    +func (client Client) WaitForVolumeStatus(ctx context.Context, volumeID int, status VolumeStatus, timeoutSeconds int) (*Volume, error) {
    +	ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
    +	defer cancel()
    +
    +	ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
    +	defer ticker.Stop()
    +	for {
    +		select {
    +		case <-ticker.C:
    +			volume, err := client.GetVolume(ctx, volumeID)
    +			if err != nil {
    +				return volume, err
    +			}
    +			complete := (volume.Status == status)
    +
    +			if complete {
    +				return volume, nil
    +			}
    +		case <-ctx.Done():
    +			return nil, fmt.Errorf("Error waiting for Volume %d status %s: %s", volumeID, status, ctx.Err())
    +		}
    +	}
    +}
    +
    +// WaitForSnapshotStatus waits for the Snapshot to reach the desired state
    +// before returning. It will timeout with an error after timeoutSeconds.
    +func (client Client) WaitForSnapshotStatus(ctx context.Context, instanceID int, snapshotID int, status InstanceSnapshotStatus, timeoutSeconds int) (*InstanceSnapshot, error) {
    +	ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
    +	defer cancel()
    +
    +	ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
    +	defer ticker.Stop()
    +	for {
    +		select {
    +		case <-ticker.C:
    +			snapshot, err := client.GetInstanceSnapshot(ctx, instanceID, snapshotID)
    +			if err != nil {
    +				return snapshot, err
    +			}
    +			complete := (snapshot.Status == status)
    +
    +			if complete {
    +				return snapshot, nil
    +			}
    +		case <-ctx.Done():
    +			return nil, fmt.Errorf("Error waiting for Instance %d Snapshot %d status %s: %s", instanceID, snapshotID, status, ctx.Err())
    +		}
    +	}
    +}
    +
    +// WaitForVolumeLinodeID waits for the Volume to match the desired LinodeID
+// before returning. An active Instance will not immediately attach or detach a volume, so
+// the LinodeID must be polled to determine volume readiness from the API.
    +// WaitForVolumeLinodeID will timeout with an error after timeoutSeconds.
    +func (client Client) WaitForVolumeLinodeID(ctx context.Context, volumeID int, linodeID *int, timeoutSeconds int) (*Volume, error) {
    +	ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
    +	defer cancel()
    +
    +	ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
    +	defer ticker.Stop()
    +	for {
    +		select {
    +		case <-ticker.C:
    +			volume, err := client.GetVolume(ctx, volumeID)
    +			if err != nil {
    +				return volume, err
    +			}
    +
    +			if linodeID == nil && volume.LinodeID == nil {
    +				return volume, nil
    +			} else if linodeID == nil || volume.LinodeID == nil {
    +				// continue waiting
    +			} else if *volume.LinodeID == *linodeID {
    +				return volume, nil
    +			}
    +
    +		case <-ctx.Done():
    +			return nil, fmt.Errorf("Error waiting for Volume %d to have Instance %v: %s", volumeID, linodeID, ctx.Err())
    +		}
    +	}
    +}
    +
    +// WaitForEventFinished waits for an entity action to reach the 'finished' state
    +// before returning. It will timeout with an error after timeoutSeconds.
    +// If the event indicates a failure both the failed event and the error will be returned.
    +func (client Client) WaitForEventFinished(ctx context.Context, id interface{}, entityType EntityType, action EventAction, minStart time.Time, timeoutSeconds int) (*Event, error) {
    +	titledEntityType := strings.Title(string(entityType))
    +	filter, _ := json.Marshal(map[string]interface{}{
    +		// Entity is not filtered by the API
    +		// Perhaps one day they will permit Entity ID/Type filtering.
    +		// We'll have to verify these values manually, for now.
    +		//"entity": map[string]interface{}{
    +		//	"id":   fmt.Sprintf("%v", id),
    +		//	"type": entityType,
    +		//},
    +
    +		// Nor is action
    +		//"action": action,
    +
    +		// Created is not correctly filtered by the API
    +		// We'll have to verify these values manually, for now.
    +		//"created": map[string]interface{}{
    +		//	"+gte": minStart.Format(time.RFC3339),
    +		//},
    +
    +		// With potentially 1000+ events coming back, we should filter on something
    +		"seen": false,
    +
    +		// Float the latest events to page 1
    +		"+order_by": "created",
    +		"+order":    "desc",
    +	})
    +
    +	// Optimistically restrict results to page 1.  We should remove this when more
    +	// precise filtering options exist.
    +	listOptions := NewListOptions(1, string(filter))
    +
    +	ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
    +	defer cancel()
    +
    +	if deadline, ok := ctx.Deadline(); ok {
    +		duration := time.Until(deadline)
    +		log.Printf("[INFO] Waiting %d seconds for %s events since %v for %s %v", int(duration.Seconds()), action, minStart, titledEntityType, id)
    +	}
    +
    +	ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
    +	defer ticker.Stop()
    +	for {
    +		select {
    +		case <-ticker.C:
    +
    +			events, err := client.ListEvents(ctx, listOptions)
    +			if err != nil {
    +				return nil, err
    +			}
    +
    +			// If there are events for this instance + action, inspect them
    +			for _, event := range events {
    +				if event.Action != action {
    +					// log.Println("action mismatch", event.Action, action)
    +					continue
    +				}
    +				if event.Entity == nil || event.Entity.Type != entityType {
    +					// log.Println("type mismatch", event.Entity.Type, entityType)
    +					continue
    +				}
    +
    +				var entID string
    +
    +				switch event.Entity.ID.(type) {
    +				case float64, float32:
    +					entID = fmt.Sprintf("%.f", event.Entity.ID)
    +				case int:
    +					entID = strconv.Itoa(event.Entity.ID.(int))
    +				default:
    +					entID = fmt.Sprintf("%v", event.Entity.ID)
    +				}
    +
    +				var findID string
    +				switch id.(type) {
    +				case float64, float32:
    +					findID = fmt.Sprintf("%.f", id)
    +				case int:
    +					findID = strconv.Itoa(id.(int))
    +				default:
    +					findID = fmt.Sprintf("%v", id)
    +				}
    +
    +				if entID != findID {
    +					// log.Println("id mismatch", entID, findID)
    +					continue
    +				}
    +
    +				// @TODO(displague) This event.Created check shouldn't be needed, but it appears
    +				// that the ListEvents method is not populating it correctly
    +				if event.Created == nil {
    +					log.Printf("[WARN] event.Created is nil when API returned: %#+v", event.CreatedStr)
    +				} else if *event.Created != minStart && !event.Created.After(minStart) {
    +					// Not the event we were looking for
    +					// log.Println(event.Created, "is not >=", minStart)
    +					continue
    +
    +				}
    +
    +				if event.Status == EventFailed {
    +					return &event, fmt.Errorf("%s %v action %s failed", titledEntityType, id, action)
    +				} else if event.Status == EventScheduled {
    +					log.Printf("[INFO] %s %v action %s is scheduled", titledEntityType, id, action)
    +				} else if event.Status == EventFinished {
    +					log.Printf("[INFO] %s %v action %s is finished", titledEntityType, id, action)
    +					return &event, nil
    +				}
    +				// TODO(displague) can we bump the ticker to TimeRemaining/2 (>=1) when non-nil?
    +				log.Printf("[INFO] %s %v action %s is %s", titledEntityType, id, action, event.Status)
    +			}
    +		case <-ctx.Done():
    +			return nil, fmt.Errorf("Error waiting for Event Status '%s' of %s %v action '%s': %s", EventFinished, titledEntityType, id, action, ctx.Err())
    +		}
    +	}
    +}
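
Because AttachVolume returns before the attachment is visible on the instance, the WaitForVolumeLinodeID waiter above is what makes attach flows deterministic. A hedged sketch under the same assumptions as the earlier snippets (IDs and timeout are placeholders):

    volumeID, linodeID := 6789, 12345 // hypothetical resource IDs
    if _, err := client.AttachVolume(context.Background(), volumeID, &linodego.VolumeAttachOptions{
    	LinodeID: linodeID,
    }); err != nil {
    	log.Fatal(err)
    }
    // Poll until the API reports the volume attached to that instance (120s timeout).
    vol, err := client.WaitForVolumeLinodeID(context.Background(), volumeID, &linodeID, 120)
    if err != nil {
    	log.Fatal(err)
    }
    fmt.Println("attached, filesystem path:", vol.FilesystemPath)
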
    diff --git a/vendor/github.com/miekg/dns/.codecov.yml b/vendor/github.com/miekg/dns/.codecov.yml
    new file mode 100644
    index 000000000000..f91e5c1fe57c
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/.codecov.yml
    @@ -0,0 +1,8 @@
    +coverage:
    +  status:
    +    project:
    +      default:
    +        target: 40%
    +        threshold: null
    +    patch: false
    +    changes: false
    diff --git a/vendor/github.com/miekg/dns/.gitignore b/vendor/github.com/miekg/dns/.gitignore
    new file mode 100644
    index 000000000000..776cd950c25c
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/.gitignore
    @@ -0,0 +1,4 @@
    +*.6
    +tags
    +test.out
    +a.out
    diff --git a/vendor/github.com/miekg/dns/.travis.yml b/vendor/github.com/miekg/dns/.travis.yml
    new file mode 100644
    index 000000000000..013b5ae44d41
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/.travis.yml
    @@ -0,0 +1,19 @@
    +language: go
    +sudo: false
    +
    +go:
    +  - 1.10.x
    +  - 1.11.x
    +  - 1.12.x
    +  - tip
    +
    +before_install:
    +  # don't use the miekg/dns when testing forks
    +  - mkdir -p $GOPATH/src/github.com/miekg
    +  - ln -s $TRAVIS_BUILD_DIR $GOPATH/src/github.com/miekg/ || true
    +
    +script:
    +  - go test -race -v -bench=. -coverprofile=coverage.txt -covermode=atomic ./...
    +
    +after_success:
    +  - bash <(curl -s https://codecov.io/bash)
    diff --git a/vendor/github.com/miekg/dns/AUTHORS b/vendor/github.com/miekg/dns/AUTHORS
    new file mode 100644
    index 000000000000..1965683525ad
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/AUTHORS
    @@ -0,0 +1 @@
    +Miek Gieben 
    diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/miekg/dns/CONTRIBUTORS
    new file mode 100644
    index 000000000000..5903779d81fa
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/CONTRIBUTORS
    @@ -0,0 +1,10 @@
    +Alex A. Skinner
    +Andrew Tunnell-Jones
    +Ask Bjørn Hansen
    +Dave Cheney
    +Dusty Wilson
    +Marek Majkowski
    +Peter van Dijk
    +Omri Bahumi
    +Alex Sergeyev
    +James Hartig
    diff --git a/vendor/github.com/miekg/dns/COPYRIGHT b/vendor/github.com/miekg/dns/COPYRIGHT
    new file mode 100644
    index 000000000000..35702b10e87e
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/COPYRIGHT
    @@ -0,0 +1,9 @@
    +Copyright 2009 The Go Authors. All rights reserved. Use of this source code
    +is governed by a BSD-style license that can be found in the LICENSE file.
    +Extensions of the original work are copyright (c) 2011 Miek Gieben
    +
    +Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is
    +governed by a BSD-style license that can be found in the LICENSE file.
    +
    +Copyright 2014 CloudFlare. All rights reserved. Use of this source code is
    +governed by a BSD-style license that can be found in the LICENSE file.
    diff --git a/vendor/github.com/miekg/dns/Gopkg.lock b/vendor/github.com/miekg/dns/Gopkg.lock
    new file mode 100644
    index 000000000000..66be346a4bd1
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/Gopkg.lock
    @@ -0,0 +1,33 @@
    +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
    +
    +
    +[[projects]]
    +  branch = "master"
    +  name = "golang.org/x/crypto"
    +  packages = ["ed25519","ed25519/internal/edwards25519"]
    +  revision = "20be4c3c3ed52bfccdb2d59a412ee1a936d175a7"
    +
    +[[projects]]
    +  branch = "master"
    +  name = "golang.org/x/net"
    +  packages = ["bpf","internal/iana","internal/socket","ipv4","ipv6"]
    +  revision = "60506f45cf65977eb3a9c6e30f995f54a721c271"
    +
    +[[projects]]
    +  branch = "master"
    +  name = "golang.org/x/sync"
    +  packages = ["errgroup"]
    +  revision = "112230192c580c3556b8cee6403af37a4fc5f28c"
    +
    +[[projects]]
    +  branch = "master"
    +  name = "golang.org/x/sys"
    +  packages = ["unix","windows"]
    +  revision = "4c4f7f33c9ed00de01c4c741d2177abfcfe19307"
    +
    +[solve-meta]
    +  analyzer-name = "dep"
    +  analyzer-version = 1
    +  inputs-digest = "fc70ece982d3660454fe1b9ccd5c91162a1cc080b58ad82aa065ad4a2ec93ce9"
    +  solver-name = "gps-cdcl"
    +  solver-version = 1
    diff --git a/vendor/github.com/miekg/dns/Gopkg.toml b/vendor/github.com/miekg/dns/Gopkg.toml
    new file mode 100644
    index 000000000000..85e6ff31b222
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/Gopkg.toml
    @@ -0,0 +1,38 @@
    +
    +# Gopkg.toml example
    +#
    +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
    +# for detailed Gopkg.toml documentation.
    +#
    +# required = ["github.com/user/thing/cmd/thing"]
    +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
    +#
    +# [[constraint]]
    +#   name = "github.com/user/project"
    +#   version = "1.0.0"
    +#
    +# [[constraint]]
    +#   name = "github.com/user/project2"
    +#   branch = "dev"
    +#   source = "github.com/myfork/project2"
    +#
    +# [[override]]
    +#  name = "github.com/x/y"
    +#  version = "2.4.0"
    +
    +
    +[[constraint]]
    +  branch = "master"
    +  name = "golang.org/x/crypto"
    +
    +[[constraint]]
    +  branch = "master"
    +  name = "golang.org/x/net"
    +
    +[[constraint]]
    +  branch = "master"
    +  name = "golang.org/x/sys"
    +
    +[[constraint]]
    +  branch = "master"
    +  name = "golang.org/x/sync"
    diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE
    new file mode 100644
    index 000000000000..5763fa7fe5d9
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/LICENSE
    @@ -0,0 +1,32 @@
    +Extensions of the original work are copyright (c) 2011 Miek Gieben
    +
    +As this is fork of the official Go code the same license applies:
    +
    +Copyright (c) 2009 The Go Authors. All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +   * Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +   * Redistributions in binary form must reproduce the above
    +copyright notice, this list of conditions and the following disclaimer
    +in the documentation and/or other materials provided with the
    +distribution.
    +   * Neither the name of Google Inc. nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    +
    diff --git a/vendor/github.com/miekg/dns/Makefile.fuzz b/vendor/github.com/miekg/dns/Makefile.fuzz
    new file mode 100644
    index 000000000000..dc158c4acee1
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/Makefile.fuzz
    @@ -0,0 +1,33 @@
    +# Makefile for fuzzing
    +#
    +# Use go-fuzz and needs the tools installed.
    +# See https://blog.cloudflare.com/dns-parser-meet-go-fuzzer/
    +#
    +# Installing go-fuzz:
    +# $ make -f Makefile.fuzz get
    +# Installs:
    +# * github.com/dvyukov/go-fuzz/go-fuzz
+# * github.com/dvyukov/go-fuzz/go-fuzz-build
    +
    +all: build
    +
    +.PHONY: build
    +build:
    +	go-fuzz-build -tags fuzz github.com/miekg/dns
    +
    +.PHONY: build-newrr
    +build-newrr:
    +	go-fuzz-build -func FuzzNewRR -tags fuzz github.com/miekg/dns
    +
    +.PHONY: fuzz
    +fuzz:
    +	go-fuzz -bin=dns-fuzz.zip -workdir=fuzz
    +
    +.PHONY: get
    +get:
    +	go get github.com/dvyukov/go-fuzz/go-fuzz
    +	go get github.com/dvyukov/go-fuzz/go-fuzz-build
    +
    +.PHONY: clean
    +clean:
    +	rm *-fuzz.zip
    diff --git a/vendor/github.com/miekg/dns/Makefile.release b/vendor/github.com/miekg/dns/Makefile.release
    new file mode 100644
    index 000000000000..8fb748e8aaef
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/Makefile.release
    @@ -0,0 +1,52 @@
    +# Makefile for releasing.
    +#
    +# The release is controlled from version.go. The version found there is
+# used to tag the git repo; we're not building any artifacts so there is nothing
    +# to upload to github.
    +#
    +# * Up the version in version.go
    +# * Run: make -f Makefile.release release
    +#   * will *commit* your change with 'Release $VERSION'
    +#   * push to github
    +#
    +
    +define GO
    +//+build ignore
    +
    +package main
    +
    +import (
    +	"fmt"
    +
    +	"github.com/miekg/dns"
    +)
    +
    +func main() {
    +	fmt.Println(dns.Version.String())
    +}
    +endef
    +
    +$(file > version_release.go,$(GO))
    +VERSION:=$(shell go run version_release.go)
    +TAG="v$(VERSION)"
    +
    +all:
    +	@echo Use the \'release\' target to start a release $(VERSION)
    +	rm -f version_release.go
    +
    +.PHONY: release
    +release: commit push
    +	@echo Released $(VERSION)
    +	rm -f version_release.go
    +
    +.PHONY: commit
    +commit:
    +	@echo Committing release $(VERSION)
    +	git commit -am"Release $(VERSION)"
    +	git tag $(TAG)
    +
    +.PHONY: push
    +push:
    +	@echo Pushing release $(VERSION) to master
    +	git push --tags
    +	git push
    diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md
    new file mode 100644
    index 000000000000..de4afed69ab0
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/README.md
    @@ -0,0 +1,174 @@
    +[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns)
    +[![Code Coverage](https://img.shields.io/codecov/c/github/miekg/dns/master.svg)](https://codecov.io/github/miekg/dns?branch=master)
    +[![Go Report Card](https://goreportcard.com/badge/github.com/miekg/dns)](https://goreportcard.com/report/miekg/dns)
    +[![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns)
    +
    +# Alternative (more granular) approach to a DNS library
    +
    +> Less is more.
    +
    +Complete and usable DNS library. All Resource Records are supported, including the DNSSEC types.
    +It follows a lean and mean philosophy. If there is stuff you should know as a DNS programmer there
    +isn't a convenience function for it. Server side and client side programming is supported, i.e. you
    +can build servers and resolvers with it.
    +
    +We try to keep the "master" branch as sane as possible and at the bleeding edge of standards,
    +avoiding breaking changes wherever reasonable. We support the last two versions of Go.
    +
    +# Goals
    +
    +* KISS;
    +* Fast;
    +* Small API. If it's easy to code in Go, don't make a function for it.
    +
    +# Users
    +
    +A not-so-up-to-date-list-that-may-be-actually-current:
    +
    +* https://github.com/coredns/coredns
    +* https://cloudflare.com
    +* https://github.com/abh/geodns
    +* http://www.statdns.com/
    +* http://www.dnsinspect.com/
    +* https://github.com/chuangbo/jianbing-dictionary-dns
    +* http://www.dns-lg.com/
    +* https://github.com/fcambus/rrda
    +* https://github.com/kenshinx/godns
    +* https://github.com/skynetservices/skydns
    +* https://github.com/hashicorp/consul
    +* https://github.com/DevelopersPL/godnsagent
    +* https://github.com/duedil-ltd/discodns
    +* https://github.com/StalkR/dns-reverse-proxy
    +* https://github.com/tianon/rawdns
    +* https://mesosphere.github.io/mesos-dns/
    +* https://pulse.turbobytes.com/
    +* https://github.com/fcambus/statzone
    +* https://github.com/benschw/dns-clb-go
    +* https://github.com/corny/dnscheck for 
    +* https://namesmith.io
    +* https://github.com/miekg/unbound
    +* https://github.com/miekg/exdns
    +* https://dnslookup.org
    +* https://github.com/looterz/grimd
    +* https://github.com/phamhongviet/serf-dns
    +* https://github.com/mehrdadrad/mylg
    +* https://github.com/bamarni/dockness
    +* https://github.com/fffaraz/microdns
    +* http://kelda.io
    +* https://github.com/ipdcode/hades 
    +* https://github.com/StackExchange/dnscontrol/
    +* https://www.dnsperf.com/
    +* https://dnssectest.net/
    +* https://dns.apebits.com
    +* https://github.com/oif/apex
    +* https://github.com/jedisct1/dnscrypt-proxy
    +* https://github.com/jedisct1/rpdns
    +* https://github.com/xor-gate/sshfp
    +* https://github.com/rs/dnstrace
    +* https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss))
    +* https://github.com/semihalev/sdns
    +* https://render.com
    +* https://github.com/peterzen/goresolver
    +* https://github.com/folbricht/routedns
    +
    +Send pull request if you want to be listed here.
    +
    +# Features
    +
    +* UDP/TCP queries, IPv4 and IPv6
+* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported)
    +* Fast
    +* Server side programming (mimicking the net/http package)
    +* Client side programming
    +* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519
    +* EDNS0, NSID, Cookies
    +* AXFR/IXFR
    +* TSIG, SIG(0)
    +* DNS over TLS (DoT): encrypted connection between client and server over TCP
    +* DNS name compression
    +
    +Have fun!
    +
    +Miek Gieben  -  2010-2012  -  
    +DNS Authors 2012-
    +
    +# Building
    +
    +Building is done with the `go` tool. If you have setup your GOPATH correctly, the following should
    +work:
    +
    +    go get github.com/miekg/dns
    +    go build github.com/miekg/dns
    +
    +## Examples
    +
    +A short "how to use the API" is at the beginning of doc.go (this also will show when you call `godoc
    +github.com/miekg/dns`).
    +
    +Example programs can be found in the `github.com/miekg/exdns` repository.
    +
    +## Supported RFCs
    +
    +*all of them*
    +
    +* 103{4,5} - DNS standard
    +* 1348 - NSAP record (removed the record)
    +* 1982 - Serial Arithmetic
    +* 1876 - LOC record
    +* 1995 - IXFR
    +* 1996 - DNS notify
    +* 2136 - DNS Update (dynamic updates)
    +* 2181 - RRset definition - there is no RRset type though, just []RR
    +* 2537 - RSAMD5 DNS keys
    +* 2065 - DNSSEC (updated in later RFCs)
    +* 2671 - EDNS record
    +* 2782 - SRV record
    +* 2845 - TSIG record
    +* 2915 - NAPTR record
    +* 2929 - DNS IANA Considerations
    +* 3110 - RSASHA1 DNS keys
    +* 3225 - DO bit (DNSSEC OK)
    +* 340{1,2,3} - NAPTR record
    +* 3445 - Limiting the scope of (DNS)KEY
    +* 3597 - Unknown RRs
    +* 403{3,4,5} - DNSSEC + validation functions
    +* 4255 - SSHFP record
    +* 4343 - Case insensitivity
    +* 4408 - SPF record
    +* 4509 - SHA256 Hash in DS
    +* 4592 - Wildcards in the DNS
    +* 4635 - HMAC SHA TSIG
    +* 4701 - DHCID
    +* 4892 - id.server
    +* 5001 - NSID
    +* 5155 - NSEC3 record
    +* 5205 - HIP record
    +* 5702 - SHA2 in the DNS
    +* 5936 - AXFR
    +* 5966 - TCP implementation recommendations
    +* 6605 - ECDSA
    +* 6725 - IANA Registry Update
    +* 6742 - ILNP DNS
    +* 6840 - Clarifications and Implementation Notes for DNS Security
    +* 6844 - CAA record
    +* 6891 - EDNS0 update
    +* 6895 - DNS IANA considerations
    +* 6944 - DNSSEC DNSKEY Algorithm Status
    +* 6975 - Algorithm Understanding in DNSSEC
    +* 7043 - EUI48/EUI64 records
    +* 7314 - DNS (EDNS) EXPIRE Option
    +* 7477 - CSYNC RR
    +* 7828 - edns-tcp-keepalive EDNS0 Option
    +* 7553 - URI record
    +* 7858 - DNS over TLS: Initiation and Performance Considerations
    +* 7871 - EDNS0 Client Subnet
    +* 7873 - Domain Name System (DNS) Cookies
    +* 8080 - EdDSA for DNSSEC
    +* 8499 - DNS Terminology
    +
    +## Loosely Based Upon
    +
    +* ldns - 
    +* NSD - 
    +* Net::DNS - 
    +* GRONG - 
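
As the Examples section of the README above notes, doc.go and the exdns repository carry fuller programs. Purely as a sketch, a minimal client query against a public resolver looks like this (the resolver address and query name are example values):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/miekg/dns"
    )

    func main() {
    	m := new(dns.Msg)
    	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)

    	c := new(dns.Client)
    	in, rtt, err := c.Exchange(m, "8.8.8.8:53") // example resolver
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("rtt:", rtt)
    	for _, ans := range in.Answer {
    		if a, ok := ans.(*dns.A); ok {
    			fmt.Println("A record:", a.A)
    		}
    	}
    }
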
    diff --git a/vendor/github.com/miekg/dns/acceptfunc.go b/vendor/github.com/miekg/dns/acceptfunc.go
    new file mode 100644
    index 000000000000..c6920f792cdf
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/acceptfunc.go
    @@ -0,0 +1,54 @@
    +package dns
    +
    +// MsgAcceptFunc is used early in the server code to accept or reject a message with RcodeFormatError.
    +// It returns a MsgAcceptAction to indicate what should happen with the message.
    +type MsgAcceptFunc func(dh Header) MsgAcceptAction
    +
    +// DefaultMsgAcceptFunc checks the request and will reject if:
    +//
    +// * isn't a request (don't respond in that case).
    +// * opcode isn't OpcodeQuery or OpcodeNotify
    +// * Zero bit isn't zero
    +// * has more than 1 question in the question section
    +// * has more than 1 RR in the Answer section
    +// * has more than 0 RRs in the Authority section
    +// * has more than 2 RRs in the Additional section
    +var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc
    +
    +// MsgAcceptAction represents the action to be taken.
    +type MsgAcceptAction int
    +
    +const (
+	MsgAccept               MsgAcceptAction = iota // Accept the message
+	MsgReject                                      // Reject the message with a RcodeFormatError
+	MsgIgnore                                      // Ignore the error and send nothing back.
+	MsgRejectNotImplemented                        // Reject the message with a RcodeNotImplemented
    +)
    +
    +func defaultMsgAcceptFunc(dh Header) MsgAcceptAction {
    +	if isResponse := dh.Bits&_QR != 0; isResponse {
    +		return MsgIgnore
    +	}
    +
    +	// Don't allow dynamic updates, because then the sections can contain a whole bunch of RRs.
    +	opcode := int(dh.Bits>>11) & 0xF
    +	if opcode != OpcodeQuery && opcode != OpcodeNotify {
    +		return MsgRejectNotImplemented
    +	}
    +
    +	if dh.Qdcount != 1 {
    +		return MsgReject
    +	}
    +	// NOTIFY requests can have a SOA in the ANSWER section. See RFC 1996 Section 3.7 and 3.11.
    +	if dh.Ancount > 1 {
    +		return MsgReject
    +	}
    +	// IXFR request could have one SOA RR in the NS section. See RFC 1995, section 3.
    +	if dh.Nscount > 1 {
    +		return MsgReject
    +	}
    +	if dh.Arcount > 2 {
    +		return MsgReject
    +	}
    +	return MsgAccept
    +}
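
Servers built on this package can swap in their own policy by assigning a MsgAcceptFunc. A hedged sketch that accepts only plain single-question queries and silently drops everything else (dns.Server's MsgAcceptFunc field is assumed to be present, as in releases that ship acceptfunc.go):

    package main

    import (
    	"log"

    	"github.com/miekg/dns"
    )

    func main() {
    	// Accept ordinary single-question queries, silently drop everything else.
    	strictAccept := func(dh dns.Header) dns.MsgAcceptAction {
    		if dh.Bits&(1<<15) != 0 { // QR bit set: a response, not a request
    			return dns.MsgIgnore
    		}
    		if int(dh.Bits>>11)&0xF != dns.OpcodeQuery {
    			return dns.MsgIgnore
    		}
    		if dh.Qdcount != 1 {
    			return dns.MsgReject
    		}
    		return dns.MsgAccept
    	}

    	srv := &dns.Server{Addr: ":8053", Net: "udp", MsgAcceptFunc: strictAccept}
    	log.Fatal(srv.ListenAndServe())
    }
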
    diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go
    new file mode 100644
    index 000000000000..db2761d45bc1
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/client.go
    @@ -0,0 +1,415 @@
    +package dns
    +
    +// A client implementation.
    +
    +import (
    +	"context"
    +	"crypto/tls"
    +	"encoding/binary"
    +	"fmt"
    +	"io"
    +	"net"
    +	"strings"
    +	"time"
    +)
    +
    +const (
    +	dnsTimeout     time.Duration = 2 * time.Second
    +	tcpIdleTimeout time.Duration = 8 * time.Second
    +)
    +
    +// A Conn represents a connection to a DNS server.
    +type Conn struct {
    +	net.Conn                         // a net.Conn holding the connection
    +	UDPSize        uint16            // minimum receive buffer for UDP messages
    +	TsigSecret     map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
    +	tsigRequestMAC string
    +}
    +
    +// A Client defines parameters for a DNS client.
    +type Client struct {
+	Net       string      // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise a UDP one (default is "" for UDP)
    +	UDPSize   uint16      // minimum receive buffer for UDP messages
    +	TLSConfig *tls.Config // TLS connection configuration
    +	Dialer    *net.Dialer // a net.Dialer used to set local address, timeouts and more
    +	// Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout,
    +	// WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and
    +	// Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext)
    +	Timeout        time.Duration
    +	DialTimeout    time.Duration     // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
    +	ReadTimeout    time.Duration     // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
    +	WriteTimeout   time.Duration     // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
    +	TsigSecret     map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
    +	SingleInflight bool              // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
    +	group          singleflight
    +}
    +
    +// Exchange performs a synchronous UDP query. It sends the message m to the address
    +// contained in a and waits for a reply. Exchange does not retry a failed query, nor
    +// will it fall back to TCP in case of truncation.
    +// See client.Exchange for more information on setting larger buffer sizes.
    +func Exchange(m *Msg, a string) (r *Msg, err error) {
    +	client := Client{Net: "udp"}
    +	r, _, err = client.Exchange(m, a)
    +	return r, err
    +}
    +
    +func (c *Client) dialTimeout() time.Duration {
    +	if c.Timeout != 0 {
    +		return c.Timeout
    +	}
    +	if c.DialTimeout != 0 {
    +		return c.DialTimeout
    +	}
    +	return dnsTimeout
    +}
    +
    +func (c *Client) readTimeout() time.Duration {
    +	if c.ReadTimeout != 0 {
    +		return c.ReadTimeout
    +	}
    +	return dnsTimeout
    +}
    +
    +func (c *Client) writeTimeout() time.Duration {
    +	if c.WriteTimeout != 0 {
    +		return c.WriteTimeout
    +	}
    +	return dnsTimeout
    +}
    +
    +// Dial connects to the address on the named network.
    +func (c *Client) Dial(address string) (conn *Conn, err error) {
    +	// create a new dialer with the appropriate timeout
    +	var d net.Dialer
    +	if c.Dialer == nil {
    +		d = net.Dialer{Timeout: c.getTimeoutForRequest(c.dialTimeout())}
    +	} else {
    +		d = *c.Dialer
    +	}
    +
    +	network := c.Net
    +	if network == "" {
    +		network = "udp"
    +	}
    +
    +	useTLS := strings.HasPrefix(network, "tcp") && strings.HasSuffix(network, "-tls")
    +
    +	conn = new(Conn)
    +	if useTLS {
    +		network = strings.TrimSuffix(network, "-tls")
    +
    +		conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig)
    +	} else {
    +		conn.Conn, err = d.Dial(network, address)
    +	}
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	return conn, nil
    +}
    +
    +// Exchange performs a synchronous query. It sends the message m to the address
    +// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
    +//
    +//	c := new(dns.Client)
    +//	in, rtt, err := c.Exchange(message, "127.0.0.1:53")
    +//
    +// Exchange does not retry a failed query, nor will it fall back to TCP in
    +// case of truncation.
    +// It is up to the caller to create a message that allows for larger responses to be
    +// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger
+// buffer, see SetEdns0. Messages without an OPT RR will fall back to the historic limit
+// of 512 bytes.
+// To specify a local address or a timeout, the caller has to set the `Client.Dialer`
+// attribute appropriately.
    +func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
    +	if !c.SingleInflight {
    +		return c.exchange(m, address)
    +	}
    +
    +	q := m.Question[0]
    +	key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass)
    +	r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) {
    +		return c.exchange(m, address)
    +	})
    +	if r != nil && shared {
    +		r = r.Copy()
    +	}
    +
    +	return r, rtt, err
    +}
    +
    +func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
    +	var co *Conn
    +
    +	co, err = c.Dial(a)
    +
    +	if err != nil {
    +		return nil, 0, err
    +	}
    +	defer co.Close()
    +
    +	opt := m.IsEdns0()
    +	// If EDNS0 is used use that for size.
    +	if opt != nil && opt.UDPSize() >= MinMsgSize {
    +		co.UDPSize = opt.UDPSize()
    +	}
    +	// Otherwise use the client's configured UDP size.
    +	if opt == nil && c.UDPSize >= MinMsgSize {
    +		co.UDPSize = c.UDPSize
    +	}
    +
    +	co.TsigSecret = c.TsigSecret
    +	t := time.Now()
    +	// write with the appropriate write timeout
    +	co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout())))
    +	if err = co.WriteMsg(m); err != nil {
    +		return nil, 0, err
    +	}
    +
    +	co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout())))
    +	r, err = co.ReadMsg()
    +	if err == nil && r.Id != m.Id {
    +		err = ErrId
    +	}
    +	rtt = time.Since(t)
    +	return r, rtt, err
    +}
    +
    +// ReadMsg reads a message from the connection co.
    +// If the received message contains a TSIG record the transaction signature
    +// is verified. This method always tries to return the message, however if an
    +// error is returned there are no guarantees that the returned message is a
    +// valid representation of the packet read.
    +func (co *Conn) ReadMsg() (*Msg, error) {
    +	p, err := co.ReadMsgHeader(nil)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	m := new(Msg)
    +	if err := m.Unpack(p); err != nil {
    +		// If an error was returned, we still want to allow the user to use
    +		// the message, but naively they can just check err if they don't want
    +		// to use an erroneous message
    +		return m, err
    +	}
    +	if t := m.IsTsig(); t != nil {
    +		if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
    +			return m, ErrSecret
    +		}
    +		// Need to work on the original message p, as that was used to calculate the tsig.
    +		err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
    +	}
    +	return m, err
    +}
    +
    +// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil).
    +// Returns message as a byte slice to be parsed with Msg.Unpack later on.
    +// Note that error handling on the message body is not possible as only the header is parsed.
    +func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
    +	var (
    +		p   []byte
    +		n   int
    +		err error
    +	)
    +
    +	if _, ok := co.Conn.(net.PacketConn); ok {
    +		if co.UDPSize > MinMsgSize {
    +			p = make([]byte, co.UDPSize)
    +		} else {
    +			p = make([]byte, MinMsgSize)
    +		}
    +		n, err = co.Read(p)
    +	} else {
    +		var length uint16
    +		if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
    +			return nil, err
    +		}
    +
    +		p = make([]byte, length)
    +		n, err = io.ReadFull(co.Conn, p)
    +	}
    +
    +	if err != nil {
    +		return nil, err
    +	} else if n < headerSize {
    +		return nil, ErrShortRead
    +	}
    +
    +	p = p[:n]
    +	if hdr != nil {
    +		dh, _, err := unpackMsgHdr(p, 0)
    +		if err != nil {
    +			return nil, err
    +		}
    +		*hdr = dh
    +	}
    +	return p, err
    +}
    +
    +// Read implements the net.Conn read method.
    +func (co *Conn) Read(p []byte) (n int, err error) {
    +	if co.Conn == nil {
    +		return 0, ErrConnEmpty
    +	}
    +
    +	if _, ok := co.Conn.(net.PacketConn); ok {
    +		// UDP connection
    +		return co.Conn.Read(p)
    +	}
    +
    +	var length uint16
    +	if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
    +		return 0, err
    +	}
    +	if int(length) > len(p) {
    +		return 0, io.ErrShortBuffer
    +	}
    +
    +	return io.ReadFull(co.Conn, p[:length])
    +}
    +
    +// WriteMsg sends a message through the connection co.
    +// If the message m contains a TSIG record the transaction
    +// signature is calculated.
    +func (co *Conn) WriteMsg(m *Msg) (err error) {
    +	var out []byte
    +	if t := m.IsTsig(); t != nil {
    +		mac := ""
    +		if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
    +			return ErrSecret
    +		}
    +		out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
    +		// Set for the next read, although only used in zone transfers
    +		co.tsigRequestMAC = mac
    +	} else {
    +		out, err = m.Pack()
    +	}
    +	if err != nil {
    +		return err
    +	}
    +	_, err = co.Write(out)
    +	return err
    +}
    +
    +// Write implements the net.Conn Write method.
    +func (co *Conn) Write(p []byte) (int, error) {
    +	if len(p) > MaxMsgSize {
    +		return 0, &Error{err: "message too large"}
    +	}
    +
    +	if _, ok := co.Conn.(net.PacketConn); ok {
    +		return co.Conn.Write(p)
    +	}
    +
    +	l := make([]byte, 2)
    +	binary.BigEndian.PutUint16(l, uint16(len(p)))
    +
    +	n, err := (&net.Buffers{l, p}).WriteTo(co.Conn)
    +	return int(n), err
    +}
    +
    +// Return the appropriate timeout for a specific request
    +func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration {
    +	var requestTimeout time.Duration
    +	if c.Timeout != 0 {
    +		requestTimeout = c.Timeout
    +	} else {
    +		requestTimeout = timeout
    +	}
    +	// net.Dialer.Timeout has priority if smaller than the timeouts computed so
    +	// far
    +	if c.Dialer != nil && c.Dialer.Timeout != 0 {
    +		if c.Dialer.Timeout < requestTimeout {
    +			requestTimeout = c.Dialer.Timeout
    +		}
    +	}
    +	return requestTimeout
    +}
    +
    +// Dial connects to the address on the named network.
    +func Dial(network, address string) (conn *Conn, err error) {
    +	conn = new(Conn)
    +	conn.Conn, err = net.Dial(network, address)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return conn, nil
    +}
    +
    +// ExchangeContext performs a synchronous UDP query, like Exchange. It
    +// additionally obeys deadlines from the passed Context.
    +func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) {
    +	client := Client{Net: "udp"}
    +	r, _, err = client.ExchangeContext(ctx, m, a)
+	// ignoring rtt to leave the original ExchangeContext API unchanged, but
    +	// this function will go away
    +	return r, err
    +}
    +
    +// ExchangeConn performs a synchronous query. It sends the message m via the connection
    +// c and waits for a reply. The connection c is not closed by ExchangeConn.
    +// Deprecated: This function is going away, but can easily be mimicked:
    +//
    +//	co := &dns.Conn{Conn: c} // c is your net.Conn
    +//	co.WriteMsg(m)
    +//	in, _  := co.ReadMsg()
    +//	co.Close()
    +//
    +func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
    +	println("dns: ExchangeConn: this function is deprecated")
    +	co := new(Conn)
    +	co.Conn = c
    +	if err = co.WriteMsg(m); err != nil {
    +		return nil, err
    +	}
    +	r, err = co.ReadMsg()
    +	if err == nil && r.Id != m.Id {
    +		err = ErrId
    +	}
    +	return r, err
    +}
    +
    +// DialTimeout acts like Dial but takes a timeout.
    +func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
    +	client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}}
    +	return client.Dial(address)
    +}
    +
    +// DialWithTLS connects to the address on the named network with TLS.
    +func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) {
    +	if !strings.HasSuffix(network, "-tls") {
    +		network += "-tls"
    +	}
    +	client := Client{Net: network, TLSConfig: tlsConfig}
    +	return client.Dial(address)
    +}
    +
    +// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
    +func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) {
    +	if !strings.HasSuffix(network, "-tls") {
    +		network += "-tls"
    +	}
    +	client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig}
    +	return client.Dial(address)
    +}
    +
    +// ExchangeContext acts like Exchange, but honors the deadline on the provided
    +// context, if present. If there is both a context deadline and a configured
    +// timeout on the client, the earliest of the two takes effect.
    +func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
    +	var timeout time.Duration
    +	if deadline, ok := ctx.Deadline(); !ok {
    +		timeout = 0
    +	} else {
    +		timeout = time.Until(deadline)
    +	}
    +	// not passing the context to the underlying calls, as the API does not support
    +	// context. For timeouts you should set up Client.Dialer and call Client.Exchange.
    +	// TODO(tmthrgd,miekg): this is a race condition.
    +	c.Dialer = &net.Dialer{Timeout: timeout}
    +	return c.Exchange(m, a)
    +}
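
For readers unfamiliar with the miekg/dns client API vendored above, here is a minimal usage sketch of Client.ExchangeContext. The server address 192.0.2.53:53 and the name example.org are placeholders, and dns.TypeA comes from the package's generated type constants.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	// Build a question for the A record of example.org.
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.org"), dns.TypeA)

	// UDP client; ExchangeContext converts the context deadline into the
	// client's dial timeout (see the implementation above).
	c := &dns.Client{Net: "udp"}
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	r, rtt, err := c.ExchangeContext(ctx, m, "192.0.2.53:53")
	if err != nil {
		fmt.Println("exchange failed:", err)
		return
	}
	fmt.Printf("rcode=%d answers=%d rtt=%v\n", r.Rcode, len(r.Answer), rtt)
}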
    diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go
    new file mode 100644
    index 000000000000..e11b630df9f0
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/clientconfig.go
    @@ -0,0 +1,135 @@
    +package dns
    +
    +import (
    +	"bufio"
    +	"io"
    +	"os"
    +	"strconv"
    +	"strings"
    +)
    +
    +// ClientConfig wraps the contents of the /etc/resolv.conf file.
    +type ClientConfig struct {
    +	Servers  []string // servers to use
    +	Search   []string // suffixes to append to local name
    +	Port     string   // what port to use
    +	Ndots    int      // number of dots in name to trigger absolute lookup
    +	Timeout  int      // seconds before giving up on packet
    +	Attempts int      // lost packets before giving up on server, not used in the package dns
    +}
    +
    +// ClientConfigFromFile parses a resolv.conf(5) like file and returns
    +// a *ClientConfig.
    +func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
    +	file, err := os.Open(resolvconf)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer file.Close()
    +	return ClientConfigFromReader(file)
    +}
    +
    +// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument
    +func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) {
    +	c := new(ClientConfig)
    +	scanner := bufio.NewScanner(resolvconf)
    +	c.Servers = make([]string, 0)
    +	c.Search = make([]string, 0)
    +	c.Port = "53"
    +	c.Ndots = 1
    +	c.Timeout = 5
    +	c.Attempts = 2
    +
    +	for scanner.Scan() {
    +		if err := scanner.Err(); err != nil {
    +			return nil, err
    +		}
    +		line := scanner.Text()
    +		f := strings.Fields(line)
    +		if len(f) < 1 {
    +			continue
    +		}
    +		switch f[0] {
    +		case "nameserver": // add one name server
    +			if len(f) > 1 {
    +				// One more check: make sure server name is
    +				// just an IP address.  Otherwise we need DNS
    +				// to look it up.
    +				name := f[1]
    +				c.Servers = append(c.Servers, name)
    +			}
    +
    +		case "domain": // set search path to just this domain
    +			if len(f) > 1 {
    +				c.Search = make([]string, 1)
    +				c.Search[0] = f[1]
    +			} else {
    +				c.Search = make([]string, 0)
    +			}
    +
    +		case "search": // set search path to given servers
    +			c.Search = append([]string(nil), f[1:]...)
    +
    +		case "options": // magic options
    +			for _, s := range f[1:] {
    +				switch {
    +				case len(s) >= 6 && s[:6] == "ndots:":
    +					n, _ := strconv.Atoi(s[6:])
    +					if n < 0 {
    +						n = 0
    +					} else if n > 15 {
    +						n = 15
    +					}
    +					c.Ndots = n
    +				case len(s) >= 8 && s[:8] == "timeout:":
    +					n, _ := strconv.Atoi(s[8:])
    +					if n < 1 {
    +						n = 1
    +					}
    +					c.Timeout = n
    +				case len(s) >= 9 && s[:9] == "attempts:":
    +					n, _ := strconv.Atoi(s[9:])
    +					if n < 1 {
    +						n = 1
    +					}
    +					c.Attempts = n
    +				case s == "rotate":
    +					/* not imp */
    +				}
    +			}
    +		}
    +	}
    +	return c, nil
    +}
    +
    +// NameList returns all of the names that should be queried based on the
+// config. It is based on Go's net/dns name building, but it does not
    +// check the length of the resulting names.
    +func (c *ClientConfig) NameList(name string) []string {
    +	// if this domain is already fully qualified, no append needed.
    +	if IsFqdn(name) {
    +		return []string{name}
    +	}
    +
    +	// Check to see if the name has more labels than Ndots. Do this before making
    +	// the domain fully qualified.
    +	hasNdots := CountLabel(name) > c.Ndots
    +	// Make the domain fully qualified.
    +	name = Fqdn(name)
    +
    +	// Make a list of names based off search.
    +	names := []string{}
    +
    +	// If name has enough dots, try that first.
    +	if hasNdots {
    +		names = append(names, name)
    +	}
    +	for _, s := range c.Search {
    +		names = append(names, Fqdn(name+s))
    +	}
    +	// If we didn't have enough dots, try after suffixes.
    +	if !hasNdots {
    +		names = append(names, name)
    +	}
    +	return names
    +}
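
A short sketch of the resolver-configuration helpers above; the /etc/resolv.conf path and the relative name "www" are illustrative.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	conf, err := dns.ClientConfigFromFile("/etc/resolv.conf")
	if err != nil {
		fmt.Println("cannot parse resolv.conf:", err)
		return
	}
	fmt.Println("servers:", conf.Servers, "port:", conf.Port)

	// NameList applies the ndots/search logic to a relative name.
	for _, name := range conf.NameList("www") {
		fmt.Println("candidate:", name)
	}
}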
    diff --git a/vendor/github.com/miekg/dns/dane.go b/vendor/github.com/miekg/dns/dane.go
    new file mode 100644
    index 000000000000..8c4a14ef1906
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/dane.go
    @@ -0,0 +1,43 @@
    +package dns
    +
    +import (
    +	"crypto/sha256"
    +	"crypto/sha512"
    +	"crypto/x509"
    +	"encoding/hex"
    +	"errors"
    +)
    +
    +// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records.
    +func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
    +	switch matchingType {
    +	case 0:
    +		switch selector {
    +		case 0:
    +			return hex.EncodeToString(cert.Raw), nil
    +		case 1:
    +			return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
    +		}
    +	case 1:
    +		h := sha256.New()
    +		switch selector {
    +		case 0:
    +			h.Write(cert.Raw)
    +			return hex.EncodeToString(h.Sum(nil)), nil
    +		case 1:
    +			h.Write(cert.RawSubjectPublicKeyInfo)
    +			return hex.EncodeToString(h.Sum(nil)), nil
    +		}
    +	case 2:
    +		h := sha512.New()
    +		switch selector {
    +		case 0:
    +			h.Write(cert.Raw)
    +			return hex.EncodeToString(h.Sum(nil)), nil
    +		case 1:
    +			h.Write(cert.RawSubjectPublicKeyInfo)
    +			return hex.EncodeToString(h.Sum(nil)), nil
    +		}
    +	}
    +	return "", errors.New("dns: bad MatchingType or Selector")
    +}
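
A sketch of how CertificateToDANE is typically used: hash a peer certificate's SubjectPublicKeyInfo (selector 1) with SHA-256 (matching type 1) to obtain the digest carried in a TLSA record. The TLS dial target is a placeholder and the TLSA record construction itself is assumed to happen elsewhere.

package main

import (
	"crypto/tls"
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Fetch the peer certificate from a TLS connection (address is a placeholder).
	conn, err := tls.Dial("tcp", "example.org:443", nil)
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	cert := conn.ConnectionState().PeerCertificates[0]

	// Selector 1 (SubjectPublicKeyInfo), matching type 1 (SHA-256), i.e. the
	// digest used in an "x 1 1" TLSA record.
	digest, err := dns.CertificateToDANE(1, 1, cert)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(digest)
}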
    diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go
    new file mode 100644
    index 000000000000..b059f6fc677a
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/defaults.go
    @@ -0,0 +1,378 @@
    +package dns
    +
    +import (
    +	"errors"
    +	"net"
    +	"strconv"
    +	"strings"
    +)
    +
    +const hexDigit = "0123456789abcdef"
    +
    +// Everything is assumed in ClassINET.
    +
    +// SetReply creates a reply message from a request message.
    +func (dns *Msg) SetReply(request *Msg) *Msg {
    +	dns.Id = request.Id
    +	dns.Response = true
    +	dns.Opcode = request.Opcode
    +	if dns.Opcode == OpcodeQuery {
    +		dns.RecursionDesired = request.RecursionDesired // Copy rd bit
    +		dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit
    +	}
    +	dns.Rcode = RcodeSuccess
    +	if len(request.Question) > 0 {
    +		dns.Question = make([]Question, 1)
    +		dns.Question[0] = request.Question[0]
    +	}
    +	return dns
    +}
    +
+// SetQuestion creates a question message: it sets the Question
    +// section, generates an Id and sets the RecursionDesired (RD)
    +// bit to true.
    +func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
    +	dns.Id = Id()
    +	dns.RecursionDesired = true
    +	dns.Question = make([]Question, 1)
    +	dns.Question[0] = Question{z, t, ClassINET}
    +	return dns
    +}
    +
+// SetNotify creates a notify message: it sets the Question
    +// section, generates an Id and sets the Authoritative (AA)
    +// bit to true.
    +func (dns *Msg) SetNotify(z string) *Msg {
    +	dns.Opcode = OpcodeNotify
    +	dns.Authoritative = true
    +	dns.Id = Id()
    +	dns.Question = make([]Question, 1)
    +	dns.Question[0] = Question{z, TypeSOA, ClassINET}
    +	return dns
    +}
    +
    +// SetRcode creates an error message suitable for the request.
    +func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg {
    +	dns.SetReply(request)
    +	dns.Rcode = rcode
    +	return dns
    +}
    +
    +// SetRcodeFormatError creates a message with FormError set.
    +func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg {
    +	dns.Rcode = RcodeFormatError
    +	dns.Opcode = OpcodeQuery
    +	dns.Response = true
    +	dns.Authoritative = false
    +	dns.Id = request.Id
    +	return dns
    +}
    +
    +// SetUpdate makes the message a dynamic update message. It
    +// sets the ZONE section to: z, TypeSOA, ClassINET.
    +func (dns *Msg) SetUpdate(z string) *Msg {
    +	dns.Id = Id()
    +	dns.Response = false
    +	dns.Opcode = OpcodeUpdate
    +	dns.Compress = false // BIND9 cannot handle compression
    +	dns.Question = make([]Question, 1)
    +	dns.Question[0] = Question{z, TypeSOA, ClassINET}
    +	return dns
    +}
    +
    +// SetIxfr creates message for requesting an IXFR.
    +func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg {
    +	dns.Id = Id()
    +	dns.Question = make([]Question, 1)
    +	dns.Ns = make([]RR, 1)
    +	s := new(SOA)
    +	s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0}
    +	s.Serial = serial
    +	s.Ns = ns
    +	s.Mbox = mbox
    +	dns.Question[0] = Question{z, TypeIXFR, ClassINET}
    +	dns.Ns[0] = s
    +	return dns
    +}
    +
    +// SetAxfr creates message for requesting an AXFR.
    +func (dns *Msg) SetAxfr(z string) *Msg {
    +	dns.Id = Id()
    +	dns.Question = make([]Question, 1)
    +	dns.Question[0] = Question{z, TypeAXFR, ClassINET}
    +	return dns
    +}
    +
    +// SetTsig appends a TSIG RR to the message.
    +// This is only a skeleton TSIG RR that is added as the last RR in the
+// additional section. The TSIG is calculated when the message is being sent.
    +func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg {
    +	t := new(TSIG)
    +	t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
    +	t.Algorithm = algo
    +	t.Fudge = fudge
    +	t.TimeSigned = uint64(timesigned)
    +	t.OrigId = dns.Id
    +	dns.Extra = append(dns.Extra, t)
    +	return dns
    +}
    +
+// SetEdns0 appends an EDNS0 OPT RR to the message.
+// TSIG should always be the last RR in a message.
    +func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg {
    +	e := new(OPT)
    +	e.Hdr.Name = "."
    +	e.Hdr.Rrtype = TypeOPT
    +	e.SetUDPSize(udpsize)
    +	if do {
    +		e.SetDo()
    +	}
    +	dns.Extra = append(dns.Extra, e)
    +	return dns
    +}
    +
    +// IsTsig checks if the message has a TSIG record as the last record
    +// in the additional section. It returns the TSIG record found or nil.
    +func (dns *Msg) IsTsig() *TSIG {
    +	if len(dns.Extra) > 0 {
    +		if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG {
    +			return dns.Extra[len(dns.Extra)-1].(*TSIG)
    +		}
    +	}
    +	return nil
    +}
    +
+// IsEdns0 checks if the message has an EDNS0 (OPT) record, any EDNS0
    +// record in the additional section will do. It returns the OPT record
    +// found or nil.
    +func (dns *Msg) IsEdns0() *OPT {
    +	// RFC 6891, Section 6.1.1 allows the OPT record to appear
    +	// anywhere in the additional record section, but it's usually at
    +	// the end so start there.
    +	for i := len(dns.Extra) - 1; i >= 0; i-- {
    +		if dns.Extra[i].Header().Rrtype == TypeOPT {
    +			return dns.Extra[i].(*OPT)
    +		}
    +	}
    +	return nil
    +}
    +
    +// popEdns0 is like IsEdns0, but it removes the record from the message.
    +func (dns *Msg) popEdns0() *OPT {
    +	// RFC 6891, Section 6.1.1 allows the OPT record to appear
    +	// anywhere in the additional record section, but it's usually at
    +	// the end so start there.
    +	for i := len(dns.Extra) - 1; i >= 0; i-- {
    +		if dns.Extra[i].Header().Rrtype == TypeOPT {
    +			opt := dns.Extra[i].(*OPT)
    +			dns.Extra = append(dns.Extra[:i], dns.Extra[i+1:]...)
    +			return opt
    +		}
    +	}
    +	return nil
    +}
    +
+// IsDomainName checks if s is a valid domain name. It returns the number of
+// labels and true when the domain name is valid. Note that a non fully
+// qualified domain name is considered valid; in this case the last label is
+// counted in the number of labels. When false is returned the number of labels
+// is not defined. Also note that this function is extremely liberal; almost
+// any string is a valid domain name as DNS is an 8-bit protocol. It checks
+// that each label fits in 63 characters and that the entire name will fit
+// into the 255-octet wire format limit.
    +func IsDomainName(s string) (labels int, ok bool) {
    +	// XXX: The logic in this function was copied from packDomainName and
    +	// should be kept in sync with that function.
    +
    +	const lenmsg = 256
    +
    +	if len(s) == 0 { // Ok, for instance when dealing with update RR without any rdata.
    +		return 0, false
    +	}
    +
    +	s = Fqdn(s)
    +
    +	// Each dot ends a segment of the name. Except for escaped dots (\.), which
    +	// are normal dots.
    +
    +	var (
    +		off    int
    +		begin  int
    +		wasDot bool
    +	)
    +	for i := 0; i < len(s); i++ {
    +		switch s[i] {
    +		case '\\':
    +			if off+1 > lenmsg {
    +				return labels, false
    +			}
    +
    +			// check for \DDD
    +			if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
    +				i += 3
    +				begin += 3
    +			} else {
    +				i++
    +				begin++
    +			}
    +
    +			wasDot = false
    +		case '.':
    +			if wasDot {
    +				// two dots back to back is not legal
    +				return labels, false
    +			}
    +			wasDot = true
    +
    +			labelLen := i - begin
    +			if labelLen >= 1<<6 { // top two bits of length must be clear
    +				return labels, false
    +			}
    +
    +			// off can already (we're in a loop) be bigger than lenmsg
    +			// this happens when a name isn't fully qualified
    +			off += 1 + labelLen
    +			if off > lenmsg {
    +				return labels, false
    +			}
    +
    +			labels++
    +			begin = i + 1
    +		default:
    +			wasDot = false
    +		}
    +	}
    +
    +	return labels, true
    +}
    +
    +// IsSubDomain checks if child is indeed a child of the parent. If child and parent
    +// are the same domain true is returned as well.
    +func IsSubDomain(parent, child string) bool {
    +	// Entire child is contained in parent
    +	return CompareDomainName(parent, child) == CountLabel(parent)
    +}
    +
    +// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet.
    +// The checking is performed on the binary payload.
    +func IsMsg(buf []byte) error {
    +	// Header
    +	if len(buf) < headerSize {
    +		return errors.New("dns: bad message header")
    +	}
    +	// Header: Opcode
    +	// TODO(miek): more checks here, e.g. check all header bits.
    +	return nil
    +}
    +
    +// IsFqdn checks if a domain name is fully qualified.
    +func IsFqdn(s string) bool {
    +	s2 := strings.TrimSuffix(s, ".")
    +	if s == s2 {
    +		return false
    +	}
    +
    +	i := strings.LastIndexFunc(s2, func(r rune) bool {
    +		return r != '\\'
    +	})
    +
    +	// Test whether we have an even number of escape sequences before
    +	// the dot or none.
    +	return (len(s2)-i)%2 != 0
    +}
    +
    +// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
    +// This means the RRs need to have the same type, name, and class. Returns true
    +// if the RR set is valid, otherwise false.
    +func IsRRset(rrset []RR) bool {
    +	if len(rrset) == 0 {
    +		return false
    +	}
    +	if len(rrset) == 1 {
    +		return true
    +	}
    +	rrHeader := rrset[0].Header()
    +	rrType := rrHeader.Rrtype
    +	rrClass := rrHeader.Class
    +	rrName := rrHeader.Name
    +
    +	for _, rr := range rrset[1:] {
    +		curRRHeader := rr.Header()
    +		if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName {
    +			// Mismatch between the records, so this is not a valid rrset for
+			// signing/verifying
    +			return false
    +		}
    +	}
    +
    +	return true
    +}
    +
+// Fqdn returns the fully qualified domain name from s.
    +// If s is already fully qualified, it behaves as the identity function.
    +func Fqdn(s string) string {
    +	if IsFqdn(s) {
    +		return s
    +	}
    +	return s + "."
    +}
    +
    +// Copied from the official Go code.
    +
    +// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
    +// address suitable for reverse DNS (PTR) record lookups or an error if it fails
    +// to parse the IP address.
    +func ReverseAddr(addr string) (arpa string, err error) {
    +	ip := net.ParseIP(addr)
    +	if ip == nil {
    +		return "", &Error{err: "unrecognized address: " + addr}
    +	}
    +	if v4 := ip.To4(); v4 != nil {
    +		buf := make([]byte, 0, net.IPv4len*4+len("in-addr.arpa."))
    +		// Add it, in reverse, to the buffer
    +		for i := len(v4) - 1; i >= 0; i-- {
    +			buf = strconv.AppendInt(buf, int64(v4[i]), 10)
    +			buf = append(buf, '.')
    +		}
    +		// Append "in-addr.arpa." and return (buf already has the final .)
    +		buf = append(buf, "in-addr.arpa."...)
    +		return string(buf), nil
    +	}
    +	// Must be IPv6
    +	buf := make([]byte, 0, net.IPv6len*4+len("ip6.arpa."))
    +	// Add it, in reverse, to the buffer
    +	for i := len(ip) - 1; i >= 0; i-- {
    +		v := ip[i]
    +		buf = append(buf, hexDigit[v&0xF])
    +		buf = append(buf, '.')
    +		buf = append(buf, hexDigit[v>>4])
    +		buf = append(buf, '.')
    +	}
    +	// Append "ip6.arpa." and return (buf already has the final .)
    +	buf = append(buf, "ip6.arpa."...)
    +	return string(buf), nil
    +}
    +
    +// String returns the string representation for the type t.
    +func (t Type) String() string {
    +	if t1, ok := TypeToString[uint16(t)]; ok {
    +		return t1
    +	}
    +	return "TYPE" + strconv.Itoa(int(t))
    +}
    +
    +// String returns the string representation for the class c.
    +func (c Class) String() string {
    +	if s, ok := ClassToString[uint16(c)]; ok {
+		// Only emit mnemonics when they are unambiguous; specifically, ANY is in both.
    +		if _, ok := StringToType[s]; !ok {
    +			return s
    +		}
    +	}
    +	return "CLASS" + strconv.Itoa(int(c))
    +}
    +
    +// String returns the string representation for the name n.
    +func (n Name) String() string {
    +	return sprintName(string(n))
    +}
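
A small sketch combining the helpers above: turn an IP address into a PTR question with ReverseAddr and SetQuestion, and attach an EDNS0 OPT record. The IP address is a placeholder; dns.TypePTR comes from the package's type constants.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	arpa, err := dns.ReverseAddr("192.0.2.1")
	if err != nil {
		fmt.Println(err)
		return
	}
	// arpa is "1.2.0.192.in-addr.arpa."
	m := new(dns.Msg)
	m.SetQuestion(arpa, dns.TypePTR)
	m.SetEdns0(4096, false)
	fmt.Println(m.String())
}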
    diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go
    new file mode 100644
    index 000000000000..ad83a27ecfab
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/dns.go
    @@ -0,0 +1,134 @@
    +package dns
    +
    +import "strconv"
    +
    +const (
    +	year68     = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
    +	defaultTtl = 3600    // Default internal TTL.
    +
    +	// DefaultMsgSize is the standard default for messages larger than 512 bytes.
    +	DefaultMsgSize = 4096
    +	// MinMsgSize is the minimal size of a DNS packet.
    +	MinMsgSize = 512
    +	// MaxMsgSize is the largest possible DNS packet.
    +	MaxMsgSize = 65535
    +)
    +
    +// Error represents a DNS error.
    +type Error struct{ err string }
    +
    +func (e *Error) Error() string {
    +	if e == nil {
    +		return "dns: "
    +	}
    +	return "dns: " + e.err
    +}
    +
    +// An RR represents a resource record.
    +type RR interface {
+	// Header returns the header of a resource record. The header contains
    +	// everything up to the rdata.
    +	Header() *RR_Header
    +	// String returns the text representation of the resource record.
    +	String() string
    +
    +	// copy returns a copy of the RR
    +	copy() RR
    +
    +	// len returns the length (in octets) of the compressed or uncompressed RR in wire format.
    +	//
    +	// If compression is nil, the uncompressed size will be returned, otherwise the compressed
    +	// size will be returned and domain names will be added to the map for future compression.
    +	len(off int, compression map[string]struct{}) int
    +
    +	// pack packs the records RDATA into wire format. The header will
    +	// already have been packed into msg.
    +	pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error)
    +
    +	// unpack unpacks an RR from wire format.
    +	//
    +	// This will only be called on a new and empty RR type with only the header populated. It
    +	// will only be called if the record's RDATA is non-empty.
    +	unpack(msg []byte, off int) (off1 int, err error)
    +
    +	// parse parses an RR from zone file format.
    +	//
    +	// This will only be called on a new and empty RR type with only the header populated.
    +	parse(c *zlexer, origin string) *ParseError
    +
    +	// isDuplicate returns whether the two RRs are duplicates.
    +	isDuplicate(r2 RR) bool
    +}
    +
    +// RR_Header is the header all DNS resource records share.
    +type RR_Header struct {
    +	Name     string `dns:"cdomain-name"`
    +	Rrtype   uint16
    +	Class    uint16
    +	Ttl      uint32
    +	Rdlength uint16 // Length of data after header.
    +}
    +
+// Header returns itself. This is here to make RR_Header implement the RR interface.
    +func (h *RR_Header) Header() *RR_Header { return h }
    +
    +// Just to implement the RR interface.
    +func (h *RR_Header) copy() RR { return nil }
    +
    +func (h *RR_Header) String() string {
    +	var s string
    +
    +	if h.Rrtype == TypeOPT {
    +		s = ";"
    +		// and maybe other things
    +	}
    +
    +	s += sprintName(h.Name) + "\t"
    +	s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
    +	s += Class(h.Class).String() + "\t"
    +	s += Type(h.Rrtype).String() + "\t"
    +	return s
    +}
    +
    +func (h *RR_Header) len(off int, compression map[string]struct{}) int {
    +	l := domainNameLen(h.Name, off, compression, true)
    +	l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
    +	return l
    +}
    +
    +func (h *RR_Header) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	// RR_Header has no RDATA to pack.
    +	return off, nil
    +}
    +
    +func (h *RR_Header) unpack(msg []byte, off int) (int, error) {
    +	panic("dns: internal error: unpack should never be called on RR_Header")
    +}
    +
    +func (h *RR_Header) parse(c *zlexer, origin string) *ParseError {
    +	panic("dns: internal error: parse should never be called on RR_Header")
    +}
    +
    +// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
    +func (rr *RFC3597) ToRFC3597(r RR) error {
    +	buf := make([]byte, Len(r)*2)
    +	headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false)
    +	if err != nil {
    +		return err
    +	}
    +	buf = buf[:off]
    +
    +	*rr = RFC3597{Hdr: *r.Header()}
    +	rr.Hdr.Rdlength = uint16(off - headerEnd)
    +
    +	if noRdata(rr.Hdr) {
    +		return nil
    +	}
    +
    +	_, err = rr.unpack(buf, headerEnd)
    +	if err != nil {
    +		return err
    +	}
    +
    +	return nil
    +}
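
A sketch of the RR interface and the RFC 3597 conversion above; dns.NewRR, the zone-format parser defined elsewhere in this package, is assumed here.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Parse a known record from zone-file format.
	rr, err := dns.NewRR("example.org. 3600 IN A 192.0.2.1")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(rr.Header().Name, rr.String())

	// Re-express it as an unknown (RFC 3597) record.
	u := new(dns.RFC3597)
	if err := u.ToRFC3597(rr); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(u.String())
}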
    diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go
    new file mode 100644
    index 000000000000..12a693f97cf9
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/dnssec.go
    @@ -0,0 +1,794 @@
    +package dns
    +
    +import (
    +	"bytes"
    +	"crypto"
    +	"crypto/dsa"
    +	"crypto/ecdsa"
    +	"crypto/elliptic"
    +	_ "crypto/md5"
    +	"crypto/rand"
    +	"crypto/rsa"
    +	_ "crypto/sha1"
    +	_ "crypto/sha256"
    +	_ "crypto/sha512"
    +	"encoding/asn1"
    +	"encoding/binary"
    +	"encoding/hex"
    +	"math/big"
    +	"sort"
    +	"strings"
    +	"time"
    +
    +	"golang.org/x/crypto/ed25519"
    +)
    +
    +// DNSSEC encryption algorithm codes.
    +const (
    +	_ uint8 = iota
    +	RSAMD5
    +	DH
    +	DSA
    +	_ // Skip 4, RFC 6725, section 2.1
    +	RSASHA1
    +	DSANSEC3SHA1
    +	RSASHA1NSEC3SHA1
    +	RSASHA256
    +	_ // Skip 9, RFC 6725, section 2.1
    +	RSASHA512
    +	_ // Skip 11, RFC 6725, section 2.1
    +	ECCGOST
    +	ECDSAP256SHA256
    +	ECDSAP384SHA384
    +	ED25519
    +	ED448
    +	INDIRECT   uint8 = 252
    +	PRIVATEDNS uint8 = 253 // Private (experimental keys)
    +	PRIVATEOID uint8 = 254
    +)
    +
    +// AlgorithmToString is a map of algorithm IDs to algorithm names.
    +var AlgorithmToString = map[uint8]string{
    +	RSAMD5:           "RSAMD5",
    +	DH:               "DH",
    +	DSA:              "DSA",
    +	RSASHA1:          "RSASHA1",
    +	DSANSEC3SHA1:     "DSA-NSEC3-SHA1",
    +	RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1",
    +	RSASHA256:        "RSASHA256",
    +	RSASHA512:        "RSASHA512",
    +	ECCGOST:          "ECC-GOST",
    +	ECDSAP256SHA256:  "ECDSAP256SHA256",
    +	ECDSAP384SHA384:  "ECDSAP384SHA384",
    +	ED25519:          "ED25519",
    +	ED448:            "ED448",
    +	INDIRECT:         "INDIRECT",
    +	PRIVATEDNS:       "PRIVATEDNS",
    +	PRIVATEOID:       "PRIVATEOID",
    +}
    +
    +// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
    +var AlgorithmToHash = map[uint8]crypto.Hash{
    +	RSAMD5:           crypto.MD5, // Deprecated in RFC 6725
    +	DSA:              crypto.SHA1,
    +	RSASHA1:          crypto.SHA1,
    +	RSASHA1NSEC3SHA1: crypto.SHA1,
    +	RSASHA256:        crypto.SHA256,
    +	ECDSAP256SHA256:  crypto.SHA256,
    +	ECDSAP384SHA384:  crypto.SHA384,
    +	RSASHA512:        crypto.SHA512,
    +	ED25519:          crypto.Hash(0),
    +}
    +
    +// DNSSEC hashing algorithm codes.
    +const (
    +	_      uint8 = iota
    +	SHA1         // RFC 4034
    +	SHA256       // RFC 4509
    +	GOST94       // RFC 5933
    +	SHA384       // Experimental
    +	SHA512       // Experimental
    +)
    +
    +// HashToString is a map of hash IDs to names.
    +var HashToString = map[uint8]string{
    +	SHA1:   "SHA1",
    +	SHA256: "SHA256",
    +	GOST94: "GOST94",
    +	SHA384: "SHA384",
    +	SHA512: "SHA512",
    +}
    +
    +// DNSKEY flag values.
    +const (
    +	SEP    = 1
    +	REVOKE = 1 << 7
    +	ZONE   = 1 << 8
    +)
    +
    +// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing.
    +type rrsigWireFmt struct {
    +	TypeCovered uint16
    +	Algorithm   uint8
    +	Labels      uint8
    +	OrigTtl     uint32
    +	Expiration  uint32
    +	Inception   uint32
    +	KeyTag      uint16
    +	SignerName  string `dns:"domain-name"`
    +	/* No Signature */
    +}
    +
    +// Used for converting DNSKEY's rdata to wirefmt.
    +type dnskeyWireFmt struct {
    +	Flags     uint16
    +	Protocol  uint8
    +	Algorithm uint8
    +	PublicKey string `dns:"base64"`
    +	/* Nothing is left out */
    +}
    +
    +func divRoundUp(a, b int) int {
    +	return (a + b - 1) / b
    +}
    +
    +// KeyTag calculates the keytag (or key-id) of the DNSKEY.
    +func (k *DNSKEY) KeyTag() uint16 {
    +	if k == nil {
    +		return 0
    +	}
    +	var keytag int
    +	switch k.Algorithm {
    +	case RSAMD5:
+		// Look at the bottom two bytes of the modulus, which is the last
+		// item in the pubkey.
    +		// This algorithm has been deprecated, but keep this key-tag calculation.
    +		modulus, _ := fromBase64([]byte(k.PublicKey))
    +		if len(modulus) > 1 {
    +			x := binary.BigEndian.Uint16(modulus[len(modulus)-2:])
    +			keytag = int(x)
    +		}
    +	default:
    +		keywire := new(dnskeyWireFmt)
    +		keywire.Flags = k.Flags
    +		keywire.Protocol = k.Protocol
    +		keywire.Algorithm = k.Algorithm
    +		keywire.PublicKey = k.PublicKey
    +		wire := make([]byte, DefaultMsgSize)
    +		n, err := packKeyWire(keywire, wire)
    +		if err != nil {
    +			return 0
    +		}
    +		wire = wire[:n]
    +		for i, v := range wire {
    +			if i&1 != 0 {
    +				keytag += int(v) // must be larger than uint32
    +			} else {
    +				keytag += int(v) << 8
    +			}
    +		}
    +		keytag += keytag >> 16 & 0xFFFF
    +		keytag &= 0xFFFF
    +	}
    +	return uint16(keytag)
    +}
    +
    +// ToDS converts a DNSKEY record to a DS record.
    +func (k *DNSKEY) ToDS(h uint8) *DS {
    +	if k == nil {
    +		return nil
    +	}
    +	ds := new(DS)
    +	ds.Hdr.Name = k.Hdr.Name
    +	ds.Hdr.Class = k.Hdr.Class
    +	ds.Hdr.Rrtype = TypeDS
    +	ds.Hdr.Ttl = k.Hdr.Ttl
    +	ds.Algorithm = k.Algorithm
    +	ds.DigestType = h
    +	ds.KeyTag = k.KeyTag()
    +
    +	keywire := new(dnskeyWireFmt)
    +	keywire.Flags = k.Flags
    +	keywire.Protocol = k.Protocol
    +	keywire.Algorithm = k.Algorithm
    +	keywire.PublicKey = k.PublicKey
    +	wire := make([]byte, DefaultMsgSize)
    +	n, err := packKeyWire(keywire, wire)
    +	if err != nil {
    +		return nil
    +	}
    +	wire = wire[:n]
    +
    +	owner := make([]byte, 255)
    +	off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false)
    +	if err1 != nil {
    +		return nil
    +	}
    +	owner = owner[:off]
    +	// RFC4034:
    +	// digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA);
    +	// "|" denotes concatenation
    +	// DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key.
    +
    +	var hash crypto.Hash
    +	switch h {
    +	case SHA1:
    +		hash = crypto.SHA1
    +	case SHA256:
    +		hash = crypto.SHA256
    +	case SHA384:
    +		hash = crypto.SHA384
    +	case SHA512:
    +		hash = crypto.SHA512
    +	default:
    +		return nil
    +	}
    +
    +	s := hash.New()
    +	s.Write(owner)
    +	s.Write(wire)
    +	ds.Digest = hex.EncodeToString(s.Sum(nil))
    +	return ds
    +}
    +
    +// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record.
    +func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
    +	c := &CDNSKEY{DNSKEY: *k}
    +	c.Hdr = k.Hdr
    +	c.Hdr.Rrtype = TypeCDNSKEY
    +	return c
    +}
    +
    +// ToCDS converts a DS record to a CDS record.
    +func (d *DS) ToCDS() *CDS {
    +	c := &CDS{DS: *d}
    +	c.Hdr = d.Hdr
    +	c.Hdr.Rrtype = TypeCDS
    +	return c
    +}
    +
    +// Sign signs an RRSet. The signature needs to be filled in with the values:
    +// Inception, Expiration, KeyTag, SignerName and Algorithm.  The rest is copied
+// from the RRset. Sign returns a non-nil error if signing fails.
    +// There is no check if RRSet is a proper (RFC 2181) RRSet.  If OrigTTL is non
    +// zero, it is used as-is, otherwise the TTL of the RRset is used as the
    +// OrigTTL.
    +func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
    +	if k == nil {
    +		return ErrPrivKey
    +	}
    +	// s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
    +	if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
    +		return ErrKey
    +	}
    +
    +	h0 := rrset[0].Header()
    +	rr.Hdr.Rrtype = TypeRRSIG
    +	rr.Hdr.Name = h0.Name
    +	rr.Hdr.Class = h0.Class
    +	if rr.OrigTtl == 0 { // If set don't override
    +		rr.OrigTtl = h0.Ttl
    +	}
    +	rr.TypeCovered = h0.Rrtype
    +	rr.Labels = uint8(CountLabel(h0.Name))
    +
    +	if strings.HasPrefix(h0.Name, "*") {
    +		rr.Labels-- // wildcard, remove from label count
    +	}
    +
    +	sigwire := new(rrsigWireFmt)
    +	sigwire.TypeCovered = rr.TypeCovered
    +	sigwire.Algorithm = rr.Algorithm
    +	sigwire.Labels = rr.Labels
    +	sigwire.OrigTtl = rr.OrigTtl
    +	sigwire.Expiration = rr.Expiration
    +	sigwire.Inception = rr.Inception
    +	sigwire.KeyTag = rr.KeyTag
    +	// For signing, lowercase this name
    +	sigwire.SignerName = strings.ToLower(rr.SignerName)
    +
    +	// Create the desired binary blob
    +	signdata := make([]byte, DefaultMsgSize)
    +	n, err := packSigWire(sigwire, signdata)
    +	if err != nil {
    +		return err
    +	}
    +	signdata = signdata[:n]
    +	wire, err := rawSignatureData(rrset, rr)
    +	if err != nil {
    +		return err
    +	}
    +
    +	hash, ok := AlgorithmToHash[rr.Algorithm]
    +	if !ok {
    +		return ErrAlg
    +	}
    +
    +	switch rr.Algorithm {
    +	case ED25519:
    +		// ed25519 signs the raw message and performs hashing internally.
    +		// All other supported signature schemes operate over the pre-hashed
    +		// message, and thus ed25519 must be handled separately here.
    +		//
    +		// The raw message is passed directly into sign and crypto.Hash(0) is
    +		// used to signal to the crypto.Signer that the data has not been hashed.
    +		signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm)
    +		if err != nil {
    +			return err
    +		}
    +
    +		rr.Signature = toBase64(signature)
    +	case RSAMD5, DSA, DSANSEC3SHA1:
    +		// See RFC 6944.
    +		return ErrAlg
    +	default:
    +		h := hash.New()
    +		h.Write(signdata)
    +		h.Write(wire)
    +
    +		signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm)
    +		if err != nil {
    +			return err
    +		}
    +
    +		rr.Signature = toBase64(signature)
    +	}
    +
    +	return nil
    +}
    +
    +func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) {
    +	signature, err := k.Sign(rand.Reader, hashed, hash)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	switch alg {
    +	case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
    +		return signature, nil
    +
    +	case ECDSAP256SHA256, ECDSAP384SHA384:
    +		ecdsaSignature := &struct {
    +			R, S *big.Int
    +		}{}
    +		if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil {
    +			return nil, err
    +		}
    +
    +		var intlen int
    +		switch alg {
    +		case ECDSAP256SHA256:
    +			intlen = 32
    +		case ECDSAP384SHA384:
    +			intlen = 48
    +		}
    +
    +		signature := intToBytes(ecdsaSignature.R, intlen)
    +		signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...)
    +		return signature, nil
    +
    +	// There is no defined interface for what a DSA backed crypto.Signer returns
    +	case DSA, DSANSEC3SHA1:
    +		// 	t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8)
    +		// 	signature := []byte{byte(t)}
    +		// 	signature = append(signature, intToBytes(r1, 20)...)
    +		// 	signature = append(signature, intToBytes(s1, 20)...)
    +		// 	rr.Signature = signature
    +
    +	case ED25519:
    +		return signature, nil
    +	}
    +
    +	return nil, ErrAlg
    +}
    +
    +// Verify validates an RRSet with the signature and key. This is only the
    +// cryptographic test, the signature validity period must be checked separately.
    +// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work.
    +func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
    +	// First the easy checks
    +	if !IsRRset(rrset) {
    +		return ErrRRset
    +	}
    +	if rr.KeyTag != k.KeyTag() {
    +		return ErrKey
    +	}
    +	if rr.Hdr.Class != k.Hdr.Class {
    +		return ErrKey
    +	}
    +	if rr.Algorithm != k.Algorithm {
    +		return ErrKey
    +	}
    +	if !strings.EqualFold(rr.SignerName, k.Hdr.Name) {
    +		return ErrKey
    +	}
    +	if k.Protocol != 3 {
    +		return ErrKey
    +	}
    +
    +	// IsRRset checked that we have at least one RR and that the RRs in
    +	// the set have consistent type, class, and name. Also check that type and
    +	// class matches the RRSIG record.
    +	if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered {
    +		return ErrRRset
    +	}
    +
    +	// RFC 4035 5.3.2.  Reconstructing the Signed Data
    +	// Copy the sig, except the rrsig data
    +	sigwire := new(rrsigWireFmt)
    +	sigwire.TypeCovered = rr.TypeCovered
    +	sigwire.Algorithm = rr.Algorithm
    +	sigwire.Labels = rr.Labels
    +	sigwire.OrigTtl = rr.OrigTtl
    +	sigwire.Expiration = rr.Expiration
    +	sigwire.Inception = rr.Inception
    +	sigwire.KeyTag = rr.KeyTag
    +	sigwire.SignerName = strings.ToLower(rr.SignerName)
    +	// Create the desired binary blob
    +	signeddata := make([]byte, DefaultMsgSize)
    +	n, err := packSigWire(sigwire, signeddata)
    +	if err != nil {
    +		return err
    +	}
    +	signeddata = signeddata[:n]
    +	wire, err := rawSignatureData(rrset, rr)
    +	if err != nil {
    +		return err
    +	}
    +
    +	sigbuf := rr.sigBuf()           // Get the binary signature data
    +	if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
    +		// TODO(miek)
    +		// remove the domain name and assume its ours?
    +	}
    +
    +	hash, ok := AlgorithmToHash[rr.Algorithm]
    +	if !ok {
    +		return ErrAlg
    +	}
    +
    +	switch rr.Algorithm {
    +	case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5:
    +		// TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere??
    +		pubkey := k.publicKeyRSA() // Get the key
    +		if pubkey == nil {
    +			return ErrKey
    +		}
    +
    +		h := hash.New()
    +		h.Write(signeddata)
    +		h.Write(wire)
    +		return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
    +
    +	case ECDSAP256SHA256, ECDSAP384SHA384:
    +		pubkey := k.publicKeyECDSA()
    +		if pubkey == nil {
    +			return ErrKey
    +		}
    +
    +		// Split sigbuf into the r and s coordinates
    +		r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2])
    +		s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:])
    +
    +		h := hash.New()
    +		h.Write(signeddata)
    +		h.Write(wire)
    +		if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
    +			return nil
    +		}
    +		return ErrSig
    +
    +	case ED25519:
    +		pubkey := k.publicKeyED25519()
    +		if pubkey == nil {
    +			return ErrKey
    +		}
    +
    +		if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) {
    +			return nil
    +		}
    +		return ErrSig
    +
    +	default:
    +		return ErrAlg
    +	}
    +}
    +
    +// ValidityPeriod uses RFC1982 serial arithmetic to calculate
    +// if a signature period is valid. If t is the zero time, the
+// current time is used; otherwise t is. Returns true if the signature
    +// is valid at the given time, otherwise returns false.
    +func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
    +	var utc int64
    +	if t.IsZero() {
    +		utc = time.Now().UTC().Unix()
    +	} else {
    +		utc = t.UTC().Unix()
    +	}
    +	modi := (int64(rr.Inception) - utc) / year68
    +	mode := (int64(rr.Expiration) - utc) / year68
    +	ti := int64(rr.Inception) + modi*year68
    +	te := int64(rr.Expiration) + mode*year68
    +	return ti <= utc && utc <= te
    +}
    +
+// Return the signature's sigdata, decoded from base64, as a byte slice.
    +func (rr *RRSIG) sigBuf() []byte {
    +	sigbuf, err := fromBase64([]byte(rr.Signature))
    +	if err != nil {
    +		return nil
    +	}
    +	return sigbuf
    +}
    +
    +// publicKeyRSA returns the RSA public key from a DNSKEY record.
    +func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
    +	keybuf, err := fromBase64([]byte(k.PublicKey))
    +	if err != nil {
    +		return nil
    +	}
    +
    +	if len(keybuf) < 1+1+64 {
    +		// Exponent must be at least 1 byte and modulus at least 64
    +		return nil
    +	}
    +
    +	// RFC 2537/3110, section 2. RSA Public KEY Resource Records
+	// Length is in the 0th byte, unless it's zero, then it
+	// is in bytes 1 and 2 and it's a 16-bit number.
    +	explen := uint16(keybuf[0])
    +	keyoff := 1
    +	if explen == 0 {
    +		explen = uint16(keybuf[1])<<8 | uint16(keybuf[2])
    +		keyoff = 3
    +	}
    +
    +	if explen > 4 || explen == 0 || keybuf[keyoff] == 0 {
    +		// Exponent larger than supported by the crypto package,
    +		// empty, or contains prohibited leading zero.
    +		return nil
    +	}
    +
    +	modoff := keyoff + int(explen)
    +	modlen := len(keybuf) - modoff
    +	if modlen < 64 || modlen > 512 || keybuf[modoff] == 0 {
    +		// Modulus is too small, large, or contains prohibited leading zero.
    +		return nil
    +	}
    +
    +	pubkey := new(rsa.PublicKey)
    +
    +	var expo uint64
    +	// The exponent of length explen is between keyoff and modoff.
    +	for _, v := range keybuf[keyoff:modoff] {
    +		expo <<= 8
    +		expo |= uint64(v)
    +	}
    +	if expo > 1<<31-1 {
    +		// Larger exponent than supported by the crypto package.
    +		return nil
    +	}
    +
    +	pubkey.E = int(expo)
    +	pubkey.N = new(big.Int).SetBytes(keybuf[modoff:])
    +	return pubkey
    +}
    +
    +// publicKeyECDSA returns the Curve public key from the DNSKEY record.
    +func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey {
    +	keybuf, err := fromBase64([]byte(k.PublicKey))
    +	if err != nil {
    +		return nil
    +	}
    +	pubkey := new(ecdsa.PublicKey)
    +	switch k.Algorithm {
    +	case ECDSAP256SHA256:
    +		pubkey.Curve = elliptic.P256()
    +		if len(keybuf) != 64 {
    +			// wrongly encoded key
    +			return nil
    +		}
    +	case ECDSAP384SHA384:
    +		pubkey.Curve = elliptic.P384()
    +		if len(keybuf) != 96 {
    +			// Wrongly encoded key
    +			return nil
    +		}
    +	}
    +	pubkey.X = new(big.Int).SetBytes(keybuf[:len(keybuf)/2])
    +	pubkey.Y = new(big.Int).SetBytes(keybuf[len(keybuf)/2:])
    +	return pubkey
    +}
    +
    +func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
    +	keybuf, err := fromBase64([]byte(k.PublicKey))
    +	if err != nil {
    +		return nil
    +	}
    +	if len(keybuf) < 22 {
    +		return nil
    +	}
    +	t, keybuf := int(keybuf[0]), keybuf[1:]
    +	size := 64 + t*8
    +	q, keybuf := keybuf[:20], keybuf[20:]
    +	if len(keybuf) != 3*size {
    +		return nil
    +	}
    +	p, keybuf := keybuf[:size], keybuf[size:]
    +	g, y := keybuf[:size], keybuf[size:]
    +	pubkey := new(dsa.PublicKey)
    +	pubkey.Parameters.Q = new(big.Int).SetBytes(q)
    +	pubkey.Parameters.P = new(big.Int).SetBytes(p)
    +	pubkey.Parameters.G = new(big.Int).SetBytes(g)
    +	pubkey.Y = new(big.Int).SetBytes(y)
    +	return pubkey
    +}
    +
    +func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey {
    +	keybuf, err := fromBase64([]byte(k.PublicKey))
    +	if err != nil {
    +		return nil
    +	}
    +	if len(keybuf) != ed25519.PublicKeySize {
    +		return nil
    +	}
    +	return keybuf
    +}
    +
    +type wireSlice [][]byte
    +
    +func (p wireSlice) Len() int      { return len(p) }
    +func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
    +func (p wireSlice) Less(i, j int) bool {
    +	_, ioff, _ := UnpackDomainName(p[i], 0)
    +	_, joff, _ := UnpackDomainName(p[j], 0)
    +	return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0
    +}
    +
    +// Return the raw signature data.
    +func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
    +	wires := make(wireSlice, len(rrset))
    +	for i, r := range rrset {
    +		r1 := r.copy()
    +		h := r1.Header()
    +		h.Ttl = s.OrigTtl
    +		labels := SplitDomainName(h.Name)
    +		// 6.2. Canonical RR Form. (4) - wildcards
    +		if len(labels) > int(s.Labels) {
    +			// Wildcard
    +			h.Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
    +		}
    +		// RFC 4034: 6.2.  Canonical RR Form. (2) - domain name to lowercase
    +		h.Name = strings.ToLower(h.Name)
    +		// 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
    +		//   NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
    +		//   HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
    +		//   SRV, DNAME, A6
    +		//
    +		// RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC):
    +		//	Section 6.2 of [RFC4034] also erroneously lists HINFO as a record
    +		//	that needs conversion to lowercase, and twice at that.  Since HINFO
    +		//	records contain no domain names, they are not subject to case
    +		//	conversion.
    +		switch x := r1.(type) {
    +		case *NS:
    +			x.Ns = strings.ToLower(x.Ns)
    +		case *MD:
    +			x.Md = strings.ToLower(x.Md)
    +		case *MF:
    +			x.Mf = strings.ToLower(x.Mf)
    +		case *CNAME:
    +			x.Target = strings.ToLower(x.Target)
    +		case *SOA:
    +			x.Ns = strings.ToLower(x.Ns)
    +			x.Mbox = strings.ToLower(x.Mbox)
    +		case *MB:
    +			x.Mb = strings.ToLower(x.Mb)
    +		case *MG:
    +			x.Mg = strings.ToLower(x.Mg)
    +		case *MR:
    +			x.Mr = strings.ToLower(x.Mr)
    +		case *PTR:
    +			x.Ptr = strings.ToLower(x.Ptr)
    +		case *MINFO:
    +			x.Rmail = strings.ToLower(x.Rmail)
    +			x.Email = strings.ToLower(x.Email)
    +		case *MX:
    +			x.Mx = strings.ToLower(x.Mx)
    +		case *RP:
    +			x.Mbox = strings.ToLower(x.Mbox)
    +			x.Txt = strings.ToLower(x.Txt)
    +		case *AFSDB:
    +			x.Hostname = strings.ToLower(x.Hostname)
    +		case *RT:
    +			x.Host = strings.ToLower(x.Host)
    +		case *SIG:
    +			x.SignerName = strings.ToLower(x.SignerName)
    +		case *PX:
    +			x.Map822 = strings.ToLower(x.Map822)
    +			x.Mapx400 = strings.ToLower(x.Mapx400)
    +		case *NAPTR:
    +			x.Replacement = strings.ToLower(x.Replacement)
    +		case *KX:
    +			x.Exchanger = strings.ToLower(x.Exchanger)
    +		case *SRV:
    +			x.Target = strings.ToLower(x.Target)
    +		case *DNAME:
    +			x.Target = strings.ToLower(x.Target)
    +		}
    +		// 6.2. Canonical RR Form. (5) - origTTL
    +		wire := make([]byte, Len(r1)+1) // +1 to be safe(r)
    +		off, err1 := PackRR(r1, wire, 0, nil, false)
    +		if err1 != nil {
    +			return nil, err1
    +		}
    +		wire = wire[:off]
    +		wires[i] = wire
    +	}
    +	sort.Sort(wires)
    +	for i, wire := range wires {
    +		if i > 0 && bytes.Equal(wire, wires[i-1]) {
    +			continue
    +		}
    +		buf = append(buf, wire...)
    +	}
    +	return buf, nil
    +}
    +
    +func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) {
    +	// copied from zmsg.go RRSIG packing
    +	off, err := packUint16(sw.TypeCovered, msg, 0)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(sw.Algorithm, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(sw.Labels, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(sw.OrigTtl, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(sw.Expiration, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(sw.Inception, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(sw.KeyTag, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = PackDomainName(sw.SignerName, msg, off, nil, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) {
    +	// copied from zmsg.go DNSKEY packing
    +	off, err := packUint16(dw.Flags, msg, 0)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(dw.Protocol, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(dw.Algorithm, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringBase64(dw.PublicKey, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
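
To make the sign/verify flow above concrete, here is a minimal sketch that generates an ECDSA P-256 zone-signing key (Generate is defined in dnssec_keygen.go below), signs a one-record RRset and verifies it. The zone name, record data and validity window are placeholders, and dns.NewRR plus the Type*/Class* constants are assumed from the rest of the package.

package main

import (
	"crypto/ecdsa"
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     256, // ZONE bit: a zone-signing key
		Protocol:  3,
		Algorithm: dns.ECDSAP256SHA256,
	}
	priv, err := key.Generate(256)
	if err != nil {
		panic(err)
	}

	a, err := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1")
	if err != nil {
		panic(err)
	}
	rrset := []dns.RR{a}

	sig := &dns.RRSIG{
		Algorithm:  key.Algorithm,
		Inception:  uint32(time.Now().Add(-time.Hour).Unix()),
		Expiration: uint32(time.Now().Add(24 * time.Hour).Unix()),
		KeyTag:     key.KeyTag(),
		SignerName: key.Hdr.Name,
	}
	// Sign fills in the remaining RRSIG fields from the RRset.
	if err := sig.Sign(priv.(*ecdsa.PrivateKey), rrset); err != nil {
		panic(err)
	}
	if err := sig.Verify(key, rrset); err != nil {
		panic(err)
	}
	fmt.Println("RRset signed and verified; key tag", key.KeyTag())
}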
    diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go
    new file mode 100644
    index 000000000000..60737e5b2b34
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/dnssec_keygen.go
    @@ -0,0 +1,140 @@
    +package dns
    +
    +import (
    +	"crypto"
    +	"crypto/ecdsa"
    +	"crypto/elliptic"
    +	"crypto/rand"
    +	"crypto/rsa"
    +	"math/big"
    +
    +	"golang.org/x/crypto/ed25519"
    +)
    +
    +// Generate generates a DNSKEY of the given bit size.
    +// The public part is put inside the DNSKEY record.
    +// The Algorithm in the key must be set as this will define
    +// what kind of DNSKEY will be generated.
+// The ECDSA algorithms imply a fixed keysize; in that case
    +// bits should be set to the size of the algorithm.
    +func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
    +	switch k.Algorithm {
    +	case RSAMD5, DSA, DSANSEC3SHA1:
    +		return nil, ErrAlg
    +	case RSASHA1, RSASHA256, RSASHA1NSEC3SHA1:
    +		if bits < 512 || bits > 4096 {
    +			return nil, ErrKeySize
    +		}
    +	case RSASHA512:
    +		if bits < 1024 || bits > 4096 {
    +			return nil, ErrKeySize
    +		}
    +	case ECDSAP256SHA256:
    +		if bits != 256 {
    +			return nil, ErrKeySize
    +		}
    +	case ECDSAP384SHA384:
    +		if bits != 384 {
    +			return nil, ErrKeySize
    +		}
    +	case ED25519:
    +		if bits != 256 {
    +			return nil, ErrKeySize
    +		}
    +	}
    +
    +	switch k.Algorithm {
    +	case RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
    +		priv, err := rsa.GenerateKey(rand.Reader, bits)
    +		if err != nil {
    +			return nil, err
    +		}
    +		k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
    +		return priv, nil
    +	case ECDSAP256SHA256, ECDSAP384SHA384:
    +		var c elliptic.Curve
    +		switch k.Algorithm {
    +		case ECDSAP256SHA256:
    +			c = elliptic.P256()
    +		case ECDSAP384SHA384:
    +			c = elliptic.P384()
    +		}
    +		priv, err := ecdsa.GenerateKey(c, rand.Reader)
    +		if err != nil {
    +			return nil, err
    +		}
    +		k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
    +		return priv, nil
    +	case ED25519:
    +		pub, priv, err := ed25519.GenerateKey(rand.Reader)
    +		if err != nil {
    +			return nil, err
    +		}
    +		k.setPublicKeyED25519(pub)
    +		return priv, nil
    +	default:
    +		return nil, ErrAlg
    +	}
    +}
    +
+// Set the public key (the values E and N)
    +func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool {
    +	if _E == 0 || _N == nil {
    +		return false
    +	}
    +	buf := exponentToBuf(_E)
    +	buf = append(buf, _N.Bytes()...)
    +	k.PublicKey = toBase64(buf)
    +	return true
    +}
    +
    +// Set the public key for Elliptic Curves
    +func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool {
    +	if _X == nil || _Y == nil {
    +		return false
    +	}
    +	var intlen int
    +	switch k.Algorithm {
    +	case ECDSAP256SHA256:
    +		intlen = 32
    +	case ECDSAP384SHA384:
    +		intlen = 48
    +	}
    +	k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen))
    +	return true
    +}
    +
    +// Set the public key for Ed25519
    +func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool {
    +	if _K == nil {
    +		return false
    +	}
    +	k.PublicKey = toBase64(_K)
    +	return true
    +}
    +
    +// Set the public key (the values E and N) for RSA
    +// RFC 3110: Section 2. RSA Public KEY Resource Records
    +func exponentToBuf(_E int) []byte {
    +	var buf []byte
    +	i := big.NewInt(int64(_E)).Bytes()
    +	if len(i) < 256 {
    +		buf = make([]byte, 1, 1+len(i))
    +		buf[0] = uint8(len(i))
    +	} else {
    +		buf = make([]byte, 3, 3+len(i))
    +		buf[0] = 0
    +		buf[1] = uint8(len(i) >> 8)
    +		buf[2] = uint8(len(i))
    +	}
    +	buf = append(buf, i...)
    +	return buf
    +}
    +
    +// Set the public key for X and Y for Curve. The two
    +// values are just concatenated.
    +func curveToBuf(_X, _Y *big.Int, intlen int) []byte {
    +	buf := intToBytes(_X, intlen)
    +	buf = append(buf, intToBytes(_Y, intlen)...)
    +	return buf
    +}
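
And a shorter sketch tying key generation to delegation material: generate an Ed25519 key-signing key and derive its SHA-256 DS record via ToDS from dnssec.go above. The zone name is a placeholder; the String methods come from the generated RR code elsewhere in the package.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	ksk := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     257, // ZONE | SEP: a key-signing key
		Protocol:  3,
		Algorithm: dns.ED25519,
	}
	// Ed25519 implies a fixed key size; bits must be 256.
	if _, err := ksk.Generate(256); err != nil {
		panic(err)
	}
	fmt.Println(ksk.String())

	// SHA-256 DS record to hand to the parent zone.
	ds := ksk.ToDS(dns.SHA256)
	if ds == nil {
		panic("ToDS failed")
	}
	fmt.Println(ds.String())
}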
    diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go
    new file mode 100644
    index 000000000000..0e6f3201656b
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go
    @@ -0,0 +1,322 @@
    +package dns
    +
    +import (
    +	"bufio"
    +	"crypto"
    +	"crypto/ecdsa"
    +	"crypto/rsa"
    +	"io"
    +	"math/big"
    +	"strconv"
    +	"strings"
    +
    +	"golang.org/x/crypto/ed25519"
    +)
    +
    +// NewPrivateKey returns a PrivateKey by parsing the string s.
+// s should be in the same form as the BIND private key files.
    +func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
    +	if s == "" || s[len(s)-1] != '\n' { // We need a closing newline
    +		return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
    +	}
    +	return k.ReadPrivateKey(strings.NewReader(s), "")
    +}
    +
    +// ReadPrivateKey reads a private key from the io.Reader q. The string file is
    +// only used in error reporting.
    +// The public key must be known, because some cryptographic algorithms embed
+// the public key inside the private key.
    +func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) {
    +	m, err := parseKey(q, file)
    +	if m == nil {
    +		return nil, err
    +	}
    +	if _, ok := m["private-key-format"]; !ok {
    +		return nil, ErrPrivKey
    +	}
    +	if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" {
    +		return nil, ErrPrivKey
    +	}
    +	// TODO(mg): check if the pubkey matches the private key
    +	algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8)
    +	if err != nil {
    +		return nil, ErrPrivKey
    +	}
    +	switch uint8(algo) {
    +	case RSAMD5, DSA, DSANSEC3SHA1:
    +		return nil, ErrAlg
    +	case RSASHA1:
    +		fallthrough
    +	case RSASHA1NSEC3SHA1:
    +		fallthrough
    +	case RSASHA256:
    +		fallthrough
    +	case RSASHA512:
    +		priv, err := readPrivateKeyRSA(m)
    +		if err != nil {
    +			return nil, err
    +		}
    +		pub := k.publicKeyRSA()
    +		if pub == nil {
    +			return nil, ErrKey
    +		}
    +		priv.PublicKey = *pub
    +		return priv, nil
    +	case ECCGOST:
    +		return nil, ErrPrivKey
    +	case ECDSAP256SHA256:
    +		fallthrough
    +	case ECDSAP384SHA384:
    +		priv, err := readPrivateKeyECDSA(m)
    +		if err != nil {
    +			return nil, err
    +		}
    +		pub := k.publicKeyECDSA()
    +		if pub == nil {
    +			return nil, ErrKey
    +		}
    +		priv.PublicKey = *pub
    +		return priv, nil
    +	case ED25519:
    +		return readPrivateKeyED25519(m)
    +	default:
    +		return nil, ErrPrivKey
    +	}
    +}
    +
+// readPrivateKeyRSA parses the key map and returns the private key,
+// filling in the public key parts as well.
    +func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
    +	p := new(rsa.PrivateKey)
    +	p.Primes = []*big.Int{nil, nil}
    +	for k, v := range m {
    +		switch k {
    +		case "modulus", "publicexponent", "privateexponent", "prime1", "prime2":
    +			v1, err := fromBase64([]byte(v))
    +			if err != nil {
    +				return nil, err
    +			}
    +			switch k {
    +			case "modulus":
    +				p.PublicKey.N = new(big.Int).SetBytes(v1)
    +			case "publicexponent":
    +				i := new(big.Int).SetBytes(v1)
    +				p.PublicKey.E = int(i.Int64()) // int64 should be large enough
    +			case "privateexponent":
    +				p.D = new(big.Int).SetBytes(v1)
    +			case "prime1":
    +				p.Primes[0] = new(big.Int).SetBytes(v1)
    +			case "prime2":
    +				p.Primes[1] = new(big.Int).SetBytes(v1)
    +			}
    +		case "exponent1", "exponent2", "coefficient":
    +			// not used in Go (yet)
    +		case "created", "publish", "activate":
    +			// not used in Go (yet)
    +		}
    +	}
    +	return p, nil
    +}
    +
    +func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
    +	p := new(ecdsa.PrivateKey)
    +	p.D = new(big.Int)
    +	// TODO: validate that the required flags are present
    +	for k, v := range m {
    +		switch k {
    +		case "privatekey":
    +			v1, err := fromBase64([]byte(v))
    +			if err != nil {
    +				return nil, err
    +			}
    +			p.D.SetBytes(v1)
    +		case "created", "publish", "activate":
    +			/* not used in Go (yet) */
    +		}
    +	}
    +	return p, nil
    +}
    +
    +func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) {
    +	var p ed25519.PrivateKey
    +	// TODO: validate that the required flags are present
    +	for k, v := range m {
    +		switch k {
    +		case "privatekey":
    +			p1, err := fromBase64([]byte(v))
    +			if err != nil {
    +				return nil, err
    +			}
    +			if len(p1) != ed25519.SeedSize {
    +				return nil, ErrPrivKey
    +			}
    +			p = ed25519.NewKeyFromSeed(p1)
    +		case "created", "publish", "activate":
    +			/* not used in Go (yet) */
    +		}
    +	}
    +	return p, nil
    +}
    +
    +// parseKey reads a private key from r. It returns a map[string]string,
    +// with the key-value pairs, or an error when the file is not correct.
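+//
+// As an illustration (not a real key), a BIND v1.3 private key file, such as
+// the one produced by PrivateKeyString, looks like:
+//
+//	Private-key-format: v1.3
+//	Algorithm: 13 (ECDSAP256SHA256)
+//	PrivateKey: <base64 data>
+//
+// parseKey turns this into the map
+// {"private-key-format": "v1.3", "algorithm": "13 (ECDSAP256SHA256)", ...}
+// with all keys lowercased.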
    +func parseKey(r io.Reader, file string) (map[string]string, error) {
    +	m := make(map[string]string)
    +	var k string
    +
    +	c := newKLexer(r)
    +
    +	for l, ok := c.Next(); ok; l, ok = c.Next() {
    +		// It should alternate
    +		switch l.value {
    +		case zKey:
    +			k = l.token
    +		case zValue:
    +			if k == "" {
    +				return nil, &ParseError{file, "no private key seen", l}
    +			}
    +
    +			m[strings.ToLower(k)] = l.token
    +			k = ""
    +		}
    +	}
    +
    +	// Surface any read errors from r.
    +	if err := c.Err(); err != nil {
    +		return nil, &ParseError{file: file, err: err.Error()}
    +	}
    +
    +	return m, nil
    +}
    +
    +type klexer struct {
    +	br io.ByteReader
    +
    +	readErr error
    +
    +	line   int
    +	column int
    +
    +	key bool
    +
    +	eol bool // end-of-line
    +}
    +
    +func newKLexer(r io.Reader) *klexer {
    +	br, ok := r.(io.ByteReader)
    +	if !ok {
    +		br = bufio.NewReaderSize(r, 1024)
    +	}
    +
    +	return &klexer{
    +		br: br,
    +
    +		line: 1,
    +
    +		key: true,
    +	}
    +}
    +
    +func (kl *klexer) Err() error {
    +	if kl.readErr == io.EOF {
    +		return nil
    +	}
    +
    +	return kl.readErr
    +}
    +
    +// readByte returns the next byte from the input
    +func (kl *klexer) readByte() (byte, bool) {
    +	if kl.readErr != nil {
    +		return 0, false
    +	}
    +
    +	c, err := kl.br.ReadByte()
    +	if err != nil {
    +		kl.readErr = err
    +		return 0, false
    +	}
    +
+	// Delay the newline handling until the next token is delivered;
+	// this fixes off-by-one errors when reporting a parse error.
    +	if kl.eol {
    +		kl.line++
    +		kl.column = 0
    +		kl.eol = false
    +	}
    +
    +	if c == '\n' {
    +		kl.eol = true
    +	} else {
    +		kl.column++
    +	}
    +
    +	return c, true
    +}
    +
    +func (kl *klexer) Next() (lex, bool) {
    +	var (
    +		l lex
    +
    +		str strings.Builder
    +
    +		commt bool
    +	)
    +
    +	for x, ok := kl.readByte(); ok; x, ok = kl.readByte() {
    +		l.line, l.column = kl.line, kl.column
    +
    +		switch x {
    +		case ':':
    +			if commt || !kl.key {
    +				break
    +			}
    +
    +			kl.key = false
    +
    +			// Next token is a space, eat it
    +			kl.readByte()
    +
    +			l.value = zKey
    +			l.token = str.String()
    +			return l, true
    +		case ';':
    +			commt = true
    +		case '\n':
    +			if commt {
    +				// Reset a comment
    +				commt = false
    +			}
    +
    +			if kl.key && str.Len() == 0 {
    +				// ignore empty lines
    +				break
    +			}
    +
    +			kl.key = true
    +
    +			l.value = zValue
    +			l.token = str.String()
    +			return l, true
    +		default:
    +			if commt {
    +				break
    +			}
    +
    +			str.WriteByte(x)
    +		}
    +	}
    +
    +	if kl.readErr != nil && kl.readErr != io.EOF {
    +		// Don't return any tokens after a read error occurs.
    +		return lex{value: zEOF}, false
    +	}
    +
    +	if str.Len() > 0 {
    +		// Send remainder
    +		l.value = zValue
    +		l.token = str.String()
    +		return l, true
    +	}
    +
    +	return lex{value: zEOF}, false
    +}
    diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go
    new file mode 100644
    index 000000000000..4493c9d57455
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/dnssec_privkey.go
    @@ -0,0 +1,94 @@
    +package dns
    +
    +import (
    +	"crypto"
    +	"crypto/dsa"
    +	"crypto/ecdsa"
    +	"crypto/rsa"
    +	"math/big"
    +	"strconv"
    +
    +	"golang.org/x/crypto/ed25519"
    +)
    +
    +const format = "Private-key-format: v1.3\n"
    +
    +var bigIntOne = big.NewInt(1)
    +
    +// PrivateKeyString converts a PrivateKey to a string. This string has the same
    +// format as the private-key-file of BIND9 (Private-key-format: v1.3).
+// It needs some info from the key (the algorithm), so it's a method of the DNSKEY.
+// It supports rsa.PrivateKey, ecdsa.PrivateKey, dsa.PrivateKey and ed25519.PrivateKey.
    +func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
    +	algorithm := strconv.Itoa(int(r.Algorithm))
    +	algorithm += " (" + AlgorithmToString[r.Algorithm] + ")"
    +
    +	switch p := p.(type) {
    +	case *rsa.PrivateKey:
    +		modulus := toBase64(p.PublicKey.N.Bytes())
    +		e := big.NewInt(int64(p.PublicKey.E))
    +		publicExponent := toBase64(e.Bytes())
    +		privateExponent := toBase64(p.D.Bytes())
    +		prime1 := toBase64(p.Primes[0].Bytes())
    +		prime2 := toBase64(p.Primes[1].Bytes())
    +		// Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
    +		// and from: http://code.google.com/p/go/issues/detail?id=987
    +		p1 := new(big.Int).Sub(p.Primes[0], bigIntOne)
    +		q1 := new(big.Int).Sub(p.Primes[1], bigIntOne)
    +		exp1 := new(big.Int).Mod(p.D, p1)
    +		exp2 := new(big.Int).Mod(p.D, q1)
    +		coeff := new(big.Int).ModInverse(p.Primes[1], p.Primes[0])
    +
    +		exponent1 := toBase64(exp1.Bytes())
    +		exponent2 := toBase64(exp2.Bytes())
    +		coefficient := toBase64(coeff.Bytes())
    +
    +		return format +
    +			"Algorithm: " + algorithm + "\n" +
    +			"Modulus: " + modulus + "\n" +
    +			"PublicExponent: " + publicExponent + "\n" +
    +			"PrivateExponent: " + privateExponent + "\n" +
    +			"Prime1: " + prime1 + "\n" +
    +			"Prime2: " + prime2 + "\n" +
    +			"Exponent1: " + exponent1 + "\n" +
    +			"Exponent2: " + exponent2 + "\n" +
    +			"Coefficient: " + coefficient + "\n"
    +
    +	case *ecdsa.PrivateKey:
    +		var intlen int
    +		switch r.Algorithm {
    +		case ECDSAP256SHA256:
    +			intlen = 32
    +		case ECDSAP384SHA384:
    +			intlen = 48
    +		}
    +		private := toBase64(intToBytes(p.D, intlen))
    +		return format +
    +			"Algorithm: " + algorithm + "\n" +
    +			"PrivateKey: " + private + "\n"
    +
    +	case *dsa.PrivateKey:
    +		T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8)
    +		prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8))
    +		subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20))
    +		base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8))
    +		priv := toBase64(intToBytes(p.X, 20))
    +		pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8))
    +		return format +
    +			"Algorithm: " + algorithm + "\n" +
    +			"Prime(p): " + prime + "\n" +
    +			"Subprime(q): " + subprime + "\n" +
    +			"Base(g): " + base + "\n" +
    +			"Private_value(x): " + priv + "\n" +
    +			"Public_value(y): " + pub + "\n"
    +
    +	case ed25519.PrivateKey:
    +		private := toBase64(p.Seed())
    +		return format +
    +			"Algorithm: " + algorithm + "\n" +
    +			"PrivateKey: " + private + "\n"
    +
    +	default:
    +		return ""
    +	}
    +}
    diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go
    new file mode 100644
    index 000000000000..d3d7cec9ef5c
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/doc.go
    @@ -0,0 +1,269 @@
    +/*
    +Package dns implements a full featured interface to the Domain Name System.
    +Both server- and client-side programming is supported. The package allows
    +complete control over what is sent out to the DNS. The API follows the
    +less-is-more principle, by presenting a small, clean interface.
    +
    +It supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
    +TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
    +
+Note that domain names MUST be fully qualified before sending them; unqualified
+names in a message will result in a packing failure.
    +
    +Resource records are native types. They are not stored in wire format. Basic
    +usage pattern for creating a new resource record:
    +
    +     r := new(dns.MX)
    +     r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
    +     r.Preference = 10
    +     r.Mx = "mx.miek.nl."
    +
    +Or directly from a string:
    +
    +     mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
    +
    +Or when the default origin (.) and TTL (3600) and class (IN) suit you:
    +
    +     mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl")
    +
    +Or even:
    +
    +     mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
    +
+In the DNS, messages are exchanged; these messages contain resource records
+(sets). Basic use pattern for creating a message:
    +
    +     m := new(dns.Msg)
    +     m.SetQuestion("miek.nl.", dns.TypeMX)
    +
    +Or when not certain if the domain name is fully qualified:
    +
    +	m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
    +
    +The message m is now a message with the question section set to ask the MX
    +records for the miek.nl. zone.
    +
    +The following is slightly more verbose, but more flexible:
    +
    +     m1 := new(dns.Msg)
    +     m1.Id = dns.Id()
    +     m1.RecursionDesired = true
    +     m1.Question = make([]dns.Question, 1)
    +     m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
    +
+After creating a message it can be sent. Basic use pattern for synchronously
    +querying the DNS at a server configured on 127.0.0.1 and port 53:
    +
    +     c := new(dns.Client)
    +     in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
    +
    +Suppressing multiple outstanding queries (with the same question, type and
    +class) is as easy as setting:
    +
    +	c.SingleInflight = true
    +
    +More advanced options are available using a net.Dialer and the corresponding API.
    +For example it is possible to set a timeout, or to specify a source IP address
    +and port to use for the connection:
    +
    +	c := new(dns.Client)
    +	laddr := net.UDPAddr{
+		IP: net.ParseIP("::1"),
    +		Port: 12345,
    +		Zone: "",
    +	}
+	c.Dialer = &net.Dialer{
    +		Timeout: 200 * time.Millisecond,
    +		LocalAddr: &laddr,
    +	}
    +	in, rtt, err := c.Exchange(m1, "8.8.8.8:53")
    +
    +If these "advanced" features are not needed, a simple UDP query can be sent,
    +with:
    +
    +	in, err := dns.Exchange(m1, "127.0.0.1:53")
    +
+When this function returns you will get a dns message. A dns message consists
+of four sections.
    +The question section: in.Question, the answer section: in.Answer,
    +the authority section: in.Ns and the additional section: in.Extra.
    +
+Each of these sections (except the Question section) contains a []RR. Basic
    +use pattern for accessing the rdata of a TXT RR as the first RR in
    +the Answer section:
    +
    +	if t, ok := in.Answer[0].(*dns.TXT); ok {
    +		// do something with t.Txt
    +	}
    +
    +Domain Name and TXT Character String Representations
    +
    +Both domain names and TXT character strings are converted to presentation form
    +both when unpacked and when converted to strings.
    +
    +For TXT character strings, tabs, carriage returns and line feeds will be
    +converted to \t, \r and \n respectively. Back slashes and quotations marks will
    +be escaped. Bytes below 32 and above 127 will be converted to \DDD form.
    +
+For domain names, in addition to the above rules, brackets, periods, spaces,
    +semicolons and the at symbol are escaped.
    +
    +DNSSEC
    +
    +DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses
    +public key cryptography to sign resource records. The public keys are stored in
    +DNSKEY records and the signatures in RRSIG records.
    +
    +Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK)
    +bit to a request.
    +
    +     m := new(dns.Msg)
    +     m.SetEdns0(4096, true)
    +
    +Signature generation, signature verification and key generation are all supported.
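+
+As a minimal illustration of key generation (the owner name, TTL and flags
+below are example values), using DNSKEY.Generate and PrivateKeyString from
+this package:
+
+	key := new(dns.DNSKEY)
+	key.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
+	key.Flags = 256 // example: zone signing key
+	key.Protocol = 3
+	key.Algorithm = dns.ECDSAP256SHA256
+	priv, err := key.Generate(256)
+	// On success, key holds the public key and priv the private key;
+	// key.PrivateKeyString(priv) renders the latter in BIND9 v1.3 format.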
    +
    +DYNAMIC UPDATES
    +
+Dynamic updates reuse the DNS message format, but rename three of the
+sections. Question is Zone, Answer is Prerequisite, Authority is Update; only
+the Additional section is not renamed. See RFC 2136 for the gory details.
    +
+You can set a rather complex set of rules for the existence or absence of
    +certain resource records or names in a zone to specify if resource records
    +should be added or removed. The table from RFC 2136 supplemented with the Go
    +DNS function shows which functions exist to specify the prerequisites.
    +
    + 3.2.4 - Table Of Metavalues Used In Prerequisite Section
    +
    +  CLASS    TYPE     RDATA    Meaning                    Function
    +  --------------------------------------------------------------
    +  ANY      ANY      empty    Name is in use             dns.NameUsed
    +  ANY      rrset    empty    RRset exists (value indep) dns.RRsetUsed
    +  NONE     ANY      empty    Name is not in use         dns.NameNotUsed
    +  NONE     rrset    empty    RRset does not exist       dns.RRsetNotUsed
    +  zone     rrset    rr       RRset exists (value dep)   dns.Used
    +
    +The prerequisite section can also be left empty. If you have decided on the
    +prerequisites you can tell what RRs should be added or deleted. The next table
    +shows the options you have and what functions to call.
    +
    + 3.4.2.6 - Table Of Metavalues Used In Update Section
    +
    +  CLASS    TYPE     RDATA    Meaning                     Function
    +  ---------------------------------------------------------------
    +  ANY      ANY      empty    Delete all RRsets from name dns.RemoveName
    +  ANY      rrset    empty    Delete an RRset             dns.RemoveRRset
    +  NONE     rrset    rr       Delete an RR from RRset     dns.Remove
    +  zone     rrset    rr       Add to an RRset             dns.Insert
    +
    +TRANSACTION SIGNATURE
    +
+A TSIG or transaction signature adds an HMAC TSIG record to each message sent.
    +The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
    +
    +Basic use pattern when querying with a TSIG name "axfr." (note that these key names
    +must be fully qualified - as they are domain names) and the base64 secret
    +"so6ZGir4GPAqINNh9U5c3A==":
    +
    +If an incoming message contains a TSIG record it MUST be the last record in
    +the additional section (RFC2845 3.2).  This means that you should make the
    +call to SetTsig last, right before executing the query.  If you make any
    +changes to the RRset after calling SetTsig() the signature will be incorrect.
    +
    +	c := new(dns.Client)
    +	c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
    +	m := new(dns.Msg)
    +	m.SetQuestion("miek.nl.", dns.TypeMX)
    +	m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
    +	...
    +	// When sending the TSIG RR is calculated and filled in before sending
    +
+When requesting a zone transfer (almost all TSIG usage is when requesting zone
    +transfers), with TSIG, this is the basic use pattern. In this example we
    +request an AXFR for miek.nl. with TSIG key named "axfr." and secret
    +"so6ZGir4GPAqINNh9U5c3A==" and using the server 176.58.119.54:
    +
    +	t := new(dns.Transfer)
    +	m := new(dns.Msg)
    +	t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
    +	m.SetAxfr("miek.nl.")
    +	m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
    +	c, err := t.In(m, "176.58.119.54:53")
    +	for r := range c { ... }
    +
    +You can now read the records from the transfer as they come in. Each envelope
    +is checked with TSIG. If something is not correct an error is returned.
    +
    +Basic use pattern validating and replying to a message that has TSIG set.
    +
    +	server := &dns.Server{Addr: ":53", Net: "udp"}
    +	server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
    +	go server.ListenAndServe()
    +	dns.HandleFunc(".", handleRequest)
    +
    +	func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
    +		m := new(dns.Msg)
    +		m.SetReply(r)
    +		if r.IsTsig() != nil {
    +			if w.TsigStatus() == nil {
+				// *Msg r has a TSIG record and it was validated
    +				m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
    +			} else {
+				// *Msg r has a TSIG record and it was not validated
    +			}
    +		}
    +		w.WriteMsg(m)
    +	}
    +
    +PRIVATE RRS
    +
    +RFC 6895 sets aside a range of type codes for private use. This range is 65,280
    +- 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
    +can be used, before requesting an official type code from IANA.
    +
    +See https://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more
    +information.
    +
    +EDNS0
    +
    +EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by
+RFC 6891. It defines a new RR type, the OPT RR, which is then completely
    +abused.
    +
    +Basic use pattern for creating an (empty) OPT RR:
    +
    +	o := new(dns.OPT)
    +	o.Hdr.Name = "." // MUST be the root zone, per definition.
    +	o.Hdr.Rrtype = dns.TypeOPT
    +
+The rdata of an OPT RR consists of a slice of EDNS0 (RFC 6891) interfaces.
    +Currently only a few have been standardized: EDNS0_NSID (RFC 5001) and
    +EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note that these options
    +may be combined in an OPT RR. Basic use pattern for a server to check if (and
    +which) options are set:
    +
    +	// o is a dns.OPT
    +	for _, s := range o.Option {
    +		switch e := s.(type) {
    +		case *dns.EDNS0_NSID:
    +			// do stuff with e.Nsid
    +		case *dns.EDNS0_SUBNET:
    +			// access e.Family, e.Address, etc.
    +		}
    +	}
    +
    +SIG(0)
    +
    +From RFC 2931:
    +
    +    SIG(0) provides protection for DNS transactions and requests ....
    +    ... protection for glue records, DNS requests, protection for message headers
    +    on requests and responses, and protection of the overall integrity of a response.
    +
    +It works like TSIG, except that SIG(0) uses public key cryptography, instead of
    +the shared secret approach in TSIG. Supported algorithms: DSA, ECDSAP256SHA256,
    +ECDSAP384SHA384, RSASHA1, RSASHA256 and RSASHA512.
    +
    +Signing subsequent messages in multi-message sessions is not implemented.
    +*/
    +package dns
    diff --git a/vendor/github.com/miekg/dns/duplicate.go b/vendor/github.com/miekg/dns/duplicate.go
    new file mode 100644
    index 000000000000..00cda0aa29f6
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/duplicate.go
    @@ -0,0 +1,38 @@
    +package dns
    +
    +//go:generate go run duplicate_generate.go
    +
+// IsDuplicate checks if r1 and r2 are duplicates of each other, excluding the TTL.
+// So this means the header data is equal *and* the RDATA is the same. Returns true
+// if so, otherwise false.
+// It is a protocol violation to have identical RRs in a message.
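+//
+// For example (illustrative):
+//
+//	a1, _ := dns.NewRR("miek.nl. 3600 IN A 127.0.0.1")
+//	a2, _ := dns.NewRR("miek.nl. 1800 IN A 127.0.0.1")
+//	dns.IsDuplicate(a1, a2) // true: only the TTL differs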
    +func IsDuplicate(r1, r2 RR) bool {
    +	// Check whether the record header is identical.
    +	if !r1.Header().isDuplicate(r2.Header()) {
    +		return false
    +	}
    +
    +	// Check whether the RDATA is identical.
    +	return r1.isDuplicate(r2)
    +}
    +
    +func (r1 *RR_Header) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*RR_Header)
    +	if !ok {
    +		return false
    +	}
    +	if r1.Class != r2.Class {
    +		return false
    +	}
    +	if r1.Rrtype != r2.Rrtype {
    +		return false
    +	}
    +	if !isDuplicateName(r1.Name, r2.Name) {
    +		return false
    +	}
    +	// ignore TTL
    +	return true
    +}
    +
    +// isDuplicateName checks if the domain names s1 and s2 are equal.
    +func isDuplicateName(s1, s2 string) bool { return equal(s1, s2) }
    diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go
    new file mode 100644
    index 000000000000..8cdbfb0b0792
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/edns.go
    @@ -0,0 +1,659 @@
    +package dns
    +
    +import (
    +	"encoding/binary"
    +	"encoding/hex"
    +	"errors"
    +	"fmt"
    +	"net"
    +	"strconv"
    +)
    +
    +// EDNS0 Option codes.
    +const (
    +	EDNS0LLQ          = 0x1     // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
    +	EDNS0UL           = 0x2     // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt
    +	EDNS0NSID         = 0x3     // nsid (See RFC 5001)
    +	EDNS0DAU          = 0x5     // DNSSEC Algorithm Understood
    +	EDNS0DHU          = 0x6     // DS Hash Understood
    +	EDNS0N3U          = 0x7     // NSEC3 Hash Understood
    +	EDNS0SUBNET       = 0x8     // client-subnet (See RFC 7871)
    +	EDNS0EXPIRE       = 0x9     // EDNS0 expire
    +	EDNS0COOKIE       = 0xa     // EDNS0 Cookie
    +	EDNS0TCPKEEPALIVE = 0xb     // EDNS0 tcp keep alive (See RFC 7828)
    +	EDNS0PADDING      = 0xc     // EDNS0 padding (See RFC 7830)
    +	EDNS0LOCALSTART   = 0xFDE9  // Beginning of range reserved for local/experimental use (See RFC 6891)
    +	EDNS0LOCALEND     = 0xFFFE  // End of range reserved for local/experimental use (See RFC 6891)
    +	_DO               = 1 << 15 // DNSSEC OK
    +)
    +
    +// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
    +// See RFC 6891.
    +type OPT struct {
    +	Hdr    RR_Header
    +	Option []EDNS0 `dns:"opt"`
    +}
    +
    +func (rr *OPT) String() string {
    +	s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
    +	if rr.Do() {
    +		s += "flags: do; "
    +	} else {
    +		s += "flags: ; "
    +	}
    +	s += "udp: " + strconv.Itoa(int(rr.UDPSize()))
    +
    +	for _, o := range rr.Option {
    +		switch o.(type) {
    +		case *EDNS0_NSID:
    +			s += "\n; NSID: " + o.String()
    +			h, e := o.pack()
    +			var r string
    +			if e == nil {
    +				for _, c := range h {
    +					r += "(" + string(c) + ")"
    +				}
    +				s += "  " + r
    +			}
    +		case *EDNS0_SUBNET:
    +			s += "\n; SUBNET: " + o.String()
    +		case *EDNS0_COOKIE:
    +			s += "\n; COOKIE: " + o.String()
    +		case *EDNS0_UL:
    +			s += "\n; UPDATE LEASE: " + o.String()
    +		case *EDNS0_LLQ:
    +			s += "\n; LONG LIVED QUERIES: " + o.String()
    +		case *EDNS0_DAU:
    +			s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String()
    +		case *EDNS0_DHU:
    +			s += "\n; DS HASH UNDERSTOOD: " + o.String()
    +		case *EDNS0_N3U:
    +			s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
    +		case *EDNS0_LOCAL:
    +			s += "\n; LOCAL OPT: " + o.String()
    +		case *EDNS0_PADDING:
    +			s += "\n; PADDING: " + o.String()
    +		}
    +	}
    +	return s
    +}
    +
    +func (rr *OPT) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	for _, o := range rr.Option {
    +		l += 4 // Account for 2-byte option code and 2-byte option length.
    +		lo, _ := o.pack()
    +		l += len(lo)
    +	}
    +	return l
    +}
    +
    +func (rr *OPT) parse(c *zlexer, origin string) *ParseError {
    +	panic("dns: internal error: parse should never be called on OPT")
    +}
    +
    +func (r1 *OPT) isDuplicate(r2 RR) bool { return false }
    +
    +// return the old value -> delete SetVersion?
    +
    +// Version returns the EDNS version used. Only zero is defined.
    +func (rr *OPT) Version() uint8 {
    +	return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16)
    +}
    +
    +// SetVersion sets the version of EDNS. This is usually zero.
    +func (rr *OPT) SetVersion(v uint8) {
    +	rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | uint32(v)<<16
    +}
    +
    +// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
    +func (rr *OPT) ExtendedRcode() int {
    +	return int(rr.Hdr.Ttl&0xFF000000>>24) << 4
    +}
    +
    +// SetExtendedRcode sets the EDNS extended RCODE field.
    +//
    +// If the RCODE is not an extended RCODE, will reset the extended RCODE field to 0.
    +func (rr *OPT) SetExtendedRcode(v uint16) {
    +	rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v>>4)<<24
    +}
    +
    +// UDPSize returns the UDP buffer size.
    +func (rr *OPT) UDPSize() uint16 {
    +	return rr.Hdr.Class
    +}
    +
    +// SetUDPSize sets the UDP buffer size.
    +func (rr *OPT) SetUDPSize(size uint16) {
    +	rr.Hdr.Class = size
    +}
    +
    +// Do returns the value of the DO (DNSSEC OK) bit.
    +func (rr *OPT) Do() bool {
    +	return rr.Hdr.Ttl&_DO == _DO
    +}
    +
    +// SetDo sets the DO (DNSSEC OK) bit.
    +// If we pass an argument, set the DO bit to that value.
+// It is possible to pass 2 or more arguments; any arguments after the first are silently ignored.
    +func (rr *OPT) SetDo(do ...bool) {
    +	if len(do) == 1 {
    +		if do[0] {
    +			rr.Hdr.Ttl |= _DO
    +		} else {
    +			rr.Hdr.Ttl &^= _DO
    +		}
    +	} else {
    +		rr.Hdr.Ttl |= _DO
    +	}
    +}
    +
    +// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
    +type EDNS0 interface {
    +	// Option returns the option code for the option.
    +	Option() uint16
    +	// pack returns the bytes of the option data.
    +	pack() ([]byte, error)
+	// unpack sets the data as found in the buffer. It also sets
    +	// the length of the slice as the length of the option data.
    +	unpack([]byte) error
    +	// String returns the string representation of the option.
    +	String() string
    +	// copy returns a deep-copy of the option.
    +	copy() EDNS0
    +}
    +
    +// EDNS0_NSID option is used to retrieve a nameserver
+// identifier. When sending a request, Nsid must be set to the empty string.
    +// The identifier is an opaque string encoded as hex.
    +// Basic use pattern for creating an nsid option:
    +//
    +//	o := new(dns.OPT)
    +//	o.Hdr.Name = "."
    +//	o.Hdr.Rrtype = dns.TypeOPT
    +//	e := new(dns.EDNS0_NSID)
    +//	e.Code = dns.EDNS0NSID
    +//	e.Nsid = "AA"
    +//	o.Option = append(o.Option, e)
    +type EDNS0_NSID struct {
    +	Code uint16 // Always EDNS0NSID
    +	Nsid string // This string needs to be hex encoded
    +}
    +
    +func (e *EDNS0_NSID) pack() ([]byte, error) {
    +	h, err := hex.DecodeString(e.Nsid)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return h, nil
    +}
    +
    +// Option implements the EDNS0 interface.
    +func (e *EDNS0_NSID) Option() uint16        { return EDNS0NSID } // Option returns the option code.
    +func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
    +func (e *EDNS0_NSID) String() string        { return e.Nsid }
    +func (e *EDNS0_NSID) copy() EDNS0           { return &EDNS0_NSID{e.Code, e.Nsid} }
    +
    +// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver
    +// an idea of where the client lives. See RFC 7871. It can then give back a different
    +// answer depending on the location or network topology.
+// Basic use pattern for creating a subnet option:
    +//
    +//	o := new(dns.OPT)
    +//	o.Hdr.Name = "."
    +//	o.Hdr.Rrtype = dns.TypeOPT
    +//	e := new(dns.EDNS0_SUBNET)
    +//	e.Code = dns.EDNS0SUBNET
    +//	e.Family = 1	// 1 for IPv4 source address, 2 for IPv6
    +//	e.SourceNetmask = 32	// 32 for IPV4, 128 for IPv6
    +//	e.SourceScope = 0
    +//	e.Address = net.ParseIP("127.0.0.1").To4()	// for IPv4
    +//	// e.Address = net.ParseIP("2001:7b8:32a::2")	// for IPV6
    +//	o.Option = append(o.Option, e)
    +//
    +// This code will parse all the available bits when unpacking (up to optlen).
    +// When packing it will apply SourceNetmask. If you need more advanced logic,
    +// patches welcome and good luck.
    +type EDNS0_SUBNET struct {
    +	Code          uint16 // Always EDNS0SUBNET
    +	Family        uint16 // 1 for IP, 2 for IP6
    +	SourceNetmask uint8
    +	SourceScope   uint8
    +	Address       net.IP
    +}
    +
    +// Option implements the EDNS0 interface.
    +func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET }
    +
    +func (e *EDNS0_SUBNET) pack() ([]byte, error) {
    +	b := make([]byte, 4)
    +	binary.BigEndian.PutUint16(b[0:], e.Family)
    +	b[2] = e.SourceNetmask
    +	b[3] = e.SourceScope
    +	switch e.Family {
    +	case 0:
    +		// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
+		// We might not need to complain either
    +		if e.SourceNetmask != 0 {
    +			return nil, errors.New("dns: bad address family")
    +		}
    +	case 1:
    +		if e.SourceNetmask > net.IPv4len*8 {
    +			return nil, errors.New("dns: bad netmask")
    +		}
    +		if len(e.Address.To4()) != net.IPv4len {
    +			return nil, errors.New("dns: bad address")
    +		}
    +		ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8))
    +		needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
    +		b = append(b, ip[:needLength]...)
    +	case 2:
    +		if e.SourceNetmask > net.IPv6len*8 {
    +			return nil, errors.New("dns: bad netmask")
    +		}
    +		if len(e.Address) != net.IPv6len {
    +			return nil, errors.New("dns: bad address")
    +		}
    +		ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8))
    +		needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
    +		b = append(b, ip[:needLength]...)
    +	default:
    +		return nil, errors.New("dns: bad address family")
    +	}
    +	return b, nil
    +}
    +
    +func (e *EDNS0_SUBNET) unpack(b []byte) error {
    +	if len(b) < 4 {
    +		return ErrBuf
    +	}
    +	e.Family = binary.BigEndian.Uint16(b)
    +	e.SourceNetmask = b[2]
    +	e.SourceScope = b[3]
    +	switch e.Family {
    +	case 0:
    +		// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
    +		// It's okay to accept such a packet
    +		if e.SourceNetmask != 0 {
    +			return errors.New("dns: bad address family")
    +		}
    +		e.Address = net.IPv4(0, 0, 0, 0)
    +	case 1:
    +		if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
    +			return errors.New("dns: bad netmask")
    +		}
    +		addr := make(net.IP, net.IPv4len)
    +		copy(addr, b[4:])
    +		e.Address = addr.To16()
    +	case 2:
    +		if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 {
    +			return errors.New("dns: bad netmask")
    +		}
    +		addr := make(net.IP, net.IPv6len)
    +		copy(addr, b[4:])
    +		e.Address = addr
    +	default:
    +		return errors.New("dns: bad address family")
    +	}
    +	return nil
    +}
    +
    +func (e *EDNS0_SUBNET) String() (s string) {
    +	if e.Address == nil {
    +		s = ""
    +	} else if e.Address.To4() != nil {
    +		s = e.Address.String()
    +	} else {
    +		s = "[" + e.Address.String() + "]"
    +	}
    +	s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope))
    +	return
    +}
    +
    +func (e *EDNS0_SUBNET) copy() EDNS0 {
    +	return &EDNS0_SUBNET{
    +		e.Code,
    +		e.Family,
    +		e.SourceNetmask,
    +		e.SourceScope,
    +		e.Address,
    +	}
    +}
    +
    +// The EDNS0_COOKIE option is used to add a DNS Cookie to a message.
    +//
    +//	o := new(dns.OPT)
    +//	o.Hdr.Name = "."
    +//	o.Hdr.Rrtype = dns.TypeOPT
    +//	e := new(dns.EDNS0_COOKIE)
    +//	e.Code = dns.EDNS0COOKIE
    +//	e.Cookie = "24a5ac.."
    +//	o.Option = append(o.Option, e)
    +//
+// The Cookie field consists of a client cookie (RFC 7873 Section 4), which is
    +// always 8 bytes. It may then optionally be followed by the server cookie. The server
    +// cookie is of variable length, 8 to a maximum of 32 bytes. In other words:
    +//
    +//	cCookie := o.Cookie[:16]
    +//	sCookie := o.Cookie[16:]
    +//
    +// There is no guarantee that the Cookie string has a specific length.
    +type EDNS0_COOKIE struct {
    +	Code   uint16 // Always EDNS0COOKIE
    +	Cookie string // Hex-encoded cookie data
    +}
    +
    +func (e *EDNS0_COOKIE) pack() ([]byte, error) {
    +	h, err := hex.DecodeString(e.Cookie)
    +	if err != nil {
    +		return nil, err
    +	}
    +	return h, nil
    +}
    +
    +// Option implements the EDNS0 interface.
    +func (e *EDNS0_COOKIE) Option() uint16        { return EDNS0COOKIE }
    +func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
    +func (e *EDNS0_COOKIE) String() string        { return e.Cookie }
    +func (e *EDNS0_COOKIE) copy() EDNS0           { return &EDNS0_COOKIE{e.Code, e.Cookie} }
    +
    +// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
    +// an expiration on an update RR. This is helpful for clients that cannot clean
    +// up after themselves. This is a draft RFC and more information can be found at
    +// http://files.dns-sd.org/draft-sekar-dns-ul.txt
    +//
    +//	o := new(dns.OPT)
    +//	o.Hdr.Name = "."
    +//	o.Hdr.Rrtype = dns.TypeOPT
    +//	e := new(dns.EDNS0_UL)
    +//	e.Code = dns.EDNS0UL
    +//	e.Lease = 120 // in seconds
    +//	o.Option = append(o.Option, e)
    +type EDNS0_UL struct {
    +	Code  uint16 // Always EDNS0UL
    +	Lease uint32
    +}
    +
    +// Option implements the EDNS0 interface.
    +func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
    +func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) }
    +func (e *EDNS0_UL) copy() EDNS0    { return &EDNS0_UL{e.Code, e.Lease} }
    +
    +// Copied: http://golang.org/src/pkg/net/dnsmsg.go
    +func (e *EDNS0_UL) pack() ([]byte, error) {
    +	b := make([]byte, 4)
    +	binary.BigEndian.PutUint32(b, e.Lease)
    +	return b, nil
    +}
    +
    +func (e *EDNS0_UL) unpack(b []byte) error {
    +	if len(b) < 4 {
    +		return ErrBuf
    +	}
    +	e.Lease = binary.BigEndian.Uint32(b)
    +	return nil
    +}
    +
    +// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
    +// Implemented for completeness, as the EDNS0 type code is assigned.
    +type EDNS0_LLQ struct {
    +	Code      uint16 // Always EDNS0LLQ
    +	Version   uint16
    +	Opcode    uint16
    +	Error     uint16
    +	Id        uint64
    +	LeaseLife uint32
    +}
    +
    +// Option implements the EDNS0 interface.
    +func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ }
    +
    +func (e *EDNS0_LLQ) pack() ([]byte, error) {
    +	b := make([]byte, 18)
    +	binary.BigEndian.PutUint16(b[0:], e.Version)
    +	binary.BigEndian.PutUint16(b[2:], e.Opcode)
    +	binary.BigEndian.PutUint16(b[4:], e.Error)
    +	binary.BigEndian.PutUint64(b[6:], e.Id)
    +	binary.BigEndian.PutUint32(b[14:], e.LeaseLife)
    +	return b, nil
    +}
    +
    +func (e *EDNS0_LLQ) unpack(b []byte) error {
    +	if len(b) < 18 {
    +		return ErrBuf
    +	}
    +	e.Version = binary.BigEndian.Uint16(b[0:])
    +	e.Opcode = binary.BigEndian.Uint16(b[2:])
    +	e.Error = binary.BigEndian.Uint16(b[4:])
    +	e.Id = binary.BigEndian.Uint64(b[6:])
    +	e.LeaseLife = binary.BigEndian.Uint32(b[14:])
    +	return nil
    +}
    +
    +func (e *EDNS0_LLQ) String() string {
    +	s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) +
    +		" " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(e.Id, 10) +
    +		" " + strconv.FormatUint(uint64(e.LeaseLife), 10)
    +	return s
    +}
    +func (e *EDNS0_LLQ) copy() EDNS0 {
    +	return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife}
    +}
    +
+// EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
    +type EDNS0_DAU struct {
    +	Code    uint16 // Always EDNS0DAU
    +	AlgCode []uint8
    +}
    +
    +// Option implements the EDNS0 interface.
    +func (e *EDNS0_DAU) Option() uint16        { return EDNS0DAU }
    +func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil }
    +func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
    +
    +func (e *EDNS0_DAU) String() string {
    +	s := ""
    +	for _, alg := range e.AlgCode {
    +		if a, ok := AlgorithmToString[alg]; ok {
    +			s += " " + a
    +		} else {
    +			s += " " + strconv.Itoa(int(alg))
    +		}
    +	}
    +	return s
    +}
    +func (e *EDNS0_DAU) copy() EDNS0 { return &EDNS0_DAU{e.Code, e.AlgCode} }
    +
    +// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975.
    +type EDNS0_DHU struct {
    +	Code    uint16 // Always EDNS0DHU
    +	AlgCode []uint8
    +}
    +
    +// Option implements the EDNS0 interface.
    +func (e *EDNS0_DHU) Option() uint16        { return EDNS0DHU }
    +func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil }
    +func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
    +
    +func (e *EDNS0_DHU) String() string {
    +	s := ""
    +	for _, alg := range e.AlgCode {
    +		if a, ok := HashToString[alg]; ok {
    +			s += " " + a
    +		} else {
    +			s += " " + strconv.Itoa(int(alg))
    +		}
    +	}
    +	return s
    +}
    +func (e *EDNS0_DHU) copy() EDNS0 { return &EDNS0_DHU{e.Code, e.AlgCode} }
    +
    +// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975.
    +type EDNS0_N3U struct {
    +	Code    uint16 // Always EDNS0N3U
    +	AlgCode []uint8
    +}
    +
    +// Option implements the EDNS0 interface.
    +func (e *EDNS0_N3U) Option() uint16        { return EDNS0N3U }
    +func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil }
    +func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
    +
    +func (e *EDNS0_N3U) String() string {
    +	// Re-use the hash map
    +	s := ""
    +	for _, alg := range e.AlgCode {
    +		if a, ok := HashToString[alg]; ok {
    +			s += " " + a
    +		} else {
    +			s += " " + strconv.Itoa(int(alg))
    +		}
    +	}
    +	return s
    +}
    +func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} }
    +
+// EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314.
    +type EDNS0_EXPIRE struct {
    +	Code   uint16 // Always EDNS0EXPIRE
    +	Expire uint32
    +}
    +
    +// Option implements the EDNS0 interface.
    +func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
    +func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
    +func (e *EDNS0_EXPIRE) copy() EDNS0    { return &EDNS0_EXPIRE{e.Code, e.Expire} }
    +
    +func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
    +	b := make([]byte, 4)
    +	binary.BigEndian.PutUint32(b, e.Expire)
    +	return b, nil
    +}
    +
    +func (e *EDNS0_EXPIRE) unpack(b []byte) error {
    +	if len(b) < 4 {
    +		return ErrBuf
    +	}
    +	e.Expire = binary.BigEndian.Uint32(b)
    +	return nil
    +}
    +
    +// The EDNS0_LOCAL option is used for local/experimental purposes. The option
    +// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND]
    +// (RFC6891), although any unassigned code can actually be used.  The content of
    +// the option is made available in Data, unaltered.
    +// Basic use pattern for creating a local option:
    +//
    +//	o := new(dns.OPT)
    +//	o.Hdr.Name = "."
    +//	o.Hdr.Rrtype = dns.TypeOPT
    +//	e := new(dns.EDNS0_LOCAL)
    +//	e.Code = dns.EDNS0LOCALSTART
    +//	e.Data = []byte{72, 82, 74}
    +//	o.Option = append(o.Option, e)
    +type EDNS0_LOCAL struct {
    +	Code uint16
    +	Data []byte
    +}
    +
    +// Option implements the EDNS0 interface.
    +func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
    +func (e *EDNS0_LOCAL) String() string {
    +	return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
    +}
    +func (e *EDNS0_LOCAL) copy() EDNS0 {
    +	b := make([]byte, len(e.Data))
    +	copy(b, e.Data)
    +	return &EDNS0_LOCAL{e.Code, b}
    +}
    +
    +func (e *EDNS0_LOCAL) pack() ([]byte, error) {
    +	b := make([]byte, len(e.Data))
    +	copied := copy(b, e.Data)
    +	if copied != len(e.Data) {
    +		return nil, ErrBuf
    +	}
    +	return b, nil
    +}
    +
    +func (e *EDNS0_LOCAL) unpack(b []byte) error {
    +	e.Data = make([]byte, len(b))
    +	copied := copy(e.Data, b)
    +	if copied != len(b) {
    +		return ErrBuf
    +	}
    +	return nil
    +}
    +
    +// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep
    +// the TCP connection alive. See RFC 7828.
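+//
+// A hypothetical use pattern, mirroring the other options above:
+//
+//	o := new(dns.OPT)
+//	o.Hdr.Name = "."
+//	o.Hdr.Rrtype = dns.TypeOPT
+//	e := new(dns.EDNS0_TCP_KEEPALIVE)
+//	e.Code = dns.EDNS0TCPKEEPALIVE
+//	e.Length = 2
+//	e.Timeout = 100 // 10 seconds, in units of 100 milliseconds
+//	o.Option = append(o.Option, e)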
    +type EDNS0_TCP_KEEPALIVE struct {
+	Code    uint16 // Always EDNS0TCPKEEPALIVE
    +	Length  uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present;
    +	Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order.
    +}
    +
    +// Option implements the EDNS0 interface.
    +func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE }
    +
    +func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) {
    +	if e.Timeout != 0 && e.Length != 2 {
    +		return nil, errors.New("dns: timeout specified but length is not 2")
    +	}
    +	if e.Timeout == 0 && e.Length != 0 {
    +		return nil, errors.New("dns: timeout not specified but length is not 0")
    +	}
    +	b := make([]byte, 4+e.Length)
    +	binary.BigEndian.PutUint16(b[0:], e.Code)
    +	binary.BigEndian.PutUint16(b[2:], e.Length)
    +	if e.Length == 2 {
    +		binary.BigEndian.PutUint16(b[4:], e.Timeout)
    +	}
    +	return b, nil
    +}
    +
    +func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error {
    +	if len(b) < 4 {
    +		return ErrBuf
    +	}
    +	e.Length = binary.BigEndian.Uint16(b[2:4])
    +	if e.Length != 0 && e.Length != 2 {
    +		return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10))
    +	}
    +	if e.Length == 2 {
    +		if len(b) < 6 {
    +			return ErrBuf
    +		}
    +		e.Timeout = binary.BigEndian.Uint16(b[4:6])
    +	}
    +	return nil
    +}
    +
    +func (e *EDNS0_TCP_KEEPALIVE) String() (s string) {
    +	s = "use tcp keep-alive"
    +	if e.Length == 0 {
    +		s += ", timeout omitted"
    +	} else {
    +		s += fmt.Sprintf(", timeout %dms", e.Timeout*100)
    +	}
    +	return
    +}
    +func (e *EDNS0_TCP_KEEPALIVE) copy() EDNS0 { return &EDNS0_TCP_KEEPALIVE{e.Code, e.Length, e.Timeout} }
    +
    +// EDNS0_PADDING option is used to add padding to a request/response. The default
    +// value of padding SHOULD be 0x0 but other values MAY be used, for instance if
    +// compression is applied before encryption which may break signatures.
    +type EDNS0_PADDING struct {
    +	Padding []byte
    +}
    +
    +// Option implements the EDNS0 interface.
    +func (e *EDNS0_PADDING) Option() uint16        { return EDNS0PADDING }
    +func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil }
    +func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil }
    +func (e *EDNS0_PADDING) String() string        { return fmt.Sprintf("%0X", e.Padding) }
    +func (e *EDNS0_PADDING) copy() EDNS0 {
    +	b := make([]byte, len(e.Padding))
    +	copy(b, e.Padding)
    +	return &EDNS0_PADDING{b}
    +}
    diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go
    new file mode 100644
    index 000000000000..0ec79f2fc126
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/format.go
    @@ -0,0 +1,93 @@
    +package dns
    +
    +import (
    +	"net"
    +	"reflect"
    +	"strconv"
    +)
    +
    +// NumField returns the number of rdata fields r has.
    +func NumField(r RR) int {
    +	return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header
    +}
    +
    +// Field returns the rdata field i as a string. Fields are indexed starting from 1.
+// RR types that hold slice data, for instance the NSEC type bitmap, will return a single
+// string where the types are concatenated using a space.
+// Accessing non-existing fields will cause a panic.
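+//
+// For example (illustrative):
+//
+//	mx, _ := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
+//	dns.Field(mx, 1) // "10"
+//	dns.Field(mx, 2) // "mx.miek.nl."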
    +func Field(r RR, i int) string {
    +	if i == 0 {
    +		return ""
    +	}
    +	d := reflect.ValueOf(r).Elem().Field(i)
    +	switch d.Kind() {
    +	case reflect.String:
    +		return d.String()
    +	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
    +		return strconv.FormatInt(d.Int(), 10)
    +	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
    +		return strconv.FormatUint(d.Uint(), 10)
    +	case reflect.Slice:
    +		switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
    +		case `dns:"a"`:
    +			// TODO(miek): Hmm store this as 16 bytes
    +			if d.Len() < net.IPv4len {
    +				return ""
    +			}
    +			if d.Len() < net.IPv6len {
    +				return net.IPv4(byte(d.Index(0).Uint()),
    +					byte(d.Index(1).Uint()),
    +					byte(d.Index(2).Uint()),
    +					byte(d.Index(3).Uint())).String()
    +			}
    +			return net.IPv4(byte(d.Index(12).Uint()),
    +				byte(d.Index(13).Uint()),
    +				byte(d.Index(14).Uint()),
    +				byte(d.Index(15).Uint())).String()
    +		case `dns:"aaaa"`:
    +			if d.Len() < net.IPv6len {
    +				return ""
    +			}
    +			return net.IP{
    +				byte(d.Index(0).Uint()),
    +				byte(d.Index(1).Uint()),
    +				byte(d.Index(2).Uint()),
    +				byte(d.Index(3).Uint()),
    +				byte(d.Index(4).Uint()),
    +				byte(d.Index(5).Uint()),
    +				byte(d.Index(6).Uint()),
    +				byte(d.Index(7).Uint()),
    +				byte(d.Index(8).Uint()),
    +				byte(d.Index(9).Uint()),
    +				byte(d.Index(10).Uint()),
    +				byte(d.Index(11).Uint()),
    +				byte(d.Index(12).Uint()),
    +				byte(d.Index(13).Uint()),
    +				byte(d.Index(14).Uint()),
    +				byte(d.Index(15).Uint()),
    +			}.String()
    +		case `dns:"nsec"`:
    +			if d.Len() == 0 {
    +				return ""
    +			}
    +			s := Type(d.Index(0).Uint()).String()
    +			for i := 1; i < d.Len(); i++ {
    +				s += " " + Type(d.Index(i).Uint()).String()
    +			}
    +			return s
    +		default:
+			// if it does not have a tag it's a string slice
    +			fallthrough
    +		case `dns:"txt"`:
    +			if d.Len() == 0 {
    +				return ""
    +			}
    +			s := d.Index(0).String()
    +			for i := 1; i < d.Len(); i++ {
    +				s += " " + d.Index(i).String()
    +			}
    +			return s
    +		}
    +	}
    +	return ""
    +}
    diff --git a/vendor/github.com/miekg/dns/fuzz.go b/vendor/github.com/miekg/dns/fuzz.go
    new file mode 100644
    index 000000000000..a8a09184d404
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/fuzz.go
    @@ -0,0 +1,23 @@
    +// +build fuzz
    +
    +package dns
    +
    +func Fuzz(data []byte) int {
    +	msg := new(Msg)
    +
    +	if err := msg.Unpack(data); err != nil {
    +		return 0
    +	}
    +	if _, err := msg.Pack(); err != nil {
    +		return 0
    +	}
    +
    +	return 1
    +}
    +
    +func FuzzNewRR(data []byte) int {
    +	if _, err := NewRR(string(data)); err != nil {
    +		return 0
    +	}
    +	return 1
    +}
    diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go
    new file mode 100644
    index 000000000000..97bc39f58a82
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/generate.go
    @@ -0,0 +1,242 @@
    +package dns
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"io"
    +	"strconv"
    +	"strings"
    +)
    +
    +// Parse the $GENERATE statement as used in BIND9 zones.
    +// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
    +// We are called after '$GENERATE '. After which we expect:
    +// * the range (12-24/2)
    +// * lhs (ownername)
    +// * [[ttl][class]]
    +// * type
    +// * rhs (rdata)
+// But we are lazy here: only the range is parsed; *all* occurrences
    +// of $ after that are interpreted.
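+//
+// For example (illustrative), the directive
+//
+//	$GENERATE 1-3 host-$ A 10.0.0.$
+//
+// expands, relative to the zone origin, to the records
+// host-1 A 10.0.0.1, host-2 A 10.0.0.2 and host-3 A 10.0.0.3.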
    +func (zp *ZoneParser) generate(l lex) (RR, bool) {
    +	token := l.token
    +	step := 1
    +	if i := strings.IndexByte(token, '/'); i >= 0 {
    +		if i+1 == len(token) {
    +			return zp.setParseError("bad step in $GENERATE range", l)
    +		}
    +
    +		s, err := strconv.Atoi(token[i+1:])
    +		if err != nil || s <= 0 {
    +			return zp.setParseError("bad step in $GENERATE range", l)
    +		}
    +
    +		step = s
    +		token = token[:i]
    +	}
    +
    +	sx := strings.SplitN(token, "-", 2)
    +	if len(sx) != 2 {
    +		return zp.setParseError("bad start-stop in $GENERATE range", l)
    +	}
    +
    +	start, err := strconv.Atoi(sx[0])
    +	if err != nil {
    +		return zp.setParseError("bad start in $GENERATE range", l)
    +	}
    +
    +	end, err := strconv.Atoi(sx[1])
    +	if err != nil {
    +		return zp.setParseError("bad stop in $GENERATE range", l)
    +	}
    +	if end < 0 || start < 0 || end < start {
    +		return zp.setParseError("bad range in $GENERATE range", l)
    +	}
    +
    +	zp.c.Next() // _BLANK
    +
+	// Create a completely new string, which we then parse again.
    +	var s string
    +	for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
    +		if l.err {
    +			return zp.setParseError("bad data in $GENERATE directive", l)
    +		}
    +		if l.value == zNewline {
    +			break
    +		}
    +
    +		s += l.token
    +	}
    +
    +	r := &generateReader{
    +		s: s,
    +
    +		cur:   start,
    +		start: start,
    +		end:   end,
    +		step:  step,
    +
    +		file: zp.file,
    +		lex:  &l,
    +	}
    +	zp.sub = NewZoneParser(r, zp.origin, zp.file)
    +	zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed
    +	zp.sub.SetDefaultTTL(defaultTtl)
    +	return zp.subNext()
    +}
    +
    +type generateReader struct {
    +	s  string
    +	si int
    +
    +	cur   int
    +	start int
    +	end   int
    +	step  int
    +
    +	mod bytes.Buffer
    +
    +	escape bool
    +
    +	eof bool
    +
    +	file string
    +	lex  *lex
    +}
    +
    +func (r *generateReader) parseError(msg string, end int) *ParseError {
    +	r.eof = true // Make errors sticky.
    +
    +	l := *r.lex
    +	l.token = r.s[r.si-1 : end]
    +	l.column += r.si // l.column starts one zBLANK before r.s
    +
    +	return &ParseError{r.file, msg, l}
    +}
    +
    +func (r *generateReader) Read(p []byte) (int, error) {
    +	// NewZLexer, through NewZoneParser, should use ReadByte and
    +	// not end up here.
    +
    +	panic("not implemented")
    +}
    +
    +func (r *generateReader) ReadByte() (byte, error) {
    +	if r.eof {
    +		return 0, io.EOF
    +	}
    +	if r.mod.Len() > 0 {
    +		return r.mod.ReadByte()
    +	}
    +
    +	if r.si >= len(r.s) {
    +		r.si = 0
    +		r.cur += r.step
    +
    +		r.eof = r.cur > r.end || r.cur < 0
    +		return '\n', nil
    +	}
    +
    +	si := r.si
    +	r.si++
    +
    +	switch r.s[si] {
    +	case '\\':
    +		if r.escape {
    +			r.escape = false
    +			return '\\', nil
    +		}
    +
    +		r.escape = true
    +		return r.ReadByte()
    +	case '$':
    +		if r.escape {
    +			r.escape = false
    +			return '$', nil
    +		}
    +
    +		mod := "%d"
    +
    +		if si >= len(r.s)-1 {
    +			// End of the string
    +			fmt.Fprintf(&r.mod, mod, r.cur)
    +			return r.mod.ReadByte()
    +		}
    +
    +		if r.s[si+1] == '$' {
    +			r.si++
    +			return '$', nil
    +		}
    +
    +		var offset int
    +
    +		// Search for { and }
    +		if r.s[si+1] == '{' {
    +			// Modifier block
    +			sep := strings.Index(r.s[si+2:], "}")
    +			if sep < 0 {
    +				return 0, r.parseError("bad modifier in $GENERATE", len(r.s))
    +			}
    +
    +			var errMsg string
    +			mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep])
    +			if errMsg != "" {
    +				return 0, r.parseError(errMsg, si+3+sep)
    +			}
    +			if r.start+offset < 0 || r.end+offset > 1<<31-1 {
    +				return 0, r.parseError("bad offset in $GENERATE", si+3+sep)
    +			}
    +
    +			r.si += 2 + sep // Jump to it
    +		}
    +
    +		fmt.Fprintf(&r.mod, mod, r.cur+offset)
    +		return r.mod.ReadByte()
    +	default:
    +		if r.escape { // Pretty useless here
    +			r.escape = false
    +			return r.ReadByte()
    +		}
    +
    +		return r.s[si], nil
    +	}
    +}
    +
    +// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
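+// For example (illustrative), "0,0,d" maps to the format "%d" with offset 0,
+// and "20,2,x" maps to "%02x" with offset 20.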
    +func modToPrintf(s string) (string, int, string) {
    +	// Modifier is { offset [ ,width [ ,base ] ] } - provide default
+	// values for the optional width and base, if necessary.
    +	var offStr, widthStr, base string
    +	switch xs := strings.Split(s, ","); len(xs) {
    +	case 1:
    +		offStr, widthStr, base = xs[0], "0", "d"
    +	case 2:
    +		offStr, widthStr, base = xs[0], xs[1], "d"
    +	case 3:
    +		offStr, widthStr, base = xs[0], xs[1], xs[2]
    +	default:
    +		return "", 0, "bad modifier in $GENERATE"
    +	}
    +
    +	switch base {
    +	case "o", "d", "x", "X":
    +	default:
    +		return "", 0, "bad base in $GENERATE"
    +	}
    +
    +	offset, err := strconv.Atoi(offStr)
    +	if err != nil {
    +		return "", 0, "bad offset in $GENERATE"
    +	}
    +
    +	width, err := strconv.Atoi(widthStr)
    +	if err != nil || width < 0 || width > 255 {
    +		return "", 0, "bad width in $GENERATE"
    +	}
    +
    +	if width == 0 {
    +		return "%" + base, offset, ""
    +	}
    +
    +	return "%0" + widthStr + base, offset, ""
    +}
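
As an illustrative sketch of the conversion above (not taken from the upstream sources): a {offset,width,base} modifier such as 0,2,x becomes the fmt verb %02x with offset 0, which $GENERATE then applies to the current counter value. A hypothetical in-package test could look like this:

package dns

import (
	"fmt"
	"testing"
)

// TestModToPrintfSketch is a hypothetical in-package test (name and file
// are not part of the upstream code) showing how a $GENERATE modifier
// maps onto an fmt verb plus an offset.
func TestModToPrintfSketch(t *testing.T) {
	verb, offset, errMsg := modToPrintf("0,2,x")
	if errMsg != "" {
		t.Fatal(errMsg)
	}
	// "0,2,x" means offset 0, width 2, hexadecimal base, i.e. "%02x".
	if verb != "%02x" || offset != 0 {
		t.Fatalf("got %q/%d, want %q/0", verb, offset, "%02x")
	}
	// $GENERATE renders the counter with fmt.Fprintf(&buf, verb, cur+offset).
	if got := fmt.Sprintf(verb, 10+offset); got != "0a" {
		t.Fatalf("got %q, want \"0a\"", got)
	}
}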
    diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go
    new file mode 100644
    index 000000000000..e32d2a1d2546
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/labels.go
    @@ -0,0 +1,188 @@
    +package dns
    +
    +// Holds a bunch of helper functions for dealing with labels.
    +
+// SplitDomainName splits a name string into its labels.
+// www.miek.nl. returns []string{"www", "miek", "nl"}.
+// .www.miek.nl. returns []string{"", "www", "miek", "nl"}.
    +// The root label (.) returns nil. Note that using
    +// strings.Split(s) will work in most cases, but does not handle
    +// escaped dots (\.) for instance.
    +// s must be a syntactically valid domain name, see IsDomainName.
    +func SplitDomainName(s string) (labels []string) {
    +	if len(s) == 0 {
    +		return nil
    +	}
    +	fqdnEnd := 0 // offset of the final '.' or the length of the name
    +	idx := Split(s)
    +	begin := 0
    +	if IsFqdn(s) {
    +		fqdnEnd = len(s) - 1
    +	} else {
    +		fqdnEnd = len(s)
    +	}
    +
    +	switch len(idx) {
    +	case 0:
    +		return nil
    +	case 1:
    +		// no-op
    +	default:
    +		for _, end := range idx[1:] {
    +			labels = append(labels, s[begin:end-1])
    +			begin = end
    +		}
    +	}
    +
    +	return append(labels, s[begin:fqdnEnd])
    +}
    +
    +// CompareDomainName compares the names s1 and s2 and
    +// returns how many labels they have in common starting from the *right*.
    +// The comparison stops at the first inequality. The names are downcased
    +// before the comparison.
    +//
    +// www.miek.nl. and miek.nl. have two labels in common: miek and nl
    +// www.miek.nl. and www.bla.nl. have one label in common: nl
    +//
    +// s1 and s2 must be syntactically valid domain names.
    +func CompareDomainName(s1, s2 string) (n int) {
    +	// the first check: root label
    +	if s1 == "." || s2 == "." {
    +		return 0
    +	}
    +
    +	l1 := Split(s1)
    +	l2 := Split(s2)
    +
    +	j1 := len(l1) - 1 // end
    +	i1 := len(l1) - 2 // start
    +	j2 := len(l2) - 1
    +	i2 := len(l2) - 2
    +	// the second check can be done here: last/only label
    +	// before we fall through into the for-loop below
    +	if equal(s1[l1[j1]:], s2[l2[j2]:]) {
    +		n++
    +	} else {
    +		return
    +	}
    +	for {
    +		if i1 < 0 || i2 < 0 {
    +			break
    +		}
    +		if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) {
    +			n++
    +		} else {
    +			break
    +		}
    +		j1--
    +		i1--
    +		j2--
    +		i2--
    +	}
    +	return
    +}
    +
+// CountLabel counts the number of labels in the string s.
    +// s must be a syntactically valid domain name.
    +func CountLabel(s string) (labels int) {
    +	if s == "." {
    +		return
    +	}
    +	off := 0
    +	end := false
    +	for {
    +		off, end = NextLabel(s, off)
    +		labels++
    +		if end {
    +			return
    +		}
    +	}
    +}
    +
    +// Split splits a name s into its label indexes.
    +// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
    +// The root name (.) returns nil. Also see SplitDomainName.
    +// s must be a syntactically valid domain name.
    +func Split(s string) []int {
    +	if s == "." {
    +		return nil
    +	}
    +	idx := make([]int, 1, 3)
    +	off := 0
    +	end := false
    +
    +	for {
    +		off, end = NextLabel(s, off)
    +		if end {
    +			return idx
    +		}
    +		idx = append(idx, off)
    +	}
    +}
    +
    +// NextLabel returns the index of the start of the next label in the
    +// string s starting at offset.
    +// The bool end is true when the end of the string has been reached.
    +// Also see PrevLabel.
    +func NextLabel(s string, offset int) (i int, end bool) {
    +	quote := false
    +	for i = offset; i < len(s)-1; i++ {
    +		switch s[i] {
    +		case '\\':
    +			quote = !quote
    +		default:
    +			quote = false
    +		case '.':
    +			if quote {
    +				quote = !quote
    +				continue
    +			}
    +			return i + 1, false
    +		}
    +	}
    +	return i + 1, true
    +}
    +
    +// PrevLabel returns the index of the label when starting from the right and
    +// jumping n labels to the left.
    +// The bool start is true when the start of the string has been overshot.
    +// Also see NextLabel.
    +func PrevLabel(s string, n int) (i int, start bool) {
    +	if n == 0 {
    +		return len(s), false
    +	}
    +	lab := Split(s)
    +	if lab == nil {
    +		return 0, true
    +	}
    +	if n > len(lab) {
    +		return 0, true
    +	}
    +	return lab[len(lab)-n], false
    +}
    +
    +// equal compares a and b while ignoring case. It returns true when equal otherwise false.
    +func equal(a, b string) bool {
    +	// might be lifted into API function.
    +	la := len(a)
    +	lb := len(b)
    +	if la != lb {
    +		return false
    +	}
    +
    +	for i := la - 1; i >= 0; i-- {
    +		ai := a[i]
    +		bi := b[i]
    +		if ai >= 'A' && ai <= 'Z' {
    +			ai |= 'a' - 'A'
    +		}
    +		if bi >= 'A' && bi <= 'Z' {
    +			bi |= 'a' - 'A'
    +		}
    +		if ai != bi {
    +			return false
    +		}
    +	}
    +	return true
    +}
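
A short usage sketch for the exported label helpers above, assuming the vendored import path github.com/miekg/dns:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// SplitDomainName handles escaped dots that strings.Split would not.
	fmt.Println(dns.SplitDomainName("www.miek.nl."))               // [www miek nl]
	// Two labels in common, counted from the right: miek and nl.
	fmt.Println(dns.CompareDomainName("www.miek.nl.", "miek.nl.")) // 2
	fmt.Println(dns.CountLabel("www.miek.nl."))                    // 3
	// Split returns the index of each label's first byte.
	fmt.Println(dns.Split("www.miek.nl."))                         // [0 4 9]
}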
    diff --git a/vendor/github.com/miekg/dns/listen_go111.go b/vendor/github.com/miekg/dns/listen_go111.go
    new file mode 100644
    index 000000000000..fad195cfeb4d
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/listen_go111.go
    @@ -0,0 +1,44 @@
    +// +build go1.11
    +// +build aix darwin dragonfly freebsd linux netbsd openbsd
    +
    +package dns
    +
    +import (
    +	"context"
    +	"net"
    +	"syscall"
    +
    +	"golang.org/x/sys/unix"
    +)
    +
    +const supportsReusePort = true
    +
    +func reuseportControl(network, address string, c syscall.RawConn) error {
    +	var opErr error
    +	err := c.Control(func(fd uintptr) {
    +		opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
    +	})
    +	if err != nil {
    +		return err
    +	}
    +
    +	return opErr
    +}
    +
    +func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
    +	var lc net.ListenConfig
    +	if reuseport {
    +		lc.Control = reuseportControl
    +	}
    +
    +	return lc.Listen(context.Background(), network, addr)
    +}
    +
    +func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
    +	var lc net.ListenConfig
    +	if reuseport {
    +		lc.Control = reuseportControl
    +	}
    +
    +	return lc.ListenPacket(context.Background(), network, addr)
    +}
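
The listeners above only wire SO_REUSEPORT into net.ListenConfig; the pattern itself can be reproduced standalone. A minimal sketch (Linux/BSD, Go 1.11+, the address 127.0.0.1:8053 chosen arbitrarily) that binds the same TCP address twice:

package main

import (
	"context"
	"fmt"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	lc := net.ListenConfig{
		Control: func(network, address string, c syscall.RawConn) error {
			var opErr error
			// Set SO_REUSEPORT before bind, exactly as reuseportControl does.
			if err := c.Control(func(fd uintptr) {
				opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
			}); err != nil {
				return err
			}
			return opErr
		},
	}

	l1, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:8053")
	if err != nil {
		panic(err)
	}
	defer l1.Close()

	// Without SO_REUSEPORT this second bind would fail with "address already in use".
	l2, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:8053")
	if err != nil {
		panic(err)
	}
	defer l2.Close()

	fmt.Println("both listeners bound to", l1.Addr())
}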
    diff --git a/vendor/github.com/miekg/dns/listen_go_not111.go b/vendor/github.com/miekg/dns/listen_go_not111.go
    new file mode 100644
    index 000000000000..b9201417abeb
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/listen_go_not111.go
    @@ -0,0 +1,23 @@
    +// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
    +
    +package dns
    +
    +import "net"
    +
    +const supportsReusePort = false
    +
    +func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
    +	if reuseport {
    +		// TODO(tmthrgd): return an error?
    +	}
    +
    +	return net.Listen(network, addr)
    +}
    +
    +func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
    +	if reuseport {
    +		// TODO(tmthrgd): return an error?
    +	}
    +
    +	return net.ListenPacket(network, addr)
    +}
    diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go
    new file mode 100644
    index 000000000000..e04fb5d77a78
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/msg.go
    @@ -0,0 +1,1228 @@
    +// DNS packet assembly, see RFC 1035. Converting from - Unpack() -
    +// and to - Pack() - wire format.
+// All the packers and unpackers take a (msg []byte, off int)
+// and return (off1 int, err error). If they return an error, they
+// also return off1==len(msg), so that the next unpacker will
+// also fail. This lets us avoid error checks until the end of a
+// packing sequence.
    +
    +package dns
    +
    +//go:generate go run msg_generate.go
    +
    +import (
    +	crand "crypto/rand"
    +	"encoding/binary"
    +	"fmt"
    +	"math/big"
    +	"math/rand"
    +	"strconv"
    +	"strings"
    +	"sync"
    +)
    +
    +const (
    +	maxCompressionOffset    = 2 << 13 // We have 14 bits for the compression pointer
    +	maxDomainNameWireOctets = 255     // See RFC 1035 section 2.3.4
    +
    +	// This is the maximum number of compression pointers that should occur in a
    +	// semantically valid message. Each label in a domain name must be at least one
    +	// octet and is separated by a period. The root label won't be represented by a
    +	// compression pointer to a compression pointer, hence the -2 to exclude the
    +	// smallest valid root label.
    +	//
    +	// It is possible to construct a valid message that has more compression pointers
    +	// than this, and still doesn't loop, by pointing to a previous pointer. This is
    +	// not something a well written implementation should ever do, so we leave them
    +	// to trip the maximum compression pointer check.
    +	maxCompressionPointers = (maxDomainNameWireOctets+1)/2 - 2
    +
    +	// This is the maximum length of a domain name in presentation format. The
    +	// maximum wire length of a domain name is 255 octets (see above), with the
    +	// maximum label length being 63. The wire format requires one extra byte over
    +	// the presentation format, reducing the number of octets by 1. Each label in
    +	// the name will be separated by a single period, with each octet in the label
    +	// expanding to at most 4 bytes (\DDD). If all other labels are of the maximum
    +	// length, then the final label can only be 61 octets long to not exceed the
    +	// maximum allowed wire length.
    +	maxDomainNamePresentationLength = 61*4 + 1 + 63*4 + 1 + 63*4 + 1 + 63*4 + 1
    +)
    +
    +// Errors defined in this package.
    +var (
    +	ErrAlg           error = &Error{err: "bad algorithm"}                  // ErrAlg indicates an error with the (DNSSEC) algorithm.
    +	ErrAuth          error = &Error{err: "bad authentication"}             // ErrAuth indicates an error in the TSIG authentication.
    +	ErrBuf           error = &Error{err: "buffer size too small"}          // ErrBuf indicates that the buffer used is too small for the message.
    +	ErrConnEmpty     error = &Error{err: "conn has no connection"}         // ErrConnEmpty indicates a connection is being used before it is initialized.
+	ErrExtendedRcode error = &Error{err: "bad extended rcode"}             // ErrExtendedRcode indicates an error with the extended rcode.
    +	ErrFqdn          error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot.
    +	ErrId            error = &Error{err: "id mismatch"}                    // ErrId indicates there is a mismatch with the message's ID.
    +	ErrKeyAlg        error = &Error{err: "bad key algorithm"}              // ErrKeyAlg indicates that the algorithm in the key is not valid.
    +	ErrKey           error = &Error{err: "bad key"}
    +	ErrKeySize       error = &Error{err: "bad key size"}
    +	ErrLongDomain    error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)}
    +	ErrNoSig         error = &Error{err: "no signature found"}
    +	ErrPrivKey       error = &Error{err: "bad private key"}
    +	ErrRcode         error = &Error{err: "bad rcode"}
    +	ErrRdata         error = &Error{err: "bad rdata"}
    +	ErrRRset         error = &Error{err: "bad rrset"}
    +	ErrSecret        error = &Error{err: "no secrets defined"}
    +	ErrShortRead     error = &Error{err: "short read"}
    +	ErrSig           error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated.
+	ErrSoa           error = &Error{err: "no SOA"}        // ErrSoa indicates that no SOA RR was seen when doing zone transfers.
    +	ErrTime          error = &Error{err: "bad time"}      // ErrTime indicates a timing error in TSIG authentication.
    +)
    +
+// Id returns, by default, a 16-bit random number to be used as a
+// message id. The randomness provided should be good enough. Since this
+// is a variable, the function can be reassigned to a custom function.
    +// For instance, to make it return a static value:
    +//
    +//	dns.Id = func() uint16 { return 3 }
    +var Id = id
    +
    +var (
    +	idLock sync.Mutex
    +	idRand *rand.Rand
    +)
    +
+// id returns a 16-bit random number to be used as a
+// message id. The randomness provided should be good enough.
    +func id() uint16 {
    +	idLock.Lock()
    +
    +	if idRand == nil {
    +		// This (partially) works around
    +		// https://github.com/golang/go/issues/11833 by only
    +		// seeding idRand upon the first call to id.
    +
    +		var seed int64
    +		var buf [8]byte
    +
    +		if _, err := crand.Read(buf[:]); err == nil {
    +			seed = int64(binary.LittleEndian.Uint64(buf[:]))
    +		} else {
    +			seed = rand.Int63()
    +		}
    +
    +		idRand = rand.New(rand.NewSource(seed))
    +	}
    +
    +	// The call to idRand.Uint32 must be within the
    +	// mutex lock because *rand.Rand is not safe for
    +	// concurrent use.
    +	//
    +	// There is no added performance overhead to calling
    +	// idRand.Uint32 inside a mutex lock over just
    +	// calling rand.Uint32 as the global math/rand rng
    +	// is internally protected by a sync.Mutex.
    +	id := uint16(idRand.Uint32())
    +
    +	idLock.Unlock()
    +	return id
    +}
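
As the comment on the Id variable notes, the generator can be swapped out. A small sketch that sources every message id directly from crypto/rand instead of the lazily seeded math/rand source:

package main

import (
	crand "crypto/rand"
	"encoding/binary"
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	dns.Id = func() uint16 {
		var b [2]byte
		if _, err := crand.Read(b[:]); err != nil {
			panic(err) // out of entropy; handle as appropriate
		}
		return binary.BigEndian.Uint16(b[:])
	}
	fmt.Println(dns.Id(), dns.Id()) // two fresh random ids
}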
    +
+// MsgHdr is a manually-unpacked version of (id, bits).
    +type MsgHdr struct {
    +	Id                 uint16
    +	Response           bool
    +	Opcode             int
    +	Authoritative      bool
    +	Truncated          bool
    +	RecursionDesired   bool
    +	RecursionAvailable bool
    +	Zero               bool
    +	AuthenticatedData  bool
    +	CheckingDisabled   bool
    +	Rcode              int
    +}
    +
    +// Msg contains the layout of a DNS message.
    +type Msg struct {
    +	MsgHdr
    +	Compress bool       `json:"-"` // If true, the message will be compressed when converted to wire format.
    +	Question []Question // Holds the RR(s) of the question section.
    +	Answer   []RR       // Holds the RR(s) of the answer section.
    +	Ns       []RR       // Holds the RR(s) of the authority section.
    +	Extra    []RR       // Holds the RR(s) of the additional section.
    +}
    +
+// ClassToString maps Classes to strings for each CLASS wire type.
    +var ClassToString = map[uint16]string{
    +	ClassINET:   "IN",
    +	ClassCSNET:  "CS",
    +	ClassCHAOS:  "CH",
    +	ClassHESIOD: "HS",
    +	ClassNONE:   "NONE",
    +	ClassANY:    "ANY",
    +}
    +
    +// OpcodeToString maps Opcodes to strings.
    +var OpcodeToString = map[int]string{
    +	OpcodeQuery:  "QUERY",
    +	OpcodeIQuery: "IQUERY",
    +	OpcodeStatus: "STATUS",
    +	OpcodeNotify: "NOTIFY",
    +	OpcodeUpdate: "UPDATE",
    +}
    +
    +// RcodeToString maps Rcodes to strings.
    +var RcodeToString = map[int]string{
    +	RcodeSuccess:        "NOERROR",
    +	RcodeFormatError:    "FORMERR",
    +	RcodeServerFailure:  "SERVFAIL",
    +	RcodeNameError:      "NXDOMAIN",
    +	RcodeNotImplemented: "NOTIMP",
    +	RcodeRefused:        "REFUSED",
    +	RcodeYXDomain:       "YXDOMAIN", // See RFC 2136
    +	RcodeYXRrset:        "YXRRSET",
    +	RcodeNXRrset:        "NXRRSET",
    +	RcodeNotAuth:        "NOTAUTH",
    +	RcodeNotZone:        "NOTZONE",
    +	RcodeBadSig:         "BADSIG", // Also known as RcodeBadVers, see RFC 6891
    +	//	RcodeBadVers:        "BADVERS",
    +	RcodeBadKey:    "BADKEY",
    +	RcodeBadTime:   "BADTIME",
    +	RcodeBadMode:   "BADMODE",
    +	RcodeBadName:   "BADNAME",
    +	RcodeBadAlg:    "BADALG",
    +	RcodeBadTrunc:  "BADTRUNC",
    +	RcodeBadCookie: "BADCOOKIE",
    +}
    +
    +// compressionMap is used to allow a more efficient compression map
    +// to be used for internal packDomainName calls without changing the
+// signature or functionality of the public API.
    +//
    +// In particular, map[string]uint16 uses 25% less per-entry memory
    +// than does map[string]int.
    +type compressionMap struct {
    +	ext map[string]int    // external callers
    +	int map[string]uint16 // internal callers
    +}
    +
    +func (m compressionMap) valid() bool {
    +	return m.int != nil || m.ext != nil
    +}
    +
    +func (m compressionMap) insert(s string, pos int) {
    +	if m.ext != nil {
    +		m.ext[s] = pos
    +	} else {
    +		m.int[s] = uint16(pos)
    +	}
    +}
    +
    +func (m compressionMap) find(s string) (int, bool) {
    +	if m.ext != nil {
    +		pos, ok := m.ext[s]
    +		return pos, ok
    +	}
    +
    +	pos, ok := m.int[s]
    +	return int(pos), ok
    +}
    +
    +// Domain names are a sequence of counted strings
    +// split at the dots. They end with a zero-length string.
    +
    +// PackDomainName packs a domain name s into msg[off:].
    +// If compression is wanted compress must be true and the compression
    +// map needs to hold a mapping between domain names and offsets
    +// pointing into msg.
    +func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
    +	return packDomainName(s, msg, off, compressionMap{ext: compression}, compress)
    +}
    +
    +func packDomainName(s string, msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	// XXX: A logical copy of this function exists in IsDomainName and
    +	// should be kept in sync with this function.
    +
    +	ls := len(s)
    +	if ls == 0 { // Ok, for instance when dealing with update RR without any rdata.
    +		return off, nil
    +	}
    +
    +	// If not fully qualified, error out.
    +	if !IsFqdn(s) {
    +		return len(msg), ErrFqdn
    +	}
    +
    +	// Each dot ends a segment of the name.
    +	// We trade each dot byte for a length byte.
    +	// Except for escaped dots (\.), which are normal dots.
    +	// There is also a trailing zero.
    +
    +	// Compression
    +	pointer := -1
    +
    +	// Emit sequence of counted strings, chopping at dots.
    +	var (
    +		begin     int
    +		compBegin int
    +		compOff   int
    +		bs        []byte
    +		wasDot    bool
    +	)
    +loop:
    +	for i := 0; i < ls; i++ {
    +		var c byte
    +		if bs == nil {
    +			c = s[i]
    +		} else {
    +			c = bs[i]
    +		}
    +
    +		switch c {
    +		case '\\':
    +			if off+1 > len(msg) {
    +				return len(msg), ErrBuf
    +			}
    +
    +			if bs == nil {
    +				bs = []byte(s)
    +			}
    +
    +			// check for \DDD
    +			if i+3 < ls && isDigit(bs[i+1]) && isDigit(bs[i+2]) && isDigit(bs[i+3]) {
    +				bs[i] = dddToByte(bs[i+1:])
    +				copy(bs[i+1:ls-3], bs[i+4:])
    +				ls -= 3
    +				compOff += 3
    +			} else {
    +				copy(bs[i:ls-1], bs[i+1:])
    +				ls--
    +				compOff++
    +			}
    +
    +			wasDot = false
    +		case '.':
    +			if wasDot {
    +				// two dots back to back is not legal
    +				return len(msg), ErrRdata
    +			}
    +			wasDot = true
    +
    +			labelLen := i - begin
    +			if labelLen >= 1<<6 { // top two bits of length must be clear
    +				return len(msg), ErrRdata
    +			}
    +
    +			// off can already (we're in a loop) be bigger than len(msg)
    +			// this happens when a name isn't fully qualified
    +			if off+1+labelLen > len(msg) {
    +				return len(msg), ErrBuf
    +			}
    +
    +			// Don't try to compress '.'
    +			// We should only compress when compress is true, but we should also still pick
    +			// up names that can be used for *future* compression(s).
    +			if compression.valid() && !isRootLabel(s, bs, begin, ls) {
    +				if p, ok := compression.find(s[compBegin:]); ok {
    +					// The first hit is the longest matching dname
    +					// keep the pointer offset we get back and store
    +					// the offset of the current name, because that's
    +					// where we need to insert the pointer later
    +
    +					// If compress is true, we're allowed to compress this dname
    +					if compress {
    +						pointer = p // Where to point to
    +						break loop
    +					}
    +				} else if off < maxCompressionOffset {
    +					// Only offsets smaller than maxCompressionOffset can be used.
    +					compression.insert(s[compBegin:], off)
    +				}
    +			}
    +
    +			// The following is covered by the length check above.
    +			msg[off] = byte(labelLen)
    +
    +			if bs == nil {
    +				copy(msg[off+1:], s[begin:i])
    +			} else {
    +				copy(msg[off+1:], bs[begin:i])
    +			}
    +			off += 1 + labelLen
    +
    +			begin = i + 1
    +			compBegin = begin + compOff
    +		default:
    +			wasDot = false
    +		}
    +	}
    +
    +	// Root label is special
    +	if isRootLabel(s, bs, 0, ls) {
    +		return off, nil
    +	}
    +
    +	// If we did compression and we find something add the pointer here
    +	if pointer != -1 {
    +		// We have two bytes (14 bits) to put the pointer in
    +		binary.BigEndian.PutUint16(msg[off:], uint16(pointer^0xC000))
    +		return off + 2, nil
    +	}
    +
    +	if off < len(msg) {
    +		msg[off] = 0
    +	}
    +
    +	return off + 1, nil
    +}
    +
    +// isRootLabel returns whether s or bs, from off to end, is the root
    +// label ".".
    +//
    +// If bs is nil, s will be checked, otherwise bs will be checked.
    +func isRootLabel(s string, bs []byte, off, end int) bool {
    +	if bs == nil {
    +		return s[off:end] == "."
    +	}
    +
    +	return end-off == 1 && bs[off] == '.'
    +}
    +
    +// Unpack a domain name.
    +// In addition to the simple sequences of counted strings above,
    +// domain names are allowed to refer to strings elsewhere in the
    +// packet, to avoid repeating common suffixes when returning
    +// many entries in a single domain.  The pointers are marked
    +// by a length byte with the top two bits set.  Ignoring those
    +// two bits, that byte and the next give a 14 bit offset from msg[0]
    +// where we should pick up the trail.
    +// Note that if we jump elsewhere in the packet,
    +// we return off1 == the offset after the first pointer we found,
    +// which is where the next record will start.
    +// In theory, the pointers are only allowed to jump backward.
    +// We let them jump anywhere and stop jumping after a while.
    +
    +// UnpackDomainName unpacks a domain name into a string. It returns
    +// the name, the new offset into msg and any error that occurred.
    +//
    +// When an error is encountered, the unpacked name will be discarded
    +// and len(msg) will be returned as the offset.
    +func UnpackDomainName(msg []byte, off int) (string, int, error) {
    +	s := make([]byte, 0, maxDomainNamePresentationLength)
    +	off1 := 0
    +	lenmsg := len(msg)
    +	budget := maxDomainNameWireOctets
    +	ptr := 0 // number of pointers followed
    +Loop:
    +	for {
    +		if off >= lenmsg {
    +			return "", lenmsg, ErrBuf
    +		}
    +		c := int(msg[off])
    +		off++
    +		switch c & 0xC0 {
    +		case 0x00:
    +			if c == 0x00 {
    +				// end of name
    +				break Loop
    +			}
    +			// literal string
    +			if off+c > lenmsg {
    +				return "", lenmsg, ErrBuf
    +			}
    +			budget -= c + 1 // +1 for the label separator
    +			if budget <= 0 {
    +				return "", lenmsg, ErrLongDomain
    +			}
    +			for _, b := range msg[off : off+c] {
    +				switch b {
    +				case '.', '(', ')', ';', ' ', '@':
    +					fallthrough
    +				case '"', '\\':
    +					s = append(s, '\\', b)
    +				default:
    +					if b < ' ' || b > '~' { // unprintable, use \DDD
    +						s = append(s, escapeByte(b)...)
    +					} else {
    +						s = append(s, b)
    +					}
    +				}
    +			}
    +			s = append(s, '.')
    +			off += c
    +		case 0xC0:
    +			// pointer to somewhere else in msg.
    +			// remember location after first ptr,
    +			// since that's how many bytes we consumed.
    +			// also, don't follow too many pointers --
    +			// maybe there's a loop.
    +			if off >= lenmsg {
    +				return "", lenmsg, ErrBuf
    +			}
    +			c1 := msg[off]
    +			off++
    +			if ptr == 0 {
    +				off1 = off
    +			}
    +			if ptr++; ptr > maxCompressionPointers {
    +				return "", lenmsg, &Error{err: "too many compression pointers"}
    +			}
+			// In theory a pointer should only point backwards, which would guarantee
+			// progress, but the check on the previous three lines at least guarantees
+			// that following pointers cannot loop forever.
    +			off = (c^0xC0)<<8 | int(c1)
    +		default:
    +			// 0x80 and 0x40 are reserved
    +			return "", lenmsg, ErrRdata
    +		}
    +	}
    +	if ptr == 0 {
    +		off1 = off
    +	}
    +	if len(s) == 0 {
    +		return ".", off1, nil
    +	}
    +	return string(s), off1, nil
    +}
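
A round-trip sketch of the two exported name routines above, packing www.miek.nl. into a buffer without compression and unpacking it again:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	buf := make([]byte, 64)

	// No compression map, no compression: 3www4miek2nl0 = 13 octets.
	off, err := dns.PackDomainName("www.miek.nl.", buf, 0, nil, false)
	if err != nil {
		panic(err)
	}
	fmt.Println("packed length:", off) // 13

	name, next, err := dns.UnpackDomainName(buf[:off], 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(name, next) // www.miek.nl. 13
}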
    +
    +func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
    +	if len(txt) == 0 {
    +		if offset >= len(msg) {
    +			return offset, ErrBuf
    +		}
    +		msg[offset] = 0
    +		return offset, nil
    +	}
    +	var err error
    +	for _, s := range txt {
    +		if len(s) > len(tmp) {
    +			return offset, ErrBuf
    +		}
    +		offset, err = packTxtString(s, msg, offset, tmp)
    +		if err != nil {
    +			return offset, err
    +		}
    +	}
    +	return offset, nil
    +}
    +
    +func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
    +	lenByteOffset := offset
    +	if offset >= len(msg) || len(s) > len(tmp) {
    +		return offset, ErrBuf
    +	}
    +	offset++
    +	bs := tmp[:len(s)]
    +	copy(bs, s)
    +	for i := 0; i < len(bs); i++ {
    +		if len(msg) <= offset {
    +			return offset, ErrBuf
    +		}
    +		if bs[i] == '\\' {
    +			i++
    +			if i == len(bs) {
    +				break
    +			}
    +			// check for \DDD
    +			if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
    +				msg[offset] = dddToByte(bs[i:])
    +				i += 2
    +			} else {
    +				msg[offset] = bs[i]
    +			}
    +		} else {
    +			msg[offset] = bs[i]
    +		}
    +		offset++
    +	}
    +	l := offset - lenByteOffset - 1
    +	if l > 255 {
    +		return offset, &Error{err: "string exceeded 255 bytes in txt"}
    +	}
    +	msg[lenByteOffset] = byte(l)
    +	return offset, nil
    +}
    +
    +func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) {
    +	if offset >= len(msg) || len(s) > len(tmp) {
    +		return offset, ErrBuf
    +	}
    +	bs := tmp[:len(s)]
    +	copy(bs, s)
    +	for i := 0; i < len(bs); i++ {
    +		if len(msg) <= offset {
    +			return offset, ErrBuf
    +		}
    +		if bs[i] == '\\' {
    +			i++
    +			if i == len(bs) {
    +				break
    +			}
    +			// check for \DDD
    +			if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
    +				msg[offset] = dddToByte(bs[i:])
    +				i += 2
    +			} else {
    +				msg[offset] = bs[i]
    +			}
    +		} else {
    +			msg[offset] = bs[i]
    +		}
    +		offset++
    +	}
    +	return offset, nil
    +}
    +
    +func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) {
    +	off = off0
    +	var s string
    +	for off < len(msg) && err == nil {
    +		s, off, err = unpackString(msg, off)
    +		if err == nil {
    +			ss = append(ss, s)
    +		}
    +	}
    +	return
    +}
    +
    +// Helpers for dealing with escaped bytes
    +func isDigit(b byte) bool { return b >= '0' && b <= '9' }
    +
    +func dddToByte(s []byte) byte {
    +	_ = s[2] // bounds check hint to compiler; see golang.org/issue/14808
    +	return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
    +}
    +
    +func dddStringToByte(s string) byte {
    +	_ = s[2] // bounds check hint to compiler; see golang.org/issue/14808
    +	return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
    +}
    +
    +// Helper function for packing and unpacking
    +func intToBytes(i *big.Int, length int) []byte {
    +	buf := i.Bytes()
    +	if len(buf) < length {
    +		b := make([]byte, length)
    +		copy(b[length-len(buf):], buf)
    +		return b
    +	}
    +	return buf
    +}
    +
    +// PackRR packs a resource record rr into msg[off:].
    +// See PackDomainName for documentation about the compression.
    +func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
    +	headerEnd, off1, err := packRR(rr, msg, off, compressionMap{ext: compression}, compress)
    +	if err == nil {
    +		// packRR no longer sets the Rdlength field on the rr, but
    +		// callers might be expecting it so we set it here.
    +		rr.Header().Rdlength = uint16(off1 - headerEnd)
    +	}
    +	return off1, err
    +}
    +
    +func packRR(rr RR, msg []byte, off int, compression compressionMap, compress bool) (headerEnd int, off1 int, err error) {
    +	if rr == nil {
    +		return len(msg), len(msg), &Error{err: "nil rr"}
    +	}
    +
    +	headerEnd, err = rr.Header().packHeader(msg, off, compression, compress)
    +	if err != nil {
    +		return headerEnd, len(msg), err
    +	}
    +
    +	off1, err = rr.pack(msg, headerEnd, compression, compress)
    +	if err != nil {
    +		return headerEnd, len(msg), err
    +	}
    +
    +	rdlength := off1 - headerEnd
    +	if int(uint16(rdlength)) != rdlength { // overflow
    +		return headerEnd, len(msg), ErrRdata
    +	}
    +
    +	// The RDLENGTH field is the last field in the header and we set it here.
    +	binary.BigEndian.PutUint16(msg[headerEnd-2:], uint16(rdlength))
    +	return headerEnd, off1, nil
    +}
    +
    +// UnpackRR unpacks msg[off:] into an RR.
    +func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) {
    +	h, off, msg, err := unpackHeader(msg, off)
    +	if err != nil {
    +		return nil, len(msg), err
    +	}
    +
    +	return UnpackRRWithHeader(h, msg, off)
    +}
    +
    +// UnpackRRWithHeader unpacks the record type specific payload given an existing
    +// RR_Header.
    +func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) {
    +	if newFn, ok := TypeToRR[h.Rrtype]; ok {
    +		rr = newFn()
    +		*rr.Header() = h
    +	} else {
    +		rr = &RFC3597{Hdr: h}
    +	}
    +
    +	if noRdata(h) {
    +		return rr, off, nil
    +	}
    +
    +	end := off + int(h.Rdlength)
    +
    +	off, err = rr.unpack(msg, off)
    +	if err != nil {
    +		return nil, end, err
    +	}
    +	if off != end {
    +		return &h, end, &Error{err: "bad rdlength"}
    +	}
    +
    +	return rr, off, nil
    +}
    +
    +// unpackRRslice unpacks msg[off:] into an []RR.
    +// If we cannot unpack the whole array, then it will return nil
    +func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) {
    +	var r RR
    +	// Don't pre-allocate, l may be under attacker control
    +	var dst []RR
    +	for i := 0; i < l; i++ {
    +		off1 := off
    +		r, off, err = UnpackRR(msg, off)
    +		if err != nil {
    +			off = len(msg)
    +			break
    +		}
    +		// If offset does not increase anymore, l is a lie
    +		if off1 == off {
    +			l = i
    +			break
    +		}
    +		dst = append(dst, r)
    +	}
    +	if err != nil && off == len(msg) {
    +		dst = nil
    +	}
    +	return dst, off, err
    +}
    +
    +// Convert a MsgHdr to a string, with dig-like headers:
    +//
    +//;; opcode: QUERY, status: NOERROR, id: 48404
    +//
    +//;; flags: qr aa rd ra;
    +func (h *MsgHdr) String() string {
    +	if h == nil {
    +		return " MsgHdr"
    +	}
    +
    +	s := ";; opcode: " + OpcodeToString[h.Opcode]
    +	s += ", status: " + RcodeToString[h.Rcode]
    +	s += ", id: " + strconv.Itoa(int(h.Id)) + "\n"
    +
    +	s += ";; flags:"
    +	if h.Response {
    +		s += " qr"
    +	}
    +	if h.Authoritative {
    +		s += " aa"
    +	}
    +	if h.Truncated {
    +		s += " tc"
    +	}
    +	if h.RecursionDesired {
    +		s += " rd"
    +	}
    +	if h.RecursionAvailable {
    +		s += " ra"
    +	}
    +	if h.Zero { // Hmm
    +		s += " z"
    +	}
    +	if h.AuthenticatedData {
    +		s += " ad"
    +	}
    +	if h.CheckingDisabled {
    +		s += " cd"
    +	}
    +
    +	s += ";"
    +	return s
    +}
    +
+// Pack packs a Msg: it is converted to wire format.
+// If dns.Compress is true the message will be in compressed wire format.
    +func (dns *Msg) Pack() (msg []byte, err error) {
    +	return dns.PackBuffer(nil)
    +}
    +
    +// PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated.
    +func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
    +	// If this message can't be compressed, avoid filling the
    +	// compression map and creating garbage.
    +	if dns.Compress && dns.isCompressible() {
    +		compression := make(map[string]uint16) // Compression pointer mappings.
    +		return dns.packBufferWithCompressionMap(buf, compressionMap{int: compression}, true)
    +	}
    +
    +	return dns.packBufferWithCompressionMap(buf, compressionMap{}, false)
    +}
    +
    +// packBufferWithCompressionMap packs a Msg, using the given buffer buf.
    +func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression compressionMap, compress bool) (msg []byte, err error) {
    +	if dns.Rcode < 0 || dns.Rcode > 0xFFF {
    +		return nil, ErrRcode
    +	}
    +
+	// Set the extended rcode unconditionally if we have an OPT RR; this allows
+	// resetting the extended rcode bits if they need to be.
    +	if opt := dns.IsEdns0(); opt != nil {
    +		opt.SetExtendedRcode(uint16(dns.Rcode))
    +	} else if dns.Rcode > 0xF {
    +		// If Rcode is an extended one and opt is nil, error out.
    +		return nil, ErrExtendedRcode
    +	}
    +
    +	// Convert convenient Msg into wire-like Header.
    +	var dh Header
    +	dh.Id = dns.Id
    +	dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF)
    +	if dns.Response {
    +		dh.Bits |= _QR
    +	}
    +	if dns.Authoritative {
    +		dh.Bits |= _AA
    +	}
    +	if dns.Truncated {
    +		dh.Bits |= _TC
    +	}
    +	if dns.RecursionDesired {
    +		dh.Bits |= _RD
    +	}
    +	if dns.RecursionAvailable {
    +		dh.Bits |= _RA
    +	}
    +	if dns.Zero {
    +		dh.Bits |= _Z
    +	}
    +	if dns.AuthenticatedData {
    +		dh.Bits |= _AD
    +	}
    +	if dns.CheckingDisabled {
    +		dh.Bits |= _CD
    +	}
    +
    +	dh.Qdcount = uint16(len(dns.Question))
    +	dh.Ancount = uint16(len(dns.Answer))
    +	dh.Nscount = uint16(len(dns.Ns))
    +	dh.Arcount = uint16(len(dns.Extra))
    +
    +	// We need the uncompressed length here, because we first pack it and then compress it.
    +	msg = buf
    +	uncompressedLen := msgLenWithCompressionMap(dns, nil)
    +	if packLen := uncompressedLen + 1; len(msg) < packLen {
    +		msg = make([]byte, packLen)
    +	}
    +
    +	// Pack it in: header and then the pieces.
    +	off := 0
    +	off, err = dh.pack(msg, off, compression, compress)
    +	if err != nil {
    +		return nil, err
    +	}
    +	for _, r := range dns.Question {
    +		off, err = r.pack(msg, off, compression, compress)
    +		if err != nil {
    +			return nil, err
    +		}
    +	}
    +	for _, r := range dns.Answer {
    +		_, off, err = packRR(r, msg, off, compression, compress)
    +		if err != nil {
    +			return nil, err
    +		}
    +	}
    +	for _, r := range dns.Ns {
    +		_, off, err = packRR(r, msg, off, compression, compress)
    +		if err != nil {
    +			return nil, err
    +		}
    +	}
    +	for _, r := range dns.Extra {
    +		_, off, err = packRR(r, msg, off, compression, compress)
    +		if err != nil {
    +			return nil, err
    +		}
    +	}
    +	return msg[:off], nil
    +}
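
A usage sketch for the packing entry points above: build a query by hand (Question, TypeA and ClassINET come from elsewhere in the package), enable compression and convert the message to wire format:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.Id = dns.Id()
	m.RecursionDesired = true
	m.Question = []dns.Question{{Name: "www.miek.nl.", Qtype: dns.TypeA, Qclass: dns.ClassINET}}
	m.Compress = true // only matters once the message has something to compress

	wire, err := m.Pack()
	if err != nil {
		panic(err)
	}
	// Len estimates the size without allocating the packed buffer.
	fmt.Println(len(wire), m.Len())
}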
    +
    +func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) {
    +	// If we are at the end of the message we should return *just* the
    +	// header. This can still be useful to the caller. 9.9.9.9 sends these
    +	// when responding with REFUSED for instance.
    +	if off == len(msg) {
    +		// reset sections before returning
    +		dns.Question, dns.Answer, dns.Ns, dns.Extra = nil, nil, nil, nil
    +		return nil
    +	}
    +
    +	// Qdcount, Ancount, Nscount, Arcount can't be trusted, as they are
    +	// attacker controlled. This means we can't use them to pre-allocate
    +	// slices.
    +	dns.Question = nil
    +	for i := 0; i < int(dh.Qdcount); i++ {
    +		off1 := off
    +		var q Question
    +		q, off, err = unpackQuestion(msg, off)
    +		if err != nil {
    +			return err
    +		}
    +		if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie!
    +			dh.Qdcount = uint16(i)
    +			break
    +		}
    +		dns.Question = append(dns.Question, q)
    +	}
    +
    +	dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off)
    +	// The header counts might have been wrong so we need to update it
    +	dh.Ancount = uint16(len(dns.Answer))
    +	if err == nil {
    +		dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off)
    +	}
    +	// The header counts might have been wrong so we need to update it
    +	dh.Nscount = uint16(len(dns.Ns))
    +	if err == nil {
    +		dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off)
    +	}
    +	// The header counts might have been wrong so we need to update it
    +	dh.Arcount = uint16(len(dns.Extra))
    +
    +	// Set extended Rcode
    +	if opt := dns.IsEdns0(); opt != nil {
    +		dns.Rcode |= opt.ExtendedRcode()
    +	}
    +
    +	if off != len(msg) {
    +		// TODO(miek) make this an error?
    +		// use PackOpt to let people tell how detailed the error reporting should be?
    +		// println("dns: extra bytes in dns packet", off, "<", len(msg))
    +	}
    +	return err
    +
    +}
    +
    +// Unpack unpacks a binary message to a Msg structure.
    +func (dns *Msg) Unpack(msg []byte) (err error) {
    +	dh, off, err := unpackMsgHdr(msg, 0)
    +	if err != nil {
    +		return err
    +	}
    +
    +	dns.setHdr(dh)
    +	return dns.unpack(dh, msg, off)
    +}
    +
    +// Convert a complete message to a string with dig-like output.
    +func (dns *Msg) String() string {
    +	if dns == nil {
    +		return " MsgHdr"
    +	}
    +	s := dns.MsgHdr.String() + " "
    +	s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", "
    +	s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", "
    +	s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", "
    +	s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"
    +	if len(dns.Question) > 0 {
    +		s += "\n;; QUESTION SECTION:\n"
    +		for _, r := range dns.Question {
    +			s += r.String() + "\n"
    +		}
    +	}
    +	if len(dns.Answer) > 0 {
    +		s += "\n;; ANSWER SECTION:\n"
    +		for _, r := range dns.Answer {
    +			if r != nil {
    +				s += r.String() + "\n"
    +			}
    +		}
    +	}
    +	if len(dns.Ns) > 0 {
    +		s += "\n;; AUTHORITY SECTION:\n"
    +		for _, r := range dns.Ns {
    +			if r != nil {
    +				s += r.String() + "\n"
    +			}
    +		}
    +	}
    +	if len(dns.Extra) > 0 {
    +		s += "\n;; ADDITIONAL SECTION:\n"
    +		for _, r := range dns.Extra {
    +			if r != nil {
    +				s += r.String() + "\n"
    +			}
    +		}
    +	}
    +	return s
    +}
    +
    +// isCompressible returns whether the msg may be compressible.
    +func (dns *Msg) isCompressible() bool {
    +	// If we only have one question, there is nothing we can ever compress.
    +	return len(dns.Question) > 1 || len(dns.Answer) > 0 ||
    +		len(dns.Ns) > 0 || len(dns.Extra) > 0
    +}
    +
    +// Len returns the message length when in (un)compressed wire format.
+// If dns.Compress is true, compression is taken into account. Len()
+// is provided as a faster way to get the size of the resulting packet
+// than packing it, measuring the size and discarding the buffer.
    +func (dns *Msg) Len() int {
    +	// If this message can't be compressed, avoid filling the
    +	// compression map and creating garbage.
    +	if dns.Compress && dns.isCompressible() {
    +		compression := make(map[string]struct{})
    +		return msgLenWithCompressionMap(dns, compression)
    +	}
    +
    +	return msgLenWithCompressionMap(dns, nil)
    +}
    +
    +func msgLenWithCompressionMap(dns *Msg, compression map[string]struct{}) int {
    +	l := headerSize
    +
    +	for _, r := range dns.Question {
    +		l += r.len(l, compression)
    +	}
    +	for _, r := range dns.Answer {
    +		if r != nil {
    +			l += r.len(l, compression)
    +		}
    +	}
    +	for _, r := range dns.Ns {
    +		if r != nil {
    +			l += r.len(l, compression)
    +		}
    +	}
    +	for _, r := range dns.Extra {
    +		if r != nil {
    +			l += r.len(l, compression)
    +		}
    +	}
    +
    +	return l
    +}
    +
    +func domainNameLen(s string, off int, compression map[string]struct{}, compress bool) int {
    +	if s == "" || s == "." {
    +		return 1
    +	}
    +
    +	escaped := strings.Contains(s, "\\")
    +
    +	if compression != nil && (compress || off < maxCompressionOffset) {
    +		// compressionLenSearch will insert the entry into the compression
    +		// map if it doesn't contain it.
    +		if l, ok := compressionLenSearch(compression, s, off); ok && compress {
    +			if escaped {
    +				return escapedNameLen(s[:l]) + 2
    +			}
    +
    +			return l + 2
    +		}
    +	}
    +
    +	if escaped {
    +		return escapedNameLen(s) + 1
    +	}
    +
    +	return len(s) + 1
    +}
    +
    +func escapedNameLen(s string) int {
    +	nameLen := len(s)
    +	for i := 0; i < len(s); i++ {
    +		if s[i] != '\\' {
    +			continue
    +		}
    +
    +		if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
    +			nameLen -= 3
    +			i += 3
    +		} else {
    +			nameLen--
    +			i++
    +		}
    +	}
    +
    +	return nameLen
    +}
    +
    +func compressionLenSearch(c map[string]struct{}, s string, msgOff int) (int, bool) {
    +	for off, end := 0, false; !end; off, end = NextLabel(s, off) {
    +		if _, ok := c[s[off:]]; ok {
    +			return off, true
    +		}
    +
    +		if msgOff+off < maxCompressionOffset {
    +			c[s[off:]] = struct{}{}
    +		}
    +	}
    +
    +	return 0, false
    +}
    +
    +// Copy returns a new RR which is a deep-copy of r.
    +func Copy(r RR) RR { return r.copy() }
    +
    +// Len returns the length (in octets) of the uncompressed RR in wire format.
    +func Len(r RR) int { return r.len(0, nil) }
    +
    +// Copy returns a new *Msg which is a deep-copy of dns.
    +func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) }
    +
    +// CopyTo copies the contents to the provided message using a deep-copy and returns the copy.
    +func (dns *Msg) CopyTo(r1 *Msg) *Msg {
    +	r1.MsgHdr = dns.MsgHdr
    +	r1.Compress = dns.Compress
    +
    +	if len(dns.Question) > 0 {
    +		r1.Question = make([]Question, len(dns.Question))
    +		copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy
    +	}
    +
    +	rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra))
    +	r1.Answer, rrArr = rrArr[:0:len(dns.Answer)], rrArr[len(dns.Answer):]
    +	r1.Ns, rrArr = rrArr[:0:len(dns.Ns)], rrArr[len(dns.Ns):]
    +	r1.Extra = rrArr[:0:len(dns.Extra)]
    +
    +	for _, r := range dns.Answer {
    +		r1.Answer = append(r1.Answer, r.copy())
    +	}
    +
    +	for _, r := range dns.Ns {
    +		r1.Ns = append(r1.Ns, r.copy())
    +	}
    +
    +	for _, r := range dns.Extra {
    +		r1.Extra = append(r1.Extra, r.copy())
    +	}
    +
    +	return r1
    +}
    +
    +func (q *Question) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
    +	off, err := packDomainName(q.Name, msg, off, compression, compress)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(q.Qtype, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(q.Qclass, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func unpackQuestion(msg []byte, off int) (Question, int, error) {
    +	var (
    +		q   Question
    +		err error
    +	)
    +	q.Name, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return q, off, err
    +	}
    +	if off == len(msg) {
    +		return q, off, nil
    +	}
    +	q.Qtype, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return q, off, err
    +	}
    +	if off == len(msg) {
    +		return q, off, nil
    +	}
    +	q.Qclass, off, err = unpackUint16(msg, off)
    +	if off == len(msg) {
    +		return q, off, nil
    +	}
    +	return q, off, err
    +}
    +
    +func (dh *Header) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
    +	off, err := packUint16(dh.Id, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(dh.Bits, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(dh.Qdcount, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(dh.Ancount, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(dh.Nscount, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(dh.Arcount, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func unpackMsgHdr(msg []byte, off int) (Header, int, error) {
    +	var (
    +		dh  Header
    +		err error
    +	)
    +	dh.Id, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return dh, off, err
    +	}
    +	dh.Bits, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return dh, off, err
    +	}
    +	dh.Qdcount, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return dh, off, err
    +	}
    +	dh.Ancount, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return dh, off, err
    +	}
    +	dh.Nscount, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return dh, off, err
    +	}
    +	dh.Arcount, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return dh, off, err
    +	}
    +	return dh, off, nil
    +}
    +
+// setHdr sets the header in dns using the binary data in dh.
    +func (dns *Msg) setHdr(dh Header) {
    +	dns.Id = dh.Id
    +	dns.Response = dh.Bits&_QR != 0
    +	dns.Opcode = int(dh.Bits>>11) & 0xF
    +	dns.Authoritative = dh.Bits&_AA != 0
    +	dns.Truncated = dh.Bits&_TC != 0
    +	dns.RecursionDesired = dh.Bits&_RD != 0
    +	dns.RecursionAvailable = dh.Bits&_RA != 0
    +	dns.Zero = dh.Bits&_Z != 0 // _Z covers the zero bit, which should be zero; not sure why we set it to the opposite.
    +	dns.AuthenticatedData = dh.Bits&_AD != 0
    +	dns.CheckingDisabled = dh.Bits&_CD != 0
    +	dns.Rcode = int(dh.Bits & 0xF)
    +}
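
To close the loop on msg.go, a sketch that packs a query, unpacks the raw bytes into a fresh Msg and reads back the header fields that setHdr decodes:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	q := new(dns.Msg)
	q.Id = dns.Id()
	q.RecursionDesired = true
	q.Question = []dns.Question{{Name: "example.org.", Qtype: dns.TypeSOA, Qclass: dns.ClassINET}}

	wire, err := q.Pack()
	if err != nil {
		panic(err)
	}

	var m dns.Msg
	if err := m.Unpack(wire); err != nil {
		panic(err)
	}
	// Header bits survive the round trip; Rcode 0 maps to "NOERROR".
	fmt.Println(m.Id == q.Id, m.RecursionDesired, dns.RcodeToString[m.Rcode], len(m.Question))
	// Output: true true NOERROR 1
}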
    diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go
    new file mode 100644
    index 000000000000..cb4ae764b4f4
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/msg_helpers.go
    @@ -0,0 +1,671 @@
    +package dns
    +
    +import (
    +	"encoding/base32"
    +	"encoding/base64"
    +	"encoding/binary"
    +	"encoding/hex"
    +	"net"
    +	"strings"
    +)
    +
    +// helper functions called from the generated zmsg.go
    +
+// These functions are named after the tag to help pack/unpack; if there is no tag it is the name
+// of the type they pack/unpack (string, int, etc.). We prefix all of them with unpackData or packData,
+// e.g. packDataA or packDataDomainName.
    +
    +func unpackDataA(msg []byte, off int) (net.IP, int, error) {
    +	if off+net.IPv4len > len(msg) {
    +		return nil, len(msg), &Error{err: "overflow unpacking a"}
    +	}
    +	a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...)
    +	off += net.IPv4len
    +	return a, off, nil
    +}
    +
    +func packDataA(a net.IP, msg []byte, off int) (int, error) {
    +	switch len(a) {
    +	case net.IPv4len, net.IPv6len:
+		// It must be an IPv4 address: even when given in the 16-byte form, only the 4-byte To4 representation is encoded.
    +		if off+net.IPv4len > len(msg) {
    +			return len(msg), &Error{err: "overflow packing a"}
    +		}
    +
    +		copy(msg[off:], a.To4())
    +		off += net.IPv4len
    +	case 0:
    +		// Allowed, for dynamic updates.
    +	default:
    +		return len(msg), &Error{err: "overflow packing a"}
    +	}
    +	return off, nil
    +}
    +
    +func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
    +	if off+net.IPv6len > len(msg) {
    +		return nil, len(msg), &Error{err: "overflow unpacking aaaa"}
    +	}
    +	aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...)
    +	off += net.IPv6len
    +	return aaaa, off, nil
    +}
    +
    +func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
    +	switch len(aaaa) {
    +	case net.IPv6len:
    +		if off+net.IPv6len > len(msg) {
    +			return len(msg), &Error{err: "overflow packing aaaa"}
    +		}
    +
    +		copy(msg[off:], aaaa)
    +		off += net.IPv6len
    +	case 0:
    +		// Allowed, dynamic updates.
    +	default:
    +		return len(msg), &Error{err: "overflow packing aaaa"}
    +	}
    +	return off, nil
    +}
    +
    +// unpackHeader unpacks an RR header, returning the offset to the end of the header and a
    +// re-sliced msg according to the expected length of the RR.
    +func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) {
    +	hdr := RR_Header{}
    +	if off == len(msg) {
    +		return hdr, off, msg, nil
    +	}
    +
    +	hdr.Name, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return hdr, len(msg), msg, err
    +	}
    +	hdr.Rrtype, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return hdr, len(msg), msg, err
    +	}
    +	hdr.Class, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return hdr, len(msg), msg, err
    +	}
    +	hdr.Ttl, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return hdr, len(msg), msg, err
    +	}
    +	hdr.Rdlength, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return hdr, len(msg), msg, err
    +	}
    +	msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
    +	return hdr, off, msg, err
    +}
    +
    +// packHeader packs an RR header, returning the offset to the end of the header.
    +// See PackDomainName for documentation about the compression.
    +func (hdr RR_Header) packHeader(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +
    +	off, err := packDomainName(hdr.Name, msg, off, compression, compress)
    +	if err != nil {
    +		return len(msg), err
    +	}
    +	off, err = packUint16(hdr.Rrtype, msg, off)
    +	if err != nil {
    +		return len(msg), err
    +	}
    +	off, err = packUint16(hdr.Class, msg, off)
    +	if err != nil {
    +		return len(msg), err
    +	}
    +	off, err = packUint32(hdr.Ttl, msg, off)
    +	if err != nil {
    +		return len(msg), err
    +	}
    +	off, err = packUint16(0, msg, off) // The RDLENGTH field will be set later in packRR.
    +	if err != nil {
    +		return len(msg), err
    +	}
    +	return off, nil
    +}
    +
+// Helpers for the helper functions above.
    +
+// truncateMsgFromRdlength truncates msg to match the expected length of the RR.
    +// Returns an error if msg is smaller than the expected size.
    +func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) {
    +	lenrd := off + int(rdlength)
    +	if lenrd > len(msg) {
    +		return msg, &Error{err: "overflowing header size"}
    +	}
    +	return msg[:lenrd], nil
    +}
    +
    +var base32HexNoPadEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
    +
    +func fromBase32(s []byte) (buf []byte, err error) {
    +	for i, b := range s {
    +		if b >= 'a' && b <= 'z' {
    +			s[i] = b - 32
    +		}
    +	}
    +	buflen := base32HexNoPadEncoding.DecodedLen(len(s))
    +	buf = make([]byte, buflen)
    +	n, err := base32HexNoPadEncoding.Decode(buf, s)
    +	buf = buf[:n]
    +	return
    +}
    +
    +func toBase32(b []byte) string {
    +	return base32HexNoPadEncoding.EncodeToString(b)
    +}
    +
    +func fromBase64(s []byte) (buf []byte, err error) {
    +	buflen := base64.StdEncoding.DecodedLen(len(s))
    +	buf = make([]byte, buflen)
    +	n, err := base64.StdEncoding.Decode(buf, s)
    +	buf = buf[:n]
    +	return
    +}
    +
    +func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) }
    +
+// noRdata returns true if the Rdlength is zero, as is the case for dynamic update RRs.
    +func noRdata(h RR_Header) bool { return h.Rdlength == 0 }
    +
    +func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) {
    +	if off+1 > len(msg) {
    +		return 0, len(msg), &Error{err: "overflow unpacking uint8"}
    +	}
    +	return msg[off], off + 1, nil
    +}
    +
    +func packUint8(i uint8, msg []byte, off int) (off1 int, err error) {
    +	if off+1 > len(msg) {
    +		return len(msg), &Error{err: "overflow packing uint8"}
    +	}
    +	msg[off] = i
    +	return off + 1, nil
    +}
    +
    +func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) {
    +	if off+2 > len(msg) {
    +		return 0, len(msg), &Error{err: "overflow unpacking uint16"}
    +	}
    +	return binary.BigEndian.Uint16(msg[off:]), off + 2, nil
    +}
    +
    +func packUint16(i uint16, msg []byte, off int) (off1 int, err error) {
    +	if off+2 > len(msg) {
    +		return len(msg), &Error{err: "overflow packing uint16"}
    +	}
    +	binary.BigEndian.PutUint16(msg[off:], i)
    +	return off + 2, nil
    +}
    +
    +func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) {
    +	if off+4 > len(msg) {
    +		return 0, len(msg), &Error{err: "overflow unpacking uint32"}
    +	}
    +	return binary.BigEndian.Uint32(msg[off:]), off + 4, nil
    +}
    +
    +func packUint32(i uint32, msg []byte, off int) (off1 int, err error) {
    +	if off+4 > len(msg) {
    +		return len(msg), &Error{err: "overflow packing uint32"}
    +	}
    +	binary.BigEndian.PutUint32(msg[off:], i)
    +	return off + 4, nil
    +}
    +
    +func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) {
    +	if off+6 > len(msg) {
    +		return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"}
    +	}
    +	// Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
    +	i = uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
    +		uint64(msg[off+4])<<8 | uint64(msg[off+5])
    +	off += 6
    +	return i, off, nil
    +}
    +
    +func packUint48(i uint64, msg []byte, off int) (off1 int, err error) {
    +	if off+6 > len(msg) {
    +		return len(msg), &Error{err: "overflow packing uint64 as uint48"}
    +	}
    +	msg[off] = byte(i >> 40)
    +	msg[off+1] = byte(i >> 32)
    +	msg[off+2] = byte(i >> 24)
    +	msg[off+3] = byte(i >> 16)
    +	msg[off+4] = byte(i >> 8)
    +	msg[off+5] = byte(i)
    +	off += 6
    +	return off, nil
    +}
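
The 48-bit helpers above are unexported, but the byte layout they implement is easy to show standalone. A sketch of the big-endian 6-octet encoding used for TSIG time values:

package main

import "fmt"

func main() {
	const ts uint64 = 0x0123456789AB // any value that fits in 48 bits

	// Encode: most significant of the six octets first, as packUint48 does.
	var b [6]byte
	b[0] = byte(ts >> 40)
	b[1] = byte(ts >> 32)
	b[2] = byte(ts >> 24)
	b[3] = byte(ts >> 16)
	b[4] = byte(ts >> 8)
	b[5] = byte(ts)

	// Decode: mirror of unpackUint48.
	got := uint64(b[0])<<40 | uint64(b[1])<<32 | uint64(b[2])<<24 |
		uint64(b[3])<<16 | uint64(b[4])<<8 | uint64(b[5])

	fmt.Printf("% x -> %#x (round trip ok: %v)\n", b, got, got == ts)
}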
    +
    +func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) {
    +	if off+8 > len(msg) {
    +		return 0, len(msg), &Error{err: "overflow unpacking uint64"}
    +	}
    +	return binary.BigEndian.Uint64(msg[off:]), off + 8, nil
    +}
    +
    +func packUint64(i uint64, msg []byte, off int) (off1 int, err error) {
    +	if off+8 > len(msg) {
    +		return len(msg), &Error{err: "overflow packing uint64"}
    +	}
    +	binary.BigEndian.PutUint64(msg[off:], i)
    +	off += 8
    +	return off, nil
    +}
    +
    +func unpackString(msg []byte, off int) (string, int, error) {
    +	if off+1 > len(msg) {
    +		return "", off, &Error{err: "overflow unpacking txt"}
    +	}
    +	l := int(msg[off])
    +	if off+l+1 > len(msg) {
    +		return "", off, &Error{err: "overflow unpacking txt"}
    +	}
    +	var s strings.Builder
    +	s.Grow(l)
    +	for _, b := range msg[off+1 : off+1+l] {
    +		switch {
    +		case b == '"' || b == '\\':
    +			s.WriteByte('\\')
    +			s.WriteByte(b)
    +		case b < ' ' || b > '~': // unprintable
    +			s.WriteString(escapeByte(b))
    +		default:
    +			s.WriteByte(b)
    +		}
    +	}
    +	off += 1 + l
    +	return s.String(), off, nil
    +}
    +
    +func packString(s string, msg []byte, off int) (int, error) {
    +	txtTmp := make([]byte, 256*4+1)
    +	off, err := packTxtString(s, msg, off, txtTmp)
    +	if err != nil {
    +		return len(msg), err
    +	}
    +	return off, nil
    +}
    +
    +func unpackStringBase32(msg []byte, off, end int) (string, int, error) {
    +	if end > len(msg) {
    +		return "", len(msg), &Error{err: "overflow unpacking base32"}
    +	}
    +	s := toBase32(msg[off:end])
    +	return s, end, nil
    +}
    +
    +func packStringBase32(s string, msg []byte, off int) (int, error) {
    +	b32, err := fromBase32([]byte(s))
    +	if err != nil {
    +		return len(msg), err
    +	}
    +	if off+len(b32) > len(msg) {
    +		return len(msg), &Error{err: "overflow packing base32"}
    +	}
    +	copy(msg[off:off+len(b32)], b32)
    +	off += len(b32)
    +	return off, nil
    +}
    +
    +func unpackStringBase64(msg []byte, off, end int) (string, int, error) {
    +	// Rest of the RR is base64 encoded value, so we don't need an explicit length
    +	// to be set. Thus far all RRs that have base64 encoded fields have those as their
    +	// last one. What we do need is the end of the RR!
    +	if end > len(msg) {
    +		return "", len(msg), &Error{err: "overflow unpacking base64"}
    +	}
    +	s := toBase64(msg[off:end])
    +	return s, end, nil
    +}
    +
    +func packStringBase64(s string, msg []byte, off int) (int, error) {
    +	b64, err := fromBase64([]byte(s))
    +	if err != nil {
    +		return len(msg), err
    +	}
    +	if off+len(b64) > len(msg) {
    +		return len(msg), &Error{err: "overflow packing base64"}
    +	}
    +	copy(msg[off:off+len(b64)], b64)
    +	off += len(b64)
    +	return off, nil
    +}
    +
    +func unpackStringHex(msg []byte, off, end int) (string, int, error) {
    +	// Rest of the RR is hex encoded value, so we don't need an explicit length
    +	// to be set. NSEC and TSIG have hex fields with a length field.
    +	// What we do need is the end of the RR!
    +	if end > len(msg) {
    +		return "", len(msg), &Error{err: "overflow unpacking hex"}
    +	}
    +
    +	s := hex.EncodeToString(msg[off:end])
    +	return s, end, nil
    +}
    +
    +func packStringHex(s string, msg []byte, off int) (int, error) {
    +	h, err := hex.DecodeString(s)
    +	if err != nil {
    +		return len(msg), err
    +	}
    +	if off+len(h) > len(msg) {
    +		return len(msg), &Error{err: "overflow packing hex"}
    +	}
    +	copy(msg[off:off+len(h)], h)
    +	off += len(h)
    +	return off, nil
    +}
    +
    +func unpackStringAny(msg []byte, off, end int) (string, int, error) {
    +	if end > len(msg) {
    +		return "", len(msg), &Error{err: "overflow unpacking anything"}
    +	}
    +	return string(msg[off:end]), end, nil
    +}
    +
    +func packStringAny(s string, msg []byte, off int) (int, error) {
    +	if off+len(s) > len(msg) {
    +		return len(msg), &Error{err: "overflow packing anything"}
    +	}
    +	copy(msg[off:off+len(s)], s)
    +	off += len(s)
    +	return off, nil
    +}
    +
    +func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
    +	txt, off, err := unpackTxt(msg, off)
    +	if err != nil {
    +		return nil, len(msg), err
    +	}
    +	return txt, off, nil
    +}
    +
    +func packStringTxt(s []string, msg []byte, off int) (int, error) {
    +	txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many.
    +	off, err := packTxt(s, msg, off, txtTmp)
    +	if err != nil {
    +		return len(msg), err
    +	}
    +	return off, nil
    +}
    +
    +func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
    +	var edns []EDNS0
    +Option:
    +	var code uint16
    +	if off+4 > len(msg) {
    +		return nil, len(msg), &Error{err: "overflow unpacking opt"}
    +	}
    +	code = binary.BigEndian.Uint16(msg[off:])
    +	off += 2
    +	optlen := binary.BigEndian.Uint16(msg[off:])
    +	off += 2
    +	if off+int(optlen) > len(msg) {
    +		return nil, len(msg), &Error{err: "overflow unpacking opt"}
    +	}
    +	switch code {
    +	case EDNS0NSID:
    +		e := new(EDNS0_NSID)
    +		if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
    +			return nil, len(msg), err
    +		}
    +		edns = append(edns, e)
    +		off += int(optlen)
    +	case EDNS0SUBNET:
    +		e := new(EDNS0_SUBNET)
    +		if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
    +			return nil, len(msg), err
    +		}
    +		edns = append(edns, e)
    +		off += int(optlen)
    +	case EDNS0COOKIE:
    +		e := new(EDNS0_COOKIE)
    +		if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
    +			return nil, len(msg), err
    +		}
    +		edns = append(edns, e)
    +		off += int(optlen)
    +	case EDNS0UL:
    +		e := new(EDNS0_UL)
    +		if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
    +			return nil, len(msg), err
    +		}
    +		edns = append(edns, e)
    +		off += int(optlen)
    +	case EDNS0LLQ:
    +		e := new(EDNS0_LLQ)
    +		if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
    +			return nil, len(msg), err
    +		}
    +		edns = append(edns, e)
    +		off += int(optlen)
    +	case EDNS0DAU:
    +		e := new(EDNS0_DAU)
    +		if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
    +			return nil, len(msg), err
    +		}
    +		edns = append(edns, e)
    +		off += int(optlen)
    +	case EDNS0DHU:
    +		e := new(EDNS0_DHU)
    +		if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
    +			return nil, len(msg), err
    +		}
    +		edns = append(edns, e)
    +		off += int(optlen)
    +	case EDNS0N3U:
    +		e := new(EDNS0_N3U)
    +		if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
    +			return nil, len(msg), err
    +		}
    +		edns = append(edns, e)
    +		off += int(optlen)
    +	case EDNS0PADDING:
    +		e := new(EDNS0_PADDING)
    +		if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
    +			return nil, len(msg), err
    +		}
    +		edns = append(edns, e)
    +		off += int(optlen)
    +	default:
    +		e := new(EDNS0_LOCAL)
    +		e.Code = code
    +		if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
    +			return nil, len(msg), err
    +		}
    +		edns = append(edns, e)
    +		off += int(optlen)
    +	}
    +
    +	if off < len(msg) {
    +		goto Option
    +	}
    +
    +	return edns, off, nil
    +}
    +
    +func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
    +	for _, el := range options {
    +		b, err := el.pack()
    +		if err != nil || off+4 > len(msg) {
    +			return len(msg), &Error{err: "overflow packing opt"}
    +		}
    +		binary.BigEndian.PutUint16(msg[off:], el.Option())      // Option code
    +		binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
    +		off += 4
    +		if off+len(b) > len(msg) {
    +			copy(msg[off:], b)
    +			off = len(msg)
    +			continue
    +		}
    +		// Actual data
    +		copy(msg[off:off+len(b)], b)
    +		off += len(b)
    +	}
    +	return off, nil
    +}
    +
    +func unpackStringOctet(msg []byte, off int) (string, int, error) {
    +	s := string(msg[off:])
    +	return s, len(msg), nil
    +}
    +
    +func packStringOctet(s string, msg []byte, off int) (int, error) {
    +	txtTmp := make([]byte, 256*4+1)
    +	off, err := packOctetString(s, msg, off, txtTmp)
    +	if err != nil {
    +		return len(msg), err
    +	}
    +	return off, nil
    +}
    +
    +func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
    +	var nsec []uint16
    +	length, window, lastwindow := 0, 0, -1
    +	for off < len(msg) {
    +		if off+2 > len(msg) {
    +			return nsec, len(msg), &Error{err: "overflow unpacking nsecx"}
    +		}
    +		window = int(msg[off])
    +		length = int(msg[off+1])
    +		off += 2
    +		if window <= lastwindow {
    +			// RFC 4034: Blocks are present in the NSEC RR RDATA in
    +			// increasing numerical order.
    +			return nsec, len(msg), &Error{err: "out of order NSEC block"}
    +		}
    +		if length == 0 {
    +			// RFC 4034: Blocks with no types present MUST NOT be included.
    +			return nsec, len(msg), &Error{err: "empty NSEC block"}
    +		}
    +		if length > 32 {
    +			return nsec, len(msg), &Error{err: "NSEC block too long"}
    +		}
    +		if off+length > len(msg) {
    +			return nsec, len(msg), &Error{err: "overflowing NSEC block"}
    +		}
    +
    +		// Walk the bytes in the window and extract the type bits
    +		for j, b := range msg[off : off+length] {
    +			// Check the bits one by one, and set the type
    +			if b&0x80 == 0x80 {
    +				nsec = append(nsec, uint16(window*256+j*8+0))
    +			}
    +			if b&0x40 == 0x40 {
    +				nsec = append(nsec, uint16(window*256+j*8+1))
    +			}
    +			if b&0x20 == 0x20 {
    +				nsec = append(nsec, uint16(window*256+j*8+2))
    +			}
    +			if b&0x10 == 0x10 {
    +				nsec = append(nsec, uint16(window*256+j*8+3))
    +			}
    +			if b&0x8 == 0x8 {
    +				nsec = append(nsec, uint16(window*256+j*8+4))
    +			}
    +			if b&0x4 == 0x4 {
    +				nsec = append(nsec, uint16(window*256+j*8+5))
    +			}
    +			if b&0x2 == 0x2 {
    +				nsec = append(nsec, uint16(window*256+j*8+6))
    +			}
    +			if b&0x1 == 0x1 {
    +				nsec = append(nsec, uint16(window*256+j*8+7))
    +			}
    +		}
    +		off += length
    +		lastwindow = window
    +	}
    +	return nsec, off, nil
    +}
    +
    +// typeBitMapLen is a helper function which computes the "maximum" length of
    +// the NSEC Type BitMap field.
    +func typeBitMapLen(bitmap []uint16) int {
    +	var l int
    +	var lastwindow, lastlength uint16
    +	for _, t := range bitmap {
    +		window := t / 256
    +		length := (t-window*256)/8 + 1
    +		if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
    +			l += int(lastlength) + 2
    +			lastlength = 0
    +		}
    +		if window < lastwindow || length < lastlength {
    +			// packDataNsec would return Error{err: "nsec bits out of order"} here, but
    +			// when computing the length, we want to be liberal.
    +			continue
    +		}
    +		lastwindow, lastlength = window, length
    +	}
    +	l += int(lastlength) + 2
    +	return l
    +}
    +
    +func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
    +	if len(bitmap) == 0 {
    +		return off, nil
    +	}
    +	var lastwindow, lastlength uint16
    +	for _, t := range bitmap {
    +		window := t / 256
    +		length := (t-window*256)/8 + 1
    +		if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
    +			off += int(lastlength) + 2
    +			lastlength = 0
    +		}
    +		if window < lastwindow || length < lastlength {
    +			return len(msg), &Error{err: "nsec bits out of order"}
    +		}
    +		if off+2+int(length) > len(msg) {
    +			return len(msg), &Error{err: "overflow packing nsec"}
    +		}
    +		// Setting the window #
    +		msg[off] = byte(window)
    +		// Setting the octets length
    +		msg[off+1] = byte(length)
    +		// Setting the bit value for the type in the right octet
    +		msg[off+1+int(length)] |= byte(1 << (7 - t%8))
    +		lastwindow, lastlength = window, length
    +	}
    +	off += int(lastlength) + 2
    +	return off, nil
    +}
    +
    +func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
    +	var (
    +		servers []string
    +		s       string
    +		err     error
    +	)
    +	if end > len(msg) {
    +		return nil, len(msg), &Error{err: "overflow unpacking domain names"}
    +	}
    +	for off < end {
    +		s, off, err = UnpackDomainName(msg, off)
    +		if err != nil {
    +			return servers, len(msg), err
    +		}
    +		servers = append(servers, s)
    +	}
    +	return servers, off, nil
    +}
    +
    +func packDataDomainNames(names []string, msg []byte, off int, compression compressionMap, compress bool) (int, error) {
    +	var err error
    +	for _, name := range names {
    +		off, err = packDomainName(name, msg, off, compression, compress)
    +		if err != nil {
    +			return len(msg), err
    +		}
    +	}
    +	return off, nil
    +}
    diff --git a/vendor/github.com/miekg/dns/msg_truncate.go b/vendor/github.com/miekg/dns/msg_truncate.go
    new file mode 100644
    index 000000000000..89d40757dbdd
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/msg_truncate.go
    @@ -0,0 +1,111 @@
    +package dns
    +
    +// Truncate ensures the reply message will fit into the requested buffer
    +// size by removing records that exceed the requested size.
    +//
    +// It will first check if the reply fits without compression and then with
    +// compression. If it won't fit with compression, Truncate then walks the
    +// records, adding as many as possible without exceeding the
    +// requested buffer size.
    +//
    +// The TC bit will be set if any records were excluded from the message.
    +// This indicates to the client that it should retry over TCP.
    +//
    +// According to RFC 2181, the TC bit should only be set if not all of the
    +// "required" RRs can be included in the response. Unfortunately, we have
    +// no way of knowing which RRs are required so we set the TC bit if any RR
    +// had to be omitted from the response.
    +//
    +// The appropriate buffer size can be retrieved from the request's OPT
    +// record, if present, and is transport specific otherwise. dns.MinMsgSize
    +// should be used for UDP requests without an OPT record, and
    +// dns.MaxMsgSize for TCP requests without an OPT record.
    +func (dns *Msg) Truncate(size int) {
    +	if dns.IsTsig() != nil {
    +		// To simplify this implementation, we don't perform
    +		// truncation on responses with a TSIG record.
    +		return
    +	}
    +
    +	// RFC 6891 mandates that the payload size in an OPT record
    +	// less than 512 bytes must be treated as equal to 512 bytes.
    +	//
    +	// For ease of use, we impose that restriction here.
    +	if size < 512 {
    +		size = 512
    +	}
    +
    +	l := msgLenWithCompressionMap(dns, nil) // uncompressed length
    +	if l <= size {
    +		// Don't waste effort compressing this message.
    +		dns.Compress = false
    +		return
    +	}
    +
    +	dns.Compress = true
    +
    +	edns0 := dns.popEdns0()
    +	if edns0 != nil {
    +		// Account for the OPT record that gets added at the end,
    +		// by subtracting that length from our budget.
    +		//
    +		// The EDNS(0) OPT record must have the root domain and
    +		// its length is thus unaffected by compression.
    +		size -= Len(edns0)
    +	}
    +
    +	compression := make(map[string]struct{})
    +
    +	l = headerSize
    +	for _, r := range dns.Question {
    +		l += r.len(l, compression)
    +	}
    +
    +	var numAnswer int
    +	if l < size {
    +		l, numAnswer = truncateLoop(dns.Answer, size, l, compression)
    +	}
    +
    +	var numNS int
    +	if l < size {
    +		l, numNS = truncateLoop(dns.Ns, size, l, compression)
    +	}
    +
    +	var numExtra int
    +	if l < size {
    +		l, numExtra = truncateLoop(dns.Extra, size, l, compression)
    +	}
    +
    +	// See the function documentation for when we set this.
    +	dns.Truncated = len(dns.Answer) > numAnswer ||
    +		len(dns.Ns) > numNS || len(dns.Extra) > numExtra
    +
    +	dns.Answer = dns.Answer[:numAnswer]
    +	dns.Ns = dns.Ns[:numNS]
    +	dns.Extra = dns.Extra[:numExtra]
    +
    +	if edns0 != nil {
    +		// Add the OPT record back onto the additional section.
    +		dns.Extra = append(dns.Extra, edns0)
    +	}
    +}
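Editorial note, not part of the vendored file: a minimal usage sketch of Msg.Truncate, assuming a handler that has built a reply for a UDP request. The function name truncateForClient is illustrative; the size is taken from the client's EDNS(0) OPT record when present, otherwise dns.MinMsgSize as the doc comment above suggests.

package main

import "github.com/miekg/dns"

// truncateForClient picks a reply budget from the request and truncates the reply.
func truncateForClient(req, reply *dns.Msg) {
	size := dns.MinMsgSize // plain UDP request without an OPT record
	if opt := req.IsEdns0(); opt != nil {
		size = int(opt.UDPSize())
	}
	reply.Truncate(size) // sets the TC bit if any records had to be dropped
}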
    +
    +func truncateLoop(rrs []RR, size, l int, compression map[string]struct{}) (int, int) {
    +	for i, r := range rrs {
    +		if r == nil {
    +			continue
    +		}
    +
    +		l += r.len(l, compression)
    +		if l > size {
    +			// Return size, rather than l prior to this record,
    +			// to prevent any further records being added.
    +			return size, i
    +		}
    +		if l == size {
    +			return l, i + 1
    +		}
    +	}
    +
    +	return l, len(rrs)
    +}
    diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go
    new file mode 100644
    index 000000000000..8f071a47399d
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/nsecx.go
    @@ -0,0 +1,95 @@
    +package dns
    +
    +import (
    +	"crypto/sha1"
    +	"encoding/hex"
    +	"strings"
    +)
    +
    +// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase.
    +func HashName(label string, ha uint8, iter uint16, salt string) string {
    +	if ha != SHA1 {
    +		return ""
    +	}
    +
    +	wireSalt := make([]byte, hex.DecodedLen(len(salt)))
    +	n, err := packStringHex(salt, wireSalt, 0)
    +	if err != nil {
    +		return ""
    +	}
    +	wireSalt = wireSalt[:n]
    +
    +	name := make([]byte, 255)
    +	off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
    +	if err != nil {
    +		return ""
    +	}
    +	name = name[:off]
    +
    +	s := sha1.New()
    +	// k = 0
    +	s.Write(name)
    +	s.Write(wireSalt)
    +	nsec3 := s.Sum(nil)
    +
    +	// k > 0
    +	for k := uint16(0); k < iter; k++ {
    +		s.Reset()
    +		s.Write(nsec3)
    +		s.Write(wireSalt)
    +		nsec3 = s.Sum(nsec3[:0])
    +	}
    +
    +	return toBase32(nsec3)
    +}
    +
    +// Cover returns true if a name is covered by the NSEC3 record
    +func (rr *NSEC3) Cover(name string) bool {
    +	nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
    +	owner := strings.ToUpper(rr.Hdr.Name)
    +	labelIndices := Split(owner)
    +	if len(labelIndices) < 2 {
    +		return false
    +	}
    +	ownerHash := owner[:labelIndices[1]-1]
    +	ownerZone := owner[labelIndices[1]:]
    +	if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
    +		return false
    +	}
    +
    +	nextHash := rr.NextDomain
    +
    +	// If the interval is empty (owner hash equals next hash), the name is covered unless nameHash equals ownerHash.
    +	if ownerHash == nextHash && nameHash != ownerHash { // empty interval
    +		return true
    +	}
    +	if ownerHash > nextHash { // end of zone
    +		if nameHash > ownerHash { // covered since there is nothing after ownerHash
    +			return true
    +		}
    +		return nameHash < nextHash // if nameHash is before beginning of zone it is covered
    +	}
    +	if nameHash < ownerHash { // nameHash is before ownerHash, not covered
    +		return false
    +	}
    +	return nameHash < nextHash // if nameHash is before nextHash it is covered (between ownerHash and nextHash)
    +}
    +
    +// Match returns true if a name matches the NSEC3 record
    +func (rr *NSEC3) Match(name string) bool {
    +	nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
    +	owner := strings.ToUpper(rr.Hdr.Name)
    +	labelIndices := Split(owner)
    +	if len(labelIndices) < 2 {
    +		return false
    +	}
    +	ownerHash := owner[:labelIndices[1]-1]
    +	ownerZone := owner[labelIndices[1]:]
    +	if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
    +		return false
    +	}
    +	if ownerHash == nameHash {
    +		return true
    +	}
    +	return false
    +}
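Editorial sketch, not part of the vendored file: how these NSEC3 helpers are typically used. The zone name, iteration count, and salt below are made-up values; Cover and Match hash the query name internally, so callers pass the plain name.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// HashName returns the uppercase Base32 hash that forms the first label of an
	// NSEC3 owner name; for SHA-1 the result is always 32 characters long.
	h := dns.HashName("www.example.org.", dns.SHA1, 10, "AABBCCDD")
	fmt.Println(h)

	// Given a *dns.NSEC3 taken from a response (say, nsec3), callers would then use
	// nsec3.Match(qname) for an exact match, or nsec3.Cover(qname) to test whether
	// qname's hash falls between the owner hash and NextDomain.
}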
    diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go
    new file mode 100644
    index 000000000000..e28f06637408
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/privaterr.go
    @@ -0,0 +1,114 @@
    +package dns
    +
    +import "strings"
    +
    +// PrivateRdata is an interface used for implementing "Private Use" RR types, see
    +// RFC 6895. This allows one to experiment with new RR types, without requesting an
    +// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
    +type PrivateRdata interface {
    +	// String returns the text presentation of the Rdata of the Private RR.
    +	String() string
    +	// Parse parses the Rdata of the private RR.
    +	Parse([]string) error
    +	// Pack is used when packing a private RR into a buffer.
    +	Pack([]byte) (int, error)
    +	// Unpack is used when unpacking a private RR from a buffer.
    +	// TODO(miek): diff. signature than Pack, see edns0.go for instance.
    +	Unpack([]byte) (int, error)
    +	// Copy copies the Rdata into the PrivateRdata argument.
    +	Copy(PrivateRdata) error
    +	// Len returns the length in octets of the Rdata.
    +	Len() int
    +}
    +
    +// PrivateRR represents an RR that uses a PrivateRdata user-defined type.
    +// It mocks normal RRs and implements dns.RR interface.
    +type PrivateRR struct {
    +	Hdr  RR_Header
    +	Data PrivateRdata
    +
    +	generator func() PrivateRdata // for copy
    +}
    +
    +// Header returns the RR header of r.
    +func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
    +
    +func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
    +
    +// Private len and copy parts to satisfy RR interface.
    +func (r *PrivateRR) len(off int, compression map[string]struct{}) int {
    +	l := r.Hdr.len(off, compression)
    +	l += r.Data.Len()
    +	return l
    +}
    +
    +func (r *PrivateRR) copy() RR {
    +	// make new RR like this:
    +	rr := &PrivateRR{r.Hdr, r.generator(), r.generator}
    +
    +	if err := r.Data.Copy(rr.Data); err != nil {
    +		panic("dns: got value that could not be used to copy Private rdata: " + err.Error())
    +	}
    +
    +	return rr
    +}
    +
    +func (r *PrivateRR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
    +	n, err := r.Data.Pack(msg[off:])
    +	if err != nil {
    +		return len(msg), err
    +	}
    +	off += n
    +	return off, nil
    +}
    +
    +func (r *PrivateRR) unpack(msg []byte, off int) (int, error) {
    +	off1, err := r.Data.Unpack(msg[off:])
    +	off += off1
    +	return off, err
    +}
    +
    +func (r *PrivateRR) parse(c *zlexer, origin string) *ParseError {
    +	var l lex
    +	text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
    +Fetch:
    +	for {
    +		// TODO(miek): we could also be returning _QUOTE, this might or might not
    +		// be an issue (basically parsing TXT becomes hard)
    +		switch l, _ = c.Next(); l.value {
    +		case zNewline, zEOF:
    +			break Fetch
    +		case zString:
    +			text = append(text, l.token)
    +		}
    +	}
    +
    +	err := r.Data.Parse(text)
    +	if err != nil {
    +		return &ParseError{"", err.Error(), l}
    +	}
    +
    +	return nil
    +}
    +
    +func (r1 *PrivateRR) isDuplicate(r2 RR) bool { return false }
    +
    +// PrivateHandle registers a private resource record type. It requires
    +// string and numeric representation of private RR type and generator function as argument.
    +func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
    +	rtypestr = strings.ToUpper(rtypestr)
    +
    +	TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator(), generator} }
    +	TypeToString[rtype] = rtypestr
    +	StringToType[rtypestr] = rtype
    +}
    +
    +// PrivateHandleRemove removes definitions required to support private RR type.
    +func PrivateHandleRemove(rtype uint16) {
    +	rtypestr, ok := TypeToString[rtype]
    +	if ok {
    +		delete(TypeToRR, rtype)
    +		delete(TypeToString, rtype)
    +		delete(StringToType, rtypestr)
    +	}
    +}
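Editorial sketch of the PrivateRdata contract described above, not part of the vendored file. The type name MYTXT and code 65280 are illustrative values from the RFC 6895 private-use range; a real deployment would pick its own name and code.

package main

import (
	"errors"
	"strings"

	"github.com/miekg/dns"
)

const TypeMYTXT uint16 = 65280 // private-use range, illustrative only

// MYTXT stores its rdata as a single opaque string.
type MYTXT struct{ Data string }

func (rd *MYTXT) String() string { return rd.Data }
func (rd *MYTXT) Len() int       { return len(rd.Data) }

// Parse joins the presentation-format tokens back into one string.
func (rd *MYTXT) Parse(txt []string) error {
	rd.Data = strings.Join(txt, " ")
	return nil
}

// Pack writes the rdata into buf and returns the number of bytes written.
func (rd *MYTXT) Pack(buf []byte) (int, error) {
	if len(buf) < len(rd.Data) {
		return 0, errors.New("buffer too small for MYTXT rdata")
	}
	return copy(buf, rd.Data), nil
}

// Unpack reads the whole rdata slice as the string value.
func (rd *MYTXT) Unpack(buf []byte) (int, error) {
	rd.Data = string(buf)
	return len(buf), nil
}

// Copy copies the rdata into another MYTXT.
func (rd *MYTXT) Copy(dest dns.PrivateRdata) error {
	d, ok := dest.(*MYTXT)
	if !ok {
		return dns.ErrRdata
	}
	d.Data = rd.Data
	return nil
}

func main() {
	dns.PrivateHandle("MYTXT", TypeMYTXT, func() dns.PrivateRdata { return new(MYTXT) })
	defer dns.PrivateHandleRemove(TypeMYTXT)

	rr, err := dns.NewRR("example.org. 3600 IN MYTXT hello world")
	if err == nil {
		_ = rr // rr is a *dns.PrivateRR wrapping a *MYTXT
	}
}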
    diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go
    new file mode 100644
    index 000000000000..28151af83598
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/reverse.go
    @@ -0,0 +1,52 @@
    +package dns
    +
    +// StringToType is the reverse of TypeToString, needed for string parsing.
    +var StringToType = reverseInt16(TypeToString)
    +
    +// StringToClass is the reverse of ClassToString, needed for string parsing.
    +var StringToClass = reverseInt16(ClassToString)
    +
    +// StringToOpcode is a map of opcode names to opcodes, the reverse of OpcodeToString.
    +var StringToOpcode = reverseInt(OpcodeToString)
    +
    +// StringToRcode is a map of rcode names to rcodes, the reverse of RcodeToString.
    +var StringToRcode = reverseInt(RcodeToString)
    +
    +func init() {
    +	// Preserve previous NOTIMP typo, see github.com/miekg/dns/issues/733.
    +	StringToRcode["NOTIMPL"] = RcodeNotImplemented
    +}
    +
    +// StringToAlgorithm is the reverse of AlgorithmToString.
    +var StringToAlgorithm = reverseInt8(AlgorithmToString)
    +
    +// StringToHash is a map of names to hash IDs.
    +var StringToHash = reverseInt8(HashToString)
    +
    +// StringToCertType is the reverse of CertTypeToString.
    +var StringToCertType = reverseInt16(CertTypeToString)
    +
    +// Reverse a map
    +func reverseInt8(m map[uint8]string) map[string]uint8 {
    +	n := make(map[string]uint8, len(m))
    +	for u, s := range m {
    +		n[s] = u
    +	}
    +	return n
    +}
    +
    +func reverseInt16(m map[uint16]string) map[string]uint16 {
    +	n := make(map[string]uint16, len(m))
    +	for u, s := range m {
    +		n[s] = u
    +	}
    +	return n
    +}
    +
    +func reverseInt(m map[int]string) map[string]int {
    +	n := make(map[string]int, len(m))
    +	for u, s := range m {
    +		n[s] = u
    +	}
    +	return n
    +}
    diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go
    new file mode 100644
    index 000000000000..a638e862e3ab
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/sanitize.go
    @@ -0,0 +1,86 @@
    +package dns
    +
    +// Dedup removes identical RRs from rrs. It preserves the original ordering.
    +// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies
    +// rrs.
    +// m is used to store the RRs temporarily. If it is nil, a new map will be allocated.
    +func Dedup(rrs []RR, m map[string]RR) []RR {
    +
    +	if m == nil {
    +		m = make(map[string]RR)
    +	}
    +	// Save the keys, so we don't have to call normalizedString twice.
    +	keys := make([]*string, 0, len(rrs))
    +
    +	for _, r := range rrs {
    +		key := normalizedString(r)
    +		keys = append(keys, &key)
    +		if mr, ok := m[key]; ok {
    +			// Shortest TTL wins.
    +			rh, mrh := r.Header(), mr.Header()
    +			if mrh.Ttl > rh.Ttl {
    +				mrh.Ttl = rh.Ttl
    +			}
    +			continue
    +		}
    +
    +		m[key] = r
    +	}
    +	// If the length of the result map equals the amount of RRs we got,
    +	// it means they were all different. We can then just return the original rrset.
    +	if len(m) == len(rrs) {
    +		return rrs
    +	}
    +
    +	j := 0
    +	for i, r := range rrs {
    +		// If keys[i] lives in the map, we should copy and remove it.
    +		if _, ok := m[*keys[i]]; ok {
    +			delete(m, *keys[i])
    +			rrs[j] = r
    +			j++
    +		}
    +
    +		if len(m) == 0 {
    +			break
    +		}
    +	}
    +
    +	return rrs[:j]
    +}
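Editorial sketch, not part of the vendored file: a brief Dedup usage example with illustrative records, showing that the surviving copy of a duplicate keeps the lowest TTL.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	r1, _ := dns.NewRR("example.org. 300 IN A 192.0.2.1")
	r2, _ := dns.NewRR("example.org. 60 IN A 192.0.2.1") // duplicate, lower TTL
	r3, _ := dns.NewRR("example.org. 300 IN A 192.0.2.2")

	out := dns.Dedup([]dns.RR{r1, r2, r3}, nil) // nil lets Dedup allocate its own scratch map
	for _, rr := range out {
		fmt.Println(rr) // two records remain; the surviving 192.0.2.1 carries TTL 60
	}
}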
    +
    +// normalizedString returns a normalized string from r. The TTL
    +// is removed and the domain name is lowercased. We go from this:
    +// DomainNameTTLCLASSTYPERDATA to:
    +// lowercasenameCLASSTYPE...
    +func normalizedString(r RR) string {
    +	// A string Go DNS makes has: domainnameTTL...
    +	b := []byte(r.String())
    +
    +	// find the first non-escaped tab, then another, so we capture where the TTL lives.
    +	esc := false
    +	ttlStart, ttlEnd := 0, 0
    +	for i := 0; i < len(b) && ttlEnd == 0; i++ {
    +		switch {
    +		case b[i] == '\\':
    +			esc = !esc
    +		case b[i] == '\t' && !esc:
    +			if ttlStart == 0 {
    +				ttlStart = i
    +				continue
    +			}
    +			if ttlEnd == 0 {
    +				ttlEnd = i
    +			}
    +		case b[i] >= 'A' && b[i] <= 'Z' && !esc:
    +			b[i] += 32
    +		default:
    +			esc = false
    +		}
    +	}
    +
    +	// remove TTL.
    +	copy(b[ttlStart:], b[ttlEnd:])
    +	cut := ttlEnd - ttlStart
    +	return string(b[:len(b)-cut])
    +}
    diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go
    new file mode 100644
    index 000000000000..7da14c88f2ac
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/scan.go
    @@ -0,0 +1,1397 @@
    +package dns
    +
    +import (
    +	"bufio"
    +	"fmt"
    +	"io"
    +	"os"
    +	"path/filepath"
    +	"strconv"
    +	"strings"
    +)
    +
    +const maxTok = 2048 // Largest token we can return.
    +
    +// The maximum depth of $INCLUDE directives supported by the
    +// ZoneParser API.
    +const maxIncludeDepth = 7
    +
    +// Tokenize an RFC 1035 zone file. The tokenizer will normalize it:
    +// * Add ownernames if they are left blank;
    +// * Suppress sequences of spaces;
    +// * Make each RR fit on one line (_NEWLINE is sent as the last token)
    +// * Handle comments: ;
    +// * Handle braces - anywhere.
    +const (
    +	// Zonefile
    +	zEOF = iota
    +	zString
    +	zBlank
    +	zQuote
    +	zNewline
    +	zRrtpe
    +	zOwner
    +	zClass
    +	zDirOrigin   // $ORIGIN
    +	zDirTTL      // $TTL
    +	zDirInclude  // $INCLUDE
    +	zDirGenerate // $GENERATE
    +
    +	// Privatekey file
    +	zValue
    +	zKey
    +
    +	zExpectOwnerDir      // Ownername
    +	zExpectOwnerBl       // Whitespace after the ownername
    +	zExpectAny           // Expect rrtype, ttl or class
    +	zExpectAnyNoClass    // Expect rrtype or ttl
    +	zExpectAnyNoClassBl  // The whitespace after _EXPECT_ANY_NOCLASS
    +	zExpectAnyNoTTL      // Expect rrtype or class
    +	zExpectAnyNoTTLBl    // Whitespace after _EXPECT_ANY_NOTTL
    +	zExpectRrtype        // Expect rrtype
    +	zExpectRrtypeBl      // Whitespace BEFORE rrtype
    +	zExpectRdata         // The first element of the rdata
    +	zExpectDirTTLBl      // Space after directive $TTL
    +	zExpectDirTTL        // Directive $TTL
    +	zExpectDirOriginBl   // Space after directive $ORIGIN
    +	zExpectDirOrigin     // Directive $ORIGIN
    +	zExpectDirIncludeBl  // Space after directive $INCLUDE
    +	zExpectDirInclude    // Directive $INCLUDE
    +	zExpectDirGenerate   // Directive $GENERATE
    +	zExpectDirGenerateBl // Space after directive $GENERATE
    +)
    +
    +// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
    +// where the error occurred.
    +type ParseError struct {
    +	file string
    +	err  string
    +	lex  lex
    +}
    +
    +func (e *ParseError) Error() (s string) {
    +	if e.file != "" {
    +		s = e.file + ": "
    +	}
    +	s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " +
    +		strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column)
    +	return
    +}
    +
    +type lex struct {
    +	token  string // text of the token
    +	err    bool   // when true, token text has lexer error
    +	value  uint8  // value: zString, _BLANK, etc.
    +	torc   uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
    +	line   int    // line in the file
    +	column int    // column in the file
    +}
    +
    +// Token holds a token that is returned when a zone file is parsed.
    +type Token struct {
    +	// The scanned resource record when Error is nil.
    +	RR
    +	// When an error occurred, this has the error specifics.
    +	Error *ParseError
    +	// A potential comment positioned after the RR and on the same line.
    +	Comment string
    +}
    +
    +// ttlState describes the state necessary to fill in an omitted RR TTL
    +type ttlState struct {
    +	ttl           uint32 // ttl is the current default TTL
    +	isByDirective bool   // isByDirective indicates whether ttl was set by a $TTL directive
    +}
    +
    +// NewRR reads the RR contained in the string s. Only the first RR is
    +// returned. If s contains no records, NewRR will return nil with no
    +// error.
    +//
    +// The class defaults to IN and TTL defaults to 3600. The full zone
    +// file syntax like $TTL, $ORIGIN, etc. is supported.
    +//
    +// All fields of the returned RR are set, except RR.Header().Rdlength
    +// which is set to 0.
    +func NewRR(s string) (RR, error) {
    +	if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline
    +		return ReadRR(strings.NewReader(s+"\n"), "")
    +	}
    +	return ReadRR(strings.NewReader(s), "")
    +}
    +
    +// ReadRR reads the RR contained in r.
    +//
    +// The string file is used in error reporting and to resolve relative
    +// $INCLUDE directives.
    +//
    +// See NewRR for more documentation.
    +func ReadRR(r io.Reader, file string) (RR, error) {
    +	zp := NewZoneParser(r, ".", file)
    +	zp.SetDefaultTTL(defaultTtl)
    +	zp.SetIncludeAllowed(true)
    +	rr, _ := zp.Next()
    +	return rr, zp.Err()
    +}
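Editorial sketch, not part of the vendored file: parsing a single record with NewRR. The record contents are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("example.org. 3600 IN MX 10 mail.example.org.")
	if err != nil {
		log.Fatal(err)
	}
	if mx, ok := rr.(*dns.MX); ok {
		fmt.Println(mx.Hdr.Name, mx.Preference, mx.Mx) // example.org. 10 mail.example.org.
	}
}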
    +
    +// ParseZone reads a RFC 1035 style zonefile from r. It returns
    +// *Tokens on the returned channel, each consisting of either a
    +// parsed RR and optional comment or a nil RR and an error. The
    +// channel is closed by ParseZone when the end of r is reached.
    +//
    +// The string file is used in error reporting and to resolve relative
    +// $INCLUDE directives. The string origin is used as the initial
    +// origin, as if the file would start with an $ORIGIN directive.
    +//
    +// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all
    +// supported.
    +//
    +// Basic usage pattern when reading from a string (z) containing the
    +// zone data:
    +//
    +//	for x := range dns.ParseZone(strings.NewReader(z), "", "") {
    +//		if x.Error != nil {
    +//                  // log.Println(x.Error)
    +//              } else {
    +//                  // Do something with x.RR
    +//              }
    +//	}
    +//
    +// Comments specified after an RR (and on the same line!) are
    +// returned too:
    +//
    +//	foo. IN A 10.0.0.1 ; this is a comment
    +//
    +// The text "; this is comment" is returned in Token.Comment.
    +// Comments inside the RR are returned concatenated along with the
    +// RR. Comments on a line by themselves are discarded.
    +//
    +// To prevent memory leaks it is important to always fully drain the
    +// returned channel. If an error occurs, it will always be the last
    +// Token sent on the channel.
    +//
    +// Deprecated: New users should prefer the ZoneParser API.
    +func ParseZone(r io.Reader, origin, file string) chan *Token {
    +	t := make(chan *Token, 10000)
    +	go parseZone(r, origin, file, t)
    +	return t
    +}
    +
    +func parseZone(r io.Reader, origin, file string, t chan *Token) {
    +	defer close(t)
    +
    +	zp := NewZoneParser(r, origin, file)
    +	zp.SetIncludeAllowed(true)
    +
    +	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
    +		t <- &Token{RR: rr, Comment: zp.Comment()}
    +	}
    +
    +	if err := zp.Err(); err != nil {
    +		pe, ok := err.(*ParseError)
    +		if !ok {
    +			pe = &ParseError{file: file, err: err.Error()}
    +		}
    +
    +		t <- &Token{Error: pe}
    +	}
    +}
    +
    +// ZoneParser is a parser for an RFC 1035 style zonefile.
    +//
    +// Each parsed RR in the zone is returned sequentially from Next. An
    +// optional comment can be retrieved with Comment.
    +//
    +// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all
    +// supported, although $INCLUDE is disabled by default.
    +//
    +// Basic usage pattern when reading from a string (z) containing the
    +// zone data:
    +//
    +//	zp := NewZoneParser(strings.NewReader(z), "", "")
    +//
    +//	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
    +//		// Do something with rr
    +//	}
    +//
    +//	if err := zp.Err(); err != nil {
    +//		// log.Println(err)
    +//	}
    +//
    +// Comments specified after an RR (and on the same line!) are
    +// returned too:
    +//
    +//	foo. IN A 10.0.0.1 ; this is a comment
    +//
    +// The text "; this is comment" is returned from Comment. Comments inside
    +// the RR are returned concatenated along with the RR. Comments on a line
    +// by themselves are discarded.
    +type ZoneParser struct {
    +	c *zlexer
    +
    +	parseErr *ParseError
    +
    +	origin string
    +	file   string
    +
    +	defttl *ttlState
    +
    +	h RR_Header
    +
    +	// sub is used to parse $INCLUDE files and $GENERATE directives.
    +	// Next, by calling subNext, forwards the resulting RRs from this
    +	// sub parser to the calling code.
    +	sub    *ZoneParser
    +	osFile *os.File
    +
    +	includeDepth uint8
    +
    +	includeAllowed bool
    +}
    +
    +// NewZoneParser returns an RFC 1035 style zonefile parser that reads
    +// from r.
    +//
    +// The string file is used in error reporting and to resolve relative
    +// $INCLUDE directives. The string origin is used as the initial
    +// origin, as if the file would start with an $ORIGIN directive.
    +func NewZoneParser(r io.Reader, origin, file string) *ZoneParser {
    +	var pe *ParseError
    +	if origin != "" {
    +		origin = Fqdn(origin)
    +		if _, ok := IsDomainName(origin); !ok {
    +			pe = &ParseError{file, "bad initial origin name", lex{}}
    +		}
    +	}
    +
    +	return &ZoneParser{
    +		c: newZLexer(r),
    +
    +		parseErr: pe,
    +
    +		origin: origin,
    +		file:   file,
    +	}
    +}
    +
    +// SetDefaultTTL sets the parser's default TTL to ttl.
    +func (zp *ZoneParser) SetDefaultTTL(ttl uint32) {
    +	zp.defttl = &ttlState{ttl, false}
    +}
    +
    +// SetIncludeAllowed controls whether $INCLUDE directives are
    +// allowed. $INCLUDE directives are not supported by default.
    +//
    +// The $INCLUDE directive will open and read from a user controlled
    +// file on the system. Even if the file is not a valid zonefile, the
    +// contents of the file may be revealed in error messages, such as:
    +//
    +//	/etc/passwd: dns: not a TTL: "root:x:0:0:root:/root:/bin/bash" at line: 1:31
    +//	/etc/shadow: dns: not a TTL: "root:$6$::0:99999:7:::" at line: 1:125
    +func (zp *ZoneParser) SetIncludeAllowed(v bool) {
    +	zp.includeAllowed = v
    +}
    +
    +// Err returns the first non-EOF error that was encountered by the
    +// ZoneParser.
    +func (zp *ZoneParser) Err() error {
    +	if zp.parseErr != nil {
    +		return zp.parseErr
    +	}
    +
    +	if zp.sub != nil {
    +		if err := zp.sub.Err(); err != nil {
    +			return err
    +		}
    +	}
    +
    +	return zp.c.Err()
    +}
    +
    +func (zp *ZoneParser) setParseError(err string, l lex) (RR, bool) {
    +	zp.parseErr = &ParseError{zp.file, err, l}
    +	return nil, false
    +}
    +
    +// Comment returns an optional text comment that occurred alongside
    +// the RR.
    +func (zp *ZoneParser) Comment() string {
    +	if zp.parseErr != nil {
    +		return ""
    +	}
    +
    +	if zp.sub != nil {
    +		return zp.sub.Comment()
    +	}
    +
    +	return zp.c.Comment()
    +}
    +
    +func (zp *ZoneParser) subNext() (RR, bool) {
    +	if rr, ok := zp.sub.Next(); ok {
    +		return rr, true
    +	}
    +
    +	if zp.sub.osFile != nil {
    +		zp.sub.osFile.Close()
    +		zp.sub.osFile = nil
    +	}
    +
    +	if zp.sub.Err() != nil {
    +		// We have errors to surface.
    +		return nil, false
    +	}
    +
    +	zp.sub = nil
    +	return zp.Next()
    +}
    +
    +// Next advances the parser to the next RR in the zonefile and
    +// returns (RR, true). It will return (nil, false) when the parsing
    +// stops, either by reaching the end of the input or by encountering
    +// an error. After Next returns (nil, false), the Err method will return
    +// any error that occurred during parsing.
    +func (zp *ZoneParser) Next() (RR, bool) {
    +	if zp.parseErr != nil {
    +		return nil, false
    +	}
    +	if zp.sub != nil {
    +		return zp.subNext()
    +	}
    +
    +	// 6 possible beginnings of a line (_ is a space):
    +	//
    +	//   0. zRRTYPE                              -> all omitted until the rrtype
    +	//   1. zOwner _ zRrtype                     -> class/ttl omitted
    +	//   2. zOwner _ zString _ zRrtype           -> class omitted
    +	//   3. zOwner _ zString _ zClass  _ zRrtype -> ttl/class
    +	//   4. zOwner _ zClass  _ zRrtype           -> ttl omitted
    +	//   5. zOwner _ zClass  _ zString _ zRrtype -> class/ttl (reversed)
    +	//
    +	// After detecting these, we know the zRrtype so we can jump to functions
    +	// handling the rdata for each of these types.
    +
    +	st := zExpectOwnerDir // initial state
    +	h := &zp.h
    +
    +	for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
    +		// zlexer spotted an error already
    +		if l.err {
    +			return zp.setParseError(l.token, l)
    +		}
    +
    +		switch st {
    +		case zExpectOwnerDir:
    +			// We can also expect a directive, like $TTL or $ORIGIN
    +			if zp.defttl != nil {
    +				h.Ttl = zp.defttl.ttl
    +			}
    +
    +			h.Class = ClassINET
    +
    +			switch l.value {
    +			case zNewline:
    +				st = zExpectOwnerDir
    +			case zOwner:
    +				name, ok := toAbsoluteName(l.token, zp.origin)
    +				if !ok {
    +					return zp.setParseError("bad owner name", l)
    +				}
    +
    +				h.Name = name
    +
    +				st = zExpectOwnerBl
    +			case zDirTTL:
    +				st = zExpectDirTTLBl
    +			case zDirOrigin:
    +				st = zExpectDirOriginBl
    +			case zDirInclude:
    +				st = zExpectDirIncludeBl
    +			case zDirGenerate:
    +				st = zExpectDirGenerateBl
    +			case zRrtpe:
    +				h.Rrtype = l.torc
    +
    +				st = zExpectRdata
    +			case zClass:
    +				h.Class = l.torc
    +
    +				st = zExpectAnyNoClassBl
    +			case zBlank:
    +				// Discard, can happen when there is nothing on the
    +				// line except the RR type
    +			case zString:
    +				ttl, ok := stringToTTL(l.token)
    +				if !ok {
    +					return zp.setParseError("not a TTL", l)
    +				}
    +
    +				h.Ttl = ttl
    +
    +				if zp.defttl == nil || !zp.defttl.isByDirective {
    +					zp.defttl = &ttlState{ttl, false}
    +				}
    +
    +				st = zExpectAnyNoTTLBl
    +			default:
    +				return zp.setParseError("syntax error at beginning", l)
    +			}
    +		case zExpectDirIncludeBl:
    +			if l.value != zBlank {
    +				return zp.setParseError("no blank after $INCLUDE-directive", l)
    +			}
    +
    +			st = zExpectDirInclude
    +		case zExpectDirInclude:
    +			if l.value != zString {
    +				return zp.setParseError("expecting $INCLUDE value, not this...", l)
    +			}
    +
    +			neworigin := zp.origin // There may optionally be a new origin set after the filename; if not, use the current one
    +			switch l, _ := zp.c.Next(); l.value {
    +			case zBlank:
    +				l, _ := zp.c.Next()
    +				if l.value == zString {
    +					name, ok := toAbsoluteName(l.token, zp.origin)
    +					if !ok {
    +						return zp.setParseError("bad origin name", l)
    +					}
    +
    +					neworigin = name
    +				}
    +			case zNewline, zEOF:
    +				// Ok
    +			default:
    +				return zp.setParseError("garbage after $INCLUDE", l)
    +			}
    +
    +			if !zp.includeAllowed {
    +				return zp.setParseError("$INCLUDE directive not allowed", l)
    +			}
    +			if zp.includeDepth >= maxIncludeDepth {
    +				return zp.setParseError("too deeply nested $INCLUDE", l)
    +			}
    +
    +			// Start with the new file
    +			includePath := l.token
    +			if !filepath.IsAbs(includePath) {
    +				includePath = filepath.Join(filepath.Dir(zp.file), includePath)
    +			}
    +
    +			r1, e1 := os.Open(includePath)
    +			if e1 != nil {
    +				var as string
    +				if !filepath.IsAbs(l.token) {
    +					as = fmt.Sprintf(" as `%s'", includePath)
    +				}
    +
    +				msg := fmt.Sprintf("failed to open `%s'%s: %v", l.token, as, e1)
    +				return zp.setParseError(msg, l)
    +			}
    +
    +			zp.sub = NewZoneParser(r1, neworigin, includePath)
    +			zp.sub.defttl, zp.sub.includeDepth, zp.sub.osFile = zp.defttl, zp.includeDepth+1, r1
    +			zp.sub.SetIncludeAllowed(true)
    +			return zp.subNext()
    +		case zExpectDirTTLBl:
    +			if l.value != zBlank {
    +				return zp.setParseError("no blank after $TTL-directive", l)
    +			}
    +
    +			st = zExpectDirTTL
    +		case zExpectDirTTL:
    +			if l.value != zString {
    +				return zp.setParseError("expecting $TTL value, not this...", l)
    +			}
    +
    +			if err := slurpRemainder(zp.c); err != nil {
    +				return zp.setParseError(err.err, err.lex)
    +			}
    +
    +			ttl, ok := stringToTTL(l.token)
    +			if !ok {
    +				return zp.setParseError("expecting $TTL value, not this...", l)
    +			}
    +
    +			zp.defttl = &ttlState{ttl, true}
    +
    +			st = zExpectOwnerDir
    +		case zExpectDirOriginBl:
    +			if l.value != zBlank {
    +				return zp.setParseError("no blank after $ORIGIN-directive", l)
    +			}
    +
    +			st = zExpectDirOrigin
    +		case zExpectDirOrigin:
    +			if l.value != zString {
    +				return zp.setParseError("expecting $ORIGIN value, not this...", l)
    +			}
    +
    +			if err := slurpRemainder(zp.c); err != nil {
    +				return zp.setParseError(err.err, err.lex)
    +			}
    +
    +			name, ok := toAbsoluteName(l.token, zp.origin)
    +			if !ok {
    +				return zp.setParseError("bad origin name", l)
    +			}
    +
    +			zp.origin = name
    +
    +			st = zExpectOwnerDir
    +		case zExpectDirGenerateBl:
    +			if l.value != zBlank {
    +				return zp.setParseError("no blank after $GENERATE-directive", l)
    +			}
    +
    +			st = zExpectDirGenerate
    +		case zExpectDirGenerate:
    +			if l.value != zString {
    +				return zp.setParseError("expecting $GENERATE value, not this...", l)
    +			}
    +
    +			return zp.generate(l)
    +		case zExpectOwnerBl:
    +			if l.value != zBlank {
    +				return zp.setParseError("no blank after owner", l)
    +			}
    +
    +			st = zExpectAny
    +		case zExpectAny:
    +			switch l.value {
    +			case zRrtpe:
    +				if zp.defttl == nil {
    +					return zp.setParseError("missing TTL with no previous value", l)
    +				}
    +
    +				h.Rrtype = l.torc
    +
    +				st = zExpectRdata
    +			case zClass:
    +				h.Class = l.torc
    +
    +				st = zExpectAnyNoClassBl
    +			case zString:
    +				ttl, ok := stringToTTL(l.token)
    +				if !ok {
    +					return zp.setParseError("not a TTL", l)
    +				}
    +
    +				h.Ttl = ttl
    +
    +				if zp.defttl == nil || !zp.defttl.isByDirective {
    +					zp.defttl = &ttlState{ttl, false}
    +				}
    +
    +				st = zExpectAnyNoTTLBl
    +			default:
    +				return zp.setParseError("expecting RR type, TTL or class, not this...", l)
    +			}
    +		case zExpectAnyNoClassBl:
    +			if l.value != zBlank {
    +				return zp.setParseError("no blank before class", l)
    +			}
    +
    +			st = zExpectAnyNoClass
    +		case zExpectAnyNoTTLBl:
    +			if l.value != zBlank {
    +				return zp.setParseError("no blank before TTL", l)
    +			}
    +
    +			st = zExpectAnyNoTTL
    +		case zExpectAnyNoTTL:
    +			switch l.value {
    +			case zClass:
    +				h.Class = l.torc
    +
    +				st = zExpectRrtypeBl
    +			case zRrtpe:
    +				h.Rrtype = l.torc
    +
    +				st = zExpectRdata
    +			default:
    +				return zp.setParseError("expecting RR type or class, not this...", l)
    +			}
    +		case zExpectAnyNoClass:
    +			switch l.value {
    +			case zString:
    +				ttl, ok := stringToTTL(l.token)
    +				if !ok {
    +					return zp.setParseError("not a TTL", l)
    +				}
    +
    +				h.Ttl = ttl
    +
    +				if zp.defttl == nil || !zp.defttl.isByDirective {
    +					zp.defttl = &ttlState{ttl, false}
    +				}
    +
    +				st = zExpectRrtypeBl
    +			case zRrtpe:
    +				h.Rrtype = l.torc
    +
    +				st = zExpectRdata
    +			default:
    +				return zp.setParseError("expecting RR type or TTL, not this...", l)
    +			}
    +		case zExpectRrtypeBl:
    +			if l.value != zBlank {
    +				return zp.setParseError("no blank before RR type", l)
    +			}
    +
    +			st = zExpectRrtype
    +		case zExpectRrtype:
    +			if l.value != zRrtpe {
    +				return zp.setParseError("unknown RR type", l)
    +			}
    +
    +			h.Rrtype = l.torc
    +
    +			st = zExpectRdata
    +		case zExpectRdata:
    +			var rr RR
    +			if newFn, ok := TypeToRR[h.Rrtype]; ok && canParseAsRR(h.Rrtype) {
    +				rr = newFn()
    +				*rr.Header() = *h
    +			} else {
    +				rr = &RFC3597{Hdr: *h}
    +			}
    +
    +			_, isPrivate := rr.(*PrivateRR)
    +			if !isPrivate && zp.c.Peek().token == "" {
    +				// This is a dynamic update rr.
    +
    +				// TODO(tmthrgd): Previously slurpRemainder was only called
    +				// for certain RR types, which may have been important.
    +				if err := slurpRemainder(zp.c); err != nil {
    +					return zp.setParseError(err.err, err.lex)
    +				}
    +
    +				return rr, true
    +			} else if l.value == zNewline {
    +				return zp.setParseError("unexpected newline", l)
    +			}
    +
    +			if err := rr.parse(zp.c, zp.origin); err != nil {
    +				// err is a concrete *ParseError without the file field set.
    +				// The setParseError call below will construct a new
    +				// *ParseError with file set to zp.file.
    +
    +				// If err.lex is the zero value, we have encountered an unknown RR type;
    +				// in that case we substitute our current lex token.
    +				if err.lex == (lex{}) {
    +					return zp.setParseError(err.err, l)
    +				}
    +
    +				return zp.setParseError(err.err, err.lex)
    +			}
    +
    +			return rr, true
    +		}
    +	}
    +
    +	// If we get here, h.Rrtype is still zero and we haven't parsed anything; this
    +	// is not an error, because an empty zone file is still a zone file.
    +	return nil, false
    +}
    +
    +// canParseAsRR returns true if the record type can be parsed as a
    +// concrete RR. It blacklists certain record types that must be parsed
    +// according to RFC 3597 because they lack a presentation format.
    +func canParseAsRR(rrtype uint16) bool {
    +	switch rrtype {
    +	case TypeANY, TypeNULL, TypeOPT, TypeTSIG:
    +		return false
    +	default:
    +		return true
    +	}
    +}
    +
    +type zlexer struct {
    +	br io.ByteReader
    +
    +	readErr error
    +
    +	line   int
    +	column int
    +
    +	comBuf  string
    +	comment string
    +
    +	l       lex
    +	cachedL *lex
    +
    +	brace  int
    +	quote  bool
    +	space  bool
    +	commt  bool
    +	rrtype bool
    +	owner  bool
    +
    +	nextL bool
    +
    +	eol bool // end-of-line
    +}
    +
    +func newZLexer(r io.Reader) *zlexer {
    +	br, ok := r.(io.ByteReader)
    +	if !ok {
    +		br = bufio.NewReaderSize(r, 1024)
    +	}
    +
    +	return &zlexer{
    +		br: br,
    +
    +		line: 1,
    +
    +		owner: true,
    +	}
    +}
    +
    +func (zl *zlexer) Err() error {
    +	if zl.readErr == io.EOF {
    +		return nil
    +	}
    +
    +	return zl.readErr
    +}
    +
    +// readByte returns the next byte from the input
    +func (zl *zlexer) readByte() (byte, bool) {
    +	if zl.readErr != nil {
    +		return 0, false
    +	}
    +
    +	c, err := zl.br.ReadByte()
    +	if err != nil {
    +		zl.readErr = err
    +		return 0, false
    +	}
    +
    +	// delay the newline handling until the next token is delivered,
    +	// fixes off-by-one errors when reporting a parse error.
    +	if zl.eol {
    +		zl.line++
    +		zl.column = 0
    +		zl.eol = false
    +	}
    +
    +	if c == '\n' {
    +		zl.eol = true
    +	} else {
    +		zl.column++
    +	}
    +
    +	return c, true
    +}
    +
    +func (zl *zlexer) Peek() lex {
    +	if zl.nextL {
    +		return zl.l
    +	}
    +
    +	l, ok := zl.Next()
    +	if !ok {
    +		return l
    +	}
    +
    +	if zl.nextL {
    +		// Cache l. Next returns zl.cachedL then zl.l.
    +		zl.cachedL = &l
    +	} else {
    +		// In this case l == zl.l, so we just tell Next to return zl.l.
    +		zl.nextL = true
    +	}
    +
    +	return l
    +}
    +
    +func (zl *zlexer) Next() (lex, bool) {
    +	l := &zl.l
    +	switch {
    +	case zl.cachedL != nil:
    +		l, zl.cachedL = zl.cachedL, nil
    +		return *l, true
    +	case zl.nextL:
    +		zl.nextL = false
    +		return *l, true
    +	case l.err:
    +		// Parsing errors should be sticky.
    +		return lex{value: zEOF}, false
    +	}
    +
    +	var (
    +		str [maxTok]byte // Hold string text
    +		com [maxTok]byte // Hold comment text
    +
    +		stri int // Offset in str (0 means empty)
    +		comi int // Offset in com (0 means empty)
    +
    +		escape bool
    +	)
    +
    +	if zl.comBuf != "" {
    +		comi = copy(com[:], zl.comBuf)
    +		zl.comBuf = ""
    +	}
    +
    +	zl.comment = ""
    +
    +	for x, ok := zl.readByte(); ok; x, ok = zl.readByte() {
    +		l.line, l.column = zl.line, zl.column
    +
    +		if stri >= len(str) {
    +			l.token = "token length insufficient for parsing"
    +			l.err = true
    +			return *l, true
    +		}
    +		if comi >= len(com) {
    +			l.token = "comment length insufficient for parsing"
    +			l.err = true
    +			return *l, true
    +		}
    +
    +		switch x {
    +		case ' ', '\t':
    +			if escape || zl.quote {
    +				// Inside quotes or escaped this is legal.
    +				str[stri] = x
    +				stri++
    +
    +				escape = false
    +				break
    +			}
    +
    +			if zl.commt {
    +				com[comi] = x
    +				comi++
    +				break
    +			}
    +
    +			var retL lex
    +			if stri == 0 {
    +				// Space directly in the beginning, handled in the grammar
    +			} else if zl.owner {
    +				// If we have a string and it's the first, make it an owner
    +				l.value = zOwner
    +				l.token = string(str[:stri])
    +
    +				// escape $... start with a \ not a $, so this will work
    +				switch strings.ToUpper(l.token) {
    +				case "$TTL":
    +					l.value = zDirTTL
    +				case "$ORIGIN":
    +					l.value = zDirOrigin
    +				case "$INCLUDE":
    +					l.value = zDirInclude
    +				case "$GENERATE":
    +					l.value = zDirGenerate
    +				}
    +
    +				retL = *l
    +			} else {
    +				l.value = zString
    +				l.token = string(str[:stri])
    +
    +				if !zl.rrtype {
    +					tokenUpper := strings.ToUpper(l.token)
    +					if t, ok := StringToType[tokenUpper]; ok {
    +						l.value = zRrtpe
    +						l.torc = t
    +
    +						zl.rrtype = true
    +					} else if strings.HasPrefix(tokenUpper, "TYPE") {
    +						t, ok := typeToInt(l.token)
    +						if !ok {
    +							l.token = "unknown RR type"
    +							l.err = true
    +							return *l, true
    +						}
    +
    +						l.value = zRrtpe
    +						l.torc = t
    +
    +						zl.rrtype = true
    +					}
    +
    +					if t, ok := StringToClass[tokenUpper]; ok {
    +						l.value = zClass
    +						l.torc = t
    +					} else if strings.HasPrefix(tokenUpper, "CLASS") {
    +						t, ok := classToInt(l.token)
    +						if !ok {
    +							l.token = "unknown class"
    +							l.err = true
    +							return *l, true
    +						}
    +
    +						l.value = zClass
    +						l.torc = t
    +					}
    +				}
    +
    +				retL = *l
    +			}
    +
    +			zl.owner = false
    +
    +			if !zl.space {
    +				zl.space = true
    +
    +				l.value = zBlank
    +				l.token = " "
    +
    +				if retL == (lex{}) {
    +					return *l, true
    +				}
    +
    +				zl.nextL = true
    +			}
    +
    +			if retL != (lex{}) {
    +				return retL, true
    +			}
    +		case ';':
    +			if escape || zl.quote {
    +				// Inside quotes or escaped this is legal.
    +				str[stri] = x
    +				stri++
    +
    +				escape = false
    +				break
    +			}
    +
    +			zl.commt = true
    +			zl.comBuf = ""
    +
    +			if comi > 1 {
    +				// A newline was previously seen inside a comment that
    +				// was inside braces and we delayed adding it until now.
    +				com[comi] = ' ' // convert newline to space
    +				comi++
    +			}
    +
    +			com[comi] = ';'
    +			comi++
    +
    +			if stri > 0 {
    +				zl.comBuf = string(com[:comi])
    +
    +				l.value = zString
    +				l.token = string(str[:stri])
    +				return *l, true
    +			}
    +		case '\r':
    +			escape = false
    +
    +			if zl.quote {
    +				str[stri] = x
    +				stri++
    +			}
    +
    +			// discard if outside of quotes
    +		case '\n':
    +			escape = false
    +
    +			// Escaped newline
    +			if zl.quote {
    +				str[stri] = x
    +				stri++
    +				break
    +			}
    +
    +			if zl.commt {
    +				// Reset a comment
    +				zl.commt = false
    +				zl.rrtype = false
    +
    +				// If not in a brace this ends the comment AND the RR
    +				if zl.brace == 0 {
    +					zl.owner = true
    +
    +					l.value = zNewline
    +					l.token = "\n"
    +					zl.comment = string(com[:comi])
    +					return *l, true
    +				}
    +
    +				zl.comBuf = string(com[:comi])
    +				break
    +			}
    +
    +			if zl.brace == 0 {
    +				// If there is previous text, we should output it here
    +				var retL lex
    +				if stri != 0 {
    +					l.value = zString
    +					l.token = string(str[:stri])
    +
    +					if !zl.rrtype {
    +						tokenUpper := strings.ToUpper(l.token)
    +						if t, ok := StringToType[tokenUpper]; ok {
    +							zl.rrtype = true
    +
    +							l.value = zRrtpe
    +							l.torc = t
    +						}
    +					}
    +
    +					retL = *l
    +				}
    +
    +				l.value = zNewline
    +				l.token = "\n"
    +
    +				zl.comment = zl.comBuf
    +				zl.comBuf = ""
    +				zl.rrtype = false
    +				zl.owner = true
    +
    +				if retL != (lex{}) {
    +					zl.nextL = true
    +					return retL, true
    +				}
    +
    +				return *l, true
    +			}
    +		case '\\':
    +			// comments do not get escaped chars, everything is copied
    +			if zl.commt {
    +				com[comi] = x
    +				comi++
    +				break
    +			}
    +
    +			// something already escaped must be in string
    +			if escape {
    +				str[stri] = x
    +				stri++
    +
    +				escape = false
    +				break
    +			}
    +
    +			// something escaped outside of string gets added to string
    +			str[stri] = x
    +			stri++
    +
    +			escape = true
    +		case '"':
    +			if zl.commt {
    +				com[comi] = x
    +				comi++
    +				break
    +			}
    +
    +			if escape {
    +				str[stri] = x
    +				stri++
    +
    +				escape = false
    +				break
    +			}
    +
    +			zl.space = false
    +
    +			// send previous gathered text and the quote
    +			var retL lex
    +			if stri != 0 {
    +				l.value = zString
    +				l.token = string(str[:stri])
    +
    +				retL = *l
    +			}
    +
    +			// send quote itself as separate token
    +			l.value = zQuote
    +			l.token = "\""
    +
    +			zl.quote = !zl.quote
    +
    +			if retL != (lex{}) {
    +				zl.nextL = true
    +				return retL, true
    +			}
    +
    +			return *l, true
    +		case '(', ')':
    +			if zl.commt {
    +				com[comi] = x
    +				comi++
    +				break
    +			}
    +
    +			if escape || zl.quote {
    +				// Inside quotes or escaped this is legal.
    +				str[stri] = x
    +				stri++
    +
    +				escape = false
    +				break
    +			}
    +
    +			switch x {
    +			case ')':
    +				zl.brace--
    +
    +				if zl.brace < 0 {
    +					l.token = "extra closing brace"
    +					l.err = true
    +					return *l, true
    +				}
    +			case '(':
    +				zl.brace++
    +			}
    +		default:
    +			escape = false
    +
    +			if zl.commt {
    +				com[comi] = x
    +				comi++
    +				break
    +			}
    +
    +			str[stri] = x
    +			stri++
    +
    +			zl.space = false
    +		}
    +	}
    +
    +	if zl.readErr != nil && zl.readErr != io.EOF {
    +		// Don't return any tokens after a read error occurs.
    +		return lex{value: zEOF}, false
    +	}
    +
    +	var retL lex
    +	if stri > 0 {
    +		// Send remainder of str
    +		l.value = zString
    +		l.token = string(str[:stri])
    +		retL = *l
    +
    +		if comi <= 0 {
    +			return retL, true
    +		}
    +	}
    +
    +	if comi > 0 {
    +		// Send remainder of com
    +		l.value = zNewline
    +		l.token = "\n"
    +		zl.comment = string(com[:comi])
    +
    +		if retL != (lex{}) {
    +			zl.nextL = true
    +			return retL, true
    +		}
    +
    +		return *l, true
    +	}
    +
    +	if zl.brace != 0 {
    +		l.token = "unbalanced brace"
    +		l.err = true
    +		return *l, true
    +	}
    +
    +	return lex{value: zEOF}, false
    +}
    +
    +func (zl *zlexer) Comment() string {
    +	if zl.l.err {
    +		return ""
    +	}
    +
    +	return zl.comment
    +}
    +
    +// Extract the class number from CLASSxx
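+// For example, classToInt("CLASS15") yields 15.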
    +func classToInt(token string) (uint16, bool) {
    +	offset := 5
    +	if len(token) < offset+1 {
    +		return 0, false
    +	}
    +	class, err := strconv.ParseUint(token[offset:], 10, 16)
    +	if err != nil {
    +		return 0, false
    +	}
    +	return uint16(class), true
    +}
    +
    +// Extract the rr number from TYPExxx
    +func typeToInt(token string) (uint16, bool) {
    +	offset := 4
    +	if len(token) < offset+1 {
    +		return 0, false
    +	}
    +	typ, err := strconv.ParseUint(token[offset:], 10, 16)
    +	if err != nil {
    +		return 0, false
    +	}
    +	return uint16(typ), true
    +}
    +
    +// stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds.
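+// For example, "1h30m" yields 5400 seconds and "2w" yields 1209600.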
    +func stringToTTL(token string) (uint32, bool) {
    +	var s, i uint32
    +	for _, c := range token {
    +		switch c {
    +		case 's', 'S':
    +			s += i
    +			i = 0
    +		case 'm', 'M':
    +			s += i * 60
    +			i = 0
    +		case 'h', 'H':
    +			s += i * 60 * 60
    +			i = 0
    +		case 'd', 'D':
    +			s += i * 60 * 60 * 24
    +			i = 0
    +		case 'w', 'W':
    +			s += i * 60 * 60 * 24 * 7
    +			i = 0
    +		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
    +			i *= 10
    +			i += uint32(c) - '0'
    +		default:
    +			return 0, false
    +		}
    +	}
    +	return s + i, true
    +}
    +
+// Parse LOC records' <digits>[.<digits>][mM] into a
    +// mantissa exponent format. Token should contain the entire
    +// string (i.e. no spaces allowed)
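+// For example, "2000" (metres) is encoded as mantissa 2, exponent 5 (2e5 cm).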
    +func stringToCm(token string) (e, m uint8, ok bool) {
    +	if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' {
    +		token = token[0 : len(token)-1]
    +	}
    +	s := strings.SplitN(token, ".", 2)
    +	var meters, cmeters, val int
    +	var err error
    +	switch len(s) {
    +	case 2:
    +		if cmeters, err = strconv.Atoi(s[1]); err != nil {
    +			return
    +		}
    +		fallthrough
    +	case 1:
    +		if meters, err = strconv.Atoi(s[0]); err != nil {
    +			return
    +		}
    +	case 0:
    +		// huh?
    +		return 0, 0, false
    +	}
    +	ok = true
    +	if meters > 0 {
    +		e = 2
    +		val = meters
    +	} else {
    +		e = 0
    +		val = cmeters
    +	}
    +	for val > 10 {
    +		e++
    +		val /= 10
    +	}
    +	if e > 9 {
    +		ok = false
    +	}
    +	m = uint8(val)
    +	return
    +}
    +
    +func toAbsoluteName(name, origin string) (absolute string, ok bool) {
    +	// check for an explicit origin reference
    +	if name == "@" {
    +		// require a nonempty origin
    +		if origin == "" {
    +			return "", false
    +		}
    +		return origin, true
    +	}
    +
    +	// require a valid domain name
    +	_, ok = IsDomainName(name)
    +	if !ok || name == "" {
    +		return "", false
    +	}
    +
    +	// check if name is already absolute
    +	if IsFqdn(name) {
    +		return name, true
    +	}
    +
    +	// require a nonempty origin
    +	if origin == "" {
    +		return "", false
    +	}
    +	return appendOrigin(name, origin), true
    +}
    +
    +func appendOrigin(name, origin string) string {
    +	if origin == "." {
    +		return name + origin
    +	}
    +	return name + "." + origin
    +}
    +
    +// LOC record helper function
    +func locCheckNorth(token string, latitude uint32) (uint32, bool) {
    +	switch token {
    +	case "n", "N":
    +		return LOC_EQUATOR + latitude, true
    +	case "s", "S":
    +		return LOC_EQUATOR - latitude, true
    +	}
    +	return latitude, false
    +}
    +
    +// LOC record helper function
    +func locCheckEast(token string, longitude uint32) (uint32, bool) {
    +	switch token {
    +	case "e", "E":
    +		return LOC_EQUATOR + longitude, true
    +	case "w", "W":
    +		return LOC_EQUATOR - longitude, true
    +	}
    +	return longitude, false
    +}
    +
    +// "Eat" the rest of the "line"
    +func slurpRemainder(c *zlexer) *ParseError {
    +	l, _ := c.Next()
    +	switch l.value {
    +	case zBlank:
    +		l, _ = c.Next()
    +		if l.value != zNewline && l.value != zEOF {
    +			return &ParseError{"", "garbage after rdata", l}
    +		}
    +	case zNewline:
    +	case zEOF:
    +	default:
    +		return &ParseError{"", "garbage after rdata", l}
    +	}
    +	return nil
    +}
    +
+// Parse a 64-bit, IPv6-address-like value: "0014:4fff:ff20:ee64".
+// Used for NID and L64 records.
    +func stringToNodeID(l lex) (uint64, *ParseError) {
    +	if len(l.token) < 19 {
    +		return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
    +	}
+	// There must be three colons at fixed positions; if not, it's a parse error
    +	if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' {
    +		return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
    +	}
    +	s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
    +	u, err := strconv.ParseUint(s, 16, 64)
    +	if err != nil {
    +		return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
    +	}
    +	return u, nil
    +}
    diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go
    new file mode 100644
    index 000000000000..93b24a697de2
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/scan_rr.go
    @@ -0,0 +1,1698 @@
    +package dns
    +
    +import (
    +	"encoding/base64"
    +	"net"
    +	"strconv"
    +	"strings"
    +)
    +
+// Parse the remainder of the rdata, which may contain embedded spaces, and return
+// the concatenated string (sans the spaces) or an error.
    +func endingToString(c *zlexer, errstr string) (string, *ParseError) {
    +	var s string
    +	l, _ := c.Next() // zString
    +	for l.value != zNewline && l.value != zEOF {
    +		if l.err {
    +			return s, &ParseError{"", errstr, l}
    +		}
    +		switch l.value {
    +		case zString:
    +			s += l.token
    +		case zBlank: // Ok
    +		default:
    +			return "", &ParseError{"", errstr, l}
    +		}
    +		l, _ = c.Next()
    +	}
    +
    +	return s, nil
    +}
    +
+// Parse the remainder of the rdata, splitting it on unquoted whitespace, and return
+// the resulting string slice or an error.
    +func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) {
    +	// Get the remaining data until we see a zNewline
    +	l, _ := c.Next()
    +	if l.err {
    +		return nil, &ParseError{"", errstr, l}
    +	}
    +
    +	// Build the slice
    +	s := make([]string, 0)
    +	quote := false
    +	empty := false
    +	for l.value != zNewline && l.value != zEOF {
    +		if l.err {
    +			return nil, &ParseError{"", errstr, l}
    +		}
    +		switch l.value {
    +		case zString:
    +			empty = false
    +			if len(l.token) > 255 {
+				// split up tokens that are larger than 255 bytes into 255-byte chunks
    +				sx := []string{}
    +				p, i := 0, 255
    +				for {
    +					if i <= len(l.token) {
    +						sx = append(sx, l.token[p:i])
    +					} else {
    +						sx = append(sx, l.token[p:])
    +						break
    +
    +					}
    +					p, i = p+255, i+255
    +				}
    +				s = append(s, sx...)
    +				break
    +			}
    +
    +			s = append(s, l.token)
    +		case zBlank:
    +			if quote {
    +				// zBlank can only be seen in between txt parts.
    +				return nil, &ParseError{"", errstr, l}
    +			}
    +		case zQuote:
    +			if empty && quote {
    +				s = append(s, "")
    +			}
    +			quote = !quote
    +			empty = true
    +		default:
    +			return nil, &ParseError{"", errstr, l}
    +		}
    +		l, _ = c.Next()
    +	}
    +
    +	if quote {
    +		return nil, &ParseError{"", errstr, l}
    +	}
    +
    +	return s, nil
    +}
    +
    +func (rr *A) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	rr.A = net.ParseIP(l.token)
    +	// IPv4 addresses cannot include ":".
    +	// We do this rather than use net.IP's To4() because
    +	// To4() treats IPv4-mapped IPv6 addresses as being
    +	// IPv4.
    +	isIPv4 := !strings.Contains(l.token, ":")
    +	if rr.A == nil || !isIPv4 || l.err {
    +		return &ParseError{"", "bad A A", l}
    +	}
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *AAAA) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	rr.AAAA = net.ParseIP(l.token)
    +	// IPv6 addresses must include ":", and IPv4
    +	// addresses cannot include ":".
    +	isIPv6 := strings.Contains(l.token, ":")
    +	if rr.AAAA == nil || !isIPv6 || l.err {
    +		return &ParseError{"", "bad AAAA AAAA", l}
    +	}
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *NS) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad NS Ns", l}
    +	}
    +	rr.Ns = name
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *PTR) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad PTR Ptr", l}
    +	}
    +	rr.Ptr = name
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *NSAPPTR) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad NSAP-PTR Ptr", l}
    +	}
    +	rr.Ptr = name
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *RP) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	mbox, mboxOk := toAbsoluteName(l.token, o)
    +	if l.err || !mboxOk {
    +		return &ParseError{"", "bad RP Mbox", l}
    +	}
    +	rr.Mbox = mbox
    +
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	rr.Txt = l.token
    +
    +	txt, txtOk := toAbsoluteName(l.token, o)
    +	if l.err || !txtOk {
    +		return &ParseError{"", "bad RP Txt", l}
    +	}
    +	rr.Txt = txt
    +
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *MR) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad MR Mr", l}
    +	}
    +	rr.Mr = name
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *MB) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad MB Mb", l}
    +	}
    +	rr.Mb = name
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *MG) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad MG Mg", l}
    +	}
    +	rr.Mg = name
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *HINFO) parse(c *zlexer, o string) *ParseError {
    +	chunks, e := endingToTxtSlice(c, "bad HINFO Fields")
    +	if e != nil {
    +		return e
    +	}
    +
    +	if ln := len(chunks); ln == 0 {
    +		return nil
    +	} else if ln == 1 {
    +		// Can we split it?
    +		if out := strings.Fields(chunks[0]); len(out) > 1 {
    +			chunks = out
    +		} else {
    +			chunks = append(chunks, "")
    +		}
    +	}
    +
    +	rr.Cpu = chunks[0]
    +	rr.Os = strings.Join(chunks[1:], " ")
    +
    +	return nil
    +}
    +
    +func (rr *MINFO) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	rmail, rmailOk := toAbsoluteName(l.token, o)
    +	if l.err || !rmailOk {
    +		return &ParseError{"", "bad MINFO Rmail", l}
    +	}
    +	rr.Rmail = rmail
    +
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	rr.Email = l.token
    +
    +	email, emailOk := toAbsoluteName(l.token, o)
    +	if l.err || !emailOk {
    +		return &ParseError{"", "bad MINFO Email", l}
    +	}
    +	rr.Email = email
    +
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *MF) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad MF Mf", l}
    +	}
    +	rr.Mf = name
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *MD) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad MD Md", l}
    +	}
    +	rr.Md = name
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *MX) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad MX Pref", l}
    +	}
    +	rr.Preference = uint16(i)
    +
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	rr.Mx = l.token
    +
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad MX Mx", l}
    +	}
    +	rr.Mx = name
    +
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *RT) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil {
    +		return &ParseError{"", "bad RT Preference", l}
    +	}
    +	rr.Preference = uint16(i)
    +
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	rr.Host = l.token
    +
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad RT Host", l}
    +	}
    +	rr.Host = name
    +
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *AFSDB) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad AFSDB Subtype", l}
    +	}
    +	rr.Subtype = uint16(i)
    +
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	rr.Hostname = l.token
    +
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad AFSDB Hostname", l}
    +	}
    +	rr.Hostname = name
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *X25) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	if l.err {
    +		return &ParseError{"", "bad X25 PSDNAddress", l}
    +	}
    +	rr.PSDNAddress = l.token
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *KX) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad KX Pref", l}
    +	}
    +	rr.Preference = uint16(i)
    +
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	rr.Exchanger = l.token
    +
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad KX Exchanger", l}
    +	}
    +	rr.Exchanger = name
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *CNAME) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad CNAME Target", l}
    +	}
    +	rr.Target = name
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *DNAME) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad DNAME Target", l}
    +	}
    +	rr.Target = name
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *SOA) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	ns, nsOk := toAbsoluteName(l.token, o)
    +	if l.err || !nsOk {
    +		return &ParseError{"", "bad SOA Ns", l}
    +	}
    +	rr.Ns = ns
    +
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	rr.Mbox = l.token
    +
    +	mbox, mboxOk := toAbsoluteName(l.token, o)
    +	if l.err || !mboxOk {
    +		return &ParseError{"", "bad SOA Mbox", l}
    +	}
    +	rr.Mbox = mbox
    +
    +	c.Next() // zBlank
    +
    +	var (
    +		v  uint32
    +		ok bool
    +	)
    +	for i := 0; i < 5; i++ {
    +		l, _ = c.Next()
    +		if l.err {
    +			return &ParseError{"", "bad SOA zone parameter", l}
    +		}
    +		if j, e := strconv.ParseUint(l.token, 10, 32); e != nil {
    +			if i == 0 {
    +				// Serial must be a number
    +				return &ParseError{"", "bad SOA zone parameter", l}
    +			}
    +			// We allow other fields to be unitful duration strings
    +			if v, ok = stringToTTL(l.token); !ok {
    +				return &ParseError{"", "bad SOA zone parameter", l}
    +
    +			}
    +		} else {
    +			v = uint32(j)
    +		}
    +		switch i {
    +		case 0:
    +			rr.Serial = v
    +			c.Next() // zBlank
    +		case 1:
    +			rr.Refresh = v
    +			c.Next() // zBlank
    +		case 2:
    +			rr.Retry = v
    +			c.Next() // zBlank
    +		case 3:
    +			rr.Expire = v
    +			c.Next() // zBlank
    +		case 4:
    +			rr.Minttl = v
    +		}
    +	}
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *SRV) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad SRV Priority", l}
    +	}
    +	rr.Priority = uint16(i)
    +
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	i, e = strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad SRV Weight", l}
    +	}
    +	rr.Weight = uint16(i)
    +
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	i, e = strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad SRV Port", l}
    +	}
    +	rr.Port = uint16(i)
    +
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	rr.Target = l.token
    +
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad SRV Target", l}
    +	}
    +	rr.Target = name
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *NAPTR) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad NAPTR Order", l}
    +	}
    +	rr.Order = uint16(i)
    +
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	i, e = strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad NAPTR Preference", l}
    +	}
    +	rr.Preference = uint16(i)
    +
    +	// Flags
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // _QUOTE
    +	if l.value != zQuote {
    +		return &ParseError{"", "bad NAPTR Flags", l}
    +	}
    +	l, _ = c.Next() // Either String or Quote
    +	if l.value == zString {
    +		rr.Flags = l.token
    +		l, _ = c.Next() // _QUOTE
    +		if l.value != zQuote {
    +			return &ParseError{"", "bad NAPTR Flags", l}
    +		}
    +	} else if l.value == zQuote {
    +		rr.Flags = ""
    +	} else {
    +		return &ParseError{"", "bad NAPTR Flags", l}
    +	}
    +
    +	// Service
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // _QUOTE
    +	if l.value != zQuote {
    +		return &ParseError{"", "bad NAPTR Service", l}
    +	}
    +	l, _ = c.Next() // Either String or Quote
    +	if l.value == zString {
    +		rr.Service = l.token
    +		l, _ = c.Next() // _QUOTE
    +		if l.value != zQuote {
    +			return &ParseError{"", "bad NAPTR Service", l}
    +		}
    +	} else if l.value == zQuote {
    +		rr.Service = ""
    +	} else {
    +		return &ParseError{"", "bad NAPTR Service", l}
    +	}
    +
    +	// Regexp
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // _QUOTE
    +	if l.value != zQuote {
    +		return &ParseError{"", "bad NAPTR Regexp", l}
    +	}
    +	l, _ = c.Next() // Either String or Quote
    +	if l.value == zString {
    +		rr.Regexp = l.token
    +		l, _ = c.Next() // _QUOTE
    +		if l.value != zQuote {
    +			return &ParseError{"", "bad NAPTR Regexp", l}
    +		}
    +	} else if l.value == zQuote {
    +		rr.Regexp = ""
    +	} else {
    +		return &ParseError{"", "bad NAPTR Regexp", l}
    +	}
    +
+	// Replacement follows right after the closing quote
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	rr.Replacement = l.token
    +
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad NAPTR Replacement", l}
    +	}
    +	rr.Replacement = name
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *TALINK) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	previousName, previousNameOk := toAbsoluteName(l.token, o)
    +	if l.err || !previousNameOk {
    +		return &ParseError{"", "bad TALINK PreviousName", l}
    +	}
    +	rr.PreviousName = previousName
    +
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	rr.NextName = l.token
    +
    +	nextName, nextNameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nextNameOk {
    +		return &ParseError{"", "bad TALINK NextName", l}
    +	}
    +	rr.NextName = nextName
    +
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *LOC) parse(c *zlexer, o string) *ParseError {
    +	// Non zero defaults for LOC record, see RFC 1876, Section 3.
    +	rr.HorizPre = 165 // 10000
    +	rr.VertPre = 162  // 10
    +	rr.Size = 18      // 1
    +	ok := false
    +
    +	// North
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 32)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad LOC Latitude", l}
    +	}
    +	rr.Latitude = 1000 * 60 * 60 * uint32(i)
    +
    +	c.Next() // zBlank
    +	// Either number, 'N' or 'S'
    +	l, _ = c.Next()
    +	if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
    +		goto East
    +	}
    +	i, e = strconv.ParseUint(l.token, 10, 32)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad LOC Latitude minutes", l}
    +	}
    +	rr.Latitude += 1000 * 60 * uint32(i)
    +
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err {
    +		return &ParseError{"", "bad LOC Latitude seconds", l}
    +	} else {
    +		rr.Latitude += uint32(1000 * i)
    +	}
    +	c.Next() // zBlank
    +	// Either number, 'N' or 'S'
    +	l, _ = c.Next()
    +	if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
    +		goto East
    +	}
    +	// If still alive, flag an error
    +	return &ParseError{"", "bad LOC Latitude North/South", l}
    +
    +East:
    +	// East
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err {
    +		return &ParseError{"", "bad LOC Longitude", l}
    +	} else {
    +		rr.Longitude = 1000 * 60 * 60 * uint32(i)
    +	}
    +	c.Next() // zBlank
    +	// Either number, 'E' or 'W'
    +	l, _ = c.Next()
    +	if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
    +		goto Altitude
    +	}
    +	if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err {
    +		return &ParseError{"", "bad LOC Longitude minutes", l}
    +	} else {
    +		rr.Longitude += 1000 * 60 * uint32(i)
    +	}
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err {
    +		return &ParseError{"", "bad LOC Longitude seconds", l}
    +	} else {
    +		rr.Longitude += uint32(1000 * i)
    +	}
    +	c.Next() // zBlank
    +	// Either number, 'E' or 'W'
    +	l, _ = c.Next()
    +	if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
    +		goto Altitude
    +	}
    +	// If still alive, flag an error
    +	return &ParseError{"", "bad LOC Longitude East/West", l}
    +
    +Altitude:
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	if len(l.token) == 0 || l.err {
    +		return &ParseError{"", "bad LOC Altitude", l}
    +	}
    +	if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' {
    +		l.token = l.token[0 : len(l.token)-1]
    +	}
    +	if i, e := strconv.ParseFloat(l.token, 32); e != nil {
    +		return &ParseError{"", "bad LOC Altitude", l}
    +	} else {
    +		rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5)
    +	}
    +
    +	// And now optionally the other values
    +	l, _ = c.Next()
    +	count := 0
    +	for l.value != zNewline && l.value != zEOF {
    +		switch l.value {
    +		case zString:
    +			switch count {
    +			case 0: // Size
    +				e, m, ok := stringToCm(l.token)
    +				if !ok {
    +					return &ParseError{"", "bad LOC Size", l}
    +				}
    +				rr.Size = e&0x0f | m<<4&0xf0
    +			case 1: // HorizPre
    +				e, m, ok := stringToCm(l.token)
    +				if !ok {
    +					return &ParseError{"", "bad LOC HorizPre", l}
    +				}
    +				rr.HorizPre = e&0x0f | m<<4&0xf0
    +			case 2: // VertPre
    +				e, m, ok := stringToCm(l.token)
    +				if !ok {
    +					return &ParseError{"", "bad LOC VertPre", l}
    +				}
    +				rr.VertPre = e&0x0f | m<<4&0xf0
    +			}
    +			count++
    +		case zBlank:
    +			// Ok
    +		default:
    +			return &ParseError{"", "bad LOC Size, HorizPre or VertPre", l}
    +		}
    +		l, _ = c.Next()
    +	}
    +	return nil
    +}
    +
    +func (rr *HIP) parse(c *zlexer, o string) *ParseError {
    +	// HitLength is not represented
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad HIP PublicKeyAlgorithm", l}
    +	}
    +	rr.PublicKeyAlgorithm = uint8(i)
    +
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	if len(l.token) == 0 || l.err {
    +		return &ParseError{"", "bad HIP Hit", l}
    +	}
    +	rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6.
    +	rr.HitLength = uint8(len(rr.Hit)) / 2
    +
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	if len(l.token) == 0 || l.err {
    +		return &ParseError{"", "bad HIP PublicKey", l}
    +	}
    +	rr.PublicKey = l.token // This cannot contain spaces
    +	rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey)))
    +
    +	// RendezvousServers (if any)
    +	l, _ = c.Next()
    +	var xs []string
    +	for l.value != zNewline && l.value != zEOF {
    +		switch l.value {
    +		case zString:
    +			name, nameOk := toAbsoluteName(l.token, o)
    +			if l.err || !nameOk {
    +				return &ParseError{"", "bad HIP RendezvousServers", l}
    +			}
    +			xs = append(xs, name)
    +		case zBlank:
    +			// Ok
    +		default:
    +			return &ParseError{"", "bad HIP RendezvousServers", l}
    +		}
    +		l, _ = c.Next()
    +	}
    +
    +	rr.RendezvousServers = xs
    +	return nil
    +}
    +
    +func (rr *CERT) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	if v, ok := StringToCertType[l.token]; ok {
    +		rr.Type = v
    +	} else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil {
    +		return &ParseError{"", "bad CERT Type", l}
    +	} else {
    +		rr.Type = uint16(i)
    +	}
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad CERT KeyTag", l}
    +	}
    +	rr.KeyTag = uint16(i)
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	if v, ok := StringToAlgorithm[l.token]; ok {
    +		rr.Algorithm = v
    +	} else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil {
    +		return &ParseError{"", "bad CERT Algorithm", l}
    +	} else {
    +		rr.Algorithm = uint8(i)
    +	}
    +	s, e1 := endingToString(c, "bad CERT Certificate")
    +	if e1 != nil {
    +		return e1
    +	}
    +	rr.Certificate = s
    +	return nil
    +}
    +
    +func (rr *OPENPGPKEY) parse(c *zlexer, o string) *ParseError {
    +	s, e := endingToString(c, "bad OPENPGPKEY PublicKey")
    +	if e != nil {
    +		return e
    +	}
    +	rr.PublicKey = s
    +	return nil
    +}
    +
    +func (rr *CSYNC) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	j, e := strconv.ParseUint(l.token, 10, 32)
    +	if e != nil {
    +		// Serial must be a number
    +		return &ParseError{"", "bad CSYNC serial", l}
    +	}
    +	rr.Serial = uint32(j)
    +
    +	c.Next() // zBlank
    +
    +	l, _ = c.Next()
    +	j, e = strconv.ParseUint(l.token, 10, 16)
    +	if e != nil {
    +		// Serial must be a number
    +		return &ParseError{"", "bad CSYNC flags", l}
    +	}
    +	rr.Flags = uint16(j)
    +
    +	rr.TypeBitMap = make([]uint16, 0)
    +	var (
    +		k  uint16
    +		ok bool
    +	)
    +	l, _ = c.Next()
    +	for l.value != zNewline && l.value != zEOF {
    +		switch l.value {
    +		case zBlank:
    +			// Ok
    +		case zString:
    +			tokenUpper := strings.ToUpper(l.token)
    +			if k, ok = StringToType[tokenUpper]; !ok {
    +				if k, ok = typeToInt(l.token); !ok {
    +					return &ParseError{"", "bad CSYNC TypeBitMap", l}
    +				}
    +			}
    +			rr.TypeBitMap = append(rr.TypeBitMap, k)
    +		default:
    +			return &ParseError{"", "bad CSYNC TypeBitMap", l}
    +		}
    +		l, _ = c.Next()
    +	}
    +	return nil
    +}
    +
    +func (rr *SIG) parse(c *zlexer, o string) *ParseError {
    +	return rr.RRSIG.parse(c, o)
    +}
    +
    +func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	tokenUpper := strings.ToUpper(l.token)
    +	if t, ok := StringToType[tokenUpper]; !ok {
    +		if strings.HasPrefix(tokenUpper, "TYPE") {
    +			t, ok = typeToInt(l.token)
    +			if !ok {
    +				return &ParseError{"", "bad RRSIG Typecovered", l}
    +			}
    +			rr.TypeCovered = t
    +		} else {
    +			return &ParseError{"", "bad RRSIG Typecovered", l}
    +		}
    +	} else {
    +		rr.TypeCovered = t
    +	}
    +
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, err := strconv.ParseUint(l.token, 10, 8)
    +	if err != nil || l.err {
    +		return &ParseError{"", "bad RRSIG Algorithm", l}
    +	}
    +	rr.Algorithm = uint8(i)
    +
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, err = strconv.ParseUint(l.token, 10, 8)
    +	if err != nil || l.err {
    +		return &ParseError{"", "bad RRSIG Labels", l}
    +	}
    +	rr.Labels = uint8(i)
    +
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, err = strconv.ParseUint(l.token, 10, 32)
    +	if err != nil || l.err {
    +		return &ParseError{"", "bad RRSIG OrigTtl", l}
    +	}
    +	rr.OrigTtl = uint32(i)
    +
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	if i, err := StringToTime(l.token); err != nil {
+		// Try to see if it is all numeric and, if so, use it as an epoch
    +		if i, err := strconv.ParseInt(l.token, 10, 64); err == nil {
    +			// TODO(miek): error out on > MAX_UINT32, same below
    +			rr.Expiration = uint32(i)
    +		} else {
    +			return &ParseError{"", "bad RRSIG Expiration", l}
    +		}
    +	} else {
    +		rr.Expiration = i
    +	}
    +
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	if i, err := StringToTime(l.token); err != nil {
    +		if i, err := strconv.ParseInt(l.token, 10, 64); err == nil {
    +			rr.Inception = uint32(i)
    +		} else {
    +			return &ParseError{"", "bad RRSIG Inception", l}
    +		}
    +	} else {
    +		rr.Inception = i
    +	}
    +
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, err = strconv.ParseUint(l.token, 10, 16)
    +	if err != nil || l.err {
    +		return &ParseError{"", "bad RRSIG KeyTag", l}
    +	}
    +	rr.KeyTag = uint16(i)
    +
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	rr.SignerName = l.token
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad RRSIG SignerName", l}
    +	}
    +	rr.SignerName = name
    +
    +	s, e := endingToString(c, "bad RRSIG Signature")
    +	if e != nil {
    +		return e
    +	}
    +	rr.Signature = s
    +
    +	return nil
    +}
    +
    +func (rr *NSEC) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad NSEC NextDomain", l}
    +	}
    +	rr.NextDomain = name
    +
    +	rr.TypeBitMap = make([]uint16, 0)
    +	var (
    +		k  uint16
    +		ok bool
    +	)
    +	l, _ = c.Next()
    +	for l.value != zNewline && l.value != zEOF {
    +		switch l.value {
    +		case zBlank:
    +			// Ok
    +		case zString:
    +			tokenUpper := strings.ToUpper(l.token)
    +			if k, ok = StringToType[tokenUpper]; !ok {
    +				if k, ok = typeToInt(l.token); !ok {
    +					return &ParseError{"", "bad NSEC TypeBitMap", l}
    +				}
    +			}
    +			rr.TypeBitMap = append(rr.TypeBitMap, k)
    +		default:
    +			return &ParseError{"", "bad NSEC TypeBitMap", l}
    +		}
    +		l, _ = c.Next()
    +	}
    +	return nil
    +}
    +
    +func (rr *NSEC3) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad NSEC3 Hash", l}
    +	}
    +	rr.Hash = uint8(i)
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, e = strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad NSEC3 Flags", l}
    +	}
    +	rr.Flags = uint8(i)
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, e = strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad NSEC3 Iterations", l}
    +	}
    +	rr.Iterations = uint16(i)
    +	c.Next()
    +	l, _ = c.Next()
    +	if len(l.token) == 0 || l.err {
    +		return &ParseError{"", "bad NSEC3 Salt", l}
    +	}
    +	if l.token != "-" {
    +		rr.SaltLength = uint8(len(l.token)) / 2
    +		rr.Salt = l.token
    +	}
    +
    +	c.Next()
    +	l, _ = c.Next()
    +	if len(l.token) == 0 || l.err {
    +		return &ParseError{"", "bad NSEC3 NextDomain", l}
    +	}
    +	rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits)
    +	rr.NextDomain = l.token
    +
    +	rr.TypeBitMap = make([]uint16, 0)
    +	var (
    +		k  uint16
    +		ok bool
    +	)
    +	l, _ = c.Next()
    +	for l.value != zNewline && l.value != zEOF {
    +		switch l.value {
    +		case zBlank:
    +			// Ok
    +		case zString:
    +			tokenUpper := strings.ToUpper(l.token)
    +			if k, ok = StringToType[tokenUpper]; !ok {
    +				if k, ok = typeToInt(l.token); !ok {
    +					return &ParseError{"", "bad NSEC3 TypeBitMap", l}
    +				}
    +			}
    +			rr.TypeBitMap = append(rr.TypeBitMap, k)
    +		default:
    +			return &ParseError{"", "bad NSEC3 TypeBitMap", l}
    +		}
    +		l, _ = c.Next()
    +	}
    +	return nil
    +}
    +
    +func (rr *NSEC3PARAM) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad NSEC3PARAM Hash", l}
    +	}
    +	rr.Hash = uint8(i)
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, e = strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad NSEC3PARAM Flags", l}
    +	}
    +	rr.Flags = uint8(i)
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, e = strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad NSEC3PARAM Iterations", l}
    +	}
    +	rr.Iterations = uint16(i)
    +	c.Next()
    +	l, _ = c.Next()
    +	if l.token != "-" {
    +		rr.SaltLength = uint8(len(l.token))
    +		rr.Salt = l.token
    +	}
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *EUI48) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	if len(l.token) != 17 || l.err {
    +		return &ParseError{"", "bad EUI48 Address", l}
    +	}
    +	addr := make([]byte, 12)
    +	dash := 0
    +	for i := 0; i < 10; i += 2 {
    +		addr[i] = l.token[i+dash]
    +		addr[i+1] = l.token[i+1+dash]
    +		dash++
    +		if l.token[i+1+dash] != '-' {
    +			return &ParseError{"", "bad EUI48 Address", l}
    +		}
    +	}
    +	addr[10] = l.token[15]
    +	addr[11] = l.token[16]
    +
    +	i, e := strconv.ParseUint(string(addr), 16, 48)
    +	if e != nil {
    +		return &ParseError{"", "bad EUI48 Address", l}
    +	}
    +	rr.Address = i
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *EUI64) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	if len(l.token) != 23 || l.err {
    +		return &ParseError{"", "bad EUI64 Address", l}
    +	}
    +	addr := make([]byte, 16)
    +	dash := 0
    +	for i := 0; i < 14; i += 2 {
    +		addr[i] = l.token[i+dash]
    +		addr[i+1] = l.token[i+1+dash]
    +		dash++
    +		if l.token[i+1+dash] != '-' {
    +			return &ParseError{"", "bad EUI64 Address", l}
    +		}
    +	}
    +	addr[14] = l.token[21]
    +	addr[15] = l.token[22]
    +
    +	i, e := strconv.ParseUint(string(addr), 16, 64)
    +	if e != nil {
+		return &ParseError{"", "bad EUI64 Address", l}
    +	}
    +	rr.Address = i
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *SSHFP) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad SSHFP Algorithm", l}
    +	}
    +	rr.Algorithm = uint8(i)
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, e = strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad SSHFP Type", l}
    +	}
    +	rr.Type = uint8(i)
    +	c.Next() // zBlank
    +	s, e1 := endingToString(c, "bad SSHFP Fingerprint")
    +	if e1 != nil {
    +		return e1
    +	}
    +	rr.FingerPrint = s
    +	return nil
    +}
    +
    +func (rr *DNSKEY) parseDNSKEY(c *zlexer, o, typ string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad " + typ + " Flags", l}
    +	}
    +	rr.Flags = uint16(i)
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	i, e = strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad " + typ + " Protocol", l}
    +	}
    +	rr.Protocol = uint8(i)
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	i, e = strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad " + typ + " Algorithm", l}
    +	}
    +	rr.Algorithm = uint8(i)
    +	s, e1 := endingToString(c, "bad "+typ+" PublicKey")
    +	if e1 != nil {
    +		return e1
    +	}
    +	rr.PublicKey = s
    +	return nil
    +}
    +
    +func (rr *DNSKEY) parse(c *zlexer, o string) *ParseError {
    +	return rr.parseDNSKEY(c, o, "DNSKEY")
    +}
    +
    +func (rr *KEY) parse(c *zlexer, o string) *ParseError {
    +	return rr.parseDNSKEY(c, o, "KEY")
    +}
    +
    +func (rr *CDNSKEY) parse(c *zlexer, o string) *ParseError {
    +	return rr.parseDNSKEY(c, o, "CDNSKEY")
    +}
    +
    +func (rr *RKEY) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad RKEY Flags", l}
    +	}
    +	rr.Flags = uint16(i)
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	i, e = strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad RKEY Protocol", l}
    +	}
    +	rr.Protocol = uint8(i)
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	i, e = strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad RKEY Algorithm", l}
    +	}
    +	rr.Algorithm = uint8(i)
    +	s, e1 := endingToString(c, "bad RKEY PublicKey")
    +	if e1 != nil {
    +		return e1
    +	}
    +	rr.PublicKey = s
    +	return nil
    +}
    +
    +func (rr *EID) parse(c *zlexer, o string) *ParseError {
    +	s, e := endingToString(c, "bad EID Endpoint")
    +	if e != nil {
    +		return e
    +	}
    +	rr.Endpoint = s
    +	return nil
    +}
    +
    +func (rr *NIMLOC) parse(c *zlexer, o string) *ParseError {
    +	s, e := endingToString(c, "bad NIMLOC Locator")
    +	if e != nil {
    +		return e
    +	}
    +	rr.Locator = s
    +	return nil
    +}
    +
    +func (rr *GPOS) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	_, e := strconv.ParseFloat(l.token, 64)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad GPOS Longitude", l}
    +	}
    +	rr.Longitude = l.token
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	_, e = strconv.ParseFloat(l.token, 64)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad GPOS Latitude", l}
    +	}
    +	rr.Latitude = l.token
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	_, e = strconv.ParseFloat(l.token, 64)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad GPOS Altitude", l}
    +	}
    +	rr.Altitude = l.token
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad " + typ + " KeyTag", l}
    +	}
    +	rr.KeyTag = uint16(i)
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	if i, e = strconv.ParseUint(l.token, 10, 8); e != nil {
    +		tokenUpper := strings.ToUpper(l.token)
    +		i, ok := StringToAlgorithm[tokenUpper]
    +		if !ok || l.err {
    +			return &ParseError{"", "bad " + typ + " Algorithm", l}
    +		}
    +		rr.Algorithm = i
    +	} else {
    +		rr.Algorithm = uint8(i)
    +	}
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, e = strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad " + typ + " DigestType", l}
    +	}
    +	rr.DigestType = uint8(i)
    +	s, e1 := endingToString(c, "bad "+typ+" Digest")
    +	if e1 != nil {
    +		return e1
    +	}
    +	rr.Digest = s
    +	return nil
    +}
    +
    +func (rr *DS) parse(c *zlexer, o string) *ParseError {
    +	return rr.parseDS(c, o, "DS")
    +}
    +
    +func (rr *DLV) parse(c *zlexer, o string) *ParseError {
    +	return rr.parseDS(c, o, "DLV")
    +}
    +
    +func (rr *CDS) parse(c *zlexer, o string) *ParseError {
    +	return rr.parseDS(c, o, "CDS")
    +}
    +
    +func (rr *TA) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad TA KeyTag", l}
    +	}
    +	rr.KeyTag = uint16(i)
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	if i, e := strconv.ParseUint(l.token, 10, 8); e != nil {
    +		tokenUpper := strings.ToUpper(l.token)
    +		i, ok := StringToAlgorithm[tokenUpper]
    +		if !ok || l.err {
    +			return &ParseError{"", "bad TA Algorithm", l}
    +		}
    +		rr.Algorithm = i
    +	} else {
    +		rr.Algorithm = uint8(i)
    +	}
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, e = strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad TA DigestType", l}
    +	}
    +	rr.DigestType = uint8(i)
    +	s, err := endingToString(c, "bad TA Digest")
    +	if err != nil {
    +		return err
    +	}
    +	rr.Digest = s
    +	return nil
    +}
    +
    +func (rr *TLSA) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad TLSA Usage", l}
    +	}
    +	rr.Usage = uint8(i)
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, e = strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad TLSA Selector", l}
    +	}
    +	rr.Selector = uint8(i)
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, e = strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad TLSA MatchingType", l}
    +	}
    +	rr.MatchingType = uint8(i)
+	// Note: a separate error variable (e2) is used here so it does not shadow e above.
    +	s, e2 := endingToString(c, "bad TLSA Certificate")
    +	if e2 != nil {
    +		return e2
    +	}
    +	rr.Certificate = s
    +	return nil
    +}
    +
    +func (rr *SMIMEA) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad SMIMEA Usage", l}
    +	}
    +	rr.Usage = uint8(i)
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, e = strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad SMIMEA Selector", l}
    +	}
    +	rr.Selector = uint8(i)
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, e = strconv.ParseUint(l.token, 10, 8)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad SMIMEA MatchingType", l}
    +	}
    +	rr.MatchingType = uint8(i)
+	// Note: a separate error variable (e2) is used here so it does not shadow e above.
    +	s, e2 := endingToString(c, "bad SMIMEA Certificate")
    +	if e2 != nil {
    +		return e2
    +	}
    +	rr.Certificate = s
    +	return nil
    +}
    +
    +func (rr *RFC3597) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	if l.token != "\\#" {
    +		return &ParseError{"", "bad RFC3597 Rdata", l}
    +	}
    +
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	rdlength, e := strconv.Atoi(l.token)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad RFC3597 Rdata ", l}
    +	}
    +
    +	s, e1 := endingToString(c, "bad RFC3597 Rdata")
    +	if e1 != nil {
    +		return e1
    +	}
    +	if rdlength*2 != len(s) {
    +		return &ParseError{"", "bad RFC3597 Rdata", l}
    +	}
    +	rr.Rdata = s
    +	return nil
    +}
    +
    +func (rr *SPF) parse(c *zlexer, o string) *ParseError {
    +	s, e := endingToTxtSlice(c, "bad SPF Txt")
    +	if e != nil {
    +		return e
    +	}
    +	rr.Txt = s
    +	return nil
    +}
    +
    +func (rr *AVC) parse(c *zlexer, o string) *ParseError {
    +	s, e := endingToTxtSlice(c, "bad AVC Txt")
    +	if e != nil {
    +		return e
    +	}
    +	rr.Txt = s
    +	return nil
    +}
    +
    +func (rr *TXT) parse(c *zlexer, o string) *ParseError {
    +	// no zBlank reading here, because all this rdata is TXT
    +	s, e := endingToTxtSlice(c, "bad TXT Txt")
    +	if e != nil {
    +		return e
    +	}
    +	rr.Txt = s
    +	return nil
    +}
    +
+// identical to the TXT parser above
    +func (rr *NINFO) parse(c *zlexer, o string) *ParseError {
    +	s, e := endingToTxtSlice(c, "bad NINFO ZSData")
    +	if e != nil {
    +		return e
    +	}
    +	rr.ZSData = s
    +	return nil
    +}
    +
    +func (rr *URI) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad URI Priority", l}
    +	}
    +	rr.Priority = uint16(i)
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	i, e = strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad URI Weight", l}
    +	}
    +	rr.Weight = uint16(i)
    +
    +	c.Next() // zBlank
    +	s, err := endingToTxtSlice(c, "bad URI Target")
    +	if err != nil {
    +		return err
    +	}
    +	if len(s) != 1 {
    +		return &ParseError{"", "bad URI Target", l}
    +	}
    +	rr.Target = s[0]
    +	return nil
    +}
    +
    +func (rr *DHCID) parse(c *zlexer, o string) *ParseError {
    +	// awesome record to parse!
    +	s, e := endingToString(c, "bad DHCID Digest")
    +	if e != nil {
    +		return e
    +	}
    +	rr.Digest = s
    +	return nil
    +}
    +
    +func (rr *NID) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad NID Preference", l}
    +	}
    +	rr.Preference = uint16(i)
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	u, err := stringToNodeID(l)
    +	if err != nil || l.err {
    +		return err
    +	}
    +	rr.NodeID = u
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *L32) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad L32 Preference", l}
    +	}
    +	rr.Preference = uint16(i)
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	rr.Locator32 = net.ParseIP(l.token)
    +	if rr.Locator32 == nil || l.err {
    +		return &ParseError{"", "bad L32 Locator", l}
    +	}
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *LP) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad LP Preference", l}
    +	}
    +	rr.Preference = uint16(i)
    +
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	rr.Fqdn = l.token
    +	name, nameOk := toAbsoluteName(l.token, o)
    +	if l.err || !nameOk {
    +		return &ParseError{"", "bad LP Fqdn", l}
    +	}
    +	rr.Fqdn = name
    +
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *L64) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad L64 Preference", l}
    +	}
    +	rr.Preference = uint16(i)
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	u, err := stringToNodeID(l)
    +	if err != nil || l.err {
    +		return err
    +	}
    +	rr.Locator64 = u
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *UID) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 32)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad UID Uid", l}
    +	}
    +	rr.Uid = uint32(i)
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *GID) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 32)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad GID Gid", l}
    +	}
    +	rr.Gid = uint32(i)
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *UINFO) parse(c *zlexer, o string) *ParseError {
    +	s, e := endingToTxtSlice(c, "bad UINFO Uinfo")
    +	if e != nil {
    +		return e
    +	}
    +	if ln := len(s); ln == 0 {
    +		return nil
    +	}
    +	rr.Uinfo = s[0] // silently discard anything after the first character-string
    +	return nil
    +}
    +
    +func (rr *PX) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, e := strconv.ParseUint(l.token, 10, 16)
    +	if e != nil || l.err {
    +		return &ParseError{"", "bad PX Preference", l}
    +	}
    +	rr.Preference = uint16(i)
    +
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	rr.Map822 = l.token
    +	map822, map822Ok := toAbsoluteName(l.token, o)
    +	if l.err || !map822Ok {
    +		return &ParseError{"", "bad PX Map822", l}
    +	}
    +	rr.Map822 = map822
    +
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	rr.Mapx400 = l.token
    +	mapx400, mapx400Ok := toAbsoluteName(l.token, o)
    +	if l.err || !mapx400Ok {
    +		return &ParseError{"", "bad PX Mapx400", l}
    +	}
    +	rr.Mapx400 = mapx400
    +
    +	return slurpRemainder(c)
    +}
    +
    +func (rr *CAA) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +	i, err := strconv.ParseUint(l.token, 10, 8)
    +	if err != nil || l.err {
    +		return &ParseError{"", "bad CAA Flag", l}
    +	}
    +	rr.Flag = uint8(i)
    +
    +	c.Next()        // zBlank
    +	l, _ = c.Next() // zString
    +	if l.value != zString {
    +		return &ParseError{"", "bad CAA Tag", l}
    +	}
    +	rr.Tag = l.token
    +
    +	c.Next() // zBlank
    +	s, e := endingToTxtSlice(c, "bad CAA Value")
    +	if e != nil {
    +		return e
    +	}
    +	if len(s) != 1 {
    +		return &ParseError{"", "bad CAA Value", l}
    +	}
    +	rr.Value = s[0]
    +	return nil
    +}
    +
    +func (rr *TKEY) parse(c *zlexer, o string) *ParseError {
    +	l, _ := c.Next()
    +
    +	// Algorithm
    +	if l.value != zString {
    +		return &ParseError{"", "bad TKEY algorithm", l}
    +	}
    +	rr.Algorithm = l.token
    +	c.Next() // zBlank
    +
    +	// Get the key length and key values
    +	l, _ = c.Next()
    +	i, err := strconv.ParseUint(l.token, 10, 8)
    +	if err != nil || l.err {
    +		return &ParseError{"", "bad TKEY key length", l}
    +	}
    +	rr.KeySize = uint16(i)
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	if l.value != zString {
    +		return &ParseError{"", "bad TKEY key", l}
    +	}
    +	rr.Key = l.token
    +	c.Next() // zBlank
    +
    +	// Get the otherdata length and string data
    +	l, _ = c.Next()
    +	i, err = strconv.ParseUint(l.token, 10, 8)
    +	if err != nil || l.err {
    +		return &ParseError{"", "bad TKEY otherdata length", l}
    +	}
    +	rr.OtherLen = uint16(i)
    +	c.Next() // zBlank
    +	l, _ = c.Next()
    +	if l.value != zString {
+		return &ParseError{"", "bad TKEY otherdata", l}
    +	}
    +	rr.OtherData = l.token
    +
    +	return nil
    +}
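
The parse methods above are the zone-file (presentation format) parsers for the corresponding RR types; applications normally reach them through the package's NewRR entry point rather than calling them directly. A minimal sketch of how the CAA parser added here ends up being exercised (the record contents are illustrative only):

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// NewRR parses a record in presentation format; for a CAA record the
	// work is done by the (*CAA).parse method above.
	rr, err := dns.NewRR(`example.org. 3600 IN CAA 0 issue "letsencrypt.org"`)
	if err != nil {
		log.Fatal(err)
	}
	caa := rr.(*dns.CAA)
	fmt.Println(caa.Flag, caa.Tag, caa.Value) // 0 issue letsencrypt.org
}
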
    diff --git a/vendor/github.com/miekg/dns/serve_mux.go b/vendor/github.com/miekg/dns/serve_mux.go
    new file mode 100644
    index 000000000000..ae304db530a5
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/serve_mux.go
    @@ -0,0 +1,147 @@
    +package dns
    +
    +import (
    +	"strings"
    +	"sync"
    +)
    +
+// ServeMux is a DNS request multiplexer. It matches the zone name of
+// each incoming request against a list of registered patterns and calls
+// the handler for the pattern that most closely matches the zone name.
    +//
    +// ServeMux is DNSSEC aware, meaning that queries for the DS record are
    +// redirected to the parent zone (if that is also registered), otherwise
    +// the child gets the query.
    +//
    +// ServeMux is also safe for concurrent access from multiple goroutines.
    +//
    +// The zero ServeMux is empty and ready for use.
    +type ServeMux struct {
    +	z map[string]Handler
    +	m sync.RWMutex
    +}
    +
    +// NewServeMux allocates and returns a new ServeMux.
    +func NewServeMux() *ServeMux {
    +	return new(ServeMux)
    +}
    +
    +// DefaultServeMux is the default ServeMux used by Serve.
    +var DefaultServeMux = NewServeMux()
    +
    +func (mux *ServeMux) match(q string, t uint16) Handler {
    +	mux.m.RLock()
    +	defer mux.m.RUnlock()
    +	if mux.z == nil {
    +		return nil
    +	}
    +
    +	var handler Handler
    +
    +	// TODO(tmthrgd): Once https://go-review.googlesource.com/c/go/+/137575
    +	// lands in a go release, replace the following with strings.ToLower.
    +	var sb strings.Builder
    +	for i := 0; i < len(q); i++ {
    +		c := q[i]
    +		if !(c >= 'A' && c <= 'Z') {
    +			continue
    +		}
    +
    +		sb.Grow(len(q))
    +		sb.WriteString(q[:i])
    +
    +		for ; i < len(q); i++ {
    +			c := q[i]
    +			if c >= 'A' && c <= 'Z' {
    +				c += 'a' - 'A'
    +			}
    +
    +			sb.WriteByte(c)
    +		}
    +
    +		q = sb.String()
    +		break
    +	}
    +
    +	for off, end := 0, false; !end; off, end = NextLabel(q, off) {
    +		if h, ok := mux.z[q[off:]]; ok {
    +			if t != TypeDS {
    +				return h
    +			}
    +			// Continue for DS to see if we have a parent too, if so delegate to the parent
    +			handler = h
    +		}
    +	}
    +
    +	// Wildcard match, if we have found nothing try the root zone as a last resort.
    +	if h, ok := mux.z["."]; ok {
    +		return h
    +	}
    +
    +	return handler
    +}
    +
    +// Handle adds a handler to the ServeMux for pattern.
    +func (mux *ServeMux) Handle(pattern string, handler Handler) {
    +	if pattern == "" {
    +		panic("dns: invalid pattern " + pattern)
    +	}
    +	mux.m.Lock()
    +	if mux.z == nil {
    +		mux.z = make(map[string]Handler)
    +	}
    +	mux.z[Fqdn(pattern)] = handler
    +	mux.m.Unlock()
    +}
    +
    +// HandleFunc adds a handler function to the ServeMux for pattern.
    +func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
    +	mux.Handle(pattern, HandlerFunc(handler))
    +}
    +
    +// HandleRemove deregisters the handler specific for pattern from the ServeMux.
    +func (mux *ServeMux) HandleRemove(pattern string) {
    +	if pattern == "" {
    +		panic("dns: invalid pattern " + pattern)
    +	}
    +	mux.m.Lock()
    +	delete(mux.z, Fqdn(pattern))
    +	mux.m.Unlock()
    +}
    +
    +// ServeDNS dispatches the request to the handler whose pattern most
    +// closely matches the request message.
    +//
    +// ServeDNS is DNSSEC aware, meaning that queries for the DS record
    +// are redirected to the parent zone (if that is also registered),
    +// otherwise the child gets the query.
    +//
+// If no handler is found, or there is no question, a standard SERVFAIL
+// message is returned.
    +func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) {
    +	var h Handler
    +	if len(req.Question) >= 1 { // allow more than one question
    +		h = mux.match(req.Question[0].Name, req.Question[0].Qtype)
    +	}
    +
    +	if h != nil {
    +		h.ServeDNS(w, req)
    +	} else {
    +		HandleFailed(w, req)
    +	}
    +}
    +
    +// Handle registers the handler with the given pattern
    +// in the DefaultServeMux. The documentation for
    +// ServeMux explains how patterns are matched.
    +func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
    +
+// HandleRemove deregisters the handler with the given pattern
+// in the DefaultServeMux.
    +func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
    +
    +// HandleFunc registers the handler function with the given pattern
    +// in the DefaultServeMux.
    +func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
    +	DefaultServeMux.HandleFunc(pattern, handler)
    +}
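
A minimal sketch of registering a zone handler on the package-level DefaultServeMux described above; the zone name, listen address, and reply contents are illustrative only:

package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Queries for example.org. (and names below it) are routed to this handler.
	dns.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r) // empty NOERROR answer, just to show the flow
		w.WriteMsg(m)
	})

	// A nil Handler makes the server fall back to DefaultServeMux.
	log.Fatal(dns.ListenAndServe(":8053", "udp", nil))
}
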
    diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go
    new file mode 100644
    index 000000000000..3cf1a0240112
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/server.go
    @@ -0,0 +1,764 @@
    +// DNS server implementation.
    +
    +package dns
    +
    +import (
    +	"context"
    +	"crypto/tls"
    +	"encoding/binary"
    +	"errors"
    +	"io"
    +	"net"
    +	"strings"
    +	"sync"
    +	"time"
    +)
    +
    +// Default maximum number of TCP queries before we close the socket.
    +const maxTCPQueries = 128
    +
    +// aLongTimeAgo is a non-zero time, far in the past, used for
    +// immediate cancelation of network operations.
    +var aLongTimeAgo = time.Unix(1, 0)
    +
    +// Handler is implemented by any value that implements ServeDNS.
    +type Handler interface {
    +	ServeDNS(w ResponseWriter, r *Msg)
    +}
    +
    +// The HandlerFunc type is an adapter to allow the use of
    +// ordinary functions as DNS handlers.  If f is a function
    +// with the appropriate signature, HandlerFunc(f) is a
    +// Handler object that calls f.
    +type HandlerFunc func(ResponseWriter, *Msg)
    +
    +// ServeDNS calls f(w, r).
    +func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
    +	f(w, r)
    +}
    +
+// A ResponseWriter interface is used by a DNS handler to
+// construct a DNS response.
    +type ResponseWriter interface {
    +	// LocalAddr returns the net.Addr of the server
    +	LocalAddr() net.Addr
    +	// RemoteAddr returns the net.Addr of the client that sent the current request.
    +	RemoteAddr() net.Addr
    +	// WriteMsg writes a reply back to the client.
    +	WriteMsg(*Msg) error
    +	// Write writes a raw buffer back to the client.
    +	Write([]byte) (int, error)
    +	// Close closes the connection.
    +	Close() error
    +	// TsigStatus returns the status of the Tsig.
    +	TsigStatus() error
    +	// TsigTimersOnly sets the tsig timers only boolean.
    +	TsigTimersOnly(bool)
    +	// Hijack lets the caller take over the connection.
    +	// After a call to Hijack(), the DNS package will not do anything with the connection.
    +	Hijack()
    +}
    +
    +// A ConnectionStater interface is used by a DNS Handler to access TLS connection state
    +// when available.
    +type ConnectionStater interface {
    +	ConnectionState() *tls.ConnectionState
    +}
    +
    +type response struct {
    +	closed         bool // connection has been closed
    +	hijacked       bool // connection has been hijacked by handler
    +	tsigTimersOnly bool
    +	tsigStatus     error
    +	tsigRequestMAC string
    +	tsigSecret     map[string]string // the tsig secrets
    +	udp            *net.UDPConn      // i/o connection if UDP was used
    +	tcp            net.Conn          // i/o connection if TCP was used
    +	udpSession     *SessionUDP       // oob data to get egress interface right
    +	writer         Writer            // writer to output the raw DNS bits
    +}
    +
+// HandleFailed is a HandlerFunc that writes a SERVFAIL reply for every request it gets.
    +func HandleFailed(w ResponseWriter, r *Msg) {
    +	m := new(Msg)
    +	m.SetRcode(r, RcodeServerFailure)
    +	// does not matter if this write fails
    +	w.WriteMsg(m)
    +}
    +
+// ListenAndServe starts a server on the specified address and network and
+// invokes handler for incoming queries.
    +func ListenAndServe(addr string, network string, handler Handler) error {
    +	server := &Server{Addr: addr, Net: network, Handler: handler}
    +	return server.ListenAndServe()
    +}
    +
    +// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in
    +// http://golang.org/pkg/net/http/#ListenAndServeTLS
    +func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
    +	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
    +	if err != nil {
    +		return err
    +	}
    +
    +	config := tls.Config{
    +		Certificates: []tls.Certificate{cert},
    +	}
    +
    +	server := &Server{
    +		Addr:      addr,
    +		Net:       "tcp-tls",
    +		TLSConfig: &config,
    +		Handler:   handler,
    +	}
    +
    +	return server.ListenAndServe()
    +}
    +
+// ActivateAndServe activates a server with a listener from systemd;
+// l and p should not both be non-nil. If both are non-nil, only p is used.
+// The handler is invoked for incoming queries.
    +func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
    +	server := &Server{Listener: l, PacketConn: p, Handler: handler}
    +	return server.ActivateAndServe()
    +}
    +
    +// Writer writes raw DNS messages; each call to Write should send an entire message.
    +type Writer interface {
    +	io.Writer
    +}
    +
    +// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message.
    +type Reader interface {
    +	// ReadTCP reads a raw message from a TCP connection. Implementations may alter
    +	// connection properties, for example the read-deadline.
    +	ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
    +	// ReadUDP reads a raw message from a UDP connection. Implementations may alter
    +	// connection properties, for example the read-deadline.
    +	ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
    +}
    +
    +// defaultReader is an adapter for the Server struct that implements the Reader interface
    +// using the readTCP and readUDP func of the embedded Server.
    +type defaultReader struct {
    +	*Server
    +}
    +
    +func (dr defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
    +	return dr.readTCP(conn, timeout)
    +}
    +
    +func (dr defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
    +	return dr.readUDP(conn, timeout)
    +}
    +
    +// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader.
    +// Implementations should never return a nil Reader.
    +type DecorateReader func(Reader) Reader
    +
    +// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer.
    +// Implementations should never return a nil Writer.
    +type DecorateWriter func(Writer) Writer
    +
+// A Server defines parameters for running a DNS server.
    +type Server struct {
    +	// Address to listen on, ":dns" if empty.
    +	Addr string
+	// if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise a UDP one
    +	Net string
    +	// TCP Listener to use, this is to aid in systemd's socket activation.
    +	Listener net.Listener
    +	// TLS connection configuration
    +	TLSConfig *tls.Config
    +	// UDP "Listener" to use, this is to aid in systemd's socket activation.
    +	PacketConn net.PacketConn
    +	// Handler to invoke, dns.DefaultServeMux if nil.
    +	Handler Handler
    +	// Default buffer size to use to read incoming UDP messages. If not set
    +	// it defaults to MinMsgSize (512 B).
    +	UDPSize int
    +	// The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second.
    +	ReadTimeout time.Duration
    +	// The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second.
    +	WriteTimeout time.Duration
+	// TCP idle timeout for multiple queries; if nil, it defaults to 8 * time.Second (RFC 5966).
    +	IdleTimeout func() time.Duration
+	// Secret(s) for Tsig map[<zonename>]<base64 secret>. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2).
    +	TsigSecret map[string]string
    +	// If NotifyStartedFunc is set it is called once the server has started listening.
    +	NotifyStartedFunc func()
    +	// DecorateReader is optional, allows customization of the process that reads raw DNS messages.
    +	DecorateReader DecorateReader
    +	// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
    +	DecorateWriter DecorateWriter
    +	// Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1).
    +	MaxTCPQueries int
    +	// Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address.
    +	// It is only supported on go1.11+ and when using ListenAndServe.
    +	ReusePort bool
    +	// AcceptMsgFunc will check the incoming message and will reject it early in the process.
    +	// By default DefaultMsgAcceptFunc will be used.
    +	MsgAcceptFunc MsgAcceptFunc
    +
    +	// Shutdown handling
    +	lock     sync.RWMutex
    +	started  bool
    +	shutdown chan struct{}
    +	conns    map[net.Conn]struct{}
    +
    +	// A pool for UDP message buffers.
    +	udpPool sync.Pool
    +}
    +
    +func (srv *Server) isStarted() bool {
    +	srv.lock.RLock()
    +	started := srv.started
    +	srv.lock.RUnlock()
    +	return started
    +}
    +
    +func makeUDPBuffer(size int) func() interface{} {
    +	return func() interface{} {
    +		return make([]byte, size)
    +	}
    +}
    +
    +func (srv *Server) init() {
    +	srv.shutdown = make(chan struct{})
    +	srv.conns = make(map[net.Conn]struct{})
    +
    +	if srv.UDPSize == 0 {
    +		srv.UDPSize = MinMsgSize
    +	}
    +	if srv.MsgAcceptFunc == nil {
    +		srv.MsgAcceptFunc = DefaultMsgAcceptFunc
    +	}
    +	if srv.Handler == nil {
    +		srv.Handler = DefaultServeMux
    +	}
    +
    +	srv.udpPool.New = makeUDPBuffer(srv.UDPSize)
    +}
    +
    +func unlockOnce(l sync.Locker) func() {
    +	var once sync.Once
    +	return func() { once.Do(l.Unlock) }
    +}
    +
    +// ListenAndServe starts a nameserver on the configured address in *Server.
    +func (srv *Server) ListenAndServe() error {
    +	unlock := unlockOnce(&srv.lock)
    +	srv.lock.Lock()
    +	defer unlock()
    +
    +	if srv.started {
    +		return &Error{err: "server already started"}
    +	}
    +
    +	addr := srv.Addr
    +	if addr == "" {
    +		addr = ":domain"
    +	}
    +
    +	srv.init()
    +
    +	switch srv.Net {
    +	case "tcp", "tcp4", "tcp6":
    +		l, err := listenTCP(srv.Net, addr, srv.ReusePort)
    +		if err != nil {
    +			return err
    +		}
    +		srv.Listener = l
    +		srv.started = true
    +		unlock()
    +		return srv.serveTCP(l)
    +	case "tcp-tls", "tcp4-tls", "tcp6-tls":
    +		if srv.TLSConfig == nil || (len(srv.TLSConfig.Certificates) == 0 && srv.TLSConfig.GetCertificate == nil) {
    +			return errors.New("dns: neither Certificates nor GetCertificate set in Config")
    +		}
    +		network := strings.TrimSuffix(srv.Net, "-tls")
    +		l, err := listenTCP(network, addr, srv.ReusePort)
    +		if err != nil {
    +			return err
    +		}
    +		l = tls.NewListener(l, srv.TLSConfig)
    +		srv.Listener = l
    +		srv.started = true
    +		unlock()
    +		return srv.serveTCP(l)
    +	case "udp", "udp4", "udp6":
    +		l, err := listenUDP(srv.Net, addr, srv.ReusePort)
    +		if err != nil {
    +			return err
    +		}
    +		u := l.(*net.UDPConn)
    +		if e := setUDPSocketOptions(u); e != nil {
    +			return e
    +		}
    +		srv.PacketConn = l
    +		srv.started = true
    +		unlock()
    +		return srv.serveUDP(u)
    +	}
    +	return &Error{err: "bad network"}
    +}
    +
    +// ActivateAndServe starts a nameserver with the PacketConn or Listener
    +// configured in *Server. Its main use is to start a server from systemd.
    +func (srv *Server) ActivateAndServe() error {
    +	unlock := unlockOnce(&srv.lock)
    +	srv.lock.Lock()
    +	defer unlock()
    +
    +	if srv.started {
    +		return &Error{err: "server already started"}
    +	}
    +
    +	srv.init()
    +
    +	pConn := srv.PacketConn
    +	l := srv.Listener
    +	if pConn != nil {
+		// Check that the PacketConn's concrete type is valid and
+		// its value is not nil.
    +		if t, ok := pConn.(*net.UDPConn); ok && t != nil {
    +			if e := setUDPSocketOptions(t); e != nil {
    +				return e
    +			}
    +			srv.started = true
    +			unlock()
    +			return srv.serveUDP(t)
    +		}
    +	}
    +	if l != nil {
    +		srv.started = true
    +		unlock()
    +		return srv.serveTCP(l)
    +	}
    +	return &Error{err: "bad listeners"}
    +}
    +
    +// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and
    +// ActivateAndServe will return.
    +func (srv *Server) Shutdown() error {
    +	return srv.ShutdownContext(context.Background())
    +}
    +
    +// ShutdownContext shuts down a server. After a call to ShutdownContext,
    +// ListenAndServe and ActivateAndServe will return.
    +//
    +// A context.Context may be passed to limit how long to wait for connections
    +// to terminate.
    +func (srv *Server) ShutdownContext(ctx context.Context) error {
    +	srv.lock.Lock()
    +	if !srv.started {
    +		srv.lock.Unlock()
    +		return &Error{err: "server not started"}
    +	}
    +
    +	srv.started = false
    +
    +	if srv.PacketConn != nil {
    +		srv.PacketConn.SetReadDeadline(aLongTimeAgo) // Unblock reads
    +	}
    +
    +	if srv.Listener != nil {
    +		srv.Listener.Close()
    +	}
    +
    +	for rw := range srv.conns {
    +		rw.SetReadDeadline(aLongTimeAgo) // Unblock reads
    +	}
    +
    +	srv.lock.Unlock()
    +
    +	if testShutdownNotify != nil {
    +		testShutdownNotify.Broadcast()
    +	}
    +
    +	var ctxErr error
    +	select {
    +	case <-srv.shutdown:
    +	case <-ctx.Done():
    +		ctxErr = ctx.Err()
    +	}
    +
    +	if srv.PacketConn != nil {
    +		srv.PacketConn.Close()
    +	}
    +
    +	return ctxErr
    +}
    +
    +var testShutdownNotify *sync.Cond
    +
+// getReadTimeout is a helper func that returns the default timeout if the server did not intend to change it.
    +func (srv *Server) getReadTimeout() time.Duration {
    +	if srv.ReadTimeout != 0 {
    +		return srv.ReadTimeout
    +	}
    +	return dnsTimeout
    +}
    +
    +// serveTCP starts a TCP listener for the server.
    +func (srv *Server) serveTCP(l net.Listener) error {
    +	defer l.Close()
    +
    +	if srv.NotifyStartedFunc != nil {
    +		srv.NotifyStartedFunc()
    +	}
    +
    +	var wg sync.WaitGroup
    +	defer func() {
    +		wg.Wait()
    +		close(srv.shutdown)
    +	}()
    +
    +	for srv.isStarted() {
    +		rw, err := l.Accept()
    +		if err != nil {
    +			if !srv.isStarted() {
    +				return nil
    +			}
    +			if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
    +				continue
    +			}
    +			return err
    +		}
    +		srv.lock.Lock()
    +		// Track the connection to allow unblocking reads on shutdown.
    +		srv.conns[rw] = struct{}{}
    +		srv.lock.Unlock()
    +		wg.Add(1)
    +		go srv.serveTCPConn(&wg, rw)
    +	}
    +
    +	return nil
    +}
    +
    +// serveUDP starts a UDP listener for the server.
    +func (srv *Server) serveUDP(l *net.UDPConn) error {
    +	defer l.Close()
    +
    +	if srv.NotifyStartedFunc != nil {
    +		srv.NotifyStartedFunc()
    +	}
    +
    +	reader := Reader(defaultReader{srv})
    +	if srv.DecorateReader != nil {
    +		reader = srv.DecorateReader(reader)
    +	}
    +
    +	var wg sync.WaitGroup
    +	defer func() {
    +		wg.Wait()
    +		close(srv.shutdown)
    +	}()
    +
    +	rtimeout := srv.getReadTimeout()
    +	// deadline is not used here
    +	for srv.isStarted() {
    +		m, s, err := reader.ReadUDP(l, rtimeout)
    +		if err != nil {
    +			if !srv.isStarted() {
    +				return nil
    +			}
    +			if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
    +				continue
    +			}
    +			return err
    +		}
    +		if len(m) < headerSize {
    +			if cap(m) == srv.UDPSize {
    +				srv.udpPool.Put(m[:srv.UDPSize])
    +			}
    +			continue
    +		}
    +		wg.Add(1)
    +		go srv.serveUDPPacket(&wg, m, l, s)
    +	}
    +
    +	return nil
    +}
    +
    +// Serve a new TCP connection.
    +func (srv *Server) serveTCPConn(wg *sync.WaitGroup, rw net.Conn) {
    +	w := &response{tsigSecret: srv.TsigSecret, tcp: rw}
    +	if srv.DecorateWriter != nil {
    +		w.writer = srv.DecorateWriter(w)
    +	} else {
    +		w.writer = w
    +	}
    +
    +	reader := Reader(defaultReader{srv})
    +	if srv.DecorateReader != nil {
    +		reader = srv.DecorateReader(reader)
    +	}
    +
    +	idleTimeout := tcpIdleTimeout
    +	if srv.IdleTimeout != nil {
    +		idleTimeout = srv.IdleTimeout()
    +	}
    +
    +	timeout := srv.getReadTimeout()
    +
    +	limit := srv.MaxTCPQueries
    +	if limit == 0 {
    +		limit = maxTCPQueries
    +	}
    +
    +	for q := 0; (q < limit || limit == -1) && srv.isStarted(); q++ {
    +		m, err := reader.ReadTCP(w.tcp, timeout)
    +		if err != nil {
    +			// TODO(tmthrgd): handle error
    +			break
    +		}
    +		srv.serveDNS(m, w)
    +		if w.closed {
    +			break // Close() was called
    +		}
    +		if w.hijacked {
    +			break // client will call Close() themselves
    +		}
    +		// The first read uses the read timeout, the rest use the
    +		// idle timeout.
    +		timeout = idleTimeout
    +	}
    +
    +	if !w.hijacked {
    +		w.Close()
    +	}
    +
    +	srv.lock.Lock()
    +	delete(srv.conns, w.tcp)
    +	srv.lock.Unlock()
    +
    +	wg.Done()
    +}
    +
    +// Serve a new UDP request.
    +func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u *net.UDPConn, s *SessionUDP) {
    +	w := &response{tsigSecret: srv.TsigSecret, udp: u, udpSession: s}
    +	if srv.DecorateWriter != nil {
    +		w.writer = srv.DecorateWriter(w)
    +	} else {
    +		w.writer = w
    +	}
    +
    +	srv.serveDNS(m, w)
    +	wg.Done()
    +}
    +
    +func (srv *Server) serveDNS(m []byte, w *response) {
    +	dh, off, err := unpackMsgHdr(m, 0)
    +	if err != nil {
    +		// Let client hang, they are sending crap; any reply can be used to amplify.
    +		return
    +	}
    +
    +	req := new(Msg)
    +	req.setHdr(dh)
    +
    +	switch action := srv.MsgAcceptFunc(dh); action {
    +	case MsgAccept:
    +		if req.unpack(dh, m, off) == nil {
    +			break
    +		}
    +
    +		fallthrough
    +	case MsgReject, MsgRejectNotImplemented:
    +		opcode := req.Opcode
    +		req.SetRcodeFormatError(req)
    +		req.Zero = false
    +		if action == MsgRejectNotImplemented {
    +			req.Opcode = opcode
    +			req.Rcode = RcodeNotImplemented
    +		}
    +
    +		// Are we allowed to delete any OPT records here?
    +		req.Ns, req.Answer, req.Extra = nil, nil, nil
    +
    +		w.WriteMsg(req)
    +		fallthrough
    +	case MsgIgnore:
    +		if w.udp != nil && cap(m) == srv.UDPSize {
    +			srv.udpPool.Put(m[:srv.UDPSize])
    +		}
    +
    +		return
    +	}
    +
    +	w.tsigStatus = nil
    +	if w.tsigSecret != nil {
    +		if t := req.IsTsig(); t != nil {
    +			if secret, ok := w.tsigSecret[t.Hdr.Name]; ok {
    +				w.tsigStatus = TsigVerify(m, secret, "", false)
    +			} else {
    +				w.tsigStatus = ErrSecret
    +			}
    +			w.tsigTimersOnly = false
    +			w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC
    +		}
    +	}
    +
    +	if w.udp != nil && cap(m) == srv.UDPSize {
    +		srv.udpPool.Put(m[:srv.UDPSize])
    +	}
    +
    +	srv.Handler.ServeDNS(w, req) // Writes back to the client
    +}
    +
    +func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
    +	// If we race with ShutdownContext, the read deadline may
    +	// have been set in the distant past to unblock the read
    +	// below. We must not override it, otherwise we may block
    +	// ShutdownContext.
    +	srv.lock.RLock()
    +	if srv.started {
    +		conn.SetReadDeadline(time.Now().Add(timeout))
    +	}
    +	srv.lock.RUnlock()
    +
    +	var length uint16
    +	if err := binary.Read(conn, binary.BigEndian, &length); err != nil {
    +		return nil, err
    +	}
    +
    +	m := make([]byte, length)
    +	if _, err := io.ReadFull(conn, m); err != nil {
    +		return nil, err
    +	}
    +
    +	return m, nil
    +}
    +
    +func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
    +	srv.lock.RLock()
    +	if srv.started {
    +		// See the comment in readTCP above.
    +		conn.SetReadDeadline(time.Now().Add(timeout))
    +	}
    +	srv.lock.RUnlock()
    +
    +	m := srv.udpPool.Get().([]byte)
    +	n, s, err := ReadFromSessionUDP(conn, m)
    +	if err != nil {
    +		srv.udpPool.Put(m)
    +		return nil, nil, err
    +	}
    +	m = m[:n]
    +	return m, s, nil
    +}
    +
    +// WriteMsg implements the ResponseWriter.WriteMsg method.
    +func (w *response) WriteMsg(m *Msg) (err error) {
    +	if w.closed {
    +		return &Error{err: "WriteMsg called after Close"}
    +	}
    +
    +	var data []byte
+	if w.tsigSecret != nil { // if no secrets, don't check for the tsig (which is a longer check)
    +		if t := m.IsTsig(); t != nil {
    +			data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly)
    +			if err != nil {
    +				return err
    +			}
    +			_, err = w.writer.Write(data)
    +			return err
    +		}
    +	}
    +	data, err = m.Pack()
    +	if err != nil {
    +		return err
    +	}
    +	_, err = w.writer.Write(data)
    +	return err
    +}
    +
    +// Write implements the ResponseWriter.Write method.
    +func (w *response) Write(m []byte) (int, error) {
    +	if w.closed {
    +		return 0, &Error{err: "Write called after Close"}
    +	}
    +
    +	switch {
    +	case w.udp != nil:
    +		return WriteToSessionUDP(w.udp, m, w.udpSession)
    +	case w.tcp != nil:
    +		if len(m) > MaxMsgSize {
    +			return 0, &Error{err: "message too large"}
    +		}
    +
    +		l := make([]byte, 2)
    +		binary.BigEndian.PutUint16(l, uint16(len(m)))
    +
    +		n, err := (&net.Buffers{l, m}).WriteTo(w.tcp)
    +		return int(n), err
    +	default:
    +		panic("dns: internal error: udp and tcp both nil")
    +	}
    +}
    +
    +// LocalAddr implements the ResponseWriter.LocalAddr method.
    +func (w *response) LocalAddr() net.Addr {
    +	switch {
    +	case w.udp != nil:
    +		return w.udp.LocalAddr()
    +	case w.tcp != nil:
    +		return w.tcp.LocalAddr()
    +	default:
    +		panic("dns: internal error: udp and tcp both nil")
    +	}
    +}
    +
    +// RemoteAddr implements the ResponseWriter.RemoteAddr method.
    +func (w *response) RemoteAddr() net.Addr {
    +	switch {
    +	case w.udpSession != nil:
    +		return w.udpSession.RemoteAddr()
    +	case w.tcp != nil:
    +		return w.tcp.RemoteAddr()
    +	default:
    +		panic("dns: internal error: udpSession and tcp both nil")
    +	}
    +}
    +
    +// TsigStatus implements the ResponseWriter.TsigStatus method.
    +func (w *response) TsigStatus() error { return w.tsigStatus }
    +
    +// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method.
    +func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b }
    +
    +// Hijack implements the ResponseWriter.Hijack method.
    +func (w *response) Hijack() { w.hijacked = true }
    +
    +// Close implements the ResponseWriter.Close method
    +func (w *response) Close() error {
    +	if w.closed {
    +		return &Error{err: "connection already closed"}
    +	}
    +	w.closed = true
    +
    +	switch {
    +	case w.udp != nil:
    +		// Can't close the udp conn, as that is actually the listener.
    +		return nil
    +	case w.tcp != nil:
    +		return w.tcp.Close()
    +	default:
    +		panic("dns: internal error: udp and tcp both nil")
    +	}
    +}
    +
+// ConnectionState implements the ConnectionStater.ConnectionState method.
    +func (w *response) ConnectionState() *tls.ConnectionState {
    +	type tlsConnectionStater interface {
    +		ConnectionState() tls.ConnectionState
    +	}
    +	if v, ok := w.tcp.(tlsConnectionStater); ok {
    +		t := v.ConnectionState()
    +		return &t
    +	}
    +	return nil
    +}
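
A minimal sketch of running the Server defined above and stopping it with ShutdownContext; the port, handler body, and timeout are illustrative only:

package main

import (
	"context"
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	srv := &dns.Server{
		Addr: ":8053",
		Net:  "udp",
		Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
			m := new(dns.Msg)
			m.SetReply(r)
			w.WriteMsg(m)
		}),
	}

	go func() {
		if err := srv.ListenAndServe(); err != nil {
			log.Println("serve:", err)
		}
	}()

	// Let the listener come up in this toy example, then give in-flight
	// requests up to five seconds to drain.
	time.Sleep(100 * time.Millisecond)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := srv.ShutdownContext(ctx); err != nil {
		log.Println("shutdown:", err)
	}
}
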
    diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go
    new file mode 100644
    index 000000000000..55cf1c3863ac
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/sig0.go
    @@ -0,0 +1,209 @@
    +package dns
    +
    +import (
    +	"crypto"
    +	"crypto/dsa"
    +	"crypto/ecdsa"
    +	"crypto/rsa"
    +	"encoding/binary"
    +	"math/big"
    +	"strings"
    +	"time"
    +)
    +
    +// Sign signs a dns.Msg. It fills the signature with the appropriate data.
    +// The SIG record should have the SignerName, KeyTag, Algorithm, Inception
    +// and Expiration set.
    +func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
    +	if k == nil {
    +		return nil, ErrPrivKey
    +	}
    +	if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
    +		return nil, ErrKey
    +	}
    +
    +	rr.Hdr = RR_Header{Name: ".", Rrtype: TypeSIG, Class: ClassANY, Ttl: 0}
    +	rr.OrigTtl, rr.TypeCovered, rr.Labels = 0, 0, 0
    +
    +	buf := make([]byte, m.Len()+Len(rr))
    +	mbuf, err := m.PackBuffer(buf)
    +	if err != nil {
    +		return nil, err
    +	}
    +	if &buf[0] != &mbuf[0] {
    +		return nil, ErrBuf
    +	}
    +	off, err := PackRR(rr, buf, len(mbuf), nil, false)
    +	if err != nil {
    +		return nil, err
    +	}
    +	buf = buf[:off:cap(buf)]
    +
    +	hash, ok := AlgorithmToHash[rr.Algorithm]
    +	if !ok {
    +		return nil, ErrAlg
    +	}
    +
    +	hasher := hash.New()
    +	// Write SIG rdata
    +	hasher.Write(buf[len(mbuf)+1+2+2+4+2:])
    +	// Write message
    +	hasher.Write(buf[:len(mbuf)])
    +
    +	signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	rr.Signature = toBase64(signature)
    +
    +	buf = append(buf, signature...)
    +	if len(buf) > int(^uint16(0)) {
    +		return nil, ErrBuf
    +	}
    +	// Adjust sig data length
    +	rdoff := len(mbuf) + 1 + 2 + 2 + 4
    +	rdlen := binary.BigEndian.Uint16(buf[rdoff:])
    +	rdlen += uint16(len(signature))
    +	binary.BigEndian.PutUint16(buf[rdoff:], rdlen)
    +	// Adjust additional count
    +	adc := binary.BigEndian.Uint16(buf[10:])
    +	adc++
    +	binary.BigEndian.PutUint16(buf[10:], adc)
    +	return buf, nil
    +}
    +
    +// Verify validates the message buf using the key k.
    +// It's assumed that buf is a valid message from which rr was unpacked.
    +func (rr *SIG) Verify(k *KEY, buf []byte) error {
    +	if k == nil {
    +		return ErrKey
    +	}
    +	if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
    +		return ErrKey
    +	}
    +
    +	var hash crypto.Hash
    +	switch rr.Algorithm {
    +	case DSA, RSASHA1:
    +		hash = crypto.SHA1
    +	case RSASHA256, ECDSAP256SHA256:
    +		hash = crypto.SHA256
    +	case ECDSAP384SHA384:
    +		hash = crypto.SHA384
    +	case RSASHA512:
    +		hash = crypto.SHA512
    +	default:
    +		return ErrAlg
    +	}
    +	hasher := hash.New()
    +
    +	buflen := len(buf)
    +	qdc := binary.BigEndian.Uint16(buf[4:])
    +	anc := binary.BigEndian.Uint16(buf[6:])
    +	auc := binary.BigEndian.Uint16(buf[8:])
    +	adc := binary.BigEndian.Uint16(buf[10:])
    +	offset := headerSize
    +	var err error
    +	for i := uint16(0); i < qdc && offset < buflen; i++ {
    +		_, offset, err = UnpackDomainName(buf, offset)
    +		if err != nil {
    +			return err
    +		}
    +		// Skip past Type and Class
    +		offset += 2 + 2
    +	}
    +	for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ {
    +		_, offset, err = UnpackDomainName(buf, offset)
    +		if err != nil {
    +			return err
    +		}
    +		// Skip past Type, Class and TTL
    +		offset += 2 + 2 + 4
    +		if offset+1 >= buflen {
    +			continue
    +		}
    +		rdlen := binary.BigEndian.Uint16(buf[offset:])
    +		offset += 2
    +		offset += int(rdlen)
    +	}
    +	if offset >= buflen {
+		return &Error{err: "overflow unpacking signed message"}
    +	}
    +
    +	// offset should be just prior to SIG
    +	bodyend := offset
    +	// owner name SHOULD be root
    +	_, offset, err = UnpackDomainName(buf, offset)
    +	if err != nil {
    +		return err
    +	}
    +	// Skip Type, Class, TTL, RDLen
    +	offset += 2 + 2 + 4 + 2
    +	sigstart := offset
    +	// Skip Type Covered, Algorithm, Labels, Original TTL
    +	offset += 2 + 1 + 1 + 4
    +	if offset+4+4 >= buflen {
    +		return &Error{err: "overflow unpacking signed message"}
    +	}
    +	expire := binary.BigEndian.Uint32(buf[offset:])
    +	offset += 4
    +	incept := binary.BigEndian.Uint32(buf[offset:])
    +	offset += 4
    +	now := uint32(time.Now().Unix())
    +	if now < incept || now > expire {
    +		return ErrTime
    +	}
    +	// Skip key tag
    +	offset += 2
    +	var signername string
    +	signername, offset, err = UnpackDomainName(buf, offset)
    +	if err != nil {
    +		return err
    +	}
+	// If the key has come from the DNS, name compression might
+	// have mangled the case of the name.
    +	if !strings.EqualFold(signername, k.Header().Name) {
    +		return &Error{err: "signer name doesn't match key name"}
    +	}
    +	sigend := offset
    +	hasher.Write(buf[sigstart:sigend])
    +	hasher.Write(buf[:10])
    +	hasher.Write([]byte{
    +		byte((adc - 1) << 8),
    +		byte(adc - 1),
    +	})
    +	hasher.Write(buf[12:bodyend])
    +
    +	hashed := hasher.Sum(nil)
    +	sig := buf[sigend:]
    +	switch k.Algorithm {
    +	case DSA:
    +		pk := k.publicKeyDSA()
    +		sig = sig[1:]
    +		r := new(big.Int).SetBytes(sig[:len(sig)/2])
    +		s := new(big.Int).SetBytes(sig[len(sig)/2:])
    +		if pk != nil {
    +			if dsa.Verify(pk, hashed, r, s) {
    +				return nil
    +			}
    +			return ErrSig
    +		}
    +	case RSASHA1, RSASHA256, RSASHA512:
    +		pk := k.publicKeyRSA()
    +		if pk != nil {
    +			return rsa.VerifyPKCS1v15(pk, hash, hashed, sig)
    +		}
    +	case ECDSAP256SHA256, ECDSAP384SHA384:
    +		pk := k.publicKeyECDSA()
    +		r := new(big.Int).SetBytes(sig[:len(sig)/2])
    +		s := new(big.Int).SetBytes(sig[len(sig)/2:])
    +		if pk != nil {
    +			if ecdsa.Verify(pk, hashed, r, s) {
    +				return nil
    +			}
    +			return ErrSig
    +		}
    +	}
    +	return ErrKeyAlg
    +}
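
A minimal sketch of SIG(0)-signing a query with the Sign method above, using a throwaway KEY generated on the spot; in practice the key pair would be provisioned out of band, and the names and algorithm here are illustrative:

package main

import (
	"crypto"
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.KEY{DNSKEY: dns.DNSKEY{Algorithm: dns.RSASHA256, Flags: 256, Protocol: 3}}
	key.Hdr = dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeKEY, Class: dns.ClassINET}
	priv, err := key.Generate(2048)
	if err != nil {
		log.Fatal(err)
	}

	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeSOA)

	// Per the doc comment, SignerName, KeyTag, Algorithm, Inception and
	// Expiration must be set before calling Sign.
	sig := new(dns.SIG)
	sig.Algorithm = dns.RSASHA256
	sig.SignerName = key.Hdr.Name
	sig.KeyTag = key.KeyTag()
	sig.Inception = uint32(time.Now().Add(-5 * time.Minute).Unix())
	sig.Expiration = uint32(time.Now().Add(5 * time.Minute).Unix())

	wire, err := sig.Sign(priv.(crypto.Signer), m)
	if err != nil {
		log.Fatal(err)
	}
	_ = wire // send the signed wire-format message with the transport of your choice
}
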
    diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go
    new file mode 100644
    index 000000000000..febcc300fe17
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/singleinflight.go
    @@ -0,0 +1,61 @@
    +// Copyright 2013 The Go Authors.  All rights reserved.
    +// Use of this source code is governed by a BSD-style
    +// license that can be found in the LICENSE file.
    +
    +// Adapted for dns package usage by Miek Gieben.
    +
    +package dns
    +
    +import "sync"
    +import "time"
    +
    +// call is an in-flight or completed singleflight.Do call
    +type call struct {
    +	wg   sync.WaitGroup
    +	val  *Msg
    +	rtt  time.Duration
    +	err  error
    +	dups int
    +}
    +
    +// singleflight represents a class of work and forms a namespace in
    +// which units of work can be executed with duplicate suppression.
    +type singleflight struct {
    +	sync.Mutex                  // protects m
    +	m          map[string]*call // lazily initialized
    +
    +	dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges
    +}
    +
    +// Do executes and returns the results of the given function, making
    +// sure that only one execution is in-flight for a given key at a
    +// time. If a duplicate comes in, the duplicate caller waits for the
    +// original to complete and receives the same results.
    +// The return value shared indicates whether v was given to multiple callers.
    +func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
    +	g.Lock()
    +	if g.m == nil {
    +		g.m = make(map[string]*call)
    +	}
    +	if c, ok := g.m[key]; ok {
    +		c.dups++
    +		g.Unlock()
    +		c.wg.Wait()
    +		return c.val, c.rtt, c.err, true
    +	}
    +	c := new(call)
    +	c.wg.Add(1)
    +	g.m[key] = c
    +	g.Unlock()
    +
    +	c.val, c.rtt, c.err = fn()
    +	c.wg.Done()
    +
    +	if !g.dontDeleteForTesting {
    +		g.Lock()
    +		delete(g.m, key)
    +		g.Unlock()
    +	}
    +
    +	return c.val, c.rtt, c.err, c.dups > 0
    +}
    diff --git a/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/miekg/dns/smimea.go
    new file mode 100644
    index 000000000000..89f09f0d10c1
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/smimea.go
    @@ -0,0 +1,44 @@
    +package dns
    +
    +import (
    +	"crypto/sha256"
    +	"crypto/x509"
    +	"encoding/hex"
    +)
    +
    +// Sign creates a SMIMEA record from an SSL certificate.
    +func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
    +	r.Hdr.Rrtype = TypeSMIMEA
    +	r.Usage = uint8(usage)
    +	r.Selector = uint8(selector)
    +	r.MatchingType = uint8(matchingType)
    +
    +	r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
    +	return err
    +}
    +
    +// Verify verifies a SMIMEA record against an SSL certificate. If it is OK
    +// a nil error is returned.
    +func (r *SMIMEA) Verify(cert *x509.Certificate) error {
    +	c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
    +	if err != nil {
    +		return err // Not also ErrSig?
    +	}
    +	if r.Certificate == c {
    +		return nil
    +	}
    +	return ErrSig // ErrSig, really?
    +}
    +
+// SMIMEAName returns the ownername of a SMIMEA resource record as per the
+// format specified in RFC 'draft-ietf-dane-smime-12', Sections 2 and 3.
    +func SMIMEAName(email, domain string) (string, error) {
    +	hasher := sha256.New()
    +	hasher.Write([]byte(email))
    +
    +	// RFC Section 3: "The local-part is hashed using the SHA2-256
    +	// algorithm with the hash truncated to 28 octets and
    +	// represented in its hexadecimal representation to become the
    +	// left-most label in the prepared domain name"
    +	return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil
    +}
    diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go
    new file mode 100644
    index 000000000000..4e07983b9787
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/tlsa.go
    @@ -0,0 +1,44 @@
    +package dns
    +
    +import (
    +	"crypto/x509"
    +	"net"
    +	"strconv"
    +)
    +
    +// Sign creates a TLSA record from an SSL certificate.
    +func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
    +	r.Hdr.Rrtype = TypeTLSA
    +	r.Usage = uint8(usage)
    +	r.Selector = uint8(selector)
    +	r.MatchingType = uint8(matchingType)
    +
    +	r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
    +	return err
    +}
    +
    +// Verify verifies a TLSA record against an SSL certificate. If it is OK
    +// a nil error is returned.
    +func (r *TLSA) Verify(cert *x509.Certificate) error {
    +	c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
    +	if err != nil {
    +		return err // Not also ErrSig?
    +	}
    +	if r.Certificate == c {
    +		return nil
    +	}
    +	return ErrSig // ErrSig, really?
    +}
    +
    +// TLSAName returns the ownername of a TLSA resource record as per the
    +// rules specified in RFC 6698, Section 3.
    +func TLSAName(name, service, network string) (string, error) {
    +	if !IsFqdn(name) {
    +		return "", ErrFqdn
    +	}
    +	p, err := net.LookupPort(network, service)
    +	if err != nil {
    +		return "", err
    +	}
    +	return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil
    +}
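
A minimal sketch of the TLSAName helper above; the host name is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// The owner name must be a fully qualified name (trailing dot).
	name, err := dns.TLSAName("www.example.com.", "https", "tcp")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(name) // _443._tcp.www.example.com.
}
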
    diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go
    new file mode 100644
    index 000000000000..2c4ef03bac36
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/tsig.go
    @@ -0,0 +1,389 @@
    +package dns
    +
    +import (
    +	"crypto/hmac"
    +	"crypto/md5"
    +	"crypto/sha1"
    +	"crypto/sha256"
    +	"crypto/sha512"
    +	"encoding/binary"
    +	"encoding/hex"
    +	"hash"
    +	"strconv"
    +	"strings"
    +	"time"
    +)
    +
    +// HMAC hashing codes. These are transmitted as domain names.
    +const (
    +	HmacMD5    = "hmac-md5.sig-alg.reg.int."
    +	HmacSHA1   = "hmac-sha1."
    +	HmacSHA256 = "hmac-sha256."
    +	HmacSHA512 = "hmac-sha512."
    +)
    +
+// TSIG is the RR that holds the transaction signature of a message.
    +// See RFC 2845 and RFC 4635.
    +type TSIG struct {
    +	Hdr        RR_Header
    +	Algorithm  string `dns:"domain-name"`
    +	TimeSigned uint64 `dns:"uint48"`
    +	Fudge      uint16
    +	MACSize    uint16
    +	MAC        string `dns:"size-hex:MACSize"`
    +	OrigId     uint16
    +	Error      uint16
    +	OtherLen   uint16
    +	OtherData  string `dns:"size-hex:OtherLen"`
    +}
    +
    +// TSIG has no official presentation format, but this will suffice.
    +
    +func (rr *TSIG) String() string {
    +	s := "\n;; TSIG PSEUDOSECTION:\n"
    +	s += rr.Hdr.String() +
    +		" " + rr.Algorithm +
    +		" " + tsigTimeToString(rr.TimeSigned) +
    +		" " + strconv.Itoa(int(rr.Fudge)) +
    +		" " + strconv.Itoa(int(rr.MACSize)) +
    +		" " + strings.ToUpper(rr.MAC) +
    +		" " + strconv.Itoa(int(rr.OrigId)) +
    +		" " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR
    +		" " + strconv.Itoa(int(rr.OtherLen)) +
    +		" " + rr.OtherData
    +	return s
    +}
    +
    +func (rr *TSIG) parse(c *zlexer, origin string) *ParseError {
    +	panic("dns: internal error: parse should never be called on TSIG")
    +}
    +
    +// The following values must be put in wireformat, so that the MAC can be calculated.
    +// RFC 2845, section 3.4.2. TSIG Variables.
    +type tsigWireFmt struct {
    +	// From RR_Header
    +	Name  string `dns:"domain-name"`
    +	Class uint16
    +	Ttl   uint32
    +	// Rdata of the TSIG
    +	Algorithm  string `dns:"domain-name"`
    +	TimeSigned uint64 `dns:"uint48"`
    +	Fudge      uint16
    +	// MACSize, MAC and OrigId excluded
    +	Error     uint16
    +	OtherLen  uint16
    +	OtherData string `dns:"size-hex:OtherLen"`
    +}
    +
    +// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC
    +type macWireFmt struct {
    +	MACSize uint16
    +	MAC     string `dns:"size-hex:MACSize"`
    +}
    +
    +// 3.3. Time values used in TSIG calculations
    +type timerWireFmt struct {
    +	TimeSigned uint64 `dns:"uint48"`
    +	Fudge      uint16
    +}
    +
    +// TsigGenerate fills out the TSIG record attached to the message.
+// The message should contain a "stub" TSIG RR with the algorithm, key name
+// (owner name of the RR), time fudge (defaults to 300 seconds) and the current time.
+// The TSIG MAC is saved in that TSIG RR.
+// When TsigGenerate is called for the first time, requestMAC is set to the empty
+// string and timersOnly is false.
+// If something goes wrong an error is returned, otherwise it is nil.
    +func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) {
    +	if m.IsTsig() == nil {
    +		panic("dns: TSIG not last RR in additional")
    +	}
    +	// If we barf here, the caller is to blame
    +	rawsecret, err := fromBase64([]byte(secret))
    +	if err != nil {
    +		return nil, "", err
    +	}
    +
    +	rr := m.Extra[len(m.Extra)-1].(*TSIG)
    +	m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg
    +	mbuf, err := m.Pack()
    +	if err != nil {
    +		return nil, "", err
    +	}
    +	buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly)
    +
    +	t := new(TSIG)
    +	var h hash.Hash
    +	switch strings.ToLower(rr.Algorithm) {
    +	case HmacMD5:
    +		h = hmac.New(md5.New, rawsecret)
    +	case HmacSHA1:
    +		h = hmac.New(sha1.New, rawsecret)
    +	case HmacSHA256:
    +		h = hmac.New(sha256.New, rawsecret)
    +	case HmacSHA512:
    +		h = hmac.New(sha512.New, rawsecret)
    +	default:
    +		return nil, "", ErrKeyAlg
    +	}
    +	h.Write(buf)
    +	t.MAC = hex.EncodeToString(h.Sum(nil))
    +	t.MACSize = uint16(len(t.MAC) / 2) // Size is half!
    +
    +	t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0}
    +	t.Fudge = rr.Fudge
    +	t.TimeSigned = rr.TimeSigned
    +	t.Algorithm = rr.Algorithm
    +	t.OrigId = m.Id
    +
    +	tbuf := make([]byte, Len(t))
    +	off, err := PackRR(t, tbuf, 0, nil, false)
    +	if err != nil {
    +		return nil, "", err
    +	}
    +	mbuf = append(mbuf, tbuf[:off]...)
    +	// Update the ArCount directly in the buffer.
    +	binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1))
    +
    +	return mbuf, t.MAC, nil
    +}
    +
    +// TsigVerify verifies the TSIG on a message.
    +// If the signature does not validate err contains the
    +// error, otherwise it is nil.
    +func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
    +	rawsecret, err := fromBase64([]byte(secret))
    +	if err != nil {
    +		return err
    +	}
    +	// Strip the TSIG from the incoming msg
    +	stripped, tsig, err := stripTsig(msg)
    +	if err != nil {
    +		return err
    +	}
    +
    +	msgMAC, err := hex.DecodeString(tsig.MAC)
    +	if err != nil {
    +		return err
    +	}
    +
    +	buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly)
    +
    +	// Fudge factor works both ways. A message can arrive before it was signed because
    +	// of clock skew.
    +	now := uint64(time.Now().Unix())
    +	ti := now - tsig.TimeSigned
    +	if now < tsig.TimeSigned {
    +		ti = tsig.TimeSigned - now
    +	}
    +	if uint64(tsig.Fudge) < ti {
    +		return ErrTime
    +	}
    +
    +	var h hash.Hash
    +	switch strings.ToLower(tsig.Algorithm) {
    +	case HmacMD5:
    +		h = hmac.New(md5.New, rawsecret)
    +	case HmacSHA1:
    +		h = hmac.New(sha1.New, rawsecret)
    +	case HmacSHA256:
    +		h = hmac.New(sha256.New, rawsecret)
    +	case HmacSHA512:
    +		h = hmac.New(sha512.New, rawsecret)
    +	default:
    +		return ErrKeyAlg
    +	}
    +	h.Write(buf)
    +	if !hmac.Equal(h.Sum(nil), msgMAC) {
    +		return ErrSig
    +	}
    +	return nil
    +}
    +
    +// Create a wiredata buffer for the MAC calculation.
    +func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte {
    +	var buf []byte
    +	if rr.TimeSigned == 0 {
    +		rr.TimeSigned = uint64(time.Now().Unix())
    +	}
    +	if rr.Fudge == 0 {
    +		rr.Fudge = 300 // Standard (RFC) default.
    +	}
    +
    +	// Replace message ID in header with original ID from TSIG
    +	binary.BigEndian.PutUint16(msgbuf[0:2], rr.OrigId)
    +
    +	if requestMAC != "" {
    +		m := new(macWireFmt)
    +		m.MACSize = uint16(len(requestMAC) / 2)
    +		m.MAC = requestMAC
    +		buf = make([]byte, len(requestMAC)) // long enough
    +		n, _ := packMacWire(m, buf)
    +		buf = buf[:n]
    +	}
    +
    +	tsigvar := make([]byte, DefaultMsgSize)
    +	if timersOnly {
    +		tsig := new(timerWireFmt)
    +		tsig.TimeSigned = rr.TimeSigned
    +		tsig.Fudge = rr.Fudge
    +		n, _ := packTimerWire(tsig, tsigvar)
    +		tsigvar = tsigvar[:n]
    +	} else {
    +		tsig := new(tsigWireFmt)
    +		tsig.Name = strings.ToLower(rr.Hdr.Name)
    +		tsig.Class = ClassANY
    +		tsig.Ttl = rr.Hdr.Ttl
    +		tsig.Algorithm = strings.ToLower(rr.Algorithm)
    +		tsig.TimeSigned = rr.TimeSigned
    +		tsig.Fudge = rr.Fudge
    +		tsig.Error = rr.Error
    +		tsig.OtherLen = rr.OtherLen
    +		tsig.OtherData = rr.OtherData
    +		n, _ := packTsigWire(tsig, tsigvar)
    +		tsigvar = tsigvar[:n]
    +	}
    +
    +	if requestMAC != "" {
    +		x := append(buf, msgbuf...)
    +		buf = append(x, tsigvar...)
    +	} else {
    +		buf = append(msgbuf, tsigvar...)
    +	}
    +	return buf
    +}
    +
    +// Strip the TSIG from the raw message.
    +func stripTsig(msg []byte) ([]byte, *TSIG, error) {
    +	// Copied from msg.go's Unpack() Header, but modified.
    +	var (
    +		dh  Header
    +		err error
    +	)
    +	off, tsigoff := 0, 0
    +
    +	if dh, off, err = unpackMsgHdr(msg, off); err != nil {
    +		return nil, nil, err
    +	}
    +	if dh.Arcount == 0 {
    +		return nil, nil, ErrNoSig
    +	}
    +
    +	// Rcode, see msg.go Unpack()
    +	if int(dh.Bits&0xF) == RcodeNotAuth {
    +		return nil, nil, ErrAuth
    +	}
    +
    +	for i := 0; i < int(dh.Qdcount); i++ {
    +		_, off, err = unpackQuestion(msg, off)
    +		if err != nil {
    +			return nil, nil, err
    +		}
    +	}
    +
    +	_, off, err = unpackRRslice(int(dh.Ancount), msg, off)
    +	if err != nil {
    +		return nil, nil, err
    +	}
    +	_, off, err = unpackRRslice(int(dh.Nscount), msg, off)
    +	if err != nil {
    +		return nil, nil, err
    +	}
    +
    +	rr := new(TSIG)
    +	var extra RR
    +	for i := 0; i < int(dh.Arcount); i++ {
    +		tsigoff = off
    +		extra, off, err = UnpackRR(msg, off)
    +		if err != nil {
    +			return nil, nil, err
    +		}
    +		if extra.Header().Rrtype == TypeTSIG {
    +			rr = extra.(*TSIG)
    +			// Adjust Arcount.
    +			arcount := binary.BigEndian.Uint16(msg[10:])
    +			binary.BigEndian.PutUint16(msg[10:], arcount-1)
    +			break
    +		}
    +	}
    +	if rr == nil {
    +		return nil, nil, ErrNoSig
    +	}
    +	return msg[:tsigoff], rr, nil
    +}
    +
    +// Translate the TSIG time signed into a date. There is no
    +// need for RFC1982 calculations as this date is 48 bits.
    +func tsigTimeToString(t uint64) string {
    +	ti := time.Unix(int64(t), 0).UTC()
    +	return ti.Format("20060102150405")
    +}
    +
    +func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) {
    +	// copied from zmsg.go TSIG packing
    +	// RR_Header
    +	off, err := PackDomainName(tw.Name, msg, 0, nil, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(tw.Class, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(tw.Ttl, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +
    +	off, err = PackDomainName(tw.Algorithm, msg, off, nil, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint48(tw.TimeSigned, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(tw.Fudge, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +
    +	off, err = packUint16(tw.Error, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(tw.OtherLen, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringHex(tw.OtherData, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func packMacWire(mw *macWireFmt, msg []byte) (int, error) {
    +	off, err := packUint16(mw.MACSize, msg, 0)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringHex(mw.MAC, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) {
    +	off, err := packUint48(tw.TimeSigned, msg, 0)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(tw.Fudge, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
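
A minimal sketch of TSIG-signing a query from the client side; TsigGenerate and TsigVerify above are invoked by the client machinery when the message is packed and the reply is checked. The key name, secret, and server address are placeholders:

package main

import (
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	secret := map[string]string{"axfr.example.org.": "c2VjcmV0IGtleSBtYXRlcmlhbA=="} // base64, as with BIND

	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeSOA)
	// Attach the "stub" TSIG RR that TsigGenerate fills in at pack time.
	m.SetTsig("axfr.example.org.", dns.HmacSHA256, 300, time.Now().Unix())

	c := new(dns.Client)
	c.TsigSecret = secret

	in, _, err := c.Exchange(m, "192.0.2.1:53")
	if err != nil {
		log.Fatal(err)
	}
	_ = in
}
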
    diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go
    new file mode 100644
    index 000000000000..6f79038b4a52
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/types.go
    @@ -0,0 +1,1413 @@
    +package dns
    +
    +import (
    +	"fmt"
    +	"net"
    +	"strconv"
    +	"strings"
    +	"time"
    +)
    +
    +type (
    +	// Type is a DNS type.
    +	Type uint16
    +	// Class is a DNS class.
    +	Class uint16
    +	// Name is a DNS domain name.
    +	Name string
    +)
    +
    +// Packet formats
    +
    +// Wire constants and supported types.
    +const (
    +	// valid RR_Header.Rrtype and Question.qtype
    +
    +	TypeNone       uint16 = 0
    +	TypeA          uint16 = 1
    +	TypeNS         uint16 = 2
    +	TypeMD         uint16 = 3
    +	TypeMF         uint16 = 4
    +	TypeCNAME      uint16 = 5
    +	TypeSOA        uint16 = 6
    +	TypeMB         uint16 = 7
    +	TypeMG         uint16 = 8
    +	TypeMR         uint16 = 9
    +	TypeNULL       uint16 = 10
    +	TypePTR        uint16 = 12
    +	TypeHINFO      uint16 = 13
    +	TypeMINFO      uint16 = 14
    +	TypeMX         uint16 = 15
    +	TypeTXT        uint16 = 16
    +	TypeRP         uint16 = 17
    +	TypeAFSDB      uint16 = 18
    +	TypeX25        uint16 = 19
    +	TypeISDN       uint16 = 20
    +	TypeRT         uint16 = 21
    +	TypeNSAPPTR    uint16 = 23
    +	TypeSIG        uint16 = 24
    +	TypeKEY        uint16 = 25
    +	TypePX         uint16 = 26
    +	TypeGPOS       uint16 = 27
    +	TypeAAAA       uint16 = 28
    +	TypeLOC        uint16 = 29
    +	TypeNXT        uint16 = 30
    +	TypeEID        uint16 = 31
    +	TypeNIMLOC     uint16 = 32
    +	TypeSRV        uint16 = 33
    +	TypeATMA       uint16 = 34
    +	TypeNAPTR      uint16 = 35
    +	TypeKX         uint16 = 36
    +	TypeCERT       uint16 = 37
    +	TypeDNAME      uint16 = 39
    +	TypeOPT        uint16 = 41 // EDNS
    +	TypeDS         uint16 = 43
    +	TypeSSHFP      uint16 = 44
    +	TypeRRSIG      uint16 = 46
    +	TypeNSEC       uint16 = 47
    +	TypeDNSKEY     uint16 = 48
    +	TypeDHCID      uint16 = 49
    +	TypeNSEC3      uint16 = 50
    +	TypeNSEC3PARAM uint16 = 51
    +	TypeTLSA       uint16 = 52
    +	TypeSMIMEA     uint16 = 53
    +	TypeHIP        uint16 = 55
    +	TypeNINFO      uint16 = 56
    +	TypeRKEY       uint16 = 57
    +	TypeTALINK     uint16 = 58
    +	TypeCDS        uint16 = 59
    +	TypeCDNSKEY    uint16 = 60
    +	TypeOPENPGPKEY uint16 = 61
    +	TypeCSYNC      uint16 = 62
    +	TypeSPF        uint16 = 99
    +	TypeUINFO      uint16 = 100
    +	TypeUID        uint16 = 101
    +	TypeGID        uint16 = 102
    +	TypeUNSPEC     uint16 = 103
    +	TypeNID        uint16 = 104
    +	TypeL32        uint16 = 105
    +	TypeL64        uint16 = 106
    +	TypeLP         uint16 = 107
    +	TypeEUI48      uint16 = 108
    +	TypeEUI64      uint16 = 109
    +	TypeURI        uint16 = 256
    +	TypeCAA        uint16 = 257
    +	TypeAVC        uint16 = 258
    +
    +	TypeTKEY uint16 = 249
    +	TypeTSIG uint16 = 250
    +
    +	// valid Question.Qtype only
    +	TypeIXFR  uint16 = 251
    +	TypeAXFR  uint16 = 252
    +	TypeMAILB uint16 = 253
    +	TypeMAILA uint16 = 254
    +	TypeANY   uint16 = 255
    +
    +	TypeTA       uint16 = 32768
    +	TypeDLV      uint16 = 32769
    +	TypeReserved uint16 = 65535
    +
    +	// valid Question.Qclass
    +	ClassINET   = 1
    +	ClassCSNET  = 2
    +	ClassCHAOS  = 3
    +	ClassHESIOD = 4
    +	ClassNONE   = 254
    +	ClassANY    = 255
    +
    +	// Message Response Codes, see https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml
    +	RcodeSuccess        = 0  // NoError   - No Error                          [DNS]
    +	RcodeFormatError    = 1  // FormErr   - Format Error                      [DNS]
    +	RcodeServerFailure  = 2  // ServFail  - Server Failure                    [DNS]
    +	RcodeNameError      = 3  // NXDomain  - Non-Existent Domain               [DNS]
    +	RcodeNotImplemented = 4  // NotImp    - Not Implemented                   [DNS]
    +	RcodeRefused        = 5  // Refused   - Query Refused                     [DNS]
    +	RcodeYXDomain       = 6  // YXDomain  - Name Exists when it should not    [DNS Update]
    +	RcodeYXRrset        = 7  // YXRRSet   - RR Set Exists when it should not  [DNS Update]
    +	RcodeNXRrset        = 8  // NXRRSet   - RR Set that should exist does not [DNS Update]
    +	RcodeNotAuth        = 9  // NotAuth   - Server Not Authoritative for zone [DNS Update]
    +	RcodeNotZone        = 10 // NotZone   - Name not contained in zone        [DNS Update/TSIG]
    +	RcodeBadSig         = 16 // BADSIG    - TSIG Signature Failure            [TSIG]
    +	RcodeBadVers        = 16 // BADVERS   - Bad OPT Version                   [EDNS0]
    +	RcodeBadKey         = 17 // BADKEY    - Key not recognized                [TSIG]
    +	RcodeBadTime        = 18 // BADTIME   - Signature out of time window      [TSIG]
    +	RcodeBadMode        = 19 // BADMODE   - Bad TKEY Mode                     [TKEY]
    +	RcodeBadName        = 20 // BADNAME   - Duplicate key name                [TKEY]
    +	RcodeBadAlg         = 21 // BADALG    - Algorithm not supported           [TKEY]
    +	RcodeBadTrunc       = 22 // BADTRUNC  - Bad Truncation                    [TSIG]
    +	RcodeBadCookie      = 23 // BADCOOKIE - Bad/missing Server Cookie         [DNS Cookies]
    +
    +	// Message Opcodes. There is no 3.
    +	OpcodeQuery  = 0
    +	OpcodeIQuery = 1
    +	OpcodeStatus = 2
    +	OpcodeNotify = 4
    +	OpcodeUpdate = 5
    +)
    +
    +// Header is the wire format for the DNS packet header.
    +type Header struct {
    +	Id                                 uint16
    +	Bits                               uint16
    +	Qdcount, Ancount, Nscount, Arcount uint16
    +}
    +
    +const (
    +	headerSize = 12
    +
    +	// Header.Bits
    +	_QR = 1 << 15 // query/response (response=1)
    +	_AA = 1 << 10 // authoritative
    +	_TC = 1 << 9  // truncated
    +	_RD = 1 << 8  // recursion desired
    +	_RA = 1 << 7  // recursion available
    +	_Z  = 1 << 6  // Z
    +	_AD = 1 << 5  // authenticated data
    +	_CD = 1 << 4  // checking disabled
    +)
    +
    +// Various constants used in the LOC RR. See RFC 1876.
    +const (
    +	LOC_EQUATOR       = 1 << 31 // RFC 1876, Section 2.
    +	LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2.
    +	LOC_HOURS         = 60 * 1000
    +	LOC_DEGREES       = 60 * LOC_HOURS
    +	LOC_ALTITUDEBASE  = 100000
    +)
    +
    +// Different Certificate Types, see RFC 4398, Section 2.1
    +const (
    +	CertPKIX = 1 + iota
    +	CertSPKI
    +	CertPGP
    +	CertIPIX
    +	CertISPKI
    +	CertIPGP
    +	CertACPKIX
    +	CertIACPKIX
    +	CertURI = 253
    +	CertOID = 254
    +)
    +
    +// CertTypeToString converts the Cert Type to its string representation.
    +// See RFC 4398 and RFC 6944.
    +var CertTypeToString = map[uint16]string{
    +	CertPKIX:    "PKIX",
    +	CertSPKI:    "SPKI",
    +	CertPGP:     "PGP",
    +	CertIPIX:    "IPIX",
    +	CertISPKI:   "ISPKI",
    +	CertIPGP:    "IPGP",
    +	CertACPKIX:  "ACPKIX",
    +	CertIACPKIX: "IACPKIX",
    +	CertURI:     "URI",
    +	CertOID:     "OID",
    +}
    +
    +//go:generate go run types_generate.go
    +
    +// Question holds a DNS question. There can be multiple questions in the
    +// question section of a message. Usually there is just one.
    +type Question struct {
    +	Name   string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed)
    +	Qtype  uint16
    +	Qclass uint16
    +}
    +
    +func (q *Question) len(off int, compression map[string]struct{}) int {
    +	l := domainNameLen(q.Name, off, compression, true)
    +	l += 2 + 2
    +	return l
    +}
    +
    +func (q *Question) String() (s string) {
    +	// prefix with ; (as in dig)
    +	s = ";" + sprintName(q.Name) + "\t"
    +	s += Class(q.Qclass).String() + "\t"
    +	s += " " + Type(q.Qtype).String()
    +	return s
    +}
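    +
    +// As an illustrative sketch (not part of the upstream library), a question
    +// asking for the A record of the assumed name "miek.nl." can be built and
    +// printed in the dig-like form produced by String above:
    +//
    +//	q := Question{Name: "miek.nl.", Qtype: TypeA, Qclass: ClassINET}
    +//	fmt.Println(q.String()) // ;miek.nl.	IN	 A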
    +
    +// ANY is a wildcard record. See RFC 1035, Section 3.2.3. ANY
    +// is named "*" there.
    +type ANY struct {
    +	Hdr RR_Header
    +	// Does not have any rdata
    +}
    +
    +func (rr *ANY) String() string { return rr.Hdr.String() }
    +
    +func (rr *ANY) parse(c *zlexer, origin string) *ParseError {
    +	panic("dns: internal error: parse should never be called on ANY")
    +}
    +
    +// NULL RR. See RFC 1035.
    +type NULL struct {
    +	Hdr  RR_Header
    +	Data string `dns:"any"`
    +}
    +
    +func (rr *NULL) String() string {
    +	// There is no presentation format; prefix string with a comment.
    +	return ";" + rr.Hdr.String() + rr.Data
    +}
    +
    +func (rr *NULL) parse(c *zlexer, origin string) *ParseError {
    +	panic("dns: internal error: parse should never be called on NULL")
    +}
    +
    +// CNAME RR. See RFC 1034.
    +type CNAME struct {
    +	Hdr    RR_Header
    +	Target string `dns:"cdomain-name"`
    +}
    +
    +func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) }
    +
    +// HINFO RR. See RFC 1035.
    +type HINFO struct {
    +	Hdr RR_Header
    +	Cpu string
    +	Os  string
    +}
    +
    +func (rr *HINFO) String() string {
    +	return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os})
    +}
    +
    +// MB RR. See RFC 1035.
    +type MB struct {
    +	Hdr RR_Header
    +	Mb  string `dns:"cdomain-name"`
    +}
    +
    +func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) }
    +
    +// MG RR. See RFC 1035.
    +type MG struct {
    +	Hdr RR_Header
    +	Mg  string `dns:"cdomain-name"`
    +}
    +
    +func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) }
    +
    +// MINFO RR. See RFC 1035.
    +type MINFO struct {
    +	Hdr   RR_Header
    +	Rmail string `dns:"cdomain-name"`
    +	Email string `dns:"cdomain-name"`
    +}
    +
    +func (rr *MINFO) String() string {
    +	return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email)
    +}
    +
    +// MR RR. See RFC 1035.
    +type MR struct {
    +	Hdr RR_Header
    +	Mr  string `dns:"cdomain-name"`
    +}
    +
    +func (rr *MR) String() string {
    +	return rr.Hdr.String() + sprintName(rr.Mr)
    +}
    +
    +// MF RR. See RFC 1035.
    +type MF struct {
    +	Hdr RR_Header
    +	Mf  string `dns:"cdomain-name"`
    +}
    +
    +func (rr *MF) String() string {
    +	return rr.Hdr.String() + sprintName(rr.Mf)
    +}
    +
    +// MD RR. See RFC 1035.
    +type MD struct {
    +	Hdr RR_Header
    +	Md  string `dns:"cdomain-name"`
    +}
    +
    +func (rr *MD) String() string {
    +	return rr.Hdr.String() + sprintName(rr.Md)
    +}
    +
    +// MX RR. See RFC 1035.
    +type MX struct {
    +	Hdr        RR_Header
    +	Preference uint16
    +	Mx         string `dns:"cdomain-name"`
    +}
    +
    +func (rr *MX) String() string {
    +	return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx)
    +}
    +
    +// AFSDB RR. See RFC 1183.
    +type AFSDB struct {
    +	Hdr      RR_Header
    +	Subtype  uint16
    +	Hostname string `dns:"domain-name"`
    +}
    +
    +func (rr *AFSDB) String() string {
    +	return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname)
    +}
    +
    +// X25 RR. See RFC 1183, Section 3.1.
    +type X25 struct {
    +	Hdr         RR_Header
    +	PSDNAddress string
    +}
    +
    +func (rr *X25) String() string {
    +	return rr.Hdr.String() + rr.PSDNAddress
    +}
    +
    +// RT RR. See RFC 1183, Section 3.3.
    +type RT struct {
    +	Hdr        RR_Header
    +	Preference uint16
    +	Host       string `dns:"domain-name"` // RFC 3597 prohibits compressing records not defined in RFC 1035.
    +}
    +
    +func (rr *RT) String() string {
    +	return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host)
    +}
    +
    +// NS RR. See RFC 1035.
    +type NS struct {
    +	Hdr RR_Header
    +	Ns  string `dns:"cdomain-name"`
    +}
    +
    +func (rr *NS) String() string {
    +	return rr.Hdr.String() + sprintName(rr.Ns)
    +}
    +
    +// PTR RR. See RFC 1035.
    +type PTR struct {
    +	Hdr RR_Header
    +	Ptr string `dns:"cdomain-name"`
    +}
    +
    +func (rr *PTR) String() string {
    +	return rr.Hdr.String() + sprintName(rr.Ptr)
    +}
    +
    +// RP RR. See RFC 1183, Section 2.2.
    +type RP struct {
    +	Hdr  RR_Header
    +	Mbox string `dns:"domain-name"`
    +	Txt  string `dns:"domain-name"`
    +}
    +
    +func (rr *RP) String() string {
    +	return rr.Hdr.String() + sprintName(rr.Mbox) + " " + sprintName(rr.Txt)
    +}
    +
    +// SOA RR. See RFC 1035.
    +type SOA struct {
    +	Hdr     RR_Header
    +	Ns      string `dns:"cdomain-name"`
    +	Mbox    string `dns:"cdomain-name"`
    +	Serial  uint32
    +	Refresh uint32
    +	Retry   uint32
    +	Expire  uint32
    +	Minttl  uint32
    +}
    +
    +func (rr *SOA) String() string {
    +	return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) +
    +		" " + strconv.FormatInt(int64(rr.Serial), 10) +
    +		" " + strconv.FormatInt(int64(rr.Refresh), 10) +
    +		" " + strconv.FormatInt(int64(rr.Retry), 10) +
    +		" " + strconv.FormatInt(int64(rr.Expire), 10) +
    +		" " + strconv.FormatInt(int64(rr.Minttl), 10)
    +}
    +
    +// TXT RR. See RFC 1035.
    +type TXT struct {
    +	Hdr RR_Header
    +	Txt []string `dns:"txt"`
    +}
    +
    +func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
    +
    +func sprintName(s string) string {
    +	var dst strings.Builder
    +	dst.Grow(len(s))
    +	for i := 0; i < len(s); {
    +		if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' {
    +			dst.WriteString(s[i : i+2])
    +			i += 2
    +			continue
    +		}
    +
    +		b, n := nextByte(s, i)
    +		switch {
    +		case n == 0:
    +			i++ // dangling back slash
    +		case b == '.':
    +			dst.WriteByte('.')
    +		default:
    +			writeDomainNameByte(&dst, b)
    +		}
    +		i += n
    +	}
    +	return dst.String()
    +}
    +
    +func sprintTxtOctet(s string) string {
    +	var dst strings.Builder
    +	dst.Grow(2 + len(s))
    +	dst.WriteByte('"')
    +	for i := 0; i < len(s); {
    +		if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' {
    +			dst.WriteString(s[i : i+2])
    +			i += 2
    +			continue
    +		}
    +
    +		b, n := nextByte(s, i)
    +		switch {
    +		case n == 0:
    +			i++ // dangling back slash
    +		case b == '.':
    +			dst.WriteByte('.')
    +		case b < ' ' || b > '~':
    +			dst.WriteString(escapeByte(b))
    +		default:
    +			dst.WriteByte(b)
    +		}
    +		i += n
    +	}
    +	dst.WriteByte('"')
    +	return dst.String()
    +}
    +
    +func sprintTxt(txt []string) string {
    +	var out strings.Builder
    +	for i, s := range txt {
    +		out.Grow(3 + len(s))
    +		if i > 0 {
    +			out.WriteString(` "`)
    +		} else {
    +			out.WriteByte('"')
    +		}
    +		for j := 0; j < len(s); {
    +			b, n := nextByte(s, j)
    +			if n == 0 {
    +				break
    +			}
    +			writeTXTStringByte(&out, b)
    +			j += n
    +		}
    +		out.WriteByte('"')
    +	}
    +	return out.String()
    +}
    +
    +func writeDomainNameByte(s *strings.Builder, b byte) {
    +	switch b {
    +	case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape
    +		s.WriteByte('\\')
    +		s.WriteByte(b)
    +	default:
    +		writeTXTStringByte(s, b)
    +	}
    +}
    +
    +func writeTXTStringByte(s *strings.Builder, b byte) {
    +	switch {
    +	case b == '"' || b == '\\':
    +		s.WriteByte('\\')
    +		s.WriteByte(b)
    +	case b < ' ' || b > '~':
    +		s.WriteString(escapeByte(b))
    +	default:
    +		s.WriteByte(b)
    +	}
    +}
    +
    +const (
    +	escapedByteSmall = "" +
    +		`\000\001\002\003\004\005\006\007\008\009` +
    +		`\010\011\012\013\014\015\016\017\018\019` +
    +		`\020\021\022\023\024\025\026\027\028\029` +
    +		`\030\031`
    +	escapedByteLarge = `\127\128\129` +
    +		`\130\131\132\133\134\135\136\137\138\139` +
    +		`\140\141\142\143\144\145\146\147\148\149` +
    +		`\150\151\152\153\154\155\156\157\158\159` +
    +		`\160\161\162\163\164\165\166\167\168\169` +
    +		`\170\171\172\173\174\175\176\177\178\179` +
    +		`\180\181\182\183\184\185\186\187\188\189` +
    +		`\190\191\192\193\194\195\196\197\198\199` +
    +		`\200\201\202\203\204\205\206\207\208\209` +
    +		`\210\211\212\213\214\215\216\217\218\219` +
    +		`\220\221\222\223\224\225\226\227\228\229` +
    +		`\230\231\232\233\234\235\236\237\238\239` +
    +		`\240\241\242\243\244\245\246\247\248\249` +
    +		`\250\251\252\253\254\255`
    +)
    +
    +// escapeByte returns the \DDD escaping of b which must
    +// satisfy b < ' ' || b > '~'.
    +func escapeByte(b byte) string {
    +	if b < ' ' {
    +		return escapedByteSmall[b*4 : b*4+4]
    +	}
    +
    +	b -= '~' + 1
    +	// The cast here is needed as b*4 may overflow byte.
    +	return escapedByteLarge[int(b)*4 : int(b)*4+4]
    +}
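    +
    +// Illustrative examples (not upstream code) of the mapping above; the inputs
    +// are assumed byte values outside the printable ASCII range:
    +//
    +//	escapeByte(0x0a) // "\010" (newline)
    +//	escapeByte(0xff) // "\255"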
    +
    +func nextByte(s string, offset int) (byte, int) {
    +	if offset >= len(s) {
    +		return 0, 0
    +	}
    +	if s[offset] != '\\' {
    +		// not an escape sequence
    +		return s[offset], 1
    +	}
    +	switch len(s) - offset {
    +	case 1: // dangling escape
    +		return 0, 0
    +	case 2, 3: // too short to be \ddd
    +	default: // maybe \ddd
    +		if isDigit(s[offset+1]) && isDigit(s[offset+2]) && isDigit(s[offset+3]) {
    +			return dddStringToByte(s[offset+1:]), 4
    +		}
    +	}
    +	// not \ddd, just an RFC 1035 "quoted" character
    +	return s[offset+1], 2
    +}
    +
    +// SPF RR. See RFC 4408, Section 3.1.1.
    +type SPF struct {
    +	Hdr RR_Header
    +	Txt []string `dns:"txt"`
    +}
    +
    +func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
    +
    +// AVC RR. See https://www.iana.org/assignments/dns-parameters/AVC/avc-completed-template.
    +type AVC struct {
    +	Hdr RR_Header
    +	Txt []string `dns:"txt"`
    +}
    +
    +func (rr *AVC) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
    +
    +// SRV RR. See RFC 2782.
    +type SRV struct {
    +	Hdr      RR_Header
    +	Priority uint16
    +	Weight   uint16
    +	Port     uint16
    +	Target   string `dns:"domain-name"`
    +}
    +
    +func (rr *SRV) String() string {
    +	return rr.Hdr.String() +
    +		strconv.Itoa(int(rr.Priority)) + " " +
    +		strconv.Itoa(int(rr.Weight)) + " " +
    +		strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target)
    +}
    +
    +// NAPTR RR. See RFC 2915.
    +type NAPTR struct {
    +	Hdr         RR_Header
    +	Order       uint16
    +	Preference  uint16
    +	Flags       string
    +	Service     string
    +	Regexp      string
    +	Replacement string `dns:"domain-name"`
    +}
    +
    +func (rr *NAPTR) String() string {
    +	return rr.Hdr.String() +
    +		strconv.Itoa(int(rr.Order)) + " " +
    +		strconv.Itoa(int(rr.Preference)) + " " +
    +		"\"" + rr.Flags + "\" " +
    +		"\"" + rr.Service + "\" " +
    +		"\"" + rr.Regexp + "\" " +
    +		rr.Replacement
    +}
    +
    +// CERT RR. See RFC 4398.
    +type CERT struct {
    +	Hdr         RR_Header
    +	Type        uint16
    +	KeyTag      uint16
    +	Algorithm   uint8
    +	Certificate string `dns:"base64"`
    +}
    +
    +func (rr *CERT) String() string {
    +	var (
    +		ok                  bool
    +		certtype, algorithm string
    +	)
    +	if certtype, ok = CertTypeToString[rr.Type]; !ok {
    +		certtype = strconv.Itoa(int(rr.Type))
    +	}
    +	if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok {
    +		algorithm = strconv.Itoa(int(rr.Algorithm))
    +	}
    +	return rr.Hdr.String() + certtype +
    +		" " + strconv.Itoa(int(rr.KeyTag)) +
    +		" " + algorithm +
    +		" " + rr.Certificate
    +}
    +
    +// DNAME RR. See RFC 2672.
    +type DNAME struct {
    +	Hdr    RR_Header
    +	Target string `dns:"domain-name"`
    +}
    +
    +func (rr *DNAME) String() string {
    +	return rr.Hdr.String() + sprintName(rr.Target)
    +}
    +
    +// A RR. See RFC 1035.
    +type A struct {
    +	Hdr RR_Header
    +	A   net.IP `dns:"a"`
    +}
    +
    +func (rr *A) String() string {
    +	if rr.A == nil {
    +		return rr.Hdr.String()
    +	}
    +	return rr.Hdr.String() + rr.A.String()
    +}
    +
    +// AAAA RR. See RFC 3596.
    +type AAAA struct {
    +	Hdr  RR_Header
    +	AAAA net.IP `dns:"aaaa"`
    +}
    +
    +func (rr *AAAA) String() string {
    +	if rr.AAAA == nil {
    +		return rr.Hdr.String()
    +	}
    +	return rr.Hdr.String() + rr.AAAA.String()
    +}
    +
    +// PX RR. See RFC 2163.
    +type PX struct {
    +	Hdr        RR_Header
    +	Preference uint16
    +	Map822     string `dns:"domain-name"`
    +	Mapx400    string `dns:"domain-name"`
    +}
    +
    +func (rr *PX) String() string {
    +	return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400)
    +}
    +
    +// GPOS RR. See RFC 1712.
    +type GPOS struct {
    +	Hdr       RR_Header
    +	Longitude string
    +	Latitude  string
    +	Altitude  string
    +}
    +
    +func (rr *GPOS) String() string {
    +	return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude
    +}
    +
    +// LOC RR. See RFC 1876.
    +type LOC struct {
    +	Hdr       RR_Header
    +	Version   uint8
    +	Size      uint8
    +	HorizPre  uint8
    +	VertPre   uint8
    +	Latitude  uint32
    +	Longitude uint32
    +	Altitude  uint32
    +}
    +
    +// cmToM takes a cm value expressed in RFC 1876 SIZE mantissa/exponent
    +// format and returns a string in meters (with two decimals for the cm part).
    +func cmToM(m, e uint8) string {
    +	if e < 2 {
    +		if e == 1 {
    +			m *= 10
    +		}
    +
    +		return fmt.Sprintf("0.%02d", m)
    +	}
    +
    +	s := fmt.Sprintf("%d", m)
    +	for e > 2 {
    +		s += "0"
    +		e--
    +	}
    +	return s
    +}
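    +
    +// A worked example (illustrative, not from upstream): an RFC 1876 SIZE octet
    +// of 0x12 encodes mantissa 1 and exponent 2, i.e. 1*10^2 cm:
    +//
    +//	cmToM(1, 2) // "1"    (1 m)
    +//	cmToM(1, 1) // "0.10" (10 cm)
    +//	cmToM(5, 0) // "0.05" (5 cm)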
    +
    +func (rr *LOC) String() string {
    +	s := rr.Hdr.String()
    +
    +	lat := rr.Latitude
    +	ns := "N"
    +	if lat > LOC_EQUATOR {
    +		lat = lat - LOC_EQUATOR
    +	} else {
    +		ns = "S"
    +		lat = LOC_EQUATOR - lat
    +	}
    +	h := lat / LOC_DEGREES
    +	lat = lat % LOC_DEGREES
    +	m := lat / LOC_HOURS
    +	lat = lat % LOC_HOURS
    +	s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, float64(lat)/1000, ns)
    +
    +	lon := rr.Longitude
    +	ew := "E"
    +	if lon > LOC_PRIMEMERIDIAN {
    +		lon = lon - LOC_PRIMEMERIDIAN
    +	} else {
    +		ew = "W"
    +		lon = LOC_PRIMEMERIDIAN - lon
    +	}
    +	h = lon / LOC_DEGREES
    +	lon = lon % LOC_DEGREES
    +	m = lon / LOC_HOURS
    +	lon = lon % LOC_HOURS
    +	s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, float64(lon)/1000, ew)
    +
    +	var alt = float64(rr.Altitude) / 100
    +	alt -= LOC_ALTITUDEBASE
    +	if rr.Altitude%100 != 0 {
    +		s += fmt.Sprintf("%.2fm ", alt)
    +	} else {
    +		s += fmt.Sprintf("%.0fm ", alt)
    +	}
    +
    +	s += cmToM(rr.Size&0xf0>>4, rr.Size&0x0f) + "m "
    +	s += cmToM(rr.HorizPre&0xf0>>4, rr.HorizPre&0x0f) + "m "
    +	s += cmToM(rr.VertPre&0xf0>>4, rr.VertPre&0x0f) + "m"
    +
    +	return s
    +}
    +
    +// SIG RR. See RFC 2535. The SIG RR is identical to RRSIG and nowadays only used for SIG(0). See RFC 2931.
    +type SIG struct {
    +	RRSIG
    +}
    +
    +// RRSIG RR. See RFC 4034 and RFC 3755.
    +type RRSIG struct {
    +	Hdr         RR_Header
    +	TypeCovered uint16
    +	Algorithm   uint8
    +	Labels      uint8
    +	OrigTtl     uint32
    +	Expiration  uint32
    +	Inception   uint32
    +	KeyTag      uint16
    +	SignerName  string `dns:"domain-name"`
    +	Signature   string `dns:"base64"`
    +}
    +
    +func (rr *RRSIG) String() string {
    +	s := rr.Hdr.String()
    +	s += Type(rr.TypeCovered).String()
    +	s += " " + strconv.Itoa(int(rr.Algorithm)) +
    +		" " + strconv.Itoa(int(rr.Labels)) +
    +		" " + strconv.FormatInt(int64(rr.OrigTtl), 10) +
    +		" " + TimeToString(rr.Expiration) +
    +		" " + TimeToString(rr.Inception) +
    +		" " + strconv.Itoa(int(rr.KeyTag)) +
    +		" " + sprintName(rr.SignerName) +
    +		" " + rr.Signature
    +	return s
    +}
    +
    +// NSEC RR. See RFC 4034 and RFC 3755.
    +type NSEC struct {
    +	Hdr        RR_Header
    +	NextDomain string   `dns:"domain-name"`
    +	TypeBitMap []uint16 `dns:"nsec"`
    +}
    +
    +func (rr *NSEC) String() string {
    +	s := rr.Hdr.String() + sprintName(rr.NextDomain)
    +	for _, t := range rr.TypeBitMap {
    +		s += " " + Type(t).String()
    +	}
    +	return s
    +}
    +
    +func (rr *NSEC) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.NextDomain, off+l, compression, false)
    +	l += typeBitMapLen(rr.TypeBitMap)
    +	return l
    +}
    +
    +// DLV RR. See RFC 4431.
    +type DLV struct{ DS }
    +
    +// CDS RR. See RFC 7344.
    +type CDS struct{ DS }
    +
    +// DS RR. See RFC 4034 and RFC 3658.
    +type DS struct {
    +	Hdr        RR_Header
    +	KeyTag     uint16
    +	Algorithm  uint8
    +	DigestType uint8
    +	Digest     string `dns:"hex"`
    +}
    +
    +func (rr *DS) String() string {
    +	return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) +
    +		" " + strconv.Itoa(int(rr.Algorithm)) +
    +		" " + strconv.Itoa(int(rr.DigestType)) +
    +		" " + strings.ToUpper(rr.Digest)
    +}
    +
    +// KX RR. See RFC 2230.
    +type KX struct {
    +	Hdr        RR_Header
    +	Preference uint16
    +	Exchanger  string `dns:"domain-name"`
    +}
    +
    +func (rr *KX) String() string {
    +	return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) +
    +		" " + sprintName(rr.Exchanger)
    +}
    +
    +// TA RR. See http://www.watson.org/~weiler/INI1999-19.pdf.
    +type TA struct {
    +	Hdr        RR_Header
    +	KeyTag     uint16
    +	Algorithm  uint8
    +	DigestType uint8
    +	Digest     string `dns:"hex"`
    +}
    +
    +func (rr *TA) String() string {
    +	return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) +
    +		" " + strconv.Itoa(int(rr.Algorithm)) +
    +		" " + strconv.Itoa(int(rr.DigestType)) +
    +		" " + strings.ToUpper(rr.Digest)
    +}
    +
    +// TALINK RR. See https://www.iana.org/assignments/dns-parameters/TALINK/talink-completed-template.
    +type TALINK struct {
    +	Hdr          RR_Header
    +	PreviousName string `dns:"domain-name"`
    +	NextName     string `dns:"domain-name"`
    +}
    +
    +func (rr *TALINK) String() string {
    +	return rr.Hdr.String() +
    +		sprintName(rr.PreviousName) + " " + sprintName(rr.NextName)
    +}
    +
    +// SSHFP RR. See RFC 4255.
    +type SSHFP struct {
    +	Hdr         RR_Header
    +	Algorithm   uint8
    +	Type        uint8
    +	FingerPrint string `dns:"hex"`
    +}
    +
    +func (rr *SSHFP) String() string {
    +	return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) +
    +		" " + strconv.Itoa(int(rr.Type)) +
    +		" " + strings.ToUpper(rr.FingerPrint)
    +}
    +
    +// KEY RR. See RFC 2535.
    +type KEY struct {
    +	DNSKEY
    +}
    +
    +// CDNSKEY RR. See RFC 7344.
    +type CDNSKEY struct {
    +	DNSKEY
    +}
    +
    +// DNSKEY RR. See RFC 4034 and RFC 3755.
    +type DNSKEY struct {
    +	Hdr       RR_Header
    +	Flags     uint16
    +	Protocol  uint8
    +	Algorithm uint8
    +	PublicKey string `dns:"base64"`
    +}
    +
    +func (rr *DNSKEY) String() string {
    +	return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) +
    +		" " + strconv.Itoa(int(rr.Protocol)) +
    +		" " + strconv.Itoa(int(rr.Algorithm)) +
    +		" " + rr.PublicKey
    +}
    +
    +// RKEY RR. See https://www.iana.org/assignments/dns-parameters/RKEY/rkey-completed-template.
    +type RKEY struct {
    +	Hdr       RR_Header
    +	Flags     uint16
    +	Protocol  uint8
    +	Algorithm uint8
    +	PublicKey string `dns:"base64"`
    +}
    +
    +func (rr *RKEY) String() string {
    +	return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) +
    +		" " + strconv.Itoa(int(rr.Protocol)) +
    +		" " + strconv.Itoa(int(rr.Algorithm)) +
    +		" " + rr.PublicKey
    +}
    +
    +// NSAPPTR RR. See RFC 1348.
    +type NSAPPTR struct {
    +	Hdr RR_Header
    +	Ptr string `dns:"domain-name"`
    +}
    +
    +func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) }
    +
    +// NSEC3 RR. See RFC 5155.
    +type NSEC3 struct {
    +	Hdr        RR_Header
    +	Hash       uint8
    +	Flags      uint8
    +	Iterations uint16
    +	SaltLength uint8
    +	Salt       string `dns:"size-hex:SaltLength"`
    +	HashLength uint8
    +	NextDomain string   `dns:"size-base32:HashLength"`
    +	TypeBitMap []uint16 `dns:"nsec"`
    +}
    +
    +func (rr *NSEC3) String() string {
    +	s := rr.Hdr.String()
    +	s += strconv.Itoa(int(rr.Hash)) +
    +		" " + strconv.Itoa(int(rr.Flags)) +
    +		" " + strconv.Itoa(int(rr.Iterations)) +
    +		" " + saltToString(rr.Salt) +
    +		" " + rr.NextDomain
    +	for _, t := range rr.TypeBitMap {
    +		s += " " + Type(t).String()
    +	}
    +	return s
    +}
    +
    +func (rr *NSEC3) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1
    +	l += typeBitMapLen(rr.TypeBitMap)
    +	return l
    +}
    +
    +// NSEC3PARAM RR. See RFC 5155.
    +type NSEC3PARAM struct {
    +	Hdr        RR_Header
    +	Hash       uint8
    +	Flags      uint8
    +	Iterations uint16
    +	SaltLength uint8
    +	Salt       string `dns:"size-hex:SaltLength"`
    +}
    +
    +func (rr *NSEC3PARAM) String() string {
    +	s := rr.Hdr.String()
    +	s += strconv.Itoa(int(rr.Hash)) +
    +		" " + strconv.Itoa(int(rr.Flags)) +
    +		" " + strconv.Itoa(int(rr.Iterations)) +
    +		" " + saltToString(rr.Salt)
    +	return s
    +}
    +
    +// TKEY RR. See RFC 2930.
    +type TKEY struct {
    +	Hdr        RR_Header
    +	Algorithm  string `dns:"domain-name"`
    +	Inception  uint32
    +	Expiration uint32
    +	Mode       uint16
    +	Error      uint16
    +	KeySize    uint16
    +	Key        string `dns:"size-hex:KeySize"`
    +	OtherLen   uint16
    +	OtherData  string `dns:"size-hex:OtherLen"`
    +}
    +
    +// TKEY has no official presentation format, but this will suffice.
    +func (rr *TKEY) String() string {
    +	s := ";" + rr.Hdr.String() +
    +		" " + rr.Algorithm +
    +		" " + TimeToString(rr.Inception) +
    +		" " + TimeToString(rr.Expiration) +
    +		" " + strconv.Itoa(int(rr.Mode)) +
    +		" " + strconv.Itoa(int(rr.Error)) +
    +		" " + strconv.Itoa(int(rr.KeySize)) +
    +		" " + rr.Key +
    +		" " + strconv.Itoa(int(rr.OtherLen)) +
    +		" " + rr.OtherData
    +	return s
    +}
    +
    +// RFC3597 represents an unknown/generic RR. See RFC 3597.
    +type RFC3597 struct {
    +	Hdr   RR_Header
    +	Rdata string `dns:"hex"`
    +}
    +
    +func (rr *RFC3597) String() string {
    +	// Let's call it a hack
    +	s := rfc3597Header(rr.Hdr)
    +
    +	s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata
    +	return s
    +}
    +
    +func rfc3597Header(h RR_Header) string {
    +	var s string
    +
    +	s += sprintName(h.Name) + "\t"
    +	s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
    +	s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t"
    +	s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t"
    +	return s
    +}
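    +
    +// For illustration (an assumed example, not upstream code): an unknown RR of
    +// type 731 with two bytes of RDATA would render through String above roughly as
    +//
    +//	example.	3600	CLASS1	TYPE731	\# 2 abcd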
    +
    +// URI RR. See RFC 7553.
    +type URI struct {
    +	Hdr      RR_Header
    +	Priority uint16
    +	Weight   uint16
    +	Target   string `dns:"octet"`
    +}
    +
    +func (rr *URI) String() string {
    +	return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) +
    +		" " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target)
    +}
    +
    +// DHCID RR. See RFC 4701.
    +type DHCID struct {
    +	Hdr    RR_Header
    +	Digest string `dns:"base64"`
    +}
    +
    +func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest }
    +
    +// TLSA RR. See RFC 6698.
    +type TLSA struct {
    +	Hdr          RR_Header
    +	Usage        uint8
    +	Selector     uint8
    +	MatchingType uint8
    +	Certificate  string `dns:"hex"`
    +}
    +
    +func (rr *TLSA) String() string {
    +	return rr.Hdr.String() +
    +		strconv.Itoa(int(rr.Usage)) +
    +		" " + strconv.Itoa(int(rr.Selector)) +
    +		" " + strconv.Itoa(int(rr.MatchingType)) +
    +		" " + rr.Certificate
    +}
    +
    +// SMIMEA RR. See RFC 8162.
    +type SMIMEA struct {
    +	Hdr          RR_Header
    +	Usage        uint8
    +	Selector     uint8
    +	MatchingType uint8
    +	Certificate  string `dns:"hex"`
    +}
    +
    +func (rr *SMIMEA) String() string {
    +	s := rr.Hdr.String() +
    +		strconv.Itoa(int(rr.Usage)) +
    +		" " + strconv.Itoa(int(rr.Selector)) +
    +		" " + strconv.Itoa(int(rr.MatchingType))
    +
    +	// Every Nth char needs a space on this output. If we output
    +	// this as one giant line, we can't read it back in because in some cases
    +	// the cert length overflows scan.maxTok (2048).
    +	sx := splitN(rr.Certificate, 1024) // conservative value here
    +	s += " " + strings.Join(sx, " ")
    +	return s
    +}
    +
    +// HIP RR. See RFC 8005.
    +type HIP struct {
    +	Hdr                RR_Header
    +	HitLength          uint8
    +	PublicKeyAlgorithm uint8
    +	PublicKeyLength    uint16
    +	Hit                string   `dns:"size-hex:HitLength"`
    +	PublicKey          string   `dns:"size-base64:PublicKeyLength"`
    +	RendezvousServers  []string `dns:"domain-name"`
    +}
    +
    +func (rr *HIP) String() string {
    +	s := rr.Hdr.String() +
    +		strconv.Itoa(int(rr.PublicKeyAlgorithm)) +
    +		" " + rr.Hit +
    +		" " + rr.PublicKey
    +	for _, d := range rr.RendezvousServers {
    +		s += " " + sprintName(d)
    +	}
    +	return s
    +}
    +
    +// NINFO RR. See https://www.iana.org/assignments/dns-parameters/NINFO/ninfo-completed-template.
    +type NINFO struct {
    +	Hdr    RR_Header
    +	ZSData []string `dns:"txt"`
    +}
    +
    +func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) }
    +
    +// NID RR. See RFC 6742.
    +type NID struct {
    +	Hdr        RR_Header
    +	Preference uint16
    +	NodeID     uint64
    +}
    +
    +func (rr *NID) String() string {
    +	s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
    +	node := fmt.Sprintf("%0.16x", rr.NodeID)
    +	s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16]
    +	return s
    +}
    +
    +// L32 RR. See RFC 6742.
    +type L32 struct {
    +	Hdr        RR_Header
    +	Preference uint16
    +	Locator32  net.IP `dns:"a"`
    +}
    +
    +func (rr *L32) String() string {
    +	if rr.Locator32 == nil {
    +		return rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
    +	}
    +	return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) +
    +		" " + rr.Locator32.String()
    +}
    +
    +// L64 RR. See RFC 6742.
    +type L64 struct {
    +	Hdr        RR_Header
    +	Preference uint16
    +	Locator64  uint64
    +}
    +
    +func (rr *L64) String() string {
    +	s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
    +	node := fmt.Sprintf("%0.16X", rr.Locator64)
    +	s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16]
    +	return s
    +}
    +
    +// LP RR. See RFC 6742.
    +type LP struct {
    +	Hdr        RR_Header
    +	Preference uint16
    +	Fqdn       string `dns:"domain-name"`
    +}
    +
    +func (rr *LP) String() string {
    +	return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn)
    +}
    +
    +// EUI48 RR. See RFC 7043.
    +type EUI48 struct {
    +	Hdr     RR_Header
    +	Address uint64 `dns:"uint48"`
    +}
    +
    +func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) }
    +
    +// EUI64 RR. See RFC 7043.
    +type EUI64 struct {
    +	Hdr     RR_Header
    +	Address uint64
    +}
    +
    +func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) }
    +
    +// CAA RR. See RFC 6844.
    +type CAA struct {
    +	Hdr   RR_Header
    +	Flag  uint8
    +	Tag   string
    +	Value string `dns:"octet"`
    +}
    +
    +func (rr *CAA) String() string {
    +	return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value)
    +}
    +
    +// UID RR. Deprecated, IANA-Reserved.
    +type UID struct {
    +	Hdr RR_Header
    +	Uid uint32
    +}
    +
    +func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) }
    +
    +// GID RR. Deprecated, IANA-Reserved.
    +type GID struct {
    +	Hdr RR_Header
    +	Gid uint32
    +}
    +
    +func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) }
    +
    +// UINFO RR. Deprecated, IANA-Reserved.
    +type UINFO struct {
    +	Hdr   RR_Header
    +	Uinfo string
    +}
    +
    +func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) }
    +
    +// EID RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt.
    +type EID struct {
    +	Hdr      RR_Header
    +	Endpoint string `dns:"hex"`
    +}
    +
    +func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) }
    +
    +// NIMLOC RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt.
    +type NIMLOC struct {
    +	Hdr     RR_Header
    +	Locator string `dns:"hex"`
    +}
    +
    +func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) }
    +
    +// OPENPGPKEY RR. See RFC 7929.
    +type OPENPGPKEY struct {
    +	Hdr       RR_Header
    +	PublicKey string `dns:"base64"`
    +}
    +
    +func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey }
    +
    +// CSYNC RR. See RFC 7477.
    +type CSYNC struct {
    +	Hdr        RR_Header
    +	Serial     uint32
    +	Flags      uint16
    +	TypeBitMap []uint16 `dns:"nsec"`
    +}
    +
    +func (rr *CSYNC) String() string {
    +	s := rr.Hdr.String() + strconv.FormatInt(int64(rr.Serial), 10) + " " + strconv.Itoa(int(rr.Flags))
    +
    +	for _, t := range rr.TypeBitMap {
    +		s += " " + Type(t).String()
    +	}
    +	return s
    +}
    +
    +func (rr *CSYNC) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 4 + 2
    +	l += typeBitMapLen(rr.TypeBitMap)
    +	return l
    +}
    +
    +// TimeToString translates the RRSIG's incep. and expir. times to the
    +// string representation used when printing the record.
    +// It takes serial arithmetic (RFC 1982) into account.
    +func TimeToString(t uint32) string {
    +	mod := (int64(t)-time.Now().Unix())/year68 - 1
    +	if mod < 0 {
    +		mod = 0
    +	}
    +	ti := time.Unix(int64(t)-mod*year68, 0).UTC()
    +	return ti.Format("20060102150405")
    +}
    +
    +// StringToTime translates the RRSIG's incep. and expir. times from
    +// string values like "20110403154150" to a 32-bit integer.
    +// It takes serial arithmetic (RFC 1982) into account.
    +func StringToTime(s string) (uint32, error) {
    +	t, err := time.Parse("20060102150405", s)
    +	if err != nil {
    +		return 0, err
    +	}
    +	mod := t.Unix()/year68 - 1
    +	if mod < 0 {
    +		mod = 0
    +	}
    +	return uint32(t.Unix() - mod*year68), nil
    +}
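    +
    +// An illustrative round trip (assumed example, not upstream code); it holds as
    +// long as the timestamp lies within the RFC 1982 window around the current time:
    +//
    +//	t, err := StringToTime("20110403154150")
    +//	if err == nil {
    +//		s := TimeToString(t) // "20110403154150"
    +//		_ = s
    +//	}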
    +
    +// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty.
    +func saltToString(s string) string {
    +	if len(s) == 0 {
    +		return "-"
    +	}
    +	return strings.ToUpper(s)
    +}
    +
    +func euiToString(eui uint64, bits int) (hex string) {
    +	switch bits {
    +	case 64:
    +		hex = fmt.Sprintf("%16.16x", eui)
    +		hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] +
    +			"-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16]
    +	case 48:
    +		hex = fmt.Sprintf("%12.12x", eui)
    +		hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] +
    +			"-" + hex[8:10] + "-" + hex[10:12]
    +	}
    +	return
    +}
    +
    +// copyIP returns a copy of ip.
    +func copyIP(ip net.IP) net.IP {
    +	p := make(net.IP, len(ip))
    +	copy(p, ip)
    +	return p
    +}
    +
    +// splitN splits a string into N-sized string chunks.
    +// This might become an exported function at some point.
    +func splitN(s string, n int) []string {
    +	if len(s) < n {
    +		return []string{s}
    +	}
    +	sx := []string{}
    +	p, i := 0, n
    +	for {
    +		if i <= len(s) {
    +			sx = append(sx, s[p:i])
    +		} else {
    +			sx = append(sx, s[p:])
    +			break
    +
    +		}
    +		p, i = p+n, i+n
    +	}
    +
    +	return sx
    +}
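    +
    +// For illustration only (not upstream code), splitting an assumed 10-byte
    +// string into 4-byte chunks yields:
    +//
    +//	splitN("abcdefghij", 4) // []string{"abcd", "efgh", "ij"}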
    diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go
    new file mode 100644
    index 000000000000..a4826ee2ffd6
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/udp.go
    @@ -0,0 +1,102 @@
    +// +build !windows
    +
    +package dns
    +
    +import (
    +	"net"
    +
    +	"golang.org/x/net/ipv4"
    +	"golang.org/x/net/ipv6"
    +)
    +
    +// This is the required size of the OOB buffer to pass to ReadMsgUDP.
    +var udpOOBSize = func() int {
    +	// We can't know whether we'll get an IPv4 control message or an
    +	// IPv6 control message ahead of time. To get around this, we size
    +	// the buffer equal to the largest of the two.
    +
    +	oob4 := ipv4.NewControlMessage(ipv4.FlagDst | ipv4.FlagInterface)
    +	oob6 := ipv6.NewControlMessage(ipv6.FlagDst | ipv6.FlagInterface)
    +
    +	if len(oob4) > len(oob6) {
    +		return len(oob4)
    +	}
    +
    +	return len(oob6)
    +}()
    +
    +// SessionUDP holds the remote address and the associated
    +// out-of-band data.
    +type SessionUDP struct {
    +	raddr   *net.UDPAddr
    +	context []byte
    +}
    +
    +// RemoteAddr returns the remote network address.
    +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
    +
    +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
    +// net.UDPAddr.
    +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
    +	oob := make([]byte, udpOOBSize)
    +	n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
    +	if err != nil {
    +		return n, nil, err
    +	}
    +	return n, &SessionUDP{raddr, oob[:oobn]}, err
    +}
    +
    +// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
    +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
    +	oob := correctSource(session.context)
    +	n, _, err := conn.WriteMsgUDP(b, oob, session.raddr)
    +	return n, err
    +}
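    +
    +// A minimal usage sketch (assumptions: the port number and the omitted error
    +// handling are placeholders, and setUDPSocketOptions must have been called on
    +// the connection so the OOB destination data is populated): echo a datagram
    +// back to its sender, replying from the address the query arrived on:
    +//
    +//	conn, _ := net.ListenUDP("udp", &net.UDPAddr{Port: 5353})
    +//	buf := make([]byte, 512)
    +//	n, sess, err := ReadFromSessionUDP(conn, buf)
    +//	if err == nil {
    +//		WriteToSessionUDP(conn, buf[:n], sess)
    +//	}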
    +
    +func setUDPSocketOptions(conn *net.UDPConn) error {
    +	// Try setting the flags for both families and ignore the errors unless they
    +	// both error.
    +	err6 := ipv6.NewPacketConn(conn).SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true)
    +	err4 := ipv4.NewPacketConn(conn).SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true)
    +	if err6 != nil && err4 != nil {
    +		return err4
    +	}
    +	return nil
    +}
    +
    +// parseDstFromOOB takes oob data and returns the destination IP.
    +func parseDstFromOOB(oob []byte) net.IP {
    +	// Start with IPv6 and then fall back to IPv4
    +	// TODO(fastest963): Figure out a way to prefer one or the other. Looking at
    +	// the lvl of the header for a 0 or 41 isn't cross-platform.
    +	cm6 := new(ipv6.ControlMessage)
    +	if cm6.Parse(oob) == nil && cm6.Dst != nil {
    +		return cm6.Dst
    +	}
    +	cm4 := new(ipv4.ControlMessage)
    +	if cm4.Parse(oob) == nil && cm4.Dst != nil {
    +		return cm4.Dst
    +	}
    +	return nil
    +}
    +
    +// correctSource takes oob data and returns new oob data with the Src equal to the Dst
    +func correctSource(oob []byte) []byte {
    +	dst := parseDstFromOOB(oob)
    +	if dst == nil {
    +		return nil
    +	}
    +	// If the dst is definitely an IPv6 address, then use ipv6's ControlMessage
    +	// to respond; otherwise use ipv4's, because ipv6's marshal ignores IPv4
    +	// addresses.
    +	if dst.To4() == nil {
    +		cm := new(ipv6.ControlMessage)
    +		cm.Src = dst
    +		oob = cm.Marshal()
    +	} else {
    +		cm := new(ipv4.ControlMessage)
    +		cm.Src = dst
    +		oob = cm.Marshal()
    +	}
    +	return oob
    +}
    diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go
    new file mode 100644
    index 000000000000..e7dd8ca313c0
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/udp_windows.go
    @@ -0,0 +1,35 @@
    +// +build windows
    +
    +package dns
    +
    +import "net"
    +
    +// SessionUDP holds the remote address
    +type SessionUDP struct {
    +	raddr *net.UDPAddr
    +}
    +
    +// RemoteAddr returns the remote network address.
    +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
    +
    +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
    +// net.UDPAddr.
    +// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP.
    +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
    +	n, raddr, err := conn.ReadFrom(b)
    +	if err != nil {
    +		return n, nil, err
    +	}
    +	return n, &SessionUDP{raddr.(*net.UDPAddr)}, err
    +}
    +
    +// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
    +// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP.
    +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
    +	return conn.WriteTo(b, session.raddr)
    +}
    +
    +// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods
    +// use the standard method in udp.go for these.
    +func setUDPSocketOptions(*net.UDPConn) error { return nil }
    +func parseDstFromOOB([]byte, net.IP) net.IP  { return nil }
    diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go
    new file mode 100644
    index 000000000000..69dd38652224
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/update.go
    @@ -0,0 +1,110 @@
    +package dns
    +
    +// NameUsed sets the RRs in the prereq section to
    +// "Name is in use" RRs. RFC 2136 section 2.4.4.
    +func (u *Msg) NameUsed(rr []RR) {
    +	if u.Answer == nil {
    +		u.Answer = make([]RR, 0, len(rr))
    +	}
    +	for _, r := range rr {
    +		u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
    +	}
    +}
    +
    +// NameNotUsed sets the RRs in the prereq section to
    +// "Name is not in use" RRs. RFC 2136 section 2.4.5.
    +func (u *Msg) NameNotUsed(rr []RR) {
    +	if u.Answer == nil {
    +		u.Answer = make([]RR, 0, len(rr))
    +	}
    +	for _, r := range rr {
    +		u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}})
    +	}
    +}
    +
    +// Used sets the RRs in the prereq section to
    +// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2.
    +func (u *Msg) Used(rr []RR) {
    +	if len(u.Question) == 0 {
    +		panic("dns: empty question section")
    +	}
    +	if u.Answer == nil {
    +		u.Answer = make([]RR, 0, len(rr))
    +	}
    +	for _, r := range rr {
    +		r.Header().Class = u.Question[0].Qclass
    +		u.Answer = append(u.Answer, r)
    +	}
    +}
    +
    +// RRsetUsed sets the RRs in the prereq section to
    +// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1.
    +func (u *Msg) RRsetUsed(rr []RR) {
    +	if u.Answer == nil {
    +		u.Answer = make([]RR, 0, len(rr))
    +	}
    +	for _, r := range rr {
    +		h := r.Header()
    +		u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}})
    +	}
    +}
    +
    +// RRsetNotUsed sets the RRs in the prereq section to
    +// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
    +func (u *Msg) RRsetNotUsed(rr []RR) {
    +	if u.Answer == nil {
    +		u.Answer = make([]RR, 0, len(rr))
    +	}
    +	for _, r := range rr {
    +		h := r.Header()
    +		u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassNONE}})
    +	}
    +}
    +
    +// Insert creates a dynamic update packet that adds a complete RRset, see RFC 2136 section 2.5.1.
    +func (u *Msg) Insert(rr []RR) {
    +	if len(u.Question) == 0 {
    +		panic("dns: empty question section")
    +	}
    +	if u.Ns == nil {
    +		u.Ns = make([]RR, 0, len(rr))
    +	}
    +	for _, r := range rr {
    +		r.Header().Class = u.Question[0].Qclass
    +		u.Ns = append(u.Ns, r)
    +	}
    +}
    +
    +// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2.
    +func (u *Msg) RemoveRRset(rr []RR) {
    +	if u.Ns == nil {
    +		u.Ns = make([]RR, 0, len(rr))
    +	}
    +	for _, r := range rr {
    +		h := r.Header()
    +		u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}})
    +	}
    +}
    +
    +// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3.
    +func (u *Msg) RemoveName(rr []RR) {
    +	if u.Ns == nil {
    +		u.Ns = make([]RR, 0, len(rr))
    +	}
    +	for _, r := range rr {
    +		u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
    +	}
    +}
    +
    +// Remove creates a dynamic update packet that deletes an RR from an RRset, see RFC 2136 section 2.5.4.
    +func (u *Msg) Remove(rr []RR) {
    +	if u.Ns == nil {
    +		u.Ns = make([]RR, 0, len(rr))
    +	}
    +	for _, r := range rr {
    +		h := r.Header()
    +		h.Class = ClassNONE
    +		h.Ttl = 0
    +		u.Ns = append(u.Ns, r)
    +	}
    +}
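    +
    +// An illustrative sketch (assumptions: the zone and record are placeholders,
    +// and Msg.SetUpdate/NewRR live elsewhere in this package): build an RFC 2136
    +// update that inserts an A record only if its name is not yet in use:
    +//
    +//	rr, _ := NewRR("host.example.org. 300 IN A 192.0.2.1")
    +//	m := new(Msg)
    +//	m.SetUpdate("example.org.")
    +//	m.NameNotUsed([]RR{rr})
    +//	m.Insert([]RR{rr})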
    diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go
    new file mode 100644
    index 000000000000..2e0292eff6c2
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/version.go
    @@ -0,0 +1,15 @@
    +package dns
    +
    +import "fmt"
    +
    +// Version is the current version of this library.
    +var Version = V{1, 1, 15}
    +
    +// V holds the version of this library.
    +type V struct {
    +	Major, Minor, Patch int
    +}
    +
    +func (v V) String() string {
    +	return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
    +}
    diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go
    new file mode 100644
    index 000000000000..bb4ca3d879b8
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/xfr.go
    @@ -0,0 +1,263 @@
    +package dns
    +
    +import (
    +	"fmt"
    +	"time"
    +)
    +
    +// Envelope is used when doing a zone transfer with a remote server.
    +type Envelope struct {
    +	RR    []RR  // The set of RRs in the answer section of the xfr reply message.
    +	Error error // If something went wrong, this contains the error.
    +}
    +
    +// A Transfer defines parameters that are used during a zone transfer.
    +type Transfer struct {
    +	*Conn
    +	DialTimeout    time.Duration     // net.DialTimeout, defaults to 2 seconds
    +	ReadTimeout    time.Duration     // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds
    +	WriteTimeout   time.Duration     // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds
    +	TsigSecret     map[string]string // Secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
    +	tsigTimersOnly bool
    +}
    +
    +// Think we need a way to stop the transfer.
    +
    +// In performs an incoming transfer with the server in a.
    +// If you would like to set the source IP, or some other attribute
    +// of a Dialer for a Transfer, you can do so by specifying the attributes
    +// in the Transfer.Conn:
    +//
    +//	d := net.Dialer{LocalAddr: transfer_source}
    +//	con, err := d.Dial("tcp", master)
    +//	dnscon := &dns.Conn{Conn:con}
    +//	transfer = &dns.Transfer{Conn: dnscon}
    +//	channel, err := transfer.In(message, master)
    +//
    +func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
    +	switch q.Question[0].Qtype {
    +	case TypeAXFR, TypeIXFR:
    +	default:
    +		return nil, &Error{"unsupported question type"}
    +	}
    +
    +	timeout := dnsTimeout
    +	if t.DialTimeout != 0 {
    +		timeout = t.DialTimeout
    +	}
    +
    +	if t.Conn == nil {
    +		t.Conn, err = DialTimeout("tcp", a, timeout)
    +		if err != nil {
    +			return nil, err
    +		}
    +	}
    +
    +	if err := t.WriteMsg(q); err != nil {
    +		return nil, err
    +	}
    +
    +	env = make(chan *Envelope)
    +	switch q.Question[0].Qtype {
    +	case TypeAXFR:
    +		go t.inAxfr(q, env)
    +	case TypeIXFR:
    +		go t.inIxfr(q, env)
    +	}
    +
    +	return env, nil
    +}
    +
    +func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) {
    +	first := true
    +	defer t.Close()
    +	defer close(c)
    +	timeout := dnsTimeout
    +	if t.ReadTimeout != 0 {
    +		timeout = t.ReadTimeout
    +	}
    +	for {
    +		t.Conn.SetReadDeadline(time.Now().Add(timeout))
    +		in, err := t.ReadMsg()
    +		if err != nil {
    +			c <- &Envelope{nil, err}
    +			return
    +		}
    +		if q.Id != in.Id {
    +			c <- &Envelope{in.Answer, ErrId}
    +			return
    +		}
    +		if first {
    +			if in.Rcode != RcodeSuccess {
    +				c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}}
    +				return
    +			}
    +			if !isSOAFirst(in) {
    +				c <- &Envelope{in.Answer, ErrSoa}
    +				return
    +			}
    +			first = !first
    +			// only one answer that is SOA, receive more
    +			if len(in.Answer) == 1 {
    +				t.tsigTimersOnly = true
    +				c <- &Envelope{in.Answer, nil}
    +				continue
    +			}
    +		}
    +
    +		if !first {
    +			t.tsigTimersOnly = true // Subsequent envelopes use this.
    +			if isSOALast(in) {
    +				c <- &Envelope{in.Answer, nil}
    +				return
    +			}
    +			c <- &Envelope{in.Answer, nil}
    +		}
    +	}
    +}
    +
    +func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) {
    +	var serial uint32 // The first serial seen is the current server serial
    +	axfr := true
    +	n := 0
    +	qser := q.Ns[0].(*SOA).Serial
    +	defer t.Close()
    +	defer close(c)
    +	timeout := dnsTimeout
    +	if t.ReadTimeout != 0 {
    +		timeout = t.ReadTimeout
    +	}
    +	for {
    +		t.SetReadDeadline(time.Now().Add(timeout))
    +		in, err := t.ReadMsg()
    +		if err != nil {
    +			c <- &Envelope{nil, err}
    +			return
    +		}
    +		if q.Id != in.Id {
    +			c <- &Envelope{in.Answer, ErrId}
    +			return
    +		}
    +		if in.Rcode != RcodeSuccess {
    +			c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}}
    +			return
    +		}
    +		if n == 0 {
    +			// Check if the returned answer is ok
    +			if !isSOAFirst(in) {
    +				c <- &Envelope{in.Answer, ErrSoa}
    +				return
    +			}
    +			// This serial is important
    +			serial = in.Answer[0].(*SOA).Serial
    +			// Check if there are no changes in zone
    +			if qser >= serial {
    +				c <- &Envelope{in.Answer, nil}
    +				return
    +			}
    +		}
    +		// Now we need to check each message for SOA records, to see what we need to do
    +		t.tsigTimersOnly = true
    +		for _, rr := range in.Answer {
    +			if v, ok := rr.(*SOA); ok {
    +				if v.Serial == serial {
    +					n++
    +					// quit if it's a full axfr or the server's SOA is repeated for the third time
    +					if axfr && n == 2 || n == 3 {
    +						c <- &Envelope{in.Answer, nil}
    +						return
    +					}
    +				} else if axfr {
    +					// it's an ixfr
    +					axfr = false
    +				}
    +			}
    +		}
    +		c <- &Envelope{in.Answer, nil}
    +	}
    +}
    +
    +// Out performs an outgoing transfer with the client connecting in w.
    +// Basic use pattern:
    +//
    +//	ch := make(chan *dns.Envelope)
    +//	tr := new(dns.Transfer)
    +//	go tr.Out(w, r, ch)
    +//	ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}}
    +//	close(ch)
    +//	w.Hijack()
    +//	// w.Close() // Client closes connection
    +//
    +// The server is responsible for sending the correct sequence of RRs through the
    +// channel ch.
    +func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error {
    +	for x := range ch {
    +		r := new(Msg)
    +		// Compress?
    +		r.SetReply(q)
    +		r.Authoritative = true
    +		// assume it fits TODO(miek): fix
    +		r.Answer = append(r.Answer, x.RR...)
    +		if tsig := q.IsTsig(); tsig != nil && w.TsigStatus() == nil {
    +			r.SetTsig(tsig.Hdr.Name, tsig.Algorithm, tsig.Fudge, time.Now().Unix())
    +		}
    +		if err := w.WriteMsg(r); err != nil {
    +			return err
    +		}
    +		w.TsigTimersOnly(true)
    +	}
    +	return nil
    +}
    +
    +// ReadMsg reads a message from the transfer connection t.
    +func (t *Transfer) ReadMsg() (*Msg, error) {
    +	m := new(Msg)
    +	p := make([]byte, MaxMsgSize)
    +	n, err := t.Read(p)
    +	if err != nil && n == 0 {
    +		return nil, err
    +	}
    +	p = p[:n]
    +	if err := m.Unpack(p); err != nil {
    +		return nil, err
    +	}
    +	if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
    +		if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
    +			return m, ErrSecret
    +		}
    +		// Need to work on the original message p, as that was used to calculate the tsig.
    +		err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
    +		t.tsigRequestMAC = ts.MAC
    +	}
    +	return m, err
    +}
    +
    +// WriteMsg writes a message through the transfer connection t.
    +func (t *Transfer) WriteMsg(m *Msg) (err error) {
    +	var out []byte
    +	if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
    +		if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
    +			return ErrSecret
    +		}
    +		out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
    +	} else {
    +		out, err = m.Pack()
    +	}
    +	if err != nil {
    +		return err
    +	}
    +	_, err = t.Write(out)
    +	return err
    +}
    +
    +func isSOAFirst(in *Msg) bool {
    +	return len(in.Answer) > 0 &&
    +		in.Answer[0].Header().Rrtype == TypeSOA
    +}
    +
    +func isSOALast(in *Msg) bool {
    +	return len(in.Answer) > 0 &&
    +		in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA
    +}
    +
    +const errXFR = "bad xfr rcode: %d"
    diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go
    new file mode 100644
    index 000000000000..74389162faf7
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/zduplicate.go
    @@ -0,0 +1,1140 @@
    +// Code generated by "go run duplicate_generate.go"; DO NOT EDIT.
    +
    +package dns
    +
    +// isDuplicate() functions
    +
    +func (r1 *A) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*A)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !r1.A.Equal(r2.A) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *AAAA) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*AAAA)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !r1.AAAA.Equal(r2.AAAA) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *AFSDB) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*AFSDB)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Subtype != r2.Subtype {
    +		return false
    +	}
    +	if !isDuplicateName(r1.Hostname, r2.Hostname) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *ANY) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*ANY)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	return true
    +}
    +
    +func (r1 *AVC) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*AVC)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if len(r1.Txt) != len(r2.Txt) {
    +		return false
    +	}
    +	for i := 0; i < len(r1.Txt); i++ {
    +		if r1.Txt[i] != r2.Txt[i] {
    +			return false
    +		}
    +	}
    +	return true
    +}
    +
    +func (r1 *CAA) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*CAA)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Flag != r2.Flag {
    +		return false
    +	}
    +	if r1.Tag != r2.Tag {
    +		return false
    +	}
    +	if r1.Value != r2.Value {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *CERT) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*CERT)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Type != r2.Type {
    +		return false
    +	}
    +	if r1.KeyTag != r2.KeyTag {
    +		return false
    +	}
    +	if r1.Algorithm != r2.Algorithm {
    +		return false
    +	}
    +	if r1.Certificate != r2.Certificate {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *CNAME) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*CNAME)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.Target, r2.Target) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *CSYNC) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*CSYNC)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Serial != r2.Serial {
    +		return false
    +	}
    +	if r1.Flags != r2.Flags {
    +		return false
    +	}
    +	if len(r1.TypeBitMap) != len(r2.TypeBitMap) {
    +		return false
    +	}
    +	for i := 0; i < len(r1.TypeBitMap); i++ {
    +		if r1.TypeBitMap[i] != r2.TypeBitMap[i] {
    +			return false
    +		}
    +	}
    +	return true
    +}
    +
    +func (r1 *DHCID) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*DHCID)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Digest != r2.Digest {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *DNAME) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*DNAME)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.Target, r2.Target) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *DNSKEY) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*DNSKEY)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Flags != r2.Flags {
    +		return false
    +	}
    +	if r1.Protocol != r2.Protocol {
    +		return false
    +	}
    +	if r1.Algorithm != r2.Algorithm {
    +		return false
    +	}
    +	if r1.PublicKey != r2.PublicKey {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *DS) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*DS)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.KeyTag != r2.KeyTag {
    +		return false
    +	}
    +	if r1.Algorithm != r2.Algorithm {
    +		return false
    +	}
    +	if r1.DigestType != r2.DigestType {
    +		return false
    +	}
    +	if r1.Digest != r2.Digest {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *EID) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*EID)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Endpoint != r2.Endpoint {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *EUI48) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*EUI48)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Address != r2.Address {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *EUI64) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*EUI64)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Address != r2.Address {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *GID) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*GID)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Gid != r2.Gid {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *GPOS) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*GPOS)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Longitude != r2.Longitude {
    +		return false
    +	}
    +	if r1.Latitude != r2.Latitude {
    +		return false
    +	}
    +	if r1.Altitude != r2.Altitude {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *HINFO) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*HINFO)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Cpu != r2.Cpu {
    +		return false
    +	}
    +	if r1.Os != r2.Os {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *HIP) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*HIP)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.HitLength != r2.HitLength {
    +		return false
    +	}
    +	if r1.PublicKeyAlgorithm != r2.PublicKeyAlgorithm {
    +		return false
    +	}
    +	if r1.PublicKeyLength != r2.PublicKeyLength {
    +		return false
    +	}
    +	if r1.Hit != r2.Hit {
    +		return false
    +	}
    +	if r1.PublicKey != r2.PublicKey {
    +		return false
    +	}
    +	if len(r1.RendezvousServers) != len(r2.RendezvousServers) {
    +		return false
    +	}
    +	for i := 0; i < len(r1.RendezvousServers); i++ {
    +		if !isDuplicateName(r1.RendezvousServers[i], r2.RendezvousServers[i]) {
    +			return false
    +		}
    +	}
    +	return true
    +}
    +
    +func (r1 *KX) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*KX)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Preference != r2.Preference {
    +		return false
    +	}
    +	if !isDuplicateName(r1.Exchanger, r2.Exchanger) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *L32) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*L32)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Preference != r2.Preference {
    +		return false
    +	}
    +	if !r1.Locator32.Equal(r2.Locator32) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *L64) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*L64)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Preference != r2.Preference {
    +		return false
    +	}
    +	if r1.Locator64 != r2.Locator64 {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *LOC) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*LOC)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Version != r2.Version {
    +		return false
    +	}
    +	if r1.Size != r2.Size {
    +		return false
    +	}
    +	if r1.HorizPre != r2.HorizPre {
    +		return false
    +	}
    +	if r1.VertPre != r2.VertPre {
    +		return false
    +	}
    +	if r1.Latitude != r2.Latitude {
    +		return false
    +	}
    +	if r1.Longitude != r2.Longitude {
    +		return false
    +	}
    +	if r1.Altitude != r2.Altitude {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *LP) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*LP)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Preference != r2.Preference {
    +		return false
    +	}
    +	if !isDuplicateName(r1.Fqdn, r2.Fqdn) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *MB) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*MB)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.Mb, r2.Mb) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *MD) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*MD)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.Md, r2.Md) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *MF) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*MF)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.Mf, r2.Mf) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *MG) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*MG)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.Mg, r2.Mg) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *MINFO) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*MINFO)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.Rmail, r2.Rmail) {
    +		return false
    +	}
    +	if !isDuplicateName(r1.Email, r2.Email) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *MR) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*MR)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.Mr, r2.Mr) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *MX) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*MX)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Preference != r2.Preference {
    +		return false
    +	}
    +	if !isDuplicateName(r1.Mx, r2.Mx) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *NAPTR) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*NAPTR)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Order != r2.Order {
    +		return false
    +	}
    +	if r1.Preference != r2.Preference {
    +		return false
    +	}
    +	if r1.Flags != r2.Flags {
    +		return false
    +	}
    +	if r1.Service != r2.Service {
    +		return false
    +	}
    +	if r1.Regexp != r2.Regexp {
    +		return false
    +	}
    +	if !isDuplicateName(r1.Replacement, r2.Replacement) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *NID) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*NID)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Preference != r2.Preference {
    +		return false
    +	}
    +	if r1.NodeID != r2.NodeID {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *NIMLOC) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*NIMLOC)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Locator != r2.Locator {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *NINFO) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*NINFO)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if len(r1.ZSData) != len(r2.ZSData) {
    +		return false
    +	}
    +	for i := 0; i < len(r1.ZSData); i++ {
    +		if r1.ZSData[i] != r2.ZSData[i] {
    +			return false
    +		}
    +	}
    +	return true
    +}
    +
    +func (r1 *NS) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*NS)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.Ns, r2.Ns) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *NSAPPTR) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*NSAPPTR)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.Ptr, r2.Ptr) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *NSEC) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*NSEC)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.NextDomain, r2.NextDomain) {
    +		return false
    +	}
    +	if len(r1.TypeBitMap) != len(r2.TypeBitMap) {
    +		return false
    +	}
    +	for i := 0; i < len(r1.TypeBitMap); i++ {
    +		if r1.TypeBitMap[i] != r2.TypeBitMap[i] {
    +			return false
    +		}
    +	}
    +	return true
    +}
    +
    +func (r1 *NSEC3) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*NSEC3)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Hash != r2.Hash {
    +		return false
    +	}
    +	if r1.Flags != r2.Flags {
    +		return false
    +	}
    +	if r1.Iterations != r2.Iterations {
    +		return false
    +	}
    +	if r1.SaltLength != r2.SaltLength {
    +		return false
    +	}
    +	if r1.Salt != r2.Salt {
    +		return false
    +	}
    +	if r1.HashLength != r2.HashLength {
    +		return false
    +	}
    +	if r1.NextDomain != r2.NextDomain {
    +		return false
    +	}
    +	if len(r1.TypeBitMap) != len(r2.TypeBitMap) {
    +		return false
    +	}
    +	for i := 0; i < len(r1.TypeBitMap); i++ {
    +		if r1.TypeBitMap[i] != r2.TypeBitMap[i] {
    +			return false
    +		}
    +	}
    +	return true
    +}
    +
    +func (r1 *NSEC3PARAM) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*NSEC3PARAM)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Hash != r2.Hash {
    +		return false
    +	}
    +	if r1.Flags != r2.Flags {
    +		return false
    +	}
    +	if r1.Iterations != r2.Iterations {
    +		return false
    +	}
    +	if r1.SaltLength != r2.SaltLength {
    +		return false
    +	}
    +	if r1.Salt != r2.Salt {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *NULL) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*NULL)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Data != r2.Data {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *OPENPGPKEY) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*OPENPGPKEY)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.PublicKey != r2.PublicKey {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *PTR) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*PTR)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.Ptr, r2.Ptr) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *PX) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*PX)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Preference != r2.Preference {
    +		return false
    +	}
    +	if !isDuplicateName(r1.Map822, r2.Map822) {
    +		return false
    +	}
    +	if !isDuplicateName(r1.Mapx400, r2.Mapx400) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *RFC3597) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*RFC3597)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Rdata != r2.Rdata {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *RKEY) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*RKEY)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Flags != r2.Flags {
    +		return false
    +	}
    +	if r1.Protocol != r2.Protocol {
    +		return false
    +	}
    +	if r1.Algorithm != r2.Algorithm {
    +		return false
    +	}
    +	if r1.PublicKey != r2.PublicKey {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *RP) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*RP)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.Mbox, r2.Mbox) {
    +		return false
    +	}
    +	if !isDuplicateName(r1.Txt, r2.Txt) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *RRSIG) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*RRSIG)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.TypeCovered != r2.TypeCovered {
    +		return false
    +	}
    +	if r1.Algorithm != r2.Algorithm {
    +		return false
    +	}
    +	if r1.Labels != r2.Labels {
    +		return false
    +	}
    +	if r1.OrigTtl != r2.OrigTtl {
    +		return false
    +	}
    +	if r1.Expiration != r2.Expiration {
    +		return false
    +	}
    +	if r1.Inception != r2.Inception {
    +		return false
    +	}
    +	if r1.KeyTag != r2.KeyTag {
    +		return false
    +	}
    +	if !isDuplicateName(r1.SignerName, r2.SignerName) {
    +		return false
    +	}
    +	if r1.Signature != r2.Signature {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *RT) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*RT)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Preference != r2.Preference {
    +		return false
    +	}
    +	if !isDuplicateName(r1.Host, r2.Host) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *SMIMEA) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*SMIMEA)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Usage != r2.Usage {
    +		return false
    +	}
    +	if r1.Selector != r2.Selector {
    +		return false
    +	}
    +	if r1.MatchingType != r2.MatchingType {
    +		return false
    +	}
    +	if r1.Certificate != r2.Certificate {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *SOA) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*SOA)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.Ns, r2.Ns) {
    +		return false
    +	}
    +	if !isDuplicateName(r1.Mbox, r2.Mbox) {
    +		return false
    +	}
    +	if r1.Serial != r2.Serial {
    +		return false
    +	}
    +	if r1.Refresh != r2.Refresh {
    +		return false
    +	}
    +	if r1.Retry != r2.Retry {
    +		return false
    +	}
    +	if r1.Expire != r2.Expire {
    +		return false
    +	}
    +	if r1.Minttl != r2.Minttl {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *SPF) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*SPF)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if len(r1.Txt) != len(r2.Txt) {
    +		return false
    +	}
    +	for i := 0; i < len(r1.Txt); i++ {
    +		if r1.Txt[i] != r2.Txt[i] {
    +			return false
    +		}
    +	}
    +	return true
    +}
    +
    +func (r1 *SRV) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*SRV)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Priority != r2.Priority {
    +		return false
    +	}
    +	if r1.Weight != r2.Weight {
    +		return false
    +	}
    +	if r1.Port != r2.Port {
    +		return false
    +	}
    +	if !isDuplicateName(r1.Target, r2.Target) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *SSHFP) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*SSHFP)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Algorithm != r2.Algorithm {
    +		return false
    +	}
    +	if r1.Type != r2.Type {
    +		return false
    +	}
    +	if r1.FingerPrint != r2.FingerPrint {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *TA) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*TA)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.KeyTag != r2.KeyTag {
    +		return false
    +	}
    +	if r1.Algorithm != r2.Algorithm {
    +		return false
    +	}
    +	if r1.DigestType != r2.DigestType {
    +		return false
    +	}
    +	if r1.Digest != r2.Digest {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *TALINK) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*TALINK)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.PreviousName, r2.PreviousName) {
    +		return false
    +	}
    +	if !isDuplicateName(r1.NextName, r2.NextName) {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *TKEY) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*TKEY)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.Algorithm, r2.Algorithm) {
    +		return false
    +	}
    +	if r1.Inception != r2.Inception {
    +		return false
    +	}
    +	if r1.Expiration != r2.Expiration {
    +		return false
    +	}
    +	if r1.Mode != r2.Mode {
    +		return false
    +	}
    +	if r1.Error != r2.Error {
    +		return false
    +	}
    +	if r1.KeySize != r2.KeySize {
    +		return false
    +	}
    +	if r1.Key != r2.Key {
    +		return false
    +	}
    +	if r1.OtherLen != r2.OtherLen {
    +		return false
    +	}
    +	if r1.OtherData != r2.OtherData {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *TLSA) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*TLSA)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Usage != r2.Usage {
    +		return false
    +	}
    +	if r1.Selector != r2.Selector {
    +		return false
    +	}
    +	if r1.MatchingType != r2.MatchingType {
    +		return false
    +	}
    +	if r1.Certificate != r2.Certificate {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *TSIG) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*TSIG)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if !isDuplicateName(r1.Algorithm, r2.Algorithm) {
    +		return false
    +	}
    +	if r1.TimeSigned != r2.TimeSigned {
    +		return false
    +	}
    +	if r1.Fudge != r2.Fudge {
    +		return false
    +	}
    +	if r1.MACSize != r2.MACSize {
    +		return false
    +	}
    +	if r1.MAC != r2.MAC {
    +		return false
    +	}
    +	if r1.OrigId != r2.OrigId {
    +		return false
    +	}
    +	if r1.Error != r2.Error {
    +		return false
    +	}
    +	if r1.OtherLen != r2.OtherLen {
    +		return false
    +	}
    +	if r1.OtherData != r2.OtherData {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *TXT) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*TXT)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if len(r1.Txt) != len(r2.Txt) {
    +		return false
    +	}
    +	for i := 0; i < len(r1.Txt); i++ {
    +		if r1.Txt[i] != r2.Txt[i] {
    +			return false
    +		}
    +	}
    +	return true
    +}
    +
    +func (r1 *UID) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*UID)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Uid != r2.Uid {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *UINFO) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*UINFO)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Uinfo != r2.Uinfo {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *URI) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*URI)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.Priority != r2.Priority {
    +		return false
    +	}
    +	if r1.Weight != r2.Weight {
    +		return false
    +	}
    +	if r1.Target != r2.Target {
    +		return false
    +	}
    +	return true
    +}
    +
    +func (r1 *X25) isDuplicate(_r2 RR) bool {
    +	r2, ok := _r2.(*X25)
    +	if !ok {
    +		return false
    +	}
    +	_ = r2
    +	if r1.PSDNAddress != r2.PSDNAddress {
    +		return false
    +	}
    +	return true
    +}
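
The generated isDuplicate methods above type-assert the second record and then compare rdata field by field (domain names via isDuplicateName, slices element-wise). They are reached through the package's exported IsDuplicate helper, which first checks that the owner name, class, and type match and deliberately ignores the TTL. A minimal sketch with placeholder records:

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func mustRR(s string) dns.RR {
	rr, err := dns.NewRR(s)
	if err != nil {
		log.Fatal(err)
	}
	return rr
}

func main() {
	a1 := mustRR("www.example.org. 300 IN A 192.0.2.1")
	a2 := mustRR("www.example.org. 600 IN A 192.0.2.1") // same rdata, different TTL
	a3 := mustRR("www.example.org. 300 IN A 192.0.2.2")

	fmt.Println(dns.IsDuplicate(a1, a2)) // true: TTL is not part of the comparison
	fmt.Println(dns.IsDuplicate(a1, a3)) // false: the A rdata differs
}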
    diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go
    new file mode 100644
    index 000000000000..c4cf4757e8a6
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/zmsg.go
    @@ -0,0 +1,2722 @@
    +// Code generated by "go run msg_generate.go"; DO NOT EDIT.
    +
    +package dns
    +
    +// pack*() functions
    +
    +func (rr *A) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDataA(rr.A, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *AAAA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDataAAAA(rr.AAAA, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *AFSDB) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Subtype, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDomainName(rr.Hostname, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *ANY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	return off, nil
    +}
    +
    +func (rr *AVC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packStringTxt(rr.Txt, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *CAA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint8(rr.Flag, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packString(rr.Tag, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringOctet(rr.Value, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *CDNSKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Flags, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Protocol, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Algorithm, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringBase64(rr.PublicKey, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *CDS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.KeyTag, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Algorithm, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.DigestType, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringHex(rr.Digest, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *CERT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Type, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.KeyTag, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Algorithm, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringBase64(rr.Certificate, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *CNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.Target, msg, off, compression, compress)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *CSYNC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint32(rr.Serial, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.Flags, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDataNsec(rr.TypeBitMap, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *DHCID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packStringBase64(rr.Digest, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *DLV) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.KeyTag, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Algorithm, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.DigestType, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringHex(rr.Digest, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *DNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.Target, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *DNSKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Flags, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Protocol, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Algorithm, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringBase64(rr.PublicKey, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *DS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.KeyTag, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Algorithm, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.DigestType, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringHex(rr.Digest, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *EID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packStringHex(rr.Endpoint, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *EUI48) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint48(rr.Address, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *EUI64) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint64(rr.Address, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *GID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint32(rr.Gid, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *GPOS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packString(rr.Longitude, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packString(rr.Latitude, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packString(rr.Altitude, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *HINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packString(rr.Cpu, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packString(rr.Os, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *HIP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint8(rr.HitLength, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.PublicKeyAlgorithm, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.PublicKeyLength, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringHex(rr.Hit, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringBase64(rr.PublicKey, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *KEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Flags, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Protocol, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Algorithm, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringBase64(rr.PublicKey, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *KX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Preference, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDomainName(rr.Exchanger, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *L32) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Preference, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDataA(rr.Locator32, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *L64) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Preference, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint64(rr.Locator64, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *LOC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint8(rr.Version, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Size, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.HorizPre, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.VertPre, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.Latitude, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.Longitude, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.Altitude, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *LP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Preference, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDomainName(rr.Fqdn, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *MB) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.Mb, msg, off, compression, compress)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *MD) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.Md, msg, off, compression, compress)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *MF) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.Mf, msg, off, compression, compress)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *MG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.Mg, msg, off, compression, compress)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *MINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.Rmail, msg, off, compression, compress)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDomainName(rr.Email, msg, off, compression, compress)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *MR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.Mr, msg, off, compression, compress)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *MX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Preference, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDomainName(rr.Mx, msg, off, compression, compress)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NAPTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Order, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.Preference, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packString(rr.Flags, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packString(rr.Service, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packString(rr.Regexp, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDomainName(rr.Replacement, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Preference, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint64(rr.NodeID, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NIMLOC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packStringHex(rr.Locator, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packStringTxt(rr.ZSData, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.Ns, msg, off, compression, compress)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NSAPPTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.Ptr, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NSEC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.NextDomain, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDataNsec(rr.TypeBitMap, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NSEC3) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint8(rr.Hash, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Flags, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.Iterations, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.SaltLength, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	// Only pack salt if value is not "-", i.e. empty
    +	if rr.Salt != "-" {
    +		off, err = packStringHex(rr.Salt, msg, off)
    +		if err != nil {
    +			return off, err
    +		}
    +	}
    +	off, err = packUint8(rr.HashLength, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringBase32(rr.NextDomain, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDataNsec(rr.TypeBitMap, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NSEC3PARAM) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint8(rr.Hash, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Flags, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.Iterations, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.SaltLength, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	// Only pack salt if value is not "-", i.e. empty
    +	if rr.Salt != "-" {
    +		off, err = packStringHex(rr.Salt, msg, off)
    +		if err != nil {
    +			return off, err
    +		}
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NULL) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packStringAny(rr.Data, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *OPENPGPKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packStringBase64(rr.PublicKey, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *OPT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDataOpt(rr.Option, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *PTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.Ptr, msg, off, compression, compress)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *PX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Preference, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDomainName(rr.Map822, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDomainName(rr.Mapx400, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *RFC3597) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packStringHex(rr.Rdata, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *RKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Flags, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Protocol, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Algorithm, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringBase64(rr.PublicKey, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *RP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.Mbox, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDomainName(rr.Txt, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *RRSIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.TypeCovered, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Algorithm, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Labels, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.OrigTtl, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.Expiration, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.Inception, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.KeyTag, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDomainName(rr.SignerName, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringBase64(rr.Signature, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *RT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Preference, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDomainName(rr.Host, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *SIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.TypeCovered, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Algorithm, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Labels, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.OrigTtl, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.Expiration, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.Inception, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.KeyTag, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDomainName(rr.SignerName, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringBase64(rr.Signature, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *SMIMEA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint8(rr.Usage, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Selector, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.MatchingType, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringHex(rr.Certificate, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *SOA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.Ns, msg, off, compression, compress)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDomainName(rr.Mbox, msg, off, compression, compress)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.Serial, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.Refresh, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.Retry, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.Expire, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.Minttl, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *SPF) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packStringTxt(rr.Txt, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *SRV) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Priority, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.Weight, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.Port, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDomainName(rr.Target, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *SSHFP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint8(rr.Algorithm, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Type, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringHex(rr.FingerPrint, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *TA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.KeyTag, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Algorithm, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.DigestType, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringHex(rr.Digest, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *TALINK) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.PreviousName, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packDomainName(rr.NextName, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *TKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.Algorithm, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.Inception, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint32(rr.Expiration, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.Mode, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.Error, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.KeySize, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringHex(rr.Key, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.OtherLen, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringHex(rr.OtherData, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *TLSA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint8(rr.Usage, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.Selector, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint8(rr.MatchingType, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringHex(rr.Certificate, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *TSIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packDomainName(rr.Algorithm, msg, off, compression, false)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint48(rr.TimeSigned, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.Fudge, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.MACSize, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringHex(rr.MAC, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.OrigId, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.Error, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.OtherLen, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringHex(rr.OtherData, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *TXT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packStringTxt(rr.Txt, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *UID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint32(rr.Uid, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *UINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packString(rr.Uinfo, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *URI) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packUint16(rr.Priority, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packUint16(rr.Weight, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	off, err = packStringOctet(rr.Target, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *X25) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
    +	off, err = packString(rr.PSDNAddress, msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
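+// Note (editorial, illustrative; not produced by msg_generate.go): the
+// per-type pack methods above and the unpack methods below are unexported
+// and are not called directly. They are driven by the exported
+// PackRR/UnpackRR helpers, which Msg.Pack and Msg.Unpack use for each
+// resource record. A minimal round trip, assuming only this package:
+//
+//	m := new(Msg)
+//	m.SetQuestion("example.org.", TypeMX)
+//	wire, err := m.Pack() // PackRR -> rr.pack(...) for each record
+//	if err != nil {
+//		// handle error
+//	}
+//	reply := new(Msg)
+//	err = reply.Unpack(wire) // UnpackRR -> rr.unpack(...) for each record
+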
    +// unpack*() functions
    +
    +func (rr *A) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.A, off, err = unpackDataA(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *AAAA) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.AAAA, off, err = unpackDataAAAA(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *AFSDB) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Subtype, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Hostname, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *ANY) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	return off, nil
    +}
    +
    +func (rr *AVC) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Txt, off, err = unpackStringTxt(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *CAA) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Flag, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Tag, off, err = unpackString(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Value, off, err = unpackStringOctet(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *CDNSKEY) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Flags, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Protocol, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Algorithm, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *CDS) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.KeyTag, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Algorithm, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.DigestType, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *CERT) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Type, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.KeyTag, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Algorithm, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *CNAME) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Target, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *CSYNC) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Serial, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Flags, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.TypeBitMap, off, err = unpackDataNsec(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *DHCID) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *DLV) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.KeyTag, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Algorithm, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.DigestType, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *DNAME) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Target, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *DNSKEY) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Flags, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Protocol, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Algorithm, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *DS) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.KeyTag, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Algorithm, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.DigestType, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *EID) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *EUI48) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Address, off, err = unpackUint48(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *EUI64) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Address, off, err = unpackUint64(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *GID) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Gid, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *GPOS) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Longitude, off, err = unpackString(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Latitude, off, err = unpackString(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Altitude, off, err = unpackString(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *HINFO) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Cpu, off, err = unpackString(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Os, off, err = unpackString(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *HIP) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.HitLength, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.PublicKeyLength, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength))
    +	if err != nil {
    +		return off, err
    +	}
    +	rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength))
    +	if err != nil {
    +		return off, err
    +	}
    +	rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *KEY) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Flags, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Protocol, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Algorithm, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *KX) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Preference, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Exchanger, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *L32) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Preference, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Locator32, off, err = unpackDataA(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *L64) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Preference, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Locator64, off, err = unpackUint64(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *LOC) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Version, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Size, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.HorizPre, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.VertPre, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Latitude, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Longitude, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Altitude, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *LP) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Preference, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Fqdn, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *MB) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Mb, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *MD) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Md, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *MF) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Mf, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *MG) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Mg, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *MINFO) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Rmail, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Email, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *MR) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Mr, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *MX) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Preference, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Mx, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NAPTR) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Order, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Preference, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Flags, off, err = unpackString(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Service, off, err = unpackString(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Regexp, off, err = unpackString(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Replacement, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NID) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Preference, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.NodeID, off, err = unpackUint64(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NIMLOC) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NINFO) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.ZSData, off, err = unpackStringTxt(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NS) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Ns, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NSAPPTR) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Ptr, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NSEC) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.NextDomain, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.TypeBitMap, off, err = unpackDataNsec(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NSEC3) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Hash, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Flags, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Iterations, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.SaltLength, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength))
    +	if err != nil {
    +		return off, err
    +	}
    +	rr.HashLength, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength))
    +	if err != nil {
    +		return off, err
    +	}
    +	rr.TypeBitMap, off, err = unpackDataNsec(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NSEC3PARAM) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Hash, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Flags, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Iterations, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.SaltLength, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *NULL) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Data, off, err = unpackStringAny(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *OPENPGPKEY) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *OPT) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Option, off, err = unpackDataOpt(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *PTR) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Ptr, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *PX) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Preference, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Map822, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Mapx400, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *RFC3597) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *RKEY) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Flags, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Protocol, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Algorithm, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *RP) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Mbox, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Txt, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *RRSIG) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.TypeCovered, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Algorithm, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Labels, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.OrigTtl, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Expiration, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Inception, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.KeyTag, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.SignerName, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *RT) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Preference, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Host, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *SIG) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.TypeCovered, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Algorithm, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Labels, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.OrigTtl, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Expiration, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Inception, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.KeyTag, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.SignerName, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *SMIMEA) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Usage, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Selector, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.MatchingType, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *SOA) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Ns, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Mbox, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Serial, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Refresh, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Retry, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Expire, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Minttl, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *SPF) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Txt, off, err = unpackStringTxt(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *SRV) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Priority, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Weight, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Port, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Target, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *SSHFP) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Algorithm, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Type, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *TA) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.KeyTag, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Algorithm, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.DigestType, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *TALINK) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.PreviousName, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.NextName, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *TKEY) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Algorithm, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Inception, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Expiration, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Mode, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Error, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.KeySize, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Key, off, err = unpackStringHex(msg, off, off+int(rr.KeySize))
    +	if err != nil {
    +		return off, err
    +	}
    +	rr.OtherLen, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *TLSA) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Usage, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Selector, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.MatchingType, off, err = unpackUint8(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *TSIG) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Algorithm, off, err = UnpackDomainName(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.TimeSigned, off, err = unpackUint48(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Fudge, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.MACSize, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize))
    +	if err != nil {
    +		return off, err
    +	}
    +	rr.OrigId, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Error, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.OtherLen, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen))
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *TXT) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Txt, off, err = unpackStringTxt(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *UID) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Uid, off, err = unpackUint32(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *UINFO) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Uinfo, off, err = unpackString(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *URI) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.Priority, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Weight, off, err = unpackUint16(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	if off == len(msg) {
    +		return off, nil
    +	}
    +	rr.Target, off, err = unpackStringOctet(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    +
    +func (rr *X25) unpack(msg []byte, off int) (off1 int, err error) {
    +	rdStart := off
    +	_ = rdStart
    +
    +	rr.PSDNAddress, off, err = unpackString(msg, off)
    +	if err != nil {
    +		return off, err
    +	}
    +	return off, nil
    +}
    diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go
    new file mode 100644
    index 000000000000..f7ec8352fb1d
    --- /dev/null
    +++ b/vendor/github.com/miekg/dns/ztypes.go
    @@ -0,0 +1,881 @@
    +// Code generated by "go run types_generate.go"; DO NOT EDIT.
    +
    +package dns
    +
    +import (
    +	"encoding/base64"
    +	"net"
    +)
    +
    +// TypeToRR is a map of constructors for each RR type.
    +var TypeToRR = map[uint16]func() RR{
    +	TypeA:          func() RR { return new(A) },
    +	TypeAAAA:       func() RR { return new(AAAA) },
    +	TypeAFSDB:      func() RR { return new(AFSDB) },
    +	TypeANY:        func() RR { return new(ANY) },
    +	TypeAVC:        func() RR { return new(AVC) },
    +	TypeCAA:        func() RR { return new(CAA) },
    +	TypeCDNSKEY:    func() RR { return new(CDNSKEY) },
    +	TypeCDS:        func() RR { return new(CDS) },
    +	TypeCERT:       func() RR { return new(CERT) },
    +	TypeCNAME:      func() RR { return new(CNAME) },
    +	TypeCSYNC:      func() RR { return new(CSYNC) },
    +	TypeDHCID:      func() RR { return new(DHCID) },
    +	TypeDLV:        func() RR { return new(DLV) },
    +	TypeDNAME:      func() RR { return new(DNAME) },
    +	TypeDNSKEY:     func() RR { return new(DNSKEY) },
    +	TypeDS:         func() RR { return new(DS) },
    +	TypeEID:        func() RR { return new(EID) },
    +	TypeEUI48:      func() RR { return new(EUI48) },
    +	TypeEUI64:      func() RR { return new(EUI64) },
    +	TypeGID:        func() RR { return new(GID) },
    +	TypeGPOS:       func() RR { return new(GPOS) },
    +	TypeHINFO:      func() RR { return new(HINFO) },
    +	TypeHIP:        func() RR { return new(HIP) },
    +	TypeKEY:        func() RR { return new(KEY) },
    +	TypeKX:         func() RR { return new(KX) },
    +	TypeL32:        func() RR { return new(L32) },
    +	TypeL64:        func() RR { return new(L64) },
    +	TypeLOC:        func() RR { return new(LOC) },
    +	TypeLP:         func() RR { return new(LP) },
    +	TypeMB:         func() RR { return new(MB) },
    +	TypeMD:         func() RR { return new(MD) },
    +	TypeMF:         func() RR { return new(MF) },
    +	TypeMG:         func() RR { return new(MG) },
    +	TypeMINFO:      func() RR { return new(MINFO) },
    +	TypeMR:         func() RR { return new(MR) },
    +	TypeMX:         func() RR { return new(MX) },
    +	TypeNAPTR:      func() RR { return new(NAPTR) },
    +	TypeNID:        func() RR { return new(NID) },
    +	TypeNIMLOC:     func() RR { return new(NIMLOC) },
    +	TypeNINFO:      func() RR { return new(NINFO) },
    +	TypeNS:         func() RR { return new(NS) },
    +	TypeNSAPPTR:    func() RR { return new(NSAPPTR) },
    +	TypeNSEC:       func() RR { return new(NSEC) },
    +	TypeNSEC3:      func() RR { return new(NSEC3) },
    +	TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) },
    +	TypeNULL:       func() RR { return new(NULL) },
    +	TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) },
    +	TypeOPT:        func() RR { return new(OPT) },
    +	TypePTR:        func() RR { return new(PTR) },
    +	TypePX:         func() RR { return new(PX) },
    +	TypeRKEY:       func() RR { return new(RKEY) },
    +	TypeRP:         func() RR { return new(RP) },
    +	TypeRRSIG:      func() RR { return new(RRSIG) },
    +	TypeRT:         func() RR { return new(RT) },
    +	TypeSIG:        func() RR { return new(SIG) },
    +	TypeSMIMEA:     func() RR { return new(SMIMEA) },
    +	TypeSOA:        func() RR { return new(SOA) },
    +	TypeSPF:        func() RR { return new(SPF) },
    +	TypeSRV:        func() RR { return new(SRV) },
    +	TypeSSHFP:      func() RR { return new(SSHFP) },
    +	TypeTA:         func() RR { return new(TA) },
    +	TypeTALINK:     func() RR { return new(TALINK) },
    +	TypeTKEY:       func() RR { return new(TKEY) },
    +	TypeTLSA:       func() RR { return new(TLSA) },
    +	TypeTSIG:       func() RR { return new(TSIG) },
    +	TypeTXT:        func() RR { return new(TXT) },
    +	TypeUID:        func() RR { return new(UID) },
    +	TypeUINFO:      func() RR { return new(UINFO) },
    +	TypeURI:        func() RR { return new(URI) },
    +	TypeX25:        func() RR { return new(X25) },
    +}
    +
    +// TypeToString is a map of strings for each RR type.
    +var TypeToString = map[uint16]string{
    +	TypeA:          "A",
    +	TypeAAAA:       "AAAA",
    +	TypeAFSDB:      "AFSDB",
    +	TypeANY:        "ANY",
    +	TypeATMA:       "ATMA",
    +	TypeAVC:        "AVC",
    +	TypeAXFR:       "AXFR",
    +	TypeCAA:        "CAA",
    +	TypeCDNSKEY:    "CDNSKEY",
    +	TypeCDS:        "CDS",
    +	TypeCERT:       "CERT",
    +	TypeCNAME:      "CNAME",
    +	TypeCSYNC:      "CSYNC",
    +	TypeDHCID:      "DHCID",
    +	TypeDLV:        "DLV",
    +	TypeDNAME:      "DNAME",
    +	TypeDNSKEY:     "DNSKEY",
    +	TypeDS:         "DS",
    +	TypeEID:        "EID",
    +	TypeEUI48:      "EUI48",
    +	TypeEUI64:      "EUI64",
    +	TypeGID:        "GID",
    +	TypeGPOS:       "GPOS",
    +	TypeHINFO:      "HINFO",
    +	TypeHIP:        "HIP",
    +	TypeISDN:       "ISDN",
    +	TypeIXFR:       "IXFR",
    +	TypeKEY:        "KEY",
    +	TypeKX:         "KX",
    +	TypeL32:        "L32",
    +	TypeL64:        "L64",
    +	TypeLOC:        "LOC",
    +	TypeLP:         "LP",
    +	TypeMAILA:      "MAILA",
    +	TypeMAILB:      "MAILB",
    +	TypeMB:         "MB",
    +	TypeMD:         "MD",
    +	TypeMF:         "MF",
    +	TypeMG:         "MG",
    +	TypeMINFO:      "MINFO",
    +	TypeMR:         "MR",
    +	TypeMX:         "MX",
    +	TypeNAPTR:      "NAPTR",
    +	TypeNID:        "NID",
    +	TypeNIMLOC:     "NIMLOC",
    +	TypeNINFO:      "NINFO",
    +	TypeNS:         "NS",
    +	TypeNSEC:       "NSEC",
    +	TypeNSEC3:      "NSEC3",
    +	TypeNSEC3PARAM: "NSEC3PARAM",
    +	TypeNULL:       "NULL",
    +	TypeNXT:        "NXT",
    +	TypeNone:       "None",
    +	TypeOPENPGPKEY: "OPENPGPKEY",
    +	TypeOPT:        "OPT",
    +	TypePTR:        "PTR",
    +	TypePX:         "PX",
    +	TypeRKEY:       "RKEY",
    +	TypeRP:         "RP",
    +	TypeRRSIG:      "RRSIG",
    +	TypeRT:         "RT",
    +	TypeReserved:   "Reserved",
    +	TypeSIG:        "SIG",
    +	TypeSMIMEA:     "SMIMEA",
    +	TypeSOA:        "SOA",
    +	TypeSPF:        "SPF",
    +	TypeSRV:        "SRV",
    +	TypeSSHFP:      "SSHFP",
    +	TypeTA:         "TA",
    +	TypeTALINK:     "TALINK",
    +	TypeTKEY:       "TKEY",
    +	TypeTLSA:       "TLSA",
    +	TypeTSIG:       "TSIG",
    +	TypeTXT:        "TXT",
    +	TypeUID:        "UID",
    +	TypeUINFO:      "UINFO",
    +	TypeUNSPEC:     "UNSPEC",
    +	TypeURI:        "URI",
    +	TypeX25:        "X25",
    +	TypeNSAPPTR:    "NSAP-PTR",
    +}
    +
    +func (rr *A) Header() *RR_Header          { return &rr.Hdr }
    +func (rr *AAAA) Header() *RR_Header       { return &rr.Hdr }
    +func (rr *AFSDB) Header() *RR_Header      { return &rr.Hdr }
    +func (rr *ANY) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *AVC) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *CAA) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *CDNSKEY) Header() *RR_Header    { return &rr.Hdr }
    +func (rr *CDS) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *CERT) Header() *RR_Header       { return &rr.Hdr }
    +func (rr *CNAME) Header() *RR_Header      { return &rr.Hdr }
    +func (rr *CSYNC) Header() *RR_Header      { return &rr.Hdr }
    +func (rr *DHCID) Header() *RR_Header      { return &rr.Hdr }
    +func (rr *DLV) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *DNAME) Header() *RR_Header      { return &rr.Hdr }
    +func (rr *DNSKEY) Header() *RR_Header     { return &rr.Hdr }
    +func (rr *DS) Header() *RR_Header         { return &rr.Hdr }
    +func (rr *EID) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *EUI48) Header() *RR_Header      { return &rr.Hdr }
    +func (rr *EUI64) Header() *RR_Header      { return &rr.Hdr }
    +func (rr *GID) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *GPOS) Header() *RR_Header       { return &rr.Hdr }
    +func (rr *HINFO) Header() *RR_Header      { return &rr.Hdr }
    +func (rr *HIP) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *KEY) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *KX) Header() *RR_Header         { return &rr.Hdr }
    +func (rr *L32) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *L64) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *LOC) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *LP) Header() *RR_Header         { return &rr.Hdr }
    +func (rr *MB) Header() *RR_Header         { return &rr.Hdr }
    +func (rr *MD) Header() *RR_Header         { return &rr.Hdr }
    +func (rr *MF) Header() *RR_Header         { return &rr.Hdr }
    +func (rr *MG) Header() *RR_Header         { return &rr.Hdr }
    +func (rr *MINFO) Header() *RR_Header      { return &rr.Hdr }
    +func (rr *MR) Header() *RR_Header         { return &rr.Hdr }
    +func (rr *MX) Header() *RR_Header         { return &rr.Hdr }
    +func (rr *NAPTR) Header() *RR_Header      { return &rr.Hdr }
    +func (rr *NID) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *NIMLOC) Header() *RR_Header     { return &rr.Hdr }
    +func (rr *NINFO) Header() *RR_Header      { return &rr.Hdr }
    +func (rr *NS) Header() *RR_Header         { return &rr.Hdr }
    +func (rr *NSAPPTR) Header() *RR_Header    { return &rr.Hdr }
    +func (rr *NSEC) Header() *RR_Header       { return &rr.Hdr }
    +func (rr *NSEC3) Header() *RR_Header      { return &rr.Hdr }
    +func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr }
    +func (rr *NULL) Header() *RR_Header       { return &rr.Hdr }
    +func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr }
    +func (rr *OPT) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *PTR) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *PX) Header() *RR_Header         { return &rr.Hdr }
    +func (rr *RFC3597) Header() *RR_Header    { return &rr.Hdr }
    +func (rr *RKEY) Header() *RR_Header       { return &rr.Hdr }
    +func (rr *RP) Header() *RR_Header         { return &rr.Hdr }
    +func (rr *RRSIG) Header() *RR_Header      { return &rr.Hdr }
    +func (rr *RT) Header() *RR_Header         { return &rr.Hdr }
    +func (rr *SIG) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *SMIMEA) Header() *RR_Header     { return &rr.Hdr }
    +func (rr *SOA) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *SPF) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *SRV) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *SSHFP) Header() *RR_Header      { return &rr.Hdr }
    +func (rr *TA) Header() *RR_Header         { return &rr.Hdr }
    +func (rr *TALINK) Header() *RR_Header     { return &rr.Hdr }
    +func (rr *TKEY) Header() *RR_Header       { return &rr.Hdr }
    +func (rr *TLSA) Header() *RR_Header       { return &rr.Hdr }
    +func (rr *TSIG) Header() *RR_Header       { return &rr.Hdr }
    +func (rr *TXT) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *UID) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *UINFO) Header() *RR_Header      { return &rr.Hdr }
    +func (rr *URI) Header() *RR_Header        { return &rr.Hdr }
    +func (rr *X25) Header() *RR_Header        { return &rr.Hdr }
    +
    +// len() functions
    +func (rr *A) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	if len(rr.A) != 0 {
    +		l += net.IPv4len
    +	}
    +	return l
    +}
    +func (rr *AAAA) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	if len(rr.AAAA) != 0 {
    +		l += net.IPv6len
    +	}
    +	return l
    +}
    +func (rr *AFSDB) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // Subtype
    +	l += domainNameLen(rr.Hostname, off+l, compression, false)
    +	return l
    +}
    +func (rr *ANY) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	return l
    +}
    +func (rr *AVC) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	for _, x := range rr.Txt {
    +		l += len(x) + 1
    +	}
    +	return l
    +}
    +func (rr *CAA) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l++ // Flag
    +	l += len(rr.Tag) + 1
    +	l += len(rr.Value)
    +	return l
    +}
    +func (rr *CERT) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // Type
    +	l += 2 // KeyTag
    +	l++    // Algorithm
    +	l += base64.StdEncoding.DecodedLen(len(rr.Certificate))
    +	return l
    +}
    +func (rr *CNAME) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.Target, off+l, compression, true)
    +	return l
    +}
    +func (rr *DHCID) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += base64.StdEncoding.DecodedLen(len(rr.Digest))
    +	return l
    +}
    +func (rr *DNAME) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.Target, off+l, compression, false)
    +	return l
    +}
    +func (rr *DNSKEY) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // Flags
    +	l++    // Protocol
    +	l++    // Algorithm
    +	l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
    +	return l
    +}
    +func (rr *DS) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // KeyTag
    +	l++    // Algorithm
    +	l++    // DigestType
    +	l += len(rr.Digest) / 2
    +	return l
    +}
    +func (rr *EID) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += len(rr.Endpoint) / 2
    +	return l
    +}
    +func (rr *EUI48) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 6 // Address
    +	return l
    +}
    +func (rr *EUI64) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 8 // Address
    +	return l
    +}
    +func (rr *GID) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 4 // Gid
    +	return l
    +}
    +func (rr *GPOS) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += len(rr.Longitude) + 1
    +	l += len(rr.Latitude) + 1
    +	l += len(rr.Altitude) + 1
    +	return l
    +}
    +func (rr *HINFO) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += len(rr.Cpu) + 1
    +	l += len(rr.Os) + 1
    +	return l
    +}
    +func (rr *HIP) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l++    // HitLength
    +	l++    // PublicKeyAlgorithm
    +	l += 2 // PublicKeyLength
    +	l += len(rr.Hit) / 2
    +	l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
    +	for _, x := range rr.RendezvousServers {
    +		l += domainNameLen(x, off+l, compression, false)
    +	}
    +	return l
    +}
    +func (rr *KX) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // Preference
    +	l += domainNameLen(rr.Exchanger, off+l, compression, false)
    +	return l
    +}
    +func (rr *L32) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // Preference
    +	if len(rr.Locator32) != 0 {
    +		l += net.IPv4len
    +	}
    +	return l
    +}
    +func (rr *L64) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // Preference
    +	l += 8 // Locator64
    +	return l
    +}
    +func (rr *LOC) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l++    // Version
    +	l++    // Size
    +	l++    // HorizPre
    +	l++    // VertPre
    +	l += 4 // Latitude
    +	l += 4 // Longitude
    +	l += 4 // Altitude
    +	return l
    +}
    +func (rr *LP) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // Preference
    +	l += domainNameLen(rr.Fqdn, off+l, compression, false)
    +	return l
    +}
    +func (rr *MB) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.Mb, off+l, compression, true)
    +	return l
    +}
    +func (rr *MD) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.Md, off+l, compression, true)
    +	return l
    +}
    +func (rr *MF) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.Mf, off+l, compression, true)
    +	return l
    +}
    +func (rr *MG) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.Mg, off+l, compression, true)
    +	return l
    +}
    +func (rr *MINFO) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.Rmail, off+l, compression, true)
    +	l += domainNameLen(rr.Email, off+l, compression, true)
    +	return l
    +}
    +func (rr *MR) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.Mr, off+l, compression, true)
    +	return l
    +}
    +func (rr *MX) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // Preference
    +	l += domainNameLen(rr.Mx, off+l, compression, true)
    +	return l
    +}
    +func (rr *NAPTR) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // Order
    +	l += 2 // Preference
    +	l += len(rr.Flags) + 1
    +	l += len(rr.Service) + 1
    +	l += len(rr.Regexp) + 1
    +	l += domainNameLen(rr.Replacement, off+l, compression, false)
    +	return l
    +}
    +func (rr *NID) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // Preference
    +	l += 8 // NodeID
    +	return l
    +}
    +func (rr *NIMLOC) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += len(rr.Locator) / 2
    +	return l
    +}
    +func (rr *NINFO) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	for _, x := range rr.ZSData {
    +		l += len(x) + 1
    +	}
    +	return l
    +}
    +func (rr *NS) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.Ns, off+l, compression, true)
    +	return l
    +}
    +func (rr *NSAPPTR) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.Ptr, off+l, compression, false)
    +	return l
    +}
    +func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l++    // Hash
    +	l++    // Flags
    +	l += 2 // Iterations
    +	l++    // SaltLength
    +	l += len(rr.Salt) / 2
    +	return l
    +}
    +func (rr *NULL) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += len(rr.Data)
    +	return l
    +}
    +func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
    +	return l
    +}
    +func (rr *PTR) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.Ptr, off+l, compression, true)
    +	return l
    +}
    +func (rr *PX) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // Preference
    +	l += domainNameLen(rr.Map822, off+l, compression, false)
    +	l += domainNameLen(rr.Mapx400, off+l, compression, false)
    +	return l
    +}
    +func (rr *RFC3597) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += len(rr.Rdata) / 2
    +	return l
    +}
    +func (rr *RKEY) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // Flags
    +	l++    // Protocol
    +	l++    // Algorithm
    +	l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
    +	return l
    +}
    +func (rr *RP) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.Mbox, off+l, compression, false)
    +	l += domainNameLen(rr.Txt, off+l, compression, false)
    +	return l
    +}
    +func (rr *RRSIG) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // TypeCovered
    +	l++    // Algorithm
    +	l++    // Labels
    +	l += 4 // OrigTtl
    +	l += 4 // Expiration
    +	l += 4 // Inception
    +	l += 2 // KeyTag
    +	l += domainNameLen(rr.SignerName, off+l, compression, false)
    +	l += base64.StdEncoding.DecodedLen(len(rr.Signature))
    +	return l
    +}
    +func (rr *RT) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // Preference
    +	l += domainNameLen(rr.Host, off+l, compression, false)
    +	return l
    +}
    +func (rr *SMIMEA) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l++ // Usage
    +	l++ // Selector
    +	l++ // MatchingType
    +	l += len(rr.Certificate) / 2
    +	return l
    +}
    +func (rr *SOA) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.Ns, off+l, compression, true)
    +	l += domainNameLen(rr.Mbox, off+l, compression, true)
    +	l += 4 // Serial
    +	l += 4 // Refresh
    +	l += 4 // Retry
    +	l += 4 // Expire
    +	l += 4 // Minttl
    +	return l
    +}
    +func (rr *SPF) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	for _, x := range rr.Txt {
    +		l += len(x) + 1
    +	}
    +	return l
    +}
    +func (rr *SRV) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // Priority
    +	l += 2 // Weight
    +	l += 2 // Port
    +	l += domainNameLen(rr.Target, off+l, compression, false)
    +	return l
    +}
    +func (rr *SSHFP) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l++ // Algorithm
    +	l++ // Type
    +	l += len(rr.FingerPrint) / 2
    +	return l
    +}
    +func (rr *TA) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // KeyTag
    +	l++    // Algorithm
    +	l++    // DigestType
    +	l += len(rr.Digest) / 2
    +	return l
    +}
    +func (rr *TALINK) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.PreviousName, off+l, compression, false)
    +	l += domainNameLen(rr.NextName, off+l, compression, false)
    +	return l
    +}
    +func (rr *TKEY) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.Algorithm, off+l, compression, false)
    +	l += 4 // Inception
    +	l += 4 // Expiration
    +	l += 2 // Mode
    +	l += 2 // Error
    +	l += 2 // KeySize
    +	l += len(rr.Key) / 2
    +	l += 2 // OtherLen
    +	l += len(rr.OtherData) / 2
    +	return l
    +}
    +func (rr *TLSA) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l++ // Usage
    +	l++ // Selector
    +	l++ // MatchingType
    +	l += len(rr.Certificate) / 2
    +	return l
    +}
    +func (rr *TSIG) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += domainNameLen(rr.Algorithm, off+l, compression, false)
    +	l += 6 // TimeSigned
    +	l += 2 // Fudge
    +	l += 2 // MACSize
    +	l += len(rr.MAC) / 2
    +	l += 2 // OrigId
    +	l += 2 // Error
    +	l += 2 // OtherLen
    +	l += len(rr.OtherData) / 2
    +	return l
    +}
    +func (rr *TXT) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	for _, x := range rr.Txt {
    +		l += len(x) + 1
    +	}
    +	return l
    +}
    +func (rr *UID) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 4 // Uid
    +	return l
    +}
    +func (rr *UINFO) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += len(rr.Uinfo) + 1
    +	return l
    +}
    +func (rr *URI) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += 2 // Priority
    +	l += 2 // Weight
    +	l += len(rr.Target)
    +	return l
    +}
    +func (rr *X25) len(off int, compression map[string]struct{}) int {
    +	l := rr.Hdr.len(off, compression)
    +	l += len(rr.PSDNAddress) + 1
    +	return l
    +}
    +
    +// copy() functions
    +func (rr *A) copy() RR {
    +	return &A{rr.Hdr, copyIP(rr.A)}
    +}
    +func (rr *AAAA) copy() RR {
    +	return &AAAA{rr.Hdr, copyIP(rr.AAAA)}
    +}
    +func (rr *AFSDB) copy() RR {
    +	return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname}
    +}
    +func (rr *ANY) copy() RR {
    +	return &ANY{rr.Hdr}
    +}
    +func (rr *AVC) copy() RR {
    +	Txt := make([]string, len(rr.Txt))
    +	copy(Txt, rr.Txt)
    +	return &AVC{rr.Hdr, Txt}
    +}
    +func (rr *CAA) copy() RR {
    +	return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value}
    +}
    +func (rr *CERT) copy() RR {
    +	return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate}
    +}
    +func (rr *CNAME) copy() RR {
    +	return &CNAME{rr.Hdr, rr.Target}
    +}
    +func (rr *CSYNC) copy() RR {
    +	TypeBitMap := make([]uint16, len(rr.TypeBitMap))
    +	copy(TypeBitMap, rr.TypeBitMap)
    +	return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap}
    +}
    +func (rr *DHCID) copy() RR {
    +	return &DHCID{rr.Hdr, rr.Digest}
    +}
    +func (rr *DNAME) copy() RR {
    +	return &DNAME{rr.Hdr, rr.Target}
    +}
    +func (rr *DNSKEY) copy() RR {
    +	return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
    +}
    +func (rr *DS) copy() RR {
    +	return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
    +}
    +func (rr *EID) copy() RR {
    +	return &EID{rr.Hdr, rr.Endpoint}
    +}
    +func (rr *EUI48) copy() RR {
    +	return &EUI48{rr.Hdr, rr.Address}
    +}
    +func (rr *EUI64) copy() RR {
    +	return &EUI64{rr.Hdr, rr.Address}
    +}
    +func (rr *GID) copy() RR {
    +	return &GID{rr.Hdr, rr.Gid}
    +}
    +func (rr *GPOS) copy() RR {
    +	return &GPOS{rr.Hdr, rr.Longitude, rr.Latitude, rr.Altitude}
    +}
    +func (rr *HINFO) copy() RR {
    +	return &HINFO{rr.Hdr, rr.Cpu, rr.Os}
    +}
    +func (rr *HIP) copy() RR {
    +	RendezvousServers := make([]string, len(rr.RendezvousServers))
    +	copy(RendezvousServers, rr.RendezvousServers)
    +	return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers}
    +}
    +func (rr *KX) copy() RR {
    +	return &KX{rr.Hdr, rr.Preference, rr.Exchanger}
    +}
    +func (rr *L32) copy() RR {
    +	return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)}
    +}
    +func (rr *L64) copy() RR {
    +	return &L64{rr.Hdr, rr.Preference, rr.Locator64}
    +}
    +func (rr *LOC) copy() RR {
    +	return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude}
    +}
    +func (rr *LP) copy() RR {
    +	return &LP{rr.Hdr, rr.Preference, rr.Fqdn}
    +}
    +func (rr *MB) copy() RR {
    +	return &MB{rr.Hdr, rr.Mb}
    +}
    +func (rr *MD) copy() RR {
    +	return &MD{rr.Hdr, rr.Md}
    +}
    +func (rr *MF) copy() RR {
    +	return &MF{rr.Hdr, rr.Mf}
    +}
    +func (rr *MG) copy() RR {
    +	return &MG{rr.Hdr, rr.Mg}
    +}
    +func (rr *MINFO) copy() RR {
    +	return &MINFO{rr.Hdr, rr.Rmail, rr.Email}
    +}
    +func (rr *MR) copy() RR {
    +	return &MR{rr.Hdr, rr.Mr}
    +}
    +func (rr *MX) copy() RR {
    +	return &MX{rr.Hdr, rr.Preference, rr.Mx}
    +}
    +func (rr *NAPTR) copy() RR {
    +	return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement}
    +}
    +func (rr *NID) copy() RR {
    +	return &NID{rr.Hdr, rr.Preference, rr.NodeID}
    +}
    +func (rr *NIMLOC) copy() RR {
    +	return &NIMLOC{rr.Hdr, rr.Locator}
    +}
    +func (rr *NINFO) copy() RR {
    +	ZSData := make([]string, len(rr.ZSData))
    +	copy(ZSData, rr.ZSData)
    +	return &NINFO{rr.Hdr, ZSData}
    +}
    +func (rr *NS) copy() RR {
    +	return &NS{rr.Hdr, rr.Ns}
    +}
    +func (rr *NSAPPTR) copy() RR {
    +	return &NSAPPTR{rr.Hdr, rr.Ptr}
    +}
    +func (rr *NSEC) copy() RR {
    +	TypeBitMap := make([]uint16, len(rr.TypeBitMap))
    +	copy(TypeBitMap, rr.TypeBitMap)
    +	return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap}
    +}
    +func (rr *NSEC3) copy() RR {
    +	TypeBitMap := make([]uint16, len(rr.TypeBitMap))
    +	copy(TypeBitMap, rr.TypeBitMap)
    +	return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap}
    +}
    +func (rr *NSEC3PARAM) copy() RR {
    +	return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt}
    +}
    +func (rr *NULL) copy() RR {
    +	return &NULL{rr.Hdr, rr.Data}
    +}
    +func (rr *OPENPGPKEY) copy() RR {
    +	return &OPENPGPKEY{rr.Hdr, rr.PublicKey}
    +}
    +func (rr *OPT) copy() RR {
    +	Option := make([]EDNS0, len(rr.Option))
    +	for i, e := range rr.Option {
    +		Option[i] = e.copy()
    +	}
    +	return &OPT{rr.Hdr, Option}
    +}
    +func (rr *PTR) copy() RR {
    +	return &PTR{rr.Hdr, rr.Ptr}
    +}
    +func (rr *PX) copy() RR {
    +	return &PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400}
    +}
    +func (rr *RFC3597) copy() RR {
    +	return &RFC3597{rr.Hdr, rr.Rdata}
    +}
    +func (rr *RKEY) copy() RR {
    +	return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
    +}
    +func (rr *RP) copy() RR {
    +	return &RP{rr.Hdr, rr.Mbox, rr.Txt}
    +}
    +func (rr *RRSIG) copy() RR {
    +	return &RRSIG{rr.Hdr, rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature}
    +}
    +func (rr *RT) copy() RR {
    +	return &RT{rr.Hdr, rr.Preference, rr.Host}
    +}
    +func (rr *SMIMEA) copy() RR {
    +	return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
    +}
    +func (rr *SOA) copy() RR {
    +	return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl}
    +}
    +func (rr *SPF) copy() RR {
    +	Txt := make([]string, len(rr.Txt))
    +	copy(Txt, rr.Txt)
    +	return &SPF{rr.Hdr, Txt}
    +}
    +func (rr *SRV) copy() RR {
    +	return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target}
    +}
    +func (rr *SSHFP) copy() RR {
    +	return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint}
    +}
    +func (rr *TA) copy() RR {
    +	return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
    +}
    +func (rr *TALINK) copy() RR {
    +	return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName}
    +}
    +func (rr *TKEY) copy() RR {
    +	return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData}
    +}
    +func (rr *TLSA) copy() RR {
    +	return &TLSA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
    +}
    +func (rr *TSIG) copy() RR {
    +	return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
    +}
    +func (rr *TXT) copy() RR {
    +	Txt := make([]string, len(rr.Txt))
    +	copy(Txt, rr.Txt)
    +	return &TXT{rr.Hdr, Txt}
    +}
    +func (rr *UID) copy() RR {
    +	return &UID{rr.Hdr, rr.Uid}
    +}
    +func (rr *UINFO) copy() RR {
    +	return &UINFO{rr.Hdr, rr.Uinfo}
    +}
    +func (rr *URI) copy() RR {
    +	return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target}
    +}
    +func (rr *X25) copy() RR {
    +	return &X25{rr.Hdr, rr.PSDNAddress}
    +}
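
The per-type `len` and `copy` methods above are unexported and are normally reached through the package's exported wrappers. A minimal sketch, assuming the `dns.Len` and `dns.Copy` helpers that wrap these generated methods in recent miekg/dns releases:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// NewRR parses a resource record in presentation format.
	rr, err := dns.NewRR("example.org. 3600 IN MX 10 mail.example.org.")
	if err != nil {
		panic(err)
	}
	// Len reports the uncompressed wire-format length, which the per-type
	// len(off, compression) methods generated above compute internally.
	fmt.Println("wire length:", dns.Len(rr))
	// Copy performs the deep copy implemented by the generated copy() methods.
	fmt.Println("copy:", dns.Copy(rr).String())
}
```
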
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/LICENSE.md b/vendor/github.com/nicolai86/scaleway-sdk/LICENSE.md
    new file mode 100644
    index 000000000000..12e056f64e07
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/LICENSE.md
    @@ -0,0 +1,20 @@
    +The MIT License
    +===============
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in
    +all copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    +THE SOFTWARE.
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/README.md b/vendor/github.com/nicolai86/scaleway-sdk/README.md
    new file mode 100644
    index 000000000000..b0c8dc1fe19a
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/README.md
    @@ -0,0 +1,9 @@
    +# Scaleway SDK
    +
    +A fork of the scaleway cli repository which only aims at being an API SDK - nothing more.
    +
    +## Tests
    +
    +```bash
    +$ go test ./...
    +```
    \ No newline at end of file
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/api.go b/vendor/github.com/nicolai86/scaleway-sdk/api.go
    new file mode 100644
    index 000000000000..cd449935cf83
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/api.go
    @@ -0,0 +1,302 @@
    +// Copyright (C) 2015 . All rights reserved.
    +// Use of this source code is governed by a MIT-style
    +// license that can be found in the LICENSE.md file.
    +
    +// Interact with  API
    +
    +// Package api contains client and functions to interact with  API
    +package api
    +
    +import (
    +	"bytes"
    +	"encoding/json"
    +	"errors"
    +	"fmt"
    +	"io"
    +	"io/ioutil"
    +	"net/http"
    +	"net/url"
    +	"os"
    +	"strconv"
    +	"strings"
    +
    +	"golang.org/x/sync/errgroup"
    +)
    +
    +// https://cp-par1.scaleway.com/products/servers
    +// https://cp-ams1.scaleway.com/products/servers
    +// Default values
    +var (
    +	AccountAPI     = "https://account.scaleway.com/"
    +	MetadataAPI    = "http://169.254.42.42/"
    +	MarketplaceAPI = "https://api-marketplace.scaleway.com"
    +	ComputeAPIPar1 = "https://cp-par1.scaleway.com/"
    +	ComputeAPIAms1 = "https://cp-ams1.scaleway.com/"
    +
    +	URLPublicDNS  = ".pub.cloud.scaleway.com"
    +	URLPrivateDNS = ".priv.cloud.scaleway.com"
    +)
    +
    +func init() {
    +	if url := os.Getenv("SCW_ACCOUNT_API"); url != "" {
    +		AccountAPI = url
    +	}
    +	if url := os.Getenv("SCW_METADATA_API"); url != "" {
    +		MetadataAPI = url
    +	}
    +	if url := os.Getenv("SCW_MARKETPLACE_API"); url != "" {
    +		MarketplaceAPI = url
    +	}
    +}
    +
    +const (
    +	perPage = 50
    +)
    +
    +// HTTPClient wraps the net/http Client Do method
    +type HTTPClient interface {
    +	Do(*http.Request) (*http.Response, error)
    +}
    +
    +// API is the interface used to communicate with the  API
    +type API struct {
    +	Organization string     // Organization is the identifier of the  organization
    +	Token        string     // Token is the authentication token for the  organization
    +	Client       HTTPClient // Client is used for all HTTP interactions
    +
    +	password   string // Password is the authentication password
    +	userAgent  string
    +	computeAPI string
    +
    +	Region string
    +}
    +
    +// APIError represents a  API Error
    +type APIError struct {
    +	// Message is a human-friendly error message
    +	APIMessage string `json:"message,omitempty"`
    +
    +	// Type is a string code that defines the kind of error
    +	Type string `json:"type,omitempty"`
    +
    +	// Fields contains detail about validation error
    +	Fields map[string][]string `json:"fields,omitempty"`
    +
    +	// StatusCode is the HTTP status code received
    +	StatusCode int `json:"-"`
    +
    +	// Message
    +	Message string `json:"-"`
    +}
    +
    +// Error returns a string representing the error
    +func (e APIError) Error() string {
    +	var b bytes.Buffer
    +
    +	fmt.Fprintf(&b, "StatusCode: %v, ", e.StatusCode)
    +	fmt.Fprintf(&b, "Type: %v, ", e.Type)
    +	fmt.Fprintf(&b, "APIMessage: \x1b[31m%v\x1b[0m", e.APIMessage)
    +	if len(e.Fields) > 0 {
    +		fmt.Fprintf(&b, ", Details: %v", e.Fields)
    +	}
    +	return b.String()
    +}
    +
    +// New creates a ready-to-use  SDK client
    +func New(organization, token, region string, options ...func(*API)) (*API, error) {
    +	s := &API{
    +		// exposed
    +		Organization: organization,
    +		Token:        token,
    +		Client:       &http.Client{},
    +
    +		// internal
    +		password:  "",
    +		userAgent: "scaleway-sdk",
    +	}
    +	for _, option := range options {
    +		option(s)
    +	}
    +	switch region {
    +	case "par1", "":
    +		s.computeAPI = ComputeAPIPar1
    +	case "ams1":
    +		s.computeAPI = ComputeAPIAms1
    +	default:
    +		return nil, fmt.Errorf("%s isn't a valid region", region)
    +	}
    +	s.Region = region
    +	if url := os.Getenv("SCW_COMPUTE_API"); url != "" {
    +		s.computeAPI = url
    +	}
    +	return s, nil
    +}
    +
    +func (s *API) response(method, uri string, content io.Reader) (resp *http.Response, err error) {
    +	var (
    +		req *http.Request
    +	)
    +
    +	req, err = http.NewRequest(method, uri, content)
    +	if err != nil {
    +		err = fmt.Errorf("response %s %s", method, uri)
    +		return
    +	}
    +	req.Header.Set("X-Auth-Token", s.Token)
    +	req.Header.Set("Content-Type", "application/json")
    +	req.Header.Set("User-Agent", s.userAgent)
    +	resp, err = s.Client.Do(req)
    +	return
    +}
    +
+// GetResponsePaginate fetches all resources and returns an http.Response object for the requested resource
    +func (s *API) GetResponsePaginate(apiURL, resource string, values url.Values) (*http.Response, error) {
    +	resp, err := s.response("HEAD", fmt.Sprintf("%s/%s?%s", strings.TrimRight(apiURL, "/"), resource, values.Encode()), nil)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	count := resp.Header.Get("X-Total-Count")
    +	var maxElem int
    +	if count == "" {
    +		maxElem = 0
    +	} else {
    +		maxElem, err = strconv.Atoi(count)
    +		if err != nil {
    +			return nil, err
    +		}
    +	}
    +
    +	get := maxElem / perPage
    +	if (float32(maxElem) / perPage) > float32(get) {
    +		get++
    +	}
    +
    +	if get <= 1 { // If there is 0 or 1 page of result, the response is not paginated
    +		if len(values) == 0 {
    +			return s.response("GET", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), nil)
    +		}
    +		return s.response("GET", fmt.Sprintf("%s/%s?%s", strings.TrimRight(apiURL, "/"), resource, values.Encode()), nil)
    +	}
    +
    +	fetchAll := !(values.Get("per_page") != "" || values.Get("page") != "")
    +	if fetchAll {
    +		var g errgroup.Group
    +
    +		ch := make(chan *http.Response, get)
    +		for i := 1; i <= get; i++ {
    +			i := i // closure tricks
    +			g.Go(func() (err error) {
    +				var resp *http.Response
    +
    +				val := url.Values{}
    +				val.Set("per_page", fmt.Sprintf("%v", perPage))
    +				val.Set("page", fmt.Sprintf("%v", i))
    +				resp, err = s.response("GET", fmt.Sprintf("%s/%s?%s", strings.TrimRight(apiURL, "/"), resource, val.Encode()), nil)
    +				ch <- resp
    +				return
    +			})
    +		}
    +		if err = g.Wait(); err != nil {
    +			return nil, err
    +		}
    +		newBody := make(map[string][]json.RawMessage)
    +		body := make(map[string][]json.RawMessage)
    +		key := ""
    +		for i := 0; i < get; i++ {
    +			res := <-ch
    +			if res.StatusCode != http.StatusOK {
    +				return res, nil
    +			}
    +			content, err := ioutil.ReadAll(res.Body)
    +			res.Body.Close()
    +			if err != nil {
    +				return nil, err
    +			}
    +			if err := json.Unmarshal(content, &body); err != nil {
    +				return nil, err
    +			}
    +
    +			if i == 0 {
    +				resp = res
    +				for k := range body {
    +					key = k
    +					break
    +				}
    +			}
    +			newBody[key] = append(newBody[key], body[key]...)
    +		}
    +		payload := new(bytes.Buffer)
    +		if err := json.NewEncoder(payload).Encode(newBody); err != nil {
    +			return nil, err
    +		}
    +		resp.Body = ioutil.NopCloser(payload)
    +	} else {
    +		resp, err = s.response("GET", fmt.Sprintf("%s/%s?%s", strings.TrimRight(apiURL, "/"), resource, values.Encode()), nil)
    +	}
    +	return resp, err
    +}
    +
    +// PostResponse returns an http.Response object for the updated resource
    +func (s *API) PostResponse(apiURL, resource string, data interface{}) (*http.Response, error) {
    +	payload := new(bytes.Buffer)
    +	if err := json.NewEncoder(payload).Encode(data); err != nil {
    +		return nil, err
    +	}
    +	return s.response("POST", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), payload)
    +}
    +
    +// PatchResponse returns an http.Response object for the updated resource
    +func (s *API) PatchResponse(apiURL, resource string, data interface{}) (*http.Response, error) {
    +	payload := new(bytes.Buffer)
    +	if err := json.NewEncoder(payload).Encode(data); err != nil {
    +		return nil, err
    +	}
    +	return s.response("PATCH", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), payload)
    +}
    +
    +// PutResponse returns an http.Response object for the updated resource
    +func (s *API) PutResponse(apiURL, resource string, data interface{}) (*http.Response, error) {
    +	payload := new(bytes.Buffer)
    +	if err := json.NewEncoder(payload).Encode(data); err != nil {
    +		return nil, err
    +	}
    +	return s.response("PUT", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), payload)
    +}
    +
    +// DeleteResponse returns an http.Response object for the deleted resource
    +func (s *API) DeleteResponse(apiURL, resource string) (*http.Response, error) {
    +	return s.response("DELETE", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), nil)
    +}
    +
    +// handleHTTPError checks the statusCode and displays the error
    +func (s *API) handleHTTPError(goodStatusCode []int, resp *http.Response) ([]byte, error) {
    +	body, err := ioutil.ReadAll(resp.Body)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	if resp.StatusCode >= http.StatusInternalServerError {
    +		return nil, errors.New(string(body))
    +	}
    +
    +	for _, code := range goodStatusCode {
    +		if code == resp.StatusCode {
    +			return body, nil
    +		}
    +	}
    +
    +	var scwError APIError
    +	scwError.StatusCode = resp.StatusCode
    +	if len(body) > 0 {
    +		if err := json.Unmarshal(body, &scwError); err != nil {
    +			return nil, err
    +		}
    +	}
    +	return nil, scwError
    +}
    +
    +// SetPassword register the password
    +func (s *API) SetPassword(password string) {
    +	s.password = password
    +}
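
A minimal sketch of constructing the client defined in api.go above; the environment variable names are placeholders, while the `api.New` signature and the `par1`/`ams1` region handling come directly from the code:

```go
package main

import (
	"log"
	"os"

	api "github.com/nicolai86/scaleway-sdk"
)

func main() {
	// Placeholder environment variables for the organization ID and token.
	client, err := api.New(
		os.Getenv("SCW_ORGANIZATION"),
		os.Getenv("SCW_TOKEN"),
		"par1", // "ams1" is the only other accepted region; "" defaults to par1
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("compute client ready for region %q", client.Region)
}
```

The variadic options run before the region check, so a caller can, for example, swap in a custom HTTP client through the exported `Client` field before any request is made.
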
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/availability.go b/vendor/github.com/nicolai86/scaleway-sdk/availability.go
    new file mode 100644
    index 000000000000..58977127d0dc
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/availability.go
    @@ -0,0 +1,50 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"io/ioutil"
    +)
    +
    +type InstanceTypeAvailability string
    +
    +var (
    +	InstanceTypeAvailable InstanceTypeAvailability = "available"
    +	InstanceTypeScarce    InstanceTypeAvailability = "scarce"
    +	InstanceTypeShortage  InstanceTypeAvailability = "shortage"
    +)
    +
    +type ServerAvailability struct {
    +	Availability InstanceTypeAvailability `json:"availability"`
    +}
    +
    +type ServerAvailabilities map[string]ServerAvailability
    +
    +func (a ServerAvailabilities) CommercialTypes() []string {
    +	types := []string{}
+	for k := range a {
    +		types = append(types, k)
    +	}
    +	return types
    +}
    +
    +type availabilityResponse struct {
    +	Servers ServerAvailabilities
    +}
    +
    +func (s *API) GetServerAvailabilities() (ServerAvailabilities, error) {
    +	resp, err := s.response("GET", fmt.Sprintf("%s/products/servers/availability", s.computeAPI), nil)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +	bs, err := ioutil.ReadAll(resp.Body)
    +	if err != nil {
    +		return nil, err
    +	}
    +	content := availabilityResponse{}
    +	if err := json.Unmarshal(bs, &content); err != nil {
    +		return nil, err
    +	}
    +	return content.Servers, nil
    +}
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/bootscript.go b/vendor/github.com/nicolai86/scaleway-sdk/bootscript.go
    new file mode 100644
    index 000000000000..b10ced6e3ca9
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/bootscript.go
    @@ -0,0 +1,83 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"net/http"
    +	"net/url"
    +)
    +
    +// Bootscript represents a  Bootscript
    +type Bootscript struct {
    +	Bootcmdargs string `json:"bootcmdargs,omitempty"`
    +	Dtb         string `json:"dtb,omitempty"`
    +	Initrd      string `json:"initrd,omitempty"`
    +	Kernel      string `json:"kernel,omitempty"`
    +
    +	// Arch is the architecture target of the bootscript
    +	Arch string `json:"architecture,omitempty"`
    +
    +	// Identifier is a unique identifier for the bootscript
    +	Identifier string `json:"id,omitempty"`
    +
    +	// Organization is the owner of the bootscript
    +	Organization string `json:"organization,omitempty"`
    +
    +	// Name is a user-defined name for the bootscript
    +	Title string `json:"title,omitempty"`
    +
    +	// Public is true for public bootscripts and false for user bootscripts
    +	Public bool `json:"public,omitempty"`
    +
    +	Default bool `json:"default,omitempty"`
    +}
    +
    +type getBootscriptResponse struct {
    +	Bootscript Bootscript `json:"bootscript,omitempty"`
    +}
    +
    +type getBootscriptsResponse struct {
    +	Bootscripts []Bootscript `json:"bootscripts,omitempty"`
    +}
    +
    +// GetBootscripts gets the list of bootscripts from the API
    +func (s *API) GetBootscripts() ([]Bootscript, error) {
    +	query := url.Values{}
    +
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "bootscripts", query)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var bootscripts getBootscriptsResponse
    +
    +	if err = json.Unmarshal(body, &bootscripts); err != nil {
    +		return nil, err
    +	}
    +	return bootscripts.Bootscripts, nil
    +}
    +
    +// GetBootscript gets a bootscript from the API
    +func (s *API) GetBootscript(bootscriptID string) (*Bootscript, error) {
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "bootscripts/"+bootscriptID, url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var oneBootscript getBootscriptResponse
    +
    +	if err = json.Unmarshal(body, &oneBootscript); err != nil {
    +		return nil, err
    +	}
    +	// FIXME region, arch, owner, title
    +	return &oneBootscript.Bootscript, nil
    +}
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/container.go b/vendor/github.com/nicolai86/scaleway-sdk/container.go
    new file mode 100644
    index 000000000000..e7a5abc49e9b
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/container.go
    @@ -0,0 +1,70 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +)
    +
    +// ContainerData represents a  container data (S3)
    +type ContainerData struct {
    +	LastModified string `json:"last_modified"`
    +	Name         string `json:"name"`
    +	Size         string `json:"size"`
    +}
    +
    +type getContainerDatas struct {
    +	Container []*ContainerData `json:"container"`
    +}
    +
    +// Container represents a  container (S3)
    +type Container struct {
    +	Organization `json:"organization"`
    +	Name         string `json:"name"`
    +	Size         string `json:"size"`
    +}
    +
    +type getContainers struct {
    +	Containers []*Container `json:"containers"`
    +}
    +
    +// GetContainers returns a GetContainers
    +func (s *API) GetContainers() ([]*Container, error) {
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "containers", url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var containers getContainers
    +
    +	if err = json.Unmarshal(body, &containers); err != nil {
    +		return nil, err
    +	}
    +	return containers.Containers, nil
    +}
    +
    +// GetContainerDatas returns a GetContainerDatas
    +func (s *API) GetContainerDatas(container string) ([]*ContainerData, error) {
    +	resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("containers/%s", container), url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var datas getContainerDatas
    +
    +	if err = json.Unmarshal(body, &datas); err != nil {
    +		return nil, err
    +	}
    +	return datas.Container, nil
    +}
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/dashboard.go b/vendor/github.com/nicolai86/scaleway-sdk/dashboard.go
    new file mode 100644
    index 000000000000..a66cea6faa1c
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/dashboard.go
    @@ -0,0 +1,42 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"net/http"
    +	"net/url"
    +)
    +
    +// DashboardResp represents a dashboard received from the API
    +type DashboardResp struct {
    +	Dashboard Dashboard
    +}
    +
    +// Dashboard represents a dashboard
    +type Dashboard struct {
    +	VolumesCount        int `json:"volumes_count"`
    +	RunningServersCount int `json:"running_servers_count"`
    +	ImagesCount         int `json:"images_count"`
    +	SnapshotsCount      int `json:"snapshots_count"`
    +	ServersCount        int `json:"servers_count"`
    +	IPsCount            int `json:"ips_count"`
    +}
    +
    +// GetDashboard returns the dashboard
    +func (s *API) GetDashboard() (*Dashboard, error) {
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "dashboard", url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var dashboard DashboardResp
    +
    +	if err = json.Unmarshal(body, &dashboard); err != nil {
    +		return nil, err
    +	}
    +	return &dashboard.Dashboard, nil
    +}
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/image.go b/vendor/github.com/nicolai86/scaleway-sdk/image.go
    new file mode 100644
    index 000000000000..e8e644e32182
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/image.go
    @@ -0,0 +1,469 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +)
    +
    +// ImageDefinition represents a  image definition
    +type ImageDefinition struct {
    +	SnapshotIDentifier string  `json:"root_volume"`
    +	Name               string  `json:"name,omitempty"`
    +	Organization       string  `json:"organization"`
    +	Arch               string  `json:"arch"`
    +	DefaultBootscript  *string `json:"default_bootscript,omitempty"`
    +}
    +
    +// Image represents a  Image
    +type Image struct {
    +	// Identifier is a unique identifier for the image
    +	Identifier string `json:"id,omitempty"`
    +
    +	// Name is a user-defined name for the image
    +	Name string `json:"name,omitempty"`
    +
    +	// CreationDate is the creation date of the image
    +	CreationDate string `json:"creation_date,omitempty"`
    +
    +	// ModificationDate is the date of the last modification of the image
    +	ModificationDate string `json:"modification_date,omitempty"`
    +
    +	// RootVolume is the root volume bound to the image
    +	RootVolume Volume `json:"root_volume,omitempty"`
    +
    +	// Public is true for public images and false for user images
    +	Public bool `json:"public,omitempty"`
    +
    +	// Bootscript is the bootscript bound to the image
    +	DefaultBootscript *Bootscript `json:"default_bootscript,omitempty"`
    +
    +	// Organization is the owner of the image
    +	Organization string `json:"organization,omitempty"`
    +
    +	// Arch is the architecture target of the image
    +	Arch string `json:"arch,omitempty"`
    +
    +	// FIXME: extra_volumes
    +}
    +
    +// ImageIdentifier represents a  Image Identifier
    +type ImageIdentifier struct {
    +	Identifier string
    +	Arch       string
    +	Region     string
    +	Owner      string
    +}
    +
    +// OneImage represents the response of a GET /images/UUID API call
    +type OneImage struct {
    +	Image Image `json:"image,omitempty"`
    +}
    +
    +// Images represents a group of  images
    +type Images struct {
    +	// Images holds  images of the response
    +	Images []Image `json:"images,omitempty"`
    +}
    +
    +// MarketImages represents MarketPlace images
    +type MarketImages struct {
    +	Images []MarketImage `json:"images"`
    +}
    +
    +// MarketLocalImageDefinition represents localImage of marketplace version
    +type MarketLocalImageDefinition struct {
    +	Arch string `json:"arch"`
    +	ID   string `json:"id"`
    +	Zone string `json:"zone"`
    +}
    +
    +// MarketLocalImages represents an array of local images
    +type MarketLocalImages struct {
    +	LocalImages []MarketLocalImageDefinition `json:"local_images"`
    +}
    +
    +// MarketLocalImage represents local image
    +type MarketLocalImage struct {
    +	LocalImages MarketLocalImageDefinition `json:"local_image"`
    +}
    +
    +// MarketVersionDefinition represents version of marketplace image
    +type MarketVersionDefinition struct {
    +	CreationDate string `json:"creation_date"`
    +	ID           string `json:"id"`
    +	Image        struct {
    +		ID   string `json:"id"`
    +		Name string `json:"name"`
    +	} `json:"image"`
    +	ModificationDate string `json:"modification_date"`
    +	Name             string `json:"name"`
    +	MarketLocalImages
    +}
    +
    +// MarketVersions represents an array of marketplace image versions
    +type MarketVersions struct {
    +	Versions []MarketVersionDefinition `json:"versions"`
    +}
    +
    +// MarketVersion represents version of marketplace image
    +type MarketVersion struct {
    +	Version MarketVersionDefinition `json:"version"`
    +}
    +
    +// MarketImage represents MarketPlace image
    +type MarketImage struct {
    +	Categories           []string `json:"categories"`
    +	CreationDate         string   `json:"creation_date"`
    +	CurrentPublicVersion string   `json:"current_public_version"`
    +	Description          string   `json:"description"`
    +	ID                   string   `json:"id"`
    +	Logo                 string   `json:"logo"`
    +	ModificationDate     string   `json:"modification_date"`
    +	Name                 string   `json:"name"`
    +	Organization         struct {
    +		ID   string `json:"id"`
    +		Name string `json:"name"`
    +	} `json:"organization"`
    +	Public bool `json:"-"`
    +	MarketVersions
    +}
    +
    +// CreateImage creates a new image
    +func (s *API) CreateImage(volumeID string, name string, bootscript string, arch string) (*Image, error) {
    +	definition := ImageDefinition{
    +		SnapshotIDentifier: volumeID,
    +		Name:               name,
    +		Organization:       s.Organization,
    +		Arch:               arch,
    +	}
    +	if bootscript != "" {
    +		definition.DefaultBootscript = &bootscript
    +	}
    +
    +	resp, err := s.PostResponse(s.computeAPI, "images", definition)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusCreated}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var image OneImage
    +
    +	if err = json.Unmarshal(body, &image); err != nil {
    +		return nil, err
    +	}
    +	return &image.Image, nil
    +}
    +
    +// GetImages gets the list of images from the API
    +func (s *API) GetImages() (*[]MarketImage, error) {
    +	images, err := s.GetMarketPlaceImages("")
    +	if err != nil {
    +		return nil, err
    +	}
    +	for i, image := range images.Images {
    +		if image.CurrentPublicVersion != "" {
    +			for _, version := range image.Versions {
    +				if version.ID == image.CurrentPublicVersion {
    +					images.Images[i].Public = true
    +				}
    +			}
    +		}
    +	}
    +	values := url.Values{}
    +	values.Set("organization", s.Organization)
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "images", values)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var OrgaImages Images
    +
    +	if err = json.Unmarshal(body, &OrgaImages); err != nil {
    +		return nil, err
    +	}
    +
    +	for _, orgaImage := range OrgaImages.Images {
    +		images.Images = append(images.Images, MarketImage{
    +			Categories:           []string{"MyImages"},
    +			CreationDate:         orgaImage.CreationDate,
    +			CurrentPublicVersion: orgaImage.Identifier,
    +			ModificationDate:     orgaImage.ModificationDate,
    +			Name:                 orgaImage.Name,
    +			Public:               false,
    +			MarketVersions: MarketVersions{
    +				Versions: []MarketVersionDefinition{
    +					{
    +						CreationDate:     orgaImage.CreationDate,
    +						ID:               orgaImage.Identifier,
    +						ModificationDate: orgaImage.ModificationDate,
    +						MarketLocalImages: MarketLocalImages{
    +							LocalImages: []MarketLocalImageDefinition{
    +								{
    +									Arch: orgaImage.Arch,
    +									ID:   orgaImage.Identifier,
+									// TODO: fetch images from ams1 and par1
    +									Zone: s.Region,
    +								},
    +							},
    +						},
    +					},
    +				},
    +			},
    +		})
    +	}
    +	return &images.Images, nil
    +}
    +
    +// GetImage gets an image from the API
    +func (s *API) GetImage(imageID string) (*Image, error) {
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "images/"+imageID, url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var oneImage OneImage
    +
    +	if err = json.Unmarshal(body, &oneImage); err != nil {
    +		return nil, err
    +	}
    +	// FIXME owner, title
    +	return &oneImage.Image, nil
    +}
    +
    +// DeleteImage deletes a image
    +func (s *API) DeleteImage(imageID string) error {
    +	resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("images/%s", imageID))
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +
    +	if _, err := s.handleHTTPError([]int{http.StatusNoContent}, resp); err != nil {
    +		return err
    +	}
    +	return nil
    +}
    +
    +// GetMarketPlaceImages returns images from marketplace
    +func (s *API) GetMarketPlaceImages(uuidImage string) (*MarketImages, error) {
    +	resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%s", uuidImage), url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var ret MarketImages
    +
    +	if uuidImage != "" {
    +		ret.Images = make([]MarketImage, 1)
    +
    +		var img MarketImage
    +
    +		if err = json.Unmarshal(body, &img); err != nil {
    +			return nil, err
    +		}
    +		ret.Images[0] = img
    +	} else {
    +		if err = json.Unmarshal(body, &ret); err != nil {
    +			return nil, err
    +		}
    +	}
    +	return &ret, nil
    +}
    +
    +// GetMarketPlaceImageVersions returns image version
    +func (s *API) GetMarketPlaceImageVersions(uuidImage, uuidVersion string) (*MarketVersions, error) {
    +	resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s", uuidImage, uuidVersion), url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var ret MarketVersions
    +
    +	if uuidImage != "" {
    +		var version MarketVersion
    +		ret.Versions = make([]MarketVersionDefinition, 1)
    +
    +		if err = json.Unmarshal(body, &version); err != nil {
    +			return nil, err
    +		}
    +		ret.Versions[0] = version.Version
    +	} else {
    +		if err = json.Unmarshal(body, &ret); err != nil {
    +			return nil, err
    +		}
    +	}
    +	return &ret, nil
    +}
    +
    +// GetMarketPlaceImageCurrentVersion return the image current version
    +func (s *API) GetMarketPlaceImageCurrentVersion(uuidImage string) (*MarketVersion, error) {
    +	resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%v/versions/current", uuidImage), url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var ret MarketVersion
    +
    +	if err = json.Unmarshal(body, &ret); err != nil {
    +		return nil, err
    +	}
    +	return &ret, nil
    +}
    +
    +// GetMarketPlaceLocalImages returns images from local region
    +func (s *API) GetMarketPlaceLocalImages(uuidImage, uuidVersion, uuidLocalImage string) (*MarketLocalImages, error) {
    +	resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%s", uuidImage, uuidVersion, uuidLocalImage), url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var ret MarketLocalImages
    +	if uuidLocalImage != "" {
    +		var localImage MarketLocalImage
    +		ret.LocalImages = make([]MarketLocalImageDefinition, 1)
    +
    +		if err = json.Unmarshal(body, &localImage); err != nil {
    +			return nil, err
    +		}
    +		ret.LocalImages[0] = localImage.LocalImages
    +	} else {
    +		if err = json.Unmarshal(body, &ret); err != nil {
    +			return nil, err
    +		}
    +	}
    +	return &ret, nil
    +}
    +
    +// CreateMarketPlaceImage adds new image
    +func (s *API) CreateMarketPlaceImage(images MarketImage) error {
    +	resp, err := s.PostResponse(MarketplaceAPI, "images/", images)
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +	_, err = s.handleHTTPError([]int{http.StatusAccepted}, resp)
    +	return err
    +}
    +
    +// CreateMarketPlaceImageVersion adds new image version
    +func (s *API) CreateMarketPlaceImageVersion(uuidImage string, version MarketVersion) error {
    +	resp, err := s.PostResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions", uuidImage), version)
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +	_, err = s.handleHTTPError([]int{http.StatusAccepted}, resp)
    +	return err
    +}
    +
    +// CreateMarketPlaceLocalImage adds new local image
    +func (s *API) CreateMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string, local MarketLocalImage) error {
    +	resp, err := s.PostResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%v", uuidImage, uuidVersion, uuidLocalImage), local)
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +	_, err = s.handleHTTPError([]int{http.StatusAccepted}, resp)
    +	return err
    +}
    +
    +// UpdateMarketPlaceImage updates image
+func (s *API) UpdateMarketPlaceImage(uuidImage string, images MarketImage) error {
+	resp, err := s.PutResponse(MarketplaceAPI, fmt.Sprintf("images/%v", uuidImage), images)
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +	_, err = s.handleHTTPError([]int{http.StatusOK}, resp)
    +	return err
    +}
    +
    +// UpdateMarketPlaceImageVersion updates image version
    +func (s *API) UpdateMarketPlaceImageVersion(uuidImage, uuidVersion string, version MarketVersion) error {
    +	resp, err := s.PutResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%v", uuidImage, uuidVersion), version)
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +	_, err = s.handleHTTPError([]int{http.StatusOK}, resp)
    +	return err
    +}
    +
    +// UpdateMarketPlaceLocalImage updates local image
    +func (s *API) UpdateMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string, local MarketLocalImage) error {
    +	resp, err := s.PostResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%v", uuidImage, uuidVersion, uuidLocalImage), local)
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +	_, err = s.handleHTTPError([]int{http.StatusOK}, resp)
    +	return err
    +}
    +
    +// DeleteMarketPlaceImage deletes image
+func (s *API) DeleteMarketPlaceImage(uuidImage string) error {
+	resp, err := s.DeleteResponse(MarketplaceAPI, fmt.Sprintf("images/%v", uuidImage))
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +	_, err = s.handleHTTPError([]int{http.StatusNoContent}, resp)
    +	return err
    +}
    +
    +// DeleteMarketPlaceImageVersion delete image version
    +func (s *API) DeleteMarketPlaceImageVersion(uuidImage, uuidVersion string) error {
    +	resp, err := s.DeleteResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%v", uuidImage, uuidVersion))
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +	_, err = s.handleHTTPError([]int{http.StatusNoContent}, resp)
    +	return err
    +}
    +
    +// DeleteMarketPlaceLocalImage deletes local image
    +func (s *API) DeleteMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string) error {
    +	resp, err := s.DeleteResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%v", uuidImage, uuidVersion, uuidLocalImage))
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +	_, err = s.handleHTTPError([]int{http.StatusNoContent}, resp)
    +	return err
    +}
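
A sketch of resolving a marketplace image to the local image usable in the client's region, built only from the types and calls above; `client` is assumed to come from `api.New`, and the package and function names are illustrative:

```go
package scwexample

import (
	"fmt"
	"log"

	api "github.com/nicolai86/scaleway-sdk"
)

// LocalImageForRegion resolves a marketplace image UUID to the local image ID
// available in the client's region.
func LocalImageForRegion(client *api.API, imageUUID string) {
	current, err := client.GetMarketPlaceImageCurrentVersion(imageUUID)
	if err != nil {
		log.Fatal(err)
	}
	// LocalImages is promoted from the embedded MarketLocalImages struct.
	for _, li := range current.Version.LocalImages {
		if li.Zone == client.Region {
			fmt.Printf("local image %s (arch %s)\n", li.ID, li.Arch)
		}
	}
}
```
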
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/ip.go b/vendor/github.com/nicolai86/scaleway-sdk/ip.go
    new file mode 100644
    index 000000000000..04d1dd0205a1
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/ip.go
@@ -0,0 +1,214 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +)
    +
    +// IPV6 represents a ipv6
    +type IPV6 struct {
    +	Netmask string `json:"netmask"`
    +	Gateway string `json:"gateway"`
    +	Address string `json:"address"`
    +}
    +
    +// IPV4 represents the IPs fields
    +type IPV4 struct {
    +	Organization string  `json:"organization"`
    +	Reverse      *string `json:"reverse"`
    +	ID           string  `json:"id"`
    +	Server       *struct {
    +		Identifier string `json:"id,omitempty"`
    +		Name       string `json:"name,omitempty"`
    +	} `json:"server"`
    +	Address string `json:"address"`
    +}
    +
    +// IPAddress represents a  IP address
    +type IPAddress struct {
    +	// Identifier is a unique identifier for the IP address
    +	Identifier string `json:"id,omitempty"`
    +
    +	// IP is an IPv4 address
    +	IP string `json:"address,omitempty"`
    +
+	// Dynamic is a flag that defines an IP that changes on each reboot
    +	Dynamic *bool `json:"dynamic,omitempty"`
    +}
    +
    +// GetIPS represents the response of a GET /ips/
    +type GetIPS struct {
    +	IPS []IPV4 `json:"ips"`
    +}
    +
    +// GetIP represents the response of a GET /ips/{id_ip}
    +type GetIP struct {
    +	IP IPV4 `json:"ip"`
    +}
    +
    +// GetIP returns a GetIP
    +func (s *API) GetIP(ipID string) (*IPV4, error) {
    +	resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("ips/%s", ipID), url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var ip GetIP
    +
    +	if err = json.Unmarshal(body, &ip); err != nil {
    +		return nil, err
    +	}
    +	return &ip.IP, nil
    +}
    +
    +type UpdateIPRequest struct {
    +	ID      string
    +	Reverse string
    +}
    +
    +func (s *API) UpdateIP(req UpdateIPRequest) (*IPV4, error) {
    +	var update struct {
    +		Address      string  `json:"address"`
    +		ID           string  `json:"id"`
    +		Reverse      *string `json:"reverse"`
    +		Organization string  `json:"organization"`
    +		Server       *string `json:"server"`
    +	}
    +
    +	ip, err := s.GetIP(req.ID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	update.Address = ip.Address
    +	update.ID = ip.ID
    +	update.Organization = ip.Organization
    +	update.Server = nil
    +	if ip.Server != nil {
    +		update.Server = &ip.Server.Identifier
    +	}
    +	update.Reverse = nil
    +	if req.Reverse != "" {
    +		update.Reverse = &req.Reverse
    +	}
    +	resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("ips/%s", req.ID), update)
    +	if err != nil {
    +		return nil, err
    +	}
+	defer resp.Body.Close()
+
+	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
+	if err != nil {
+		return nil, err
+	}
+	var data GetIP
+
    +	if err = json.Unmarshal(body, &data); err != nil {
    +		return nil, err
    +	}
    +	return &data.IP, nil
    +}
    +
    +// GetIPS returns a GetIPS
    +func (s *API) GetIPS() ([]IPV4, error) {
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "ips", url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var ips GetIPS
    +
    +	if err = json.Unmarshal(body, &ips); err != nil {
    +		return nil, err
    +	}
    +	return ips.IPS, nil
    +}
    +
    +// CreateIP returns a new IP
    +func (s *API) CreateIP() (*IPV4, error) {
    +	var orga struct {
    +		Organization string `json:"organization"`
    +	}
    +	orga.Organization = s.Organization
    +	resp, err := s.PostResponse(s.computeAPI, "ips", orga)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusCreated}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var ip GetIP
    +
    +	if err = json.Unmarshal(body, &ip); err != nil {
    +		return nil, err
    +	}
    +	return &ip.IP, nil
    +}
    +
+// AttachIP attaches an IP to a server
    +func (s *API) AttachIP(ipID, serverID string) error {
    +	var update struct {
    +		Address      string  `json:"address"`
    +		ID           string  `json:"id"`
    +		Reverse      *string `json:"reverse"`
    +		Organization string  `json:"organization"`
    +		Server       string  `json:"server"`
    +	}
    +
    +	ip, err := s.GetIP(ipID)
    +	if err != nil {
    +		return err
    +	}
    +	update.Address = ip.Address
    +	update.ID = ip.ID
    +	update.Organization = ip.Organization
    +	update.Server = serverID
    +	resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("ips/%s", ipID), update)
    +	if err != nil {
    +		return err
    +	}
+	defer resp.Body.Close()
+	_, err = s.handleHTTPError([]int{http.StatusOK}, resp)
    +	return err
    +}
    +
    +// DetachIP detaches an IP from a server
    +func (s *API) DetachIP(ipID string) error {
    +	ip, err := s.GetIP(ipID)
    +	if err != nil {
    +		return err
    +	}
    +	ip.Server = nil
    +	resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("ips/%s", ipID), ip)
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +	_, err = s.handleHTTPError([]int{http.StatusOK}, resp)
    +	return err
    +}
    +
    +// DeleteIP deletes an IP
    +func (s *API) DeleteIP(ipID string) error {
    +	resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("ips/%s", ipID))
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +	_, err = s.handleHTTPError([]int{http.StatusNoContent}, resp)
    +	return err
    +}
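
A sketch of the IP lifecycle exposed by ip.go, reserving an address, attaching it to a server, and releasing it again; `client` and `serverID` are assumed inputs and the names are illustrative:

```go
package scwexample

import (
	"log"

	api "github.com/nicolai86/scaleway-sdk"
)

// CycleIP reserves an IP, attaches it to serverID, then detaches and deletes it.
func CycleIP(client *api.API, serverID string) {
	ip, err := client.CreateIP()
	if err != nil {
		log.Fatal(err)
	}
	if err := client.AttachIP(ip.ID, serverID); err != nil {
		log.Fatal(err)
	}
	// ... use the address, then release it again.
	if err := client.DetachIP(ip.ID); err != nil {
		log.Fatal(err)
	}
	if err := client.DeleteIP(ip.ID); err != nil {
		log.Fatal(err)
	}
}
```
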
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/organization.go b/vendor/github.com/nicolai86/scaleway-sdk/organization.go
    new file mode 100644
    index 000000000000..7110edb590e6
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/organization.go
    @@ -0,0 +1,39 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"net/http"
    +	"net/url"
    +)
    +
    +// Organization represents a  Organization
    +type Organization struct {
    +	ID    string `json:"id"`
    +	Name  string `json:"name"`
    +	Users []User `json:"users"`
    +}
    +
    +// organizationsDefinition represents a  Organizations
    +type organizationsDefinition struct {
    +	Organizations []Organization `json:"organizations"`
    +}
    +
    +// GetOrganization returns Organization
    +func (s *API) GetOrganization() ([]Organization, error) {
    +	resp, err := s.GetResponsePaginate(AccountAPI, "organizations", url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var data organizationsDefinition
    +
    +	if err = json.Unmarshal(body, &data); err != nil {
    +		return nil, err
    +	}
    +	return data.Organizations, nil
    +}
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/permissions.go b/vendor/github.com/nicolai86/scaleway-sdk/permissions.go
    new file mode 100644
    index 000000000000..5e493ef69eee
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/permissions.go
    @@ -0,0 +1,39 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +)
    +
    +// Permissions represents the response of GET /permissions
    +type Permissions map[string]PermCategory
    +
    +// PermCategory represents Permissions's fields
    +type PermCategory map[string][]string
    +
+// permissionsResponse represents the permissions response
    +type permissionsResponse struct {
    +	Permissions Permissions `json:"permissions"`
    +}
    +
    +// GetPermissions returns the permissions
    +func (s *API) GetPermissions() (Permissions, error) {
    +	resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("tokens/%s/permissions", s.Token), url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var permissions permissionsResponse
    +
    +	if err = json.Unmarshal(body, &permissions); err != nil {
    +		return nil, err
    +	}
    +	return permissions.Permissions, nil
    +}
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/quota.go b/vendor/github.com/nicolai86/scaleway-sdk/quota.go
    new file mode 100644
    index 000000000000..0ee7cff0066b
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/quota.go
    @@ -0,0 +1,36 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +)
    +
+// Quotas represents a map of quota (name, value)
    +type Quotas map[string]int
    +
    +// GetQuotas represents the response of GET /organizations/{orga_id}/quotas
    +type GetQuotas struct {
    +	Quotas Quotas `json:"quotas"`
    +}
    +
+// GetQuotas returns the Quotas of the organization
    +func (s *API) GetQuotas() (Quotas, error) {
    +	resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("organizations/%s/quotas", s.Organization), url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var quotas GetQuotas
    +
+	if err = json.Unmarshal(body, &quotas); err != nil {
    +		return nil, err
    +	}
    +	return quotas.Quotas, nil
    +}
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/security_group.go b/vendor/github.com/nicolai86/scaleway-sdk/security_group.go
    new file mode 100644
    index 000000000000..d0bdb6dc07e4
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/security_group.go
    @@ -0,0 +1,146 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +)
    +
    +// SecurityGroup definition
    +type SecurityGroup struct {
    +	Description           string      `json:"description"`
    +	ID                    string      `json:"id"`
    +	Organization          string      `json:"organization"`
    +	Name                  string      `json:"name"`
    +	Servers               []ServerRef `json:"servers"`
    +	EnableDefaultSecurity bool        `json:"enable_default_security"`
    +	OrganizationDefault   bool        `json:"organization_default"`
    +}
    +
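+// SecurityGroupRef is a reference (id and name) to a SecurityGroup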
    +type SecurityGroupRef struct {
    +	Identifier string `json:"id,omitempty"`
    +	Name       string `json:"name,omitempty"`
    +}
    +
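+// ServerRef is a reference (id and name) to a Server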
    +type ServerRef struct {
    +	Identifier string `json:"id,omitempty"`
    +	Name       string `json:"name,omitempty"`
    +}
    +
    +// getSecurityGroups represents the response of a GET /security_groups/
    +type getSecurityGroups struct {
    +	SecurityGroups []SecurityGroup `json:"security_groups"`
    +}
    +
    +// getSecurityGroup represents the response of a GET /security_groups/{groupID}
    +type getSecurityGroup struct {
    +	SecurityGroup SecurityGroup `json:"security_group"`
    +}
    +
    +// NewSecurityGroup definition POST request /security_groups
    +type NewSecurityGroup struct {
    +	Organization          string `json:"organization"`
    +	Name                  string `json:"name"`
    +	Description           string `json:"description"`
    +	EnableDefaultSecurity bool   `json:"enable_default_security"`
    +}
    +
    +// UpdateSecurityGroup definition PUT request /security_groups
    +type UpdateSecurityGroup struct {
    +	Organization        string `json:"organization"`
    +	Name                string `json:"name"`
    +	Description         string `json:"description"`
    +	OrganizationDefault bool   `json:"organization_default"`
    +}
    +
    +// DeleteSecurityGroup deletes a SecurityGroup
    +func (s *API) DeleteSecurityGroup(securityGroupID string) error {
    +	resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("security_groups/%s", securityGroupID))
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +
    +	_, err = s.handleHTTPError([]int{http.StatusNoContent}, resp)
    +	return err
    +}
    +
    +// UpdateSecurityGroup updates a SecurityGroup
    +func (s *API) UpdateSecurityGroup(group UpdateSecurityGroup, securityGroupID string) (*SecurityGroup, error) {
    +	resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("security_groups/%s", securityGroupID), group)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var data getSecurityGroup
    +	if err = json.Unmarshal(body, &data); err != nil {
    +		return nil, err
    +	}
    +	return &data.SecurityGroup, err
    +}
    +
    +// GetSecurityGroup returns a SecurityGroup
    +func (s *API) GetSecurityGroup(groupsID string) (*SecurityGroup, error) {
    +	resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("security_groups/%s", groupsID), url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var securityGroups getSecurityGroup
    +
    +	if err = json.Unmarshal(body, &securityGroups); err != nil {
    +		return nil, err
    +	}
    +	return &securityGroups.SecurityGroup, nil
    +}
    +
+// CreateSecurityGroup creates a new SecurityGroup
    +func (s *API) CreateSecurityGroup(group NewSecurityGroup) (*SecurityGroup, error) {
    +	resp, err := s.PostResponse(s.computeAPI, "security_groups", group)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusCreated}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var securityGroups getSecurityGroup
    +
    +	if err = json.Unmarshal(body, &securityGroups); err != nil {
    +		return nil, err
    +	}
    +	return &securityGroups.SecurityGroup, nil
    +}
    +
+// GetSecurityGroups returns the list of SecurityGroups
    +func (s *API) GetSecurityGroups() ([]SecurityGroup, error) {
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "security_groups", url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var securityGroups getSecurityGroups
    +
    +	if err = json.Unmarshal(body, &securityGroups); err != nil {
    +		return nil, err
    +	}
    +	return securityGroups.SecurityGroups, nil
    +}
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/security_group_rule.go b/vendor/github.com/nicolai86/scaleway-sdk/security_group_rule.go
    new file mode 100644
    index 000000000000..492e3cefe890
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/security_group_rule.go
    @@ -0,0 +1,140 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +)
    +
    +// SecurityGroupRule definition
    +type SecurityGroupRule struct {
    +	Direction    string `json:"direction"`
    +	Protocol     string `json:"protocol"`
    +	IPRange      string `json:"ip_range"`
    +	DestPortFrom int    `json:"dest_port_from,omitempty"`
    +	Action       string `json:"action"`
    +	Position     int    `json:"position"`
    +	DestPortTo   string `json:"dest_port_to"`
    +	Editable     bool   `json:"editable"`
    +	ID           string `json:"id"`
    +}
    +
+// getSecurityGroupRules represents the response of a GET /security_groups/{groupID}/rules
    +type getSecurityGroupRules struct {
    +	Rules []SecurityGroupRule `json:"rules"`
    +}
    +
+// getSecurityGroupRule represents the response of a GET /security_groups/{groupID}/rules/{ruleID}
    +type getSecurityGroupRule struct {
    +	Rules SecurityGroupRule `json:"rule"`
    +}
    +
+// NewSecurityGroupRule definition POST request /security_groups/{groupID}/rules
    +type NewSecurityGroupRule struct {
    +	Action       string `json:"action"`
    +	Direction    string `json:"direction"`
    +	IPRange      string `json:"ip_range"`
    +	Protocol     string `json:"protocol"`
    +	DestPortFrom int    `json:"dest_port_from,omitempty"`
    +}
    +
+// UpdateSecurityGroupRule definition PUT request /security_groups/{groupID}/rules/{ruleID}
    +type UpdateSecurityGroupRule struct {
    +	Action       string `json:"action"`
    +	Direction    string `json:"direction"`
    +	IPRange      string `json:"ip_range"`
    +	Protocol     string `json:"protocol"`
    +	Position     int    `json:"position"`
    +	DestPortFrom int    `json:"dest_port_from,omitempty"`
    +}
    +
+// GetSecurityGroupRules returns the rules of a SecurityGroup
    +func (s *API) GetSecurityGroupRules(groupID string) ([]SecurityGroupRule, error) {
    +	resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("security_groups/%s/rules", groupID), url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var data getSecurityGroupRules
    +
    +	if err = json.Unmarshal(body, &data); err != nil {
    +		return nil, err
    +	}
    +	return data.Rules, nil
    +}
    +
+// GetSecurityGroupRule returns a SecurityGroupRule
    +func (s *API) GetSecurityGroupRule(groupID string, rulesID string) (*SecurityGroupRule, error) {
    +	resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("security_groups/%s/rules/%s", groupID, rulesID), url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var data getSecurityGroupRule
    +
    +	if err = json.Unmarshal(body, &data); err != nil {
    +		return nil, err
    +	}
    +	return &data.Rules, nil
    +}
    +
    +type postGroupRuleResponse struct {
    +	SecurityGroupRule SecurityGroupRule `json:"rule"`
    +}
    +
+// CreateSecurityGroupRule creates a new rule in a SecurityGroup
    +func (s *API) CreateSecurityGroupRule(GroupID string, rules NewSecurityGroupRule) (*SecurityGroupRule, error) {
    +	resp, err := s.PostResponse(s.computeAPI, fmt.Sprintf("security_groups/%s/rules", GroupID), rules)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	data, err := s.handleHTTPError([]int{http.StatusCreated}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var res postGroupRuleResponse
    +	err = json.Unmarshal(data, &res)
    +	return &res.SecurityGroupRule, err
    +}
    +
    +// UpdateSecurityGroupRule updates a SecurityGroupRule
    +func (s *API) UpdateSecurityGroupRule(rules UpdateSecurityGroupRule, GroupID, RuleID string) (*SecurityGroupRule, error) {
    +	resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("security_groups/%s/rules/%s", GroupID, RuleID), rules)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var res postGroupRuleResponse
    +	err = json.Unmarshal(body, &res)
    +	return &res.SecurityGroupRule, err
    +}
    +
    +// DeleteSecurityGroupRule deletes a SecurityGroupRule
    +func (s *API) DeleteSecurityGroupRule(GroupID, RuleID string) error {
    +	resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("security_groups/%s/rules/%s", GroupID, RuleID))
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +
    +	_, err = s.handleHTTPError([]int{http.StatusNoContent}, resp)
    +	return err
    +}
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/server.go b/vendor/github.com/nicolai86/scaleway-sdk/server.go
    new file mode 100644
    index 000000000000..6e76dfa85231
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/server.go
    @@ -0,0 +1,324 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +	"time"
    +)
    +
+// Server represents a server
    +type Server struct {
    +	// Arch is the architecture target of the server
    +	Arch string `json:"arch,omitempty"`
    +
    +	// Identifier is a unique identifier for the server
    +	Identifier string `json:"id,omitempty"`
    +
    +	// Name is the user-defined name of the server
    +	Name string `json:"name,omitempty"`
    +
    +	// CreationDate is the creation date of the server
    +	CreationDate string `json:"creation_date,omitempty"`
    +
    +	// ModificationDate is the date of the last modification of the server
    +	ModificationDate string `json:"modification_date,omitempty"`
    +
    +	// Image is the image used by the server
    +	Image Image `json:"image,omitempty"`
    +
    +	// DynamicIPRequired is a flag that defines a server with a dynamic ip address attached
    +	DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"`
    +
    +	// PublicIP is the public IP address bound to the server
    +	PublicAddress IPAddress `json:"public_ip,omitempty"`
    +
    +	// State is the current status of the server
    +	State string `json:"state,omitempty"`
    +
    +	// StateDetail is the detailed status of the server
    +	StateDetail string `json:"state_detail,omitempty"`
    +
    +	// PrivateIP represents the private IPV4 attached to the server (changes on each boot)
    +	PrivateIP string `json:"private_ip,omitempty"`
    +
    +	// Bootscript is the unique identifier of the selected bootscript
    +	Bootscript *Bootscript `json:"bootscript,omitempty"`
    +
    +	// BootType defines the type of boot. Can be local or bootscript
    +	BootType string `json:"boot_type,omitempty"`
    +
    +	// Hostname represents the ServerName in a format compatible with unix's hostname
    +	Hostname string `json:"hostname,omitempty"`
    +
    +	// Tags represents user-defined tags
    +	Tags []string `json:"tags,omitempty"`
    +
    +	// Volumes are the attached volumes
    +	Volumes map[string]Volume `json:"volumes,omitempty"`
    +
    +	// SecurityGroup is the selected security group object
    +	SecurityGroup SecurityGroupRef `json:"security_group,omitempty"`
    +
    +	// Organization is the owner of the server
    +	Organization string `json:"organization,omitempty"`
    +
+	// CommercialType is the commercial type of the server (e.g. C1, C2[SML], VC1S)
    +	CommercialType string `json:"commercial_type,omitempty"`
    +
    +	// Location of the server
    +	Location struct {
    +		Platform   string `json:"platform_id,omitempty"`
    +		Chassis    string `json:"chassis_id,omitempty"`
    +		Cluster    string `json:"cluster_id,omitempty"`
    +		Hypervisor string `json:"hypervisor_id,omitempty"`
    +		Blade      string `json:"blade_id,omitempty"`
    +		Node       string `json:"node_id,omitempty"`
    +		ZoneID     string `json:"zone_id,omitempty"`
    +	} `json:"location,omitempty"`
    +
    +	IPV6 *IPV6 `json:"ipv6,omitempty"`
    +
    +	EnableIPV6 bool `json:"enable_ipv6,omitempty"`
    +
+	// These fields are not returned by the API; we generate them
    +	DNSPublic  string `json:"dns_public,omitempty"`
    +	DNSPrivate string `json:"dns_private,omitempty"`
    +}
    +
+// ServerPatchDefinition represents a server with nullable fields (for PATCH)
    +type ServerPatchDefinition struct {
    +	Arch              *string            `json:"arch,omitempty"`
    +	Name              *string            `json:"name,omitempty"`
    +	CreationDate      *string            `json:"creation_date,omitempty"`
    +	ModificationDate  *string            `json:"modification_date,omitempty"`
    +	Image             *Image             `json:"image,omitempty"`
    +	DynamicIPRequired *bool              `json:"dynamic_ip_required,omitempty"`
    +	PublicAddress     *IPAddress         `json:"public_ip,omitempty"`
    +	State             *string            `json:"state,omitempty"`
    +	StateDetail       *string            `json:"state_detail,omitempty"`
    +	PrivateIP         *string            `json:"private_ip,omitempty"`
    +	Bootscript        *string            `json:"bootscript,omitempty"`
    +	Hostname          *string            `json:"hostname,omitempty"`
    +	Volumes           *map[string]Volume `json:"volumes,omitempty"`
    +	SecurityGroup     *SecurityGroupRef  `json:"security_group,omitempty"`
    +	Organization      *string            `json:"organization,omitempty"`
    +	Tags              *[]string          `json:"tags,omitempty"`
    +	IPV6              *IPV6              `json:"ipv6,omitempty"`
    +	EnableIPV6        *bool              `json:"enable_ipv6,omitempty"`
    +}
    +
+// ServerDefinition represents a server creation request, including its image
    +type ServerDefinition struct {
    +	// Name is the user-defined name of the server
    +	Name string `json:"name"`
    +
    +	// Image is the image used by the server
    +	Image *string `json:"image,omitempty"`
    +
    +	// Volumes are the attached volumes
    +	Volumes map[string]string `json:"volumes,omitempty"`
    +
    +	// DynamicIPRequired is a flag that defines a server with a dynamic ip address attached
    +	DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"`
    +
    +	// Bootscript is the bootscript used by the server
    +	Bootscript *string `json:"bootscript"`
    +
    +	// Tags are the metadata tags attached to the server
    +	Tags []string `json:"tags,omitempty"`
    +
    +	// Organization is the owner of the server
    +	Organization string `json:"organization"`
    +
+	// CommercialType is the commercial type of the server (e.g. C1, C2[SML], VC1S)
    +	CommercialType string `json:"commercial_type"`
    +
    +	// BootType defines the type of boot. Can be local or bootscript
    +	BootType string `json:"boot_type,omitempty"`
    +
    +	PublicIP string `json:"public_ip,omitempty"`
    +
    +	EnableIPV6 bool `json:"enable_ipv6,omitempty"`
    +
    +	SecurityGroup string `json:"security_group,omitempty"`
    +}
    +
+// Servers represents a group of servers
    +type Servers struct {
+	// Servers holds the servers of the response
    +	Servers []Server `json:"servers,omitempty"`
    +}
    +
+// ServerAction represents an action to perform on a server
    +type ServerAction struct {
    +	// Action is the name of the action to trigger
    +	Action string `json:"action,omitempty"`
    +}
    +
    +// OneServer represents the response of a GET /servers/UUID API call
    +type OneServer struct {
    +	Server Server `json:"server,omitempty"`
    +}
    +
    +// PatchServer updates a server
    +func (s *API) PatchServer(serverID string, definition ServerPatchDefinition) error {
    +	resp, err := s.PatchResponse(s.computeAPI, fmt.Sprintf("servers/%s", serverID), definition)
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +
    +	if _, err := s.handleHTTPError([]int{http.StatusOK}, resp); err != nil {
    +		return err
    +	}
    +	return nil
    +}
    +
    +// GetServers gets the list of servers from the API
    +func (s *API) GetServers(all bool, limit int) ([]Server, error) {
    +	query := url.Values{}
    +	if !all {
    +		query.Set("state", "running")
    +	}
    +	// TODO per_page=20&page=2&state=running
    +	if limit > 0 {
    +		// FIXME: wait for the API to be ready
    +		// query.Set("per_page", strconv.Itoa(limit))
    +		panic("Not implemented yet")
    +	}
    +
    +	servers, err := s.fetchServers(query)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	for i, server := range servers.Servers {
    +		servers.Servers[i].DNSPublic = server.Identifier + URLPublicDNS
    +		servers.Servers[i].DNSPrivate = server.Identifier + URLPrivateDNS
    +	}
    +	return servers.Servers, nil
    +}
    +
+// SortServers is a wrapper type to sort servers by CreationDate
    +type SortServers []Server
    +
    +func (s SortServers) Len() int {
    +	return len(s)
    +}
    +
    +func (s SortServers) Swap(i, j int) {
    +	s[i], s[j] = s[j], s[i]
    +}
    +
    +func (s SortServers) Less(i, j int) bool {
    +	date1, _ := time.Parse("2006-01-02T15:04:05.000000+00:00", s[i].CreationDate)
    +	date2, _ := time.Parse("2006-01-02T15:04:05.000000+00:00", s[j].CreationDate)
    +	return date2.Before(date1)
    +}
    +
    +// GetServer gets a server from the API
    +func (s *API) GetServer(serverID string) (*Server, error) {
    +	if serverID == "" {
    +		return nil, fmt.Errorf("cannot get server without serverID")
    +	}
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "servers/"+serverID, url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var oneServer OneServer
    +
    +	if err = json.Unmarshal(body, &oneServer); err != nil {
    +		return nil, err
    +	}
    +	// FIXME arch, owner, title
    +	oneServer.Server.DNSPublic = oneServer.Server.Identifier + URLPublicDNS
    +	oneServer.Server.DNSPrivate = oneServer.Server.Identifier + URLPrivateDNS
    +	return &oneServer.Server, nil
    +}
    +
    +// PostServerAction posts an action on a server
    +func (s *API) PostServerAction(serverID, action string) (*Task, error) {
    +	data := ServerAction{
    +		Action: action,
    +	}
    +	resp, err := s.PostResponse(s.computeAPI, fmt.Sprintf("servers/%s/action", serverID), data)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusAccepted}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var t oneTask
    +	if err = json.Unmarshal(body, &t); err != nil {
    +		return nil, err
    +	}
    +	return &t.Task, err
    +}
    +
    +func (s *API) fetchServers(query url.Values) (*Servers, error) {
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "servers", query)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var servers Servers
    +
    +	if err = json.Unmarshal(body, &servers); err != nil {
    +		return nil, err
    +	}
    +	return &servers, nil
    +}
    +
    +// DeleteServer deletes a server
    +func (s *API) DeleteServer(serverID string) error {
    +	resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("servers/%s", serverID))
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +
    +	if _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp); err != nil {
    +		return err
    +	}
    +	return nil
    +}
    +
    +// CreateServer creates a new server
    +func (s *API) CreateServer(definition ServerDefinition) (*Server, error) {
    +	definition.Organization = s.Organization
    +
    +	resp, err := s.PostResponse(s.computeAPI, "servers", definition)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusCreated}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var data OneServer
    +
    +	if err = json.Unmarshal(body, &data); err != nil {
    +		return nil, err
    +	}
    +	return &data.Server, nil
    +}
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/snapshot.go b/vendor/github.com/nicolai86/scaleway-sdk/snapshot.go
    new file mode 100644
    index 000000000000..99571eb52832
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/snapshot.go
    @@ -0,0 +1,138 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +)
    +
+// SnapshotDefinition represents a snapshot definition
    +type SnapshotDefinition struct {
    +	VolumeIDentifier string `json:"volume_id"`
    +	Name             string `json:"name,omitempty"`
    +	Organization     string `json:"organization"`
    +}
    +
+// Snapshot represents a Snapshot
    +type Snapshot struct {
    +	// Identifier is a unique identifier for the snapshot
    +	Identifier string `json:"id,omitempty"`
    +
    +	// Name is a user-defined name for the snapshot
    +	Name string `json:"name,omitempty"`
    +
    +	// CreationDate is the creation date of the snapshot
    +	CreationDate string `json:"creation_date,omitempty"`
    +
    +	// ModificationDate is the date of the last modification of the snapshot
    +	ModificationDate string `json:"modification_date,omitempty"`
    +
    +	// Size is the allocated size of the volume
    +	Size uint64 `json:"size,omitempty"`
    +
    +	// Organization is the owner of the snapshot
    +	Organization string `json:"organization"`
    +
    +	// State is the current state of the snapshot
    +	State string `json:"state"`
    +
    +	// VolumeType is the kind of volume behind the snapshot
    +	VolumeType string `json:"volume_type"`
    +
    +	// BaseVolume is the volume from which the snapshot inherits
    +	BaseVolume Volume `json:"base_volume,omitempty"`
    +}
    +
    +// oneSnapshot represents the response of a GET /snapshots/UUID API call
    +type oneSnapshot struct {
    +	Snapshot Snapshot `json:"snapshot,omitempty"`
    +}
    +
+// Snapshots represents a group of snapshots
    +type Snapshots struct {
+	// Snapshots holds the snapshots of the response
    +	Snapshots []Snapshot `json:"snapshots,omitempty"`
    +}
    +
    +// CreateSnapshot creates a new snapshot
    +func (s *API) CreateSnapshot(volumeID string, name string) (*Snapshot, error) {
    +	definition := SnapshotDefinition{
    +		VolumeIDentifier: volumeID,
    +		Name:             name,
    +		Organization:     s.Organization,
    +	}
    +	resp, err := s.PostResponse(s.computeAPI, "snapshots", definition)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusCreated}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var snapshot oneSnapshot
    +
    +	if err = json.Unmarshal(body, &snapshot); err != nil {
    +		return nil, err
    +	}
    +	return &snapshot.Snapshot, nil
    +}
    +
    +// DeleteSnapshot deletes a snapshot
    +func (s *API) DeleteSnapshot(snapshotID string) error {
    +	resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("snapshots/%s", snapshotID))
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +
    +	if _, err := s.handleHTTPError([]int{http.StatusNoContent}, resp); err != nil {
    +		return err
    +	}
    +	return nil
    +}
    +
    +// GetSnapshots gets the list of snapshots from the API
    +func (s *API) GetSnapshots() ([]Snapshot, error) {
    +	query := url.Values{}
    +
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "snapshots", query)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var snapshots Snapshots
    +
    +	if err = json.Unmarshal(body, &snapshots); err != nil {
    +		return nil, err
    +	}
    +	return snapshots.Snapshots, nil
    +}
    +
    +// GetSnapshot gets a snapshot from the API
    +func (s *API) GetSnapshot(snapshotID string) (*Snapshot, error) {
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "snapshots/"+snapshotID, url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var oneSnapshot oneSnapshot
    +
    +	if err = json.Unmarshal(body, &oneSnapshot); err != nil {
    +		return nil, err
    +	}
    +	// FIXME region, arch, owner, title
    +	return &oneSnapshot.Snapshot, nil
    +}
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/tasks.go b/vendor/github.com/nicolai86/scaleway-sdk/tasks.go
    new file mode 100644
    index 000000000000..70b502a077bb
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/tasks.go
    @@ -0,0 +1,82 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +)
    +
+// Task represents a Task
    +type Task struct {
    +	// Identifier is a unique identifier for the task
    +	Identifier string `json:"id,omitempty"`
    +
    +	// StartDate is the start date of the task
    +	StartDate string `json:"started_at,omitempty"`
    +
    +	// TerminationDate is the termination date of the task
    +	TerminationDate string `json:"terminated_at,omitempty"`
    +
    +	HrefFrom string `json:"href_from,omitempty"`
    +
    +	Description string `json:"description,omitempty"`
    +
    +	Status string `json:"status,omitempty"`
    +
    +	Progress int `json:"progress,omitempty"`
    +}
    +
    +// oneTask represents the response of a GET /tasks/UUID API call
    +type oneTask struct {
    +	Task Task `json:"task,omitempty"`
    +}
    +
+// Tasks represents a group of tasks
    +type Tasks struct {
+	// Tasks holds the tasks of the response
    +	Tasks []Task `json:"tasks,omitempty"`
    +}
    +
+// GetTasks gets the list of tasks from the API
    +func (s *API) GetTasks() ([]Task, error) {
    +	query := url.Values{}
    +	// TODO per_page=20&page=2
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "tasks", query)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var tasks Tasks
    +
    +	if err = json.Unmarshal(body, &tasks); err != nil {
    +		return nil, err
    +	}
    +	return tasks.Tasks, nil
    +}
    +
    +// GetTask fetches a specific task
    +func (s *API) GetTask(id string) (*Task, error) {
    +	query := url.Values{}
    +	resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("tasks/%s", id), query)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var t oneTask
    +	if err = json.Unmarshal(body, &t); err != nil {
    +		return nil, err
    +	}
    +	return &t.Task, nil
    +}
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/tokens.go b/vendor/github.com/nicolai86/scaleway-sdk/tokens.go
    new file mode 100644
    index 000000000000..b3dff56bb808
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/tokens.go
    @@ -0,0 +1,135 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +)
    +
+// Token represents a Token
    +type Token struct {
    +	UserID             string `json:"user_id"`
    +	Description        string `json:"description,omitempty"`
    +	Roles              Role   `json:"roles"`
    +	Expires            string `json:"expires"`
    +	InheritsUsersPerms bool   `json:"inherits_user_perms"`
    +	ID                 string `json:"id"`
    +}
    +
+// Role represents the role of a Token's user within an Organization
    +type Role struct {
    +	Organization Organization `json:"organization,omitempty"`
    +	Role         string       `json:"role,omitempty"`
    +}
    +
    +type getTokenResponse struct {
    +	Token Token `json:"token"`
    +}
    +
    +type getTokensResponse struct {
    +	Tokens []Token `json:"tokens"`
    +}
    +
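+// GetTokens returns the list of Tokens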
    +func (s *API) GetTokens() ([]Token, error) {
    +	query := url.Values{}
    +	// TODO per_page=20&page=2
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "tokens", query)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var token getTokensResponse
    +
    +	if err = json.Unmarshal(body, &token); err != nil {
    +		return nil, err
    +	}
    +	return token.Tokens, nil
    +}
    +
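+// CreateTokenRequest represents the request body of a POST /tokens call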
    +type CreateTokenRequest struct {
    +	Email    string `json:"email"`
    +	Password string `json:"password,omitempty"`
    +	Expires  bool   `json:"expires"`
    +}
    +
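+// CreateToken creates a new Token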
    +func (s *API) CreateToken(req *CreateTokenRequest) (*Token, error) {
    +	resp, err := s.PostResponse(AccountAPI, "tokens", req)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusCreated}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var data getTokenResponse
    +
    +	if err = json.Unmarshal(body, &data); err != nil {
    +		return nil, err
    +	}
    +	return &data.Token, nil
    +}
    +
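+// UpdateTokenRequest represents the request body of a PATCH /tokens/{id} call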
    +type UpdateTokenRequest struct {
    +	Description string `json:"description,omitempty"`
    +	Expires     bool   `json:"expires"`
    +	ID          string `json:"-"`
    +}
    +
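+// UpdateToken updates an existing Token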
    +func (s *API) UpdateToken(req *UpdateTokenRequest) (*Token, error) {
    +	resp, err := s.PatchResponse(AccountAPI, fmt.Sprintf("tokens/%s", req.ID), req)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var data getTokenResponse
    +
    +	if err = json.Unmarshal(body, &data); err != nil {
    +		return nil, err
    +	}
    +	return &data.Token, nil
    +}
    +
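+// GetToken returns the Token identified by id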
    +func (s *API) GetToken(id string) (*Token, error) {
    +	query := url.Values{}
    +	resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("tokens/%s", id), query)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var data getTokenResponse
    +
    +	if err = json.Unmarshal(body, &data); err != nil {
    +		return nil, err
    +	}
    +	return &data.Token, nil
    +}
    +
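+// DeleteToken deletes the Token identified by id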
    +func (s *API) DeleteToken(id string) error {
    +	resp, err := s.DeleteResponse(AccountAPI, fmt.Sprintf("tokens/%s", id))
    +	if err != nil {
    +		return err
    +	}
+	defer resp.Body.Close()
+
    +	if _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp); err != nil {
    +		return err
    +	}
    +	return nil
    +}
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/user.go b/vendor/github.com/nicolai86/scaleway-sdk/user.go
    new file mode 100644
    index 000000000000..5c07cdbcdfda
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/user.go
    @@ -0,0 +1,101 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +)
    +
+// User represents a User
    +type User struct {
    +	Email         string          `json:"email"`
    +	Firstname     string          `json:"firstname"`
    +	Fullname      string          `json:"fullname"`
    +	ID            string          `json:"id"`
    +	Lastname      string          `json:"lastname"`
    +	Organizations []Organization  `json:"organizations"`
    +	Roles         []Role          `json:"roles"`
    +	SSHPublicKeys []KeyDefinition `json:"ssh_public_keys"`
    +}
    +
    +// KeyDefinition represents a key
    +type KeyDefinition struct {
    +	Key         string `json:"key"`
    +	Fingerprint string `json:"fingerprint,omitempty"`
    +}
    +
+// UsersDefinition represents the response of a GET /users/{userID}
    +type UsersDefinition struct {
    +	User User `json:"user"`
    +}
    +
    +// UserPatchSSHKeyDefinition represents a User Patch
    +type UserPatchSSHKeyDefinition struct {
    +	SSHPublicKeys []KeyDefinition `json:"ssh_public_keys"`
    +}
    +
    +// PatchUserSSHKey updates a user
    +func (s *API) PatchUserSSHKey(UserID string, definition UserPatchSSHKeyDefinition) (*User, error) {
    +	resp, err := s.PatchResponse(AccountAPI, fmt.Sprintf("users/%s", UserID), definition)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var user UsersDefinition
    +
    +	if err = json.Unmarshal(body, &user); err != nil {
    +		return nil, err
    +	}
    +	return &user.User, nil
    +}
    +
    +// GetUserID returns the userID
    +func (s *API) GetUserID() (string, error) {
    +	resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("tokens/%s", s.Token), url.Values{})
    +	if err != nil {
    +		return "", err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return "", err
    +	}
    +	var token getTokenResponse
    +
    +	if err = json.Unmarshal(body, &token); err != nil {
    +		return "", err
    +	}
    +	return token.Token.UserID, nil
    +}
    +
    +// GetUser returns the user
    +func (s *API) GetUser() (*User, error) {
    +	userID, err := s.GetUserID()
    +	if err != nil {
    +		return nil, err
    +	}
    +	resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("users/%s", userID), url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var user UsersDefinition
    +
    +	if err = json.Unmarshal(body, &user); err != nil {
    +		return nil, err
    +	}
    +	return &user.User, nil
    +}
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/user_data.go b/vendor/github.com/nicolai86/scaleway-sdk/user_data.go
    new file mode 100644
    index 000000000000..61608b8dbe17
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/user_data.go
    @@ -0,0 +1,140 @@
    +package api
    +
    +import (
    +	"bytes"
    +	"encoding/json"
    +	"fmt"
    +	"io/ioutil"
    +	"net/http"
    +	"net/url"
    +	"strings"
    +)
    +
    +// Userdatas represents the response of a GET /user_data
    +type Userdatas struct {
    +	UserData []string `json:"user_data"`
    +}
    +
    +// Userdata represents []byte
    +type Userdata []byte
    +
    +// GetUserdatas gets list of userdata for a server
    +func (s *API) GetUserdatas(serverID string, metadata bool) (*Userdatas, error) {
    +	var uri, endpoint string
    +
    +	endpoint = s.computeAPI
    +	if metadata {
    +		uri = "/user_data"
    +		endpoint = MetadataAPI
    +	} else {
    +		uri = fmt.Sprintf("servers/%s/user_data", serverID)
    +	}
    +
    +	resp, err := s.GetResponsePaginate(endpoint, uri, url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var userdatas Userdatas
    +
    +	if err = json.Unmarshal(body, &userdatas); err != nil {
    +		return nil, err
    +	}
    +	return &userdatas, nil
    +}
    +
    +func (s *Userdata) String() string {
    +	return string(*s)
    +}
    +
    +// GetUserdata gets a specific userdata for a server
    +func (s *API) GetUserdata(serverID, key string, metadata bool) (*Userdata, error) {
    +	var uri, endpoint string
    +
    +	endpoint = s.computeAPI
    +	if metadata {
    +		uri = fmt.Sprintf("/user_data/%s", key)
    +		endpoint = MetadataAPI
    +	} else {
    +		uri = fmt.Sprintf("servers/%s/user_data/%s", serverID, key)
    +	}
    +
    +	var err error
    +	resp, err := s.GetResponsePaginate(endpoint, uri, url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	if resp.StatusCode != http.StatusOK {
    +		return nil, fmt.Errorf("no such user_data %q (%d)", key, resp.StatusCode)
    +	}
    +	var data Userdata
    +	data, err = ioutil.ReadAll(resp.Body)
    +	return &data, err
    +}
    +
    +// PatchUserdata sets a user data
    +func (s *API) PatchUserdata(serverID, key string, value []byte, metadata bool) error {
    +	var resource, endpoint string
    +
    +	endpoint = s.computeAPI
    +	if metadata {
    +		resource = fmt.Sprintf("/user_data/%s", key)
    +		endpoint = MetadataAPI
    +	} else {
    +		resource = fmt.Sprintf("servers/%s/user_data/%s", serverID, key)
    +	}
    +
    +	uri := fmt.Sprintf("%s/%s", strings.TrimRight(endpoint, "/"), resource)
    +	payload := new(bytes.Buffer)
    +	payload.Write(value)
    +
    +	req, err := http.NewRequest("PATCH", uri, payload)
    +	if err != nil {
    +		return err
    +	}
    +
    +	req.Header.Set("X-Auth-Token", s.Token)
    +	req.Header.Set("Content-Type", "text/plain")
    +	req.Header.Set("User-Agent", s.userAgent)
    +
    +	resp, err := s.Client.Do(req)
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +
    +	if resp.StatusCode == http.StatusNoContent {
    +		return nil
    +	}
    +
    +	return fmt.Errorf("cannot set user_data (%d)", resp.StatusCode)
    +}
    +
    +// DeleteUserdata deletes a server user_data
    +func (s *API) DeleteUserdata(serverID, key string, metadata bool) error {
    +	var url, endpoint string
    +
    +	endpoint = s.computeAPI
    +	if metadata {
    +		url = fmt.Sprintf("/user_data/%s", key)
    +		endpoint = MetadataAPI
    +	} else {
    +		url = fmt.Sprintf("servers/%s/user_data/%s", serverID, key)
    +	}
    +
    +	resp, err := s.DeleteResponse(endpoint, url)
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +
    +	_, err = s.handleHTTPError([]int{http.StatusNoContent}, resp)
    +	return err
    +}
    diff --git a/vendor/github.com/nicolai86/scaleway-sdk/volume.go b/vendor/github.com/nicolai86/scaleway-sdk/volume.go
    new file mode 100644
    index 000000000000..e4284cce5646
    --- /dev/null
    +++ b/vendor/github.com/nicolai86/scaleway-sdk/volume.go
    @@ -0,0 +1,183 @@
    +package api
    +
    +import (
    +	"encoding/json"
    +	"fmt"
    +	"net/http"
    +	"net/url"
    +)
    +
+// Volume represents a Volume
    +type Volume struct {
    +	// Identifier is a unique identifier for the volume
    +	Identifier string `json:"id,omitempty"`
    +
    +	// Size is the allocated size of the volume
    +	Size uint64 `json:"size,omitempty"`
    +
    +	// CreationDate is the creation date of the volume
    +	CreationDate string `json:"creation_date,omitempty"`
    +
    +	// ModificationDate is the date of the last modification of the volume
    +	ModificationDate string `json:"modification_date,omitempty"`
    +
    +	// Organization is the organization owning the volume
    +	Organization string `json:"organization,omitempty"`
    +
    +	// Name is the name of the volume
    +	Name string `json:"name,omitempty"`
    +
+	// Server is the server using this volume
    +	Server *struct {
    +		Identifier string `json:"id,omitempty"`
    +		Name       string `json:"name,omitempty"`
    +	} `json:"server,omitempty"`
    +
+	// VolumeType is an identifier for the kind of volume (default: l_ssd)
    +	VolumeType string `json:"volume_type,omitempty"`
    +
+	// ExportURI represents the URL used by initrd/scripts to attach the volume
    +	ExportURI string `json:"export_uri,omitempty"`
    +}
    +
    +type volumeResponse struct {
    +	Volume Volume `json:"volume,omitempty"`
    +}
    +
+// VolumeDefinition represents a volume definition
    +type VolumeDefinition struct {
    +	// Name is the user-defined name of the volume
    +	Name string `json:"name"`
    +
+	// Size is the allocated size of the volume
    +	Size uint64 `json:"size"`
    +
+	// Type is the volume type (default: l_ssd)
    +	Type string `json:"volume_type"`
    +
    +	// Organization is the owner of the volume
    +	Organization string `json:"organization"`
    +}
    +
+// VolumePutDefinition represents a volume with nullable fields (for PUT)
    +type VolumePutDefinition struct {
    +	Identifier       *string `json:"id,omitempty"`
    +	Size             *uint64 `json:"size,omitempty"`
    +	CreationDate     *string `json:"creation_date,omitempty"`
    +	ModificationDate *string `json:"modification_date,omitempty"`
    +	Organization     *string `json:"organization,omitempty"`
    +	Name             *string `json:"name,omitempty"`
    +	Server           struct {
    +		Identifier *string `json:"id,omitempty"`
    +		Name       *string `json:"name,omitempty"`
    +	} `json:"server,omitempty"`
    +	VolumeType *string `json:"volume_type,omitempty"`
    +	ExportURI  *string `json:"export_uri,omitempty"`
    +}
    +
    +// CreateVolume creates a new volume
    +func (s *API) CreateVolume(definition VolumeDefinition) (*Volume, error) {
    +	definition.Organization = s.Organization
    +	if definition.Type == "" {
    +		definition.Type = "l_ssd"
    +	}
    +
    +	resp, err := s.PostResponse(s.computeAPI, "volumes", definition)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusCreated}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var volume volumeResponse
    +
    +	if err = json.Unmarshal(body, &volume); err != nil {
    +		return nil, err
    +	}
    +	return &volume.Volume, nil
    +}
    +
    +// UpdateVolume updates a volume
    +func (s *API) UpdateVolume(volumeID string, definition VolumePutDefinition) (*Volume, error) {
    +	resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("volumes/%s", volumeID), definition)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var volume volumeResponse
    +
    +	if err = json.Unmarshal(body, &volume); err != nil {
    +		return nil, err
    +	}
    +	return &volume.Volume, nil
    +}
    +
    +// DeleteVolume deletes a volume
    +func (s *API) DeleteVolume(volumeID string) error {
    +	resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("volumes/%s", volumeID))
    +	if err != nil {
    +		return err
    +	}
    +	defer resp.Body.Close()
    +
    +	if _, err := s.handleHTTPError([]int{http.StatusNoContent}, resp); err != nil {
    +		return err
    +	}
    +	return nil
    +}
    +
    +type volumesResponse struct {
    +	Volumes []Volume `json:"volumes,omitempty"`
    +}
    +
    +// GetVolumes gets the list of volumes from the API
    +func (s *API) GetVolumes() (*[]Volume, error) {
    +	query := url.Values{}
    +
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "volumes", query)
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	var volumes volumesResponse
    +
    +	if err = json.Unmarshal(body, &volumes); err != nil {
    +		return nil, err
    +	}
    +	return &volumes.Volumes, nil
    +}
    +
    +// GetVolume gets a volume from the API
    +func (s *API) GetVolume(volumeID string) (*Volume, error) {
    +	resp, err := s.GetResponsePaginate(s.computeAPI, "volumes/"+volumeID, url.Values{})
    +	if err != nil {
    +		return nil, err
    +	}
    +	defer resp.Body.Close()
    +
    +	body, err := s.handleHTTPError([]int{http.StatusOK}, resp)
    +	if err != nil {
    +		return nil, err
    +	}
    +	var volume volumeResponse
    +
    +	if err = json.Unmarshal(body, &volume); err != nil {
    +		return nil, err
    +	}
    +	// FIXME region, arch, owner, title
    +	return &volume.Volume, nil
    +}
    diff --git a/vendor/github.com/packethost/packngo/.drone.yml b/vendor/github.com/packethost/packngo/.drone.yml
    new file mode 100644
    index 000000000000..88ac5fd0f217
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/.drone.yml
    @@ -0,0 +1,28 @@
    +workspace:
    +  base: /go
    +  path: src/github.com/packethost/packngo
    +
    +pipeline:
    +  lint:
    +    image: golang:1.9
    +    commands:
    +      - go get -v -u github.com/alecthomas/gometalinter
    +      - gometalinter --install
    +      - go get -v -t ./...
    +      - |
    +        gometalinter --disable=gas ./... || :
    +      - |
    +        gometalinter --disable-all --enable=gas ./... || :
    +      - |
    +        gofmt -d . | (! grep '.') || ok=false
    +      - if ! $ok; then exit 1; fi
    +
    +  build:
    +    image: golang:1.9
    +    commands:
    +      - go build -i -v ./...
    +
    +  test:
    +    image: golang:1.9
    +    commands:
    +      - go test ./...
    diff --git a/vendor/github.com/packethost/packngo/.gitignore b/vendor/github.com/packethost/packngo/.gitignore
    new file mode 100644
    index 000000000000..844259c4bd7f
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/.gitignore
    @@ -0,0 +1,28 @@
    +### Go template
    +# Compiled Object files, Static and Dynamic libs (Shared Objects)
    +*.o
    +*.a
    +*.so
    +
    +# Folders
    +_obj
    +_test
    +
    +# Architecture specific extensions/prefixes
    +*.[568vq]
    +[568vq].out
    +
    +*.cgo1.go
    +*.cgo2.c
    +_cgo_defun.c
    +_cgo_gotypes.go
    +_cgo_export.*
    +
    +_testmain.go
    +
    +*.exe
    +*.test
    +*.prof
    +
    +# idea
    +.idea**/**
    diff --git a/vendor/github.com/packethost/packngo/CHANGELOG.md b/vendor/github.com/packethost/packngo/CHANGELOG.md
    new file mode 100644
    index 000000000000..34e5d311404b
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/CHANGELOG.md
    @@ -0,0 +1,54 @@
    +# Changelog
    +All notable changes to this project will be documented in this file.
    +
    +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
    +This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
    +
    +## [Unreleased]
    +
+This release contains a bunch of fixes to the package API after some more
+real-world use. There are a few breaks in backwards compatibility, but we are
+trying to minimize them and move towards a 1.0 release.
    +
    +### Added
    +- "acceptance" tests which run against production api (will incur charges)
    +- HardwareReservation to Device
    +- RootPassword to Device
    +- Spot market support
    +- Management and Manageable fields to discern between Elastic IPs and device unique IP
    +- Support for Volume attachments to Device and Volume
    +- Support for ProvisionEvents
    +- DoRequest sugar to Client
    +- Add ListProject function to the SSHKeys interface
    +- Operations for switching between Network Modes, aka "L2 support"
+- Support for Organization, Payment Method and Billing address resources
    +
    +### Fixed
    +- User.Emails json tag is fixed to match api response
    +- Single error object api response is now handled correctly
    +
    +### Changed
    +- IPService was split to DeviceIPService and ProjectIPService
    +- Renamed Device.IPXEScriptUrl -> Device.IPXEScriptURL
    +- Renamed DeviceCreateRequest.HostName -> DeviceCreateRequest.Hostname
    +- Renamed DeviceCreateRequest.IPXEScriptUrl -> DeviceCreateRequest.IPXEScriptURL
    +- Renamed DeviceUpdateRequest.HostName -> DeviceUpdateRequest.Hostname
    +- Renamed DeviceUpdateRequest.IPXEScriptUrl -> DeviceUpdateRequest.IPXEScriptURL
    +- Sync with packet.net api change to /projects/{id}/ips which no longer returns
    +  the address in CIDR form
    +- Removed package level exported functions that should have never existed
    +
    +## [0.1.0] - 2017-08-17
    +
+Initial release; supports most of the API for interacting with:
    +
    +- Plans
    +- Users
    +- Emails
    +- SSH Keys
    +- Devices
    +- Projects
    +- Facilities
    +- Operating Systems
    +- IP Reservations
    +- Volumes
    diff --git a/vendor/github.com/packethost/packngo/LICENSE.txt b/vendor/github.com/packethost/packngo/LICENSE.txt
    new file mode 100644
    index 000000000000..57c50110ca2c
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/LICENSE.txt
    @@ -0,0 +1,56 @@
    +Copyright (c) 2014 The packngo AUTHORS. All rights reserved.
    +
    +MIT License
    +
    +Permission is hereby granted, free of charge, to any person obtaining
    +a copy of this software and associated documentation files (the
    +"Software"), to deal in the Software without restriction, including
    +without limitation the rights to use, copy, modify, merge, publish,
    +distribute, sublicense, and/or sell copies of the Software, and to
    +permit persons to whom the Software is furnished to do so, subject to
    +the following conditions:
    +
    +The above copyright notice and this permission notice shall be
    +included in all copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
    +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    +
    +======================
    +Portions of the client are based on code at:
    +https://github.com/google/go-github/ and
    +https://github.com/digitalocean/godo
    +
    +Copyright (c) 2013 The go-github AUTHORS. All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +   * Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +   * Redistributions in binary form must reproduce the above
    +copyright notice, this list of conditions and the following disclaimer
    +in the documentation and/or other materials provided with the
    +distribution.
    +   * Neither the name of Google Inc. nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    +
    diff --git a/vendor/github.com/packethost/packngo/README.md b/vendor/github.com/packethost/packngo/README.md
    new file mode 100644
    index 000000000000..a63faf5a9e37
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/README.md
    @@ -0,0 +1,72 @@
    +# packngo
+Packet Go API client
    +
    +![](https://www.packet.net/media/images/xeiw-packettwitterprofilew.png)
    +
    +
    +Installation
    +------------
    +
    +`go get github.com/packethost/packngo`
    +
    +Usage
    +-----
    +
+To authenticate to the Packet API, you must have your API token exported in the env var `PACKET_API_TOKEN`.
    +
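+For example (shell syntax; the token value below is a placeholder):
+
+```
+export PACKET_API_TOKEN=<your API token>
+```
+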
+This code snippet initializes the Packet API client and lists your Projects:
    +
    +```go
    +package main
    +
    +import (
    +	"log"
    +
    +	"github.com/packethost/packngo"
    +)
    +
    +func main() {
    +	c, err := packngo.NewClient()
    +	if err != nil {
    +		log.Fatal(err)
    +	}
    +
    +	ps, _, err := c.Projects.List(nil)
    +	if err != nil {
    +		log.Fatal(err)
    +	}
    +	for _, p := range ps {
    +		log.Println(p.ID, p.Name)
    +	}
    +}
    +
    +```
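    +
    +As a further minimal sketch (for illustration only; the facility, plan, OS slug and project ID below are placeholders to replace with your own values), this snippet provisions a device:
    +
    +```go
    +package main
    +
    +import (
    +	"log"
    +
    +	"github.com/packethost/packngo"
    +)
    +
    +func main() {
    +	c, err := packngo.NewClient()
    +	if err != nil {
    +		log.Fatal(err)
    +	}
    +
    +	// placeholder values; substitute your own project ID, facility, plan and OS
    +	cr := packngo.DeviceCreateRequest{
    +		Hostname:     "test-device",
    +		Facility:     "ewr1",
    +		Plan:         "baremetal_0",
    +		OS:           "ubuntu_16_04",
    +		ProjectID:    "YOUR-PROJECT-ID",
    +		BillingCycle: "hourly",
    +	}
    +
    +	d, _, err := c.Devices.Create(&cr)
    +	if err != nil {
    +		log.Fatal(err)
    +	}
    +	log.Println(d.ID, d.Hostname, d.State)
    +}
    +```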
    +
    +This lib is used by the official [terraform-provider-packet](https://github.com/terraform-providers/terraform-provider-packet).
    +
    +You can also learn a lot from the `*_test.go` sources. Almost all our tests touch the Packet API, so you can see how auth, querying and POSTing work. For example, see [devices_test.go](devices_test.go).
    +
    +
    +
    +Acceptance Tests
    +----------------
    +
    +If you want to run tests against the actual Packet API, you must set the env var `PACKNGO_TEST_ACTUAL_API` to a non-empty string for `go test`. The device tests wait for device creation, so it's best to run a few in parallel.
    +
    +To run a particular test, you can do
    +
    +```
    +$ PACKNGO_TEST_ACTUAL_API=1 go test -v -run=TestAccDeviceBasic
    +```
    +
    +If you want to see HTTP requests, set the `PACKNGO_DEBUG` env var to a non-empty string, for example:
    +
    +```
    +$ PACKNGO_DEBUG=1 PACKNGO_TEST_ACTUAL_API=1 go test -v -run=TestAccVolumeUpdate
    +```
    +
    +
    +Committing
    +----------
    +
    +Before committing, it's a good idea to run `gofmt -w *.go`. ([gofmt](https://golang.org/cmd/gofmt/))
    diff --git a/vendor/github.com/packethost/packngo/billing_address.go b/vendor/github.com/packethost/packngo/billing_address.go
    new file mode 100644
    index 000000000000..93255b32290a
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/billing_address.go
    @@ -0,0 +1,7 @@
    +package packngo
    +
    +type BillingAddress struct {
    +	StreetAddress string `json:"street_address,omitempty"`
    +	PostalCode    string `json:"postal_code,omitempty"`
    +	CountryCode   string `json:"country_code_alpha2,omitempty"`
    +}
    diff --git a/vendor/github.com/packethost/packngo/devices.go b/vendor/github.com/packethost/packngo/devices.go
    new file mode 100644
    index 000000000000..b41bcf070aa9
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/devices.go
    @@ -0,0 +1,257 @@
    +package packngo
    +
    +import (
    +	"fmt"
    +	"strings"
    +)
    +
    +const deviceBasePath = "/devices"
    +
    +// DeviceService interface defines available device methods
    +type DeviceService interface {
    +	List(ProjectID string, listOpt *ListOptions) ([]Device, *Response, error)
    +	Get(string) (*Device, *Response, error)
    +	GetExtra(deviceID string, includes, excludes []string) (*Device, *Response, error)
    +	Create(*DeviceCreateRequest) (*Device, *Response, error)
    +	Update(string, *DeviceUpdateRequest) (*Device, *Response, error)
    +	Delete(string) (*Response, error)
    +	Reboot(string) (*Response, error)
    +	PowerOff(string) (*Response, error)
    +	PowerOn(string) (*Response, error)
    +	Lock(string) (*Response, error)
    +	Unlock(string) (*Response, error)
    +}
    +
    +type devicesRoot struct {
    +	Devices []Device `json:"devices"`
    +	Meta    meta     `json:"meta"`
    +}
    +
    +// Device represents a Packet device
    +type Device struct {
    +	ID                  string                 `json:"id"`
    +	Href                string                 `json:"href,omitempty"`
    +	Hostname            string                 `json:"hostname,omitempty"`
    +	State               string                 `json:"state,omitempty"`
    +	Created             string                 `json:"created_at,omitempty"`
    +	Updated             string                 `json:"updated_at,omitempty"`
    +	Locked              bool                   `json:"locked,omitempty"`
    +	BillingCycle        string                 `json:"billing_cycle,omitempty"`
    +	Storage             map[string]interface{} `json:"storage,omitempty"`
    +	Tags                []string               `json:"tags,omitempty"`
    +	Network             []*IPAddressAssignment `json:"ip_addresses"`
    +	Volumes             []*Volume              `json:"volumes"`
    +	OS                  *OS                    `json:"operating_system,omitempty"`
    +	Plan                *Plan                  `json:"plan,omitempty"`
    +	Facility            *Facility              `json:"facility,omitempty"`
    +	Project             *Project               `json:"project,omitempty"`
    +	ProvisionEvents     []*ProvisionEvent      `json:"provisioning_events,omitempty"`
    +	ProvisionPer        float32                `json:"provisioning_percentage,omitempty"`
    +	UserData            string                 `json:"userdata,omitempty"`
    +	RootPassword        string                 `json:"root_password,omitempty"`
    +	IPXEScriptURL       string                 `json:"ipxe_script_url,omitempty"`
    +	AlwaysPXE           bool                   `json:"always_pxe,omitempty"`
    +	HardwareReservation Href                   `json:"hardware_reservation,omitempty"`
    +	SpotInstance        bool                   `json:"spot_instance,omitempty"`
    +	SpotPriceMax        float64                `json:"spot_price_max,omitempty"`
    +	TerminationTime     *Timestamp             `json:"termination_time,omitempty"`
    +	NetworkPorts        []Port                 `json:"network_ports,omitempty"`
    +	CustomData          map[string]interface{} `json:"customdata,omitempty"`
    +}
    +
    +type ProvisionEvent struct {
    +	ID            string     `json:"id"`
    +	Body          string     `json:"body"`
    +	CreatedAt     *Timestamp `json:"created_at,omitempty"`
    +	Href          string     `json:"href"`
    +	Interpolated  string     `json:"interpolated"`
    +	Relationships []Href     `json:"relationships"`
    +	State         string     `json:"state"`
    +	Type          string     `json:"type"`
    +}
    +
    +func (d Device) String() string {
    +	return Stringify(d)
    +}
    +
    +// DeviceCreateRequest type used to create a Packet device
    +type DeviceCreateRequest struct {
    +	Hostname              string     `json:"hostname"`
    +	Plan                  string     `json:"plan"`
    +	Facility              string     `json:"facility"`
    +	OS                    string     `json:"operating_system"`
    +	BillingCycle          string     `json:"billing_cycle"`
    +	ProjectID             string     `json:"project_id"`
    +	UserData              string     `json:"userdata"`
    +	Storage               string     `json:"storage,omitempty"`
    +	Tags                  []string   `json:"tags"`
    +	IPXEScriptURL         string     `json:"ipxe_script_url,omitempty"`
    +	PublicIPv4SubnetSize  int        `json:"public_ipv4_subnet_size,omitempty"`
    +	AlwaysPXE             bool       `json:"always_pxe,omitempty"`
    +	HardwareReservationID string     `json:"hardware_reservation_id,omitempty"`
    +	SpotInstance          bool       `json:"spot_instance,omitempty"`
    +	SpotPriceMax          float64    `json:"spot_price_max,omitempty,string"`
    +	TerminationTime       *Timestamp `json:"termination_time,omitempty"`
    +	CustomData            string     `json:"customdata,omitempty"`
    +}
    +
    +// DeviceUpdateRequest type used to update a Packet device
    +type DeviceUpdateRequest struct {
    +	Hostname      *string   `json:"hostname,omitempty"`
    +	Description   *string   `json:"description,omitempty"`
    +	UserData      *string   `json:"userdata,omitempty"`
    +	Locked        *bool     `json:"locked,omitempty"`
    +	Tags          *[]string `json:"tags,omitempty"`
    +	AlwaysPXE     *bool     `json:"always_pxe,omitempty"`
    +	IPXEScriptURL *string   `json:"ipxe_script_url,omitempty"`
    +	CustomData    *string   `json:"customdata,omitempty"`
    +}
    +
    +func (d DeviceCreateRequest) String() string {
    +	return Stringify(d)
    +}
    +
    +// DeviceActionRequest type used to execute actions on devices
    +type DeviceActionRequest struct {
    +	Type string `json:"type"`
    +}
    +
    +func (d DeviceActionRequest) String() string {
    +	return Stringify(d)
    +}
    +
    +// DeviceServiceOp implements DeviceService
    +type DeviceServiceOp struct {
    +	client *Client
    +}
    +
    +// List returns devices on a project
    +func (s *DeviceServiceOp) List(projectID string, listOpt *ListOptions) (devices []Device, resp *Response, err error) {
    +	params := "include=facility"
    +	if listOpt != nil {
    +		params = listOpt.createURL()
    +	}
    +	path := fmt.Sprintf("%s/%s%s?%s", projectBasePath, projectID, deviceBasePath, params)
    +
    +	for {
    +		subset := new(devicesRoot)
    +
    +		resp, err = s.client.DoRequest("GET", path, nil, subset)
    +		if err != nil {
    +			return nil, resp, err
    +		}
    +
    +		devices = append(devices, subset.Devices...)
    +
    +		if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) {
    +			path = subset.Meta.Next.Href
    +			if params != "" {
    +				path = fmt.Sprintf("%s&%s", path, params)
    +			}
    +			continue
    +		}
    +
    +		return
    +	}
    +}
    +
    +// Get returns a device by id
    +func (s *DeviceServiceOp) Get(deviceID string) (*Device, *Response, error) {
    +	return s.GetExtra(deviceID, []string{"facility"}, nil)
    +}
    +
    +// GetExtra returns a device by id. The includes/excludes parameters control which related
    +// resources are expanded inline instead of being represented only by an href link
    +func (s *DeviceServiceOp) GetExtra(deviceID string, includes, excludes []string) (*Device, *Response, error) {
    +	path := fmt.Sprintf("%s/%s", deviceBasePath, deviceID)
    +	if includes != nil {
    +		path += fmt.Sprintf("?include=%s", strings.Join(includes, ","))
    +	} else if excludes != nil {
    +		path += fmt.Sprintf("?exclude=%s", strings.Join(excludes, ","))
    +	}
    +	device := new(Device)
    +
    +	resp, err := s.client.DoRequest("GET", path, nil, device)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return device, resp, err
    +}
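    +
    +// Illustrative sketch (not part of the upstream API surface; `client` is assumed to be a
    +// configured *Client and the UUID is a placeholder): expand the project and facility
    +// sub-resources inline when fetching a device:
    +//
    +//	d, _, err := client.Devices.GetExtra("device-uuid", []string{"project", "facility"}, nil)
    +//	if err == nil && d.Project != nil {
    +//		fmt.Println(d.Project.Name)
    +//	}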
    +
    +// Create creates a new device
    +func (s *DeviceServiceOp) Create(createRequest *DeviceCreateRequest) (*Device, *Response, error) {
    +	path := fmt.Sprintf("%s/%s%s", projectBasePath, createRequest.ProjectID, deviceBasePath)
    +	device := new(Device)
    +
    +	resp, err := s.client.DoRequest("POST", path, createRequest, device)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return device, resp, err
    +}
    +
    +// Update updates an existing device
    +func (s *DeviceServiceOp) Update(deviceID string, updateRequest *DeviceUpdateRequest) (*Device, *Response, error) {
    +	path := fmt.Sprintf("%s/%s?include=facility", deviceBasePath, deviceID)
    +	device := new(Device)
    +
    +	resp, err := s.client.DoRequest("PUT", path, updateRequest, device)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return device, resp, err
    +}
    +
    +// Delete deletes a device
    +func (s *DeviceServiceOp) Delete(deviceID string) (*Response, error) {
    +	path := fmt.Sprintf("%s/%s", deviceBasePath, deviceID)
    +
    +	return s.client.DoRequest("DELETE", path, nil, nil)
    +}
    +
    +// Reboot reboots a device
    +func (s *DeviceServiceOp) Reboot(deviceID string) (*Response, error) {
    +	path := fmt.Sprintf("%s/%s/actions", deviceBasePath, deviceID)
    +	action := &DeviceActionRequest{Type: "reboot"}
    +
    +	return s.client.DoRequest("POST", path, action, nil)
    +}
    +
    +// PowerOff powers off a device
    +func (s *DeviceServiceOp) PowerOff(deviceID string) (*Response, error) {
    +	path := fmt.Sprintf("%s/%s/actions", deviceBasePath, deviceID)
    +	action := &DeviceActionRequest{Type: "power_off"}
    +
    +	return s.client.DoRequest("POST", path, action, nil)
    +}
    +
    +// PowerOn powers on a device
    +func (s *DeviceServiceOp) PowerOn(deviceID string) (*Response, error) {
    +	path := fmt.Sprintf("%s/%s/actions", deviceBasePath, deviceID)
    +	action := &DeviceActionRequest{Type: "power_on"}
    +
    +	return s.client.DoRequest("POST", path, action, nil)
    +}
    +
    +type lockType struct {
    +	Locked bool `json:"locked"`
    +}
    +
    +// Lock sets a device to "locked"
    +func (s *DeviceServiceOp) Lock(deviceID string) (*Response, error) {
    +	path := fmt.Sprintf("%s/%s", deviceBasePath, deviceID)
    +	action := lockType{Locked: true}
    +
    +	return s.client.DoRequest("PATCH", path, action, nil)
    +}
    +
    +// Unlock sets a device to "unlocked"
    +func (s *DeviceServiceOp) Unlock(deviceID string) (*Response, error) {
    +	path := fmt.Sprintf("%s/%s", deviceBasePath, deviceID)
    +	action := lockType{Locked: false}
    +
    +	return s.client.DoRequest("PATCH", path, action, nil)
    +}
    diff --git a/vendor/github.com/packethost/packngo/email.go b/vendor/github.com/packethost/packngo/email.go
    new file mode 100644
    index 000000000000..4c77d0f60771
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/email.go
    @@ -0,0 +1,37 @@
    +package packngo
    +
    +const emailBasePath = "/emails"
    +
    +// EmailService interface defines available email methods
    +type EmailService interface {
    +	Get(string) (*Email, *Response, error)
    +}
    +
    +// Email represents a user's email address
    +type Email struct {
    +	ID      string `json:"id"`
    +	Address string `json:"address"`
    +	Default bool   `json:"default,omitempty"`
    +	URL     string `json:"href,omitempty"`
    +}
    +
    +func (e Email) String() string {
    +	return Stringify(e)
    +}
    +
    +// EmailServiceOp implements EmailService
    +type EmailServiceOp struct {
    +	client *Client
    +}
    +
    +// Get retrieves an email by id
    +func (s *EmailServiceOp) Get(emailID string) (*Email, *Response, error) {
    +	// request the specific email resource rather than the whole collection
    +	path := emailBasePath + "/" + emailID
    +	email := new(Email)
    +
    +	resp, err := s.client.DoRequest("GET", path, nil, email)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return email, resp, err
    +}
    diff --git a/vendor/github.com/packethost/packngo/facilities.go b/vendor/github.com/packethost/packngo/facilities.go
    new file mode 100644
    index 000000000000..12aac9198b6e
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/facilities.go
    @@ -0,0 +1,52 @@
    +package packngo
    +
    +const facilityBasePath = "/facilities"
    +
    +// FacilityService interface defines available facility methods
    +type FacilityService interface {
    +	List() ([]Facility, *Response, error)
    +}
    +
    +type facilityRoot struct {
    +	Facilities []Facility `json:"facilities"`
    +}
    +
    +// Facility represents a Packet facility
    +type Facility struct {
    +	ID       string   `json:"id"`
    +	Name     string   `json:"name,omitempty"`
    +	Code     string   `json:"code,omitempty"`
    +	Features []string `json:"features,omitempty"`
    +	Address  *Address `json:"address,omitempty"`
    +	URL      string   `json:"href,omitempty"`
    +}
    +
    +func (f Facility) String() string {
    +	return Stringify(f)
    +}
    +
    +// Address - the physical address of the facility
    +type Address struct {
    +	ID string `json:"id,omitempty"`
    +}
    +
    +func (a Address) String() string {
    +	return Stringify(a)
    +}
    +
    +// FacilityServiceOp implements FacilityService
    +type FacilityServiceOp struct {
    +	client *Client
    +}
    +
    +// List returns all available Packet facilities
    +func (s *FacilityServiceOp) List() ([]Facility, *Response, error) {
    +	root := new(facilityRoot)
    +
    +	resp, err := s.client.DoRequest("GET", facilityBasePath, nil, root)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return root.Facilities, resp, err
    +}
    diff --git a/vendor/github.com/packethost/packngo/ip.go b/vendor/github.com/packethost/packngo/ip.go
    new file mode 100644
    index 000000000000..671eea60c210
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/ip.go
    @@ -0,0 +1,194 @@
    +package packngo
    +
    +import (
    +	"fmt"
    +)
    +
    +const ipBasePath = "/ips"
    +
    +// DeviceIPService handles assignment of addresses from reserved blocks to instances in a project.
    +type DeviceIPService interface {
    +	Assign(deviceID string, assignRequest *AddressStruct) (*IPAddressAssignment, *Response, error)
    +	Unassign(assignmentID string) (*Response, error)
    +	Get(assignmentID string) (*IPAddressAssignment, *Response, error)
    +}
    +
    +// ProjectIPService handles reservation of IP address blocks for a project.
    +type ProjectIPService interface {
    +	Get(reservationID string) (*IPAddressReservation, *Response, error)
    +	List(projectID string) ([]IPAddressReservation, *Response, error)
    +	Request(projectID string, ipReservationReq *IPReservationRequest) (*IPAddressReservation, *Response, error)
    +	Remove(ipReservationID string) (*Response, error)
    +	AvailableAddresses(ipReservationID string, r *AvailableRequest) ([]string, *Response, error)
    +}
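    +
    +// A typical flow, as an illustrative sketch only (`client` is assumed to be a configured
    +// *Client; the IDs, facility code and reservation type are placeholders): reserve a block
    +// for a project, pick an available address, then assign it to a device:
    +//
    +//	req := &IPReservationRequest{Type: "public_ipv4", Quantity: 1, Facility: "ewr1", Comments: "example"}
    +//	res, _, _ := client.ProjectIPs.Request("project-uuid", req)
    +//	addrs, _, _ := client.ProjectIPs.AvailableAddresses(res.ID, &AvailableRequest{CIDR: 32})
    +//	client.DeviceIPs.Assign("device-uuid", &AddressStruct{Address: addrs[0]})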
    +
    +type ipAddressCommon struct {
    +	ID            string `json:"id"`
    +	Address       string `json:"address"`
    +	Gateway       string `json:"gateway"`
    +	Network       string `json:"network"`
    +	AddressFamily int    `json:"address_family"`
    +	Netmask       string `json:"netmask"`
    +	Public        bool   `json:"public"`
    +	CIDR          int    `json:"cidr"`
    +	Created       string `json:"created_at,omitempty"`
    +	Updated       string `json:"updated_at,omitempty"`
    +	Href          string `json:"href"`
    +	Management    bool   `json:"management"`
    +	Manageable    bool   `json:"manageable"`
    +	Project       Href   `json:"project"`
    +}
    +
    +// IPAddressReservation is created when a user sends an IP reservation request for a project (provided it is within quota).
    +type IPAddressReservation struct {
    +	ipAddressCommon
    +	Assignments []Href   `json:"assignments"`
    +	Facility    Facility `json:"facility,omitempty"`
    +	Available   string   `json:"available"`
    +	Addon       bool     `json:"addon"`
    +	Bill        bool     `json:"bill"`
    +}
    +
    +// AvailableResponse is a type for listing of available addresses from a reserved block.
    +type AvailableResponse struct {
    +	Available []string `json:"available"`
    +}
    +
    +// AvailableRequest is a type for listing available addresses from a reserved block.
    +type AvailableRequest struct {
    +	CIDR int `json:"cidr"`
    +}
    +
    +// IPAddressAssignment is created when an IP address from reservation block is assigned to a device.
    +type IPAddressAssignment struct {
    +	ipAddressCommon
    +	AssignedTo Href `json:"assigned_to"`
    +}
    +
    +// IPReservationRequest represents the body of a reservation request.
    +type IPReservationRequest struct {
    +	Type     string `json:"type"`
    +	Quantity int    `json:"quantity"`
    +	Comments string `json:"comments"`
    +	Facility string `json:"facility"`
    +}
    +
    +// AddressStruct is a helper type for request/response with dict like {"address": ... }
    +type AddressStruct struct {
    +	Address string `json:"address"`
    +}
    +
    +func deleteFromIP(client *Client, resourceID string) (*Response, error) {
    +	path := fmt.Sprintf("%s/%s", ipBasePath, resourceID)
    +
    +	return client.DoRequest("DELETE", path, nil, nil)
    +}
    +
    +func (i IPAddressReservation) String() string {
    +	return Stringify(i)
    +}
    +
    +func (i IPAddressAssignment) String() string {
    +	return Stringify(i)
    +}
    +
    +// DeviceIPServiceOp implements DeviceIPService, the IP-address assignment methods.
    +type DeviceIPServiceOp struct {
    +	client *Client
    +}
    +
    +// Unassign unassigns an IP address from the device to which it is currently assigned.
    +// This will remove the relationship between an IP and the device and will make the IP
    +// address available to be assigned to another device.
    +func (i *DeviceIPServiceOp) Unassign(assignmentID string) (*Response, error) {
    +	return deleteFromIP(i.client, assignmentID)
    +}
    +
    +// Assign assigns an IP address to a device.
    +// The IP address must be in one of the IP ranges assigned to the device’s project.
    +func (i *DeviceIPServiceOp) Assign(deviceID string, assignRequest *AddressStruct) (*IPAddressAssignment, *Response, error) {
    +	path := fmt.Sprintf("%s/%s%s", deviceBasePath, deviceID, ipBasePath)
    +	ipa := new(IPAddressAssignment)
    +
    +	resp, err := i.client.DoRequest("POST", path, assignRequest, ipa)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return ipa, resp, err
    +}
    +
    +// Get returns assignment by ID.
    +func (i *DeviceIPServiceOp) Get(assignmentID string) (*IPAddressAssignment, *Response, error) {
    +	path := fmt.Sprintf("%s/%s", ipBasePath, assignmentID)
    +	ipa := new(IPAddressAssignment)
    +
    +	resp, err := i.client.DoRequest("GET", path, nil, ipa)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return ipa, resp, err
    +}
    +
    +// ProjectIPServiceOp implements ProjectIPService, the project IP reservation methods.
    +type ProjectIPServiceOp struct {
    +	client *Client
    +}
    +
    +// Get returns reservation by ID.
    +func (i *ProjectIPServiceOp) Get(reservationID string) (*IPAddressReservation, *Response, error) {
    +	path := fmt.Sprintf("%s/%s", ipBasePath, reservationID)
    +	ipr := new(IPAddressReservation)
    +
    +	resp, err := i.client.DoRequest("GET", path, nil, ipr)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return ipr, resp, err
    +}
    +
    +// List provides a list of IP reservations for a single project.
    +func (i *ProjectIPServiceOp) List(projectID string) ([]IPAddressReservation, *Response, error) {
    +	path := fmt.Sprintf("%s/%s%s", projectBasePath, projectID, ipBasePath)
    +	reservations := new(struct {
    +		Reservations []IPAddressReservation `json:"ip_addresses"`
    +	})
    +
    +	resp, err := i.client.DoRequest("GET", path, nil, reservations)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +	return reservations.Reservations, resp, nil
    +}
    +
    +// Request requests more IP space for a project in order to have additional IP addresses to assign to devices.
    +func (i *ProjectIPServiceOp) Request(projectID string, ipReservationReq *IPReservationRequest) (*IPAddressReservation, *Response, error) {
    +	path := fmt.Sprintf("%s/%s%s", projectBasePath, projectID, ipBasePath)
    +	ipr := new(IPAddressReservation)
    +
    +	resp, err := i.client.DoRequest("POST", path, ipReservationReq, ipr)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +	return ipr, resp, err
    +}
    +
    +// Remove removes an IP reservation from the project.
    +func (i *ProjectIPServiceOp) Remove(ipReservationID string) (*Response, error) {
    +	return deleteFromIP(i.client, ipReservationID)
    +}
    +
    +// AvailableAddresses lists addresses available from a reserved block
    +func (i *ProjectIPServiceOp) AvailableAddresses(ipReservationID string, r *AvailableRequest) ([]string, *Response, error) {
    +	path := fmt.Sprintf("%s/%s/available?cidr=%d", ipBasePath, ipReservationID, r.CIDR)
    +	ar := new(AvailableResponse)
    +
    +	resp, err := i.client.DoRequest("GET", path, r, ar)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +	return ar.Available, resp, nil
    +
    +}
    diff --git a/vendor/github.com/packethost/packngo/operatingsystems.go b/vendor/github.com/packethost/packngo/operatingsystems.go
    new file mode 100644
    index 000000000000..7fd7f27ad2fe
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/operatingsystems.go
    @@ -0,0 +1,41 @@
    +package packngo
    +
    +const osBasePath = "/operating-systems"
    +
    +// OSService interface defines available operating_systems methods
    +type OSService interface {
    +	List() ([]OS, *Response, error)
    +}
    +
    +type osRoot struct {
    +	OperatingSystems []OS `json:"operating_systems"`
    +}
    +
    +// OS represents a Packet operating system
    +type OS struct {
    +	Name    string `json:"name"`
    +	Slug    string `json:"slug"`
    +	Distro  string `json:"distro"`
    +	Version string `json:"version"`
    +}
    +
    +func (o OS) String() string {
    +	return Stringify(o)
    +}
    +
    +// OSServiceOp implements OSService
    +type OSServiceOp struct {
    +	client *Client
    +}
    +
    +// List returns all available operating systems
    +func (s *OSServiceOp) List() ([]OS, *Response, error) {
    +	root := new(osRoot)
    +
    +	resp, err := s.client.DoRequest("GET", osBasePath, nil, root)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return root.OperatingSystems, resp, err
    +}
    diff --git a/vendor/github.com/packethost/packngo/organizations.go b/vendor/github.com/packethost/packngo/organizations.go
    new file mode 100644
    index 000000000000..36e76f1a2a0d
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/organizations.go
    @@ -0,0 +1,147 @@
    +package packngo
    +
    +import "fmt"
    +
    +// API documentation https://www.packet.net/developers/api/organizations/
    +const organizationBasePath = "/organizations"
    +
    +// OrganizationService interface defines available organization methods
    +type OrganizationService interface {
    +	List() ([]Organization, *Response, error)
    +	Get(string) (*Organization, *Response, error)
    +	Create(*OrganizationCreateRequest) (*Organization, *Response, error)
    +	Update(string, *OrganizationUpdateRequest) (*Organization, *Response, error)
    +	Delete(string) (*Response, error)
    +	ListPaymentMethods(string) ([]PaymentMethod, *Response, error)
    +}
    +
    +type organizationsRoot struct {
    +	Organizations []Organization `json:"organizations"`
    +}
    +
    +// Organization represents a Packet organization
    +type Organization struct {
    +	ID           string    `json:"id"`
    +	Name         string    `json:"name,omitempty"`
    +	Description  string    `json:"description,omitempty"`
    +	Website      string    `json:"website,omitempty"`
    +	Twitter      string    `json:"twitter,omitempty"`
    +	Created      string    `json:"created_at,omitempty"`
    +	Updated      string    `json:"updated_at,omitempty"`
    +	Address      Address   `json:"address,omitempty"`
    +	TaxID        string    `json:"tax_id,omitempty"`
    +	MainPhone    string    `json:"main_phone,omitempty"`
    +	BillingPhone string    `json:"billing_phone,omitempty"`
    +	CreditAmount float64   `json:"credit_amount,omitempty"`
    +	Logo         string    `json:"logo,omitempty"`
    +	LogoThumb    string    `json:"logo_thumb,omitempty"`
    +	Projects     []Project `json:"projects,omitempty"`
    +	URL          string    `json:"href,omitempty"`
    +	Users        []User    `json:"members,omitempty"`
    +	Owners       []User    `json:"owners,omitempty"`
    +}
    +
    +func (o Organization) String() string {
    +	return Stringify(o)
    +}
    +
    +// OrganizationCreateRequest type used to create a Packet organization
    +type OrganizationCreateRequest struct {
    +	Name        string `json:"name"`
    +	Description string `json:"description"`
    +	Website     string `json:"website"`
    +	Twitter     string `json:"twitter"`
    +	Logo        string `json:"logo"`
    +}
    +
    +func (o OrganizationCreateRequest) String() string {
    +	return Stringify(o)
    +}
    +
    +// OrganizationUpdateRequest type used to update a Packet organization
    +type OrganizationUpdateRequest struct {
    +	Name        *string `json:"name,omitempty"`
    +	Description *string `json:"description,omitempty"`
    +	Website     *string `json:"website,omitempty"`
    +	Twitter     *string `json:"twitter,omitempty"`
    +	Logo        *string `json:"logo,omitempty"`
    +}
    +
    +func (o OrganizationUpdateRequest) String() string {
    +	return Stringify(o)
    +}
    +
    +// OrganizationServiceOp implements OrganizationService
    +type OrganizationServiceOp struct {
    +	client *Client
    +}
    +
    +// List returns the user's organizations
    +func (s *OrganizationServiceOp) List() ([]Organization, *Response, error) {
    +	root := new(organizationsRoot)
    +
    +	resp, err := s.client.DoRequest("GET", organizationBasePath, nil, root)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return root.Organizations, resp, err
    +}
    +
    +// Get returns an organization by id
    +func (s *OrganizationServiceOp) Get(organizationID string) (*Organization, *Response, error) {
    +	path := fmt.Sprintf("%s/%s", organizationBasePath, organizationID)
    +	organization := new(Organization)
    +
    +	resp, err := s.client.DoRequest("GET", path, nil, organization)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return organization, resp, err
    +}
    +
    +// Create creates a new organization
    +func (s *OrganizationServiceOp) Create(createRequest *OrganizationCreateRequest) (*Organization, *Response, error) {
    +	organization := new(Organization)
    +
    +	resp, err := s.client.DoRequest("POST", organizationBasePath, createRequest, organization)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return organization, resp, err
    +}
    +
    +// Update updates an organization
    +func (s *OrganizationServiceOp) Update(id string, updateRequest *OrganizationUpdateRequest) (*Organization, *Response, error) {
    +	path := fmt.Sprintf("%s/%s", organizationBasePath, id)
    +	organization := new(Organization)
    +
    +	resp, err := s.client.DoRequest("PATCH", path, updateRequest, organization)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return organization, resp, err
    +}
    +
    +// Delete deletes an organization
    +func (s *OrganizationServiceOp) Delete(organizationID string) (*Response, error) {
    +	path := fmt.Sprintf("%s/%s", organizationBasePath, organizationID)
    +
    +	return s.client.DoRequest("DELETE", path, nil, nil)
    +}
    +
    +// ListPaymentMethods returns PaymentMethods for an organization
    +func (s *OrganizationServiceOp) ListPaymentMethods(organizationID string) ([]PaymentMethod, *Response, error) {
    +	url := fmt.Sprintf("%s/%s%s", organizationBasePath, organizationID, paymentMethodBasePath)
    +	root := new(paymentMethodsRoot)
    +
    +	resp, err := s.client.DoRequest("GET", url, nil, root)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return root.PaymentMethods, resp, err
    +}
    diff --git a/vendor/github.com/packethost/packngo/packngo.go b/vendor/github.com/packethost/packngo/packngo.go
    new file mode 100644
    index 000000000000..37b6c23a1066
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/packngo.go
    @@ -0,0 +1,306 @@
    +package packngo
    +
    +import (
    +	"bytes"
    +	"encoding/json"
    +	"fmt"
    +	"io"
    +	"io/ioutil"
    +	"log"
    +	"net/http"
    +	"net/http/httputil"
    +	"net/url"
    +	"os"
    +	"strconv"
    +	"strings"
    +	"time"
    +)
    +
    +const (
    +	packetTokenEnvVar = "PACKET_AUTH_TOKEN"
    +	libraryVersion    = "0.1.0"
    +	baseURL           = "https://api.packet.net/"
    +	userAgent         = "packngo/" + libraryVersion
    +	mediaType         = "application/json"
    +	debugEnvVar       = "PACKNGO_DEBUG"
    +
    +	headerRateLimit     = "X-RateLimit-Limit"
    +	headerRateRemaining = "X-RateLimit-Remaining"
    +	headerRateReset     = "X-RateLimit-Reset"
    +)
    +
    +// ListOptions specifies optional global API parameters
    +type ListOptions struct {
    +	// for paginated result sets, page of results to retrieve
    +	Page int `url:"page,omitempty"`
    +
    +	// for paginated result sets, the number of results to return per page
    +	PerPage int `url:"per_page,omitempty"`
    +
    +	// specify which resources you want to return as collections instead of references
    +	Includes string
    +}
    +
    +func (l *ListOptions) createURL() (url string) {
    +	if l.Includes != "" {
    +		url += fmt.Sprintf("include=%s", l.Includes)
    +	}
    +
    +	if l.Page != 0 {
    +		if url != "" {
    +			url += "&"
    +		}
    +		url += fmt.Sprintf("page=%d", l.Page)
    +	}
    +
    +	if l.PerPage != 0 {
    +		if url != "" {
    +			url += "&"
    +		}
    +		url += fmt.Sprintf("per_page=%d", l.PerPage)
    +	}
    +
    +	return
    +}
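    +
    +// For example (illustrative only), the options below produce the query string
    +// "include=facility&page=2&per_page=10":
    +//
    +//	opts := &ListOptions{Includes: "facility", Page: 2, PerPage: 10}
    +//	_ = opts.createURL()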
    +
    +// meta contains pagination information
    +type meta struct {
    +	Self           *Href `json:"self"`
    +	First          *Href `json:"first"`
    +	Last           *Href `json:"last"`
    +	Previous       *Href `json:"previous,omitempty"`
    +	Next           *Href `json:"next,omitempty"`
    +	Total          int   `json:"total"`
    +	CurrentPageNum int   `json:"current_page"`
    +	LastPageNum    int   `json:"last_page"`
    +}
    +
    +// Response is the http response from api calls
    +type Response struct {
    +	*http.Response
    +	Rate
    +}
    +
    +// Href is an API link
    +type Href struct {
    +	Href string `json:"href"`
    +}
    +
    +func (r *Response) populateRate() {
    +	// parse the rate limit headers and populate Response.Rate
    +	if limit := r.Header.Get(headerRateLimit); limit != "" {
    +		r.Rate.RequestLimit, _ = strconv.Atoi(limit)
    +	}
    +	if remaining := r.Header.Get(headerRateRemaining); remaining != "" {
    +		r.Rate.RequestsRemaining, _ = strconv.Atoi(remaining)
    +	}
    +	if reset := r.Header.Get(headerRateReset); reset != "" {
    +		if v, _ := strconv.ParseInt(reset, 10, 64); v != 0 {
    +			r.Rate.Reset = Timestamp{time.Unix(v, 0)}
    +		}
    +	}
    +}
    +
    +// ErrorResponse is the http response used on errors
    +type ErrorResponse struct {
    +	Response    *http.Response
    +	Errors      []string `json:"errors"`
    +	SingleError string   `json:"error"`
    +}
    +
    +func (r *ErrorResponse) Error() string {
    +	return fmt.Sprintf("%v %v: %d %v %v",
    +		r.Response.Request.Method, r.Response.Request.URL, r.Response.StatusCode, strings.Join(r.Errors, ", "), r.SingleError)
    +}
    +
    +// Client is the base API Client
    +type Client struct {
    +	client *http.Client
    +	debug  bool
    +
    +	BaseURL *url.URL
    +
    +	UserAgent     string
    +	ConsumerToken string
    +	APIKey        string
    +
    +	RateLimit Rate
    +
    +	// Packet Api Objects
    +	Plans                  PlanService
    +	Users                  UserService
    +	Emails                 EmailService
    +	SSHKeys                SSHKeyService
    +	Devices                DeviceService
    +	Projects               ProjectService
    +	Facilities             FacilityService
    +	OperatingSystems       OSService
    +	DeviceIPs              DeviceIPService
    +	DevicePorts            DevicePortService
    +	ProjectIPs             ProjectIPService
    +	ProjectVirtualNetworks ProjectVirtualNetworkService
    +	Volumes                VolumeService
    +	VolumeAttachments      VolumeAttachmentService
    +	SpotMarket             SpotMarketService
    +	Organizations          OrganizationService
    +}
    +
    +// NewRequest inits a new http request with the proper headers
    +func (c *Client) NewRequest(method, path string, body interface{}) (*http.Request, error) {
    +	// relative path to append to the endpoint url, no leading slash please
    +	rel, err := url.Parse(path)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	u := c.BaseURL.ResolveReference(rel)
    +
    +	// json encode the request body, if any
    +	buf := new(bytes.Buffer)
    +	if body != nil {
    +		err := json.NewEncoder(buf).Encode(body)
    +		if err != nil {
    +			return nil, err
    +		}
    +	}
    +
    +	req, err := http.NewRequest(method, u.String(), buf)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	req.Close = true
    +
    +	req.Header.Add("X-Auth-Token", c.APIKey)
    +	req.Header.Add("X-Consumer-Token", c.ConsumerToken)
    +
    +	req.Header.Add("Content-Type", mediaType)
    +	req.Header.Add("Accept", mediaType)
    +	req.Header.Add("User-Agent", userAgent)
    +	return req, nil
    +}
    +
    +// Do executes the http request
    +func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {
    +	resp, err := c.client.Do(req)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	defer resp.Body.Close()
    +
    +	response := Response{Response: resp}
    +	response.populateRate()
    +	if c.debug {
    +		o, _ := httputil.DumpResponse(response.Response, true)
    +		log.Printf("\n=======[RESPONSE]============\n%s\n\n", string(o))
    +	}
    +	c.RateLimit = response.Rate
    +
    +	err = checkResponse(resp)
    +	// if the response is an error, return the ErrorResponse
    +	if err != nil {
    +		return &response, err
    +	}
    +
    +	if v != nil {
    +		// if v implements the io.Writer interface, return the raw response
    +		if w, ok := v.(io.Writer); ok {
    +			io.Copy(w, resp.Body)
    +		} else {
    +			err = json.NewDecoder(resp.Body).Decode(v)
    +			if err != nil {
    +				return &response, err
    +			}
    +		}
    +	}
    +
    +	return &response, err
    +}
    +
    +// DoRequest is a convenience method; it calls NewRequest followed by Do.
    +// v is the interface to unmarshal the response JSON into.
    +func (c *Client) DoRequest(method, path string, body, v interface{}) (*Response, error) {
    +	req, err := c.NewRequest(method, path, body)
    +	if err != nil {
    +		return nil, err
    +	}
    +	// dump the outgoing request only once NewRequest has succeeded
    +	if c.debug {
    +		o, _ := httputil.DumpRequestOut(req, true)
    +		log.Printf("\n=======[REQUEST]=============\n%s\n", string(o))
    +	}
    +	return c.Do(req, v)
    +}
    +
    +// NewClient initializes and returns a Client that reads its API token from the
    +// PACKET_AUTH_TOKEN environment variable.
    +func NewClient() (*Client, error) {
    +	apiToken := os.Getenv(packetTokenEnvVar)
    +	if apiToken == "" {
    +		return nil, fmt.Errorf("you must export %s", packetTokenEnvVar)
    +	}
    +	c := NewClientWithAuth("packngo lib", apiToken, nil)
    +	return c, nil
    +
    +}
    +
    +// NewClientWithAuth initializes and returns a Client; use this to get an API Client to operate on.
    +// N.B.: Packet's API certificate requires Go 1.5+ to successfully parse. If you are using
    +// an older version of Go, pass in a custom http.Client with a custom TLS configuration
    +// that sets "InsecureSkipVerify" to "true"
    +func NewClientWithAuth(consumerToken string, apiKey string, httpClient *http.Client) *Client {
    +	client, _ := NewClientWithBaseURL(consumerToken, apiKey, httpClient, baseURL)
    +	return client
    +}
    +
    +// NewClientWithBaseURL returns a Client pointing to a nonstandard API URL, e.g.
    +// for mocking the remote API
    +func NewClientWithBaseURL(consumerToken string, apiKey string, httpClient *http.Client, apiBaseURL string) (*Client, error) {
    +	if httpClient == nil {
    +		// Don't fall back on http.DefaultClient as it's not nice to adjust state
    +		// implicitly. If the client wants to use http.DefaultClient, they can
    +		// pass it in explicitly.
    +		httpClient = &http.Client{}
    +	}
    +
    +	u, err := url.Parse(apiBaseURL)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	c := &Client{client: httpClient, BaseURL: u, UserAgent: userAgent, ConsumerToken: consumerToken, APIKey: apiKey}
    +	c.debug = os.Getenv(debugEnvVar) != ""
    +	c.Plans = &PlanServiceOp{client: c}
    +	c.Organizations = &OrganizationServiceOp{client: c}
    +	c.Users = &UserServiceOp{client: c}
    +	c.Emails = &EmailServiceOp{client: c}
    +	c.SSHKeys = &SSHKeyServiceOp{client: c}
    +	c.Devices = &DeviceServiceOp{client: c}
    +	c.Projects = &ProjectServiceOp{client: c}
    +	c.Facilities = &FacilityServiceOp{client: c}
    +	c.OperatingSystems = &OSServiceOp{client: c}
    +	c.DeviceIPs = &DeviceIPServiceOp{client: c}
    +	c.DevicePorts = &DevicePortServiceOp{client: c}
    +	c.ProjectVirtualNetworks = &ProjectVirtualNetworkServiceOp{client: c}
    +	c.ProjectIPs = &ProjectIPServiceOp{client: c}
    +	c.Volumes = &VolumeServiceOp{client: c}
    +	c.VolumeAttachments = &VolumeAttachmentServiceOp{client: c}
    +	c.SpotMarket = &SpotMarketServiceOp{client: c}
    +
    +	return c, nil
    +}
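    +
    +// Illustrative sketch of pointing the client at a mock server in tests (assumes the
    +// standard library net/http/httptest package; the consumer token and API key are placeholders):
    +//
    +//	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    +//		fmt.Fprint(w, `{"plans": []}`)
    +//	}))
    +//	defer ts.Close()
    +//	c, _ := NewClientWithBaseURL("consumer", "api-key", nil, ts.URL)
    +//	plans, _, _ := c.Plans.List()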
    +
    +func checkResponse(r *http.Response) error {
    +	// return if http status code is within 200 range
    +	if c := r.StatusCode; c >= 200 && c <= 299 {
    +		// response is good, return
    +		return nil
    +	}
    +
    +	errorResponse := &ErrorResponse{Response: r}
    +	data, err := ioutil.ReadAll(r.Body)
    +	// if the response has a body, populate the message in errorResponse
    +	if err == nil && len(data) > 0 {
    +		json.Unmarshal(data, errorResponse)
    +	}
    +
    +	return errorResponse
    +}
    diff --git a/vendor/github.com/packethost/packngo/payment_methods.go b/vendor/github.com/packethost/packngo/payment_methods.go
    new file mode 100644
    index 000000000000..3479f0920ba5
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/payment_methods.go
    @@ -0,0 +1,72 @@
    +package packngo
    +
    +// API documentation https://www.packet.net/developers/api/paymentmethods/
    +const paymentMethodBasePath = "/payment-methods"
    +
    +// PaymentMethodService interface defines available payment method methods
    +type PaymentMethodService interface {
    +	List() ([]PaymentMethod, *Response, error)
    +	Get(string) (*PaymentMethod, *Response, error)
    +	Create(*PaymentMethodCreateRequest) (*PaymentMethod, *Response, error)
    +	Update(string, *PaymentMethodUpdateRequest) (*PaymentMethod, *Response, error)
    +	Delete(string) (*Response, error)
    +}
    +
    +type paymentMethodsRoot struct {
    +	PaymentMethods []PaymentMethod `json:"payment_methods"`
    +}
    +
    +// PaymentMethod represents a Packet payment method of an organization
    +type PaymentMethod struct {
    +	ID             string         `json:"id"`
    +	Name           string         `json:"name,omitempty"`
    +	Created        string         `json:"created_at,omitempty"`
    +	Updated        string         `json:"updated_at,omitempty"`
    +	Nonce          string         `json:"nonce,omitempty"`
    +	Default        bool           `json:"default,omitempty"`
    +	Organization   Organization   `json:"organization,omitempty"`
    +	Projects       []Project      `json:"projects,omitempty"`
    +	Type           string         `json:"type,omitempty"`
    +	CardholderName string         `json:"cardholder_name,omitempty"`
    +	ExpMonth       string         `json:"expiration_month,omitempty"`
    +	ExpYear        string         `json:"expiration_year,omitempty"`
    +	Last4          string         `json:"last_4,omitempty"`
    +	BillingAddress BillingAddress `json:"billing_address,omitempty"`
    +	URL            string         `json:"href,omitempty"`
    +}
    +
    +func (pm PaymentMethod) String() string {
    +	return Stringify(pm)
    +}
    +
    +// PaymentMethodCreateRequest type used to create a Packet payment method of an organization
    +type PaymentMethodCreateRequest struct {
    +	Name           string `json:"name"`
    +	Nonce          string `json:"nonce"`
    +	CardholderName string `json:"cardholder_name,omitempty"`
    +	ExpMonth       string `json:"expiration_month,omitempty"`
    +	ExpYear        string `json:"expiration_year,omitempty"`
    +	BillingAddress string `json:"billing_address,omitempty"`
    +}
    +
    +func (pm PaymentMethodCreateRequest) String() string {
    +	return Stringify(pm)
    +}
    +
    +// PaymentMethodUpdateRequest type used to update a Packet payment method of an organization
    +type PaymentMethodUpdateRequest struct {
    +	Name           *string `json:"name,omitempty"`
    +	CardholderName *string `json:"cardholder_name,omitempty"`
    +	ExpMonth       *string `json:"expiration_month,omitempty"`
    +	ExpYear        *string `json:"expiration_year,omitempty"`
    +	BillingAddress *string `json:"billing_address,omitempty"`
    +}
    +
    +func (pm PaymentMethodUpdateRequest) String() string {
    +	return Stringify(pm)
    +}
    +
    +// PaymentMethodServiceOp implements PaymentMethodService
    +type PaymentMethodServiceOp struct {
    +	client *Client
    +}
    diff --git a/vendor/github.com/packethost/packngo/plans.go b/vendor/github.com/packethost/packngo/plans.go
    new file mode 100644
    index 000000000000..148a2a5ce7a2
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/plans.go
    @@ -0,0 +1,117 @@
    +package packngo
    +
    +const planBasePath = "/plans"
    +
    +// PlanService interface defines available plan methods
    +type PlanService interface {
    +	List() ([]Plan, *Response, error)
    +}
    +
    +type planRoot struct {
    +	Plans []Plan `json:"plans"`
    +}
    +
    +// Plan represents a Packet service plan
    +type Plan struct {
    +	ID          string   `json:"id"`
    +	Slug        string   `json:"slug,omitempty"`
    +	Name        string   `json:"name,omitempty"`
    +	Description string   `json:"description,omitempty"`
    +	Line        string   `json:"line,omitempty"`
    +	Specs       *Specs   `json:"specs,omitempty"`
    +	Pricing     *Pricing `json:"pricing,omitempty"`
    +}
    +
    +func (p Plan) String() string {
    +	return Stringify(p)
    +}
    +
    +// Specs - the server specs for a plan
    +type Specs struct {
    +	Cpus     []*Cpus   `json:"cpus,omitempty"`
    +	Memory   *Memory   `json:"memory,omitempty"`
    +	Drives   []*Drives `json:"drives,omitempty"`
    +	Nics     []*Nics   `json:"nics,omitempty"`
    +	Features *Features `json:"features,omitempty"`
    +}
    +
    +func (s Specs) String() string {
    +	return Stringify(s)
    +}
    +
    +// Cpus - the CPU config details for specs on a plan
    +type Cpus struct {
    +	Count int    `json:"count,omitempty"`
    +	Type  string `json:"type,omitempty"`
    +}
    +
    +func (c Cpus) String() string {
    +	return Stringify(c)
    +}
    +
    +// Memory - the RAM config details for specs on a plan
    +type Memory struct {
    +	Total string `json:"total,omitempty"`
    +}
    +
    +func (m Memory) String() string {
    +	return Stringify(m)
    +}
    +
    +// Drives - the storage config details for specs on a plan
    +type Drives struct {
    +	Count int    `json:"count,omitempty"`
    +	Size  string `json:"size,omitempty"`
    +	Type  string `json:"type,omitempty"`
    +}
    +
    +func (d Drives) String() string {
    +	return Stringify(d)
    +}
    +
    +// Nics - the network hardware details for specs on a plan
    +type Nics struct {
    +	Count int    `json:"count,omitempty"`
    +	Type  string `json:"type,omitempty"`
    +}
    +
    +func (n Nics) String() string {
    +	return Stringify(n)
    +}
    +
    +// Features - other features in the specs for a plan
    +type Features struct {
    +	Raid bool `json:"raid,omitempty"`
    +	Txt  bool `json:"txt,omitempty"`
    +}
    +
    +func (f Features) String() string {
    +	return Stringify(f)
    +}
    +
    +// Pricing - the pricing options on a plan
    +type Pricing struct {
    +	Hourly  float32 `json:"hourly,omitempty"`
    +	Monthly float32 `json:"monthly,omitempty"`
    +}
    +
    +func (p Pricing) String() string {
    +	return Stringify(p)
    +}
    +
    +// PlanServiceOp implements PlanService
    +type PlanServiceOp struct {
    +	client *Client
    +}
    +
    +// List method returns all available plans
    +func (s *PlanServiceOp) List() ([]Plan, *Response, error) {
    +	root := new(planRoot)
    +
    +	resp, err := s.client.DoRequest("GET", planBasePath, nil, root)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return root.Plans, resp, err
    +}
    diff --git a/vendor/github.com/packethost/packngo/ports.go b/vendor/github.com/packethost/packngo/ports.go
    new file mode 100644
    index 000000000000..c0f79d38edba
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/ports.go
    @@ -0,0 +1,225 @@
    +package packngo
    +
    +import (
    +	"fmt"
    +)
    +
    +const portBasePath = "/ports"
    +
    +type NetworkType int
    +
    +const (
    +	NetworkL3 NetworkType = iota
    +	NetworkHybrid
    +	NetworkL2Bonded
    +	NetworkL2Individual
    +	NetworkUnknown
    +)
    +
    +// DevicePortService handles operations on a port which belongs to a particular device
    +type DevicePortService interface {
    +	Assign(*PortAssignRequest) (*Port, *Response, error)
    +	Unassign(*PortAssignRequest) (*Port, *Response, error)
    +	Bond(*BondRequest) (*Port, *Response, error)
    +	Disbond(*DisbondRequest) (*Port, *Response, error)
    +	PortToLayerTwo(string) (*Port, *Response, error)
    +	PortToLayerThree(string) (*Port, *Response, error)
    +	DeviceToLayerTwo(string) (*Device, error)
    +	DeviceToLayerThree(string) (*Device, error)
    +	DeviceNetworkType(string) (NetworkType, error)
    +	GetBondPort(string) (*Port, error)
    +	GetPortByName(string, string) (*Port, error)
    +}
    +
    +type PortData struct {
    +	MAC    string `json:"mac"`
    +	Bonded bool   `json:"bonded"`
    +}
    +
    +type Port struct {
    +	ID                      string           `json:"id"`
    +	Type                    string           `json:"type"`
    +	Name                    string           `json:"name"`
    +	Data                    PortData         `json:"data"`
    +	AttachedVirtualNetworks []VirtualNetwork `json:"virtual_networks"`
    +}
    +
    +type AddressRequest struct {
    +	AddressFamily int  `json:"address_family"`
    +	Public        bool `json:"public"`
    +}
    +
    +type BackToL3Request struct {
    +	RequestIPs []AddressRequest `json:"request_ips"`
    +}
    +
    +type DevicePortServiceOp struct {
    +	client *Client
    +}
    +
    +type PortAssignRequest struct {
    +	PortID           string `json:"id"`
    +	VirtualNetworkID string `json:"vnid"`
    +}
    +
    +type BondRequest struct {
    +	PortID     string `json:"id"`
    +	BulkEnable bool   `json:"bulk_enable"`
    +}
    +
    +type DisbondRequest struct {
    +	PortID      string `json:"id"`
    +	BulkDisable bool   `json:"bulk_disable"`
    +}
    +
    +// GetBondPort returns the bond port of a device.
    +func (i *DevicePortServiceOp) GetBondPort(deviceID string) (*Port, error) {
    +	device, _, err := i.client.Devices.Get(deviceID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	for _, port := range device.NetworkPorts {
    +		if port.Type == "NetworkBondPort" {
    +			return &port, nil
    +		}
    +	}
    +
    +	return nil, fmt.Errorf("No bonded port found in device %s", deviceID)
    +}
    +
    +// GetPortByName returns the device port with the given name.
    +func (i *DevicePortServiceOp) GetPortByName(deviceID, name string) (*Port, error) {
    +	device, _, err := i.client.Devices.Get(deviceID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	for _, port := range device.NetworkPorts {
    +		if port.Name == name {
    +			return &port, nil
    +		}
    +	}
    +
    +	return nil, fmt.Errorf("Port %s not found in device %s", name, deviceID)
    +}
    +
    +func (i *DevicePortServiceOp) Assign(par *PortAssignRequest) (*Port, *Response, error) {
    +	path := fmt.Sprintf("%s/%s/assign", portBasePath, par.PortID)
    +	return i.portAction(path, par)
    +}
    +
    +func (i *DevicePortServiceOp) Unassign(par *PortAssignRequest) (*Port, *Response, error) {
    +	path := fmt.Sprintf("%s/%s/unassign", portBasePath, par.PortID)
    +	return i.portAction(path, par)
    +}
    +
    +func (i *DevicePortServiceOp) Bond(br *BondRequest) (*Port, *Response, error) {
    +	path := fmt.Sprintf("%s/%s/bond", portBasePath, br.PortID)
    +	return i.portAction(path, br)
    +}
    +
    +func (i *DevicePortServiceOp) Disbond(dr *DisbondRequest) (*Port, *Response, error) {
    +	path := fmt.Sprintf("%s/%s/disbond", portBasePath, dr.PortID)
    +	return i.portAction(path, dr)
    +}
    +
    +func (i *DevicePortServiceOp) portAction(path string, req interface{}) (*Port, *Response, error) {
    +	port := new(Port)
    +
    +	resp, err := i.client.DoRequest("POST", path, req, port)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return port, resp, err
    +}
    +
    +func (i *DevicePortServiceOp) PortToLayerTwo(portID string) (*Port, *Response, error) {
    +	path := fmt.Sprintf("%s/%s/convert/layer-2", portBasePath, portID)
    +	port := new(Port)
    +
    +	resp, err := i.client.DoRequest("POST", path, nil, port)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return port, resp, err
    +}
    +
    +func (i *DevicePortServiceOp) PortToLayerThree(portID string) (*Port, *Response, error) {
    +	path := fmt.Sprintf("%s/%s/convert/layer-3", portBasePath, portID)
    +	port := new(Port)
    +
    +	req := BackToL3Request{
    +		RequestIPs: []AddressRequest{
    +			AddressRequest{AddressFamily: 4, Public: true},
    +			AddressRequest{AddressFamily: 4, Public: false},
    +			AddressRequest{AddressFamily: 6, Public: true},
    +		},
    +	}
    +
    +	resp, err := i.client.DoRequest("POST", path, &req, port)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return port, resp, err
    +}
    +
    +func (i *DevicePortServiceOp) DeviceNetworkType(deviceID string) (NetworkType, error) {
    +	d, _, err := i.client.Devices.Get(deviceID)
    +	if err != nil {
    +		return NetworkUnknown, err
    +	}
    +	if d.Plan.Slug == "baremetal_0" || d.Plan.Slug == "baremetal_1" {
    +		return NetworkL3, nil
    +	}
    +	if d.Plan.Slug == "baremetal_1e" {
    +		return NetworkHybrid, nil
    +	}
    +	if len(d.NetworkPorts) < 1 {
    +		// really?
    +		return NetworkL2Individual, nil
    +	}
    +	if d.NetworkPorts[0].Data.Bonded {
    +		if d.NetworkPorts[2].Data.Bonded {
    +			for _, ip := range d.Network {
    +				if ip.Management {
    +					return NetworkL3, nil
    +				}
    +			}
    +			return NetworkL2Bonded, nil
    +		} else {
    +			return NetworkHybrid, nil
    +		}
    +	}
    +	return NetworkL2Individual, nil
    +}
    +
    +// DeviceToLayerThree converts device to L3 networking.
    +func (i *DevicePortServiceOp) DeviceToLayerThree(deviceID string) (*Device, error) {
    +	// hopefully all the VLANs are unassigned at this point
    +	bond0, err := i.client.DevicePorts.GetBondPort(deviceID)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	bond0, _, err = i.client.DevicePorts.PortToLayerThree(bond0.ID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	d, _, err := i.client.Devices.Get(deviceID)
    +	return d, err
    +}
    +
    +// DeviceToLayerTwo converts device to L2 networking. Use bond0 to attach VLAN.
    +func (i *DevicePortServiceOp) DeviceToLayerTwo(deviceID string) (*Device, error) {
    +	bond0, err := i.client.DevicePorts.GetBondPort(deviceID)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	bond0, _, err = i.client.DevicePorts.PortToLayerTwo(bond0.ID)
    +	if err != nil {
    +		return nil, err
    +	}
    +	d, _, err := i.client.Devices.Get(deviceID)
    +	return d, err
    +
    +}
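    +
    +// Illustrative sketch only (`client` is assumed to be a configured *Client and the IDs are
    +// placeholders): convert a device to layer 2, then attach a VLAN to its bond port:
    +//
    +//	d, _ := client.DevicePorts.DeviceToLayerTwo("device-uuid")
    +//	bond, _ := client.DevicePorts.GetBondPort(d.ID)
    +//	client.DevicePorts.Assign(&PortAssignRequest{PortID: bond.ID, VirtualNetworkID: "vlan-uuid"})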
    diff --git a/vendor/github.com/packethost/packngo/projects.go b/vendor/github.com/packethost/packngo/projects.go
    new file mode 100644
    index 000000000000..814b5e3c1f26
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/projects.go
    @@ -0,0 +1,138 @@
    +package packngo
    +
    +import "fmt"
    +
    +const projectBasePath = "/projects"
    +
    +// ProjectService interface defines available project methods
    +type ProjectService interface {
    +	List(listOpt *ListOptions) ([]Project, *Response, error)
    +	Get(string) (*Project, *Response, error)
    +	Create(*ProjectCreateRequest) (*Project, *Response, error)
    +	Update(string, *ProjectUpdateRequest) (*Project, *Response, error)
    +	Delete(string) (*Response, error)
    +}
    +
    +type projectsRoot struct {
    +	Projects []Project `json:"projects"`
    +	Meta     meta      `json:"meta"`
    +}
    +
    +// Project represents a Packet project
    +type Project struct {
    +	ID            string        `json:"id"`
    +	Name          string        `json:"name,omitempty"`
    +	Organization  Organization  `json:"organization,omitempty"`
    +	Created       string        `json:"created_at,omitempty"`
    +	Updated       string        `json:"updated_at,omitempty"`
    +	Users         []User        `json:"members,omitempty"`
    +	Devices       []Device      `json:"devices,omitempty"`
    +	SSHKeys       []SSHKey      `json:"ssh_keys,omitempty"`
    +	URL           string        `json:"href,omitempty"`
    +	PaymentMethod PaymentMethod `json:"payment_method,omitempty"`
    +}
    +
    +func (p Project) String() string {
    +	return Stringify(p)
    +}
    +
    +// ProjectCreateRequest type used to create a Packet project
    +type ProjectCreateRequest struct {
    +	Name            string `json:"name"`
    +	PaymentMethodID string `json:"payment_method_id,omitempty"`
    +	OrganizationID  string `json:"organization_id,omitempty"`
    +}
    +
    +func (p ProjectCreateRequest) String() string {
    +	return Stringify(p)
    +}
    +
    +// ProjectUpdateRequest type used to update a Packet project
    +type ProjectUpdateRequest struct {
    +	Name            *string `json:"name,omitempty"`
    +	PaymentMethodID *string `json:"payment_method_id,omitempty"`
    +}
    +
    +func (p ProjectUpdateRequest) String() string {
    +	return Stringify(p)
    +}
    +
    +// ProjectServiceOp implements ProjectService
    +type ProjectServiceOp struct {
    +	client *Client
    +}
    +
    +// List returns the user's projects
    +func (s *ProjectServiceOp) List(listOpt *ListOptions) (projects []Project, resp *Response, err error) {
    +	var params string
    +	if listOpt != nil {
    +		params = listOpt.createURL()
    +	}
    +	root := new(projectsRoot)
    +
    +	path := fmt.Sprintf("%s?%s", projectBasePath, params)
    +
    +	for {
    +		resp, err = s.client.DoRequest("GET", path, nil, root)
    +		if err != nil {
    +			return nil, resp, err
    +		}
    +
    +		projects = append(projects, root.Projects...)
    +
    +		if root.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) {
    +			path = root.Meta.Next.Href
    +			if params != "" {
    +				path = fmt.Sprintf("%s&%s", path, params)
    +			}
    +			continue
    +		}
    +
    +		return
    +	}
    +}
    +
    +// Get returns a project by id
    +func (s *ProjectServiceOp) Get(projectID string) (*Project, *Response, error) {
    +	path := fmt.Sprintf("%s/%s", projectBasePath, projectID)
    +	project := new(Project)
    +
    +	resp, err := s.client.DoRequest("GET", path, nil, project)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return project, resp, err
    +}
    +
    +// Create creates a new project
    +func (s *ProjectServiceOp) Create(createRequest *ProjectCreateRequest) (*Project, *Response, error) {
    +	project := new(Project)
    +
    +	resp, err := s.client.DoRequest("POST", projectBasePath, createRequest, project)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return project, resp, err
    +}
    +
    +// Update updates a project
    +func (s *ProjectServiceOp) Update(id string, updateRequest *ProjectUpdateRequest) (*Project, *Response, error) {
    +	path := fmt.Sprintf("%s/%s", projectBasePath, id)
    +	project := new(Project)
    +
    +	resp, err := s.client.DoRequest("PATCH", path, updateRequest, project)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return project, resp, err
    +}
    +
    +// Delete deletes a project
    +func (s *ProjectServiceOp) Delete(projectID string) (*Response, error) {
    +	path := fmt.Sprintf("%s/%s", projectBasePath, projectID)
    +
    +	return s.client.DoRequest("DELETE", path, nil, nil)
    +}
    diff --git a/vendor/github.com/packethost/packngo/rate.go b/vendor/github.com/packethost/packngo/rate.go
    new file mode 100644
    index 000000000000..965967d4557c
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/rate.go
    @@ -0,0 +1,12 @@
    +package packngo
    +
    +// Rate provides the API request rate limit details
    +type Rate struct {
    +	RequestLimit      int       `json:"request_limit"`
    +	RequestsRemaining int       `json:"requests_remaining"`
    +	Reset             Timestamp `json:"rate_reset"`
    +}
    +
    +func (r Rate) String() string {
    +	return Stringify(r)
    +}
    diff --git a/vendor/github.com/packethost/packngo/spotmarket.go b/vendor/github.com/packethost/packngo/spotmarket.go
    new file mode 100644
    index 000000000000..5dfb7d559b1c
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/spotmarket.go
    @@ -0,0 +1,39 @@
    +package packngo
    +
    +const spotMarketBasePath = "/market/spot/prices"
    +
+// SpotMarketService exposes Spot Market methods
    +type SpotMarketService interface {
    +	Prices() (PriceMap, *Response, error)
    +}
    +
    +// SpotMarketServiceOp implements SpotMarketService
    +type SpotMarketServiceOp struct {
    +	client *Client
    +}
    +
    +// PriceMap is a map of [facility][plan]-> float Price
    +type PriceMap map[string]map[string]float64
    +
    +// Prices gets current PriceMap from the API
    +func (s *SpotMarketServiceOp) Prices() (PriceMap, *Response, error) {
    +	root := new(struct {
    +		SMPs map[string]map[string]struct {
    +			Price float64 `json:"price"`
    +		} `json:"spot_market_prices"`
    +	})
    +
    +	resp, err := s.client.DoRequest("GET", spotMarketBasePath, nil, root)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	prices := make(PriceMap)
    +	for facility, planMap := range root.SMPs {
    +		prices[facility] = map[string]float64{}
    +		for plan, v := range planMap {
    +			prices[facility][plan] = v.Price
    +		}
    +	}
    +	return prices, resp, err
    +}
    diff --git a/vendor/github.com/packethost/packngo/sshkeys.go b/vendor/github.com/packethost/packngo/sshkeys.go
    new file mode 100644
    index 000000000000..260bf0876420
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/sshkeys.go
    @@ -0,0 +1,138 @@
    +package packngo
    +
    +import "fmt"
    +
    +const (
    +	sshKeyBasePath = "/ssh-keys"
    +)
    +
+// SSHKeyService interface defines available ssh key methods
    +type SSHKeyService interface {
    +	List() ([]SSHKey, *Response, error)
    +	ProjectList(string) ([]SSHKey, *Response, error)
    +	Get(string) (*SSHKey, *Response, error)
    +	Create(*SSHKeyCreateRequest) (*SSHKey, *Response, error)
    +	Update(string, *SSHKeyUpdateRequest) (*SSHKey, *Response, error)
    +	Delete(string) (*Response, error)
    +}
    +
    +type sshKeyRoot struct {
    +	SSHKeys []SSHKey `json:"ssh_keys"`
    +}
    +
    +// SSHKey represents a user's ssh key
    +type SSHKey struct {
    +	ID          string `json:"id"`
    +	Label       string `json:"label"`
    +	Key         string `json:"key"`
    +	FingerPrint string `json:"fingerprint"`
    +	Created     string `json:"created_at"`
    +	Updated     string `json:"updated_at"`
    +	User        User   `json:"user,omitempty"`
    +	URL         string `json:"href,omitempty"`
    +}
    +
    +func (s SSHKey) String() string {
    +	return Stringify(s)
    +}
    +
    +// SSHKeyCreateRequest type used to create an ssh key
    +type SSHKeyCreateRequest struct {
    +	Label     string `json:"label"`
    +	Key       string `json:"key"`
    +	ProjectID string `json:"-"`
    +}
    +
    +func (s SSHKeyCreateRequest) String() string {
    +	return Stringify(s)
    +}
    +
    +// SSHKeyUpdateRequest type used to update an ssh key
    +type SSHKeyUpdateRequest struct {
    +	Label *string `json:"label,omitempty"`
    +	Key   *string `json:"key,omitempty"`
    +}
    +
    +func (s SSHKeyUpdateRequest) String() string {
    +	return Stringify(s)
    +}
    +
    +// SSHKeyServiceOp implements SSHKeyService
    +type SSHKeyServiceOp struct {
    +	client *Client
    +}
    +
    +func (s *SSHKeyServiceOp) list(url string) ([]SSHKey, *Response, error) {
    +	root := new(sshKeyRoot)
    +
    +	resp, err := s.client.DoRequest("GET", url, nil, root)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return root.SSHKeys, resp, err
    +}
    +
    +// ProjectList lists ssh keys of a project
    +func (s *SSHKeyServiceOp) ProjectList(projectID string) ([]SSHKey, *Response, error) {
    +	return s.list(fmt.Sprintf("%s/%s%s", projectBasePath, projectID, sshKeyBasePath))
    +
    +}
    +
    +// List returns a user's ssh keys
    +func (s *SSHKeyServiceOp) List() ([]SSHKey, *Response, error) {
    +	return s.list(sshKeyBasePath)
    +}
    +
    +// Get returns an ssh key by id
    +func (s *SSHKeyServiceOp) Get(sshKeyID string) (*SSHKey, *Response, error) {
    +	path := fmt.Sprintf("%s/%s", sshKeyBasePath, sshKeyID)
    +	sshKey := new(SSHKey)
    +
    +	resp, err := s.client.DoRequest("GET", path, nil, sshKey)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return sshKey, resp, err
    +}
    +
    +// Create creates a new ssh key
    +func (s *SSHKeyServiceOp) Create(createRequest *SSHKeyCreateRequest) (*SSHKey, *Response, error) {
    +	path := sshKeyBasePath
    +	if createRequest.ProjectID != "" {
    +		path = fmt.Sprintf("%s/%s%s", projectBasePath, createRequest.ProjectID, sshKeyBasePath)
    +	}
    +	sshKey := new(SSHKey)
    +
    +	resp, err := s.client.DoRequest("POST", path, createRequest, sshKey)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return sshKey, resp, err
    +}
    +
    +// Update updates an ssh key
    +func (s *SSHKeyServiceOp) Update(id string, updateRequest *SSHKeyUpdateRequest) (*SSHKey, *Response, error) {
    +	if updateRequest.Label == nil && updateRequest.Key == nil {
    +		return nil, nil, fmt.Errorf("You must set either Label or Key string for SSH Key update")
    +	}
    +	path := fmt.Sprintf("%s/%s", sshKeyBasePath, id)
    +
    +	sshKey := new(SSHKey)
    +
    +	resp, err := s.client.DoRequest("PATCH", path, updateRequest, sshKey)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return sshKey, resp, err
    +}
    +
    +// Delete deletes an ssh key
    +func (s *SSHKeyServiceOp) Delete(sshKeyID string) (*Response, error) {
    +	path := fmt.Sprintf("%s/%s", sshKeyBasePath, sshKeyID)
    +
    +	return s.client.DoRequest("DELETE", path, nil, nil)
    +}
    diff --git a/vendor/github.com/packethost/packngo/timestamp.go b/vendor/github.com/packethost/packngo/timestamp.go
    new file mode 100644
    index 000000000000..c3320ed62eb0
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/timestamp.go
    @@ -0,0 +1,35 @@
    +package packngo
    +
    +import (
    +	"strconv"
    +	"time"
    +)
    +
    +// Timestamp represents a time that can be unmarshalled from a JSON string
    +// formatted as either an RFC3339 or Unix timestamp. All
    +// exported methods of time.Time can be called on Timestamp.
    +type Timestamp struct {
    +	time.Time
    +}
    +
    +func (t Timestamp) String() string {
    +	return t.Time.String()
    +}
    +
    +// UnmarshalJSON implements the json.Unmarshaler interface.
    +// Time is expected in RFC3339 or Unix format.
    +func (t *Timestamp) UnmarshalJSON(data []byte) (err error) {
    +	str := string(data)
    +	i, err := strconv.ParseInt(str, 10, 64)
    +	if err == nil {
    +		t.Time = time.Unix(i, 0)
    +	} else {
    +		t.Time, err = time.Parse(`"`+time.RFC3339+`"`, str)
    +	}
    +	return
    +}
    +
    +// Equal reports whether t and u are equal based on time.Equal
    +func (t Timestamp) Equal(u Timestamp) bool {
    +	return t.Time.Equal(u.Time)
    +}
    diff --git a/vendor/github.com/packethost/packngo/user.go b/vendor/github.com/packethost/packngo/user.go
    new file mode 100644
    index 000000000000..412db905e4e3
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/user.go
    @@ -0,0 +1,64 @@
    +package packngo
+
+import "fmt"
+
    +const userBasePath = "/users"
    +const userPath = "/user"
    +
    +// UserService interface defines available user methods
    +type UserService interface {
    +	Get(string) (*User, *Response, error)
    +	Current() (*User, *Response, error)
    +}
    +
    +// User represents a Packet user
    +type User struct {
    +	ID                    string  `json:"id"`
    +	FirstName             string  `json:"first_name,omitempty"`
    +	LastName              string  `json:"last_name,omitempty"`
    +	FullName              string  `json:"full_name,omitempty"`
    +	Email                 string  `json:"email,omitempty"`
    +	TwoFactor             string  `json:"two_factor_auth,omitempty"`
    +	DefaultOrganizationID string  `json:"default_organization_id,omitempty"`
    +	AvatarURL             string  `json:"avatar_url,omitempty"`
+	Facebook              string  `json:"facebook,omitempty"`
+	Twitter               string  `json:"twitter,omitempty"`
    +	LinkedIn              string  `json:"linkedin,omitempty"`
    +	Created               string  `json:"created_at,omitempty"`
    +	Updated               string  `json:"updated_at,omitempty"`
    +	TimeZone              string  `json:"timezone,omitempty"`
    +	Emails                []Email `json:"emails,omitempty"`
    +	PhoneNumber           string  `json:"phone_number,omitempty"`
    +	URL                   string  `json:"href,omitempty"`
    +}
    +
    +func (u User) String() string {
    +	return Stringify(u)
    +}
    +
    +// UserServiceOp implements UserService
    +type UserServiceOp struct {
    +	client *Client
    +}
    +
    +// Get method gets a user by userID
    +func (s *UserServiceOp) Get(userID string) (*User, *Response, error) {
    +	user := new(User)
    +
+	path := fmt.Sprintf("%s/%s", userBasePath, userID)
+	resp, err := s.client.DoRequest("GET", path, nil, user)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return user, resp, err
    +}
    +
+// Current returns the user object for the currently logged-in user.
    +func (s *UserServiceOp) Current() (*User, *Response, error) {
    +	user := new(User)
    +
    +	resp, err := s.client.DoRequest("GET", userPath, nil, user)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return user, resp, err
    +}
    diff --git a/vendor/github.com/packethost/packngo/utils.go b/vendor/github.com/packethost/packngo/utils.go
    new file mode 100644
    index 000000000000..57e5ef163a39
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/utils.go
    @@ -0,0 +1,91 @@
    +package packngo
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"io"
    +	"reflect"
    +)
    +
    +var timestampType = reflect.TypeOf(Timestamp{})
    +
    +// Stringify creates a string representation of the provided message
    +func Stringify(message interface{}) string {
    +	var buf bytes.Buffer
    +	v := reflect.ValueOf(message)
    +	stringifyValue(&buf, v)
    +	return buf.String()
    +}
    +
    +// StreamToString converts a reader to a string
    +func StreamToString(stream io.Reader) string {
    +	buf := new(bytes.Buffer)
    +	buf.ReadFrom(stream)
    +	return buf.String()
    +}
    +
+// stringifyValue was graciously cargoculted from the goprotobuf library
    +func stringifyValue(w io.Writer, val reflect.Value) {
    +	if val.Kind() == reflect.Ptr && val.IsNil() {
+		w.Write([]byte("<nil>"))
    +		return
    +	}
    +
    +	v := reflect.Indirect(val)
    +
    +	switch v.Kind() {
    +	case reflect.String:
    +		fmt.Fprintf(w, `"%s"`, v)
    +	case reflect.Slice:
    +		w.Write([]byte{'['})
    +		for i := 0; i < v.Len(); i++ {
    +			if i > 0 {
    +				w.Write([]byte{' '})
    +			}
    +
    +			stringifyValue(w, v.Index(i))
    +		}
    +
    +		w.Write([]byte{']'})
    +		return
    +	case reflect.Struct:
    +		if v.Type().Name() != "" {
    +			w.Write([]byte(v.Type().String()))
    +		}
    +
    +		// special handling of Timestamp values
    +		if v.Type() == timestampType {
    +			fmt.Fprintf(w, "{%s}", v.Interface())
    +			return
    +		}
    +
    +		w.Write([]byte{'{'})
    +
    +		var sep bool
    +		for i := 0; i < v.NumField(); i++ {
    +			fv := v.Field(i)
    +			if fv.Kind() == reflect.Ptr && fv.IsNil() {
    +				continue
    +			}
    +			if fv.Kind() == reflect.Slice && fv.IsNil() {
    +				continue
    +			}
    +
    +			if sep {
    +				w.Write([]byte(", "))
    +			} else {
    +				sep = true
    +			}
    +
    +			w.Write([]byte(v.Type().Field(i).Name))
    +			w.Write([]byte{':'})
    +			stringifyValue(w, fv)
    +		}
    +
    +		w.Write([]byte{'}'})
    +	default:
    +		if v.CanInterface() {
    +			fmt.Fprint(w, v.Interface())
    +		}
    +	}
    +}
    diff --git a/vendor/github.com/packethost/packngo/virtualnetworks.go b/vendor/github.com/packethost/packngo/virtualnetworks.go
    new file mode 100644
    index 000000000000..ec0b9fc652ea
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/virtualnetworks.go
    @@ -0,0 +1,81 @@
    +package packngo
    +
    +import (
    +	"fmt"
    +)
    +
    +const virtualNetworkBasePath = "/virtual-networks"
    +
+// ProjectVirtualNetworkService handles operations on a project's virtual networks (VLANs)
    +type ProjectVirtualNetworkService interface {
    +	List(projectID string) (*VirtualNetworkListResponse, *Response, error)
    +	Create(*VirtualNetworkCreateRequest) (*VirtualNetwork, *Response, error)
    +	Delete(virtualNetworkID string) (*Response, error)
    +}
    +
    +type VirtualNetwork struct {
    +	ID           string `json:"id"`
    +	Description  string `json:"description,omitempty"`
    +	VXLAN        int    `json:"vxlan,omitempty"`
    +	FacilityCode string `json:"facility_code,omitempty"`
    +	CreatedAt    string `json:"created_at,omitempty"`
    +	Href         string `json:"href"`
    +}
    +
    +type ProjectVirtualNetworkServiceOp struct {
    +	client *Client
    +}
    +
    +type VirtualNetworkListResponse struct {
    +	VirtualNetworks []VirtualNetwork `json:"virtual_networks"`
    +}
    +
    +func (i *ProjectVirtualNetworkServiceOp) List(projectID string) (*VirtualNetworkListResponse, *Response, error) {
    +	path := fmt.Sprintf("%s/%s%s", projectBasePath, projectID, virtualNetworkBasePath)
    +	output := new(VirtualNetworkListResponse)
    +
    +	resp, err := i.client.DoRequest("GET", path, nil, output)
    +	if err != nil {
    +		return nil, nil, err
    +	}
    +
    +	return output, resp, nil
    +}
    +
    +type VirtualNetworkCreateRequest struct {
    +	ProjectID   string `json:"project_id"`
    +	Description string `json:"description"`
    +	Facility    string `json:"facility"`
    +	VXLAN       int    `json:"vxlan"`
    +	VLAN        int    `json:"vlan"`
    +}
    +
    +type VirtualNetworkCreateResponse struct {
    +	VirtualNetwork VirtualNetwork `json:"virtual_networks"`
    +}
    +
    +func (i *ProjectVirtualNetworkServiceOp) Create(input *VirtualNetworkCreateRequest) (*VirtualNetwork, *Response, error) {
    +	// TODO: May need to add timestamp to output from 'post' request
    +	// for the 'created_at' attribute of VirtualNetwork struct since
    +	// API response doesn't include it
    +	path := fmt.Sprintf("%s/%s%s", projectBasePath, input.ProjectID, virtualNetworkBasePath)
    +	output := new(VirtualNetwork)
    +
    +	resp, err := i.client.DoRequest("POST", path, input, output)
    +	if err != nil {
    +		return nil, nil, err
    +	}
    +
    +	return output, resp, nil
    +}
    +
    +func (i *ProjectVirtualNetworkServiceOp) Delete(virtualNetworkID string) (*Response, error) {
    +	path := fmt.Sprintf("%s/%s", virtualNetworkBasePath, virtualNetworkID)
    +
    +	resp, err := i.client.DoRequest("DELETE", path, nil, nil)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	return resp, nil
    +}
    diff --git a/vendor/github.com/packethost/packngo/volumes.go b/vendor/github.com/packethost/packngo/volumes.go
    new file mode 100644
    index 000000000000..772672a37b26
    --- /dev/null
    +++ b/vendor/github.com/packethost/packngo/volumes.go
    @@ -0,0 +1,239 @@
    +package packngo
    +
    +import "fmt"
    +
    +const (
    +	volumeBasePath      = "/storage"
    +	attachmentsBasePath = "/attachments"
    +)
    +
    +// VolumeService interface defines available Volume methods
    +type VolumeService interface {
    +	List(string, *ListOptions) ([]Volume, *Response, error)
    +	Get(string) (*Volume, *Response, error)
    +	Update(string, *VolumeUpdateRequest) (*Volume, *Response, error)
    +	Delete(string) (*Response, error)
    +	Create(*VolumeCreateRequest, string) (*Volume, *Response, error)
    +	Lock(string) (*Response, error)
    +	Unlock(string) (*Response, error)
    +}
    +
+// VolumeAttachmentService defines attachment methods
    +type VolumeAttachmentService interface {
    +	Get(string) (*VolumeAttachment, *Response, error)
    +	Create(string, string) (*VolumeAttachment, *Response, error)
    +	Delete(string) (*Response, error)
    +}
    +
    +type volumesRoot struct {
    +	Volumes []Volume `json:"volumes"`
    +	Meta    meta     `json:"meta"`
    +}
    +
    +// Volume represents a volume
    +type Volume struct {
    +	Attachments      []*VolumeAttachment `json:"attachments,omitempty"`
    +	BillingCycle     string              `json:"billing_cycle,omitempty"`
    +	Created          string              `json:"created_at,omitempty"`
    +	Description      string              `json:"description,omitempty"`
    +	Facility         *Facility           `json:"facility,omitempty"`
    +	Href             string              `json:"href,omitempty"`
    +	ID               string              `json:"id"`
    +	Locked           bool                `json:"locked,omitempty"`
    +	Name             string              `json:"name,omitempty"`
    +	Plan             *Plan               `json:"plan,omitempty"`
    +	Project          *Project            `json:"project,omitempty"`
    +	Size             int                 `json:"size,omitempty"`
    +	SnapshotPolicies []*SnapshotPolicy   `json:"snapshot_policies,omitempty"`
    +	State            string              `json:"state,omitempty"`
    +	Updated          string              `json:"updated_at,omitempty"`
    +}
    +
    +// SnapshotPolicy used to execute actions on volume
    +type SnapshotPolicy struct {
    +	ID                string `json:"id"`
    +	Href              string `json:"href"`
    +	SnapshotFrequency string `json:"snapshot_frequency,omitempty"`
    +	SnapshotCount     int    `json:"snapshot_count,omitempty"`
    +}
    +
    +func (v Volume) String() string {
    +	return Stringify(v)
    +}
    +
    +// VolumeCreateRequest type used to create a Packet volume
    +type VolumeCreateRequest struct {
    +	BillingCycle     string            `json:"billing_cycle"`
    +	Description      string            `json:"description,omitempty"`
    +	Locked           bool              `json:"locked,omitempty"`
    +	Size             int               `json:"size"`
    +	PlanID           string            `json:"plan_id"`
    +	FacilityID       string            `json:"facility_id"`
    +	SnapshotPolicies []*SnapshotPolicy `json:"snapshot_policies,omitempty"`
    +}
    +
    +func (v VolumeCreateRequest) String() string {
    +	return Stringify(v)
    +}
    +
    +// VolumeUpdateRequest type used to update a Packet volume
    +type VolumeUpdateRequest struct {
    +	Description  *string `json:"description,omitempty"`
    +	PlanID       *string `json:"plan_id,omitempty"`
    +	Size         *int    `json:"size,omitempty"`
    +	BillingCycle *string `json:"billing_cycle,omitempty"`
    +}
    +
    +// VolumeAttachment is a type from Packet API
    +type VolumeAttachment struct {
    +	Href   string `json:"href"`
    +	ID     string `json:"id"`
    +	Volume Volume `json:"volume"`
    +	Device Device `json:"device"`
    +}
    +
    +func (v VolumeUpdateRequest) String() string {
    +	return Stringify(v)
    +}
    +
+// VolumeAttachmentServiceOp implements VolumeAttachmentService
    +type VolumeAttachmentServiceOp struct {
    +	client *Client
    +}
    +
    +// VolumeServiceOp implements VolumeService
    +type VolumeServiceOp struct {
    +	client *Client
    +}
    +
    +// List returns the volumes for a project
    +func (v *VolumeServiceOp) List(projectID string, listOpt *ListOptions) (volumes []Volume, resp *Response, err error) {
    +	url := fmt.Sprintf("%s/%s%s", projectBasePath, projectID, volumeBasePath)
    +	var params string
    +	if listOpt != nil {
    +		params = listOpt.createURL()
    +		if params != "" {
    +			url = fmt.Sprintf("%s?%s", url, params)
    +		}
    +	}
    +
    +	for {
    +		subset := new(volumesRoot)
    +
    +		resp, err = v.client.DoRequest("GET", url, nil, subset)
    +		if err != nil {
    +			return nil, resp, err
    +		}
    +
    +		volumes = append(volumes, subset.Volumes...)
    +
    +		if subset.Meta.Next != nil && (listOpt == nil || listOpt.Page == 0) {
    +			url = subset.Meta.Next.Href
    +			if params != "" {
    +				url = fmt.Sprintf("%s&%s", url, params)
    +			}
    +			continue
    +		}
    +
    +		return
    +	}
    +}
    +
    +// Get returns a volume by id
    +func (v *VolumeServiceOp) Get(volumeID string) (*Volume, *Response, error) {
    +	path := fmt.Sprintf("%s/%s?include=facility,snapshot_policies,attachments.device", volumeBasePath, volumeID)
    +	volume := new(Volume)
    +
    +	resp, err := v.client.DoRequest("GET", path, nil, volume)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return volume, resp, err
    +}
    +
    +// Update updates a volume
    +func (v *VolumeServiceOp) Update(id string, updateRequest *VolumeUpdateRequest) (*Volume, *Response, error) {
    +	path := fmt.Sprintf("%s/%s", volumeBasePath, id)
    +	volume := new(Volume)
    +
    +	resp, err := v.client.DoRequest("PATCH", path, updateRequest, volume)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return volume, resp, err
    +}
    +
    +// Delete deletes a volume
    +func (v *VolumeServiceOp) Delete(volumeID string) (*Response, error) {
    +	path := fmt.Sprintf("%s/%s", volumeBasePath, volumeID)
    +
    +	return v.client.DoRequest("DELETE", path, nil, nil)
    +}
    +
    +// Create creates a new volume for a project
    +func (v *VolumeServiceOp) Create(createRequest *VolumeCreateRequest, projectID string) (*Volume, *Response, error) {
    +	url := fmt.Sprintf("%s/%s%s", projectBasePath, projectID, volumeBasePath)
    +	volume := new(Volume)
    +
    +	resp, err := v.client.DoRequest("POST", url, createRequest, volume)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return volume, resp, err
    +}
    +
    +// Attachments
    +
+// Create creates a volume attachment, i.e. attaches a volume to a device
    +func (v *VolumeAttachmentServiceOp) Create(volumeID, deviceID string) (*VolumeAttachment, *Response, error) {
    +	url := fmt.Sprintf("%s/%s%s", volumeBasePath, volumeID, attachmentsBasePath)
    +	volAttachParam := map[string]string{
    +		"device_id": deviceID,
    +	}
    +	volumeAttachment := new(VolumeAttachment)
    +
    +	resp, err := v.client.DoRequest("POST", url, volAttachParam, volumeAttachment)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +	return volumeAttachment, resp, nil
    +}
    +
    +// Get gets attachment by id
    +func (v *VolumeAttachmentServiceOp) Get(attachmentID string) (*VolumeAttachment, *Response, error) {
    +	path := fmt.Sprintf("%s%s/%s", volumeBasePath, attachmentsBasePath, attachmentID)
    +	volumeAttachment := new(VolumeAttachment)
    +
    +	resp, err := v.client.DoRequest("GET", path, nil, volumeAttachment)
    +	if err != nil {
    +		return nil, resp, err
    +	}
    +
    +	return volumeAttachment, resp, nil
    +}
    +
    +// Delete deletes attachment by id
    +func (v *VolumeAttachmentServiceOp) Delete(attachmentID string) (*Response, error) {
    +	path := fmt.Sprintf("%s%s/%s", volumeBasePath, attachmentsBasePath, attachmentID)
    +
    +	return v.client.DoRequest("DELETE", path, nil, nil)
    +}
    +
    +// Lock sets a volume to "locked"
    +func (s *VolumeServiceOp) Lock(id string) (*Response, error) {
    +	path := fmt.Sprintf("%s/%s", volumeBasePath, id)
    +	action := lockType{Locked: true}
    +
    +	return s.client.DoRequest("PATCH", path, action, nil)
    +}
    +
    +// Unlock sets a volume to "unlocked"
    +func (s *VolumeServiceOp) Unlock(id string) (*Response, error) {
    +	path := fmt.Sprintf("%s/%s", volumeBasePath, id)
    +	action := lockType{Locked: false}
    +
    +	return s.client.DoRequest("PATCH", path, action, nil)
    +}
    diff --git a/vendor/github.com/renier/xmlrpc/.gitignore b/vendor/github.com/renier/xmlrpc/.gitignore
    new file mode 100644
    index 000000000000..c38fa4e00568
    --- /dev/null
    +++ b/vendor/github.com/renier/xmlrpc/.gitignore
    @@ -0,0 +1,2 @@
    +.idea
    +*.iml
    diff --git a/vendor/github.com/renier/xmlrpc/LICENSE b/vendor/github.com/renier/xmlrpc/LICENSE
    new file mode 100644
    index 000000000000..8103dd139136
    --- /dev/null
    +++ b/vendor/github.com/renier/xmlrpc/LICENSE
    @@ -0,0 +1,19 @@
    +Copyright (C) 2012 Dmitry Maksimov
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in
    +all copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    +THE SOFTWARE.
    diff --git a/vendor/github.com/renier/xmlrpc/README.md b/vendor/github.com/renier/xmlrpc/README.md
    new file mode 100644
    index 000000000000..12b7692e9077
    --- /dev/null
    +++ b/vendor/github.com/renier/xmlrpc/README.md
    @@ -0,0 +1,79 @@
    +## Overview
    +
+xmlrpc is a client-side implementation of the XML-RPC protocol in Go.
    +
    +## Installation
    +
+To install the xmlrpc package, run `go get github.com/kolo/xmlrpc`. To use
+it in an application, add `"github.com/kolo/xmlrpc"` to the `import`
+statement.
    +
    +## Usage
    +
+    client, _ := xmlrpc.NewClient("https://bugzilla.mozilla.org/xmlrpc.cgi", nil, 0)
    +    result := struct{
    +      Version string `xmlrpc:"version"`
    +    }{}
    +    client.Call("Bugzilla.version", nil, &result)
    +    fmt.Printf("Version: %s\n", result.Version) // Version: 4.2.7+
    +
+The second argument of NewClient is an object that implements the
+[http.RoundTripper](http://golang.org/pkg/net/http/#RoundTripper)
+interface; it can be used to get more control over connection options.
+By default it is initialized with http.DefaultTransport. The third
+argument is a request timeout; pass 0 for no timeout.
    +
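+For example, a custom transport and timeout can be supplied (a minimal sketch;
+the transport settings and the 30-second timeout are illustrative values, and
+`net/http` and `time` are assumed to be imported):
+
+    transport := &http.Transport{MaxIdleConns: 10}
+    client, err := xmlrpc.NewClient("https://bugzilla.mozilla.org/xmlrpc.cgi", transport, 30*time.Second)
+    if err != nil {
+        panic(err)
+    }
+    defer client.Close()
+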
    +### Arguments encoding
    +
    +xmlrpc package supports encoding of native Go data types to method
    +arguments.
    +
    +Data types encoding rules:
    +* int, int8, int16, int32, int64 encoded to int;
    +* float32, float64 encoded to double;
    +* bool encoded to boolean;
    +* string encoded to string;
    +* time.Time encoded to datetime.iso8601;
    +* xmlrpc.Base64 encoded to base64;
+* slice encoded to array.
+
+Structs are encoded to struct following these rules:
+* all public fields become struct members;
+* the field name becomes the member name;
+* if a field has an xmlrpc tag, the tag value becomes the member name.
    +
+A server method can accept multiple arguments; to handle this case, pass a
+slice of empty interfaces (`[]interface{}`) as the args value. Each value of
+such a slice is encoded as a separate argument, as in the sketch below.
    +
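+A minimal sketch (the method name `Bug.search`, its parameters, and the tag
+names are illustrative, not part of this library):
+
+    type SearchFilter struct {
+        Status string `xmlrpc:"status"`
+        Limit  int    `xmlrpc:"limit"`
+    }
+
+    var result interface{}
+    // The []interface{} slice is sent as two separate arguments: a string and a struct.
+    err := client.Call("Bug.search", []interface{}{"some-user", SearchFilter{Status: "NEW", Limit: 10}}, &result)
+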
    +### Result decoding
    +
+The result of a remote call is decoded to a native Go data type.
    +
    +Data types decoding rules:
    +* int, i4 decoded to int, int8, int16, int32, int64;
    +* double decoded to float32, float64;
    +* boolean decoded to bool;
    +* string decoded to string;
    +* array decoded to slice;
+* structs decoded following the rules described in the previous section;
+* datetime.iso8601 decoded to time.Time;
+* base64 decoded to string (see the sketch below).
    +
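+For example (the method name and member names below are placeholders):
+
+    result := struct {
+        ID      int       `xmlrpc:"id"`
+        Name    string    `xmlrpc:"name"`
+        Created time.Time `xmlrpc:"created"` // decoded from a datetime.iso8601 value
+    }{}
+    err := client.Call("Some.get", nil, &result)
+
+    // When the response shape is unknown, decode into a generic value:
+    // structs become map[string]interface{}, arrays become []interface{}.
+    var generic interface{}
+    err = client.Call("Some.get", nil, &generic)
+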
    +## Implementation details
    +
    +xmlrpc package contains clientCodec type, that implements [rpc.ClientCodec](http://golang.org/pkg/net/rpc/#ClientCodec)
    +interface of [net/rpc](http://golang.org/pkg/net/rpc) package.
    +
+The xmlrpc package works over HTTP, but some internal functions and data
+types were made public to make it easier to create another implementation
+of xmlrpc that works over another protocol. To encode a request body there
+is the EncodeMethodCall function; to decode a server response the Response
+data type can be used, as sketched below.
    +
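+A rough sketch of using those pieces directly (`rawReply` and `statusCode` stand
+in for whatever your own transport returned):
+
+    body, err := xmlrpc.EncodeMethodCall("Bugzilla.version")
+    if err != nil {
+        return err
+    }
+
+    // send body over your own transport, collect rawReply and statusCode, then:
+    response := xmlrpc.NewResponse(rawReply, statusCode)
+    if response.Failed() {
+        return response.Err()
+    }
+
+    result := struct {
+        Version string `xmlrpc:"version"`
+    }{}
+    return response.Unmarshal(&result)
+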
    +## Contribution
    +
    +Feel free to fork the project, submit pull requests, ask questions.
    +
    +## Authors
    +
    +Dmitry Maksimov (dmtmax@gmail.com)
    diff --git a/vendor/github.com/renier/xmlrpc/client.go b/vendor/github.com/renier/xmlrpc/client.go
    new file mode 100644
    index 000000000000..eed6936dff1f
    --- /dev/null
    +++ b/vendor/github.com/renier/xmlrpc/client.go
    @@ -0,0 +1,181 @@
    +package xmlrpc
    +
    +import (
    +	"fmt"
    +	"io"
    +	"io/ioutil"
    +	"net/http"
    +	"net/http/cookiejar"
    +	"net/rpc"
    +	"net/url"
    +	"strconv"
    +	"sync"
    +	"time"
    +)
    +
    +type Client struct {
    +	*rpc.Client
    +}
    +
    +// clientCodec is rpc.ClientCodec interface implementation.
    +type clientCodec struct {
+	// url is the URL of the xmlrpc service
    +	url *url.URL
    +
    +	// httpClient works with HTTP protocol
    +	httpClient *http.Client
    +
    +	// cookies stores cookies received on last request
    +	cookies http.CookieJar
    +
+	// responses maps in-flight request sequence numbers to their HTTP responses,
+	// so that rpc.Client can match each reply to its request and mark it as done.
    +	responsesMu sync.RWMutex
    +	responses   map[uint64]*http.Response
    +
    +	response *Response
    +
+	// ready is a channel used to link a request with its response.
    +	ready chan *uint64
    +}
    +
    +func (codec *clientCodec) WriteRequest(request *rpc.Request, args interface{}) (err error) {
    +	httpRequest, err := NewRequest(codec.url.String(), request.ServiceMethod, args)
    +	if err != nil {
    +		return err
    +	}
    +
    +	if codec.cookies != nil {
    +		for _, cookie := range codec.cookies.Cookies(codec.url) {
    +			httpRequest.AddCookie(cookie)
    +		}
    +	}
    +
    +	httpResponse, err := codec.httpClient.Do(httpRequest)
    +	if err != nil {
    +		return err
    +	}
    +
    +	if codec.cookies != nil {
    +		codec.cookies.SetCookies(codec.url, httpResponse.Cookies())
    +	}
    +
    +	codec.responsesMu.Lock()
    +	codec.responses[request.Seq] = httpResponse
    +	codec.responsesMu.Unlock()
    +
    +	codec.ready <- &request.Seq
    +
    +	return nil
    +}
    +
    +func (codec *clientCodec) ReadResponseHeader(response *rpc.Response) error {
    +	seq, ok := <-codec.ready
    +	if !ok {
    +		return io.EOF
    +	}
    +
    +	codec.responsesMu.RLock()
    +	httpResponse := codec.responses[*seq]
    +	codec.responsesMu.RUnlock()
    +
    +	defer func() {
    +		httpResponse.Body.Close()
    +		codec.responsesMu.Lock()
    +		delete(codec.responses, *seq)
    +		codec.responsesMu.Unlock()
    +	}()
    +
    +	contentLength := httpResponse.ContentLength
+	if contentLength == -1 {
+		// Some servers return a mangled "Ntcoent-Length" header in place of
+		// Content-Length; fall back to it when the standard header is absent.
+		if ntcoentLengthHeader, ok := httpResponse.Header["Ntcoent-Length"]; ok {
    +			ntcoentLength, err := strconv.ParseInt(ntcoentLengthHeader[0], 10, 64)
    +			if err == nil {
    +				contentLength = ntcoentLength
    +			}
    +		}
    +	}
    +
    +	var respData []byte
    +	var err error
    +	if contentLength != -1 {
    +		respData = make([]byte, contentLength)
    +		_, err = io.ReadFull(httpResponse.Body, respData)
    +	} else {
    +		respData, err = ioutil.ReadAll(httpResponse.Body)
    +	}
    +	if err != nil {
    +		return err
    +	}
    +
    +	resp := NewResponse(respData, httpResponse.StatusCode)
    +
    +	if resp.Failed() {
    +		err := resp.Err()
    +		response.Error = fmt.Sprintf("%v", err)
    +		return err
    +	}
    +
    +	codec.response = resp
    +
    +	response.Seq = *seq
    +
    +	if httpResponse.StatusCode < 200 || httpResponse.StatusCode >= 300 {
    +		return &XmlRpcError{HttpStatusCode: httpResponse.StatusCode}
    +	}
    +
    +	return nil
    +}
    +
    +func (codec *clientCodec) ReadResponseBody(v interface{}) (err error) {
    +	if v == nil {
    +		return nil
    +	}
    +
    +	if err = codec.response.Unmarshal(v); err != nil {
    +		return err
    +	}
    +
    +	return nil
    +}
    +
    +func (codec *clientCodec) Close() error {
    +	transport := codec.httpClient.Transport.(*http.Transport)
    +	transport.CloseIdleConnections()
    +	close(codec.ready)
    +	return nil
    +}
    +
+// NewClient returns an instance of Client that is used to send requests to an xmlrpc service.
    +func NewClient(requrl string, transport http.RoundTripper, timeout time.Duration) (*Client, error) {
    +	if transport == nil {
    +		transport = http.DefaultTransport
    +	}
    +
    +	httpClient := &http.Client{
    +		Transport: transport,
    +		Timeout:   timeout,
    +	}
    +
    +	jar, err := cookiejar.New(nil)
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	u, err := url.Parse(requrl)
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	codec := clientCodec{
    +		url:        u,
    +		httpClient: httpClient,
    +		ready:      make(chan *uint64),
    +		responses:  make(map[uint64]*http.Response),
    +		cookies:    jar,
    +	}
    +
    +	return &Client{rpc.NewClientWithCodec(&codec)}, nil
    +}
    diff --git a/vendor/github.com/renier/xmlrpc/decoder.go b/vendor/github.com/renier/xmlrpc/decoder.go
    new file mode 100644
    index 000000000000..30fa4a91ee6c
    --- /dev/null
    +++ b/vendor/github.com/renier/xmlrpc/decoder.go
    @@ -0,0 +1,547 @@
    +package xmlrpc
    +
    +import (
    +	"bytes"
    +	"encoding/xml"
    +	"errors"
    +	"fmt"
    +	"io"
    +	"reflect"
    +	"regexp"
    +	"strconv"
    +	"strings"
    +	"time"
    +
    +	"golang.org/x/text/encoding/charmap"
    +)
    +
    +const (
    +	iso8601         = "20060102T15:04:05"
    +	iso8601hyphen   = "2006-01-02T15:04:05Z"
    +	iso8601hyphenTZ = "2006-01-02T15:04:05-07:00"
    +)
    +
    +var (
    +	// CharsetReader is a function to generate reader which converts a non UTF-8
    +	// charset into UTF-8.
    +	CharsetReader func(string, io.Reader) (io.Reader, error)
    +
    +	invalidXmlError = errors.New("invalid xml")
    +
    +	dateFormats = []string{iso8601, iso8601hyphen, iso8601hyphenTZ}
    +
+	topArrayRE = regexp.MustCompile(`^<\?xml version="1.0" encoding=".+"\?>\s*<methodResponse>\s*<params>\s*<param>\s*<value>\s*<array>`)
    +)
    +
    +type TypeMismatchError string
    +
    +func (e TypeMismatchError) Error() string { return string(e) }
    +
    +type decoder struct {
    +	*xml.Decoder
    +}
    +
    +func unmarshal(data []byte, v interface{}) (err error) {
    +	dec := &decoder{xml.NewDecoder(bytes.NewBuffer(data))}
    +
    +	if CharsetReader != nil {
    +		dec.CharsetReader = CharsetReader
    +	} else {
    +		dec.CharsetReader = defaultCharsetReader
    +	}
    +
    +	var tok xml.Token
    +	for {
    +		if tok, err = dec.Token(); err != nil {
    +			return err
    +		}
    +
    +		if t, ok := tok.(xml.StartElement); ok {
    +			if t.Name.Local == "value" {
    +				val := reflect.ValueOf(v)
    +				if val.Kind() != reflect.Ptr {
    +					return errors.New("non-pointer value passed to unmarshal")
    +				}
    +
    +				val = val.Elem()
    +				// Some APIs that normally return a collection, omit the []'s when
    +				// the API returns a single value.
    +				if val.Kind() == reflect.Slice && !topArrayRE.MatchString(string(data)) {
    +					val.Set(reflect.MakeSlice(val.Type(), 1, 1))
    +					val = val.Index(0)
    +				}
    +
    +				if err = dec.decodeValue(val); err != nil {
    +					return err
    +				}
    +
    +				break
    +			}
    +		}
    +	}
    +
    +	// read until end of document
    +	err = dec.Skip()
    +	if err != nil && err != io.EOF {
    +		return err
    +	}
    +
    +	return nil
    +}
    +
    +func (dec *decoder) decodeValue(val reflect.Value) error {
    +	var tok xml.Token
    +	var err error
    +
    +	if val.Kind() == reflect.Ptr {
    +		if val.IsNil() {
    +			val.Set(reflect.New(val.Type().Elem()))
    +		}
    +		val = val.Elem()
    +	}
    +
    +	var typeName string
    +	for {
    +		if tok, err = dec.Token(); err != nil {
    +			return err
    +		}
    +
    +		if t, ok := tok.(xml.EndElement); ok {
    +			if t.Name.Local == "value" {
    +				return nil
    +			} else {
    +				return invalidXmlError
    +			}
    +		}
    +
    +		if t, ok := tok.(xml.StartElement); ok {
    +			typeName = t.Name.Local
    +			break
    +		}
    +
    +		// Treat value data without type identifier as string
    +		if t, ok := tok.(xml.CharData); ok {
    +			if value := strings.TrimSpace(string(t)); value != "" {
    +				if err = checkType(val, reflect.String); err != nil {
    +					return err
    +				}
    +
    +				val.SetString(value)
    +				return nil
    +			}
    +		}
    +	}
    +
    +	switch typeName {
    +	case "struct":
    +		ismap := false
    +		pmap := val
    +		valType := val.Type()
    +
    +		if err = checkType(val, reflect.Struct); err != nil {
    +			if checkType(val, reflect.Map) == nil {
    +				if valType.Key().Kind() != reflect.String {
    +					return fmt.Errorf("only maps with string key type can be unmarshalled")
    +				}
    +				ismap = true
    +			} else if checkType(val, reflect.Interface) == nil && val.IsNil() {
    +				var dummy map[string]interface{}
    +				pmap = reflect.New(reflect.TypeOf(dummy)).Elem()
    +				valType = pmap.Type()
    +				ismap = true
    +			} else {
    +				return err
    +			}
    +		}
    +
    +		var fields map[string]reflect.Value
    +
    +		if !ismap {
    +			fields = make(map[string]reflect.Value)
    +			buildStructFieldMap(&fields, val)
    +		} else {
    +			// Create initial empty map
    +			pmap.Set(reflect.MakeMap(valType))
    +		}
    +
    +		// Process struct members.
    +	StructLoop:
    +		for {
    +			if tok, err = dec.Token(); err != nil {
    +				return err
    +			}
    +			switch t := tok.(type) {
    +			case xml.StartElement:
    +				if t.Name.Local != "member" {
    +					return invalidXmlError
    +				}
    +
    +				tagName, fieldName, err := dec.readTag()
    +				if err != nil {
    +					return err
    +				}
    +				if tagName != "name" {
    +					return invalidXmlError
    +				}
    +
    +				var fv reflect.Value
    +				ok := true
    +
    +				if !ismap {
    +					fv, ok = fields[string(fieldName)]
    +				} else {
    +					fv = reflect.New(valType.Elem())
    +				}
    +
    +				if ok {
    +					for {
    +						if tok, err = dec.Token(); err != nil {
    +							return err
    +						}
    +						if t, ok := tok.(xml.StartElement); ok && t.Name.Local == "value" {
    +							if err = dec.decodeValue(fv); err != nil {
    +								return err
    +							}
    +
+							// </value>
    +							if err = dec.Skip(); err != nil {
    +								return err
    +							}
    +
    +							break
    +						}
    +					}
    +				}
    +
+				// </member>
    +				if err = dec.Skip(); err != nil {
    +					return err
    +				}
    +
    +				if ismap {
    +					pmap.SetMapIndex(reflect.ValueOf(string(fieldName)), reflect.Indirect(fv))
    +					val.Set(pmap)
    +				}
    +			case xml.EndElement:
    +				break StructLoop
    +			}
    +		}
    +	case "array":
    +		pslice := val
    +		if checkType(val, reflect.Interface) == nil && val.IsNil() {
    +			var dummy []interface{}
    +			pslice = reflect.New(reflect.TypeOf(dummy)).Elem()
    +		} else if err = checkType(val, reflect.Slice); err != nil {
    +			// Check to see if we have an unexpected array when we expect
    +			// a struct. Adjust by expecting an array of the struct type
    +			// and see if things still work.
    +			// https://github.com/renier/xmlrpc/pull/2
    +			if val.Kind() == reflect.Struct {
    +				pslice = reflect.New(reflect.SliceOf(reflect.TypeOf(val.Interface()))).Elem()
    +				val = pslice
    +			} else {
    +				return err
    +			}
    +		}
    +
    +	ArrayLoop:
    +		for {
    +			if tok, err = dec.Token(); err != nil {
    +				return err
    +			}
    +
    +			switch t := tok.(type) {
    +			case xml.StartElement:
    +				if t.Name.Local != "data" {
    +					return invalidXmlError
    +				}
    +
    +				slice := reflect.MakeSlice(pslice.Type(), 0, 0)
    +
    +			DataLoop:
    +				for {
    +					if tok, err = dec.Token(); err != nil {
    +						return err
    +					}
    +
    +					switch tt := tok.(type) {
    +					case xml.StartElement:
    +						if tt.Name.Local != "value" {
    +							return invalidXmlError
    +						}
    +
    +						v := reflect.New(pslice.Type().Elem())
    +						if err = dec.decodeValue(v); err != nil {
    +							return err
    +						}
    +
    +						slice = reflect.Append(slice, v.Elem())
    +
+						// </value>
    +						if err = dec.Skip(); err != nil {
    +							return err
    +						}
    +					case xml.EndElement:
    +						pslice.Set(slice)
    +						val.Set(pslice)
    +						break DataLoop
    +					}
    +				}
    +			case xml.EndElement:
    +				break ArrayLoop
    +			}
    +		}
    +	default:
    +		if tok, err = dec.Token(); err != nil {
    +			return err
    +		}
    +
    +		var data []byte
    +
    +		switch t := tok.(type) {
    +		case xml.EndElement:
    +			return nil
    +		case xml.CharData:
    +			data = []byte(t.Copy())
    +		default:
    +			return invalidXmlError
    +		}
    +
    +	ParseValue:
    +		switch typeName {
    +		case "int", "i4", "i8":
    +			if checkType(val, reflect.Interface) == nil && val.IsNil() {
    +				i, err := strconv.ParseInt(string(data), 10, 64)
    +				if err != nil {
    +					return err
    +				}
    +
    +				pi := reflect.New(reflect.TypeOf(i)).Elem()
    +				pi.SetInt(i)
    +				val.Set(pi)
    +			} else if err = checkType(val, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64); err != nil {
    +				return err
    +			} else {
    +				k := val.Kind()
    +				isInt := k == reflect.Int || k == reflect.Int8 || k == reflect.Int16 || k == reflect.Int32 || k == reflect.Int64
    +
    +				if isInt {
    +					i, err := strconv.ParseInt(string(data), 10, val.Type().Bits())
    +					if err != nil {
    +						return err
    +					}
    +
    +					val.SetInt(i)
    +				} else {
    +					i, err := strconv.ParseUint(string(data), 10, val.Type().Bits())
    +					if err != nil {
    +						return err
    +					}
    +
    +					val.SetUint(i)
    +				}
    +			}
    +		case "string", "base64":
    +			str := string(data)
    +			if checkType(val, reflect.Interface) == nil && val.IsNil() {
    +				pstr := reflect.New(reflect.TypeOf(str)).Elem()
    +				pstr.SetString(str)
    +				val.Set(pstr)
    +			} else if err = checkType(val, reflect.String); err != nil {
    +				valName := val.Type().Name()
    +				if valName == "" {
    +					valName = reflect.Indirect(val).Type().Name()
    +				}
    +
    +				if valName == "Time" {
    +					timeField := val.FieldByName(valName)
    +					if timeField.IsValid() {
    +						val = timeField
    +					}
    +					typeName = "dateTime.iso8601"
    +					goto ParseValue
    +				} else if strings.HasPrefix(strings.ToLower(valName), "float") {
    +					typeName = "double"
    +					goto ParseValue
    +				}
    +				return err
    +			} else {
    +				val.SetString(str)
    +			}
    +		case "dateTime.iso8601":
    +			err = nil
    +			var t time.Time
    +			for _, df := range dateFormats {
    +				t, err = time.Parse(df, string(data))
    +				if err == nil {
    +					break
    +				}
    +			}
    +			if err != nil {
    +				return err
    +			}
    +
    +			if checkType(val, reflect.Interface) == nil && val.IsNil() {
    +				ptime := reflect.New(reflect.TypeOf(t)).Elem()
    +				ptime.Set(reflect.ValueOf(t))
    +				val.Set(ptime)
    +			} else if !reflect.TypeOf((time.Time)(t)).ConvertibleTo(val.Type()) {
    +				return TypeMismatchError(
    +					fmt.Sprintf(
    +						"error: type mismatch error - can't decode %v (%s.%s) to time",
    +						val.Kind(),
    +						val.Type().PkgPath(),
    +						val.Type().Name(),
    +					),
    +				)
    +			} else {
    +				val.Set(reflect.ValueOf(t).Convert(val.Type()))
    +			}
    +		case "boolean":
    +			v, err := strconv.ParseBool(string(data))
    +			if err != nil {
    +				return err
    +			}
    +
    +			if checkType(val, reflect.Interface) == nil && val.IsNil() {
    +				pv := reflect.New(reflect.TypeOf(v)).Elem()
    +				pv.SetBool(v)
    +				val.Set(pv)
    +			} else if err = checkType(val, reflect.Bool); err != nil {
    +				return err
    +			} else {
    +				val.SetBool(v)
    +			}
    +		case "double":
    +			if checkType(val, reflect.Interface) == nil && val.IsNil() {
    +				i, err := strconv.ParseFloat(string(data), 64)
    +				if err != nil {
    +					return err
    +				}
    +
    +				pdouble := reflect.New(reflect.TypeOf(i)).Elem()
    +				pdouble.SetFloat(i)
    +				val.Set(pdouble)
    +			} else if err = checkType(val, reflect.Float32, reflect.Float64); err != nil {
    +				return err
    +			} else {
    +				i, err := strconv.ParseFloat(string(data), val.Type().Bits())
    +				if err != nil {
    +					return err
    +				}
    +
    +				val.SetFloat(i)
    +			}
    +		default:
    +			return errors.New("unsupported type")
    +		}
    +
+		// </type>
    +		if err = dec.Skip(); err != nil {
    +			return err
    +		}
    +	}
    +
    +	return nil
    +}
    +
    +func (dec *decoder) readTag() (string, []byte, error) {
    +	var tok xml.Token
    +	var err error
    +
    +	var name string
    +	for {
    +		if tok, err = dec.Token(); err != nil {
    +			return "", nil, err
    +		}
    +
    +		if t, ok := tok.(xml.StartElement); ok {
    +			name = t.Name.Local
    +			break
    +		}
    +	}
    +
    +	value, err := dec.readCharData()
    +	if err != nil {
    +		return "", nil, err
    +	}
    +
    +	return name, value, dec.Skip()
    +}
    +
    +func (dec *decoder) readCharData() ([]byte, error) {
    +	var tok xml.Token
    +	var err error
    +
    +	if tok, err = dec.Token(); err != nil {
    +		return nil, err
    +	}
    +
    +	if t, ok := tok.(xml.CharData); ok {
    +		return []byte(t.Copy()), nil
    +	} else {
    +		return nil, invalidXmlError
    +	}
    +}
    +
    +func checkType(val reflect.Value, kinds ...reflect.Kind) error {
    +	if len(kinds) == 0 {
    +		return nil
    +	}
    +
    +	if val.Kind() == reflect.Ptr {
    +		val = val.Elem()
    +	}
    +
    +	match := false
    +
    +	for _, kind := range kinds {
    +		if val.Kind() == kind {
    +			match = true
    +			break
    +		}
    +	}
    +
    +	if !match {
    +		return TypeMismatchError(fmt.Sprintf("error: type mismatch - can't unmarshal %v to %v",
    +			val.Kind(), kinds[0]))
    +	}
    +
    +	return nil
    +}
    +
    +func buildStructFieldMap(fieldMap *map[string]reflect.Value, val reflect.Value) {
    +	valType := val.Type()
    +	valFieldNum := valType.NumField()
    +	for i := 0; i < valFieldNum; i++ {
    +		field := valType.Field(i)
    +		fieldVal := val.FieldByName(field.Name)
    +
    +		if field.Anonymous {
    +			// Drill down into embedded structs
    +			buildStructFieldMap(fieldMap, fieldVal)
    +			continue
    +		}
    +
    +		if fieldVal.CanSet() {
    +			if fn := field.Tag.Get("xmlrpc"); fn != "" {
    +				fn = strings.Split(fn, ",")[0]
    +				(*fieldMap)[fn] = fieldVal
    +			} else {
    +				(*fieldMap)[field.Name] = fieldVal
    +			}
    +		}
    +	}
    +}
    +
    +// http://stackoverflow.com/a/34712322/3160958
    +// https://groups.google.com/forum/#!topic/golang-nuts/VudK_05B62k
    +func defaultCharsetReader(charset string, input io.Reader) (io.Reader, error) {
    +	if charset == "iso-8859-1" || charset == "ISO-8859-1" {
    +		return charmap.ISO8859_1.NewDecoder().Reader(input), nil
    +	} else if strings.HasPrefix(charset, "utf") || strings.HasPrefix(charset, "UTF") {
    +		return input, nil
    +	}
    +
    +	return nil, fmt.Errorf("Unknown charset: %s", charset)
    +}
    diff --git a/vendor/github.com/renier/xmlrpc/encoder.go b/vendor/github.com/renier/xmlrpc/encoder.go
    new file mode 100644
    index 000000000000..2410aff8e3f7
    --- /dev/null
    +++ b/vendor/github.com/renier/xmlrpc/encoder.go
    @@ -0,0 +1,204 @@
    +package xmlrpc
    +
    +import (
    +	"bytes"
    +	"encoding/xml"
    +	"fmt"
    +	"reflect"
    +	"strconv"
    +	"strings"
    +	"time"
    +)
    +
    +type encodeFunc func(reflect.Value) ([]byte, error)
    +
    +func marshal(v interface{}) ([]byte, error) {
    +	if v == nil {
    +		return []byte{}, nil
    +	}
    +
    +	val := reflect.ValueOf(v)
    +	return encodeValue(val)
    +}
    +
    +func encodeValue(val reflect.Value) ([]byte, error) {
    +	var b []byte
    +	var err error
    +
    +	if val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface {
    +		if val.IsNil() {
+			return []byte("<nil/>"), nil
    +		}
    +
    +		val = val.Elem()
    +	}
    +
    +	switch val.Kind() {
    +	case reflect.Struct:
    +		switch val.Interface().(type) {
    +		case time.Time:
    +			t := val.Interface().(time.Time)
+			b = []byte(fmt.Sprintf("<dateTime.iso8601>%s</dateTime.iso8601>", t.Format(iso8601)))
    +		default:
    +			b, err = encodeStruct(val)
    +		}
    +	case reflect.Map:
    +		b, err = encodeMap(val)
    +	case reflect.Slice:
    +		b, err = encodeSlice(val)
    +	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		b = []byte(fmt.Sprintf("<int>%s</int>", strconv.FormatInt(val.Int(), 10)))
    +	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		b = []byte(fmt.Sprintf("<i4>%s</i4>", strconv.FormatUint(val.Uint(), 10)))
    +	case reflect.Float32, reflect.Float64:
+		b = []byte(fmt.Sprintf("<double>%s</double>",
+			strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits())))
    +	case reflect.Bool:
+		if val.Bool() {
+			b = []byte("<boolean>1</boolean>")
+		} else {
+			b = []byte("<boolean>0</boolean>")
+		}
    +	case reflect.String:
    +		var buf bytes.Buffer
    +
    +		xml.Escape(&buf, []byte(val.String()))
    +
+		if _, ok := val.Interface().(Base64); ok {
+			b = []byte(fmt.Sprintf("<base64>%s</base64>", buf.String()))
+		} else {
+			b = []byte(fmt.Sprintf("<string>%s</string>", buf.String()))
+		}
    +	default:
    +		return nil, fmt.Errorf("xmlrpc encode error: unsupported type")
    +	}
    +
    +	if err != nil {
    +		return nil, err
    +	}
    +
+	return []byte(fmt.Sprintf("<value>%s</value>", string(b))), nil
    +}
    +
    +func encodeStruct(value reflect.Value) ([]byte, error) {
    +	var b bytes.Buffer
    +
+	b.WriteString("<struct>")
    +
    +	vals := []reflect.Value{value}
    +	for j := 0; j < len(vals); j++ {
    +		val := vals[j]
    +		t := val.Type()
    +		for i := 0; i < t.NumField(); i++ {
    +			f := t.Field(i)
    +			tag := f.Tag.Get("xmlrpc")
    +			name := f.Name
    +			fieldVal := val.FieldByName(f.Name)
    +			fieldValKind := fieldVal.Kind()
    +
    +			// Omit unexported fields
    +			if !fieldVal.CanInterface() {
    +				continue
    +			}
    +
    +			// Omit fields who are structs that contain no fields themselves
    +			if fieldValKind == reflect.Struct && fieldVal.NumField() == 0 {
    +				continue
    +			}
    +
    +			// Omit empty slices
    +			if fieldValKind == reflect.Slice && fieldVal.Len() == 0 {
    +				continue
    +			}
    +
    +			// Omit empty fields (defined as nil pointers)
    +			if tag != "" {
    +				parts := strings.Split(tag, ",")
    +				name = parts[0]
    +				if len(parts) > 1 && parts[1] == "omitempty" {
    +					if fieldValKind == reflect.Ptr && fieldVal.IsNil() {
    +						continue
    +					}
    +				}
    +			}
    +
    +			// Drill down into anonymous/embedded structs and do not expose the
    +			// containing embedded struct in request.
    +			// This will effectively pull up fields in embedded structs to look
    +			// as part of the original struct in the request.
    +			if f.Anonymous {
    +				vals = append(vals, fieldVal)
    +				continue
    +			}
    +
+			b.WriteString("<member>")
+			b.WriteString(fmt.Sprintf("<name>%s</name>", name))
    +
    +			p, err := encodeValue(fieldVal)
    +			if err != nil {
    +				return nil, err
    +			}
    +			b.Write(p)
    +
    +			b.WriteString("</member>")
    +		}
    +	}
    +
    +	b.WriteString("</struct>")
    +
    +	return b.Bytes(), nil
    +}
    +
    +func encodeMap(val reflect.Value) ([]byte, error) {
    +	var t = val.Type()
    +
    +	if t.Key().Kind() != reflect.String {
    +		return nil, fmt.Errorf("xmlrpc encode error: only maps with string keys are supported")
    +	}
    +
    +	var b bytes.Buffer
    +
    +	b.WriteString("<struct>")
    +
    +	keys := val.MapKeys()
    +
    +	for i := 0; i < val.Len(); i++ {
    +		key := keys[i]
    +		kval := val.MapIndex(key)
    +
    +		b.WriteString("<member>")
    +		b.WriteString(fmt.Sprintf("<name>%s</name>", key.String()))
    +
    +		p, err := encodeValue(kval)
    +
    +		if err != nil {
    +			return nil, err
    +		}
    +
    +		b.Write(p)
    +		b.WriteString("</member>")
    +	}
    +
    +	b.WriteString("</struct>")
    +
    +	return b.Bytes(), nil
    +}
    +
    +func encodeSlice(val reflect.Value) ([]byte, error) {
    +	var b bytes.Buffer
    +
    +	b.WriteString("<array><data>")
    +
    +	for i := 0; i < val.Len(); i++ {
    +		p, err := encodeValue(val.Index(i))
    +		if err != nil {
    +			return nil, err
    +		}
    +
    +		b.Write(p)
    +	}
    +
    +	b.WriteString("</data></array>")
    +
    +	return b.Bytes(), nil
    +}
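For orientation, here is a minimal sketch (not part of the patch) of the XML the encoder above emits for a small tagged struct. The pingArgs type and its field names are invented for illustration, and the sketch assumes it sits inside the xmlrpc package itself, since marshal and encodeValue are unexported.

	package xmlrpc

	import "fmt"

	// pingArgs is a hypothetical argument struct; the xmlrpc tags become the
	// <member> names inside the generated <struct>.
	type pingArgs struct {
		Host  string `xmlrpc:"host"`
		Count int    `xmlrpc:"count"`
	}

	func sketchEncode() {
		b, err := marshal(pingArgs{Host: "example.com", Count: 3})
		if err != nil {
			panic(err)
		}
		fmt.Println(string(b))
		// Expected shape (emitted on a single line in practice):
		// <value><struct>
		//   <member><name>host</name><value><string>example.com</string></value></member>
		//   <member><name>count</name><value><int>3</int></value></member>
		// </struct></value>
	}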
    diff --git a/vendor/github.com/renier/xmlrpc/request.go b/vendor/github.com/renier/xmlrpc/request.go
    new file mode 100644
    index 000000000000..acb8251b2b3c
    --- /dev/null
    +++ b/vendor/github.com/renier/xmlrpc/request.go
    @@ -0,0 +1,57 @@
    +package xmlrpc
    +
    +import (
    +	"bytes"
    +	"fmt"
    +	"net/http"
    +)
    +
    +func NewRequest(url string, method string, args interface{}) (*http.Request, error) {
    +	var t []interface{}
    +	var ok bool
    +	if t, ok = args.([]interface{}); !ok {
    +		if args != nil {
    +			t = []interface{}{args}
    +		}
    +	}
    +
    +	body, err := EncodeMethodCall(method, t...)
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	request, err := http.NewRequest("POST", url, bytes.NewReader(body))
    +	if err != nil {
    +		return nil, err
    +	}
    +
    +	request.Header.Set("Content-Type", "text/xml")
    +	request.Header.Set("Content-Length", fmt.Sprintf("%d", len(body)))
    +
    +	return request, nil
    +}
    +
    +func EncodeMethodCall(method string, args ...interface{}) ([]byte, error) {
    +	var b bytes.Buffer
    +	b.WriteString(`<?xml version="1.0" encoding="UTF-8"?>`)
    +	b.WriteString(fmt.Sprintf("<methodCall><methodName>%s</methodName>", method))
    +
    +	if args != nil {
    +		b.WriteString("<params>")
    +
    +		for _, arg := range args {
    +			p, err := marshal(arg)
    +			if err != nil {
    +				return nil, err
    +			}
    +
    +			b.WriteString(fmt.Sprintf("<param>%s</param>", string(p)))
    +		}
    +
    +		b.WriteString("</params>")
    +	}
    +
    +	b.WriteString("</methodCall>")
    +
    +	return b.Bytes(), nil
    +}
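As a hedged usage sketch (not part of the patch), NewRequest can be paired with a plain *http.Client. The endpoint URL, method name, and argument below are placeholders, not values taken from this change.

	package main

	import (
		"fmt"
		"io/ioutil"
		"net/http"

		"github.com/renier/xmlrpc"
	)

	func main() {
		// Build the XML-RPC POST request; the single argument is wrapped in <params>.
		req, err := xmlrpc.NewRequest(
			"https://rpc.example.com/xmlrpc/v3", // placeholder endpoint
			"service.upcase",                    // placeholder method name
			"hello",                             // single positional argument
		)
		if err != nil {
			panic(err)
		}

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()

		body, _ := ioutil.ReadAll(resp.Body)
		fmt.Println(resp.StatusCode, len(body))
	}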
    diff --git a/vendor/github.com/renier/xmlrpc/response.go b/vendor/github.com/renier/xmlrpc/response.go
    new file mode 100644
    index 000000000000..a973b949e46e
    --- /dev/null
    +++ b/vendor/github.com/renier/xmlrpc/response.go
    @@ -0,0 +1,57 @@
    +package xmlrpc
    +
    +import (
    +	"regexp"
    +)
    +
    +var (
    +	faultRx = regexp.MustCompile(`<fault>(\s|\S)+</fault>`)
    +)
    +
    +type failedResponse struct {
    +	Code           interface{} `xmlrpc:"faultCode"`
    +	Error          string      `xmlrpc:"faultString"`
    +	HttpStatusCode int
    +}
    +
    +func (r *failedResponse) err() error {
    +	return &XmlRpcError{
    +		Code: r.Code,
    +		Err:  r.Error,
    +		HttpStatusCode: r.HttpStatusCode,
    +	}
    +}
    +
    +type Response struct {
    +	data           []byte
    +	httpStatusCode int
    +}
    +
    +func NewResponse(data []byte, httpStatusCode int) *Response {
    +	return &Response{
    +		data: data,
    +		httpStatusCode: httpStatusCode,
    +	}
    +}
    +
    +func (r *Response) Failed() bool {
    +	return faultRx.Match(r.data)
    +}
    +
    +func (r *Response) Err() error {
    +	failedResp := new(failedResponse)
    +	if err := unmarshal(r.data, failedResp); err != nil {
    +		return err
    +	}
    +	failedResp.HttpStatusCode = r.httpStatusCode
    +
    +	return failedResp.err()
    +}
    +
    +func (r *Response) Unmarshal(v interface{}) error {
    +	if err := unmarshal(r.data, v); err != nil {
    +		return err
    +	}
    +
    +	return nil
    +}
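A sketch (again not part of the patch) of how the response side fits together. The result type is a placeholder that assumes the called method returns a single string, and decoding relies on the package's unexported unmarshal helper (decode.go), which belongs to the same vendored library but is not shown in this excerpt.

	package main

	import (
		"io/ioutil"
		"net/http"

		"github.com/renier/xmlrpc"
	)

	// decodeString reads an XML-RPC HTTP response and unmarshals a string result.
	func decodeString(httpResp *http.Response) (string, error) {
		raw, err := ioutil.ReadAll(httpResp.Body)
		if err != nil {
			return "", err
		}

		resp := xmlrpc.NewResponse(raw, httpResp.StatusCode)
		if resp.Failed() {
			// Err unmarshals the <fault> payload into an *XmlRpcError.
			return "", resp.Err()
		}

		var result string
		err = resp.Unmarshal(&result)
		return result, err
	}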
    diff --git a/vendor/github.com/renier/xmlrpc/test_server.rb b/vendor/github.com/renier/xmlrpc/test_server.rb
    new file mode 100644
    index 000000000000..1b1ff8760f79
    --- /dev/null
    +++ b/vendor/github.com/renier/xmlrpc/test_server.rb
    @@ -0,0 +1,25 @@
    +# encoding: utf-8
    +
    +require "xmlrpc/server"
    +
    +class Service
    +  def time
    +    Time.now
    +  end
    +
    +  def upcase(s)
    +    s.upcase
    +  end
    +
    +  def sum(x, y)
    +    x + y
    +  end
    +
    +  def error
    +    raise XMLRPC::FaultException.new(500, "Server error")
    +  end
    +end
    +
    +server = XMLRPC::Server.new 5001, 'localhost'
    +server.add_handler "service", Service.new
    +server.serve
    diff --git a/vendor/github.com/renier/xmlrpc/xmlrpc.go b/vendor/github.com/renier/xmlrpc/xmlrpc.go
    new file mode 100644
    index 000000000000..e9f97a685ef4
    --- /dev/null
    +++ b/vendor/github.com/renier/xmlrpc/xmlrpc.go
    @@ -0,0 +1,28 @@
    +package xmlrpc
    +
    +import (
    +	"fmt"
    +)
    +
    +// XmlRpcError represents errors returned on an xmlrpc request.
    +type XmlRpcError struct {
    +	Code           interface{}
    +	Err            string
    +	HttpStatusCode int
    +}
    +
    +// Error implements the error interface.
    +func (e *XmlRpcError) Error() string {
    +	return fmt.Sprintf(
    +		"error: %s, code: %v, http status code: %d",
    +		e.Err, e.Code, e.HttpStatusCode)
    +}
    +
    +// Base64 represents a value in base64 encoding
    +type Base64 string
    +
    +type Params struct {
    +	Params []interface{}
    +}
    +
    +type Struct map[string]interface{}
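Callers can recover the typed fault information with errors.As; a small self-contained sketch, where the fault code, message, and status values are made up:

	package main

	import (
		"errors"
		"fmt"

		"github.com/renier/xmlrpc"
	)

	func describe(err error) string {
		var rpcErr *xmlrpc.XmlRpcError
		if errors.As(err, &rpcErr) {
			return fmt.Sprintf("fault code=%v http=%d: %s",
				rpcErr.Code, rpcErr.HttpStatusCode, rpcErr.Err)
		}
		return err.Error()
	}

	func main() {
		err := error(&xmlrpc.XmlRpcError{Code: 500, Err: "Server error", HttpStatusCode: 200})
		fmt.Println(describe(err))
		// fault code=500 http=200: Server error
	}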
    diff --git a/vendor/github.com/softlayer/softlayer-go/LICENSE b/vendor/github.com/softlayer/softlayer-go/LICENSE
    new file mode 100644
    index 000000000000..d64569567334
    --- /dev/null
    +++ b/vendor/github.com/softlayer/softlayer-go/LICENSE
    @@ -0,0 +1,202 @@
    +
    +                                 Apache License
    +                           Version 2.0, January 2004
    +                        http://www.apache.org/licenses/
    +
    +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +   1. Definitions.
    +
    +      "License" shall mean the terms and conditions for use, reproduction,
    +      and distribution as defined by Sections 1 through 9 of this document.
    +
    +      "Licensor" shall mean the copyright owner or entity authorized by
    +      the copyright owner that is granting the License.
    +
    +      "Legal Entity" shall mean the union of the acting entity and all
    +      other entities that control, are controlled by, or are under common
    +      control with that entity. For the purposes of this definition,
    +      "control" means (i) the power, direct or indirect, to cause the
    +      direction or management of such entity, whether by contract or
    +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +      outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +      "You" (or "Your") shall mean an individual or Legal Entity
    +      exercising permissions granted by this License.
    +
    +      "Source" form shall mean the preferred form for making modifications,
    +      including but not limited to software source code, documentation
    +      source, and configuration files.
    +
    +      "Object" form shall mean any form resulting from mechanical
    +      transformation or translation of a Source form, including but
    +      not limited to compiled object code, generated documentation,
    +      and conversions to other media types.
    +
    +      "Work" shall mean the work of authorship, whether in Source or
    +      Object form, made available under the License, as indicated by a
    +      copyright notice that is included in or attached to the work
    +      (an example is provided in the Appendix below).
    +
    +      "Derivative Works" shall mean any work, whether in Source or Object
    +      form, that is based on (or derived from) the Work and for which the
    +      editorial revisions, annotations, elaborations, or other modifications
    +      represent, as a whole, an original work of authorship. For the purposes
    +      of this License, Derivative Works shall not include works that remain
    +      separable from, or merely link (or bind by name) to the interfaces of,
    +      the Work and Derivative Works thereof.
    +
    +      "Contribution" shall mean any work of authorship, including
    +      the original version of the Work and any modifications or additions
    +      to that Work or Derivative Works thereof, that is intentionally
    +      submitted to Licensor for inclusion in the Work by the copyright owner
    +      or by an individual or Legal Entity authorized to submit on behalf of
    +      the copyright owner. For the purposes of this definition, "submitted"
    +      means any form of electronic, verbal, or written communication sent
    +      to the Licensor or its representatives, including but not limited to
    +      communication on electronic mailing lists, source code control systems,
    +      and issue tracking systems that are managed by, or on behalf of, the
    +      Licensor for the purpose of discussing and improving the Work, but
    +      excluding communication that is conspicuously marked or otherwise
    +      designated in writing by the copyright owner as "Not a Contribution."
    +
    +      "Contributor" shall mean Licensor and any individual or Legal Entity
    +      on behalf of whom a Contribution has been received by Licensor and
    +      subsequently incorporated within the Work.
    +
    +   2. Grant of Copyright License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      copyright license to reproduce, prepare Derivative Works of,
    +      publicly display, publicly perform, sublicense, and distribute the
    +      Work and such Derivative Works in Source or Object form.
    +
    +   3. Grant of Patent License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      (except as stated in this section) patent license to make, have made,
    +      use, offer to sell, sell, import, and otherwise transfer the Work,
    +      where such license applies only to those patent claims licensable
    +      by such Contributor that are necessarily infringed by their
    +      Contribution(s) alone or by combination of their Contribution(s)
    +      with the Work to which such Contribution(s) was submitted. If You
    +      institute patent litigation against any entity (including a
    +      cross-claim or counterclaim in a lawsuit) alleging that the Work
    +      or a Contribution incorporated within the Work constitutes direct
    +      or contributory patent infringement, then any patent licenses
    +      granted to You under this License for that Work shall terminate
    +      as of the date such litigation is filed.
    +
    +   4. Redistribution. You may reproduce and distribute copies of the
    +      Work or Derivative Works thereof in any medium, with or without
    +      modifications, and in Source or Object form, provided that You
    +      meet the following conditions:
    +
    +      (a) You must give any other recipients of the Work or
    +          Derivative Works a copy of this License; and
    +
    +      (b) You must cause any modified files to carry prominent notices
    +          stating that You changed the files; and
    +
    +      (c) You must retain, in the Source form of any Derivative Works
    +          that You distribute, all copyright, patent, trademark, and
    +          attribution notices from the Source form of the Work,
    +          excluding those notices that do not pertain to any part of
    +          the Derivative Works; and
    +
    +      (d) If the Work includes a "NOTICE" text file as part of its
    +          distribution, then any Derivative Works that You distribute must
    +          include a readable copy of the attribution notices contained
    +          within such NOTICE file, excluding those notices that do not
    +          pertain to any part of the Derivative Works, in at least one
    +          of the following places: within a NOTICE text file distributed
    +          as part of the Derivative Works; within the Source form or
    +          documentation, if provided along with the Derivative Works; or,
    +          within a display generated by the Derivative Works, if and
    +          wherever such third-party notices normally appear. The contents
    +          of the NOTICE file are for informational purposes only and
    +          do not modify the License. You may add Your own attribution
    +          notices within Derivative Works that You distribute, alongside
    +          or as an addendum to the NOTICE text from the Work, provided
    +          that such additional attribution notices cannot be construed
    +          as modifying the License.
    +
    +      You may add Your own copyright statement to Your modifications and
    +      may provide additional or different license terms and conditions
    +      for use, reproduction, or distribution of Your modifications, or
    +      for any such Derivative Works as a whole, provided Your use,
    +      reproduction, and distribution of the Work otherwise complies with
    +      the conditions stated in this License.
    +
    +   5. Submission of Contributions. Unless You explicitly state otherwise,
    +      any Contribution intentionally submitted for inclusion in the Work
    +      by You to the Licensor shall be under the terms and conditions of
    +      this License, without any additional terms or conditions.
    +      Notwithstanding the above, nothing herein shall supersede or modify
    +      the terms of any separate license agreement you may have executed
    +      with Licensor regarding such Contributions.
    +
    +   6. Trademarks. This License does not grant permission to use the trade
    +      names, trademarks, service marks, or product names of the Licensor,
    +      except as required for reasonable and customary use in describing the
    +      origin of the Work and reproducing the content of the NOTICE file.
    +
    +   7. Disclaimer of Warranty. Unless required by applicable law or
    +      agreed to in writing, Licensor provides the Work (and each
    +      Contributor provides its Contributions) on an "AS IS" BASIS,
    +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +      implied, including, without limitation, any warranties or conditions
    +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +      PARTICULAR PURPOSE. You are solely responsible for determining the
    +      appropriateness of using or redistributing the Work and assume any
    +      risks associated with Your exercise of permissions under this License.
    +
    +   8. Limitation of Liability. In no event and under no legal theory,
    +      whether in tort (including negligence), contract, or otherwise,
    +      unless required by applicable law (such as deliberate and grossly
    +      negligent acts) or agreed to in writing, shall any Contributor be
    +      liable to You for damages, including any direct, indirect, special,
    +      incidental, or consequential damages of any character arising as a
    +      result of this License or out of the use or inability to use the
    +      Work (including but not limited to damages for loss of goodwill,
    +      work stoppage, computer failure or malfunction, or any and all
    +      other commercial damages or losses), even if such Contributor
    +      has been advised of the possibility of such damages.
    +
    +   9. Accepting Warranty or Additional Liability. While redistributing
    +      the Work or Derivative Works thereof, You may choose to offer,
    +      and charge a fee for, acceptance of support, warranty, indemnity,
    +      or other liability obligations and/or rights consistent with this
    +      License. However, in accepting such obligations, You may act only
    +      on Your own behalf and on Your sole responsibility, not on behalf
    +      of any other Contributor, and only if You agree to indemnify,
    +      defend, and hold each Contributor harmless for any liability
    +      incurred by, or claims asserted against, such Contributor by reason
    +      of your accepting any such warranty or additional liability.
    +
    +   END OF TERMS AND CONDITIONS
    +
    +   APPENDIX: How to apply the Apache License to your work.
    +
    +      To apply the Apache License to your work, attach the following
    +      boilerplate notice, with the fields enclosed by brackets "[]"
    +      replaced with your own identifying information. (Don't include
    +      the brackets!)  The text should be enclosed in the appropriate
    +      comment syntax for the file format. We also recommend that a
    +      file or class name and description of purpose be included on the
    +      same "printed page" as the copyright notice for easier
    +      identification within third-party archives.
    +
    +   Copyright [yyyy] [name of copyright owner]
    +
    +   Licensed under the Apache License, Version 2.0 (the "License");
    +   you may not use this file except in compliance with the License.
    +   You may obtain a copy of the License at
    +
    +       http://www.apache.org/licenses/LICENSE-2.0
    +
    +   Unless required by applicable law or agreed to in writing, software
    +   distributed under the License is distributed on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +   See the License for the specific language governing permissions and
    +   limitations under the License.
    diff --git a/vendor/github.com/softlayer/softlayer-go/config/config.go b/vendor/github.com/softlayer/softlayer-go/config/config.go
    new file mode 100644
    index 000000000000..36d5348e9d1d
    --- /dev/null
    +++ b/vendor/github.com/softlayer/softlayer-go/config/config.go
    @@ -0,0 +1,145 @@
    +/**
    + * // This file is borrowed from https://github.com/vaughan0/go-ini/blob/master/ini.go
    + * // which is distributed under the MIT license (https://github.com/vaughan0/go-ini/blob/master/LICENSE).
    + *
    + * Copyright (c) 2013 Vaughan Newton
    + *
    + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
    + * associated documentation files (the "Software"), to deal in the Software without restriction, including
    + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    + * copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the
    + * following conditions:
    + *
    + * The above copyright notice and this permission notice shall be included in all copies or substantial
    + * portions of the Software.
    + *
    + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
    + * LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
    + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
    + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    + */
    +
    +// Package config provides functions for parsing INI configuration files.
    +package config
    +
    +import (
    +	"bufio"
    +	"fmt"
    +	"io"
    +	"os"
    +	"regexp"
    +	"strings"
    +)
    +
    +var (
    +	sectionRegex = regexp.MustCompile(`^\[(.*)\]$`)
    +	assignRegex  = regexp.MustCompile(`^([^=]+)=(.*)$`)
    +)
    +
    +// ErrSyntax is returned when there is a syntax error in an INI file.
    +type ErrSyntax struct {
    +	Line   int
    +	Source string // The contents of the erroneous line, without leading or trailing whitespace
    +}
    +
    +func (e ErrSyntax) Error() string {
    +	return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source)
    +}
    +
    +// A File represents a parsed INI file.
    +type File map[string]Section
    +
    +// A Section represents a single section of an INI file.
    +type Section map[string]string
    +
    +// Returns a named Section. A Section will be created if one does not already exist for the given name.
    +func (f File) Section(name string) Section {
    +	section := f[name]
    +	if section == nil {
    +		section = make(Section)
    +		f[name] = section
    +	}
    +	return section
    +}
    +
    +// Looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup.
    +func (f File) Get(section, key string) (value string, ok bool) {
    +	if s := f[section]; s != nil {
    +		value, ok = s[key]
    +	}
    +	return
    +}
    +
    +// Loads INI data from a reader and stores the data in the File.
    +func (f File) Load(in io.Reader) error {
    +	bufin, ok := in.(*bufio.Reader)
    +	if !ok {
    +		bufin = bufio.NewReader(in)
    +	}
    +	return parseFile(bufin, f)
    +}
    +
    +// Loads INI data from a named file and stores the data in the File.
    +func (f File) LoadFile(file string) (err error) {
    +	in, err := os.Open(file)
    +	if err != nil {
    +		return
    +	}
    +	defer in.Close()
    +	return f.Load(in)
    +}
    +
    +func parseFile(in *bufio.Reader, file File) (err error) {
    +	section := ""
    +	lineNum := 0
    +	for done := false; !done; {
    +		var line string
    +		if line, err = in.ReadString('\n'); err != nil {
    +			if err == io.EOF {
    +				done = true
    +			} else {
    +				return
    +			}
    +		}
    +		lineNum++
    +		line = strings.TrimSpace(line)
    +		if len(line) == 0 {
    +			// Skip blank lines
    +			continue
    +		}
    +		if line[0] == ';' || line[0] == '#' {
    +			// Skip comments
    +			continue
    +		}
    +
    +		if groups := assignRegex.FindStringSubmatch(line); groups != nil {
    +			key, val := groups[1], groups[2]
    +			key, val = strings.TrimSpace(key), strings.TrimSpace(val)
    +			file.Section(section)[key] = val
    +		} else if groups := sectionRegex.FindStringSubmatch(line); groups != nil {
    +			name := strings.TrimSpace(groups[1])
    +			section = name
    +			// Create the section if it does not exist
    +			file.Section(section)
    +		} else {
    +			return ErrSyntax{Line: lineNum, Source: line}
    +		}
    +
    +	}
    +	return nil
    +}
    +
    +// Loads and returns a File from a reader.
    +func Load(in io.Reader) (File, error) {
    +	file := make(File)
    +	err := file.Load(in)
    +	return file, err
    +}
    +
    +// Loads and returns an INI File from a file on disk.
    +func LoadFile(filename string) (File, error) {
    +	file := make(File)
    +	err := file.LoadFile(filename)
    +	return file, err
    +}
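A hedged example (not part of the patch) of loading credentials with this package; the section and key names below imitate a typical SoftLayer configuration file but are placeholders only.

	package main

	import (
		"fmt"
		"strings"

		"github.com/softlayer/softlayer-go/config"
	)

	func main() {
		ini := "; lines starting with ';' or '#' are comments\n" +
			"[softlayer]\n" +
			"username = exampleuser\n" +
			"api_key = abc123\n"

		f, err := config.Load(strings.NewReader(ini))
		if err != nil {
			panic(err)
		}

		user, ok := f.Get("softlayer", "username")
		fmt.Println(user, ok) // prints: exampleuser true
	}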
    diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/abuse.go b/vendor/github.com/softlayer/softlayer-go/datatypes/abuse.go
    new file mode 100644
    index 000000000000..8cc82eb32549
    --- /dev/null
    +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/abuse.go
    @@ -0,0 +1,32 @@
    +/**
    + * Copyright 2016 IBM Corp.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +/**
    + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
    + */
    +
    +package datatypes
    +
    +// no documentation yet
    +type Abuse_Lockdown_Resource struct {
    +	Entity
    +
    +	// no documentation yet
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// no documentation yet
    +	InvoiceItem *Billing_Invoice_Item `json:"invoiceItem,omitempty" xmlrpc:"invoiceItem,omitempty"`
    +}
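The generated datatypes follow a single pattern: every field is a pointer or slice tagged omitempty for both json and xmlrpc, so unset fields never appear on the wire. A small sketch (not part of the patch); the expected output assumes the embedded Entity type and the unset Billing_Invoice_Item fields serialize to nothing, which is the convention in this package:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/softlayer/softlayer-go/datatypes"
	)

	func main() {
		res := datatypes.Abuse_Lockdown_Resource{
			// Account is left nil and is therefore omitted entirely.
			InvoiceItem: &datatypes.Billing_Invoice_Item{},
		}

		b, _ := json.Marshal(res)
		fmt.Println(string(b)) // expected: {"invoiceItem":{}}
	}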
    diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/account.go b/vendor/github.com/softlayer/softlayer-go/datatypes/account.go
    new file mode 100644
    index 000000000000..78b547e3097b
    --- /dev/null
    +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/account.go
    @@ -0,0 +1,2986 @@
    +/**
    + * Copyright 2016 IBM Corp.
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +/**
    + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
    + */
    +
    +package datatypes
    +
    +// The SoftLayer_Account data type contains general information relating to a single SoftLayer customer account. Personal information in this type such as names, addresses, and phone numbers is assigned to the account only and not to users belonging to the account. The SoftLayer_Account data type contains a number of relational properties that are used by the SoftLayer customer portal to quickly present a variety of account related services to its users.
    +//
    +// SoftLayer customers are unable to change their company account information in the portal or the API. If you need to change this information please open a sales ticket in our customer portal and our account management staff will assist you.
    +type Account struct {
    +	Entity
    +
    +	// An email address that is responsible for abuse and legal inquiries on behalf of an account. For instance, new legal and abuse tickets are sent to this address.
    +	AbuseEmail *string `json:"abuseEmail,omitempty" xmlrpc:"abuseEmail,omitempty"`
    +
    +	// A count of email addresses that are responsible for abuse and legal inquiries on behalf of an account. For instance, new legal and abuse tickets are sent to these addresses.
    +	AbuseEmailCount *uint `json:"abuseEmailCount,omitempty" xmlrpc:"abuseEmailCount,omitempty"`
    +
    +	// Email addresses that are responsible for abuse and legal inquiries on behalf of an account. For instance, new legal and abuse tickets are sent to these addresses.
    +	AbuseEmails []Account_AbuseEmail `json:"abuseEmails,omitempty" xmlrpc:"abuseEmails,omitempty"`
    +
    +	// A count of the account contacts on an account.
    +	AccountContactCount *uint `json:"accountContactCount,omitempty" xmlrpc:"accountContactCount,omitempty"`
    +
    +	// The account contacts on an account.
    +	AccountContacts []Account_Contact `json:"accountContacts,omitempty" xmlrpc:"accountContacts,omitempty"`
    +
    +	// A count of the account software licenses owned by an account
    +	AccountLicenseCount *uint `json:"accountLicenseCount,omitempty" xmlrpc:"accountLicenseCount,omitempty"`
    +
    +	// The account software licenses owned by an account
    +	AccountLicenses []Software_AccountLicense `json:"accountLicenses,omitempty" xmlrpc:"accountLicenses,omitempty"`
    +
    +	// A count of
    +	AccountLinkCount *uint `json:"accountLinkCount,omitempty" xmlrpc:"accountLinkCount,omitempty"`
    +
    +	// no documentation yet
    +	AccountLinks []Account_Link `json:"accountLinks,omitempty" xmlrpc:"accountLinks,omitempty"`
    +
    +	// A flag indicating that the account has a managed resource.
    +	AccountManagedResourcesFlag *bool `json:"accountManagedResourcesFlag,omitempty" xmlrpc:"accountManagedResourcesFlag,omitempty"`
    +
    +	// An account's status presented in a more detailed data type.
    +	AccountStatus *Account_Status `json:"accountStatus,omitempty" xmlrpc:"accountStatus,omitempty"`
    +
    +	// A number reflecting the state of an account.
    +	AccountStatusId *int `json:"accountStatusId,omitempty" xmlrpc:"accountStatusId,omitempty"`
    +
    +	// The billing item associated with an account's monthly discount.
    +	ActiveAccountDiscountBillingItem *Billing_Item `json:"activeAccountDiscountBillingItem,omitempty" xmlrpc:"activeAccountDiscountBillingItem,omitempty"`
    +
    +	// A count of the active account software licenses owned by an account
    +	ActiveAccountLicenseCount *uint `json:"activeAccountLicenseCount,omitempty" xmlrpc:"activeAccountLicenseCount,omitempty"`
    +
    +	// The active account software licenses owned by an account
    +	ActiveAccountLicenses []Software_AccountLicense `json:"activeAccountLicenses,omitempty" xmlrpc:"activeAccountLicenses,omitempty"`
    +
    +	// A count of the active address(es) that belong to an account.
    +	ActiveAddressCount *uint `json:"activeAddressCount,omitempty" xmlrpc:"activeAddressCount,omitempty"`
    +
    +	// The active address(es) that belong to an account.
    +	ActiveAddresses []Account_Address `json:"activeAddresses,omitempty" xmlrpc:"activeAddresses,omitempty"`
    +
    +	// A count of all billing agreements for an account
    +	ActiveBillingAgreementCount *uint `json:"activeBillingAgreementCount,omitempty" xmlrpc:"activeBillingAgreementCount,omitempty"`
    +
    +	// All billing agreements for an account
    +	ActiveBillingAgreements []Account_Agreement `json:"activeBillingAgreements,omitempty" xmlrpc:"activeBillingAgreements,omitempty"`
    +
    +	// no documentation yet
    +	ActiveCatalystEnrollment *Catalyst_Enrollment `json:"activeCatalystEnrollment,omitempty" xmlrpc:"activeCatalystEnrollment,omitempty"`
    +
    +	// A count of the account's active top level colocation containers.
    +	ActiveColocationContainerCount *uint `json:"activeColocationContainerCount,omitempty" xmlrpc:"activeColocationContainerCount,omitempty"`
    +
    +	// The account's active top level colocation containers.
    +	ActiveColocationContainers []Billing_Item `json:"activeColocationContainers,omitempty" xmlrpc:"activeColocationContainers,omitempty"`
    +
    +	// Account's currently active Flexible Credit enrollment.
    +	ActiveFlexibleCreditEnrollment *FlexibleCredit_Enrollment `json:"activeFlexibleCreditEnrollment,omitempty" xmlrpc:"activeFlexibleCreditEnrollment,omitempty"`
    +
    +	// A count of
    +	ActiveNotificationSubscriberCount *uint `json:"activeNotificationSubscriberCount,omitempty" xmlrpc:"activeNotificationSubscriberCount,omitempty"`
    +
    +	// no documentation yet
    +	ActiveNotificationSubscribers []Notification_Subscriber `json:"activeNotificationSubscribers,omitempty" xmlrpc:"activeNotificationSubscribers,omitempty"`
    +
    +	// A count of an account's non-expired quotes.
    +	ActiveQuoteCount *uint `json:"activeQuoteCount,omitempty" xmlrpc:"activeQuoteCount,omitempty"`
    +
    +	// An account's non-expired quotes.
    +	ActiveQuotes []Billing_Order_Quote `json:"activeQuotes,omitempty" xmlrpc:"activeQuotes,omitempty"`
    +
    +	// A count of the virtual software licenses controlled by an account
    +	ActiveVirtualLicenseCount *uint `json:"activeVirtualLicenseCount,omitempty" xmlrpc:"activeVirtualLicenseCount,omitempty"`
    +
    +	// The virtual software licenses controlled by an account
    +	ActiveVirtualLicenses []Software_VirtualLicense `json:"activeVirtualLicenses,omitempty" xmlrpc:"activeVirtualLicenses,omitempty"`
    +
    +	// A count of an account's associated load balancers.
    +	AdcLoadBalancerCount *uint `json:"adcLoadBalancerCount,omitempty" xmlrpc:"adcLoadBalancerCount,omitempty"`
    +
    +	// An account's associated load balancers.
    +	AdcLoadBalancers []Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"adcLoadBalancers,omitempty" xmlrpc:"adcLoadBalancers,omitempty"`
    +
    +	// The first line of the mailing address belonging to an account.
    +	Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"`
    +
    +	// The second line of the mailing address belonging to an account.
    +	Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"`
    +
    +	// A count of all the address(es) that belong to an account.
    +	AddressCount *uint `json:"addressCount,omitempty" xmlrpc:"addressCount,omitempty"`
    +
    +	// All the address(es) that belong to an account.
    +	Addresses []Account_Address `json:"addresses,omitempty" xmlrpc:"addresses,omitempty"`
    +
    +	// An affiliate identifier associated with the customer account.
    +	AffiliateId *string `json:"affiliateId,omitempty" xmlrpc:"affiliateId,omitempty"`
    +
    +	// The billing items that will be on an account's next invoice.
    +	AllBillingItems []Billing_Item `json:"allBillingItems,omitempty" xmlrpc:"allBillingItems,omitempty"`
    +
    +	// A count of the billing items that will be on an account's next invoice.
    +	AllCommissionBillingItemCount *uint `json:"allCommissionBillingItemCount,omitempty" xmlrpc:"allCommissionBillingItemCount,omitempty"`
    +
    +	// The billing items that will be on an account's next invoice.
    +	AllCommissionBillingItems []Billing_Item `json:"allCommissionBillingItems,omitempty" xmlrpc:"allCommissionBillingItems,omitempty"`
    +
    +	// A count of the billing items that will be on an account's next invoice.
    +	AllRecurringTopLevelBillingItemCount *uint `json:"allRecurringTopLevelBillingItemCount,omitempty" xmlrpc:"allRecurringTopLevelBillingItemCount,omitempty"`
    +
    +	// The billing items that will be on an account's next invoice.
    +	AllRecurringTopLevelBillingItems []Billing_Item `json:"allRecurringTopLevelBillingItems,omitempty" xmlrpc:"allRecurringTopLevelBillingItems,omitempty"`
    +
    +	// The billing items that will be on an account's next invoice. Does not consider associated items.
    +	AllRecurringTopLevelBillingItemsUnfiltered []Billing_Item `json:"allRecurringTopLevelBillingItemsUnfiltered,omitempty" xmlrpc:"allRecurringTopLevelBillingItemsUnfiltered,omitempty"`
    +
    +	// A count of the billing items that will be on an account's next invoice. Does not consider associated items.
    +	AllRecurringTopLevelBillingItemsUnfilteredCount *uint `json:"allRecurringTopLevelBillingItemsUnfilteredCount,omitempty" xmlrpc:"allRecurringTopLevelBillingItemsUnfilteredCount,omitempty"`
    +
    +	// A count of the billing items that will be on an account's next invoice.
    +	AllSubnetBillingItemCount *uint `json:"allSubnetBillingItemCount,omitempty" xmlrpc:"allSubnetBillingItemCount,omitempty"`
    +
    +	// The billing items that will be on an account's next invoice.
    +	AllSubnetBillingItems []Billing_Item `json:"allSubnetBillingItems,omitempty" xmlrpc:"allSubnetBillingItems,omitempty"`
    +
    +	// A count of all billing items of an account.
    +	AllTopLevelBillingItemCount *uint `json:"allTopLevelBillingItemCount,omitempty" xmlrpc:"allTopLevelBillingItemCount,omitempty"`
    +
    +	// All billing items of an account.
    +	AllTopLevelBillingItems []Billing_Item `json:"allTopLevelBillingItems,omitempty" xmlrpc:"allTopLevelBillingItems,omitempty"`
    +
    +	// The billing items that will be on an account's next invoice. Does not consider associated items.
    +	AllTopLevelBillingItemsUnfiltered []Billing_Item `json:"allTopLevelBillingItemsUnfiltered,omitempty" xmlrpc:"allTopLevelBillingItemsUnfiltered,omitempty"`
    +
    +	// A count of the billing items that will be on an account's next invoice. Does not consider associated items.
    +	AllTopLevelBillingItemsUnfilteredCount *uint `json:"allTopLevelBillingItemsUnfilteredCount,omitempty" xmlrpc:"allTopLevelBillingItemsUnfilteredCount,omitempty"`
    +
    +	// Indicates whether this account is allowed to silently migrate to use IBMid Authentication.
    +	AllowIbmIdSilentMigrationFlag *bool `json:"allowIbmIdSilentMigrationFlag,omitempty" xmlrpc:"allowIbmIdSilentMigrationFlag,omitempty"`
    +
    +	// The number of PPTP VPN users allowed on an account.
    +	AllowedPptpVpnQuantity *int `json:"allowedPptpVpnQuantity,omitempty" xmlrpc:"allowedPptpVpnQuantity,omitempty"`
    +
    +	// Flag indicating if this account can be linked with Bluemix.
    +	AllowsBluemixAccountLinkingFlag *bool `json:"allowsBluemixAccountLinkingFlag,omitempty" xmlrpc:"allowsBluemixAccountLinkingFlag,omitempty"`
    +
    +	// A secondary phone number assigned to an account.
    +	AlternatePhone *string `json:"alternatePhone,omitempty" xmlrpc:"alternatePhone,omitempty"`
    +
    +	// A count of an account's associated application delivery controller records.
    +	ApplicationDeliveryControllerCount *uint `json:"applicationDeliveryControllerCount,omitempty" xmlrpc:"applicationDeliveryControllerCount,omitempty"`
    +
    +	// An account's associated application delivery controller records.
    +	ApplicationDeliveryControllers []Network_Application_Delivery_Controller `json:"applicationDeliveryControllers,omitempty" xmlrpc:"applicationDeliveryControllers,omitempty"`
    +
    +	// A count of the account attribute values for a SoftLayer customer account.
    +	AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"`
    +
    +	// The account attribute values for a SoftLayer customer account.
    +	Attributes []Account_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"`
    +
    +	// A count of the public network VLANs assigned to an account.
    +	AvailablePublicNetworkVlanCount *uint `json:"availablePublicNetworkVlanCount,omitempty" xmlrpc:"availablePublicNetworkVlanCount,omitempty"`
    +
    +	// The public network VLANs assigned to an account.
    +	AvailablePublicNetworkVlans []Network_Vlan `json:"availablePublicNetworkVlans,omitempty" xmlrpc:"availablePublicNetworkVlans,omitempty"`
    +
    +	// The account balance of a SoftLayer customer account. An account's balance is the amount of money owed to SoftLayer by the account holder, returned as a floating point number with two decimal places, measured in US Dollars ($USD). A negative account balance means the account holder has overpaid and is owed money by SoftLayer.
    +	Balance *Float64 `json:"balance,omitempty" xmlrpc:"balance,omitempty"`
    +
    +	// A count of the bandwidth allotments for an account.
    +	BandwidthAllotmentCount *uint `json:"bandwidthAllotmentCount,omitempty" xmlrpc:"bandwidthAllotmentCount,omitempty"`
    +
    +	// The bandwidth allotments for an account.
    +	BandwidthAllotments []Network_Bandwidth_Version1_Allotment `json:"bandwidthAllotments,omitempty" xmlrpc:"bandwidthAllotments,omitempty"`
    +
    +	// The bandwidth allotments for an account currently over allocation.
    +	BandwidthAllotmentsOverAllocation []Network_Bandwidth_Version1_Allotment `json:"bandwidthAllotmentsOverAllocation,omitempty" xmlrpc:"bandwidthAllotmentsOverAllocation,omitempty"`
    +
    +	// A count of the bandwidth allotments for an account currently over allocation.
    +	BandwidthAllotmentsOverAllocationCount *uint `json:"bandwidthAllotmentsOverAllocationCount,omitempty" xmlrpc:"bandwidthAllotmentsOverAllocationCount,omitempty"`
    +
    +	// The bandwidth allotments for an account projected to go over allocation.
    +	BandwidthAllotmentsProjectedOverAllocation []Network_Bandwidth_Version1_Allotment `json:"bandwidthAllotmentsProjectedOverAllocation,omitempty" xmlrpc:"bandwidthAllotmentsProjectedOverAllocation,omitempty"`
    +
    +	// A count of the bandwidth allotments for an account projected to go over allocation.
    +	BandwidthAllotmentsProjectedOverAllocationCount *uint `json:"bandwidthAllotmentsProjectedOverAllocationCount,omitempty" xmlrpc:"bandwidthAllotmentsProjectedOverAllocationCount,omitempty"`
    +
    +	// A count of an account's associated bare metal server objects.
    +	BareMetalInstanceCount *uint `json:"bareMetalInstanceCount,omitempty" xmlrpc:"bareMetalInstanceCount,omitempty"`
    +
    +	// An account's associated bare metal server objects.
    +	BareMetalInstances []Hardware `json:"bareMetalInstances,omitempty" xmlrpc:"bareMetalInstances,omitempty"`
    +
    +	// A count of all billing agreements for an account
    +	BillingAgreementCount *uint `json:"billingAgreementCount,omitempty" xmlrpc:"billingAgreementCount,omitempty"`
    +
    +	// All billing agreements for an account
    +	BillingAgreements []Account_Agreement `json:"billingAgreements,omitempty" xmlrpc:"billingAgreements,omitempty"`
    +
    +	// An account's billing information.
    +	BillingInfo *Billing_Info `json:"billingInfo,omitempty" xmlrpc:"billingInfo,omitempty"`
    +
    +	// A count of private template group objects (parent and children) and the shared template group objects (parent only) for an account.
    +	BlockDeviceTemplateGroupCount *uint `json:"blockDeviceTemplateGroupCount,omitempty" xmlrpc:"blockDeviceTemplateGroupCount,omitempty"`
    +
    +	// Private template group objects (parent and children) and the shared template group objects (parent only) for an account.
    +	BlockDeviceTemplateGroups []Virtual_Guest_Block_Device_Template_Group `json:"blockDeviceTemplateGroups,omitempty" xmlrpc:"blockDeviceTemplateGroups,omitempty"`
    +
    +	// The Bluemix account link associated with this SoftLayer account, if one exists.
    +	BluemixAccountLink *Account_Link_Bluemix `json:"bluemixAccountLink,omitempty" xmlrpc:"bluemixAccountLink,omitempty"`
    +
    +	// Returns true if this account is linked to IBM Bluemix, false if not.
    +	BluemixLinkedFlag *bool `json:"bluemixLinkedFlag,omitempty" xmlrpc:"bluemixLinkedFlag,omitempty"`
    +
    +	// no documentation yet
    +	Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"`
    +
    +	// no documentation yet
    +	BrandAccountFlag *bool `json:"brandAccountFlag,omitempty" xmlrpc:"brandAccountFlag,omitempty"`
    +
    +	// The Brand tied to an account.
    +	BrandId *int `json:"brandId,omitempty" xmlrpc:"brandId,omitempty"`
    +
    +	// The brand keyName.
    +	BrandKeyName *string `json:"brandKeyName,omitempty" xmlrpc:"brandKeyName,omitempty"`
    +
    +	// The Business Partner details for the account. Country Enterprise Code, Channel, Segment, Reseller Level.
    +	BusinessPartner *Account_Business_Partner `json:"businessPartner,omitempty" xmlrpc:"businessPartner,omitempty"`
    +
    +	// Indicating whether this account can order additional Vlans.
    +	CanOrderAdditionalVlansFlag *bool `json:"canOrderAdditionalVlansFlag,omitempty" xmlrpc:"canOrderAdditionalVlansFlag,omitempty"`
    +
    +	// A count of an account's active carts.
    +	CartCount *uint `json:"cartCount,omitempty" xmlrpc:"cartCount,omitempty"`
    +
    +	// An account's active carts.
    +	Carts []Billing_Order_Quote `json:"carts,omitempty" xmlrpc:"carts,omitempty"`
    +
    +	// A count of
    +	CatalystEnrollmentCount *uint `json:"catalystEnrollmentCount,omitempty" xmlrpc:"catalystEnrollmentCount,omitempty"`
    +
    +	// no documentation yet
    +	CatalystEnrollments []Catalyst_Enrollment `json:"catalystEnrollments,omitempty" xmlrpc:"catalystEnrollments,omitempty"`
    +
    +	// A count of an account's associated CDN accounts.
    +	CdnAccountCount *uint `json:"cdnAccountCount,omitempty" xmlrpc:"cdnAccountCount,omitempty"`
    +
    +	// An account's associated CDN accounts.
    +	CdnAccounts []Network_ContentDelivery_Account `json:"cdnAccounts,omitempty" xmlrpc:"cdnAccounts,omitempty"`
    +
    +	// The city of the mailing address belonging to an account.
    +	City *string `json:"city,omitempty" xmlrpc:"city,omitempty"`
    +
    +	// Whether an account is exempt from taxes on their invoices.
    +	ClaimedTaxExemptTxFlag *bool `json:"claimedTaxExemptTxFlag,omitempty" xmlrpc:"claimedTaxExemptTxFlag,omitempty"`
    +
    +	// A count of all closed tickets associated with an account.
    +	ClosedTicketCount *uint `json:"closedTicketCount,omitempty" xmlrpc:"closedTicketCount,omitempty"`
    +
    +	// All closed tickets associated with an account.
    +	ClosedTickets []Ticket `json:"closedTickets,omitempty" xmlrpc:"closedTickets,omitempty"`
    +
    +	// The company name associated with an account.
    +	CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"`
    +
    +	// A two-letter abbreviation of the country in the mailing address belonging to an account.
    +	Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"`
    +
    +	// The date an account was created.
    +	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
    +
    +	// A count of datacenters which contain subnets that the account has access to route.
    +	DatacentersWithSubnetAllocationCount *uint `json:"datacentersWithSubnetAllocationCount,omitempty" xmlrpc:"datacentersWithSubnetAllocationCount,omitempty"`
    +
    +	// Datacenters which contain subnets that the account has access to route.
    +	DatacentersWithSubnetAllocations []Location `json:"datacentersWithSubnetAllocations,omitempty" xmlrpc:"datacentersWithSubnetAllocations,omitempty"`
    +
    +	// A count of an account's associated virtual dedicated host objects.
    +	DedicatedHostCount *uint `json:"dedicatedHostCount,omitempty" xmlrpc:"dedicatedHostCount,omitempty"`
    +
    +	// An account's associated virtual dedicated host objects.
    +	DedicatedHosts []Virtual_DedicatedHost `json:"dedicatedHosts,omitempty" xmlrpc:"dedicatedHosts,omitempty"`
    +
    +	// Device Fingerprint Identifier - Used internally and can safely be ignored.
    +	DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"`
    +
    +	// A flag indicating whether payments are processed for this account.
    +	DisablePaymentProcessingFlag *bool `json:"disablePaymentProcessingFlag,omitempty" xmlrpc:"disablePaymentProcessingFlag,omitempty"`
    +
    +	// A count of the SoftLayer employees that an account is assigned to.
    +	DisplaySupportRepresentativeAssignmentCount *uint `json:"displaySupportRepresentativeAssignmentCount,omitempty" xmlrpc:"displaySupportRepresentativeAssignmentCount,omitempty"`
    +
    +	// The SoftLayer employees that an account is assigned to.
    +	DisplaySupportRepresentativeAssignments []Account_Attachment_Employee `json:"displaySupportRepresentativeAssignments,omitempty" xmlrpc:"displaySupportRepresentativeAssignments,omitempty"`
    +
    +	// A count of the DNS domains associated with an account.
    +	DomainCount *uint `json:"domainCount,omitempty" xmlrpc:"domainCount,omitempty"`
    +
    +	// A count of
    +	DomainRegistrationCount *uint `json:"domainRegistrationCount,omitempty" xmlrpc:"domainRegistrationCount,omitempty"`
    +
    +	// no documentation yet
    +	DomainRegistrations []Dns_Domain_Registration `json:"domainRegistrations,omitempty" xmlrpc:"domainRegistrations,omitempty"`
    +
    +	// The DNS domains associated with an account.
    +	Domains []Dns_Domain `json:"domains,omitempty" xmlrpc:"domains,omitempty"`
    +
    +	// A count of the DNS domains associated with an account that were not created as a result of a secondary DNS zone transfer.
    +	DomainsWithoutSecondaryDnsRecordCount *uint `json:"domainsWithoutSecondaryDnsRecordCount,omitempty" xmlrpc:"domainsWithoutSecondaryDnsRecordCount,omitempty"`
    +
    +	// The DNS domains associated with an account that were not created as a result of a secondary DNS zone transfer.
    +	DomainsWithoutSecondaryDnsRecords []Dns_Domain `json:"domainsWithoutSecondaryDnsRecords,omitempty" xmlrpc:"domainsWithoutSecondaryDnsRecords,omitempty"`
    +
    +	// A general email address assigned to an account.
    +	Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
    +
    +	// Boolean flag dictating whether or not this account has the EU Supported flag. This flag indicates that this account uses IBM Cloud services to process EU citizen's personal data.
    +	EuSupportedFlag *bool `json:"euSupportedFlag,omitempty" xmlrpc:"euSupportedFlag,omitempty"`
    +
    +	// The total capacity of Legacy EVault Volumes on an account, in GB.
    +	EvaultCapacityGB *uint `json:"evaultCapacityGB,omitempty" xmlrpc:"evaultCapacityGB,omitempty"`
    +
    +	// A count of an account's master EVault user. This is only used when an account has EVault service.
    +	EvaultMasterUserCount *uint `json:"evaultMasterUserCount,omitempty" xmlrpc:"evaultMasterUserCount,omitempty"`
    +
    +	// An account's master EVault user. This is only used when an account has EVault service.
    +	EvaultMasterUsers []Account_Password `json:"evaultMasterUsers,omitempty" xmlrpc:"evaultMasterUsers,omitempty"`
    +
    +	// An account's associated EVault storage volumes.
    +	EvaultNetworkStorage []Network_Storage `json:"evaultNetworkStorage,omitempty" xmlrpc:"evaultNetworkStorage,omitempty"`
    +
    +	// A count of an account's associated EVault storage volumes.
    +	EvaultNetworkStorageCount *uint `json:"evaultNetworkStorageCount,omitempty" xmlrpc:"evaultNetworkStorageCount,omitempty"`
    +
    +	// A count of stored security certificates that are expired (ie. SSL)
    +	ExpiredSecurityCertificateCount *uint `json:"expiredSecurityCertificateCount,omitempty" xmlrpc:"expiredSecurityCertificateCount,omitempty"`
    +
    +	// Stored security certificates that are expired (ie. SSL)
    +	ExpiredSecurityCertificates []Security_Certificate `json:"expiredSecurityCertificates,omitempty" xmlrpc:"expiredSecurityCertificates,omitempty"`
    +
    +	// A count of logs of who entered a colocation area which is assigned to this account, or when a user under this account enters a datacenter.
    +	FacilityLogCount *uint `json:"facilityLogCount,omitempty" xmlrpc:"facilityLogCount,omitempty"`
    +
    +	// Logs of who entered a colocation area which is assigned to this account, or when a user under this account enters a datacenter.
    +	FacilityLogs []User_Access_Facility_Log `json:"facilityLogs,omitempty" xmlrpc:"facilityLogs,omitempty"`
    +
    +	// A fax phone number assigned to an account.
    +	FaxPhone *string `json:"faxPhone,omitempty" xmlrpc:"faxPhone,omitempty"`
    +
    +	// Each customer account is listed under a single individual. This is that individual's first name.
    +	FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
    +
    +	// A count of all of the account's current and former Flexible Credit enrollments.
    +	FlexibleCreditEnrollmentCount *uint `json:"flexibleCreditEnrollmentCount,omitempty" xmlrpc:"flexibleCreditEnrollmentCount,omitempty"`
    +
    +	// All of the account's current and former Flexible Credit enrollments.
    +	FlexibleCreditEnrollments []FlexibleCredit_Enrollment `json:"flexibleCreditEnrollments,omitempty" xmlrpc:"flexibleCreditEnrollments,omitempty"`
    +
    +	// Timestamp representing the point in time when an account is required to link with PaaS.
    +	ForcePaasAccountLinkDate *string `json:"forcePaasAccountLinkDate,omitempty" xmlrpc:"forcePaasAccountLinkDate,omitempty"`
    +
    +	// A count of
    +	GlobalIpRecordCount *uint `json:"globalIpRecordCount,omitempty" xmlrpc:"globalIpRecordCount,omitempty"`
    +
    +	// no documentation yet
    +	GlobalIpRecords []Network_Subnet_IpAddress_Global `json:"globalIpRecords,omitempty" xmlrpc:"globalIpRecords,omitempty"`
    +
    +	// A count of
    +	GlobalIpv4RecordCount *uint `json:"globalIpv4RecordCount,omitempty" xmlrpc:"globalIpv4RecordCount,omitempty"`
    +
    +	// no documentation yet
    +	GlobalIpv4Records []Network_Subnet_IpAddress_Global `json:"globalIpv4Records,omitempty" xmlrpc:"globalIpv4Records,omitempty"`
    +
    +	// A count of
    +	GlobalIpv6RecordCount *uint `json:"globalIpv6RecordCount,omitempty" xmlrpc:"globalIpv6RecordCount,omitempty"`
    +
    +	// no documentation yet
    +	GlobalIpv6Records []Network_Subnet_IpAddress_Global `json:"globalIpv6Records,omitempty" xmlrpc:"globalIpv6Records,omitempty"`
    +
    +	// A count of the global load balancer accounts for a softlayer customer account.
    +	GlobalLoadBalancerAccountCount *uint `json:"globalLoadBalancerAccountCount,omitempty" xmlrpc:"globalLoadBalancerAccountCount,omitempty"`
    +
    +	// The global load balancer accounts for a softlayer customer account.
    +	GlobalLoadBalancerAccounts []Network_LoadBalancer_Global_Account `json:"globalLoadBalancerAccounts,omitempty" xmlrpc:"globalLoadBalancerAccounts,omitempty"`
    +
    +	// An account's associated hardware objects.
    +	Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
    +
    +	// A count of an account's associated hardware objects.
    +	HardwareCount *uint `json:"hardwareCount,omitempty" xmlrpc:"hardwareCount,omitempty"`
    +
    +	// An account's associated hardware objects currently over bandwidth allocation.
    +	HardwareOverBandwidthAllocation []Hardware `json:"hardwareOverBandwidthAllocation,omitempty" xmlrpc:"hardwareOverBandwidthAllocation,omitempty"`
    +
    +	// A count of an account's associated hardware objects currently over bandwidth allocation.
    +	HardwareOverBandwidthAllocationCount *uint `json:"hardwareOverBandwidthAllocationCount,omitempty" xmlrpc:"hardwareOverBandwidthAllocationCount,omitempty"`
    +
    +	// An account's associated hardware objects projected to go over bandwidth allocation.
    +	HardwareProjectedOverBandwidthAllocation []Hardware `json:"hardwareProjectedOverBandwidthAllocation,omitempty" xmlrpc:"hardwareProjectedOverBandwidthAllocation,omitempty"`
    +
    +	// A count of an account's associated hardware objects projected to go over bandwidth allocation.
    +	HardwareProjectedOverBandwidthAllocationCount *uint `json:"hardwareProjectedOverBandwidthAllocationCount,omitempty" xmlrpc:"hardwareProjectedOverBandwidthAllocationCount,omitempty"`
    +
    +	// All hardware associated with an account that has the cPanel web hosting control panel installed.
    +	HardwareWithCpanel []Hardware `json:"hardwareWithCpanel,omitempty" xmlrpc:"hardwareWithCpanel,omitempty"`
    +
    +	// A count of all hardware associated with an account that has the cPanel web hosting control panel installed.
    +	HardwareWithCpanelCount *uint `json:"hardwareWithCpanelCount,omitempty" xmlrpc:"hardwareWithCpanelCount,omitempty"`
    +
    +	// All hardware associated with an account that has the Helm web hosting control panel installed.
    +	HardwareWithHelm []Hardware `json:"hardwareWithHelm,omitempty" xmlrpc:"hardwareWithHelm,omitempty"`
    +
    +	// A count of all hardware associated with an account that has the Helm web hosting control panel installed.
    +	HardwareWithHelmCount *uint `json:"hardwareWithHelmCount,omitempty" xmlrpc:"hardwareWithHelmCount,omitempty"`
    +
    +	// All hardware associated with an account that has McAfee Secure software components.
    +	HardwareWithMcafee []Hardware `json:"hardwareWithMcafee,omitempty" xmlrpc:"hardwareWithMcafee,omitempty"`
    +
    +	// All hardware associated with an account that has McAfee Secure AntiVirus for Redhat software components.
    +	HardwareWithMcafeeAntivirusRedhat []Hardware `json:"hardwareWithMcafeeAntivirusRedhat,omitempty" xmlrpc:"hardwareWithMcafeeAntivirusRedhat,omitempty"`
    +
    +	// A count of all hardware associated with an account that has McAfee Secure AntiVirus for Redhat software components.
    +	HardwareWithMcafeeAntivirusRedhatCount *uint `json:"hardwareWithMcafeeAntivirusRedhatCount,omitempty" xmlrpc:"hardwareWithMcafeeAntivirusRedhatCount,omitempty"`
    +
    +	// A count of all hardware associated with an account that has McAfee Secure AntiVirus for Windows software components.
    +	HardwareWithMcafeeAntivirusWindowCount *uint `json:"hardwareWithMcafeeAntivirusWindowCount,omitempty" xmlrpc:"hardwareWithMcafeeAntivirusWindowCount,omitempty"`
    +
    +	// All hardware associated with an account that has McAfee Secure AntiVirus for Windows software components.
    +	HardwareWithMcafeeAntivirusWindows []Hardware `json:"hardwareWithMcafeeAntivirusWindows,omitempty" xmlrpc:"hardwareWithMcafeeAntivirusWindows,omitempty"`
    +
    +	// A count of all hardware associated with an account that has McAfee Secure software components.
    +	HardwareWithMcafeeCount *uint `json:"hardwareWithMcafeeCount,omitempty" xmlrpc:"hardwareWithMcafeeCount,omitempty"`
    +
    +	// All hardware associated with an account that has McAfee Secure Intrusion Detection System software components.
    +	HardwareWithMcafeeIntrusionDetectionSystem []Hardware `json:"hardwareWithMcafeeIntrusionDetectionSystem,omitempty" xmlrpc:"hardwareWithMcafeeIntrusionDetectionSystem,omitempty"`
    +
    +	// A count of all hardware associated with an account that has McAfee Secure Intrusion Detection System software components.
    +	HardwareWithMcafeeIntrusionDetectionSystemCount *uint `json:"hardwareWithMcafeeIntrusionDetectionSystemCount,omitempty" xmlrpc:"hardwareWithMcafeeIntrusionDetectionSystemCount,omitempty"`
    +
    +	// All hardware associated with an account that has the Plesk web hosting control panel installed.
    +	HardwareWithPlesk []Hardware `json:"hardwareWithPlesk,omitempty" xmlrpc:"hardwareWithPlesk,omitempty"`
    +
    +	// A count of all hardware associated with an account that has the Plesk web hosting control panel installed.
    +	HardwareWithPleskCount *uint `json:"hardwareWithPleskCount,omitempty" xmlrpc:"hardwareWithPleskCount,omitempty"`
    +
    +	// All hardware associated with an account that has the QuantaStor storage system installed.
    +	HardwareWithQuantastor []Hardware `json:"hardwareWithQuantastor,omitempty" xmlrpc:"hardwareWithQuantastor,omitempty"`
    +
    +	// A count of all hardware associated with an account that has the QuantaStor storage system installed.
    +	HardwareWithQuantastorCount *uint `json:"hardwareWithQuantastorCount,omitempty" xmlrpc:"hardwareWithQuantastorCount,omitempty"`
    +
    +	// All hardware associated with an account that has the Urchin web traffic analytics package installed.
    +	HardwareWithUrchin []Hardware `json:"hardwareWithUrchin,omitempty" xmlrpc:"hardwareWithUrchin,omitempty"`
    +
    +	// A count of all hardware associated with an account that has the Urchin web traffic analytics package installed.
    +	HardwareWithUrchinCount *uint `json:"hardwareWithUrchinCount,omitempty" xmlrpc:"hardwareWithUrchinCount,omitempty"`
    +
    +	// A count of all hardware associated with an account that is running a version of the Microsoft Windows operating system.
    +	HardwareWithWindowCount *uint `json:"hardwareWithWindowCount,omitempty" xmlrpc:"hardwareWithWindowCount,omitempty"`
    +
    +	// All hardware associated with an account that is running a version of the Microsoft Windows operating system.
    +	HardwareWithWindows []Hardware `json:"hardwareWithWindows,omitempty" xmlrpc:"hardwareWithWindows,omitempty"`
    +
    +	// Return 1 if any of the account's hardware has the EVault Bare Metal Server Restore Plugin, otherwise 0.
    +	HasEvaultBareMetalRestorePluginFlag *bool `json:"hasEvaultBareMetalRestorePluginFlag,omitempty" xmlrpc:"hasEvaultBareMetalRestorePluginFlag,omitempty"`
    +
    +	// Return 1 if any of the account's hardware has an installation of Idera Server Backup, otherwise 0.
    +	HasIderaBareMetalRestorePluginFlag *bool `json:"hasIderaBareMetalRestorePluginFlag,omitempty" xmlrpc:"hasIderaBareMetalRestorePluginFlag,omitempty"`
    +
    +	// The number of orders in a PENDING status for a SoftLayer customer account.
    +	HasPendingOrder *uint `json:"hasPendingOrder,omitempty" xmlrpc:"hasPendingOrder,omitempty"`
    +
    +	// Return 1 if any of the account's hardware has an installation of R1Soft CDP, otherwise 0.
    +	HasR1softBareMetalRestorePluginFlag *bool `json:"hasR1softBareMetalRestorePluginFlag,omitempty" xmlrpc:"hasR1softBareMetalRestorePluginFlag,omitempty"`
    +
    +	// A count of an account's associated hourly bare metal server objects.
    +	HourlyBareMetalInstanceCount *uint `json:"hourlyBareMetalInstanceCount,omitempty" xmlrpc:"hourlyBareMetalInstanceCount,omitempty"`
    +
    +	// An account's associated hourly bare metal server objects.
    +	HourlyBareMetalInstances []Hardware `json:"hourlyBareMetalInstances,omitempty" xmlrpc:"hourlyBareMetalInstances,omitempty"`
    +
    +	// A count of hourly service billing items that will be on an account's next invoice.
    +	HourlyServiceBillingItemCount *uint `json:"hourlyServiceBillingItemCount,omitempty" xmlrpc:"hourlyServiceBillingItemCount,omitempty"`
    +
    +	// Hourly service billing items that will be on an account's next invoice.
    +	HourlyServiceBillingItems []Billing_Item `json:"hourlyServiceBillingItems,omitempty" xmlrpc:"hourlyServiceBillingItems,omitempty"`
    +
    +	// A count of an account's associated hourly virtual guest objects.
    +	HourlyVirtualGuestCount *uint `json:"hourlyVirtualGuestCount,omitempty" xmlrpc:"hourlyVirtualGuestCount,omitempty"`
    +
    +	// An account's associated hourly virtual guest objects.
    +	HourlyVirtualGuests []Virtual_Guest `json:"hourlyVirtualGuests,omitempty" xmlrpc:"hourlyVirtualGuests,omitempty"`
    +
    +	// An account's associated Virtual Storage volumes.
    +	HubNetworkStorage []Network_Storage `json:"hubNetworkStorage,omitempty" xmlrpc:"hubNetworkStorage,omitempty"`
    +
    +	// A count of an account's associated Virtual Storage volumes.
    +	HubNetworkStorageCount *uint `json:"hubNetworkStorageCount,omitempty" xmlrpc:"hubNetworkStorageCount,omitempty"`
    +
    +	// Unique identifier for a customer used throughout IBM.
    +	IbmCustomerNumber *string `json:"ibmCustomerNumber,omitempty" xmlrpc:"ibmCustomerNumber,omitempty"`
    +
    +	// Indicates whether this account requires IBMid authentication.
    +	IbmIdAuthenticationRequiredFlag *bool `json:"ibmIdAuthenticationRequiredFlag,omitempty" xmlrpc:"ibmIdAuthenticationRequiredFlag,omitempty"`
    +
    +	// Timestamp representing the point in time when an account is required to use IBMid authentication.
    +	IbmIdMigrationExpirationTimestamp *string `json:"ibmIdMigrationExpirationTimestamp,omitempty" xmlrpc:"ibmIdMigrationExpirationTimestamp,omitempty"`
    +
    +	// A customer account's internal identifier. Account numbers are typically preceded by the string "SL" in the customer portal. Every SoftLayer account has at least one portal user whose username follows the "SL" + account number naming scheme.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// An in progress request to switch billing systems.
    +	InProgressExternalAccountSetup *Account_External_Setup `json:"inProgressExternalAccountSetup,omitempty" xmlrpc:"inProgressExternalAccountSetup,omitempty"`
    +
    +	// A count of an account's internal notes.
    +	InternalNoteCount *uint `json:"internalNoteCount,omitempty" xmlrpc:"internalNoteCount,omitempty"`
    +
    +	// no documentation yet
    +	InternalNotes []Account_Note `json:"internalNotes,omitempty" xmlrpc:"internalNotes,omitempty"`
    +
    +	// A count of an account's associated billing invoices.
    +	InvoiceCount *uint `json:"invoiceCount,omitempty" xmlrpc:"invoiceCount,omitempty"`
    +
    +	// An account's associated billing invoices.
    +	Invoices []Billing_Invoice `json:"invoices,omitempty" xmlrpc:"invoices,omitempty"`
    +
    +	// A count of an account's associated IP addresses.
    +	IpAddressCount *uint `json:"ipAddressCount,omitempty" xmlrpc:"ipAddressCount,omitempty"`
    +
    +	// no documentation yet
    +	IpAddresses []Network_Subnet_IpAddress `json:"ipAddresses,omitempty" xmlrpc:"ipAddresses,omitempty"`
    +
    +	// A flag indicating if an account belongs to a reseller or not.
    +	IsReseller *int `json:"isReseller,omitempty" xmlrpc:"isReseller,omitempty"`
    +
    +	// An account's associated iSCSI storage volumes.
    +	IscsiNetworkStorage []Network_Storage `json:"iscsiNetworkStorage,omitempty" xmlrpc:"iscsiNetworkStorage,omitempty"`
    +
    +	// A count of an account's associated iSCSI storage volumes.
    +	IscsiNetworkStorageCount *uint `json:"iscsiNetworkStorageCount,omitempty" xmlrpc:"iscsiNetworkStorageCount,omitempty"`
    +
    +	// The most recently canceled billing item.
    +	LastCanceledBillingItem *Billing_Item `json:"lastCanceledBillingItem,omitempty" xmlrpc:"lastCanceledBillingItem,omitempty"`
    +
    +	// The most recent cancelled server billing item.
    +	LastCancelledServerBillingItem *Billing_Item `json:"lastCancelledServerBillingItem,omitempty" xmlrpc:"lastCancelledServerBillingItem,omitempty"`
    +
    +	// A count of the five most recently closed abuse tickets associated with an account.
    +	LastFiveClosedAbuseTicketCount *uint `json:"lastFiveClosedAbuseTicketCount,omitempty" xmlrpc:"lastFiveClosedAbuseTicketCount,omitempty"`
    +
    +	// The five most recently closed abuse tickets associated with an account.
    +	LastFiveClosedAbuseTickets []Ticket `json:"lastFiveClosedAbuseTickets,omitempty" xmlrpc:"lastFiveClosedAbuseTickets,omitempty"`
    +
    +	// A count of the five most recently closed accounting tickets associated with an account.
    +	LastFiveClosedAccountingTicketCount *uint `json:"lastFiveClosedAccountingTicketCount,omitempty" xmlrpc:"lastFiveClosedAccountingTicketCount,omitempty"`
    +
    +	// The five most recently closed accounting tickets associated with an account.
    +	LastFiveClosedAccountingTickets []Ticket `json:"lastFiveClosedAccountingTickets,omitempty" xmlrpc:"lastFiveClosedAccountingTickets,omitempty"`
    +
    +	// A count of the five most recently closed tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account.
    +	LastFiveClosedOtherTicketCount *uint `json:"lastFiveClosedOtherTicketCount,omitempty" xmlrpc:"lastFiveClosedOtherTicketCount,omitempty"`
    +
    +	// The five most recently closed tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account.
    +	LastFiveClosedOtherTickets []Ticket `json:"lastFiveClosedOtherTickets,omitempty" xmlrpc:"lastFiveClosedOtherTickets,omitempty"`
    +
    +	// A count of the five most recently closed sales tickets associated with an account.
    +	LastFiveClosedSalesTicketCount *uint `json:"lastFiveClosedSalesTicketCount,omitempty" xmlrpc:"lastFiveClosedSalesTicketCount,omitempty"`
    +
    +	// The five most recently closed sales tickets associated with an account.
    +	LastFiveClosedSalesTickets []Ticket `json:"lastFiveClosedSalesTickets,omitempty" xmlrpc:"lastFiveClosedSalesTickets,omitempty"`
    +
    +	// A count of the five most recently closed support tickets associated with an account.
    +	LastFiveClosedSupportTicketCount *uint `json:"lastFiveClosedSupportTicketCount,omitempty" xmlrpc:"lastFiveClosedSupportTicketCount,omitempty"`
    +
    +	// The five most recently closed support tickets associated with an account.
    +	LastFiveClosedSupportTickets []Ticket `json:"lastFiveClosedSupportTickets,omitempty" xmlrpc:"lastFiveClosedSupportTickets,omitempty"`
    +
    +	// A count of the five most recently closed tickets associated with an account.
    +	LastFiveClosedTicketCount *uint `json:"lastFiveClosedTicketCount,omitempty" xmlrpc:"lastFiveClosedTicketCount,omitempty"`
    +
    +	// The five most recently closed tickets associated with an account.
    +	LastFiveClosedTickets []Ticket `json:"lastFiveClosedTickets,omitempty" xmlrpc:"lastFiveClosedTickets,omitempty"`
    +
    +	// Each customer account is listed under a single individual. This is that individual's last name.
    +	LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
    +
    +	// Whether an account has late fee protection.
    +	LateFeeProtectionFlag *bool `json:"lateFeeProtectionFlag,omitempty" xmlrpc:"lateFeeProtectionFlag,omitempty"`
    +
    +	// An account's most recent billing date.
    +	LatestBillDate *Time `json:"latestBillDate,omitempty" xmlrpc:"latestBillDate,omitempty"`
    +
    +	// An account's latest recurring invoice.
    +	LatestRecurringInvoice *Billing_Invoice `json:"latestRecurringInvoice,omitempty" xmlrpc:"latestRecurringInvoice,omitempty"`
    +
    +	// An account's latest recurring pending invoice.
    +	LatestRecurringPendingInvoice *Billing_Invoice `json:"latestRecurringPendingInvoice,omitempty" xmlrpc:"latestRecurringPendingInvoice,omitempty"`
    +
    +	// A count of the legacy bandwidth allotments for an account.
    +	LegacyBandwidthAllotmentCount *uint `json:"legacyBandwidthAllotmentCount,omitempty" xmlrpc:"legacyBandwidthAllotmentCount,omitempty"`
    +
    +	// The legacy bandwidth allotments for an account.
    +	LegacyBandwidthAllotments []Network_Bandwidth_Version1_Allotment `json:"legacyBandwidthAllotments,omitempty" xmlrpc:"legacyBandwidthAllotments,omitempty"`
    +
    +	// The total capacity of Legacy iSCSI Volumes on an account, in GB.
    +	LegacyIscsiCapacityGB *uint `json:"legacyIscsiCapacityGB,omitempty" xmlrpc:"legacyIscsiCapacityGB,omitempty"`
    +
    +	// A count of an account's associated load balancers.
    +	LoadBalancerCount *uint `json:"loadBalancerCount,omitempty" xmlrpc:"loadBalancerCount,omitempty"`
    +
    +	// An account's associated load balancers.
    +	LoadBalancers []Network_LoadBalancer_VirtualIpAddress `json:"loadBalancers,omitempty" xmlrpc:"loadBalancers,omitempty"`
    +
    +	// The total capacity of Legacy lockbox Volumes on an account, in GB.
    +	LockboxCapacityGB *uint `json:"lockboxCapacityGB,omitempty" xmlrpc:"lockboxCapacityGB,omitempty"`
    +
    +	// An account's associated Lockbox storage volumes.
    +	LockboxNetworkStorage []Network_Storage `json:"lockboxNetworkStorage,omitempty" xmlrpc:"lockboxNetworkStorage,omitempty"`
    +
    +	// A count of an account's associated Lockbox storage volumes.
    +	LockboxNetworkStorageCount *uint `json:"lockboxNetworkStorageCount,omitempty" xmlrpc:"lockboxNetworkStorageCount,omitempty"`
    +
    +	// no documentation yet
    +	ManualPaymentsUnderReview []Billing_Payment_Card_ManualPayment `json:"manualPaymentsUnderReview,omitempty" xmlrpc:"manualPaymentsUnderReview,omitempty"`
    +
    +	// A count of an account's manual payments under review.
    +	ManualPaymentsUnderReviewCount *uint `json:"manualPaymentsUnderReviewCount,omitempty" xmlrpc:"manualPaymentsUnderReviewCount,omitempty"`
    +
    +	// An account's master user.
    +	MasterUser *User_Customer `json:"masterUser,omitempty" xmlrpc:"masterUser,omitempty"`
    +
    +	// A count of an account's media transfer service requests.
    +	MediaDataTransferRequestCount *uint `json:"mediaDataTransferRequestCount,omitempty" xmlrpc:"mediaDataTransferRequestCount,omitempty"`
    +
    +	// An account's media transfer service requests.
    +	MediaDataTransferRequests []Account_Media_Data_Transfer_Request `json:"mediaDataTransferRequests,omitempty" xmlrpc:"mediaDataTransferRequests,omitempty"`
    +
    +	// The date an account was last modified.
    +	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
    +
    +	// A count of an account's associated monthly bare metal server objects.
    +	MonthlyBareMetalInstanceCount *uint `json:"monthlyBareMetalInstanceCount,omitempty" xmlrpc:"monthlyBareMetalInstanceCount,omitempty"`
    +
    +	// An account's associated monthly bare metal server objects.
    +	MonthlyBareMetalInstances []Hardware `json:"monthlyBareMetalInstances,omitempty" xmlrpc:"monthlyBareMetalInstances,omitempty"`
    +
    +	// A count of an account's associated monthly virtual guest objects.
    +	MonthlyVirtualGuestCount *uint `json:"monthlyVirtualGuestCount,omitempty" xmlrpc:"monthlyVirtualGuestCount,omitempty"`
    +
    +	// An account's associated monthly virtual guest objects.
    +	MonthlyVirtualGuests []Virtual_Guest `json:"monthlyVirtualGuests,omitempty" xmlrpc:"monthlyVirtualGuests,omitempty"`
    +
    +	// An account's associated NAS storage volumes.
    +	NasNetworkStorage []Network_Storage `json:"nasNetworkStorage,omitempty" xmlrpc:"nasNetworkStorage,omitempty"`
    +
    +	// A count of an account's associated NAS storage volumes.
    +	NasNetworkStorageCount *uint `json:"nasNetworkStorageCount,omitempty" xmlrpc:"nasNetworkStorageCount,omitempty"`
    +
    +	// Whether or not this account can define its own networks.
    +	NetworkCreationFlag *bool `json:"networkCreationFlag,omitempty" xmlrpc:"networkCreationFlag,omitempty"`
    +
    +	// A count of all network gateway devices on this account.
    +	NetworkGatewayCount *uint `json:"networkGatewayCount,omitempty" xmlrpc:"networkGatewayCount,omitempty"`
    +
    +	// All network gateway devices on this account.
    +	NetworkGateways []Network_Gateway `json:"networkGateways,omitempty" xmlrpc:"networkGateways,omitempty"`
    +
    +	// An account's associated network hardware.
    +	NetworkHardware []Hardware `json:"networkHardware,omitempty" xmlrpc:"networkHardware,omitempty"`
    +
    +	// A count of an account's associated network hardware.
    +	NetworkHardwareCount *uint `json:"networkHardwareCount,omitempty" xmlrpc:"networkHardwareCount,omitempty"`
    +
    +	// A count of an account's network message delivery accounts.
    +	NetworkMessageDeliveryAccountCount *uint `json:"networkMessageDeliveryAccountCount,omitempty" xmlrpc:"networkMessageDeliveryAccountCount,omitempty"`
    +
    +	// no documentation yet
    +	NetworkMessageDeliveryAccounts []Network_Message_Delivery `json:"networkMessageDeliveryAccounts,omitempty" xmlrpc:"networkMessageDeliveryAccounts,omitempty"`
    +
    +	// Hardware which is currently experiencing a service failure.
    +	NetworkMonitorDownHardware []Hardware `json:"networkMonitorDownHardware,omitempty" xmlrpc:"networkMonitorDownHardware,omitempty"`
    +
    +	// A count of hardware which is currently experiencing a service failure.
    +	NetworkMonitorDownHardwareCount *uint `json:"networkMonitorDownHardwareCount,omitempty" xmlrpc:"networkMonitorDownHardwareCount,omitempty"`
    +
    +	// A count of virtual guests which are currently experiencing a service failure.
    +	NetworkMonitorDownVirtualGuestCount *uint `json:"networkMonitorDownVirtualGuestCount,omitempty" xmlrpc:"networkMonitorDownVirtualGuestCount,omitempty"`
    +
    +	// Virtual guests which are currently experiencing a service failure.
    +	NetworkMonitorDownVirtualGuests []Virtual_Guest `json:"networkMonitorDownVirtualGuests,omitempty" xmlrpc:"networkMonitorDownVirtualGuests,omitempty"`
    +
    +	// Hardware which is currently recovering from a service failure.
    +	NetworkMonitorRecoveringHardware []Hardware `json:"networkMonitorRecoveringHardware,omitempty" xmlrpc:"networkMonitorRecoveringHardware,omitempty"`
    +
    +	// A count of hardware which is currently recovering from a service failure.
    +	NetworkMonitorRecoveringHardwareCount *uint `json:"networkMonitorRecoveringHardwareCount,omitempty" xmlrpc:"networkMonitorRecoveringHardwareCount,omitempty"`
    +
    +	// A count of virtual guests which are currently recovering from a service failure.
    +	NetworkMonitorRecoveringVirtualGuestCount *uint `json:"networkMonitorRecoveringVirtualGuestCount,omitempty" xmlrpc:"networkMonitorRecoveringVirtualGuestCount,omitempty"`
    +
    +	// Virtual guests which are currently recovering from a service failure.
    +	NetworkMonitorRecoveringVirtualGuests []Virtual_Guest `json:"networkMonitorRecoveringVirtualGuests,omitempty" xmlrpc:"networkMonitorRecoveringVirtualGuests,omitempty"`
    +
    +	// Hardware which is currently online.
    +	NetworkMonitorUpHardware []Hardware `json:"networkMonitorUpHardware,omitempty" xmlrpc:"networkMonitorUpHardware,omitempty"`
    +
    +	// A count of hardware which is currently online.
    +	NetworkMonitorUpHardwareCount *uint `json:"networkMonitorUpHardwareCount,omitempty" xmlrpc:"networkMonitorUpHardwareCount,omitempty"`
    +
    +	// A count of virtual guests which are currently online.
    +	NetworkMonitorUpVirtualGuestCount *uint `json:"networkMonitorUpVirtualGuestCount,omitempty" xmlrpc:"networkMonitorUpVirtualGuestCount,omitempty"`
    +
    +	// Virtual guests which are currently online.
    +	NetworkMonitorUpVirtualGuests []Virtual_Guest `json:"networkMonitorUpVirtualGuests,omitempty" xmlrpc:"networkMonitorUpVirtualGuests,omitempty"`
    +
    +	// An account's associated storage volumes. This includes Lockbox, NAS, EVault, and iSCSI volumes.
    +	NetworkStorage []Network_Storage `json:"networkStorage,omitempty" xmlrpc:"networkStorage,omitempty"`
    +
    +	// A count of an account's associated storage volumes. This includes Lockbox, NAS, EVault, and iSCSI volumes.
    +	NetworkStorageCount *uint `json:"networkStorageCount,omitempty" xmlrpc:"networkStorageCount,omitempty"`
    +
    +	// A count of an account's Network Storage groups.
    +	NetworkStorageGroupCount *uint `json:"networkStorageGroupCount,omitempty" xmlrpc:"networkStorageGroupCount,omitempty"`
    +
    +	// An account's Network Storage groups.
    +	NetworkStorageGroups []Network_Storage_Group `json:"networkStorageGroups,omitempty" xmlrpc:"networkStorageGroups,omitempty"`
    +
    +	// A count of IPSec network tunnels for an account.
    +	NetworkTunnelContextCount *uint `json:"networkTunnelContextCount,omitempty" xmlrpc:"networkTunnelContextCount,omitempty"`
    +
    +	// IPSec network tunnels for an account.
    +	NetworkTunnelContexts []Network_Tunnel_Module_Context `json:"networkTunnelContexts,omitempty" xmlrpc:"networkTunnelContexts,omitempty"`
    +
    +	// A count of all network VLANs assigned to an account.
    +	NetworkVlanCount *uint `json:"networkVlanCount,omitempty" xmlrpc:"networkVlanCount,omitempty"`
    +
    +	// Whether or not an account has automatic private VLAN spanning enabled.
    +	NetworkVlanSpan *Account_Network_Vlan_Span `json:"networkVlanSpan,omitempty" xmlrpc:"networkVlanSpan,omitempty"`
    +
    +	// All network VLANs assigned to an account.
    +	NetworkVlans []Network_Vlan `json:"networkVlans,omitempty" xmlrpc:"networkVlans,omitempty"`
    +
    +	// DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. A count of the allotments for this account and their servers for the next billing cycle. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
    +	NextBillingPublicAllotmentHardwareBandwidthDetailCount *uint `json:"nextBillingPublicAllotmentHardwareBandwidthDetailCount,omitempty" xmlrpc:"nextBillingPublicAllotmentHardwareBandwidthDetailCount,omitempty"`
    +
    +	// DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers for the next billing cycle. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
    +	NextBillingPublicAllotmentHardwareBandwidthDetails []Network_Bandwidth_Version1_Allotment `json:"nextBillingPublicAllotmentHardwareBandwidthDetails,omitempty" xmlrpc:"nextBillingPublicAllotmentHardwareBandwidthDetails,omitempty"`
    +
    +	// The pre-tax total amount exempt from incubator credit for the account's next invoice. This field is now deprecated and will soon be removed. Please update all references to use nextInvoiceTotalAmount instead.
    +	NextInvoiceIncubatorExemptTotal *Float64 `json:"nextInvoiceIncubatorExemptTotal,omitempty" xmlrpc:"nextInvoiceIncubatorExemptTotal,omitempty"`
    +
    +	// A count of the billing items that will be on an account's next invoice.
    +	NextInvoiceTopLevelBillingItemCount *uint `json:"nextInvoiceTopLevelBillingItemCount,omitempty" xmlrpc:"nextInvoiceTopLevelBillingItemCount,omitempty"`
    +
    +	// The billing items that will be on an account's next invoice.
    +	NextInvoiceTopLevelBillingItems []Billing_Item `json:"nextInvoiceTopLevelBillingItems,omitempty" xmlrpc:"nextInvoiceTopLevelBillingItems,omitempty"`
    +
    +	// The pre-tax total amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
    +	NextInvoiceTotalAmount *Float64 `json:"nextInvoiceTotalAmount,omitempty" xmlrpc:"nextInvoiceTotalAmount,omitempty"`
    +
    +	// The total one-time charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
    +	NextInvoiceTotalOneTimeAmount *Float64 `json:"nextInvoiceTotalOneTimeAmount,omitempty" xmlrpc:"nextInvoiceTotalOneTimeAmount,omitempty"`
    +
    +	// The total one-time tax amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
    +	NextInvoiceTotalOneTimeTaxAmount *Float64 `json:"nextInvoiceTotalOneTimeTaxAmount,omitempty" xmlrpc:"nextInvoiceTotalOneTimeTaxAmount,omitempty"`
    +
    +	// The total recurring charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
    +	NextInvoiceTotalRecurringAmount *Float64 `json:"nextInvoiceTotalRecurringAmount,omitempty" xmlrpc:"nextInvoiceTotalRecurringAmount,omitempty"`
    +
    +	// The total recurring charge amount, before the account discount, of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
    +	NextInvoiceTotalRecurringAmountBeforeAccountDiscount *Float64 `json:"nextInvoiceTotalRecurringAmountBeforeAccountDiscount,omitempty" xmlrpc:"nextInvoiceTotalRecurringAmountBeforeAccountDiscount,omitempty"`
    +
    +	// The total recurring tax amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
    +	NextInvoiceTotalRecurringTaxAmount *Float64 `json:"nextInvoiceTotalRecurringTaxAmount,omitempty" xmlrpc:"nextInvoiceTotalRecurringTaxAmount,omitempty"`
    +
    +	// The total taxable recurring charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
    +	NextInvoiceTotalTaxableRecurringAmount *Float64 `json:"nextInvoiceTotalTaxableRecurringAmount,omitempty" xmlrpc:"nextInvoiceTotalTaxableRecurringAmount,omitempty"`
    +
    +	// A count of an account's notification subscribers.
    +	NotificationSubscriberCount *uint `json:"notificationSubscriberCount,omitempty" xmlrpc:"notificationSubscriberCount,omitempty"`
    +
    +	// no documentation yet
    +	NotificationSubscribers []Notification_Subscriber `json:"notificationSubscribers,omitempty" xmlrpc:"notificationSubscribers,omitempty"`
    +
    +	// An office phone number assigned to an account.
    +	OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"`
    +
    +	// A count of the open abuse tickets associated with an account.
    +	OpenAbuseTicketCount *uint `json:"openAbuseTicketCount,omitempty" xmlrpc:"openAbuseTicketCount,omitempty"`
    +
    +	// The open abuse tickets associated with an account.
    +	OpenAbuseTickets []Ticket `json:"openAbuseTickets,omitempty" xmlrpc:"openAbuseTickets,omitempty"`
    +
    +	// A count of the open accounting tickets associated with an account.
    +	OpenAccountingTicketCount *uint `json:"openAccountingTicketCount,omitempty" xmlrpc:"openAccountingTicketCount,omitempty"`
    +
    +	// The open accounting tickets associated with an account.
    +	OpenAccountingTickets []Ticket `json:"openAccountingTickets,omitempty" xmlrpc:"openAccountingTickets,omitempty"`
    +
    +	// A count of the open billing tickets associated with an account.
    +	OpenBillingTicketCount *uint `json:"openBillingTicketCount,omitempty" xmlrpc:"openBillingTicketCount,omitempty"`
    +
    +	// The open billing tickets associated with an account.
    +	OpenBillingTickets []Ticket `json:"openBillingTickets,omitempty" xmlrpc:"openBillingTickets,omitempty"`
    +
    +	// A count of open tickets requesting cancellation of this account's servers, if any exist.
    +	OpenCancellationRequestCount *uint `json:"openCancellationRequestCount,omitempty" xmlrpc:"openCancellationRequestCount,omitempty"`
    +
    +	// Open tickets requesting cancellation of this account's servers, if any exist.
    +	OpenCancellationRequests []Billing_Item_Cancellation_Request `json:"openCancellationRequests,omitempty" xmlrpc:"openCancellationRequests,omitempty"`
    +
    +	// A count of the open tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account.
    +	OpenOtherTicketCount *uint `json:"openOtherTicketCount,omitempty" xmlrpc:"openOtherTicketCount,omitempty"`
    +
    +	// The open tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account.
    +	OpenOtherTickets []Ticket `json:"openOtherTickets,omitempty" xmlrpc:"openOtherTickets,omitempty"`
    +
    +	// A count of an account's recurring invoices.
    +	OpenRecurringInvoiceCount *uint `json:"openRecurringInvoiceCount,omitempty" xmlrpc:"openRecurringInvoiceCount,omitempty"`
    +
    +	// An account's recurring invoices.
    +	OpenRecurringInvoices []Billing_Invoice `json:"openRecurringInvoices,omitempty" xmlrpc:"openRecurringInvoices,omitempty"`
    +
    +	// A count of the open sales tickets associated with an account.
    +	OpenSalesTicketCount *uint `json:"openSalesTicketCount,omitempty" xmlrpc:"openSalesTicketCount,omitempty"`
    +
    +	// The open sales tickets associated with an account.
    +	OpenSalesTickets []Ticket `json:"openSalesTickets,omitempty" xmlrpc:"openSalesTickets,omitempty"`
    +
    +	// A count of an account's OpenStack account links.
    +	OpenStackAccountLinkCount *uint `json:"openStackAccountLinkCount,omitempty" xmlrpc:"openStackAccountLinkCount,omitempty"`
    +
    +	// no documentation yet
    +	OpenStackAccountLinks []Account_Link `json:"openStackAccountLinks,omitempty" xmlrpc:"openStackAccountLinks,omitempty"`
    +
    +	// An account's associated Openstack related Object Storage accounts.
    +	OpenStackObjectStorage []Network_Storage `json:"openStackObjectStorage,omitempty" xmlrpc:"openStackObjectStorage,omitempty"`
    +
    +	// A count of an account's associated Openstack related Object Storage accounts.
    +	OpenStackObjectStorageCount *uint `json:"openStackObjectStorageCount,omitempty" xmlrpc:"openStackObjectStorageCount,omitempty"`
    +
    +	// A count of the open support tickets associated with an account.
    +	OpenSupportTicketCount *uint `json:"openSupportTicketCount,omitempty" xmlrpc:"openSupportTicketCount,omitempty"`
    +
    +	// The open support tickets associated with an account.
    +	OpenSupportTickets []Ticket `json:"openSupportTickets,omitempty" xmlrpc:"openSupportTickets,omitempty"`
    +
    +	// A count of all open tickets associated with an account.
    +	OpenTicketCount *uint `json:"openTicketCount,omitempty" xmlrpc:"openTicketCount,omitempty"`
    +
    +	// All open tickets associated with an account.
    +	OpenTickets []Ticket `json:"openTickets,omitempty" xmlrpc:"openTickets,omitempty"`
    +
    +	// All open tickets associated with an account last edited by an employee.
    +	OpenTicketsWaitingOnCustomer []Ticket `json:"openTicketsWaitingOnCustomer,omitempty" xmlrpc:"openTicketsWaitingOnCustomer,omitempty"`
    +
    +	// A count of all open tickets associated with an account last edited by an employee.
    +	OpenTicketsWaitingOnCustomerCount *uint `json:"openTicketsWaitingOnCustomerCount,omitempty" xmlrpc:"openTicketsWaitingOnCustomerCount,omitempty"`
    +
    +	// A count of an account's associated billing orders excluding upgrades.
    +	OrderCount *uint `json:"orderCount,omitempty" xmlrpc:"orderCount,omitempty"`
    +
    +	// An account's associated billing orders excluding upgrades.
    +	Orders []Billing_Order `json:"orders,omitempty" xmlrpc:"orders,omitempty"`
    +
    +	// A count of the billing items that have no parent billing item. These are items that don't necessarily belong to a single server.
    +	OrphanBillingItemCount *uint `json:"orphanBillingItemCount,omitempty" xmlrpc:"orphanBillingItemCount,omitempty"`
    +
    +	// The billing items that have no parent billing item. These are items that don't necessarily belong to a single server.
    +	OrphanBillingItems []Billing_Item `json:"orphanBillingItems,omitempty" xmlrpc:"orphanBillingItems,omitempty"`
    +
    +	// A count of the brands owned by an account.
    +	OwnedBrandCount *uint `json:"ownedBrandCount,omitempty" xmlrpc:"ownedBrandCount,omitempty"`
    +
    +	// no documentation yet
    +	OwnedBrands []Brand `json:"ownedBrands,omitempty" xmlrpc:"ownedBrands,omitempty"`
    +
    +	// A count of the hardware generic component models owned by an account.
    +	OwnedHardwareGenericComponentModelCount *uint `json:"ownedHardwareGenericComponentModelCount,omitempty" xmlrpc:"ownedHardwareGenericComponentModelCount,omitempty"`
    +
    +	// no documentation yet
    +	OwnedHardwareGenericComponentModels []Hardware_Component_Model_Generic `json:"ownedHardwareGenericComponentModels,omitempty" xmlrpc:"ownedHardwareGenericComponentModels,omitempty"`
    +
    +	// A count of an account's payment processors.
    +	PaymentProcessorCount *uint `json:"paymentProcessorCount,omitempty" xmlrpc:"paymentProcessorCount,omitempty"`
    +
    +	// no documentation yet
    +	PaymentProcessors []Billing_Payment_Processor `json:"paymentProcessors,omitempty" xmlrpc:"paymentProcessors,omitempty"`
    +
    +	// A count of an account's pending events.
    +	PendingEventCount *uint `json:"pendingEventCount,omitempty" xmlrpc:"pendingEventCount,omitempty"`
    +
    +	// no documentation yet
    +	PendingEvents []Notification_Occurrence_Event `json:"pendingEvents,omitempty" xmlrpc:"pendingEvents,omitempty"`
    +
    +	// An account's latest open (pending) invoice.
    +	PendingInvoice *Billing_Invoice `json:"pendingInvoice,omitempty" xmlrpc:"pendingInvoice,omitempty"`
    +
    +	// A count of the top-level invoice items that are on an account's currently pending invoice.
    +	PendingInvoiceTopLevelItemCount *uint `json:"pendingInvoiceTopLevelItemCount,omitempty" xmlrpc:"pendingInvoiceTopLevelItemCount,omitempty"`
    +
    +	// A list of top-level invoice items that are on an account's currently pending invoice.
    +	PendingInvoiceTopLevelItems []Billing_Invoice_Item `json:"pendingInvoiceTopLevelItems,omitempty" xmlrpc:"pendingInvoiceTopLevelItems,omitempty"`
    +
    +	// The total amount of an account's pending invoice, if one exists.
    +	PendingInvoiceTotalAmount *Float64 `json:"pendingInvoiceTotalAmount,omitempty" xmlrpc:"pendingInvoiceTotalAmount,omitempty"`
    +
    +	// The total one-time charges for an account's pending invoice, if one exists. In other words, it is the sum of one-time charges, setup fees, and labor fees. It does not include taxes.
    +	PendingInvoiceTotalOneTimeAmount *Float64 `json:"pendingInvoiceTotalOneTimeAmount,omitempty" xmlrpc:"pendingInvoiceTotalOneTimeAmount,omitempty"`
    +
    +	// The sum of all the taxes related to one time charges for an account's pending invoice, if one exists.
    +	PendingInvoiceTotalOneTimeTaxAmount *Float64 `json:"pendingInvoiceTotalOneTimeTaxAmount,omitempty" xmlrpc:"pendingInvoiceTotalOneTimeTaxAmount,omitempty"`
    +
    +	// The total recurring amount of an account's pending invoice, if one exists.
    +	PendingInvoiceTotalRecurringAmount *Float64 `json:"pendingInvoiceTotalRecurringAmount,omitempty" xmlrpc:"pendingInvoiceTotalRecurringAmount,omitempty"`
    +
    +	// The total amount of the recurring taxes on an account's pending invoice, if one exists.
    +	PendingInvoiceTotalRecurringTaxAmount *Float64 `json:"pendingInvoiceTotalRecurringTaxAmount,omitempty" xmlrpc:"pendingInvoiceTotalRecurringTaxAmount,omitempty"`
    +
    +	// A count of an account's permission groups.
    +	PermissionGroupCount *uint `json:"permissionGroupCount,omitempty" xmlrpc:"permissionGroupCount,omitempty"`
    +
    +	// An account's permission groups.
    +	PermissionGroups []User_Permission_Group `json:"permissionGroups,omitempty" xmlrpc:"permissionGroups,omitempty"`
    +
    +	// A count of an account's user roles.
    +	PermissionRoleCount *uint `json:"permissionRoleCount,omitempty" xmlrpc:"permissionRoleCount,omitempty"`
    +
    +	// An account's user roles.
    +	PermissionRoles []User_Permission_Role `json:"permissionRoles,omitempty" xmlrpc:"permissionRoles,omitempty"`
    +
    +	// A count of an account's portable storage volumes.
    +	PortableStorageVolumeCount *uint `json:"portableStorageVolumeCount,omitempty" xmlrpc:"portableStorageVolumeCount,omitempty"`
    +
    +	// no documentation yet
    +	PortableStorageVolumes []Virtual_Disk_Image `json:"portableStorageVolumes,omitempty" xmlrpc:"portableStorageVolumes,omitempty"`
    +
    +	// A count of customer specified URIs that are downloaded onto a newly provisioned or reloaded server. If the URI is sent over https it will be executed directly on the server.
    +	PostProvisioningHookCount *uint `json:"postProvisioningHookCount,omitempty" xmlrpc:"postProvisioningHookCount,omitempty"`
    +
    +	// Customer specified URIs that are downloaded onto a newly provisioned or reloaded server. If the URI is sent over https it will be executed directly on the server.
    +	PostProvisioningHooks []Provisioning_Hook `json:"postProvisioningHooks,omitempty" xmlrpc:"postProvisioningHooks,omitempty"`
    +
    +	// The postal code of the mailing address belonging to an account.
    +	PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"`
    +
    +	// Boolean flag dictating whether or not this account supports PPTP VPN Access.
    +	PptpVpnAllowedFlag *bool `json:"pptpVpnAllowedFlag,omitempty" xmlrpc:"pptpVpnAllowedFlag,omitempty"`
    +
    +	// A count of an account's associated portal users with PPTP VPN access.
    +	PptpVpnUserCount *uint `json:"pptpVpnUserCount,omitempty" xmlrpc:"pptpVpnUserCount,omitempty"`
    +
    +	// An account's associated portal users with PPTP VPN access.
    +	PptpVpnUsers []User_Customer `json:"pptpVpnUsers,omitempty" xmlrpc:"pptpVpnUsers,omitempty"`
    +
    +	// The total recurring amount for an account's previous revenue.
    +	PreviousRecurringRevenue *Float64 `json:"previousRecurringRevenue,omitempty" xmlrpc:"previousRecurringRevenue,omitempty"`
    +
    +	// A count of the item price that an account is restricted to.
    +	PriceRestrictionCount *uint `json:"priceRestrictionCount,omitempty" xmlrpc:"priceRestrictionCount,omitempty"`
    +
    +	// The item price that an account is restricted to.
    +	PriceRestrictions []Product_Item_Price_Account_Restriction `json:"priceRestrictions,omitempty" xmlrpc:"priceRestrictions,omitempty"`
    +
    +	// A count of all priority one tickets associated with an account.
    +	PriorityOneTicketCount *uint `json:"priorityOneTicketCount,omitempty" xmlrpc:"priorityOneTicketCount,omitempty"`
    +
    +	// All priority one tickets associated with an account.
    +	PriorityOneTickets []Ticket `json:"priorityOneTickets,omitempty" xmlrpc:"priorityOneTickets,omitempty"`
    +
    +	// DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. A count of the allotments for this account and their servers. The private inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
    +	PrivateAllotmentHardwareBandwidthDetailCount *uint `json:"privateAllotmentHardwareBandwidthDetailCount,omitempty" xmlrpc:"privateAllotmentHardwareBandwidthDetailCount,omitempty"`
    +
    +	// DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers. The private inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
    +	PrivateAllotmentHardwareBandwidthDetails []Network_Bandwidth_Version1_Allotment `json:"privateAllotmentHardwareBandwidthDetails,omitempty" xmlrpc:"privateAllotmentHardwareBandwidthDetails,omitempty"`
    +
    +	// A count of private and shared template group objects (parent only) for an account.
    +	PrivateBlockDeviceTemplateGroupCount *uint `json:"privateBlockDeviceTemplateGroupCount,omitempty" xmlrpc:"privateBlockDeviceTemplateGroupCount,omitempty"`
    +
    +	// Private and shared template group objects (parent only) for an account.
    +	PrivateBlockDeviceTemplateGroups []Virtual_Guest_Block_Device_Template_Group `json:"privateBlockDeviceTemplateGroups,omitempty" xmlrpc:"privateBlockDeviceTemplateGroups,omitempty"`
    +
    +	// A count of an account's private IP addresses.
    +	PrivateIpAddressCount *uint `json:"privateIpAddressCount,omitempty" xmlrpc:"privateIpAddressCount,omitempty"`
    +
    +	// no documentation yet
    +	PrivateIpAddresses []Network_Subnet_IpAddress `json:"privateIpAddresses,omitempty" xmlrpc:"privateIpAddresses,omitempty"`
    +
    +	// A count of the private network VLANs assigned to an account.
    +	PrivateNetworkVlanCount *uint `json:"privateNetworkVlanCount,omitempty" xmlrpc:"privateNetworkVlanCount,omitempty"`
    +
    +	// The private network VLANs assigned to an account.
    +	PrivateNetworkVlans []Network_Vlan `json:"privateNetworkVlans,omitempty" xmlrpc:"privateNetworkVlans,omitempty"`
    +
    +	// A count of all private subnets associated with an account.
    +	PrivateSubnetCount *uint `json:"privateSubnetCount,omitempty" xmlrpc:"privateSubnetCount,omitempty"`
    +
    +	// All private subnets associated with an account.
    +	PrivateSubnets []Network_Subnet `json:"privateSubnets,omitempty" xmlrpc:"privateSubnets,omitempty"`
    +
    +	// Boolean flag indicating whether or not this account is a Proof of Concept account.
    +	ProofOfConceptAccountFlag *bool `json:"proofOfConceptAccountFlag,omitempty" xmlrpc:"proofOfConceptAccountFlag,omitempty"`
    +
    +	// DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. A count of the allotments for this account and their servers. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
    +	PublicAllotmentHardwareBandwidthDetailCount *uint `json:"publicAllotmentHardwareBandwidthDetailCount,omitempty" xmlrpc:"publicAllotmentHardwareBandwidthDetailCount,omitempty"`
    +
    +	// DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
    +	PublicAllotmentHardwareBandwidthDetails []Network_Bandwidth_Version1_Allotment `json:"publicAllotmentHardwareBandwidthDetails,omitempty" xmlrpc:"publicAllotmentHardwareBandwidthDetails,omitempty"`
    +
    +	// A count of an account's public IP addresses.
    +	PublicIpAddressCount *uint `json:"publicIpAddressCount,omitempty" xmlrpc:"publicIpAddressCount,omitempty"`
    +
    +	// no documentation yet
    +	PublicIpAddresses []Network_Subnet_IpAddress `json:"publicIpAddresses,omitempty" xmlrpc:"publicIpAddresses,omitempty"`
    +
    +	// A count of the public network VLANs assigned to an account.
    +	PublicNetworkVlanCount *uint `json:"publicNetworkVlanCount,omitempty" xmlrpc:"publicNetworkVlanCount,omitempty"`
    +
    +	// The public network VLANs assigned to an account.
    +	PublicNetworkVlans []Network_Vlan `json:"publicNetworkVlans,omitempty" xmlrpc:"publicNetworkVlans,omitempty"`
    +
    +	// A count of all public network subnets associated with an account.
    +	PublicSubnetCount *uint `json:"publicSubnetCount,omitempty" xmlrpc:"publicSubnetCount,omitempty"`
    +
    +	// All public network subnets associated with an account.
    +	PublicSubnets []Network_Subnet `json:"publicSubnets,omitempty" xmlrpc:"publicSubnets,omitempty"`
    +
    +	// A count of an account's quotes.
    +	QuoteCount *uint `json:"quoteCount,omitempty" xmlrpc:"quoteCount,omitempty"`
    +
    +	// An account's quotes.
    +	Quotes []Billing_Order_Quote `json:"quotes,omitempty" xmlrpc:"quotes,omitempty"`
    +
    +	// A count of an account's recent events.
    +	RecentEventCount *uint `json:"recentEventCount,omitempty" xmlrpc:"recentEventCount,omitempty"`
    +
    +	// no documentation yet
    +	RecentEvents []Notification_Occurrence_Event `json:"recentEvents,omitempty" xmlrpc:"recentEvents,omitempty"`
    +
    +	// The Referral Partner for this account, if any.
    +	ReferralPartner *Account `json:"referralPartner,omitempty" xmlrpc:"referralPartner,omitempty"`
    +
    +	// A count of the accounts this referral partner has referred, if this account is a referral partner.
    +	ReferredAccountCount *uint `json:"referredAccountCount,omitempty" xmlrpc:"referredAccountCount,omitempty"`
    +
    +	// If this account is a referral partner, the accounts this referral partner has referred.
    +	ReferredAccounts []Account `json:"referredAccounts,omitempty" xmlrpc:"referredAccounts,omitempty"`
    +
    +	// A count of an account's regulated workloads.
    +	RegulatedWorkloadCount *uint `json:"regulatedWorkloadCount,omitempty" xmlrpc:"regulatedWorkloadCount,omitempty"`
    +
    +	// no documentation yet
    +	RegulatedWorkloads []Legal_RegulatedWorkload `json:"regulatedWorkloads,omitempty" xmlrpc:"regulatedWorkloads,omitempty"`
    +
    +	// A count of remote management command requests for an account.
    +	RemoteManagementCommandRequestCount *uint `json:"remoteManagementCommandRequestCount,omitempty" xmlrpc:"remoteManagementCommandRequestCount,omitempty"`
    +
    +	// Remote management command requests for an account.
    +	RemoteManagementCommandRequests []Hardware_Component_RemoteManagement_Command_Request `json:"remoteManagementCommandRequests,omitempty" xmlrpc:"remoteManagementCommandRequests,omitempty"`
    +
    +	// A count of the Replication events for all Network Storage volumes on an account.
    +	ReplicationEventCount *uint `json:"replicationEventCount,omitempty" xmlrpc:"replicationEventCount,omitempty"`
    +
    +	// The Replication events for all Network Storage volumes on an account.
    +	ReplicationEvents []Network_Storage_Event `json:"replicationEvents,omitempty" xmlrpc:"replicationEvents,omitempty"`
    +
    +	// Indicates whether newly created users under this account will be associated with IBMid via an email requiring a response, or not.
    +	RequireSilentIBMidUserCreation *bool `json:"requireSilentIBMidUserCreation,omitempty" xmlrpc:"requireSilentIBMidUserCreation,omitempty"`
    +
    +	// The Reseller level of the account.
    +	ResellerLevel *int `json:"resellerLevel,omitempty" xmlrpc:"resellerLevel,omitempty"`
    +
    +	// A count of an account's associated top-level resource groups.
    +	ResourceGroupCount *uint `json:"resourceGroupCount,omitempty" xmlrpc:"resourceGroupCount,omitempty"`
    +
    +	// An account's associated top-level resource groups.
    +	ResourceGroups []Resource_Group `json:"resourceGroups,omitempty" xmlrpc:"resourceGroups,omitempty"`
    +
    +	// A count of all routers that an account's VLANs reside on.
    +	RouterCount *uint `json:"routerCount,omitempty" xmlrpc:"routerCount,omitempty"`
    +
    +	// All routers that an account's VLANs reside on.
    +	Routers []Hardware `json:"routers,omitempty" xmlrpc:"routers,omitempty"`
    +
    +	// An account's reverse WHOIS data. This data is used when making SWIP requests.
    +	RwhoisData *Network_Subnet_Rwhois_Data `json:"rwhoisData,omitempty" xmlrpc:"rwhoisData,omitempty"`
    +
    +	// no documentation yet
    +	SalesforceAccountLink *Account_Link `json:"salesforceAccountLink,omitempty" xmlrpc:"salesforceAccountLink,omitempty"`
    +
    +	// The SAML configuration for this account.
    +	SamlAuthentication *Account_Authentication_Saml `json:"samlAuthentication,omitempty" xmlrpc:"samlAuthentication,omitempty"`
    +
    +	// A count of all scale groups on this account.
    +	ScaleGroupCount *uint `json:"scaleGroupCount,omitempty" xmlrpc:"scaleGroupCount,omitempty"`
    +
    +	// All scale groups on this account.
    +	ScaleGroups []Scale_Group `json:"scaleGroups,omitempty" xmlrpc:"scaleGroups,omitempty"`
    +
    +	// A count of the secondary DNS records for a SoftLayer customer account.
    +	SecondaryDomainCount *uint `json:"secondaryDomainCount,omitempty" xmlrpc:"secondaryDomainCount,omitempty"`
    +
    +	// The secondary DNS records for a SoftLayer customer account.
    +	SecondaryDomains []Dns_Secondary `json:"secondaryDomains,omitempty" xmlrpc:"secondaryDomains,omitempty"`
    +
    +	// A count of stored security certificates (i.e. SSL).
    +	SecurityCertificateCount *uint `json:"securityCertificateCount,omitempty" xmlrpc:"securityCertificateCount,omitempty"`
    +
    +	// Stored security certificates (i.e. SSL).
    +	SecurityCertificates []Security_Certificate `json:"securityCertificates,omitempty" xmlrpc:"securityCertificates,omitempty"`
    +
    +	// A count of the security groups belonging to this account.
    +	SecurityGroupCount *uint `json:"securityGroupCount,omitempty" xmlrpc:"securityGroupCount,omitempty"`
    +
    +	// The security groups belonging to this account.
    +	SecurityGroups []Network_SecurityGroup `json:"securityGroups,omitempty" xmlrpc:"securityGroups,omitempty"`
    +
    +	// no documentation yet
    +	SecurityLevel *Security_Level `json:"securityLevel,omitempty" xmlrpc:"securityLevel,omitempty"`
    +
    +	// A count of an account's vulnerability scan requests.
    +	SecurityScanRequestCount *uint `json:"securityScanRequestCount,omitempty" xmlrpc:"securityScanRequestCount,omitempty"`
    +
    +	// An account's vulnerability scan requests.
    +	SecurityScanRequests []Network_Security_Scanner_Request `json:"securityScanRequests,omitempty" xmlrpc:"securityScanRequests,omitempty"`
    +
    +	// A count of the service billing items that will be on an account's next invoice.
    +	ServiceBillingItemCount *uint `json:"serviceBillingItemCount,omitempty" xmlrpc:"serviceBillingItemCount,omitempty"`
    +
    +	// The service billing items that will be on an account's next invoice.
    +	ServiceBillingItems []Billing_Item `json:"serviceBillingItems,omitempty" xmlrpc:"serviceBillingItems,omitempty"`
    +
    +	// A count of shipments that belong to the customer's account.
    +	ShipmentCount *uint `json:"shipmentCount,omitempty" xmlrpc:"shipmentCount,omitempty"`
    +
    +	// Shipments that belong to the customer's account.
    +	Shipments []Account_Shipment `json:"shipments,omitempty" xmlrpc:"shipments,omitempty"`
    +
    +	// A count of customer specified SSH keys that can be implemented onto a newly provisioned or reloaded server.
    +	SshKeyCount *uint `json:"sshKeyCount,omitempty" xmlrpc:"sshKeyCount,omitempty"`
    +
    +	// Customer specified SSH keys that can be implemented onto a newly provisioned or reloaded server.
    +	SshKeys []Security_Ssh_Key `json:"sshKeys,omitempty" xmlrpc:"sshKeys,omitempty"`
    +
    +	// A count of an account's associated portal users with SSL VPN access.
    +	SslVpnUserCount *uint `json:"sslVpnUserCount,omitempty" xmlrpc:"sslVpnUserCount,omitempty"`
    +
    +	// An account's associated portal users with SSL VPN access.
    +	SslVpnUsers []User_Customer `json:"sslVpnUsers,omitempty" xmlrpc:"sslVpnUsers,omitempty"`
    +
    +	// A count of an account's virtual guest objects that are hosted on a user provisioned hypervisor.
    +	StandardPoolVirtualGuestCount *uint `json:"standardPoolVirtualGuestCount,omitempty" xmlrpc:"standardPoolVirtualGuestCount,omitempty"`
    +
    +	// An account's virtual guest objects that are hosted on a user provisioned hypervisor.
    +	StandardPoolVirtualGuests []Virtual_Guest `json:"standardPoolVirtualGuests,omitempty" xmlrpc:"standardPoolVirtualGuests,omitempty"`
    +
    +	// A two-letter abbreviation of the state in the mailing address belonging to an account. If an account does not reside in a province then this is typically blank.
    +	State *string `json:"state,omitempty" xmlrpc:"state,omitempty"`
    +
    +	// The date of an account's last status change.
    +	StatusDate *Time `json:"statusDate,omitempty" xmlrpc:"statusDate,omitempty"`
    +
    +	// A count of all network subnets associated with an account.
    +	SubnetCount *uint `json:"subnetCount,omitempty" xmlrpc:"subnetCount,omitempty"`
    +
    +	// A count of an account's subnet registrations.
    +	SubnetRegistrationCount *uint `json:"subnetRegistrationCount,omitempty" xmlrpc:"subnetRegistrationCount,omitempty"`
    +
    +	// A count of an account's subnet registration details.
    +	SubnetRegistrationDetailCount *uint `json:"subnetRegistrationDetailCount,omitempty" xmlrpc:"subnetRegistrationDetailCount,omitempty"`
    +
    +	// no documentation yet
    +	SubnetRegistrationDetails []Account_Regional_Registry_Detail `json:"subnetRegistrationDetails,omitempty" xmlrpc:"subnetRegistrationDetails,omitempty"`
    +
    +	// no documentation yet
    +	SubnetRegistrations []Network_Subnet_Registration `json:"subnetRegistrations,omitempty" xmlrpc:"subnetRegistrations,omitempty"`
    +
    +	// All network subnets associated with an account.
    +	Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"`
    +
    +	// A count of the SoftLayer employees that an account is assigned to.
    +	SupportRepresentativeCount *uint `json:"supportRepresentativeCount,omitempty" xmlrpc:"supportRepresentativeCount,omitempty"`
    +
    +	// The SoftLayer employees that an account is assigned to.
    +	SupportRepresentatives []User_Employee `json:"supportRepresentatives,omitempty" xmlrpc:"supportRepresentatives,omitempty"`
    +
    +	// A count of the active support subscriptions for this account.
    +	SupportSubscriptionCount *uint `json:"supportSubscriptionCount,omitempty" xmlrpc:"supportSubscriptionCount,omitempty"`
    +
    +	// The active support subscriptions for this account.
    +	SupportSubscriptions []Billing_Item `json:"supportSubscriptions,omitempty" xmlrpc:"supportSubscriptions,omitempty"`
    +
    +	// no documentation yet
    +	SupportTier *string `json:"supportTier,omitempty" xmlrpc:"supportTier,omitempty"`
    +
    +	// A flag indicating whether to suppress invoices.
    +	SuppressInvoicesFlag *bool `json:"suppressInvoicesFlag,omitempty" xmlrpc:"suppressInvoicesFlag,omitempty"`
    +
    +	// A count of an account's tags.
    +	TagCount *uint `json:"tagCount,omitempty" xmlrpc:"tagCount,omitempty"`
    +
    +	// no documentation yet
    +	Tags []Tag `json:"tags,omitempty" xmlrpc:"tags,omitempty"`
    +
    +	// A count of an account's associated tickets.
    +	TicketCount *uint `json:"ticketCount,omitempty" xmlrpc:"ticketCount,omitempty"`
    +
    +	// An account's associated tickets.
    +	Tickets []Ticket `json:"tickets,omitempty" xmlrpc:"tickets,omitempty"`
    +
    +	// Tickets closed within the last 72 hours or last 10 tickets, whichever is less, associated with an account.
    +	TicketsClosedInTheLastThreeDays []Ticket `json:"ticketsClosedInTheLastThreeDays,omitempty" xmlrpc:"ticketsClosedInTheLastThreeDays,omitempty"`
    +
    +	// A count of tickets closed within the last 72 hours or last 10 tickets, whichever is less, associated with an account.
    +	TicketsClosedInTheLastThreeDaysCount *uint `json:"ticketsClosedInTheLastThreeDaysCount,omitempty" xmlrpc:"ticketsClosedInTheLastThreeDaysCount,omitempty"`
    +
    +	// Tickets closed today associated with an account.
    +	TicketsClosedToday []Ticket `json:"ticketsClosedToday,omitempty" xmlrpc:"ticketsClosedToday,omitempty"`
    +
    +	// A count of tickets closed today associated with an account.
    +	TicketsClosedTodayCount *uint `json:"ticketsClosedTodayCount,omitempty" xmlrpc:"ticketsClosedTodayCount,omitempty"`
    +
    +	// A count of an account's associated Transcode account.
    +	TranscodeAccountCount *uint `json:"transcodeAccountCount,omitempty" xmlrpc:"transcodeAccountCount,omitempty"`
    +
    +	// An account's associated Transcode account.
    +	TranscodeAccounts []Network_Media_Transcode_Account `json:"transcodeAccounts,omitempty" xmlrpc:"transcodeAccounts,omitempty"`
    +
    +	// A count of an account's associated upgrade requests.
    +	UpgradeRequestCount *uint `json:"upgradeRequestCount,omitempty" xmlrpc:"upgradeRequestCount,omitempty"`
    +
    +	// An account's associated upgrade requests.
    +	UpgradeRequests []Product_Upgrade_Request `json:"upgradeRequests,omitempty" xmlrpc:"upgradeRequests,omitempty"`
    +
    +	// A count of an account's portal users.
    +	UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"`
    +
    +	// An account's portal users.
    +	Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"`
    +
    +	// A count of stored security certificates that are not expired (i.e. SSL).
    +	ValidSecurityCertificateCount *uint `json:"validSecurityCertificateCount,omitempty" xmlrpc:"validSecurityCertificateCount,omitempty"`
    +
    +	// Stored security certificates that are not expired (i.e. SSL).
    +	ValidSecurityCertificates []Security_Certificate `json:"validSecurityCertificates,omitempty" xmlrpc:"validSecurityCertificates,omitempty"`
    +
    +	// Returns 0 if VPN updates are currently in progress on this account, otherwise 1.
    +	VdrUpdatesInProgressFlag *bool `json:"vdrUpdatesInProgressFlag,omitempty" xmlrpc:"vdrUpdatesInProgressFlag,omitempty"`
    +
    +	// A count of the bandwidth pooling for this account.
    +	VirtualDedicatedRackCount *uint `json:"virtualDedicatedRackCount,omitempty" xmlrpc:"virtualDedicatedRackCount,omitempty"`
    +
    +	// The bandwidth pooling for this account.
    +	VirtualDedicatedRacks []Network_Bandwidth_Version1_Allotment `json:"virtualDedicatedRacks,omitempty" xmlrpc:"virtualDedicatedRacks,omitempty"`
    +
    +	// A count of an account's associated virtual server virtual disk images.
    +	VirtualDiskImageCount *uint `json:"virtualDiskImageCount,omitempty" xmlrpc:"virtualDiskImageCount,omitempty"`
    +
    +	// An account's associated virtual server virtual disk images.
    +	VirtualDiskImages []Virtual_Disk_Image `json:"virtualDiskImages,omitempty" xmlrpc:"virtualDiskImages,omitempty"`
    +
    +	// A count of an account's associated virtual guest objects.
    +	VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"`
    +
    +	// An account's associated virtual guest objects.
    +	VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"`
    +
    +	// An account's associated virtual guest objects currently over bandwidth allocation.
    +	VirtualGuestsOverBandwidthAllocation []Virtual_Guest `json:"virtualGuestsOverBandwidthAllocation,omitempty" xmlrpc:"virtualGuestsOverBandwidthAllocation,omitempty"`
    +
    +	// A count of an account's associated virtual guest objects currently over bandwidth allocation.
    +	VirtualGuestsOverBandwidthAllocationCount *uint `json:"virtualGuestsOverBandwidthAllocationCount,omitempty" xmlrpc:"virtualGuestsOverBandwidthAllocationCount,omitempty"`
    +
    +	// An account's associated virtual guest objects projected to be over bandwidth allocation.
    +	VirtualGuestsProjectedOverBandwidthAllocation []Virtual_Guest `json:"virtualGuestsProjectedOverBandwidthAllocation,omitempty" xmlrpc:"virtualGuestsProjectedOverBandwidthAllocation,omitempty"`
    +
    +	// A count of an account's associated virtual guest objects projected to be over bandwidth allocation.
    +	VirtualGuestsProjectedOverBandwidthAllocationCount *uint `json:"virtualGuestsProjectedOverBandwidthAllocationCount,omitempty" xmlrpc:"virtualGuestsProjectedOverBandwidthAllocationCount,omitempty"`
    +
    +	// All virtual guests associated with an account that have the cPanel web hosting control panel installed.
    +	VirtualGuestsWithCpanel []Virtual_Guest `json:"virtualGuestsWithCpanel,omitempty" xmlrpc:"virtualGuestsWithCpanel,omitempty"`
    +
    +	// A count of all virtual guests associated with an account that have the cPanel web hosting control panel installed.
    +	VirtualGuestsWithCpanelCount *uint `json:"virtualGuestsWithCpanelCount,omitempty" xmlrpc:"virtualGuestsWithCpanelCount,omitempty"`
    +
    +	// All virtual guests associated with an account that have McAfee Secure software components.
    +	VirtualGuestsWithMcafee []Virtual_Guest `json:"virtualGuestsWithMcafee,omitempty" xmlrpc:"virtualGuestsWithMcafee,omitempty"`
    +
    +	// All virtual guests associated with an account that have McAfee Secure AntiVirus for Redhat software components.
    +	VirtualGuestsWithMcafeeAntivirusRedhat []Virtual_Guest `json:"virtualGuestsWithMcafeeAntivirusRedhat,omitempty" xmlrpc:"virtualGuestsWithMcafeeAntivirusRedhat,omitempty"`
    +
    +	// A count of all virtual guests associated with an account that have McAfee Secure AntiVirus for Redhat software components.
    +	VirtualGuestsWithMcafeeAntivirusRedhatCount *uint `json:"virtualGuestsWithMcafeeAntivirusRedhatCount,omitempty" xmlrpc:"virtualGuestsWithMcafeeAntivirusRedhatCount,omitempty"`
    +
    +	// A count of all virtual guests associated with an account that have McAfee Secure AntiVirus for Windows software components.
    +	VirtualGuestsWithMcafeeAntivirusWindowCount *uint `json:"virtualGuestsWithMcafeeAntivirusWindowCount,omitempty" xmlrpc:"virtualGuestsWithMcafeeAntivirusWindowCount,omitempty"`
    +
    +	// All virtual guests associated with an account that have McAfee Secure AntiVirus for Windows software components.
    +	VirtualGuestsWithMcafeeAntivirusWindows []Virtual_Guest `json:"virtualGuestsWithMcafeeAntivirusWindows,omitempty" xmlrpc:"virtualGuestsWithMcafeeAntivirusWindows,omitempty"`
    +
    +	// A count of all virtual guests associated with an account that have McAfee Secure software components.
    +	VirtualGuestsWithMcafeeCount *uint `json:"virtualGuestsWithMcafeeCount,omitempty" xmlrpc:"virtualGuestsWithMcafeeCount,omitempty"`
    +
    +	// All virtual guests associated with an account that have McAfee Secure Intrusion Detection System software components.
    +	VirtualGuestsWithMcafeeIntrusionDetectionSystem []Virtual_Guest `json:"virtualGuestsWithMcafeeIntrusionDetectionSystem,omitempty" xmlrpc:"virtualGuestsWithMcafeeIntrusionDetectionSystem,omitempty"`
    +
    +	// A count of all virtual guests associated with an account that have McAfee Secure Intrusion Detection System software components.
    +	VirtualGuestsWithMcafeeIntrusionDetectionSystemCount *uint `json:"virtualGuestsWithMcafeeIntrusionDetectionSystemCount,omitempty" xmlrpc:"virtualGuestsWithMcafeeIntrusionDetectionSystemCount,omitempty"`
    +
    +	// All virtual guests associated with an account that have the Plesk web hosting control panel installed.
    +	VirtualGuestsWithPlesk []Virtual_Guest `json:"virtualGuestsWithPlesk,omitempty" xmlrpc:"virtualGuestsWithPlesk,omitempty"`
    +
    +	// A count of all virtual guests associated with an account that have the Plesk web hosting control panel installed.
    +	VirtualGuestsWithPleskCount *uint `json:"virtualGuestsWithPleskCount,omitempty" xmlrpc:"virtualGuestsWithPleskCount,omitempty"`
    +
    +	// All virtual guests associated with an account that have the QuantaStor storage system installed.
    +	VirtualGuestsWithQuantastor []Virtual_Guest `json:"virtualGuestsWithQuantastor,omitempty" xmlrpc:"virtualGuestsWithQuantastor,omitempty"`
    +
    +	// A count of all virtual guests associated with an account that have the QuantaStor storage system installed.
    +	VirtualGuestsWithQuantastorCount *uint `json:"virtualGuestsWithQuantastorCount,omitempty" xmlrpc:"virtualGuestsWithQuantastorCount,omitempty"`
    +
    +	// All virtual guests associated with an account that have the Urchin web traffic analytics package installed.
    +	VirtualGuestsWithUrchin []Virtual_Guest `json:"virtualGuestsWithUrchin,omitempty" xmlrpc:"virtualGuestsWithUrchin,omitempty"`
    +
    +	// A count of all virtual guests associated with an account that have the Urchin web traffic analytics package installed.
    +	VirtualGuestsWithUrchinCount *uint `json:"virtualGuestsWithUrchinCount,omitempty" xmlrpc:"virtualGuestsWithUrchinCount,omitempty"`
    +
    +	// The bandwidth pooling for this account.
    +	VirtualPrivateRack *Network_Bandwidth_Version1_Allotment `json:"virtualPrivateRack,omitempty" xmlrpc:"virtualPrivateRack,omitempty"`
    +
    +	// An account's associated virtual server archived storage repositories.
    +	VirtualStorageArchiveRepositories []Virtual_Storage_Repository `json:"virtualStorageArchiveRepositories,omitempty" xmlrpc:"virtualStorageArchiveRepositories,omitempty"`
    +
    +	// A count of an account's associated virtual server archived storage repositories.
    +	VirtualStorageArchiveRepositoryCount *uint `json:"virtualStorageArchiveRepositoryCount,omitempty" xmlrpc:"virtualStorageArchiveRepositoryCount,omitempty"`
    +
    +	// An account's associated virtual server public storage repositories.
    +	VirtualStoragePublicRepositories []Virtual_Storage_Repository `json:"virtualStoragePublicRepositories,omitempty" xmlrpc:"virtualStoragePublicRepositories,omitempty"`
    +
    +	// A count of an account's associated virtual server public storage repositories.
    +	VirtualStoragePublicRepositoryCount *uint `json:"virtualStoragePublicRepositoryCount,omitempty" xmlrpc:"virtualStoragePublicRepositoryCount,omitempty"`
    +
    +	// A count of an account's associated VPC configured virtual guest objects.
    +	VpcVirtualGuestCount *uint `json:"vpcVirtualGuestCount,omitempty" xmlrpc:"vpcVirtualGuestCount,omitempty"`
    +
    +	// An account's associated VPC configured virtual guest objects.
    +	VpcVirtualGuests []Virtual_Guest `json:"vpcVirtualGuests,omitempty" xmlrpc:"vpcVirtualGuests,omitempty"`
    +}
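Because every field on these generated SoftLayer data types is a pointer tagged with omitempty, fields the API does not return stay nil after decoding. The following is a minimal sketch of the resulting nil-check pattern, assuming a trimmed Account shape and a made-up JSON response; none of it is part of the vendored package:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Trimmed, illustrative Account with the same pointer/omitempty shape as above.
    type Account struct {
    	VirtualGuestCount *uint   `json:"virtualGuestCount,omitempty"`
    	SupportTier       *string `json:"supportTier,omitempty"`
    }

    func main() {
    	// Hypothetical response body; supportTier is deliberately absent.
    	body := []byte(`{"virtualGuestCount": 3}`)

    	var acct Account
    	if err := json.Unmarshal(body, &acct); err != nil {
    		panic(err)
    	}

    	// Guard against fields the API did not return before dereferencing.
    	if acct.VirtualGuestCount != nil {
    		fmt.Println("virtual guests:", *acct.VirtualGuestCount)
    	}
    	if acct.SupportTier == nil {
    		fmt.Println("supportTier not present in response")
    	}
    }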
    +
    +// An unfortunate facet of the hosting business is the necessity of dealing with legal and network abuse inquiries. As these types of inquiries frequently contain sensitive information, SoftLayer keeps a separate account contact email address for direct contact about legal and abuse matters, modeled by the SoftLayer_Account_AbuseEmail data type. SoftLayer will typically email an account's abuse email addresses in these types of cases, and an email is automatically sent to an account's abuse email addresses when a legal or abuse ticket is created or updated.
    +type Account_AbuseEmail struct {
    +	Entity
    +
    +	// The account associated with an abuse email address.
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// A valid email address.
    +	Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
    +}
    +
    +// The SoftLayer_Account_Address data type contains information on an address associated with a SoftLayer account.
    +type Account_Address struct {
    +	Entity
    +
    +	// The account to which this address belongs.
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// no documentation yet
    +	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
    +
    +	// Line 1 of the address (normally the street address).
    +	Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"`
    +
    +	// Line 2 of the address.
    +	Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"`
    +
    +	// The city of the address.
    +	City *string `json:"city,omitempty" xmlrpc:"city,omitempty"`
    +
    +	// The contact name (person, office) of the address.
    +	ContactName *string `json:"contactName,omitempty" xmlrpc:"contactName,omitempty"`
    +
    +	// The country of the address.
    +	Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"`
    +
    +	// The customer user who created this address.
    +	CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"`
    +
    +	// The description of the address.
    +	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
    +
    +	// The unique id of the address.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// Flag to show whether the address is active.
    +	IsActive *int `json:"isActive,omitempty" xmlrpc:"isActive,omitempty"`
    +
    +	// The location of this address.
    +	Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"`
    +
    +	// The location id of the address.
    +	LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"`
    +
    +	// The employee who last modified this address.
    +	ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"`
    +
    +	// The customer user who last modified this address.
    +	ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"`
    +
    +	// The postal (zip) code of the address.
    +	PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"`
    +
    +	// The state of the address.
    +	State *string `json:"state,omitempty" xmlrpc:"state,omitempty"`
    +
    +	// An account address' type.
    +	Type *Account_Address_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
    +}
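Conversely, omitempty drops nil pointer fields when a value such as an address is encoded, so partially filled records serialize without their unset fields. A small sketch under those assumptions; the trimmed AccountAddress type and the strPtr helper are illustrative, not part of this package:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Trimmed, illustrative address mirroring the pointer fields above.
    type AccountAddress struct {
    	Address1   *string `json:"address1,omitempty"`
    	City       *string `json:"city,omitempty"`
    	PostalCode *string `json:"postalCode,omitempty"`
    }

    // strPtr is a hypothetical helper; the real client may provide its own.
    func strPtr(s string) *string { return &s }

    func main() {
    	addr := AccountAddress{
    		Address1: strPtr("123 Example St"),
    		City:     strPtr("Dallas"),
    		// PostalCode left nil, so omitempty drops it from the payload.
    	}
    	out, _ := json.Marshal(addr)
    	fmt.Println(string(out)) // {"address1":"123 Example St","city":"Dallas"}
    }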
    +
    +// no documentation yet
    +type Account_Address_Type struct {
    +	Entity
    +
    +	// DEPRECATED
    +	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
    +
    +	// no documentation yet
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// no documentation yet
    +	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
    +
    +	// no documentation yet
    +	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
    +}
    +
    +// This service allows for a unique identifier to be associated with an existing customer account.
    +type Account_Affiliation struct {
    +	Entity
    +
    +	// The account that an affiliation belongs to.
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// A customer account's internal identifier.
    +	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
    +
    +	// An affiliate identifier associated with the customer account.
    +	AffiliateId *string `json:"affiliateId,omitempty" xmlrpc:"affiliateId,omitempty"`
    +
    +	// The date an account affiliation was created.
    +	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
    +
    +	// A customer affiliation internal identifier.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// The date an account affiliation was last modified.
    +	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Agreement struct {
    +	Entity
    +
    +	// no documentation yet
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// The type of agreement.
    +	AgreementType *Account_Agreement_Type `json:"agreementType,omitempty" xmlrpc:"agreementType,omitempty"`
    +
    +	// The type of agreement identifier.
    +	AgreementTypeId *int `json:"agreementTypeId,omitempty" xmlrpc:"agreementTypeId,omitempty"`
    +
    +	// A count of the files attached to an agreement.
    +	AttachedBillingAgreementFileCount *uint `json:"attachedBillingAgreementFileCount,omitempty" xmlrpc:"attachedBillingAgreementFileCount,omitempty"`
    +
    +	// The files attached to an agreement.
    +	AttachedBillingAgreementFiles []Account_MasterServiceAgreement `json:"attachedBillingAgreementFiles,omitempty" xmlrpc:"attachedBillingAgreementFiles,omitempty"`
    +
    +	// no documentation yet
    +	AutoRenew *int `json:"autoRenew,omitempty" xmlrpc:"autoRenew,omitempty"`
    +
    +	// A count of the billing items associated with an agreement.
    +	BillingItemCount *uint `json:"billingItemCount,omitempty" xmlrpc:"billingItemCount,omitempty"`
    +
    +	// The billing items associated with an agreement.
    +	BillingItems []Billing_Item `json:"billingItems,omitempty" xmlrpc:"billingItems,omitempty"`
    +
    +	// no documentation yet
    +	CancellationFee *int `json:"cancellationFee,omitempty" xmlrpc:"cancellationFee,omitempty"`
    +
    +	// The date an agreement was created.
    +	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
    +
    +	// The duration in months of an agreement.
    +	DurationMonths *int `json:"durationMonths,omitempty" xmlrpc:"durationMonths,omitempty"`
    +
    +	// The end date of an agreement.
    +	EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
    +
    +	// An agreement's internal identifier.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// The effective start date of an agreement.
    +	StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
    +
    +	// The status of the agreement.
    +	Status *Account_Agreement_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
    +
    +	// The status identifier for an agreement.
    +	StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
    +
    +	// The title of an agreement.
    +	Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"`
    +
    +	// A count of the top level billing item associated with an agreement.
    +	TopLevelBillingItemCount *uint `json:"topLevelBillingItemCount,omitempty" xmlrpc:"topLevelBillingItemCount,omitempty"`
    +
    +	// The top level billing item associated with an agreement.
    +	TopLevelBillingItems []Billing_Item `json:"topLevelBillingItems,omitempty" xmlrpc:"topLevelBillingItems,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Agreement_Status struct {
    +	Entity
    +
    +	// The name of the agreement status.
    +	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Agreement_Type struct {
    +	Entity
    +
    +	// The name of the agreement type.
    +	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
    +}
    +
    +// A SoftLayer_Account_Attachment_Employee models an assignment of a single [[SoftLayer_User_Employee|employee]] to a single [[SoftLayer_Account|account]].
    +type Account_Attachment_Employee struct {
    +	Entity
    +
    +	// An [[SoftLayer_Account|account]] that is assigned to an [[SoftLayer_User_Employee|employee]].
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// An [[SoftLayer_User_Employee|employee]] that is assigned to an [[SoftLayer_Account|account]].
    +	Employee *User_Employee `json:"employee,omitempty" xmlrpc:"employee,omitempty"`
    +
    +	// The role of an [[SoftLayer_User_Employee|employee]] that is assigned to an [[SoftLayer_Account|account]].
    +	EmployeeRole *Account_Attachment_Employee_Role `json:"employeeRole,omitempty" xmlrpc:"employeeRole,omitempty"`
    +
    +	// Role identifier.
    +	RoleId *int `json:"roleId,omitempty" xmlrpc:"roleId,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Attachment_Employee_Role struct {
    +	Entity
    +
    +	// no documentation yet
    +	Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"`
    +
    +	// no documentation yet
    +	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
    +}
    +
    +// Many SoftLayer customer accounts have individual attributes assigned to them that describe features or special features for that account, such as special pricing, account statuses, and ordering instructions. The SoftLayer_Account_Attribute data type contains information relating to a single SoftLayer_Account attribute.
    +type Account_Attribute struct {
    +	Entity
    +
    +	// The SoftLayer customer account that has an attribute.
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// The type of attribute assigned to a SoftLayer customer account.
    +	AccountAttributeType *Account_Attribute_Type `json:"accountAttributeType,omitempty" xmlrpc:"accountAttributeType,omitempty"`
    +
    +	// The internal identifier of the type of attribute that a SoftLayer customer account attribute belongs to.
    +	AccountAttributeTypeId *int `json:"accountAttributeTypeId,omitempty" xmlrpc:"accountAttributeTypeId,omitempty"`
    +
    +	// The internal identifier of the SoftLayer customer account that is assigned an account attribute.
    +	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
    +
    +	// A SoftLayer customer account attribute's internal identifier.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// A SoftLayer account attribute's value.
    +	Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
    +}
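Account attributes are effectively typed key/value pairs, where the type's key name identifies what the value means. A short sketch of looking up a value by type key name with the pointer guards these shapes require; the trimmed types, the findAttribute helper, and the sample data are assumptions for illustration only:

    package main

    import "fmt"

    // Trimmed, illustrative shapes of the attribute types above.
    type AccountAttributeType struct {
    	KeyName *string
    }

    type AccountAttribute struct {
    	AccountAttributeType *AccountAttributeType
    	Value                *string
    }

    // findAttribute is a hypothetical helper: it returns the value of the first
    // attribute whose type key name matches, guarding every pointer on the way.
    func findAttribute(attrs []AccountAttribute, keyName string) (string, bool) {
    	for _, a := range attrs {
    		if a.AccountAttributeType == nil || a.AccountAttributeType.KeyName == nil || a.Value == nil {
    			continue
    		}
    		if *a.AccountAttributeType.KeyName == keyName {
    			return *a.Value, true
    		}
    	}
    	return "", false
    }

    func main() {
    	key, val := "SAMPLE_KEY", "sample value" // made-up data
    	attrs := []AccountAttribute{{
    		AccountAttributeType: &AccountAttributeType{KeyName: &key},
    		Value:                &val,
    	}}
    	if v, ok := findAttribute(attrs, "SAMPLE_KEY"); ok {
    		fmt.Println(v)
    	}
    }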
    +
    +// SoftLayer_Account_Attribute_Type models the type of attribute that can be assigned to a SoftLayer customer account.
    +type Account_Attribute_Type struct {
    +	Entity
    +
    +	// A brief description of a SoftLayer account attribute type.
    +	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
    +
    +	// A SoftLayer account attribute type's internal identifier.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// A SoftLayer account attribute type's key name. This is typically a shorter version of an attribute type's name.
    +	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
    +
    +	// A SoftLayer account attribute type's name.
    +	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
    +}
    +
    +// Account authentication has many different settings that can be set. This class allows the customer or employee to set these settings.
    +type Account_Authentication_Attribute struct {
    +	Entity
    +
    +	// The SoftLayer customer account.
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// The internal identifier of the SoftLayer customer account that is assigned an account authentication attribute.
    +	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
    +
    +	// The SoftLayer account authentication that has an attribute.
    +	AuthenticationRecord *Account_Authentication_Saml `json:"authenticationRecord,omitempty" xmlrpc:"authenticationRecord,omitempty"`
    +
    +	// A SoftLayer account authentication attribute's internal identifier.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// The type of attribute assigned to a SoftLayer account authentication.
    +	Type *Account_Authentication_Attribute_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
    +
    +	// The internal identifier of the type of attribute that a SoftLayer account authentication attribute belongs to.
    +	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
    +
    +	// A SoftLayer account authentication attribute's value.
    +	Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
    +}
    +
    +// SoftLayer_Account_Authentication_Attribute_Type models the type of attribute that can be assigned to a SoftLayer customer account authentication.
    +type Account_Authentication_Attribute_Type struct {
    +	Entity
    +
    +	// A brief description of a SoftLayer account authentication attribute type.
    +	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
    +
    +	// A SoftLayer account authentication attribute type's internal identifier.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// A SoftLayer account authentication attribute type's key name. This is typically a shorter version of an attribute type's name.
    +	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
    +
    +	// A SoftLayer account authentication attribute type's name.
    +	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
    +
    +	// An example of what you can put in as your value.
    +	ValueExample *string `json:"valueExample,omitempty" xmlrpc:"valueExample,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Authentication_OpenIdConnect_Option struct {
    +	Entity
    +
    +	// no documentation yet
    +	Key *string `json:"key,omitempty" xmlrpc:"key,omitempty"`
    +
    +	// no documentation yet
    +	Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Authentication_OpenIdConnect_RegistrationInformation struct {
    +	Entity
    +
    +	// no documentation yet
    +	ExistingBlueIdFlag *bool `json:"existingBlueIdFlag,omitempty" xmlrpc:"existingBlueIdFlag,omitempty"`
    +
    +	// no documentation yet
    +	FederatedEmailDomainFlag *bool `json:"federatedEmailDomainFlag,omitempty" xmlrpc:"federatedEmailDomainFlag,omitempty"`
    +
    +	// no documentation yet
    +	User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Authentication_Saml struct {
    +	Entity
    +
    +	// The account associated with this saml configuration.
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// The saml account id.
    +	AccountId *string `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
    +
    +	// A count of the saml attribute values for a SoftLayer customer account.
    +	AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"`
    +
    +	// The saml attribute values for a SoftLayer customer account.
    +	Attributes []Account_Authentication_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"`
    +
    +	// The identity provider x509 certificate.
    +	Certificate *string `json:"certificate,omitempty" xmlrpc:"certificate,omitempty"`
    +
    +	// The identity provider x509 certificate fingerprint.
    +	CertificateFingerprint *string `json:"certificateFingerprint,omitempty" xmlrpc:"certificateFingerprint,omitempty"`
    +
    +	// The identity provider entity ID.
    +	EntityId *string `json:"entityId,omitempty" xmlrpc:"entityId,omitempty"`
    +
    +	// The saml internal identifying number.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// The service provider x509 certificate.
    +	ServiceProviderCertificate *string `json:"serviceProviderCertificate,omitempty" xmlrpc:"serviceProviderCertificate,omitempty"`
    +
    +	// The service provider entity IDs.
    +	ServiceProviderEntityId *string `json:"serviceProviderEntityId,omitempty" xmlrpc:"serviceProviderEntityId,omitempty"`
    +
    +	// The service provider public key.
    +	ServiceProviderPublicKey *string `json:"serviceProviderPublicKey,omitempty" xmlrpc:"serviceProviderPublicKey,omitempty"`
    +
    +	// The service provider single logout encoding.
    +	ServiceProviderSingleLogoutEncoding *string `json:"serviceProviderSingleLogoutEncoding,omitempty" xmlrpc:"serviceProviderSingleLogoutEncoding,omitempty"`
    +
    +	// The service provider single logout address.
    +	ServiceProviderSingleLogoutUrl *string `json:"serviceProviderSingleLogoutUrl,omitempty" xmlrpc:"serviceProviderSingleLogoutUrl,omitempty"`
    +
    +	// The service provider single sign on encoding.
    +	ServiceProviderSingleSignOnEncoding *string `json:"serviceProviderSingleSignOnEncoding,omitempty" xmlrpc:"serviceProviderSingleSignOnEncoding,omitempty"`
    +
    +	// The service provider single sign on address.
    +	ServiceProviderSingleSignOnUrl *string `json:"serviceProviderSingleSignOnUrl,omitempty" xmlrpc:"serviceProviderSingleSignOnUrl,omitempty"`
    +
    +	// The identity provider single logout encoding.
    +	SingleLogoutEncoding *string `json:"singleLogoutEncoding,omitempty" xmlrpc:"singleLogoutEncoding,omitempty"`
    +
    +	// The identity provider single logout address.
    +	SingleLogoutUrl *string `json:"singleLogoutUrl,omitempty" xmlrpc:"singleLogoutUrl,omitempty"`
    +
    +	// The identity provider single sign on encoding.
    +	SingleSignOnEncoding *string `json:"singleSignOnEncoding,omitempty" xmlrpc:"singleSignOnEncoding,omitempty"`
    +
    +	// The identity provider single sign on address.
    +	SingleSignOnUrl *string `json:"singleSignOnUrl,omitempty" xmlrpc:"singleSignOnUrl,omitempty"`
    +}
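The SAML record mixes two sides of the federation: the certificate, entityId and single sign-on/logout fields describe the customer's identity provider, while the serviceProvider* fields describe SoftLayer's side. A minimal sketch of filling only the identity-provider half; the trimmed type, the strPtr helper and the placeholder URLs are illustrative assumptions:

    package main

    import "fmt"

    // Trimmed, illustrative view of the identity-provider fields above.
    type AccountAuthenticationSaml struct {
    	EntityId        *string
    	Certificate     *string
    	SingleSignOnUrl *string
    	SingleLogoutUrl *string
    }

    // strPtr is a hypothetical helper for taking the address of a literal.
    func strPtr(s string) *string { return &s }

    func main() {
    	// Placeholder values only; not real endpoints or certificates.
    	cfg := AccountAuthenticationSaml{
    		EntityId:        strPtr("https://idp.example.com/metadata"),
    		Certificate:     strPtr("-----BEGIN CERTIFICATE-----\n..."),
    		SingleSignOnUrl: strPtr("https://idp.example.com/sso"),
    		SingleLogoutUrl: strPtr("https://idp.example.com/slo"),
    	}
    	fmt.Println("IdP entity:", *cfg.EntityId, "SSO:", *cfg.SingleSignOnUrl)
    }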
    +
    +// Contains business partner details associated with an account: Country Enterprise Identifier (CEID), Channel ID, Segment ID, and Reseller Level.
    +type Account_Business_Partner struct {
    +	Entity
    +
    +	// Account associated with the business partner data
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// Channel indicator used to categorize business partner revenue.
    +	Channel *Business_Partner_Channel `json:"channel,omitempty" xmlrpc:"channel,omitempty"`
    +
    +	// Account business partner channel identifier
    +	ChannelId *int `json:"channelId,omitempty" xmlrpc:"channelId,omitempty"`
    +
    +	// Account business partner country enterprise code
    +	CountryEnterpriseCode *string `json:"countryEnterpriseCode,omitempty" xmlrpc:"countryEnterpriseCode,omitempty"`
    +
    +	// Reseller level of an account business partner
    +	ResellerLevel *int `json:"resellerLevel,omitempty" xmlrpc:"resellerLevel,omitempty"`
    +
    +	// Segment indicator used to categorize business partner revenue.
    +	Segment *Business_Partner_Segment `json:"segment,omitempty" xmlrpc:"segment,omitempty"`
    +
    +	// Account business partner segment identifier
    +	SegmentId *int `json:"segmentId,omitempty" xmlrpc:"segmentId,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Classification_Group_Type struct {
    +	Entity
    +
    +	// no documentation yet
    +	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Contact struct {
    +	Entity
    +
    +	// no documentation yet
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// no documentation yet
    +	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
    +
    +	// no documentation yet
    +	Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"`
    +
    +	// no documentation yet
    +	Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"`
    +
    +	// no documentation yet
    +	AlternatePhone *string `json:"alternatePhone,omitempty" xmlrpc:"alternatePhone,omitempty"`
    +
    +	// no documentation yet
    +	City *string `json:"city,omitempty" xmlrpc:"city,omitempty"`
    +
    +	// no documentation yet
    +	CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"`
    +
    +	// no documentation yet
    +	Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"`
    +
    +	// no documentation yet
    +	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
    +
    +	// no documentation yet
    +	Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
    +
    +	// no documentation yet
    +	FaxPhone *string `json:"faxPhone,omitempty" xmlrpc:"faxPhone,omitempty"`
    +
    +	// no documentation yet
    +	FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
    +
    +	// no documentation yet
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// no documentation yet
    +	JobTitle *string `json:"jobTitle,omitempty" xmlrpc:"jobTitle,omitempty"`
    +
    +	// no documentation yet
    +	LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
    +
    +	// no documentation yet
    +	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
    +
    +	// no documentation yet
    +	OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"`
    +
    +	// no documentation yet
    +	PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"`
    +
    +	// no documentation yet
    +	ProfileName *string `json:"profileName,omitempty" xmlrpc:"profileName,omitempty"`
    +
    +	// no documentation yet
    +	State *string `json:"state,omitempty" xmlrpc:"state,omitempty"`
    +
    +	// no documentation yet
    +	Type *Account_Contact_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
    +
    +	// no documentation yet
    +	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
    +
    +	// no documentation yet
    +	Url *string `json:"url,omitempty" xmlrpc:"url,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Contact_Type struct {
    +	Entity
    +
    +	// no documentation yet
    +	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
    +
    +	// no documentation yet
    +	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
    +
    +	// no documentation yet
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// no documentation yet
    +	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
    +
    +	// no documentation yet
    +	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
    +
    +	// no documentation yet
    +	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_External_Setup struct {
    +	Entity
    +
    +	// The SoftLayer customer account the request belongs to.
    +	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
    +
    +	// The currency requested after the billing switch.
    +	CurrencyId *int `json:"currencyId,omitempty" xmlrpc:"currencyId,omitempty"`
    +
    +	// The unique identifier for this setup request.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// The external system that will handle billing.
    +	ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"`
    +
    +	// The status of the account setup request.
    +	StatusCode *string `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"`
    +
    +	// no documentation yet
    +	TypeCode *string `json:"typeCode,omitempty" xmlrpc:"typeCode,omitempty"`
    +
    +	// The transaction information related to verifying the customer credit card.
    +	VerifyCardTransaction *Billing_Payment_Card_Transaction `json:"verifyCardTransaction,omitempty" xmlrpc:"verifyCardTransaction,omitempty"`
    +
    +	// The related credit card transaction record for card verification.
    +	VerifyCardTransactionId *int `json:"verifyCardTransactionId,omitempty" xmlrpc:"verifyCardTransactionId,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Historical_Report struct {
    +	Entity
    +}
    +
    +// no documentation yet
    +type Account_Internal_Ibm struct {
    +	Entity
    +}
    +
    +// no documentation yet
    +type Account_Link struct {
    +	Entity
    +
    +	// no documentation yet
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// no documentation yet
    +	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
    +
    +	// no documentation yet
    +	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
    +
    +	// no documentation yet
    +	DestinationAccountAlphanumericId *string `json:"destinationAccountAlphanumericId,omitempty" xmlrpc:"destinationAccountAlphanumericId,omitempty"`
    +
    +	// no documentation yet
    +	DestinationAccountId *int `json:"destinationAccountId,omitempty" xmlrpc:"destinationAccountId,omitempty"`
    +
    +	// no documentation yet
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// no documentation yet
    +	ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"`
    +
    +	// no documentation yet
    +	ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Link_Bluemix struct {
    +	Account_Link
    +}
    +
    +// no documentation yet
    +type Account_Link_OpenStack struct {
    +	Account_Link
    +
    +	// Pseudonym for destinationAccountAlphanumericId
    +	DomainId *string `json:"domainId,omitempty" xmlrpc:"domainId,omitempty"`
    +}
    +
    +// OpenStack domain creation details
    +type Account_Link_OpenStack_DomainCreationDetails struct {
    +	Entity
    +
    +	// Id for the domain this user was added to.
    +	DomainId *string `json:"domainId,omitempty" xmlrpc:"domainId,omitempty"`
    +
    +	// Id for the user given the Cloud Admin role for this domain.
    +	UserId *string `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
    +
    +	// Name for the user given the Cloud Admin role for this domain.
    +	UserName *string `json:"userName,omitempty" xmlrpc:"userName,omitempty"`
    +}
    +
    +// Details required for OpenStack link request
    +type Account_Link_OpenStack_LinkRequest struct {
    +	Entity
    +
    +	// Optional password
    +	DesiredPassword *string `json:"desiredPassword,omitempty" xmlrpc:"desiredPassword,omitempty"`
    +
    +	// Optional projectName
    +	DesiredProjectName *string `json:"desiredProjectName,omitempty" xmlrpc:"desiredProjectName,omitempty"`
    +
    +	// Required username
    +	DesiredUsername *string `json:"desiredUsername,omitempty" xmlrpc:"desiredUsername,omitempty"`
    +}
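Per the field comments above, an OpenStack link request needs a username, while the password and project name are optional. A sketch of building such a request under those assumptions; the trimmed type and the strPtr and newLinkRequest helpers are illustrative, not part of this package or its API client:

    package main

    import (
    	"encoding/json"
    	"errors"
    	"fmt"
    )

    // Trimmed, illustrative link request matching the fields above.
    type OpenStackLinkRequest struct {
    	DesiredUsername    *string `json:"desiredUsername,omitempty"`
    	DesiredPassword    *string `json:"desiredPassword,omitempty"`
    	DesiredProjectName *string `json:"desiredProjectName,omitempty"`
    }

    // strPtr is a hypothetical helper for taking the address of a literal.
    func strPtr(s string) *string { return &s }

    // newLinkRequest is a hypothetical constructor enforcing the documented rule:
    // username required, password and project name optional.
    func newLinkRequest(username string, password, project *string) (OpenStackLinkRequest, error) {
    	if username == "" {
    		return OpenStackLinkRequest{}, errors.New("desiredUsername is required")
    	}
    	return OpenStackLinkRequest{
    		DesiredUsername:    strPtr(username),
    		DesiredPassword:    password,
    		DesiredProjectName: project,
    	}, nil
    }

    func main() {
    	req, err := newLinkRequest("alice", nil, strPtr("demo-project"))
    	if err != nil {
    		panic(err)
    	}
    	out, _ := json.Marshal(req)
    	fmt.Println(string(out)) // password omitted because it was left nil
    }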
    +
    +// OpenStack project creation details
    +type Account_Link_OpenStack_ProjectCreationDetails struct {
    +	Entity
    +
    +	// Id for the domain this project was added to.
    +	DomainId *string `json:"domainId,omitempty" xmlrpc:"domainId,omitempty"`
    +
    +	// Id for this project.
    +	ProjectId *string `json:"projectId,omitempty" xmlrpc:"projectId,omitempty"`
    +
    +	// Name for this project.
    +	ProjectName *string `json:"projectName,omitempty" xmlrpc:"projectName,omitempty"`
    +
    +	// Id for the user given the Project Admin role for this project.
    +	UserId *string `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
    +
    +	// Name for the user given the Project Admin role for this project.
    +	UserName *string `json:"userName,omitempty" xmlrpc:"userName,omitempty"`
    +}
    +
    +// OpenStack project details
    +type Account_Link_OpenStack_ProjectDetails struct {
    +	Entity
    +
    +	// Id for this project.
    +	ProjectId *string `json:"projectId,omitempty" xmlrpc:"projectId,omitempty"`
    +
    +	// Name for this project.
    +	ProjectName *string `json:"projectName,omitempty" xmlrpc:"projectName,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Link_ThePlanet struct {
    +	Account_Link
    +}
    +
    +// no documentation yet
    +type Account_Link_Vendor struct {
    +	Entity
    +
    +	// no documentation yet
    +	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
    +
    +	// no documentation yet
    +	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
    +}
    +
    +// The SoftLayer_Account_Lockdown_Request data type holds information on API requests from brand customers.
    +type Account_Lockdown_Request struct {
    +	Entity
    +
    +	// Account ID associated with this lockdown request.
    +	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
    +
    +	// Type of request.
    +	Action *string `json:"action,omitempty" xmlrpc:"action,omitempty"`
    +
    +	// Timestamp when the lockdown request was initially made.
    +	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
    +
    +	// ID of this lockdown request.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// Timestamp when the lockdown request was modified.
    +	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
    +
    +	// Status of the lockdown request denoting whether it's been completed.
    +	Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_MasterServiceAgreement struct {
    +	Entity
    +
    +	// no documentation yet
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// no documentation yet
    +	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
    +
    +	// no documentation yet
    +	Guid *string `json:"guid,omitempty" xmlrpc:"guid,omitempty"`
    +
    +	// no documentation yet
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// no documentation yet
    +	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
    +}
    +
    +// The SoftLayer_Account_Media data type contains information on a single piece of media associated with a Data Transfer Service request.
    +type Account_Media struct {
    +	Entity
    +
    +	// The account to which the media belongs.
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// The customer user who created the media object.
    +	CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"`
    +
    +	// The datacenter where the media resides.
    +	Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"`
    +
    +	// The description of the media.
    +	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
    +
    +	// The unique id of the media.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// The employee who last modified the media.
    +	ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"`
    +
    +	// The customer user who last modified the media.
    +	ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"`
    +
    +	// The request to which the media belongs.
    +	Request *Account_Media_Data_Transfer_Request `json:"request,omitempty" xmlrpc:"request,omitempty"`
    +
    +	// The request id of the media.
    +	RequestId *int `json:"requestId,omitempty" xmlrpc:"requestId,omitempty"`
    +
    +	// The manufacturer's serial number of the media.
    +	SerialNumber *string `json:"serialNumber,omitempty" xmlrpc:"serialNumber,omitempty"`
    +
    +	// The media's type.
    +	Type *Account_Media_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
    +
    +	// The type id of the media.
    +	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
    +
    +	// A guest's associated EVault network storage service account.
    +	Volume *Network_Storage `json:"volume,omitempty" xmlrpc:"volume,omitempty"`
    +}
    +
    +// The SoftLayer_Account_Media_Data_Transfer_Request data type contains information on a single Data Transfer Service request. Creation of these requests is limited to SoftLayer customers through the SoftLayer Customer Portal.
    +type Account_Media_Data_Transfer_Request struct {
    +	Entity
    +
    +	// The account to which the request belongs.
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// The account id of the request.
    +	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
    +
    +	// A count of the active tickets that are attached to the data transfer request.
    +	ActiveTicketCount *uint `json:"activeTicketCount,omitempty" xmlrpc:"activeTicketCount,omitempty"`
    +
    +	// The active tickets that are attached to the data transfer request.
    +	ActiveTickets []Ticket `json:"activeTickets,omitempty" xmlrpc:"activeTickets,omitempty"`
    +
    +	// The billing item for the original request.
    +	BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
    +
    +	// The customer user who created the request.
    +	CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"`
    +
    +	// The create user id of the request.
    +	CreateUserId *int `json:"createUserId,omitempty" xmlrpc:"createUserId,omitempty"`
    +
    +	// The end date of the request.
    +	EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
    +
    +	// The unique id of the request.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// The media of the request.
    +	Media *Account_Media `json:"media,omitempty" xmlrpc:"media,omitempty"`
    +
    +	// The employee who last modified the request.
    +	ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"`
    +
    +	// The customer user who last modified the request.
    +	ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"`
    +
    +	// The modify user id of the request.
    +	ModifyUserId *int `json:"modifyUserId,omitempty" xmlrpc:"modifyUserId,omitempty"`
    +
    +	// A count of the shipments of the request.
    +	ShipmentCount *uint `json:"shipmentCount,omitempty" xmlrpc:"shipmentCount,omitempty"`
    +
    +	// The shipments of the request.
    +	Shipments []Account_Shipment `json:"shipments,omitempty" xmlrpc:"shipments,omitempty"`
    +
    +	// The start date of the request.
    +	StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
    +
    +	// The status of the request.
    +	Status *Account_Media_Data_Transfer_Request_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
    +
    +	// The status id of the request.
    +	StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
    +
    +	// A count of all tickets that are attached to the data transfer request.
    +	TicketCount *uint `json:"ticketCount,omitempty" xmlrpc:"ticketCount,omitempty"`
    +
    +	// All tickets that are attached to the data transfer request.
    +	Tickets []Ticket `json:"tickets,omitempty" xmlrpc:"tickets,omitempty"`
    +}
    +
    +// The SoftLayer_Account_Media_Data_Transfer_Request_Status data type contains general information relating to the statuses to which a Data Transfer Request may be set.
    +type Account_Media_Data_Transfer_Request_Status struct {
    +	Entity
    +
    +	// The description of the request status.
    +	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
    +
    +	// The unique id of the request status.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// The unique keyname of the request status.
    +	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
    +
    +	// The name of the request status.
    +	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
    +}
    +
    +// The SoftLayer_Account_Media_Type data type contains general information relating to the different types of media devices that SoftLayer currently supports, as part of the Data Transfer Request Service. Such devices as USB hard drives and flash drives, as well as optical media such as CD and DVD are currently supported.
    +type Account_Media_Type struct {
    +	Entity
    +
    +	// The description of the media type.
    +	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
    +
    +	// The unique id of the media type.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// The unique keyname of the media type.
    +	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
    +
    +	// The name of the media type.
    +	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
    +}
    +
    +// The SoftLayer_Account_Network_Vlan_Span data type exposes the setting that controls the automatic spanning of private VLANs attached to a given customer's account.
    +type Account_Network_Vlan_Span struct {
    +	Entity
    +
    +	// The SoftLayer customer account associated with a VLAN.
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// Flag indicating whether the customer wishes to have all private network VLANs associated with the account automatically joined [0 or 1].
    +	EnabledFlag *bool `json:"enabledFlag,omitempty" xmlrpc:"enabledFlag,omitempty"`
    +
    +	// The unique internal identifier of the SoftLayer_Account_Network_Vlan_Span object.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// Timestamp of the last time the ACL for this account was applied.
    +	LastAppliedDate *Time `json:"lastAppliedDate,omitempty" xmlrpc:"lastAppliedDate,omitempty"`
    +
    +	// Timestamp of the last time the subnet hash was verified for this VLAN span record.
    +	LastVerifiedDate *Time `json:"lastVerifiedDate,omitempty" xmlrpc:"lastVerifiedDate,omitempty"`
    +
    +	// Timestamp of the last edit of the record.
    +	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Note struct {
    +	Entity
    +
    +	// no documentation yet
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// no documentation yet
    +	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
    +
    +	// no documentation yet
    +	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
    +
    +	// no documentation yet
    +	Customer *User_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"`
    +
    +	// no documentation yet
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// no documentation yet
    +	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
    +
    +	// no documentation yet
    +	Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"`
    +
    +	// no documentation yet
    +	NoteHistory []Account_Note_History `json:"noteHistory,omitempty" xmlrpc:"noteHistory,omitempty"`
    +
    +	// A count of the note's history records.
    +	NoteHistoryCount *uint `json:"noteHistoryCount,omitempty" xmlrpc:"noteHistoryCount,omitempty"`
    +
    +	// no documentation yet
    +	NoteType *Account_Note_Type `json:"noteType,omitempty" xmlrpc:"noteType,omitempty"`
    +
    +	// no documentation yet
    +	NoteTypeId *int `json:"noteTypeId,omitempty" xmlrpc:"noteTypeId,omitempty"`
    +
    +	// no documentation yet
    +	UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Note_History struct {
    +	Entity
    +
    +	// no documentation yet
    +	AccountNote *Account_Note `json:"accountNote,omitempty" xmlrpc:"accountNote,omitempty"`
    +
    +	// no documentation yet
    +	AccountNoteId *int `json:"accountNoteId,omitempty" xmlrpc:"accountNoteId,omitempty"`
    +
    +	// no documentation yet
    +	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
    +
    +	// no documentation yet
    +	Customer *User_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"`
    +
    +	// no documentation yet
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// no documentation yet
    +	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
    +
    +	// no documentation yet
    +	Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"`
    +
    +	// no documentation yet
    +	UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Note_Type struct {
    +	Entity
    +
    +	// no documentation yet
    +	BrandId *int `json:"brandId,omitempty" xmlrpc:"brandId,omitempty"`
    +
    +	// no documentation yet
    +	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
    +
    +	// no documentation yet
    +	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
    +
    +	// no documentation yet
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// no documentation yet
    +	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
    +
    +	// no documentation yet
    +	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
    +
    +	// no documentation yet
    +	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
    +
    +	// no documentation yet
    +	ValueExpression *string `json:"valueExpression,omitempty" xmlrpc:"valueExpression,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_Partner_Referral_Prospect struct {
    +	User_Customer_Prospect
    +
    +	// no documentation yet
    +	CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"`
    +
    +	// no documentation yet
    +	EmailAddress *string `json:"emailAddress,omitempty" xmlrpc:"emailAddress,omitempty"`
    +
    +	// no documentation yet
    +	FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
    +
    +	// no documentation yet
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// no documentation yet
    +	LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
    +}
    +
    +// The SoftLayer_Account_Password data type contains usernames, passwords and notes for services that may be required by external applications, such as the Webcc interface for the EVault Storage service.
    +type Account_Password struct {
    +	Entity
    +
    +	// no documentation yet
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// The SoftLayer customer account id that a username/password combination is associated with.
    +	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
    +
    +	// A username/password combination's internal identifier.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// A simple description of a username/password combination. These notes don't affect portal functionality.
    +	Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
    +
    +	// The password portion of a username/password combination.
    +	Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"`
    +
    +	// The service that an account/password combination is tied to.
    +	Type *Account_Password_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
    +
    +	// An identifier relating to a username/password combination's associated service.
    +	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
    +
    +	// The username portion of a username/password combination.
    +	Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
    +}
    +
    +// Every username and password combination associated with a SoftLayer customer account belongs to a service that SoftLayer provides. The relationship between a username/password and its service is provided by the SoftLayer_Account_Password_Type data type. Each username/password belongs to a single service type.
    +type Account_Password_Type struct {
    +	Entity
    +
    +	// A description of the use for the account username/password combination.
    +	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_PersonalData_RemoveRequestReview struct {
    +	Entity
    +
    +	// no documentation yet
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// no documentation yet
    +	ApprovedFlag *Account_PersonalData_RemoveRequestReview `json:"approvedFlag,omitempty" xmlrpc:"approvedFlag,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_ProofOfConcept struct {
    +	Entity
    +}
    +
    +// This class represents a Proof of Concept account approver.
    +type Account_ProofOfConcept_Approver struct {
    +	Entity
    +
    +	// Approval slot of the approver.
    +	ApprovalOrder *int `json:"approvalOrder,omitempty" xmlrpc:"approvalOrder,omitempty"`
    +
    +	// Internal identifier.
    +	BluepagesUid *string `json:"bluepagesUid,omitempty" xmlrpc:"bluepagesUid,omitempty"`
    +
    +	// Email of the approver.
    +	Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
    +
    +	// First name of the approver.
    +	FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
    +
    +	// Internal identifier of a Proof of Concept account approver.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// Last name of the approver.
    +	LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
    +
    +	// SoftLayer_Account_ProofOfConcept_Approver_Region identifier of the approver.
    +	RegionKeyName *string `json:"regionKeyName,omitempty" xmlrpc:"regionKeyName,omitempty"`
    +
    +	// no documentation yet
    +	Role *Account_ProofOfConcept_Approver_Role `json:"role,omitempty" xmlrpc:"role,omitempty"`
    +
    +	// SoftLayer_Account_ProofOfConcept_Approver_Role identifier of the approver.
    +	RoleId *int `json:"roleId,omitempty" xmlrpc:"roleId,omitempty"`
    +
    +	// no documentation yet
    +	Type *Account_ProofOfConcept_Approver_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
    +
    +	// SoftLayer_Account_ProofOfConcept_Approver_Type identifier of the approver.
    +	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
    +}
    +
    +// This class represents a Proof of Concept account approver type. The current roles are Primary and Backup approvers.
    +type Account_ProofOfConcept_Approver_Role struct {
    +	Entity
    +
    +	// Description of a Proof of Concept account approver role.
    +	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
    +
    +	// Internal identifier of a Proof of Concept account approver role.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// Key name of a Proof of Concept account approver role.
    +	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
    +
    +	// Name of a Proof of Concept account approver role.
    +	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
    +}
    +
    +// This class represents a Proof of Concept account approver type.
    +type Account_ProofOfConcept_Approver_Type struct {
    +	Entity
    +
    +	// A count of
    +	ApproverCount *uint `json:"approverCount,omitempty" xmlrpc:"approverCount,omitempty"`
    +
    +	// no documentation yet
    +	Approvers []Account_ProofOfConcept_Approver `json:"approvers,omitempty" xmlrpc:"approvers,omitempty"`
    +
    +	// Description for a Proof of Concept account approver type.
    +	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
    +
    +	// Internal identifier of a Proof of Concept account approver type.
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// Key name for a Proof of Concept account approver type.
    +	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
    +
    +	// Name of a Proof of Concept account approver type.
    +	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
    +}
    +
    +// no documentation yet
    +type Account_ProofOfConcept_Funding_Type struct {
    +	Entity
    +
    +	// A count of
    +	ApproverCount *uint `json:"approverCount,omitempty" xmlrpc:"approverCount,omitempty"`
    +
    +	// A count of
    +	ApproverTypeCount *uint `json:"approverTypeCount,omitempty" xmlrpc:"approverTypeCount,omitempty"`
    +
    +	// no documentation yet
    +	ApproverTypes []Account_ProofOfConcept_Approver_Type `json:"approverTypes,omitempty" xmlrpc:"approverTypes,omitempty"`
    +
    +	// no documentation yet
    +	Approvers []Account_ProofOfConcept_Approver `json:"approvers,omitempty" xmlrpc:"approvers,omitempty"`
    +
    +	// no documentation yet
    +	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
    +}
    +
    +//
    +//
    +//
    +//
    +//
    +type Account_Regional_Registry_Detail struct {
    +	Entity
    +
    +	// The account that this detail object belongs to.
    +	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
    +
    +	// The detail object's associated [[SoftLayer_Account|account]] id
    +	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
    +
    +	// The date and time the detail object was created
    +	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
    +
    +	// A count of references to the [[SoftLayer_Network_Subnet_Registration|registration objects]] that consume this detail object.
    +	DetailCount *uint `json:"detailCount,omitempty" xmlrpc:"detailCount,omitempty"`
    +
    +	// The associated type of this detail object.
    +	DetailType *Account_Regional_Registry_Detail_Type `json:"detailType,omitempty" xmlrpc:"detailType,omitempty"`
    +
    +	// The detail object's associated [[SoftLayer_Account_Regional_Registry_Detail_Type|type]] id
    +	DetailTypeId *int `json:"detailTypeId,omitempty" xmlrpc:"detailTypeId,omitempty"`
    +
    +	// References to the [[SoftLayer_Network_Subnet_Registration|registration objects]] that consume this detail object.
    +	Details []Network_Subnet_Registration_Details `json:"details,omitempty" xmlrpc:"details,omitempty"`
    +
    +	// Unique ID of the detail object
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// The date and time the detail object was last modified
    +	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
    +
    +	// The individual properties that define this detail object's values.
    +	Properties []Account_Regional_Registry_Detail_Property `json:"properties,omitempty" xmlrpc:"properties,omitempty"`
    +
    +	// A count of the individual properties that define this detail object's values.
    +	PropertyCount *uint `json:"propertyCount,omitempty" xmlrpc:"propertyCount,omitempty"`
    +
    +	// The associated RWhois handle of this detail object. Used only when detailed reassignments are necessary.
    +	RegionalInternetRegistryHandle *Account_Rwhois_Handle `json:"regionalInternetRegistryHandle,omitempty" xmlrpc:"regionalInternetRegistryHandle,omitempty"`
    +
    +	// The detail object's associated [[SoftLayer_Account_Rwhois_Handle|RIR handle]] id
    +	RegionalInternetRegistryHandleId *int `json:"regionalInternetRegistryHandleId,omitempty" xmlrpc:"regionalInternetRegistryHandleId,omitempty"`
    +}
    +
    +// Subnet registration properties are used to define various attributes of the [[SoftLayer_Account_Regional_Registry_Detail|detail objects]]. These properties are defined by the [[SoftLayer_Account_Regional_Registry_Detail_Property_Type]] objects, which describe the available value formats.
    +type Account_Regional_Registry_Detail_Property struct {
    +	Entity
    +
    +	// no documentation yet
    +	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
    +
    +	// The [[SoftLayer_Account_Regional_Registry_Detail]] object this property belongs to
    +	Detail *Account_Regional_Registry_Detail `json:"detail,omitempty" xmlrpc:"detail,omitempty"`
    +
    +	// Unique ID of the property object
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// no documentation yet
    +	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
    +
    +	// The [[SoftLayer_Account_Regional_Registry_Detail_Property_Type]] object this property belongs to
    +	PropertyType *Account_Regional_Registry_Detail_Property_Type `json:"propertyType,omitempty" xmlrpc:"propertyType,omitempty"`
    +
    +	// The numeric ID of the related [[SoftLayer_Account_Regional_Registry_Detail_Property_Type|property type object]]
    +	PropertyTypeId *int `json:"propertyTypeId,omitempty" xmlrpc:"propertyTypeId,omitempty"`
    +
    +	// The numeric ID of the related [[SoftLayer_Account_Regional_Registry_Detail|detail object]]
    +	RegistrationDetailId *int `json:"registrationDetailId,omitempty" xmlrpc:"registrationDetailId,omitempty"`
    +
    +	// When multiple properties exist for a property type, defines the position in the sequence of those properties
    +	SequencePosition *int `json:"sequencePosition,omitempty" xmlrpc:"sequencePosition,omitempty"`
    +
    +	// The value of the property
    +	Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
    +}
    +
    +// Subnet Registration Detail Property Type objects describe the nature of a [[SoftLayer_Account_Regional_Registry_Detail_Property]] object. These types use [http://php.net/pcre.pattern.php Perl-Compatible Regular Expressions] to validate the value of a property object.
    +type Account_Regional_Registry_Detail_Property_Type struct {
    +	Entity
    +
    +	// no documentation yet
    +	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
    +
    +	// Unique numeric ID of the property type object
    +	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
    +
    +	// Code-friendly string name of the property type
    +	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
    +
    +	// no documentation yet
    +	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
    +
    +	// Human-readable name of the property type
    +	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
    +
    +	// A Perl-compatible regular expression used to describe the valid format of the property
    +	ValueExpression *string `json:"valueExpression,omitempty" xmlrpc:"valueExpression,omitempty"`
    +}
    +
    +// Subnet Registration Detail Type objects describe the nature of a [[SoftLayer_Account_Regional_Registry_Detail]] object.
    +//
    +// The standard values for these objects are as follows: 
+// • NETWORK - The detail object represents the information for a [[SoftLayer_Network_Subnet|subnet]]
+// • NETWORK6 - The detail object represents the information for an [[SoftLayer_Network_Subnet_Version6|IPv6 subnet]]
+// • PERSON - The detail object represents the information for a customer with the RIR
+type Account_Regional_Registry_Detail_Type struct {
+	Entity
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// Unique numeric ID of the detail type object
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// Code-friendly string name of the detail type
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// Human-readable name of the detail type
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Account_Regional_Registry_Detail_Version4_Person_Default data type contains general information relating to a single SoftLayer RIR account. RIR account information in this type such as names, addresses, and phone numbers are assigned to the registry only and not to users belonging to the account.
+type Account_Regional_Registry_Detail_Version4_Person_Default struct {
+	Account_Regional_Registry_Detail
+}
+
+// no documentation yet
+type Account_Reports_Request struct {
+	Entity
+
+	// no documentation yet
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// A request's corresponding external contact, if one exists.
+	AccountContact *Account_Contact `json:"accountContact,omitempty" xmlrpc:"accountContact,omitempty"`
+
+	// no documentation yet
+	AccountContactId *int `json:"accountContactId,omitempty" xmlrpc:"accountContactId,omitempty"`
+
+	// no documentation yet
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// no documentation yet
+	ComplianceReportTypeId *string `json:"complianceReportTypeId,omitempty" xmlrpc:"complianceReportTypeId,omitempty"`
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	EmployeeRecordId *int `json:"employeeRecordId,omitempty" xmlrpc:"employeeRecordId,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// no documentation yet
+	Nda *string `json:"nda,omitempty" xmlrpc:"nda,omitempty"`
+
+	// no documentation yet
+	Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+	// no documentation yet
+	Report *string `json:"report,omitempty" xmlrpc:"report,omitempty"`
+
+	// Type of the report customer is requesting for.
+	ReportType *Compliance_Report_Type `json:"reportType,omitempty" xmlrpc:"reportType,omitempty"`
+
+	// no documentation yet
+	RequestKey *string `json:"requestKey,omitempty" xmlrpc:"requestKey,omitempty"`
+
+	// no documentation yet
+	Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// no documentation yet
+	Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"`
+
+	// no documentation yet
+	TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"`
+
+	// The customer user that initiated a report request.
+	User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+
+	// no documentation yet
+	UsrRecordId *int `json:"usrRecordId,omitempty" xmlrpc:"usrRecordId,omitempty"`
+}
+
+// Provides a means of tracking handle identifiers at the various regional internet registries (RIRs). These objects are used by the [[SoftLayer_Network_Subnet_Registration (type)|SoftLayer_Network_Subnet_Registration]] objects to identify a customer or organization when a subnet is registered.
+type Account_Rwhois_Handle struct {
+	Entity
+
+	// The account that this handle belongs to.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The handle object's associated [[SoftLayer_Account|account]] id
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The handle object's unique identifier as assigned by the RIR.
+	Handle *string `json:"handle,omitempty" xmlrpc:"handle,omitempty"`
+
+	// Unique ID of the handle object
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+}
+
+// The SoftLayer_Account_Shipment data type contains information relating to a shipment. Basic information such as addresses, the shipment courier, and any tracking information for as shipment is accessible with this data type.
+type Account_Shipment struct {
+	Entity
+
+	// The account to which the shipment belongs.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The account id of the shipment.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// The courier handling the shipment.
+	Courier *Auxiliary_Shipping_Courier `json:"courier,omitempty" xmlrpc:"courier,omitempty"`
+
+	// The courier id of the shipment.
+	CourierId *int `json:"courierId,omitempty" xmlrpc:"courierId,omitempty"`
+
+	// The courier name of the shipment.
+	CourierName *string `json:"courierName,omitempty" xmlrpc:"courierName,omitempty"`
+
+	// The employee who created the shipment.
+	CreateEmployee *User_Employee `json:"createEmployee,omitempty" xmlrpc:"createEmployee,omitempty"`
+
+	// The customer user who created the shipment.
+	CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"`
+
+	// The create user id of the shipment.
+	CreateUserId *int `json:"createUserId,omitempty" xmlrpc:"createUserId,omitempty"`
+
+	// The address at which the shipment is received.
+	DestinationAddress *Account_Address `json:"destinationAddress,omitempty" xmlrpc:"destinationAddress,omitempty"`
+
+	// The destination address id of the shipment.
+	DestinationAddressId *int `json:"destinationAddressId,omitempty" xmlrpc:"destinationAddressId,omitempty"`
+
+	// The destination date of the shipment.
+	DestinationDate *Time `json:"destinationDate,omitempty" xmlrpc:"destinationDate,omitempty"`
+
+	// The unique id of the shipment.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The employee who last modified the shipment.
+	ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"`
+
+	// The customer user who last modified the shipment.
+	ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"`
+
+	// The modify user id of the shipment.
+	ModifyUserId *int `json:"modifyUserId,omitempty" xmlrpc:"modifyUserId,omitempty"`
+
+	// The shipment note (special handling instructions).
+	Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"`
+
+	// The address from which the shipment is sent.
+	OriginationAddress *Account_Address `json:"originationAddress,omitempty" xmlrpc:"originationAddress,omitempty"`
+
+	// The origination address id of the shipment.
+	OriginationAddressId *int `json:"originationAddressId,omitempty" xmlrpc:"originationAddressId,omitempty"`
+
+	// The origination date of the shipment.
+	OriginationDate *Time `json:"originationDate,omitempty" xmlrpc:"originationDate,omitempty"`
+
+	// A count of the items in the shipment.
+	ShipmentItemCount *uint `json:"shipmentItemCount,omitempty" xmlrpc:"shipmentItemCount,omitempty"`
+
+	// The items in the shipment.
+	ShipmentItems []Account_Shipment_Item `json:"shipmentItems,omitempty" xmlrpc:"shipmentItems,omitempty"`
+
+	// The status of the shipment.
+	Status *Account_Shipment_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// The status id of the shipment.
+	StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+	// The tracking data for the shipment.
+	TrackingData []Account_Shipment_Tracking_Data `json:"trackingData,omitempty" xmlrpc:"trackingData,omitempty"`
+
+	// A count of the tracking data for the shipment.
+	TrackingDataCount *uint `json:"trackingDataCount,omitempty" xmlrpc:"trackingDataCount,omitempty"`
+
+	// The type of shipment (e.g. for Data Transfer Service or Colocation Service).
+	Type *Account_Shipment_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// The type id of the shipment.
+	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+}
+
+// The SoftLayer_Account_Shipment_Item data type contains information relating to a shipment's item. Basic information such as addresses, the shipment courier, and any tracking information for as shipment is accessible with this data type.
+type Account_Shipment_Item struct {
+	Entity
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The description of the shipping item.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The unique id of the shipping item.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The package id of the shipping item.
+	PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"`
+
+	// The shipment to which this item belongs.
+	Shipment *Account_Shipment `json:"shipment,omitempty" xmlrpc:"shipment,omitempty"`
+
+	// The shipment id of the shipping item.
+	ShipmentId *int `json:"shipmentId,omitempty" xmlrpc:"shipmentId,omitempty"`
+
+	// The item id of the shipping item.
+	ShipmentItemId *int `json:"shipmentItemId,omitempty" xmlrpc:"shipmentItemId,omitempty"`
+
+	// The type of this shipment item.
+	ShipmentItemType *Account_Shipment_Item_Type `json:"shipmentItemType,omitempty" xmlrpc:"shipmentItemType,omitempty"`
+
+	// The item type id of the shipping item.
+	ShipmentItemTypeId *int `json:"shipmentItemTypeId,omitempty" xmlrpc:"shipmentItemTypeId,omitempty"`
+}
+
+// no documentation yet
+type Account_Shipment_Item_Type struct {
+	Entity
+
+	// DEPRECATED
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Account_Shipment_Resource_Type struct {
+	Entity
+}
+
+// no documentation yet
+type Account_Shipment_Status struct {
+	Entity
+
+	// DEPRECATED
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Account_Shipment_Tracking_Data data type contains information on a single piece of tracking information pertaining to a shipment. This tracking information tracking numbers by which the shipment may be tracked through the shipping courier.
+type Account_Shipment_Tracking_Data struct {
+	Entity
+
+	// The employee who created the tracking datum.
+	CreateEmployee *User_Employee `json:"createEmployee,omitempty" xmlrpc:"createEmployee,omitempty"`
+
+	// The customer user who created the tracking datum.
+	CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"`
+
+	// The create user id of the tracking data.
+	CreateUserId *int `json:"createUserId,omitempty" xmlrpc:"createUserId,omitempty"`
+
+	// The unique id of the tracking data.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The employee who last modified the tracking datum.
+	ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"`
+
+	// The customer user who last modified the tracking datum.
+	ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"`
+
+	// The user id of the tracking data.
+	ModifyUserId *int `json:"modifyUserId,omitempty" xmlrpc:"modifyUserId,omitempty"`
+
+	// The package id of the tracking data.
+	PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"`
+
+	// The sequence of the tracking data.
+	Sequence *int `json:"sequence,omitempty" xmlrpc:"sequence,omitempty"`
+
+	// The shipment of the tracking datum.
+	Shipment *Account_Shipment `json:"shipment,omitempty" xmlrpc:"shipment,omitempty"`
+
+	// The shipment id of the tracking data.
+	ShipmentId *int `json:"shipmentId,omitempty" xmlrpc:"shipmentId,omitempty"`
+
+	// The tracking data (tracking number/reference number).
+	TrackingData *string `json:"trackingData,omitempty" xmlrpc:"trackingData,omitempty"`
+}
+
+// no documentation yet
+type Account_Shipment_Type struct {
+	Entity
+
+	// DEPRECATED
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Account_Status struct {
+	Entity
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/auxiliary.go b/vendor/github.com/softlayer/softlayer-go/datatypes/auxiliary.go
new file mode 100644
index 000000000000..84f03a52e978
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/datatypes/auxiliary.go
@@ -0,0 +1,342 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// no documentation yet
+type Auxiliary_Marketing_Event struct {
+	Entity
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	EnabledFlag *int `json:"enabledFlag,omitempty" xmlrpc:"enabledFlag,omitempty"`
+
+	// no documentation yet
+	EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+	// no documentation yet
+	Location *string `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// no documentation yet
+	StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+	// no documentation yet
+	Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"`
+
+	// no documentation yet
+	Url *string `json:"url,omitempty" xmlrpc:"url,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Network_Status struct {
+	Entity
+}
+
+// A SoftLayer_Auxiliary_Notification_Emergency data object represents a notification event being broadcast to the SoftLayer customer base. It is used to provide information regarding outages or current known issues.
+type Auxiliary_Notification_Emergency struct {
+	Entity
+
+	// The date this event was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The device (if any) effected by this event.
+	Device *string `json:"device,omitempty" xmlrpc:"device,omitempty"`
+
+	// The duration of this event.
+	Duration *string `json:"duration,omitempty" xmlrpc:"duration,omitempty"`
+
+	// The device (if any) effected by this event.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The location effected by this event. + Location *string `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // A message describing this event. + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // The last date this event was modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The service(s) (if any) effected by this event. + ServicesAffected *string `json:"servicesAffected,omitempty" xmlrpc:"servicesAffected,omitempty"` + + // The signature of the SoftLayer employee department associated with this notification. + Signature *Auxiliary_Notification_Emergency_Signature `json:"signature,omitempty" xmlrpc:"signature,omitempty"` + + // The date this event will start. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` + + // The status of this notification. + Status *Auxiliary_Notification_Emergency_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // Current status record for this event. + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` +} + +// Every SoftLayer_Auxiliary_Notification_Emergency has a signatureId that references a SoftLayer_Auxiliary_Notification_Emergency_Signature data type. The signature is the user or group responsible for the current event. +type Auxiliary_Notification_Emergency_Signature struct { + Entity + + // The name or signature for the current Emergency Notification. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Every SoftLayer_Auxiliary_Notification_Emergency has a statusId that references a SoftLayer_Auxiliary_Notification_Emergency_Status data type. The status is used to determine the current state of the event. +type Auxiliary_Notification_Emergency_Status struct { + Entity + + // A name describing the status of the current Emergency Notification. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Auxiliary_Press_Release struct { + Entity + + // no documentation yet + About []Auxiliary_Press_Release_About_Press_Release `json:"about,omitempty" xmlrpc:"about,omitempty"` + + // A count of + AboutCount *uint `json:"aboutCount,omitempty" xmlrpc:"aboutCount,omitempty"` + + // A count of + ContactCount *uint `json:"contactCount,omitempty" xmlrpc:"contactCount,omitempty"` + + // no documentation yet + Contacts []Auxiliary_Press_Release_Contact_Press_Release `json:"contacts,omitempty" xmlrpc:"contacts,omitempty"` + + // A press release's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of + MediaPartnerCount *uint `json:"mediaPartnerCount,omitempty" xmlrpc:"mediaPartnerCount,omitempty"` + + // no documentation yet + MediaPartners []Auxiliary_Press_Release_Media_Partner_Press_Release `json:"mediaPartners,omitempty" xmlrpc:"mediaPartners,omitempty"` + + // no documentation yet + PressReleaseContent *Auxiliary_Press_Release_Content `json:"pressReleaseContent,omitempty" xmlrpc:"pressReleaseContent,omitempty"` + + // The data a press release was published. + PublishDate *Time `json:"publishDate,omitempty" xmlrpc:"publishDate,omitempty"` + + // A press release's location. + ReleaseLocation *string `json:"releaseLocation,omitempty" xmlrpc:"releaseLocation,omitempty"` + + // A press release's sub-title. + SubTitle *string `json:"subTitle,omitempty" xmlrpc:"subTitle,omitempty"` + + // A press release's title. 
+ Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"` + + // Whether or not a press release is highlighted on the SoftLayer Website. + WebsiteHighlightFlag *bool `json:"websiteHighlightFlag,omitempty" xmlrpc:"websiteHighlightFlag,omitempty"` +} + +// no documentation yet +type Auxiliary_Press_Release_About struct { + Entity + + // A press release about's content. + Content *string `json:"content,omitempty" xmlrpc:"content,omitempty"` + + // A press release about's internal + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A press release about's title. + Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"` +} + +// no documentation yet +type Auxiliary_Press_Release_About_Press_Release struct { + Entity + + // A count of + AboutParagraphCount *uint `json:"aboutParagraphCount,omitempty" xmlrpc:"aboutParagraphCount,omitempty"` + + // no documentation yet + AboutParagraphs []Auxiliary_Press_Release_About `json:"aboutParagraphs,omitempty" xmlrpc:"aboutParagraphs,omitempty"` + + // A press release about cross + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A press release about's internal + PressReleaseAboutId *int `json:"pressReleaseAboutId,omitempty" xmlrpc:"pressReleaseAboutId,omitempty"` + + // A count of + PressReleaseCount *uint `json:"pressReleaseCount,omitempty" xmlrpc:"pressReleaseCount,omitempty"` + + // A press release internal identifier. + PressReleaseId *int `json:"pressReleaseId,omitempty" xmlrpc:"pressReleaseId,omitempty"` + + // no documentation yet + PressReleases []Auxiliary_Press_Release `json:"pressReleases,omitempty" xmlrpc:"pressReleases,omitempty"` + + // The number that associated an about + SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"` +} + +// no documentation yet +type Auxiliary_Press_Release_Contact struct { + Entity + + // A press release contact's email + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // A press release contact's first name. + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // A press release contact's internal + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A press release contact's last name. + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // A press release contact's phone + Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"` + + // A press release contact's + ProfessionalTitle *string `json:"professionalTitle,omitempty" xmlrpc:"professionalTitle,omitempty"` +} + +// no documentation yet +type Auxiliary_Press_Release_Contact_Press_Release struct { + Entity + + // A count of + ContactCount *uint `json:"contactCount,omitempty" xmlrpc:"contactCount,omitempty"` + + // no documentation yet + Contacts []Auxiliary_Press_Release_Contact `json:"contacts,omitempty" xmlrpc:"contacts,omitempty"` + + // A press release contact cross + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A press release contact's internal + PressReleaseContactId *int `json:"pressReleaseContactId,omitempty" xmlrpc:"pressReleaseContactId,omitempty"` + + // A count of + PressReleaseCount *uint `json:"pressReleaseCount,omitempty" xmlrpc:"pressReleaseCount,omitempty"` + + // A press release internal identifier. 
+ PressReleaseId *int `json:"pressReleaseId,omitempty" xmlrpc:"pressReleaseId,omitempty"` + + // no documentation yet + PressReleases []Auxiliary_Press_Release `json:"pressReleases,omitempty" xmlrpc:"pressReleases,omitempty"` + + // The number that associated a contact + SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"` +} + +// no documentation yet +type Auxiliary_Press_Release_Content struct { + Entity + + // the id of a single press release + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // the press release id that the content + PressReleaseId *int `json:"pressReleaseId,omitempty" xmlrpc:"pressReleaseId,omitempty"` + + // the content of a press release + Text *string `json:"text,omitempty" xmlrpc:"text,omitempty"` +} + +// no documentation yet +type Auxiliary_Press_Release_Media_Partner struct { + Entity + + // A press release media partner's + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A press release media partner's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Auxiliary_Press_Release_Media_Partner_Press_Release struct { + Entity + + // A press release media partner cross + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of + MediaPartnerCount *uint `json:"mediaPartnerCount,omitempty" xmlrpc:"mediaPartnerCount,omitempty"` + + // A press release media partner's + MediaPartnerId *int `json:"mediaPartnerId,omitempty" xmlrpc:"mediaPartnerId,omitempty"` + + // no documentation yet + MediaPartners []Auxiliary_Press_Release_Media_Partner `json:"mediaPartners,omitempty" xmlrpc:"mediaPartners,omitempty"` + + // A count of + PressReleaseCount *uint `json:"pressReleaseCount,omitempty" xmlrpc:"pressReleaseCount,omitempty"` + + // A press release internal identifier. + PressReleaseId *int `json:"pressReleaseId,omitempty" xmlrpc:"pressReleaseId,omitempty"` + + // no documentation yet + PressReleases []Auxiliary_Press_Release `json:"pressReleases,omitempty" xmlrpc:"pressReleases,omitempty"` +} + +// The SoftLayer_Auxiliary_Shipping_Courier data type contains general information relating the different (major) couriers that SoftLayer may use for shipping. +type Auxiliary_Shipping_Courier struct { + Entity + + // The unique id of the shipping courier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The unique keyname of the shipping courier. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The name of the shipping courier. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The url to shipping courier's website. 
+ Url *string `json:"url,omitempty" xmlrpc:"url,omitempty"` +} + +// no documentation yet +type Auxiliary_Shipping_Courier_Type struct { + Entity + + // no documentation yet + Courier []Auxiliary_Shipping_Courier `json:"courier,omitempty" xmlrpc:"courier,omitempty"` + + // A count of + CourierCount *uint `json:"courierCount,omitempty" xmlrpc:"courierCount,omitempty"` + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/billing.go b/vendor/github.com/softlayer/softlayer-go/datatypes/billing.go new file mode 100644 index 000000000000..129bc569cb8b --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/billing.go @@ -0,0 +1,2674 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Billing_Currency struct { + Entity + + // The current exchange rate + CurrentExchangeRate *Billing_Currency_ExchangeRate `json:"currentExchangeRate,omitempty" xmlrpc:"currentExchangeRate,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Billing_Currency_Country data type maps what currencies are valid for specific countries. US Dollars are valid from any country, but other currencies are only available to customers in certain countries. +type Billing_Currency_Country struct { + Entity + + // A unique identifier for the related country. + CountryId *int `json:"countryId,omitempty" xmlrpc:"countryId,omitempty"` + + // A unique identifier for the related currency. + CurrencyId *int `json:"currencyId,omitempty" xmlrpc:"currencyId,omitempty"` + + // A unique identifier for a map between a country and currency. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The country currency locale. + Locale *string `json:"locale,omitempty" xmlrpc:"locale,omitempty"` +} + +// no documentation yet +type Billing_Currency_ExchangeRate struct { + Entity + + // no documentation yet + EffectiveDate *Time `json:"effectiveDate,omitempty" xmlrpc:"effectiveDate,omitempty"` + + // no documentation yet + ExpirationDate *Time `json:"expirationDate,omitempty" xmlrpc:"expirationDate,omitempty"` + + // no documentation yet + FundingCurrency *Billing_Currency `json:"fundingCurrency,omitempty" xmlrpc:"fundingCurrency,omitempty"` + + // The id of the exchange rate record. 
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + LocalCurrency *Billing_Currency `json:"localCurrency,omitempty" xmlrpc:"localCurrency,omitempty"` + + // no documentation yet + Rate *Float64 `json:"rate,omitempty" xmlrpc:"rate,omitempty"` +} + +// Every SoftLayer customer account has billing specific information which is kept in the SoftLayer_Billing_Info data type. This information is used by the SoftLayer accounting group when sending invoices and making billing inquiries. +type Billing_Info struct { + Entity + + // The SoftLayer customer account associated with this billing information. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A SoftLayer account's identifier. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + AchInformation []Billing_Info_Ach `json:"achInformation,omitempty" xmlrpc:"achInformation,omitempty"` + + // A count of + AchInformationCount *uint `json:"achInformationCount,omitempty" xmlrpc:"achInformationCount,omitempty"` + + // The day of the month that a SoftLayer customer is billed. + AnniversaryDayOfMonth *int `json:"anniversaryDayOfMonth,omitempty" xmlrpc:"anniversaryDayOfMonth,omitempty"` + + // This value doesn't persist to this object. It's used as part of the account creation process only; + CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"` + + // the expiration month of the credit card on file + CardExpirationMonth *int `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"` + + // the expiration year of the credit card on file + CardExpirationYear *int `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"` + + // no documentation yet + CardNickname *string `json:"cardNickname,omitempty" xmlrpc:"cardNickname,omitempty"` + + // the type of the credit card on file + CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"` + + // This value doesn't persist to this object. It's used as part of the account creation process only. + CardVerificationNumber *string `json:"cardVerificationNumber,omitempty" xmlrpc:"cardVerificationNumber,omitempty"` + + // The date a customer's billing information was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Currency to be used by this customer account. + Currency *Billing_Currency `json:"currency,omitempty" xmlrpc:"currency,omitempty"` + + // Information related to an account's current and previous billing cycles. + CurrentBillingCycle *Billing_Info_Cycle `json:"currentBillingCycle,omitempty" xmlrpc:"currentBillingCycle,omitempty"` + + // A SoftLayer customer's billing information identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date on which an account was last billed. + LastBillDate *Time `json:"lastBillDate,omitempty" xmlrpc:"lastBillDate,omitempty"` + + // The last four digits of the credit card currently on the account. This is the only portion of the card that we store. For Paypal customers, this value will be empty. + LastFourPaymentCardDigits *int `json:"lastFourPaymentCardDigits,omitempty" xmlrpc:"lastFourPaymentCardDigits,omitempty"` + + // The date of the last payment received by SoftLayer from the account holder. + LastPaymentDate *Time `json:"lastPaymentDate,omitempty" xmlrpc:"lastPaymentDate,omitempty"` + + // The date a customer's billing information was last modified. 
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The date on which an account will be billed next. + NextBillDate *Time `json:"nextBillDate,omitempty" xmlrpc:"nextBillDate,omitempty"` + + // The payment terms for an account. + PaymentTerms *int `json:"paymentTerms,omitempty" xmlrpc:"paymentTerms,omitempty"` + + // The percentage discount received on all one-time charges on a customer's monthly bill. + PercentDiscountOnetime *int `json:"percentDiscountOnetime,omitempty" xmlrpc:"percentDiscountOnetime,omitempty"` + + // The percentage discount received on all recurring charges on a customer's monthly bill. + PercentDiscountRecurring *int `json:"percentDiscountRecurring,omitempty" xmlrpc:"percentDiscountRecurring,omitempty"` + + // The total recurring fee amount for servers that are in the spare pool status. + SparePoolAmount *int `json:"sparePoolAmount,omitempty" xmlrpc:"sparePoolAmount,omitempty"` + + // no documentation yet + VatId *string `json:"vatId,omitempty" xmlrpc:"vatId,omitempty"` +} + +// no documentation yet +type Billing_Info_Ach struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + AccountNumber *string `json:"accountNumber,omitempty" xmlrpc:"accountNumber,omitempty"` + + // no documentation yet + AccountType *string `json:"accountType,omitempty" xmlrpc:"accountType,omitempty"` + + // no documentation yet + BankTransitNumber *string `json:"bankTransitNumber,omitempty" xmlrpc:"bankTransitNumber,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + PhoneNumber *string `json:"phoneNumber,omitempty" xmlrpc:"phoneNumber,omitempty"` + + // no documentation yet + Postalcode *string `json:"postalcode,omitempty" xmlrpc:"postalcode,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // no documentation yet + Street1 *string `json:"street1,omitempty" xmlrpc:"street1,omitempty"` + + // no documentation yet + Street2 *string `json:"street2,omitempty" xmlrpc:"street2,omitempty"` + + // no documentation yet + VerifiedDate *Time `json:"verifiedDate,omitempty" xmlrpc:"verifiedDate,omitempty"` +} + +// The SoftLayer_Billing_Info_Cycle data type models basic information concerning a SoftLayer account's previous and current billing cycles. The information in this class is only populated for SoftLayer customers who are billed monthly. +type Billing_Info_Cycle struct { + Entity + + // The account that a current billing cycle is associated with. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The ending date of an account's current billing cycle. + CurrentCycleEndDate *Time `json:"currentCycleEndDate,omitempty" xmlrpc:"currentCycleEndDate,omitempty"` + + // The starting date of an account's current billing cycle. 
+ CurrentCycleStartDate *Time `json:"currentCycleStartDate,omitempty" xmlrpc:"currentCycleStartDate,omitempty"` + + // The start date of an account's next billing cycle. + NextCycleStartDate *Time `json:"nextCycleStartDate,omitempty" xmlrpc:"nextCycleStartDate,omitempty"` + + // The ending date of an account's previous billing cycle. + PreviousCycleEndDate *Time `json:"previousCycleEndDate,omitempty" xmlrpc:"previousCycleEndDate,omitempty"` + + // The starting date of an account's previous billing cycle. + PreviousCycleStartDate *Time `json:"previousCycleStartDate,omitempty" xmlrpc:"previousCycleStartDate,omitempty"` +} + +// The SoftLayer_Billing_Invoice data type contains general information relating to an individual invoice applied to a SoftLayer customer account. Personal information in this type such as names, addresses, and phone numbers are taken from the account's contact information at the time the invoice is generated. +type Billing_Invoice struct { + Entity + + // The account that an invoice belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The SoftLayer customer account that an invoice belongs to. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The first line of an address belonging to an account at the time an invoice is created. + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // The second line of an address belonging to an account at the time an invoice is created. + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // This is the amount of this invoice. + Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"` + + // no documentation yet + BrandAtInvoiceCreation *Brand `json:"brandAtInvoiceCreation,omitempty" xmlrpc:"brandAtInvoiceCreation,omitempty"` + + // The city portion of an address belonging to an account at the time an invoice is created. + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Whether an account was exempt from taxes on their invoices at the time an invoice is created. + ClaimedTaxExemptTxFlag *bool `json:"claimedTaxExemptTxFlag,omitempty" xmlrpc:"claimedTaxExemptTxFlag,omitempty"` + + // The date an invoice was closed. Open invoices have a null closed date. + ClosedDate *Time `json:"closedDate,omitempty" xmlrpc:"closedDate,omitempty"` + + // The company name belonging to an account at the time an invoice is created. + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // A two-letter abbreviation of the country portion of an address belonging to an account at the time an invoice is created. + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // The date an invoice was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A flag that will reflect whether the detailed version of the pdf has been generated. + DetailedPdfGeneratedFlag *bool `json:"detailedPdfGeneratedFlag,omitempty" xmlrpc:"detailedPdfGeneratedFlag,omitempty"` + + // no documentation yet + DocumentsGeneratedFlag *bool `json:"documentsGeneratedFlag,omitempty" xmlrpc:"documentsGeneratedFlag,omitempty"` + + // The email address belonging to an account at the time an invoice is created. + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // An SoftLayer account's balance at the time an invoice is closed. This value is measured in US Dollar ($USD) currency. 
+ EndingBalance *Float64 `json:"endingBalance,omitempty" xmlrpc:"endingBalance,omitempty"` + + // The fax telephone number belonging to an account at the time an invoice is created. + FaxPhone *string `json:"faxPhone,omitempty" xmlrpc:"faxPhone,omitempty"` + + // The first name of the account holder at the time an invoice is created. + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // An invoice's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of a list of top-level invoice items that are on the currently pending invoice. + InvoiceTopLevelItemCount *uint `json:"invoiceTopLevelItemCount,omitempty" xmlrpc:"invoiceTopLevelItemCount,omitempty"` + + // A list of top-level invoice items that are on the currently pending invoice. + InvoiceTopLevelItems []Billing_Invoice_Item `json:"invoiceTopLevelItems,omitempty" xmlrpc:"invoiceTopLevelItems,omitempty"` + + // The total amount of this invoice. + InvoiceTotalAmount *Float64 `json:"invoiceTotalAmount,omitempty" xmlrpc:"invoiceTotalAmount,omitempty"` + + // The total one-time charges for this invoice. This is the sum of one-time charges + setup fees + labor fees. This does not include taxes. + InvoiceTotalOneTimeAmount *Float64 `json:"invoiceTotalOneTimeAmount,omitempty" xmlrpc:"invoiceTotalOneTimeAmount,omitempty"` + + // A sum of all the taxes related to one time charges for this invoice. + InvoiceTotalOneTimeTaxAmount *Float64 `json:"invoiceTotalOneTimeTaxAmount,omitempty" xmlrpc:"invoiceTotalOneTimeTaxAmount,omitempty"` + + // The total amount of this invoice. This does not include taxes. + InvoiceTotalPreTaxAmount *Float64 `json:"invoiceTotalPreTaxAmount,omitempty" xmlrpc:"invoiceTotalPreTaxAmount,omitempty"` + + // The total Recurring amount of this invoice. This amount does not include taxes or one time charges. + InvoiceTotalRecurringAmount *Float64 `json:"invoiceTotalRecurringAmount,omitempty" xmlrpc:"invoiceTotalRecurringAmount,omitempty"` + + // The total amount of the recurring taxes on this invoice. + InvoiceTotalRecurringTaxAmount *Float64 `json:"invoiceTotalRecurringTaxAmount,omitempty" xmlrpc:"invoiceTotalRecurringTaxAmount,omitempty"` + + // A count of the items that belong to this invoice. + ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"` + + // The items that belong to this invoice. + Items []Billing_Invoice_Item `json:"items,omitempty" xmlrpc:"items,omitempty"` + + // The last name of the account holder at the time an invoice is created. + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // Exchange rate used for billing this invoice. + LocalCurrencyExchangeRate *Billing_Currency_ExchangeRate `json:"localCurrencyExchangeRate,omitempty" xmlrpc:"localCurrencyExchangeRate,omitempty"` + + // The date an invoice was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The telephone number belonging to an account at the time an invoice is created. + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // This is the total payment made on this invoice. + Payment *Float64 `json:"payment,omitempty" xmlrpc:"payment,omitempty"` + + // A count of the payments for the invoice. + PaymentCount *uint `json:"paymentCount,omitempty" xmlrpc:"paymentCount,omitempty"` + + // The payments for the invoice. 
+ Payments []Billing_Invoice_Receivable_Payment `json:"payments,omitempty" xmlrpc:"payments,omitempty"` + + // The postal code portion of an address belonging to an account at the time an invoice is created. + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // no documentation yet + PurchaseOrderNumber *string `json:"purchaseOrderNumber,omitempty" xmlrpc:"purchaseOrderNumber,omitempty"` + + // This is the seller's tax registration. + SellerRegistration *string `json:"sellerRegistration,omitempty" xmlrpc:"sellerRegistration,omitempty"` + + // An SoftLayer account's balance at the time an invoice is created. This value is measured in US Dollar ($USD) currency. + StartingBalance *Float64 `json:"startingBalance,omitempty" xmlrpc:"startingBalance,omitempty"` + + // A two-letter abbreviation of the state portion of an address belonging to an account at the time an invoice is created. If the account that the invoice was generated for resides outside a province then this is set to "other". + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // An invoice's status. The "OPEN" status means SoftLayer has not yet received payment for this invoice. "CLOSED" status means that SoftLayer has received payment and closed the invoice. The "CLOSED_FAILED" status code means SoftLayer closed the invoice without receiving a payment. Invoices are usually set to CLOSED_FAILED status in cases where customer accounts are terminated for non-payment. + StatusCode *string `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"` + + // This is the tax information that applies to tax auditing. This is the official tax record for this invoice. + TaxInfo *Billing_Invoice_Tax_Info `json:"taxInfo,omitempty" xmlrpc:"taxInfo,omitempty"` + + // This is the set of tax information for any tax calculation for this invoice. Note that not all of these are necessarily official, so use the taxInfo key to get the final information. + TaxInfoHistory []Billing_Invoice_Tax_Info `json:"taxInfoHistory,omitempty" xmlrpc:"taxInfoHistory,omitempty"` + + // A count of this is the set of tax information for any tax calculation for this invoice. Note that not all of these are necessarily official, so use the taxInfo key to get the final information. + TaxInfoHistoryCount *uint `json:"taxInfoHistoryCount,omitempty" xmlrpc:"taxInfoHistoryCount,omitempty"` + + // This is a message explaining the tax treatment for this invoice. + TaxMessage *string `json:"taxMessage,omitempty" xmlrpc:"taxMessage,omitempty"` + + // no documentation yet + TaxStatusId *int `json:"taxStatusId,omitempty" xmlrpc:"taxStatusId,omitempty"` + + // This is the strategy used to calculate tax on this invoice. + TaxType *Billing_Invoice_Tax_Type `json:"taxType,omitempty" xmlrpc:"taxType,omitempty"` + + // no documentation yet + TaxTypeId *int `json:"taxTypeId,omitempty" xmlrpc:"taxTypeId,omitempty"` + + // An invoice's type. SoftLayer invoices and service credits are differentiated by their type. The "NEW" type code signifies an invoice for new service. A SoftLayer customer's first invoice has the NEW type code. "RECURRING" invoices are generated on a SoftLayer customer's anniversary billing date for monthly services. "ONE-TIME-CHARGE" invoices are generated when one-time charges are applied to an account. "CREDIT" invoices are generated whenever SoftLayer applies a credit against an account's balance. There are two special types of service credits. 
"REFUND" type credits are applied against a customer's account balance along with the receivables on their account. "MANUAL_PAYMENT_CREDIT" invoice credits are generated whenever a customer makes an unscheduled payment. + TypeCode *string `json:"typeCode,omitempty" xmlrpc:"typeCode,omitempty"` +} + +// Each billing invoice item makes up a record within an invoice. This provides you with a detailed record of everything related to an invoice item. When you are billed, our system takes active billing items and creates an invoice. These invoice items are a copy of your active billing items, and make up the contents of your invoice. +type Billing_Invoice_Item struct { + Entity + + // An Invoice Item's associated child invoice items. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. + AssociatedChildren []Billing_Invoice_Item `json:"associatedChildren,omitempty" xmlrpc:"associatedChildren,omitempty"` + + // A count of an Invoice Item's associated child invoice items. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. + AssociatedChildrenCount *uint `json:"associatedChildrenCount,omitempty" xmlrpc:"associatedChildrenCount,omitempty"` + + // An Invoice Item's associated invoice item. If this is populated, it means this is an orphaned invoice item, but logically belongs to the associated invoice item. + AssociatedInvoiceItem *Billing_Invoice_Item `json:"associatedInvoiceItem,omitempty" xmlrpc:"associatedInvoiceItem,omitempty"` + + // The associated invoice Item ID. + AssociatedInvoiceItemId *int `json:"associatedInvoiceItemId,omitempty" xmlrpc:"associatedInvoiceItemId,omitempty"` + + // An Invoice Item's billing item, from which this item was generated. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The billing item from which this invoice item was generated. + BillingItemId *int `json:"billingItemId,omitempty" xmlrpc:"billingItemId,omitempty"` + + // This invoice item's "item category". + Category *Product_Item_Category `json:"category,omitempty" xmlrpc:"category,omitempty"` + + // The item category of the invoice item being invoiced. + CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"` + + // An Invoice Item's child invoice items. Only parent invoice items have children. For instance, a server invoice item will have children. + Children []Billing_Invoice_Item `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of an Invoice Item's child invoice items. Only parent invoice items have children. For instance, a server invoice item will have children. + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // The date the invoice item was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The item description for this invoice item. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The domain name of the invoiced item. This is only used on invoice items whose category is "server". + DomainName *string `json:"domainName,omitempty" xmlrpc:"domainName,omitempty"` + + // An Invoice Item's associated child invoice items, excluding some items with a $0.00 recurring fee. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. 
+ FilteredAssociatedChildren []Billing_Invoice_Item `json:"filteredAssociatedChildren,omitempty" xmlrpc:"filteredAssociatedChildren,omitempty"` + + // A count of an Invoice Item's associated child invoice items, excluding some items with a $0.00 recurring fee. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. + FilteredAssociatedChildrenCount *uint `json:"filteredAssociatedChildrenCount,omitempty" xmlrpc:"filteredAssociatedChildrenCount,omitempty"` + + // The host name of the invoiced item. This is only used on invoice items whose category is "server". + HostName *string `json:"hostName,omitempty" xmlrpc:"hostName,omitempty"` + + // A flag indicating whether this invoice item is billed on an hourly basis. + HourlyFlag *bool `json:"hourlyFlag,omitempty" xmlrpc:"hourlyFlag,omitempty"` + + // The hourly recurring fee of the invoice item represented by a floating point decimal in US Dollars ($USD) + HourlyRecurringFee *Float64 `json:"hourlyRecurringFee,omitempty" xmlrpc:"hourlyRecurringFee,omitempty"` + + // The ID of the invoice item. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The invoice to which this item belongs. + Invoice *Billing_Invoice `json:"invoice,omitempty" xmlrpc:"invoice,omitempty"` + + // The invoice to which this invoice item belongs. + InvoiceId *int `json:"invoiceId,omitempty" xmlrpc:"invoiceId,omitempty"` + + // An invoice item's labor fee total after taxes. This does not include any child invoice items. + LaborAfterTaxAmount *Float64 `json:"laborAfterTaxAmount,omitempty" xmlrpc:"laborAfterTaxAmount,omitempty"` + + // This is also a one-time fee of a special type. + LaborFee *Float64 `json:"laborFee,omitempty" xmlrpc:"laborFee,omitempty"` + + // The tax rate at which the labor fee is taxed. + LaborFeeTaxRate *Float64 `json:"laborFeeTaxRate,omitempty" xmlrpc:"laborFeeTaxRate,omitempty"` + + // An invoice item's labor tax amount. This does not include any child invoice items. + LaborTaxAmount *Float64 `json:"laborTaxAmount,omitempty" xmlrpc:"laborTaxAmount,omitempty"` + + // An invoice item's location, if one exists. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // An Invoice Item's associated child invoice items, excluding ALL items with a $0.00 recurring fee. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. + NonZeroAssociatedChildren []Billing_Invoice_Item `json:"nonZeroAssociatedChildren,omitempty" xmlrpc:"nonZeroAssociatedChildren,omitempty"` + + // A count of an Invoice Item's associated child invoice items, excluding ALL items with a $0.00 recurring fee. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. + NonZeroAssociatedChildrenCount *uint `json:"nonZeroAssociatedChildrenCount,omitempty" xmlrpc:"nonZeroAssociatedChildrenCount,omitempty"` + + // A note to help describe more about the item. This normally holds usernames, or some other bit of extra information. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // An invoice item's one-time fee total after taxes. This does not include any child invoice items.
+ OneTimeAfterTaxAmount *Float64 `json:"oneTimeAfterTaxAmount,omitempty" xmlrpc:"oneTimeAfterTaxAmount,omitempty"` + + // If there are any one-time charges assessed, they will show up here represented by a floating point decimal in US Dollars ($USD) + OneTimeFee *Float64 `json:"oneTimeFee,omitempty" xmlrpc:"oneTimeFee,omitempty"` + + // The rate at which the one-time fee is taxed. + OneTimeFeeTaxRate *Float64 `json:"oneTimeFeeTaxRate,omitempty" xmlrpc:"oneTimeFeeTaxRate,omitempty"` + + // An invoice item's one-time tax amount. This does not include any child invoice items. + OneTimeTaxAmount *Float64 `json:"oneTimeTaxAmount,omitempty" xmlrpc:"oneTimeTaxAmount,omitempty"` + + // Every item tied to a server should have a parent invoice item which is the server line item. This is how we associate items to a server. + Parent *Billing_Invoice_Item `json:"parent,omitempty" xmlrpc:"parent,omitempty"` + + // The parent invoice item, usually the server invoice item. + ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"` + + // The entry in the product catalog that an invoice item is based upon. + Product *Product_Item `json:"product,omitempty" xmlrpc:"product,omitempty"` + + // The entry in the product catalog that an invoice item is based upon. + ProductItemId *int `json:"productItemId,omitempty" xmlrpc:"productItemId,omitempty"` + + // An invoice item's recurring fee total after taxes. This does not include any child invoice items. + RecurringAfterTaxAmount *Float64 `json:"recurringAfterTaxAmount,omitempty" xmlrpc:"recurringAfterTaxAmount,omitempty"` + + // The recurring fee of the invoice item represented by a floating point decimal in US Dollars ($USD) + RecurringFee *Float64 `json:"recurringFee,omitempty" xmlrpc:"recurringFee,omitempty"` + + // The rate at which the recurring fee is taxed. + RecurringFeeTaxRate *Float64 `json:"recurringFeeTaxRate,omitempty" xmlrpc:"recurringFeeTaxRate,omitempty"` + + // An invoice item's recurring tax amount. This does not include any child invoice items. + RecurringTaxAmount *Float64 `json:"recurringTaxAmount,omitempty" xmlrpc:"recurringTaxAmount,omitempty"` + + // A unique identifier for a SoftLayer Service that is associated to an invoice item. + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` + + // The service provider for the invoice item. + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` + + // An invoice item's setup fee total after taxes. This does not include any child invoice items. + SetupAfterTaxAmount *Float64 `json:"setupAfterTaxAmount,omitempty" xmlrpc:"setupAfterTaxAmount,omitempty"` + + // If there were any setup fees, they will show up here. These are normally a one-time fee. + SetupFee *Float64 `json:"setupFee,omitempty" xmlrpc:"setupFee,omitempty"` + + // The tax rate at which the setup fee is taxed. + SetupFeeTaxRate *Float64 `json:"setupFeeTaxRate,omitempty" xmlrpc:"setupFeeTaxRate,omitempty"` + + // An invoice item's setup tax amount. This does not include any child invoice items. + SetupTaxAmount *Float64 `json:"setupTaxAmount,omitempty" xmlrpc:"setupTaxAmount,omitempty"` + + // A string representing the name of the parent level product group of an invoice item. + TopLevelProductGroupName *string `json:"topLevelProductGroupName,omitempty" xmlrpc:"topLevelProductGroupName,omitempty"` + + // An invoice Item's total, including any child invoice items if they exist.
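+ // Editor's note: the parent/child and associated-child fields above form a tree of
+ // invoice items. A minimal sketch (not part of the upstream SoftLayer metadata; the
+ // helper name is hypothetical) of how a caller might total recurring fees across
+ // that tree with nil-safe dereferences:
+ //
+ //	func totalRecurring(item Billing_Invoice_Item) float64 {
+ //		total := 0.0
+ //		if item.RecurringFee != nil {
+ //			total += float64(*item.RecurringFee)
+ //		}
+ //		for _, child := range item.Children {
+ //			total += totalRecurring(child)
+ //		}
+ //		return total
+ //	}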
+ TotalOneTimeAmount *Float64 `json:"totalOneTimeAmount,omitempty" xmlrpc:"totalOneTimeAmount,omitempty"` + + // An invoice Item's total, including any child invoice items if they exist. + TotalOneTimeTaxAmount *Float64 `json:"totalOneTimeTaxAmount,omitempty" xmlrpc:"totalOneTimeTaxAmount,omitempty"` + + // An invoice Item's total, including any child invoice items if they exist. + TotalRecurringAmount *Float64 `json:"totalRecurringAmount,omitempty" xmlrpc:"totalRecurringAmount,omitempty"` + + // A Billing Item's total, including any child billing items if they exist. + TotalRecurringTaxAmount *Float64 `json:"totalRecurringTaxAmount,omitempty" xmlrpc:"totalRecurringTaxAmount,omitempty"` + + // A flag indicating whether this invoice item is for a usage charge. + UsageChargeFlag *bool `json:"usageChargeFlag,omitempty" xmlrpc:"usageChargeFlag,omitempty"` +} + +// The SoftLayer_Billing_Invoice_Item_Hardware data type contains a "resource". This resource is a link to the hardware tied to a SoftLayer_Billing_Item whose category code is "server". +type Billing_Invoice_Item_Hardware struct { + Billing_Invoice_Item + + // The resource for a server invoice item. + Resource *Hardware `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// Information about the tax rates that apply to a particular invoice item. +type Billing_Invoice_Item_Tax_Info struct { + Entity + + // The date and time the tax information was recorded. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The invoice description with special information about the invoice. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The tax rate that can be multiplied by the subtotal to get the tax amount. + EffectiveTaxRate *Float64 `json:"effectiveTaxRate,omitempty" xmlrpc:"effectiveTaxRate,omitempty"` + + // The amount that is exempt from tax. + ExemptAmount *Float64 `json:"exemptAmount,omitempty" xmlrpc:"exemptAmount,omitempty"` + + // The type of fee being tracked for this particular set of tax information. + FeeProperty *string `json:"feeProperty,omitempty" xmlrpc:"feeProperty,omitempty"` + + // An invoice item's tax information internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + InvoiceItem *Billing_Invoice_Item `json:"invoiceItem,omitempty" xmlrpc:"invoiceItem,omitempty"` + + // A reference to the related invoice item. + InvoiceItemId *int `json:"invoiceItemId,omitempty" xmlrpc:"invoiceItemId,omitempty"` + + // no documentation yet + InvoiceTaxInfo *Billing_Invoice_Tax_Info `json:"invoiceTaxInfo,omitempty" xmlrpc:"invoiceTaxInfo,omitempty"` + + // A reference to the tax information for the parent invoice. + InvoiceTaxInfoId *int `json:"invoiceTaxInfoId,omitempty" xmlrpc:"invoiceTaxInfoId,omitempty"` + + // The date and time the tax information was modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The amount that is exempt from tax. + NonTaxableBasis *Float64 `json:"nonTaxableBasis,omitempty" xmlrpc:"nonTaxableBasis,omitempty"` + + // A flag to indicate whether this is the official record for this invoice item. + ReportedFlag *bool `json:"reportedFlag,omitempty" xmlrpc:"reportedFlag,omitempty"` + + // The registration that the seller will use to report the invoice. + SellerRegistration *string `json:"sellerRegistration,omitempty" xmlrpc:"sellerRegistration,omitempty"` + + // The tax amount associated with this line item.
+ TaxAmount *Float64 `json:"taxAmount,omitempty" xmlrpc:"taxAmount,omitempty"` + + // The tax amount (converted to the 'to' currency) associated with this line item. + TaxAmountToCurrency *Float64 `json:"taxAmountToCurrency,omitempty" xmlrpc:"taxAmountToCurrency,omitempty"` + + // The tax rate used. Note that this might apply to only part of the + TaxRate *Float64 `json:"taxRate,omitempty" xmlrpc:"taxRate,omitempty"` + + // The amount that is subject to tax. + TaxableBasis *Float64 `json:"taxableBasis,omitempty" xmlrpc:"taxableBasis,omitempty"` + + // This is the currency the invoice will be converted to. + ToCurrency *Billing_Currency `json:"toCurrency,omitempty" xmlrpc:"toCurrency,omitempty"` + + // The currency code that the invoice is being converted to. + ToCurrencyId *int `json:"toCurrencyId,omitempty" xmlrpc:"toCurrencyId,omitempty"` +} + +// no documentation yet +type Billing_Invoice_Next struct { + Entity +} + +// The SoftLayer_Billing_Invoice_Receivable_Payment data type contains general information relating to payments made against invoices. +type Billing_Invoice_Receivable_Payment struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The amount of the payment. + Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"` + + // The date of the payment. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + CreditCardLastFourDigits *int `json:"creditCardLastFourDigits,omitempty" xmlrpc:"creditCardLastFourDigits,omitempty"` + + // no documentation yet + CreditCardRequestId *string `json:"creditCardRequestId,omitempty" xmlrpc:"creditCardRequestId,omitempty"` + + // no documentation yet + CreditCardTransaction *Billing_Payment_Card_Transaction `json:"creditCardTransaction,omitempty" xmlrpc:"creditCardTransaction,omitempty"` + + // no documentation yet + ExchangeRate *Billing_Currency_ExchangeRate `json:"exchangeRate,omitempty" xmlrpc:"exchangeRate,omitempty"` + + // no documentation yet + Invoice *Billing_Invoice `json:"invoice,omitempty" xmlrpc:"invoice,omitempty"` + + // The invoice that the payment is for. + InvoiceId *int `json:"invoiceId,omitempty" xmlrpc:"invoiceId,omitempty"` + + // no documentation yet + PaypalTransaction *Billing_Payment_PayPal_Transaction `json:"paypalTransaction,omitempty" xmlrpc:"paypalTransaction,omitempty"` + + // The type of payment. + TypeCode *string `json:"typeCode,omitempty" xmlrpc:"typeCode,omitempty"` +} + +// Invoice tax information contains top-level information about the taxes recorded for a particular invoice. +type Billing_Invoice_Tax_Info struct { + Entity + + // The date and time this tax information was recorded. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // This is the currency used for the invoice. + Currency *Billing_Currency `json:"currency,omitempty" xmlrpc:"currency,omitempty"` + + // The currency code that the invoice should be recorded in. + CurrencyId *int `json:"currencyId,omitempty" xmlrpc:"currencyId,omitempty"` + + // This is the functional currency used for the invoice. + FunctionalCurrency *Billing_Currency `json:"functionalCurrency,omitempty" xmlrpc:"functionalCurrency,omitempty"` + + // The internal identifier for this invoice tax information. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // This is the related invoice for this tax-related information. 
+ Invoice *Billing_Invoice `json:"invoice,omitempty" xmlrpc:"invoice,omitempty"` + + // A reference to the related invoice. + InvoiceId *int `json:"invoiceId,omitempty" xmlrpc:"invoiceId,omitempty"` + + // A count of this is the collection of tax information for each of the related invoice items. + ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"` + + // This is the tax information on the invoice item that includes currency details. + ItemWithCurrencyInfo *Billing_Invoice_Item_Tax_Info `json:"itemWithCurrencyInfo,omitempty" xmlrpc:"itemWithCurrencyInfo,omitempty"` + + // This is the collection of tax information for each of the related invoice items. + Items []Billing_Invoice_Item_Tax_Info `json:"items,omitempty" xmlrpc:"items,omitempty"` + + // The date and time this tax information was updated. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A flag to indicate whether the invoice will be auditable. + ReportedFlag *bool `json:"reportedFlag,omitempty" xmlrpc:"reportedFlag,omitempty"` + + // This is the total tax amount (converted to the 'to' currency) for the invoice. + TotalTaxAmountToCurrency *Float64 `json:"totalTaxAmountToCurrency,omitempty" xmlrpc:"totalTaxAmountToCurrency,omitempty"` +} + +// The invoice tax status data type models a single status or state that an invoice can reflect in regard to an integration with a third-party tax calculation service. +type Billing_Invoice_Tax_Status struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The invoice tax type data type models a single strategy for handling tax calculations. +type Billing_Invoice_Tax_Type struct { + Entity + + // A tax type's internal identifier. Each type of tax calculation strategy has a unique ID value. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A unique string that identifies each strategy and is guaranteed to be stable over time. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A human-readable label for each tax strategy. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Every individual item that a SoftLayer customer is billed for is recorded in the SoftLayer_Billing_Item data type. Billing items range from server chassis to hard drives to control panels, bandwidth quota upgrades and port upgrade charges. SoftLayer [[SoftLayer_Billing_Invoice|invoices]] are generated from the cost of a customer's billing items. Billing items are copied from the product catalog as they're ordered by customers to create a reference between an account and the billable items they own. +// +// Billing items exist in a tree relationship. Items are associated with each other by parent/child relationships. Component items such as CPUs, RAM, and software each have a parent billing item for the server chassis they're associated with. Billing Items with a null parent item do not have an associated parent item. +type Billing_Item struct { + Entity + + // The account that a billing item belongs to.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + ActiveAgreement *Account_Agreement `json:"activeAgreement,omitempty" xmlrpc:"activeAgreement,omitempty"` + + // A flag indicating that the billing item is under an active agreement. + ActiveAgreementFlag *Account_Agreement `json:"activeAgreementFlag,omitempty" xmlrpc:"activeAgreementFlag,omitempty"` + + // A billing item's active associated child billing items. This includes "floating" items that are not necessarily child items of this billing item. + ActiveAssociatedChildren []Billing_Item `json:"activeAssociatedChildren,omitempty" xmlrpc:"activeAssociatedChildren,omitempty"` + + // A count of a billing item's active associated child billing items. This includes "floating" items that are not necessarily child items of this billing item. + ActiveAssociatedChildrenCount *uint `json:"activeAssociatedChildrenCount,omitempty" xmlrpc:"activeAssociatedChildrenCount,omitempty"` + + // A count of + ActiveAssociatedGuestDiskBillingItemCount *uint `json:"activeAssociatedGuestDiskBillingItemCount,omitempty" xmlrpc:"activeAssociatedGuestDiskBillingItemCount,omitempty"` + + // no documentation yet + ActiveAssociatedGuestDiskBillingItems []Billing_Item `json:"activeAssociatedGuestDiskBillingItems,omitempty" xmlrpc:"activeAssociatedGuestDiskBillingItems,omitempty"` + + // A count of a Billing Item's active bundled billing items. + ActiveBundledItemCount *uint `json:"activeBundledItemCount,omitempty" xmlrpc:"activeBundledItemCount,omitempty"` + + // A Billing Item's active bundled billing items. + ActiveBundledItems []Billing_Item `json:"activeBundledItems,omitempty" xmlrpc:"activeBundledItems,omitempty"` + + // A service cancellation request item that corresponds to the billing item. + ActiveCancellationItem *Billing_Item_Cancellation_Request_Item `json:"activeCancellationItem,omitempty" xmlrpc:"activeCancellationItem,omitempty"` + + // A Billing Item's active child billing items. + ActiveChildren []Billing_Item `json:"activeChildren,omitempty" xmlrpc:"activeChildren,omitempty"` + + // A count of a Billing Item's active child billing items. + ActiveChildrenCount *uint `json:"activeChildrenCount,omitempty" xmlrpc:"activeChildrenCount,omitempty"` + + // no documentation yet + ActiveFlag *bool `json:"activeFlag,omitempty" xmlrpc:"activeFlag,omitempty"` + + // A count of + ActiveSparePoolAssociatedGuestDiskBillingItemCount *uint `json:"activeSparePoolAssociatedGuestDiskBillingItemCount,omitempty" xmlrpc:"activeSparePoolAssociatedGuestDiskBillingItemCount,omitempty"` + + // no documentation yet + ActiveSparePoolAssociatedGuestDiskBillingItems []Billing_Item `json:"activeSparePoolAssociatedGuestDiskBillingItems,omitempty" xmlrpc:"activeSparePoolAssociatedGuestDiskBillingItems,omitempty"` + + // A count of a Billing Item's spare pool bundled billing items. + ActiveSparePoolBundledItemCount *uint `json:"activeSparePoolBundledItemCount,omitempty" xmlrpc:"activeSparePoolBundledItemCount,omitempty"` + + // A Billing Item's spare pool bundled billing items. + ActiveSparePoolBundledItems []Billing_Item `json:"activeSparePoolBundledItems,omitempty" xmlrpc:"activeSparePoolBundledItems,omitempty"` + + // Flag to check if a billing item can be cancelled. 1 = yes. 0 = no. + AllowCancellationFlag *int `json:"allowCancellationFlag,omitempty" xmlrpc:"allowCancellationFlag,omitempty"` + + // A billing item's associated parent. 
This is to be used for billing items that are "floating", and therefore are not child items of any parent billing item. If it is desired to associate an item to another, populate this with the SoftLayer_Billing_Item ID of that associated parent item. + AssociatedBillingItem *Billing_Item `json:"associatedBillingItem,omitempty" xmlrpc:"associatedBillingItem,omitempty"` + + // A history of billing items which a billing item has been associated with. + AssociatedBillingItemHistory []Billing_Item_Association_History `json:"associatedBillingItemHistory,omitempty" xmlrpc:"associatedBillingItemHistory,omitempty"` + + // A count of a history of billing items which a billing item has been associated with. + AssociatedBillingItemHistoryCount *uint `json:"associatedBillingItemHistoryCount,omitempty" xmlrpc:"associatedBillingItemHistoryCount,omitempty"` + + // This is sometimes populated for orphan billing items that are not attached to servers. Billing items like secondary portable IP addresses fit into this category. A user may set an association by calling [[SoftLayer_Billing_Item::setAssociationId]]. This will cause this orphan item to appear under its associated server billing item on future invoices. You may only attach orphaned billing items to server billing items without cancellation dates set. + AssociatedBillingItemId *string `json:"associatedBillingItemId,omitempty" xmlrpc:"associatedBillingItemId,omitempty"` + + // A Billing Item's associated child billing items. This includes "floating" items that are not necessarily child billing items of this billing item. + AssociatedChildren []Billing_Item `json:"associatedChildren,omitempty" xmlrpc:"associatedChildren,omitempty"` + + // A count of a Billing Item's associated child billing items. This includes "floating" items that are not necessarily child billing items of this billing item. + AssociatedChildrenCount *uint `json:"associatedChildrenCount,omitempty" xmlrpc:"associatedChildrenCount,omitempty"` + + // A billing item's associated parent billing item. This object will be the same as the parent billing item if parentId is set. + AssociatedParent []Billing_Item `json:"associatedParent,omitempty" xmlrpc:"associatedParent,omitempty"` + + // A count of a billing item's associated parent billing item. This object will be the same as the parent billing item if parentId is set. + AssociatedParentCount *uint `json:"associatedParentCount,omitempty" xmlrpc:"associatedParentCount,omitempty"` + + // A count of + AvailableMatchingVlanCount *uint `json:"availableMatchingVlanCount,omitempty" xmlrpc:"availableMatchingVlanCount,omitempty"` + + // no documentation yet + AvailableMatchingVlans []Network_Vlan `json:"availableMatchingVlans,omitempty" xmlrpc:"availableMatchingVlans,omitempty"` + + // The bandwidth allocation for a billing item. + BandwidthAllocation *Network_Bandwidth_Version1_Allocation `json:"bandwidthAllocation,omitempty" xmlrpc:"bandwidthAllocation,omitempty"` + + // A billing item's recurring child items that have once been billed and are scheduled to be billed in the future. + BillableChildren []Billing_Item `json:"billableChildren,omitempty" xmlrpc:"billableChildren,omitempty"` + + // A count of a billing item's recurring child items that have once been billed and are scheduled to be billed in the future. 
+ BillableChildrenCount *uint `json:"billableChildrenCount,omitempty" xmlrpc:"billableChildrenCount,omitempty"` + + // A count of a Billing Item's bundled billing items + BundleItemCount *uint `json:"bundleItemCount,omitempty" xmlrpc:"bundleItemCount,omitempty"` + + // A Billing Item's bundled billing items + BundleItems []Product_Item_Bundles `json:"bundleItems,omitempty" xmlrpc:"bundleItems,omitempty"` + + // A count of a Billing Item's bundled billing items. + BundledItemCount *uint `json:"bundledItemCount,omitempty" xmlrpc:"bundledItemCount,omitempty"` + + // A Billing Item's bundled billing items. + BundledItems []Billing_Item `json:"bundledItems,omitempty" xmlrpc:"bundledItems,omitempty"` + + // A Billing Item's canceled child billing items. + CanceledChildren []Billing_Item `json:"canceledChildren,omitempty" xmlrpc:"canceledChildren,omitempty"` + + // A count of a Billing Item's canceled child billing items. + CanceledChildrenCount *uint `json:"canceledChildrenCount,omitempty" xmlrpc:"canceledChildrenCount,omitempty"` + + // A billing item's cancellation date. A billing item with a cancellation date in the past is not charged on your SoftLayer invoice. Cancellation dates in the future indicate the current billing item is active, but will be cancelled and not charged for in the future. A billing item with a null cancellation date is also considered an active billing item and is charged once every billing cycle. + CancellationDate *Time `json:"cancellationDate,omitempty" xmlrpc:"cancellationDate,omitempty"` + + // The billing item's cancellation reason. + CancellationReason *Billing_Item_Cancellation_Reason `json:"cancellationReason,omitempty" xmlrpc:"cancellationReason,omitempty"` + + // A count of this will return any cancellation requests that are associated with this billing item. + CancellationRequestCount *uint `json:"cancellationRequestCount,omitempty" xmlrpc:"cancellationRequestCount,omitempty"` + + // This will return any cancellation requests that are associated with this billing item. + CancellationRequests []Billing_Item_Cancellation_Request `json:"cancellationRequests,omitempty" xmlrpc:"cancellationRequests,omitempty"` + + // The item category to which the billing item's item belongs. + Category *Product_Item_Category `json:"category,omitempty" xmlrpc:"category,omitempty"` + + // The category code of this billing item. It is used to tell us the difference between a primary disk and a secondary disk, for instance. + CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"` + + // A Billing Item's child billing items. + Children []Billing_Item `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of a Billing Item's child billing items. + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // A Billing Item's active child billing items. + ChildrenWithActiveAgreement []Billing_Item `json:"childrenWithActiveAgreement,omitempty" xmlrpc:"childrenWithActiveAgreement,omitempty"` + + // A count of a Billing Item's active child billing items. + ChildrenWithActiveAgreementCount *uint `json:"childrenWithActiveAgreementCount,omitempty" xmlrpc:"childrenWithActiveAgreementCount,omitempty"` + + // The date the billing item was created. You can see this date on the invoice. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // This is the total charge for this billing item. It is calculated based on the hourlyRecurringFee * hoursUsed.
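+ // Editor's note: a rough sketch (not from the upstream metadata) of the
+ // hourlyRecurringFee * hoursUsed calculation mentioned above. hoursUsed is returned
+ // as a string, so it must be parsed first; this assumes strconv is imported and
+ // ignores the parse error for brevity:
+ //
+ //	func estimateHourlyCharge(item Billing_Item) float64 {
+ //		if item.HourlyRecurringFee == nil || item.HoursUsed == nil {
+ //			return 0
+ //		}
+ //		hours, _ := strconv.ParseFloat(*item.HoursUsed, 64)
+ //		return float64(*item.HourlyRecurringFee) * hours
+ //	}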
+ CurrentHourlyCharge *string `json:"currentHourlyCharge,omitempty" xmlrpc:"currentHourlyCharge,omitempty"` + + // The last time this billing item was charged. + CycleStartDate *Time `json:"cycleStartDate,omitempty" xmlrpc:"cycleStartDate,omitempty"` + + // A brief description of a billing item. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The domain name is provided for server billing items. + DomainName *string `json:"domainName,omitempty" xmlrpc:"domainName,omitempty"` + + // A count of for product items which have a downgrade path defined, this will return those product items. + DowngradeItemCount *uint `json:"downgradeItemCount,omitempty" xmlrpc:"downgradeItemCount,omitempty"` + + // For product items which have a downgrade path defined, this will return those product items. + DowngradeItems []Product_Item `json:"downgradeItems,omitempty" xmlrpc:"downgradeItems,omitempty"` + + // A Billing Item's associated child billing items, excluding some items with a $0.00 recurring fee. + FilteredNextInvoiceChildren []Billing_Item `json:"filteredNextInvoiceChildren,omitempty" xmlrpc:"filteredNextInvoiceChildren,omitempty"` + + // A count of a Billing Item's associated child billing items, excluding some items with a $0.00 recurring fee. + FilteredNextInvoiceChildrenCount *uint `json:"filteredNextInvoiceChildrenCount,omitempty" xmlrpc:"filteredNextInvoiceChildrenCount,omitempty"` + + // The hostname is provided for server billing items + HostName *string `json:"hostName,omitempty" xmlrpc:"hostName,omitempty"` + + // A flag that will reflect whether this billing item is billed on an hourly basis or not. + HourlyFlag *bool `json:"hourlyFlag,omitempty" xmlrpc:"hourlyFlag,omitempty"` + + // The amount of money charged per hour for a billing item, if applicable. hourlyRecurringFee is measured in US Dollars ($USD). + HourlyRecurringFee *Float64 `json:"hourlyRecurringFee,omitempty" xmlrpc:"hourlyRecurringFee,omitempty"` + + // This is the number of hours the hourly billing item has been in use this billing period. For virtual servers, this means running, paused or stopped. + HoursUsed *string `json:"hoursUsed,omitempty" xmlrpc:"hoursUsed,omitempty"` + + // The unique identifier for this billing item. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Invoice items associated with this billing item + InvoiceItem *Billing_Invoice_Item `json:"invoiceItem,omitempty" xmlrpc:"invoiceItem,omitempty"` + + // A count of all invoice items associated with the billing item + InvoiceItemCount *uint `json:"invoiceItemCount,omitempty" xmlrpc:"invoiceItemCount,omitempty"` + + // All invoice items associated with the billing item + InvoiceItems []Billing_Invoice_Item `json:"invoiceItems,omitempty" xmlrpc:"invoiceItems,omitempty"` + + // The entry in the SoftLayer product catalog that a billing item is based upon. + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // The labor fee, if any. This is a one time charge. + LaborFee *Float64 `json:"laborFee,omitempty" xmlrpc:"laborFee,omitempty"` + + // The rate at which labor fees are taxed if you are a taxable customer. + LaborFeeTaxRate *Float64 `json:"laborFeeTaxRate,omitempty" xmlrpc:"laborFeeTaxRate,omitempty"` + + // The last time this billing item was charged. + LastBillDate *Time `json:"lastBillDate,omitempty" xmlrpc:"lastBillDate,omitempty"` + + // The location of the billing item. Some billing items have physical properties such as the server itself. 
For items such as these, we provide location information. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // The date that a billing item was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The date on which your account will be charged for this billing item. + NextBillDate *Time `json:"nextBillDate,omitempty" xmlrpc:"nextBillDate,omitempty"` + + // A Billing Item's child billing items and associated items. + NextInvoiceChildren []Billing_Item `json:"nextInvoiceChildren,omitempty" xmlrpc:"nextInvoiceChildren,omitempty"` + + // A count of a Billing Item's child billing items and associated items. + NextInvoiceChildrenCount *uint `json:"nextInvoiceChildrenCount,omitempty" xmlrpc:"nextInvoiceChildrenCount,omitempty"` + + // A Billing Item's total, including any child billing items if they exist. + NextInvoiceTotalOneTimeAmount *Float64 `json:"nextInvoiceTotalOneTimeAmount,omitempty" xmlrpc:"nextInvoiceTotalOneTimeAmount,omitempty"` + + // A Billing Item's total, including any child billing items if they exist. + NextInvoiceTotalOneTimeTaxAmount *Float64 `json:"nextInvoiceTotalOneTimeTaxAmount,omitempty" xmlrpc:"nextInvoiceTotalOneTimeTaxAmount,omitempty"` + + // A Billing Item's total, including any child billing items and associated billing items if they exist. + NextInvoiceTotalRecurringAmount *Float64 `json:"nextInvoiceTotalRecurringAmount,omitempty" xmlrpc:"nextInvoiceTotalRecurringAmount,omitempty"` + + // This is deprecated and will always be zero. Because tax is calculated in real-time, previewing the next recurring invoice is pre-tax only. + NextInvoiceTotalRecurringTaxAmount *Float64 `json:"nextInvoiceTotalRecurringTaxAmount,omitempty" xmlrpc:"nextInvoiceTotalRecurringTaxAmount,omitempty"` + + // A Billing Item's associated child billing items, excluding ALL items with a $0.00 recurring fee. + NonZeroNextInvoiceChildren []Billing_Item `json:"nonZeroNextInvoiceChildren,omitempty" xmlrpc:"nonZeroNextInvoiceChildren,omitempty"` + + // A count of a Billing Item's associated child billing items, excluding ALL items with a $0.00 recurring fee. + NonZeroNextInvoiceChildrenCount *uint `json:"nonZeroNextInvoiceChildrenCount,omitempty" xmlrpc:"nonZeroNextInvoiceChildrenCount,omitempty"` + + // Extra information provided to help you identify this billing item. This is often a username or something to help identify items that customers have more than one of. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The amount of money charged as a one-time charge for a billing item, if applicable. oneTimeFee is measured in US Dollars ($USD). + OneTimeFee *Float64 `json:"oneTimeFee,omitempty" xmlrpc:"oneTimeFee,omitempty"` + + // The rate at which one time fees are taxed if you are a taxable customer. + OneTimeFeeTaxRate *Float64 `json:"oneTimeFeeTaxRate,omitempty" xmlrpc:"oneTimeFeeTaxRate,omitempty"` + + // A billing item's original order item. Simply a reference to the original order from which this billing item was created. + OrderItem *Billing_Order_Item `json:"orderItem,omitempty" xmlrpc:"orderItem,omitempty"` + + // The SoftLayer_Billing_Order_Item ID. This is a reference to the original order item from which this billing item was originally created. + OrderItemId *int `json:"orderItemId,omitempty" xmlrpc:"orderItemId,omitempty"` + + // The original physical location for this billing item--may differ from current.
+ OriginalLocation *Location `json:"originalLocation,omitempty" xmlrpc:"originalLocation,omitempty"` + + // The package under which this billing item was sold. A Package is the general grouping of products as seen on our order forms. + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // A billing item's parent item. If a billing item has no parent item then this value is null. + Parent *Billing_Item `json:"parent,omitempty" xmlrpc:"parent,omitempty"` + + // The unique identifier of the parent of this billing item. + ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"` + + // A billing item's parent item. If a billing item has no parent item then this value is null. + ParentVirtualGuestBillingItem *Billing_Item_Virtual_Guest `json:"parentVirtualGuestBillingItem,omitempty" xmlrpc:"parentVirtualGuestBillingItem,omitempty"` + + // This flag indicates whether a billing item is scheduled to be canceled or not. + PendingCancellationFlag *bool `json:"pendingCancellationFlag,omitempty" xmlrpc:"pendingCancellationFlag,omitempty"` + + // The new order item that will replace this billing item. + PendingOrderItem *Billing_Order_Item `json:"pendingOrderItem,omitempty" xmlrpc:"pendingOrderItem,omitempty"` + + // Provisioning transaction for this billing item + ProvisionTransaction *Provisioning_Version1_Transaction `json:"provisionTransaction,omitempty" xmlrpc:"provisionTransaction,omitempty"` + + // The amount of money charged per month for a billing item, if applicable. recurringFee is measured in US Dollars ($USD). + RecurringFee *Float64 `json:"recurringFee,omitempty" xmlrpc:"recurringFee,omitempty"` + + // The rate at which recurring fees are taxed if you are a taxable customer. + RecurringFeeTaxRate *Float64 `json:"recurringFeeTaxRate,omitempty" xmlrpc:"recurringFeeTaxRate,omitempty"` + + // The number of months in which the recurring fees will be incurred. + RecurringMonths *int `json:"recurringMonths,omitempty" xmlrpc:"recurringMonths,omitempty"` + + // This is the service provider for this billing item. + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` + + // The setup fee, if any. This is a one time charge. + SetupFee *Float64 `json:"setupFee,omitempty" xmlrpc:"setupFee,omitempty"` + + // The rate at which setup fees are taxed if you are a taxable customer. + SetupFeeTaxRate *Float64 `json:"setupFeeTaxRate,omitempty" xmlrpc:"setupFeeTaxRate,omitempty"` + + // A friendly description of software component + SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"` + + // Billing items whose product item has an upgrade path defined in our system will return the next product item in the upgrade path. + UpgradeItem *Product_Item `json:"upgradeItem,omitempty" xmlrpc:"upgradeItem,omitempty"` + + // A count of billing items whose product item has an upgrade path defined in our system will return all the product items in the upgrade path. + UpgradeItemCount *uint `json:"upgradeItemCount,omitempty" xmlrpc:"upgradeItemCount,omitempty"` + + // Billing items whose product item has an upgrade path defined in our system will return all the product items in the upgrade path. + UpgradeItems []Product_Item `json:"upgradeItems,omitempty" xmlrpc:"upgradeItems,omitempty"` +} + +// The SoftLayer_Billing_Item_Account_Media_Data_Transfer_Request data type contains general information relating to a single SoftLayer billing item for a data transfer request. 
+type Billing_Item_Account_Media_Data_Transfer_Request struct { + Billing_Item + + // The data transfer request to which the billing item points. + Resource *Account_Media_Data_Transfer_Request `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The SoftLayer_Billing_Item_Association_History type keeps a record of which server billing items an "orphan" item has been associated with. Orphan billing items are billable items for secondary portable services (such as secondary subnets and StorageLayer accounts) that are not associated with a server and appear at the bottom of a SoftLayer invoice. The [[SoftLayer_Billing_Item::setAssociationId]] method allows you to associate these kinds of items with servers, making them appear as a child item of the server on your invoice. A SoftLayer_Billing_Item_Association_History record is created every time one of these associations is set. +type Billing_Item_Association_History struct { + Entity + + // The server billing item that an orphaned billing item was associated with. + AssociatedBillingItem *Billing_Item `json:"associatedBillingItem,omitempty" xmlrpc:"associatedBillingItem,omitempty"` + + // The internal identifier of the server billing item that an orphaned billing item was associated with. + AssociatedBillingItemId *int `json:"associatedBillingItemId,omitempty" xmlrpc:"associatedBillingItemId,omitempty"` + + // The billing item that was associated with a server billing item. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The internal identifier of the billing item that was associated with a server billing item. + BillingItemId *int `json:"billingItemId,omitempty" xmlrpc:"billingItemId,omitempty"` + + // The date that a billing item association was last changed. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A billing item association history's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// The SoftLayer_Billing_Item_Cancellation_Reason data type contains cancellation reasons. +type Billing_Item_Cancellation_Reason struct { + Entity + + // A cancellation reason category's internal identifier. + BillingCancelReasonCategoryId *int `json:"billingCancelReasonCategoryId,omitempty" xmlrpc:"billingCancelReasonCategoryId,omitempty"` + + // A billing cancellation reason category. + BillingCancellationReasonCategory *Billing_Item_Cancellation_Reason_Category `json:"billingCancellationReasonCategory,omitempty" xmlrpc:"billingCancellationReasonCategory,omitempty"` + + // A count of the corresponding billing items having the specific cancellation reason. + BillingItemCount *uint `json:"billingItemCount,omitempty" xmlrpc:"billingItemCount,omitempty"` + + // The corresponding billing items having the specific cancellation reason. + BillingItems []Billing_Item `json:"billingItems,omitempty" xmlrpc:"billingItems,omitempty"` + + // A reason internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A standardized reason internal identifier. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The description of the reason + Reason *string `json:"reason,omitempty" xmlrpc:"reason,omitempty"` + + // no documentation yet + TranslatedReason *string `json:"translatedReason,omitempty" xmlrpc:"translatedReason,omitempty"` +} + +// The SoftLayer_Billing_Item_Cancellation_Reason_Category data type contains cancellation reason categories.
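+// Editor's note: a hedged sketch of the SoftLayer_Billing_Item::setAssociationId
+// mechanism described above, assuming the companion services and session packages
+// expose their usual generated wrappers (session.New, GetBillingItemService, Id,
+// SetAssociationId); the identifiers are illustrative only:
+//
+//	sess := session.New(username, apiKey)
+//	billingItemService := services.GetBillingItemService(sess)
+//	ok, err := billingItemService.Id(orphanItemId).SetAssociationId(&serverItemId)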
+type Billing_Item_Cancellation_Reason_Category struct { + Entity + + // A count of the corresponding billing cancellation reasons having the specific billing cancellation reason category. + BillingCancellationReasonCount *uint `json:"billingCancellationReasonCount,omitempty" xmlrpc:"billingCancellationReasonCount,omitempty"` + + // The corresponding billing cancellation reasons having the specific billing cancellation reason category. + BillingCancellationReasons []Billing_Item_Cancellation_Reason `json:"billingCancellationReasons,omitempty" xmlrpc:"billingCancellationReasons,omitempty"` + + // A category internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The description of the category + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Billing_Item_Cancellation_Request data type is used to cancel service billing items. +type Billing_Item_Cancellation_Request struct { + Entity + + // The SoftLayer account that a service cancellation request belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The internal identifier of the customer account that a service cancellation record belongs to. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The internal identifier of the cancellation reason. + BillingCancelReasonId *int `json:"billingCancelReasonId,omitempty" xmlrpc:"billingCancelReasonId,omitempty"` + + // The date that a cancellation request was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A cancellation record's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of a collection of service cancellation items. + ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"` + + // A collection of service cancellation items. + Items []Billing_Item_Cancellation_Request_Item `json:"items,omitempty" xmlrpc:"items,omitempty"` + + // The last modified date. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Brief cancellation note. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The status of a service cancellation request. + Status *Billing_Item_Cancellation_Request_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // An internal identifier of the service cancellation status that this request is associated with. When a service cancellation is submitted, it will be in "Pending" status until the SoftLayer Sales team reviews it. The status of a cancellation request will be updated to "Approved" or "Voided" by SoftLayer Sales. + // + // It will be updated to "Complete" when all services are reclaimed. + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // The ticket that is associated with the service cancellation request. + Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"` + + // An internal identifier of the ticket that is associated with a service cancellation request. When a service cancellation is submitted, a support ticket will be created. This ticket contains your service cancellation details, and the SoftLayer Sales team will use it to further communicate with you. + TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"` + + // The user that initiated a service cancellation request. + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` +} + +// The SoftLayer_Billing_Item_Cancellation_Request_Item data type contains a billing item for cancellation.
This data type is used to associate billing items with a service cancellation request. +type Billing_Item_Cancellation_Request_Item struct { + Entity + + // The billing item for cancellation. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The internal identifier of a billing item + BillingItemId *int `json:"billingItemId,omitempty" xmlrpc:"billingItemId,omitempty"` + + // The service cancellation request that a cancellation item belongs to. + CancellationRequest *Billing_Item_Cancellation_Request `json:"cancellationRequest,omitempty" xmlrpc:"cancellationRequest,omitempty"` + + // A cancellation request's internal identifier. + CancellationRequestId *int `json:"cancellationRequestId,omitempty" xmlrpc:"cancellationRequestId,omitempty"` + + // A cancellation request item's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // This flag indicates whether a billing item should be canceled immediately. Set this flag to true when creating a cancellation request. + ImmediateCancellationFlag *bool `json:"immediateCancellationFlag,omitempty" xmlrpc:"immediateCancellationFlag,omitempty"` + + // The scheduled cancellation date + ScheduledCancellationDate *Time `json:"scheduledCancellationDate,omitempty" xmlrpc:"scheduledCancellationDate,omitempty"` + + // The reclaim status of a service. + ServiceReclaimStatusCode *string `json:"serviceReclaimStatusCode,omitempty" xmlrpc:"serviceReclaimStatusCode,omitempty"` +} + +// The SoftLayer_Billing_Item_Cancellation_Request_Status data type represents the status of a service cancellation request. +type Billing_Item_Cancellation_Request_Status struct { + Entity + + // The short description of a cancellation request status + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The internal identifier of a cancellation request status. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The status key name + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The status name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Billing_Item_Ctc_Account data type contains general information relating to a single SoftLayer billing item for a CTC client account creation. +type Billing_Item_Ctc_Account struct { + Billing_Item +} + +// The SoftLayer_Billing_Item_Big_Data_Cluster data type contains general information relating to a single SoftLayer billing item for a big data cluster. +type Billing_Item_Gateway_Appliance_Cluster struct { + Billing_Item + + // The resource for a resource group billing item. + Resource *Resource_Group `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The SoftLayer_Billing_Item_Hardware data type contains general information relating to a single SoftLayer billing item for hardware. +type Billing_Item_Hardware struct { + Billing_Item + + // The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to. + BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"` + + // A count of the raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to.
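+ // Editor's note: a small sketch (not from the upstream metadata) of assembling the
+ // cancellation request item described earlier in this file, assuming the sl helper
+ // package's pointer constructors (sl.Int, sl.Bool) are available; the IDs are
+ // illustrative only:
+ //
+ //	item := Billing_Item_Cancellation_Request_Item{
+ //		BillingItemId:             sl.Int(12345),
+ //		ImmediateCancellationFlag: sl.Bool(true),
+ //	}
+ //	request := Billing_Item_Cancellation_Request{
+ //		Items: []Billing_Item_Cancellation_Request_Item{item},
+ //	}
+ //	_ = request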
+ BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"` + + // The raw private bandwidth usage data for the current billing cycle. + BillingCyclePrivateBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"` + + // A count of the raw private bandwidth usage data for the current billing cycle. + BillingCyclePrivateBandwidthUsageCount *uint `json:"billingCyclePrivateBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsageCount,omitempty"` + + // The total private inbound bandwidth for this hardware for the current billing cycle. + BillingCyclePrivateUsageIn *Float64 `json:"billingCyclePrivateUsageIn,omitempty" xmlrpc:"billingCyclePrivateUsageIn,omitempty"` + + // The total private outbound bandwidth for this hardware for the current billing cycle. + BillingCyclePrivateUsageOut *Float64 `json:"billingCyclePrivateUsageOut,omitempty" xmlrpc:"billingCyclePrivateUsageOut,omitempty"` + + // The total private bandwidth for this hardware for the current billing cycle. + BillingCyclePrivateUsageTotal *uint `json:"billingCyclePrivateUsageTotal,omitempty" xmlrpc:"billingCyclePrivateUsageTotal,omitempty"` + + // The raw public bandwidth usage data for the current billing cycle. + BillingCyclePublicBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"` + + // A count of the raw public bandwidth usage data for the current billing cycle. + BillingCyclePublicBandwidthUsageCount *uint `json:"billingCyclePublicBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePublicBandwidthUsageCount,omitempty"` + + // The total public inbound bandwidth for this hardware for the current billing cycle. + BillingCyclePublicUsageIn *Float64 `json:"billingCyclePublicUsageIn,omitempty" xmlrpc:"billingCyclePublicUsageIn,omitempty"` + + // The total public outbound bandwidth for this hardware for the current billing cycle. + BillingCyclePublicUsageOut *Float64 `json:"billingCyclePublicUsageOut,omitempty" xmlrpc:"billingCyclePublicUsageOut,omitempty"` + + // The total public bandwidth for this hardware for the current billing cycle. + BillingCyclePublicUsageTotal *uint `json:"billingCyclePublicUsageTotal,omitempty" xmlrpc:"billingCyclePublicUsageTotal,omitempty"` + + // A lockbox account associated with a server. + LockboxNetworkStorage *Billing_Item_Network_Storage `json:"lockboxNetworkStorage,omitempty" xmlrpc:"lockboxNetworkStorage,omitempty"` + + // A count of + MonitoringBillingItemCount *uint `json:"monitoringBillingItemCount,omitempty" xmlrpc:"monitoringBillingItemCount,omitempty"` + + // no documentation yet + MonitoringBillingItems []Billing_Item `json:"monitoringBillingItems,omitempty" xmlrpc:"monitoringBillingItems,omitempty"` + + // The resource for a server billing item. + Resource *Hardware_Server `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // The resource (unique identifier) for a server billing item. + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` +} + +// The SoftLayer_Billing_Item_Hardware data type contains general information relating to a single SoftLayer billing item for hardware. 
+type Billing_Item_Hardware_Colocation struct { + Billing_Item_Hardware +} + +// The SoftLayer_Billing_Item_Hardware data type contains general information relating to a single SoftLayer billing item for hardware components. +type Billing_Item_Hardware_Component struct { + Billing_Item + + // The hardware component that this billing item points to. + Resource []Hardware_Component `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // A count of the hardware component that this billing item points to. + ResourceCount *uint `json:"resourceCount,omitempty" xmlrpc:"resourceCount,omitempty"` + + // The resource (unique identifier) for a server billing item. + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` +} + +// The SoftLayer_Billing_Item_Hardware_Security_Module data type contains general information relating to a single SoftLayer billing item for a hardware security module. +type Billing_Item_Hardware_Security_Module struct { + Billing_Item_Hardware +} + +// The SoftLayer_Billing_Item_Hardware_Server data type contains billing information about a bare metal server and its relationship to a particular customer account. +type Billing_Item_Hardware_Server struct { + Billing_Item_Hardware +} + +// no documentation yet +type Billing_Item_Link_ThePlanet struct { + Entity + + // no documentation yet + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // no documentation yet + ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"` +} + +// The SoftLayer_Billing_Item_Network_Application_Delivery_Controller data type describes the billing item related to a NetScaler VPX +type Billing_Item_Network_Application_Delivery_Controller struct { + Billing_Item + + // The bandwidth allotment detail for a billing item. + BandwidthAllotmentDetail *Network_Bandwidth_Version1_Allotment_Detail `json:"bandwidthAllotmentDetail,omitempty" xmlrpc:"bandwidthAllotmentDetail,omitempty"` + + // The network application controller that a billing item is associated with. + Resource *Network_Application_Delivery_Controller `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// A SoftLayer_Billing_Item_Network_Application_Delivery_Controller_LoadBalancer represents the [[SoftLayer_Billing_Item|billing item]] related to a single [[SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress|load balancer]] instance. +type Billing_Item_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress struct { + Billing_Item + + // The load balancer that a load balancer billing item is associated with. + Resource *Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The SoftLayer_Billing_Item_Hardware data type contains general information relating to a single SoftLayer billing item for hardware. +type Billing_Item_Network_Bandwidth struct { + Billing_Item +} + +// The SoftLayer_Billing_Item_Network_Firewall data type contains general information relating to a single SoftLayer billing item whose item category code is 'firewall' +type Billing_Item_Network_Firewall struct { + Billing_Item + + // The VLAN firewall that a VLAN firewall billing item is associated with. 
+ Resource *Network_Component_Firewall `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The SoftLayer_Billing_Item_Network_Firewall_Module_Context data type describes the billing items related to VLAN Firewalls. +type Billing_Item_Network_Firewall_Module_Context struct { + Billing_Item + + // The total public outbound bandwidth for this firewall for the current billing cycle. + BillingCyclePublicUsageOut *Float64 `json:"billingCyclePublicUsageOut,omitempty" xmlrpc:"billingCyclePublicUsageOut,omitempty"` +} + +// A SoftLayer_Billing_Item_Network_Interconnect represents the [[SoftLayer_Billing_Item|billing item]] related to a network interconnect instance. +type Billing_Item_Network_Interconnect struct { + Billing_Item + + // The interconnect tenant that the billing item is associated with. + Resource *Network_Interconnect_Tenant `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// A SoftLayer_Billing_Item_Network_LoadBalancer represents the [[SoftLayer_Billing_Item|billing item]] related to a single [[SoftLayer_Network_LoadBalancer|load balancer]] instance. +type Billing_Item_Network_LoadBalancer struct { + Billing_Item +} + +// The SoftLayer_Billing_Item_Network_LoadBalancer_Global data type contains general information relating to a single SoftLayer billing item whose item category code is 'global_load_balancer' +type Billing_Item_Network_LoadBalancer_Global struct { + Billing_Item + + // The resource for a global load balancer billing item. + Resource *Network_LoadBalancer_Global_Account `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// A SoftLayer_Billing_Item_Network_LoadBalancer_VirtualIpAddress represents the [[SoftLayer_Billing_Item|billing item]] related to a single [[SoftLayer_Network_LoadBalancer_VirtualIpAddress|load balancer]] instance. +type Billing_Item_Network_LoadBalancer_VirtualIpAddress struct { + Billing_Item + + // The load balancer's virtual IP address that the billing item is associated with. + Resource *Network_LoadBalancer_VirtualIpAddress `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The SoftLayer_Billing_Item_Network_Message_Delivery data describes the related billing item. +type Billing_Item_Network_Message_Delivery struct { + Billing_Item + + // The object this billing item is associated with. + Resource *Network_Message_Delivery `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The SoftLayer_Billing_Item_Network_PerformanceStorage_Iscsi data type contains general information relating to a single SoftLayer billing item whose item category code is 'performance_storage_iscsi' +type Billing_Item_Network_PerformanceStorage_Iscsi struct { + Billing_Item_Network_Storage +} + +// The SoftLayer_Billing_Item_Network_PerformanceStorage_Nfs data type contains general information relating to a single SoftLayer billing item whose item category code is 'performance_storage_nfs' +type Billing_Item_Network_PerformanceStorage_Nfs struct { + Billing_Item_Network_Storage +} + +// The SoftLayer_Billing_Item_Network_Storage data type describes the billing items related to StorageLayer accounts. +type Billing_Item_Network_Storage struct { + Billing_Item + + // The StorageLayer account that a network storage billing item is associated with. + Resource *Network_Storage `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The SoftLayer_Billing_Item_Network_Storage_Hub models all billing items related to hub-based StorageLayer offerings, such as CloudLayer storage. 
+type Billing_Item_Network_Storage_Hub struct { + Billing_Item_Network_Storage +} + +// The SoftLayer_Billing_Item_Network_Storage_Hub_Bandwidth data type models the billing items created when a CloudLayer storage account generates a bandwidth overage charge. +type Billing_Item_Network_Storage_Hub_Bandwidth struct { + Billing_Item_Network_Storage +} + +// The SoftLayer_Billing_Item_Network_Subnet data type contains general information relating to a single SoftLayer billing item whose item category code is one of the following: +// * pri_ip_address +// * static_sec_ip_addresses (static secondary) +// * sov_sec_ip_addresses (secondary on vlan, also known as "portable ips") +// * sov_sec_ip_addresses_pub (sov_sec_ip_addresses public only) +// * sov_sec_ip_addresses_priv (sov_sec_ip_addresses private only) +// * sec_ip_addresses (old style, secondary ip addresses) +// +// +// These item categories denote that the billing item has subnet information attached. +type Billing_Item_Network_Subnet struct { + Billing_Item + + // The resource for a subnet-related billing item. + Resource *Network_Subnet `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // The resource name for a subnet billing item. + ResourceName *string `json:"resourceName,omitempty" xmlrpc:"resourceName,omitempty"` + + // The resource (unique identifier) for a server billing item. + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` +} + +// The SoftLayer_Billing_Item_Network_Subnet_IpAddress_Global data type contains general information relating to a single SoftLayer billing item whose item category code is one of the following: +// * global_ipv4 +// * global_ipv6 +// +// +// These item categories denote that the billing item has subnet information attached. +type Billing_Item_Network_Subnet_IpAddress_Global struct { + Billing_Item_Network_Subnet +} + +// The SoftLayer_Billing_Item_Network_Storage data type describes the billing items related to StorageLayer accounts. +type Billing_Item_Network_Tunnel struct { + Billing_Item + + // The IPsec VPN that a network tunnel billing item is associated with. + Resource *Network_Tunnel_Module_Context `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The SoftLayer_Billing_Item_Network_Vlan data type contains general information relating to a single SoftLayer billing item whose item category code is one of the following: +// * network_vlan +// +// +// These item categories denote that the billing item has network vlan information attached. +type Billing_Item_Network_Vlan struct { + Billing_Item + + // The network vlan resource for this billing item. + Resource *Network_Vlan `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Billing_Item_NewCustomerSetup struct { + Billing_Item +} + +// The SoftLayer_Billing_Item_Private_Cloud data type contains general information relating to a single billing item for a private cloud. +type Billing_Item_Private_Cloud struct { + Billing_Item +} + +// The SoftLayer_Billing_Item_Hardware data type contains general information relating to a single SoftLayer billing item for hardware components. +type Billing_Item_Software_Component struct { + Billing_Item + + // The software component that this billing item points to. + Resource *Software_Component `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // The resource (unique identifier) for a software component billing item. 
+ ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` +} + +// The SoftLayer_Billing_Item_Software_Component_Analytics_Urchin data type contains general information relating to a single SoftLayer billing item for Urchin software components. +type Billing_Item_Software_Component_Analytics_Urchin struct { + Billing_Item +} + +// The SoftLayer_Billing_Item_Software_Component_ControlPanel data type contains general information relating to a single SoftLayer billing item for control panel software components. +type Billing_Item_Software_Component_ControlPanel struct { + Billing_Item +} + +// The SoftLayer_Billing_Item_Software_Component_ControlPanel data type contains general information relating to a single SoftLayer billing item for control panel software components. +type Billing_Item_Software_Component_ControlPanel_Parallels_Plesk_Billing struct { + Billing_Item +} + +// The SoftLayer_Billing_Item_Software_Component_OperatingSystem_Addon data type contains general information relating to a single SoftLayer billing item for operating system add-on software components. +type Billing_Item_Software_Component_OperatingSystem_Addon struct { + Billing_Item +} + +// The SoftLayer_Billing_Item_Software_Component_OperatingSystem_Addon_Citrix_Essentials data type contains general information relating to a single SoftLayer billing item for Citrix Essentials software components. +type Billing_Item_Software_Component_OperatingSystem_Addon_Citrix_Essentials struct { + Billing_Item_Software_Component_OperatingSystem_Addon + + // The Citrix Essentials software component that a billing item is associated with. + Resource *Software_Component `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The SoftLayer_Billing_Item_Software_Component_Virtual_OperatingSystem data type contains general information relating to a single SoftLayer billing item for operating system software components on virtual machines. +type Billing_Item_Software_Component_Virtual_OperatingSystem struct { + Billing_Item +} + +// The SoftLayer_Billing_Item_Software_Component_Virtual_OperatingSystem_Microsoft data type contains general information relating to a single SoftLayer billing item for a Microsoft operating system software components on virtual machines. +type Billing_Item_Software_Component_Virtual_OperatingSystem_Microsoft struct { + Billing_Item_Software_Component_Virtual_OperatingSystem + + // The software virtual license to which this billing item points. + Resource *Software_VirtualLicense `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // The resource (unique identifier) for a software virtual license billing item. + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` +} + +// The SoftLayer_Billing_Item_Software_Component_Virtual_OperatingSystem_Microsoft data type contains general information relating to a single SoftLayer billing item for a Microsoft operating system software components on virtual machines. +type Billing_Item_Software_Component_Virtual_OperatingSystem_Redhat struct { + Billing_Item_Software_Component_Virtual_OperatingSystem + + // The software component to which this billing item points. + Resource *Software_Component `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // The resource (unique identifier) for a software component billing item. 
+ ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Software_License data type contains general information relating to a single SoftLayer billing item for a software license.
+type Billing_Item_Software_License struct {
+ Billing_Item
+
+ // The resource for a software license billing item.
+ Resource *Software_AccountLicense `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Support data type contains general information relating to a premium support offering.
+type Billing_Item_Support struct {
+ Billing_Item
+}
+
+// The SoftLayer_Billing_Item_User_Customer_External_Binding data type describes the billing item related to an external authentication binding.
+type Billing_Item_User_Customer_External_Binding struct {
+ Billing_Item
+
+ // The external authentication binding that a billing item is associated with.
+ Resource *User_Customer_External_Binding `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// no documentation yet
+type Billing_Item_Virtual_DedicatedHost struct {
+ Billing_Item
+
+ // The resource for a virtual dedicated host billing item.
+ Resource *Virtual_DedicatedHost `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+ // The resource (unique identifier) for a server billing item.
+ ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// A SoftLayer_Billing_Item_Virtual_Dedicated_Rack data type models the billing information for a single bandwidth pool. Bandwidth pooling members share their public bandwidth allocations, and overage charges are incurred by the virtual rack rather than by its individual members. Virtual rack billing items are the parent items for all of their rack membership billing items.
+type Billing_Item_Virtual_Dedicated_Rack struct {
+ Billing_Item
+
+ // The raw bandwidth usage data for the current billing cycle. One object is returned for each network a virtual rack is attached to.
+ BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"`
+
+ // A count of the raw bandwidth usage data for the current billing cycle. One object is returned for each network a virtual rack is attached to.
+ BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"`
+
+ // The raw private bandwidth usage data for the current billing cycle.
+ BillingCyclePrivateBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"`
+
+ // A count of the raw private bandwidth usage data for the current billing cycle.
+ BillingCyclePrivateBandwidthUsageCount *uint `json:"billingCyclePrivateBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsageCount,omitempty"`
+
+ // The total private network inbound bandwidth for this virtual rack for the current billing cycle.
+ BillingCyclePrivateUsageIn *Float64 `json:"billingCyclePrivateUsageIn,omitempty" xmlrpc:"billingCyclePrivateUsageIn,omitempty"`
+
+ // The total private network outbound bandwidth for this virtual rack for the current billing cycle.
+ BillingCyclePrivateUsageOut *Float64 `json:"billingCyclePrivateUsageOut,omitempty" xmlrpc:"billingCyclePrivateUsageOut,omitempty"`
+
+ // The total private network bandwidth for this virtual rack for the current billing cycle. 
+ BillingCyclePrivateUsageTotal *uint `json:"billingCyclePrivateUsageTotal,omitempty" xmlrpc:"billingCyclePrivateUsageTotal,omitempty"` + + // The raw public bandwidth usage data for the current billing cycle. + BillingCyclePublicBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"` + + // A count of the raw public bandwidth usage data for the current billing cycle. + BillingCyclePublicBandwidthUsageCount *uint `json:"billingCyclePublicBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePublicBandwidthUsageCount,omitempty"` + + // The total public inbound bandwidth for this virtual rack for the current billing cycle. + BillingCyclePublicUsageIn *Float64 `json:"billingCyclePublicUsageIn,omitempty" xmlrpc:"billingCyclePublicUsageIn,omitempty"` + + // The total public outbound bandwidth for this virtual rack for the current billing cycle. + BillingCyclePublicUsageOut *Float64 `json:"billingCyclePublicUsageOut,omitempty" xmlrpc:"billingCyclePublicUsageOut,omitempty"` + + // The total public bandwidth for this virtual rack for the current billing cycle. + BillingCyclePublicUsageTotal *uint `json:"billingCyclePublicUsageTotal,omitempty" xmlrpc:"billingCyclePublicUsageTotal,omitempty"` + + // The virtual rack that a virtual rack billing item is associated with. + Resource *Network_Bandwidth_Version1_Allotment `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The SoftLayer_Billing_Item_Virtual_Disk_Image data type contains general information relating to a single SoftLayer billing item for disk images. +type Billing_Item_Virtual_Disk_Image struct { + Billing_Item + + // The disk image to which the billing item points. + Resource *Virtual_Disk_Image `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // The resource (unique identifier) for a disk image billing item. + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` +} + +// The SoftLayer_Billing_Item_Virtual_Guest data type contains general information relating to a single SoftLayer billing item for guests. +type Billing_Item_Virtual_Guest struct { + Billing_Item + + // The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to. + BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"` + + // A count of the raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to. + BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"` + + // The raw private bandwidth usage data for the current billing cycle. + BillingCyclePrivateBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"` + + // A count of the raw private bandwidth usage data for the current billing cycle. + BillingCyclePrivateBandwidthUsageCount *uint `json:"billingCyclePrivateBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsageCount,omitempty"` + + // The total private inbound bandwidth for this virtual server for the current billing cycle. 
+ BillingCyclePrivateUsageIn *Float64 `json:"billingCyclePrivateUsageIn,omitempty" xmlrpc:"billingCyclePrivateUsageIn,omitempty"` + + // The total private outbound bandwidth for this virtual server for the current billing cycle. + BillingCyclePrivateUsageOut *Float64 `json:"billingCyclePrivateUsageOut,omitempty" xmlrpc:"billingCyclePrivateUsageOut,omitempty"` + + // The total private bandwidth for this virtual server for the current billing cycle. + BillingCyclePrivateUsageTotal *uint `json:"billingCyclePrivateUsageTotal,omitempty" xmlrpc:"billingCyclePrivateUsageTotal,omitempty"` + + // The raw public bandwidth usage data for the current billing cycle. + BillingCyclePublicBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"` + + // A count of the raw public bandwidth usage data for the current billing cycle. + BillingCyclePublicBandwidthUsageCount *uint `json:"billingCyclePublicBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePublicBandwidthUsageCount,omitempty"` + + // The total public inbound bandwidth for this virtual server for the current billing cycle. + BillingCyclePublicUsageIn *Float64 `json:"billingCyclePublicUsageIn,omitempty" xmlrpc:"billingCyclePublicUsageIn,omitempty"` + + // The total public outbound bandwidth for this virtual server for the current billing cycle. + BillingCyclePublicUsageOut *Float64 `json:"billingCyclePublicUsageOut,omitempty" xmlrpc:"billingCyclePublicUsageOut,omitempty"` + + // The total public bandwidth for this virtual server for the current billing cycle. + BillingCyclePublicUsageTotal *uint `json:"billingCyclePublicUsageTotal,omitempty" xmlrpc:"billingCyclePublicUsageTotal,omitempty"` + + // A count of + MonitoringBillingItemCount *uint `json:"monitoringBillingItemCount,omitempty" xmlrpc:"monitoringBillingItemCount,omitempty"` + + // no documentation yet + MonitoringBillingItems []Billing_Item `json:"monitoringBillingItems,omitempty" xmlrpc:"monitoringBillingItems,omitempty"` + + // The resource for a cloud server billing item. + Resource *Virtual_Guest `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // The resource (unique identifier) for a server billing item. + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` +} + +// The SoftLayer_Billing_Item_Virtual_Host_Usage data type contains general information relating to a single SoftLayer billing item for virtual machine peak usage +type Billing_Item_Virtual_Host_Usage struct { + Billing_Item + + // The resource for a peak virtual machine usage billing item. + Resource *Hardware `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // The resource (unique identifier) for a server billing item. + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` +} + +// The SoftLayer_Billing_Item_Workspace data type contains general information relating to a single SoftLayer billing item whose item category code is 'workspace' +type Billing_Item_Workspace struct { + Billing_Item +} + +// The SoftLayer_Billing_Order data type contains general information relating to an individual order applied to a SoftLayer customer account or to a new customer. Personal information in this type such as names, addresses, and phone numbers are taken from the account's contact information at the time the order is generated for existing SoftLayer customer. 
+type Billing_Order struct { + Entity + + // The [[SoftLayer_Account|account]] to which an order belongs. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The account ID to which an order belongs. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"` + + // A cart is similar to a quote, except that it can be continually modified by the customer and does not have locked-in prices. Not all orders will have a cart associated with them. See [[SoftLayer_Billing_Order_Cart]] for more information. + Cart *Billing_Order_Cart `json:"cart,omitempty" xmlrpc:"cart,omitempty"` + + // A count of the [[SoftLayer_Billing_Order_Item (type)|order items]] that are core restricted + CoreRestrictedItemCount *uint `json:"coreRestrictedItemCount,omitempty" xmlrpc:"coreRestrictedItemCount,omitempty"` + + // The [[SoftLayer_Billing_Order_Item (type)|order items]] that are core restricted + CoreRestrictedItems []Billing_Order_Item `json:"coreRestrictedItems,omitempty" xmlrpc:"coreRestrictedItems,omitempty"` + + // The point in time at which a billing item was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of all credit card transactions associated with this order. If this order was not placed with a credit card, this will be empty. + CreditCardTransactionCount *uint `json:"creditCardTransactionCount,omitempty" xmlrpc:"creditCardTransactionCount,omitempty"` + + // All credit card transactions associated with this order. If this order was not placed with a credit card, this will be empty. + CreditCardTransactions []Billing_Payment_Card_Transaction `json:"creditCardTransactions,omitempty" xmlrpc:"creditCardTransactions,omitempty"` + + // no documentation yet + ExchangeRate *Billing_Currency_ExchangeRate `json:"exchangeRate,omitempty" xmlrpc:"exchangeRate,omitempty"` + + // * + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The SoftLayer_User_Customer id of the portal or API user who impersonated the user which submitted an order. + ImpersonatingUserRecordId *int `json:"impersonatingUserRecordId,omitempty" xmlrpc:"impersonatingUserRecordId,omitempty"` + + // no documentation yet + InitialInvoice *Billing_Invoice `json:"initialInvoice,omitempty" xmlrpc:"initialInvoice,omitempty"` + + // A count of the SoftLayer_Billing_Order_items included in an order. + ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"` + + // The SoftLayer_Billing_Order_items included in an order. + Items []Billing_Order_Item `json:"items,omitempty" xmlrpc:"items,omitempty"` + + // The last time an order was updated. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + OrderApprovalDate *Time `json:"orderApprovalDate,omitempty" xmlrpc:"orderApprovalDate,omitempty"` + + // An order's non-server items total monthly fee. + OrderNonServerMonthlyAmount *Float64 `json:"orderNonServerMonthlyAmount,omitempty" xmlrpc:"orderNonServerMonthlyAmount,omitempty"` + + // The SoftLayer_Billing_Order_Quote id of the quote's user who finalized an order. + OrderQuoteId *int `json:"orderQuoteId,omitempty" xmlrpc:"orderQuoteId,omitempty"` + + // An order's server items total monthly fee. + OrderServerMonthlyAmount *Float64 `json:"orderServerMonthlyAmount,omitempty" xmlrpc:"orderServerMonthlyAmount,omitempty"` + + // A count of an order's top level items. 
This normally includes the server line item and any non-server additional services such as NAS or ISCSI. + OrderTopLevelItemCount *uint `json:"orderTopLevelItemCount,omitempty" xmlrpc:"orderTopLevelItemCount,omitempty"` + + // An order's top level items. This normally includes the server line item and any non-server additional services such as NAS or ISCSI. + OrderTopLevelItems []Billing_Order_Item `json:"orderTopLevelItems,omitempty" xmlrpc:"orderTopLevelItems,omitempty"` + + // This amount represents the order's initial charge including set up fee and taxes. + OrderTotalAmount *Float64 `json:"orderTotalAmount,omitempty" xmlrpc:"orderTotalAmount,omitempty"` + + // An order's total one time amount summing all the set up fees, the labor fees and the one time fees. Taxes will be applied for non-tax-exempt. This amount represents the initial fees that will be charged. + OrderTotalOneTime *Float64 `json:"orderTotalOneTime,omitempty" xmlrpc:"orderTotalOneTime,omitempty"` + + // An order's total one time amount. This amount represents the initial fees before tax. + OrderTotalOneTimeAmount *Float64 `json:"orderTotalOneTimeAmount,omitempty" xmlrpc:"orderTotalOneTimeAmount,omitempty"` + + // An order's total one time tax amount. This amount represents the tax that will be applied to the total charge, if the SoftLayer_Account tied to a SoftLayer_Billing_Order is a taxable account. + OrderTotalOneTimeTaxAmount *Float64 `json:"orderTotalOneTimeTaxAmount,omitempty" xmlrpc:"orderTotalOneTimeTaxAmount,omitempty"` + + // An order's total recurring amount. Taxes will be applied for non-tax-exempt. This amount represents the fees that will be charged on a recurring (usually monthly) basis. + OrderTotalRecurring *Float64 `json:"orderTotalRecurring,omitempty" xmlrpc:"orderTotalRecurring,omitempty"` + + // An order's total recurring amount. This amount represents the fees that will be charged on a recurring (usually monthly) basis. + OrderTotalRecurringAmount *Float64 `json:"orderTotalRecurringAmount,omitempty" xmlrpc:"orderTotalRecurringAmount,omitempty"` + + // The total tax amount of the recurring fees, if the SoftLayer_Account tied to a SoftLayer_Billing_Order is a taxable account. + OrderTotalRecurringTaxAmount *Float64 `json:"orderTotalRecurringTaxAmount,omitempty" xmlrpc:"orderTotalRecurringTaxAmount,omitempty"` + + // An order's total setup fee. + OrderTotalSetupAmount *Float64 `json:"orderTotalSetupAmount,omitempty" xmlrpc:"orderTotalSetupAmount,omitempty"` + + // The type of an order. This lets you know where this order was generated from. + OrderType *Billing_Order_Type `json:"orderType,omitempty" xmlrpc:"orderType,omitempty"` + + // The SoftLayer_Billing_Order_Type id of the order. + OrderTypeId *int `json:"orderTypeId,omitempty" xmlrpc:"orderTypeId,omitempty"` + + // A count of all PayPal transactions associated with this order. If this order was not placed with PayPal, this will be empty. + PaypalTransactionCount *uint `json:"paypalTransactionCount,omitempty" xmlrpc:"paypalTransactionCount,omitempty"` + + // All PayPal transactions associated with this order. If this order was not placed with PayPal, this will be empty. 
+ PaypalTransactions []Billing_Payment_PayPal_Transaction `json:"paypalTransactions,omitempty" xmlrpc:"paypalTransactions,omitempty"`
+
+ // no documentation yet
+ PresaleEvent *Sales_Presale_Event `json:"presaleEvent,omitempty" xmlrpc:"presaleEvent,omitempty"`
+
+ // no documentation yet
+ PresaleEventId *int `json:"presaleEventId,omitempty" xmlrpc:"presaleEventId,omitempty"`
+
+ // Flag indicating a private cloud solution order (Deprecated)
+ PrivateCloudOrderFlag *bool `json:"privateCloudOrderFlag,omitempty" xmlrpc:"privateCloudOrderFlag,omitempty"`
+
+ // The quote of an order. This quote holds information about its expiration date, creation date, name and status. This information is tied to an order having the status 'QUOTE'
+ Quote *Billing_Order_Quote `json:"quote,omitempty" xmlrpc:"quote,omitempty"`
+
+ // The Referral Partner who referred this order. (Only necessary for new customer orders)
+ ReferralPartner *Account `json:"referralPartner,omitempty" xmlrpc:"referralPartner,omitempty"`
+
+ // The purchaser's current status, e.g. Approved or Pending_Approval.
+ Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // This flag indicates an order is an upgrade.
+ UpgradeRequestFlag *bool `json:"upgradeRequestFlag,omitempty" xmlrpc:"upgradeRequestFlag,omitempty"`
+
+ // The SoftLayer_User_Customer object tied to an order.
+ UserRecord *User_Customer `json:"userRecord,omitempty" xmlrpc:"userRecord,omitempty"`
+
+ // The SoftLayer_User_Customer id of the portal or API user who submitted an order.
+ UserRecordId *int `json:"userRecordId,omitempty" xmlrpc:"userRecordId,omitempty"`
+}
+
+// no documentation yet
+type Billing_Order_Cart struct {
+ Billing_Order_Quote
+}
+
+// Every individual item that a SoftLayer customer is billed for is recorded in the SoftLayer_Billing_Item data type. Billing items range from server chassis to hard drives to control panels, bandwidth quota upgrades and port upgrade charges. SoftLayer [[SoftLayer_Billing_Invoice|invoices]] are generated from the cost of a customer's billing items. Billing items are copied from the product catalog as they're ordered by customers to create a reference between an account and the billable items they own.
+//
+// Billing items exist in a tree relationship. Items are associated with each other by parent/child relationships. Component items such as CPUs, RAM, and software each have a parent billing item for the server chassis they're associated with. Billing items with a null parent item do not have an associated parent item.
+type Billing_Order_Item struct {
+ Entity
+
+ // The SoftLayer_Billing_Item tied to the order item.
+ BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+ // A count of the other items included with an ordered item.
+ BundledItemCount *uint `json:"bundledItemCount,omitempty" xmlrpc:"bundledItemCount,omitempty"`
+
+ // The other items included with an ordered item.
+ BundledItems []Billing_Order_Item `json:"bundledItems,omitempty" xmlrpc:"bundledItems,omitempty"`
+
+ // The item category tied to an order item.
+ Category *Product_Item_Category `json:"category,omitempty" xmlrpc:"category,omitempty"`
+
+ // The category code for the order item.
+ CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"`
+
+ // The child order items for an order item. All server order items should have children. These children are considered a part of the server. 
+ Children []Billing_Order_Item `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of the child order items for an order item. All server order items should have children. These children are considered a part of the server. + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // friendly description of purchase item. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The domain name of the server as designated by the purchaser at the time of order placement. + DomainName *string `json:"domainName,omitempty" xmlrpc:"domainName,omitempty"` + + // A hardware's universally unique identifier. + GlobalIdentifier *string `json:"globalIdentifier,omitempty" xmlrpc:"globalIdentifier,omitempty"` + + // The component type tied to an order item. All hardware-specific items should have a generic hardware component. + HardwareGenericComponent *Hardware_Component_Model_Generic `json:"hardwareGenericComponent,omitempty" xmlrpc:"hardwareGenericComponent,omitempty"` + + // The hostname of the server as designated by the purchaser at the time of order placement. + HostName *string `json:"hostName,omitempty" xmlrpc:"hostName,omitempty"` + + // The amount of money charged per hourly for an order item, if applicable, and only if it was ordered this day. hourlyRecurringFee is measured in US Dollars ($USD). + HourlyRecurringFee *Float64 `json:"hourlyRecurringFee,omitempty" xmlrpc:"hourlyRecurringFee,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The SoftLayer_Product_Item tied to an order item. The item is the actual definition of the product being sold. + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // A count of this is an item's category answers. + ItemCategoryAnswerCount *uint `json:"itemCategoryAnswerCount,omitempty" xmlrpc:"itemCategoryAnswerCount,omitempty"` + + // This is an item's category answers. + ItemCategoryAnswers []Billing_Order_Item_Category_Answer `json:"itemCategoryAnswers,omitempty" xmlrpc:"itemCategoryAnswers,omitempty"` + + // The SoftLayer_Product_Item ID for this order item. + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // The SoftLayer_Product_Item_Price tied to an order item. The item price object describes the cost of an item. + ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"` + + // the item price id (SoftLayer_Product_Item_Price->id) of the ordered item. + ItemPriceId *Float64 `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"` + + // An order item's labor fee total after taxes. This does not include any child invoice items. + LaborAfterTaxAmount *Float64 `json:"laborAfterTaxAmount,omitempty" xmlrpc:"laborAfterTaxAmount,omitempty"` + + // The labor fee, if any. This is a one time charge. + LaborFee *Float64 `json:"laborFee,omitempty" xmlrpc:"laborFee,omitempty"` + + // The rate at which labor fees are taxed if you are a taxable customer. + LaborFeeTaxRate *Float64 `json:"laborFeeTaxRate,omitempty" xmlrpc:"laborFeeTaxRate,omitempty"` + + // An order item's labor tax amount. This does not include any child invoice items. + LaborTaxAmount *Float64 `json:"laborTaxAmount,omitempty" xmlrpc:"laborTaxAmount,omitempty"` + + // The location of an ordered item. This is usually the same as the server it is being ordered with. Otherwise it describes the location of the additional service being ordered. 
+ Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + NextOrderChildren []Billing_Order_Item `json:"nextOrderChildren,omitempty" xmlrpc:"nextOrderChildren,omitempty"` + + // A count of + NextOrderChildrenCount *uint `json:"nextOrderChildrenCount,omitempty" xmlrpc:"nextOrderChildrenCount,omitempty"` + + // This is only populated when an upgrade order is placed. The old billing item represents what the billing was before the upgrade happened. + OldBillingItem *Billing_Item `json:"oldBillingItem,omitempty" xmlrpc:"oldBillingItem,omitempty"` + + // An order item's one-time fee total after taxes. This does not include any child invoice items. + OneTimeAfterTaxAmount *Float64 `json:"oneTimeAfterTaxAmount,omitempty" xmlrpc:"oneTimeAfterTaxAmount,omitempty"` + + // The amount of money charged as a one-time charge for an order item, if applicable. oneTimeFee is measured in US Dollars ($USD). + OneTimeFee *Float64 `json:"oneTimeFee,omitempty" xmlrpc:"oneTimeFee,omitempty"` + + // The rate at which one time fees are taxed if you are a taxable customer. + OneTimeFeeTaxRate *Float64 `json:"oneTimeFeeTaxRate,omitempty" xmlrpc:"oneTimeFeeTaxRate,omitempty"` + + // An order item's one-time tax amount. This does not include any child invoice items. + OneTimeTaxAmount *Float64 `json:"oneTimeTaxAmount,omitempty" xmlrpc:"oneTimeTaxAmount,omitempty"` + + // The order to which this item belongs. The order contains all the information related to the items included in an order + Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"` + + // no documentation yet + OrderApprovalDate *Time `json:"orderApprovalDate,omitempty" xmlrpc:"orderApprovalDate,omitempty"` + + // The SoftLayer_Product_Package an order item is a part of. + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // The parent order item ID for an item. Items that are associated with a server will have a parent. The parent will be the server item itself. + Parent *Billing_Order_Item `json:"parent,omitempty" xmlrpc:"parent,omitempty"` + + // no documentation yet + ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"` + + // The SoftLayer_Product_Package_Preset related to this order item. + Preset *Product_Package_Preset `json:"preset,omitempty" xmlrpc:"preset,omitempty"` + + // The id for the preset configuration ordered. + PresetId *int `json:"presetId,omitempty" xmlrpc:"presetId,omitempty"` + + // no documentation yet + PromoCodeId *int `json:"promoCodeId,omitempty" xmlrpc:"promoCodeId,omitempty"` + + // the quantity of the ordered item in a quote. + Quantity *int `json:"quantity,omitempty" xmlrpc:"quantity,omitempty"` + + // An order item's recurring fee total after taxes. This does not include any child invoice items. + RecurringAfterTaxAmount *Float64 `json:"recurringAfterTaxAmount,omitempty" xmlrpc:"recurringAfterTaxAmount,omitempty"` + + // The amount of money charged per month for an order item, if applicable. recurringFee is measured in US Dollars ($USD). + RecurringFee *Float64 `json:"recurringFee,omitempty" xmlrpc:"recurringFee,omitempty"` + + // An order item's recurring tax amount. This does not include any child invoice items. 
+ RecurringTaxAmount *Float64 `json:"recurringTaxAmount,omitempty" xmlrpc:"recurringTaxAmount,omitempty"` + + // A count of power supplies contained within this SoftLayer_Billing_Order + RedundantPowerSupplyCount *uint `json:"redundantPowerSupplyCount,omitempty" xmlrpc:"redundantPowerSupplyCount,omitempty"` + + // An order item's setup fee total after taxes. This does not include any child invoice items. + SetupAfterTaxAmount *Float64 `json:"setupAfterTaxAmount,omitempty" xmlrpc:"setupAfterTaxAmount,omitempty"` + + // The setup fee, if any. This is a one time charge. + SetupFee *Float64 `json:"setupFee,omitempty" xmlrpc:"setupFee,omitempty"` + + // The month set up fee deferral. + SetupFeeDeferralMonths *int `json:"setupFeeDeferralMonths,omitempty" xmlrpc:"setupFeeDeferralMonths,omitempty"` + + // The rate at which setup fees are taxed if you are a taxable customer. + SetupFeeTaxRate *Float64 `json:"setupFeeTaxRate,omitempty" xmlrpc:"setupFeeTaxRate,omitempty"` + + // An order item's setup tax amount. This does not include any child invoice items. + SetupTaxAmount *Float64 `json:"setupTaxAmount,omitempty" xmlrpc:"setupTaxAmount,omitempty"` + + // For ordered items that are software items, a full description of that software can be found with this property. + SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"` + + // A count of the drive storage groups that are attached to this billing order item. + StorageGroupCount *uint `json:"storageGroupCount,omitempty" xmlrpc:"storageGroupCount,omitempty"` + + // The drive storage groups that are attached to this billing order item. + StorageGroups []Configuration_Storage_Group_Order `json:"storageGroups,omitempty" xmlrpc:"storageGroups,omitempty"` + + // The recurring fee of an ordered item. This amount represents the fees that will be charged on a recurring (usually monthly) basis. + TotalRecurringAmount *Float64 `json:"totalRecurringAmount,omitempty" xmlrpc:"totalRecurringAmount,omitempty"` + + // The next SoftLayer_Product_Item in the upgrade path for this order item. + UpgradeItem *Product_Item `json:"upgradeItem,omitempty" xmlrpc:"upgradeItem,omitempty"` +} + +// The SoftLayer_Billing_Order_Item_Category_Answer data type represents a single answer to an item category question. +type Billing_Order_Item_Category_Answer struct { + Entity + + // The answer to the question. + Answer *string `json:"answer,omitempty" xmlrpc:"answer,omitempty"` + + // The date that the answer was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The billing order item that the answer is for. + OrderItem *Billing_Order_Item `json:"orderItem,omitempty" xmlrpc:"orderItem,omitempty"` + + // The question that is being answered. + Question *Product_Item_Category_Question `json:"question,omitempty" xmlrpc:"question,omitempty"` + + // The identifier for the question that the answer belongs to. 
+ QuestionId *int `json:"questionId,omitempty" xmlrpc:"questionId,omitempty"`
+}
+
+// no documentation yet
+type Billing_Order_Note struct {
+ Entity
+
+ // no documentation yet
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ Employee *User_Employee `json:"employee,omitempty" xmlrpc:"employee,omitempty"`
+
+ // no documentation yet
+ Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"`
+}
+
+// The SoftLayer_Billing_Order_Quote data type contains general information relating to an individual order applied to a SoftLayer customer account or to a new customer. Personal information in this type such as names, addresses, and phone numbers is taken from the account's contact information at the time the quote is generated for existing SoftLayer customers.
+type Billing_Order_Quote struct {
+ Entity
+
+ // A quote's corresponding account.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The identification number of the account record tied to the quote.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The identification number of the order record tied to the quote.
+ CompletedPurchaseDataId *int `json:"completedPurchaseDataId,omitempty" xmlrpc:"completedPurchaseDataId,omitempty"`
+
+ // Holds the date the quote record was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // Indicates whether the owner of the quote has chosen to no longer be contacted.
+ DoNotContactFlag *bool `json:"doNotContactFlag,omitempty" xmlrpc:"doNotContactFlag,omitempty"`
+
+ // The date a quote expires; after that date the quote is deemed expired.
+ ExpirationDate *Time `json:"expirationDate,omitempty" xmlrpc:"expirationDate,omitempty"`
+
+ // The id used to identify a quote.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Holds the date the quote record was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The name given to the quote by the initiator.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // This order contains the records for which products were selected for this quote.
+ Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"`
+
+ // These are all the orders that were created from this quote.
+ OrdersFromQuote []Billing_Order `json:"ordersFromQuote,omitempty" xmlrpc:"ordersFromQuote,omitempty"`
+
+ // A count of all the orders that were created from this quote.
+ OrdersFromQuoteCount *uint `json:"ordersFromQuoteCount,omitempty" xmlrpc:"ordersFromQuoteCount,omitempty"`
+
+ // Holds system-generated notes. For example, if a quote is tied to an order where one of the order items has an inactive promotion code, the quote is considered invalid.
+ PublicNote *string `json:"publicNote,omitempty" xmlrpc:"publicNote,omitempty"`
+
+ // Holds the system-generated hash password for the quote.
+ QuoteKey *string `json:"quoteKey,omitempty" xmlrpc:"quoteKey,omitempty"`
+
+ // Holds the current status of a quote: pending, expired, saved or deleted.
+ Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+}
+
+// The SoftLayer_Billing_Order_Type data type contains general information relating to all the different types of orders that exist. 
This data pertains only to where an order was generated from, from any of the SoftLayer websites with ordering interfaces or directly through the SoftLayer API. +type Billing_Order_Type struct { + Entity + + // A brief description of where a SoftLayer order originated from. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A SoftLayer order type's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A simple keyname stating where a SoftLayer order originated from. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Billing_Payment_Card_ChangeRequest data type contains general information relating to attempted credit card information changes. +type Billing_Payment_Card_ChangeRequest struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The account ID to which the credit card and billing information is associated with. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The total amount of the attempted transaction, represented in decimal format as US Dollars ($USD). + Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"` + + // The SoftLayer_Billing_Payment_Card_Transaction tied to the authorization performed as part of this change request. + AuthorizedCreditCardTransaction *Billing_Payment_Card_Transaction `json:"authorizedCreditCardTransaction,omitempty" xmlrpc:"authorizedCreditCardTransaction,omitempty"` + + // The physical street address. Reserve information such as "apartment #123" or "Suite 2" for line 1. + BillingAddressLine1 *string `json:"billingAddressLine1,omitempty" xmlrpc:"billingAddressLine1,omitempty"` + + // The second line in the address. Information such as suite number goes here. + BillingAddressLine2 *string `json:"billingAddressLine2,omitempty" xmlrpc:"billingAddressLine2,omitempty"` + + // The city in which a customer's account resides. + BillingCity *string `json:"billingCity,omitempty" xmlrpc:"billingCity,omitempty"` + + // The 2-character Country code for an account's address. (i.e. US) + BillingCountryCode *string `json:"billingCountryCode,omitempty" xmlrpc:"billingCountryCode,omitempty"` + + // The email address associated with a customer account. + BillingEmail *string `json:"billingEmail,omitempty" xmlrpc:"billingEmail,omitempty"` + + // the company name for an account. + BillingNameCompany *string `json:"billingNameCompany,omitempty" xmlrpc:"billingNameCompany,omitempty"` + + // The first name of the customer account owner. + BillingNameFirst *string `json:"billingNameFirst,omitempty" xmlrpc:"billingNameFirst,omitempty"` + + // The last name of the customer account owner + BillingNameLast *string `json:"billingNameLast,omitempty" xmlrpc:"billingNameLast,omitempty"` + + // The fax number associated with a customer account. + BillingPhoneFax *string `json:"billingPhoneFax,omitempty" xmlrpc:"billingPhoneFax,omitempty"` + + // The phone number associated with a customer account. + BillingPhoneVoice *string `json:"billingPhoneVoice,omitempty" xmlrpc:"billingPhoneVoice,omitempty"` + + // The Zip or Postal Code for the billing address on an account. + BillingPostalCode *string `json:"billingPostalCode,omitempty" xmlrpc:"billingPostalCode,omitempty"` + + // The State for the account. 
+ BillingState *string `json:"billingState,omitempty" xmlrpc:"billingState,omitempty"` + + // The SoftLayer_Billing_Payment_Card_Transaction tied to the capture of funds performed as part of this change request. + CaptureCreditCardTransaction *Billing_Payment_Card_Transaction `json:"captureCreditCardTransaction,omitempty" xmlrpc:"captureCreditCardTransaction,omitempty"` + + // The last 4 digits of a customer's credit card. + CardAccountLast4 *string `json:"cardAccountLast4,omitempty" xmlrpc:"cardAccountLast4,omitempty"` + + // The card number submitted in the change request. + CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"` + + // The month (MM) in which a customer's payment card will expire. + CardExpirationMonth *string `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"` + + // The year (YYYY) in which a customer's payment card will expire. + CardExpirationYear *string `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"` + + // no documentation yet + CardNickname *string `json:"cardNickname,omitempty" xmlrpc:"cardNickname,omitempty"` + + // The type of payment card a customer has. (i.e. Visa, MasterCard, American Express). + CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"` + + // The credit card verification number submitted in the change request. + CreditCardVerificationNumber *string `json:"creditCardVerificationNumber,omitempty" xmlrpc:"creditCardVerificationNumber,omitempty"` + + // Describes the currency selected for payment + CurrencyShortName *string `json:"currencyShortName,omitempty" xmlrpc:"currencyShortName,omitempty"` + + // Device Fingerprint Identifier - Used internally and can safely be ignored. + DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"` + + // The unique identifier for a single change request. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // the notes stored about a customer's payment card. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // no documentation yet + PaymentRoleId *int `json:"paymentRoleId,omitempty" xmlrpc:"paymentRoleId,omitempty"` + + // The description of the type of payment sent in a change transaction. + PaymentType *string `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"` + + // A count of these are tickets tied to a credit card change request. + TicketAttachmentReferenceCount *uint `json:"ticketAttachmentReferenceCount,omitempty" xmlrpc:"ticketAttachmentReferenceCount,omitempty"` + + // These are tickets tied to a credit card change request. + TicketAttachmentReferences []Ticket_Attachment `json:"ticketAttachmentReferences,omitempty" xmlrpc:"ticketAttachmentReferences,omitempty"` + + // Unique identifier for a ticket discussing the switch between payment methods. + TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"` +} + +// The SoftLayer_Billing_Payment_Card_ManualPayment data type contains general information relating to attempted credit card information changes. +type Billing_Payment_Card_ManualPayment struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The account ID to which the credit card and billing information is associated with. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The total amount of the attempted transaction, represented in decimal format as US Dollars ($USD). 
+ Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"` + + // This is the credit card transaction data tied to a credit card manual payment. + AuthorizedCreditCardTransaction *Billing_Payment_Card_Transaction `json:"authorizedCreditCardTransaction,omitempty" xmlrpc:"authorizedCreditCardTransaction,omitempty"` + + // The unique identifier of an attempted credit card transaction. + AuthorizedCreditCardTransactionId *int `json:"authorizedCreditCardTransactionId,omitempty" xmlrpc:"authorizedCreditCardTransactionId,omitempty"` + + // This is the PayPal transaction data tied to a PayPal manual payment. + AuthorizedPayPalTransaction *Billing_Payment_PayPal_Transaction `json:"authorizedPayPalTransaction,omitempty" xmlrpc:"authorizedPayPalTransaction,omitempty"` + + // The unique identifier of an attempted PayPal transaction. + AuthorizedPayPalTransactionId *int `json:"authorizedPayPalTransactionId,omitempty" xmlrpc:"authorizedPayPalTransactionId,omitempty"` + + // The physical street address. Reserve information such as "apartment #123" or "Suite 2" for line 1. + BillingAddressLine1 *string `json:"billingAddressLine1,omitempty" xmlrpc:"billingAddressLine1,omitempty"` + + // The second line in the address. Information such as suite number goes here. + BillingAddressLine2 *string `json:"billingAddressLine2,omitempty" xmlrpc:"billingAddressLine2,omitempty"` + + // The city in which a customer's account resides. + BillingCity *string `json:"billingCity,omitempty" xmlrpc:"billingCity,omitempty"` + + // The 2-character Country code for an account's address. (i.e. US) + BillingCountryCode *string `json:"billingCountryCode,omitempty" xmlrpc:"billingCountryCode,omitempty"` + + // The email address associated with a customer account. + BillingEmail *string `json:"billingEmail,omitempty" xmlrpc:"billingEmail,omitempty"` + + // the company name for an account. + BillingNameCompany *string `json:"billingNameCompany,omitempty" xmlrpc:"billingNameCompany,omitempty"` + + // The first name of the customer account owner. + BillingNameFirst *string `json:"billingNameFirst,omitempty" xmlrpc:"billingNameFirst,omitempty"` + + // The last name of the customer account owner. + BillingNameLast *string `json:"billingNameLast,omitempty" xmlrpc:"billingNameLast,omitempty"` + + // The fax number associated with a customer account. + BillingPhoneFax *string `json:"billingPhoneFax,omitempty" xmlrpc:"billingPhoneFax,omitempty"` + + // The phone number associated with a customer account. + BillingPhoneVoice *string `json:"billingPhoneVoice,omitempty" xmlrpc:"billingPhoneVoice,omitempty"` + + // The Zip or Postal Code for the billing address on an account. + BillingPostalCode *string `json:"billingPostalCode,omitempty" xmlrpc:"billingPostalCode,omitempty"` + + // The State for the account. + BillingState *string `json:"billingState,omitempty" xmlrpc:"billingState,omitempty"` + + // The cancel URL is the page to which PayPal redirects if payment is not approved. + CancelUrl *string `json:"cancelUrl,omitempty" xmlrpc:"cancelUrl,omitempty"` + + // The SoftLayer_Billing_Payment_Card_Transaction tied to the capture performed as part of this manual payment. This will only exist if the manual payment was performed with a credit card. + CaptureCreditCardTransaction *Billing_Payment_Card_Transaction `json:"captureCreditCardTransaction,omitempty" xmlrpc:"captureCreditCardTransaction,omitempty"` + + // The SoftLayer_Billing_Payment_PayPal_Transaction tied to the capture performed as part of this manual payment. 
This will only exist if the manual payment was performed via PayPal. + CapturePayPalTransaction *Billing_Payment_PayPal_Transaction `json:"capturePayPalTransaction,omitempty" xmlrpc:"capturePayPalTransaction,omitempty"` + + // A hash value of the credit card number. + CardAccountHash *string `json:"cardAccountHash,omitempty" xmlrpc:"cardAccountHash,omitempty"` + + // The last 4 digits of a customer's credit card. + CardAccountLast4 *string `json:"cardAccountLast4,omitempty" xmlrpc:"cardAccountLast4,omitempty"` + + // The card number submitted in the change request. + CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"` + + // The month (MM) in which a customer's payment card will expire. + CardExpirationMonth *string `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"` + + // The year (YYYY) in which a customer's payment card will expire. + CardExpirationYear *string `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"` + + // The method key of the type payment issued (Visa - 001, Mastercard - 002, American Express - 003, Discover - 004, PayPal - paypal). + CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"` + + // The credit card verification number submitted in the change request. + CreditCardVerificationNumber *string `json:"creditCardVerificationNumber,omitempty" xmlrpc:"creditCardVerificationNumber,omitempty"` + + // Describes the currency selected for payment + CurrencyShortName *string `json:"currencyShortName,omitempty" xmlrpc:"currencyShortName,omitempty"` + + // Device Fingerprint Identifier - Used internally and can safely be ignored. + DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"` + + // The IP address from which the transaction originates. + FromIpAddress *string `json:"fromIpAddress,omitempty" xmlrpc:"fromIpAddress,omitempty"` + + // The unique identifier for a single manual payment request. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Notes generated as a result of the payment request. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The description of the type of payment sent in a change transaction. + PaymentType *string `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"` + + // The return URL is the page to which PayPal redirects after payment is approved. + ReturnUrl *string `json:"returnUrl,omitempty" xmlrpc:"returnUrl,omitempty"` + + // A count of these are tickets tied to a credit card manual payment. + TicketAttachmentReferenceCount *uint `json:"ticketAttachmentReferenceCount,omitempty" xmlrpc:"ticketAttachmentReferenceCount,omitempty"` + + // These are tickets tied to a credit card manual payment. + TicketAttachmentReferences []Ticket_Attachment `json:"ticketAttachmentReferences,omitempty" xmlrpc:"ticketAttachmentReferences,omitempty"` + + // Describes the type of manual payment (PAYPAL or CREDIT_CARD). + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Billing_Payment_Card_Transaction data type contains general information relating to attempted credit card transactions. +type Billing_Payment_Card_Transaction struct { + Billing_Payment_Transaction + + // The account to which a transaction belongs. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The account ID to which the credit card and billing information is associated with. 
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The total amount of the attempted transaction, represented in decimal format as US Dollars ($USD). + Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"` + + // The physical street address. Reserve information such as "apartment #123" or "Suite 2" for line 1. + BillingAddressLine1 *string `json:"billingAddressLine1,omitempty" xmlrpc:"billingAddressLine1,omitempty"` + + // The second line in the address. Information such as suite number goes here. + BillingAddressLine2 *string `json:"billingAddressLine2,omitempty" xmlrpc:"billingAddressLine2,omitempty"` + + // The city in which a customer's account resides. + BillingCity *string `json:"billingCity,omitempty" xmlrpc:"billingCity,omitempty"` + + // The 2-character Country code for an account's address. (i.e. US) + BillingCountryCode *string `json:"billingCountryCode,omitempty" xmlrpc:"billingCountryCode,omitempty"` + + // The email address associated with a customer account. + BillingEmail *string `json:"billingEmail,omitempty" xmlrpc:"billingEmail,omitempty"` + + // the company name for an account. + BillingNameCompany *string `json:"billingNameCompany,omitempty" xmlrpc:"billingNameCompany,omitempty"` + + // The first name of the customer account owner. + BillingNameFirst *string `json:"billingNameFirst,omitempty" xmlrpc:"billingNameFirst,omitempty"` + + // The last name of the customer account owner. + BillingNameLast *string `json:"billingNameLast,omitempty" xmlrpc:"billingNameLast,omitempty"` + + // The fax number associated with a customer account. + BillingPhoneFax *string `json:"billingPhoneFax,omitempty" xmlrpc:"billingPhoneFax,omitempty"` + + // The phone number associated with a customer account. + BillingPhoneVoice *string `json:"billingPhoneVoice,omitempty" xmlrpc:"billingPhoneVoice,omitempty"` + + // The Zip or Postal Code for the billing address on an account. + BillingPostalCode *string `json:"billingPostalCode,omitempty" xmlrpc:"billingPostalCode,omitempty"` + + // The State for the account. + BillingState *string `json:"billingState,omitempty" xmlrpc:"billingState,omitempty"` + + // The last 4 digits of a customer's credit card. + CardAccountLast4 *int `json:"cardAccountLast4,omitempty" xmlrpc:"cardAccountLast4,omitempty"` + + // The month (MM) in which a customer's payment card will expire. + CardExpirationMonth *int `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"` + + // The year (YYYY) in which a customer's payment card will expire. + CardExpirationYear *int `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"` + + // The type of payment issued (i.e. Visa, MasterCard, American Express). + CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"` + + // The date that the transaction was attempted. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The unique identifier for a single credit card transaction request. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Unique identifier of the invoice to which funds will be applied. + InvoiceId *int `json:"invoiceId,omitempty" xmlrpc:"invoiceId,omitempty"` + + // The date that the transaction was modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"` + + // The IP address from which the transaction originates. 
+ OrderFromIpAddress *string `json:"orderFromIpAddress,omitempty" xmlrpc:"orderFromIpAddress,omitempty"` + + // A code used by the financial institution to refer to the requested transaction. + ReferenceCode *string `json:"referenceCode,omitempty" xmlrpc:"referenceCode,omitempty"` + + // The unique identifier of the request submitted to the financial institution. + RequestId *string `json:"requestId,omitempty" xmlrpc:"requestId,omitempty"` + + // The status code returned from the financial institution. + ReturnStatus *int `json:"returnStatus,omitempty" xmlrpc:"returnStatus,omitempty"` + + // A serialized, delimited string of the transaction request sent to the financial institution. + SerializedReply *string `json:"serializedReply,omitempty" xmlrpc:"serializedReply,omitempty"` + + // A serialized, delimited string of the transaction request sent to the financial institution. + SerializedRequest *string `json:"serializedRequest,omitempty" xmlrpc:"serializedRequest,omitempty"` +} + +// The SoftLayer_Billing_Payment_PayPal_Transaction data type contains general information relating to attempted PayPal transactions. +type Billing_Payment_PayPal_Transaction struct { + Billing_Payment_Transaction + + // The account to which a transaction belongs. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The account ID to which the PayPal and billing information is associated with. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // City given in the address of the PayPal user. + AddressCityName *string `json:"addressCityName,omitempty" xmlrpc:"addressCityName,omitempty"` + + // Country given in the named address of the PayPal user. + AddressCountry *string `json:"addressCountry,omitempty" xmlrpc:"addressCountry,omitempty"` + + // Name given to the address provided for the PayPal user. + AddressName *string `json:"addressName,omitempty" xmlrpc:"addressName,omitempty"` + + // Postal Code of the address of the PayPal user. + AddressPostalCode *string `json:"addressPostalCode,omitempty" xmlrpc:"addressPostalCode,omitempty"` + + // State or Province in the address of the PayPal user. + AddressStateProvence *string `json:"addressStateProvence,omitempty" xmlrpc:"addressStateProvence,omitempty"` + + // PayPal defined status of the address of the PayPal user. + AddressStatus *string `json:"addressStatus,omitempty" xmlrpc:"addressStatus,omitempty"` + + // First line of the street address of the PayPal user. + AddressStreet1 *string `json:"addressStreet1,omitempty" xmlrpc:"addressStreet1,omitempty"` + + // Second line of the street address of the PayPal user. + AddressStreet2 *string `json:"addressStreet2,omitempty" xmlrpc:"addressStreet2,omitempty"` + + // Phone number provided for the PayPal user. + ContactPhone *string `json:"contactPhone,omitempty" xmlrpc:"contactPhone,omitempty"` + + // The date that the transaction was attempted. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Exchange rate imposed on the payment amount. + ExchangeRate *string `json:"exchangeRate,omitempty" xmlrpc:"exchangeRate,omitempty"` + + // PayPal fee applied to the payment. + FeeAmount *Float64 `json:"feeAmount,omitempty" xmlrpc:"feeAmount,omitempty"` + + // The total amount of the payment executed by PayPal, represented in decimal format as US Dollars ($USD). + GrossAmount *Float64 `json:"grossAmount,omitempty" xmlrpc:"grossAmount,omitempty"` + + // The unique identifier for a single PayPal transaction request. 
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Unique identifier of the invoice to which funds will be applied. + InvoiceId *int `json:"invoiceId,omitempty" xmlrpc:"invoiceId,omitempty"` + + // The name of the command issued to PayPal with regards to the attempted transaction. + LastPaypalCommand *string `json:"lastPaypalCommand,omitempty" xmlrpc:"lastPaypalCommand,omitempty"` + + // The date that the transaction was modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"` + + // The IP address from where the PayPal payment request originated. + OrderFromIpAddress *string `json:"orderFromIpAddress,omitempty" xmlrpc:"orderFromIpAddress,omitempty"` + + // The amount of the payment submitted through the SoftLayer interface, represented in decimal format as US Dollars ($USD). + OrderTotal *Float64 `json:"orderTotal,omitempty" xmlrpc:"orderTotal,omitempty"` + + // The PayPal user account name (email address) associated with the customer account. + Payer *string `json:"payer,omitempty" xmlrpc:"payer,omitempty"` + + // The name of the business associated with the PayPal user. + PayerBusiness *string `json:"payerBusiness,omitempty" xmlrpc:"payerBusiness,omitempty"` + + // Country given in the address of the PayPal user. + PayerCountry *string `json:"payerCountry,omitempty" xmlrpc:"payerCountry,omitempty"` + + // First name of the PayPal user. + PayerFirstName *string `json:"payerFirstName,omitempty" xmlrpc:"payerFirstName,omitempty"` + + // Unique PayPal user account identifier. + PayerId *string `json:"payerId,omitempty" xmlrpc:"payerId,omitempty"` + + // Last name of the PayPal user. + PayerLastName *string `json:"payerLastName,omitempty" xmlrpc:"payerLastName,omitempty"` + + // Current PayPal status associated with the user account. + PayerStatus *string `json:"payerStatus,omitempty" xmlrpc:"payerStatus,omitempty"` + + // Date that the payment was confirmed in PayPal by the user. + PaymentDate *Time `json:"paymentDate,omitempty" xmlrpc:"paymentDate,omitempty"` + + // PayPal defined status of the attempted payment. + PaymentStatus *string `json:"paymentStatus,omitempty" xmlrpc:"paymentStatus,omitempty"` + + // PayPal defined code used to identify the type of payment. Provided in a PayPal response. + PaymentType *string `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"` + + // Reason provided by PayPal for a payment given a pending status. + PendingReason *string `json:"pendingReason,omitempty" xmlrpc:"pendingReason,omitempty"` + + // A serialized, delimited string of the reply received from PayPal. + SerializedReply *string `json:"serializedReply,omitempty" xmlrpc:"serializedReply,omitempty"` + + // A serialized, delimited string of the request submitted to PayPal. + SerializedRequest *string `json:"serializedRequest,omitempty" xmlrpc:"serializedRequest,omitempty"` + + // PayPal defined fee. + SettleAmount *Float64 `json:"settleAmount,omitempty" xmlrpc:"settleAmount,omitempty"` + + // Tax applied by PayPal to the payment amount. + TaxAmount *Float64 `json:"taxAmount,omitempty" xmlrpc:"taxAmount,omitempty"` + + // Value issued by PayPal for referencing the attempted transaction. + Token *string `json:"token,omitempty" xmlrpc:"token,omitempty"` + + // Unique transaction ID provided in a PayPal response. 
+ TransactionId *string `json:"transactionId,omitempty" xmlrpc:"transactionId,omitempty"` + + // PayPal defined code used to identify the type of transaction. Provided in a PayPal response. + TransactionType *string `json:"transactionType,omitempty" xmlrpc:"transactionType,omitempty"` +} + +// no documentation yet +type Billing_Payment_Processor struct { + Entity + + // A count of + BrandAssignmentCount *uint `json:"brandAssignmentCount,omitempty" xmlrpc:"brandAssignmentCount,omitempty"` + + // no documentation yet + BrandAssignments []Brand_Payment_Processor `json:"brandAssignments,omitempty" xmlrpc:"brandAssignments,omitempty"` + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + OwnerAccount *Account `json:"ownerAccount,omitempty" xmlrpc:"ownerAccount,omitempty"` + + // A count of + PaymentMethodCount *uint `json:"paymentMethodCount,omitempty" xmlrpc:"paymentMethodCount,omitempty"` + + // no documentation yet + PaymentMethods []Billing_Payment_Processor_Method `json:"paymentMethods,omitempty" xmlrpc:"paymentMethods,omitempty"` + + // no documentation yet + Type *Billing_Payment_Processor_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Billing_Payment_Processor_Method struct { + Entity + + // no documentation yet + MethodKey *string `json:"methodKey,omitempty" xmlrpc:"methodKey,omitempty"` + + // no documentation yet + MultipleCurrencyFlag *bool `json:"multipleCurrencyFlag,omitempty" xmlrpc:"multipleCurrencyFlag,omitempty"` + + // no documentation yet + PaymentProcessor *Billing_Payment_Processor `json:"paymentProcessor,omitempty" xmlrpc:"paymentProcessor,omitempty"` + + // no documentation yet + PaymentType *Billing_Payment_Type `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"` +} + +// no documentation yet +type Billing_Payment_Processor_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of + PaymentProcessorCount *uint `json:"paymentProcessorCount,omitempty" xmlrpc:"paymentProcessorCount,omitempty"` + + // no documentation yet + PaymentProcessors []Billing_Payment_Processor `json:"paymentProcessors,omitempty" xmlrpc:"paymentProcessors,omitempty"` +} + +// Implementation for payment transactions. +type Billing_Payment_Transaction struct { + Entity +} + +// no documentation yet +type Billing_Payment_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/brand.go b/vendor/github.com/softlayer/softlayer-go/datatypes/brand.go new file mode 100644 index 000000000000..2e3b49cee73e --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/brand.go @@ -0,0 +1,262 @@ +/** + * Copyright 2016 IBM Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Brand data type contains brand information relating to the single SoftLayer customer account. +// +// SoftLayer customers are unable to change their brand information in the portal or the API. +type Brand struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A count of all accounts owned by the brand. + AllOwnedAccountCount *uint `json:"allOwnedAccountCount,omitempty" xmlrpc:"allOwnedAccountCount,omitempty"` + + // All accounts owned by the brand. + AllOwnedAccounts []Account `json:"allOwnedAccounts,omitempty" xmlrpc:"allOwnedAccounts,omitempty"` + + // This flag indicates if creation of accounts is allowed. + AllowAccountCreationFlag *bool `json:"allowAccountCreationFlag,omitempty" xmlrpc:"allowAccountCreationFlag,omitempty"` + + // Business Partner details for the brand. Country Enterprise Code, Channel, Segment, Reseller Level. + BusinessPartner *Brand_Business_Partner `json:"businessPartner,omitempty" xmlrpc:"businessPartner,omitempty"` + + // Flag indicating if the brand is a business partner. + BusinessPartnerFlag *bool `json:"businessPartnerFlag,omitempty" xmlrpc:"businessPartnerFlag,omitempty"` + + // The Product Catalog for the Brand + Catalog *Product_Catalog `json:"catalog,omitempty" xmlrpc:"catalog,omitempty"` + + // ID of the Catalog used by this Brand + CatalogId *int `json:"catalogId,omitempty" xmlrpc:"catalogId,omitempty"` + + // A count of the contacts for the brand. + ContactCount *uint `json:"contactCount,omitempty" xmlrpc:"contactCount,omitempty"` + + // The contacts for the brand. + Contacts []Brand_Contact `json:"contacts,omitempty" xmlrpc:"contacts,omitempty"` + + // A count of this references relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on this brand for customers that live in Great Britain. + CustomerCountryLocationRestrictionCount *uint `json:"customerCountryLocationRestrictionCount,omitempty" xmlrpc:"customerCountryLocationRestrictionCount,omitempty"` + + // This references relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on this brand for customers that live in Great Britain. 
+ CustomerCountryLocationRestrictions []Brand_Restriction_Location_CustomerCountry `json:"customerCountryLocationRestrictions,omitempty" xmlrpc:"customerCountryLocationRestrictions,omitempty"` + + // no documentation yet + Distributor *Brand `json:"distributor,omitempty" xmlrpc:"distributor,omitempty"` + + // no documentation yet + DistributorChildFlag *bool `json:"distributorChildFlag,omitempty" xmlrpc:"distributorChildFlag,omitempty"` + + // no documentation yet + DistributorFlag *string `json:"distributorFlag,omitempty" xmlrpc:"distributorFlag,omitempty"` + + // An account's associated hardware objects. + Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // A count of an account's associated hardware objects. + HardwareCount *uint `json:"hardwareCount,omitempty" xmlrpc:"hardwareCount,omitempty"` + + // no documentation yet + HasAgentSupportFlag *bool `json:"hasAgentSupportFlag,omitempty" xmlrpc:"hasAgentSupportFlag,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The brand key name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The brand long name. + LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"` + + // The brand name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of + OpenTicketCount *uint `json:"openTicketCount,omitempty" xmlrpc:"openTicketCount,omitempty"` + + // no documentation yet + OpenTickets []Ticket `json:"openTickets,omitempty" xmlrpc:"openTickets,omitempty"` + + // A count of active accounts owned by the brand. + OwnedAccountCount *uint `json:"ownedAccountCount,omitempty" xmlrpc:"ownedAccountCount,omitempty"` + + // Active accounts owned by the brand. + OwnedAccounts []Account `json:"ownedAccounts,omitempty" xmlrpc:"ownedAccounts,omitempty"` + + // no documentation yet + SecurityLevel *Security_Level `json:"securityLevel,omitempty" xmlrpc:"securityLevel,omitempty"` + + // A count of + TicketCount *uint `json:"ticketCount,omitempty" xmlrpc:"ticketCount,omitempty"` + + // A count of + TicketGroupCount *uint `json:"ticketGroupCount,omitempty" xmlrpc:"ticketGroupCount,omitempty"` + + // no documentation yet + TicketGroups []Ticket_Group `json:"ticketGroups,omitempty" xmlrpc:"ticketGroups,omitempty"` + + // no documentation yet + Tickets []Ticket `json:"tickets,omitempty" xmlrpc:"tickets,omitempty"` + + // A count of + UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"` + + // no documentation yet + Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"` + + // A count of an account's associated virtual guest objects. + VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"` + + // An account's associated virtual guest objects. + VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"` +} + +// no documentation yet +type Brand_Attribute struct { + Entity + + // no documentation yet + Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"` +} + +// Contains business partner details associated with a brand. Country Enterprise Identifier (CEID), Channel ID, Segment ID and Reseller Level. +type Brand_Business_Partner struct { + Entity + + // Brand associated with the business partner data + Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"` + + // Channel indicator used to categorize business partner revenue. 
+ Channel *Business_Partner_Channel `json:"channel,omitempty" xmlrpc:"channel,omitempty"` + + // Brand business partner channel identifier + ChannelId *int `json:"channelId,omitempty" xmlrpc:"channelId,omitempty"` + + // Brand business partner country enterprise code + CountryEnterpriseCode *string `json:"countryEnterpriseCode,omitempty" xmlrpc:"countryEnterpriseCode,omitempty"` + + // Reseller level of a brand business partner + ResellerLevel *int `json:"resellerLevel,omitempty" xmlrpc:"resellerLevel,omitempty"` + + // Segment indicator used to categorize business partner revenue. + Segment *Business_Partner_Segment `json:"segment,omitempty" xmlrpc:"segment,omitempty"` + + // Brand business partner segment identifier + SegmentId *int `json:"segmentId,omitempty" xmlrpc:"segmentId,omitempty"` +} + +// SoftLayer_Brand_Contact contains the contact information for the brand such as Corporate or Support contact information +type Brand_Contact struct { + Entity + + // The contact's address 1. + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // The contact's address 2. + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // The contact's alternate phone number. + AlternatePhone *string `json:"alternatePhone,omitempty" xmlrpc:"alternatePhone,omitempty"` + + // no documentation yet + Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"` + + // no documentation yet + BrandContactType *Brand_Contact_Type `json:"brandContactType,omitempty" xmlrpc:"brandContactType,omitempty"` + + // The contact's type identifier. + BrandContactTypeId *int `json:"brandContactTypeId,omitempty" xmlrpc:"brandContactTypeId,omitempty"` + + // The contact's city. + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // The contact's country. + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // The contact's email address. + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // The contact's fax number. + FaxPhone *string `json:"faxPhone,omitempty" xmlrpc:"faxPhone,omitempty"` + + // The contact's first name. + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // The contact's last name. + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // The contact's phone number. + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // The contact's postal code. + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // The contact's state. + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// SoftLayer_Brand_Contact_Type contains the contact type information for the brand contacts such as Corporate or Support contact type +type Brand_Contact_Type struct { + Entity + + // Contact type description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Contact type key name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // Contact type name. 
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Brand_Payment_Processor struct { + Entity + + // no documentation yet + Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"` + + // no documentation yet + PaymentProcessor *Billing_Payment_Processor `json:"paymentProcessor,omitempty" xmlrpc:"paymentProcessor,omitempty"` +} + +// The [[SoftLayer_Brand_Restriction_Location_CustomerCountry]] data type defines the relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on the SoftLayer US brand for customers that live in Great Britain. +type Brand_Restriction_Location_CustomerCountry struct { + Entity + + // This references the brand that has a brand-location-country restriction setup. + Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"` + + // The brand associated with customer's account. + BrandId *int `json:"brandId,omitempty" xmlrpc:"brandId,omitempty"` + + // country code associated with customer's account. + CustomerCountryCode *string `json:"customerCountryCode,omitempty" xmlrpc:"customerCountryCode,omitempty"` + + // This references the datacenter that has a brand-location-country restriction setup. For example, if a datacenter is listed with a restriction for Canada, a Canadian customer may not be eligible to order services at that location. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // The id for datacenter location. + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/business.go b/vendor/github.com/softlayer/softlayer-go/datatypes/business.go new file mode 100644 index 000000000000..0d57545bca0f --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/business.go @@ -0,0 +1,43 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// Contains business partner channel information +type Business_Partner_Channel struct { + Entity + + // Business partner channel description + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Business partner channel name + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} + +// Contains business partner segment information +type Business_Partner_Segment struct { + Entity + + // Business partner segment description + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Business partner segment name + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/catalyst.go b/vendor/github.com/softlayer/softlayer-go/datatypes/catalyst.go new file mode 100644 index 000000000000..3f8824abe472 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/catalyst.go @@ -0,0 +1,211 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Catalyst_Affiliate struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + SkipCreditCardVerificationFlag *bool `json:"skipCreditCardVerificationFlag,omitempty" xmlrpc:"skipCreditCardVerificationFlag,omitempty"` +} + +// no documentation yet +type Catalyst_Company_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// no documentation yet +type Catalyst_Enrollment struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + Affiliate *Catalyst_Affiliate `json:"affiliate,omitempty" xmlrpc:"affiliate,omitempty"` + + // no documentation yet + AffiliateId *int `json:"affiliateId,omitempty" xmlrpc:"affiliateId,omitempty"` + + // no documentation yet + AgreementCompleteFlag *int `json:"agreementCompleteFlag,omitempty" xmlrpc:"agreementCompleteFlag,omitempty"` + + // no documentation yet + CompanyDescription *string `json:"companyDescription,omitempty" xmlrpc:"companyDescription,omitempty"` + + // no documentation yet + CompanyType *Catalyst_Company_Type `json:"companyType,omitempty" xmlrpc:"companyType,omitempty"` + + // no documentation yet + CompanyTypeId *int `json:"companyTypeId,omitempty" xmlrpc:"companyTypeId,omitempty"` + + // no documentation yet + EnrollmentDate *Time `json:"enrollmentDate,omitempty" 
xmlrpc:"enrollmentDate,omitempty"` + + // no documentation yet + GraduationDate *Time `json:"graduationDate,omitempty" xmlrpc:"graduationDate,omitempty"` + + // no documentation yet + IsActiveFlag *bool `json:"isActiveFlag,omitempty" xmlrpc:"isActiveFlag,omitempty"` + + // no documentation yet + MonthlyCreditAmount *Float64 `json:"monthlyCreditAmount,omitempty" xmlrpc:"monthlyCreditAmount,omitempty"` + + // no documentation yet + Representative *User_Employee `json:"representative,omitempty" xmlrpc:"representative,omitempty"` + + // no documentation yet + RepresentativeEmployeeId *int `json:"representativeEmployeeId,omitempty" xmlrpc:"representativeEmployeeId,omitempty"` +} + +// Contains user information for Catalyst self-enrollment. +type Catalyst_Enrollment_Request struct { + Entity + + // Applicant's address + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // Additional field for extended address + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // no documentation yet + Affiliate *Catalyst_Affiliate `json:"affiliate,omitempty" xmlrpc:"affiliate,omitempty"` + + // Id of the affiliate who referred applicant's + AffiliateId *int `json:"affiliateId,omitempty" xmlrpc:"affiliateId,omitempty"` + + // no documentation yet + AgreementCompleteFlag *bool `json:"agreementCompleteFlag,omitempty" xmlrpc:"agreementCompleteFlag,omitempty"` + + // Determines whether or not to also apply to the GEP program + ApplyToGepFlag *bool `json:"applyToGepFlag,omitempty" xmlrpc:"applyToGepFlag,omitempty"` + + // no documentation yet + CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"` + + // no documentation yet + CardExpirationMonth *string `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"` + + // no documentation yet + CardExpirationYear *string `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"` + + // no documentation yet + CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"` + + // no documentation yet + CardVerificationNumber *string `json:"cardVerificationNumber,omitempty" xmlrpc:"cardVerificationNumber,omitempty"` + + // Applicant's city + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Brief description of Startup's product and key differentiators + CompanyDescription *string `json:"companyDescription,omitempty" xmlrpc:"companyDescription,omitempty"` + + // Name of the applicant's company + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // no documentation yet + CompanyType *Catalyst_Company_Type `json:"companyType,omitempty" xmlrpc:"companyType,omitempty"` + + // Id of the company type which best describes applicant's company + CompanyTypeId *int `json:"companyTypeId,omitempty" xmlrpc:"companyTypeId,omitempty"` + + // URL to the Startup's site + CompanyUrl *string `json:"companyUrl,omitempty" xmlrpc:"companyUrl,omitempty"` + + // Applicant's country code + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // Index of answer chosen for how many current users question + CurrentUserChoice *int `json:"currentUserChoice,omitempty" xmlrpc:"currentUserChoice,omitempty"` + + // Id of the fingerprint + DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"` + + // Applicant's email address + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // Applicant's first name + FirstName *string 
`json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // Index of answer chosen for how many future users question + FutureUserChoice *int `json:"futureUserChoice,omitempty" xmlrpc:"futureUserChoice,omitempty"` + + // Name of accelerator or incubator startup belongs to, if any + IncubatorName *string `json:"incubatorName,omitempty" xmlrpc:"incubatorName,omitempty"` + + // Name of the investor, if any + InvestorName *string `json:"investorName,omitempty" xmlrpc:"investorName,omitempty"` + + // Applicant's last name + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // Applicant's primary phone number + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // Whether or not the startup has been operating for more than five years + OverFiveYearsOldFlag *bool `json:"overFiveYearsOldFlag,omitempty" xmlrpc:"overFiveYearsOldFlag,omitempty"` + + // Applicant's postal code + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // IBM referral code, if any + ReferralCode *string `json:"referralCode,omitempty" xmlrpc:"referralCode,omitempty"` + + // Whether or not the startup has over one million in annual revenue + RevenueOverOneMillionFlag *bool `json:"revenueOverOneMillionFlag,omitempty" xmlrpc:"revenueOverOneMillionFlag,omitempty"` + + // Determines whether or not to apply to the Catalyst program + SkipCatalystApplicationFlag *bool `json:"skipCatalystApplicationFlag,omitempty" xmlrpc:"skipCatalystApplicationFlag,omitempty"` + + // Applicant's state/region code + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // Applicant's vatId, if one exists + VatId *string `json:"vatId,omitempty" xmlrpc:"vatId,omitempty"` +} + +// no documentation yet +type Catalyst_Enrollment_Request_Container_AnswerOption struct { + Entity + + // no documentation yet + Answer *string `json:"answer,omitempty" xmlrpc:"answer,omitempty"` + + // no documentation yet + Index *int `json:"index,omitempty" xmlrpc:"index,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/compliance.go b/vendor/github.com/softlayer/softlayer-go/datatypes/compliance.go new file mode 100644 index 000000000000..0efbbf9292e7 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/compliance.go @@ -0,0 +1,35 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Compliance_Report_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/configuration.go b/vendor/github.com/softlayer/softlayer-go/datatypes/configuration.go new file mode 100644 index 000000000000..81777b526e79 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/configuration.go @@ -0,0 +1,553 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Configuration_Storage_Filesystem_Type struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Supported hardware raid modes +type Configuration_Storage_Group_Array_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + DriveMultiplier *int `json:"driveMultiplier,omitempty" xmlrpc:"driveMultiplier,omitempty"` + + // A count of + HardwareComponentModelCount *uint `json:"hardwareComponentModelCount,omitempty" xmlrpc:"hardwareComponentModelCount,omitempty"` + + // no documentation yet + HardwareComponentModels []Hardware_Component_Model `json:"hardwareComponentModels,omitempty" xmlrpc:"hardwareComponentModels,omitempty"` + + // no documentation yet + HotspareAllow *bool `json:"hotspareAllow,omitempty" xmlrpc:"hotspareAllow,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + MaximumDrives *int `json:"maximumDrives,omitempty" xmlrpc:"maximumDrives,omitempty"` + + // no documentation yet + MinimumDrives *int `json:"minimumDrives,omitempty" xmlrpc:"minimumDrives,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Single storage group(array) used for a hardware server order. +// +// If a raid configuration is required this object will describe a single array that will be configured on the server. If the server requires more than one array, a storage group will need to be created for each array. 
+type Configuration_Storage_Group_Order struct { + Entity + + // no documentation yet + ArrayNumber *int `json:"arrayNumber,omitempty" xmlrpc:"arrayNumber,omitempty"` + + // no documentation yet + ArraySize *Float64 `json:"arraySize,omitempty" xmlrpc:"arraySize,omitempty"` + + // Raid mode for the storage group. + ArrayType *Configuration_Storage_Group_Array_Type `json:"arrayType,omitempty" xmlrpc:"arrayType,omitempty"` + + // no documentation yet + ArrayTypeId *int `json:"arrayTypeId,omitempty" xmlrpc:"arrayTypeId,omitempty"` + + // The order item that relates to this storage group. + BillingOrderItem *Billing_Order_Item `json:"billingOrderItem,omitempty" xmlrpc:"billingOrderItem,omitempty"` + + // no documentation yet + BillingOrderItemId *int `json:"billingOrderItemId,omitempty" xmlrpc:"billingOrderItemId,omitempty"` + + // no documentation yet + Controller *int `json:"controller,omitempty" xmlrpc:"controller,omitempty"` + + // no documentation yet + HardDrives []int `json:"hardDrives,omitempty" xmlrpc:"hardDrives,omitempty"` + + // no documentation yet + HotSpareDrives []int `json:"hotSpareDrives,omitempty" xmlrpc:"hotSpareDrives,omitempty"` + + // no documentation yet + LvmFlag *bool `json:"lvmFlag,omitempty" xmlrpc:"lvmFlag,omitempty"` + + // no documentation yet + PartitionData *string `json:"partitionData,omitempty" xmlrpc:"partitionData,omitempty"` +} + +// Single storage group(array) used in a storage group template. +// +// If a server configuration requires a raid configuration this object will describe a single array to be configured. +type Configuration_Storage_Group_Template_Group struct { + Entity + + // The disk controller for the array. + DiskControllerIndex *int `json:"diskControllerIndex,omitempty" xmlrpc:"diskControllerIndex,omitempty"` + + // Flag to use all available space. + Grow *bool `json:"grow,omitempty" xmlrpc:"grow,omitempty"` + + // Comma delimited integers of drive indexes for the array. This can also be the string 'all' to specify all drives in the server + HardDrivesString *string `json:"hardDrivesString,omitempty" xmlrpc:"hardDrivesString,omitempty"` + + // Comma delimited integers of drive indexes for hot spares on the array. + HotSpareDrivesString *string `json:"hotSpareDrivesString,omitempty" xmlrpc:"hotSpareDrivesString,omitempty"` + + // The order of the arrays in the template. + OrderIndex *int `json:"orderIndex,omitempty" xmlrpc:"orderIndex,omitempty"` + + // Size of array. Must be within limitations of the smallest drive and raid mode + Size *Float64 `json:"size,omitempty" xmlrpc:"size,omitempty"` + + // no documentation yet + Type *Configuration_Storage_Group_Array_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Configuration_Template data type contains general information of an arbitrary resource. 
+type Configuration_Template struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // Internal identifier of a SoftLayer account that this configuration template belongs to + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of + ConfigurationSectionCount *uint `json:"configurationSectionCount,omitempty" xmlrpc:"configurationSectionCount,omitempty"` + + // no documentation yet + ConfigurationSections []Configuration_Template_Section `json:"configurationSections,omitempty" xmlrpc:"configurationSections,omitempty"` + + // no documentation yet + ConfigurationTemplateReference []Monitoring_Agent_Configuration_Template_Group_Reference `json:"configurationTemplateReference,omitempty" xmlrpc:"configurationTemplateReference,omitempty"` + + // A count of + ConfigurationTemplateReferenceCount *uint `json:"configurationTemplateReferenceCount,omitempty" xmlrpc:"configurationTemplateReferenceCount,omitempty"` + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of + DefaultValueCount *uint `json:"defaultValueCount,omitempty" xmlrpc:"defaultValueCount,omitempty"` + + // no documentation yet + DefaultValues []Configuration_Template_Section_Definition_Value `json:"defaultValues,omitempty" xmlrpc:"defaultValues,omitempty"` + + // A count of + DefinitionCount *uint `json:"definitionCount,omitempty" xmlrpc:"definitionCount,omitempty"` + + // no documentation yet + Definitions []Configuration_Template_Section_Definition `json:"definitions,omitempty" xmlrpc:"definitions,omitempty"` + + // Configuration template description + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a configuration template. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // Internal identifier of a product item that this configuration template is associated with + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // no documentation yet + LinkedSectionReferences *Configuration_Template_Section_Reference `json:"linkedSectionReferences,omitempty" xmlrpc:"linkedSectionReferences,omitempty"` + + // Last modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Configuration template name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Parent *Configuration_Template `json:"parent,omitempty" xmlrpc:"parent,omitempty"` + + // Internal identifier of the parent configuration template + ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"` + + // no documentation yet + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // Internal identifier of a user that last modified this configuration template + UserRecordId *int `json:"userRecordId,omitempty" xmlrpc:"userRecordId,omitempty"` +} + +// Configuration template attribute class contains supplementary information for a configuration template. 
+type Configuration_Template_Attribute struct { + Entity + + // no documentation yet + ConfigurationTemplate *Configuration_Template `json:"configurationTemplate,omitempty" xmlrpc:"configurationTemplate,omitempty"` + + // Value of a configuration template attribute + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_Configuration_Template_Section data type contains information of a configuration section. +// +// Configuration can contain sub-sections. +type Configuration_Template_Section struct { + Entity + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of + DefinitionCount *uint `json:"definitionCount,omitempty" xmlrpc:"definitionCount,omitempty"` + + // no documentation yet + Definitions []Configuration_Template_Section_Definition `json:"definitions,omitempty" xmlrpc:"definitions,omitempty"` + + // Configuration section description + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + DisallowedDeletionFlag *bool `json:"disallowedDeletionFlag,omitempty" xmlrpc:"disallowedDeletionFlag,omitempty"` + + // Internal identifier of a configuration section. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + LinkedTemplate *Configuration_Template `json:"linkedTemplate,omitempty" xmlrpc:"linkedTemplate,omitempty"` + + // Internal identifier of a sub configuration template that this section points to. Use this property if you wish to create a reference to a sub configuration template when creating a linked section. + LinkedTemplateId *string `json:"linkedTemplateId,omitempty" xmlrpc:"linkedTemplateId,omitempty"` + + // no documentation yet + LinkedTemplateReference *Configuration_Template_Section_Reference `json:"linkedTemplateReference,omitempty" xmlrpc:"linkedTemplateReference,omitempty"` + + // Last modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Configuration section name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Internal identifier of the parent configuration section + ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"` + + // A count of + ProfileCount *uint `json:"profileCount,omitempty" xmlrpc:"profileCount,omitempty"` + + // no documentation yet + Profiles []Configuration_Template_Section_Profile `json:"profiles,omitempty" xmlrpc:"profiles,omitempty"` + + // no documentation yet + SectionType *Configuration_Template_Section_Type `json:"sectionType,omitempty" xmlrpc:"sectionType,omitempty"` + + // no documentation yet + SectionTypeName *string `json:"sectionTypeName,omitempty" xmlrpc:"sectionTypeName,omitempty"` + + // Sort order + Sort *int `json:"sort,omitempty" xmlrpc:"sort,omitempty"` + + // A count of + SubSectionCount *uint `json:"subSectionCount,omitempty" xmlrpc:"subSectionCount,omitempty"` + + // no documentation yet + SubSections []Configuration_Template_Section `json:"subSections,omitempty" xmlrpc:"subSections,omitempty"` + + // no documentation yet + Template *Configuration_Template `json:"template,omitempty" xmlrpc:"template,omitempty"` + + // Internal identifier of a configuration template that this section belongs to + TemplateId *string `json:"templateId,omitempty" xmlrpc:"templateId,omitempty"` + + // Internal identifier of the configuration section type + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// Configuration section attribute class contains supplementary 
information for a configuration section. +type Configuration_Template_Section_Attribute struct { + Entity + + // no documentation yet + ConfigurationSection *Configuration_Template_Section `json:"configurationSection,omitempty" xmlrpc:"configurationSection,omitempty"` + + // Value of a configuration section attribute + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// Configuration definition gives you details of the value that you're setting. +// +// Some monitoring agents requires values unique to your system. If value type is defined as "Resource Specific Values", you will have to make an additional API call to retrieve your system specific values. +// +// See [[SoftLayer_Monitoring_Agent::getAvailableConfigurationValues|Monitoring Agent]] service to retrieve your system specific values. +type Configuration_Template_Section_Definition struct { + Entity + + // A count of + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // no documentation yet + Attributes []Configuration_Template_Section_Definition_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + DefaultValue *Configuration_Template_Section_Definition_Value `json:"defaultValue,omitempty" xmlrpc:"defaultValue,omitempty"` + + // Description of a configuration definition. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Enumeration values separated by comma. + EnumerationValues *string `json:"enumerationValues,omitempty" xmlrpc:"enumerationValues,omitempty"` + + // no documentation yet + Group *Configuration_Template_Section_Definition_Group `json:"group,omitempty" xmlrpc:"group,omitempty"` + + // Definition group id. + GroupId *string `json:"groupId,omitempty" xmlrpc:"groupId,omitempty"` + + // Internal identifier of a configuration definition. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Maximum value of a configuration definition. + MaximumValue *string `json:"maximumValue,omitempty" xmlrpc:"maximumValue,omitempty"` + + // Minimum value of a configuration definition. + MinimumValue *string `json:"minimumValue,omitempty" xmlrpc:"minimumValue,omitempty"` + + // Last modify date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + MonitoringDataFlag *bool `json:"monitoringDataFlag,omitempty" xmlrpc:"monitoringDataFlag,omitempty"` + + // Configuration definition name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Definition path. + Path *string `json:"path,omitempty" xmlrpc:"path,omitempty"` + + // Indicates if a configuration value is required for this definition. + RequireValueFlag *int `json:"requireValueFlag,omitempty" xmlrpc:"requireValueFlag,omitempty"` + + // no documentation yet + Section *Configuration_Template_Section `json:"section,omitempty" xmlrpc:"section,omitempty"` + + // Internal identifier of a configuration section. + SectionId *int `json:"sectionId,omitempty" xmlrpc:"sectionId,omitempty"` + + // Shortened configuration definition name. + ShortName *string `json:"shortName,omitempty" xmlrpc:"shortName,omitempty"` + + // Sort order + Sort *int `json:"sort,omitempty" xmlrpc:"sort,omitempty"` + + // Internal identifier of a configuration definition type. 
+ TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // no documentation yet + ValueType *Configuration_Template_Section_Definition_Type `json:"valueType,omitempty" xmlrpc:"valueType,omitempty"` +} + +// Configuration definition attribute class contains supplementary information for a configuration definition. +type Configuration_Template_Section_Definition_Attribute struct { + Entity + + // no documentation yet + AttributeType *Configuration_Template_Section_Definition_Attribute_Type `json:"attributeType,omitempty" xmlrpc:"attributeType,omitempty"` + + // no documentation yet + ConfigurationDefinition *Configuration_Template_Section_Definition `json:"configurationDefinition,omitempty" xmlrpc:"configurationDefinition,omitempty"` + + // Value of a configuration definition attribute + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// SoftLayer_Configuration_Template_Attribute_Type models the type of attribute that can be assigned to a configuration definition. +type Configuration_Template_Section_Definition_Attribute_Type struct { + Entity + + // Description of a definition attribute type + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Name of a definition attribute type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Configuration definition group gives you details of the definition and allows extra functionality. +// +// +type Configuration_Template_Section_Definition_Group struct { + Entity + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Internal Description of a definition group. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a definition group. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Internal Definition group name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Parent *Configuration_Template_Section_Definition_Group `json:"parent,omitempty" xmlrpc:"parent,omitempty"` + + // Sort order + SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"` +} + +// SoftLayer_Configuration_Template_Section_Definition_Type further defines the value of a configuration definition. 
+type Configuration_Template_Section_Definition_Type struct { + Entity + + // Description of a configuration value type + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a configuration value type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Name of a configuration value type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// SoftLayer_Configuration_Section_Value is used to set the value for a configuration definition +type Configuration_Template_Section_Definition_Value struct { + Entity + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Definition *Configuration_Template_Section_Definition `json:"definition,omitempty" xmlrpc:"definition,omitempty"` + + // Internal identifier of a configuration definition that this configuration value if defined by + DefinitionId *int `json:"definitionId,omitempty" xmlrpc:"definitionId,omitempty"` + + // Internal Last modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Template *Configuration_Template `json:"template,omitempty" xmlrpc:"template,omitempty"` + + // Internal identifier of a configuration template that this configuration value belongs to + TemplateId *int `json:"templateId,omitempty" xmlrpc:"templateId,omitempty"` + + // Internal Configuration value + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// Some configuration templates let you create a unique configuration profiles. +// +// For example, you can create multiple configuration profiles to monitor multiple hard drives with "CPU/Memory/Disk Monitoring Agent". SoftLayer_Configuration_Template_Section_Profile help you keep track of custom configuration profiles. +type Configuration_Template_Section_Profile struct { + Entity + + // Internal identifier of a monitoring agent this profile belongs to. + AgentId *int `json:"agentId,omitempty" xmlrpc:"agentId,omitempty"` + + // no documentation yet + ConfigurationSection *Configuration_Template_Section `json:"configurationSection,omitempty" xmlrpc:"configurationSection,omitempty"` + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Internal identifier of a configuration profile. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + MonitoringAgent *Monitoring_Agent `json:"monitoringAgent,omitempty" xmlrpc:"monitoringAgent,omitempty"` + + // Name of a configuration profile + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Internal identifier of a configuration section that this profile belongs to. + SectionId *int `json:"sectionId,omitempty" xmlrpc:"sectionId,omitempty"` +} + +// The SoftLayer_Configuration_Template_Section_Reference data type contains information of a configuration section and its associated configuration template. +type Configuration_Template_Section_Reference struct { + Entity + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Internal identifier of a configuration section reference. 
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Section *Configuration_Template_Section `json:"section,omitempty" xmlrpc:"section,omitempty"` + + // Internal identifier of a configuration section. + SectionId *int `json:"sectionId,omitempty" xmlrpc:"sectionId,omitempty"` + + // no documentation yet + Template *Configuration_Template `json:"template,omitempty" xmlrpc:"template,omitempty"` + + // Internal identifier of a configuration template. + TemplateId *int `json:"templateId,omitempty" xmlrpc:"templateId,omitempty"` +} + +// The SoftLayer_Configuration_Template_Section_Type data type contains information of a configuration section type. +// +// Configuration can contain sub-sections. +type Configuration_Template_Section_Type struct { + Entity + + // Configuration section type description + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a configuration section type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Configuration section type name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Configuration_Template_Type data type contains configuration template type information. +type Configuration_Template_Type struct { + Entity + + // Created date. This is deprecated now. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Description of a configuration template + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a configuration template type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Name of a configuration template type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/container.go b/vendor/github.com/softlayer/softlayer-go/datatypes/container.go new file mode 100644 index 000000000000..8d42b98df0b4 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/container.go @@ -0,0 +1,5826 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// no documentation yet
+type Container_Account_Authentication_OpenIdConnect_UsernameLookupContainer struct {
+	Entity
+
+	// no documentation yet
+	Active *bool `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+	// no documentation yet
+	EmailAddress *string `json:"emailAddress,omitempty" xmlrpc:"emailAddress,omitempty"`
+
+	// no documentation yet
+	Federated *bool `json:"federated,omitempty" xmlrpc:"federated,omitempty"`
+
+	// no documentation yet
+	FoundAs *string `json:"foundAs,omitempty" xmlrpc:"foundAs,omitempty"`
+
+	// no documentation yet
+	NumberOfIbmIdsWithEmailAddress *int `json:"numberOfIbmIdsWithEmailAddress,omitempty" xmlrpc:"numberOfIbmIdsWithEmailAddress,omitempty"`
+
+	// no documentation yet
+	UniqueId *string `json:"uniqueId,omitempty" xmlrpc:"uniqueId,omitempty"`
+
+	// no documentation yet
+	Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+}
+
+// SoftLayer_Container_Account_Discount_Program models an account's enrollment in a Flexible Credit (discount) Program, including its monthly and lifetime credit allowances.
+type Container_Account_Discount_Program struct {
+	Entity
+
+	// The credit allowance that has already been applied during the current billing cycle. If the lifetime limit has been or soon will be reached, this amount may include credit applied in previous billing cycles.
+	AppliedCredit *Float64 `json:"appliedCredit,omitempty" xmlrpc:"appliedCredit,omitempty"`
+
+	// Flag to signify whether the account is a participant in the discount program.
+	IsParticipant *bool `json:"isParticipant,omitempty" xmlrpc:"isParticipant,omitempty"`
+
+	// Credit allowance applied over the course of the entire program enrollment. For enrollments without a lifetime restriction, this property will not be populated as credit will be tracked on a purely monthly basis.
+	LifetimeAppliedCredit *Float64 `json:"lifetimeAppliedCredit,omitempty" xmlrpc:"lifetimeAppliedCredit,omitempty"`
+
+	// Credit allowance available over the course of the entire program enrollment. If null, enrollment credit is applied on a strictly monthly basis and there is no lifetime maximum. Enrollments with non-null lifetime credit will receive the lesser of the remaining monthly credit or the remaining lifetime credit.
+	LifetimeCredit *Float64 `json:"lifetimeCredit,omitempty" xmlrpc:"lifetimeCredit,omitempty"`
+
+	// Remaining credit allowance available over the remaining duration of the program enrollment. If null, enrollment credit is applied on a strictly monthly basis and there is no lifetime maximum. Enrollments with non-null remaining lifetime credit will receive the lesser of the remaining monthly credit or the remaining lifetime credit.
+	LifetimeRemainingCredit *Float64 `json:"lifetimeRemainingCredit,omitempty" xmlrpc:"lifetimeRemainingCredit,omitempty"`
+
+	// Maximum number of orders the enrolled account is allowed to have open at one time. If null, then the Flexible Credit Program does not impose an order limit.
+	MaximumActiveOrders *Float64 `json:"maximumActiveOrders,omitempty" xmlrpc:"maximumActiveOrders,omitempty"`
+
+	// The monthly credit allowance that is available at the beginning of the billing cycle.
+	MonthlyCredit *Float64 `json:"monthlyCredit,omitempty" xmlrpc:"monthlyCredit,omitempty"`
+
+	// DEPRECATED: Taxes are calculated in real time and discount amounts are shown pre-tax in all cases. Tax values in the SoftLayer_Container_Account_Discount_Program container are now populated with the related pre-tax values.
+	PostTaxRemainingCredit *Float64 `json:"postTaxRemainingCredit,omitempty" xmlrpc:"postTaxRemainingCredit,omitempty"`
+
+	// The date at which the program expires in MM/DD/YYYY format.
+	ProgramEndDate *Time `json:"programEndDate,omitempty" xmlrpc:"programEndDate,omitempty"`
+
+	// Name of the Flexible Credit Program the account is enrolled in.
+	ProgramName *string `json:"programName,omitempty" xmlrpc:"programName,omitempty"`
+
+	// The credit allowance that is available during the current billing cycle. If the lifetime limit has been or soon will be reached, this amount may be reduced by credit applied in previous billing cycles.
+	RemainingCredit *Float64 `json:"remainingCredit,omitempty" xmlrpc:"remainingCredit,omitempty"`
+
+	// DEPRECATED: Taxes are calculated in real time and discount amounts are shown pre-tax in all cases. Tax values in the SoftLayer_Container_Account_Discount_Program container are now populated with the related pre-tax values.
+	RemainingCreditTax *Float64 `json:"remainingCreditTax,omitempty" xmlrpc:"remainingCreditTax,omitempty"`
+}
+
+// no documentation yet
+type Container_Account_External_Setup_ProvisioningHoldLifted struct {
+	Entity
+
+	// no documentation yet
+	AdditionalAttributes *Container_Account_External_Setup_ProvisioningHoldLifted_Attributes `json:"additionalAttributes,omitempty" xmlrpc:"additionalAttributes,omitempty"`
+
+	// no documentation yet
+	Code *string `json:"code,omitempty" xmlrpc:"code,omitempty"`
+
+	// no documentation yet
+	Error *string `json:"error,omitempty" xmlrpc:"error,omitempty"`
+
+	// no documentation yet
+	State *string `json:"state,omitempty" xmlrpc:"state,omitempty"`
+}
+
+// no documentation yet
+type Container_Account_External_Setup_ProvisioningHoldLifted_Attributes struct {
+	Entity
+
+	// no documentation yet
+	BrandKeyName *string `json:"brandKeyName,omitempty" xmlrpc:"brandKeyName,omitempty"`
+
+	// no documentation yet
+	SoftLayerBrandMoveDate *Time `json:"softLayerBrandMoveDate,omitempty" xmlrpc:"softLayerBrandMoveDate,omitempty"`
+}
+
+// SoftLayer_Container_Account_Graph_Outputs models a single outbound object for a graph of given data sets.
+type Container_Account_Graph_Outputs struct {
+	Entity
+
+	// The count of closed tickets included in this graph.
+	ClosedTickets *string `json:"closedTickets,omitempty" xmlrpc:"closedTickets,omitempty"`
+
+	// The count of completed backups included in this graph.
+	CompletedBackupCount *string `json:"completedBackupCount,omitempty" xmlrpc:"completedBackupCount,omitempty"`
+
+	// The count of conflicted backups included in this graph.
+	ConflictBackupCount *string `json:"conflictBackupCount,omitempty" xmlrpc:"conflictBackupCount,omitempty"`
+
+	// The maximum date included in this graph.
+	EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+	// The count of failed backups included in this graph.
+	FailedBackupCount *string `json:"failedBackupCount,omitempty" xmlrpc:"failedBackupCount,omitempty"`
+
+	// Error message encountered during graphing
+	GraphError *string `json:"graphError,omitempty" xmlrpc:"graphError,omitempty"`
+
+	// The raw PNG binary data to be displayed once the graph is drawn.
+	GraphImage *[]byte `json:"graphImage,omitempty" xmlrpc:"graphImage,omitempty"`
+
+	// The average of hardware uptime included in this graph.
+	HardwareUptime *string `json:"hardwareUptime,omitempty" xmlrpc:"hardwareUptime,omitempty"`
+
+	// The inbound bandwidth usage shown in this graph.
+ InboundUsage *string `json:"inboundUsage,omitempty" xmlrpc:"inboundUsage,omitempty"` + + // The count of open tickets included in this graph. + OpenTickets *string `json:"openTickets,omitempty" xmlrpc:"openTickets,omitempty"` + + // The outbound bandwidth usage shown in this graph. + OutboundUsage *string `json:"outboundUsage,omitempty" xmlrpc:"outboundUsage,omitempty"` + + // The count of tickets included in this graph. + PendingCustomerResponseTicketCount *string `json:"pendingCustomerResponseTicketCount,omitempty" xmlrpc:"pendingCustomerResponseTicketCount,omitempty"` + + // The minimum date included in this graph. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` + + // The average of url uptime included in this graph. + UrlUptime *string `json:"urlUptime,omitempty" xmlrpc:"urlUptime,omitempty"` + + // The count of tickets included in this graph. + WaitingEmployeeResponseTicketCount *string `json:"waitingEmployeeResponseTicketCount,omitempty" xmlrpc:"waitingEmployeeResponseTicketCount,omitempty"` +} + +// Historical Summary Container for account resource details +type Container_Account_Historical_Summary struct { + Entity + + // Array of server uptime detail containers + Details []Container_Account_Historical_Summary_Detail `json:"details,omitempty" xmlrpc:"details,omitempty"` + + // The maximum date included in the summary. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The minimum date included in the summary. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// Historical Summary Details Container for a resource's data +type Container_Account_Historical_Summary_Detail struct { + Entity + + // The maximum date included in the detail. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The minimum date included in the detail. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// Historical Summary Details Container for a host resource uptime +type Container_Account_Historical_Summary_Detail_Uptime struct { + Container_Account_Historical_Summary_Detail + + // The hardware for uptime details. + CloudComputingInstance *Virtual_Guest `json:"cloudComputingInstance,omitempty" xmlrpc:"cloudComputingInstance,omitempty"` + + // The configuration value for the detail's resource. + ConfigurationValue *Monitoring_Agent_Configuration_Value `json:"configurationValue,omitempty" xmlrpc:"configurationValue,omitempty"` + + // The data associated with a host uptime details. + Data []Metric_Tracking_Object_Data `json:"data,omitempty" xmlrpc:"data,omitempty"` + + // The hardware for uptime details. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` +} + +// Historical Summary Container for account host's resource uptime details +type Container_Account_Historical_Summary_Uptime struct { + Container_Account_Historical_Summary +} + +// Contains data required to both request a new IaaS account for active IBM employees and review pending requests. Fields used exclusively in the review process are scrubbed of user input. 
+type Container_Account_Internal_Ibm_Request struct { + Entity + + // Purpose of the internal IBM account chosen from the list of available + AccountType *string `json:"accountType,omitempty" xmlrpc:"accountType,omitempty"` + + // If not provided, will attempt to retrieve from BluePages + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // If no address provided, will attempt to retrieve from BluePages + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // If not provided, will attempt to retrieve from BluePages + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Name of the company displayed on the IaaS account + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // If not provided, will attempt to retrieve from BluePages + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // True if the request has been denied by either the IaaS team or the + DeniedFlag *bool `json:"deniedFlag,omitempty" xmlrpc:"deniedFlag,omitempty"` + + // Department within the division which will be changed during cost recovery. + DepartmentCode *string `json:"departmentCode,omitempty" xmlrpc:"departmentCode,omitempty"` + + // Country assigned to the department for cost recovery. + DepartmentCountry *string `json:"departmentCountry,omitempty" xmlrpc:"departmentCountry,omitempty"` + + // Division code used for cost recovery. + DivisionCode *string `json:"divisionCode,omitempty" xmlrpc:"divisionCode,omitempty"` + + // Account owner's IBM email address. Must be a discoverable email + EmailAddress *string `json:"emailAddress,omitempty" xmlrpc:"emailAddress,omitempty"` + + // Applicant's first name, as provided by IBM BluePages API. + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // Applicant's last name, as provided by IBM BluePages API. + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // APPROVED if the request has been approved by the first-line manager, + ManagerApprovalStatus *string `json:"managerApprovalStatus,omitempty" xmlrpc:"managerApprovalStatus,omitempty"` + + // True for accounts intended to be multi-tenant and false otherwise + MultiTenantFlag *bool `json:"multiTenantFlag,omitempty" xmlrpc:"multiTenantFlag,omitempty"` + + // Account owner's primary phone number. 
If no phone number is available + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // Bluemix PaaS 32 digit hexadecimal account id being automatically linked + PaasAccountId *string `json:"paasAccountId,omitempty" xmlrpc:"paasAccountId,omitempty"` + + // If not provided, will attempt to retrieve from BluePages + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // Stated purpose of the new account this request would create + Purpose *string `json:"purpose,omitempty" xmlrpc:"purpose,omitempty"` + + // Division's security SME's email address, if available + SecuritySubjectMatterExpertEmail *string `json:"securitySubjectMatterExpertEmail,omitempty" xmlrpc:"securitySubjectMatterExpertEmail,omitempty"` + + // Division's security SME's name, if available + SecuritySubjectMatterExpertName *string `json:"securitySubjectMatterExpertName,omitempty" xmlrpc:"securitySubjectMatterExpertName,omitempty"` + + // Division's security SME's phone, if available + SecuritySubjectMatterExpertPhone *string `json:"securitySubjectMatterExpertPhone,omitempty" xmlrpc:"securitySubjectMatterExpertPhone,omitempty"` + + // If required for chosen country and not provided, will attempt + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// no documentation yet +type Container_Account_Payment_Method_CreditCard struct { + Entity + + // no documentation yet + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // no documentation yet + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + CurrencyShortName *string `json:"currencyShortName,omitempty" xmlrpc:"currencyShortName,omitempty"` + + // no documentation yet + CybersourceAssignedCardType *string `json:"cybersourceAssignedCardType,omitempty" xmlrpc:"cybersourceAssignedCardType,omitempty"` + + // no documentation yet + ExpireMonth *string `json:"expireMonth,omitempty" xmlrpc:"expireMonth,omitempty"` + + // no documentation yet + ExpireYear *string `json:"expireYear,omitempty" xmlrpc:"expireYear,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + LastFourDigits *string `json:"lastFourDigits,omitempty" xmlrpc:"lastFourDigits,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + Nickname *string `json:"nickname,omitempty" xmlrpc:"nickname,omitempty"` + + // no documentation yet + PaymentMethodRoleName *string `json:"paymentMethodRoleName,omitempty" xmlrpc:"paymentMethodRoleName,omitempty"` + + // no documentation yet + PaymentTypeId *string `json:"paymentTypeId,omitempty" xmlrpc:"paymentTypeId,omitempty"` + + // no documentation yet + PaymentTypeName *string `json:"paymentTypeName,omitempty" xmlrpc:"paymentTypeName,omitempty"` + + // no documentation yet + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// no documentation yet +type Container_Account_PersonalInformation struct { + Entity + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation 
yet + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // no documentation yet + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // no documentation yet + AlternatePhone *string `json:"alternatePhone,omitempty" xmlrpc:"alternatePhone,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // no documentation yet + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // no documentation yet + RequestDate *Time `json:"requestDate,omitempty" xmlrpc:"requestDate,omitempty"` + + // no documentation yet + RequestId *int `json:"requestId,omitempty" xmlrpc:"requestId,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// The customer and prospective owner of a proof of concept account established by an IBMer. +type Container_Account_ProofOfConcept_Contact_Customer struct { + Entity + + // Customer's address + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // Customer's address + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // Customer's city + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Customer's ISO country code + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // Customer's email address + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // Customer's first name + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // Customer's last name + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // Customer's primary phone number + Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"` + + // Customer's postal code + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // Customer's state + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// IBMer who is submitting a proof of concept request on behalf of a prospective customer. 
+type Container_Account_ProofOfConcept_Contact_Ibmer_Requester struct { + Entity + + // Customer's address + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // Customer's address + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // no documentation yet + BusinessUnit *string `json:"businessUnit,omitempty" xmlrpc:"businessUnit,omitempty"` + + // Customer's city + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Customer's ISO country code + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // Customer's email address + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // Customer's first name + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // Customer's last name + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + OrganizationCountry *string `json:"organizationCountry,omitempty" xmlrpc:"organizationCountry,omitempty"` + + // no documentation yet + PaasAccountId *string `json:"paasAccountId,omitempty" xmlrpc:"paasAccountId,omitempty"` + + // Customer's primary phone number + Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"` + + // Customer's postal code + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // Customer's state + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // no documentation yet + SubOrganization *string `json:"subOrganization,omitempty" xmlrpc:"subOrganization,omitempty"` + + // no documentation yet + Uid *string `json:"uid,omitempty" xmlrpc:"uid,omitempty"` +} + +// IBMer who will assist the requester with technical aspects of configuring the proof of concept account. +type Container_Account_ProofOfConcept_Contact_Ibmer_Technical struct { + Entity + + // Customer's address + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // Customer's address + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // Customer's city + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Customer's ISO country code + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // Customer's email address + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // Customer's first name + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // Customer's last name + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // Customer's primary phone number + Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"` + + // Customer's postal code + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // Customer's state + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // no documentation yet + Uid *string `json:"uid,omitempty" xmlrpc:"uid,omitempty"` +} + +// Proof of concept request using the account team funding model. Note that proof of concept account request are available only to internal IBM employees. 
+type Container_Account_ProofOfConcept_Request_AccountFunded struct {
+	Container_Account_ProofOfConcept_Request_GlobalFunded
+
+	// Billing codes for the department paying for the proof of concept account
+	CostRecoveryRequest *Container_Account_ProofOfConcept_Request_CostRecovery `json:"costRecoveryRequest,omitempty" xmlrpc:"costRecoveryRequest,omitempty"`
+}
+
+// Funding codes for the department paying for the proof of concept account.
+type Container_Account_ProofOfConcept_Request_CostRecovery struct {
+	Entity
+
+	// Internal billing system country code
+	CountryCode *string `json:"countryCode,omitempty" xmlrpc:"countryCode,omitempty"`
+
+	// Customer's Internal billing system department code
+	DepartmentCode *string `json:"departmentCode,omitempty" xmlrpc:"departmentCode,omitempty"`
+
+	// Internal billing system division code
+	DivisionCode *string `json:"divisionCode,omitempty" xmlrpc:"divisionCode,omitempty"`
+}
+
+// Proof of concept request using the global funding model. Note that proof of concept account requests are available only to internal IBM employees.
+type Container_Account_ProofOfConcept_Request_GlobalFunded struct {
+	Entity
+
+	// Dollar amount of funding requested for the proof of concept period
+	Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"`
+
+	// Customer intended to take over ownership and billing of the account
+	Customer *Container_Account_ProofOfConcept_Contact_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"`
+
+	// Explanation of the purpose of the proof of concept request
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// End date for the proof of concept period
+	EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+	// Internal opportunity system details
+	Opportunity *Container_Account_ProofOfConcept_Request_Opportunity `json:"opportunity,omitempty" xmlrpc:"opportunity,omitempty"`
+
+	// Name of the project or company and will become the account companyName
+	ProjectName *string `json:"projectName,omitempty" xmlrpc:"projectName,omitempty"`
+
+	// IBM region responsible for overseeing the proof of concept account
+	RegionKeyName *string `json:"regionKeyName,omitempty" xmlrpc:"regionKeyName,omitempty"`
+
+	// IBMer requesting the proof of concept account
+	Requester *Container_Account_ProofOfConcept_Contact_Ibmer_Requester `json:"requester,omitempty" xmlrpc:"requester,omitempty"`
+
+	// Start date for the proof of concept period
+	StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+	// IBMer assisting with technical aspects of account configuration
+	TechnicalContact *Container_Account_ProofOfConcept_Contact_Ibmer_Technical `json:"technicalContact,omitempty" xmlrpc:"technicalContact,omitempty"`
+}
+
+// Internal IBM opportunity codes required when applying for a proof of concept account.
+type Container_Account_ProofOfConcept_Request_Opportunity struct {
+	Entity
+
+	// Expected monthly revenue
+	MonthlyRecurringRevenue *Float64 `json:"monthlyRecurringRevenue,omitempty" xmlrpc:"monthlyRecurringRevenue,omitempty"`
+
+	// Internal system identifier
+	OpportunityNumber *string `json:"opportunityNumber,omitempty" xmlrpc:"opportunityNumber,omitempty"`
+
+	// Expected overall contract value
+	TotalContractValue *Float64 `json:"totalContractValue,omitempty" xmlrpc:"totalContractValue,omitempty"`
+}
+
+// Full details presented to reviewers when determining whether or not to accept a proof of concept request.
Note that reviewers are internal IBM employees and reviews are not exposed to external users. +type Container_Account_ProofOfConcept_Review struct { + Entity + + // Type of brand the account will use + AccountType *string `json:"accountType,omitempty" xmlrpc:"accountType,omitempty"` + + // Internal billing codes + CostRecoveryCodes *Container_Account_ProofOfConcept_Request_CostRecovery `json:"costRecoveryCodes,omitempty" xmlrpc:"costRecoveryCodes,omitempty"` + + // Customer intended to take over billing after the proof of concept period + Customer *Container_Account_ProofOfConcept_Contact_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"` + + // Describes the purpose and rationale of the request + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Expected end date of the proof of concept period + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // Dollar amount of funding requested + FundingAmount *Float64 `json:"fundingAmount,omitempty" xmlrpc:"fundingAmount,omitempty"` + + // Funding option chosen for the request + FundingType *string `json:"fundingType,omitempty" xmlrpc:"fundingType,omitempty"` + + // System id of the request + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Name of the integrated offering team lead reviewing the request + IotLeadName *string `json:"iotLeadName,omitempty" xmlrpc:"iotLeadName,omitempty"` + + // Name of the integrated offering team region + IotRegionName *string `json:"iotRegionName,omitempty" xmlrpc:"iotRegionName,omitempty"` + + // Name of requesting IBMer's manager + ManagerName *string `json:"managerName,omitempty" xmlrpc:"managerName,omitempty"` + + // Internal opportunity tracking information + Opportunity *Container_Account_ProofOfConcept_Request_Opportunity `json:"opportunity,omitempty" xmlrpc:"opportunity,omitempty"` + + // Project name chosen by the requesting IBMer + ProjectName *string `json:"projectName,omitempty" xmlrpc:"projectName,omitempty"` + + // IBMer requesting the account on behalf of a customer + Requester *Container_Account_ProofOfConcept_Contact_Ibmer_Requester `json:"requester,omitempty" xmlrpc:"requester,omitempty"` + + // Expected start date of the proof of concept period + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` + + // Additional IBMer responsible for configuring the cloud capabilities + TechnicalContact *Container_Account_ProofOfConcept_Contact_Ibmer_Technical `json:"technicalContact,omitempty" xmlrpc:"technicalContact,omitempty"` +} + +// Summary presented to reviewers when determining whether or not to accept a proof of concept request. Note that reviewers are internal IBM employees and reviews are not exposed to external users. 
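The proof of concept request containers compose by embedding and by pointer fields: GlobalFunded carries the customer, requester, technical contact, and opportunity details, and AccountFunded layers the cost recovery codes on top. A minimal sketch of how a client might assemble such a request, assuming the usual datatypes import; the pointer helper and every populated value below are placeholders, not anything taken from the upstream sources:

// Sketch only; assumes: import "github.com/softlayer/softlayer-go/datatypes"
func examplePocRequest() datatypes.Container_Account_ProofOfConcept_Request_AccountFunded {
	str := func(s string) *string { return &s } // local pointer helper, placeholder
	amount := datatypes.Float64(5000)           // placeholder funding amount

	var req datatypes.Container_Account_ProofOfConcept_Request_AccountFunded
	req.ProjectName = str("example-project") // promoted from the embedded GlobalFunded request
	req.Amount = &amount
	req.Customer = &datatypes.Container_Account_ProofOfConcept_Contact_Customer{
		FirstName: str("Jane"),
		LastName:  str("Doe"),
		Email:     str("jane.doe@example.com"),
	}
	req.CostRecoveryRequest = &datatypes.Container_Account_ProofOfConcept_Request_CostRecovery{
		CountryCode:    str("US"),
		DivisionCode:   str("0000"), // placeholder billing codes
		DepartmentCode: str("000"),
	}
	return req
}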
+type Container_Account_ProofOfConcept_Review_Summary struct {
+	Entity
+
+	// Account's companyName
+	AccountName *string `json:"accountName,omitempty" xmlrpc:"accountName,omitempty"`
+
+	// Current account owner
+	AccountOwnerName *string `json:"accountOwnerName,omitempty" xmlrpc:"accountOwnerName,omitempty"`
+
+	// Dollar amount requested
+	Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"`
+
+	// Date the request was submitted
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// Email of the customer receiving the proof of concept account
+	CustomerEmail *string `json:"customerEmail,omitempty" xmlrpc:"customerEmail,omitempty"`
+
+	// Name of the customer receiving the proof of concept account
+	CustomerName *string `json:"customerName,omitempty" xmlrpc:"customerName,omitempty"`
+
+	// Request record's id
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// Date of the last state change on the request
+	LastUpdate *Time `json:"lastUpdate,omitempty" xmlrpc:"lastUpdate,omitempty"`
+
+	// Email address of the reviewer, if any, currently reviewing the request
+	NextApproverEmail *string `json:"nextApproverEmail,omitempty" xmlrpc:"nextApproverEmail,omitempty"`
+
+	// Email address of the requester
+	RequesterEmail *string `json:"requesterEmail,omitempty" xmlrpc:"requesterEmail,omitempty"`
+
+	// Requesting IBMer's full name
+	RequesterName *string `json:"requesterName,omitempty" xmlrpc:"requesterName,omitempty"`
+
+	// Request's current status (Pending, Denied, or Approved)
+	Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+}
+
+// The SoftLayer_Container_Authentication_Request_Common data type contains common information for requests to the getPortalLogin API. This is an abstract class that serves as a base that more specialized classes will derive from. For example, a request class specific to SoftLayer Native IMS Login (username and password).
+type Container_Authentication_Request_Common struct {
+	Container_Authentication_Request_Contract
+
+	// The answer to your security question.
+	SecurityQuestionAnswer *string `json:"securityQuestionAnswer,omitempty" xmlrpc:"securityQuestionAnswer,omitempty"`
+
+	// A security question you wish to answer when authenticating to the SoftLayer customer portal. This parameter isn't required if no security questions are set on your portal account or if your account is configured to not require answering a security question upon login.
+	SecurityQuestionId *int `json:"securityQuestionId,omitempty" xmlrpc:"securityQuestionId,omitempty"`
+}
+
+// The SoftLayer_Container_Authentication_Request_Contract provides a common set of operations for implementing classes.
+type Container_Authentication_Request_Contract struct {
+	Entity
+}
+
+// The SoftLayer_Container_Authentication_Request_Native data type contains information for requests to the getPortalLogin API. This class is specific to the SoftLayer Native login (username/password). The request information will be verified to ensure it is valid, and then there will be an attempt to obtain a portal login token in authenticating the user with the provided information.
+type Container_Authentication_Request_Native struct {
+	Container_Authentication_Request_Common
+
+	// Your SoftLayer customer portal user's portal password.
+	Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"`
+
+	// The username you wish to authenticate to the SoftLayer customer portal with.
+ Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_Native_External data type contains information for requests to the getPortalLogin API. This class serves as a base class for more specialized external authentication classes to the SoftLayer Native login (username/password). +type Container_Authentication_Request_Native_External struct { + Container_Authentication_Request_Native +} + +// The SoftLayer_Container_Authentication_Request_Native_External_Totp data type contains information for requests to the getPortalLogin API. This class provides information to allow the user to submit a request to the native SoftLayer (username/password) login service for a portal login token, as well as submitting a request to the TOTP 2 factor authentication service. +type Container_Authentication_Request_Native_External_Totp struct { + Container_Authentication_Request_Native_External + + // no documentation yet + SecondSecurityCode *string `json:"secondSecurityCode,omitempty" xmlrpc:"secondSecurityCode,omitempty"` + + // no documentation yet + SecurityCode *string `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"` + + // no documentation yet + Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_Native_External_Verisign data type contains information for requests to the getPortalLogin API. This class provides information to allow the user to submit a request to the native SoftLayer (username/password) login service for a portal login token, as well as submitting a request to the Verisign 2 factor authentication service. +type Container_Authentication_Request_Native_External_Verisign struct { + Container_Authentication_Request_Native_External + + // no documentation yet + SecondSecurityCode *string `json:"secondSecurityCode,omitempty" xmlrpc:"secondSecurityCode,omitempty"` + + // no documentation yet + SecurityCode *string `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"` + + // no documentation yet + Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_OpenIdConnect data type contains information for requests to the getPortalLogin API. This class is specific to the SoftLayer Cloud Token login. The request information will be verified to ensure it is valid, and then there will be an attempt to obtain a portal login token in authenticating the user with the provided information. +type Container_Authentication_Request_OpenIdConnect struct { + Container_Authentication_Request_Common + + // no documentation yet + OpenIdConnectAccessToken *string `json:"openIdConnectAccessToken,omitempty" xmlrpc:"openIdConnectAccessToken,omitempty"` + + // no documentation yet + OpenIdConnectAccountId *int `json:"openIdConnectAccountId,omitempty" xmlrpc:"openIdConnectAccountId,omitempty"` + + // no documentation yet + OpenIdConnectProvider *string `json:"openIdConnectProvider,omitempty" xmlrpc:"openIdConnectProvider,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_OpenIdConnect_External data type contains information for requests to the getPortalLogin API. This class serves as a base class for more specialized external authentication classes to the SoftLayer OpenIdConnect login service. 
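The request containers above are plain data holders for the getPortalLogin call: Native carries the username and password, and the External_Totp / External_Verisign variants layer a one-time security code on top through embedding. A minimal sketch of populating the TOTP variant, assuming the usual datatypes import; the local pointer helper and the argument names are illustrative only:

// Sketch only; assumes: import "github.com/softlayer/softlayer-go/datatypes"
func exampleTotpLoginRequest(user, pass, otp string) datatypes.Container_Authentication_Request_Native_External_Totp {
	str := func(s string) *string { return &s } // local pointer helper

	var req datatypes.Container_Authentication_Request_Native_External_Totp
	req.Username = str(user) // promoted from the embedded Container_Authentication_Request_Native
	req.Password = str(pass)
	req.SecurityCode = str(otp) // the TOTP one-time code
	return req
}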
+type Container_Authentication_Request_OpenIdConnect_External struct { + Container_Authentication_Request_OpenIdConnect +} + +// The SoftLayer_Container_Authentication_Request_OpenIdConnect_External_Totp data type contains information for requests to the getPortalLogin API. This class provides information to allow the user to submit a request to the SoftLayer OpenIdConnect (token) login service for a portal login token, as well as submitting a request to the TOTP 2 factor authentication service. +type Container_Authentication_Request_OpenIdConnect_External_Totp struct { + Container_Authentication_Request_OpenIdConnect_External + + // no documentation yet + SecondSecurityCode *string `json:"secondSecurityCode,omitempty" xmlrpc:"secondSecurityCode,omitempty"` + + // no documentation yet + SecurityCode *string `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"` + + // no documentation yet + Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_OpenIdConnect_External_Verisign data type contains information for requests to the getPortalLogin API. This class provides information to allow the user to submit a request to the SoftLayer OpenIdConnect (token) login service for a portal login token, as well as submitting a request to the Verisign 2 factor authentication service. +type Container_Authentication_Request_OpenIdConnect_External_Verisign struct { + Container_Authentication_Request_OpenIdConnect_External + + // no documentation yet + SecondSecurityCode *string `json:"secondSecurityCode,omitempty" xmlrpc:"secondSecurityCode,omitempty"` + + // no documentation yet + SecurityCode *int `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"` + + // no documentation yet + Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"` +} + +// The SoftLayer_Container_Authentication_Response_2FactorAuthenticationNeeded data type contains information for specific responses from the getPortalLogin API. This class is indicative of a request that is missing the appropriate 2FA information. +type Container_Authentication_Response_2FactorAuthenticationNeeded struct { + Container_Authentication_Response_Common + + // no documentation yet + AdditionalData *Container_Authentication_Response_Common `json:"additionalData,omitempty" xmlrpc:"additionalData,omitempty"` + + // no documentation yet + StatusKeyName *string `json:"statusKeyName,omitempty" xmlrpc:"statusKeyName,omitempty"` +} + +// The SoftLayer_Container_Authentication_Response_Account data type contains account information for responses from the getPortalLogin API. 
+type Container_Authentication_Response_Account struct { + Entity + + // no documentation yet + AccountCompanyName *string `json:"accountCompanyName,omitempty" xmlrpc:"accountCompanyName,omitempty"` + + // no documentation yet + AccountCountry *string `json:"accountCountry,omitempty" xmlrpc:"accountCountry,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + AccountStatusName *string `json:"accountStatusName,omitempty" xmlrpc:"accountStatusName,omitempty"` + + // no documentation yet + BluemixAccountId *string `json:"bluemixAccountId,omitempty" xmlrpc:"bluemixAccountId,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + DefaultAccount *bool `json:"defaultAccount,omitempty" xmlrpc:"defaultAccount,omitempty"` + + // no documentation yet + IsMasterUserFlag *bool `json:"isMasterUserFlag,omitempty" xmlrpc:"isMasterUserFlag,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + PhoneFactorExternalAuthenticationRequired *bool `json:"phoneFactorExternalAuthenticationRequired,omitempty" xmlrpc:"phoneFactorExternalAuthenticationRequired,omitempty"` + + // no documentation yet + SecurityQuestionRequired *bool `json:"securityQuestionRequired,omitempty" xmlrpc:"securityQuestionRequired,omitempty"` + + // no documentation yet + TotpExternalAuthenticationRequired *bool `json:"totpExternalAuthenticationRequired,omitempty" xmlrpc:"totpExternalAuthenticationRequired,omitempty"` + + // no documentation yet + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` + + // no documentation yet + VerisignExternalAuthenticationRequired *bool `json:"verisignExternalAuthenticationRequired,omitempty" xmlrpc:"verisignExternalAuthenticationRequired,omitempty"` +} + +// The SoftLayer_Container_Authentication_Response_AccountIdMissing data type contains information for specific responses from the getPortalLogin API. This class is indicative of a request that is missing the account id. +type Container_Authentication_Response_AccountIdMissing struct { + Container_Authentication_Response_Common + + // no documentation yet + StatusKeyName *string `json:"statusKeyName,omitempty" xmlrpc:"statusKeyName,omitempty"` +} + +// The SoftLayer_Container_Authentication_Response_Common data type contains common information for responses from the getPortalLogin API. This is an abstract class that serves as a base that more specialized classes will derive from. For example, a response class that is specific to a successful response from the getPortalLogin API. +type Container_Authentication_Response_Common struct { + Entity + + // The list of linked accounts for the authenticated SoftLayer customer portal user. + Accounts []Container_Authentication_Response_Account `json:"accounts,omitempty" xmlrpc:"accounts,omitempty"` +} + +// The SoftLayer_Container_Authentication_Response_LOGIN_FAILED data type contains information for specific responses from the getPortalLogin API. This class is indicative of a request where there was an inability to login based on the information that was provided. 
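On the response side, Container_Authentication_Response_Common carries the list of linked accounts, and each Container_Authentication_Response_Account flags whether it is the user's default account. A small sketch of scanning that list; the function name is illustrative and not an API of the library:

// Sketch only; assumes: import "github.com/softlayer/softlayer-go/datatypes"
// findDefaultAccount returns the first linked account flagged as the default, or nil.
func findDefaultAccount(resp datatypes.Container_Authentication_Response_Common) *datatypes.Container_Authentication_Response_Account {
	for i := range resp.Accounts {
		if acct := &resp.Accounts[i]; acct.DefaultAccount != nil && *acct.DefaultAccount {
			return acct
		}
	}
	return nil
}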
+type Container_Authentication_Response_LoginFailed struct {
+	Container_Authentication_Response_Common
+
+	// no documentation yet
+	ErrorMessage *string `json:"errorMessage,omitempty" xmlrpc:"errorMessage,omitempty"`
+
+	// no documentation yet
+	StatusKeyName *string `json:"statusKeyName,omitempty" xmlrpc:"statusKeyName,omitempty"`
+}
+
+// The SoftLayer_Container_Authentication_Response_SUCCESS data type contains information for specific responses from the getPortalLogin API. This class is indicative of a request that was successful in obtaining a portal login token from the getPortalLogin API.
+type Container_Authentication_Response_Success struct {
+	Container_Authentication_Response_Common
+
+	// no documentation yet
+	StatusKeyName *string `json:"statusKeyName,omitempty" xmlrpc:"statusKeyName,omitempty"`
+
+	// The token for interacting with the SoftLayer customer portal.
+	Token *Container_User_Authentication_Token `json:"token,omitempty" xmlrpc:"token,omitempty"`
+}
+
+// The SoftLayer_Container_Auxiliary_Network_Status_Reading data type contains information relating to an object being monitored from outside the SoftLayer network. It is primarily used to check the status of our edge routers from multiple locations around the world.
+type Container_Auxiliary_Network_Status_Reading struct {
+	Entity
+
+	// Average packet round-trip time.
+	AveragePing *Float64 `json:"averagePing,omitempty" xmlrpc:"averagePing,omitempty"`
+
+	// Number of failures since the target was last detected to be working properly.
+	Fails *int `json:"fails,omitempty" xmlrpc:"fails,omitempty"`
+
+	// Monitoring frequency in minutes.
+	Frequency *int `json:"frequency,omitempty" xmlrpc:"frequency,omitempty"`
+
+	// The target label.
+	Label *string `json:"label,omitempty" xmlrpc:"label,omitempty"`
+
+	// Last check date and time.
+	LastCheckDate *Time `json:"lastCheckDate,omitempty" xmlrpc:"lastCheckDate,omitempty"`
+
+	// Date and time of the last problem detected.
+	LastDownDate *Time `json:"lastDownDate,omitempty" xmlrpc:"lastDownDate,omitempty"`
+
+	// The total response time in seconds calculated during the last check.
+	Latency *Float64 `json:"latency,omitempty" xmlrpc:"latency,omitempty"`
+
+	// The monitoring location name.
+	Location *string `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+	// Maximum packet round-trip time.
+	MaximumPing *Float64 `json:"maximumPing,omitempty" xmlrpc:"maximumPing,omitempty"`
+
+	// Minimum packet round-trip time.
+	MinimumPing *Float64 `json:"minimumPing,omitempty" xmlrpc:"minimumPing,omitempty"`
+
+	// Packet loss percentage.
+	PingLoss *Float64 `json:"pingLoss,omitempty" xmlrpc:"pingLoss,omitempty"`
+
+	// The date monitoring first began
+	StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+	// Status Code - one of UP, Down, Test pending.
+	StatusCode *string `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"`
+
+	// The status message from the last effective check.
+	StatusMessage *string `json:"statusMessage,omitempty" xmlrpc:"statusMessage,omitempty"`
+
+	// The target object.
+	Target *string `json:"target,omitempty" xmlrpc:"target,omitempty"`
+
+	// A letter indicating the target type.
+	TargetType *string `json:"targetType,omitempty" xmlrpc:"targetType,omitempty"`
+}
+
+// SoftLayer_Container_Bandwidth_GraphInputs models a single inbound object for a given bandwidth graph.
+type Container_Bandwidth_GraphInputs struct {
+	Entity
+
+	// This is a unix timestamp that represents the stop date/time for a graph.
+ EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The front-end or back-end network uplink interface associated with this server. + NetworkInterfaceId *int `json:"networkInterfaceId,omitempty" xmlrpc:"networkInterfaceId,omitempty"` + + // * + Pod *int `json:"pod,omitempty" xmlrpc:"pod,omitempty"` + + // This is a human readable name for the server or rack being graphed. + ServerName *string `json:"serverName,omitempty" xmlrpc:"serverName,omitempty"` + + // This is a unix timestamp that represents the begin date/time for a graph. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// SoftLayer_Container_Bandwidth_GraphOutputs models a single outbound object for a given bandwidth graph. +type Container_Bandwidth_GraphOutputs struct { + Entity + + // The raw PNG binary data to be displayed once the graph is drawn. + GraphImage *[]byte `json:"graphImage,omitempty" xmlrpc:"graphImage,omitempty"` + + // The title that ended up being displayed as part of the graph image. + GraphTitle *string `json:"graphTitle,omitempty" xmlrpc:"graphTitle,omitempty"` + + // The maximum date included in this graph. + MaxEndDate *Time `json:"maxEndDate,omitempty" xmlrpc:"maxEndDate,omitempty"` + + // The minimum date included in this graph. + MinStartDate *Time `json:"minStartDate,omitempty" xmlrpc:"minStartDate,omitempty"` +} + +// SoftLayer_Container_Bandwidth_GraphOutputs models an individual bandwidth graph image and certain details about that graph image. +type Container_Bandwidth_GraphOutputsExtended struct { + Entity + + // The raw PNG binary data of a bandwidth graph image. + GraphImage *[]byte `json:"graphImage,omitempty" xmlrpc:"graphImage,omitempty"` + + // A bandwidth graph's title. + GraphTitle *string `json:"graphTitle,omitempty" xmlrpc:"graphTitle,omitempty"` + + // The amount of inbound traffic reported on a bandwidth graph image. + InBoundTotalBytes *uint `json:"inBoundTotalBytes,omitempty" xmlrpc:"inBoundTotalBytes,omitempty"` + + // The ending date of the data represented in a bandwidth graph. + MaxEndDate *Time `json:"maxEndDate,omitempty" xmlrpc:"maxEndDate,omitempty"` + + // The beginning date of the data represented in a bandwidth graph. + MinStartDate *Time `json:"minStartDate,omitempty" xmlrpc:"minStartDate,omitempty"` + + // The amount of outbound traffic reported on a bandwidth graph image. + OutBoundTotalBytes *uint `json:"outBoundTotalBytes,omitempty" xmlrpc:"outBoundTotalBytes,omitempty"` +} + +// SoftLayer_Container_Bandwidth_Projection models projected bandwidth use over a time range. +type Container_Bandwidth_Projection struct { + Entity + + // Bandwidth limit for this hardware. + AllowedUsage *string `json:"allowedUsage,omitempty" xmlrpc:"allowedUsage,omitempty"` + + // Estimated bandwidth usage so far this billing cycle. + EstimatedUsage *string `json:"estimatedUsage,omitempty" xmlrpc:"estimatedUsage,omitempty"` + + // Hardware ID of server to monitor. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // Projected usage for this hardware based on previous usage this billing cycle. + ProjectedUsage *string `json:"projectedUsage,omitempty" xmlrpc:"projectedUsage,omitempty"` + + // the text name of the server being monitored. + ServerName *string `json:"serverName,omitempty" xmlrpc:"serverName,omitempty"` + + // The minimum date included in this list. 
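Container_Bandwidth_GraphInputs and Container_Bandwidth_GraphOutputs form the request/response pair for bandwidth graphing: the caller supplies a time window and network interface, and the result carries the rendered PNG in GraphImage. A sketch of persisting that image, assuming the graph has already been produced elsewhere; the path and function name are illustrative:

// Sketch only; assumes:
//   import (
//       "io/ioutil"
//       "github.com/softlayer/softlayer-go/datatypes"
//   )
// saveGraphImage writes the rendered PNG of a bandwidth graph to the given path.
func saveGraphImage(out datatypes.Container_Bandwidth_GraphOutputs, path string) error {
	if out.GraphImage == nil {
		return nil // nothing was rendered
	}
	return ioutil.WriteFile(path, *out.GraphImage, 0644)
}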
+ StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// no documentation yet +type Container_Billing_Currency_Country struct { + Entity + + // no documentation yet + AvailableCurrencies []Billing_Currency `json:"availableCurrencies,omitempty" xmlrpc:"availableCurrencies,omitempty"` + + // no documentation yet + Country *Locale_Country `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + CurrencyCountryLocales []Billing_Currency_Country `json:"currencyCountryLocales,omitempty" xmlrpc:"currencyCountryLocales,omitempty"` +} + +// no documentation yet +type Container_Billing_Currency_Format struct { + Entity + + // no documentation yet + Currency *string `json:"currency,omitempty" xmlrpc:"currency,omitempty"` + + // no documentation yet + Display *int `json:"display,omitempty" xmlrpc:"display,omitempty"` + + // no documentation yet + Format *string `json:"format,omitempty" xmlrpc:"format,omitempty"` + + // no documentation yet + Locale *string `json:"locale,omitempty" xmlrpc:"locale,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Position *int `json:"position,omitempty" xmlrpc:"position,omitempty"` + + // no documentation yet + Precision *int `json:"precision,omitempty" xmlrpc:"precision,omitempty"` + + // no documentation yet + Script *string `json:"script,omitempty" xmlrpc:"script,omitempty"` + + // no documentation yet + Service *string `json:"service,omitempty" xmlrpc:"service,omitempty"` + + // no documentation yet + Symbol *string `json:"symbol,omitempty" xmlrpc:"symbol,omitempty"` + + // no documentation yet + Tag *string `json:"tag,omitempty" xmlrpc:"tag,omitempty"` + + // no documentation yet + Value *Float64 `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Container_Billing_Info_Ach struct { + Entity + + // no documentation yet + AccountNumber *string `json:"accountNumber,omitempty" xmlrpc:"accountNumber,omitempty"` + + // no documentation yet + AccountType *string `json:"accountType,omitempty" xmlrpc:"accountType,omitempty"` + + // no documentation yet + BankTransitNumber *string `json:"bankTransitNumber,omitempty" xmlrpc:"bankTransitNumber,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + FederalTaxId *string `json:"federalTaxId,omitempty" xmlrpc:"federalTaxId,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + PhoneNumber *string `json:"phoneNumber,omitempty" xmlrpc:"phoneNumber,omitempty"` + + // no documentation yet + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // no documentation yet + Street1 *string `json:"street1,omitempty" xmlrpc:"street1,omitempty"` + + // no documentation yet + Street2 *string `json:"street2,omitempty" xmlrpc:"street2,omitempty"` +} + +// This container is used to provide all the options for [[SoftLayer_Billing_Invoice/emailInvoices|emailInvoices]] in order to have the necessary invoices generated and links sent to the user's email. 
+type Container_Billing_Invoice_Email struct { + Entity + + // Excel Invoices to email + ExcelInvoiceIds []int `json:"excelInvoiceIds,omitempty" xmlrpc:"excelInvoiceIds,omitempty"` + + // PDF Invoice Details to email + PdfDetailedInvoiceIds []int `json:"pdfDetailedInvoiceIds,omitempty" xmlrpc:"pdfDetailedInvoiceIds,omitempty"` + + // PDF Invoices to email + PdfInvoiceIds []int `json:"pdfInvoiceIds,omitempty" xmlrpc:"pdfInvoiceIds,omitempty"` + + // The type of Invoices to be emailed [current|next]. If next is selected, the account id will be used. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// SoftLayer_Container_Billing_Order_Status models an order status. +type Container_Billing_Order_Status struct { + Entity + + // The description of the status. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The keyname of the status. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// Contains user information used to request a manual Catalyst enrollment. +type Container_Catalyst_ManualEnrollmentRequest struct { + Entity + + // Applicant's email address + CustomerEmail *string `json:"customerEmail,omitempty" xmlrpc:"customerEmail,omitempty"` + + // Applicant's first and last name + CustomerName *string `json:"customerName,omitempty" xmlrpc:"customerName,omitempty"` + + // Name of applicant's startup company + StartupName *string `json:"startupName,omitempty" xmlrpc:"startupName,omitempty"` + + // Flag indicating whether (true) or not (false) and applicant is + VentureAffiliationFlag *bool `json:"ventureAffiliationFlag,omitempty" xmlrpc:"ventureAffiliationFlag,omitempty"` + + // Name of the venture capital fund, if any, applicant is affiliated with + VentureFundName *string `json:"ventureFundName,omitempty" xmlrpc:"ventureFundName,omitempty"` +} + +// This container is used to hold country locale information. +type Container_Collection_Locale_CountryCode struct { + Entity + + // no documentation yet + LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"` + + // no documentation yet + ShortName *string `json:"shortName,omitempty" xmlrpc:"shortName,omitempty"` + + // no documentation yet + StateCodes []Container_Collection_Locale_StateCode `json:"stateCodes,omitempty" xmlrpc:"stateCodes,omitempty"` +} + +// This container is used to hold information regarding a state or province. +type Container_Collection_Locale_StateCode struct { + Entity + + // no documentation yet + LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"` + + // no documentation yet + ShortName *string `json:"shortName,omitempty" xmlrpc:"shortName,omitempty"` +} + +// This container is used to hold VAT information. 
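Container_Billing_Invoice_Email simply enumerates which invoice ids should be rendered in each format and whether the current or next invoice set is meant. A minimal sketch of filling it in before handing it to the emailInvoices call (the call itself is not shown; the ids and helper are placeholders):

// Sketch only; assumes: import "github.com/softlayer/softlayer-go/datatypes"
func exampleInvoiceEmailOptions(pdfIds, excelIds []int) datatypes.Container_Billing_Invoice_Email {
	invoiceType := "current" // or "next", in which case the account id is used
	return datatypes.Container_Billing_Invoice_Email{
		PdfInvoiceIds:   pdfIds,
		ExcelInvoiceIds: excelIds,
		Type:            &invoiceType,
	}
}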
+type Container_Collection_Locale_VatCountryCodeAndFormat struct { + Entity + + // no documentation yet + CountryCode *string `json:"countryCode,omitempty" xmlrpc:"countryCode,omitempty"` + + // no documentation yet + Regex *string `json:"regex,omitempty" xmlrpc:"regex,omitempty"` +} + +// no documentation yet +type Container_Disk_Image_Capture_Template struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Summary *string `json:"summary,omitempty" xmlrpc:"summary,omitempty"` + + // no documentation yet + Volumes []Container_Disk_Image_Capture_Template_Volume `json:"volumes,omitempty" xmlrpc:"volumes,omitempty"` +} + +// no documentation yet +type Container_Disk_Image_Capture_Template_Volume struct { + Entity + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Partitions []Container_Disk_Image_Capture_Template_Volume_Partition `json:"partitions,omitempty" xmlrpc:"partitions,omitempty"` +} + +// no documentation yet +type Container_Disk_Image_Capture_Template_Volume_Partition struct { + Entity + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Contact information container for domain registration +type Container_Dns_Domain_Registration_Contact struct { + Entity + + // The street address of the contact. + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // The second line in the address of the contact. + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // The third line in the address of the contact. + Address3 *string `json:"address3,omitempty" xmlrpc:"address3,omitempty"` + + // The city of the contact. + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // The 2-character Country code. (i.e. US) + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // The email address of the contact. + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // The fax number of the contact. + Fax *string `json:"fax,omitempty" xmlrpc:"fax,omitempty"` + + // The first name of the contact. + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // The last name of the contact. + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // The organization name of the contact. + OrganizationName *string `json:"organizationName,omitempty" xmlrpc:"organizationName,omitempty"` + + // The phone number of the contact. + Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"` + + // The postal code of the contact. + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // The state of the contact. + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // The type of contact. The following are the valid types of contacts: + // * admin + // * owner + // * billing + // * tech + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// This container data type contains extended attributes information for a domain of country code TLD. +type Container_Dns_Domain_Registration_ExtendedAttribute struct { + Entity + + // Indicates if this is a child of another extended attribute. + ChildFlag *bool `json:"childFlag,omitempty" xmlrpc:"childFlag,omitempty"` + + // The description of an extended attribute. 
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The name of an extended attribute. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The collection of options for an extended attribute. + Options []Container_Dns_Domain_Registration_ExtendedAttribute_Option `json:"options,omitempty" xmlrpc:"options,omitempty"` + + // Indicates if extended attribute is required. + RequiredFlag *int `json:"requiredFlag,omitempty" xmlrpc:"requiredFlag,omitempty"` + + // User defined indicates that the value is required from outside sources. + UserDefinedFlag *bool `json:"userDefinedFlag,omitempty" xmlrpc:"userDefinedFlag,omitempty"` +} + +// This is the data type that may need to be populated to complete registraton for domains that are country code TLD's. +type Container_Dns_Domain_Registration_ExtendedAttribute_Configuration struct { + Entity + + // The extended attribute name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The extended attribute option value. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// This container data type contains extended attribute options information for a domain of country code TLD. +type Container_Dns_Domain_Registration_ExtendedAttribute_Option struct { + Entity + + // The description of an option. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Extended Attribute that is required for an option. + RequireExtendedAttributes []Container_Dns_Domain_Registration_ExtendedAttribute_Option_Require `json:"requireExtendedAttributes,omitempty" xmlrpc:"requireExtendedAttributes,omitempty"` + + // The title of an option. + Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"` + + // The value of an option. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// This container data type contains the extended attribute name that is required by an extended attribute option. +type Container_Dns_Domain_Registration_ExtendedAttribute_Option_Require struct { + Entity + + // The name of an extended attribute that is required by an extended attribute option. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Information container for domain registration +type Container_Dns_Domain_Registration_Information struct { + Entity + + // The information of the registered domain. + Contacts []Container_Dns_Domain_Registration_Contact `json:"contacts,omitempty" xmlrpc:"contacts,omitempty"` + + // The date that a domain is set to expire. + ExpireDate *Time `json:"expireDate,omitempty" xmlrpc:"expireDate,omitempty"` + + // The list of nameservers for the domain. + Nameservers []Container_Dns_Domain_Registration_Nameserver `json:"nameservers,omitempty" xmlrpc:"nameservers,omitempty"` + + // no documentation yet + RegistryCreateDate *Time `json:"registryCreateDate,omitempty" xmlrpc:"registryCreateDate,omitempty"` + + // no documentation yet + RegistryExpireDate *Time `json:"registryExpireDate,omitempty" xmlrpc:"registryExpireDate,omitempty"` + + // no documentation yet + RegistryUpdateDate *Time `json:"registryUpdateDate,omitempty" xmlrpc:"registryUpdateDate,omitempty"` +} + +// no documentation yet +type Container_Dns_Domain_Registration_List struct { + Entity + + // The domain name. + DomainName *string `json:"domainName,omitempty" xmlrpc:"domainName,omitempty"` + + // Three-character language tag for the IDN domain that you're trying to register. This is only required for IDN domains. 
+ EncodingType *string `json:"encodingType,omitempty" xmlrpc:"encodingType,omitempty"` + + // Data required by the Registry for some country code top level domains (i.e. example.us). + // + // In order to determine if a domain requires extended attributes, use the [[SoftLayer_Dns_Domain_Registration::getExtendedAttributes|domain registration]] service. + ExtendedAttributeConfiguration []Container_Dns_Domain_Registration_ExtendedAttribute_Configuration `json:"extendedAttributeConfiguration,omitempty" xmlrpc:"extendedAttributeConfiguration,omitempty"` + + // The length of the registration period in years. Valid values are 1 – 10. + RegistrationPeriod *int `json:"registrationPeriod,omitempty" xmlrpc:"registrationPeriod,omitempty"` +} + +// Lookup domain container for domain registration +type Container_Dns_Domain_Registration_Lookup struct { + Entity + + // The list of available and taken domain names. + Items []Container_Dns_Domain_Registration_Lookup_Items `json:"items,omitempty" xmlrpc:"items,omitempty"` +} + +// Lookup items container for domain registration +type Container_Dns_Domain_Registration_Lookup_Items struct { + Entity + + // The domain name. + DomainName *string `json:"domainName,omitempty" xmlrpc:"domainName,omitempty"` + + // The status of the domain name, indicating if it is available and can be registered. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// Nameserver container for domain registration +type Container_Dns_Domain_Registration_Nameserver struct { + Entity + + // The list of fully qualified names of the nameserver. + Nameservers []Container_Dns_Domain_Registration_Nameserver_List `json:"nameservers,omitempty" xmlrpc:"nameservers,omitempty"` +} + +// Nameservers list container for domain registration +type Container_Dns_Domain_Registration_Nameserver_List struct { + Entity + + // The IPv4 address of the nameserver. + Ipv4Address *string `json:"ipv4Address,omitempty" xmlrpc:"ipv4Address,omitempty"` + + // The IPv6 address of the nameserver. + Ipv6Address *string `json:"ipv6Address,omitempty" xmlrpc:"ipv6Address,omitempty"` + + // The fully qualified name of the nameserver. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The sort order of the nameserver. + SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"` +} + +// no documentation yet +type Container_Dns_Domain_Registration_Registrant_Verification_StatusDetail struct { + Entity + + // The current status of the verification. + Status *Dns_Domain_Registration_Registrant_Verification_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The date when the domain will be suspended. + VerificationDeadlineDate *Time `json:"verificationDeadlineDate,omitempty" xmlrpc:"verificationDeadlineDate,omitempty"` +} + +// Transfer Information container for domain registration +type Container_Dns_Domain_Registration_Transfer_Information struct { + Entity + + // The reason why a domain is not transferable. + Reason *string `json:"reason,omitempty" xmlrpc:"reason,omitempty"` + + // The registrant email. + RegistrantEmail *string `json:"registrantEmail,omitempty" xmlrpc:"registrantEmail,omitempty"` + + // The status of the latest transfer on the domain. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The date and time of the most recent update to the state of the transfer. + TimeStamp *Time `json:"timeStamp,omitempty" xmlrpc:"timeStamp,omitempty"` + + // Indicates if the domain can be transferred. 
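The lookup containers above only carry raw strings, so here is a minimal, hedged sketch of how a caller might consume them. It assumes the usual github.com/softlayer/softlayer-go/datatypes import path for this vendored package; the "available" substring check is a placeholder, since the exact status values returned by the domain registration lookup service are not spelled out here.

package main

import (
	"fmt"
	"strings"

	"github.com/softlayer/softlayer-go/datatypes"
)

// availableDomains filters a lookup result down to names whose status looks
// registrable. The "available" substring test is illustrative only.
func availableDomains(lookup datatypes.Container_Dns_Domain_Registration_Lookup) []string {
	var names []string
	for _, item := range lookup.Items {
		if item.DomainName == nil || item.Status == nil {
			continue
		}
		if strings.Contains(strings.ToLower(*item.Status), "available") {
			names = append(names, *item.DomainName)
		}
	}
	return names
}

func main() {
	name, status := "example.org", "available" // sample data, not an API response
	lookup := datatypes.Container_Dns_Domain_Registration_Lookup{
		Items: []datatypes.Container_Dns_Domain_Registration_Lookup_Items{
			{DomainName: &name, Status: &status},
		},
	}
	fmt.Println(availableDomains(lookup)) // [example.org]
}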
+ Transferrable *int `json:"transferrable,omitempty" xmlrpc:"transferrable,omitempty"` +} + +// The SoftLayer_Container_Exception data type represents a SoftLayer_Exception. +type Container_Exception struct { + Entity + + // The SoftLayer_Exception class that the error is. + ExceptionClass *string `json:"exceptionClass,omitempty" xmlrpc:"exceptionClass,omitempty"` + + // The exception message. + ExceptionMessage *string `json:"exceptionMessage,omitempty" xmlrpc:"exceptionMessage,omitempty"` +} + +// no documentation yet +type Container_Graph struct { + Entity + + // base units associated with the graph. + BaseUnit *string `json:"baseUnit,omitempty" xmlrpc:"baseUnit,omitempty"` + + // Graph range end datetime. + EndDatetime *string `json:"endDatetime,omitempty" xmlrpc:"endDatetime,omitempty"` + + // The height of the graph image. + Height *int `json:"height,omitempty" xmlrpc:"height,omitempty"` + + // The graph image. + Image *[]byte `json:"image,omitempty" xmlrpc:"image,omitempty"` + + // The graph interval in seconds. + Interval *int `json:"interval,omitempty" xmlrpc:"interval,omitempty"` + + // Metric types associated with the graph. + Metrics []Container_Metric_Data_Type `json:"metrics,omitempty" xmlrpc:"metrics,omitempty"` + + // Indicator to control whether the graph data is normalized. + NormalizeFlag *[]byte `json:"normalizeFlag,omitempty" xmlrpc:"normalizeFlag,omitempty"` + + // The options used to control the graph appearance. + Options []Container_Graph_Option `json:"options,omitempty" xmlrpc:"options,omitempty"` + + // A collection of graph plots. + Plots []Container_Graph_Plot `json:"plots,omitempty" xmlrpc:"plots,omitempty"` + + // option to not return the image. + ReturnImage *bool `json:"returnImage,omitempty" xmlrpc:"returnImage,omitempty"` + + // Graph range start datetime. + StartDatetime *string `json:"startDatetime,omitempty" xmlrpc:"startDatetime,omitempty"` + + // The name of the template to use; may be null. + Template *string `json:"template,omitempty" xmlrpc:"template,omitempty"` + + // The title of the graph image. + Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"` + + // The width of the graph image. + Width *int `json:"width,omitempty" xmlrpc:"width,omitempty"` +} + +// no documentation yet +type Container_Graph_Option struct { + Entity + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Container_Graph_Plot struct { + Entity + + // no documentation yet + Data []Container_Graph_Plot_Coordinate `json:"data,omitempty" xmlrpc:"data,omitempty"` + + // no documentation yet + Metric *Container_Metric_Data_Type `json:"metric,omitempty" xmlrpc:"metric,omitempty"` + + // no documentation yet + Unit *string `json:"unit,omitempty" xmlrpc:"unit,omitempty"` +} + +// no documentation yet +type Container_Graph_Plot_Coordinate struct { + Entity + + // no documentation yet + XValue *Float64 `json:"xValue,omitempty" xmlrpc:"xValue,omitempty"` + + // no documentation yet + YValue *Float64 `json:"yValue,omitempty" xmlrpc:"yValue,omitempty"` + + // no documentation yet + ZValue *Float64 `json:"zValue,omitempty" xmlrpc:"zValue,omitempty"` +} + +// The hardware configuration container is used to provide configuration options for servers. +// +// Each configuration option will include both an itemPrice and a template. 
+// +// The itemPrice value will provide hourly and monthly costs (if either are applicable), and a description of the option. +// +// The template will provide a fragment of the request with the properties and values that must be sent when creating a server with the option. +// +// The [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] method returns this data structure. +// +// +type Container_Hardware_Configuration struct { + Entity + + // + //
    + // Available datacenter options. + // + // + // The datacenter.name value in the template represents which datacenter the server will be provisioned in. + //
    + Datacenters []Container_Hardware_Configuration_Option `json:"datacenters,omitempty" xmlrpc:"datacenters,omitempty"` + + // + //
    + // Available fixed configuration preset options. + // + // + // The fixedConfigurationPreset.keyName value in the template is an identifier for a particular fixed configuration. When provided exactly as shown in the template, that fixed configuration will be used. + // + // + // When providing a fixedConfigurationPreset.keyName while ordering a server the processors and hardDrives configuration options cannot be used. + //
    + FixedConfigurationPresets []Container_Hardware_Configuration_Option `json:"fixedConfigurationPresets,omitempty" xmlrpc:"fixedConfigurationPresets,omitempty"` + + // + //
    + // Available hard drive options. + // + // + // A server will have at least one hard drive. + // + // + // The hardDrives.capacity value in the template represents the size, in gigabytes, of the disk. + //
    + HardDrives []Container_Hardware_Configuration_Option `json:"hardDrives,omitempty" xmlrpc:"hardDrives,omitempty"` + + // + //
    + // Available network component options. + // + // + // The networkComponent.maxSpeed value in the template represents the link speed, in megabits per second, of the network connections for a server. + //
    + NetworkComponents []Container_Hardware_Configuration_Option `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"` + + // + //
    + // Available operating system options. + // + // + // The operatingSystemReferenceCode value in the template is an identifier for a particular operating system. When provided exactly as shown in the template, that operating system will be used. + // + // + // A reference code is structured as three tokens separated by underscores. The first token represents the product, the second is the version of the product, and the third is whether the OS is 32 or 64bit. + // + // + // When providing an operatingSystemReferenceCode while ordering a server the only token required to match exactly is the product. The version token may be given as 'LATEST', else it will require an exact match as well. When the bits token is not provided, 64 bits will be assumed. + // + // + // Providing the value of 'LATEST' for a version will select the latest release of that product for the operating system. As this may change over time, you should be sure that the release version is irrelevant for your applications. + // + // + // For Windows based operating systems the version will represent both the release version (2008, 2012, etc) and the edition (Standard, Enterprise, etc). For all other operating systems the version will represent the major version (Centos 6, Ubuntu 12, etc) of that operating system, minor versions are represented in few reference codes where they are significant. + //
    + OperatingSystems []Container_Hardware_Configuration_Option `json:"operatingSystems,omitempty" xmlrpc:"operatingSystems,omitempty"` + + // + //
    + // Available processor options. + // + // + // The processorCoreAmount value in the template represents the number of cores allocated to the server. + // The memoryCapacity value in the template represents the amount of memory, in gigabytes, allocated to the server. + //
    + Processors []Container_Hardware_Configuration_Option `json:"processors,omitempty" xmlrpc:"processors,omitempty"` +} + +// An option found within a [[SoftLayer_Container_Hardware_Configuration (type)]] structure. +type Container_Hardware_Configuration_Option struct { + Entity + + // + // Provides hourly and monthly costs (if either are applicable), and a description of the option. + ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"` + + // + // Provides a description of a fixed configuration preset with monthly and hourly costs. + Preset *Product_Package_Preset `json:"preset,omitempty" xmlrpc:"preset,omitempty"` + + // + // Provides a fragment of the request with the properties and values that must be sent when creating a server with the option. + Template *Hardware `json:"template,omitempty" xmlrpc:"template,omitempty"` +} + +// no documentation yet +type Container_Hardware_MassUpdate struct { + Entity + + // The hardwares updated by the mass update tool + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // Errors encountered while mass updating hardwares + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // The hardwares that failed to update + SuccessFlag *string `json:"successFlag,omitempty" xmlrpc:"successFlag,omitempty"` +} + +// no documentation yet +type Container_Hardware_Pool_Details struct { + Entity + + // no documentation yet + PendingOrders *int `json:"pendingOrders,omitempty" xmlrpc:"pendingOrders,omitempty"` + + // no documentation yet + PendingTransactions *int `json:"pendingTransactions,omitempty" xmlrpc:"pendingTransactions,omitempty"` + + // no documentation yet + PoolDescription *string `json:"poolDescription,omitempty" xmlrpc:"poolDescription,omitempty"` + + // no documentation yet + PoolKeyName *string `json:"poolKeyName,omitempty" xmlrpc:"poolKeyName,omitempty"` + + // no documentation yet + PoolName *string `json:"poolName,omitempty" xmlrpc:"poolName,omitempty"` + + // no documentation yet + Routers []Container_Hardware_Pool_Details_Router `json:"routers,omitempty" xmlrpc:"routers,omitempty"` + + // no documentation yet + TotalHardware *int `json:"totalHardware,omitempty" xmlrpc:"totalHardware,omitempty"` + + // no documentation yet + TotalInventoryHardware *int `json:"totalInventoryHardware,omitempty" xmlrpc:"totalInventoryHardware,omitempty"` + + // no documentation yet + TotalProvisionedHardware *int `json:"totalProvisionedHardware,omitempty" xmlrpc:"totalProvisionedHardware,omitempty"` + + // no documentation yet + TotalTestedHardware *int `json:"totalTestedHardware,omitempty" xmlrpc:"totalTestedHardware,omitempty"` + + // no documentation yet + TotalTestingHardware *int `json:"totalTestingHardware,omitempty" xmlrpc:"totalTestingHardware,omitempty"` +} + +// no documentation yet +type Container_Hardware_Pool_Details_Router struct { + Entity + + // no documentation yet + PoolThreshold *int `json:"poolThreshold,omitempty" xmlrpc:"poolThreshold,omitempty"` + + // no documentation yet + RouterId *int `json:"routerId,omitempty" xmlrpc:"routerId,omitempty"` + + // no documentation yet + RouterName *string `json:"routerName,omitempty" xmlrpc:"routerName,omitempty"` + + // no documentation yet + TotalHardware *int `json:"totalHardware,omitempty" xmlrpc:"totalHardware,omitempty"` + + // no documentation yet + TotalInventoryHardware *int `json:"totalInventoryHardware,omitempty" xmlrpc:"totalInventoryHardware,omitempty"` + + // no documentation yet + TotalProvisionedHardware 
*int `json:"totalProvisionedHardware,omitempty" xmlrpc:"totalProvisionedHardware,omitempty"` + + // no documentation yet + TotalTestedHardware *int `json:"totalTestedHardware,omitempty" xmlrpc:"totalTestedHardware,omitempty"` + + // no documentation yet + TotalTestingHardware *int `json:"totalTestingHardware,omitempty" xmlrpc:"totalTestingHardware,omitempty"` +} + +// The SoftLayer_Container_Hardware_Server_Configuration data type contains information relating to a server's item price information, and hard drive partition information. +type Container_Hardware_Server_Configuration struct { + Entity + + // A flag indicating that the server will be moved into the spare pool after an Operating system reload. + AddToSparePoolAfterOsReload *int `json:"addToSparePoolAfterOsReload,omitempty" xmlrpc:"addToSparePoolAfterOsReload,omitempty"` + + // The customer provision uri will be used to download and execute a customer defined script on the host at the end of provisioning. + CustomProvisionScriptUri *string `json:"customProvisionScriptUri,omitempty" xmlrpc:"customProvisionScriptUri,omitempty"` + + // A flag indicating that the primary drive will be converted to a portable storage volume during an Operating System reload. + DriveRetentionFlag *bool `json:"driveRetentionFlag,omitempty" xmlrpc:"driveRetentionFlag,omitempty"` + + // A flag indicating that all data will be erased from drives during an Operating System reload. + EraseHardDrives *int `json:"eraseHardDrives,omitempty" xmlrpc:"eraseHardDrives,omitempty"` + + // The hard drive partitions that a server can be partitioned with. + HardDrives []Hardware_Component `json:"hardDrives,omitempty" xmlrpc:"hardDrives,omitempty"` + + // An Image Template ID [[SoftLayer_Virtual_Guest_Block_Device_Template_Group]] that will be deployed to the host. If provided no item prices are required. + ImageTemplateId *int `json:"imageTemplateId,omitempty" xmlrpc:"imageTemplateId,omitempty"` + + // The item prices that a server can be configured with. + ItemPrices []Product_Item_Price `json:"itemPrices,omitempty" xmlrpc:"itemPrices,omitempty"` + + // A flag indicating that the provision should use LVM for all logical drives. + LvmFlag *int `json:"lvmFlag,omitempty" xmlrpc:"lvmFlag,omitempty"` + + // A flag indicating that the remote management cards password will be reset. + ResetIpmiPassword *int `json:"resetIpmiPassword,omitempty" xmlrpc:"resetIpmiPassword,omitempty"` + + // The token of the requesting service. Do not set. + ServiceToken *string `json:"serviceToken,omitempty" xmlrpc:"serviceToken,omitempty"` + + // IDs to SoftLayer_Security_Ssh_Key objects on the current account which will be added to the server for authentication. SSH Keys will not be added to servers with Microsoft Windows. + SshKeyIds []int `json:"sshKeyIds,omitempty" xmlrpc:"sshKeyIds,omitempty"` + + // A flag indicating that the BIOS will be updated when installing the operating system. + UpgradeBios *int `json:"upgradeBios,omitempty" xmlrpc:"upgradeBios,omitempty"` + + // A flag indicating that the firmware on all hard drives will be updated when installing the operating system. + UpgradeHardDriveFirmware *int `json:"upgradeHardDriveFirmware,omitempty" xmlrpc:"upgradeHardDriveFirmware,omitempty"` +} + +// The SoftLayer_Container_Hardware_Server_Details data type contains information relating to a server's component information, network information, and software information. +type Container_Hardware_Server_Details struct { + Entity + + // The components that belong to a piece of hardware. 
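Since the comments above describe how [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] pairs each option with an itemPrice and a template, here is a minimal, hedged sketch of reading those options through the generated client. It assumes the standard softlayer-go session and services packages, that the generated Hardware service exposes GetCreateObjectOptions, and that the template fields dereferenced below are populated; the credentials are placeholders.

package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	// Placeholder credentials; a real caller would read these from config.
	sess := session.New("my-user", "my-api-key")
	hardwareService := services.GetHardwareService(sess)

	// Container_Hardware_Configuration, as defined above.
	cfg, err := hardwareService.GetCreateObjectOptions()
	if err != nil {
		panic(err)
	}

	// Each option carries a template fragment; print the values a create
	// request would send back (datacenter names and OS reference codes).
	for _, opt := range cfg.Datacenters {
		if opt.Template != nil && opt.Template.Datacenter != nil && opt.Template.Datacenter.Name != nil {
			fmt.Println("datacenter:", *opt.Template.Datacenter.Name)
		}
	}
	for _, opt := range cfg.OperatingSystems {
		if opt.Template != nil && opt.Template.OperatingSystemReferenceCode != nil {
			fmt.Println("os:", *opt.Template.OperatingSystemReferenceCode)
		}
	}
}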
+ Components []Hardware_Component `json:"components,omitempty" xmlrpc:"components,omitempty"` + + // The network components that belong to a piece of hardware. + NetworkComponents []Network_Component `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"` + + // The software that belong to a piece of hardware. + Software []Software_Component `json:"software,omitempty" xmlrpc:"software,omitempty"` +} + +// no documentation yet +type Container_Hardware_Server_Request struct { + Entity + + // no documentation yet + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // no documentation yet + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // no documentation yet + SuccessFlag *bool `json:"successFlag,omitempty" xmlrpc:"successFlag,omitempty"` +} + +// SoftLayer_Container_KnowledgeLayer_QuestionAnswer models a single question and answer pair from SoftLayer's KnowledgeLayer knowledge base. SoftLayer's backend network interfaces with the KnowledgeLayer to recommend helpful articles when support tickets are created. +type Container_KnowledgeLayer_QuestionAnswer struct { + Entity + + // The answer to a question asked on the SoftLayer KnowledgeLayer. + Answer *string `json:"answer,omitempty" xmlrpc:"answer,omitempty"` + + // The link to a question asked on the SoftLayer KnowledgeLayer. + Link *string `json:"link,omitempty" xmlrpc:"link,omitempty"` + + // A question asked on the SoftLayer KnowledgeLayer. + Question *string `json:"question,omitempty" xmlrpc:"question,omitempty"` +} + +// no documentation yet +type Container_Message struct { + Entity + + // no documentation yet + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // no documentation yet + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Container_Metric_Data_Type struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + SummaryType *string `json:"summaryType,omitempty" xmlrpc:"summaryType,omitempty"` + + // no documentation yet + Unit *string `json:"unit,omitempty" xmlrpc:"unit,omitempty"` +} + +// SoftLayer_Container_Metric_Tracking_Object_Details This container is a parent class for detailing diverse metrics. +type Container_Metric_Tracking_Object_Details struct { + Entity + + // The name that best describes the metric being collected. + MetricName *string `json:"metricName,omitempty" xmlrpc:"metricName,omitempty"` +} + +// SoftLayer_Container_Metric_Tracking_Object_Summary This container is a parent class for summarizing diverse metrics. +type Container_Metric_Tracking_Object_Summary struct { + Entity + + // The name that best describes the metric being collected. + MetricName *string `json:"metricName,omitempty" xmlrpc:"metricName,omitempty"` +} + +// SoftLayer_Container_Metric_Tracking_Object_Virtual_Host_Details This container details a virtual host's metric data. +type Container_Metric_Tracking_Object_Virtual_Host_Details struct { + Container_Metric_Tracking_Object_Details + + // The day this metric was collected. + Day *Time `json:"day,omitempty" xmlrpc:"day,omitempty"` + + // The maximum number of guests hosted by this platform for the given day. + MaxInstances *int `json:"maxInstances,omitempty" xmlrpc:"maxInstances,omitempty"` + + // The maximum amount of memory utilized by this platform for the given day. 
+ MaxMemoryUsage *int `json:"maxMemoryUsage,omitempty" xmlrpc:"maxMemoryUsage,omitempty"` + + // The mean number of guests hosted by this platform for the given day. + MeanInstances *Float64 `json:"meanInstances,omitempty" xmlrpc:"meanInstances,omitempty"` + + // The mean amount of memory utilized by this platform for the given day. + MeanMemoryUsage *Float64 `json:"meanMemoryUsage,omitempty" xmlrpc:"meanMemoryUsage,omitempty"` + + // The minimum number of guests hosted by this platform for the given day. + MinInstances *int `json:"minInstances,omitempty" xmlrpc:"minInstances,omitempty"` + + // The minimum amount of memory utilized by this platform for the given day. + MinMemoryUsage *int `json:"minMemoryUsage,omitempty" xmlrpc:"minMemoryUsage,omitempty"` +} + +// SoftLayer_Container_Metric_Tracking_Object_Virtual_Host_Summary This container summarizes a virtual host's metric data. +type Container_Metric_Tracking_Object_Virtual_Host_Summary struct { + Container_Metric_Tracking_Object_Summary + + // The average amount of memory usage thus far in this billing cycle. + AvgMemoryUsageInBillingCycle *int `json:"avgMemoryUsageInBillingCycle,omitempty" xmlrpc:"avgMemoryUsageInBillingCycle,omitempty"` + + // Current bill cycle end date. + CurrentBillCycleEnd *Time `json:"currentBillCycleEnd,omitempty" xmlrpc:"currentBillCycleEnd,omitempty"` + + // Current bill cycle start date. + CurrentBillCycleStart *Time `json:"currentBillCycleStart,omitempty" xmlrpc:"currentBillCycleStart,omitempty"` + + // The last count of instances this platform was hosting. + LastInstanceCount *int `json:"lastInstanceCount,omitempty" xmlrpc:"lastInstanceCount,omitempty"` + + // The last amount of memory this platform was using. + LastMemoryUsageAmount *int `json:"lastMemoryUsageAmount,omitempty" xmlrpc:"lastMemoryUsageAmount,omitempty"` + + // The last time this virtual host was polled for metrics. + LastPollTime *Time `json:"lastPollTime,omitempty" xmlrpc:"lastPollTime,omitempty"` + + // The max number of instances hosted thus far in this billing cycle. + MaxInstanceInBillingCycle *int `json:"maxInstanceInBillingCycle,omitempty" xmlrpc:"maxInstanceInBillingCycle,omitempty"` + + // Previous bill cycle end date. + PreviousBillCycleEnd *Time `json:"previousBillCycleEnd,omitempty" xmlrpc:"previousBillCycleEnd,omitempty"` + + // Previous bill cycle start date. + PreviousBillCycleStart *Time `json:"previousBillCycleStart,omitempty" xmlrpc:"previousBillCycleStart,omitempty"` + + // This virtual hosting platform name. + VirtualPlatformName *string `json:"virtualPlatformName,omitempty" xmlrpc:"virtualPlatformName,omitempty"` +} + +// The SoftLayer_Container_Monitoring_Alarm_History data type contains information relating to SoftLayer monitoring alarm history. +type Container_Monitoring_Alarm_History struct { + Entity + + // Account ID that this alarm belongs to + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // ID of the monitoring agent that triggered this alarm + AgentId *int `json:"agentId,omitempty" xmlrpc:"agentId,omitempty"` + + // Alarm ID + AlarmId *string `json:"alarmId,omitempty" xmlrpc:"alarmId,omitempty"` + + // Time that an alarm was closed. 
+ ClosedDate *Time `json:"closedDate,omitempty" xmlrpc:"closedDate,omitempty"` + + // Time that an alarm was triggered + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Alarm message + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // Robot ID + RobotId *int `json:"robotId,omitempty" xmlrpc:"robotId,omitempty"` + + // Severity of an alarm + Severity *string `json:"severity,omitempty" xmlrpc:"severity,omitempty"` +} + +// SoftLayer_Container_Monitoring_Graph_Outputs models a single outbound object for a graph of given data sets. +type Container_Monitoring_Graph_Outputs struct { + Entity + + // The maximum date included in this graph. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // Error message encountered during graphing + GraphError *string `json:"graphError,omitempty" xmlrpc:"graphError,omitempty"` + + // The raw PNG binary data to be displayed once the graph is drawn. + GraphImage *[]byte `json:"graphImage,omitempty" xmlrpc:"graphImage,omitempty"` + + // The minimum date included in this graph. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// This object holds authentication data to a server. +type Container_Network_Authentication_Data struct { + Entity + + // The name of a host + Host *string `json:"host,omitempty" xmlrpc:"host,omitempty"` + + // The authentication password + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // The port number + Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"` + + // The type of network protocol. This can be ftp, ssh and so on. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The authentication username + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// SoftLayer_Container_Network_Bandwidth_Data_Summary models an interface's overall bandwidth usage during it's current billing cycle. +type Container_Network_Bandwidth_Data_Summary struct { + Entity + + // The amount of bandwidth a server has allocated to it in it's current billing period. + AllowedUsage *Float64 `json:"allowedUsage,omitempty" xmlrpc:"allowedUsage,omitempty"` + + // The amount of bandwidth that a server has used within it's current billing period. + EstimatedUsage *Float64 `json:"estimatedUsage,omitempty" xmlrpc:"estimatedUsage,omitempty"` + + // The amount of bandwidth a server is projected to use within its billing period, based on it's current usage. + ProjectedUsage *Float64 `json:"projectedUsage,omitempty" xmlrpc:"projectedUsage,omitempty"` + + // The unit of measurement used in a bandwidth data summary. + UsageUnits *string `json:"usageUnits,omitempty" xmlrpc:"usageUnits,omitempty"` +} + +// SoftLayer_Container_Network_Bandwidth_Version1_Usage models an hourly bandwidth record. +type Container_Network_Bandwidth_Version1_Usage struct { + Entity + + // The amount of incoming bandwidth that a server has used within the hour of the recordedDate. + IncomingAmount *Float64 `json:"incomingAmount,omitempty" xmlrpc:"incomingAmount,omitempty"` + + // The amount of outgoing bandwidth that a server has used within the hour of the recordedDate. 
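As a small worked example of the bandwidth summary container defined above (AllowedUsage, EstimatedUsage, ProjectedUsage, UsageUnits), the hedged sketch below flags a projected overage. It only assumes the vendored datatypes import path and that datatypes.Float64 converts to and from a plain float64.

package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
)

// projectedOverage returns how far the projected usage exceeds the allocation,
// or zero when the summary is incomplete or still under the allocation.
func projectedOverage(s datatypes.Container_Network_Bandwidth_Data_Summary) float64 {
	if s.AllowedUsage == nil || s.ProjectedUsage == nil {
		return 0
	}
	over := float64(*s.ProjectedUsage) - float64(*s.AllowedUsage)
	if over < 0 {
		return 0
	}
	return over
}

func main() {
	allowed, projected := datatypes.Float64(20000), datatypes.Float64(21500) // sample values
	units := "GB"
	summary := datatypes.Container_Network_Bandwidth_Data_Summary{
		AllowedUsage:   &allowed,
		ProjectedUsage: &projected,
		UsageUnits:     &units,
	}
	fmt.Printf("projected overage: %.1f %s\n", projectedOverage(summary), units) // 1500.0 GB
}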
+ OutgoingAmount *Float64 `json:"outgoingAmount,omitempty" xmlrpc:"outgoingAmount,omitempty"` + + // The date and time that the bandwidth was used by a piece of hardware + RecordedDate *Time `json:"recordedDate,omitempty" xmlrpc:"recordedDate,omitempty"` +} + +// no documentation yet +type Container_Network_CdnMarketplace_Configuration_Cache_Purge struct { + Entity + + // no documentation yet + Date *string `json:"date,omitempty" xmlrpc:"date,omitempty"` + + // no documentation yet + Path *string `json:"path,omitempty" xmlrpc:"path,omitempty"` + + // no documentation yet + Saved *string `json:"saved,omitempty" xmlrpc:"saved,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// no documentation yet +type Container_Network_CdnMarketplace_Configuration_Input struct { + Entity + + // no documentation yet + BucketName *string `json:"bucketName,omitempty" xmlrpc:"bucketName,omitempty"` + + // no documentation yet + CacheKeyQueryRule *string `json:"cacheKeyQueryRule,omitempty" xmlrpc:"cacheKeyQueryRule,omitempty"` + + // no documentation yet + CertificateType *string `json:"certificateType,omitempty" xmlrpc:"certificateType,omitempty"` + + // no documentation yet + Cname *string `json:"cname,omitempty" xmlrpc:"cname,omitempty"` + + // no documentation yet + Domain *string `json:"domain,omitempty" xmlrpc:"domain,omitempty"` + + // no documentation yet + FileExtension *string `json:"fileExtension,omitempty" xmlrpc:"fileExtension,omitempty"` + + // no documentation yet + GeoblockingRule *Network_CdnMarketplace_Configuration_Behavior_Geoblocking `json:"geoblockingRule,omitempty" xmlrpc:"geoblockingRule,omitempty"` + + // no documentation yet + Header *string `json:"header,omitempty" xmlrpc:"header,omitempty"` + + // no documentation yet + HttpPort *int `json:"httpPort,omitempty" xmlrpc:"httpPort,omitempty"` + + // no documentation yet + HttpsPort *int `json:"httpsPort,omitempty" xmlrpc:"httpsPort,omitempty"` + + // Used by the following method: updateOriginPath(). This property will store the path of the path record to be saved. The $path attribute stores the new path. 
+ OldPath *string `json:"oldPath,omitempty" xmlrpc:"oldPath,omitempty"` + + // no documentation yet + Origin *string `json:"origin,omitempty" xmlrpc:"origin,omitempty"` + + // no documentation yet + OriginType *string `json:"originType,omitempty" xmlrpc:"originType,omitempty"` + + // no documentation yet + Path *string `json:"path,omitempty" xmlrpc:"path,omitempty"` + + // no documentation yet + PerformanceConfiguration *string `json:"performanceConfiguration,omitempty" xmlrpc:"performanceConfiguration,omitempty"` + + // no documentation yet + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // no documentation yet + RespectHeaders *string `json:"respectHeaders,omitempty" xmlrpc:"respectHeaders,omitempty"` + + // no documentation yet + ServeStale *string `json:"serveStale,omitempty" xmlrpc:"serveStale,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // no documentation yet + UniqueId *string `json:"uniqueId,omitempty" xmlrpc:"uniqueId,omitempty"` + + // no documentation yet + VendorName *string `json:"vendorName,omitempty" xmlrpc:"vendorName,omitempty"` +} + +// no documentation yet +type Container_Network_CdnMarketplace_Configuration_Mapping struct { + Entity + + // no documentation yet + BucketName *string `json:"bucketName,omitempty" xmlrpc:"bucketName,omitempty"` + + // no documentation yet + CacheKeyQueryRule *string `json:"cacheKeyQueryRule,omitempty" xmlrpc:"cacheKeyQueryRule,omitempty"` + + // no documentation yet + CertificateType *string `json:"certificateType,omitempty" xmlrpc:"certificateType,omitempty"` + + // no documentation yet + Cname *string `json:"cname,omitempty" xmlrpc:"cname,omitempty"` + + // no documentation yet + Domain *string `json:"domain,omitempty" xmlrpc:"domain,omitempty"` + + // no documentation yet + FileExtension *string `json:"fileExtension,omitempty" xmlrpc:"fileExtension,omitempty"` + + // no documentation yet + Header *string `json:"header,omitempty" xmlrpc:"header,omitempty"` + + // no documentation yet + HttpPort *int `json:"httpPort,omitempty" xmlrpc:"httpPort,omitempty"` + + // no documentation yet + HttpsChallengeRedirectUrl *string `json:"httpsChallengeRedirectUrl,omitempty" xmlrpc:"httpsChallengeRedirectUrl,omitempty"` + + // no documentation yet + HttpsChallengeResponse *string `json:"httpsChallengeResponse,omitempty" xmlrpc:"httpsChallengeResponse,omitempty"` + + // no documentation yet + HttpsChallengeUrl *string `json:"httpsChallengeUrl,omitempty" xmlrpc:"httpsChallengeUrl,omitempty"` + + // no documentation yet + HttpsPort *int `json:"httpsPort,omitempty" xmlrpc:"httpsPort,omitempty"` + + // no documentation yet + OriginHost *string `json:"originHost,omitempty" xmlrpc:"originHost,omitempty"` + + // no documentation yet + OriginType *string `json:"originType,omitempty" xmlrpc:"originType,omitempty"` + + // no documentation yet + Path *string `json:"path,omitempty" xmlrpc:"path,omitempty"` + + // no documentation yet + PerformanceConfiguration *string `json:"performanceConfiguration,omitempty" xmlrpc:"performanceConfiguration,omitempty"` + + // no documentation yet + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // no documentation yet + RespectHeaders *bool `json:"respectHeaders,omitempty" xmlrpc:"respectHeaders,omitempty"` + + // no documentation yet + ServeStale *bool `json:"serveStale,omitempty" xmlrpc:"serveStale,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" 
xmlrpc:"status,omitempty"` + + // no documentation yet + UniqueId *string `json:"uniqueId,omitempty" xmlrpc:"uniqueId,omitempty"` + + // no documentation yet + VendorName *string `json:"vendorName,omitempty" xmlrpc:"vendorName,omitempty"` +} + +// no documentation yet +type Container_Network_CdnMarketplace_Configuration_Mapping_Path struct { + Entity + + // no documentation yet + BucketName *string `json:"bucketName,omitempty" xmlrpc:"bucketName,omitempty"` + + // no documentation yet + CacheKeyQueryRule *string `json:"cacheKeyQueryRule,omitempty" xmlrpc:"cacheKeyQueryRule,omitempty"` + + // no documentation yet + FileExtension *string `json:"fileExtension,omitempty" xmlrpc:"fileExtension,omitempty"` + + // no documentation yet + Header *string `json:"header,omitempty" xmlrpc:"header,omitempty"` + + // no documentation yet + HttpPort *int `json:"httpPort,omitempty" xmlrpc:"httpPort,omitempty"` + + // no documentation yet + HttpsPort *int `json:"httpsPort,omitempty" xmlrpc:"httpsPort,omitempty"` + + // no documentation yet + MappingUniqueId *string `json:"mappingUniqueId,omitempty" xmlrpc:"mappingUniqueId,omitempty"` + + // no documentation yet + Origin *string `json:"origin,omitempty" xmlrpc:"origin,omitempty"` + + // no documentation yet + OriginType *string `json:"originType,omitempty" xmlrpc:"originType,omitempty"` + + // no documentation yet + Path *string `json:"path,omitempty" xmlrpc:"path,omitempty"` + + // no documentation yet + PerformanceConfiguration *string `json:"performanceConfiguration,omitempty" xmlrpc:"performanceConfiguration,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// no documentation yet +type Container_Network_CdnMarketplace_Metrics struct { + Entity + + // no documentation yet + Names []string `json:"names,omitempty" xmlrpc:"names,omitempty"` + + // no documentation yet + Percentage []string `json:"percentage,omitempty" xmlrpc:"percentage,omitempty"` + + // no documentation yet + Time []int `json:"time,omitempty" xmlrpc:"time,omitempty"` + + // no documentation yet + Totals []string `json:"totals,omitempty" xmlrpc:"totals,omitempty"` + + // no documentation yet + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // no documentation yet + Xaxis []string `json:"xaxis,omitempty" xmlrpc:"xaxis,omitempty"` + + // no documentation yet + Yaxis1 []string `json:"yaxis1,omitempty" xmlrpc:"yaxis1,omitempty"` + + // no documentation yet + Yaxis10 []string `json:"yaxis10,omitempty" xmlrpc:"yaxis10,omitempty"` + + // no documentation yet + Yaxis11 []string `json:"yaxis11,omitempty" xmlrpc:"yaxis11,omitempty"` + + // no documentation yet + Yaxis12 []string `json:"yaxis12,omitempty" xmlrpc:"yaxis12,omitempty"` + + // no documentation yet + Yaxis13 []string `json:"yaxis13,omitempty" xmlrpc:"yaxis13,omitempty"` + + // no documentation yet + Yaxis14 []string `json:"yaxis14,omitempty" xmlrpc:"yaxis14,omitempty"` + + // no documentation yet + Yaxis15 []string `json:"yaxis15,omitempty" xmlrpc:"yaxis15,omitempty"` + + // no documentation yet + Yaxis16 []string `json:"yaxis16,omitempty" xmlrpc:"yaxis16,omitempty"` + + // no documentation yet + Yaxis17 []string `json:"yaxis17,omitempty" xmlrpc:"yaxis17,omitempty"` + + // no documentation yet + Yaxis18 []string `json:"yaxis18,omitempty" xmlrpc:"yaxis18,omitempty"` + + // no documentation yet + Yaxis19 []string `json:"yaxis19,omitempty" xmlrpc:"yaxis19,omitempty"` + + // no documentation yet + Yaxis2 []string `json:"yaxis2,omitempty" 
xmlrpc:"yaxis2,omitempty"` + + // no documentation yet + Yaxis20 []string `json:"yaxis20,omitempty" xmlrpc:"yaxis20,omitempty"` + + // no documentation yet + Yaxis3 []string `json:"yaxis3,omitempty" xmlrpc:"yaxis3,omitempty"` + + // no documentation yet + Yaxis4 []string `json:"yaxis4,omitempty" xmlrpc:"yaxis4,omitempty"` + + // no documentation yet + Yaxis5 []string `json:"yaxis5,omitempty" xmlrpc:"yaxis5,omitempty"` + + // no documentation yet + Yaxis6 []string `json:"yaxis6,omitempty" xmlrpc:"yaxis6,omitempty"` + + // no documentation yet + Yaxis7 []string `json:"yaxis7,omitempty" xmlrpc:"yaxis7,omitempty"` + + // no documentation yet + Yaxis8 []string `json:"yaxis8,omitempty" xmlrpc:"yaxis8,omitempty"` + + // no documentation yet + Yaxis9 []string `json:"yaxis9,omitempty" xmlrpc:"yaxis9,omitempty"` +} + +// no documentation yet +type Container_Network_CdnMarketplace_Vendor struct { + Entity + + // no documentation yet + FeatureSummary *string `json:"featureSummary,omitempty" xmlrpc:"featureSummary,omitempty"` + + // no documentation yet + Features *string `json:"features,omitempty" xmlrpc:"features,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // no documentation yet + VendorName *string `json:"vendorName,omitempty" xmlrpc:"vendorName,omitempty"` +} + +// SoftLayer_Container_Network_ContentDelivery_Authentication_Directory represents a token authentication directory on your CDN FTP or on your origin server. +type Container_Network_ContentDelivery_Authentication_Directory struct { + Entity + + // The date that a token authentication directory was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The name of a directory or a file within a directory listing. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The type of platform that a token authentication directory is defined for. Possible types are HTTP Large, HTTP Small, Flash and Windows Media + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// This container is used for CDN content authentication service. +type Container_Network_ContentDelivery_Authentication_Parameter struct { + Entity + + // A CDN account name + CdnAccountName *string `json:"cdnAccountName,omitempty" xmlrpc:"cdnAccountName,omitempty"` + + // A client IP address + ClientIp *string `json:"clientIp,omitempty" xmlrpc:"clientIp,omitempty"` + + // A client referrer information + Referrer *string `json:"referrer,omitempty" xmlrpc:"referrer,omitempty"` + + // A source URL + SourceUrl *string `json:"sourceUrl,omitempty" xmlrpc:"sourceUrl,omitempty"` + + // An authentication token string + Token *string `json:"token,omitempty" xmlrpc:"token,omitempty"` +} + +// CDN supports the content authentication service. With the content authentication service, customers can control access to their contents. There are several scenarios where this authentication capability could be useful. Websites can prevent other rogue websites from linking to their videos. Content owners can prevent users from passing around http links, thus forcing them to login to view contents. It is also possible to authenticate via the client IP address. Referrer information is also checked if provided by a client's browser. servers will invoke a web service method to validate a content authentication token. +// +// CDN uses the default authentication web service provided by SoftLayer to validate a token. 
A customer can use their own implementation of the token authentication web service by using [[SoftLayer_Network_ContentDelivery_Account::setAuthenticationServiceEndpoint|setAuthenticationServiceEndpoint]] method. +// +// This container class holds the token validation web service endpoint information. CDN supports 3 different protocols: HTTP, RTMP (streaming Flash), and MMS (streaming Windows Media) +type Container_Network_ContentDelivery_Authentication_ServiceEndpoint struct { + Entity + + // The authentication web service endpoint that CDN servers will use to validate a token + Endpoint *string `json:"endpoint,omitempty" xmlrpc:"endpoint,omitempty"` + + // The protocol that the WSDL will be used for. This can be HTTP, WINDOWSMEDIA, or FLASH + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` +} + +// SoftLayer_Container_Network_ContentDelivery_Bandwidth_PointsOfPresence_Summary models an individual CDN point of presence's bandwidth usage for a CDN account within a given date range. CDN POPs are located throughout the world, so individual POP usage may be beneficial in determining who is downloading your CDN hosted content. +type Container_Network_ContentDelivery_Bandwidth_PointsOfPresence_Summary struct { + Entity + + // The amount of bandwidth used by a CDN POP. + Bandwidth *uint `json:"bandwidth,omitempty" xmlrpc:"bandwidth,omitempty"` + + // The ending date of a CDN POP bandwidth summary. + EndDateTime *Time `json:"endDateTime,omitempty" xmlrpc:"endDateTime,omitempty"` + + // A CDN POP's name. This is typically the city the POP resides in. + PopName *string `json:"popName,omitempty" xmlrpc:"popName,omitempty"` + + // The starting date of a CDN POP bandwidth summary. + StartDateTime *Time `json:"startDateTime,omitempty" xmlrpc:"startDateTime,omitempty"` + + // The unit of measurement used in a CDN POP bandwidth summary. + UsageUnits *string `json:"usageUnits,omitempty" xmlrpc:"usageUnits,omitempty"` + + // The view count + ViewCount *uint `json:"viewCount,omitempty" xmlrpc:"viewCount,omitempty"` +} + +// SoftLayer_Container_Network_ContentDelivery_Bandwidth_Summary models a CDN account's overall bandwidth usage and overages within a given date range. +type Container_Network_ContentDelivery_Bandwidth_Summary struct { + Entity + + // The CDN account id + CdnAccountId *int `json:"cdnAccountId,omitempty" xmlrpc:"cdnAccountId,omitempty"` + + // The ending date of a CDN bandwidth summary. + EndDateTime *Time `json:"endDateTime,omitempty" xmlrpc:"endDateTime,omitempty"` + + // The name of a file that is requested by visitors + FileName *string `json:"fileName,omitempty" xmlrpc:"fileName,omitempty"` + + // The media type + MediaType *string `json:"mediaType,omitempty" xmlrpc:"mediaType,omitempty"` + + // The starting date of a CDN bandwidth summary. + StartDateTime *Time `json:"startDateTime,omitempty" xmlrpc:"startDateTime,omitempty"` + + // The amount of bandwidth used by a CDN account in between a given starting and ending date. + Usage *Float64 `json:"usage,omitempty" xmlrpc:"usage,omitempty"` + + // The unit of measurement used in a CDN bandwidth summary. + UsageUnits *string `json:"usageUnits,omitempty" xmlrpc:"usageUnits,omitempty"` +} + +// SoftLayer_Container_Network_ContentDelivery_Bandwidth_Summary_File models a CDN account's overall bandwidth usage and overages within a given date range. +type Container_Network_ContentDelivery_Bandwidth_Summary_Detail struct { + Container_Network_ContentDelivery_Bandwidth_Summary + + // The duration of a file that is viewed. 
+ Duration *Float64 `json:"duration,omitempty" xmlrpc:"duration,omitempty"` + + // The number of times that a file is viewed. + ViewCount *int `json:"viewCount,omitempty" xmlrpc:"viewCount,omitempty"` +} + +// SoftLayer's CDN allows for multiple origin pull domains and CNAME records. This container holds the origin pull configuration details. CDN currently supports the origin pull method for HTTP content. +type Container_Network_ContentDelivery_OriginPull_Mapping struct { + Entity + + // The CNAME record. + Cname *string `json:"cname,omitempty" xmlrpc:"cname,omitempty"` + + // The unique identifier of an origin pull configuration. + Id *string `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // This indicates if an origin pull mapping is for secure content or not. + IsSecureContent *bool `json:"isSecureContent,omitempty" xmlrpc:"isSecureContent,omitempty"` + + // The type of media supported by CDN. Supported media types are: "HTTP", "FLASH" and "WM". + MediaType *string `json:"mediaType,omitempty" xmlrpc:"mediaType,omitempty"` + + // The URL of an origin server. A URL can contain a directory path. + OriginUrl *string `json:"originUrl,omitempty" xmlrpc:"originUrl,omitempty"` +} + +// SoftLayer's CDN content delivery network offering replicates your data to a number of Points of Presence (POPs) around the world. SoftLayer_Container_Network_ContentDelivery_PointsOfPresence models one of these POP locations. +type Container_Network_ContentDelivery_PointsOfPresence struct { + Entity + + // A CDN Point of Presence's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A CDN Point of Presence's name. This is typically the city that the POP is located in. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// This container holds information on a purge request. See the [[SoftLayer_Network_ContentDelivery_Account::purgeCache|Purge method]] for more details. +// +// Status code can be "SUCCESS", "FAILED", or "INVALID_URL". "INVALID_URL" is returned when a URL is malformed or does not belong to the customer. "FAILED" is returned in case there was an internal error. 
+type Container_Network_ContentDelivery_PurgeService_Response struct { + Entity + + // A status code indicates whether your request was successful or not + StatusCode *string `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"` + + // A URL that you wish to purge its cache object + Url *string `json:"url,omitempty" xmlrpc:"url,omitempty"` +} + +// no documentation yet +type Container_Network_ContentDelivery_Report_Usage struct { + Entity + + // no documentation yet + ApplicationDeliveryNetwork *Float64 `json:"applicationDeliveryNetwork,omitempty" xmlrpc:"applicationDeliveryNetwork,omitempty"` + + // no documentation yet + ApplicationDeliveryNetworkSsl *Float64 `json:"applicationDeliveryNetworkSsl,omitempty" xmlrpc:"applicationDeliveryNetworkSsl,omitempty"` + + // no documentation yet + DiskSpace *Float64 `json:"diskSpace,omitempty" xmlrpc:"diskSpace,omitempty"` + + // no documentation yet + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // no documentation yet + Flash *Float64 `json:"flash,omitempty" xmlrpc:"flash,omitempty"` + + // no documentation yet + Http *Float64 `json:"http,omitempty" xmlrpc:"http,omitempty"` + + // no documentation yet + HttpSmall *Float64 `json:"httpSmall,omitempty" xmlrpc:"httpSmall,omitempty"` + + // no documentation yet + Https *Float64 `json:"https,omitempty" xmlrpc:"https,omitempty"` + + // no documentation yet + HttpsSmall *Float64 `json:"httpsSmall,omitempty" xmlrpc:"httpsSmall,omitempty"` + + // no documentation yet + Region *string `json:"region,omitempty" xmlrpc:"region,omitempty"` + + // no documentation yet + SslTotal *Float64 `json:"sslTotal,omitempty" xmlrpc:"sslTotal,omitempty"` + + // no documentation yet + StandardTotal *Float64 `json:"standardTotal,omitempty" xmlrpc:"standardTotal,omitempty"` + + // no documentation yet + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` + + // no documentation yet + WindowsMedia *Float64 `json:"windowsMedia,omitempty" xmlrpc:"windowsMedia,omitempty"` +} + +// SoftLayer's CDN content delivery network allows for multiple types of media hosting in addition to traditional HTTP hosting. Each of these media types are accessible form a different URL. SoftLayer_Container_Network_ContentDelivery_SupportedProtocol holds details about CDN supported media types and their associated URLs. +// +// CDN media URLs follow the standard ://..cdn.softlayer.net +// +// Flash streaming, Windows Media streaming and HTTP protocols are supported: Flash streaming: rtmp://.flash.cdn.softlayer.net Windows Media streaming: mms://.wm.cdn.softlayer.net HTTP: http://.http.cdn.softlayer.net +type Container_Network_ContentDelivery_SupportedProtocol struct { + Entity + + // The host name related to CDN supported media, and is represented in the hostname portion of a CDN URL. + Host *string `json:"host,omitempty" xmlrpc:"host,omitempty"` + + // The type of a media supported by CDN such as "FLASH", "WINDOWSMEDIA" or "HTTP" + MediaType *string `json:"mediaType,omitempty" xmlrpc:"mediaType,omitempty"` + + // The platform name. It's a friendly name of media type. + Platform *string `json:"platform,omitempty" xmlrpc:"platform,omitempty"` + + // The media protocol supported by CDN. This represents the media portion of a CDN URL. Currently supported protocols are: rtmp, mms and http + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` +} + +// SoftLayer_Container_Network_Directory_Listing represents a single entry in a listing of files within a remote directory. 
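Given the purge status codes documented above ("SUCCESS", "FAILED", "INVALID_URL"), here is a brief, hedged sketch of post-processing the purge response container; it assumes the usual import path for this vendored package, and the sample URLs are made up.

package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
)

// failedPurges maps each URL whose purge did not report "SUCCESS" to the
// status code that came back for it.
func failedPurges(responses []datatypes.Container_Network_ContentDelivery_PurgeService_Response) map[string]string {
	failed := map[string]string{}
	for _, r := range responses {
		if r.StatusCode == nil || r.Url == nil {
			continue
		}
		if *r.StatusCode != "SUCCESS" {
			failed[*r.Url] = *r.StatusCode
		}
	}
	return failed
}

func main() {
	ok, bad := "SUCCESS", "INVALID_URL"
	u1, u2 := "http://cdn.example.com/a.css", "not-a-url" // sample data
	responses := []datatypes.Container_Network_ContentDelivery_PurgeService_Response{
		{StatusCode: &ok, Url: &u1},
		{StatusCode: &bad, Url: &u2},
	}
	fmt.Println(failedPurges(responses)) // map[not-a-url:INVALID_URL]
}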
API methods that return remote directory listings typically return arrays of SoftLayer_Container_Network_Directory_Listing objects. +type Container_Network_Directory_Listing struct { + Entity + + // If the file in a directory listing is a directory itself then fileCount is the number of files within the directory. + FileCount *int `json:"fileCount,omitempty" xmlrpc:"fileCount,omitempty"` + + // The name of a directory or a file within a directory listing. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The type of file in a directory listing. If a directory listing entry is a directory itself then type is set to "directory". Otherwise, type is a blank string. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The IntrusionProtection_Event object stores information about individual intrusion protection events. +// +// It is a data container that cannot be edited, deleted, or saved. +// +// It is returned by many methods in the TippingPointReporting object, but never directly, always as a child of another container object. +type Container_Network_IntrusionProtection_Event struct { + Entity + + // The CVE ID(s), if any, associated with this attack signature. + CVEId *string `json:"CVEId,omitempty" xmlrpc:"CVEId,omitempty"` + + // The action that was taken when this attack was discovered. Can be either "Block" or "Permit" + ActionTaken *string `json:"actionTaken,omitempty" xmlrpc:"actionTaken,omitempty"` + + // The number of attacks in this block. Attacks are grouped differently based on the query performed on the tippingPointReporting object. + AttackCount *int `json:"attackCount,omitempty" xmlrpc:"attackCount,omitempty"` + + // Long description of the attack. May contain links to more information + AttackLongDescription *string `json:"attackLongDescription,omitempty" xmlrpc:"attackLongDescription,omitempty"` + + // Name of the attack + AttackName *string `json:"attackName,omitempty" xmlrpc:"attackName,omitempty"` + + // The starting timestamp of the attack recorded, in Y-m-d H:i:s format. May not be set, depending on the type of query performed. + BeginTime *string `json:"beginTime,omitempty" xmlrpc:"beginTime,omitempty"` + + // The BugTraq ID(s), if any, associated with this attack signature. + BugtraqId *string `json:"bugtraqId,omitempty" xmlrpc:"bugtraqId,omitempty"` + + // The human-readable classification of the attack + Classification *string `json:"classification,omitempty" xmlrpc:"classification,omitempty"` + + // The IP Address (as a dotted decimal string) of the machine that was the target of the attack + DestinationIpAddress *string `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"` + + // The port the attack was directed at + DestinationPort *int `json:"destinationPort,omitempty" xmlrpc:"destinationPort,omitempty"` + + // The ending timestamp of the attack recorded, in Y-m-d H:i:s format. May not be set, depending on the type of query performed. + EndTime *string `json:"endTime,omitempty" xmlrpc:"endTime,omitempty"` + + // The platform affected by the attack + Platform *string `json:"platform,omitempty" xmlrpc:"platform,omitempty"` + + // The protocol used in the attack + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // The human-readable severity of this attack, from "Low" to "Critical" + Severity *string `json:"severity,omitempty" xmlrpc:"severity,omitempty"` + + // Unique ID of the "Signature" in question. The signature determines the type of attack recorded. 
SignatureId is used in the drillDown() function on the TippingPointReporting service + SignatureId *string `json:"signatureId,omitempty" xmlrpc:"signatureId,omitempty"` + + // The IP Address (as a dotted decimal string) of the machine originating the attack + SourceIpAddress *string `json:"sourceIpAddress,omitempty" xmlrpc:"sourceIpAddress,omitempty"` + + // The port the attack originated from + SourcePort *int `json:"sourcePort,omitempty" xmlrpc:"sourcePort,omitempty"` +} + +// The IntrusionProtection_Statistic is used exclusively by the getMainStatistics method on the TippingPointReporting service, and serves mainly as a pair object, storing a name and an attack count. Name is usually the name of an attack, but it can also be an attacking IP Address +type Container_Network_IntrusionProtection_Statistic struct { + Entity + + // The number of attacks effecting this name over the time period + AttackCount *int `json:"attackCount,omitempty" xmlrpc:"attackCount,omitempty"` + + // Either the name of the attack in question, or the attacking IP Address + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The IntrusionProtection_Statistics Type is used as a container for SoftLayer_Container_Network_IntrusionProtection_Statistic objects. The SoftLayer_Container_Network_IntrusionProtection_Statistics class holds the "header" information, like the item being queried (either account or data center), the time frame, and the grand total of the attacks. +type Container_Network_IntrusionProtection_Statistics struct { + Entity + + // The actual target, either a datacenter name, an account ID, or a subnet IP + Target *string `json:"target,omitempty" xmlrpc:"target,omitempty"` + + // The type of the target, right now either "datacenter", "account", or "subnet" + TargetType *string `json:"targetType,omitempty" xmlrpc:"targetType,omitempty"` + + // The time frame of the attack, in string form, like "Last 24 hours" + TimeFrame *string `json:"timeFrame,omitempty" xmlrpc:"timeFrame,omitempty"` + + // The top attacks for this target over this time frame + TopAttacks []Container_Network_IntrusionProtection_Statistic `json:"topAttacks,omitempty" xmlrpc:"topAttacks,omitempty"` + + // Total attacks for this $target over this time frame + TotalAttacks *int `json:"totalAttacks,omitempty" xmlrpc:"totalAttacks,omitempty"` +} + +// The IntrusionProtection_SubnetReport object is the container that holds the SoftLayer_Container_Network_IntrusionProtection_Event objects for a particular subnet, or "All Subnets", whatever the case may be. Subnet, subnet mask, direction, and the individual events are returned by this object. +type Container_Network_IntrusionProtection_SubnetReport struct { + Entity + + // cidr for this report. If the subnetIpAddress is "All Subnets", this is set to 32 and should be ignored. + Cidr *int `json:"cidr,omitempty" xmlrpc:"cidr,omitempty"` + + // Direction of the attack, either 'Inbound' or 'Outbound' + Direction *string `json:"direction,omitempty" xmlrpc:"direction,omitempty"` + + // The class SoftLayer_Container_Network_IntrusionProtection_Event objects on this report. 
+ Events []Container_Network_IntrusionProtection_Event `json:"events,omitempty" xmlrpc:"events,omitempty"` + + // The "target" of this report, could be an IP address, a subnet's network identifier, or "All Subnets" + SubnetIpAddress *string `json:"subnetIpAddress,omitempty" xmlrpc:"subnetIpAddress,omitempty"` +} + +// The LoadBalancer_StatusEntry object stores information about the current status of a particular load balancer service. +// +// It is a data container that cannot be edited, deleted, or saved. +// +// It is returned exclusively by the getStatus method on the [[SoftLayer_Network_LoadBalancer_Service]] service +type Container_Network_LoadBalancer_StatusEntry struct { + Entity + + // The value of the entry. + Content *string `json:"content,omitempty" xmlrpc:"content,omitempty"` + + // Text description of the status entry + Label *string `json:"label,omitempty" xmlrpc:"label,omitempty"` +} + +// This container class holds information on a media file such as file name, codec, frame rate and so on +type Container_Network_Media_Information struct { + Entity + + // The audio bit rate + AudioBitRate *int `json:"audioBitRate,omitempty" xmlrpc:"audioBitRate,omitempty"` + + // The audio channel mode + AudioChannelMode *string `json:"audioChannelMode,omitempty" xmlrpc:"audioChannelMode,omitempty"` + + // The number of audio channels + AudioChannels *int `json:"audioChannels,omitempty" xmlrpc:"audioChannels,omitempty"` + + // The audio codec name + AudioCodec *string `json:"audioCodec,omitempty" xmlrpc:"audioCodec,omitempty"` + + // The audio sample rate + AudioSampleRate *int `json:"audioSampleRate,omitempty" xmlrpc:"audioSampleRate,omitempty"` + + // The duration of a media + Duration *Float64 `json:"duration,omitempty" xmlrpc:"duration,omitempty"` + + // The error message if any. 
+ ErrorMessage *string `json:"errorMessage,omitempty" xmlrpc:"errorMessage,omitempty"` + + // The name of a media file + File *string `json:"file,omitempty" xmlrpc:"file,omitempty"` + + // The file format + FileFormat *string `json:"fileFormat,omitempty" xmlrpc:"fileFormat,omitempty"` + + // The size of a media file in byte + FileSize *uint `json:"fileSize,omitempty" xmlrpc:"fileSize,omitempty"` + + // The frame rate + FrameRate *Float64 `json:"frameRate,omitempty" xmlrpc:"frameRate,omitempty"` + + // The width of a media in pixel + SizeX *int `json:"sizeX,omitempty" xmlrpc:"sizeX,omitempty"` + + // The height of a media in pixel + SizeY *int `json:"sizeY,omitempty" xmlrpc:"sizeY,omitempty"` + + // The total of frames + TotalFrames *uint `json:"totalFrames,omitempty" xmlrpc:"totalFrames,omitempty"` + + // The width in a video's width to height aspect ratio + VideoAspectX *Float64 `json:"videoAspectX,omitempty" xmlrpc:"videoAspectX,omitempty"` + + // The height in a video's width to height aspect ratio + VideoAspectY *int `json:"videoAspectY,omitempty" xmlrpc:"videoAspectY,omitempty"` + + // The video codec name + VideoCodec *string `json:"videoCodec,omitempty" xmlrpc:"videoCodec,omitempty"` +} + +// no documentation yet +type Container_Network_Media_Transcode_Job_Watermark struct { + Entity + + // Time to stop showing watermark in milliseconds + EndTime *int `json:"endTime,omitempty" xmlrpc:"endTime,omitempty"` + + // Filename of image to use as watermark in transcoding job + FileName *string `json:"fileName,omitempty" xmlrpc:"fileName,omitempty"` + + // Position to place watermark at + Position *Container_Network_Media_Transcode_Job_Watermark_Position `json:"position,omitempty" xmlrpc:"position,omitempty"` + + // Time to start showing watermark in milliseconds + StartTime *int `json:"startTime,omitempty" xmlrpc:"startTime,omitempty"` + + // Text to Place in Watermark + Text *string `json:"text,omitempty" xmlrpc:"text,omitempty"` + + // Percentage Transparent watermark should be + TransparencyPercentage *int `json:"transparencyPercentage,omitempty" xmlrpc:"transparencyPercentage,omitempty"` +} + +// no documentation yet +type Container_Network_Media_Transcode_Job_Watermark_Position struct { + Entity + + // X Coordinate of Watermark + X *int `json:"x,omitempty" xmlrpc:"x,omitempty"` + + // vertical Coordinate of Watermark + Y *int `json:"y,omitempty" xmlrpc:"y,omitempty"` +} + +// Transcode preset is a set of configuration parameters that defines a Transcode output format. SoftLayer_Container_Network_Media_Transcode_Preset contains a preset information defined on a Transcode server +type Container_Network_Media_Transcode_Preset struct { + Entity + + // The unique id that is used by a Transcode server + GUID *string `json:"GUID,omitempty" xmlrpc:"GUID,omitempty"` + + // The category that a preset belongs to + Category *string `json:"category,omitempty" xmlrpc:"category,omitempty"` + + // The description of the preset + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The friendly name of a preset + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Transcode preset element +type Container_Network_Media_Transcode_Preset_Element struct { + Entity + + // The additional elements for DROPDOWNLIST element + AdditionalElements []Container_Network_Media_Transcode_Preset_Element_Option `json:"additionalElements,omitempty" xmlrpc:"additionalElements,omitempty"` + + // The default value of an element. 
+ DefaultValue *string `json:"defaultValue,omitempty" xmlrpc:"defaultValue,omitempty"` + + // The description of a preset element + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The flag that indicates whether an element is enabled or not + Enabled *bool `json:"enabled,omitempty" xmlrpc:"enabled,omitempty"` + + // The extended description of a preset element + ExtendedDescription *string `json:"extendedDescription,omitempty" xmlrpc:"extendedDescription,omitempty"` + + // The flag that indicates whether an element is hidden or not + Hidden *bool `json:"hidden,omitempty" xmlrpc:"hidden,omitempty"` + + // The maximum value of an element + MaximumValue *int `json:"maximumValue,omitempty" xmlrpc:"maximumValue,omitempty"` + + // The minimum value of an element + MinimumValue *int `json:"minimumValue,omitempty" xmlrpc:"minimumValue,omitempty"` + + // The name of an preset element + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The name of a parent element + ParentName *string `json:"parentName,omitempty" xmlrpc:"parentName,omitempty"` + + // The type of an preset element. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// Transcode preset element +type Container_Network_Media_Transcode_Preset_Element_Option struct { + Entity + + // The name of a additional preset element + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The value of a additional preset element + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Container_Network_Message_Delivery_Email struct { + Entity + + // no documentation yet + Body *string `json:"body,omitempty" xmlrpc:"body,omitempty"` + + // no documentation yet + ContainsHtml *bool `json:"containsHtml,omitempty" xmlrpc:"containsHtml,omitempty"` + + // no documentation yet + From *string `json:"from,omitempty" xmlrpc:"from,omitempty"` + + // no documentation yet + Subject *string `json:"subject,omitempty" xmlrpc:"subject,omitempty"` + + // no documentation yet + To *string `json:"to,omitempty" xmlrpc:"to,omitempty"` +} + +// no documentation yet +type Container_Network_Message_Delivery_Email_Sendgrid_Account_Overview struct { + Entity + + // no documentation yet + CreditsAllowed *int `json:"creditsAllowed,omitempty" xmlrpc:"creditsAllowed,omitempty"` + + // no documentation yet + CreditsOverage *int `json:"creditsOverage,omitempty" xmlrpc:"creditsOverage,omitempty"` + + // no documentation yet + CreditsRemain *int `json:"creditsRemain,omitempty" xmlrpc:"creditsRemain,omitempty"` + + // no documentation yet + CreditsUsed *int `json:"creditsUsed,omitempty" xmlrpc:"creditsUsed,omitempty"` + + // no documentation yet + Package *string `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // no documentation yet + Reputation *int `json:"reputation,omitempty" xmlrpc:"reputation,omitempty"` + + // no documentation yet + Requests *int `json:"requests,omitempty" xmlrpc:"requests,omitempty"` +} + +// no documentation yet +type Container_Network_Message_Delivery_Email_Sendgrid_Customer_Profile struct { + Entity + + // no documentation yet + Address *string `json:"address,omitempty" xmlrpc:"address,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // no documentation yet + 
FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // no documentation yet + Website *string `json:"website,omitempty" xmlrpc:"website,omitempty"` + + // no documentation yet + Zip *string `json:"zip,omitempty" xmlrpc:"zip,omitempty"` +} + +// no documentation yet +type Container_Network_Message_Delivery_Email_Sendgrid_List_Entry struct { + Entity + + // no documentation yet + Created *string `json:"created,omitempty" xmlrpc:"created,omitempty"` + + // no documentation yet + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // no documentation yet + Reason *string `json:"reason,omitempty" xmlrpc:"reason,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// no documentation yet +type Container_Network_Message_Delivery_Email_Sendgrid_Statistics struct { + Entity + + // no documentation yet + Blocks *int `json:"blocks,omitempty" xmlrpc:"blocks,omitempty"` + + // no documentation yet + Bounces *int `json:"bounces,omitempty" xmlrpc:"bounces,omitempty"` + + // no documentation yet + Clicks *int `json:"clicks,omitempty" xmlrpc:"clicks,omitempty"` + + // no documentation yet + Date *string `json:"date,omitempty" xmlrpc:"date,omitempty"` + + // no documentation yet + Delivered *int `json:"delivered,omitempty" xmlrpc:"delivered,omitempty"` + + // no documentation yet + InvalidEmail *int `json:"invalidEmail,omitempty" xmlrpc:"invalidEmail,omitempty"` + + // no documentation yet + Opens *int `json:"opens,omitempty" xmlrpc:"opens,omitempty"` + + // no documentation yet + RepeatBounces *int `json:"repeatBounces,omitempty" xmlrpc:"repeatBounces,omitempty"` + + // no documentation yet + RepeatSpamReports *int `json:"repeatSpamReports,omitempty" xmlrpc:"repeatSpamReports,omitempty"` + + // no documentation yet + RepeatUnsubscribes *int `json:"repeatUnsubscribes,omitempty" xmlrpc:"repeatUnsubscribes,omitempty"` + + // no documentation yet + Requests *int `json:"requests,omitempty" xmlrpc:"requests,omitempty"` + + // no documentation yet + SpamReports *int `json:"spamReports,omitempty" xmlrpc:"spamReports,omitempty"` + + // no documentation yet + UniqueClicks *int `json:"uniqueClicks,omitempty" xmlrpc:"uniqueClicks,omitempty"` + + // no documentation yet + UniqueOpens *int `json:"uniqueOpens,omitempty" xmlrpc:"uniqueOpens,omitempty"` + + // no documentation yet + Unsubscribes *int `json:"unsubscribes,omitempty" xmlrpc:"unsubscribes,omitempty"` +} + +// no documentation yet +type Container_Network_Message_Delivery_Email_Sendgrid_Statistics_Graph struct { + Entity + + // no documentation yet + GraphError *string `json:"graphError,omitempty" xmlrpc:"graphError,omitempty"` + + // no documentation yet + GraphImage *[]byte `json:"graphImage,omitempty" xmlrpc:"graphImage,omitempty"` + + // no documentation yet + GraphTitle *string `json:"graphTitle,omitempty" xmlrpc:"graphTitle,omitempty"` +} + +// no documentation yet +type Container_Network_Message_Delivery_Email_Sendgrid_Statistics_Options struct { + Entity + + // no documentation yet + AggregatesOnly *bool `json:"aggregatesOnly,omitempty" xmlrpc:"aggregatesOnly,omitempty"` + + // no documentation yet + Category *string `json:"category,omitempty" 
xmlrpc:"category,omitempty"` + + // no documentation yet + Days *int `json:"days,omitempty" xmlrpc:"days,omitempty"` + + // no documentation yet + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // no documentation yet + SelectedStatistics []string `json:"selectedStatistics,omitempty" xmlrpc:"selectedStatistics,omitempty"` + + // no documentation yet + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// no documentation yet +type Container_Network_Port_Statistic struct { + Entity + + // no documentation yet + AdministrativeStatus *int `json:"administrativeStatus,omitempty" xmlrpc:"administrativeStatus,omitempty"` + + // no documentation yet + InDiscardPackets *uint `json:"inDiscardPackets,omitempty" xmlrpc:"inDiscardPackets,omitempty"` + + // no documentation yet + InErrorPackets *uint `json:"inErrorPackets,omitempty" xmlrpc:"inErrorPackets,omitempty"` + + // no documentation yet + InOctets *uint `json:"inOctets,omitempty" xmlrpc:"inOctets,omitempty"` + + // no documentation yet + InUnicastPackets *uint `json:"inUnicastPackets,omitempty" xmlrpc:"inUnicastPackets,omitempty"` + + // no documentation yet + MaximumTransmissionUnit *uint `json:"maximumTransmissionUnit,omitempty" xmlrpc:"maximumTransmissionUnit,omitempty"` + + // no documentation yet + OperationalStatus *int `json:"operationalStatus,omitempty" xmlrpc:"operationalStatus,omitempty"` + + // no documentation yet + OutDiscardPackets *uint `json:"outDiscardPackets,omitempty" xmlrpc:"outDiscardPackets,omitempty"` + + // no documentation yet + OutErrorPackets *uint `json:"outErrorPackets,omitempty" xmlrpc:"outErrorPackets,omitempty"` + + // no documentation yet + OutOctets *uint `json:"outOctets,omitempty" xmlrpc:"outOctets,omitempty"` + + // no documentation yet + OutUnicastPackets *uint `json:"outUnicastPackets,omitempty" xmlrpc:"outUnicastPackets,omitempty"` + + // no documentation yet + PortDuplex *uint `json:"portDuplex,omitempty" xmlrpc:"portDuplex,omitempty"` + + // no documentation yet + Speed *uint `json:"speed,omitempty" xmlrpc:"speed,omitempty"` +} + +// no documentation yet +type Container_Network_SecurityGroup_Limit struct { + Entity + + // A key value describing what type of limit. + TypeKey *string `json:"typeKey,omitempty" xmlrpc:"typeKey,omitempty"` + + // The value of the security group limit. 
+ Value *int `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Container_Network_Service_Resource_ObjectStorage_ConnectionInformation struct { + Entity + + // no documentation yet + Datacenter *string `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // no documentation yet + DatacenterShortName *string `json:"datacenterShortName,omitempty" xmlrpc:"datacenterShortName,omitempty"` + + // no documentation yet + PrivateEndpoint *string `json:"privateEndpoint,omitempty" xmlrpc:"privateEndpoint,omitempty"` + + // no documentation yet + PublicEndpoint *string `json:"publicEndpoint,omitempty" xmlrpc:"publicEndpoint,omitempty"` +} + +// no documentation yet +type Container_Network_Storage_Backup_Evault_WebCc_Authentication_Details struct { + Entity + + // no documentation yet + EventValidation *string `json:"eventValidation,omitempty" xmlrpc:"eventValidation,omitempty"` + + // no documentation yet + ViewState *string `json:"viewState,omitempty" xmlrpc:"viewState,omitempty"` + + // no documentation yet + WebCcUrl *string `json:"webCcUrl,omitempty" xmlrpc:"webCcUrl,omitempty"` +} + +// SoftLayer's StorageLayer EVault service provides details regarding the purchased vault. +// +// When a job is created using the Webcc Console, the job created is identified as a task on the vault. Using this service, information regarding the task can be retrieved. +// +// +type Container_Network_Storage_Evault_Vault_Task struct { + Entity + + // Unique identifier for the task. + Id *uint `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The hostname provided at time of agent registration. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Total compressed bytes used for the task. + UsedPoolsize *uint `json:"usedPoolsize,omitempty" xmlrpc:"usedPoolsize,omitempty"` +} + +// The SoftLayer_Container_Network_Storage_Evault_WebCc_AgentStatus will contain the timestamp of the last backup performed by the EVault agent. The agent status will also be returned. +type Container_Network_Storage_Evault_WebCc_AgentStatus struct { + Entity + + // Timestamp of last backup performed by the EVault backup agent + LastBackup *Time `json:"lastBackup,omitempty" xmlrpc:"lastBackup,omitempty"` + + // Status indicating the cumulative status result of all jobs performed by the EVault agent. For example, if one job out of three jobs failed, the agent status will be "Failed Backup(s)". + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// The SoftLayer_Container_Network_Storage_Evault_WebCc_BackupResults will contain the timeframe of the backups and their results. +type Container_Network_Storage_Evault_WebCc_BackupResults struct { + Entity + + // Timestamp of begin time + BeginTime *Time `json:"beginTime,omitempty" xmlrpc:"beginTime,omitempty"` + + // Count of backups with conflicts. + Conflict *string `json:"conflict,omitempty" xmlrpc:"conflict,omitempty"` + + // Timestamp of end time + EndTime *Time `json:"endTime,omitempty" xmlrpc:"endTime,omitempty"` + + // Count of failed backups. + Failed *string `json:"failed,omitempty" xmlrpc:"failed,omitempty"` + + // Count of successful backups. + Success *string `json:"success,omitempty" xmlrpc:"success,omitempty"` +} + +// The SoftLayer_Container_Network_Storage_Evault_WebCc_JobDetails will contain basic details for all backup and restore jobs performed by the StorageLayer EVault service offering.
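The EVault status and results containers above follow the package-wide convention of pointer fields tagged with omitempty, and the backup counts (success, failed, conflict) travel as strings rather than integers. A minimal consumption sketch, assuming these types live in the vendored softlayer-go datatypes package and that rawJSON is a hypothetical response payload (the job-details container continues below):

package main

import (
    "encoding/json"
    "fmt"
    "strconv"

    "github.com/softlayer/softlayer-go/datatypes"
)

// rawJSON stands in for a hypothetical EVault WebCc backup-results payload.
var rawJSON = []byte(`{"success":"12","failed":"1","conflict":"0"}`)

func main() {
    var results datatypes.Container_Network_Storage_Evault_WebCc_BackupResults
    if err := json.Unmarshal(rawJSON, &results); err != nil {
        panic(err)
    }

    // Every field is a pointer, so guard against nil before dereferencing.
    if results.Success != nil {
        // The counts arrive as strings; convert them before doing arithmetic.
        if n, err := strconv.Atoi(*results.Success); err == nil {
            fmt.Printf("successful backups: %d\n", n)
        }
    }
    if results.Failed != nil && *results.Failed != "0" {
        fmt.Printf("failed backups: %s\n", *results.Failed)
    }
}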
+type Container_Network_Storage_Evault_WebCc_JobDetails struct { + Entity + + // The number of bytes currently used by the backup job. (provided only for backup jobs) + BytesUsed *uint `json:"bytesUsed,omitempty" xmlrpc:"bytesUsed,omitempty"` + + // Description of the backup/restore job + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // hardware id + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // Date of the last job run. + LastRunDate *Time `json:"lastRunDate,omitempty" xmlrpc:"lastRunDate,omitempty"` + + // Name of the backup/restore job + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Size of backup job when it was first run. (provided only for backup jobs) + OriginalSize *uint `json:"originalSize,omitempty" xmlrpc:"originalSize,omitempty"` + + // Percentage of overall used space allocated by the job. (provided only for backup jobs) + PercentageOfTotalUsage *int `json:"percentageOfTotalUsage,omitempty" xmlrpc:"percentageOfTotalUsage,omitempty"` + + // Result of the latest job run. + Result *string `json:"result,omitempty" xmlrpc:"result,omitempty"` + + // virtual guest id + VirtualGuestId *int `json:"virtualGuestId,omitempty" xmlrpc:"virtualGuestId,omitempty"` +} + +// The SoftLayer_Container_Network_Storage_Host will contain the reference id field for the object associated with the host. The host object type will also be returned. +type Container_Network_Storage_Host struct { + Entity + + // Reference id field for object associated with host. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Type for the object associated with host + ObjectType *string `json:"objectType,omitempty" xmlrpc:"objectType,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_ObjectStorage_Bucket provides a description of a bucket +type Container_Network_Storage_Hub_ObjectStorage_Bucket struct { + Entity + + // no documentation yet + BytesUsed *int `json:"bytesUsed,omitempty" xmlrpc:"bytesUsed,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + ObjectCount *int `json:"objectCount,omitempty" xmlrpc:"objectCount,omitempty"` + + // no documentation yet + StorageLocation *string `json:"storageLocation,omitempty" xmlrpc:"storageLocation,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_ObjectStorage_ContentDeliveryUrl is a container which holds the CDN URLs associated with an object storage account +type Container_Network_Storage_Hub_ObjectStorage_ContentDeliveryUrl struct { + Entity + + // no documentation yet + Datacenter *string `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // no documentation yet + FlashUrl *string `json:"flashUrl,omitempty" xmlrpc:"flashUrl,omitempty"` + + // no documentation yet + HttpUrl *string `json:"httpUrl,omitempty" xmlrpc:"httpUrl,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_ObjectStorage_Endpoint provides specific details on available endpoint URLs and locations.
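Since the endpoint container is declared immediately below, a short selection sketch may help; it is illustrative only, assumes the vendored softlayer-go datatypes package, and relies solely on the Region, Type and Url fields shown in this file:

package endpoints

import "github.com/softlayer/softlayer-go/datatypes"

// FindEndpointURL returns the first endpoint whose region and type match the
// requested values. All fields are pointers, so each one is nil-checked.
func FindEndpointURL(endpoints []datatypes.Container_Network_Storage_Hub_ObjectStorage_Endpoint, region, kind string) (string, bool) {
    for _, e := range endpoints {
        if e.Region != nil && e.Type != nil && e.Url != nil &&
            *e.Region == region && *e.Type == kind {
            return *e.Url, true
        }
    }
    return "", false
}

Returning a bool alongside the URL keeps the nil-pointer handling inside the helper, so callers can simply fall back to a default when no match is found.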
+type Container_Network_Storage_Hub_ObjectStorage_Endpoint struct { + Entity + + // no documentation yet + Location *string `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + Region *string `json:"region,omitempty" xmlrpc:"region,omitempty"` + + // no documentation yet + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // no documentation yet + Url *string `json:"url,omitempty" xmlrpc:"url,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_ObjectStorage_File provides specific details that only apply to files that are sent or received from CloudLayer storage resources. +type Container_Network_Storage_Hub_ObjectStorage_File struct { + Container_Utility_File_Entity + + // no documentation yet + Folder *string `json:"folder,omitempty" xmlrpc:"folder,omitempty"` + + // no documentation yet + Hash *string `json:"hash,omitempty" xmlrpc:"hash,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_Container provides details about containers which store collections of files. +type Container_Network_Storage_Hub_ObjectStorage_Folder struct { + Entity + + // no documentation yet + Bytes *uint `json:"bytes,omitempty" xmlrpc:"bytes,omitempty"` + + // no documentation yet + Count *uint `json:"count,omitempty" xmlrpc:"count,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_ObjectStorage_Node provides detailed information for a particular object storage node +type Container_Network_Storage_Hub_ObjectStorage_Node struct { + Entity + + // no documentation yet + DeviceName *string `json:"deviceName,omitempty" xmlrpc:"deviceName,omitempty"` + + // no documentation yet + ResourceName *string `json:"resourceName,omitempty" xmlrpc:"resourceName,omitempty"` + + // no documentation yet + UserAuthUrl *string `json:"userAuthUrl,omitempty" xmlrpc:"userAuthUrl,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_ObjectStorage_Policy provides specific details on available storage policies. 
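One more hedged sketch before the policy container below: because every field is a pointer with an omitempty tag, only populated fields are serialized when one of these containers is sent to or received from the API. The folder type used here is the one declared above, and the expected output assumes the embedded Entity struct contributes no JSON fields of its own:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/softlayer/softlayer-go/datatypes"
)

func main() {
    name := "images"
    size := uint(2048)

    folder := datatypes.Container_Network_Storage_Hub_ObjectStorage_Folder{
        Name:  &name,
        Bytes: &size,
    }

    out, _ := json.Marshal(folder)
    // Unset pointer fields (Count) are dropped by omitempty, so this prints
    // something like: {"bytes":2048,"name":"images"}
    fmt.Println(string(out))
}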
+type Container_Network_Storage_Hub_ObjectStorage_Policy struct { + Entity + + // no documentation yet + PolicyCode *string `json:"policyCode,omitempty" xmlrpc:"policyCode,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_ObjectStorage_Provision provides description of a provision +type Container_Network_Storage_Hub_ObjectStorage_Provision struct { + Entity + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + Provision *string `json:"provision,omitempty" xmlrpc:"provision,omitempty"` + + // no documentation yet + ProvisionCreateDate *Time `json:"provisionCreateDate,omitempty" xmlrpc:"provisionCreateDate,omitempty"` + + // no documentation yet + ProvisionModifyDate *Time `json:"provisionModifyDate,omitempty" xmlrpc:"provisionModifyDate,omitempty"` + + // no documentation yet + ProvisionTime *int `json:"provisionTime,omitempty" xmlrpc:"provisionTime,omitempty"` +} + +// no documentation yet +type Container_Network_Storage_MassDataMigration_Request_Address struct { + Entity + + // Line 1 of the address - typically the number and street address the MDMS device will be delivered to + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // Line 2 of the address + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // First and last name of the customer on the shipping address + AddressAttention *string `json:"addressAttention,omitempty" xmlrpc:"addressAttention,omitempty"` + + // The datacenter name where the MDMS device will be shipped to + AddressNickname *string `json:"addressNickname,omitempty" xmlrpc:"addressNickname,omitempty"` + + // The shipping address city + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Name of the company device is being shipped to + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // The shipping address country + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // The shipping address postal code + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // The shipping address state + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// no documentation yet +type Container_Network_Storage_NetworkConnectionInformation struct { + Entity + + // no documentation yet + Id *string `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // no documentation yet + StorageType *string `json:"storageType,omitempty" xmlrpc:"storageType,omitempty"` +} + +// Container for Volume Duplicate Information +type Container_Network_Storage_VolumeDuplicateParameters struct { + Entity + + // The iopsPerGB of the volume + IopsPerGb *Float64 `json:"iopsPerGb,omitempty" xmlrpc:"iopsPerGb,omitempty"` + + // Returns true if volume can be duplicated; false otherwise + IsDuplicatable *bool `json:"isDuplicatable,omitempty" xmlrpc:"isDuplicatable,omitempty"` + + // This represents the location id + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // This represents the location name + LocationName *string `json:"locationName,omitempty" xmlrpc:"locationName,omitempty"` + + // The maximumIopsPerGb allowed for a duplicated volume + MaximumIopsPerGb *Float64 `json:"maximumIopsPerGb,omitempty" xmlrpc:"maximumIopsPerGb,omitempty"` + + // The maximumIopsTier allowed for a duplicated volume + MaximumIopsTier 
*Float64 `json:"maximumIopsTier,omitempty" xmlrpc:"maximumIopsTier,omitempty"` + + // The maximumVolumeSize allowed for a duplicated volume + MaximumVolumeSize *int `json:"maximumVolumeSize,omitempty" xmlrpc:"maximumVolumeSize,omitempty"` + + // The minimumIopsPerGb allowed for a duplicated volume + MinimumIopsPerGb *Float64 `json:"minimumIopsPerGb,omitempty" xmlrpc:"minimumIopsPerGb,omitempty"` + + // The minimumIopsTier allowed for a duplicated volume + MinimumIopsTier *Float64 `json:"minimumIopsTier,omitempty" xmlrpc:"minimumIopsTier,omitempty"` + + // The minimumVolumeSize allowed for a duplicated volume + MinimumVolumeSize *int `json:"minimumVolumeSize,omitempty" xmlrpc:"minimumVolumeSize,omitempty"` + + // The volume duplicate status description + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // This represents the volume username + VolumeUsername *string `json:"volumeUsername,omitempty" xmlrpc:"volumeUsername,omitempty"` +} + +// SoftLayer_Container_Subnet_IPAddress models an IP v4 address as it exists as a member of its subnet, letting the user know if it is a network identifier, gateway, broadcast, or usable address. Addresses that are neither the network identifier nor the gateway nor the broadcast addresses are usable by SoftLayer servers. +type Container_Network_Subnet_IpAddress struct { + Entity + + // The hardware that an IP address is associated with. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // An IP address expressed in dotted-quad notation. + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // Whether an IP address is its subnet's broadcast address. + IsBroadcastAddress *bool `json:"isBroadcastAddress,omitempty" xmlrpc:"isBroadcastAddress,omitempty"` + + // Whether an IP address is its subnet's gateway address. Gateway addresses exist on SoftLayer's routers and may not be assigned to servers. + IsGatewayAddress *bool `json:"isGatewayAddress,omitempty" xmlrpc:"isGatewayAddress,omitempty"` + + // Whether an IP address is its subnet's network identifier address. + IsNetworkAddress *bool `json:"isNetworkAddress,omitempty" xmlrpc:"isNetworkAddress,omitempty"` +} + +// SoftLayer_Container_Network_Subnet_Registration_SubnetReference is provided to reference a [[SoftLayer_Network_Subnet_Registration]] object and the [[SoftLayer_Network_Subnet]] it references, in CIDR form. +type Container_Network_Subnet_Registration_SubnetReference struct { + Entity + + // The ID of the [[SoftLayer_Network_Subnet_Registration]] object. + RegistrationId *int `json:"registrationId,omitempty" xmlrpc:"registrationId,omitempty"` + + // The subnet address in CIDR form. + SubnetCidr *string `json:"subnetCidr,omitempty" xmlrpc:"subnetCidr,omitempty"` +} + +// SoftLayer_Container_Subnet_Registration_TransactionDetails is provided to return details of a newly created Subnet Registration Transaction. +type Container_Network_Subnet_Registration_TransactionDetails struct { + Entity + + // The IDs and Subnets of the [[SoftLayer_Network_Subnet_Registration]] object. + SubnetReferences []Container_Network_Subnet_Registration_SubnetReference `json:"subnetReferences,omitempty" xmlrpc:"subnetReferences,omitempty"` + + // The ID of the Transaction object.
+ TransactionId *int `json:"transactionId,omitempty" xmlrpc:"transactionId,omitempty"` +} + +// no documentation yet +type Container_Notification_Mass_Filter_TemplateKey struct { + Entity +} + +// no documentation yet +type Container_Notification_Mass_Filter_TemplateValue struct { + Entity +} + +// Represents the acceptance status of a Policy. +type Container_Policy_Acceptance struct { + Entity + + // Flag to indicate if a policy has been previously accepted. + AcceptedFlag *bool `json:"acceptedFlag,omitempty" xmlrpc:"acceptedFlag,omitempty"` + + // Name of the policy for which we are representing it's acceptance status. + PolicyName *string `json:"policyName,omitempty" xmlrpc:"policyName,omitempty"` + + // ID of the [[SoftLayer_Product_Item_Policy_Assignment]]. + ProductPolicyAssignmentId *int `json:"productPolicyAssignmentId,omitempty" xmlrpc:"productPolicyAssignmentId,omitempty"` +} + +// The SoftLayer_Container_Product_Item_Category data type represents a single product item category. +type Container_Product_Item_Category struct { + Entity + + // identifier for category. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// The SoftLayer_Container_Product_Item_Category_Question_Answer data type represents an answer to an item category question. It contains the category, the question being answered, and the answer. +type Container_Product_Item_Category_Question_Answer struct { + Entity + + // The answer to the question. + Answer *string `json:"answer,omitempty" xmlrpc:"answer,omitempty"` + + // The product item category code. + CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"` + + // The product item category id. + CategoryId *int `json:"categoryId,omitempty" xmlrpc:"categoryId,omitempty"` + + // The product item category question id. + QuestionId *int `json:"questionId,omitempty" xmlrpc:"questionId,omitempty"` +} + +// The SoftLayer_Container_Product_Item_Category_ZeroFee_Count data type represents a count of zero fee billing/invoice items. +type Container_Product_Item_Category_ZeroFee_Count struct { + Entity + + // The product item category code. + CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"` + + // The product item category id. + CategoryId *int `json:"categoryId,omitempty" xmlrpc:"categoryId,omitempty"` + + // The product item category name. + CategoryName *string `json:"categoryName,omitempty" xmlrpc:"categoryName,omitempty"` + + // The count of zero fee items for this category. + Count *int `json:"count,omitempty" xmlrpc:"count,omitempty"` +} + +// The SoftLayer_Container_Product_Item_Discount_Program data type represents the information about a discount that is related to a specific product item. +type Container_Product_Item_Discount_Program struct { + Entity + + // The number of times the item discount(s) may be applied for that order container. At a minimum the number will be 1 and at most, it will match the quantity of the order container. + ApplicableQuantity *int `json:"applicableQuantity,omitempty" xmlrpc:"applicableQuantity,omitempty"` + + // The product item that the discount applies to. + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // The sum of the one time fees (one time, setup and labor) of the prices of this container multiplied by the applicable quantity of this container. 
+ OneTimeAmount *Float64 `json:"oneTimeAmount,omitempty" xmlrpc:"oneTimeAmount,omitempty"` + + // The tax amount on the one time fees (one time, setup and labor) of the prices of this container multiplied by the applicable quantity of this container. + OneTimeTax *Float64 `json:"oneTimeTax,omitempty" xmlrpc:"oneTimeTax,omitempty"` + + // The item prices that contain the amount of the discount in the recurringFee field. There may be one or more prices. + Prices []Product_Item_Price `json:"prices,omitempty" xmlrpc:"prices,omitempty"` + + // The sum of the one time fees (one time, setup and labor) of the prices of this container multiplied by the applicable quantity of this container with the proration factor applied. + ProratedOneTimeAmount *Float64 `json:"proratedOneTimeAmount,omitempty" xmlrpc:"proratedOneTimeAmount,omitempty"` + + // The tax amount on the one time fees (one time, setup and labor) of the prices of this container multiplied by the applicable quantity of this container with the proration factor applied. + ProratedOneTimeTax *Float64 `json:"proratedOneTimeTax,omitempty" xmlrpc:"proratedOneTimeTax,omitempty"` + + // The sum of the recurring fees of the prices of this container multiplied by the applicable quantity of this container with the proration factor applied. + ProratedRecurringAmount *Float64 `json:"proratedRecurringAmount,omitempty" xmlrpc:"proratedRecurringAmount,omitempty"` + + // The tax amount on the recurring fees of the prices of this container multiplied by the applicable quantity of this container with the proration factor applied. + ProratedRecurringTax *Float64 `json:"proratedRecurringTax,omitempty" xmlrpc:"proratedRecurringTax,omitempty"` + + // The sum of the recurring fees of the prices of this container multiplied by the applicable quantity of this container. + RecurringAmount *Float64 `json:"recurringAmount,omitempty" xmlrpc:"recurringAmount,omitempty"` + + // The tax amount on the recurring fees of the prices of this container multiplied by the applicable quantity of this container. + RecurringTax *Float64 `json:"recurringTax,omitempty" xmlrpc:"recurringTax,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order with SoftLayer. +type Container_Product_Order struct { + Entity + + // Flag for identifying an order for Big Data Deployment. + BigDataOrderFlag *bool `json:"bigDataOrderFlag,omitempty" xmlrpc:"bigDataOrderFlag,omitempty"` + + // Billing Information associated with an order. For existing customers this information is completely ignored. Do not send this information for existing customers. + BillingInformation *Container_Product_Order_Billing_Information `json:"billingInformation,omitempty" xmlrpc:"billingInformation,omitempty"` + + // This is the ID of the [[SoftLayer_Billing_Order_Item]] of this configuration/container. It is used for rebuilding an order container from a quote and is set automatically. + BillingOrderItemId *int `json:"billingOrderItemId,omitempty" xmlrpc:"billingOrderItemId,omitempty"` + + // The URL to which PayPal redirects browser after checkout has been canceled before completion of a payment. + CancelUrl *string `json:"cancelUrl,omitempty" xmlrpc:"cancelUrl,omitempty"` + + // Added by softlayer-go. This hints to the API what kind of product order this is.
+ ComplexType *string `json:"complexType,omitempty" xmlrpc:"complexType,omitempty"` + + // User-specified description to identify a particular order container. This is useful if you have a multi-configuration order (multiple orderContainers) and you want to be able to easily determine one from another. Populating this value may be helpful if an exception is thrown when placing an order and it's tied to a specific order container. + ContainerIdentifier *string `json:"containerIdentifier,omitempty" xmlrpc:"containerIdentifier,omitempty"` + + // This hash is internally-generated and is used to for tracking order containers. + ContainerSplHash *string `json:"containerSplHash,omitempty" xmlrpc:"containerSplHash,omitempty"` + + // The currency type chosen at checkout. + CurrencyShortName *string `json:"currencyShortName,omitempty" xmlrpc:"currencyShortName,omitempty"` + + // Device Fingerprint Identifier - Optional. + DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"` + + // This is the configuration identifier for tracking orders on the HTML order forms. + DisplayLayerSessionId *string `json:"displayLayerSessionId,omitempty" xmlrpc:"displayLayerSessionId,omitempty"` + + // no documentation yet + ExtendedHardwareTesting *bool `json:"extendedHardwareTesting,omitempty" xmlrpc:"extendedHardwareTesting,omitempty"` + + // The [[SoftLayer_Product_Item_Price]] for the Flexible Credit Program discount. The oneTimeFee field contains the calculated discount being applied to the order. + FlexibleCreditProgramPrice *Product_Item_Price `json:"flexibleCreditProgramPrice,omitempty" xmlrpc:"flexibleCreditProgramPrice,omitempty"` + + // This flag indicates that the customer consented to the GDPR terms for the quote. + GdprConsentFlag *bool `json:"gdprConsentFlag,omitempty" xmlrpc:"gdprConsentFlag,omitempty"` + + // For orders that contain servers (bare metal, virtual server, big data, etc.), the hardware property is required. This property is an array of [[SoftLayer_Hardware]] objects. The hostname and domain properties are required for each hardware object. Note that virtual server ([[SoftLayer_Container_Product_Order_Virtual_Guest]]) orders may populate this field instead of the virtualGuests property. + Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // An optional virtual disk image template identifier to be used as an installation base for a computing instance order + ImageTemplateGlobalIdentifier *string `json:"imageTemplateGlobalIdentifier,omitempty" xmlrpc:"imageTemplateGlobalIdentifier,omitempty"` + + // An optional virtual disk image template identifier to be used as an installation base for a computing instance order + ImageTemplateId *int `json:"imageTemplateId,omitempty" xmlrpc:"imageTemplateId,omitempty"` + + // Flag to identify a "managed" order. This value is set internally. + IsManagedOrder *int `json:"isManagedOrder,omitempty" xmlrpc:"isManagedOrder,omitempty"` + + // The collection of [[SoftLayer_Container_Product_Item_Category_Question_Answer]] for any product category that has additional questions requiring user input. + ItemCategoryQuestionAnswers []Container_Product_Item_Category_Question_Answer `json:"itemCategoryQuestionAnswers,omitempty" xmlrpc:"itemCategoryQuestionAnswers,omitempty"` + + // The [[SoftLayer_Location_Region]] keyname or specific [[SoftLayer_Location_Datacenter]] id where the order should be provisioned. 
If this value is provided and the regionalGroup property is also specified, an exception will be thrown indicating that only 1 is allowed. + Location *string `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // This [[SoftLayer_Location]] object will be determined from the location property and will be returned in the order verification or placement response. Any value specified here will get overwritten by the verification process. + LocationObject *Location `json:"locationObject,omitempty" xmlrpc:"locationObject,omitempty"` + + // A generic message about the order. Does not need to be sent in with any orders. + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // Orders may contain an array of configurations. Populating this property allows you to purchase multiple configurations within a single order. Each order container will have its own individual settings independent of the other order containers. For example, it is possible to order a bare metal server in one configuration and a virtual server in another. + // + // If orderContainers is populated on the base order container, most of the configuration-specific properties are ignored on the base container. For example, prices, location and packageId will be ignored on the base container, but since the billingInformation is a property that's not specific to a single order container (but the order as a whole) it must be populated on the base container. + OrderContainers []Container_Product_Order `json:"orderContainers,omitempty" xmlrpc:"orderContainers,omitempty"` + + // This is deprecated and does not do anything. + OrderHostnames []string `json:"orderHostnames,omitempty" xmlrpc:"orderHostnames,omitempty"` + + // Collection of exceptions resulting from the verification of the order. This value is set internally and is not required for end users when placing an order. When placing API orders, users can use this value to determine the container-specific exception that was thrown. + OrderVerificationExceptions []Container_Exception `json:"orderVerificationExceptions,omitempty" xmlrpc:"orderVerificationExceptions,omitempty"` + + // The [[SoftLayer_Product_Package]] id for an order container. This is required to place an order. + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` + + // The Payment Type is Optional. If nothing is sent in, then the normal method of payment will be used. For paypal customers, this means a paypalToken will be returned in the receipt. This token is to be used on the paypal website to complete the order. For Credit Card customers, the card on file in our system will be used to make an initial authorization. To force the order to use a payment type, use one of the following: CARD_ON_FILE or PAYPAL + PaymentType *string `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"` + + // The post-tax recurring charge for the order. This is the sum of preTaxRecurring + totalRecurringTax. + PostTaxRecurring *Float64 `json:"postTaxRecurring,omitempty" xmlrpc:"postTaxRecurring,omitempty"` + + // The post-tax recurring hourly charge for the order. Since taxes are not calculated for hourly orders, this value will be the same as preTaxRecurringHourly. + PostTaxRecurringHourly *Float64 `json:"postTaxRecurringHourly,omitempty" xmlrpc:"postTaxRecurringHourly,omitempty"` + + // The post-tax recurring monthly charge for the order. This is the sum of preTaxRecurringMonthly + totalRecurringTax. 
+ PostTaxRecurringMonthly *Float64 `json:"postTaxRecurringMonthly,omitempty" xmlrpc:"postTaxRecurringMonthly,omitempty"` + + // The post-tax setup fees of the order. This is the sum of preTaxSetup + totalSetupTax; + PostTaxSetup *Float64 `json:"postTaxSetup,omitempty" xmlrpc:"postTaxSetup,omitempty"` + + // The pre-tax recurring total of the order. If there are mixed monthly and hourly prices on the order, this will be the sum of preTaxRecurringHourly and preTaxRecurringMonthly. + PreTaxRecurring *Float64 `json:"preTaxRecurring,omitempty" xmlrpc:"preTaxRecurring,omitempty"` + + // The pre-tax hourly recurring total of the order. If there are only monthly prices on the order, this value will be 0. + PreTaxRecurringHourly *Float64 `json:"preTaxRecurringHourly,omitempty" xmlrpc:"preTaxRecurringHourly,omitempty"` + + // The pre-tax monthly recurring total of the order. If there are only hourly prices on the order, this value will be 0. + PreTaxRecurringMonthly *Float64 `json:"preTaxRecurringMonthly,omitempty" xmlrpc:"preTaxRecurringMonthly,omitempty"` + + // The pre-tax setup fee total of the order. + PreTaxSetup *Float64 `json:"preTaxSetup,omitempty" xmlrpc:"preTaxSetup,omitempty"` + + // If there are any presale events available for an order, this value will be populated. It is set internally and is not required for end users when placing an order. See [[SoftLayer_Sales_Presale_Event]] for more info. + PresaleEvent *Sales_Presale_Event `json:"presaleEvent,omitempty" xmlrpc:"presaleEvent,omitempty"` + + // A preset configuration id for the package. Is required if not submitting any prices. + PresetId *int `json:"presetId,omitempty" xmlrpc:"presetId,omitempty"` + + // This is a collection of [[SoftLayer_Product_Item_Price]] objects. The only required property to populate for an item price object when ordering is its id - all other supplied information about the price (e.g., recurringFee, setupFee, etc.) will be ignored. Unless the [[SoftLayer_Product_Package]] associated with the order allows for preset prices, this property is required to place an order. + Prices []Product_Item_Price `json:"prices,omitempty" xmlrpc:"prices,omitempty"` + + // The id of a [[SoftLayer_Hardware_Component_Partition_Template]]. This property is optional. If no partition template is provided, a default will be used according to the operating system chosen with the order. Using the [[SoftLayer_Hardware_Component_Partition_OperatingSystem]] service, getPartitionTemplates will return those available for the particular operating system. + PrimaryDiskPartitionId *int `json:"primaryDiskPartitionId,omitempty" xmlrpc:"primaryDiskPartitionId,omitempty"` + + // Priorities to set on replication set servers. + Priorities []string `json:"priorities,omitempty" xmlrpc:"priorities,omitempty"` + + // Flag for identifying a container as Virtual Server (Private Node). + PrivateCloudOrderFlag *bool `json:"privateCloudOrderFlag,omitempty" xmlrpc:"privateCloudOrderFlag,omitempty"` + + // Type of Virtual Server (Private Node) order. Potential values: INITIAL, ADDHOST, ADDIPS, ADDZONE + PrivateCloudOrderType *string `json:"privateCloudOrderType,omitempty" xmlrpc:"privateCloudOrderType,omitempty"` + + // Optional promotion code for an order. + PromotionCode *string `json:"promotionCode,omitempty" xmlrpc:"promotionCode,omitempty"` + + // Generic properties. + Properties []Container_Product_Order_Property `json:"properties,omitempty" xmlrpc:"properties,omitempty"` + + // The Prorated Initial Charge plus the balance on the account. 
Only the recurring fees are prorated. Here's how the calculation works: We take the postTaxRecurring value and we prorate it based on the time between now and the next bill date for this account. After this, we add in the setup fee since this is not prorated. Then, if there is a balance on the account, we add that to the account. In the event that there is a credit balance on the account, we will subtract this amount from the order total. If the credit balance on the account is greater than the prorated initial charge, the order will go through without a charge to the credit card on the account or requiring a paypal payment. The credit on the account will be reduced by the order total, and the order will await approval from sales, as normal. If there is a pending order already in the system, We will ignore the balance on the account completely, in the calculation of the initial charge. This is to protect against two orders coming into the system and getting the benefit of a credit balance, or worse, both orders being charged the order amount + the balance on the account. + ProratedInitialCharge *Float64 `json:"proratedInitialCharge,omitempty" xmlrpc:"proratedInitialCharge,omitempty"` + + // This is the same as the proratedInitialCharge, except the balance on the account is ignored. This is the prorated total amount of the order. + ProratedOrderTotal *Float64 `json:"proratedOrderTotal,omitempty" xmlrpc:"proratedOrderTotal,omitempty"` + + // The URLs for scripts to execute on their respective servers after they have been provisioned. Provision scripts are not available for Microsoft Windows servers. + ProvisionScripts []string `json:"provisionScripts,omitempty" xmlrpc:"provisionScripts,omitempty"` + + // The quantity of the item being ordered + Quantity *int `json:"quantity,omitempty" xmlrpc:"quantity,omitempty"` + + // A custom name to be assigned to the quote. + QuoteName *string `json:"quoteName,omitempty" xmlrpc:"quoteName,omitempty"` + + // Specifying a regional group name allows you to not worry about placing your server or service at a specific datacenter, but to any datacenter within that regional group. See [[SoftLayer_Location_Group_Regional]] to get a list of available regional group names. + // + // location and regionalGroup are mutually exclusive on an order container. If both location and regionalGroup are provided, an exception will be thrown indicating that only 1 is allowed. + // + // If a regional group is provided and VLANs are specified (within the hardware or virtualGuests properties), we will use the datacenter where the VLANs are located. If no VLANs are specified, we will use the preferred datacenter on the regional group object. + RegionalGroup *string `json:"regionalGroup,omitempty" xmlrpc:"regionalGroup,omitempty"` + + // An optional resource group identifier specifying the resource group to attach the order to + ResourceGroupId *int `json:"resourceGroupId,omitempty" xmlrpc:"resourceGroupId,omitempty"` + + // This variable specifies the name of the resource group the server configuration belongs to. For MongoDB Replica sets, it would be the replica set name. + ResourceGroupName *string `json:"resourceGroupName,omitempty" xmlrpc:"resourceGroupName,omitempty"` + + // An optional resource group template identifier to be used as a deployment base for a Virtual Server (Private Node) order. 
+ ResourceGroupTemplateId *int `json:"resourceGroupTemplateId,omitempty" xmlrpc:"resourceGroupTemplateId,omitempty"` + + // The URL to which PayPal redirects browser after a payment is completed. + ReturnUrl *string `json:"returnUrl,omitempty" xmlrpc:"returnUrl,omitempty"` + + // This flag indicates that the quote should be sent to the email address associated with the account or order. + SendQuoteEmailFlag *bool `json:"sendQuoteEmailFlag,omitempty" xmlrpc:"sendQuoteEmailFlag,omitempty"` + + // The number of cores for the server being ordered. This value is set internally. + ServerCoreCount *int `json:"serverCoreCount,omitempty" xmlrpc:"serverCoreCount,omitempty"` + + // The token of a requesting service. Do not set. + ServiceToken *string `json:"serviceToken,omitempty" xmlrpc:"serviceToken,omitempty"` + + // An optional computing instance identifier to be used as an installation base for a computing instance order + SourceVirtualGuestId *int `json:"sourceVirtualGuestId,omitempty" xmlrpc:"sourceVirtualGuestId,omitempty"` + + // The containers which hold SoftLayer_Security_Ssh_Key IDs to add to their respective servers. The order of containers passed in needs to match the order they are assigned to either hardware or virtualGuests. SSH Keys will not be assigned for servers with Microsoft Windows. + SshKeys []Container_Product_Order_SshKeys `json:"sshKeys,omitempty" xmlrpc:"sshKeys,omitempty"` + + // An optional parameter for step-based order processing. + StepId *int `json:"stepId,omitempty" xmlrpc:"stepId,omitempty"` + + // + // + // For orders that want to add storage groups such as RAID across multiple disks, simply add [[SoftLayer_Container_Product_Order_Storage_Group]] objects to this array. Storage groups will only be used if the 'RAID' disk controller price is selected. Any other disk controller types will ignore the storage groups set here. + // + // The first storage group in this array will be considered the primary storage group, which is used for the OS. Any other storage groups will act as data storage. + // + // + StorageGroups []Container_Product_Order_Storage_Group `json:"storageGroups,omitempty" xmlrpc:"storageGroups,omitempty"` + + // The order container may not contain the final tax rates when it is returned from [[SoftLayer_Product_Order/verifyOrder|verifyOrder]]. This hash will facilitate checking if the tax rates have finished being calculated and retrieving the accurate tax rate values. + TaxCacheHash *string `json:"taxCacheHash,omitempty" xmlrpc:"taxCacheHash,omitempty"` + + // Flag to indicate if the order container has the final tax rates for the order. Some tax rates are calculated in the background because they take longer, and they might not be finished when the container is returned from [[SoftLayer_Product_Order/verifyOrder|verifyOrder]]. + TaxCompletedFlag *bool `json:"taxCompletedFlag,omitempty" xmlrpc:"taxCompletedFlag,omitempty"` + + // The SoftLayer_Product_Item_Price for the Tech Incubator discount. The oneTimeFee field contain the calculated discount being applied to the order. + TechIncubatorItemPrice *Product_Item_Price `json:"techIncubatorItemPrice,omitempty" xmlrpc:"techIncubatorItemPrice,omitempty"` + + // The total tax portion of the recurring fees. + TotalRecurringTax *Float64 `json:"totalRecurringTax,omitempty" xmlrpc:"totalRecurringTax,omitempty"` + + // The tax amount of the setup fees. 
+ TotalSetupTax *Float64 `json:"totalSetupTax,omitempty" xmlrpc:"totalSetupTax,omitempty"` + + // An optional flag to use hourly pricing instead of standard monthly pricing. + UseHourlyPricing *bool `json:"useHourlyPricing,omitempty" xmlrpc:"useHourlyPricing,omitempty"` + + // For virtual guest (virtual server) orders, this property is required if you did not specify data in the hardware property. This is an array of [[SoftLayer_Virtual_Guest]] objects. The hostname and domain properties are required for each virtual guest object. There is no need to specify data in this property and the hardware property - only one is required for virtual server orders. + VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"` +} + +// This datatype is to be used for data transfer requests. +type Container_Product_Order_Account_Media_Data_Transfer_Request struct { + Container_Product_Order + + // An instance of [[SoftLayer_Account_Media_Data_Transfer_Request]] + Request *Account_Media_Data_Transfer_Request `json:"request,omitempty" xmlrpc:"request,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. The SoftLayer_Container_Product_Order_Attribute_Address datatype contains the address information. +type Container_Product_Order_Attribute_Address struct { + Entity + + // The physical street address. + AddressLine1 *string `json:"addressLine1,omitempty" xmlrpc:"addressLine1,omitempty"` + + // The second line in the address. Information such as suite number goes here. + AddressLine2 *string `json:"addressLine2,omitempty" xmlrpc:"addressLine2,omitempty"` + + // The city name + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // The 2-character Country code. (i.e. US) + CountryCode *string `json:"countryCode,omitempty" xmlrpc:"countryCode,omitempty"` + + // The non US/Canadian state or region. + NonUsState *string `json:"nonUsState,omitempty" xmlrpc:"nonUsState,omitempty"` + + // The Zip or Postal Code. + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // The state or region. + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. The SoftLayer_Container_Product_Order_Attribute_Contact datatype contains the contact information. +type Container_Product_Order_Attribute_Contact struct { + Entity + + // The address information of the contact. + Address *Container_Product_Order_Attribute_Address `json:"address,omitempty" xmlrpc:"address,omitempty"` + + // The email address of the contact. + EmailAddress *string `json:"emailAddress,omitempty" xmlrpc:"emailAddress,omitempty"` + + // The fax number associated with a contact. This is an optional value. + FaxNumber *string `json:"faxNumber,omitempty" xmlrpc:"faxNumber,omitempty"` + + // The first name of the contact. + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // The last name of the contact. + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // The organization name of the contact. + OrganizationName *string `json:"organizationName,omitempty" xmlrpc:"organizationName,omitempty"` + + // The phone number associated with a contact. + PhoneNumber *string `json:"phoneNumber,omitempty" xmlrpc:"phoneNumber,omitempty"` + + // The title of the contact. 
+ Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. The SoftLayer_Container_Product_Order_Attribute_Organization datatype contains the organization information.
+type Container_Product_Order_Attribute_Organization struct {
+ Entity
+
+ // The address information of the organization.
+ Address *Container_Product_Order_Attribute_Address `json:"address,omitempty" xmlrpc:"address,omitempty"`
+
+ // The fax number associated with an organization. This is an optional value.
+ FaxNumber *string `json:"faxNumber,omitempty" xmlrpc:"faxNumber,omitempty"`
+
+ // The name of an organization.
+ OrganizationName *string `json:"organizationName,omitempty" xmlrpc:"organizationName,omitempty"`
+
+ // The phone number associated with an organization.
+ PhoneNumber *string `json:"phoneNumber,omitempty" xmlrpc:"phoneNumber,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order with SoftLayer.
+type Container_Product_Order_Billing_Information struct {
+ Entity
+
+ // The physical street address. Reserve information such as "apartment #123" or "Suite 2" for line 2.
+ BillingAddressLine1 *string `json:"billingAddressLine1,omitempty" xmlrpc:"billingAddressLine1,omitempty"`
+
+ // The second line in the address. Information such as suite number goes here.
+ BillingAddressLine2 *string `json:"billingAddressLine2,omitempty" xmlrpc:"billingAddressLine2,omitempty"`
+
+ // The city in which a customer's account resides.
+ BillingCity *string `json:"billingCity,omitempty" xmlrpc:"billingCity,omitempty"`
+
+ // The 2-character country code for an account's address (e.g. US).
+ BillingCountryCode *string `json:"billingCountryCode,omitempty" xmlrpc:"billingCountryCode,omitempty"`
+
+ // The email address associated with a customer account.
+ BillingEmail *string `json:"billingEmail,omitempty" xmlrpc:"billingEmail,omitempty"`
+
+ // The company name for an account.
+ BillingNameCompany *string `json:"billingNameCompany,omitempty" xmlrpc:"billingNameCompany,omitempty"`
+
+ // The first name of the customer account owner.
+ BillingNameFirst *string `json:"billingNameFirst,omitempty" xmlrpc:"billingNameFirst,omitempty"`
+
+ // The last name of the customer account owner.
+ BillingNameLast *string `json:"billingNameLast,omitempty" xmlrpc:"billingNameLast,omitempty"`
+
+ // The fax number associated with a customer account.
+ BillingPhoneFax *string `json:"billingPhoneFax,omitempty" xmlrpc:"billingPhoneFax,omitempty"`
+
+ // The phone number associated with a customer account.
+ BillingPhoneVoice *string `json:"billingPhoneVoice,omitempty" xmlrpc:"billingPhoneVoice,omitempty"`
+
+ // The Zip or Postal Code for the billing address on an account.
+ BillingPostalCode *string `json:"billingPostalCode,omitempty" xmlrpc:"billingPostalCode,omitempty"`
+
+ // The state for the account.
+ BillingState *string `json:"billingState,omitempty" xmlrpc:"billingState,omitempty"`
+
+ // The credit card number to use.
+ CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"`
+
+ // The payment card expiration month.
+ CardExpirationMonth *int `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"`
+
+ // The payment card expiration year.
+ CardExpirationYear *int `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"`
+
+ // The Card Verification Value (CVV) number.
+ CreditCardVerificationNumber *string `json:"creditCardVerificationNumber,omitempty" xmlrpc:"creditCardVerificationNumber,omitempty"`
+
+ // 1 = opted in, 0 = not opted in. Select the EU Supported option if you use IBM Bluemix Infrastructure services to process EU citizens' personal data. This option limits Level 1 and Level 2 support to the EU. However, IBM Bluemix and SoftLayer teams outside the EU perform processing activities when they are not resolved at Level 1 or 2. These activities are always at your instruction and do not impact the security or privacy of your data. As with our standard services, you must review the impact these cross-border processing activities have on your services and take any necessary measures, including review of IBM's US-EU Privacy Shield registration and Data Processing Addendum. If you select products, services, or locations outside the EU, all processing activities will be performed outside of the EU. If you select other IBM services in addition to Bluemix IaaS (IBM or a third party), determine the service location in order to meet any additional data protection or processing requirements that permit cross-border transfers.
+ EuSupported *bool `json:"euSupported,omitempty" xmlrpc:"euSupported,omitempty"`
+
+ // Tax exempt status. 1 = exempt (not taxable), 0 = not exempt (taxable).
+ TaxExempt *int `json:"taxExempt,omitempty" xmlrpc:"taxExempt,omitempty"`
+
+ // The VAT ID entered at checkout.
+ VatId *string `json:"vatId,omitempty" xmlrpc:"vatId,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. The SoftLayer_Container_Product_Order_Dns_Domain_Registration datatype contains everything required to place a domain registration order with SoftLayer.
+type Container_Product_Order_Dns_Domain_Registration struct {
+ Container_Product_Order
+
+ // Administrative contact information associated with a registration or transfer. This is required if the registration type is 'new' or 'transfer'.
+ AdministrativeContact *Container_Dns_Domain_Registration_Contact `json:"administrativeContact,omitempty" xmlrpc:"administrativeContact,omitempty"`
+
+ // Billing contact information associated with a registration or transfer. This is required if the registration type is 'new' or 'transfer'.
+ BillingContact *Container_Dns_Domain_Registration_Contact `json:"billingContact,omitempty" xmlrpc:"billingContact,omitempty"`
+
+ // The list of domains to be registered. This is required if the registration type is 'new', 'renew', or 'transfer'.
+ DomainRegistrationList []Container_Dns_Domain_Registration_List `json:"domainRegistrationList,omitempty" xmlrpc:"domainRegistrationList,omitempty"`
+
+ // Owner contact information associated with a registration or transfer. This is required if the registration type is 'new' or 'transfer'.
+ OwnerContact *Container_Dns_Domain_Registration_Contact `json:"ownerContact,omitempty" xmlrpc:"ownerContact,omitempty"`
+
+ // The type of a domain registration order. The registration type is required. Allowed values are 'new', 'transfer', and 'renew'.
+ RegistrationType *string `json:"registrationType,omitempty" xmlrpc:"registrationType,omitempty"`
+
+ // Technical contact information associated with a registration or transfer. This is required if the registration type is 'new' or 'transfer'.
+ TechnicalContact *Container_Dns_Domain_Registration_Contact `json:"technicalContact,omitempty" xmlrpc:"technicalContact,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. The SoftLayer_Container_Product_Order_Dns_Domain_Reseller datatype contains everything required to place a domain reseller credit order with SoftLayer.
+type Container_Product_Order_Dns_Domain_Reseller struct {
+ Container_Product_Order
+
+ // Amount to be credited to the domain reseller account.
+ CreditAmount *Float64 `json:"creditAmount,omitempty" xmlrpc:"creditAmount,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a Gateway Appliance Cluster order with SoftLayer.
+type Container_Product_Order_Gateway_Appliance_Cluster struct {
+ Container_Product_Order
+
+ // Used to identify which items on an order belong in the same cluster.
+ ClusterIdentifier *string `json:"clusterIdentifier,omitempty" xmlrpc:"clusterIdentifier,omitempty"`
+
+ // Indicates what type of cluster order is being placed (HA, Provision).
+ ClusterOrderType *string `json:"clusterOrderType,omitempty" xmlrpc:"clusterOrderType,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a hardware security module order with SoftLayer.
+type Container_Product_Order_Hardware_Security_Module struct {
+ Container_Product_Order_Hardware_Server
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order with SoftLayer.
+type Container_Product_Order_Hardware_Server struct {
+ Container_Product_Order
+
+ // Used to identify which items on an order belong in the same cluster.
+ ClusterIdentifier *string `json:"clusterIdentifier,omitempty" xmlrpc:"clusterIdentifier,omitempty"`
+
+ // Indicates what type of cluster order is being placed (HA, Provision).
+ ClusterOrderType *string `json:"clusterOrderType,omitempty" xmlrpc:"clusterOrderType,omitempty"`
+
+ // Used to identify which gateway is being upgraded to HA.
+ ClusterResourceId *int `json:"clusterResourceId,omitempty" xmlrpc:"clusterResourceId,omitempty"`
+
+ // Id of the [[SoftLayer_Monitoring_Agent_Configuration_Template_Group]] to be used with the monitoring package.
+ MonitoringAgentConfigurationTemplateGroupId *int `json:"monitoringAgentConfigurationTemplateGroupId,omitempty" xmlrpc:"monitoringAgentConfigurationTemplateGroupId,omitempty"`
+
+ // When ordering Virtual Server (Private Node), this variable specifies the role of the server configuration. (Deprecated)
+ PrivateCloudServerRole *string `json:"privateCloudServerRole,omitempty" xmlrpc:"privateCloudServerRole,omitempty"`
+
+ // Used to identify which device the new server should be attached to.
+ RequiredUpstreamDeviceId *int `json:"requiredUpstreamDeviceId,omitempty" xmlrpc:"requiredUpstreamDeviceId,omitempty"`
+
+ // Tags (used in MongoDB deployments).
(Deprecated) + Tags []Container_Product_Order_Property `json:"tags,omitempty" xmlrpc:"tags,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order with SoftLayer. +type Container_Product_Order_Hardware_Server_Colocation struct { + Container_Product_Order_Hardware_Server +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a Gateway Appliance order. +type Container_Product_Order_Hardware_Server_Gateway_Appliance struct { + Container_Product_Order_Hardware_Server +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order with SoftLayer. +type Container_Product_Order_Hardware_Server_Upgrade struct { + Container_Product_Order_Hardware_Server +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a Monitoring Package order with SoftLayer. +type Container_Product_Order_Monitoring_Package struct { + Container_Product_Order + + // no documentation yet + ConfigurationTemplateGroups []Monitoring_Agent_Configuration_Template_Group `json:"configurationTemplateGroups,omitempty" xmlrpc:"configurationTemplateGroups,omitempty"` + + // no documentation yet + ServerType *string `json:"serverType,omitempty" xmlrpc:"serverType,omitempty"` +} + +// This is a datatype used with multi-configuration deployments. Multi-configuration deployments also have a deployment specific datatype that should be used in lieu of this one. +type Container_Product_Order_MultiConfiguration struct { + Container_Product_Order +} + +// no documentation yet +type Container_Product_Order_MultiConfiguration_Tornado struct { + Container_Product_Order_MultiConfiguration +} + +// This type contains the structure of network-related objects that may be specified when ordering services. +type Container_Product_Order_Network struct { + Entity + + // The [[SoftLayer_Network]] object. + Network *Network `json:"network,omitempty" xmlrpc:"network,omitempty"` + + // The list of public [[SoftLayer_Container_Product_Order_Network_Vlan|vlans]] available for ordering. Each VLAN will have list of public subnets that are accessible to the VLAN. + PublicVlans []Container_Product_Order `json:"publicVlans,omitempty" xmlrpc:"publicVlans,omitempty"` + + // The list of private [[SoftLayer_Container_Product_Order_Network_Subnet|subnets]] available for ordering with a description of their available IP space. + Subnets []Container_Product_Order `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an application delivery controller order with SoftLayer. +type Container_Product_Order_Network_Application_Delivery_Controller struct { + Container_Product_Order + + // An optional [[SoftLayer_Network_Application_Delivery_Controller]] identifier that is used for upgrading an existing application delivery controller. + ApplicationDeliveryControllerId *int `json:"applicationDeliveryControllerId,omitempty" xmlrpc:"applicationDeliveryControllerId,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. 
This datatype has everything required to place a CDN order with SoftLayer. +type Container_Product_Order_Network_ContentDelivery_Account struct { + Container_Product_Order + + // The CDN account name + CdnAccountName *string `json:"cdnAccountName,omitempty" xmlrpc:"cdnAccountName,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a CDN order with SoftLayer. +type Container_Product_Order_Network_ContentDelivery_Account_Upgrade struct { + Container_Product_Order + + // ID of an existing CDN account. You can use this to upgrade an existing CDN account. + CdnAccountId *string `json:"cdnAccountId,omitempty" xmlrpc:"cdnAccountId,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a CDN Service order with SoftLayer. +type Container_Product_Order_Network_ContentDelivery_Service struct { + Container_Product_Order +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder when purchasing a Network Interconnect. +type Container_Product_Order_Network_Interconnect struct { + Container_Product_Order + + // The BGP ASN. + BgpAsn *string `json:"bgpAsn,omitempty" xmlrpc:"bgpAsn,omitempty"` + + // The [[SoftLayer_Network_Interconnect]] for this order, ID must be provided. + InterconnectId *int `json:"interconnectId,omitempty" xmlrpc:"interconnectId,omitempty"` + + // The [[SoftLayer_Network_DirectLink_Location]] for this order, ID must be provided. + InterconnectLocation *Network_DirectLink_Location `json:"interconnectLocation,omitempty" xmlrpc:"interconnectLocation,omitempty"` + + // A name to identify this Direct Link resource. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Optional network identifier for this link. + NetworkIdentifier *string `json:"networkIdentifier,omitempty" xmlrpc:"networkIdentifier,omitempty"` +} + +// This is the default container type for network load balancer orders. +type Container_Product_Order_Network_LoadBalancer struct { + Container_Product_Order +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for a Load Balancer as a Service. +type Container_Product_Order_Network_LoadBalancer_AsAService struct { + Container_Product_Order + + // A description of this Load Balancer. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The [[SoftLayer_Network_LBaaS_LoadBalancerHealthMonitorConfiguration]]s for this Load Balancer. + HealthMonitorConfigurations []Network_LBaaS_LoadBalancerHealthMonitorConfiguration `json:"healthMonitorConfigurations,omitempty" xmlrpc:"healthMonitorConfigurations,omitempty"` + + // Specify whether this load balancer is a public or internal facing load balancer. If this value is omitted, the value will default to true. + IsPublic *bool `json:"isPublic,omitempty" xmlrpc:"isPublic,omitempty"` + + // A name to identify this Load Balancer. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The [[SoftLayer_Network_LBaaS_LoadBalancerProtocolConfiguration]]s for this Load Balancer. 
+ ProtocolConfigurations []Network_LBaaS_LoadBalancerProtocolConfiguration `json:"protocolConfigurations,omitempty" xmlrpc:"protocolConfigurations,omitempty"`
+
+ // The [[SoftLayer_Network_LBaaS_LoadBalancerServerInstanceInfo]]s for this Load Balancer.
+ ServerInstancesInformation []Network_LBaaS_LoadBalancerServerInstanceInfo `json:"serverInstancesInformation,omitempty" xmlrpc:"serverInstancesInformation,omitempty"`
+
+ // The [[SoftLayer_Network_Subnet]]s where this Load Balancer will be provisioned.
+ Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"`
+
+ // Specify whether this load balancer allocates IP addresses from the system IP pool (true, the default) or from the customer's public subnet (null|false).
+ UseSystemPublicIpPool *bool `json:"useSystemPublicIpPool,omitempty" xmlrpc:"useSystemPublicIpPool,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a global load balancer order with SoftLayer.
+type Container_Product_Order_Network_LoadBalancer_Global struct {
+ Container_Product_Order
+
+ // The domain name that will be load balanced.
+ Domain *string `json:"domain,omitempty" xmlrpc:"domain,omitempty"`
+
+ // The hostname that will be load balanced.
+ Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a network message delivery order with SoftLayer.
+type Container_Product_Order_Network_Message_Delivery struct {
+ Container_Product_Order
+
+ // The account password for SendGrid enrollment.
+ AccountPassword *string `json:"accountPassword,omitempty" xmlrpc:"accountPassword,omitempty"`
+
+ // The username for SendGrid enrollment.
+ AccountUsername *string `json:"accountUsername,omitempty" xmlrpc:"accountUsername,omitempty"`
+
+ // The email address for SendGrid enrollment.
+ EmailAddress *string `json:"emailAddress,omitempty" xmlrpc:"emailAddress,omitempty"`
+}
+
+// This is the base data type for Performance storage order containers. If you wish to place an order, you must not use this class; instead, use the appropriate child container for the type of storage you would like to order: [[SoftLayer_Container_Product_Order_Network_PerformanceStorage_Nfs]] for File and [[SoftLayer_Container_Product_Order_Network_PerformanceStorage_Iscsi]] for Block storage.
+type Container_Product_Order_Network_PerformanceStorage struct {
+ Container_Product_Order
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for iSCSI (Block) Performance Storage.
+type Container_Product_Order_Network_PerformanceStorage_Iscsi struct {
+ Container_Product_Order_Network_PerformanceStorage
+
+ // OS type to be used when formatting the storage space; this should match the OS type that will be connecting to the LUN. The only required property is the keyName of the OS type.
+ OsFormatType *Network_Storage_Iscsi_OS_Type `json:"osFormatType,omitempty" xmlrpc:"osFormatType,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for NFS (File) Performance Storage.
+type Container_Product_Order_Network_PerformanceStorage_Nfs struct {
+ Container_Product_Order_Network_PerformanceStorage
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a hardware firewall order with SoftLayer.
+type Container_Product_Order_Network_Protection_Firewall struct {
+ Container_Product_Order
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a hardware (dedicated) firewall order with SoftLayer.
+type Container_Product_Order_Network_Protection_Firewall_Dedicated struct {
+ Container_Product_Order
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // no documentation yet
+ RouterId *int `json:"routerId,omitempty" xmlrpc:"routerId,omitempty"`
+
+ // generic properties.
+ Vlan *Network_Vlan `json:"vlan,omitempty" xmlrpc:"vlan,omitempty"`
+
+ // generic properties.
+ VlanId *int `json:"vlanId,omitempty" xmlrpc:"vlanId,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order with SoftLayer.
+type Container_Product_Order_Network_Protection_Firewall_Dedicated_Upgrade struct {
+ Container_Product_Order_Network_Protection_Firewall_Dedicated
+
+ // no documentation yet
+ FirewallId *int `json:"firewallId,omitempty" xmlrpc:"firewallId,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for Storage as a Service.
+type Container_Product_Order_Network_Storage_AsAService struct {
+ Container_Product_Order
+
+ // This must be populated only when duplicating from a specific snapshot as part of a duplicate volume order. It represents the identifier of the origin [[SoftLayer_Network_Storage_Snapshot]].
+ DuplicateOriginSnapshotId *int `json:"duplicateOriginSnapshotId,omitempty" xmlrpc:"duplicateOriginSnapshotId,omitempty"`
+
+ // This must be populated only for duplicate volume ordering. It represents the identifier of the origin [[SoftLayer_Network_Storage]].
+ DuplicateOriginVolumeId *int `json:"duplicateOriginVolumeId,omitempty" xmlrpc:"duplicateOriginVolumeId,omitempty"`
+
+ // When ordering performance by IOPS, populate this property with the desired number of IOPS.
+ Iops *int `json:"iops,omitempty" xmlrpc:"iops,omitempty"`
+
+ // This must be populated only for replicant volume ordering. It represents the identifier of the origin [[SoftLayer_Network_Storage]].
+ OriginVolumeId *int `json:"originVolumeId,omitempty" xmlrpc:"originVolumeId,omitempty"`
+
+ // This must be populated only for replicant volume ordering. It represents the [[SoftLayer_Network_Storage_Schedule]] that will be used to replicate the origin [[SoftLayer_Network_Storage]] volume.
+ OriginVolumeScheduleId *int `json:"originVolumeScheduleId,omitempty" xmlrpc:"originVolumeScheduleId,omitempty"`
+
+ // This must be populated for block storage orders. This should match the OS type of the host(s) that will connect to the volume. The only required property is the keyName of the OS type. This property is ignored for file storage orders.
+ OsFormatType *Network_Storage_Iscsi_OS_Type `json:"osFormatType,omitempty" xmlrpc:"osFormatType,omitempty"`
+
+ // Volume size in GBs.
+ VolumeSize *int `json:"volumeSize,omitempty" xmlrpc:"volumeSize,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an upgrade order for Storage as a Service.
+type Container_Product_Order_Network_Storage_AsAService_Upgrade struct {
+ Container_Product_Order_Network_Storage_AsAService
+
+ // The [[SoftLayer_Network_Storage]] being upgraded. Only its ID is required.
+ Volume *Network_Storage `json:"volume,omitempty" xmlrpc:"volume,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for additional Evault plugins.
+type Container_Product_Order_Network_Storage_Backup_Evault_Plugin struct {
+ Container_Product_Order
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an Evault order with SoftLayer.
+type Container_Product_Order_Network_Storage_Backup_Evault_Vault struct {
+ Container_Product_Order
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for Enterprise Storage.
+type Container_Product_Order_Network_Storage_Enterprise struct {
+ Container_Product_Order
+
+ // This must be populated only for replicant volume ordering. It represents the identifier of the origin [[SoftLayer_Network_Storage]].
+ OriginVolumeId *int `json:"originVolumeId,omitempty" xmlrpc:"originVolumeId,omitempty"`
+
+ // This must be populated only for replicant volume ordering. It represents the [[SoftLayer_Network_Storage_Schedule]] that will be used to replicate the origin [[SoftLayer_Network_Storage]] volume.
+ OriginVolumeScheduleId *int `json:"originVolumeScheduleId,omitempty" xmlrpc:"originVolumeScheduleId,omitempty"`
+
+ // This must be populated for block storage orders. This should match the OS type of the host(s) that will connect to the volume. The only required property is the keyName of the OS type. This property is ignored for file storage orders.
+ OsFormatType *Network_Storage_Iscsi_OS_Type `json:"osFormatType,omitempty" xmlrpc:"osFormatType,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for Enterprise Storage Snapshot Space.
+type Container_Product_Order_Network_Storage_Enterprise_SnapshotSpace struct {
+ Container_Product_Order
+
+ // The id of the [[SoftLayer_Network_Storage]] volume for which snapshot space is being ordered.
+ VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an upgrade order for Enterprise Storage Snapshot Space.
+type Container_Product_Order_Network_Storage_Enterprise_SnapshotSpace_Upgrade struct {
+ Container_Product_Order_Network_Storage_Enterprise_SnapshotSpace
+}
+
+// This datatype is to be used for object storage orders.
+type Container_Product_Order_Network_Storage_Hub struct {
+ Container_Product_Order
+}
+
+// This class is used to contain a datacenter location and its associated active usage rate prices for object storage ordering.
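+//
+// Illustrative sketch (added for clarity; not part of the upstream generated documentation): this
+// container is typically consumed rather than populated, so a caller would simply read its two
+// properties. The slice below is a placeholder standing in for whatever pricing lookup produced it:
+//
+//	var datacenters []Container_Product_Order_Network_Storage_Hub_Datacenter // placeholder result set
+//	for _, dc := range datacenters {
+//		if dc.Location == nil {
+//			continue
+//		}
+//		prices := dc.UsageRatePrices // active usage rate prices for this datacenter
+//		_ = prices
+//	}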
+type Container_Product_Order_Network_Storage_Hub_Datacenter struct { + Entity + + // The datacenter location where object storage is available. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // The collection of active usage rate item prices. + UsageRatePrices []Product_Item_Price `json:"usageRatePrices,omitempty" xmlrpc:"usageRatePrices,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an ISCSI order with SoftLayer. +type Container_Product_Order_Network_Storage_Iscsi struct { + Container_Product_Order +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an ISCSI Replication order with SoftLayer. +type Container_Product_Order_Network_Storage_Iscsi_Replication struct { + Container_Product_Order + + // the [[SoftLayer_Network_Storage_Iscsi_EqualLogic_Version3]] Id. + VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an ISCSI Snapshot Space order with SoftLayer. +type Container_Product_Order_Network_Storage_Iscsi_SnapshotSpace struct { + Container_Product_Order + + // the [[SoftLayer_Network_Storage_Iscsi_EqualLogic_Version3]] Id. + VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"` +} + +// This datatype is to be used for mass data migration requests. +type Container_Product_Order_Network_Storage_MassDataMigration_Request struct { + Container_Product_Order + + // Line 1 of the address - typically the number and street address the MDMS device will be delivered to + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // Line 2 of the address + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // First and last name of the customer on the shipping address + AddressAttention *string `json:"addressAttention,omitempty" xmlrpc:"addressAttention,omitempty"` + + // The datacenter name where the MDMS device will be shipped to + AddressNickname *string `json:"addressNickname,omitempty" xmlrpc:"addressNickname,omitempty"` + + // The shipping address city + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Name of the company device is being shipped to + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // Cloud Object Storage Account ID for the data offload destination + CosAccountId *string `json:"cosAccountId,omitempty" xmlrpc:"cosAccountId,omitempty"` + + // Cloud Object Storage Bucket for the data offload destination + CosBucketName *string `json:"cosBucketName,omitempty" xmlrpc:"cosBucketName,omitempty"` + + // The shipping address country + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // Default Gateway used for preconfiguring the Eth1 port on the MDMS device to access the user interface + Eth1DefaultGateway *string `json:"eth1DefaultGateway,omitempty" xmlrpc:"eth1DefaultGateway,omitempty"` + + // Netmask used for preconfiguring the Eth1 port on the MDMS device to access the user interface + Eth1Netmask *string `json:"eth1Netmask,omitempty" xmlrpc:"eth1Netmask,omitempty"` + + // Static IP Address used for preconfiguring the Eth1 port on the MDMS device to access the user interface + Eth1StaticIp *string 
`json:"eth1StaticIp,omitempty" xmlrpc:"eth1StaticIp,omitempty"` + + // Netmask used for preconfiguring the Eth3 port on the MDMS device to enable data transfer + Eth3Netmask *string `json:"eth3Netmask,omitempty" xmlrpc:"eth3Netmask,omitempty"` + + // Static IP Address used for preconfiguring the Eth3 port on the MDMS device to enable data transfer + Eth3StaticIp *string `json:"eth3StaticIp,omitempty" xmlrpc:"eth3StaticIp,omitempty"` + + // The e-mails of the MDMS key contacts + KeyContactEmails []string `json:"keyContactEmails,omitempty" xmlrpc:"keyContactEmails,omitempty"` + + // The names of the MDMS key contacts + KeyContactNames []string `json:"keyContactNames,omitempty" xmlrpc:"keyContactNames,omitempty"` + + // The phone numbers of the MDMS key contacts + KeyContactPhoneNumbers []string `json:"keyContactPhoneNumbers,omitempty" xmlrpc:"keyContactPhoneNumbers,omitempty"` + + // The roles of the MDMS key contacts + KeyContactRoles []string `json:"keyContactRoles,omitempty" xmlrpc:"keyContactRoles,omitempty"` + + // The shipping address postal code + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // Name of the Mass Data Migration Service job request + RequestName *string `json:"requestName,omitempty" xmlrpc:"requestName,omitempty"` + + // Shipping address and information where device will be shipped to + ShippingAddress *Container_Network_Storage_MassDataMigration_Request_Address `json:"shippingAddress,omitempty" xmlrpc:"shippingAddress,omitempty"` + + // The shipping address state + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// The SoftLayer_Container_Product_Order_Network_Storage_Modification datatype has everything required to place a modification to an existing StorageLayer account with SoftLayer. Modifications, at present time, include upgrade and downgrades only. The ''volumeId'' property must be set to the network storage volume id to be upgraded. Once populated send this container to the [[SoftLayer_Product_Order::placeOrder]] method. +// +// The ''packageId'' property passed in for CloudLayer storage accounts must be set to 0 (zero) and the ''quantity'' property must be set to 1. The location does not have to be set. Please use the [[SoftLayer_Product_Package]] service to retrieve a list of CloudLayer items. +// +// NOTE: When upgrading CloudLayer storage service from a metered plan (pay as you go) to a non-metered plan, make sure the chosen plan's storage allotment has enough space to cover the current usage. If the chosen plan's usage allotment is less than the CloudLayer storage's usage the order will be rejected. +type Container_Product_Order_Network_Storage_Modification struct { + Container_Product_Order + + // The id of the StorageLayer account to modify. + VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder when placing network attached storage orders. +type Container_Product_Order_Network_Storage_Nas struct { + Container_Product_Order +} + +// This datatype is to be used for ordering object storage products using the object_storage [[SoftLayer_Product_Item_Category|category]]. For object storage products using hub [[SoftLayer_Product_Item_Category|category]] use the [[SoftLayer_Container_Product_Order_Network_Storage_Hub]] order container. 
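+//
+// Illustrative sketch (added for clarity; not part of the upstream generated documentation) of the
+// container choice described above; both literals are empty because each type only selects which
+// container is sent, and the order details go on the embedded Container_Product_Order fields:
+//
+//	objectOrder := Container_Product_Order_Network_Storage_Object{} // products using the object_storage category
+//	hubOrder := Container_Product_Order_Network_Storage_Hub{}       // products using the hub category
+//	_, _ = objectOrder, hubOrder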
+type Container_Product_Order_Network_Storage_Object struct { + Container_Product_Order +} + +// This class is used to contain a location group and its associated active usage rate prices for object storage ordering. +type Container_Product_Order_Network_Storage_ObjectStorage_LocationGroup struct { + Entity + + // The datacenter location where object storage is available. + ClusterGeolocationType *string `json:"clusterGeolocationType,omitempty" xmlrpc:"clusterGeolocationType,omitempty"` + + // The datacenter location where object storage is available. + LocationGroup *Location_Group `json:"locationGroup,omitempty" xmlrpc:"locationGroup,omitempty"` + + // The collection of active usage rate item prices. + UsageRatePrices []Product_Item_Price `json:"usageRatePrices,omitempty" xmlrpc:"usageRatePrices,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a subnet order with SoftLayer. +type Container_Product_Order_Network_Subnet struct { + Container_Product_Order + + // The description which includes the network identifier, Classless Inter-Domain Routing prefix and the available slot count. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The [[SoftLayer_Network_Subnet_IpAddress]] id. + EndPointIpAddressId *int `json:"endPointIpAddressId,omitempty" xmlrpc:"endPointIpAddressId,omitempty"` + + // The [[SoftLayer_Network_Vlan]] id. + EndPointVlanId *int `json:"endPointVlanId,omitempty" xmlrpc:"endPointVlanId,omitempty"` + + // The [[SoftLayer_Network_Subnet]] id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // This is the hostname for the router associated with the [[SoftLayer_Network_Subnet|subnet]]. This is a readonly property. + RouterHostname *string `json:"routerHostname,omitempty" xmlrpc:"routerHostname,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a network ipsec vpn order with SoftLayer. +type Container_Product_Order_Network_Tunnel_Ipsec struct { + Container_Product_Order +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a network vlan order with SoftLayer. +type Container_Product_Order_Network_Vlan struct { + Container_Product_Order + + // The description which includes the primary router's hostname plus the vlan number. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The datacenter portion of the hostname. + HostnameDatacenter *string `json:"hostnameDatacenter,omitempty" xmlrpc:"hostnameDatacenter,omitempty"` + + // The router portion of the hostname. + HostnameRouter *string `json:"hostnameRouter,omitempty" xmlrpc:"hostnameRouter,omitempty"` + + // The [[SoftLayer_Network_Vlan]] id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The optional name for this VLAN + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The router object on which the new VLAN should be created. + Router *Hardware `json:"router,omitempty" xmlrpc:"router,omitempty"` + + // The ID of the [[SoftLayer_Hardware_Router]] object on which the new VLAN should be created. + RouterId *int `json:"routerId,omitempty" xmlrpc:"routerId,omitempty"` + + // The collection of subnets associated with this vlan. 
+ Subnets []Container_Product_Order `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"`
+
+ // The vlan number.
+ VlanNumber *int `json:"vlanNumber,omitempty" xmlrpc:"vlanNumber,omitempty"`
+}
+
+// This class contains the collections of public and private VLANs that are available during the ordering process.
+type Container_Product_Order_Network_Vlans struct {
+ Entity
+
+ // The collection of private vlans available during ordering.
+ PrivateVlans []Container_Product_Order `json:"privateVlans,omitempty" xmlrpc:"privateVlans,omitempty"`
+
+ // The collection of public vlans available during ordering.
+ PublicVlans []Container_Product_Order `json:"publicVlans,omitempty" xmlrpc:"publicVlans,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder when linking a Bluemix account to a newly created SoftLayer account.
+type Container_Product_Order_NewCustomerSetup struct {
+ Container_Product_Order
+
+ // no documentation yet
+ AuthorizationToken *string `json:"authorizationToken,omitempty" xmlrpc:"authorizationToken,omitempty"`
+
+ // no documentation yet
+ ExternalAccountId *string `json:"externalAccountId,omitempty" xmlrpc:"externalAccountId,omitempty"`
+
+ // no documentation yet
+ ExternalServiceProviderKey *string `json:"externalServiceProviderKey,omitempty" xmlrpc:"externalServiceProviderKey,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for Private Cloud.
+type Container_Product_Order_Private_Cloud struct {
+ Container_Product_Order
+}
+
+// This is used for storing various items about the order. It is currently used for storing additional RAID information when ordering servers. This is optional.
+type Container_Product_Order_Property struct {
+ Entity
+
+ // The property name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The property value.
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// When an order is placed (SoftLayer_Product_Order::placeOrder), a receipt is returned when the order is created successfully. The information in the receipt helps explain the order: its order ID and all the data within the order as well.
+//
+// For PayPal orders, a URL is also returned to the user so that the user can complete the transaction. Users paying with PayPal must continue on to this URL, log in, and pay. When doing this, PayPal will redirect the user back to a SoftLayer page which will then "finalize" the authorization process. From here, sales will verify the order by contacting the user in some way, unless sales has already spoken to the user about approving the order.
+//
+// For users paying with a credit card, a receipt means the order has gone to sales and is awaiting approval.
+type Container_Product_Order_Receipt struct {
+ Entity
+
+ // This URL refers to the location you will visit to complete the payment authorization for an external service, such as PayPal. This property is associated with externalPaymentToken and will only be populated when purchasing products with an external service.
+ //
+ // Once you visit this location, you will be presented with the options to confirm payment or deny payment. If you confirm payment, you will be redirected back to the receipt for your order. If you deny, you will be redirected back to the cancel order page, where you do not need to take any additional action.
+ // + // Until you confirm payment with the external service, your products will not be provisioned or accessible for your consumption. Upon successfully confirming payment, our system will be notified and the order approval and provisioning systems will begin processing. After provisioning is complete, your services will be available. + ExternalPaymentCheckoutUrl *string `json:"externalPaymentCheckoutUrl,omitempty" xmlrpc:"externalPaymentCheckoutUrl,omitempty"` + + // This token refers to the identifier for the external payment authorization. This token is associated with the externalPaymentCheckoutUrl and is only populated when purchasing products with an external service like PayPal. + ExternalPaymentToken *string `json:"externalPaymentToken,omitempty" xmlrpc:"externalPaymentToken,omitempty"` + + // The date when SoftLayer received the order. + OrderDate *Time `json:"orderDate,omitempty" xmlrpc:"orderDate,omitempty"` + + // This is a copy of the order container (SoftLayer_Container_Product_Order) which holds all the data related to an order. This will only return when an order is processed successfully. It will contain all the items in an order as well as the order totals. + OrderDetails *Container_Product_Order `json:"orderDetails,omitempty" xmlrpc:"orderDetails,omitempty"` + + // SoftLayer's unique identifier for the order. + OrderId *int `json:"orderId,omitempty" xmlrpc:"orderId,omitempty"` + + // Deprecation notice: use externalPaymentCheckoutUrl instead of this property. + // + // This URL refers to the location where you will visit to complete the payment authorization for PayPal. This property is associated with paypalToken and will only be populated when purchasing products with PayPal. + // + // Once you visit PayPal's site, you will be presented with the options to confirm payment or deny payment. If you confirm payment, you will be redirected back to the receipt for your order. If you deny, you will be redirected back to the cancel order page where you do not need to take any additional action. + // + // Until you confirm payment with PayPal, your products will not be provisioned or accessible for your consumption. Upon successfully confirming payment, our system will be notified and the order approval and provisioning systems will begin processing. After provisioning is complete, your services will be available. + PaypalCheckoutUrl *string `json:"paypalCheckoutUrl,omitempty" xmlrpc:"paypalCheckoutUrl,omitempty"` + + // Deprecation notice: use externalPaymentToken instead of this property. + // + // This token refers to the identifier provided when payment is processed via PayPal. This token is associated with the paypalCheckoutUrl. + PaypalToken *string `json:"paypalToken,omitempty" xmlrpc:"paypalToken,omitempty"` + + // This is a copy of the order that was successfully placed (SoftLayer_Billing_Order). This will only return when an order is processed successfully. + PlacedOrder *Billing_Order `json:"placedOrder,omitempty" xmlrpc:"placedOrder,omitempty"` + + // This is a copy of the quote container (SoftLayer_Billing_Order_Quote) which holds all the data related to a quote. This will only return when a quote is processed successfully. + Quote *Billing_Order_Quote `json:"quote,omitempty" xmlrpc:"quote,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype contains everything required to place a secure certificate order with SoftLayer. 
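+//
+// Illustrative sketch (added for clarity; not part of the upstream generated documentation): a minimal
+// way the fields of this container could be populated from Go code in this package. All literal values
+// below are placeholders:
+//
+//	csr := "-----BEGIN CERTIFICATE REQUEST-----..." // placeholder CSR text
+//	approver := "admin@example.com"                 // placeholder approver address
+//	months, servers := 12, 1
+//	certOrder := Container_Product_Order_Security_Certificate{
+//		CertificateSigningRequest: &csr,
+//		OrderApproverEmailAddress: &approver,
+//		ValidityMonths:            &months,
+//		ServerCount:               &servers,
+//	}
+//	_ = certOrder // the populated container is then sent to SoftLayer_Product_Order::verifyOrder or placeOrder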
+type Container_Product_Order_Security_Certificate struct {
+ Container_Product_Order
+
+ // The administrator contact associated with an SSL certificate. If the contact is not provided, the technical contact will be used. If the address is not provided, the organization information address will be used.
+ AdministrativeContact *Container_Product_Order_Attribute_Contact `json:"administrativeContact,omitempty" xmlrpc:"administrativeContact,omitempty"`
+
+ // The billing contact associated with an SSL certificate. If the contact is not provided, the technical contact will be used. If the address is not provided, the organization information address will be used.
+ BillingContact *Container_Product_Order_Attribute_Contact `json:"billingContact,omitempty" xmlrpc:"billingContact,omitempty"`
+
+ // The base64-encoded string that is sent from an applicant to a certificate authority. The CSR contains information identifying the applicant and the public key chosen by the applicant. The corresponding private key should not be included.
+ CertificateSigningRequest *string `json:"certificateSigningRequest,omitempty" xmlrpc:"certificateSigningRequest,omitempty"`
+
+ // The email address that can approve a secure certificate order.
+ OrderApproverEmailAddress *string `json:"orderApproverEmailAddress,omitempty" xmlrpc:"orderApproverEmailAddress,omitempty"`
+
+ // The organization information associated with an SSL certificate.
+ OrganizationInformation *Container_Product_Order_Attribute_Organization `json:"organizationInformation,omitempty" xmlrpc:"organizationInformation,omitempty"`
+
+ // Indicates whether this is a renewal order for an existing SSL certificate.
+ RenewalFlag *bool `json:"renewalFlag,omitempty" xmlrpc:"renewalFlag,omitempty"`
+
+ // The number of servers.
+ ServerCount *int `json:"serverCount,omitempty" xmlrpc:"serverCount,omitempty"`
+
+ // The server type. This is the name from a [[SoftLayer_Security_Certificate_Request_ServerType]] object.
+ ServerType *string `json:"serverType,omitempty" xmlrpc:"serverType,omitempty"`
+
+ // The technical contact associated with an SSL certificate. If the address is not provided, the organization information address will be used.
+ TechnicalContact *Container_Product_Order_Attribute_Contact `json:"technicalContact,omitempty" xmlrpc:"technicalContact,omitempty"`
+
+ // The period, in months, that an SSL certificate is valid for. For example: 12, 24, 36.
+ ValidityMonths *int `json:"validityMonths,omitempty" xmlrpc:"validityMonths,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder.
+type Container_Product_Order_Service struct {
+ Container_Product_Order
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder.
+type Container_Product_Order_Service_External struct {
+ Container_Product_Order
+
+ // For orders that contain servers (bare metal, virtual server, big data, etc.), the hardware property is required. This property is an array of [[SoftLayer_Hardware]] objects. The hostname and domain properties are required for each hardware object. Note that virtual server ([[SoftLayer_Container_Product_Order_Virtual_Guest]]) orders may populate this field instead of the virtualGuests property.
+ ExternalResources []Service_External_Resource `json:"externalResources,omitempty" xmlrpc:"externalResources,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a virtual license order with SoftLayer.
+type Container_Product_Order_Software_Component_Virtual struct {
+ Container_Product_Order
+
+ // Array of IP address ids for which a license should be allocated.
+ EndPointIpAddressIds []int `json:"endPointIpAddressIds,omitempty" xmlrpc:"endPointIpAddressIds,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a hardware security module order with SoftLayer.
+type Container_Product_Order_Software_License struct {
+ Container_Product_Order
+}
+
+// This object holds all of the SSH key ids that will allow authentication to a single server.
+type Container_Product_Order_SshKeys struct {
+ Entity
+
+ // An array of SoftLayer_Security_Ssh_Key IDs to assign to a server.
+ SshKeyIds []int `json:"sshKeyIds,omitempty" xmlrpc:"sshKeyIds,omitempty"`
+}
+
+// A single storage group container used for a hardware server order.
+//
+// This object describes a single storage group that can be added to an order container.
+type Container_Product_Order_Storage_Group struct {
+ Entity
+
+ // Size of the array in gigabytes. Must be within the limitations of the smallest drive assigned to the storage group and the storage group type.
+ ArraySize *Float64 `json:"arraySize,omitempty" xmlrpc:"arraySize,omitempty"`
+
+ // The array type id from a [[SoftLayer_Configuration_Storage_Group_Array_Type]] object.
+ ArrayTypeId *int `json:"arrayTypeId,omitempty" xmlrpc:"arrayTypeId,omitempty"`
+
+ // Defines the disk controller to put the storage group and the hard drives on.
+ //
+ // This must match a disk controller price on the order. The disk controller index is 0-indexed: 'disk_controller' = 0, 'disk_controller1' = 1, 'disk_controller2' = 2.
+ DiskControllerIndex *int `json:"diskControllerIndex,omitempty" xmlrpc:"diskControllerIndex,omitempty"`
+
+ // Integer array of drive indexes to use in the storage group.
+ HardDrives []int `json:"hardDrives,omitempty" xmlrpc:"hardDrives,omitempty"`
+
+ // If an array should be protected by a hotspare, the drive indexes of the hotspares should be here.
+ //
+ // If a drive is a hotspare for all arrays, then a separate storage group with array type GLOBAL_HOT_SPARE should be used.
+ HotSpareDrives []int `json:"hotSpareDrives,omitempty" xmlrpc:"hotSpareDrives,omitempty"`
+
+ // no documentation yet
+ LvmFlag *bool `json:"lvmFlag,omitempty" xmlrpc:"lvmFlag,omitempty"`
+
+ // The id for a [[SoftLayer_Hardware_Component_Partition_Template]] object, which will determine the partitions to add to the storage group.
+ //
+ // If this storage group is not a primary storage group, then this will not be used.
+ PartitionTemplateId *int `json:"partitionTemplateId,omitempty" xmlrpc:"partitionTemplateId,omitempty"`
+
+ // Defines the partitions for the storage group.
+ //
+ // If this storage group is not a secondary storage group, then this will not be used.
+ Partitions []Container_Product_Order_Storage_Group_Partition `json:"partitions,omitempty" xmlrpc:"partitions,omitempty"`
+}
+
+// A storage group partition container used for a hardware server order.
+//
+// This object describes the partitions for a single storage group that can be added to an order container.
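+//
+// Illustrative sketch (added for clarity; not part of the upstream generated documentation): a data
+// (secondary) storage group with an explicit grow partition, showing how the two containers could be
+// populated from Go code in this package. The array type id, drive indexes, and sizes are placeholders
+// and must match what is valid for the package being ordered:
+//
+//	arrayTypeId, controllerIndex := 2, 0 // placeholder RAID array type id; first disk controller
+//	arraySize := Float64(500)
+//	dataName, dataSize := "/data", Float64(500)
+//	grow := true
+//	group := Container_Product_Order_Storage_Group{
+//		ArrayTypeId:         &arrayTypeId,
+//		ArraySize:           &arraySize,
+//		DiskControllerIndex: &controllerIndex,
+//		HardDrives:          []int{2, 3},
+//		Partitions: []Container_Product_Order_Storage_Group_Partition{
+//			{Name: &dataName, Size: &dataSize, IsGrow: &grow},
+//		},
+//	}
+//	_ = group // appended to the storageGroups property of the order container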
+type Container_Product_Order_Storage_Group_Partition struct { + Entity + + // Is this a grow partition + IsGrow *bool `json:"isGrow,omitempty" xmlrpc:"isGrow,omitempty"` + + // The name of this partition + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The size of this partition + Size *Float64 `json:"size,omitempty" xmlrpc:"size,omitempty"` +} + +// When ordering paid support this datatype needs to be populated and sent to SoftLayer_Product_Order::placeOrder. +type Container_Product_Order_Support struct { + Container_Product_Order +} + +// This container type is used for placing orders for external authentication, such as phone-based authentication. +type Container_Product_Order_User_Customer_External_Binding struct { + Container_Product_Order + + // The external id that access to external authentication is being purchased for. + ExternalId *string `json:"externalId,omitempty" xmlrpc:"externalId,omitempty"` + + // The SoftLayer [[SoftLayer_User_Customer|user]] identifier that an external binding is being purchased for. + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` + + // The [[SoftLayer_User_Customer_External_Binding_Vendor|vendor]] identifier for the external binding being purchased. + VendorId *int `json:"vendorId,omitempty" xmlrpc:"vendorId,omitempty"` +} + +// This is the default container type for Dedicated Virtual Host orders. +type Container_Product_Order_Virtual_DedicatedHost struct { + Container_Product_Order +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a Portable Storage order with SoftLayer. +type Container_Product_Order_Virtual_Disk_Image struct { + Container_Product_Order + + // Label for the portable storage volume. + DiskDescription *string `json:"diskDescription,omitempty" xmlrpc:"diskDescription,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order with SoftLayer. +type Container_Product_Order_Virtual_Guest struct { + Container_Product_Order_Hardware_Server + + // The mode used to boot the [[SoftLayer_Virtual_Guest]]. Supported values are 'PV' and 'HVM'. + BootMode *string `json:"bootMode,omitempty" xmlrpc:"bootMode,omitempty"` + + // Identifier of the [[SoftLayer_Virtual_Disk_Image]] to boot from. + BootableDiskId *int `json:"bootableDiskId,omitempty" xmlrpc:"bootableDiskId,omitempty"` + + // Identifier of [[SoftLayer_Virtual_DedicatedHost]] to order + HostId *int `json:"hostId,omitempty" xmlrpc:"hostId,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order with SoftLayer. 
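+//
+// Illustrative sketch (added for clarity; not part of the upstream generated documentation): the upgrade
+// container below only embeds Container_Product_Order_Virtual_Guest, so its fields are set through that
+// embedded container. All literal values are placeholders:
+//
+//	bootMode := "HVM" // supported values per the bootMode documentation: 'PV' and 'HVM'
+//	hostId, qty := 123, 1
+//	hourly := true
+//	vsOrder := Container_Product_Order_Virtual_Guest{}
+//	vsOrder.BootMode = &bootMode
+//	vsOrder.HostId = &hostId
+//	vsOrder.Quantity = &qty // promoted from the embedded Container_Product_Order
+//	vsOrder.UseHourlyPricing = &hourly
+//	upgrade := Container_Product_Order_Virtual_Guest_Upgrade{Container_Product_Order_Virtual_Guest: vsOrder}
+//	_ = upgrade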
+type Container_Product_Order_Virtual_Guest_Upgrade struct { + Container_Product_Order_Virtual_Guest +} + +// no documentation yet +type Container_Product_Order_Virtual_Guest_Vpc struct { + Container_Product_Order_Virtual_Guest + + // no documentation yet + AdditionalNetworkInterfaces []Container_Product_Order_Virtual_Guest_Vpc_NetworkInterface `json:"additionalNetworkInterfaces,omitempty" xmlrpc:"additionalNetworkInterfaces,omitempty"` + + // no documentation yet + IpAllocations []Container_Product_Order_Virtual_Guest_Vpc_IpAllocation `json:"ipAllocations,omitempty" xmlrpc:"ipAllocations,omitempty"` + + // no documentation yet + ServerId *string `json:"serverId,omitempty" xmlrpc:"serverId,omitempty"` + + // no documentation yet + ServicePortInterfaceId *string `json:"servicePortInterfaceId,omitempty" xmlrpc:"servicePortInterfaceId,omitempty"` + + // no documentation yet + ServicePortIpAllocationId *string `json:"servicePortIpAllocationId,omitempty" xmlrpc:"servicePortIpAllocationId,omitempty"` + + // no documentation yet + ServicePortVpcId *string `json:"servicePortVpcId,omitempty" xmlrpc:"servicePortVpcId,omitempty"` + + // no documentation yet + Subnets []Container_Product_Order_Virtual_Guest_Vpc_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"` +} + +// no documentation yet +type Container_Product_Order_Virtual_Guest_Vpc_IpAllocation struct { + Entity + + // no documentation yet + Id *string `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Ip *string `json:"ip,omitempty" xmlrpc:"ip,omitempty"` +} + +// no documentation yet +type Container_Product_Order_Virtual_Guest_Vpc_NetworkInterface struct { + Entity + + // no documentation yet + InterfaceId *string `json:"interfaceId,omitempty" xmlrpc:"interfaceId,omitempty"` + + // no documentation yet + IpAllocationId *string `json:"ipAllocationId,omitempty" xmlrpc:"ipAllocationId,omitempty"` + + // no documentation yet + SecurityGroupIds []int `json:"securityGroupIds,omitempty" xmlrpc:"securityGroupIds,omitempty"` + + // no documentation yet + SubnetId *string `json:"subnetId,omitempty" xmlrpc:"subnetId,omitempty"` + + // no documentation yet + VpcId *string `json:"vpcId,omitempty" xmlrpc:"vpcId,omitempty"` +} + +// no documentation yet +type Container_Product_Order_Virtual_Guest_Vpc_Subnet struct { + Entity + + // no documentation yet + Cidr *string `json:"cidr,omitempty" xmlrpc:"cidr,omitempty"` + + // no documentation yet + Dns *string `json:"dns,omitempty" xmlrpc:"dns,omitempty"` + + // no documentation yet + Gateway *string `json:"gateway,omitempty" xmlrpc:"gateway,omitempty"` + + // no documentation yet + Id *string `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Vlan *int `json:"vlan,omitempty" xmlrpc:"vlan,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Provisioning_Maintenance_Window::addCustomerUpgradeWindow. This datatype has everything required to place an order with SoftLayer. +type Container_Provisioning_Maintenance_Window struct { + Entity + + // Maintenance classifications. + ClassificationIds []Provisioning_Maintenance_Classification `json:"classificationIds,omitempty" xmlrpc:"classificationIds,omitempty"` + + // Maintenance classifications. 
+ ItemCategoryIds []Product_Item_Category `json:"itemCategoryIds,omitempty" xmlrpc:"itemCategoryIds,omitempty"` + + // The maintenance window id + MaintenanceWindowId *int `json:"maintenanceWindowId,omitempty" xmlrpc:"maintenanceWindowId,omitempty"` + + // Maintenance window ticket id + TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"` + + // Maintenance window date + WindowMaintenanceDate *Time `json:"windowMaintenanceDate,omitempty" xmlrpc:"windowMaintenanceDate,omitempty"` +} + +// no documentation yet +type Container_Referral_Partner_Commission struct { + Entity + + // no documentation yet + CommissionAmount *Float64 `json:"commissionAmount,omitempty" xmlrpc:"commissionAmount,omitempty"` + + // no documentation yet + CommissionRate *Float64 `json:"commissionRate,omitempty" xmlrpc:"commissionRate,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + ReferralAccountId *int `json:"referralAccountId,omitempty" xmlrpc:"referralAccountId,omitempty"` + + // no documentation yet + ReferralCompanyName *string `json:"referralCompanyName,omitempty" xmlrpc:"referralCompanyName,omitempty"` + + // no documentation yet + ReferralPartnerAccountId *int `json:"referralPartnerAccountId,omitempty" xmlrpc:"referralPartnerAccountId,omitempty"` + + // no documentation yet + ReferralRevenue *Float64 `json:"referralRevenue,omitempty" xmlrpc:"referralRevenue,omitempty"` +} + +// no documentation yet +type Container_Referral_Partner_Payment_Option struct { + Entity + + // no documentation yet + AccountNumber *string `json:"accountNumber,omitempty" xmlrpc:"accountNumber,omitempty"` + + // no documentation yet + AccountType *string `json:"accountType,omitempty" xmlrpc:"accountType,omitempty"` + + // no documentation yet + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // no documentation yet + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // no documentation yet + BankTransitNumber *string `json:"bankTransitNumber,omitempty" xmlrpc:"bankTransitNumber,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + FederalTaxId *string `json:"federalTaxId,omitempty" xmlrpc:"federalTaxId,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + PaymentType *string `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"` + + // no documentation yet + PaypalEmail *string `json:"paypalEmail,omitempty" xmlrpc:"paypalEmail,omitempty"` + + // no documentation yet + PhoneNumber *string `json:"phoneNumber,omitempty" xmlrpc:"phoneNumber,omitempty"` + + // no documentation yet + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// no documentation yet +type Container_Referral_Partner_Prospect struct { + Entity + + // no documentation yet + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // no documentation yet + Address2 *string 
`json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // no documentation yet + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // no documentation yet + Questions []string `json:"questions,omitempty" xmlrpc:"questions,omitempty"` + + // no documentation yet + Responses []Survey_Response `json:"responses,omitempty" xmlrpc:"responses,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // no documentation yet + SurveyId *string `json:"surveyId,omitempty" xmlrpc:"surveyId,omitempty"` +} + +// The SoftLayer_Container_RemoteManagement_Graphs_SensorSpeed contains graphs to display speed for each of the server's fans. Fan speeds are gathered from the server's remote management card. +type Container_RemoteManagement_Graphs_SensorSpeed struct { + Entity + + // The graph to display the server's fan speed. + Graph *[]byte `json:"graph,omitempty" xmlrpc:"graph,omitempty"` + + // A title that may be used to display for the graph. + Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"` +} + +// The SoftLayer_Container_RemoteManagement_Graphs_SensorTemperature contains graphs to display the cpu(s) and system temperatures retrieved from the management card using thermometer graphs. +type Container_RemoteManagement_Graphs_SensorTemperature struct { + Entity + + // The graph to display the server's cpu(s) and system temperatures. + Graph *[]byte `json:"graph,omitempty" xmlrpc:"graph,omitempty"` + + // A title that may be used to display for the graph. + Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"` +} + +// The SoftLayer_Container_RemoteManagement_PmInfo contains pminfo information retrieved from a server's remote management card. +type Container_RemoteManagement_PmInfo struct { + Entity + + // PmInfo ID + PmInfoId *string `json:"pmInfoId,omitempty" xmlrpc:"pmInfoId,omitempty"` + + // PmInfo Reading + PmInfoReading *string `json:"pmInfoReading,omitempty" xmlrpc:"pmInfoReading,omitempty"` +} + +// The SoftLayer_Container_RemoteManagement_SensorReadings contains sensor information retrieved from a server's remote management card. 
+type Container_RemoteManagement_SensorReading struct {
+	Entity
+
+	// Lower Critical threshold
+	LowerCritical *string `json:"lowerCritical,omitempty" xmlrpc:"lowerCritical,omitempty"`
+
+	// Lower Non-Critical threshold
+	LowerNonCritical *string `json:"lowerNonCritical,omitempty" xmlrpc:"lowerNonCritical,omitempty"`
+
+	// Lower Non-Recoverable threshold
+	LowerNonRecoverable *string `json:"lowerNonRecoverable,omitempty" xmlrpc:"lowerNonRecoverable,omitempty"`
+
+	// Sensor ID
+	SensorId *string `json:"sensorId,omitempty" xmlrpc:"sensorId,omitempty"`
+
+	// Sensor Reading
+	SensorReading *string `json:"sensorReading,omitempty" xmlrpc:"sensorReading,omitempty"`
+
+	// Sensor Units
+	SensorUnits *string `json:"sensorUnits,omitempty" xmlrpc:"sensorUnits,omitempty"`
+
+	// Sensor Status
+	Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// Upper Critical threshold
+	UpperCritical *string `json:"upperCritical,omitempty" xmlrpc:"upperCritical,omitempty"`
+
+	// Upper Non-Critical threshold
+	UpperNonCritical *string `json:"upperNonCritical,omitempty" xmlrpc:"upperNonCritical,omitempty"`
+
+	// Upper Non-Recoverable threshold
+	UpperNonRecoverable *string `json:"upperNonRecoverable,omitempty" xmlrpc:"upperNonRecoverable,omitempty"`
+}
+
+// The SoftLayer_Container_RemoteManagement_SensorReadingsWithGraphs contains the raw data retrieved from a server's remote management card. Along with the raw data, two sets of graphs will be returned. One set of graphs is used to display, using thermometer graphs, the temperatures (cpu(s) and system) retrieved from the management card. The other set is used to display the speed for each of the server's fans.
+type Container_RemoteManagement_SensorReadingsWithGraphs struct {
+	Entity
+
+	// The raw data returned from the server's remote management card.
+	RawData []Container_RemoteManagement_SensorReading `json:"rawData,omitempty" xmlrpc:"rawData,omitempty"`
+
+	// The graph(s) to display the server's fan speeds.
+	SpeedGraphs []Container_RemoteManagement_Graphs_SensorSpeed `json:"speedGraphs,omitempty" xmlrpc:"speedGraphs,omitempty"`
+
+	// The graph(s) to display the server's cpu(s) and system temperatures.
+	TemperatureGraphs []Container_RemoteManagement_Graphs_SensorTemperature `json:"temperatureGraphs,omitempty" xmlrpc:"temperatureGraphs,omitempty"`
+}
+
+// The metadata service resource container is used to store information about a single service resource.
+type Container_Resource_Metadata_ServiceResource struct {
+	Entity
+
+	// The backend IP address for this resource.
+	BackendIpAddress *string `json:"backendIpAddress,omitempty" xmlrpc:"backendIpAddress,omitempty"`
+
+	// The type for this resource.
+	Type *Network_Service_Resource_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// This data type is a container that stores information about a single indexed object type. Object type information can be used for discovery of searchable data and for creation or validation of object index search strings. Each of these containers holds a collection of [[SoftLayer_Container_Search_ObjectType_Property (type)|SoftLayer_Container_Search_ObjectType_Property]] objects, specifying which object properties are exposed for the current user. Refer to the documentation for the [[SoftLayer_Search/search|search()]] method for information on using object types in search strings.
+type Container_Search_ObjectType struct {
+	Entity
+
+	// Name of object type.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A collection of [[SoftLayer_Container_Search_ObjectType_Property|object properties]]. + Properties []Container_Search_ObjectType_Property `json:"properties,omitempty" xmlrpc:"properties,omitempty"` +} + +// This data type is a container that stores information about a single property of a searchable object type. Each [[SoftLayer_Container_Search_ObjectType (type)|SoftLayer_Container_Search_ObjectType]] object holds a collection of these properties. Property information can be used for discovery of searchable data and for the creation or validation of object index search strings. Note that properties are only understood by the [[SoftLayer_Search/advancedSearch|advancedSearch()]] method. Refer to the advancedSearch() method for information on using properties in search strings. +type Container_Search_ObjectType_Property struct { + Entity + + // Name of property. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Indicates if this property can be sorted. + SortableFlag *bool `json:"sortableFlag,omitempty" xmlrpc:"sortableFlag,omitempty"` + + // Property data type. Valid values include 'boolean', 'integer', 'date', 'string' or 'text'. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Container_Search_Result data type represents a result row from an execution of Search service. +type Container_Search_Result struct { + Entity + + // An array of terms that were matched in the resource object. + MatchedTerms []string `json:"matchedTerms,omitempty" xmlrpc:"matchedTerms,omitempty"` + + // The score ratio of the result for relevance to the search criteria. + RelevanceScore *Float64 `json:"relevanceScore,omitempty" xmlrpc:"relevanceScore,omitempty"` + + // A search results resource object that matched search criteria. + Resource *Entity `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // The type of the resource object that matched search criteria. + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// The SoftLayer_Container_Software_Component_HostIps_Policy container holds the title and value of a current host ips policy. +type Container_Software_Component_HostIps_Policy struct { + Entity + + // The value of a host ips category. + Policy *string `json:"policy,omitempty" xmlrpc:"policy,omitempty"` + + // The category title of a host ips policy. + PolicyTitle *string `json:"policyTitle,omitempty" xmlrpc:"policyTitle,omitempty"` +} + +// These are the results of a tax calculation. The tax calculation was kicked off but allowed to run in the background. This type stores the results so that an interface can be updated with up-to-date information. +type Container_Tax_Cache struct { + Entity + + // The percentage of the final total that should be tax. + EffectiveTaxRate *Float64 `json:"effectiveTaxRate,omitempty" xmlrpc:"effectiveTaxRate,omitempty"` + + // The container that holds the four actual tax rates, one for each fee type. + Items []Container_Tax_Cache_Item `json:"items,omitempty" xmlrpc:"items,omitempty"` + + // The status of the tax request. This should be PENDING, FAILED, or COMPLETED. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The final amount of tax for the order. + TotalTaxAmount *Float64 `json:"totalTaxAmount,omitempty" xmlrpc:"totalTaxAmount,omitempty"` +} + +// This represents one order item in a tax calculation. 
+type Container_Tax_Cache_Item struct { + Entity + + // The category code for the referenced product. + CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"` + + // This hash will match to the hash on an order container. + ContainerHash *string `json:"containerHash,omitempty" xmlrpc:"containerHash,omitempty"` + + // The reference to the price for this order item. + ItemPriceId *int `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"` + + // This is the container containing the individual tax rates. + TaxRates *Container_Tax_Rates `json:"taxRates,omitempty" xmlrpc:"taxRates,omitempty"` +} + +// This contains the four tax rates, one for each fee type. +type Container_Tax_Rates struct { + Entity + + // The tax rate associated with the labor fee. + LaborTaxRate *Float64 `json:"laborTaxRate,omitempty" xmlrpc:"laborTaxRate,omitempty"` + + // A reference to a location. + LocationId *Float64 `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // The tax rate associated with the one-time fee. + OneTimeTaxRate *Float64 `json:"oneTimeTaxRate,omitempty" xmlrpc:"oneTimeTaxRate,omitempty"` + + // The tax rate associated with the recurring fee. + RecurringTaxRate *Float64 `json:"recurringTaxRate,omitempty" xmlrpc:"recurringTaxRate,omitempty"` + + // The tax rate associated with the setup fee. + SetupTaxRate *Float64 `json:"setupTaxRate,omitempty" xmlrpc:"setupTaxRate,omitempty"` +} + +// SoftLayer_Container_Ticket_GraphInputs models a single inbound object for a given ticket graph. +type Container_Ticket_GraphInputs struct { + Entity + + // This is a unix timestamp that represents the stop date/time for a graph. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The front-end or back-end network uplink interface associated with this server. + NetworkInterfaceId *int `json:"networkInterfaceId,omitempty" xmlrpc:"networkInterfaceId,omitempty"` + + // * + Pod *int `json:"pod,omitempty" xmlrpc:"pod,omitempty"` + + // This is a human readable name for the server or rack being graphed. + ServerName *string `json:"serverName,omitempty" xmlrpc:"serverName,omitempty"` + + // This is a unix timestamp that represents the begin date/time for a graph. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// SoftLayer_Container_Ticket_GraphOutputs models a single outbound object for a given bandwidth graph. +type Container_Ticket_GraphOutputs struct { + Entity + + // The raw PNG binary data to be displayed once the graph is drawn. + GraphImage *[]byte `json:"graphImage,omitempty" xmlrpc:"graphImage,omitempty"` + + // The title that ended up being displayed as part of the graph image. + GraphTitle *string `json:"graphTitle,omitempty" xmlrpc:"graphTitle,omitempty"` + + // The maximum date included in this graph. + MaxEndDate *Time `json:"maxEndDate,omitempty" xmlrpc:"maxEndDate,omitempty"` + + // The minimum date included in this graph. 
+ MinStartDate *Time `json:"minStartDate,omitempty" xmlrpc:"minStartDate,omitempty"` +} + +// no documentation yet +type Container_Ticket_Priority struct { + Entity + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Value *int `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Container_Ticket_Survey_Preference struct { + Entity + + // no documentation yet + Applicable *bool `json:"applicable,omitempty" xmlrpc:"applicable,omitempty"` + + // no documentation yet + OptedOut *bool `json:"optedOut,omitempty" xmlrpc:"optedOut,omitempty"` + + // no documentation yet + OptedOutDate *Time `json:"optedOutDate,omitempty" xmlrpc:"optedOutDate,omitempty"` +} + +// Container class used to hold user authentication token +type Container_User_Authentication_Token struct { + Entity + + // hash that gets populated for user authentication + Hash *string `json:"hash,omitempty" xmlrpc:"hash,omitempty"` + + // the user authenticated object + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // the id of the user to authenticate + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// Container classed used to hold external authentication information +type Container_User_Customer_External_Binding struct { + Entity + + // The unique token that is created by an external authentication request. + AuthenticationToken *string `json:"authenticationToken,omitempty" xmlrpc:"authenticationToken,omitempty"` + + // Added by softlayer-go. This hints to the API what kind of binding this is. + ComplexType *string `json:"complexType,omitempty" xmlrpc:"complexType,omitempty"` + + // The OpenID Connect access token which provides access to a resource by the OpenID Connect provider. + OpenIdConnectAccessToken *string `json:"openIdConnectAccessToken,omitempty" xmlrpc:"openIdConnectAccessToken,omitempty"` + + // The account to login to, if not provided a default will be used. + OpenIdConnectAccountId *int `json:"openIdConnectAccountId,omitempty" xmlrpc:"openIdConnectAccountId,omitempty"` + + // The OpenID Connect provider type, as a string. + OpenIdConnectProvider *string `json:"openIdConnectProvider,omitempty" xmlrpc:"openIdConnectProvider,omitempty"` + + // Your SoftLayer customer portal user's portal password. + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // A second security code that is only required if your credential has become unsynchronized. + SecondSecurityCode *string `json:"secondSecurityCode,omitempty" xmlrpc:"secondSecurityCode,omitempty"` + + // The security code used to validate a VeriSign credential. + SecurityCode *string `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"` + + // The answer to your security question. + SecurityQuestionAnswer *string `json:"securityQuestionAnswer,omitempty" xmlrpc:"securityQuestionAnswer,omitempty"` + + // A security question you wish to answer when authenticating to the SoftLayer customer portal. This parameter isn't required if no security questions are set on your portal account or if your account is configured to not require answering a security account upon login. + SecurityQuestionId *int `json:"securityQuestionId,omitempty" xmlrpc:"securityQuestionId,omitempty"` + + // The username you wish to authenticate to the SoftLayer customer portal with. 
+	Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+
+	// The name of the vendor that will be used for external authentication.
+	Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"`
+}
+
+// Container class used to hold a portal token.
+type Container_User_Customer_External_Binding_Phone struct {
+	Container_User_Customer_External_Binding
+}
+
+// This container can be used to configure the phone authentication mode. By default, "VOICE_CALL" in "STANDARD" mode with no PIN will be used. With the default mode, you will have to answer a phone call from a trusted two-factor authentication vendor during the authentication process. You have to answer the call and follow the instructions in order to complete the authentication.
+//
+// You can also use SMS text message or PhoneFactor mobile app modes (in case you're using PhoneFactor). Additionally, you can set up a PIN. By requiring you to verify your secret PIN, you can ensure that you have possession of your phone.
+type Container_User_Customer_External_Binding_Phone_Mode struct {
+	Entity
+
+	// Authentication mode. Valid modes are: VOICE_CALL, SMS_TEXT, PHONE_APP
+	//
+	//
+	// *VOICE_CALL
+	// In this mode, users will receive a phone call to authenticate. Using a PIN can enhance the security of the phone authentication by requiring the user to enter a PIN during the authentication call. Valid PIN modes are: PIN, VOICE_PRINT, STANDARD
+	//
+	//
+	// **STANDARD: (default) No PIN is used.
+	// **PIN: 4 to 10 digit numeric value
+	// **VOICE_PRINT: The user's voice will be used to identify the user.
+	//
+	//
+	// *SMS_TEXT
+	// SMS text mode will send an SMS text message to the user's phone to complete the authentication. There are 2 different PIN modes:
+	//
+	//
+	// **OTP: (default) A text message containing a One-Time Passcode (OTP) is sent to the user. The user must reply to the text message entering this OTP to complete the authentication.
+	// **OTP_PIN: This mode enhances the security of the authentication by requiring the user to enter the OTP + their PIN in the text reply.
+	//
+	//
+	//
+	//
+	// *PHONE_APP
+	// This mode is applicable for PhoneFactor. Phone App mode results in a notification being sent to the user's PhoneFactor phone app. There are 2 different PIN modes for the mobile app authentication.
+	// **STANDARD: (default) The first authentication is when the user signs on using a username and password.
+	// The second authentication is when the user receives a notification in the PhoneFactor phone app. In Standard Mode, users will be prompted to authenticate, deny, or deny and report fraud.
+	// **PIN: This mode enhances the security of the authentication by requiring the user to enter their PIN in the phone app.
+	Mode *string `json:"mode,omitempty" xmlrpc:"mode,omitempty"`
+
+	// Optional authentication PIN.
+	Pin *string `json:"pin,omitempty" xmlrpc:"pin,omitempty"`
+
+	// Available PIN modes are: PIN, VOICE_PRINT, STANDARD. Default: STANDARD (no PIN is used).
+	PinMode *string `json:"pinMode,omitempty" xmlrpc:"pinMode,omitempty"`
+}
+
+// Container class used to hold a portal token.
+type Container_User_Customer_External_Binding_Totp struct {
+	Container_User_Customer_External_Binding
+
+	// The security code used to validate a Totp credential.
+	SecurityCode *string `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"`
+}
+
+// Container class used to hold details about an external authentication vendor.
+type Container_User_Customer_External_Binding_Vendor struct { + Entity + + // The keyname used to identify an external authentication vendor. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The name of an external authentication vendor. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Container classed used to hold portal token +type Container_User_Customer_External_Binding_Verisign struct { + Container_User_Customer_External_Binding + + // A second security code that is only required if your credential has become unsynchronized. + SecondSecurityCode *string `json:"secondSecurityCode,omitempty" xmlrpc:"secondSecurityCode,omitempty"` + + // The security code used to validate a VeriSign credential. + SecurityCode *string `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"` +} + +// no documentation yet +type Container_User_Customer_OpenIdConnect_LoginAccountInfo struct { + Entity + + // The customer account's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The company name associated with an account. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Container_User_Customer_OpenIdConnect_MigrationState struct { + Entity + + // The number of days remaining in the grace period for this user's account to + DaysToGracePeriodEnd *int `json:"daysToGracePeriodEnd,omitempty" xmlrpc:"daysToGracePeriodEnd,omitempty"` + + // Flag for whether the email address inside this SoftLayer_User_Customer object + EmailAlreadyUsedForInvitationToAccount *bool `json:"emailAlreadyUsedForInvitationToAccount,omitempty" xmlrpc:"emailAlreadyUsedForInvitationToAccount,omitempty"` + + // Flag for whether the email address inside this SoftLayer_User_Customer object + EmailAlreadyUsedForLinkToAccount *bool `json:"emailAlreadyUsedForLinkToAccount,omitempty" xmlrpc:"emailAlreadyUsedForLinkToAccount,omitempty"` + + // The IBMid email address where an invitation was sent. + ExistingInvitationOpenIdConnectName *string `json:"existingInvitationOpenIdConnectName,omitempty" xmlrpc:"existingInvitationOpenIdConnectName,omitempty"` + + // Flag for whether the account is OpenIdConnect authenticated or not. 
+ IsAccountOpenIdConnectAuthenticated *bool `json:"isAccountOpenIdConnectAuthenticated,omitempty" xmlrpc:"isAccountOpenIdConnectAuthenticated,omitempty"` +} + +// Container for holding information necessary for the setting and resetting of customer passwords +// +// +type Container_User_Customer_PasswordSet struct { + Entity + + // id of SoftLayer_User_Security_Question + AnsweredSecurityQuestionId *int `json:"answeredSecurityQuestionId,omitempty" xmlrpc:"answeredSecurityQuestionId,omitempty"` + + // the authentication methods required + AuthenticationMethods []int `json:"authenticationMethods,omitempty" xmlrpc:"authenticationMethods,omitempty"` + + // the password key provided to user in the password set url link sent via email + Key *string `json:"key,omitempty" xmlrpc:"key,omitempty"` + + // the user's new password + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // answer to security question provided by the user + SecurityAnswer *string `json:"securityAnswer,omitempty" xmlrpc:"securityAnswer,omitempty"` + + // array of SoftLayer_User_Security_Question + SecurityQuestions []User_Security_Question `json:"securityQuestions,omitempty" xmlrpc:"securityQuestions,omitempty"` + + // the id of the user to authenticate + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// Container classed used to hold mobile portal token +type Container_User_Customer_Portal_MobileToken struct { + Container_User_Customer_Portal_Token + + // True if this user login required an external binding. + HasExternalBinding *bool `json:"hasExternalBinding,omitempty" xmlrpc:"hasExternalBinding,omitempty"` +} + +// Container classed used to hold portal token +type Container_User_Customer_Portal_Token struct { + Entity + + // hash of logged in user session id + Hash *string `json:"hash,omitempty" xmlrpc:"hash,omitempty"` + + // the logged in user data + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // the id of the logged in user + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// This container holds user's phone information. +type Container_User_Data_Phone struct { + Entity + + // Country code number for the phone number Default: 1 (United States & Canada +1) + CountryCode *int `json:"countryCode,omitempty" xmlrpc:"countryCode,omitempty"` + + // Phone extension code. It can be digits, commas, *, and # are allowed. + Extension *string `json:"extension,omitempty" xmlrpc:"extension,omitempty"` + + // Phone number can be a mobile phone number, desk phone number, or some other option. The phone number format must match the format selected in the country code. + Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"` + + // Type of phone number such as "primary", "office" or "home" + PhoneType *string `json:"phoneType,omitempty" xmlrpc:"phoneType,omitempty"` +} + +// Container classed used to hold portal token +type Container_User_Employee_External_Binding_Verisign struct { + Entity +} + +// At times,such as when attaching files to tickets, it is necessary to send files to SoftLayer API methods. The SoftLayer_Container_Utility_File_Attachment data type models a single file to upload to the API. +type Container_Utility_File_Attachment struct { + Entity + + // The contents of a file that is uploaded to the SoftLayer API. + Data *[]byte `json:"data,omitempty" xmlrpc:"data,omitempty"` + + // The name of a file that is uploaded to the SoftLayer API. 
+ Filename *string `json:"filename,omitempty" xmlrpc:"filename,omitempty"` +} + +// Used to describe a document in the file system on the file server +type Container_Utility_File_Descriptor struct { + Entity + + // The name of a file as it exists on the file server. + FileName *string `json:"fileName,omitempty" xmlrpc:"fileName,omitempty"` + + // The friendly name of a file as it exists on the file server. + FriendlyName *string `json:"friendlyName,omitempty" xmlrpc:"friendlyName,omitempty"` + + // The date the file was last modified on the file server. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` +} + +// SoftLayer_Container_Utility_File_Entity data type models a single entity on a storage resource. Entities can include anything within a storage volume including files, folders, directories, and CloudLayer storage projects. +type Container_Utility_File_Entity struct { + Entity + + // A file entity's raw content. + Content *[]byte `json:"content,omitempty" xmlrpc:"content,omitempty"` + + // A file entity's MIME content type. + ContentType *string `json:"contentType,omitempty" xmlrpc:"contentType,omitempty"` + + // The date a file entity was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The date a CloudLayer storage file entity was moved into the recycle bin. This field applies to files that are pending deletion in the recycle bin. + DeleteDate *Time `json:"deleteDate,omitempty" xmlrpc:"deleteDate,omitempty"` + + // Unique identifier for the file. This can be either a number or guid. + Id *string `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Whether a CloudLayer storage file entity is shared with another CloudLayer user. + IsShared *int `json:"isShared,omitempty" xmlrpc:"isShared,omitempty"` + + // The date a file entity was last changed. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A file entity's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The owner is usually the account who first upload or created the file on the resource or the account who is responsible for the file at the moment. + Owner *string `json:"owner,omitempty" xmlrpc:"owner,omitempty"` + + // The size of a file entity in bytes. + Size *uint `json:"size,omitempty" xmlrpc:"size,omitempty"` + + // A CloudLayer storage file entity's type. Types can include "file", "folder", "dir", and "project". + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The latest revision of a file on a CloudLayer storage volume. This number increments each time a new revision of the file is uploaded. + Version *int `json:"version,omitempty" xmlrpc:"version,omitempty"` +} + +// no documentation yet +type Container_Utility_Message struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Summary *string `json:"summary,omitempty" xmlrpc:"summary,omitempty"` +} + +// SoftLayer customer servers that are purchased with the Microsoft Windows operating system are configured by default to retrieve updates from SoftLayer's local Windows Server Update Services (WSUS) server. 
Periodically, these servers synchronize and check for new updates from their local WSUS server. SoftLayer_Container_Utility_Microsoft_Windows_UpdateServices_Status models the results of a server's last synchronization attempt as queried from SoftLayer's WSUS servers.
+type Container_Utility_Microsoft_Windows_UpdateServices_Status struct {
+	Entity
+
+	// The last time a server rebooted due to a Windows Update.
+	LastRebootDate *Time `json:"lastRebootDate,omitempty" xmlrpc:"lastRebootDate,omitempty"`
+
+	// The last time that SoftLayer's local WSUS server received a status update from a customer server.
+	LastStatusDate *Time `json:"lastStatusDate,omitempty" xmlrpc:"lastStatusDate,omitempty"`
+
+	// The last time a server synchronized with SoftLayer's local WSUS server.
+	LastSyncDate *Time `json:"lastSyncDate,omitempty" xmlrpc:"lastSyncDate,omitempty"`
+
+	// This is the private IP address for this server.
+	PrivateIPAddress *string `json:"privateIPAddress,omitempty" xmlrpc:"privateIPAddress,omitempty"`
+
+	// The status message returned from a server's last synchronization with SoftLayer's local WSUS server.
+	SyncStatus *string `json:"syncStatus,omitempty" xmlrpc:"syncStatus,omitempty"`
+
+	// A server's update status, as retrieved from SoftLayer's local WSUS server.
+	UpdateStatus *string `json:"updateStatus,omitempty" xmlrpc:"updateStatus,omitempty"`
+}
+
+// SoftLayer_Container_Utility_Microsoft_Windows_UpdateServices_UpdateItem models a single Microsoft Update as reported by SoftLayer's private Windows Server Update Services (WSUS) services. All servers purchased with Microsoft Windows retrieve updates from SoftLayer's WSUS servers by default.
+type Container_Utility_Microsoft_Windows_UpdateServices_UpdateItem struct {
+	Entity
+
+	// A short description of a Microsoft Windows Update.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// Flag indicating that this patch failed to properly install.
+	Failed *bool `json:"failed,omitempty" xmlrpc:"failed,omitempty"`
+
+	// A Windows Update's knowledge base article number. Every Windows Update can be referenced on the Microsoft Help and Support site at the URL http://support.microsoft.com/kb/
    . + KbArticleNumber *int `json:"kbArticleNumber,omitempty" xmlrpc:"kbArticleNumber,omitempty"` + + // Flag indicating that the update is entirely optionals + Optional *bool `json:"optional,omitempty" xmlrpc:"optional,omitempty"` + + // Flag indicating that a reboot is needed for this update to be fully applied + RequiresReboot *bool `json:"requiresReboot,omitempty" xmlrpc:"requiresReboot,omitempty"` +} + +// The SoftLayer_Container_Utility_Network_Firewall_Rule_Attribute data type contains information relating to a single firewall rule. +type Container_Utility_Network_Firewall_Rule_Attribute struct { + Entity + + // The valid actions for use with rules. + Actions []string `json:"actions,omitempty" xmlrpc:"actions,omitempty"` + + // Maximum allowed number of rules. + MaximumRuleCount *int `json:"maximumRuleCount,omitempty" xmlrpc:"maximumRuleCount,omitempty"` + + // The valid protocols for use with rules. + Protocols []string `json:"protocols,omitempty" xmlrpc:"protocols,omitempty"` + + // The valid source ip subnet masks for use with rules. + SourceIpSubnetMasks []Container_Utility_Network_Subnet_Mask_Generic_Detail `json:"sourceIpSubnetMasks,omitempty" xmlrpc:"sourceIpSubnetMasks,omitempty"` +} + +// The SoftLayer_Container_Utility_Network_Subnet_Mask_Generic_Detail data type contains information relating to a subnet mask and details associated with that object. +type Container_Utility_Network_Subnet_Mask_Generic_Detail struct { + Entity + + // The subnet cidr prefix. + Cidr *string `json:"cidr,omitempty" xmlrpc:"cidr,omitempty"` + + // The subnet mask description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The subnet mask. + Mask *string `json:"mask,omitempty" xmlrpc:"mask,omitempty"` +} + +// This data type represents the structure to hold the allocation properties of a [[SoftLayer_Virtual_DedicatedHost]]. +type Container_Virtual_DedicatedHost_AllocationStatus struct { + Entity + + // Number of allocated CPU cores on the specified dedicated host. + CpuAllocated *int `json:"cpuAllocated,omitempty" xmlrpc:"cpuAllocated,omitempty"` + + // Number of available CPU cores on the specified dedicated host. + CpuAvailable *int `json:"cpuAvailable,omitempty" xmlrpc:"cpuAvailable,omitempty"` + + // Total number of CPU cores on the dedicated host. + CpuCount *int `json:"cpuCount,omitempty" xmlrpc:"cpuCount,omitempty"` + + // Amount of allocated disk space on the specified dedicated host. + DiskAllocated *int `json:"diskAllocated,omitempty" xmlrpc:"diskAllocated,omitempty"` + + // Amount of available disk space on the specified dedicated host. + DiskAvailable *int `json:"diskAvailable,omitempty" xmlrpc:"diskAvailable,omitempty"` + + // Total amount of disk capacity on the dedicated host. + DiskCapacity *int `json:"diskCapacity,omitempty" xmlrpc:"diskCapacity,omitempty"` + + // Number of allocated guests on the specified dedicated host. + GuestCount *int `json:"guestCount,omitempty" xmlrpc:"guestCount,omitempty"` + + // Amount of allocated memory on the specified dedicated host. + MemoryAllocated *int `json:"memoryAllocated,omitempty" xmlrpc:"memoryAllocated,omitempty"` + + // Amount of available memory on the specified dedicated host. + MemoryAvailable *int `json:"memoryAvailable,omitempty" xmlrpc:"memoryAvailable,omitempty"` + + // Total amount of memory capacity on the dedicated host. 
+ MemoryCapacity *int `json:"memoryCapacity,omitempty" xmlrpc:"memoryCapacity,omitempty"` +} + +// This data type represents PCI device allocation properties of a [[SoftLayer_Virtual_DedicatedHost]]. +type Container_Virtual_DedicatedHost_Pci_Device_AllocationStatus struct { + Entity + + // The number of PCI devices on the host. + DeviceCount *int `json:"deviceCount,omitempty" xmlrpc:"deviceCount,omitempty"` + + // The number of PCI devices currently allocated to guests. + DevicesAllocated *int `json:"devicesAllocated,omitempty" xmlrpc:"devicesAllocated,omitempty"` + + // The number of PCI devices available for allocation. + DevicesAvailable *int `json:"devicesAvailable,omitempty" xmlrpc:"devicesAvailable,omitempty"` + + // The generic component model ID of the PCI device. + HardwareComponentModelGenericId *int `json:"hardwareComponentModelGenericId,omitempty" xmlrpc:"hardwareComponentModelGenericId,omitempty"` + + // The ID of the host that the dedicated host is on. + HostId *int `json:"hostId,omitempty" xmlrpc:"hostId,omitempty"` +} + +// The SoftLayer_Container_Virtual_Guest_Block_Device_Template_Configuration data type contains information relating to a template's external location for importing and exporting +type Container_Virtual_Guest_Block_Device_Template_Configuration struct { + Entity + + // + // Optional virtualization boot mode parameter, if set, can mark a template to boot specifically into PV or HVM. + BootMode *string `json:"bootMode,omitempty" xmlrpc:"bootMode,omitempty"` + + // + // Specifies if image is using a customer's software license. + Byol *bool `json:"byol,omitempty" xmlrpc:"byol,omitempty"` + + // + // Specifies if image requires cloud-init. + CloudInit *bool `json:"cloudInit,omitempty" xmlrpc:"cloudInit,omitempty"` + + // The group name to be applied to the imported template + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The note to be applied to the imported template + Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"` + + // + // The referenceCode of the operating system software description for the imported VHD + OperatingSystemReferenceCode *string `json:"operatingSystemReferenceCode,omitempty" xmlrpc:"operatingSystemReferenceCode,omitempty"` + + // + // Optional Collection of modes that this template supports booting into. + SupportedBootModes []string `json:"supportedBootModes,omitempty" xmlrpc:"supportedBootModes,omitempty"` + + // + // The URI for an object storage object (.vhd/.iso file) + // swift://@// + Uri *string `json:"uri,omitempty" xmlrpc:"uri,omitempty"` +} + +// The guest configuration container is used to provide configuration options for creating computing instances. +// +// Each configuration option will include both an itemPrice and a template. +// +// The itemPrice value will provide hourly and monthly costs (if either are applicable), and a description of the option. +// +// The template will provide a fragment of the request with the properties and values that must be sent when creating a computing instance with the option. +// +// The [[SoftLayer_Virtual_Guest/getCreateObjectOptions|getCreateObjectOptions]] method returns this data structure. +// +// +type Container_Virtual_Guest_Configuration struct { + Entity + + // + //
    + // Available block device options. + // + // + // A computing instance will have at least one block device represented by a device number of '0'. + // + // + // The blockDevices.device value in the template represents which device the option is for. + // The blockDevices.diskImage.capacity value in the template represents the size, in gigabytes, of the disk. + // The localDiskFlag value in the template represents whether the option is a local or SAN based disk. + // + // + // Note: The block device number '1' is reserved for the SWAP disk attached to the computing instance. + //
    + BlockDevices []Container_Virtual_Guest_Configuration_Option `json:"blockDevices,omitempty" xmlrpc:"blockDevices,omitempty"` + + // + //
    + // Available datacenter options. + // + // + // The datacenter.name value in the template represents which datacenter the computing instance will be provisioned in. + //
    + Datacenters []Container_Virtual_Guest_Configuration_Option `json:"datacenters,omitempty" xmlrpc:"datacenters,omitempty"` + + // + //
    + // + // + // Available flavor options. + // + // + // The supplementalCreateObjectOptions.flavorKeyName value in the template is an identifier for a particular core, ram, and primary disk configuration. + // + // + // When providing a supplementalCreateObjectOptions.flavorKeyName option the core, ram, and primary disk options are not needed. If those options are provided they are validated against the flavor. + //
    + Flavors []Container_Virtual_Guest_Configuration_Option `json:"flavors,omitempty" xmlrpc:"flavors,omitempty"` + + // + //
    + // Available memory options. + // + // + // The maxMemory value in the template represents the amount of memory, in megabytes, allocated to the computing instance. + //
    + Memory []Container_Virtual_Guest_Configuration_Option `json:"memory,omitempty" xmlrpc:"memory,omitempty"` + + // + //
    + // Available network component options. + // + // + // The networkComponent.maxSpeed value in the template represents the link speed, in megabits per second, of the network connections for a computing instance. + //
    + NetworkComponents []Container_Virtual_Guest_Configuration_Option `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"` + + // + //
    + // Available operating system options. + // + // + // The operatingSystemReferenceCode value in the template is an identifier for a particular operating system. When provided exactly as shown in the template, that operating system will be used. + // + // + // A reference code is structured as three tokens separated by underscores. The first token represents the product, the second is the version of the product, and the third is whether the OS is 32 or 64bit. + // + // + // When providing an operatingSystemReferenceCode while ordering a computing instance the only token required to match exactly is the product. The version token may be given as 'LATEST', else it will require an exact match as well. When the bits token is not provided, 64 bits will be assumed. + // + // + // Providing the value of 'LATEST' for a version will select the latest release of that product for the operating system. As this may change over time, you should be sure that the release version is irrelevant for your applications. + // + // + // For Windows based operating systems the version will represent both the release version (2008, 2012, etc) and the edition (Standard, Enterprise, etc). For all other operating systems the version will represent the major version (Centos 6, Ubuntu 12, etc) of that operating system, minor versions are not represented in a reference code. + // + // + // Notice - Some operating systems are charged based on the value specified in startCpus. The price which is used can be determined by calling [[SoftLayer_Virtual_Guest/generateOrderTemplate|generateOrderTemplate]] with your desired device specifications. + //
    + OperatingSystems []Container_Virtual_Guest_Configuration_Option `json:"operatingSystems,omitempty" xmlrpc:"operatingSystems,omitempty"` + + // + //
    + // Available processor options. + // + // + // The startCpus value in the template represents the number of cores allocated to the computing instance. + // The dedicatedAccountHostOnlyFlag value in the template represents whether the instance will run on hosts with instances belonging to other accounts. + //
    + Processors []Container_Virtual_Guest_Configuration_Option `json:"processors,omitempty" xmlrpc:"processors,omitempty"` +} + +// An option found within a [[SoftLayer_Container_Virtual_Guest_Configuration (type)]] structure. +type Container_Virtual_Guest_Configuration_Option struct { + Entity + + // + // Provides a description of a pre-defined configuration with monthly and hourly costs. + Flavor *Product_Package_Preset `json:"flavor,omitempty" xmlrpc:"flavor,omitempty"` + + // + // Provides hourly and monthly costs (if either are applicable), and a description of the option. + ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"` + + // + // Provides a fragment of the request with the properties and values that must be sent when creating a computing instance with the option. + Template *Virtual_Guest `json:"template,omitempty" xmlrpc:"template,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/dns.go b/vendor/github.com/softlayer/softlayer-go/datatypes/dns.go new file mode 100644 index 000000000000..98be7e711bf6 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/dns.go @@ -0,0 +1,430 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Dns_Domain data type represents a single DNS domain record hosted on the SoftLayer nameservers. Domains contain general information about the domain name such as name and serial. Individual records such as A, AAAA, CTYPE, and MX records are stored in the domain's associated [[SoftLayer_Dns_Domain_ResourceRecord (type)|SoftLayer_Dns_Domain_ResourceRecord]] records. +type Dns_Domain struct { + Entity + + // The SoftLayer customer account that owns a domain. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A domain record's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A flag indicating that the dns domain record is a managed resource. + ManagedResourceFlag *bool `json:"managedResourceFlag,omitempty" xmlrpc:"managedResourceFlag,omitempty"` + + // A domain's name including top-level domain, for example "example.com". + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of the individual records contained within a domain record. These include but are not limited to A, AAAA, MX, CTYPE, SPF and TXT records. + ResourceRecordCount *uint `json:"resourceRecordCount,omitempty" xmlrpc:"resourceRecordCount,omitempty"` + + // The individual records contained within a domain record. These include but are not limited to A, AAAA, MX, CTYPE, SPF and TXT records. + ResourceRecords []Dns_Domain_ResourceRecord `json:"resourceRecords" xmlrpc:"resourceRecords"` + + // The secondary DNS record that defines this domain as being managed through zone transfers. 
+ Secondary *Dns_Secondary `json:"secondary,omitempty" xmlrpc:"secondary,omitempty"` + + // A unique number denoting the latest revision of a domain. Whenever a domain is changed its corresponding serial number is also changed. Serial numbers typically follow the format yyyymmdd## where yyyy is the current year, mm is the current month, dd is the current day of the month, and ## is the number of the revision for that day. A domain's serial number is automatically updated when edited via the API. + Serial *int `json:"serial,omitempty" xmlrpc:"serial,omitempty"` + + // The start of authority (SOA) record contains authoritative and propagation details for a DNS zone. This property is not considered in requests to createObject and editObject. + SoaResourceRecord *Dns_Domain_ResourceRecord_SoaType `json:"soaResourceRecord,omitempty" xmlrpc:"soaResourceRecord,omitempty"` + + // The date that this domain record was last updated. + UpdateDate *Time `json:"updateDate,omitempty" xmlrpc:"updateDate,omitempty"` +} + +// The SoftLayer_Dns_Domain_Forward data type represents a single DNS domain record hosted on the SoftLayer nameservers. Domains contain general information about the domain name such as name and serial. Individual records such as A, AAAA, CTYPE, and MX records are stored in the domain's associated [[SoftLayer_Dns_Domain_ResourceRecord (type)|SoftLayer_Dns_Domain_ResourceRecord]] records. +type Dns_Domain_Forward struct { + Dns_Domain +} + +// The SoftLayer_Dns_Domain_Registration data type represents a domain registration record. +type Dns_Domain_Registration struct { + Entity + + // The SoftLayer customer account that the domain is registered to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The domain registration status. + DomainRegistrationStatus *Dns_Domain_Registration_Status `json:"domainRegistrationStatus,omitempty" xmlrpc:"domainRegistrationStatus,omitempty"` + + // no documentation yet + DomainRegistrationStatusId *int `json:"domainRegistrationStatusId,omitempty" xmlrpc:"domainRegistrationStatusId,omitempty"` + + // The date that the domain registration will expire. + ExpireDate *Time `json:"expireDate,omitempty" xmlrpc:"expireDate,omitempty"` + + // A domain record's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Indicates whether a domain is locked or unlocked. + LockedFlag *int `json:"lockedFlag,omitempty" xmlrpc:"lockedFlag,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A domain's name, for example "example.com". + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The registrant verification status. + RegistrantVerificationStatus *Dns_Domain_Registration_Registrant_Verification_Status `json:"registrantVerificationStatus,omitempty" xmlrpc:"registrantVerificationStatus,omitempty"` + + // no documentation yet + RegistrantVerificationStatusId *int `json:"registrantVerificationStatusId,omitempty" xmlrpc:"registrantVerificationStatusId,omitempty"` + + // no documentation yet + ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"` + + // no documentation yet + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` +} + +// SoftLayer_Dns_Domain_Registration_Registrant_Verification_Status models the state of the registrant. 
The following status codes are possible:
+//
+//
+// *'''Admin Reviewing''': The registrant data has been submitted and is being reviewed by the compliance team.
+// *'''Pending''': The verification process has been initiated, and a verification email will be sent.
+// *'''Suspended''': The registrant has failed verification and the domain has been suspended.
+// *'''Verified''': The registrant has been validated.
+// *'''Verifying''': The verification process has been initiated and is waiting for the registrant's response.
+// *'''Unverified''': The verification process has not been initiated.
+//
+//
+type Dns_Domain_Registration_Registrant_Verification_Status struct {
+	Entity
+
+	// The description of the registrant verification status.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The unique identifier of the registrant verification status.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The unique keyname of the registrant verification status.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// The name of the registrant verification status.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// SoftLayer_Dns_Domain_Registration_Status models the state of a domain name. The following status codes are possible:
+//
+//
+// *'''Active''': This domain name is active.
+// *'''Pending Owner Approval''': Pending owner approval for completion of transfer.
+// *'''Pending Admin Review''': Pending admin review for transfer.
+// *'''Pending Registry''': Pending registry for transfer.
+// *'''Expired''': Domain name has expired.
+//
+//
+type Dns_Domain_Registration_Status struct {
+	Entity
+
+	// The description of the domain registration status names.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The unique identifier of the domain registration status.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The unique keyname of the domain registration status.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// The name of the domain registration status.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Dns_Domain_ResourceRecord data type represents a single resource record entry in a SoftLayer hosted domain. Each resource record contains a ''host'' and ''data'' property, defining a resource's name and its target data. Domains contain multiple types of resource records. The ''type'' property separates out resource records by type.
''Type'' can take one of the following values: +// * '''"a"''' for [[SoftLayer_Dns_Domain_ResourceRecord_AType|address]] records +// * '''"aaaa"''' for [[SoftLayer_Dns_Domain_ResourceRecord_AaaaType|address]] records +// * '''"cname"''' for [[SoftLayer_Dns_Domain_ResourceRecord_CnameType|canonical name]] records +// * '''"mx"''' for [[SoftLayer_Dns_Domain_ResourceRecord_MxType|mail exchanger]] records +// * '''"ns"''' for [[SoftLayer_Dns_Domain_ResourceRecord_NsType|name server]] records +// * '''"ptr"''' for [[SoftLayer_Dns_Domain_ResourceRecord_PtrType|pointer]] records in reverse domains +// * '''"soa"''' for a domain's [[SoftLayer_Dns_Domain_ResourceRecord_SoaType|start of authority]] record +// * '''"spf"''' for [[SoftLayer_Dns_Domain_ResourceRecord_SpfType|sender policy framework]] records +// * '''"srv"''' for [[SoftLayer_Dns_Domain_ResourceRecord_SrvType|service]] records +// * '''"txt"''' for [[SoftLayer_Dns_Domain_ResourceRecord_TxtType|text]] records +// +// +// As ''SoftLayer_Dns_Domain_ResourceRecord'' objects are created and loaded, the API verifies the ''type'' property and casts the object as the appropriate type. +type Dns_Domain_ResourceRecord struct { + Entity + + // The value of a domain's resource record. This can be an IP address or a hostname. Fully qualified host and domain name data must end with the "." character. + Data *string `json:"data,omitempty" xmlrpc:"data,omitempty"` + + // The domain that a resource record belongs to. + Domain *Dns_Domain `json:"domain,omitempty" xmlrpc:"domain,omitempty"` + + // An identifier belonging to the domain that a resource record is associated with. + DomainId *int `json:"domainId,omitempty" xmlrpc:"domainId,omitempty"` + + // The amount of time in seconds that a secondary name server (or servers) will hold a zone before it is no longer considered authoritative. + Expire *int `json:"expire,omitempty" xmlrpc:"expire,omitempty"` + + // The host defined by a resource record. A value of "@" denotes a wildcard. + Host *string `json:"host,omitempty" xmlrpc:"host,omitempty"` + + // A domain resource record's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Whether the address associated with a PTR record is the gateway address of a subnet. + IsGatewayAddress *bool `json:"isGatewayAddress,omitempty" xmlrpc:"isGatewayAddress,omitempty"` + + // The amount of time in seconds that a domain's resource records are valid. This is also known as a minimum TTL, and can be overridden by an individual resource record's TTL. + Minimum *int `json:"minimum,omitempty" xmlrpc:"minimum,omitempty"` + + // Useful in cases where a domain has more than one mail exchanger, the priority property is the priority of the MTA that delivers mail for a domain. A lower number denotes a higher priority, and mail will attempt to deliver through that MTA before moving to lower priority mail servers. Priority is defaulted to 10 upon resource record creation. + MxPriority *int `json:"mxPriority,omitempty" xmlrpc:"mxPriority,omitempty"` + + // The TCP or UDP port on which the service is to be found. + Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"` + + // The priority of the target host, lower value means more preferred. + Priority *int `json:"priority,omitempty" xmlrpc:"priority,omitempty"` + + // The protocol of the desired service; this is usually either TCP or UDP. 
+ Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // The amount of time in seconds that a secondary name server should wait to check for a new copy of a DNS zone from the domain's primary name server. If a zone file has changed then the secondary DNS server will update it's copy of the zone to match the primary DNS server's zone. + Refresh *int `json:"refresh,omitempty" xmlrpc:"refresh,omitempty"` + + // The email address of the person responsible for a domain, with the "@" replaced with a ".". For instance, if root@example.org is responsible for example.org, then example.org's SOA responsibility is "root.example.org.". + ResponsiblePerson *string `json:"responsiblePerson,omitempty" xmlrpc:"responsiblePerson,omitempty"` + + // The amount of time in seconds that a domain's primary name server (or servers) should wait if an attempt to refresh by a secondary name server failed before attempting to refresh a domain's zone with that secondary name server again. + Retry *int `json:"retry,omitempty" xmlrpc:"retry,omitempty"` + + // The symbolic name of the desired service + Service *string `json:"service,omitempty" xmlrpc:"service,omitempty"` + + // The Time To Live value of a resource record, measured in seconds. TTL is used by a name server to determine how long to cache a resource record. An SOA record's TTL value defines the domain's overall TTL. + Ttl *int `json:"ttl,omitempty" xmlrpc:"ttl,omitempty"` + + // A domain resource record's type. A value of "a" denotes an A (address) record, "aaaa" denotes an AAAA (IPv6 address) record, "cname" denotes a CNAME (canonical name) record, "mx" denotes an MX (mail exchanger) record, "ns" denotes an NS (nameserver) record, "ptr" denotes a PTR (pointer/reverse) record, "soa" denotes the SOA (start of authority) record, "spf" denotes a SPF (sender policy framework) record, and "txt" denotes a TXT (text) record. A domain record's type also denotes which class in the SoftLayer API is a best match for extending a resource record. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // A relative weight for records with the same priority. + Weight *int `json:"weight,omitempty" xmlrpc:"weight,omitempty"` +} + +// SoftLayer_Dns_Domain_ResourceRecord_AType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "a" and defines a DNS A record on a SoftLayer hosted domain. An A record directs a host name to an IP address. For instance if the A record for "host.example.org" points to the IP address 10.0.0.1 then the ''host'' property for the A record equals "host" and the ''data'' property equals "10.0.0.1". +type Dns_Domain_ResourceRecord_AType struct { + Dns_Domain_ResourceRecord +} + +// SoftLayer_Dns_Domain_ResourceRecord_AaaaType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "aaaa" and defines a DNS AAAA record on a SoftLayer hosted domain. An AAAA record directs a host name to an IPv6 address. For instance if the AAAA record for "host.example.org" points to the IPv6 address "fe80:0:0:0:0:0:a00:0" then the ''host'' property for the AAAA record equals "host" and the ''data'' property equals "fe80:0:0:0:0:0:a00:0". +type Dns_Domain_ResourceRecord_AaaaType struct { + Dns_Domain_ResourceRecord +} + +// SoftLayer_Dns_Domain_ResourceRecord_CnameType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "cname" and defines a DNS CNAME record on a SoftLayer hosted domain. A CNAME record directs a host name to another host. 
For instance, if the CNAME record for "alias.example.org" points to the host "host.example.org" then the ''host'' property equals "alias" and the ''data'' property equals "host.example.org.". +// +// DNS entries defined by CNAME should not be used as the data field for an MX record. +type Dns_Domain_ResourceRecord_CnameType struct { + Dns_Domain_ResourceRecord +} + +// SoftLayer_Dns_Domain_ResourceRecord_MxType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "mx" and used to describe MX resource records. MX records control which hosts are responsible as mail exchangers for a domain. For instance, in the domain example.org, an MX record whose host is "@" and data is "mail" says that the host "mail.example.org" is responsible for handling mail for example.org. That means mail sent to users @example.org is delivered to mail.example.org. +// +// Domains can have more than one MX record if they use more than one server to send mail through. Multiple MX records are denoted by their priority, defined by the mxPriority property. +// +// MX records must be defined for hosts with accompanying A or AAAA resource records. They may not point mail towards a host defined by a CNAME record. +type Dns_Domain_ResourceRecord_MxType struct { + Dns_Domain_ResourceRecord +} + +// SoftLayer_Dns_Domain_ResourceRecord_NsType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "ns" and defines a DNS NS record on a SoftLayer hosted domain. An NS record defines the authoritative name server for a domain. All SoftLayer hosted domains contain NS records for "ns1.softlayer.com" and "ns2.softlayer.com". For instance, if example.org is hosted on ns1.softlayer.com, then example.org contains an NS record whose ''host'' property equals "@" and whose ''data'' property equals "ns1.softlayer.com". +// +// NS resource records pointing to ns1.softlayer.com or ns2.softlayer.com may not be removed from a SoftLayer hosted domain. +type Dns_Domain_ResourceRecord_NsType struct { + Dns_Domain_ResourceRecord +} + +// SoftLayer_Dns_Domain_ResourceRecord_PtrType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "ptr" and defines a reverse DNS PTR record on the SoftLayer name servers. +// +// The format for a reverse DNS PTR record varies based on whether it is for an IPv4 or IPv6 address. +// +// For an IPv4 address the ''host'' property for every PTR record is the last octet of the IP address that the PTR record belongs to, while the ''data'' property is the canonical name of the host that the reverse lookup resolves to. Every PTR record belongs to a domain on the SoftLayer name servers named by the first three octets of an IP address in reverse order followed by ".in-addr.arpa". +// +// For instance, if the reverse DNS record for 10.0.0.1 is "host.example.org" then its corresponding SoftLayer_Dns_Domain_ResourceRecord_PtrType host is "1", while its data property equals "host.example.org". The full name of the reverse record for host.example.org including the domain name is "1.0.0.10.in-addr.arpa". +// +// For an IPv6 address the ''host'' property for every PTR record is the last four octets of the IP address that the PTR record belongs to. The last four octets need to be in reversed order and each digit separated by a period. The ''data'' property is the canonical name of the host that the reverse lookup resolves to.
Every PTR record belongs to a domain on the SoftLayer name servers named by the first four octets of an IP address in reverse order, split up by digit with a period, and followed by ".ip6.arpa". +// +// For instance, if the reverse DNS record for fe80:0000:0000:0000:0000:0000:0a00:0001 is "host.example.org" then its corresponding SoftLayer_Dns_Domain_ResourceRecord_PtrType host is "1.0.0.0.0.0.a.0.0.0.0.0.0.0.0.0", while its data property equals "host.example.org". The full name of the reverse record for host.example.org including the domain name is "1.0.0.0.0.0.a.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.e.f.ip6.arpa". +// +// PTR record host names may not be changed by [[SoftLayer_Dns_Domain_ResourceRecord::editObject]] or [[SoftLayer_Dns_Domain_ResourceRecord::editObjects]]. +type Dns_Domain_ResourceRecord_PtrType struct { + Dns_Domain_ResourceRecord + + // Whether the address associated with a PTR record is the gateway address of a subnet. + IsGatewayAddress *bool `json:"isGatewayAddress,omitempty" xmlrpc:"isGatewayAddress,omitempty"` +} + +// SoftLayer_Dns_Domain_ResourceRecord_SoaType defines a domain's Start of Authority (or SOA) resource record. A domain's SOA record contains a domain's general and propagation information. Every domain must have one SOA record, and it is not possible to remove a domain's SOA record. +// +// SOA records typically contain a domain's serial number, but the SoftLayer API associates a domain's serial number directly with its SoftLayer_Dns_Domain record. +type Dns_Domain_ResourceRecord_SoaType struct { + Dns_Domain_ResourceRecord +} + +// SoftLayer_Dns_Domain_ResourceRecord_SpfType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "spf" and defines a DNS SPF record on a SoftLayer hosted domain. An SPF record provides sender policy framework data for a host. For instance, if defining the SPF record "v=spf1 mx:mail.example.org ~all" for "host.example.org", then the ''host'' property equals "host" and the ''data'' property equals "v=spf1 mx:mail.example.org ~all". +// +// SPF records are commonly used in email verification methods such as Sender Policy Framework. +type Dns_Domain_ResourceRecord_SpfType struct { + Dns_Domain_ResourceRecord_TxtType +} + +// SoftLayer_Dns_Domain_ResourceRecord_SrvType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "srv" and defines a DNS SRV record on a SoftLayer hosted domain. +type Dns_Domain_ResourceRecord_SrvType struct { + Dns_Domain_ResourceRecord + + // The TCP or UDP port on which the service is to be found. + Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"` + + // The priority of the target host, lower value means more preferred. + Priority *int `json:"priority,omitempty" xmlrpc:"priority,omitempty"` + + // The protocol of the desired service; this is usually either TCP or UDP. + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // The symbolic name of the desired service + Service *string `json:"service,omitempty" xmlrpc:"service,omitempty"` + + // A relative weight for records with the same priority. + Weight *int `json:"weight,omitempty" xmlrpc:"weight,omitempty"` +} + +// SoftLayer_Dns_Domain_ResourceRecord_TxtType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "txt" and defines a DNS TXT record on a SoftLayer hosted domain. A TXT record provides a text description for a host. For instance, if defining the TXT record "My test host" for "host.example.org",
then the ''host'' property equals "host" and the ''data'' property equals "My test host". +// +// TXT records are commonly used in email verification methods such as Sender Policy Framework. +type Dns_Domain_ResourceRecord_TxtType struct { + Dns_Domain_ResourceRecord +} + +// The SoftLayer_Dns_Domain_Reverse data type represents a reverse IP address record. +type Dns_Domain_Reverse struct { + Dns_Domain + + // Network address the domain is associated with. + NetworkAddress *string `json:"networkAddress,omitempty" xmlrpc:"networkAddress,omitempty"` +} + +// The SoftLayer_Dns_Domain_Reverse_Version4 data type represents a reverse IPv4 address record. +type Dns_Domain_Reverse_Version4 struct { + Dns_Domain_Reverse +} + +// The SoftLayer_Dns_Domain_Reverse_Version6 data type represents a reverse IPv6 address record. +type Dns_Domain_Reverse_Version6 struct { + Dns_Domain_Reverse +} + +// The SoftLayer_Dns_Message data type contains information for a single message generated by the SoftLayer DNS system. SoftLayer_Dns_Messages are typically created during the secondary DNS transfer process. +type Dns_Message struct { + Entity + + // The date the message was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The domain that is associated with a message. + Domain *Dns_Domain `json:"domain,omitempty" xmlrpc:"domain,omitempty"` + + // The internal identifier for a DNS message. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The message text. + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // The priority level for a DNS message. The possible levels are 'notice' and 'error'. + Priority *string `json:"priority,omitempty" xmlrpc:"priority,omitempty"` + + // The resource record that is associated with a message. + ResourceRecord *Dns_Domain_ResourceRecord `json:"resourceRecord,omitempty" xmlrpc:"resourceRecord,omitempty"` + + // The secondary DNS record that a message belongs to. + Secondary *Dns_Secondary `json:"secondary,omitempty" xmlrpc:"secondary,omitempty"` +} + +// The SoftLayer_Dns_Secondary data type contains information on a single secondary DNS zone which is managed through SoftLayer's zone transfer service. Domains created via zone transfer may not be modified by the SoftLayer portal or API. +type Dns_Secondary struct { + Entity + + // The SoftLayer account that owns a secondary DNS record. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The date a secondary DNS record was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The domain record created by zone transfer from a secondary DNS record. + Domain *Dns_Domain `json:"domain,omitempty" xmlrpc:"domain,omitempty"` + + // A count of the error messages created during secondary DNS record transfer. + ErrorMessageCount *uint `json:"errorMessageCount,omitempty" xmlrpc:"errorMessageCount,omitempty"` + + // The error messages created during secondary DNS record transfer. + ErrorMessages []Dns_Message `json:"errorMessages,omitempty" xmlrpc:"errorMessages,omitempty"` + + // The internal identifier for a secondary DNS record. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date when the most recent secondary DNS zone transfer took place. + LastUpdate *Time `json:"lastUpdate,omitempty" xmlrpc:"lastUpdate,omitempty"` + + // The IP address of the master name server where a secondary DNS zone is transferred from. 
+ MasterIpAddress *string `json:"masterIpAddress,omitempty" xmlrpc:"masterIpAddress,omitempty"` + + // The current status of the secondary DNS zone. + Status *Dns_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The current status of a secondary DNS record. The status may be one of the following: + // :*'''0''': Disabled + // :*'''1''': Active + // :*'''2''': Transfer Now + // :*'''3''': An error occurred that prevented the zone transfer from being completed. + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // The textual representation of a secondary DNS zone's status. + StatusText *string `json:"statusText,omitempty" xmlrpc:"statusText,omitempty"` + + // How often a secondary DNS zone should be transferred in minutes. + TransferFrequency *int `json:"transferFrequency,omitempty" xmlrpc:"transferFrequency,omitempty"` + + // The name of the zone that is transferred. + ZoneName *string `json:"zoneName,omitempty" xmlrpc:"zoneName,omitempty"` +} + +// The SoftLayer_Dns_Status data type contains information for a DNS status +type Dns_Status struct { + Entity + + // Internal identifier of a DNS status + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Monitoring DNS status name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/email.go b/vendor/github.com/softlayer/softlayer-go/datatypes/email.go new file mode 100644 index 000000000000..93128114a765 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/email.go @@ -0,0 +1,63 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Email_Subscription struct { + Entity + + // Brief description of the purpose of the email. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Enabled *bool `json:"enabled,omitempty" xmlrpc:"enabled,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Email template name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Email_Subscription_Group struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Email subscription group name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of all email subscriptions associated with this group. + SubscriptionCount *uint `json:"subscriptionCount,omitempty" xmlrpc:"subscriptionCount,omitempty"` + + // All email subscriptions associated with this group. 
+ Subscriptions []Email_Subscription `json:"subscriptions,omitempty" xmlrpc:"subscriptions,omitempty"` +} + +// no documentation yet +type Email_Subscription_Suppression_User struct { + Entity + + // no documentation yet + Subscription *Email_Subscription `json:"subscription,omitempty" xmlrpc:"subscription,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/entity.go b/vendor/github.com/softlayer/softlayer-go/datatypes/entity.go new file mode 100644 index 000000000000..49e32cf35898 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/entity.go @@ -0,0 +1,25 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Entity struct { +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/event.go b/vendor/github.com/softlayer/softlayer-go/datatypes/event.go new file mode 100644 index 000000000000..ee0084edbfe0 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/event.go @@ -0,0 +1,71 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Event_Log data type contains an event detail occurred upon various SoftLayer resources. +type Event_Log struct { + Entity + + // Account id with which the event is associated + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // Event creation date in millisecond precision + EventCreateDate *Time `json:"eventCreateDate,omitempty" xmlrpc:"eventCreateDate,omitempty"` + + // Event name such as "reboot", "cancel", "update host" and so on. + EventName *string `json:"eventName,omitempty" xmlrpc:"eventName,omitempty"` + + // The remote IP Address that made the request + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // Label or description of the event object + Label *string `json:"label,omitempty" xmlrpc:"label,omitempty"` + + // Meta data for an event in JSON string + MetaData *string `json:"metaData,omitempty" xmlrpc:"metaData,omitempty"` + + // Event object id + ObjectId *int `json:"objectId,omitempty" xmlrpc:"objectId,omitempty"` + + // Event object name such as "server", "dns" and so on. 
+ ObjectName *string `json:"objectName,omitempty" xmlrpc:"objectName,omitempty"` + + // OpenIdConnectUserName of the customer who initiated the event + OpenIdConnectUserName *string `json:"openIdConnectUserName,omitempty" xmlrpc:"openIdConnectUserName,omitempty"` + + // A resource object that is associated with the event + Resource *Entity `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // A unique trace id. Multiple event can be grouped by a trace id. + TraceId *string `json:"traceId,omitempty" xmlrpc:"traceId,omitempty"` + + // no documentation yet + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // Id of customer who initiated the event + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` + + // Type of user that triggered the event. User type can be CUSTOMER, EMPLOYEE or SYSTEM. + UserType *string `json:"userType,omitempty" xmlrpc:"userType,omitempty"` + + // Customer username who initiated the event + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/flexiblecredit.go b/vendor/github.com/softlayer/softlayer-go/datatypes/flexiblecredit.go new file mode 100644 index 000000000000..23da7fbfec39 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/flexiblecredit.go @@ -0,0 +1,110 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type FlexibleCredit_Affiliate struct { + Entity + + // Flexible Credit Program the affiliate belongs to. 
+ FlexibleCreditProgram *FlexibleCredit_Program `json:"flexibleCreditProgram,omitempty" xmlrpc:"flexibleCreditProgram,omitempty"` + + // Primary ID for the affiliate + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Name of this affiliate + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type FlexibleCredit_Company_Type struct { + Entity + + // Description of the company type + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Primary ID for the company type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// no documentation yet +type FlexibleCredit_Enrollment struct { + Entity + + // Account the enrollment belongs to + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // Account ID associated with this enrollment + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // Affiliate associated with the account enrollment + Affiliate *FlexibleCredit_Affiliate `json:"affiliate,omitempty" xmlrpc:"affiliate,omitempty"` + + // ID of the corresponding Flexible Credit Program Affiliate + AffiliateId *int `json:"affiliateId,omitempty" xmlrpc:"affiliateId,omitempty"` + + // Indicates signing of Flexible Credit agreement (independent from MSA) + AgreementCompleteFlag *int `json:"agreementCompleteFlag,omitempty" xmlrpc:"agreementCompleteFlag,omitempty"` + + // Brief description of the company + CompanyDescription *string `json:"companyDescription,omitempty" xmlrpc:"companyDescription,omitempty"` + + // Category which best describes the company + CompanyType *FlexibleCredit_Company_Type `json:"companyType,omitempty" xmlrpc:"companyType,omitempty"` + + // ID of the Flexible Credit Program Company classification for this enrollment + CompanyTypeId *int `json:"companyTypeId,omitempty" xmlrpc:"companyTypeId,omitempty"` + + // Date when participation in the Flexible Credit program began + EnrollmentDate *Time `json:"enrollmentDate,omitempty" xmlrpc:"enrollmentDate,omitempty"` + + // Discount program the enrollment belongs to + FlexibleCreditProgram *FlexibleCredit_Program `json:"flexibleCreditProgram,omitempty" xmlrpc:"flexibleCreditProgram,omitempty"` + + // Date Flexible Credit Program benefits end. + GraduationDate *Time `json:"graduationDate,omitempty" xmlrpc:"graduationDate,omitempty"` + + // Flag indicating whether an enrollment is active (true) or inactive (false) + IsActiveFlag *bool `json:"isActiveFlag,omitempty" xmlrpc:"isActiveFlag,omitempty"` + + // Amount of monthly credit (USD) given to the account + MonthlyCreditAmount *Float64 `json:"monthlyCreditAmount,omitempty" xmlrpc:"monthlyCreditAmount,omitempty"` + + // Employee overseeing the enrollment + Representative *User_Employee `json:"representative,omitempty" xmlrpc:"representative,omitempty"` + + // ID of the employee representing this account. + RepresentativeEmployeeId *int `json:"representativeEmployeeId,omitempty" xmlrpc:"representativeEmployeeId,omitempty"` +} + +// no documentation yet +type FlexibleCredit_Program struct { + Entity + + // Primary ID of the Flexible Credit Program + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Unique name for the Flexible Credit Program + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // Name of the Flexible Credit Program. 
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/hardware.go b/vendor/github.com/softlayer/softlayer-go/datatypes/hardware.go new file mode 100644 index 000000000000..a5032bea2f5c --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/hardware.go @@ -0,0 +1,1938 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Hardware data type contains general information relating to a single SoftLayer hardware. +type Hardware struct { + Entity + + // The account associated with a piece of hardware. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A hardware's associated [[SoftLayer_Account|account]] id. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of a piece of hardware's active physical components. + ActiveComponentCount *uint `json:"activeComponentCount,omitempty" xmlrpc:"activeComponentCount,omitempty"` + + // A piece of hardware's active physical components. + ActiveComponents []Hardware_Component `json:"activeComponents,omitempty" xmlrpc:"activeComponents,omitempty"` + + // A piece of hardware's active network monitoring incidents. + ActiveNetworkMonitorIncident []Network_Monitor_Version1_Incident `json:"activeNetworkMonitorIncident,omitempty" xmlrpc:"activeNetworkMonitorIncident,omitempty"` + + // A count of a piece of hardware's active network monitoring incidents. + ActiveNetworkMonitorIncidentCount *uint `json:"activeNetworkMonitorIncidentCount,omitempty" xmlrpc:"activeNetworkMonitorIncidentCount,omitempty"` + + // A count of + AllPowerComponentCount *uint `json:"allPowerComponentCount,omitempty" xmlrpc:"allPowerComponentCount,omitempty"` + + // no documentation yet + AllPowerComponents []Hardware_Power_Component `json:"allPowerComponents,omitempty" xmlrpc:"allPowerComponents,omitempty"` + + // The SoftLayer_Network_Storage_Allowed_Host information to connect this server to Network Storage volumes that require access control lists. + AllowedHost *Network_Storage_Allowed_Host `json:"allowedHost,omitempty" xmlrpc:"allowedHost,omitempty"` + + // The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. + AllowedNetworkStorage []Network_Storage `json:"allowedNetworkStorage,omitempty" xmlrpc:"allowedNetworkStorage,omitempty"` + + // A count of the SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. + AllowedNetworkStorageCount *uint `json:"allowedNetworkStorageCount,omitempty" xmlrpc:"allowedNetworkStorageCount,omitempty"` + + // A count of the SoftLayer_Network_Storage objects whose Replica that this SoftLayer_Hardware has access to. 
+ AllowedNetworkStorageReplicaCount *uint `json:"allowedNetworkStorageReplicaCount,omitempty" xmlrpc:"allowedNetworkStorageReplicaCount,omitempty"` + + // The SoftLayer_Network_Storage objects whose Replica that this SoftLayer_Hardware has access to. + AllowedNetworkStorageReplicas []Network_Storage `json:"allowedNetworkStorageReplicas,omitempty" xmlrpc:"allowedNetworkStorageReplicas,omitempty"` + + // Information regarding an antivirus/spyware software component object. + AntivirusSpywareSoftwareComponent *Software_Component `json:"antivirusSpywareSoftwareComponent,omitempty" xmlrpc:"antivirusSpywareSoftwareComponent,omitempty"` + + // A count of information regarding a piece of hardware's specific attributes. + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // Information regarding a piece of hardware's specific attributes. + Attributes []Hardware_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // The average daily public bandwidth usage for the current billing cycle. + AverageDailyPublicBandwidthUsage *Float64 `json:"averageDailyPublicBandwidthUsage,omitempty" xmlrpc:"averageDailyPublicBandwidthUsage,omitempty"` + + // A count of a piece of hardware's back-end or private network components. + BackendNetworkComponentCount *uint `json:"backendNetworkComponentCount,omitempty" xmlrpc:"backendNetworkComponentCount,omitempty"` + + // A piece of hardware's back-end or private network components. + BackendNetworkComponents []Network_Component `json:"backendNetworkComponents,omitempty" xmlrpc:"backendNetworkComponents,omitempty"` + + // A count of a hardware's backend or private router. + BackendRouterCount *uint `json:"backendRouterCount,omitempty" xmlrpc:"backendRouterCount,omitempty"` + + // A hardware's backend or private router. + BackendRouters []Hardware `json:"backendRouters,omitempty" xmlrpc:"backendRouters,omitempty"` + + // A hardware's allotted bandwidth (measured in GB). + BandwidthAllocation *Float64 `json:"bandwidthAllocation,omitempty" xmlrpc:"bandwidthAllocation,omitempty"` + + // A hardware's allotted detail record. Allotment details link bandwidth allocation with allotments. + BandwidthAllotmentDetail *Network_Bandwidth_Version1_Allotment_Detail `json:"bandwidthAllotmentDetail,omitempty" xmlrpc:"bandwidthAllotmentDetail,omitempty"` + + // When true, this flag specifies that a hardware is Bare Metal Server. Bare Metal Servers are physical bare metal servers that are billed with the same options as Virtual Servers, with monthly and hourly rates. Bare Metal instances are ordered based on processor core count and ram amount. + BareMetalInstanceFlag *int `json:"bareMetalInstanceFlag,omitempty" xmlrpc:"bareMetalInstanceFlag,omitempty"` + + // A count of information regarding a piece of hardware's benchmark certifications. + BenchmarkCertificationCount *uint `json:"benchmarkCertificationCount,omitempty" xmlrpc:"benchmarkCertificationCount,omitempty"` + + // Information regarding a piece of hardware's benchmark certifications. + BenchmarkCertifications []Hardware_Benchmark_Certification `json:"benchmarkCertifications,omitempty" xmlrpc:"benchmarkCertifications,omitempty"` + + // Information regarding the billing item for a server. + BillingItem *Billing_Item_Hardware `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // A flag indicating that a billing item exists. 
+ BillingItemFlag *bool `json:"billingItemFlag,omitempty" xmlrpc:"billingItemFlag,omitempty"` + + // Determines whether the hardware is ineligible for cancellation because it is disconnected. + BlockCancelBecauseDisconnectedFlag *bool `json:"blockCancelBecauseDisconnectedFlag,omitempty" xmlrpc:"blockCancelBecauseDisconnectedFlag,omitempty"` + + // Status indicating whether or not a piece of hardware has business continuance insurance. + BusinessContinuanceInsuranceFlag *bool `json:"businessContinuanceInsuranceFlag,omitempty" xmlrpc:"businessContinuanceInsuranceFlag,omitempty"` + + // Child hardware. + ChildrenHardware []Hardware `json:"childrenHardware,omitempty" xmlrpc:"childrenHardware,omitempty"` + + // A count of child hardware. + ChildrenHardwareCount *uint `json:"childrenHardwareCount,omitempty" xmlrpc:"childrenHardwareCount,omitempty"` + + // A count of a piece of hardware's components. + ComponentCount *uint `json:"componentCount,omitempty" xmlrpc:"componentCount,omitempty"` + + // A piece of hardware's components. + Components []Hardware_Component `json:"components,omitempty" xmlrpc:"components,omitempty"` + + // A continuous data protection/server backup software component object. + ContinuousDataProtectionSoftwareComponent *Software_Component `json:"continuousDataProtectionSoftwareComponent,omitempty" xmlrpc:"continuousDataProtectionSoftwareComponent,omitempty"` + + // The current billable public outbound bandwidth for this hardware for the current billing cycle. + CurrentBillableBandwidthUsage *Float64 `json:"currentBillableBandwidthUsage,omitempty" xmlrpc:"currentBillableBandwidthUsage,omitempty"` + + // Information regarding the datacenter in which a piece of hardware resides. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // The name of the datacenter in which a piece of hardware resides. + DatacenterName *string `json:"datacenterName,omitempty" xmlrpc:"datacenterName,omitempty"` + + // Number of day(s) a server have been in spare pool. + DaysInSparePool *int `json:"daysInSparePool,omitempty" xmlrpc:"daysInSparePool,omitempty"` + + // A piece of hardware's local network domain name. + Domain *string `json:"domain,omitempty" xmlrpc:"domain,omitempty"` + + // All hardware that has uplink network connections to a piece of hardware. + DownlinkHardware []Hardware `json:"downlinkHardware,omitempty" xmlrpc:"downlinkHardware,omitempty"` + + // A count of all hardware that has uplink network connections to a piece of hardware. + DownlinkHardwareCount *uint `json:"downlinkHardwareCount,omitempty" xmlrpc:"downlinkHardwareCount,omitempty"` + + // All hardware that has uplink network connections to a piece of hardware. + DownlinkNetworkHardware []Hardware `json:"downlinkNetworkHardware,omitempty" xmlrpc:"downlinkNetworkHardware,omitempty"` + + // A count of all hardware that has uplink network connections to a piece of hardware. + DownlinkNetworkHardwareCount *uint `json:"downlinkNetworkHardwareCount,omitempty" xmlrpc:"downlinkNetworkHardwareCount,omitempty"` + + // A count of information regarding all servers attached to a piece of network hardware. + DownlinkServerCount *uint `json:"downlinkServerCount,omitempty" xmlrpc:"downlinkServerCount,omitempty"` + + // Information regarding all servers attached to a piece of network hardware. + DownlinkServers []Hardware `json:"downlinkServers,omitempty" xmlrpc:"downlinkServers,omitempty"` + + // A count of information regarding all virtual guests attached to a piece of network hardware. 
+ DownlinkVirtualGuestCount *uint `json:"downlinkVirtualGuestCount,omitempty" xmlrpc:"downlinkVirtualGuestCount,omitempty"` + + // Information regarding all virtual guests attached to a piece of network hardware. + DownlinkVirtualGuests []Virtual_Guest `json:"downlinkVirtualGuests,omitempty" xmlrpc:"downlinkVirtualGuests,omitempty"` + + // A count of all hardware downstream from a network device. + DownstreamHardwareBindingCount *uint `json:"downstreamHardwareBindingCount,omitempty" xmlrpc:"downstreamHardwareBindingCount,omitempty"` + + // All hardware downstream from a network device. + DownstreamHardwareBindings []Network_Component_Uplink_Hardware `json:"downstreamHardwareBindings,omitempty" xmlrpc:"downstreamHardwareBindings,omitempty"` + + // All network hardware downstream from the selected piece of hardware. + DownstreamNetworkHardware []Hardware `json:"downstreamNetworkHardware,omitempty" xmlrpc:"downstreamNetworkHardware,omitempty"` + + // A count of all network hardware downstream from the selected piece of hardware. + DownstreamNetworkHardwareCount *uint `json:"downstreamNetworkHardwareCount,omitempty" xmlrpc:"downstreamNetworkHardwareCount,omitempty"` + + // A count of all network hardware with monitoring warnings or errors that are downstream from the selected piece of hardware. + DownstreamNetworkHardwareWithIncidentCount *uint `json:"downstreamNetworkHardwareWithIncidentCount,omitempty" xmlrpc:"downstreamNetworkHardwareWithIncidentCount,omitempty"` + + // All network hardware with monitoring warnings or errors that are downstream from the selected piece of hardware. + DownstreamNetworkHardwareWithIncidents []Hardware `json:"downstreamNetworkHardwareWithIncidents,omitempty" xmlrpc:"downstreamNetworkHardwareWithIncidents,omitempty"` + + // A count of information regarding all servers attached downstream to a piece of network hardware. + DownstreamServerCount *uint `json:"downstreamServerCount,omitempty" xmlrpc:"downstreamServerCount,omitempty"` + + // Information regarding all servers attached downstream to a piece of network hardware. + DownstreamServers []Hardware `json:"downstreamServers,omitempty" xmlrpc:"downstreamServers,omitempty"` + + // A count of information regarding all virtual guests attached to a piece of network hardware. + DownstreamVirtualGuestCount *uint `json:"downstreamVirtualGuestCount,omitempty" xmlrpc:"downstreamVirtualGuestCount,omitempty"` + + // Information regarding all virtual guests attached to a piece of network hardware. + DownstreamVirtualGuests []Virtual_Guest `json:"downstreamVirtualGuests,omitempty" xmlrpc:"downstreamVirtualGuests,omitempty"` + + // A count of the drive controllers contained within a piece of hardware. + DriveControllerCount *uint `json:"driveControllerCount,omitempty" xmlrpc:"driveControllerCount,omitempty"` + + // The drive controllers contained within a piece of hardware. + DriveControllers []Hardware_Component `json:"driveControllers,omitempty" xmlrpc:"driveControllers,omitempty"` + + // Information regarding a piece of hardware's associated EVault network storage service account. + EvaultNetworkStorage []Network_Storage `json:"evaultNetworkStorage,omitempty" xmlrpc:"evaultNetworkStorage,omitempty"` + + // A count of information regarding a piece of hardware's associated EVault network storage service account. + EvaultNetworkStorageCount *uint `json:"evaultNetworkStorageCount,omitempty" xmlrpc:"evaultNetworkStorageCount,omitempty"` + + // Information regarding a piece of hardware's firewall services. 
+ FirewallServiceComponent *Network_Component_Firewall `json:"firewallServiceComponent,omitempty" xmlrpc:"firewallServiceComponent,omitempty"` + + // Defines the fixed components in a fixed configuration bare metal server. + FixedConfigurationPreset *Product_Package_Preset `json:"fixedConfigurationPreset,omitempty" xmlrpc:"fixedConfigurationPreset,omitempty"` + + // A count of a piece of hardware's front-end or public network components. + FrontendNetworkComponentCount *uint `json:"frontendNetworkComponentCount,omitempty" xmlrpc:"frontendNetworkComponentCount,omitempty"` + + // A piece of hardware's front-end or public network components. + FrontendNetworkComponents []Network_Component `json:"frontendNetworkComponents,omitempty" xmlrpc:"frontendNetworkComponents,omitempty"` + + // A count of a hardware's frontend or public router. + FrontendRouterCount *uint `json:"frontendRouterCount,omitempty" xmlrpc:"frontendRouterCount,omitempty"` + + // A hardware's frontend or public router. + FrontendRouters []Hardware `json:"frontendRouters,omitempty" xmlrpc:"frontendRouters,omitempty"` + + // A name reflecting the hostname and domain of the hardware. This is created from the combined values of the hardware's hostname and domain name automatically, and thus should not be edited directly. + FullyQualifiedDomainName *string `json:"fullyQualifiedDomainName,omitempty" xmlrpc:"fullyQualifiedDomainName,omitempty"` + + // A hardware's universally unique identifier. + GlobalIdentifier *string `json:"globalIdentifier,omitempty" xmlrpc:"globalIdentifier,omitempty"` + + // A count of the hard drives contained within a piece of hardware. + HardDriveCount *uint `json:"hardDriveCount,omitempty" xmlrpc:"hardDriveCount,omitempty"` + + // The hard drives contained within a piece of hardware. + HardDrives []Hardware_Component `json:"hardDrives,omitempty" xmlrpc:"hardDrives,omitempty"` + + // The chassis that a piece of hardware is housed in. + HardwareChassis *Hardware_Chassis `json:"hardwareChassis,omitempty" xmlrpc:"hardwareChassis,omitempty"` + + // A hardware's function. + HardwareFunction *Hardware_Function `json:"hardwareFunction,omitempty" xmlrpc:"hardwareFunction,omitempty"` + + // A hardware's function. + HardwareFunctionDescription *string `json:"hardwareFunctionDescription,omitempty" xmlrpc:"hardwareFunctionDescription,omitempty"` + + // A hardware's status. + HardwareStatus *Hardware_Status `json:"hardwareStatus,omitempty" xmlrpc:"hardwareStatus,omitempty"` + + // A number reflecting the state of a hardware + HardwareStatusId *int `json:"hardwareStatusId,omitempty" xmlrpc:"hardwareStatusId,omitempty"` + + // Determine in hardware object has TPM enabled. + HasTrustedPlatformModuleBillingItemFlag *bool `json:"hasTrustedPlatformModuleBillingItemFlag,omitempty" xmlrpc:"hasTrustedPlatformModuleBillingItemFlag,omitempty"` + + // Information regarding a host IPS software component object. + HostIpsSoftwareComponent *Software_Component `json:"hostIpsSoftwareComponent,omitempty" xmlrpc:"hostIpsSoftwareComponent,omitempty"` + + // A hardware's hostname + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // A server's hourly billing status. + HourlyBillingFlag *bool `json:"hourlyBillingFlag,omitempty" xmlrpc:"hourlyBillingFlag,omitempty"` + + // A hardware's internal identification number + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The sum of all the inbound network traffic data for the last 30 days. 
+ InboundBandwidthUsage *Float64 `json:"inboundBandwidthUsage,omitempty" xmlrpc:"inboundBandwidthUsage,omitempty"` + + // The total public inbound bandwidth for this hardware for the current billing cycle. + InboundPublicBandwidthUsage *Float64 `json:"inboundPublicBandwidthUsage,omitempty" xmlrpc:"inboundPublicBandwidthUsage,omitempty"` + + // Information regarding the last transaction a server performed. + LastTransaction *Provisioning_Version1_Transaction `json:"lastTransaction,omitempty" xmlrpc:"lastTransaction,omitempty"` + + // A piece of hardware's latest network monitoring incident. + LatestNetworkMonitorIncident *Network_Monitor_Version1_Incident `json:"latestNetworkMonitorIncident,omitempty" xmlrpc:"latestNetworkMonitorIncident,omitempty"` + + // Where a piece of hardware is located within SoftLayer's location hierarchy. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + LocationPathString *string `json:"locationPathString,omitempty" xmlrpc:"locationPathString,omitempty"` + + // Information regarding a lockbox account associated with a server. + LockboxNetworkStorage *Network_Storage `json:"lockboxNetworkStorage,omitempty" xmlrpc:"lockboxNetworkStorage,omitempty"` + + // A flag indicating that the hardware is a managed resource. + ManagedResourceFlag *bool `json:"managedResourceFlag,omitempty" xmlrpc:"managedResourceFlag,omitempty"` + + // A hardware's serial number that is supplied by the manufacturer. + ManufacturerSerialNumber *string `json:"manufacturerSerialNumber,omitempty" xmlrpc:"manufacturerSerialNumber,omitempty"` + + // Information regarding a piece of hardware's memory. + Memory []Hardware_Component `json:"memory,omitempty" xmlrpc:"memory,omitempty"` + + // The amount of memory a piece of hardware has, measured in gigabytes. + MemoryCapacity *uint `json:"memoryCapacity,omitempty" xmlrpc:"memoryCapacity,omitempty"` + + // A count of information regarding a piece of hardware's memory. + MemoryCount *uint `json:"memoryCount,omitempty" xmlrpc:"memoryCount,omitempty"` + + // A piece of hardware's metric tracking object. + MetricTrackingObject *Metric_Tracking_Object_HardwareServer `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"` + + // A count of information regarding the monitoring agents associated with a piece of hardware. + MonitoringAgentCount *uint `json:"monitoringAgentCount,omitempty" xmlrpc:"monitoringAgentCount,omitempty"` + + // Information regarding the monitoring agents associated with a piece of hardware. + MonitoringAgents []Monitoring_Agent `json:"monitoringAgents,omitempty" xmlrpc:"monitoringAgents,omitempty"` + + // Information regarding the hardware's monitoring robot. + MonitoringRobot *Monitoring_Robot `json:"monitoringRobot,omitempty" xmlrpc:"monitoringRobot,omitempty"` + + // Information regarding a piece of hardware's network monitoring services. + MonitoringServiceComponent *Network_Monitor_Version1_Query_Host_Stratum `json:"monitoringServiceComponent,omitempty" xmlrpc:"monitoringServiceComponent,omitempty"` + + // The monitoring service flag eligibility status for a piece of hardware. + MonitoringServiceEligibilityFlag *bool `json:"monitoringServiceEligibilityFlag,omitempty" xmlrpc:"monitoringServiceEligibilityFlag,omitempty"` + + // The service flag status for a piece of hardware. + MonitoringServiceFlag *bool `json:"monitoringServiceFlag,omitempty" xmlrpc:"monitoringServiceFlag,omitempty"` + + // Information regarding a piece of hardware's motherboard. 
+ Motherboard *Hardware_Component `json:"motherboard,omitempty" xmlrpc:"motherboard,omitempty"` + + // A count of information regarding a piece of hardware's network cards. + NetworkCardCount *uint `json:"networkCardCount,omitempty" xmlrpc:"networkCardCount,omitempty"` + + // Information regarding a piece of hardware's network cards. + NetworkCards []Hardware_Component `json:"networkCards,omitempty" xmlrpc:"networkCards,omitempty"` + + // A count of returns a hardware's network components. + NetworkComponentCount *uint `json:"networkComponentCount,omitempty" xmlrpc:"networkComponentCount,omitempty"` + + // Returns a hardware's network components. + NetworkComponents []Network_Component `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"` + + // The gateway member if this device is part of a network gateway. + NetworkGatewayMember *Network_Gateway_Member `json:"networkGatewayMember,omitempty" xmlrpc:"networkGatewayMember,omitempty"` + + // Whether or not this device is part of a network gateway. + NetworkGatewayMemberFlag *bool `json:"networkGatewayMemberFlag,omitempty" xmlrpc:"networkGatewayMemberFlag,omitempty"` + + // A piece of hardware's network management IP address. + NetworkManagementIpAddress *string `json:"networkManagementIpAddress,omitempty" xmlrpc:"networkManagementIpAddress,omitempty"` + + // All servers with failed monitoring that are attached downstream to a piece of hardware. + NetworkMonitorAttachedDownHardware []Hardware `json:"networkMonitorAttachedDownHardware,omitempty" xmlrpc:"networkMonitorAttachedDownHardware,omitempty"` + + // A count of all servers with failed monitoring that are attached downstream to a piece of hardware. + NetworkMonitorAttachedDownHardwareCount *uint `json:"networkMonitorAttachedDownHardwareCount,omitempty" xmlrpc:"networkMonitorAttachedDownHardwareCount,omitempty"` + + // A count of virtual guests that are attached downstream to a hardware that have failed monitoring + NetworkMonitorAttachedDownVirtualGuestCount *uint `json:"networkMonitorAttachedDownVirtualGuestCount,omitempty" xmlrpc:"networkMonitorAttachedDownVirtualGuestCount,omitempty"` + + // Virtual guests that are attached downstream to a hardware that have failed monitoring + NetworkMonitorAttachedDownVirtualGuests []Virtual_Guest `json:"networkMonitorAttachedDownVirtualGuests,omitempty" xmlrpc:"networkMonitorAttachedDownVirtualGuests,omitempty"` + + // A count of information regarding a piece of hardware's network monitors. + NetworkMonitorCount *uint `json:"networkMonitorCount,omitempty" xmlrpc:"networkMonitorCount,omitempty"` + + // A count of the status of all of a piece of hardware's network monitoring incidents. + NetworkMonitorIncidentCount *uint `json:"networkMonitorIncidentCount,omitempty" xmlrpc:"networkMonitorIncidentCount,omitempty"` + + // The status of all of a piece of hardware's network monitoring incidents. + NetworkMonitorIncidents []Network_Monitor_Version1_Incident `json:"networkMonitorIncidents,omitempty" xmlrpc:"networkMonitorIncidents,omitempty"` + + // Information regarding a piece of hardware's network monitors. + NetworkMonitors []Network_Monitor_Version1_Query_Host `json:"networkMonitors,omitempty" xmlrpc:"networkMonitors,omitempty"` + + // The value of a hardware's network status attribute. + NetworkStatus *string `json:"networkStatus,omitempty" xmlrpc:"networkStatus,omitempty"` + + // The hardware's related network status attribute. 
+ NetworkStatusAttribute *Hardware_Attribute `json:"networkStatusAttribute,omitempty" xmlrpc:"networkStatusAttribute,omitempty"` + + // Information regarding a piece of hardware's associated network storage service account. + NetworkStorage []Network_Storage `json:"networkStorage,omitempty" xmlrpc:"networkStorage,omitempty"` + + // A count of information regarding a piece of hardware's associated network storage service account. + NetworkStorageCount *uint `json:"networkStorageCount,omitempty" xmlrpc:"networkStorageCount,omitempty"` + + // A count of the network virtual LANs (VLANs) associated with a piece of hardware's network components. + NetworkVlanCount *uint `json:"networkVlanCount,omitempty" xmlrpc:"networkVlanCount,omitempty"` + + // The network virtual LANs (VLANs) associated with a piece of hardware's network components. + NetworkVlans []Network_Vlan `json:"networkVlans,omitempty" xmlrpc:"networkVlans,omitempty"` + + // A hardware's allotted bandwidth for the next billing cycle (measured in GB). + NextBillingCycleBandwidthAllocation *Float64 `json:"nextBillingCycleBandwidthAllocation,omitempty" xmlrpc:"nextBillingCycleBandwidthAllocation,omitempty"` + + // A small note about a piece of hardware to use at your discretion. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // no documentation yet + NotesHistory []Hardware_Note `json:"notesHistory,omitempty" xmlrpc:"notesHistory,omitempty"` + + // A count of + NotesHistoryCount *uint `json:"notesHistoryCount,omitempty" xmlrpc:"notesHistoryCount,omitempty"` + + // Information regarding a piece of hardware's operating system. + OperatingSystem *Software_Component_OperatingSystem `json:"operatingSystem,omitempty" xmlrpc:"operatingSystem,omitempty"` + + // A hardware's operating system software description. + OperatingSystemReferenceCode *string `json:"operatingSystemReferenceCode,omitempty" xmlrpc:"operatingSystemReferenceCode,omitempty"` + + // The sum of all the outbound network traffic data for the last 30 days. + OutboundBandwidthUsage *Float64 `json:"outboundBandwidthUsage,omitempty" xmlrpc:"outboundBandwidthUsage,omitempty"` + + // The total public outbound bandwidth for this hardware for the current billing cycle. + OutboundPublicBandwidthUsage *Float64 `json:"outboundPublicBandwidthUsage,omitempty" xmlrpc:"outboundPublicBandwidthUsage,omitempty"` + + // Blade Bay + ParentBay *Hardware_Blade `json:"parentBay,omitempty" xmlrpc:"parentBay,omitempty"` + + // Parent Hardware. + ParentHardware *Hardware `json:"parentHardware,omitempty" xmlrpc:"parentHardware,omitempty"` + + // Information regarding the Point of Presence (PoP) location in which a piece of hardware resides. + PointOfPresenceLocation *Location `json:"pointOfPresenceLocation,omitempty" xmlrpc:"pointOfPresenceLocation,omitempty"` + + // URI of the script to be downloaded and executed after installation is complete. + PostInstallScriptUri *string `json:"postInstallScriptUri,omitempty" xmlrpc:"postInstallScriptUri,omitempty"` + + // A count of the power components for a hardware object. + PowerComponentCount *uint `json:"powerComponentCount,omitempty" xmlrpc:"powerComponentCount,omitempty"` + + // The power components for a hardware object. + PowerComponents []Hardware_Power_Component `json:"powerComponents,omitempty" xmlrpc:"powerComponents,omitempty"` + + // Information regarding a piece of hardware's power supply. 
+ PowerSupply []Hardware_Component `json:"powerSupply,omitempty" xmlrpc:"powerSupply,omitempty"` + + // A count of information regarding a piece of hardware's power supply. + PowerSupplyCount *uint `json:"powerSupplyCount,omitempty" xmlrpc:"powerSupplyCount,omitempty"` + + // The hardware's primary private IP address. + PrimaryBackendIpAddress *string `json:"primaryBackendIpAddress,omitempty" xmlrpc:"primaryBackendIpAddress,omitempty"` + + // Information regarding the hardware's primary back-end network component. + PrimaryBackendNetworkComponent *Network_Component `json:"primaryBackendNetworkComponent,omitempty" xmlrpc:"primaryBackendNetworkComponent,omitempty"` + + // The hardware's primary public IP address. + PrimaryIpAddress *string `json:"primaryIpAddress,omitempty" xmlrpc:"primaryIpAddress,omitempty"` + + // Information regarding the hardware's primary public network component. + PrimaryNetworkComponent *Network_Component `json:"primaryNetworkComponent,omitempty" xmlrpc:"primaryNetworkComponent,omitempty"` + + // Whether the hardware only has access to the private network. + PrivateNetworkOnlyFlag *bool `json:"privateNetworkOnlyFlag,omitempty" xmlrpc:"privateNetworkOnlyFlag,omitempty"` + + // The total number of processor cores, summed from all processors that are attached to a piece of hardware + ProcessorCoreAmount *uint `json:"processorCoreAmount,omitempty" xmlrpc:"processorCoreAmount,omitempty"` + + // A count of information regarding a piece of hardware's processors. + ProcessorCount *uint `json:"processorCount,omitempty" xmlrpc:"processorCount,omitempty"` + + // The total number of physical processor cores, summed from all processors that are attached to a piece of hardware + ProcessorPhysicalCoreAmount *uint `json:"processorPhysicalCoreAmount,omitempty" xmlrpc:"processorPhysicalCoreAmount,omitempty"` + + // Information regarding a piece of hardware's processors. + Processors []Hardware_Component `json:"processors,omitempty" xmlrpc:"processors,omitempty"` + + // no documentation yet + ProvisionDate *Time `json:"provisionDate,omitempty" xmlrpc:"provisionDate,omitempty"` + + // no documentation yet + Rack *Location `json:"rack,omitempty" xmlrpc:"rack,omitempty"` + + // A count of the RAID controllers contained within a piece of hardware. + RaidControllerCount *uint `json:"raidControllerCount,omitempty" xmlrpc:"raidControllerCount,omitempty"` + + // The RAID controllers contained within a piece of hardware. + RaidControllers []Hardware_Component `json:"raidControllers,omitempty" xmlrpc:"raidControllers,omitempty"` + + // A count of recent events that impact this hardware. + RecentEventCount *uint `json:"recentEventCount,omitempty" xmlrpc:"recentEventCount,omitempty"` + + // Recent events that impact this hardware. + RecentEvents []Notification_Occurrence_Event `json:"recentEvents,omitempty" xmlrpc:"recentEvents,omitempty"` + + // A count of user credentials to issue commands and/or interact with the server's remote management card. + RemoteManagementAccountCount *uint `json:"remoteManagementAccountCount,omitempty" xmlrpc:"remoteManagementAccountCount,omitempty"` + + // User credentials to issue commands and/or interact with the server's remote management card. + RemoteManagementAccounts []Hardware_Component_RemoteManagement_User `json:"remoteManagementAccounts,omitempty" xmlrpc:"remoteManagementAccounts,omitempty"` + + // A hardware's associated remote management component. This is normally IPMI. 
+ RemoteManagementComponent *Network_Component `json:"remoteManagementComponent,omitempty" xmlrpc:"remoteManagementComponent,omitempty"` + + // A count of + ResourceConfigurationCount *uint `json:"resourceConfigurationCount,omitempty" xmlrpc:"resourceConfigurationCount,omitempty"` + + // no documentation yet + ResourceConfigurations []Hardware_Resource_Configuration `json:"resourceConfigurations,omitempty" xmlrpc:"resourceConfigurations,omitempty"` + + // A count of the resource groups in which this hardware is a member. + ResourceGroupCount *uint `json:"resourceGroupCount,omitempty" xmlrpc:"resourceGroupCount,omitempty"` + + // A count of + ResourceGroupMemberReferenceCount *uint `json:"resourceGroupMemberReferenceCount,omitempty" xmlrpc:"resourceGroupMemberReferenceCount,omitempty"` + + // no documentation yet + ResourceGroupMemberReferences []Resource_Group_Member `json:"resourceGroupMemberReferences,omitempty" xmlrpc:"resourceGroupMemberReferences,omitempty"` + + // A count of + ResourceGroupRoleCount *uint `json:"resourceGroupRoleCount,omitempty" xmlrpc:"resourceGroupRoleCount,omitempty"` + + // no documentation yet + ResourceGroupRoles []Resource_Group_Role `json:"resourceGroupRoles,omitempty" xmlrpc:"resourceGroupRoles,omitempty"` + + // The resource groups in which this hardware is a member. + ResourceGroups []Resource_Group `json:"resourceGroups,omitempty" xmlrpc:"resourceGroups,omitempty"` + + // A count of a hardware's routers. + RouterCount *uint `json:"routerCount,omitempty" xmlrpc:"routerCount,omitempty"` + + // A hardware's routers. + Routers []Hardware `json:"routers,omitempty" xmlrpc:"routers,omitempty"` + + // A count of collection of scale assets this hardware corresponds to. + ScaleAssetCount *uint `json:"scaleAssetCount,omitempty" xmlrpc:"scaleAssetCount,omitempty"` + + // Collection of scale assets this hardware corresponds to. + ScaleAssets []Scale_Asset `json:"scaleAssets,omitempty" xmlrpc:"scaleAssets,omitempty"` + + // A count of information regarding a piece of hardware's vulnerability scan requests. + SecurityScanRequestCount *uint `json:"securityScanRequestCount,omitempty" xmlrpc:"securityScanRequestCount,omitempty"` + + // Information regarding a piece of hardware's vulnerability scan requests. + SecurityScanRequests []Network_Security_Scanner_Request `json:"securityScanRequests,omitempty" xmlrpc:"securityScanRequests,omitempty"` + + // A hardware's serial number that is supplied by SoftLayer. + SerialNumber *string `json:"serialNumber,omitempty" xmlrpc:"serialNumber,omitempty"` + + // Information regarding the server room in which the hardware is located. + ServerRoom *Location `json:"serverRoom,omitempty" xmlrpc:"serverRoom,omitempty"` + + // Information regarding the piece of hardware's service provider. + ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"` + + // no documentation yet + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` + + // A hardware's internal identification number at its service provider + ServiceProviderResourceId *int `json:"serviceProviderResourceId,omitempty" xmlrpc:"serviceProviderResourceId,omitempty"` + + // A count of information regarding a piece of hardware's installed software. + SoftwareComponentCount *uint `json:"softwareComponentCount,omitempty" xmlrpc:"softwareComponentCount,omitempty"` + + // Information regarding a piece of hardware's installed software. 
+ SoftwareComponents []Software_Component `json:"softwareComponents,omitempty" xmlrpc:"softwareComponents,omitempty"` + + // Information regarding the billing item for a spare pool server. + SparePoolBillingItem *Billing_Item_Hardware `json:"sparePoolBillingItem,omitempty" xmlrpc:"sparePoolBillingItem,omitempty"` + + // A count of sSH keys to be installed on the server during provisioning or an OS reload. + SshKeyCount *uint `json:"sshKeyCount,omitempty" xmlrpc:"sshKeyCount,omitempty"` + + // SSH keys to be installed on the server during provisioning or an OS reload. + SshKeys []Security_Ssh_Key `json:"sshKeys,omitempty" xmlrpc:"sshKeys,omitempty"` + + // A count of + StorageNetworkComponentCount *uint `json:"storageNetworkComponentCount,omitempty" xmlrpc:"storageNetworkComponentCount,omitempty"` + + // no documentation yet + StorageNetworkComponents []Network_Component `json:"storageNetworkComponents,omitempty" xmlrpc:"storageNetworkComponents,omitempty"` + + // A count of + TagReferenceCount *uint `json:"tagReferenceCount,omitempty" xmlrpc:"tagReferenceCount,omitempty"` + + // no documentation yet + TagReferences []Tag_Reference `json:"tagReferences,omitempty" xmlrpc:"tagReferences,omitempty"` + + // no documentation yet + TopLevelLocation *Location `json:"topLevelLocation,omitempty" xmlrpc:"topLevelLocation,omitempty"` + + // An account's associated upgrade request object, if any. + UpgradeRequest *Product_Upgrade_Request `json:"upgradeRequest,omitempty" xmlrpc:"upgradeRequest,omitempty"` + + // The network device connected to a piece of hardware. + UplinkHardware *Hardware `json:"uplinkHardware,omitempty" xmlrpc:"uplinkHardware,omitempty"` + + // A count of information regarding the network component that is one level higher than a piece of hardware on the network infrastructure. + UplinkNetworkComponentCount *uint `json:"uplinkNetworkComponentCount,omitempty" xmlrpc:"uplinkNetworkComponentCount,omitempty"` + + // Information regarding the network component that is one level higher than a piece of hardware on the network infrastructure. + UplinkNetworkComponents []Network_Component `json:"uplinkNetworkComponents,omitempty" xmlrpc:"uplinkNetworkComponents,omitempty"` + + // An array containing a single string of custom user data for a hardware order. Max size is 16 kb. + UserData []Hardware_Attribute `json:"userData,omitempty" xmlrpc:"userData,omitempty"` + + // A count of an array containing a single string of custom user data for a hardware order. Max size is 16 kb. + UserDataCount *uint `json:"userDataCount,omitempty" xmlrpc:"userDataCount,omitempty"` + + // Information regarding the virtual chassis for a piece of hardware. + VirtualChassis *Hardware_Group `json:"virtualChassis,omitempty" xmlrpc:"virtualChassis,omitempty"` + + // A count of information regarding the virtual chassis siblings for a piece of hardware. + VirtualChassisSiblingCount *uint `json:"virtualChassisSiblingCount,omitempty" xmlrpc:"virtualChassisSiblingCount,omitempty"` + + // Information regarding the virtual chassis siblings for a piece of hardware. + VirtualChassisSiblings []Hardware `json:"virtualChassisSiblings,omitempty" xmlrpc:"virtualChassisSiblings,omitempty"` + + // A piece of hardware's virtual host record. + VirtualHost *Virtual_Host `json:"virtualHost,omitempty" xmlrpc:"virtualHost,omitempty"` + + // A count of information regarding a piece of hardware's virtual software licenses. 
+ VirtualLicenseCount *uint `json:"virtualLicenseCount,omitempty" xmlrpc:"virtualLicenseCount,omitempty"` + + // Information regarding a piece of hardware's virtual software licenses. + VirtualLicenses []Software_VirtualLicense `json:"virtualLicenses,omitempty" xmlrpc:"virtualLicenses,omitempty"` + + // Information regarding the bandwidth allotment to which a piece of hardware belongs. + VirtualRack *Network_Bandwidth_Version1_Allotment `json:"virtualRack,omitempty" xmlrpc:"virtualRack,omitempty"` + + // The name of the bandwidth allotment belonging to a piece of hardware. + VirtualRackId *int `json:"virtualRackId,omitempty" xmlrpc:"virtualRackId,omitempty"` + + // The name of the bandwidth allotment belonging to a piece of hardware. + VirtualRackName *string `json:"virtualRackName,omitempty" xmlrpc:"virtualRackName,omitempty"` + + // A piece of hardware's virtualization platform software. + VirtualizationPlatform *Software_Component `json:"virtualizationPlatform,omitempty" xmlrpc:"virtualizationPlatform,omitempty"` +} + +// The SoftLayer_Hardware_Attribute type contains general information for a hardware attribute. Hardware attributes can be assigned to specific hardware objects to describe relatively arbitrary information. +type Hardware_Attribute struct { + Entity + + // The type of hardware attribute that this represents. + HardwareAttributeType *Hardware_Attribute_Type `json:"hardwareAttributeType,omitempty" xmlrpc:"hardwareAttributeType,omitempty"` + + // The unique identifier of a hardware attribute's type. + HardwareAttributeTypeId *int `json:"hardwareAttributeTypeId,omitempty" xmlrpc:"hardwareAttributeTypeId,omitempty"` + + // A hardware attribute's unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A hardware attribute's value. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// Retrieve attributes associated with a hardware object. +type Hardware_Attribute_Type struct { + Entity + + // The attribute type key name or code. + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // The attribute type name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Hardware_Attribute_UserData struct { + Hardware_Attribute +} + +// The SoftLayer_Hardware_Benchmark_Certification data type contains general information relating to a single SoftLayer hardware benchmark certification document. +type Hardware_Benchmark_Certification struct { + Entity + + // Information regarding a benchmark certification result's associated SoftLayer customer account. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The internal identifier of the SoftLayer customer account associated with a benchmark certification result. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The date that a benchmark certification result was generated. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Information regarding the piece of hardware on which a benchmark certification test was performed. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // A benchmark certification results's associated hardware's internal identification number. 
+ HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` +} + +// no documentation yet +type Hardware_Blade struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Disabled *int `json:"disabled,omitempty" xmlrpc:"disabled,omitempty"` + + // no documentation yet + HardwareChild *Hardware `json:"hardwareChild,omitempty" xmlrpc:"hardwareChild,omitempty"` + + // no documentation yet + HardwareChildId *int `json:"hardwareChildId,omitempty" xmlrpc:"hardwareChildId,omitempty"` + + // no documentation yet + HardwareParent *Hardware `json:"hardwareParent,omitempty" xmlrpc:"hardwareParent,omitempty"` + + // no documentation yet + HardwareParentId *int `json:"hardwareParentId,omitempty" xmlrpc:"hardwareParentId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The name of this blade as referenced by the operating system. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Every piece of hardware in SoftLayer's datacenters, including customer servers, is housed in one of many hardware chassis. The SoftLayer_Hardware_Chassis data type defines these chassis. +type Hardware_Chassis struct { + Entity + + // no documentation yet + BackplaneCapacity *string `json:"backplaneCapacity,omitempty" xmlrpc:"backplaneCapacity,omitempty"` + + // no documentation yet + BayCapacity *string `json:"bayCapacity,omitempty" xmlrpc:"bayCapacity,omitempty"` + + // no documentation yet + DriveCapacity *string `json:"driveCapacity,omitempty" xmlrpc:"driveCapacity,omitempty"` + + // no documentation yet + DriveControllerCapacity *string `json:"driveControllerCapacity,omitempty" xmlrpc:"driveControllerCapacity,omitempty"` + + // A hardware form factor internal identifier. + FormFactorId *int `json:"formFactorId,omitempty" xmlrpc:"formFactorId,omitempty"` + + // no documentation yet + GpuCapacity *string `json:"gpuCapacity,omitempty" xmlrpc:"gpuCapacity,omitempty"` + + // A hardware's function. + HardwareFunction *Hardware_Function `json:"hardwareFunction,omitempty" xmlrpc:"hardwareFunction,omitempty"` + + // A hardware chassis' internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A hardware chassis' manufacturer. + Manufacturer *string `json:"manufacturer,omitempty" xmlrpc:"manufacturer,omitempty"` + + // no documentation yet + ModuleCapacity *string `json:"moduleCapacity,omitempty" xmlrpc:"moduleCapacity,omitempty"` + + // A hardware chassis' name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + PowerCapacity *string `json:"powerCapacity,omitempty" xmlrpc:"powerCapacity,omitempty"` + + // The physical size of a hardware chassis. Currently this relates to the 'U' size of a chassis by default. + UnitSize *int `json:"unitSize,omitempty" xmlrpc:"unitSize,omitempty"` + + // A hardware chassis' revision number. + Version *string `json:"version,omitempty" xmlrpc:"version,omitempty"` +} + +// The SoftLayer_Hardware_Component data type abstracts information related to a hardware component. +type Hardware_Component struct { + Entity + + // A component's capacity. + Capacity *Float64 `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"` + + // A component's sub-components. Devices that are usually integrated or in some way attached to a component.
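+ // Editor's note: an illustrative sketch, not part of the generated SoftLayer bindings.
+ // The Parent/Children fields below form a component tree that a consumer of this
+ // package could walk roughly as follows (the helper name componentNames is hypothetical):
+ //
+ //	func componentNames(c Hardware_Component) []string {
+ //		var names []string
+ //		if c.Name != nil {
+ //			names = append(names, *c.Name)
+ //		}
+ //		for _, child := range c.Children {
+ //			names = append(names, componentNames(child)...)
+ //		}
+ //		return names
+ //	}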
+ Children []Hardware_Component `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of a component's sub-components. Devices that are usually integrated or in some way attached to a component. + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // A component's revision. + ComponentRevision *string `json:"componentRevision,omitempty" xmlrpc:"componentRevision,omitempty"` + + // A count of + DownlinkHardwareComponentCount *uint `json:"downlinkHardwareComponentCount,omitempty" xmlrpc:"downlinkHardwareComponentCount,omitempty"` + + // no documentation yet + DownlinkHardwareComponents []Hardware_Component `json:"downlinkHardwareComponents,omitempty" xmlrpc:"downlinkHardwareComponents,omitempty"` + + // The hardware object that this component belongs to. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The general group of component models. + HardwareComponentModel *Hardware_Component_Model `json:"hardwareComponentModel,omitempty" xmlrpc:"hardwareComponentModel,omitempty"` + + // The internal identifier of a hardware component's component model. + HardwareComponentModelId *int `json:"hardwareComponentModelId,omitempty" xmlrpc:"hardwareComponentModelId,omitempty"` + + // A component's type. + HardwareComponentType *Hardware_Component_Type `json:"hardwareComponentType,omitempty" xmlrpc:"hardwareComponentType,omitempty"` + + // The internal identifier of the hardware that a hardware component resides inside. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // A hardware component's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A component's M.2 SATA capacity. + M2SataSlotCapacity *string `json:"m2SataSlotCapacity,omitempty" xmlrpc:"m2SataSlotCapacity,omitempty"` + + // The date that a hardware component was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A count of the module's hardware components + ModuleComponentCount *uint `json:"moduleComponentCount,omitempty" xmlrpc:"moduleComponentCount,omitempty"` + + // The module's hardware components + ModuleComponents []Hardware_Component `json:"moduleComponents,omitempty" xmlrpc:"moduleComponents,omitempty"` + + // A count of the module's hardware components + ModuleHardwareComponentCount *uint `json:"moduleHardwareComponentCount,omitempty" xmlrpc:"moduleHardwareComponentCount,omitempty"` + + // The module's hardware components + ModuleHardwareComponents []Hardware_Component `json:"moduleHardwareComponents,omitempty" xmlrpc:"moduleHardwareComponents,omitempty"` + + // A count of the module's network components + ModuleNetworkComponentCount *uint `json:"moduleNetworkComponentCount,omitempty" xmlrpc:"moduleNetworkComponentCount,omitempty"` + + // The module's network components + ModuleNetworkComponents []Hardware_Component `json:"moduleNetworkComponents,omitempty" xmlrpc:"moduleNetworkComponents,omitempty"` + + // The name of this component as referenced by the operating system.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of the component's local Ethernet and remote management interfaces + NetworkComponentCount *uint `json:"networkComponentCount,omitempty" xmlrpc:"networkComponentCount,omitempty"` + + // The component's local Ethernet and remote management interfaces + NetworkComponents []Network_Component `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"` + + // The account this component belongs to. + Owner *Account `json:"owner,omitempty" xmlrpc:"owner,omitempty"` + + // A component's parent. Devices that are usually integrated or in some way attached to a component. + Parent *Hardware_Component `json:"parent,omitempty" xmlrpc:"parent,omitempty"` + + // no documentation yet + ParentModule *Hardware_Component `json:"parentModule,omitempty" xmlrpc:"parentModule,omitempty"` + + // no documentation yet + PrefixAttribute *Hardware_Component_Model_Generic_Attribute `json:"prefixAttribute,omitempty" xmlrpc:"prefixAttribute,omitempty"` + + // A RAID controller's RAID mode. + RaidMode *string `json:"raidMode,omitempty" xmlrpc:"raidMode,omitempty"` + + // The component revision designation. + Revision *Hardware_Component_Revision `json:"revision,omitempty" xmlrpc:"revision,omitempty"` + + // The component serial number. + SerialNumber *string `json:"serialNumber,omitempty" xmlrpc:"serialNumber,omitempty"` + + // no documentation yet + ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"` + + // A hardware's internal identification number at its service provider + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` + + // A count of + UplinkHardwareComponentCount *uint `json:"uplinkHardwareComponentCount,omitempty" xmlrpc:"uplinkHardwareComponentCount,omitempty"` + + // no documentation yet + UplinkHardwareComponents []Hardware_Component `json:"uplinkHardwareComponents,omitempty" xmlrpc:"uplinkHardwareComponents,omitempty"` +} + +// The SoftLayer_Hardware_Component_Attribute data type contains general information relating to a single hardware setting or attribute for a component model. For Example: A RAID controller may be set up for many different RAID configurations. A RAID controller with a configuration of RAID-1 will have a single attribute for this RAID setting. +type Hardware_Component_Attribute struct { + Entity + + // A hardware component attribute's associated [[SoftLayer_Hardware_Component|Hardware Component]]. + HardwareComponent *Hardware_Component `json:"hardwareComponent,omitempty" xmlrpc:"hardwareComponent,omitempty"` + + // A hardware component attribute's associated [[SoftLayer_Hardware_Component_Attribute_Type|type]]. + HardwareComponentAttributeType *Hardware_Component_Attribute_Type `json:"hardwareComponentAttributeType,omitempty" xmlrpc:"hardwareComponentAttributeType,omitempty"` + + // A hardware component attribute's associated [[SoftLayer_Hardware_Component_Attribute_Type|type]] Id. + HardwareComponentAttributeTypeId *int `json:"hardwareComponentAttributeTypeId,omitempty" xmlrpc:"hardwareComponentAttributeTypeId,omitempty"` + + // A hardware component attribute's associated [[SoftLayer_Hardware_Component|hardware component]] Id. + HardwareComponentId *int `json:"hardwareComponentId,omitempty" xmlrpc:"hardwareComponentId,omitempty"` + + // A hardware component attribute's value. A value can have many different values depending on the attribute's [[SoftLayer_Hardware_Component_Attribute_Type|type]].
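+ // Editor's note: an illustrative sketch, not part of the generated SoftLayer bindings.
+ // Following the RAID example in the type comment above, a RAID-1 setting surfaced
+ // through this data type might be populated roughly like this (the controller id and
+ // value below are made up):
+ //
+ //	raidSetting := "RAID-1"
+ //	controllerId := 12345 // hypothetical RAID controller component id
+ //	attr := Hardware_Component_Attribute{
+ //		HardwareComponentId: &controllerId,
+ //		Value:               &raidSetting,
+ //	}
+ //	_ = attr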
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_Hardware_Component_Attribute_Type data type contains general information for the type of an attribute for a hardware component. +type Hardware_Component_Attribute_Type struct { + Entity + + // The description for the date that a hardware component attribute type's [[SoftLayer_Hardware_Component_Attribute|Attribute]] contains. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A hardware component attribute type's Id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A hardware component attribute type's unique name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A hardware component attribute type's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Hardware_Component_DriveController data type abstracts information related to a drive controller. +type Hardware_Component_DriveController struct { + Hardware_Component +} + +// no documentation yet +type Hardware_Component_Firmware struct { + Entity + + // A count of + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // no documentation yet + Attributes []Hardware_Component_Firmware_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // no documentation yet + BuildDate *Time `json:"buildDate,omitempty" xmlrpc:"buildDate,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The Hardware Component Model this Firmware applies to. + HardwareComponentModel *Hardware_Component_Model `json:"hardwareComponentModel,omitempty" xmlrpc:"hardwareComponentModel,omitempty"` + + // no documentation yet + HardwareComponentModelId *int `json:"hardwareComponentModelId,omitempty" xmlrpc:"hardwareComponentModelId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + IsQualified *int `json:"isQualified,omitempty" xmlrpc:"isQualified,omitempty"` + + // no documentation yet + ReleaseNotes *string `json:"releaseNotes,omitempty" xmlrpc:"releaseNotes,omitempty"` + + // no documentation yet + Version *string `json:"version,omitempty" xmlrpc:"version,omitempty"` +} + +// The SoftLayer_Hardware_Component_Firmware_Attribute data type contains general information for a hardware model's firmware. +type Hardware_Component_Firmware_Attribute struct { + Entity + + // A hardware component firmware attribute's associated [[SoftLayer_Hardware_Component_Firmware|firmware]]. + Firmware *Hardware_Component_Firmware `json:"firmware,omitempty" xmlrpc:"firmware,omitempty"` + + // A hardware component firmware attribute's firmware Id. + FirmwareId *int `json:"firmwareId,omitempty" xmlrpc:"firmwareId,omitempty"` + + // A hardware component firmware attribute's Id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A hardware component firmware attribute's associated [[SoftLayer_Hardware_Component_Firmware_Attribute_Type|type]]. + Type *Hardware_Component_Firmware_Attribute_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // A hardware component firmware attribute's type Id. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // A hardware component firmware attribute's value. 
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_Hardware_Component_Firmware_Attribute_Type data type defines attribute types for a hardware component model's firmware. +type Hardware_Component_Firmware_Attribute_Type struct { + Entity + + // The description for the date that a hardware component attribute type's [[SoftLayer_Hardware_Component_Attribute|Attribute]] contains. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A hardware component firmware attribute type's Id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A hardware component firmware attribute type's unique name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A hardware component firmware attribute type's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Hardware_Component_HardDrive data type abstracts information related to a hard drive. +type Hardware_Component_HardDrive struct { + Hardware_Component + + // A count of the attached component partitions. + PartitionCount *uint `json:"partitionCount,omitempty" xmlrpc:"partitionCount,omitempty"` + + // The attached component partitions. + Partitions []Hardware_Component_Partition `json:"partitions,omitempty" xmlrpc:"partitions,omitempty"` +} + +// The SoftLayer_Hardware_Component_Model data type contains general information relating to a single SoftLayer component model. A component model represents a vendor specific representation of a hardware component. Every piece of hardware on a server will have a specific hardware component model. +type Hardware_Component_Model struct { + Entity + + // no documentation yet + ArchitectureType *Hardware_Component_Model_Architecture_Type `json:"architectureType,omitempty" xmlrpc:"architectureType,omitempty"` + + // no documentation yet + ArchitectureTypeId *string `json:"architectureTypeId,omitempty" xmlrpc:"architectureTypeId,omitempty"` + + // A count of + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // no documentation yet + Attributes []Hardware_Component_Model_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // A component model's capacity. The capacity of a component model depends on the model itself. For Example: Hard drives have a capacity that reflects the amount of data that hard drive can store. + Capacity *Float64 `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"` + + // A count of + CompatibleArrayTypeCount *uint `json:"compatibleArrayTypeCount,omitempty" xmlrpc:"compatibleArrayTypeCount,omitempty"` + + // no documentation yet + CompatibleArrayTypes []Configuration_Storage_Group_Array_Type `json:"compatibleArrayTypes,omitempty" xmlrpc:"compatibleArrayTypes,omitempty"` + + // A count of all the component models that are compatible with a hardware component model. + CompatibleChildComponentModelCount *uint `json:"compatibleChildComponentModelCount,omitempty" xmlrpc:"compatibleChildComponentModelCount,omitempty"` + + // All the component models that are compatible with a hardware component model. + CompatibleChildComponentModels []Hardware_Component_Model `json:"compatibleChildComponentModels,omitempty" xmlrpc:"compatibleChildComponentModels,omitempty"` + + // A count of all the component models that a hardware component model is compatible with. 
+ CompatibleParentComponentModelCount *uint `json:"compatibleParentComponentModelCount,omitempty" xmlrpc:"compatibleParentComponentModelCount,omitempty"` + + // All the component models that a hardware component model is compatible with. + CompatibleParentComponentModels []Hardware_Component_Model `json:"compatibleParentComponentModels,omitempty" xmlrpc:"compatibleParentComponentModels,omitempty"` + + // A colon delimited list of hardware component model attributes. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A count of + FirmwareCount *uint `json:"firmwareCount,omitempty" xmlrpc:"firmwareCount,omitempty"` + + // no documentation yet + FirmwareQuantity *uint `json:"firmwareQuantity,omitempty" xmlrpc:"firmwareQuantity,omitempty"` + + // no documentation yet + Firmwares []Hardware_Component_Firmware `json:"firmwares,omitempty" xmlrpc:"firmwares,omitempty"` + + // A hardware component model's physical components in inventory. + HardwareComponents []Hardware_Component `json:"hardwareComponents,omitempty" xmlrpc:"hardwareComponents,omitempty"` + + // The non-vendor specific generic component model for a hardware component model. + HardwareGenericComponentModel *Hardware_Component_Model_Generic `json:"hardwareGenericComponentModel,omitempty" xmlrpc:"hardwareGenericComponentModel,omitempty"` + + // The internal identifier of the generic component model for a component model. + HardwareGenericComponentModelId *int `json:"hardwareGenericComponentModelId,omitempty" xmlrpc:"hardwareGenericComponentModelId,omitempty"` + + // A hardware component model's internal identifier number. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + InfinibandCompatibleAttribute *Hardware_Component_Model_Attribute `json:"infinibandCompatibleAttribute,omitempty" xmlrpc:"infinibandCompatibleAttribute,omitempty"` + + // no documentation yet + IsFlexSkuCompatible *bool `json:"isFlexSkuCompatible,omitempty" xmlrpc:"isFlexSkuCompatible,omitempty"` + + // no documentation yet + IsInfinibandCompatible *bool `json:"isInfinibandCompatible,omitempty" xmlrpc:"isInfinibandCompatible,omitempty"` + + // no documentation yet + LongDescription *string `json:"longDescription,omitempty" xmlrpc:"longDescription,omitempty"` + + // A hardware component model's manufacturer. + Manufacturer *string `json:"manufacturer,omitempty" xmlrpc:"manufacturer,omitempty"` + + // The model name of a hardware component model. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A motherboard's average reboot time. + RebootTime *Hardware_Component_Motherboard_Reboot_Time `json:"rebootTime,omitempty" xmlrpc:"rebootTime,omitempty"` + + // A hardware component model's type. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // A count of the types of attributes that are allowed for a given hardware component model. + ValidAttributeTypeCount *uint `json:"validAttributeTypeCount,omitempty" xmlrpc:"validAttributeTypeCount,omitempty"` + + // The types of attributes that are allowed for a given hardware component model. + ValidAttributeTypes []Hardware_Component_Model_Attribute_Type `json:"validAttributeTypes,omitempty" xmlrpc:"validAttributeTypes,omitempty"` + + // The model number or model description of a hardware component model. 
+ Version *string `json:"version,omitempty" xmlrpc:"version,omitempty"` +} + +// no documentation yet +type Hardware_Component_Model_Architecture_Type struct { + Entity + + // no documentation yet + Children []Hardware_Component_Model_Architecture_Type `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Parent *Hardware_Component_Model_Architecture_Type `json:"parent,omitempty" xmlrpc:"parent,omitempty"` + + // no documentation yet + ParentId *string `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"` +} + +// The SoftLayer_Hardware_Component__Model_Attribute data type contains general information relating to a single hardware setting or attribute for a component model. +type Hardware_Component_Model_Attribute struct { + Entity + + // A hardware component model attribute's associated [[SoftLayer_Hardware_Component_Model_Attribute_Type|type]] Id. + AttributeTypeId *int `json:"attributeTypeId,omitempty" xmlrpc:"attributeTypeId,omitempty"` + + // no documentation yet + HardwareComponent *Hardware_Component_Model `json:"hardwareComponent,omitempty" xmlrpc:"hardwareComponent,omitempty"` + + // no documentation yet + HardwareComponentAttributeType *Hardware_Component_Model_Attribute_Type `json:"hardwareComponentAttributeType,omitempty" xmlrpc:"hardwareComponentAttributeType,omitempty"` + + // A hardware component model attribute's associated [[SoftLayer_Hardware_Component_Model|hardware component model]] Id. + HardwareComponentModelId *int `json:"hardwareComponentModelId,omitempty" xmlrpc:"hardwareComponentModelId,omitempty"` + + // A hardware component model attribute's value. A value can have many different values depending on the attributes [[SoftLayer_Hardware_Component_Model_Attribute_Type|type]]. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_Hardware_Component_Model_Attribute_Type data type contains general information for the type of an attribute for a hardware component model. +type Hardware_Component_Model_Attribute_Type struct { + Entity + + // The description for the data that a hardware component model type's [[SoftLayer_Hardware_Component_Model_Attribute|Attribute]] contains. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A hardware component model attribute type's Id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A hardware component model attribute type's unique name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A hardware component model attribute type's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of + ValidComponentTypeCount *uint `json:"validComponentTypeCount,omitempty" xmlrpc:"validComponentTypeCount,omitempty"` + + // no documentation yet + ValidComponentTypes []Hardware_Component_Type `json:"validComponentTypes,omitempty" xmlrpc:"validComponentTypes,omitempty"` +} + +// The SoftLayer_Hardware_Component_Model_Generic data type contains general information relating to a single SoftLayer generic component model. 
A generic component model represents a non-vendor specific representation of a hardware component. Frequently SoftLayer utilizes components from various vendors in the servers they provision. For Example: Several different vendors produce 6GB DDR2 memory. The generic component model for the 6GB stick of RAM encompasses every instance of this component regardless of make and model. +type Hardware_Component_Model_Generic struct { + Entity + + // A generic component model's capacity. The capacity of a generic component model depends on the model itself. For Example: Hard drives have a capacity that reflects the amount of data that hard drive can store. + Capacity *Float64 `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"` + + // A brief description for a generic component model that typically defines its function. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A count of a generic component model's hardware component model. + HardwareComponentModelCount *uint `json:"hardwareComponentModelCount,omitempty" xmlrpc:"hardwareComponentModelCount,omitempty"` + + // A generic component model's hardware component model. + HardwareComponentModels []Hardware_Component_Model `json:"hardwareComponentModels,omitempty" xmlrpc:"hardwareComponentModels,omitempty"` + + // A generic component model's type. + HardwareComponentType *Hardware_Component_Type `json:"hardwareComponentType,omitempty" xmlrpc:"hardwareComponentType,omitempty"` + + // The internal identifier of the component type for a generic component model. + HardwareComponentTypeId *int `json:"hardwareComponentTypeId,omitempty" xmlrpc:"hardwareComponentTypeId,omitempty"` + + // A generic component model's internal identification number. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A list of features that a generic component model can provide. + MarketingFeatures *Hardware_Component_Model_Generic_MarketingFeature `json:"marketingFeatures,omitempty" xmlrpc:"marketingFeatures,omitempty"` + + // The unit of measurement for the capacity of a generic component model. + Units *string `json:"units,omitempty" xmlrpc:"units,omitempty"` + + // A generic component model's upgrade priority. The upgrade priority indicates the order a generic component model should be considered over other generic component models. A higher number indicates that a generic component model receives a higher upgrade preference in comparison to a generic component model with a lower priority number. + UpgradePriority *int `json:"upgradePriority,omitempty" xmlrpc:"upgradePriority,omitempty"` +} + +// The SoftLayer_Hardware_Component_Model_Generic_Attribute data type contains information relating to a single SoftLayer generic component model. Generic component model attributes can hold any information to describe functionality of the model. For Example: The number of cores that a processor has. +type Hardware_Component_Model_Generic_Attribute struct { + Entity + + // An attribute's generic component model. + HardwareGenericComponentModel *Hardware_Component_Model_Generic `json:"hardwareGenericComponentModel,omitempty" xmlrpc:"hardwareGenericComponentModel,omitempty"` + + // A generic component model attribute's value. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_Hardware_Component_Model_Generic_MarketingFeature data type contains general information relating to all the advertising features of a single SoftLayer hardware generic component model.
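+// Editor's note: an illustrative sketch, not part of the generated SoftLayer bindings.
+// Building on the 6GB DDR2 RAM example above, a generic component model and its
+// marketing features could be represented roughly like this (the literal values are
+// made up; Float64 is the numeric type defined elsewhere in this package and is
+// assumed here to convert from a plain float64):
+//
+//	capacity := Float64(6)
+//	units := "GB"
+//	features := "<ul><li>DDR2</li><li>6GB</li></ul>"
+//	generic := Hardware_Component_Model_Generic{
+//		Capacity: &capacity,
+//		Units:    &units,
+//		MarketingFeatures: &Hardware_Component_Model_Generic_MarketingFeature{
+//			Features: &features,
+//		},
+//	}
+//	_ = generic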
+type Hardware_Component_Model_Generic_MarketingFeature struct { + Entity + + // An HTML-formatted list of all features. + Features *string `json:"features,omitempty" xmlrpc:"features,omitempty"` + + // The generic component model for a list of advertising or marketing features + HardwareGenericComponentModel *Hardware_Component_Model_Generic `json:"hardwareGenericComponentModel,omitempty" xmlrpc:"hardwareGenericComponentModel,omitempty"` + + // A hardware component's upgrade price. + Price *string `json:"price,omitempty" xmlrpc:"price,omitempty"` +} + +// The SoftLayer_Hardware_Component_Motherboard data type abstracts information related to a motherboard. +type Hardware_Component_Motherboard struct { + Hardware_Component +} + +// The SoftLayer_Hardware_Component_Motherboard_Reboot_Time contains the average reboot times for motherboards. There are two types of average times. One is for motherboards without RAID, and the other is for motherboards with RAID. These times are based on averages and have been gathered through numerous test cases. +type Hardware_Component_Motherboard_Reboot_Time struct { + Entity + + // Motherboard's specifications (manufacturer, version, etc.) + HardwareComponentModel *Hardware_Component_Model `json:"hardwareComponentModel,omitempty" xmlrpc:"hardwareComponentModel,omitempty"` + + // Average reboot time in seconds for the motherboard when RAID is installed. + WithRaid *int `json:"withRaid,omitempty" xmlrpc:"withRaid,omitempty"` + + // Average reboot time in seconds for the motherboard when NO RAID is installed. + WithoutRaid *int `json:"withoutRaid,omitempty" xmlrpc:"withoutRaid,omitempty"` +} + +// The SoftLayer_Hardware_Component_NetworkCard data type abstracts information related to a network card. +type Hardware_Component_NetworkCard struct { + Hardware_Component +} + +// The SoftLayer_Hardware_Component_Partition data type contains general information relating to a single hard drive partition. +type Hardware_Component_Partition struct { + Entity + + // A hardware component partition's order in the [[SoftLayer_Hardware_Hardware|hardware]]. + DiskNumber *int `json:"diskNumber,omitempty" xmlrpc:"diskNumber,omitempty"` + + // A flag indicating if a partition is the grow partition. The grow partition will grow to fill all remaining space on a disk. There can only be one. + Grow *int `json:"grow,omitempty" xmlrpc:"grow,omitempty"` + + // A hardware component partition's associated [[SoftLayer_Hardware_Component|Hardware Component]]. Likely to be a [[SoftLayer_Hardware_Component_HardDrive|Hard Drive]] + HardwareComponent *Hardware_Component `json:"hardwareComponent,omitempty" xmlrpc:"hardwareComponent,omitempty"` + + // A hardware component partition's associated [[SoftLayer_Hardware_Component|hardware component]] Id. + HardwareComponentId *int `json:"hardwareComponentId,omitempty" xmlrpc:"hardwareComponentId,omitempty"` + + // A hardware component partition's minimum size (GB). + MinimumSize *Float64 `json:"minimumSize,omitempty" xmlrpc:"minimumSize,omitempty"` + + // A hardware component partition's name. On a server with Windows this may be 'C' and on Linux this may be '/var' + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Hardware_Component_Partition_OperatingSystem data type contains general information relating to a single SoftLayer Operating System Partition Template. +type Hardware_Component_Partition_OperatingSystem struct { + Entity + + // A partition template operating system's description.
Typically the title of the Operating System. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A partition template operating system's id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Information about the kinds of partition templates assigned to this operating system. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // A count of information regarding an operating system's [[SoftLayer_Hardware_Component_Partition_Template|Partition Templates]]. + PartitionTemplateCount *uint `json:"partitionTemplateCount,omitempty" xmlrpc:"partitionTemplateCount,omitempty"` + + // Information regarding an operating system's [[SoftLayer_Hardware_Component_Partition_Template|Partition Templates]]. + PartitionTemplates []Hardware_Component_Partition_Template `json:"partitionTemplates,omitempty" xmlrpc:"partitionTemplates,omitempty"` +} + +// The SoftLayer_Hardware_Component_Partition_Template data type contains general information relating to a single SoftLayer partition template. Partition templates group 1 or more partition configurations that can be used to predefine how a hard drive's partitions will be configured. +type Hardware_Component_Partition_Template struct { + Entity + + // A partition template's associated [[SoftLayer_Account|Account]]. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A partition template's owner. The [[SoftLayer_Account|Account]] that a template was created by. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // An individual partition for a partition template. This is identical to 'partitionTemplatePartition' except this will sort unix partitions. + Data []Hardware_Component_Partition_Template_Partition `json:"data,omitempty" xmlrpc:"data,omitempty"` + + // A count of an individual partition for a partition template. This is identical to 'partitionTemplatePartition' except this will sort unix partitions. + DataCount *uint `json:"dataCount,omitempty" xmlrpc:"dataCount,omitempty"` + + // A partition template's description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + ExpireDate *string `json:"expireDate,omitempty" xmlrpc:"expireDate,omitempty"` + + // A partition template's id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A partition template's associated [[SoftLayer_Hardware_Component_Partition_OperatingSystem|Operating System]]. + PartitionOperatingSystem *Hardware_Component_Partition_OperatingSystem `json:"partitionOperatingSystem,omitempty" xmlrpc:"partitionOperatingSystem,omitempty"` + + // A partition template's associated [[SoftLayer_Hardware_Component_Partition_OperatingSystem|Operating System]] Id. + PartitionOperatingSystemId *int `json:"partitionOperatingSystemId,omitempty" xmlrpc:"partitionOperatingSystemId,omitempty"` + + // An individual partition for a partition template. + PartitionTemplatePartition []Hardware_Component_Partition_Template_Partition `json:"partitionTemplatePartition,omitempty" xmlrpc:"partitionTemplatePartition,omitempty"` + + // A count of an individual partition for a partition template. + PartitionTemplatePartitionCount *uint `json:"partitionTemplatePartitionCount,omitempty" xmlrpc:"partitionTemplatePartitionCount,omitempty"` + + // A partition template's status code. ACTIVE ,INACTIVE. + StatusCode *string `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"` + + // A partition template's Type. 
SYSTEM - templates generated by SoftLayer. CUSTOM - templates generated by SoftLayer customers. + TemplateType *string `json:"templateType,omitempty" xmlrpc:"templateType,omitempty"` +} + +// The SoftLayer_Hardware_Component_Partition_Template_Partition data type contains general information relating to a single SoftLayer Template Partition. +type Hardware_Component_Partition_Template_Partition struct { + Entity + + // The filesystem type of a partition + FilesystemType *Configuration_Storage_Filesystem_Type `json:"filesystemType,omitempty" xmlrpc:"filesystemType,omitempty"` + + // A partition's id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A flag indicating if a partition will be the grow partition. The grow partition will have its size adjusted to fill all available space on a hard drive. + IsGrow *bool `json:"isGrow,omitempty" xmlrpc:"isGrow,omitempty"` + + // A partition's default name. + PartitionName *string `json:"partitionName,omitempty" xmlrpc:"partitionName,omitempty"` + + // A partition's default size. + PartitionSize *Float64 `json:"partitionSize,omitempty" xmlrpc:"partitionSize,omitempty"` + + // A partition's [[SoftLayer_Hardware_Component_Partition_Template|Partition Template]]. + PartitionTemplate *Hardware_Component_Partition_Template `json:"partitionTemplate,omitempty" xmlrpc:"partitionTemplate,omitempty"` + + // A partition's associated [[SoftLayer_Hardware_Component_Partition_Template|Partition Template]] Id. + PartitionTemplateId *int `json:"partitionTemplateId,omitempty" xmlrpc:"partitionTemplateId,omitempty"` + + // The volume the partition will be put on + VolumeNumber *int `json:"volumeNumber,omitempty" xmlrpc:"volumeNumber,omitempty"` +} + +// The SoftLayer_Hardware_Component_Processor data type abstracts information related to a processor. +type Hardware_Component_Processor struct { + Hardware_Component +} + +// The SoftLayer_Hardware_Component_Ram data type abstracts information related to RAM. +type Hardware_Component_Ram struct { + Hardware_Component +} + +// This class adds functionality to the base SoftLayer_Hardware class for web servers (all server hardware) +type Hardware_Component_RemoteManagement struct { + Hardware_Component + + // A network component data type. + NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` +} + +// The SoftLayer_Hardware_Component_RemoteManagement_Command data type contains the names of the remote management commands. Currently, only the reboot and power commands for the remote management card exist. +type Hardware_Component_RemoteManagement_Command struct { + Entity + + // The name of the remote management command. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A count of all requests issued for the remote management command. + RequestCount *uint `json:"requestCount,omitempty" xmlrpc:"requestCount,omitempty"` + + // All requests issued for the remote management command. + Requests []Hardware_Component_RemoteManagement_Command_Request `json:"requests,omitempty" xmlrpc:"requests,omitempty"` +} + +// The SoftLayer_Hardware_Component_RemoteManagement_Command_Request contains details for remote management commands issued to a server's remote management card. Details for remote management commands such as powerOn, powerOff, powerCycle, rebootDefault, rebootSoft, rebootHard can be retrieved. Details such as the user who issued the command, the id of the remote management card the command was issued to, and when the command was issued may be retrieved.
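+// Editor's note: an illustrative sketch, not part of the generated SoftLayer bindings.
+// A consumer inspecting one of these request records might summarize it roughly as
+// below (the "POWER_ON" key name is only an assumed example, and Time is assumed to
+// embed time.Time as elsewhere in this package):
+//
+//	func describeRequest(req Hardware_Component_RemoteManagement_Command_Request) string {
+//		cmd, when := "unknown command", "unknown time"
+//		if req.RemoteManagementCommand != nil && req.RemoteManagementCommand.KeyName != nil {
+//			cmd = *req.RemoteManagementCommand.KeyName // e.g. "POWER_ON"
+//		}
+//		if req.CreateDate != nil {
+//			when = req.CreateDate.String()
+//		}
+//		return cmd + " issued at " + when
+//	}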
+type Hardware_Component_RemoteManagement_Command_Request struct { + Entity + + // The timestamp the remote management command was issued. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The id of the hardware to perform the remote management or powerstrip command on. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The hardware id the command was issued for. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // The timestamp recorded when the remote management command returned a status of the command issued. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A hardware's network components. Network components are hardware components such as IPMI cards or Ethernet cards. + NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` + + // Execution status of the remote management command. True is successful. False is failure. + Processed *bool `json:"processed,omitempty" xmlrpc:"processed,omitempty"` + + // The remote management command issued. + RemoteManagementCommand *Hardware_Component_RemoteManagement_Command `json:"remoteManagementCommand,omitempty" xmlrpc:"remoteManagementCommand,omitempty"` + + // Information regarding the user who issued the remote management command. + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` +} + +// The credentials used for remote management such as username, password, etc... +type Hardware_Component_RemoteManagement_User struct { + Entity + + // no documentation yet + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // no documentation yet + NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` + + // The password used for this remote management command. + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // The username used for this remote management command. + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// no documentation yet +type Hardware_Component_Revision struct { + Entity + + // The firmware build date + BiosDate *Time `json:"biosDate,omitempty" xmlrpc:"biosDate,omitempty"` + + // The Firmware installed on this record's Hardware Component. + Firmware *Hardware_Component_Firmware `json:"firmware,omitempty" xmlrpc:"firmware,omitempty"` + + // no documentation yet + FirmwareVersionId *int `json:"firmwareVersionId,omitempty" xmlrpc:"firmwareVersionId,omitempty"` + + // The Hardware Component this revision record applies to. + HardwareComponent *Hardware_Component `json:"hardwareComponent,omitempty" xmlrpc:"hardwareComponent,omitempty"` + + // no documentation yet + HardwareComponentId *int `json:"hardwareComponentId,omitempty" xmlrpc:"hardwareComponentId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The firmware revision + Revision *string `json:"revision,omitempty" xmlrpc:"revision,omitempty"` +} + +// The SoftLayer_Hardware_Component_SecurityDevice is used to determine the security devices attached to the hardware component. +type Hardware_Component_SecurityDevice struct { + Hardware_Component +} + +// The SoftLayer_Hardware_Component_SecurityDevice_Infineon is used to determine the Infineon security device attached to the hardware component. 
+type Hardware_Component_SecurityDevice_Infineon struct { + Hardware_Component_SecurityDevice +} + +// The SoftLayer_Hardware_Component_Type data type provides details on the type of component requested +type Hardware_Component_Type struct { + Entity + + // A count of the generic component model description for this component type object. + HardwareGenericComponentModelCount *uint `json:"hardwareGenericComponentModelCount,omitempty" xmlrpc:"hardwareGenericComponentModelCount,omitempty"` + + // The generic component model description for this component type object. + HardwareGenericComponentModels []Hardware_Component_Model_Generic `json:"hardwareGenericComponentModels,omitempty" xmlrpc:"hardwareGenericComponentModels,omitempty"` + + // The ID associated with this component type. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The hardware component type key name or code. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The type associated with this component type. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The parent generic component model object for this generic component model object. + TypeParent *Hardware_Component_Type `json:"typeParent,omitempty" xmlrpc:"typeParent,omitempty"` + + // The parent id associated with this component type. + TypeParentId *int `json:"typeParentId,omitempty" xmlrpc:"typeParentId,omitempty"` +} + +// The SoftLayer_Hardware_Firewall data type contains general information relating to a single SoftLayer firewall. +type Hardware_Firewall struct { + Hardware_Switch + + // A count of a list of users that have access to this hardware firewall. + UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"` + + // A list of users that have access to this hardware firewall. + Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"` +} + +// The SoftLayer_Hardware_Function data type contains a generic object type for a piece of hardware, like switch, firewall, server, etc.. +type Hardware_Function struct { + Entity + + // The code associated with this hardware function. + Code *string `json:"code,omitempty" xmlrpc:"code,omitempty"` + + // The description for a hardware function. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The id associated with a hardware function. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// no documentation yet +type Hardware_Group struct { + Entity + + // no documentation yet + Domain *string `json:"domain,omitempty" xmlrpc:"domain,omitempty"` + + // A count of all servers attached to a network hardware. + DownlinkServerCount *uint `json:"downlinkServerCount,omitempty" xmlrpc:"downlinkServerCount,omitempty"` + + // All servers attached to a network hardware. + DownlinkServers []Hardware `json:"downlinkServers,omitempty" xmlrpc:"downlinkServers,omitempty"` + + // A count of all virtual guests attached to a network hardware. + DownlinkVirtualGuestCount *uint `json:"downlinkVirtualGuestCount,omitempty" xmlrpc:"downlinkVirtualGuestCount,omitempty"` + + // All virtual guests attached to a network hardware. + DownlinkVirtualGuests []Virtual_Guest `json:"downlinkVirtualGuests,omitempty" xmlrpc:"downlinkVirtualGuests,omitempty"` + + // All network hardware downstream from this hardware. + DownstreamNetworkHardware []Hardware `json:"downstreamNetworkHardware,omitempty" xmlrpc:"downstreamNetworkHardware,omitempty"` + + // A count of all network hardware downstream from this hardware. 
+ DownstreamNetworkHardwareCount *uint `json:"downstreamNetworkHardwareCount,omitempty" xmlrpc:"downstreamNetworkHardwareCount,omitempty"` + + // A count of all network hardware with monitoring warnings or errors downstream from this hardware. + DownstreamNetworkHardwareWithIncidentCount *uint `json:"downstreamNetworkHardwareWithIncidentCount,omitempty" xmlrpc:"downstreamNetworkHardwareWithIncidentCount,omitempty"` + + // All network hardware with monitoring warnings or errors downstream from this hardware. + DownstreamNetworkHardwareWithIncidents []Hardware `json:"downstreamNetworkHardwareWithIncidents,omitempty" xmlrpc:"downstreamNetworkHardwareWithIncidents,omitempty"` + + // The chassis that a piece of hardware is housed in. + HardwareChassis *Hardware_Chassis `json:"hardwareChassis,omitempty" xmlrpc:"hardwareChassis,omitempty"` + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // All servers attached downstream to a hardware that have failed monitoring + NetworkMonitorAttachedDownHardware []Hardware `json:"networkMonitorAttachedDownHardware,omitempty" xmlrpc:"networkMonitorAttachedDownHardware,omitempty"` + + // A count of all servers attached downstream to a hardware that have failed monitoring + NetworkMonitorAttachedDownHardwareCount *uint `json:"networkMonitorAttachedDownHardwareCount,omitempty" xmlrpc:"networkMonitorAttachedDownHardwareCount,omitempty"` + + // A count of virtual guests that are attached downstream to a hardware that have failed monitoring + NetworkMonitorAttachedDownVirtualGuestCount *uint `json:"networkMonitorAttachedDownVirtualGuestCount,omitempty" xmlrpc:"networkMonitorAttachedDownVirtualGuestCount,omitempty"` + + // Virtual guests that are attached downstream to a hardware that have failed monitoring + NetworkMonitorAttachedDownVirtualGuests []Virtual_Guest `json:"networkMonitorAttachedDownVirtualGuests,omitempty" xmlrpc:"networkMonitorAttachedDownVirtualGuests,omitempty"` + + // The value of a hardware's network status attribute. + NetworkStatus *string `json:"networkStatus,omitempty" xmlrpc:"networkStatus,omitempty"` +} + +// no documentation yet +type Hardware_LoadBalancer struct { + Hardware + + // no documentation yet + ModelFamily *string `json:"modelFamily,omitempty" xmlrpc:"modelFamily,omitempty"` + + // A count of a list of users that have access to this hardware load balancer. + UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"` + + // A list of users that have access to this hardware load balancer. 
+ Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"` +} + +// no documentation yet +type Hardware_Note struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Employee *User_Employee `json:"employee,omitempty" xmlrpc:"employee,omitempty"` + + // no documentation yet + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // no documentation yet + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"` + + // no documentation yet + Type *Hardware_Note_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // no documentation yet + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // no documentation yet + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // no documentation yet + UserRecordId *int `json:"userRecordId,omitempty" xmlrpc:"userRecordId,omitempty"` +} + +// no documentation yet +type Hardware_Note_Type struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} + +// no documentation yet +type Hardware_Power_Component struct { + Entity + + // no documentation yet + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // no documentation yet + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// no documentation yet +type Hardware_Resource_Configuration struct { + Entity + + // no documentation yet + ConfigurationTypeId *int `json:"configurationTypeId,omitempty" xmlrpc:"configurationTypeId,omitempty"` + + // no documentation yet + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // no documentation yet + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // no documentation yet + Properties []Hardware_Resource_Configuration_Property `json:"properties,omitempty" xmlrpc:"properties,omitempty"` + + // A count of + PropertyCount *uint `json:"propertyCount,omitempty" xmlrpc:"propertyCount,omitempty"` + + // no documentation yet + Type *Hardware_Resource_Configuration_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Hardware_Resource_Configuration_Property struct { + Entity + + // no documentation yet + Configuration *Hardware_Resource_Configuration `json:"configuration,omitempty" xmlrpc:"configuration,omitempty"` + + // no documentation yet + ConfigurationId *int `json:"configurationId,omitempty" xmlrpc:"configurationId,omitempty"` + + // no documentation yet + ConfigurationPropertyTypeId *int `json:"configurationPropertyTypeId,omitempty" xmlrpc:"configurationPropertyTypeId,omitempty"` + + // no documentation yet + Type *Hardware_Resource_Configuration_Property_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // no documentation yet + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Hardware_Resource_Configuration_Property_Type struct { + Entity + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no 
documentation yet + Properties []Hardware_Resource_Configuration_Property `json:"properties,omitempty" xmlrpc:"properties,omitempty"` + + // A count of + PropertyCount *uint `json:"propertyCount,omitempty" xmlrpc:"propertyCount,omitempty"` + + // no documentation yet + Unit *string `json:"unit,omitempty" xmlrpc:"unit,omitempty"` +} + +// no documentation yet +type Hardware_Resource_Configuration_Type struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Hardware_Router data type contains general information relating to a single SoftLayer router. +type Hardware_Router struct { + Hardware_Switch + + // A count of associated subnets for a router object. + BoundSubnetCount *uint `json:"boundSubnetCount,omitempty" xmlrpc:"boundSubnetCount,omitempty"` + + // Associated subnets for a router object. + BoundSubnets []Network_Subnet `json:"boundSubnets,omitempty" xmlrpc:"boundSubnets,omitempty"` + + // A flag indicating that a VLAN on the router can be assigned to a host that has local disk functionality. + LocalDiskStorageCapabilityFlag *bool `json:"localDiskStorageCapabilityFlag,omitempty" xmlrpc:"localDiskStorageCapabilityFlag,omitempty"` + + // A flag indicating that a VLAN on the router can be assigned to a host that has SAN disk functionality. + SanStorageCapabilityFlag *bool `json:"sanStorageCapabilityFlag,omitempty" xmlrpc:"sanStorageCapabilityFlag,omitempty"` +} + +// The SoftLayer_Hardware_Router_Backend data type contains general information relating to a single SoftLayer router item for hardware. +type Hardware_Router_Backend struct { + Hardware_Router +} + +// The SoftLayer_Hardware_Router_Frontend data type contains general information relating to a single SoftLayer router item for hardware. +type Hardware_Router_Frontend struct { + Hardware_Router +} + +// no documentation yet +type Hardware_SecurityModule struct { + Hardware_Server +} + +// no documentation yet +type Hardware_SecurityModule750 struct { + Hardware_SecurityModule +} + +// The SoftLayer_Hardware_Server data type contains general information relating to a single SoftLayer server. +type Hardware_Server struct { + Hardware + + // The billing item for a server's attached network firewall. + ActiveNetworkFirewallBillingItem *Billing_Item `json:"activeNetworkFirewallBillingItem,omitempty" xmlrpc:"activeNetworkFirewallBillingItem,omitempty"` + + // A count of + ActiveTicketCount *uint `json:"activeTicketCount,omitempty" xmlrpc:"activeTicketCount,omitempty"` + + // no documentation yet + ActiveTickets []Ticket `json:"activeTickets,omitempty" xmlrpc:"activeTickets,omitempty"` + + // Transaction currently running for server. + ActiveTransaction *Provisioning_Version1_Transaction `json:"activeTransaction,omitempty" xmlrpc:"activeTransaction,omitempty"` + + // A count of any active transaction(s) that are currently running for the server (example: os reload). + ActiveTransactionCount *uint `json:"activeTransactionCount,omitempty" xmlrpc:"activeTransactionCount,omitempty"` + + // Any active transaction(s) that are currently running for the server (example: os reload). + ActiveTransactions []Provisioning_Version1_Transaction `json:"activeTransactions,omitempty" xmlrpc:"activeTransactions,omitempty"` + + // An object that stores the maximum level for the monitoring query types and response types. 
+ AvailableMonitoring []Network_Monitor_Version1_Query_Host_Stratum `json:"availableMonitoring,omitempty" xmlrpc:"availableMonitoring,omitempty"` + + // A count of an object that stores the maximum level for the monitoring query types and response types. + AvailableMonitoringCount *uint `json:"availableMonitoringCount,omitempty" xmlrpc:"availableMonitoringCount,omitempty"` + + // The average daily total bandwidth usage for the current billing cycle. + AverageDailyBandwidthUsage *Float64 `json:"averageDailyBandwidthUsage,omitempty" xmlrpc:"averageDailyBandwidthUsage,omitempty"` + + // The average daily private bandwidth usage for the current billing cycle. + AverageDailyPrivateBandwidthUsage *Float64 `json:"averageDailyPrivateBandwidthUsage,omitempty" xmlrpc:"averageDailyPrivateBandwidthUsage,omitempty"` + + // The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to. + BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"` + + // A count of the raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to. + BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"` + + // The raw private bandwidth usage data for the current billing cycle. + BillingCyclePrivateBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"` + + // The raw public bandwidth usage data for the current billing cycle. + BillingCyclePublicBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"` + + // Determine if BIOS password should be left as null. + BiosPasswordNullFlag *bool `json:"biosPasswordNullFlag,omitempty" xmlrpc:"biosPasswordNullFlag,omitempty"` + + // no documentation yet + ContainsSolidStateDrivesFlag *bool `json:"containsSolidStateDrivesFlag,omitempty" xmlrpc:"containsSolidStateDrivesFlag,omitempty"` + + // A server's control panel. + ControlPanel *Software_Component_ControlPanel `json:"controlPanel,omitempty" xmlrpc:"controlPanel,omitempty"` + + // The total cost of a server, measured in US Dollars ($USD). + Cost *Float64 `json:"cost,omitempty" xmlrpc:"cost,omitempty"` + + // An object that provides commonly used bandwidth summary components for the current billing cycle. + CurrentBandwidthSummary *Metric_Tracking_Object_Bandwidth_Summary `json:"currentBandwidthSummary,omitempty" xmlrpc:"currentBandwidthSummary,omitempty"` + + // Indicates if a server has a Customer Installed OS + CustomerInstalledOperatingSystemFlag *bool `json:"customerInstalledOperatingSystemFlag,omitempty" xmlrpc:"customerInstalledOperatingSystemFlag,omitempty"` + + // Indicates if a server is a customer owned device. + CustomerOwnedFlag *bool `json:"customerOwnedFlag,omitempty" xmlrpc:"customerOwnedFlag,omitempty"` + + // Determine if hardware has Single Root IO VIrtualization (SR-IOV) billing item. + HasSingleRootVirtualizationBillingItemFlag *bool `json:"hasSingleRootVirtualizationBillingItemFlag,omitempty" xmlrpc:"hasSingleRootVirtualizationBillingItemFlag,omitempty"` + + // The total private inbound bandwidth for this hardware for the current billing cycle. 
+ InboundPrivateBandwidthUsage *Float64 `json:"inboundPrivateBandwidthUsage,omitempty" xmlrpc:"inboundPrivateBandwidthUsage,omitempty"` + + // Determine if hardware object has the IBM_CLOUD_READY_NODE_CERTIFIED attribute. + IsCloudReadyNodeCertified *bool `json:"isCloudReadyNodeCertified,omitempty" xmlrpc:"isCloudReadyNodeCertified,omitempty"` + + // The last transaction that a server's operating system was loaded. + LastOperatingSystemReload *Provisioning_Version1_Transaction `json:"lastOperatingSystemReload,omitempty" xmlrpc:"lastOperatingSystemReload,omitempty"` + + // The metric tracking object id for this server. + MetricTrackingObjectId *int `json:"metricTrackingObjectId,omitempty" xmlrpc:"metricTrackingObjectId,omitempty"` + + // The monitoring notification objects for this hardware. Each object links this hardware instance to a user account that will be notified if monitoring on this hardware object fails + MonitoringUserNotification []User_Customer_Notification_Hardware `json:"monitoringUserNotification,omitempty" xmlrpc:"monitoringUserNotification,omitempty"` + + // A count of the monitoring notification objects for this hardware. Each object links this hardware instance to a user account that will be notified if monitoring on this hardware object fails + MonitoringUserNotificationCount *uint `json:"monitoringUserNotificationCount,omitempty" xmlrpc:"monitoringUserNotificationCount,omitempty"` + + // An open ticket requesting cancellation of this server, if one exists. + OpenCancellationTicket *Ticket `json:"openCancellationTicket,omitempty" xmlrpc:"openCancellationTicket,omitempty"` + + // The total private outbound bandwidth for this hardware for the current billing cycle. + OutboundPrivateBandwidthUsage *Float64 `json:"outboundPrivateBandwidthUsage,omitempty" xmlrpc:"outboundPrivateBandwidthUsage,omitempty"` + + // Whether the bandwidth usage for this hardware for the current billing cycle exceeds the allocation. + OverBandwidthAllocationFlag *int `json:"overBandwidthAllocationFlag,omitempty" xmlrpc:"overBandwidthAllocationFlag,omitempty"` + + // A server's primary private IP address. + PrivateIpAddress *string `json:"privateIpAddress,omitempty" xmlrpc:"privateIpAddress,omitempty"` + + // Whether the bandwidth usage for this hardware for the current billing cycle is projected to exceed the allocation. + ProjectedOverBandwidthAllocationFlag *int `json:"projectedOverBandwidthAllocationFlag,omitempty" xmlrpc:"projectedOverBandwidthAllocationFlag,omitempty"` + + // The projected public outbound bandwidth for this hardware for the current billing cycle. + ProjectedPublicBandwidthUsage *Float64 `json:"projectedPublicBandwidthUsage,omitempty" xmlrpc:"projectedPublicBandwidthUsage,omitempty"` + + // Determine if hardware object is vSan Ready Node. + ReadyNodeFlag *bool `json:"readyNodeFlag,omitempty" xmlrpc:"readyNodeFlag,omitempty"` + + // A count of the last five commands issued to the server's remote management card. + RecentRemoteManagementCommandCount *uint `json:"recentRemoteManagementCommandCount,omitempty" xmlrpc:"recentRemoteManagementCommandCount,omitempty"` + + // The last five commands issued to the server's remote management card. 
+ RecentRemoteManagementCommands []Hardware_Component_RemoteManagement_Command_Request `json:"recentRemoteManagementCommands,omitempty" xmlrpc:"recentRemoteManagementCommands,omitempty"` + + // no documentation yet + RegionalInternetRegistry *Network_Regional_Internet_Registry `json:"regionalInternetRegistry,omitempty" xmlrpc:"regionalInternetRegistry,omitempty"` + + // A server's remote management card. + RemoteManagement *Hardware_Component_RemoteManagement `json:"remoteManagement,omitempty" xmlrpc:"remoteManagement,omitempty"` + + // A count of user(s) who have access to issue commands and/or interact with the server's remote management card. + RemoteManagementUserCount *uint `json:"remoteManagementUserCount,omitempty" xmlrpc:"remoteManagementUserCount,omitempty"` + + // User(s) who have access to issue commands and/or interact with the server's remote management card. + RemoteManagementUsers []Hardware_Component_RemoteManagement_User `json:"remoteManagementUsers,omitempty" xmlrpc:"remoteManagementUsers,omitempty"` + + // Determine if hardware object has Software Guard Extension (SGX) enabled. + SoftwareGuardExtensionEnabled *bool `json:"softwareGuardExtensionEnabled,omitempty" xmlrpc:"softwareGuardExtensionEnabled,omitempty"` + + // A server's remote management card used for statistics. + StatisticsRemoteManagement *Hardware_Component_RemoteManagement `json:"statisticsRemoteManagement,omitempty" xmlrpc:"statisticsRemoteManagement,omitempty"` + + // A count of a list of users that have access to this computing instance. + UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"` + + // A list of users that have access to this computing instance. + Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"` + + // A count of a hardware server's virtual servers. + VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"` + + // A hardware server's virtual servers. + VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"` +} + +// SoftLayer_Hardware_Status models the inventory state of any piece of hardware in SoftLayer's inventory. Most of these statuses are used by SoftLayer while a server is not provisioned or undergoing provisioning. SoftLayer uses the following status codes: +// +// +// *'''ACTIVE''': This server is active and in use. +// *'''DEPLOY''': Used during server provisioning. +// *'''DEPLOY2''': Used during server provisioning. +// *'''MACWAIT''': Used during server provisioning. +// *'''RECLAIM''': This server has been reclaimed by SoftLayer and is awaiting de-provisioning. +// +// +// Servers in production and in use should stay in the ACTIVE state. If a server's status ever reads anything else then please contact SoftLayer support. +type Hardware_Status struct { + Entity + + // A hardware status' internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A hardware's status code. See the SoftLayer_Hardware_Status Overview for ''status''' possible values. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// The SoftLayer_Hardware_Switch object extends the base functionality of the SoftLayer_Hardware service. 
+type Hardware_Switch struct { + Hardware +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/layout.go b/vendor/github.com/softlayer/softlayer-go/datatypes/layout.go new file mode 100644 index 000000000000..880585bfc53c --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/layout.go @@ -0,0 +1,245 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Layout_Container contains definitions for default page layouts +type Layout_Container struct { + Entity + + // The internal identifier of a layout container + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The unique key name of the layout container, used primarily for programmatic purposes + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // The type of the layout container object + LayoutContainerType *Layout_Container_Type `json:"layoutContainerType,omitempty" xmlrpc:"layoutContainerType,omitempty"` + + // The internal identifier of the related [[SoftLayer_Layout_Container_Type]] + LayoutContainerTypeId *int `json:"layoutContainerTypeId,omitempty" xmlrpc:"layoutContainerTypeId,omitempty"` + + // A count of the layout items assigned to this layout container + LayoutItemCount *uint `json:"layoutItemCount,omitempty" xmlrpc:"layoutItemCount,omitempty"` + + // The layout items assigned to this layout container + LayoutItems []Layout_Item `json:"layoutItems,omitempty" xmlrpc:"layoutItems,omitempty"` + + // The friendly name of the layout container + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Layout_Container_Type contains definitions for container types +type Layout_Container_Type struct { + Entity + + // The internal identifier of the container type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The unique key name of the container type, used primarily for programmatic purposes + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // The friendly name of the container type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Layout_Item contains definitions for default layout items +type Layout_Item struct { + Entity + + // The internal identifier of a layout item + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The unique key name of the layout item, used primarily for programmatic purposes + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // A count of the layout preferences assigned to this layout item + LayoutItemPreferenceCount *uint `json:"layoutItemPreferenceCount,omitempty" xmlrpc:"layoutItemPreferenceCount,omitempty"` + + // The layout preferences assigned to this layout item + LayoutItemPreferences []Layout_Preference `json:"layoutItemPreferences,omitempty" xmlrpc:"layoutItemPreferences,omitempty"` + + // The type of the layout item object + LayoutItemType *Layout_Item_Type 
`json:"layoutItemType,omitempty" xmlrpc:"layoutItemType,omitempty"` + + // The internal identifier of the related [[SoftLayer_Layout_Item_Type]] + LayoutItemTypeId *int `json:"layoutItemTypeId,omitempty" xmlrpc:"layoutItemTypeId,omitempty"` + + // The friendly name of the layout item + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Layout_Item_Type contains definitions for item types +type Layout_Item_Type struct { + Entity + + // The internal identifier of the item type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The unique key name of the item type, used primarily for programmatic purposes + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // The friendly name of the item type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Layout_Preference contains definitions for default layout item preferences +type Layout_Preference struct { + Entity + + // The internal identifier of a layout preference + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The type of the preference object + LayoutPreferenceType *Layout_Preference_Type `json:"layoutPreferenceType,omitempty" xmlrpc:"layoutPreferenceType,omitempty"` + + // The internal identifier of the related [[SoftLayer_Layout_Preference_Type]] + LayoutPreferenceTypeId *int `json:"layoutPreferenceTypeId,omitempty" xmlrpc:"layoutPreferenceTypeId,omitempty"` + + // The default value of the preference + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_Layout_Preference_Type contains definitions for preference types +type Layout_Preference_Type struct { + Entity + + // The internal identifier of the item type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The unique key name of the item type, used primarily for programmatic purposes + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // The friendly name of the item type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A regular expression used to validate the related [[SoftLayer_Layout_Preference]] + ValueExpression *string `json:"valueExpression,omitempty" xmlrpc:"valueExpression,omitempty"` +} + +// The SoftLayer_Layout_Profile contains the definition of the layout profile +type Layout_Profile struct { + Entity + + // Active status of the layout profile + ActiveFlag *int `json:"activeFlag,omitempty" xmlrpc:"activeFlag,omitempty"` + + // Timestamp of when the layout profile was created + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The internal identifier of a layout profile + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of + LayoutContainerCount *uint `json:"layoutContainerCount,omitempty" xmlrpc:"layoutContainerCount,omitempty"` + + // no documentation yet + LayoutContainers []Layout_Container `json:"layoutContainers,omitempty" xmlrpc:"layoutContainers,omitempty"` + + // A count of + LayoutPreferenceCount *uint `json:"layoutPreferenceCount,omitempty" xmlrpc:"layoutPreferenceCount,omitempty"` + + // no documentation yet + LayoutPreferences []Layout_Profile_Preference `json:"layoutPreferences,omitempty" xmlrpc:"layoutPreferences,omitempty"` + + // Timestamp of when the layout profile was last updated + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The friendly name of the layout profile + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The [[SoftLayer_User_Customer]] 
owning this layout profile + UserRecordId *int `json:"userRecordId,omitempty" xmlrpc:"userRecordId,omitempty"` +} + +// no documentation yet +type Layout_Profile_Containers struct { + Entity + + // Timestamp of when the reference was created + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The internal identifier of the container reference + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The id of the referenced [[SoftLayer_Layout_Container]] + LayoutContainerId *int `json:"layoutContainerId,omitempty" xmlrpc:"layoutContainerId,omitempty"` + + // The container to be contained + LayoutContainerType *Layout_Container `json:"layoutContainerType,omitempty" xmlrpc:"layoutContainerType,omitempty"` + + // The profile containing this container + LayoutProfile *Layout_Profile `json:"layoutProfile,omitempty" xmlrpc:"layoutProfile,omitempty"` + + // The id of the referenced [[SoftLayer_Layout_Profile]] + LayoutProfileId *int `json:"layoutProfileId,omitempty" xmlrpc:"layoutProfileId,omitempty"` + + // Timestamp of when the reference was last updated + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` +} + +// no documentation yet +type Layout_Profile_Customer struct { + Layout_Profile + + // no documentation yet + UserRecord *User_Customer `json:"userRecord,omitempty" xmlrpc:"userRecord,omitempty"` +} + +// The SoftLayer_Layout_Profile_Preference contains definitions for layout preferences +type Layout_Profile_Preference struct { + Entity + + // Timestamp of when the preference was created + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Indicates whether this is a default value or not + DefaultValueFlag *int `json:"defaultValueFlag,omitempty" xmlrpc:"defaultValueFlag,omitempty"` + + // no documentation yet + LayoutContainer *Layout_Container `json:"layoutContainer,omitempty" xmlrpc:"layoutContainer,omitempty"` + + // The id of the related [[SoftLayer_Layout_Container]] + LayoutContainerId *int `json:"layoutContainerId,omitempty" xmlrpc:"layoutContainerId,omitempty"` + + // no documentation yet + LayoutItem *Layout_Item `json:"layoutItem,omitempty" xmlrpc:"layoutItem,omitempty"` + + // The id of the related [[SoftLayer_Layout_Item]] + LayoutItemId *int `json:"layoutItemId,omitempty" xmlrpc:"layoutItemId,omitempty"` + + // no documentation yet + LayoutPreference *Layout_Preference `json:"layoutPreference,omitempty" xmlrpc:"layoutPreference,omitempty"` + + // The internal identifier of the overridden [[SoftLayer_Layout_Preference]] + LayoutPreferenceId *int `json:"layoutPreferenceId,omitempty" xmlrpc:"layoutPreferenceId,omitempty"` + + // no documentation yet + LayoutProfile *Layout_Profile `json:"layoutProfile,omitempty" xmlrpc:"layoutProfile,omitempty"` + + // The internal identifier of the related [[SoftLayer_Layout_Profile]] + LayoutProfileId *int `json:"layoutProfileId,omitempty" xmlrpc:"layoutProfileId,omitempty"` + + // Timestamp of when the preference was last updated + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The value overriding the default value + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/legal.go b/vendor/github.com/softlayer/softlayer-go/datatypes/legal.go new file mode 100644 index 000000000000..f3bb064ba70b --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/legal.go @@ -0,0 +1,58 @@ +/** + * Copyright 2016 IBM Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Legal_RegulatedWorkload struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + EnabledFlag *bool `json:"enabledFlag,omitempty" xmlrpc:"enabledFlag,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Type *Legal_RegulatedWorkload_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // no documentation yet + WorkloadTypeId *int `json:"workloadTypeId,omitempty" xmlrpc:"workloadTypeId,omitempty"` +} + +// no documentation yet +type Legal_RegulatedWorkload_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/locale.go b/vendor/github.com/softlayer/softlayer-go/datatypes/locale.go new file mode 100644 index 000000000000..4c15cfeff0ce --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/locale.go @@ -0,0 +1,101 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Locale struct { + Entity + + // no documentation yet + FriendlyName *string `json:"friendlyName,omitempty" xmlrpc:"friendlyName,omitempty"` + + // Internal identification number of a locale + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + LanguageTag *string `json:"languageTag,omitempty" xmlrpc:"languageTag,omitempty"` + + // Locale name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Locale_Country struct { + Entity + + // Binary flag denoting if this country is part of the European Union + IsEuropeanUnionFlag *int `json:"isEuropeanUnionFlag,omitempty" xmlrpc:"isEuropeanUnionFlag,omitempty"` + + // no documentation yet + IsoCodeAlphaThree *string `json:"isoCodeAlphaThree,omitempty" xmlrpc:"isoCodeAlphaThree,omitempty"` + + // no documentation yet + LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"` + + // no documentation yet + PostalCodeFormat *string `json:"postalCodeFormat,omitempty" xmlrpc:"postalCodeFormat,omitempty"` + + // no documentation yet + PostalCodeRequiredFlag *int `json:"postalCodeRequiredFlag,omitempty" xmlrpc:"postalCodeRequiredFlag,omitempty"` + + // no documentation yet + ShortName *string `json:"shortName,omitempty" xmlrpc:"shortName,omitempty"` + + // A count of states that belong to this country. + StateCount *uint `json:"stateCount,omitempty" xmlrpc:"stateCount,omitempty"` + + // States that belong to this country. + States []Locale_StateProvince `json:"states,omitempty" xmlrpc:"states,omitempty"` + + // no documentation yet + VatIdRegex *string `json:"vatIdRegex,omitempty" xmlrpc:"vatIdRegex,omitempty"` +} + +// This object represents a state or province for a country. +type Locale_StateProvince struct { + Entity + + // no documentation yet + LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"` + + // no documentation yet + ShortName *string `json:"shortName,omitempty" xmlrpc:"shortName,omitempty"` +} + +// Each User is assigned a timezone allowing for a precise local timestamp. +type Locale_Timezone struct { + Entity + + // A timezone's identifying number. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A timezone's long name. For example, "(GMT-06:00) America/Dallas - CST". + LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"` + + // A timezone's name. For example, "America/Dallas". + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A timezone's offset based on the GMT standard. For example, Central Standard Time's offset is "-0600" from GMT=0000. + Offset *string `json:"offset,omitempty" xmlrpc:"offset,omitempty"` + + // A timezone's common abbreviation. For example, Central Standard Time's abbreviation is "CST". + ShortName *string `json:"shortName,omitempty" xmlrpc:"shortName,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/location.go b/vendor/github.com/softlayer/softlayer-go/datatypes/location.go new file mode 100644 index 000000000000..0c9a47807d69 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/location.go @@ -0,0 +1,443 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// Every piece of hardware and network connection owned by SoftLayer is tracked physically by location and stored in the SoftLayer_Location data type. SoftLayer locations exist in parent/child relationships, a convenient way to track equipment from it's city, datacenter, server room, rack, then slot. Network backbones are tied to datacenters only, not to a room, rack, or slot. +type Location struct { + Entity + + // A count of + ActivePresaleEventCount *uint `json:"activePresaleEventCount,omitempty" xmlrpc:"activePresaleEventCount,omitempty"` + + // no documentation yet + ActivePresaleEvents []Sales_Presale_Event `json:"activePresaleEvents,omitempty" xmlrpc:"activePresaleEvents,omitempty"` + + // A count of + BackboneDependentCount *uint `json:"backboneDependentCount,omitempty" xmlrpc:"backboneDependentCount,omitempty"` + + // no documentation yet + BackboneDependents []Network_Backbone_Location_Dependent `json:"backboneDependents,omitempty" xmlrpc:"backboneDependents,omitempty"` + + // A flag indicating whether or not the datacenter/location is EU compliant. + EuCompliantFlag *bool `json:"euCompliantFlag,omitempty" xmlrpc:"euCompliantFlag,omitempty"` + + // A count of a location can be a member of 1 or more groups. This will show which groups to which a location belongs. + GroupCount *uint `json:"groupCount,omitempty" xmlrpc:"groupCount,omitempty"` + + // A location can be a member of 1 or more groups. This will show which groups to which a location belongs. + Groups []Location_Group `json:"groups,omitempty" xmlrpc:"groups,omitempty"` + + // A count of + HardwareFirewallCount *uint `json:"hardwareFirewallCount,omitempty" xmlrpc:"hardwareFirewallCount,omitempty"` + + // no documentation yet + HardwareFirewalls []Hardware `json:"hardwareFirewalls,omitempty" xmlrpc:"hardwareFirewalls,omitempty"` + + // The unique identifier of a specific location. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A location's physical address. + LocationAddress *Account_Address `json:"locationAddress,omitempty" xmlrpc:"locationAddress,omitempty"` + + // A location's Dedicated Rack member + LocationReservationMember *Location_Reservation_Rack_Member `json:"locationReservationMember,omitempty" xmlrpc:"locationReservationMember,omitempty"` + + // The current locations status. + LocationStatus *Location_Status `json:"locationStatus,omitempty" xmlrpc:"locationStatus,omitempty"` + + // A longer location description. + LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"` + + // A short location description. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + NetworkConfigurationAttribute *Hardware_Attribute `json:"networkConfigurationAttribute,omitempty" xmlrpc:"networkConfigurationAttribute,omitempty"` + + // The total number of users online using SoftLayer's PPTP VPN service for a location. 
+ OnlinePptpVpnUserCount *int `json:"onlinePptpVpnUserCount,omitempty" xmlrpc:"onlinePptpVpnUserCount,omitempty"` + + // The total number of users online using SoftLayer's SSL VPN service for a location. + OnlineSslVpnUserCount *int `json:"onlineSslVpnUserCount,omitempty" xmlrpc:"onlineSslVpnUserCount,omitempty"` + + // no documentation yet + PathString *string `json:"pathString,omitempty" xmlrpc:"pathString,omitempty"` + + // A count of a location can be a member of 1 or more Price Groups. This will show which groups to which a location belongs. + PriceGroupCount *uint `json:"priceGroupCount,omitempty" xmlrpc:"priceGroupCount,omitempty"` + + // A location can be a member of 1 or more Price Groups. This will show which groups to which a location belongs. + PriceGroups []Location_Group `json:"priceGroups,omitempty" xmlrpc:"priceGroups,omitempty"` + + // A count of a location can be a member of 1 or more regions. This will show which regions to which a location belongs. + RegionCount *uint `json:"regionCount,omitempty" xmlrpc:"regionCount,omitempty"` + + // A location can be a member of 1 or more regions. This will show which regions to which a location belongs. + Regions []Location_Region `json:"regions,omitempty" xmlrpc:"regions,omitempty"` + + // no documentation yet + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // no documentation yet + Timezone *Locale_Timezone `json:"timezone,omitempty" xmlrpc:"timezone,omitempty"` + + // A location can be a member of 1 Bandwidth Pooling Group. This will show which group to which a location belongs. + VdrGroup *Location_Group_Location_CrossReference `json:"vdrGroup,omitempty" xmlrpc:"vdrGroup,omitempty"` +} + +// SoftLayer_Location_Datacenter extends the [[SoftLayer_Location]] data type to include datacenter-specific properties. +type Location_Datacenter struct { + Location + + // A count of + ActiveItemPresaleEventCount *uint `json:"activeItemPresaleEventCount,omitempty" xmlrpc:"activeItemPresaleEventCount,omitempty"` + + // no documentation yet + ActiveItemPresaleEvents []Sales_Presale_Event `json:"activeItemPresaleEvents,omitempty" xmlrpc:"activeItemPresaleEvents,omitempty"` + + // A count of + BackendHardwareRouterCount *uint `json:"backendHardwareRouterCount,omitempty" xmlrpc:"backendHardwareRouterCount,omitempty"` + + // no documentation yet + BackendHardwareRouters []Hardware `json:"backendHardwareRouters,omitempty" xmlrpc:"backendHardwareRouters,omitempty"` + + // A count of subnets which are directly bound to one or more routers in a given datacenter, and currently allow routing. + BoundSubnetCount *uint `json:"boundSubnetCount,omitempty" xmlrpc:"boundSubnetCount,omitempty"` + + // Subnets which are directly bound to one or more routers in a given datacenter, and currently allow routing. + BoundSubnets []Network_Subnet `json:"boundSubnets,omitempty" xmlrpc:"boundSubnets,omitempty"` + + // A count of this references relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on this brand for customers that live in Great Britain. + BrandCountryRestrictionCount *uint `json:"brandCountryRestrictionCount,omitempty" xmlrpc:"brandCountryRestrictionCount,omitempty"` + + // This references relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. 
For example, the India datacenter may not be available on this brand for customers that live in Great Britain. + BrandCountryRestrictions []Brand_Restriction_Location_CustomerCountry `json:"brandCountryRestrictions,omitempty" xmlrpc:"brandCountryRestrictions,omitempty"` + + // A count of + FrontendHardwareRouterCount *uint `json:"frontendHardwareRouterCount,omitempty" xmlrpc:"frontendHardwareRouterCount,omitempty"` + + // no documentation yet + FrontendHardwareRouters []Hardware `json:"frontendHardwareRouters,omitempty" xmlrpc:"frontendHardwareRouters,omitempty"` + + // A count of + HardwareRouterCount *uint `json:"hardwareRouterCount,omitempty" xmlrpc:"hardwareRouterCount,omitempty"` + + // no documentation yet + HardwareRouters []Hardware `json:"hardwareRouters,omitempty" xmlrpc:"hardwareRouters,omitempty"` + + // A count of + PresaleEventCount *uint `json:"presaleEventCount,omitempty" xmlrpc:"presaleEventCount,omitempty"` + + // no documentation yet + PresaleEvents []Sales_Presale_Event `json:"presaleEvents,omitempty" xmlrpc:"presaleEvents,omitempty"` + + // The regional group this datacenter belongs to. + RegionalGroup *Location_Group_Regional `json:"regionalGroup,omitempty" xmlrpc:"regionalGroup,omitempty"` + + // no documentation yet + RegionalInternetRegistry *Network_Regional_Internet_Registry `json:"regionalInternetRegistry,omitempty" xmlrpc:"regionalInternetRegistry,omitempty"` + + // A count of retrieve all subnets that are eligible to be routed; those which the account has permission to associate with a vlan. + RoutableBoundSubnetCount *uint `json:"routableBoundSubnetCount,omitempty" xmlrpc:"routableBoundSubnetCount,omitempty"` + + // Retrieve all subnets that are eligible to be routed; those which the account has permission to associate with a vlan. + RoutableBoundSubnets []Network_Subnet `json:"routableBoundSubnets,omitempty" xmlrpc:"routableBoundSubnets,omitempty"` +} + +// no documentation yet +type Location_Group struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of the locations in a group. + LocationCount *uint `json:"locationCount,omitempty" xmlrpc:"locationCount,omitempty"` + + // The type for this location group. + LocationGroupType *Location_Group_Type `json:"locationGroupType,omitempty" xmlrpc:"locationGroupType,omitempty"` + + // no documentation yet + LocationGroupTypeId *int `json:"locationGroupTypeId,omitempty" xmlrpc:"locationGroupTypeId,omitempty"` + + // The locations in a group. 
+ Locations []Location `json:"locations,omitempty" xmlrpc:"locations,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + SecurityLevelId *int `json:"securityLevelId,omitempty" xmlrpc:"securityLevelId,omitempty"` +} + +// no documentation yet +type Location_Group_Location_CrossReference struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + LocationGroup *Location_Group `json:"locationGroup,omitempty" xmlrpc:"locationGroup,omitempty"` + + // no documentation yet + LocationGroupId *int `json:"locationGroupId,omitempty" xmlrpc:"locationGroupId,omitempty"` + + // no documentation yet + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // If set, this is the priority of this cross reference record in the group. + Priority *int `json:"priority,omitempty" xmlrpc:"priority,omitempty"` +} + +// no documentation yet +type Location_Group_Pricing struct { + Location_Group + + // A count of the prices that this pricing location group limits. All of these prices will only be available in the locations defined by this pricing location group. + PriceCount *uint `json:"priceCount,omitempty" xmlrpc:"priceCount,omitempty"` + + // The prices that this pricing location group limits. All of these prices will only be available in the locations defined by this pricing location group. + Prices []Product_Item_Price `json:"prices,omitempty" xmlrpc:"prices,omitempty"` +} + +// no documentation yet +type Location_Group_Regional struct { + Location_Group + + // A count of the datacenters in a group. + DatacenterCount *uint `json:"datacenterCount,omitempty" xmlrpc:"datacenterCount,omitempty"` + + // The datacenters in a group. + Datacenters []Location `json:"datacenters,omitempty" xmlrpc:"datacenters,omitempty"` + + // The preferred datacenters of a group. + PreferredDatacenter *Location_Datacenter `json:"preferredDatacenter,omitempty" xmlrpc:"preferredDatacenter,omitempty"` +} + +// no documentation yet +type Location_Group_Type struct { + Entity + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// SoftLayer_Location_Inventory_Room extends the [[SoftLayer_Location]] data type to include inventory room-specific properties. +type Location_Inventory_Room struct { + Location +} + +// SoftLayer_Location_Network_Operations_Center extends the [[SoftLayer_Location]] data type to include network operation center-specific properties. +type Location_Network_Operations_Center struct { + Location +} + +// SoftLayer_Location_Office extends the [[SoftLayer_Location]] data type to include office-specific properties. +type Location_Office struct { + Location +} + +// SoftLayer_Location_Rack extends the [[SoftLayer_Location]] data type to include rack-specific properties. +type Location_Rack struct { + Location +} + +// A region is made up of a keyname and a description of that region. A region keyname can be used as part of an order. Check the SoftLayer_Product_Order service for more details. +type Location_Region struct { + Entity + + // A short description of a region's name. This description is seen on the order forms. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A unique key name for a region. Provided for easy debugging. 
This is to be sent in with an order. + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // Each region can have many locations tied to it. However, this is the location we currently provision to for a region. This location is the current valid location for a region. (Deprecated, use 'locations') + Location *Location_Region_Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // A count of the locations (like datacenters or PoPs) in this region. + LocationCount *uint `json:"locationCount,omitempty" xmlrpc:"locationCount,omitempty"` + + // The locations (like datacenters or PoPs) in this region. + Locations []Location_Region_Location `json:"locations,omitempty" xmlrpc:"locations,omitempty"` + + // An integer representing the order in which this element is displayed. + SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"` +} + +// The SoftLayer_Location_Region_Location is very specific to the location where services will actually be provisioned. When accessed through a package, this location is the top priority location for a region. All new servers and services are provisioned at this location. When a server is ordered and a region is selected, this is the location within that region where the server will actually exist and have software/services installed. +type Location_Region_Location struct { + Entity + + // The SoftLayer_Location tied to a region's location. This provides more information about the location, including specific datacenter information. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // A count of a region's location also has delivery information as well as other information to be determined. For now, availability is provided and could weigh into the decision as to where to decide to have a server provisioned.' + LocationPackageDetailCount *uint `json:"locationPackageDetailCount,omitempty" xmlrpc:"locationPackageDetailCount,omitempty"` + + // A region's location also has delivery information as well as other information to be determined. For now, availability is provided and could weigh into the decision as to where to decide to have a server provisioned.' + LocationPackageDetails []Product_Package_Locations `json:"locationPackageDetails,omitempty" xmlrpc:"locationPackageDetails,omitempty"` + + // The region to which this location belongs. + Region *Location_Region `json:"region,omitempty" xmlrpc:"region,omitempty"` +} + +// no documentation yet +type Location_Reservation struct { + Entity + + // The account that a billing item belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The bandwidth allotment that the reservation belongs to. + Allotment *Network_Bandwidth_Version1_Allotment `json:"allotment,omitempty" xmlrpc:"allotment,omitempty"` + + // no documentation yet + AllotmentId *int `json:"allotmentId,omitempty" xmlrpc:"allotmentId,omitempty"` + + // The bandwidth allotment that the reservation belongs to. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The datacenter location that the reservation belongs to. 
+ Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // Rack information for the reservation + LocationReservationRack *Location_Reservation_Rack `json:"locationReservationRack,omitempty" xmlrpc:"locationReservationRack,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` +} + +// no documentation yet +type Location_Reservation_Rack struct { + Entity + + // The bandwidth allotment that the reservation belongs to. + Allotment *Network_Bandwidth_Version1_Allotment `json:"allotment,omitempty" xmlrpc:"allotment,omitempty"` + + // Members of the rack. + Children []Location_Reservation_Rack_Member `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of members of the rack. + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // no documentation yet + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // no documentation yet + LocationReservation *Location_Reservation `json:"locationReservation,omitempty" xmlrpc:"locationReservation,omitempty"` + + // no documentation yet + LocationReservationId *int `json:"locationReservationId,omitempty" xmlrpc:"locationReservationId,omitempty"` + + // no documentation yet + NetworkConnectionCapacity *int `json:"networkConnectionCapacity,omitempty" xmlrpc:"networkConnectionCapacity,omitempty"` + + // no documentation yet + NetworkConnectionReservation *int `json:"networkConnectionReservation,omitempty" xmlrpc:"networkConnectionReservation,omitempty"` + + // no documentation yet + PowerConnectionCapacity *int `json:"powerConnectionCapacity,omitempty" xmlrpc:"powerConnectionCapacity,omitempty"` + + // no documentation yet + PowerConnectionReservation *int `json:"powerConnectionReservation,omitempty" xmlrpc:"powerConnectionReservation,omitempty"` + + // no documentation yet + SlotCapacity *int `json:"slotCapacity,omitempty" xmlrpc:"slotCapacity,omitempty"` + + // no documentation yet + SlotReservation *int `json:"slotReservation,omitempty" xmlrpc:"slotReservation,omitempty"` +} + +// no documentation yet +type Location_Reservation_Rack_Member struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Location relation for the rack member + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // no documentation yet + LocationReservationRack *Location_Reservation_Rack `json:"locationReservationRack,omitempty" xmlrpc:"locationReservationRack,omitempty"` +} + +// SoftLayer_Location_Root extends the [[SoftLayer_Location]] data type to include root-specific properties. +type Location_Root struct { + Location +} + +// SoftLayer_Location_Server_Room extends the [[SoftLayer_Location]] data type to include server room-specific properties. +type Location_Server_Room struct { + Location +} + +// SoftLayer_Location_Slot extends the [[SoftLayer_Location]] data type to include slot-specific properties. +type Location_Slot struct { + Location +} + +// SoftLayer_Location_Status models the state of any location. 
SoftLayer uses the following status codes: +// +// +// *'''ACTIVE''': The location is currently active and available for public usage. +// *'''PLANNED''': Used when a location is planned but not yet active. +// *'''RETIRED''': Used when a location has been retired and is no longer active. +// +// +// Locations in use should stay in the ACTIVE state. If a location's status ever reads anything else and the location contains active hardware, please contact SoftLayer support. +type Location_Status struct { + Entity + + // A location status's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A location's status code. See the SoftLayer_Location_Status Overview for ''status''' possible values. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// SoftLayer_Location_Storage_Room extends the [[SoftLayer_Location]] data type to include storage room-specific properties. +type Location_Storage_Room struct { + Location +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/marketplace.go b/vendor/github.com/softlayer/softlayer-go/datatypes/marketplace.go new file mode 100644 index 000000000000..9e5b5a1c3829 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/marketplace.go @@ -0,0 +1,195 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Marketplace_EmailDistribution struct { + Entity + + // no documentation yet + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// no documentation yet +type Marketplace_Partner struct { + Entity + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + AttachedFiles []Marketplace_Partner_Attachment `json:"attachedFiles,omitempty" xmlrpc:"attachedFiles,omitempty"` + + // A count of + AttachmentCount *uint `json:"attachmentCount,omitempty" xmlrpc:"attachmentCount,omitempty"` + + // no documentation yet + Attachments []Marketplace_Partner_Attachment `json:"attachments,omitempty" xmlrpc:"attachments,omitempty"` + + // no documentation yet + CompanyDescription *string `json:"companyDescription,omitempty" xmlrpc:"companyDescription,omitempty"` + + // no documentation yet + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // no documentation yet + HeadlineDescription *string `json:"headlineDescription,omitempty" xmlrpc:"headlineDescription,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + LinkFreeTrial *string `json:"linkFreeTrial,omitempty" xmlrpc:"linkFreeTrial,omitempty"` + + // no documentation yet + LinkOrderPage *string `json:"linkOrderPage,omitempty" xmlrpc:"linkOrderPage,omitempty"` + + // no documentation yet + LinkWebsite *string `json:"linkWebsite,omitempty" xmlrpc:"linkWebsite,omitempty"` + + // no documentation yet + LogoMedium *Marketplace_Partner_Attachment `json:"logoMedium,omitempty" xmlrpc:"logoMedium,omitempty"` + + // no documentation yet + LogoMediumTemp *Marketplace_Partner_Attachment `json:"logoMediumTemp,omitempty" xmlrpc:"logoMediumTemp,omitempty"` + + // no documentation yet + LogoSmall *Marketplace_Partner_Attachment `json:"logoSmall,omitempty" xmlrpc:"logoSmall,omitempty"` + + // no documentation yet + LogoSmallTemp *Marketplace_Partner_Attachment `json:"logoSmallTemp,omitempty" xmlrpc:"logoSmallTemp,omitempty"` + + // no documentation yet + MetaDescription *string `json:"metaDescription,omitempty" xmlrpc:"metaDescription,omitempty"` + + // no documentation yet + MetaKeywords *string `json:"metaKeywords,omitempty" xmlrpc:"metaKeywords,omitempty"` + + // no documentation yet + ProductBenefits *string `json:"productBenefits,omitempty" xmlrpc:"productBenefits,omitempty"` + + // no documentation yet + ProductCategoryId *int `json:"productCategoryId,omitempty" xmlrpc:"productCategoryId,omitempty"` + + // no documentation yet + ProductDescriptionLong *string `json:"productDescriptionLong,omitempty" xmlrpc:"productDescriptionLong,omitempty"` + + // no documentation yet + ProductDescriptionShort *string `json:"productDescriptionShort,omitempty" xmlrpc:"productDescriptionShort,omitempty"` + + // no documentation yet + ProductFeatures *string `json:"productFeatures,omitempty" xmlrpc:"productFeatures,omitempty"` + + // no documentation yet + ProductName *string `json:"productName,omitempty" xmlrpc:"productName,omitempty"` + + // no documentation yet + ProductTitle *string `json:"productTitle,omitempty" xmlrpc:"productTitle,omitempty"` + + // no documentation yet + UrlIdentifier *string `json:"urlIdentifier,omitempty" xmlrpc:"urlIdentifier,omitempty"` +} + +// no documentation 
yet +type Marketplace_Partner_Attachment struct { + Entity + + // no documentation yet + AttachmentType *Marketplace_Partner_Attachment_Type `json:"attachmentType,omitempty" xmlrpc:"attachmentType,omitempty"` + + // no documentation yet + AttachmentTypeId *int `json:"attachmentTypeId,omitempty" xmlrpc:"attachmentTypeId,omitempty"` + + // no documentation yet + BaseName *string `json:"baseName,omitempty" xmlrpc:"baseName,omitempty"` + + // no documentation yet + DisplayName *string `json:"displayName,omitempty" xmlrpc:"displayName,omitempty"` + + // no documentation yet + FileName *string `json:"fileName,omitempty" xmlrpc:"fileName,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + MarketplacePartnerId *int `json:"marketplacePartnerId,omitempty" xmlrpc:"marketplacePartnerId,omitempty"` + + // no documentation yet + SaveAsName *string `json:"saveAsName,omitempty" xmlrpc:"saveAsName,omitempty"` +} + +// no documentation yet +type Marketplace_Partner_Attachment_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Marketplace_Partner_File struct { + Entity + + // no documentation yet + Attributes *Marketplace_Partner_File_Attributes `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // no documentation yet + Contents *[]byte `json:"contents,omitempty" xmlrpc:"contents,omitempty"` +} + +// no documentation yet +type Marketplace_Partner_File_Attributes struct { + Entity + + // no documentation yet + Bits *int `json:"bits,omitempty" xmlrpc:"bits,omitempty"` + + // no documentation yet + Channels *int `json:"channels,omitempty" xmlrpc:"channels,omitempty"` + + // no documentation yet + Height *int `json:"height,omitempty" xmlrpc:"height,omitempty"` + + // no documentation yet + HtmlAttributes *string `json:"htmlAttributes,omitempty" xmlrpc:"htmlAttributes,omitempty"` + + // no documentation yet + ImageType *int `json:"imageType,omitempty" xmlrpc:"imageType,omitempty"` + + // no documentation yet + IsImage *bool `json:"isImage,omitempty" xmlrpc:"isImage,omitempty"` + + // no documentation yet + MimeType *string `json:"mimeType,omitempty" xmlrpc:"mimeType,omitempty"` + + // no documentation yet + Width *int `json:"width,omitempty" xmlrpc:"width,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/mcafee.go b/vendor/github.com/softlayer/softlayer-go/datatypes/mcafee.go new file mode 100644 index 000000000000..dbb99d5c20d4 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/mcafee.go @@ -0,0 +1,319 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The McAfee_Epolicy_Orchestrator_Version36_Agent_Details data type represents a virus scan agent and contains details about its version. +type McAfee_Epolicy_Orchestrator_Version36_Agent_Details struct { + Entity + + // Version number of the anti-virus scan agent. + AgentVersion *string `json:"agentVersion,omitempty" xmlrpc:"agentVersion,omitempty"` + + // The current anti-virus policy of an agent. + CurrentPolicy *McAfee_Epolicy_Orchestrator_Version36_Agent_Parent_Details `json:"currentPolicy,omitempty" xmlrpc:"currentPolicy,omitempty"` + + // The date of the last time the anti-virus agent checked in. + LastUpdate *string `json:"lastUpdate,omitempty" xmlrpc:"lastUpdate,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Agent_Parent_Details data type contains the name of an anti-virus policy. +type McAfee_Epolicy_Orchestrator_Version36_Agent_Parent_Details struct { + Entity + + // The current anti-virus policy of an agent. + CurrentPolicy *McAfee_Epolicy_Orchestrator_Version36_Agent_Parent_Details `json:"currentPolicy,omitempty" xmlrpc:"currentPolicy,omitempty"` + + // The name of a policy. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event data type represents a single anti-virus event. It contains details about the event such as the date the event occurred, the virus that is detected and the action that is taken. +type McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event struct { + Entity + + // The date when an anti-virus event occurs. + EventLocalDateTime *Time `json:"eventLocalDateTime,omitempty" xmlrpc:"eventLocalDateTime,omitempty"` + + // Name of the file found to be infected. + Filename *string `json:"filename,omitempty" xmlrpc:"filename,omitempty"` + + // The action taken when a virus is detected. + VirusActionTaken *McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event_Filter_Description `json:"virusActionTaken,omitempty" xmlrpc:"virusActionTaken,omitempty"` + + // The name of a virus that is found. + VirusName *string `json:"virusName,omitempty" xmlrpc:"virusName,omitempty"` + + // The type of virus that is found. + VirusType *string `json:"virusType,omitempty" xmlrpc:"virusType,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event_AccessProtection data type represents an access protection event. It contains details about the event such as when it occurs, the process that caused it, and the rule that triggered the event. +type McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event_AccessProtection struct { + Entity + + // The date that an access protection event occurs. + EventLocalDateTime *Time `json:"eventLocalDateTime,omitempty" xmlrpc:"eventLocalDateTime,omitempty"` + + // The name of the file that was protected from access. + Filename *string `json:"filename,omitempty" xmlrpc:"filename,omitempty"` + + // The name of the process that was protected from access. + ProcessName *string `json:"processName,omitempty" xmlrpc:"processName,omitempty"` + + // The name of the rule that triggered an access protection event. + RuleName *string `json:"ruleName,omitempty" xmlrpc:"ruleName,omitempty"` + + // The IP address that caused an access protection event. 
+ Source *string `json:"source,omitempty" xmlrpc:"source,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event_Filter_Description data type contains the name of the rule that was triggered by an anti-virus event. +type McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event_Filter_Description struct { + Entity + + // The name of the rule that triggered an anti-virus event. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_BlockedApplicationEvent data type contains a single blocked application event. The details of the event are the time the event occurred, the process that generated the event and a brief description of the application that was blocked. +type McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_BlockedApplicationEvent struct { + Entity + + // A brief description of the application that is blocked. + ApplicationDescription *string `json:"applicationDescription,omitempty" xmlrpc:"applicationDescription,omitempty"` + + // The time that an application is blocked. + IncidentTime *Time `json:"incidentTime,omitempty" xmlrpc:"incidentTime,omitempty"` + + // The name of a process that is blocked. + ProcessName *string `json:"processName,omitempty" xmlrpc:"processName,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_Event_Signature data type contains the signature name of a rule that generated an IPS event. +type McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_Event_Signature struct { + Entity + + // The name of a rule that triggered an IPS event. + SignatureName *string `json:"signatureName,omitempty" xmlrpc:"signatureName,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_IPSEvent data type represents a single IPS event. It contains details about the event such as the date the event occurred, the process that generated it, the severity of the event, and the action taken. +type McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_IPSEvent struct { + Entity + + // The time when an IPS event occurred. + IncidentTime *Time `json:"incidentTime,omitempty" xmlrpc:"incidentTime,omitempty"` + + // Name of the process that generated an IPS event. + ProcessName *string `json:"processName,omitempty" xmlrpc:"processName,omitempty"` + + // The action taken because of an IPS event. + ReactionText *string `json:"reactionText,omitempty" xmlrpc:"reactionText,omitempty"` + + // The IP address that generated an IPS event. + RemoteIpAddress *string `json:"remoteIpAddress,omitempty" xmlrpc:"remoteIpAddress,omitempty"` + + // The severity level for an IPS event. + SeverityText *string `json:"severityText,omitempty" xmlrpc:"severityText,omitempty"` + + // The signature that generated an IPS event. + Signature *McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_Event_Signature `json:"signature,omitempty" xmlrpc:"signature,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_BlockedApplicationEvent data type contains a single blocked application event. The details of the event are the time the event occurred, the process that generated the event and a brief description of the application that was blocked. +type McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_BlockedApplicationEvent struct { + Entity + + // A brief description of the application that is blocked. + ApplicationDescription *string `json:"applicationDescription,omitempty" xmlrpc:"applicationDescription,omitempty"` + + // The time that an application is blocked. 
+ IncidentTime *Time `json:"incidentTime,omitempty" xmlrpc:"incidentTime,omitempty"` + + // The name of a process that is blocked. + ProcessName *string `json:"processName,omitempty" xmlrpc:"processName,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_Event_Signature data type contains the signature name of a rule that generated an IPS event. +type McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_Event_Signature struct { + Entity + + // The name of a rule that triggered an IPS event. + SignatureName *string `json:"signatureName,omitempty" xmlrpc:"signatureName,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_IPSEvent data type represents a single IPS event. It contains details about the event such as the date the event occurred, the process that generated it, the severity of the event, and the action taken. +type McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_IPSEvent struct { + Entity + + // The time when an IPS event occurred. + IncidentTime *Time `json:"incidentTime,omitempty" xmlrpc:"incidentTime,omitempty"` + + // Name of the process that generated an IPS event. + ProcessName *string `json:"processName,omitempty" xmlrpc:"processName,omitempty"` + + // The action taken because of an IPS event. + ReactionText *string `json:"reactionText,omitempty" xmlrpc:"reactionText,omitempty"` + + // The IP address that generated an IPS event. + RemoteIpAddress *string `json:"remoteIpAddress,omitempty" xmlrpc:"remoteIpAddress,omitempty"` + + // The severity level for an IPS event. + SeverityText *string `json:"severityText,omitempty" xmlrpc:"severityText,omitempty"` + + // The signature that generated an IPS event. + Signature *McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_Event_Signature `json:"signature,omitempty" xmlrpc:"signature,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Policy_Object data type contains the name of a policy that may be assigned to a server. +type McAfee_Epolicy_Orchestrator_Version36_Policy_Object struct { + Entity + + // The name of a policy. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Product_Properties data type contains the virus definition file version. +type McAfee_Epolicy_Orchestrator_Version36_Product_Properties struct { + Entity + + // The virus definition file version. + DatVersion *string `json:"datVersion,omitempty" xmlrpc:"datVersion,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Agent_Details data type represents a virus scan agent and contains details about its version. +type McAfee_Epolicy_Orchestrator_Version45_Agent_Details struct { + Entity + + // Version number of the anti-virus scan agent. + AgentVersion *string `json:"agentVersion,omitempty" xmlrpc:"agentVersion,omitempty"` + + // The date of the last time the anti-virus agent checked in. + LastUpdate *Time `json:"lastUpdate,omitempty" xmlrpc:"lastUpdate,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Agent_Parent_Details data type contains the name of an anti-virus policy. +type McAfee_Epolicy_Orchestrator_Version45_Agent_Parent_Details struct { + Entity + + // Additional information about an agent. + AgentDetails *McAfee_Epolicy_Orchestrator_Version45_Agent_Details `json:"agentDetails,omitempty" xmlrpc:"agentDetails,omitempty"` + + // The name of a policy. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The current anti-virus policy of an agent. 
+ Policies []McAfee_Epolicy_Orchestrator_Version45_Agent_Parent_Details `json:"policies,omitempty" xmlrpc:"policies,omitempty"` + + // A count of the current anti-virus policy of an agent. + PolicyCount *uint `json:"policyCount,omitempty" xmlrpc:"policyCount,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Event data type represents a single event. It contains details about the event such as the date the event occurred, the virus or intrusion that is detected and the action that is taken. +type McAfee_Epolicy_Orchestrator_Version45_Event struct { + Entity + + // Additional information about an agent. + AgentDetails *McAfee_Epolicy_Orchestrator_Version45_Agent_Details `json:"agentDetails,omitempty" xmlrpc:"agentDetails,omitempty"` + + // The time that an event was detected. + DetectedUtc *Time `json:"detectedUtc,omitempty" xmlrpc:"detectedUtc,omitempty"` + + // The IP address of the source that generated an event. + SourceIpv4 *string `json:"sourceIpv4,omitempty" xmlrpc:"sourceIpv4,omitempty"` + + // The name of the process that generated an event. + SourceProcessName *string `json:"sourceProcessName,omitempty" xmlrpc:"sourceProcessName,omitempty"` + + // The name of the file that was the target of the event. + TargetFilename *string `json:"targetFilename,omitempty" xmlrpc:"targetFilename,omitempty"` + + // The action taken regarding a threat. + ThreatActionTaken *string `json:"threatActionTaken,omitempty" xmlrpc:"threatActionTaken,omitempty"` + + // The name of the threat. + ThreatName *string `json:"threatName,omitempty" xmlrpc:"threatName,omitempty"` + + // The textual representation of the severity level. + ThreatSeverityLabel *string `json:"threatSeverityLabel,omitempty" xmlrpc:"threatSeverityLabel,omitempty"` + + // The type of threat. + ThreatType *string `json:"threatType,omitempty" xmlrpc:"threatType,omitempty"` + + // The action taken when a virus is detected. + VirusActionTaken *McAfee_Epolicy_Orchestrator_Version45_Event_Filter_Description `json:"virusActionTaken,omitempty" xmlrpc:"virusActionTaken,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Event_Filter_Description data type contains the name of the rule that was triggered by an event. +type McAfee_Epolicy_Orchestrator_Version45_Event_Filter_Description struct { + Entity + + // The name of the rule that triggered an event. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Event_Version7 data type represents a single event. It contains details about the event such as the date the event occurred, the virus or intrusion that is detected and the action that is taken. +type McAfee_Epolicy_Orchestrator_Version45_Event_Version7 struct { + McAfee_Epolicy_Orchestrator_Version45_Event + + // The signature information for an event. + Signature *McAfee_Epolicy_Orchestrator_Version45_Hips_Event_Signature_Version7 `json:"signature,omitempty" xmlrpc:"signature,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Event_Version8 data type represents a single event. It contains details about the event such as the date the event occurred, the virus or intrusion that is detected and the action that is taken. +type McAfee_Epolicy_Orchestrator_Version45_Event_Version8 struct { + McAfee_Epolicy_Orchestrator_Version45_Event + + // The signature information for an event. 
+ Signature *McAfee_Epolicy_Orchestrator_Version45_Hips_Event_Signature_Version8 `json:"signature,omitempty" xmlrpc:"signature,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Hips_Event_Signature_Version7 data type contains the signature name of a rule that generated an IPS event. +type McAfee_Epolicy_Orchestrator_Version45_Hips_Event_Signature_Version7 struct { + Entity + + // The name of a rule that triggered an IPS event. + SignatureName *string `json:"signatureName,omitempty" xmlrpc:"signatureName,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Hips_Event_Signature_Version8 data type contains the signature name of a rule that generated an IPS event. +type McAfee_Epolicy_Orchestrator_Version45_Hips_Event_Signature_Version8 struct { + Entity + + // The name of a rule that triggered an IPS event. + SignatureName *string `json:"signatureName,omitempty" xmlrpc:"signatureName,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Policy_Object data type contains the name of a policy that may be assigned to a server. +type McAfee_Epolicy_Orchestrator_Version45_Policy_Object struct { + Entity + + // The name of a policy. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Product_Properties data type contains the virus definition file version. +type McAfee_Epolicy_Orchestrator_Version45_Product_Properties struct { + Entity + + // The virus definition file version. + DatVersion *string `json:"datVersion,omitempty" xmlrpc:"datVersion,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/metric.go b/vendor/github.com/softlayer/softlayer-go/datatypes/metric.go new file mode 100644 index 000000000000..46cfef55ac9f --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/metric.go @@ -0,0 +1,207 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// Metric tracking objects provides a common interface to all metrics provided by SoftLayer. These metrics range from network component traffic for a server to aggregated Bandwidth Pooling traffic and more. Every object within SoftLayer's range of objects that has data that can be tracked over time has an associated tracking object. Use the [[SoftLayer_Metric_Tracking_Object]] service to retrieve raw and graph data from a tracking object. +type Metric_Tracking_Object struct { + Entity + + // The data recorded by a tracking object. + Data []Metric_Tracking_Object_Data `json:"data,omitempty" xmlrpc:"data,omitempty"` + + // A tracking object's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Tracking object label + Label *string `json:"label,omitempty" xmlrpc:"label,omitempty"` + + // The identifier of the existing resource this object is attempting to track. 
+ ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+
+ // The date this tracker began tracking this particular resource.
+ StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+ // The type of data that a tracking object polls.
+ Type *Metric_Tracking_Object_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// SoftLayer_Metric_Tracking_Object_Abstract models a generic tracking object type. Typically a tracking object with a specific purpose has its own data type defined within the SoftLayer API.
+type Metric_Tracking_Object_Abstract struct {
+ Metric_Tracking_Object
+}
+
+// This data type provides commonly used bandwidth summary components for the current billing cycle.
+type Metric_Tracking_Object_Bandwidth_Summary struct {
+ Entity
+
+ // This is the amount of bandwidth (measured in gigabytes) allocated for this tracking object.
+ AllocationAmount *Float64 `json:"allocationAmount,omitempty" xmlrpc:"allocationAmount,omitempty"`
+
+ // no documentation yet
+ AllocationId *int `json:"allocationId,omitempty" xmlrpc:"allocationId,omitempty"`
+
+ // The amount of outbound bandwidth (measured in gigabytes) currently used this billing period. Same as $outboundBandwidthAmount. Aliased for backward compatibility.
+ AmountOut *Float64 `json:"amountOut,omitempty" xmlrpc:"amountOut,omitempty"`
+
+ // The daily average amount of outbound bandwidth usage.
+ AverageDailyUsage *Float64 `json:"averageDailyUsage,omitempty" xmlrpc:"averageDailyUsage,omitempty"`
+
+ // A flag that tells whether or not this tracking object's bandwidth usage is already over the allocation. 1 means yes, 0 means no.
+ CurrentlyOverAllocationFlag *int `json:"currentlyOverAllocationFlag,omitempty" xmlrpc:"currentlyOverAllocationFlag,omitempty"`
+
+ // The metric tracking id for this resource.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The amount of outbound bandwidth (measured in gigabytes) currently used this billing period.
+ OutboundBandwidthAmount *Float64 `json:"outboundBandwidthAmount,omitempty" xmlrpc:"outboundBandwidthAmount,omitempty"`
+
+ // The amount of bandwidth (measured in gigabytes) of projected usage, using a basic average calculation of daily usage.
+ ProjectedBandwidthUsage *Float64 `json:"projectedBandwidthUsage,omitempty" xmlrpc:"projectedBandwidthUsage,omitempty"`
+
+ // A flag that tells whether or not this tracking object's bandwidth usage is projected to go over the allocation, based on daily average usage. 1 means yes, 0 means no.
+ ProjectedOverAllocationFlag *int `json:"projectedOverAllocationFlag,omitempty" xmlrpc:"projectedOverAllocationFlag,omitempty"`
+}
+
+// SoftLayer_Metric_Tracking_Object_Data models an individual unit of data tracked by a SoftLayer tracking object, including the type of data polled, the date it was polled at, and the counter value that was measured at polling time.
+type Metric_Tracking_Object_Data struct {
+ Entity
+
+ // The value stored for a data record.
+ Counter *Float64 `json:"counter,omitempty" xmlrpc:"counter,omitempty"`
+
+ // The time a data record was stored.
+ DateTime *Time `json:"dateTime,omitempty" xmlrpc:"dateTime,omitempty"`
+
+ // The type of data held in a record.
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// SoftLayer_Metric_Tracking_Object_Data_Network_ContentDelivery_Account models usage data polled from the CDN system.
+type Metric_Tracking_Object_Data_Network_ContentDelivery_Account struct { + Metric_Tracking_Object_Data + + // The name of a file. This value is only populated in file-based bandwidth reports. + FileName *string `json:"fileName,omitempty" xmlrpc:"fileName,omitempty"` + + // The internal identifier of a CDN POP (Points of Presence). + PopId *int `json:"popId,omitempty" xmlrpc:"popId,omitempty"` +} + +// SoftLayer_Metric_Tracking_Object_HardwareServer models tracking objects specific to physical hardware and the data that are recorded by those servers. +type Metric_Tracking_Object_HardwareServer struct { + Metric_Tracking_Object_Abstract + + // The raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to. + BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"` + + // A count of the raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to. + BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"` + + // The raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to. + BillingCyclePrivateBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"` + + // A count of the raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to. + BillingCyclePrivateBandwidthUsageCount *uint `json:"billingCyclePrivateBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsageCount,omitempty"` + + // The total private inbound bandwidth for this item's resource for the current billing cycle. + BillingCyclePrivateUsageIn *Float64 `json:"billingCyclePrivateUsageIn,omitempty" xmlrpc:"billingCyclePrivateUsageIn,omitempty"` + + // The total private outbound bandwidth for this item's resource for the current billing cycle. + BillingCyclePrivateUsageOut *Float64 `json:"billingCyclePrivateUsageOut,omitempty" xmlrpc:"billingCyclePrivateUsageOut,omitempty"` + + // The total private bandwidth for this item's resource for the current billing cycle. + BillingCyclePrivateUsageTotal *uint `json:"billingCyclePrivateUsageTotal,omitempty" xmlrpc:"billingCyclePrivateUsageTotal,omitempty"` + + // The raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to. + BillingCyclePublicBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"` + + // The total public inbound bandwidth for this item's resource for the current billing cycle. + BillingCyclePublicUsageIn *Float64 `json:"billingCyclePublicUsageIn,omitempty" xmlrpc:"billingCyclePublicUsageIn,omitempty"` + + // The total public outbound bandwidth for this item's resource for the current billing cycle. + BillingCyclePublicUsageOut *Float64 `json:"billingCyclePublicUsageOut,omitempty" xmlrpc:"billingCyclePublicUsageOut,omitempty"` + + // The total public bandwidth for this item's resource for the current billing cycle. + BillingCyclePublicUsageTotal *uint `json:"billingCyclePublicUsageTotal,omitempty" xmlrpc:"billingCyclePublicUsageTotal,omitempty"` + + // The server that this tracking object tracks. 
+ Resource *Hardware_Server `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// SoftLayer [[SoftLayer_Metric_Tracking_Object|tracking objects]] can model various kinds of measured data, from server and virtual server bandwidth usage to CPU use to remote storage usage. SoftLayer_Metric_Tracking_Object_Type models one of these types and is referred to in tracking objects to reflect what type of data they track.
+type Metric_Tracking_Object_Type struct {
+ Entity
+
+ // A tracking object type's key name. This is a shorter description of what kind of data a tracking object group is polling.
+ Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"`
+
+ // A tracking object type's name. This describes what kind of data a tracking object group is polling.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// SoftLayer_Metric_Tracking_Object_VirtualDedicatedRack models tracking objects specific to virtual dedicated racks. Bandwidth Pooling aggregates the bandwidth used by multiple servers within the rack.
+type Metric_Tracking_Object_VirtualDedicatedRack struct {
+ Metric_Tracking_Object_Abstract
+
+ // The raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to.
+ BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"`
+
+ // A count of the raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to.
+ BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"`
+
+ // The raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to.
+ BillingCyclePrivateBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"`
+
+ // A count of the raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to.
+ BillingCyclePrivateBandwidthUsageCount *uint `json:"billingCyclePrivateBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsageCount,omitempty"`
+
+ // The total private inbound bandwidth for this item's resource for the current billing cycle.
+ BillingCyclePrivateUsageIn *Float64 `json:"billingCyclePrivateUsageIn,omitempty" xmlrpc:"billingCyclePrivateUsageIn,omitempty"`
+
+ // The total private outbound bandwidth for this item's resource for the current billing cycle.
+ BillingCyclePrivateUsageOut *Float64 `json:"billingCyclePrivateUsageOut,omitempty" xmlrpc:"billingCyclePrivateUsageOut,omitempty"`
+
+ // The total private bandwidth for this item's resource for the current billing cycle.
+ BillingCyclePrivateUsageTotal *uint `json:"billingCyclePrivateUsageTotal,omitempty" xmlrpc:"billingCyclePrivateUsageTotal,omitempty"`
+
+ // The raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to.
+ BillingCyclePublicBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"`
+
+ // The total public inbound bandwidth for this item's resource for the current billing cycle.
+ BillingCyclePublicUsageIn *Float64 `json:"billingCyclePublicUsageIn,omitempty" xmlrpc:"billingCyclePublicUsageIn,omitempty"` + + // The total public outbound bandwidth for this item's resource for the current billing cycle. + BillingCyclePublicUsageOut *Float64 `json:"billingCyclePublicUsageOut,omitempty" xmlrpc:"billingCyclePublicUsageOut,omitempty"` + + // The total public bandwidth for this item's resource for the current billing cycle. + BillingCyclePublicUsageTotal *uint `json:"billingCyclePublicUsageTotal,omitempty" xmlrpc:"billingCyclePublicUsageTotal,omitempty"` + + // The virtual rack that this tracking object tracks. + Resource *Network_Bandwidth_Version1_Allotment `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Metric_Tracking_Object_Virtual_Storage_Repository struct { + Metric_Tracking_Object_Abstract + + // The virtual storage repository that this tracking object tracks. + Resource *Virtual_Storage_Repository `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/monitoring.go b/vendor/github.com/softlayer/softlayer-go/datatypes/monitoring.go new file mode 100644 index 000000000000..174f63b05420 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/monitoring.go @@ -0,0 +1,248 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// A monitoring agent object contains information describing the agent. +type Monitoring_Agent struct { + Entity + + // The current status of the corresponding agent + AgentStatus *Monitoring_Agent_Status `json:"agentStatus,omitempty" xmlrpc:"agentStatus,omitempty"` + + // A count of all custom configuration profiles associated with the corresponding agent + ConfigurationProfileCount *uint `json:"configurationProfileCount,omitempty" xmlrpc:"configurationProfileCount,omitempty"` + + // All custom configuration profiles associated with the corresponding agent + ConfigurationProfiles []Configuration_Template_Section_Profile `json:"configurationProfiles,omitempty" xmlrpc:"configurationProfiles,omitempty"` + + // A template of an agent's current configuration which contains information about the structure of the configuration values. + ConfigurationTemplate *Configuration_Template `json:"configurationTemplate,omitempty" xmlrpc:"configurationTemplate,omitempty"` + + // Internal identifier of a configuration template that is used to configure this agent + ConfigurationTemplateId *int `json:"configurationTemplateId,omitempty" xmlrpc:"configurationTemplateId,omitempty"` + + // A count of the values associated with the corresponding Agent configuration. + ConfigurationValueCount *uint `json:"configurationValueCount,omitempty" xmlrpc:"configurationValueCount,omitempty"` + + // The values associated with the corresponding Agent configuration. 
+ ConfigurationValues []Monitoring_Agent_Configuration_Value `json:"configurationValues,omitempty" xmlrpc:"configurationValues,omitempty"`
+
+ // Created date
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // SoftLayer hardware related to the agent.
+ Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+ // Internal identifier of a monitoring agent
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Last modified date
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // Monitoring agent name
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // Contains general information relating to a single SoftLayer product.
+ ProductItem *Product_Item `json:"productItem,omitempty" xmlrpc:"productItem,omitempty"`
+
+ // Indicates if this monitoring agent resides on your local box or on a SoftLayer monitoring cluster.
+ RemoteMonitoringAgentFlag *bool `json:"remoteMonitoringAgentFlag,omitempty" xmlrpc:"remoteMonitoringAgentFlag,omitempty"`
+
+ // Internal identifier of a monitoring robot that this agent belongs to
+ RobotId *int `json:"robotId,omitempty" xmlrpc:"robotId,omitempty"`
+
+ // A description for a specific installation of a Software Component
+ SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"`
+
+ // Internal identifier of a monitoring agent status
+ StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+ // Monitoring agent status name.
+ StatusName *string `json:"statusName,omitempty" xmlrpc:"statusName,omitempty"`
+
+ // The SoftLayer_Virtual_Guest object related to the monitoring agent; this is the virtual guest that the agent runs on.
+ VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"`
+}
+
+// The SoftLayer_Monitoring_Agent_Configuration_Template_Group class consists of configuration templates for agents in a monitoring package.
+type Monitoring_Agent_Configuration_Template_Group struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // Internal identifier of a SoftLayer account that this configuration template belongs to + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of + ConfigurationTemplateCount *uint `json:"configurationTemplateCount,omitempty" xmlrpc:"configurationTemplateCount,omitempty"` + + // A count of + ConfigurationTemplateReferenceCount *uint `json:"configurationTemplateReferenceCount,omitempty" xmlrpc:"configurationTemplateReferenceCount,omitempty"` + + // no documentation yet + ConfigurationTemplateReferences []Monitoring_Agent_Configuration_Template_Group_Reference `json:"configurationTemplateReferences,omitempty" xmlrpc:"configurationTemplateReferences,omitempty"` + + // no documentation yet + ConfigurationTemplates []Configuration_Template `json:"configurationTemplates,omitempty" xmlrpc:"configurationTemplates,omitempty"` + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Description of a monitoring agent configuration group + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a monitoring agent configuration group + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // Internal identifier of a configuration template type + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // Last modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Name of a monitoring agent configuration group + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference class holds the reference information, essentially a SQL join, between a monitoring configuration group and agent configuration templates. 
+type Monitoring_Agent_Configuration_Template_Group_Reference struct {
+ Entity
+
+ // no documentation yet
+ ConfigurationTemplate *Configuration_Template `json:"configurationTemplate,omitempty" xmlrpc:"configurationTemplate,omitempty"`
+
+ // Internal identifier of a configuration template
+ ConfigurationTemplateId *int `json:"configurationTemplateId,omitempty" xmlrpc:"configurationTemplateId,omitempty"`
+
+ // Internal identifier of a configuration group reference record
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ TemplateGroup *Monitoring_Agent_Configuration_Template_Group `json:"templateGroup,omitempty" xmlrpc:"templateGroup,omitempty"`
+
+ // Internal identifier of a monitoring agent configuration group
+ TemplateGroupId *int `json:"templateGroupId,omitempty" xmlrpc:"templateGroupId,omitempty"`
+}
+
+// Monitoring agent configuration value
+type Monitoring_Agent_Configuration_Value struct {
+ Entity
+
+ // Internal identifier of the monitoring agent that this configuration value belongs to
+ AgentId *int `json:"agentId,omitempty" xmlrpc:"agentId,omitempty"`
+
+ // Internal identifier of the monitoring configuration definition by which this value is defined
+ ConfigurationDefinitionId *int `json:"configurationDefinitionId,omitempty" xmlrpc:"configurationDefinitionId,omitempty"`
+
+ // no documentation yet
+ Definition *Configuration_Template_Section_Definition `json:"definition,omitempty" xmlrpc:"definition,omitempty"`
+
+ // User-friendly description of a configuration value.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // Internal identifier of a monitoring configuration value.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The metric data type used to retrieve metric data currently being tracked.
+ MetricDataType *Container_Metric_Data_Type `json:"metricDataType,omitempty" xmlrpc:"metricDataType,omitempty"`
+
+ // no documentation yet
+ MonitoringAgent *Monitoring_Agent `json:"monitoringAgent,omitempty" xmlrpc:"monitoringAgent,omitempty"`
+
+ // no documentation yet
+ Profile *Configuration_Template_Section_Profile `json:"profile,omitempty" xmlrpc:"profile,omitempty"`
+
+ // Internal identifier of a configuration profile. A configuration profile is associated with a configuration section type of "Template section".
+ //
+ // A "Template section" defines skeleton configuration definitions. For instance, if you want to monitor additional hard disks with the "CPU/Memory/Disk Monitoring Agent", you will have to add new configuration profiles.
+ ProfileId *int `json:"profileId,omitempty" xmlrpc:"profileId,omitempty"`
+
+ // Configuration value
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// Monitoring agent status
+type Monitoring_Agent_Status struct {
+ Entity
+
+ // Description of a monitoring agent status
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // Internal identifier of a monitoring agent status
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Monitoring agent status name
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Monitoring_Robot data type contains general information relating to a monitoring robot.
+type Monitoring_Robot struct {
+ Entity
+
+ // The account associated with the corresponding robot.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // Internal identifier of a SoftLayer account that this robot belongs to
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // Internal identifier of a monitoring robot
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A count of the programs (monitoring agents) that gather details of a system or application, report metric data, and trigger alarms for predefined events.
+ MonitoringAgentCount *uint `json:"monitoringAgentCount,omitempty" xmlrpc:"monitoringAgentCount,omitempty"`
+
+ // The programs (monitoring agents) that gather details of a system or application, report metric data, and trigger alarms for predefined events.
+ MonitoringAgents []Monitoring_Agent `json:"monitoringAgents,omitempty" xmlrpc:"monitoringAgents,omitempty"`
+
+ // Robot name
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The current status of the robot.
+ RobotStatus *Monitoring_Robot_Status `json:"robotStatus,omitempty" xmlrpc:"robotStatus,omitempty"`
+
+ // The SoftLayer_Software_Component that corresponds to the robot installation on the server.
+ SoftwareComponent *Software_Component `json:"softwareComponent,omitempty" xmlrpc:"softwareComponent,omitempty"`
+
+ // Internal identifier of a monitoring robot status
+ StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+}
+
+// Your monitoring robot will be in "Active" status under normal circumstances. If you perform an OS reload, your robot will be in "Reclaim" status until it's reloaded on your server or virtual server.
+//
+// The advanced monitoring system requires the "Nimsoft Monitoring (Advanced)" service to be running and TCP ports 48000 - 48020 to be open on your server or virtual server. Monitoring agents cannot be managed, nor can the usage data be updated, if these ports are closed. Your monitoring robot will be in "Limited Connectivity" status if our monitoring management system cannot communicate with your system.
+//
+// See the [[SoftLayer_Monitoring_Robot::resetStatus|resetStatus]] service for more details.
+type Monitoring_Robot_Status struct {
+ Entity
+
+ // Monitoring robot status description
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // Internal identifier of a monitoring robot status
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Monitoring robot status name
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/network.go b/vendor/github.com/softlayer/softlayer-go/datatypes/network.go
new file mode 100644
index 000000000000..64bcbce070a4
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/datatypes/network.go
@@ -0,0 +1,6136 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Network struct { + Entity + + // The owning account identifier. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The size of the Network specified in CIDR notation. Specified in conjunction with the ``networkIdentifier`` to describe the bounding subnet size for the Network. Required for creation. See [[SoftLayer_Network/createObject]] documentation for creation details. + Cidr *int `json:"cidr,omitempty" xmlrpc:"cidr,omitempty"` + + // Unique identifier for the network. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A name for the Network. This is required during creation of a Network and is entirely user defined. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The starting IP address of the Network. Specified in conjunction with the ``cidr`` property to specify the bounding IP address space for the Network. Required for creation. See [[SoftLayer_Network/createObject]] documentation for creation details. + NetworkIdentifier *string `json:"networkIdentifier,omitempty" xmlrpc:"networkIdentifier,omitempty"` + + // Notes, or a description of the Network. This is entirely user defined. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // A count of the Subnets within the Network. These represent the realized segments of the Network and reside within a [[SoftLayer_Network_Pod|Pod]]. A Subnet must be specified when provisioning a compute resource within a Network. + SubnetCount *uint `json:"subnetCount,omitempty" xmlrpc:"subnetCount,omitempty"` + + // The Subnets within the Network. These represent the realized segments of the Network and reside within a [[SoftLayer_Network_Pod|Pod]]. A Subnet must be specified when provisioning a compute resource within a Network. + Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"` +} + +// The SoftLayer_Network_Application_Delivery_Controller data type models a single instance of an application delivery controller. Local properties are read only, except for a ''notes'' property, which can be used to describe your application delivery controller service. The type's relational properties provide more information to the service's function and login information to the controller's backend management if advanced view is enabled. +type Network_Application_Delivery_Controller struct { + Entity + + // The SoftLayer customer account that owns an application delivery controller record. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The unique identifier of the SoftLayer customer account that owns an application delivery controller record + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The average daily public bandwidth usage for the current billing cycle. + AverageDailyPublicBandwidthUsage *Float64 `json:"averageDailyPublicBandwidthUsage,omitempty" xmlrpc:"averageDailyPublicBandwidthUsage,omitempty"` + + // The billing item for a Application Delivery Controller. + BillingItem *Billing_Item_Network_Application_Delivery_Controller `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // Previous configurations for an Application Delivery Controller. 
+ ConfigurationHistory []Network_Application_Delivery_Controller_Configuration_History `json:"configurationHistory,omitempty" xmlrpc:"configurationHistory,omitempty"` + + // A count of previous configurations for an Application Delivery Controller. + ConfigurationHistoryCount *uint `json:"configurationHistoryCount,omitempty" xmlrpc:"configurationHistoryCount,omitempty"` + + // The date that an application delivery controller record was created + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The datacenter that the application delivery controller resides in. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // A brief description of an application delivery controller record. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // An application delivery controller's unique identifier + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date in which the license for this application delivery controller will expire. + LicenseExpirationDate *Time `json:"licenseExpirationDate,omitempty" xmlrpc:"licenseExpirationDate,omitempty"` + + // A count of the virtual IP address records that belong to an application delivery controller based load balancer. + LoadBalancerCount *uint `json:"loadBalancerCount,omitempty" xmlrpc:"loadBalancerCount,omitempty"` + + // The virtual IP address records that belong to an application delivery controller based load balancer. + LoadBalancers []Network_LoadBalancer_VirtualIpAddress `json:"loadBalancers,omitempty" xmlrpc:"loadBalancers,omitempty"` + + // A flag indicating that this Application Delivery Controller is a managed resource. + ManagedResourceFlag *bool `json:"managedResourceFlag,omitempty" xmlrpc:"managedResourceFlag,omitempty"` + + // An application delivery controller's management ip address. + ManagementIpAddress *string `json:"managementIpAddress,omitempty" xmlrpc:"managementIpAddress,omitempty"` + + // The date that an application delivery controller record was last modified + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // An application delivery controller's name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The network VLAN that an application delivery controller resides on. + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // A count of the network VLANs that an application delivery controller resides on. + NetworkVlanCount *uint `json:"networkVlanCount,omitempty" xmlrpc:"networkVlanCount,omitempty"` + + // The network VLANs that an application delivery controller resides on. + NetworkVlans []Network_Vlan `json:"networkVlans,omitempty" xmlrpc:"networkVlans,omitempty"` + + // Editable notes used to describe an application delivery controller's function + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The total public outbound bandwidth for the current billing cycle. + OutboundPublicBandwidthUsage *Float64 `json:"outboundPublicBandwidthUsage,omitempty" xmlrpc:"outboundPublicBandwidthUsage,omitempty"` + + // The password used to connect to an application delivery controller's management interface when it is operating in advanced view mode. + Password *Software_Component_Password `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // An application delivery controller's primary public IP address. 
+ PrimaryIpAddress *string `json:"primaryIpAddress,omitempty" xmlrpc:"primaryIpAddress,omitempty"` + + // The projected public outbound bandwidth for the current billing cycle. + ProjectedPublicBandwidthUsage *Float64 `json:"projectedPublicBandwidthUsage,omitempty" xmlrpc:"projectedPublicBandwidthUsage,omitempty"` + + // A count of a network application controller's subnets. A subnet is a group of IP addresses + SubnetCount *uint `json:"subnetCount,omitempty" xmlrpc:"subnetCount,omitempty"` + + // A network application controller's subnets. A subnet is a group of IP addresses + Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"` + + // A count of + TagReferenceCount *uint `json:"tagReferenceCount,omitempty" xmlrpc:"tagReferenceCount,omitempty"` + + // no documentation yet + TagReferences []Tag_Reference `json:"tagReferences,omitempty" xmlrpc:"tagReferences,omitempty"` + + // no documentation yet + Type *Network_Application_Delivery_Controller_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // no documentation yet + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // A count of + VirtualIpAddressCount *uint `json:"virtualIpAddressCount,omitempty" xmlrpc:"virtualIpAddressCount,omitempty"` + + // no documentation yet + VirtualIpAddresses []Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"virtualIpAddresses,omitempty" xmlrpc:"virtualIpAddresses,omitempty"` +} + +// The SoftLayer_Network_Application_Delivery_Controller_Configuration_History data type models a single instance of a configuration history entry for an application delivery controller. The configuration history entries are used to support creating backups of an application delivery controller's configuration state in order to restore them later if needed. +type Network_Application_Delivery_Controller_Configuration_History struct { + Entity + + // The application delivery controller that a configuration history record belongs to. + Controller *Network_Application_Delivery_Controller `json:"controller,omitempty" xmlrpc:"controller,omitempty"` + + // The date a configuration history record was created. 
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // An configuration history record's unique identifier + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Editable notes used to describe a configuration history record + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute struct { + Entity + + // no documentation yet + HealthAttributeTypeId *int `json:"healthAttributeTypeId,omitempty" xmlrpc:"healthAttributeTypeId,omitempty"` + + // no documentation yet + HealthCheck *Network_Application_Delivery_Controller_LoadBalancer_Health_Check `json:"healthCheck,omitempty" xmlrpc:"healthCheck,omitempty"` + + // no documentation yet + HealthCheckId *int `json:"healthCheckId,omitempty" xmlrpc:"healthCheckId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Type *Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // no documentation yet + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + ValueExpression *string `json:"valueExpression,omitempty" xmlrpc:"valueExpression,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Health_Check struct { + Entity + + // A count of + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // no documentation yet + Attributes []Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // no documentation yet + HealthCheckTypeId *int `json:"healthCheckTypeId,omitempty" xmlrpc:"healthCheckTypeId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // A count of collection of scale load balancers that use this health check. + ScaleLoadBalancerCount *uint `json:"scaleLoadBalancerCount,omitempty" xmlrpc:"scaleLoadBalancerCount,omitempty"` + + // Collection of scale load balancers that use this health check. 
+ ScaleLoadBalancers []Scale_LoadBalancer `json:"scaleLoadBalancers,omitempty" xmlrpc:"scaleLoadBalancers,omitempty"` + + // A count of + ServiceCount *uint `json:"serviceCount,omitempty" xmlrpc:"serviceCount,omitempty"` + + // no documentation yet + Services []Network_Application_Delivery_Controller_LoadBalancer_Service `json:"services,omitempty" xmlrpc:"services,omitempty"` + + // no documentation yet + Type *Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Routing_Method struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Routing_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Service struct { + Entity + + // no documentation yet + Enabled *int `json:"enabled,omitempty" xmlrpc:"enabled,omitempty"` + + // A count of + GroupCount *uint `json:"groupCount,omitempty" xmlrpc:"groupCount,omitempty"` + + // A count of + GroupReferenceCount *uint `json:"groupReferenceCount,omitempty" xmlrpc:"groupReferenceCount,omitempty"` + + // no documentation yet + GroupReferences []Network_Application_Delivery_Controller_LoadBalancer_Service_Group_CrossReference `json:"groupReferences,omitempty" xmlrpc:"groupReferences,omitempty"` + + // no documentation yet + Groups []Network_Application_Delivery_Controller_LoadBalancer_Service_Group `json:"groups,omitempty" xmlrpc:"groups,omitempty"` + + // no documentation yet + HealthCheck *Network_Application_Delivery_Controller_LoadBalancer_Health_Check `json:"healthCheck,omitempty" xmlrpc:"healthCheck,omitempty"` + + // A count of + HealthCheckCount *uint `json:"healthCheckCount,omitempty" xmlrpc:"healthCheckCount,omitempty"` + + // no documentation yet + HealthChecks []Network_Application_Delivery_Controller_LoadBalancer_Health_Check `json:"healthChecks,omitempty" xmlrpc:"healthChecks,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + IpAddress *Network_Subnet_IpAddress `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // no documentation yet + IpAddressId *int `json:"ipAddressId,omitempty" xmlrpc:"ipAddressId,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // no documentation yet + Port *int `json:"port,omitempty" 
xmlrpc:"port,omitempty"` + + // no documentation yet + ServiceGroup *Network_Application_Delivery_Controller_LoadBalancer_Service_Group `json:"serviceGroup,omitempty" xmlrpc:"serviceGroup,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Service_Group struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // no documentation yet + RoutingMethod *Network_Application_Delivery_Controller_LoadBalancer_Routing_Method `json:"routingMethod,omitempty" xmlrpc:"routingMethod,omitempty"` + + // no documentation yet + RoutingMethodId *int `json:"routingMethodId,omitempty" xmlrpc:"routingMethodId,omitempty"` + + // no documentation yet + RoutingType *Network_Application_Delivery_Controller_LoadBalancer_Routing_Type `json:"routingType,omitempty" xmlrpc:"routingType,omitempty"` + + // no documentation yet + RoutingTypeId *int `json:"routingTypeId,omitempty" xmlrpc:"routingTypeId,omitempty"` + + // A count of + ServiceCount *uint `json:"serviceCount,omitempty" xmlrpc:"serviceCount,omitempty"` + + // A count of + ServiceReferenceCount *uint `json:"serviceReferenceCount,omitempty" xmlrpc:"serviceReferenceCount,omitempty"` + + // no documentation yet + ServiceReferences []Network_Application_Delivery_Controller_LoadBalancer_Service_Group_CrossReference `json:"serviceReferences,omitempty" xmlrpc:"serviceReferences,omitempty"` + + // no documentation yet + Services []Network_Application_Delivery_Controller_LoadBalancer_Service `json:"services,omitempty" xmlrpc:"services,omitempty"` + + // The timeout value for connections from remote clients to the load balancer. Timeout values are only valid for HTTP service groups. 
+ Timeout *int `json:"timeout,omitempty" xmlrpc:"timeout,omitempty"` + + // no documentation yet + VirtualServer *Network_Application_Delivery_Controller_LoadBalancer_VirtualServer `json:"virtualServer,omitempty" xmlrpc:"virtualServer,omitempty"` + + // A count of + VirtualServerCount *uint `json:"virtualServerCount,omitempty" xmlrpc:"virtualServerCount,omitempty"` + + // no documentation yet + VirtualServers []Network_Application_Delivery_Controller_LoadBalancer_VirtualServer `json:"virtualServers,omitempty" xmlrpc:"virtualServers,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Service_Group_CrossReference struct { + Entity + + // no documentation yet + Service *Network_Application_Delivery_Controller_LoadBalancer_Service `json:"service,omitempty" xmlrpc:"service,omitempty"` + + // no documentation yet + ServiceGroup *Network_Application_Delivery_Controller_LoadBalancer_Service_Group `json:"serviceGroup,omitempty" xmlrpc:"serviceGroup,omitempty"` + + // no documentation yet + ServiceGroupId *int `json:"serviceGroupId,omitempty" xmlrpc:"serviceGroupId,omitempty"` + + // no documentation yet + ServiceId *int `json:"serviceId,omitempty" xmlrpc:"serviceId,omitempty"` + + // no documentation yet + Weight *int `json:"weight,omitempty" xmlrpc:"weight,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The unique identifier of the SoftLayer customer account that owns the virtual IP address + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A virtual IP address's associated application delivery controller. + ApplicationDeliveryController *Network_Application_Delivery_Controller `json:"applicationDeliveryController,omitempty" xmlrpc:"applicationDeliveryController,omitempty"` + + // A count of a virtual IP address's associated application delivery controllers. + ApplicationDeliveryControllerCount *uint `json:"applicationDeliveryControllerCount,omitempty" xmlrpc:"applicationDeliveryControllerCount,omitempty"` + + // A virtual IP address's associated application delivery controllers. + ApplicationDeliveryControllers []Network_Application_Delivery_Controller `json:"applicationDeliveryControllers,omitempty" xmlrpc:"applicationDeliveryControllers,omitempty"` + + // The current billing item for the load balancer virtual IP. This is only valid when dedicatedFlag is false. This is an independent virtual IP, and if canceled, will only affect the associated virtual IP. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The connection limit for this virtual IP address + ConnectionLimit *int `json:"connectionLimit,omitempty" xmlrpc:"connectionLimit,omitempty"` + + // The units for the connection limit + ConnectionLimitUnits *string `json:"connectionLimitUnits,omitempty" xmlrpc:"connectionLimitUnits,omitempty"` + + // The current billing item for the load balancing device housing the virtual IP. This billing item represents a device which could contain other virtual IPs. Caution should be taken when canceling. This is only valid when dedicatedFlag is true. + DedicatedBillingItem *Billing_Item_Network_LoadBalancer `json:"dedicatedBillingItem,omitempty" xmlrpc:"dedicatedBillingItem,omitempty"` + + // A flag that determines if a VIP is dedicated or not. 
This is used to override the connection limit and use an unlimited value. + DedicatedFlag *bool `json:"dedicatedFlag,omitempty" xmlrpc:"dedicatedFlag,omitempty"` + + // Denotes whether the virtual IP is configured within a high availability cluster. + HighAvailabilityFlag *bool `json:"highAvailabilityFlag,omitempty" xmlrpc:"highAvailabilityFlag,omitempty"` + + // The unique identifier of the virtual IP address record + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + IpAddress *Network_Subnet_IpAddress `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // ID of the IP address this virtual IP utilizes + IpAddressId *int `json:"ipAddressId,omitempty" xmlrpc:"ipAddressId,omitempty"` + + // no documentation yet + LoadBalancerHardware []Hardware `json:"loadBalancerHardware,omitempty" xmlrpc:"loadBalancerHardware,omitempty"` + + // A count of + LoadBalancerHardwareCount *uint `json:"loadBalancerHardwareCount,omitempty" xmlrpc:"loadBalancerHardwareCount,omitempty"` + + // A flag indicating that the load balancer is a managed resource. + ManagedResourceFlag *bool `json:"managedResourceFlag,omitempty" xmlrpc:"managedResourceFlag,omitempty"` + + // User-created notes for this load balancer virtual IP address + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // A count of the list of security ciphers enabled for this virtual IP address + SecureTransportCipherCount *uint `json:"secureTransportCipherCount,omitempty" xmlrpc:"secureTransportCipherCount,omitempty"` + + // The list of security ciphers enabled for this virtual IP address + SecureTransportCiphers []Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_SecureTransportCipher `json:"secureTransportCiphers,omitempty" xmlrpc:"secureTransportCiphers,omitempty"` + + // A count of the list of secure transport protocols enabled for this virtual IP address + SecureTransportProtocolCount *uint `json:"secureTransportProtocolCount,omitempty" xmlrpc:"secureTransportProtocolCount,omitempty"` + + // The list of secure transport protocols enabled for this virtual IP address + SecureTransportProtocols []Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_SecureTransportProtocol `json:"secureTransportProtocols,omitempty" xmlrpc:"secureTransportProtocols,omitempty"` + + // The SSL certificate currently associated with the VIP. + SecurityCertificate *Security_Certificate `json:"securityCertificate,omitempty" xmlrpc:"securityCertificate,omitempty"` + + // The SSL certificate currently associated with the VIP. Provides chosen certificate visibility to unprivileged users. + SecurityCertificateEntry *Security_Certificate_Entry `json:"securityCertificateEntry,omitempty" xmlrpc:"securityCertificateEntry,omitempty"` + + // The unique identifier of the Security Certificate to be utilized when SSL support is enabled. 
+	SecurityCertificateId *int `json:"securityCertificateId,omitempty" xmlrpc:"securityCertificateId,omitempty"`
+
+	// Determines if the VIP currently has SSL acceleration enabled
+	SslActiveFlag *bool `json:"sslActiveFlag,omitempty" xmlrpc:"sslActiveFlag,omitempty"`
+
+	// Determines if the VIP is _allowed_ to utilize SSL acceleration
+	SslEnabledFlag *bool `json:"sslEnabledFlag,omitempty" xmlrpc:"sslEnabledFlag,omitempty"`
+
+	// A count of
+	VirtualServerCount *uint `json:"virtualServerCount,omitempty" xmlrpc:"virtualServerCount,omitempty"`
+
+	// no documentation yet
+	VirtualServers []Network_Application_Delivery_Controller_LoadBalancer_VirtualServer `json:"virtualServers,omitempty" xmlrpc:"virtualServers,omitempty"`
+}
+
+// A single cipher configured for a load balancer virtual IP address instance. Instances of this class are immutable and should reflect a cipher that is configurable on a load balancer device.
+type Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_SecureTransportCipher struct {
+	Entity
+
+	// Unique identifier for the cipher instance
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// Identifier for the associated encryption algorithm
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	VirtualIpAddress *Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"virtualIpAddress,omitempty" xmlrpc:"virtualIpAddress,omitempty"`
+
+	// Identifier for the associated [[SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress (type)|virtual IP address]] instance
+	VirtualIpAddressId *int `json:"virtualIpAddressId,omitempty" xmlrpc:"virtualIpAddressId,omitempty"`
+}
+
+// Links an SSL transport protocol with a virtual IP address instance. Instances of this class are immutable and should reflect a protocol that is configurable on a load balancer device.
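Every field in these virtual IP address types is a pointer tagged with omitempty, so when such a value is marshaled with encoding/json, only fields that were explicitly set appear in the payload. A minimal, self-contained sketch of that behavior; the miniVip type and its sample value are hypothetical stand-ins, not part of the vendored package:

package main

import (
	"encoding/json"
	"fmt"
)

// miniVip is a hypothetical stand-in that mirrors the pointer-plus-omitempty
// tag pattern used by the vendored virtual IP address types above.
type miniVip struct {
	Id                    *int    `json:"id,omitempty"`
	Notes                 *string `json:"notes,omitempty"`
	SecurityCertificateId *int    `json:"securityCertificateId,omitempty"`
}

func main() {
	notes := "renewed certificate pending"
	// Only Notes is set; the nil pointers are dropped by omitempty.
	b, err := json.Marshal(miniVip{Notes: &notes})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"notes":"renewed certificate pending"}
}

The same pattern is why reads need nil checks before dereferencing: an absent field stays nil rather than silently becoming a zero value.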
+type Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_SecureTransportProtocol struct { + Entity + + // Unique identifier for the protocol instance + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Identifier for the associated communication protocol + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + VirtualIpAddress *Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"virtualIpAddress,omitempty" xmlrpc:"virtualIpAddress,omitempty"` + + // Identifier for the associated [[SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress (type)|virtual IP address]] instance + VirtualIpAddressId *int `json:"virtualIpAddressId,omitempty" xmlrpc:"virtualIpAddressId,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_VirtualServer struct { + Entity + + // no documentation yet + Allocation *int `json:"allocation,omitempty" xmlrpc:"allocation,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // no documentation yet + Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"` + + // no documentation yet + RoutingMethod *Network_Application_Delivery_Controller_LoadBalancer_Routing_Method `json:"routingMethod,omitempty" xmlrpc:"routingMethod,omitempty"` + + // no documentation yet + RoutingMethodId *int `json:"routingMethodId,omitempty" xmlrpc:"routingMethodId,omitempty"` + + // A count of collection of scale load balancers this virtual server applies to. + ScaleLoadBalancerCount *uint `json:"scaleLoadBalancerCount,omitempty" xmlrpc:"scaleLoadBalancerCount,omitempty"` + + // Collection of scale load balancers this virtual server applies to. + ScaleLoadBalancers []Scale_LoadBalancer `json:"scaleLoadBalancers,omitempty" xmlrpc:"scaleLoadBalancers,omitempty"` + + // A count of + ServiceGroupCount *uint `json:"serviceGroupCount,omitempty" xmlrpc:"serviceGroupCount,omitempty"` + + // no documentation yet + ServiceGroups []Network_Application_Delivery_Controller_LoadBalancer_Service_Group `json:"serviceGroups,omitempty" xmlrpc:"serviceGroups,omitempty"` + + // no documentation yet + VirtualIpAddress *Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"virtualIpAddress,omitempty" xmlrpc:"virtualIpAddress,omitempty"` + + // no documentation yet + VirtualIpAddressId *int `json:"virtualIpAddressId,omitempty" xmlrpc:"virtualIpAddressId,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_Type struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// A SoftLayer_Network_Backbone represents a single backbone connection from SoftLayer to the public Internet, from the Internet to the SoftLayer private network, or a link that connects the private networks between SoftLayer's datacenters. The SoftLayer_Network_Backbone data type is a collection of data associated with one of those connections. +type Network_Backbone struct { + Entity + + // The numeric portion of the bandwidth capacity of a SoftLayer backbone. 
For instance, if a backbone is rated at "1 GigE" capacity then the capacity property of the backbone is 1. + Capacity *int `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"` + + // The unit portion of the bandwidth capacity of a SoftLayer backbone. For instance, if a backbone is rated at "10 G" capacity then the capacityUnits property of the backbone is "G". + CapacityUnits *string `json:"capacityUnits,omitempty" xmlrpc:"capacityUnits,omitempty"` + + // A backbone's status. + Health *string `json:"health,omitempty" xmlrpc:"health,omitempty"` + + // A backbone's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Which of the SoftLayer datacenters a backbone is connected to. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // A backbone's name. This is usually the name of the backbone's network provider followed by a number in case SoftLayer uses more than one backbone from a provider. Backbone provider numbers start with the number one and increment from there. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A backbone's primary network component. + NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` + + // The internal identifier of the network component that backbone is connected to. + NetworkComponentId *int `json:"networkComponentId,omitempty" xmlrpc:"networkComponentId,omitempty"` + + // Whether a SoftLayer backbone connects to the public Internet, to the private network, or connecting the private networks of SoftLayer's datacenters. Type is either the string "public", "private", or "private-interconnect". + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Network_Backbone_Location_Dependent struct { + Entity + + // no documentation yet + DependentLocation *Location `json:"dependentLocation,omitempty" xmlrpc:"dependentLocation,omitempty"` + + // no documentation yet + DependentLocationId *int `json:"dependentLocationId,omitempty" xmlrpc:"dependentLocationId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + SourceLocation *Location `json:"sourceLocation,omitempty" xmlrpc:"sourceLocation,omitempty"` + + // no documentation yet + SourceLocationId *int `json:"sourceLocationId,omitempty" xmlrpc:"sourceLocationId,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Usage data type contains specific information relating to bandwidth utilization at a specific point in time on a given network interface. +type Network_Bandwidth_Usage struct { + Entity + + // Incoming bandwidth utilization. + AmountIn *Float64 `json:"amountIn,omitempty" xmlrpc:"amountIn,omitempty"` + + // Outgoing bandwidth utilization. + AmountOut *Float64 `json:"amountOut,omitempty" xmlrpc:"amountOut,omitempty"` + + // ID of the bandwidth usage detail type for this record. + BandwidthUsageDetailTypeId *Float64 `json:"bandwidthUsageDetailTypeId,omitempty" xmlrpc:"bandwidthUsageDetailTypeId,omitempty"` + + // The tracking object this bandwidth usage record describes. + TrackingObject *Metric_Tracking_Object `json:"trackingObject,omitempty" xmlrpc:"trackingObject,omitempty"` + + // In and out bandwidth utilization for a specified time stamp. 
+ Type *Network_Bandwidth_Version1_Usage_Detail_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Usage_Detail data type contains specific information relating to bandwidth utilization at a specific point in time on a given network interface. +type Network_Bandwidth_Usage_Detail struct { + Entity + + // The account tied to this tracking object + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // Incoming bandwidth utilization. + AmountIn *Float64 `json:"amountIn,omitempty" xmlrpc:"amountIn,omitempty"` + + // Outgoing bandwidth utilization. + AmountOut *Float64 `json:"amountOut,omitempty" xmlrpc:"amountOut,omitempty"` + + // ID of the bandwidth usage detail type for this record. + BandwidthUsageDetailTypeId *Float64 `json:"bandwidthUsageDetailTypeId,omitempty" xmlrpc:"bandwidthUsageDetailTypeId,omitempty"` + + // The tracking object this bandwidth usage record describes. + TrackingObject *Metric_Tracking_Object `json:"trackingObject,omitempty" xmlrpc:"trackingObject,omitempty"` + + // In and out bandwidth utilization for a specified time stamp. + Type *Network_Bandwidth_Version1_Usage_Detail_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Version1_Allocation data type contains general information relating to a single bandwidth allocation record. +type Network_Bandwidth_Version1_Allocation struct { + Entity + + // A bandwidth allotment detail. + AllotmentDetail *Network_Bandwidth_Version1_Allotment_Detail `json:"allotmentDetail,omitempty" xmlrpc:"allotmentDetail,omitempty"` + + // The amount of bandwidth allocated. + Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"` + + // Billing item associated with this hardware allocation. + BillingItem *Billing_Item_Hardware `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // Internal ID associated with this allocation. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Version1_Allotment class provides methods and data structures necessary to work with an array of hardware objects associated with a single Bandwidth Pooling. +type Network_Bandwidth_Version1_Allotment struct { + Entity + + // The account associated with this virtual rack. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The user account identifier associated with this allotment. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of the bandwidth allotment detail records associated with this virtual rack. + ActiveDetailCount *uint `json:"activeDetailCount,omitempty" xmlrpc:"activeDetailCount,omitempty"` + + // The bandwidth allotment detail records associated with this virtual rack. + ActiveDetails []Network_Bandwidth_Version1_Allotment_Detail `json:"activeDetails,omitempty" xmlrpc:"activeDetails,omitempty"` + + // A count of the Application Delivery Controller contained within a virtual rack. + ApplicationDeliveryControllerCount *uint `json:"applicationDeliveryControllerCount,omitempty" xmlrpc:"applicationDeliveryControllerCount,omitempty"` + + // The Application Delivery Controller contained within a virtual rack. + ApplicationDeliveryControllers []Network_Application_Delivery_Controller `json:"applicationDeliveryControllers,omitempty" xmlrpc:"applicationDeliveryControllers,omitempty"` + + // The average daily public bandwidth usage for the current billing cycle. 
+ AverageDailyPublicBandwidthUsage *Float64 `json:"averageDailyPublicBandwidthUsage,omitempty" xmlrpc:"averageDailyPublicBandwidthUsage,omitempty"` + + // The bandwidth allotment type of this virtual rack. + BandwidthAllotmentType *Network_Bandwidth_Version1_Allotment_Type `json:"bandwidthAllotmentType,omitempty" xmlrpc:"bandwidthAllotmentType,omitempty"` + + // An identifier marking this allotment as a virtual private rack (1) or a bandwidth pooling(2). + BandwidthAllotmentTypeId *int `json:"bandwidthAllotmentTypeId,omitempty" xmlrpc:"bandwidthAllotmentTypeId,omitempty"` + + // A count of the bare metal server instances contained within a virtual rack. + BareMetalInstanceCount *uint `json:"bareMetalInstanceCount,omitempty" xmlrpc:"bareMetalInstanceCount,omitempty"` + + // The bare metal server instances contained within a virtual rack. + BareMetalInstances []Hardware `json:"bareMetalInstances,omitempty" xmlrpc:"bareMetalInstances,omitempty"` + + // A virtual rack's raw bandwidth usage data for an account's current billing cycle. One object is returned for each network this server is attached to. + BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"` + + // A count of a virtual rack's raw bandwidth usage data for an account's current billing cycle. One object is returned for each network this server is attached to. + BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"` + + // A virtual rack's raw private network bandwidth usage data for an account's current billing cycle. + BillingCyclePrivateBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"` + + // A virtual rack's raw public network bandwidth usage data for an account's current billing cycle. + BillingCyclePublicBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"` + + // The total public bandwidth used in this virtual rack for an account's current billing cycle. + BillingCyclePublicUsageTotal *uint `json:"billingCyclePublicUsageTotal,omitempty" xmlrpc:"billingCyclePublicUsageTotal,omitempty"` + + // A virtual rack's billing item. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // Creation date for an allotment. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // An object that provides commonly used bandwidth summary components for the current billing cycle. + CurrentBandwidthSummary *Metric_Tracking_Object_Bandwidth_Summary `json:"currentBandwidthSummary,omitempty" xmlrpc:"currentBandwidthSummary,omitempty"` + + // A count of the bandwidth allotment detail records associated with this virtual rack. + DetailCount *uint `json:"detailCount,omitempty" xmlrpc:"detailCount,omitempty"` + + // The bandwidth allotment detail records associated with this virtual rack. + Details []Network_Bandwidth_Version1_Allotment_Detail `json:"details,omitempty" xmlrpc:"details,omitempty"` + + // End date for an allotment. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The hardware contained within a virtual rack. + Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // A count of the hardware contained within a virtual rack. 
+ HardwareCount *uint `json:"hardwareCount,omitempty" xmlrpc:"hardwareCount,omitempty"` + + // A virtual rack's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The total public inbound bandwidth used in this virtual rack for an account's current billing cycle. + InboundPublicBandwidthUsage *Float64 `json:"inboundPublicBandwidthUsage,omitempty" xmlrpc:"inboundPublicBandwidthUsage,omitempty"` + + // The location group associated with this virtual rack. + LocationGroup *Location_Group `json:"locationGroup,omitempty" xmlrpc:"locationGroup,omitempty"` + + // Location Group Id for an allotment + LocationGroupId *int `json:"locationGroupId,omitempty" xmlrpc:"locationGroupId,omitempty"` + + // A count of the managed bare metal server instances contained within a virtual rack. + ManagedBareMetalInstanceCount *uint `json:"managedBareMetalInstanceCount,omitempty" xmlrpc:"managedBareMetalInstanceCount,omitempty"` + + // The managed bare metal server instances contained within a virtual rack. + ManagedBareMetalInstances []Hardware `json:"managedBareMetalInstances,omitempty" xmlrpc:"managedBareMetalInstances,omitempty"` + + // The managed hardware contained within a virtual rack. + ManagedHardware []Hardware `json:"managedHardware,omitempty" xmlrpc:"managedHardware,omitempty"` + + // A count of the managed hardware contained within a virtual rack. + ManagedHardwareCount *uint `json:"managedHardwareCount,omitempty" xmlrpc:"managedHardwareCount,omitempty"` + + // A count of the managed Virtual Server contained within a virtual rack. + ManagedVirtualGuestCount *uint `json:"managedVirtualGuestCount,omitempty" xmlrpc:"managedVirtualGuestCount,omitempty"` + + // The managed Virtual Server contained within a virtual rack. + ManagedVirtualGuests []Virtual_Guest `json:"managedVirtualGuests,omitempty" xmlrpc:"managedVirtualGuests,omitempty"` + + // A virtual rack's metric tracking object. This object records all periodic polled data available to this rack. + MetricTrackingObject *Metric_Tracking_Object_VirtualDedicatedRack `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"` + + // The metric tracking object id for this allotment. + MetricTrackingObjectId *int `json:"metricTrackingObjectId,omitempty" xmlrpc:"metricTrackingObjectId,omitempty"` + + // Text A virtual rack's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The total public outbound bandwidth used in this virtual rack for an account's current billing cycle. + OutboundPublicBandwidthUsage *Float64 `json:"outboundPublicBandwidthUsage,omitempty" xmlrpc:"outboundPublicBandwidthUsage,omitempty"` + + // Whether the bandwidth usage for this bandwidth pool for the current billing cycle exceeds the allocation. + OverBandwidthAllocationFlag *int `json:"overBandwidthAllocationFlag,omitempty" xmlrpc:"overBandwidthAllocationFlag,omitempty"` + + // The private network only hardware contained within a virtual rack. + PrivateNetworkOnlyHardware []Hardware `json:"privateNetworkOnlyHardware,omitempty" xmlrpc:"privateNetworkOnlyHardware,omitempty"` + + // A count of the private network only hardware contained within a virtual rack. + PrivateNetworkOnlyHardwareCount *uint `json:"privateNetworkOnlyHardwareCount,omitempty" xmlrpc:"privateNetworkOnlyHardwareCount,omitempty"` + + // Whether the bandwidth usage for this bandwidth pool for the current billing cycle is projected to exceed the allocation. 
+ ProjectedOverBandwidthAllocationFlag *int `json:"projectedOverBandwidthAllocationFlag,omitempty" xmlrpc:"projectedOverBandwidthAllocationFlag,omitempty"` + + // The projected public outbound bandwidth for this virtual server for the current billing cycle. + ProjectedPublicBandwidthUsage *Float64 `json:"projectedPublicBandwidthUsage,omitempty" xmlrpc:"projectedPublicBandwidthUsage,omitempty"` + + // no documentation yet + ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"` + + // Service Provider Id for an allotment + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` + + // The combined allocated bandwidth for all servers in a virtual rack. + TotalBandwidthAllocated *uint `json:"totalBandwidthAllocated,omitempty" xmlrpc:"totalBandwidthAllocated,omitempty"` + + // A count of the Virtual Server contained within a virtual rack. + VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"` + + // The Virtual Server contained within a virtual rack. + VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Version1_Allotment_Detail data type contains specific information relating to a single bandwidth allotment record. +type Network_Bandwidth_Version1_Allotment_Detail struct { + Entity + + // Allocated bandwidth. + Allocation *Network_Bandwidth_Version1_Allocation `json:"allocation,omitempty" xmlrpc:"allocation,omitempty"` + + // Allocated bandwidth. + AllocationId *int `json:"allocationId,omitempty" xmlrpc:"allocationId,omitempty"` + + // The parent Bandwidth Pool. + BandwidthAllotment *Network_Bandwidth_Version1_Allotment `json:"bandwidthAllotment,omitempty" xmlrpc:"bandwidthAllotment,omitempty"` + + // Bandwidth Pool associated with this detail. + BandwidthAllotmentId *int `json:"bandwidthAllotmentId,omitempty" xmlrpc:"bandwidthAllotmentId,omitempty"` + + // Bandwidth used. + BandwidthUsage []Network_Bandwidth_Version1_Usage `json:"bandwidthUsage,omitempty" xmlrpc:"bandwidthUsage,omitempty"` + + // A count of bandwidth used. + BandwidthUsageCount *uint `json:"bandwidthUsageCount,omitempty" xmlrpc:"bandwidthUsageCount,omitempty"` + + // Beginning this date the bandwidth allotment is active. + EffectiveDate *Time `json:"effectiveDate,omitempty" xmlrpc:"effectiveDate,omitempty"` + + // From this date the bandwidth allotment is no longer active. + EndEffectiveDate *Time `json:"endEffectiveDate,omitempty" xmlrpc:"endEffectiveDate,omitempty"` + + // Internal ID associated with this allotment detail. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Service Provider Id for an allotment + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Version1_Allotment_Type contains a description of the associated SoftLayer_Network_Bandwidth_Version1_Allotment object. 
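The allotment and usage-detail records above expose inbound and outbound utilization as nullable pointer fields, so any aggregation over them has to guard against nil. A self-contained sketch of that kind of roll-up; the usageDetail type uses plain *float64 in place of the package's Float64 alias, and the sample values are invented:

package main

import "fmt"

// usageDetail is a hypothetical stand-in for the bandwidth usage detail
// records above; plain *float64 replaces the package's Float64 alias so
// the sketch stands alone.
type usageDetail struct {
	AmountIn  *float64
	AmountOut *float64
}

// totalUsage sums inbound and outbound utilization, skipping nil fields
// the same way a caller of the vendored datatypes would have to.
func totalUsage(details []usageDetail) (in, out float64) {
	for _, d := range details {
		if d.AmountIn != nil {
			in += *d.AmountIn
		}
		if d.AmountOut != nil {
			out += *d.AmountOut
		}
	}
	return in, out
}

func main() {
	a, b := 1.5, 2.25
	in, out := totalUsage([]usageDetail{
		{AmountIn: &a, AmountOut: &b},
		{AmountIn: &b}, // outbound sample missing for this record
	})
	fmt.Printf("in=%.2f out=%.2f\n", in, out) // in=3.75 out=2.25
}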
+type Network_Bandwidth_Version1_Allotment_Type struct {
+	Entity
+
+	// no documentation yet
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	ShortDescription *string `json:"shortDescription,omitempty" xmlrpc:"shortDescription,omitempty"`
+}
+
+// The SoftLayer_Network_Bandwidth_Version1_Host type contains general information used to route a server to its pod.
+type Network_Bandwidth_Version1_Host struct {
+	Entity
+
+	// Pod ID for this host device.
+	PodId *int `json:"podId,omitempty" xmlrpc:"podId,omitempty"`
+}
+
+// All bandwidth tracking is maintained through the switch that the bandwidth is used through. All bandwidth is stored in a "pod" repository. An interface links the hardware switch with the pod repository identification number. This is only relevant to bandwidth data. It is not common to use this.
+type Network_Bandwidth_Version1_Interface struct {
+	Entity
+
+	// The host for an interface. This is not to be confused with a SoftLayer hardware
+	Host *Network_Bandwidth_Version1_Host `json:"host,omitempty" xmlrpc:"host,omitempty"`
+
+	// An interface's host. The host stores the pod number for the bandwidth data.
+	HostId *int `json:"hostId,omitempty" xmlrpc:"hostId,omitempty"`
+
+	// The switch for an interface.
+	NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"`
+
+	// The network component for this interface.
+	NetworkComponentId *int `json:"networkComponentId,omitempty" xmlrpc:"networkComponentId,omitempty"`
+}
+
+// The SoftLayer_Network_Bandwidth_Version1_Usage data type contains general information relating to a single bandwidth usage record.
+type Network_Bandwidth_Version1_Usage struct {
+	Entity
+
+	// Bandwidth allotment detail for this hardware.
+	BandwidthAllotmentDetail *Network_Bandwidth_Version1_Allotment_Detail `json:"bandwidthAllotmentDetail,omitempty" xmlrpc:"bandwidthAllotmentDetail,omitempty"`
+
+	// Bandwidth usage details for this hardware.
+	BandwidthUsageDetail []Network_Bandwidth_Version1_Usage_Detail `json:"bandwidthUsageDetail,omitempty" xmlrpc:"bandwidthUsageDetail,omitempty"`
+
+	// A count of bandwidth usage details for this hardware.
+	BandwidthUsageDetailCount *uint `json:"bandwidthUsageDetailCount,omitempty" xmlrpc:"bandwidthUsageDetailCount,omitempty"`
+}
+
+// The SoftLayer_Network_Bandwidth_Version1_Usage_Detail data type contains specific information relating to bandwidth utilization at a specific point in time on a given network interface.
+type Network_Bandwidth_Version1_Usage_Detail struct {
+	Entity
+
+	// Incoming bandwidth utilization.
+	AmountIn *Float64 `json:"amountIn,omitempty" xmlrpc:"amountIn,omitempty"`
+
+	// Outgoing bandwidth utilization.
+	AmountOut *Float64 `json:"amountOut,omitempty" xmlrpc:"amountOut,omitempty"`
+
+	// In and out bandwidth utilization for a specified time stamp.
+	BandwidthUsage *Network_Bandwidth_Version1_Usage `json:"bandwidthUsage,omitempty" xmlrpc:"bandwidthUsage,omitempty"`
+
+	// Describes this bandwidth utilization record as on the public or private network interface.
+	BandwidthUsageDetailType *Network_Bandwidth_Version1_Usage_Detail_Type `json:"bandwidthUsageDetailType,omitempty" xmlrpc:"bandwidthUsageDetailType,omitempty"`
+
+	// Day and time this bandwidth utilization event was recorded.
+ Day *Time `json:"day,omitempty" xmlrpc:"day,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Usage_Detail data type contains specific information relating to bandwidth utilization at a specific point in time on a given network interface. +type Network_Bandwidth_Version1_Usage_Detail_Total struct { + Entity + + // The account tied to this tracking object + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // Incoming bandwidth utilization. + AmountIn *Float64 `json:"amountIn,omitempty" xmlrpc:"amountIn,omitempty"` + + // Outgoing bandwidth utilization. + AmountOut *Float64 `json:"amountOut,omitempty" xmlrpc:"amountOut,omitempty"` + + // ID of the bandwidth usage detail type for this record. + BandwidthUsageDetailTypeId *Float64 `json:"bandwidthUsageDetailTypeId,omitempty" xmlrpc:"bandwidthUsageDetailTypeId,omitempty"` + + // The tracking object this bandwidth usage record describes. + TrackingObject *Metric_Tracking_Object `json:"trackingObject,omitempty" xmlrpc:"trackingObject,omitempty"` + + // In and out bandwidth utilization for a specified time stamp. + Type *Network_Bandwidth_Version1_Usage_Detail_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Version1_Usage_Detail_Type data type contains generic information relating to the types of bandwidth records available, currently just public and private. +type Network_Bandwidth_Version1_Usage_Detail_Type struct { + Entity + + // Database key associated with this bandwidth detail type. + Alias *string `json:"alias,omitempty" xmlrpc:"alias,omitempty"` +} + +// The SoftLayer_Network_CdnMarketplace_Account data type models an individual CDN account. CDN accounts contain the SoftLayer account ID of the customer, the vendor ID the account belongs to, the customer ID provided by the vendor, and a CDN account's status. +type Network_CdnMarketplace_Account struct { + Entity + + // SoftLayer account to which the CDN account belongs. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // An associated parent billing item which is active. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` +} + +// no documentation yet +type Network_CdnMarketplace_Configuration_Behavior_Geoblocking struct { + Entity + + // no documentation yet + AccessType *string `json:"accessType,omitempty" xmlrpc:"accessType,omitempty"` + + // no documentation yet + RegionType *string `json:"regionType,omitempty" xmlrpc:"regionType,omitempty"` + + // no documentation yet + Regions []string `json:"regions,omitempty" xmlrpc:"regions,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// no documentation yet +type Network_CdnMarketplace_Configuration_Behavior_Geoblocking_Type struct { + Entity + + // no documentation yet + AccessType []string `json:"accessType,omitempty" xmlrpc:"accessType,omitempty"` + + // no documentation yet + Continent []string `json:"continent,omitempty" xmlrpc:"continent,omitempty"` + + // no documentation yet + Country []string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + RegionType []string `json:"regionType,omitempty" xmlrpc:"regionType,omitempty"` +} + +// This data type models a purge event that occurs in caching server. It contains a reference to a mapping configuration, the path to execute the purge on, the status of the purge, and flag that enables saving the purge information for future use. 
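The geoblocking behavior earlier in this file is plain data: an access type, a region type, a region list, and a status, all optional. A self-contained sketch of decoding a response in that shape; the local geoblocking mirror type and the sample document are illustrative only, not the vendored type or a real API response:

package main

import (
	"encoding/json"
	"fmt"
)

// geoblocking is a hypothetical mirror of the behavior fields above
// (accessType, regionType, regions, status), kept local so the sketch
// compiles on its own.
type geoblocking struct {
	AccessType *string  `json:"accessType,omitempty"`
	RegionType *string  `json:"regionType,omitempty"`
	Regions    []string `json:"regions,omitempty"`
	Status     *string  `json:"status,omitempty"`
}

func main() {
	// An invented document in the shape the JSON tags describe.
	raw := `{"accessType":"ALLOW","regionType":"COUNTRY","regions":["US","CA"]}`

	var g geoblocking
	if err := json.Unmarshal([]byte(raw), &g); err != nil {
		panic(err)
	}
	// Status was absent, so the pointer stays nil instead of becoming "".
	fmt.Println(*g.AccessType, g.Regions, g.Status == nil) // ALLOW [US CA] true
}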
+type Network_CdnMarketplace_Configuration_Cache_Purge struct { + Entity +} + +// This data type models a purge event that occurs repetitively and automatically in caching server after a set interval of time. A time to live instance contains a reference to a mapping configuration, the path to execute the purge on, the result of the purge, and the time interval after which the purge will be executed. +type Network_CdnMarketplace_Configuration_Cache_TimeToLive struct { + Entity + + // date record is created + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Path where purge will be executed after TTL + Path *string `json:"path,omitempty" xmlrpc:"path,omitempty"` + + // Time interval after which purge will occur repeatedly + TimeToLive *int `json:"timeToLive,omitempty" xmlrpc:"timeToLive,omitempty"` +} + +// This data type represents the mapping Configuration settings for enabling CDN services. Each instance contains a reference to a CDN account, and CDN configuration properties such as a domain, an origin host and its port, a cname we generate, a cname the vendor generates, and a status. Other properties include the type of content to be cached (static or dynamic), the origin type (a host server or an object storage account), and the protocol to be used for caching. +type Network_CdnMarketplace_Configuration_Mapping struct { + Entity +} + +// no documentation yet +type Network_CdnMarketplace_Configuration_Mapping_Path struct { + Entity +} + +// This Metrics class provides methods to get CDN metrics based on account or mapping unique id. +type Network_CdnMarketplace_Metrics struct { + Entity +} + +// no documentation yet +type Network_CdnMarketplace_Utils_Response struct { + Entity + + // no documentation yet + Code *int `json:"code,omitempty" xmlrpc:"code,omitempty"` + + // no documentation yet + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` +} + +// The SoftLayer_Network_CdnMarketplace_Vendor contains information regarding
 a CDN Vendor. This class is associated with the
 SoftLayer_Network_CdnMarketplace_Vendor_Attribute class. +type Network_CdnMarketplace_Vendor struct { + Entity +} + +// Every piece of hardware running in SoftLayer's datacenters connected to the public, private, or management networks (where applicable) have a corresponding network component. These network components are modeled by the SoftLayer_Network_Component data type. These data types reflect the servers' local ethernet and remote management interfaces. +type Network_Component struct { + Entity + + // Reboot/power (rebootDefault, rebootSoft, rebootHard, powerOn, powerOff and powerCycle) command currently executing by the server's remote management card. + ActiveCommand *Hardware_Component_RemoteManagement_Command_Request `json:"activeCommand,omitempty" xmlrpc:"activeCommand,omitempty"` + + // The network component linking this object to a child device + DownlinkComponent *Network_Component `json:"downlinkComponent,omitempty" xmlrpc:"downlinkComponent,omitempty"` + + // The duplex mode of a network component. + DuplexMode *Network_Component_Duplex_Mode `json:"duplexMode,omitempty" xmlrpc:"duplexMode,omitempty"` + + // A network component's Duplex mode. + DuplexModeId *string `json:"duplexModeId,omitempty" xmlrpc:"duplexModeId,omitempty"` + + // The hardware that a network component resides in. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The internal identifier of the hardware that a network component belongs to. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // no documentation yet + HighAvailabilityFirewallFlag *bool `json:"highAvailabilityFirewallFlag,omitempty" xmlrpc:"highAvailabilityFirewallFlag,omitempty"` + + // A network component's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A hardware switch's interface to the bandwidth pod. + Interface *Network_Bandwidth_Version1_Interface `json:"interface,omitempty" xmlrpc:"interface,omitempty"` + + // A count of the records of all IP addresses bound to a network component. + IpAddressBindingCount *uint `json:"ipAddressBindingCount,omitempty" xmlrpc:"ipAddressBindingCount,omitempty"` + + // The records of all IP addresses bound to a network component. + IpAddressBindings []Network_Component_IpAddress `json:"ipAddressBindings,omitempty" xmlrpc:"ipAddressBindings,omitempty"` + + // A count of + IpAddressCount *uint `json:"ipAddressCount,omitempty" xmlrpc:"ipAddressCount,omitempty"` + + // no documentation yet + IpAddresses []Network_Subnet_IpAddress `json:"ipAddresses,omitempty" xmlrpc:"ipAddresses,omitempty"` + + // The IP address of an IPMI-based management network component. + IpmiIpAddress *string `json:"ipmiIpAddress,omitempty" xmlrpc:"ipmiIpAddress,omitempty"` + + // The MAC address of an IPMI-based management network component. + IpmiMacAddress *string `json:"ipmiMacAddress,omitempty" xmlrpc:"ipmiMacAddress,omitempty"` + + // Last reboot/power (rebootDefault, rebootSoft, rebootHard, powerOn, powerOff and powerCycle) command issued to the server's remote management card. + LastCommand *Hardware_Component_RemoteManagement_Command_Request `json:"lastCommand,omitempty" xmlrpc:"lastCommand,omitempty"` + + // A network component's unique MAC address. IPMI-based management network interfaces may not have a MAC address. + MacAddress *string `json:"macAddress,omitempty" xmlrpc:"macAddress,omitempty"` + + // A network component's maximum allowed speed, measured in Mbit per second. 
''maxSpeed'' is determined by the capabilities of the network interface and the port speed purchased on your SoftLayer server. + MaxSpeed *int `json:"maxSpeed,omitempty" xmlrpc:"maxSpeed,omitempty"` + + // The metric tracking object for this network component. + MetricTrackingObject *Metric_Tracking_Object `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"` + + // The date a network component was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A network component's short name. For most servers this is the string "eth" for ethernet ports or "mgmt" for remote management ports. Use this in conjunction with the ''port'' property to identify a network component. For instance, the "eth0" interface on a server has the network component name "eth" and port 0. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The upstream network component firewall. + NetworkComponentFirewall *Network_Component_Firewall `json:"networkComponentFirewall,omitempty" xmlrpc:"networkComponentFirewall,omitempty"` + + // A network component's associated group. + NetworkComponentGroup *Network_Component_Group `json:"networkComponentGroup,omitempty" xmlrpc:"networkComponentGroup,omitempty"` + + // All network devices in SoftLayer's network hierarchy that this device is connected to. + NetworkHardware []Hardware `json:"networkHardware,omitempty" xmlrpc:"networkHardware,omitempty"` + + // A count of all network devices in SoftLayer's network hierarchy that this device is connected to. + NetworkHardwareCount *uint `json:"networkHardwareCount,omitempty" xmlrpc:"networkHardwareCount,omitempty"` + + // The VLAN that a network component's subnet is associated with. + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // The unique internal id of the network VLAN that the port belongs to. + NetworkVlanId *int `json:"networkVlanId,omitempty" xmlrpc:"networkVlanId,omitempty"` + + // A count of the VLANs that are trunked to this network component. + NetworkVlanTrunkCount *uint `json:"networkVlanTrunkCount,omitempty" xmlrpc:"networkVlanTrunkCount,omitempty"` + + // The VLANs that are trunked to this network component. + NetworkVlanTrunks []Network_Component_Network_Vlan_Trunk `json:"networkVlanTrunks,omitempty" xmlrpc:"networkVlanTrunks,omitempty"` + + // A network component's port number. Most hardware has more than one network interface. The port property separates these interfaces. Use this in conjunction with the ''name'' property to identify a network component. For instance, the "eth0" interface on a server has the network component name "eth" and port 0. + Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"` + + // A network component's primary IP address. IPMI-based management network interfaces may not have an IP address. + PrimaryIpAddress *string `json:"primaryIpAddress,omitempty" xmlrpc:"primaryIpAddress,omitempty"` + + // The primary IPv4 Address record for a network component. + PrimaryIpAddressRecord *Network_Subnet_IpAddress `json:"primaryIpAddressRecord,omitempty" xmlrpc:"primaryIpAddressRecord,omitempty"` + + // The subnet of the primary IP address assigned to this network component. + PrimarySubnet *Network_Subnet `json:"primarySubnet,omitempty" xmlrpc:"primarySubnet,omitempty"` + + // The primary IPv6 Address record for a network component. 
+ PrimaryVersion6IpAddressRecord *Network_Subnet_IpAddress `json:"primaryVersion6IpAddressRecord,omitempty" xmlrpc:"primaryVersion6IpAddressRecord,omitempty"` + + // A count of the last five reboot/power (rebootDefault, rebootSoft, rebootHard, powerOn, powerOff and powerCycle) commands issued to the server's remote management card. + RecentCommandCount *uint `json:"recentCommandCount,omitempty" xmlrpc:"recentCommandCount,omitempty"` + + // The last five reboot/power (rebootDefault, rebootSoft, rebootHard, powerOn, powerOff and powerCycle) commands issued to the server's remote management card. + RecentCommands []Hardware_Component_RemoteManagement_Command_Request `json:"recentCommands,omitempty" xmlrpc:"recentCommands,omitempty"` + + // Indicates whether the network component is participating in a group of two or more components capable of being operationally redundant, if enabled. + RedundancyCapableFlag *bool `json:"redundancyCapableFlag,omitempty" xmlrpc:"redundancyCapableFlag,omitempty"` + + // Indicates whether the network component is participating in a group of two or more components which is actively providing link redundancy. + RedundancyEnabledFlag *bool `json:"redundancyEnabledFlag,omitempty" xmlrpc:"redundancyEnabledFlag,omitempty"` + + // A count of user(s) credentials to issue commands and/or interact with the server's remote management card. + RemoteManagementUserCount *uint `json:"remoteManagementUserCount,omitempty" xmlrpc:"remoteManagementUserCount,omitempty"` + + // User(s) credentials to issue commands and/or interact with the server's remote management card. + RemoteManagementUsers []Hardware_Component_RemoteManagement_User `json:"remoteManagementUsers,omitempty" xmlrpc:"remoteManagementUsers,omitempty"` + + // A network component's routers. + Router *Hardware `json:"router,omitempty" xmlrpc:"router,omitempty"` + + // A network component's speed, measured in Mbit per second. + Speed *int `json:"speed,omitempty" xmlrpc:"speed,omitempty"` + + // A network component's status. This can take one of four possible values: "ACTIVE", "DISABLE", "USER_OFF", or "MACWAIT". "ACTIVE" network components are enabled and in use on a servers. "DISABLE" status components have been administratively disabled by SoftLayer accounting or abuse. "USER_OFF" components have been administratively disabled by you, the user. "MACWAIT" components only exist on network components that have not been provisioned. You should never see a network interface in MACWAIT state. If you happen to see one please contact SoftLayer support. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // Whether a network component's primary ip address is from a storage network subnet or not. + StorageNetworkFlag *bool `json:"storageNetworkFlag,omitempty" xmlrpc:"storageNetworkFlag,omitempty"` + + // A count of a network component's subnets. A subnet is a group of IP addresses + SubnetCount *uint `json:"subnetCount,omitempty" xmlrpc:"subnetCount,omitempty"` + + // A network component's subnets. 
A subnet is a group of IP addresses + Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"` + + // The network component linking this object to parent + UplinkComponent *Network_Component `json:"uplinkComponent,omitempty" xmlrpc:"uplinkComponent,omitempty"` + + // The duplex mode of the uplink network component linking to this object + UplinkDuplexMode *Network_Component_Duplex_Mode `json:"uplinkDuplexMode,omitempty" xmlrpc:"uplinkDuplexMode,omitempty"` +} + +// Duplex Mode allows finer grained control over networking options and settings. +type Network_Component_Duplex_Mode struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Network_Component_Firewall data type contains general information relating to a single SoftLayer network component firewall. This is the object which ties the running rules to a specific downstream server. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. +type Network_Component_Firewall struct { + Entity + + // A count of the additional subnets linked to this network component firewall, that inherit rules from the host that the context slot is attached to. + ApplyServerRuleSubnetCount *uint `json:"applyServerRuleSubnetCount,omitempty" xmlrpc:"applyServerRuleSubnetCount,omitempty"` + + // The additional subnets linked to this network component firewall, that inherit rules from the host that the context slot is attached to. + ApplyServerRuleSubnets []Network_Subnet `json:"applyServerRuleSubnets,omitempty" xmlrpc:"applyServerRuleSubnets,omitempty"` + + // The billing item for a Hardware Firewall (Dedicated). + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The network component of the guest virtual server that this network component firewall belongs to. + GuestNetworkComponent *Virtual_Guest_Network_Component `json:"guestNetworkComponent,omitempty" xmlrpc:"guestNetworkComponent,omitempty"` + + // Unique ID for the network component of the switch interface that this network component firewall is attached to. + GuestNetworkComponentId *int `json:"guestNetworkComponentId,omitempty" xmlrpc:"guestNetworkComponentId,omitempty"` + + // Unique ID for the network component firewall. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The network component of the switch interface that this network component firewall belongs to. + NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` + + // Unique ID for the network component of the switch interface that this network component firewall is attached to. + NetworkComponentId *int `json:"networkComponentId,omitempty" xmlrpc:"networkComponentId,omitempty"` + + // The update requests made for this firewall. + NetworkFirewallUpdateRequest []Network_Firewall_Update_Request `json:"networkFirewallUpdateRequest,omitempty" xmlrpc:"networkFirewallUpdateRequest,omitempty"` + + // A count of the update requests made for this firewall. 
+ NetworkFirewallUpdateRequestCount *uint `json:"networkFirewallUpdateRequestCount,omitempty" xmlrpc:"networkFirewallUpdateRequestCount,omitempty"` + + // A count of the currently running rule set of this network component firewall. + RuleCount *uint `json:"ruleCount,omitempty" xmlrpc:"ruleCount,omitempty"` + + // The currently running rule set of this network component firewall. + Rules []Network_Component_Firewall_Rule `json:"rules,omitempty" xmlrpc:"rules,omitempty"` + + // Current status of the network component firewall. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // A count of the additional subnets linked to this network component firewall. + SubnetCount *uint `json:"subnetCount,omitempty" xmlrpc:"subnetCount,omitempty"` + + // The additional subnets linked to this network component firewall. + Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"` +} + +// A SoftLayer_Network_Component_Firewall_Rule object type represents a currently running firewall rule and contains relative information. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. +type Network_Component_Firewall_Rule struct { + Entity + + // The action that the rule is to take [permit or deny]. + Action *string `json:"action,omitempty" xmlrpc:"action,omitempty"` + + // The destination IP address considered for determining rule application. + DestinationIpAddress *string `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"` + + // The CIDR is used for determining rule application. This value will + DestinationIpCidr *int `json:"destinationIpCidr,omitempty" xmlrpc:"destinationIpCidr,omitempty"` + + // The destination IP subnet mask considered for determining rule application. + DestinationIpSubnetMask *string `json:"destinationIpSubnetMask,omitempty" xmlrpc:"destinationIpSubnetMask,omitempty"` + + // The ending (upper end of range) destination port considered for determining rule application. + DestinationPortRangeEnd *int `json:"destinationPortRangeEnd,omitempty" xmlrpc:"destinationPortRangeEnd,omitempty"` + + // The starting (lower end of range) destination port considered for determining rule application. + DestinationPortRangeStart *int `json:"destinationPortRangeStart,omitempty" xmlrpc:"destinationPortRangeStart,omitempty"` + + // The rule's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The network component firewall that this rule belongs to. + NetworkComponentFirewall *Network_Component_Firewall `json:"networkComponentFirewall,omitempty" xmlrpc:"networkComponentFirewall,omitempty"` + + // The notes field for the rule. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The numeric value describing the order in which the rule should be applied. + OrderValue *int `json:"orderValue,omitempty" xmlrpc:"orderValue,omitempty"` + + // The protocol considered for determining rule application. + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // The source IP address considered for determining rule application. + SourceIpAddress *string `json:"sourceIpAddress,omitempty" xmlrpc:"sourceIpAddress,omitempty"` + + // The CIDR is used for determining rule application. 
This value will + SourceIpCidr *int `json:"sourceIpCidr,omitempty" xmlrpc:"sourceIpCidr,omitempty"` + + // The source IP subnet mask considered for determining rule application. + SourceIpSubnetMask *string `json:"sourceIpSubnetMask,omitempty" xmlrpc:"sourceIpSubnetMask,omitempty"` + + // Current status of the network component firewall. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // Whether this rule is an IPv4 rule or an IPv6 rule. If + Version *int `json:"version,omitempty" xmlrpc:"version,omitempty"` +} + +// A SoftLayer_Network_Component_Firewall_Subnets object type represents the current linked subnets and contains relative information. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. +type Network_Component_Firewall_Subnets struct { + Entity + + // A boolean flag that indicates whether the subnet should receive all the rules intended for the host on this context slot. + ApplyServerRulesFlag *bool `json:"applyServerRulesFlag,omitempty" xmlrpc:"applyServerRulesFlag,omitempty"` + + // The network component firewall that write rules for this subnet. + NetworkComponentFirewall *Network_Component_Firewall `json:"networkComponentFirewall,omitempty" xmlrpc:"networkComponentFirewall,omitempty"` + + // The subnet that this link binds to the network component firewall. + Subnet *Network_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` + + // The unique identifier of the subnet being linked to the network component firewall. + SubnetId *int `json:"subnetId,omitempty" xmlrpc:"subnetId,omitempty"` +} + +// no documentation yet +type Network_Component_Group struct { + Entity + + // no documentation yet + GroupTypeId *int `json:"groupTypeId,omitempty" xmlrpc:"groupTypeId,omitempty"` + + // A count of a network component group's associated network components. + NetworkComponentCount *uint `json:"networkComponentCount,omitempty" xmlrpc:"networkComponentCount,omitempty"` + + // A network component group's associated network components. + NetworkComponents []Network_Component `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"` +} + +// The SoftLayer_Network_Component_IpAddress data type contains general information relating to the binding of a single network component to a single SoftLayer IP address. +type Network_Component_IpAddress struct { + Entity + + // The IP address associated with this object's network component. + IpAddress *Network_Subnet_IpAddress `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // The network component associated with this object's IP address. + NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` +} + +// Represents the association between a Network_Component and Network_Vlan in the manner of a 'trunk'. Trunking a VLAN to a port allows that ports to receive and send packets tagged with the corresponding VLAN number. +type Network_Component_Network_Vlan_Trunk struct { + Entity + + // The network component that the VLAN is being trunked to. + NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` + + // The network component's identifier. + NetworkComponentId *int `json:"networkComponentId,omitempty" xmlrpc:"networkComponentId,omitempty"` + + // The VLAN that is being trunked to the network component. 
+ NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // The identifier of the network VLAN that is a trunk on the network component. + NetworkVlanId *int `json:"networkVlanId,omitempty" xmlrpc:"networkVlanId,omitempty"` +} + +// The SoftLayer_Network_Component_RemoteManagement data type contains general information relating to a single SoftLayer remote management network component. +type Network_Component_RemoteManagement struct { + Network_Component +} + +// The SoftLayer_Network_Component_Uplink_Hardware data type abstracts information related to network connections between SoftLayer hardware and SoftLayer network components. +// +// It is populated via triggers on the network_connection table (SoftLayer_Network_Connection), so you shouldn't have to delete or insert records into this table, ever. +// +// +type Network_Component_Uplink_Hardware struct { + Entity + + // A network component uplink's connected [[SoftLayer_Hardware|Hardware]]. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The [[SoftLayer_Network_Component|Network Component]] that a uplink connection belongs to.. + NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` +} + +// The SoftLayer_Network_ContentDelivery_Account data type models an individual CDN account. CDN accounts contain references to the SoftLayer customer account they belong to, login credentials for upload services, and a CDN account's status. Please contact SoftLayer sales to purchase or cancel a CDN account +type Network_ContentDelivery_Account struct { + Entity + + // The customer account that a CDN account belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The internal identifier of the customer account that a CDN account belongs to. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The CDN account id that this CDN account is associated with. + AssociatedCdnAccountId *string `json:"associatedCdnAccountId,omitempty" xmlrpc:"associatedCdnAccountId,omitempty"` + + // A count of the IP addresses that are used for the content authentication service. + AuthenticationIpAddressCount *uint `json:"authenticationIpAddressCount,omitempty" xmlrpc:"authenticationIpAddressCount,omitempty"` + + // The IP addresses that are used for the content authentication service. + AuthenticationIpAddresses []Network_ContentDelivery_Authentication_Address `json:"authenticationIpAddresses,omitempty" xmlrpc:"authenticationIpAddresses,omitempty"` + + // The current billing item for a CDN account. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The name of a CDN account. + CdnAccountName *string `json:"cdnAccountName,omitempty" xmlrpc:"cdnAccountName,omitempty"` + + // A brief note on a CDN account. + CdnAccountNote *string `json:"cdnAccountNote,omitempty" xmlrpc:"cdnAccountNote,omitempty"` + + // The solution type of a CDN account. + CdnSolutionName *string `json:"cdnSolutionName,omitempty" xmlrpc:"cdnSolutionName,omitempty"` + + // The date that a CDN account was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Indicates if CDN account is dependent on other service. 
If set, this CDN account is limited to these services: createOriginPullMapping, deleteOriginPullRule, getOriginPullMappingInformation, getCdnUrls, purgeCache, loadContent, manageHttpCompression + DependantServiceFlag *bool `json:"dependantServiceFlag,omitempty" xmlrpc:"dependantServiceFlag,omitempty"` + + // A CDN account's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Indicates if it is a legacy CDN or not + LegacyCdnFlag *bool `json:"legacyCdnFlag,omitempty" xmlrpc:"legacyCdnFlag,omitempty"` + + // Indicates if CDN logging is enabled. + LogEnabledFlag *string `json:"logEnabledFlag,omitempty" xmlrpc:"logEnabledFlag,omitempty"` + + // Indicates if customer is allowed to access the CDN provider's management portal. + ProviderPortalAccessFlag *bool `json:"providerPortalAccessFlag,omitempty" xmlrpc:"providerPortalAccessFlag,omitempty"` + + // A CDN account's status presented in a more detailed data type. + Status *Network_ContentDelivery_Account_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The internal identifier of a CDN status + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // Indicates if the token authentication service is enabled or not. + TokenAuthenticationEnabledFlag *bool `json:"tokenAuthenticationEnabledFlag,omitempty" xmlrpc:"tokenAuthenticationEnabledFlag,omitempty"` +} + +// The SoftLayer_Network_ContentDelivery_Account_Status contains information on a CDN account. +type Network_ContentDelivery_Account_Status struct { + Entity + + // A longer description of a CDN account's status. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A CDN account status' internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A CDN account status' name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Network_ContentDelivery_Authentication_Address data type models an individual IP address that CDN allow or deny access from. +type Network_ContentDelivery_Authentication_Address struct { + Entity + + // The type of access on an IP address. It can be "ALLOW" or "DENY" + AccessType *string `json:"accessType,omitempty" xmlrpc:"accessType,omitempty"` + + // The internal identifier of the CDN account + CdnAccountId *int `json:"cdnAccountId,omitempty" xmlrpc:"cdnAccountId,omitempty"` + + // The created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The internal identifier of an authentication IP address + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The IP address that you want to block or allow access to + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // The last modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The name of an authentication IP. This helps you to keep track of IP addresses. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The priority of an authentication IP address. The smaller number, the higher in priority. Higher priority IP will be matched first. + Priority *int `json:"priority,omitempty" xmlrpc:"priority,omitempty"` + + // The internal identifier of the user who created an authentication IP record + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// The SoftLayer_Network_ContentDelivery_Authentication_Address data type models an individual IP address that CDN allow or deny access from. 
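As an illustration of the access semantics documented on SoftLayer_Network_ContentDelivery_Authentication_Address above (entries are "ALLOW" or "DENY", and a smaller priority number is matched first), here is a minimal Go sketch built on those fields. The pointer helpers, sample addresses, and the explicit sort are assumptions made for the example; only the datatype and its fields come from the vendored package.

package main

import (
	"fmt"
	"sort"

	// assumes the vendored softlayer-go module is available on the module path
	"github.com/softlayer/softlayer-go/datatypes"
)

// tiny pointer helpers, local to this sketch
func str(s string) *string { return &s }
func num(i int) *int       { return &i }

func main() {
	// Two hypothetical authentication entries for the same address.
	entries := []datatypes.Network_ContentDelivery_Authentication_Address{
		{AccessType: str("DENY"), IpAddress: str("203.0.113.7"), Priority: num(10), Name: str("blocked scraper")},
		{AccessType: str("ALLOW"), IpAddress: str("203.0.113.7"), Priority: num(1), Name: str("origin fetcher")},
	}

	// A smaller priority number means higher priority, so sorting ascending
	// shows the order in which the entries would be matched.
	sort.Slice(entries, func(i, j int) bool { return *entries[i].Priority < *entries[j].Priority })
	for _, e := range entries {
		fmt.Printf("%-5s %s (priority %d, %s)\n", *e.AccessType, *e.IpAddress, *e.Priority, *e.Name)
	}
}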
+type Network_ContentDelivery_Authentication_Token struct { + Entity + + // The internal identifier of a CDN account + CdnAccountId *int `json:"cdnAccountId,omitempty" xmlrpc:"cdnAccountId,omitempty"` + + // The client IP address. This is optional. + ClientIp *string `json:"clientIp,omitempty" xmlrpc:"clientIp,omitempty"` + + // The created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The customer id. You can use this optional value to tie a user id to an authentication token. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The referrer information. This is optional. + Referrer *string `json:"referrer,omitempty" xmlrpc:"referrer,omitempty"` + + // The managed token string + Token *string `json:"token,omitempty" xmlrpc:"token,omitempty"` +} + +// The SoftLayer_Network_Customer_Subnet data type contains general information relating to a single customer subnet (remote). +type Network_Customer_Subnet struct { + Entity + + // The account id a customer subnet belongs to. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A subnet's Classless Inter-Domain Routing prefix. This is a number between 0 and 32 signifying the number of bits in a subnet's netmask. These bits separate a subnet's network address from it's host addresses. It performs the same function as the ''netmask'' property, but is represented as an integer. + Cidr *int `json:"cidr,omitempty" xmlrpc:"cidr,omitempty"` + + // A customer subnet's unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of all ip addresses associated with a subnet. + IpAddressCount *uint `json:"ipAddressCount,omitempty" xmlrpc:"ipAddressCount,omitempty"` + + // All ip addresses associated with a subnet. + IpAddresses []Network_Customer_Subnet_IpAddress `json:"ipAddresses,omitempty" xmlrpc:"ipAddresses,omitempty"` + + // A bitmask in dotted-quad format that is used to separate a subnet's network address from it's host addresses. This performs the same function as the ''cidr'' property, but is expressed in a string format. + Netmask *string `json:"netmask,omitempty" xmlrpc:"netmask,omitempty"` + + // A subnet's network identifier. This is the first IP address of a subnet. + NetworkIdentifier *string `json:"networkIdentifier,omitempty" xmlrpc:"networkIdentifier,omitempty"` + + // The total number of ip addresses in a subnet. + TotalIpAddresses *int `json:"totalIpAddresses,omitempty" xmlrpc:"totalIpAddresses,omitempty"` +} + +// The SoftLayer_Network_Customer_Subnet_IpAddress data type contains general information relating to a single Customer Subnet (Remote) IPv4 address. +type Network_Customer_Subnet_IpAddress struct { + Entity + + // Unique identifier for an ip address. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // An IP address expressed in dotted quad format. + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // An IP address' user defined note. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The customer subnet (remote) that the ip address belongs to. + Subnet *Network_Customer_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` + + // The unique identifier for the customer subnet (remote) the ip address belongs to. + SubnetId *int `json:"subnetId,omitempty" xmlrpc:"subnetId,omitempty"` + + // A count of all the address translations that are tied to an IP address. 
+ TranslationCount *uint `json:"translationCount,omitempty" xmlrpc:"translationCount,omitempty"` + + // All the address translations that are tied to an IP address. + Translations []Network_Tunnel_Module_Context_Address_Translation `json:"translations,omitempty" xmlrpc:"translations,omitempty"` +} + +// The SoftLayer_Network_DirectLink_Location presents a structure containing attributes of a Direct Link location, and its related object SoftLayer location. +type Network_DirectLink_Location struct { + Entity + + // The Direct Link specific location owner for POP/DC facilities. Like Equinix, Pacnet, Verizon etc. + BuildingColocationOwner *string `json:"buildingColocationOwner,omitempty" xmlrpc:"buildingColocationOwner,omitempty"` + + // The unique identifier of a Direct Link location. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Specifies if The Direct Link specific location has Redundancy:secondary XCR availability. + IsRedundantXcr *bool `json:"isRedundantXcr,omitempty" xmlrpc:"isRedundantXcr,omitempty"` + + // The location of Direct Link facility. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // The Direct Link specific location ie. Data Center & Network POP facility. Refer to location object Like Dallas in US, London in England etc. + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // The Direct Link Market location used in Direct Link Order. Like Europe, North America, Asia pacific etc. + MarketGeography *string `json:"marketGeography,omitempty" xmlrpc:"marketGeography,omitempty"` + + // The Id of Direct Link provider. + Provider *Network_DirectLink_Provider `json:"provider,omitempty" xmlrpc:"provider,omitempty"` + + // The Id of Direct Link service type. + ServiceType *Network_DirectLink_ServiceType `json:"serviceType,omitempty" xmlrpc:"serviceType,omitempty"` +} + +// The SoftLayer_Network_DirectLink_Provider presents a structure containing attributes of a Direct Link provider. +type Network_DirectLink_Provider struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Network_DirectLink_ServiceType presents a structure containing attributes of a Direct Link Service Type. +type Network_DirectLink_ServiceType struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Network_Firewall_AccessControlList data type contains general information relating to a single SoftLayer firewall access to controll list. This is the object which ties the running rules to a specific context. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. +type Network_Firewall_AccessControlList struct { + Entity + + // no documentation yet + Direction *string `json:"direction,omitempty" xmlrpc:"direction,omitempty"` + + // no documentation yet + FirewallContextInterfaceId *int `json:"firewallContextInterfaceId,omitempty" xmlrpc:"firewallContextInterfaceId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of the update requests made for this firewall. 
+ NetworkFirewallUpdateRequestCount *uint `json:"networkFirewallUpdateRequestCount,omitempty" xmlrpc:"networkFirewallUpdateRequestCount,omitempty"` + + // The update requests made for this firewall. + NetworkFirewallUpdateRequests []Network_Firewall_Update_Request `json:"networkFirewallUpdateRequests,omitempty" xmlrpc:"networkFirewallUpdateRequests,omitempty"` + + // no documentation yet + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // A count of the currently running rule set of this context access control list firewall. + RuleCount *uint `json:"ruleCount,omitempty" xmlrpc:"ruleCount,omitempty"` + + // The currently running rule set of this context access control list firewall. + Rules []Network_Vlan_Firewall_Rule `json:"rules,omitempty" xmlrpc:"rules,omitempty"` +} + +// The SoftLayer_Network_Firewall_Interface data type contains general information relating to a single SoftLayer firewall interface. This is the object which ties the firewall context access control list to a firewall. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. +type Network_Firewall_Interface struct { + Network_Firewall_Module_Context_Interface +} + +// no documentation yet +type Network_Firewall_Module_Context_Interface struct { + Entity + + // A count of + FirewallContextAccessControlListCount *uint `json:"firewallContextAccessControlListCount,omitempty" xmlrpc:"firewallContextAccessControlListCount,omitempty"` + + // no documentation yet + FirewallContextAccessControlLists []Network_Firewall_AccessControlList `json:"firewallContextAccessControlLists,omitempty" xmlrpc:"firewallContextAccessControlLists,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` +} + +// The SoftLayer_Network_Firewall_Template type contains general information for a SoftLayer network firewall template. +// +// Firewall templates are recommend rule sets for use with SoftLayer Hardware Firewall (Dedicated). These optimized templates are designed to balance security restriction with application availability. The templates given may be altered to provide custom network security, or may be used as-is for basic security. At least one rule set MUST be applied for the firewall to block traffic. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. +type Network_Firewall_Template struct { + Entity + + // A Firewall template's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The name of the firewall rules template. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of the rule set that belongs to this firewall rules template. + RuleCount *uint `json:"ruleCount,omitempty" xmlrpc:"ruleCount,omitempty"` + + // The rule set that belongs to this firewall rules template. + Rules []Network_Firewall_Template_Rule `json:"rules,omitempty" xmlrpc:"rules,omitempty"` +} + +// The SoftLayer_Network_Component_Firewall_Rule type contains general information relating to a single SoftLayer firewall template rule. 
Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. +type Network_Firewall_Template_Rule struct { + Entity + + // The action that this template rule is to take [permit or deny]. + Action *string `json:"action,omitempty" xmlrpc:"action,omitempty"` + + // The destination IP address considered for determining rule application. + DestinationIpAddress *string `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"` + + // The destination IP subnet mask considered for determining rule application. + DestinationIpSubnetMask *string `json:"destinationIpSubnetMask,omitempty" xmlrpc:"destinationIpSubnetMask,omitempty"` + + // The ending (upper end of range) destination port considered for determining rule application. + DestinationPortRangeEnd *int `json:"destinationPortRangeEnd,omitempty" xmlrpc:"destinationPortRangeEnd,omitempty"` + + // The starting (lower end of range) destination port considered for determining rule application. + DestinationPortRangeStart *int `json:"destinationPortRangeStart,omitempty" xmlrpc:"destinationPortRangeStart,omitempty"` + + // The firewall template that this rule is attached to. + FirewallTemplate *Network_Firewall_Template `json:"firewallTemplate,omitempty" xmlrpc:"firewallTemplate,omitempty"` + + // The unique identifier of the firewall template that a firewall template rule is associated with. + FirewallTemplateId *int `json:"firewallTemplateId,omitempty" xmlrpc:"firewallTemplateId,omitempty"` + + // A Firewall template rule's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The notes field for the firewall template rule. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The numeric value describing the order in which the rule set should be applied. + OrderValue *int `json:"orderValue,omitempty" xmlrpc:"orderValue,omitempty"` + + // The protocol considered for determining rule application. + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // The source IP address considered for determining rule application. + SourceIpAddress *string `json:"sourceIpAddress,omitempty" xmlrpc:"sourceIpAddress,omitempty"` + + // The source IP subnet mask considered for determining rule application. + SourceIpSubnetMask *string `json:"sourceIpSubnetMask,omitempty" xmlrpc:"sourceIpSubnetMask,omitempty"` +} + +// The SoftLayer_Network_Firewall_Update_Request data type contains information relating to a SoftLayer network firewall update request. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. +type Network_Firewall_Update_Request struct { + Entity + + // Timestamp of when the rules from the update request were applied to the firewall. + ApplyDate *Time `json:"applyDate,omitempty" xmlrpc:"applyDate,omitempty"` + + // The user that authorized this firewall update request. + AuthorizingUser *User_Interface `json:"authorizingUser,omitempty" xmlrpc:"authorizingUser,omitempty"` + + // The unique identifier of the user that authorized the update request. + AuthorizingUserId *int `json:"authorizingUserId,omitempty" xmlrpc:"authorizingUserId,omitempty"` + + // The type of user that authorized the update request [EMP or USR]. 
+ AuthorizingUserType *string `json:"authorizingUserType,omitempty" xmlrpc:"authorizingUserType,omitempty"` + + // Flag indicating whether the request is for a rule bypass configuration [0 or 1]. + BypassFlag *bool `json:"bypassFlag,omitempty" xmlrpc:"bypassFlag,omitempty"` + + // Timestamp of the creation of the record. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The unique identifier of the firewall access control list that the rule set is destined for. + FirewallContextAccessControlListId *int `json:"firewallContextAccessControlListId,omitempty" xmlrpc:"firewallContextAccessControlListId,omitempty"` + + // The downstream virtual server that the rule set will be applied to. + Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"` + + // The downstream server that the rule set will be applied to. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The unique identifier of the server that the rule set is destined to protect. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // The unique identifier of the firewall update request. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The network component firewall that the rule set will be applied to. + NetworkComponentFirewall *Network_Component_Firewall `json:"networkComponentFirewall,omitempty" xmlrpc:"networkComponentFirewall,omitempty"` + + // The unique identifier of the network component firewall that the rule set is destined for. + NetworkComponentFirewallId *int `json:"networkComponentFirewallId,omitempty" xmlrpc:"networkComponentFirewallId,omitempty"` + + // A count of the group of rules contained within the update request. + RuleCount *uint `json:"ruleCount,omitempty" xmlrpc:"ruleCount,omitempty"` + + // The group of rules contained within the update request. + Rules []Network_Firewall_Update_Request_Rule `json:"rules,omitempty" xmlrpc:"rules,omitempty"` +} + +// A SoftLayer_Ticket_Update_Customer is a single update made by a customer to a ticket. +type Network_Firewall_Update_Request_Customer struct { + Network_Firewall_Update_Request +} + +// The SoftLayer_Network_Firewall_Update_Request_Employee data type returns a user object for the SoftLayer employee that created the request. +type Network_Firewall_Update_Request_Employee struct { + Network_Firewall_Update_Request +} + +// The SoftLayer_Network_Firewall_Update_Request_Rule type contains information relating to a SoftLayer network firewall update request rule. This rule is a member of a [[SoftLayer Network Firewall Update Request]]. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. +type Network_Firewall_Update_Request_Rule struct { + Entity + + // The action that this update request rule is to take [permit or deny]. + Action *string `json:"action,omitempty" xmlrpc:"action,omitempty"` + + // The destination IP address considered for determining rule application. + DestinationIpAddress *string `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"` + + // The CIDR is used for determining rule application. This value will + DestinationIpCidr *int `json:"destinationIpCidr,omitempty" xmlrpc:"destinationIpCidr,omitempty"` + + // The destination IP subnet mask considered for determining rule application. 
+ DestinationIpSubnetMask *string `json:"destinationIpSubnetMask,omitempty" xmlrpc:"destinationIpSubnetMask,omitempty"` + + // The ending (upper end of range) destination port considered for determining rule application. + DestinationPortRangeEnd *int `json:"destinationPortRangeEnd,omitempty" xmlrpc:"destinationPortRangeEnd,omitempty"` + + // The starting (lower end of range) destination port considered for determining rule application. + DestinationPortRangeStart *int `json:"destinationPortRangeStart,omitempty" xmlrpc:"destinationPortRangeStart,omitempty"` + + // The update request that this rule belongs to. + FirewallUpdateRequest *Network_Firewall_Update_Request `json:"firewallUpdateRequest,omitempty" xmlrpc:"firewallUpdateRequest,omitempty"` + + // The unique identifier of the firewall update request that a firewall update request rule is associated with. + FirewallUpdateRequestId *int `json:"firewallUpdateRequestId,omitempty" xmlrpc:"firewallUpdateRequestId,omitempty"` + + // A Firewall update request rule's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The notes field for the firewall update request rule. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The numeric value describing the order in which the rule should be applied. + OrderValue *int `json:"orderValue,omitempty" xmlrpc:"orderValue,omitempty"` + + // The protocol considered for determining rule application. + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // The source IP address considered for determining rule application. + SourceIpAddress *string `json:"sourceIpAddress,omitempty" xmlrpc:"sourceIpAddress,omitempty"` + + // The CIDR is used for determining rule application. This value will + SourceIpCidr *int `json:"sourceIpCidr,omitempty" xmlrpc:"sourceIpCidr,omitempty"` + + // The source IP subnet mask considered for determining rule application. + SourceIpSubnetMask *string `json:"sourceIpSubnetMask,omitempty" xmlrpc:"sourceIpSubnetMask,omitempty"` + + // Whether this rule is an IPv4 rule or an IPv6 rule. If + Version *int `json:"version,omitempty" xmlrpc:"version,omitempty"` +} + +// The SoftLayer_Network_Firewall_Update_Request_Rule_Version6 type contains information relating to a SoftLayer network firewall update request rule for IPv6. This rule is a member of a [[SoftLayer Network Firewall Update Request]]. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. +type Network_Firewall_Update_Request_Rule_Version6 struct { + Network_Firewall_Update_Request_Rule +} + +// no documentation yet +type Network_Gateway struct { + Entity + + // The account for this gateway. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The internal identifier of the account assigned to this gateway. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The VRRP group number for this gateway. This is set internally and cannot be provided on create. + GroupNumber *int `json:"groupNumber,omitempty" xmlrpc:"groupNumber,omitempty"` + + // A gateway's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of all VLANs trunked to this gateway. + InsideVlanCount *uint `json:"insideVlanCount,omitempty" xmlrpc:"insideVlanCount,omitempty"` + + // All VLANs trunked to this gateway. 
+ InsideVlans []Network_Gateway_Vlan `json:"insideVlans,omitempty" xmlrpc:"insideVlans,omitempty"` + + // A count of the members for this gateway. + MemberCount *uint `json:"memberCount,omitempty" xmlrpc:"memberCount,omitempty"` + + // The members for this gateway. + Members []Network_Gateway_Member `json:"members,omitempty" xmlrpc:"members,omitempty"` + + // A gateway's name. This is required on create and can be no more than 255 characters. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The firewall associated with this gateway, if any. + NetworkFirewall *Network_Vlan_Firewall `json:"networkFirewall,omitempty" xmlrpc:"networkFirewall,omitempty"` + + // Whether or not there is a firewall associated with this gateway. + NetworkFirewallFlag *bool `json:"networkFirewallFlag,omitempty" xmlrpc:"networkFirewallFlag,omitempty"` + + // A gateway's network space. Currently, only 'private' or 'both' is allowed. When this value is 'private', it is a backend gateway only. Otherwise, it is a gateway for both frontend and backend traffic. + NetworkSpace *string `json:"networkSpace,omitempty" xmlrpc:"networkSpace,omitempty"` + + // The private gateway IP address. + PrivateIpAddress *Network_Subnet_IpAddress `json:"privateIpAddress,omitempty" xmlrpc:"privateIpAddress,omitempty"` + + // The internal identifier of the private IP address for this gateway. + PrivateIpAddressId *int `json:"privateIpAddressId,omitempty" xmlrpc:"privateIpAddressId,omitempty"` + + // The private VLAN for accessing this gateway. + PrivateVlan *Network_Vlan `json:"privateVlan,omitempty" xmlrpc:"privateVlan,omitempty"` + + // The internal identifier of the private VLAN for this gateway. + PrivateVlanId *int `json:"privateVlanId,omitempty" xmlrpc:"privateVlanId,omitempty"` + + // The public gateway IP address. + PublicIpAddress *Network_Subnet_IpAddress `json:"publicIpAddress,omitempty" xmlrpc:"publicIpAddress,omitempty"` + + // The internal identifier of the public IP address for this gateway. + PublicIpAddressId *int `json:"publicIpAddressId,omitempty" xmlrpc:"publicIpAddressId,omitempty"` + + // The public gateway IPv6 address. + PublicIpv6Address *Network_Subnet_IpAddress `json:"publicIpv6Address,omitempty" xmlrpc:"publicIpv6Address,omitempty"` + + // The internal identifier of the public IPv6 address for this gateway. + PublicIpv6AddressId *int `json:"publicIpv6AddressId,omitempty" xmlrpc:"publicIpv6AddressId,omitempty"` + + // The public VLAN for accessing this gateway. + PublicVlan *Network_Vlan `json:"publicVlan,omitempty" xmlrpc:"publicVlan,omitempty"` + + // The internal identifier of the public VLAN for this gateway. This is set internally and cannot be provided on create. + PublicVlanId *int `json:"publicVlanId,omitempty" xmlrpc:"publicVlanId,omitempty"` + + // The current status of the gateway. + Status *Network_Gateway_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The current status of this gateway. This is always active unless there is a process running to change the gateway. This can not be set on creation. + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` +} + +// no documentation yet +type Network_Gateway_Member struct { + Entity + + // The device for this member. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The internal identifier of the hardware for this member. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // A gateway member's internal identifier. 
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The gateway this member belongs to. + NetworkGateway *Network_Gateway `json:"networkGateway,omitempty" xmlrpc:"networkGateway,omitempty"` + + // The internal identifier of the gateway this member belongs to. + NetworkGatewayId *int `json:"networkGatewayId,omitempty" xmlrpc:"networkGatewayId,omitempty"` + + // The priority for this gateway member. This is set internally and cannot be provided on create. + Priority *int `json:"priority,omitempty" xmlrpc:"priority,omitempty"` +} + +// no documentation yet +type Network_Gateway_Status struct { + Entity + + // A gateway status's description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A gateway status's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A gateway status's programmatic name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A gateway status's human-friendly name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Gateway_Vlan struct { + Entity + + // If true, this VLAN is bypassed. If false, it is routed through the gateway. + BypassFlag *bool `json:"bypassFlag,omitempty" xmlrpc:"bypassFlag,omitempty"` + + // A gateway VLAN's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The gateway this VLAN is attached to. + NetworkGateway *Network_Gateway `json:"networkGateway,omitempty" xmlrpc:"networkGateway,omitempty"` + + // The internal identifier of the gateway this VLAN is attached to. + NetworkGatewayId *int `json:"networkGatewayId,omitempty" xmlrpc:"networkGatewayId,omitempty"` + + // The network VLAN record. + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // The internal identifier of the network VLAN. + NetworkVlanId *int `json:"networkVlanId,omitempty" xmlrpc:"networkVlanId,omitempty"` +} + +// no documentation yet +type Network_Interconnect_Tenant struct { + Entity + + // Specifies ASN used for BGP. + BgpAsn *int `json:"bgpAsn,omitempty" xmlrpc:"bgpAsn,omitempty"` + + // The billing item for a network interconnect. + BillingItem *Billing_Item_Network_Interconnect `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + DatacenterName *string `json:"datacenterName,omitempty" xmlrpc:"datacenterName,omitempty"` + + // no documentation yet + ErrorMessage *string `json:"errorMessage,omitempty" xmlrpc:"errorMessage,omitempty"` + + // The Direct Link connectivity to all SoftLayer data centers if globalRoutingFlag = 1 and local connectivity if globalRoutingFlag = 0. + GlobalRoutingFlag *bool `json:"globalRoutingFlag,omitempty" xmlrpc:"globalRoutingFlag,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Link speed of a Direct Link connection. + LinkSpeed *int `json:"linkSpeed,omitempty" xmlrpc:"linkSpeed,omitempty"` + + // IP address (v4 or v6) of "near" router serial interface. No check/update of IP Address table. + LocalIpAddress *string `json:"localIpAddress,omitempty" xmlrpc:"localIpAddress,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Specifies the Interconnect connection name. 
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // This field will have the ticket id if the tenant workflow fails + Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"` + + // Link speed of a Direct Link connection on Equinix Side. + PeerLinkSpeed *int `json:"peerLinkSpeed,omitempty" xmlrpc:"peerLinkSpeed,omitempty"` + + // no documentation yet + PortLabel *string `json:"portLabel,omitempty" xmlrpc:"portLabel,omitempty"` + + // Specifies redundant connection is available if 1. + RedundancyFlag *bool `json:"redundancyFlag,omitempty" xmlrpc:"redundancyFlag,omitempty"` + + // no documentation yet + RemoteIpAddress *string `json:"remoteIpAddress,omitempty" xmlrpc:"remoteIpAddress,omitempty"` + + // Service key for Interconnect connection. + ServiceKey *string `json:"serviceKey,omitempty" xmlrpc:"serviceKey,omitempty"` + + // no documentation yet + ServiceType *Network_DirectLink_ServiceType `json:"serviceType,omitempty" xmlrpc:"serviceType,omitempty"` + + // no documentation yet + ServiceTypeId *int `json:"serviceTypeId,omitempty" xmlrpc:"serviceTypeId,omitempty"` + + // The direct link connection status. IN_PROGRESS, PROVISIONING, CONNECTION_UP, CONNECTION_DOWN + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // no documentation yet + VendorName *string `json:"vendorName,omitempty" xmlrpc:"vendorName,omitempty"` + + // no documentation yet + VlanId *int `json:"vlanId,omitempty" xmlrpc:"vlanId,omitempty"` + + // no documentation yet + ZoneName *string `json:"zoneName,omitempty" xmlrpc:"zoneName,omitempty"` +} + +// The SoftLayer_Network_LBaaS_HealthMonitor type presents a structure containing attributes of a health monitor object associated with load balancer instance. Note that the relationship between backend (pool) and health monitor is N-to-1, especially that the pools object associated with a health monitor must have the same pair of protocol and port. Example: frontend FA: http, 80 - backend BA: tcp, 3456 - healthmonitor HM_tcp3456 frontend FB: https, 443 - backend BB: tcp, 3456 - healthmonitor HM_tcp3456 In above example both backends BA and BB share the same healthmonitor HM_tcp3456 +type Network_LBaaS_HealthMonitor struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Interval *int `json:"interval,omitempty" xmlrpc:"interval,omitempty"` + + // no documentation yet + MaxRetries *int `json:"maxRetries,omitempty" xmlrpc:"maxRetries,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + MonitorType *string `json:"monitorType,omitempty" xmlrpc:"monitorType,omitempty"` + + // no documentation yet + ProvisioningStatus *string `json:"provisioningStatus,omitempty" xmlrpc:"provisioningStatus,omitempty"` + + // no documentation yet + Timeout *int `json:"timeout,omitempty" xmlrpc:"timeout,omitempty"` + + // no documentation yet + UrlPath *string `json:"urlPath,omitempty" xmlrpc:"urlPath,omitempty"` + + // no documentation yet + Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"` +} + +// The SoftLayer_Network_LBaaS_Listener type presents a data structure for a load balancers listener, also called frontend. 
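The N-to-1 relationship between backend pools and a health monitor described in the SoftLayer_Network_LBaaS_HealthMonitor comment above (frontends FA/FB whose backends BA/BB share one HM_tcp3456 monitor) can be sketched with the LBaaS datatypes in this file; Network_LBaaS_Listener is declared just below and Network_LBaaS_Pool further down. The ports mirror the comment's example, while the UUID and pointer helpers are assumptions made for the sketch.

package main

import (
	"fmt"

	// assumes the vendored softlayer-go module is available on the module path
	"github.com/softlayer/softlayer-go/datatypes"
)

func str(s string) *string { return &s }
func num(i int) *int       { return &i }

func main() {
	// One health monitor shared by every backend pool that checks tcp/3456.
	hm := &datatypes.Network_LBaaS_HealthMonitor{
		MonitorType: str("TCP"),
		Interval:    num(5),
		MaxRetries:  num(2),
		Timeout:     num(2),
		Uuid:        str("hm-tcp-3456"), // hypothetical identifier
	}

	// Backends BA and BB: same protocol and port, same shared monitor.
	ba := datatypes.Network_LBaaS_Pool{Protocol: str("TCP"), ProtocolPort: num(3456), HealthMonitor: hm}
	bb := datatypes.Network_LBaaS_Pool{Protocol: str("TCP"), ProtocolPort: num(3456), HealthMonitor: hm}

	// Frontends FA (http/80) and FB (https/443), each with its own default pool.
	fa := datatypes.Network_LBaaS_Listener{Protocol: str("HTTP"), ProtocolPort: num(80), DefaultPool: &ba}
	fb := datatypes.Network_LBaaS_Listener{Protocol: str("HTTPS"), ProtocolPort: num(443), DefaultPool: &bb}

	for _, l := range []datatypes.Network_LBaaS_Listener{fa, fb} {
		fmt.Printf("%s:%d -> pool %s:%d (monitor %s)\n",
			*l.Protocol, *l.ProtocolPort,
			*l.DefaultPool.Protocol, *l.DefaultPool.ProtocolPort,
			*l.DefaultPool.HealthMonitor.Uuid)
	}
}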
+type Network_LBaaS_Listener struct { + Entity + + // Limit of connections a listener can accept + ConnectionLimit *int `json:"connectionLimit,omitempty" xmlrpc:"connectionLimit,omitempty"` + + // Specifies when the listener was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + DefaultPool *Network_LBaaS_Pool `json:"defaultPool,omitempty" xmlrpc:"defaultPool,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Specifies when the listener was updated previously. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Listeners protocol, one of "TCP", "HTTP", "HTTPS". + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // Listeners protocol port number. + ProtocolPort *int `json:"protocolPort,omitempty" xmlrpc:"protocolPort,omitempty"` + + // The provisioning status of listener. + ProvisioningStatus *string `json:"provisioningStatus,omitempty" xmlrpc:"provisioningStatus,omitempty"` + + // This references to SSL/TLS certificate (optional) for a listener + TlsCertificateId *int `json:"tlsCertificateId,omitempty" xmlrpc:"tlsCertificateId,omitempty"` + + // The UUID of a listener. + Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"` +} + +// The SoftLayer_Network_LBaaS_LoadBalancer type presents a structure containing attributes of a load balancer, and its related objects including listeners, pools and members. +type Network_LBaaS_LoadBalancer struct { + Entity + + // The account this load balancer belongs to. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // Address (Host name) of a load balancer. + Address *string `json:"address,omitempty" xmlrpc:"address,omitempty"` + + // Specifies when a load balancer was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Datacenter, where load balancer is located. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // Description of a load balancer. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A count of health monitors for the backend members. + HealthMonitorCount *uint `json:"healthMonitorCount,omitempty" xmlrpc:"healthMonitorCount,omitempty"` + + // Health monitors for the backend members. + HealthMonitors []Network_LBaaS_HealthMonitor `json:"healthMonitors,omitempty" xmlrpc:"healthMonitors,omitempty"` + + // The unique identifier of a load balancer. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Specifies whether the load balancer is a public or internal load balancer. + IsPublic *int `json:"isPublic,omitempty" xmlrpc:"isPublic,omitempty"` + + // A count of listeners assigned to load balancer. + ListenerCount *uint `json:"listenerCount,omitempty" xmlrpc:"listenerCount,omitempty"` + + // Listeners assigned to load balancer. + Listeners []Network_LBaaS_Listener `json:"listeners,omitempty" xmlrpc:"listeners,omitempty"` + + // This references to location with type datacenter + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // A count of members assigned to load balancer. + MemberCount *uint `json:"memberCount,omitempty" xmlrpc:"memberCount,omitempty"` + + // Members assigned to load balancer. + Members []Network_LBaaS_Member `json:"members,omitempty" xmlrpc:"members,omitempty"` + + // Specifies when a load balancer was updated last. 
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The load balancer's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The operation status "ONLINE" or "OFFLINE" of a load balancer. + OperatingStatus *string `json:"operatingStatus,omitempty" xmlrpc:"operatingStatus,omitempty"` + + // Error message of previous API call in case of failure + PreviousErrorText *string `json:"previousErrorText,omitempty" xmlrpc:"previousErrorText,omitempty"` + + // The provisioning status of a load balancer. + ProvisioningStatus *string `json:"provisioningStatus,omitempty" xmlrpc:"provisioningStatus,omitempty"` + + // A count of list of preferred custom ciphers configured for the load balancer. + SslCipherCount *uint `json:"sslCipherCount,omitempty" xmlrpc:"sslCipherCount,omitempty"` + + // list of preferred custom ciphers configured for the load balancer. + SslCiphers []Network_LBaaS_SSLCipher `json:"sslCiphers,omitempty" xmlrpc:"sslCiphers,omitempty"` + + // Applicable for public load balancer only. It specifies whether the public IP addresses are allocated from system public IP pool (1, default) or public subnet (null | 0) from the account ordering the load balancer. For internal load balancer, useSystemPublicIpPool will be ignored, and it always defaults to 1. + UseSystemPublicIpPool *int `json:"useSystemPublicIpPool,omitempty" xmlrpc:"useSystemPublicIpPool,omitempty"` + + // The UUID of a load balancer. + Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"` +} + +// SoftLayer_Network_LBaaS_LoadBalancerHealthMonitorConfiguration specifies the check method to be used for health monitoring backend members. +type Network_LBaaS_LoadBalancerHealthMonitorConfiguration struct { + Entity + + // Backends port + BackendPort *int `json:"backendPort,omitempty" xmlrpc:"backendPort,omitempty"` + + // <
The available metrics are: the metric value, and the timestamp when the metric value was obtained.
  • +type Network_LBaaS_LoadBalancerMonitoringMetricDataPoint struct { + Entity + + // Epoch Time + EpochTimestamp *int `json:"epochTimestamp,omitempty" xmlrpc:"epochTimestamp,omitempty"` + + // a value + Value *Float64 `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// SoftLayer_Network_LBaaS_LoadBalancerProtocolConfiguration specifies the protocol, port, maximum number of allowed connections and session stickiness for load balancer's front- and backend. +type Network_LBaaS_LoadBalancerProtocolConfiguration struct { + Entity + + // Backends port + BackendPort *int `json:"backendPort,omitempty" xmlrpc:"backendPort,omitempty"` + + // <
The available metrics are: number of members up, number of members down, total number of active connections, throughput, data processed by month, and connection rate.
  • +type Network_LBaaS_LoadBalancerStatistics struct { + Entity + + // Number of connections seen at the + ConnectionRate *int `json:"connectionRate,omitempty" xmlrpc:"connectionRate,omitempty"` + + // Data processed by month is the total of bin and bout + DataProcessedByMonth *int `json:"dataProcessedByMonth,omitempty" xmlrpc:"dataProcessedByMonth,omitempty"` + + // Number of members in DOWN health state + NumberOfMembersDown *int `json:"numberOfMembersDown,omitempty" xmlrpc:"numberOfMembersDown,omitempty"` + + // Number of members in UP health state + NumberOfMembersUp *int `json:"numberOfMembersUp,omitempty" xmlrpc:"numberOfMembersUp,omitempty"` + + // Throughput measures the total number of bits + Throughput *Float64 `json:"throughput,omitempty" xmlrpc:"throughput,omitempty"` + + // Number of total active established connections + TotalConnections *int `json:"totalConnections,omitempty" xmlrpc:"totalConnections,omitempty"` +} + +// The SoftLayer_Network_LBaaS_Member represents the backend member for a load balancer. It can be either a virtual server or a bare metal machine. +type Network_LBaaS_Member struct { + Entity + + // The IP address of a load balancer member. + Address *string `json:"address,omitempty" xmlrpc:"address,omitempty"` + + // Specifies when a load balancers + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Specifies when a load balancers + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The provisioning status of a load balancer member. + ProvisioningStatus *string `json:"provisioningStatus,omitempty" xmlrpc:"provisioningStatus,omitempty"` + + // The UUID of a load balancer member. + Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"` + + // The weight of a load balancer member. + Weight *int `json:"weight,omitempty" xmlrpc:"weight,omitempty"` +} + +// SoftLayer_Network_LBaaS_MemberHealth is a collection member metrics retrieved from a LBaaS VSI instance. The available metrics are:
the name of the member, the status of the member (up or down), and the UUID of the member.
    +type Network_LBaaS_MemberHealth struct { + Entity + + // Members status (UP/DOWN). + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // Members UUID. + Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"` +} + +// The SoftLayer_Network_LBaaS_Pool type presents a structure containing attributes of a load balancer pool such as the protocol, protocol port and the load balancing algorithm used. +type Network_LBaaS_Pool struct { + Entity + + // Create date of the pool instance + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + HealthMonitor *Network_LBaaS_HealthMonitor `json:"healthMonitor,omitempty" xmlrpc:"healthMonitor,omitempty"` + + // Load balancing algorithm: "ROUNDROBIN", "WEIGHTED_RR", "LEASTCONNECTION" + LoadBalancingAlgorithm *string `json:"loadBalancingAlgorithm,omitempty" xmlrpc:"loadBalancingAlgorithm,omitempty"` + + // A count of + MemberCount *uint `json:"memberCount,omitempty" xmlrpc:"memberCount,omitempty"` + + // no documentation yet + Members []Network_LBaaS_Member `json:"members,omitempty" xmlrpc:"members,omitempty"` + + // Last updated date of the pool + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Backends protocol, supported protocols are "TCP", "HTTP" + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // Backends protocol port + ProtocolPort *int `json:"protocolPort,omitempty" xmlrpc:"protocolPort,omitempty"` + + // Provisioning status of a load balancer pool. + ProvisioningStatus *string `json:"provisioningStatus,omitempty" xmlrpc:"provisioningStatus,omitempty"` + + // no documentation yet + SessionAffinity *Network_LBaaS_SessionAffinity `json:"sessionAffinity,omitempty" xmlrpc:"sessionAffinity,omitempty"` + + // Instance uuid of the pool + Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"` +} + +// SoftLayer_Network_LBaaS_PoolMembersHealth provides statistics of members belonging to a particular pool. +type Network_LBaaS_PoolMembersHealth struct { + Entity + + // Members statistics of the pool + MembersHealth []Network_LBaaS_MemberHealth `json:"membersHealth,omitempty" xmlrpc:"membersHealth,omitempty"` + + // Instance uuid of the pool + PoolUuid *string `json:"poolUuid,omitempty" xmlrpc:"poolUuid,omitempty"` +} + +// The SoftLayer_Network_LBaaS_SSLCipher type presents a structure that contains attributes of load balancer cipher suites. +// +// +type Network_LBaaS_SSLCipher struct { + Entity + + // Cipher identifier + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Name of the cipher + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// SoftLayer_Network_LBaaS_SessionAffinity represents the session affinity, aka session persistence, configuration for a load balancer backend pool. +type Network_LBaaS_SessionAffinity struct { + Entity + + // no documentation yet + Pool *Network_LBaaS_Pool `json:"pool,omitempty" xmlrpc:"pool,omitempty"` + + // Type of the session persistence + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Network_LoadBalancer_Global_Account data type contains the properties for a single global load balancer account. The properties you are able to edit are fallbackIp, loadBalanceTypeId, and notes. The hosts relational property can be used for creating and editing hosts that belong to the global load balancer account. 
The [[SoftLayer_Network_LoadBalancer_Global_Account::editObject|editObject]] method contains details on creating and edited hosts through the hosts relational property. +type Network_LoadBalancer_Global_Account struct { + Entity + + // Your SoftLayer customer account. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The maximum number of hosts that a global load balancer account is allowed to have. + AllowedNumberOfHosts *int `json:"allowedNumberOfHosts,omitempty" xmlrpc:"allowedNumberOfHosts,omitempty"` + + // The average amount of connections per second used within the current billing cycle. This number is updated daily. + AverageConnectionsPerSecond *Float64 `json:"averageConnectionsPerSecond,omitempty" xmlrpc:"averageConnectionsPerSecond,omitempty"` + + // The current billing item for a Global Load Balancer account. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The amount of connections per second a global load balancer account may use within a billing cycle without being billed for an overage. + ConnectionsPerSecond *int `json:"connectionsPerSecond,omitempty" xmlrpc:"connectionsPerSecond,omitempty"` + + // The IP address that will be return to a DNS request when none of the hosts for a global load balancer account could be returned. + FallbackIp *string `json:"fallbackIp,omitempty" xmlrpc:"fallbackIp,omitempty"` + + // A count of the hosts in the load balancing pool for a global load balancer account. + HostCount *uint `json:"hostCount,omitempty" xmlrpc:"hostCount,omitempty"` + + // The hostname of a global load balancer account that is being load balanced. + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // The hosts in the load balancing pool for a global load balancer account. + Hosts []Network_LoadBalancer_Global_Host `json:"hosts,omitempty" xmlrpc:"hosts,omitempty"` + + // The unique identifier of a global load balancer account. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The load balance method of a global load balancer account + LoadBalanceType *Network_LoadBalancer_Global_Type `json:"loadBalanceType,omitempty" xmlrpc:"loadBalanceType,omitempty"` + + // The identifier of the load balance method for a global load balancer account. + LoadBalanceTypeId *int `json:"loadBalanceTypeId,omitempty" xmlrpc:"loadBalanceTypeId,omitempty"` + + // A flag indicating that the global load balancer is a managed resource. + ManagedResourceFlag *bool `json:"managedResourceFlag,omitempty" xmlrpc:"managedResourceFlag,omitempty"` + + // Additional customer defined information for a global load balancer account. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` +} + +// The SoftLayer_Network_LoadBalancer_Global_Host data type represents a single host that belongs to a global load balancer account's load balancing pool. +// +// The destination IP address of a host must be one that belongs to your SoftLayer customer account, or to a datacenter load balancer virtual ip that belongs to your SoftLayer customer account. The destination IP address and port of a global load balancer host is a required field and must exist during creation and can not be removed. The acceptable values for the health check type are 'none', 'http', and 'tcp'. The status property is updated in 5 minute intervals and the hits property is updated in 10 minute intervals. 
+// +// The order of the host is only important if you are using the 'failover' load balance method, and the weight is only important if you are using the 'weighted round robin' load balance method. +type Network_LoadBalancer_Global_Host struct { + Entity + + // The IP address of the host that will be returned by the global load balancers in response to a dns request. + DestinationIp *string `json:"destinationIp,omitempty" xmlrpc:"destinationIp,omitempty"` + + // The port of the host that will be used for health checks. + DestinationPort *int `json:"destinationPort,omitempty" xmlrpc:"destinationPort,omitempty"` + + // Whether the host is enabled or not. The value can be '0' for disabled, or '1' for enabled. + Enabled *int `json:"enabled,omitempty" xmlrpc:"enabled,omitempty"` + + // The health check type of a host. Valid values include 'none', 'http', and 'tcp'. + HealthCheck *string `json:"healthCheck,omitempty" xmlrpc:"healthCheck,omitempty"` + + // The number of times the host was selected by the load balance method. + Hits *Float64 `json:"hits,omitempty" xmlrpc:"hits,omitempty"` + + // The unique identifier of a global load balancer host. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The order of this host within the load balance pool. This is only significant if the load balance method is set to failover. + LoadBalanceOrder *int `json:"loadBalanceOrder,omitempty" xmlrpc:"loadBalanceOrder,omitempty"` + + // The global load balancer account a host belongs to. + LoadBalancerAccount *Network_LoadBalancer_Global_Account `json:"loadBalancerAccount,omitempty" xmlrpc:"loadBalancerAccount,omitempty"` + + // The location of a host in a datacenter.serverRoom format. + Location *string `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // The health status of a host. The status can be either 'UP', 'DOWN', or null which could mean that the health check type is set to 'none' or an update to the ip, port, or health check type was recently done and the host is waiting for the new status. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The load balance weight of a host. The total weight of all hosts in the load balancing pool must not exceed 100. + Weight *int `json:"weight,omitempty" xmlrpc:"weight,omitempty"` +} + +// The SoftLayer_Network_LoadBalancer_Global_Type data type represents a single load balance method that can be assigned to a global load balancer account. The load balance method determines how hosts in a load balancing pool are chosen by the global load balancers. +type Network_LoadBalancer_Global_Type struct { + Entity + + // The unique identifier of a load balance method. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The name of a load balance method. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Network_LoadBalancer_Service data type contains all the information relating to a specific service (destination) on a particular load balancer. +// +// Information retained on the object itself is the the source and destination of the service, routing type, weight, and whether or not the service is currently enabled. +type Network_LoadBalancer_Service struct { + Entity + + // Connection limit on this service. + ConnectionLimit *int `json:"connectionLimit,omitempty" xmlrpc:"connectionLimit,omitempty"` + + // Creation Date of this service + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The IP Address of the real server you wish to direct traffic to. 
Your account must own this IP + DestinationIpAddress *string `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"` + + // The port on the real server to direct the traffic. This can be different than the source port. If you wish to obfuscate your HTTP traffic, you can accept requests on port 80 on the load balancer, then redirect them to port 932 on your real server. + DestinationPort *int `json:"destinationPort,omitempty" xmlrpc:"destinationPort,omitempty"` + + // A flag (either true or false) that determines if this particular service should be enabled on the load balancer. Set to false to bring the server out of rotation without losing your configuration + Enabled *bool `json:"enabled,omitempty" xmlrpc:"enabled,omitempty"` + + // The health check type for this service. If one is supplied, the load balancer will occasionally ping your server to determine if it is still up. Servers that are down are removed from the queue and will not be used to handle requests until their status returns to "up". The value of the health check is determined directly by what option you have selected for the routing type. + // + // {| + // |- + // ! Type + // ! Valid Health Checks + // |- + // | HTTP + // | HTTP, TCP, ICMP + // |- + // | TCP + // | HTTP, TCP, ICMP + // |- + // | FTP + // | TCP, ICMP + // |- + // | DNS + // | DNS, ICMP + // |- + // | UDP + // | None + // |} + // + // + HealthCheck *string `json:"healthCheck,omitempty" xmlrpc:"healthCheck,omitempty"` + + // The URL provided here (starting with /) is what the load balancer will request in order to perform a custom HTTP health check. You must specify either "GET /location/of/file.html" or "HEAD /location/of/file.php" + HealthCheckURL *string `json:"healthCheckURL,omitempty" xmlrpc:"healthCheckURL,omitempty"` + + // The expected response from the custom HTTP health check. If the requested page contains this response, the check succeeds. + HealthResponse *string `json:"healthResponse,omitempty" xmlrpc:"healthResponse,omitempty"` + + // Unique ID for this object, used for the getObject method, and must be set if you are editing this object. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Last modification date of this service + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Name of the load balancer service + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Holds whether this server is up or down. Does not affect load balancer configuration at all, just for the customer's informational purposes + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // Peak historical connections since the creation of this service. Is reset any time you make a configuration change + PeakConnections *int `json:"peakConnections,omitempty" xmlrpc:"peakConnections,omitempty"` + + // The port on the load balancer that this service maps to. This is the port for incoming traffic, it needs to be shared with other services to form a group. + SourcePort *int `json:"sourcePort,omitempty" xmlrpc:"sourcePort,omitempty"` + + // The connection type of this service. Valid values are HTTP, FTP, TCP, UDP, and DNS. The value of this variable affects available values of healthCheck, listed in that variable's description + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The load balancer that this service belongs to. + Vip *Network_LoadBalancer_VirtualIpAddress `json:"vip,omitempty" xmlrpc:"vip,omitempty"` + + // Unique ID for this object's parent. 
Probably not useful in the API, as this object will always be a child of a VirtualIpAddress anyway. + VipId *int `json:"vipId,omitempty" xmlrpc:"vipId,omitempty"` + + // Weight affects the choices the load balancer makes between your services. The weight of each service is expressed as a percentage of the TOTAL CONNECTION LIMIT on the virtual IP Address. All services draw from the same pool of connections, so if you expect to have 4 times as much HTTP traffic as HTTPS, your weights for the above example routes would be 40%, 40%, 10%, 10% respectively. The weights should add up to 100% If you go over 100%, an exception will be thrown. Weights must be whole numbers, no fractions or decimals are accepted. + Weight *int `json:"weight,omitempty" xmlrpc:"weight,omitempty"` +} + +// The SoftLayer_Network_LoadBalancer_VirtualIpAddress data type contains all the information relating to a specific load balancer assigned to a customer account. +// +// Information retained on the object itself is the virtual IP address, load balancing method, and any notes that are related to the load balancer. There is also an array of SoftLayer_Network_LoadBalancer_Service objects, which represent the load balancer services, explained more fully in the SoftLayer_Network_LoadBalancer_Service documentation. +type Network_LoadBalancer_VirtualIpAddress struct { + Entity + + // The account that owns this load balancer. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The current billing item for the Load Balancer. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // Connection limit on this VIP. Can be upgraded through the upgradeConnectionLimit() function + ConnectionLimit *int `json:"connectionLimit,omitempty" xmlrpc:"connectionLimit,omitempty"` + + // If false, this VIP and associated services may be edited via the portal or the API. If true, you must configure this VIP manually on the device. + CustomerManagedFlag *int `json:"customerManagedFlag,omitempty" xmlrpc:"customerManagedFlag,omitempty"` + + // Unique ID for this object, used for the getObject method, and must be set if you are editing this object. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The load balancing method that determines which server is used "next" by the load balancer. The method is stored in an abbreviated form, represented in parentheses after the full name. Methods include: Round Robin (Value "rr"): Each server is used sequentially in a circular queue Shortest Response (Value "sr"): The server with the lowest ping at the last health check gets the next request Least Connections (Value "lc"): The server with the least current connections is given the next request Persistent IP - Round Robin (Value "pi"): The same server will be returned to a request during a users session. Servers are chosen through round robin. Persistent IP - Shortest Response (Value "pi-sr"): The same server will be returned to a request during a users session. Servers are chosen through shortest response. Persistent IP - Least Connections (Value "pi-lc"): The same server will be returned to a request during a users session. Servers are chosen through least connections. Insert Cookie - Round Robin (Value "ic"): Inserts a cookie into the HTTP stream that will tie that client to a particular balanced server. Servers are chosen through round robin. 
Insert Cookie - Shortest Response (Value "ic-sr"): Inserts a cookie into the HTTP stream that will tie that client to a particular balanced server. Servers are chosen through shortest response. Insert Cookie - Least Connections (Value "ic-lc"): Inserts a cookie into the HTTP stream that will tie that client to a particular balanced server. Servers are chosen through least connections. + LoadBalancingMethod *string `json:"loadBalancingMethod,omitempty" xmlrpc:"loadBalancingMethod,omitempty"` + + // A human readable version of loadBalancingMethod, intended mainly for API users. + LoadBalancingMethodFullName *string `json:"loadBalancingMethodFullName,omitempty" xmlrpc:"loadBalancingMethodFullName,omitempty"` + + // A flag indicating that the load balancer is a managed resource. + ManagedResourceFlag *bool `json:"managedResourceFlag,omitempty" xmlrpc:"managedResourceFlag,omitempty"` + + // Date this load balancer was last modified + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The name of the load balancer instance + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // User-created notes on this load balancer. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The unique identifier of the Security Certificate to be utilized when SSL support is enabled. + SecurityCertificateId *int `json:"securityCertificateId,omitempty" xmlrpc:"securityCertificateId,omitempty"` + + // A count of the services on this load balancer. + ServiceCount *uint `json:"serviceCount,omitempty" xmlrpc:"serviceCount,omitempty"` + + // the services on this load balancer. + Services []Network_LoadBalancer_Service `json:"services,omitempty" xmlrpc:"services,omitempty"` + + // This is the port for incoming traffic. + SourcePort *int `json:"sourcePort,omitempty" xmlrpc:"sourcePort,omitempty"` + + // The connection type of this VIP. Valid values are HTTP, FTP, TCP, UDP, and DNS. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The virtual, public-facing IP address for your load balancer. This is the address of all incoming traffic + VirtualIpAddress *string `json:"virtualIpAddress,omitempty" xmlrpc:"virtualIpAddress,omitempty"` +} + +// The Syslog class holds a single line from the Networking Firewall "Syslog" record, for firewall detected and blocked attempts on a server. +type Network_Logging_Syslog struct { + Entity + + // Timestamp for when the connection was blocked by the firewall + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The Destination IP Address of the blocked connection (your end) + DestinationIpAddress *string `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"` + + // The Destination Port of the blocked connection (your end) + DestinationPort *int `json:"destinationPort,omitempty" xmlrpc:"destinationPort,omitempty"` + + // This tells you what kind of firewall event this log line is for: accept or deny. 
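The loadBalancingMethod codes documented above are stored in abbreviated form ("rr", "sr", "lc", "pi", "ic", and so on). A minimal sketch, assuming the vendored datatypes and sl packages, of building a SoftLayer_Network_LoadBalancer_VirtualIpAddress with one HTTP service and serializing it; the addresses, ports, and names are made-up examples, and the omitempty tags drop any unset pointer field from the JSON:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/softlayer/softlayer-go/datatypes"
    	"github.com/softlayer/softlayer-go/sl"
    )

    func main() {
    	// Round Robin is stored as the abbreviated code "rr".
    	vip := datatypes.Network_LoadBalancer_VirtualIpAddress{
    		VirtualIpAddress:    sl.String("203.0.113.10"),
    		LoadBalancingMethod: sl.String("rr"),
    		Type:                sl.String("HTTP"),
    		SourcePort:          sl.Int(80),
    		Services: []datatypes.Network_LoadBalancer_Service{
    			{
    				Name:                 sl.String("web-backend-1"),
    				DestinationIpAddress: sl.String("10.0.0.21"),
    				DestinationPort:      sl.Int(8080),
    				SourcePort:           sl.Int(80),
    				Type:                 sl.String("HTTP"),
    				HealthCheck:          sl.String("HTTP"),
    				Enabled:              sl.Bool(true),
    				Weight:               sl.Int(100),
    			},
    		},
    	}

    	// Unset pointer fields are omitted from the payload because of the omitempty tags.
    	body, _ := json.MarshalIndent(vip, "", "  ")
    	fmt.Println(string(body))
    }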
+ EventType *string `json:"eventType,omitempty" xmlrpc:"eventType,omitempty"` + + // Raw syslog message for the event + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // Connection protocol used to make the call that was blocked (tcp, udp, etc) + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // The Source IP Address of the call that was blocked (attacker's end) + SourceIpAddress *string `json:"sourceIpAddress,omitempty" xmlrpc:"sourceIpAddress,omitempty"` + + // The Source Port where the blocked connection was established (attacker's end) + SourcePort *int `json:"sourcePort,omitempty" xmlrpc:"sourcePort,omitempty"` + + // If this is an aggregation of syslog events, this property shows the total events. + TotalEvents *int `json:"totalEvents,omitempty" xmlrpc:"totalEvents,omitempty"` +} + +// The SoftLayer_Network_Media_Transcode_Account contains information regarding a transcode account. +type Network_Media_Transcode_Account struct { + Entity + + // The SoftLayer account information + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The internal identifier of a SoftLayer account + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The internal identifier of a transcode account + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The last modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A count of transcode jobs + TranscodeJobCount *uint `json:"transcodeJobCount,omitempty" xmlrpc:"transcodeJobCount,omitempty"` + + // Transcode jobs + TranscodeJobs []Network_Media_Transcode_Job `json:"transcodeJobs,omitempty" xmlrpc:"transcodeJobs,omitempty"` +} + +// The SoftLayer_Network_Media_Transcode_Job contains information regarding a transcode job such as input file, output format, user id and so on. +type Network_Media_Transcode_Job struct { + Entity + + // The auto-deletion duration in seconds. This value determines how long the input file will be kept on the storage. 
+ AutoDeleteDuration *int `json:"autoDeleteDuration,omitempty" xmlrpc:"autoDeleteDuration,omitempty"` + + // The size of an input file in byte + ByteIn *int `json:"byteIn,omitempty" xmlrpc:"byteIn,omitempty"` + + // The created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + History []Network_Media_Transcode_Job_History `json:"history,omitempty" xmlrpc:"history,omitempty"` + + // A count of + HistoryCount *uint `json:"historyCount,omitempty" xmlrpc:"historyCount,omitempty"` + + // The internal identifier of a transcode job + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The input file name + InputFile *string `json:"inputFile,omitempty" xmlrpc:"inputFile,omitempty"` + + // The last modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The name of a transcode job + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The output file name + OutputFile *string `json:"outputFile,omitempty" xmlrpc:"outputFile,omitempty"` + + // The transcode service account + TranscodeAccount *Network_Media_Transcode_Account `json:"transcodeAccount,omitempty" xmlrpc:"transcodeAccount,omitempty"` + + // The internal identifier of SoftLayer account + TranscodeAccountId *int `json:"transcodeAccountId,omitempty" xmlrpc:"transcodeAccountId,omitempty"` + + // The unique id of a transcode job + TranscodeJobGuid *string `json:"transcodeJobGuid,omitempty" xmlrpc:"transcodeJobGuid,omitempty"` + + // The unique id of a pre-defined output format + TranscodePresetGuid *string `json:"transcodePresetGuid,omitempty" xmlrpc:"transcodePresetGuid,omitempty"` + + // The name of a transcode output preset + TranscodePresetName *string `json:"transcodePresetName,omitempty" xmlrpc:"transcodePresetName,omitempty"` + + // The status information of a transcode job + TranscodeStatus *Network_Media_Transcode_Job_Status `json:"transcodeStatus,omitempty" xmlrpc:"transcodeStatus,omitempty"` + + // The internal identifier of a transcode status + TranscodeStatusId *int `json:"transcodeStatusId,omitempty" xmlrpc:"transcodeStatusId,omitempty"` + + // The status of a transcode job + TranscodeStatusName *string `json:"transcodeStatusName,omitempty" xmlrpc:"transcodeStatusName,omitempty"` + + // The SoftLayer user that created the transcode job + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // The internal identifier of the user who created a transcode job + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` + + // Watermark to apply to job + Watermark *Container_Network_Media_Transcode_Job_Watermark `json:"watermark,omitempty" xmlrpc:"watermark,omitempty"` +} + +// no documentation yet +type Network_Media_Transcode_Job_History struct { + Entity + + // The creation date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The note created by system + PublicNotes *string `json:"publicNotes,omitempty" xmlrpc:"publicNotes,omitempty"` + + // The internal identifier of a transcode job + TranscodeJobId *int `json:"transcodeJobId,omitempty" xmlrpc:"transcodeJobId,omitempty"` + + // The status of a transcode job + TranscodeStatusName *string `json:"transcodeStatusName,omitempty" xmlrpc:"transcodeStatusName,omitempty"` +} + +// The SoftLayer_Network_Media_Transcode_Job_Status contains information on a transcode job status. 
+type Network_Media_Transcode_Job_Status struct { + Entity + + // The description of a transcode job status + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The internal identifier of a transcode job status + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The status name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Message_Delivery struct { + Entity + + // The SoftLayer customer account that a network message delivery account belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The billing item for a network message delivery account. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // The message delivery type of a network message delivery account. + Type *Network_Message_Delivery_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // no documentation yet + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // no documentation yet + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` + + // The vendor for a network message delivery account. + Vendor *Network_Message_Delivery_Vendor `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"` + + // no documentation yet + VendorId *int `json:"vendorId,omitempty" xmlrpc:"vendorId,omitempty"` +} + +// no documentation yet +type Network_Message_Delivery_Attribute struct { + Entity + + // no documentation yet + NetworkMessageDelivery *Network_Message_Delivery `json:"networkMessageDelivery,omitempty" xmlrpc:"networkMessageDelivery,omitempty"` + + // no documentation yet + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Network_Message_Delivery_Email_Sendgrid struct { + Network_Message_Delivery + + // The contact e-mail address used by SendGrid. + EmailAddress *string `json:"emailAddress,omitempty" xmlrpc:"emailAddress,omitempty"` + + // A flag that determines if a SendGrid e-mail delivery account has access to send mail through the SendGrid SMTP server. 
+ SmtpAccess *string `json:"smtpAccess,omitempty" xmlrpc:"smtpAccess,omitempty"` +} + +// no documentation yet +type Network_Message_Delivery_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Message_Delivery_Vendor struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Monitor struct { + Entity +} + +// The SoftLayer_Network_Monitor_Version1_Incident data type models a single virtual server or physical hardware network monitoring event. SoftLayer_Network_Monitor_Version1_Incidents are created when the SoftLayer monitoring system detects a service down on your hardware of virtual server. As the incident is resolved it's status changes from "SERVICE FAILURE" to "COMPLETED". +type Network_Monitor_Version1_Incident struct { + Entity + + // A network monitoring incident's status, either the string "SERVICE FAILURE" denoting an ongoing incident or "COMPLETE" meaning the incident has been resolved. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// The Monitoring_Query_Host type represents a monitoring instance. It consists of a hardware ID to monitor, an IP address attached to that hardware ID, a method of monitoring, and what to do in the instance that the monitor ever fails. +type Network_Monitor_Version1_Query_Host struct { + Entity + + // The argument to be used for this monitor, if necessary. The lowest monitoring levels (like ping) ignore this setting, but higher levels like HTTP custom use it. + Arg1Value *string `json:"arg1Value,omitempty" xmlrpc:"arg1Value,omitempty"` + + // Virtual Guest Identification Number for the guest being monitored. + GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"` + + // The hardware that is being monitored by this monitoring instance + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The ID of the hardware being monitored + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // Identification Number for the host being monitored. + HostId *int `json:"hostId,omitempty" xmlrpc:"hostId,omitempty"` + + // The unique identifier for this object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The IP address to be monitored. Must be attached to the hardware on this object + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // The most recent result for this particular monitoring instance. + LastResult *Network_Monitor_Version1_Query_Result `json:"lastResult,omitempty" xmlrpc:"lastResult,omitempty"` + + // The type of monitoring query that is executed when this hardware is monitored. + QueryType *Network_Monitor_Version1_Query_Type `json:"queryType,omitempty" xmlrpc:"queryType,omitempty"` + + // The ID of the query type to use. + QueryTypeId *int `json:"queryTypeId,omitempty" xmlrpc:"queryTypeId,omitempty"` + + // The action taken when a monitor fails. 
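For the monitoring instance described above (SoftLayer_Network_Monitor_Version1_Query_Host), a minimal sketch, assuming the vendored datatypes and sl packages, of filling in the query type, response action, and wait cycles; the IDs are invented and would normally come from the getAllQueryTypes and getAllResponseTypes lookups mentioned below:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/softlayer/softlayer-go/datatypes"
    	"github.com/softlayer/softlayer-go/sl"
    )

    func main() {
    	// Hypothetical ping-style monitor; the query type and response action IDs
    	// here are placeholders, not real catalog values.
    	monitor := datatypes.Network_Monitor_Version1_Query_Host{
    		HardwareId:       sl.Int(123456),
    		IpAddress:        sl.String("198.51.100.7"),
    		QueryTypeId:      sl.Int(1),
    		ResponseActionId: sl.Int(1),
    		// Wait one 5-minute cycle before the response action fires; 0 would act immediately.
    		WaitCycles: sl.Int(1),
    	}

    	body, _ := json.Marshal(monitor)
    	fmt.Println(string(body))
    }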
+ ResponseAction *Network_Monitor_Version1_Query_ResponseType `json:"responseAction,omitempty" xmlrpc:"responseAction,omitempty"` + + // The ID of the response action to take when the monitor fails + ResponseActionId *int `json:"responseActionId,omitempty" xmlrpc:"responseActionId,omitempty"` + + // The status of this monitoring instance. Anything other than "ON" means that the monitor has been disabled + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The number of 5-minute cycles to wait before the "responseAction" is taken. If set to 0, the response action will be taken immediately + WaitCycles *int `json:"waitCycles,omitempty" xmlrpc:"waitCycles,omitempty"` +} + +// The monitoring stratum type stores the maximum level of the various components of the monitoring system that a particular hardware object has access to. This object cannot be accessed by ID, and cannot be modified. The user can access this object through Hardware_Server->availableMonitoring. +// +// There are two values on this object that are important: +// # monitorLevel determines the highest level of SoftLayer_Network_Monitor_Version1_Query_Type object that can be placed in a monitoring instance on this server +// # responseLevel determines the highest level of SoftLayer_Network_Monitor_Version1_Query_ResponseType object that can be placed in a monitoring instance on this server +// +// +// Also note that the query type and response types are available through getAllQueryTypes and getAllResponseTypes, respectively. +type Network_Monitor_Version1_Query_Host_Stratum struct { + Entity + + // The hardware object that these monitoring permissions applies to. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The highest level of a monitoring query type allowed on this server + MonitorLevel *int `json:"monitorLevel,omitempty" xmlrpc:"monitorLevel,omitempty"` + + // The highest level of a monitoring response type allowed on this server + ResponseLevel *int `json:"responseLevel,omitempty" xmlrpc:"responseLevel,omitempty"` +} + +// The ResponseType type stores only an ID and a description of the response type. The only use for this object is in reference. The user chooses a response action that would be appropriate for a monitoring instance, and sets the ResponseTypeId to the SoftLayer_Network_Monitor_Version1_Query_Host->responseActionId value. +// +// The user can retrieve all available ResponseTypes with the getAllObjects method on this service. +type Network_Monitor_Version1_Query_ResponseType struct { + Entity + + // The description of the action the monitoring system will take on failure + ActionDescription *string `json:"actionDescription,omitempty" xmlrpc:"actionDescription,omitempty"` + + // The unique identifier for this object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The level of this response. The level the customer has access to is determined by values in SoftLayer_Network_Monitor_Version1_Query_Host_Stratum + Level *int `json:"level,omitempty" xmlrpc:"level,omitempty"` +} + +// The monitoring result object is used to show the status of the actions taken by the monitoring system. +// +// In general, only the responseStatus variable is needed, as it holds the information on the status of the service. 
+type Network_Monitor_Version1_Query_Result struct { + Entity + + // The timestamp of when this monitor was co + FinishTime *Time `json:"finishTime,omitempty" xmlrpc:"finishTime,omitempty"` + + // References the queryHost that this response relates to. + QueryHost *Network_Monitor_Version1_Query_Host `json:"queryHost,omitempty" xmlrpc:"queryHost,omitempty"` + + // The response status for this server. The response status meanings are: 0: Down/Critical: Server is down and/or has passed the critical response threshold (extremely long ping response, abnormal behavior, etc.) 1: Warning - Server may be recovering from a previous down state, or may have taken too long to respond 2: Up 3: Not used 4: Unknown - An unknown error has occurred. If the problem persists, contact support. 5: Unknown - An unknown error has occurred. If the problem persists, contact support. + ResponseStatus *int `json:"responseStatus,omitempty" xmlrpc:"responseStatus,omitempty"` + + // The length of time it took the server to respond + ResponseTime *Float64 `json:"responseTime,omitempty" xmlrpc:"responseTime,omitempty"` +} + +// The MonitorType type stores a name, long description, and default arguments for the monitor types. The only use for this object is in reference. The user chooses a monitoring type that would be appropriate for their server, and sets the id of the Query_Type to SoftLayer_Network_Monitor_Version1_Query_Host->queryTypeId +// +// The user can retrieve all available Query Types with the getAllObjects method on this service. +type Network_Monitor_Version1_Query_Type struct { + Entity + + // The type of parameter sent to the monitoring command. + ArgumentDescription *string `json:"argumentDescription,omitempty" xmlrpc:"argumentDescription,omitempty"` + + // Long description of the monitoring type. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The unique identifier for this object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The level of this monitoring type. The level the customer has access to is determined by values in SoftLayer_Network_Monitor_Version1_Query_Host_Stratum + MonitorLevel *int `json:"monitorLevel,omitempty" xmlrpc:"monitorLevel,omitempty"` + + // Short name of the monitoring type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// SoftLayer_Network_Pod refers to a portion of a data center that share a Backend Customer Router (BCR) and usually a front-end counterpart known as a Frontend Customer Router (FCR). A Pod primarily denotes a logical location within the network and the physical aspects that support networks. This is in contrast to representing a specific physical location. +// +// A ``Pod`` is identified by a ``name``, which is unique. A Pod name follows the format 'dddnn.podii', where 'ddd' is a data center code, 'nn' is the data center number, 'pod' is a literal string and 'ii' is a two digit, left-zero- padded number which corresponds to a Backend Customer Router (BCR) of the desired data center. Examples: +// * dal09.pod01 = Dallas 9, Pod 1 (ie. bcr01) +// * sjc01.pod04 = San Jose 1, Pod 4 (ie. bcr04) +// * ams01.pod01 = Amsterdam 1, Pod 1 (ie. bcr01) +type Network_Pod struct { + Entity + + // Identifier for this Pod's Backend Customer Router (BCR) + BackendRouterId *int `json:"backendRouterId,omitempty" xmlrpc:"backendRouterId,omitempty"` + + // Host name of Pod's Backend Customer Router (BCR), e.g. 
bcr01a.dal09 + BackendRouterName *string `json:"backendRouterName,omitempty" xmlrpc:"backendRouterName,omitempty"` + + // The list of capabilities this Pod has. + Capabilities []string `json:"capabilities,omitempty" xmlrpc:"capabilities,omitempty"` + + // Long form name of the data center in which this Pod resides, e.g. Dallas 9 + DatacenterLongName *string `json:"datacenterLongName,omitempty" xmlrpc:"datacenterLongName,omitempty"` + + // Name of data center in which this Pod resides, e.g. dal09 + DatacenterName *string `json:"datacenterName,omitempty" xmlrpc:"datacenterName,omitempty"` + + // (optional) Identifier for this Pod's Frontend Customer Router (FCR) + FrontendRouterId *int `json:"frontendRouterId,omitempty" xmlrpc:"frontendRouterId,omitempty"` + + // Host name of Pod's Frontend Customer Router (FCR), e.g. fcr01a.dal09 + FrontendRouterName *string `json:"frontendRouterName,omitempty" xmlrpc:"frontendRouterName,omitempty"` + + // The unique name of the Pod. See [[SoftLayer_Network_Pod (type)]] for details of the name's construction. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Protection_Address struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + DepartmentId *int `json:"departmentId,omitempty" xmlrpc:"departmentId,omitempty"` + + // no documentation yet + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // no documentation yet + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + ManagementMethodType *string `json:"managementMethodType,omitempty" xmlrpc:"managementMethodType,omitempty"` + + // no documentation yet + ModifiedUser *User_Employee `json:"modifiedUser,omitempty" xmlrpc:"modifiedUser,omitempty"` + + // no documentation yet + PrimaryRouter *Hardware_Router `json:"primaryRouter,omitempty" xmlrpc:"primaryRouter,omitempty"` + + // no documentation yet + ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"` + + // no documentation yet + Subnet *Network_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` + + // no documentation yet + SubnetIpAddress *Network_Subnet_IpAddress `json:"subnetIpAddress,omitempty" xmlrpc:"subnetIpAddress,omitempty"` + + // no documentation yet + TerminatedUser *User_Employee `json:"terminatedUser,omitempty" xmlrpc:"terminatedUser,omitempty"` + + // no documentation yet + Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"` + + // A count of + TransactionCount *uint `json:"transactionCount,omitempty" xmlrpc:"transactionCount,omitempty"` + + // no documentation yet + Transactions []Provisioning_Version1_Transaction `json:"transactions,omitempty" xmlrpc:"transactions,omitempty"` + + // no documentation yet + UserDepartment *User_Employee_Department `json:"userDepartment,omitempty" xmlrpc:"userDepartment,omitempty"` + + // no documentation yet + UserRecord *User_Employee `json:"userRecord,omitempty" xmlrpc:"userRecord,omitempty"` +} + +// Regional Internet Registries are the organizations who delegate IP address blocks to other groups or organizations around the Internet. The information contained in this data type is used throughout the networking-related services in our systems. 
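To make the 'dddnn.podii' naming convention above concrete, a small sketch, assuming the vendored datatypes and sl packages, that splits a Pod name back into its data center and pod parts; the values mirror the dal09.pod01 example from the comment:

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/softlayer/softlayer-go/datatypes"
    	"github.com/softlayer/softlayer-go/sl"
    )

    func main() {
    	// "dal09.pod01" means Dallas 9, Pod 1, served by bcr01.
    	pod := datatypes.Network_Pod{
    		Name:              sl.String("dal09.pod01"),
    		DatacenterName:    sl.String("dal09"),
    		BackendRouterName: sl.String("bcr01a.dal09"),
    	}

    	if pod.Name != nil {
    		parts := strings.SplitN(*pod.Name, ".", 2)
    		if len(parts) == 2 {
    			fmt.Printf("datacenter %s, %s (BCR %s)\n", parts[0], parts[1], *pod.BackendRouterName)
    		}
    	}
    }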
+type Network_Regional_Internet_Registry struct { + Entity + + // Unique ID of the object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The system-level name of the registry + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The friendly name of the registry + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Network_SecurityGroup data type contains general information for a single security group. A security group contains a set of IP filter [[SoftLayer_Network_SecurityGroup_Rule (type)|rules]] that define how to handle incoming (ingress) and outgoing (egress) traffic to both the public and private interfaces of a virtual server instance and a set of [[SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding (type)|bindings]] to associate virtual guest network components with the security group. +type Network_SecurityGroup struct { + Entity + + // The account this security group belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The date a security group was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The (optional) description for a security group. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The unique ID for a security group. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Metadata *string `json:"metadata,omitempty" xmlrpc:"metadata,omitempty"` + + // The date a security group was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The (optional) name for a security group. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of the network component bindings for this security group. + NetworkComponentBindingCount *uint `json:"networkComponentBindingCount,omitempty" xmlrpc:"networkComponentBindingCount,omitempty"` + + // The network component bindings for this security group. + NetworkComponentBindings []Virtual_Network_SecurityGroup_NetworkComponentBinding `json:"networkComponentBindings,omitempty" xmlrpc:"networkComponentBindings,omitempty"` + + // A count of the order bindings for this security group + OrderBindingCount *uint `json:"orderBindingCount,omitempty" xmlrpc:"orderBindingCount,omitempty"` + + // The order bindings for this security group + OrderBindings []Network_SecurityGroup_OrderBinding `json:"orderBindings,omitempty" xmlrpc:"orderBindings,omitempty"` + + // A count of the rules for this security group. + RuleCount *uint `json:"ruleCount,omitempty" xmlrpc:"ruleCount,omitempty"` + + // The rules for this security group. + Rules []Network_SecurityGroup_Rule `json:"rules,omitempty" xmlrpc:"rules,omitempty"` +} + +// The SoftLayer_Network_SecurityGroup_OrderBinding data type contains links between security groups and product orders. +type Network_SecurityGroup_OrderBinding struct { + Entity + + // The virtual guest associated with the binding + Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"` + + // The ID of the Virtual Guest associated with the security group. + GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"` + + // The unique ID for a security group, order, binding + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The order associated with the binding + Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"` + + // The ID of the order associated with the security group. 
+ OrderId *int `json:"orderId,omitempty" xmlrpc:"orderId,omitempty"` + + // The security group associated with the order + SecurityGroup *Network_SecurityGroup `json:"securityGroup,omitempty" xmlrpc:"securityGroup,omitempty"` + + // The ID of the security group that is associated with the order. + SecurityGroupId *int `json:"securityGroupId,omitempty" xmlrpc:"securityGroupId,omitempty"` +} + +// The SoftLayer_Network_SecurityGroup_Request data type contains the ID of a specific request sent to the API. This ID is used to identify specific calls to attach and detach network components, as well as add, edit, and remove security group rules. +type Network_SecurityGroup_Request struct { + Entity + + // The unique ID for a request. + RequestId *string `json:"requestId,omitempty" xmlrpc:"requestId,omitempty"` +} + +// The SoftLayer_Network_SecurityGroup_RequestRules data type contains the ID of a specific request sent to the API, as well as an associative array of the rules that were created, edited, or removed by the request. +type Network_SecurityGroup_RequestRules struct { + Network_SecurityGroup_Request + + // Whether the API call was valid or not. + Rules []Network_SecurityGroup_Rule `json:"rules,omitempty" xmlrpc:"rules,omitempty"` +} + +// The SoftLayer_Network_SecurityGroup_Rule data type contains general information for a single rule that belongs to a [[SoftLayer_Network_SecurityGroup|security group]]. By default, all traffic (both inbound and
 outbound) to a virtual server instance is blocked. Security group rules are permissive, and define the allowed incoming (ingress) and outgoing (egress) traffic to both the public and private interfaces of a
 virtual server instance. The order of rules within a security group does not matter and priority always falls to the least restrictive rule. +type Network_SecurityGroup_Rule struct { + Entity + + // The direction of traffic (ingress or egress). + Direction *string `json:"direction,omitempty" xmlrpc:"direction,omitempty"` + + // IPv4 or IPv6. If the remoteIp or ethertype properties are not specified, the default is IPv4. Otherwise ethertype will default based on the format of the specified remoteIp. + Ethertype *string `json:"ethertype,omitempty" xmlrpc:"ethertype,omitempty"` + + // The unique ID for a rule. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The end of the port range for allowed traffic. When the protocol is icmp, this value specifies the icmp code to permit. When icmp code is specified, icmp type is required. + PortRangeMax *int `json:"portRangeMax,omitempty" xmlrpc:"portRangeMax,omitempty"` + + // The start of the port range for allowed traffic. When the protocol is icmp, this value specifies the icmp type to permit. + PortRangeMin *int `json:"portRangeMin,omitempty" xmlrpc:"portRangeMin,omitempty"` + + // The protocol of packets (icmp, tcp, or udp). + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // The remote security group allowed as part of this rule. + RemoteGroup *Network_SecurityGroup `json:"remoteGroup,omitempty" xmlrpc:"remoteGroup,omitempty"` + + // The ID of the remote security group allowed as part of the rule. This property is mutually exclusive with the remoteIp property. + RemoteGroupId *int `json:"remoteGroupId,omitempty" xmlrpc:"remoteGroupId,omitempty"` + + // CIDR or IP address for allowed connections. This property is mutually exclusive with the remoteGroupId property. + RemoteIp *string `json:"remoteIp,omitempty" xmlrpc:"remoteIp,omitempty"` + + // The security group of this rule. + SecurityGroup *Network_SecurityGroup `json:"securityGroup,omitempty" xmlrpc:"securityGroup,omitempty"` + + // The ID of the security group that owns the rule. + SecurityGroupId *int `json:"securityGroupId,omitempty" xmlrpc:"securityGroupId,omitempty"` +} + +// The SoftLayer_Network_Security_Scanner_Request data type represents a single vulnerability scan request. It provides information on when the scan was created, last updated, and the current status. The status messages are as follows: +// *Scan Pending +// *Scan Processing +// *Scan Complete +// *Scan Cancelled +// *Generating Report. +type Network_Security_Scanner_Request struct { + Entity + + // The account associated with a security scan request. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A request's associated customer account identifier. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The date and time that the request is created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The virtual guest a security scan is run against. + Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"` + + // Virtual Guest Identification Number for the guest this security scanner request belongs to. + GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"` + + // The hardware a security scan is run against. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The identifier of the hardware item a scan is run on. 
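A minimal sketch of one permissive rule as described above (ingress TCP 443 from a single CIDR), assuming the vendored datatypes and sl packages; remoteIp and remoteGroupId are mutually exclusive, so only remoteIp is set here, and the addresses come from documentation ranges:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/softlayer/softlayer-go/datatypes"
    	"github.com/softlayer/softlayer-go/sl"
    )

    func main() {
    	// Allow inbound TCP 443 from 192.0.2.0/24. Ethertype could be omitted,
    	// since it defaults based on the format of remoteIp.
    	rule := datatypes.Network_SecurityGroup_Rule{
    		Direction:    sl.String("ingress"),
    		Ethertype:    sl.String("IPv4"),
    		Protocol:     sl.String("tcp"),
    		PortRangeMin: sl.Int(443),
    		PortRangeMax: sl.Int(443),
    		RemoteIp:     sl.String("192.0.2.0/24"),
    	}

    	body, _ := json.Marshal(rule)
    	fmt.Println(string(body))
    }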
+ HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // Identification Number for the host this security scanner request belongs to. + HostId *int `json:"hostId,omitempty" xmlrpc:"hostId,omitempty"` + + // A security scan request's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The IP address that a scan will be performed on. + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // The date and time that the request was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Flag whether the requestor owns the hardware the scan was run on. This flag will return for hardware servers only, virtual servers will result in a null return even if you have a request out for them. + RequestorOwnedFlag *bool `json:"requestorOwnedFlag,omitempty" xmlrpc:"requestorOwnedFlag,omitempty"` + + // A security scan request's status. + Status *Network_Security_Scanner_Request_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // A request status identifier. + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` +} + +// The SoftLayer_Network_Security_Scanner_Request_Status data type represents the current status of a vulnerability scan. The status messages are as follows: +// *Scan Pending +// *Scan Processing +// *Scan Complete +// *Scan Cancelled +// *Generating Report. +// +// +// The status of a vulnerability scan will change over the course of a scan's execution. +type Network_Security_Scanner_Request_Status struct { + Entity + + // The identifier of a vulnerability scan's status. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The status message of a vulnerability scan. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Many general services that SoftLayer provides are tracked on the customer portal with a quick status message. These status message provide users with a quick reference to the health of a service, whether it's up or down. These services include SoftLayer's Internet backbone connections, VPN entry points, and router networks. The SoftLayer_Network_Service_Health data type provides the relationship between these services and their health status. +type Network_Service_Health struct { + Entity + + // The date that a service's status was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A service's location. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // A service's location identifier. + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // The date that a service's status was last changed. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The status portion of a service/status relationship. + Status *Network_Service_Health_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // A service's status identifier. + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` +} + +// Many general services that SoftLayer provides are marked by a status message. These health messages give portal users a quick way of determining the state of a SoftLayer service. Services range from backbones to VPN endpoints and routers. Generally a health status is either "Up" or "Down". +type Network_Service_Health_Status struct { + Entity + + // The status of a SoftLayer service. This is typically "Up" or "Down". 
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Network_Service_Resource is used to store information related to a service. It is used for determining the correct resource to connect to for a given service, like NAS, Evault, etc. +type Network_Service_Resource struct { + Entity + + // no documentation yet + ApiHost *string `json:"apiHost,omitempty" xmlrpc:"apiHost,omitempty"` + + // no documentation yet + ApiPassword *string `json:"apiPassword,omitempty" xmlrpc:"apiPassword,omitempty"` + + // no documentation yet + ApiPath *string `json:"apiPath,omitempty" xmlrpc:"apiPath,omitempty"` + + // no documentation yet + ApiPort *string `json:"apiPort,omitempty" xmlrpc:"apiPort,omitempty"` + + // no documentation yet + ApiProtocol *string `json:"apiProtocol,omitempty" xmlrpc:"apiProtocol,omitempty"` + + // no documentation yet + ApiUsername *string `json:"apiUsername,omitempty" xmlrpc:"apiUsername,omitempty"` + + // no documentation yet + ApiVersion *string `json:"apiVersion,omitempty" xmlrpc:"apiVersion,omitempty"` + + // A count of + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // no documentation yet + Attributes []Network_Service_Resource_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // The backend IP address for this resource + BackendIpAddress *string `json:"backendIpAddress,omitempty" xmlrpc:"backendIpAddress,omitempty"` + + // no documentation yet + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // The frontend IP address for this resource + FrontendIpAddress *string `json:"frontendIpAddress,omitempty" xmlrpc:"frontendIpAddress,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The name associated with this resource + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The hardware information associated with this resource. + NetworkDevice *Hardware `json:"networkDevice,omitempty" xmlrpc:"networkDevice,omitempty"` + + // no documentation yet + SshUsername *string `json:"sshUsername,omitempty" xmlrpc:"sshUsername,omitempty"` + + // The network information associated with this resource. 
+ Type *Network_Service_Resource_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Network_Service_Resource_Attribute struct { + Entity + + // no documentation yet + AttributeType *Network_Service_Resource_Attribute_Type `json:"attributeType,omitempty" xmlrpc:"attributeType,omitempty"` + + // no documentation yet + ServiceResource *Network_Service_Resource `json:"serviceResource,omitempty" xmlrpc:"serviceResource,omitempty"` + + // no documentation yet + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Network_Service_Resource_Attribute_Type struct { + Entity + + // no documentation yet + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` +} + +// no documentation yet +type Network_Service_Resource_Hub struct { + Network_Service_Resource +} + +// no documentation yet +type Network_Service_Resource_Hub_Swift struct { + Network_Service_Resource_Hub +} + +// no documentation yet +type Network_Service_Resource_MonitoringHub struct { + Network_Service_Resource + + // no documentation yet + AdnServicesIp *string `json:"adnServicesIp,omitempty" xmlrpc:"adnServicesIp,omitempty"` + + // no documentation yet + HubAddress *string `json:"hubAddress,omitempty" xmlrpc:"hubAddress,omitempty"` + + // no documentation yet + HubConnectionTimeout *string `json:"hubConnectionTimeout,omitempty" xmlrpc:"hubConnectionTimeout,omitempty"` + + // no documentation yet + RobotsCount *string `json:"robotsCount,omitempty" xmlrpc:"robotsCount,omitempty"` + + // no documentation yet + RobotsMax *string `json:"robotsMax,omitempty" xmlrpc:"robotsMax,omitempty"` +} + +// no documentation yet +type Network_Service_Resource_NimsoftLandingHub struct { + Network_Service_Resource_MonitoringHub +} + +// no documentation yet +type Network_Service_Resource_Type struct { + Entity + + // A count of + ServiceResourceCount *uint `json:"serviceResourceCount,omitempty" xmlrpc:"serviceResourceCount,omitempty"` + + // no documentation yet + ServiceResources []Network_Service_Resource `json:"serviceResources,omitempty" xmlrpc:"serviceResources,omitempty"` + + // no documentation yet + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Network_Service_Vpn_Overrides data type contains information relating user ids to subnet ids when VPN access is manually configured. It is essentially an entry in a 'white list' of subnets a SoftLayer portal VPN user may access. +type Network_Service_Vpn_Overrides struct { + Entity + + // The internal identifier of the record. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Subnet components accessible by a SoftLayer VPN portal user. + Subnet *Network_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` + + // The identifier of a subnet accessible by the SoftLayer portal VPN user. + SubnetId *int `json:"subnetId,omitempty" xmlrpc:"subnetId,omitempty"` + + // SoftLayer VPN portal user. + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // The identifier of the SoftLayer portal VPN user. + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// The SoftLayer_Network_Storage data type contains general information regarding a Storage product such as account id, access username and password, the Storage product type, and the server the Storage service is associated with. Currently, only EVault backup storage has an associated server. 
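Since SoftLayer_Network_Service_Resource carries its endpoint as separate api* fields, a minimal sketch, assuming the vendored datatypes and sl packages, of assembling them into a base URL; the host, port, and path are invented:

    package main

    import (
    	"fmt"

    	"github.com/softlayer/softlayer-go/datatypes"
    	"github.com/softlayer/softlayer-go/sl"
    )

    // endpoint assembles a base URL from the api* fields of a service resource.
    func endpoint(r datatypes.Network_Service_Resource) string {
    	if r.ApiProtocol == nil || r.ApiHost == nil {
    		return ""
    	}
    	url := *r.ApiProtocol + "://" + *r.ApiHost
    	if r.ApiPort != nil {
    		url += ":" + *r.ApiPort // ApiPort is a string in this data type
    	}
    	if r.ApiPath != nil {
    		url += *r.ApiPath
    	}
    	return url
    }

    func main() {
    	r := datatypes.Network_Service_Resource{
    		ApiProtocol: sl.String("https"),
    		ApiHost:     sl.String("nas.example.com"),
    		ApiPort:     sl.String("443"),
    		ApiPath:     sl.String("/api"),
    	}
    	fmt.Println(endpoint(r))
    }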
+type Network_Storage struct { + Entity + + // The account that a Storage services belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The internal identifier of the SoftLayer customer account that a Storage account belongs to. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // Other usernames and passwords associated with a Storage volume. + AccountPassword *Account_Password `json:"accountPassword,omitempty" xmlrpc:"accountPassword,omitempty"` + + // A count of the currently active transactions on a network storage volume. + ActiveTransactionCount *uint `json:"activeTransactionCount,omitempty" xmlrpc:"activeTransactionCount,omitempty"` + + // The currently active transactions on a network storage volume. + ActiveTransactions []Provisioning_Version1_Transaction `json:"activeTransactions,omitempty" xmlrpc:"activeTransactions,omitempty"` + + // The SoftLayer_Hardware objects which are allowed access to this storage volume. + AllowedHardware []Hardware `json:"allowedHardware,omitempty" xmlrpc:"allowedHardware,omitempty"` + + // A count of the SoftLayer_Hardware objects which are allowed access to this storage volume. + AllowedHardwareCount *uint `json:"allowedHardwareCount,omitempty" xmlrpc:"allowedHardwareCount,omitempty"` + + // A count of the SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume. + AllowedIpAddressCount *uint `json:"allowedIpAddressCount,omitempty" xmlrpc:"allowedIpAddressCount,omitempty"` + + // The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume. + AllowedIpAddresses []Network_Subnet_IpAddress `json:"allowedIpAddresses,omitempty" xmlrpc:"allowedIpAddresses,omitempty"` + + // The SoftLayer_Hardware objects which are allowed access to this storage volume's Replicant. + AllowedReplicationHardware []Hardware `json:"allowedReplicationHardware,omitempty" xmlrpc:"allowedReplicationHardware,omitempty"` + + // A count of the SoftLayer_Hardware objects which are allowed access to this storage volume's Replicant. + AllowedReplicationHardwareCount *uint `json:"allowedReplicationHardwareCount,omitempty" xmlrpc:"allowedReplicationHardwareCount,omitempty"` + + // A count of the SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume's Replicant. + AllowedReplicationIpAddressCount *uint `json:"allowedReplicationIpAddressCount,omitempty" xmlrpc:"allowedReplicationIpAddressCount,omitempty"` + + // The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume's Replicant. + AllowedReplicationIpAddresses []Network_Subnet_IpAddress `json:"allowedReplicationIpAddresses,omitempty" xmlrpc:"allowedReplicationIpAddresses,omitempty"` + + // A count of the SoftLayer_Network_Subnet objects which are allowed access to this storage volume's Replicant. + AllowedReplicationSubnetCount *uint `json:"allowedReplicationSubnetCount,omitempty" xmlrpc:"allowedReplicationSubnetCount,omitempty"` + + // The SoftLayer_Network_Subnet objects which are allowed access to this storage volume's Replicant. + AllowedReplicationSubnets []Network_Subnet `json:"allowedReplicationSubnets,omitempty" xmlrpc:"allowedReplicationSubnets,omitempty"` + + // A count of the SoftLayer_Hardware objects which are allowed access to this storage volume's Replicant. 
+ AllowedReplicationVirtualGuestCount *uint `json:"allowedReplicationVirtualGuestCount,omitempty" xmlrpc:"allowedReplicationVirtualGuestCount,omitempty"` + + // The SoftLayer_Hardware objects which are allowed access to this storage volume's Replicant. + AllowedReplicationVirtualGuests []Virtual_Guest `json:"allowedReplicationVirtualGuests,omitempty" xmlrpc:"allowedReplicationVirtualGuests,omitempty"` + + // A count of the SoftLayer_Network_Subnet objects which are allowed access to this storage volume. + AllowedSubnetCount *uint `json:"allowedSubnetCount,omitempty" xmlrpc:"allowedSubnetCount,omitempty"` + + // The SoftLayer_Network_Subnet objects which are allowed access to this storage volume. + AllowedSubnets []Network_Subnet `json:"allowedSubnets,omitempty" xmlrpc:"allowedSubnets,omitempty"` + + // A count of the SoftLayer_Virtual_Guest objects which are allowed access to this storage volume. + AllowedVirtualGuestCount *uint `json:"allowedVirtualGuestCount,omitempty" xmlrpc:"allowedVirtualGuestCount,omitempty"` + + // The SoftLayer_Virtual_Guest objects which are allowed access to this storage volume. + AllowedVirtualGuests []Virtual_Guest `json:"allowedVirtualGuests,omitempty" xmlrpc:"allowedVirtualGuests,omitempty"` + + // The current billing item for a Storage volume. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // no documentation yet + BillingItemCategory *Product_Item_Category `json:"billingItemCategory,omitempty" xmlrpc:"billingItemCategory,omitempty"` + + // The amount of space used by the volume, in bytes. + BytesUsed *string `json:"bytesUsed,omitempty" xmlrpc:"bytesUsed,omitempty"` + + // A Storage account's capacity, measured in gigabytes. + CapacityGb *int `json:"capacityGb,omitempty" xmlrpc:"capacityGb,omitempty"` + + // The date a network storage volume was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The schedule id which was executed to create a snapshot. + CreationScheduleId *string `json:"creationScheduleId,omitempty" xmlrpc:"creationScheduleId,omitempty"` + + // A count of + CredentialCount *uint `json:"credentialCount,omitempty" xmlrpc:"credentialCount,omitempty"` + + // no documentation yet + Credentials []Network_Storage_Credential `json:"credentials,omitempty" xmlrpc:"credentials,omitempty"` + + // The Daily Schedule which is associated with this network storage volume. + DailySchedule *Network_Storage_Schedule `json:"dailySchedule,omitempty" xmlrpc:"dailySchedule,omitempty"` + + // A count of the events which have taken place on a network storage volume. + EventCount *uint `json:"eventCount,omitempty" xmlrpc:"eventCount,omitempty"` + + // The events which have taken place on a network storage volume. + Events []Network_Storage_Event `json:"events,omitempty" xmlrpc:"events,omitempty"` + + // Retrieves the NFS Network Mount Address Name for a given File Storage Volume. + FileNetworkMountAddress *string `json:"fileNetworkMountAddress,omitempty" xmlrpc:"fileNetworkMountAddress,omitempty"` + + // The unique identification number of the guest associated with a Storage volume. + GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"` + + // When applicable, the hardware associated with a Storage service. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The server that is associated with a Storage service. 
+ HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // no documentation yet + HasEncryptionAtRest *bool `json:"hasEncryptionAtRest,omitempty" xmlrpc:"hasEncryptionAtRest,omitempty"` + + // The unique identification number of the host associated with a Storage volume. + HostId *int `json:"hostId,omitempty" xmlrpc:"hostId,omitempty"` + + // The Hourly Schedule which is associated with this network storage volume. + HourlySchedule *Network_Storage_Schedule `json:"hourlySchedule,omitempty" xmlrpc:"hourlySchedule,omitempty"` + + // A Storage account's unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The Interval Schedule which is associated with this network storage volume. + IntervalSchedule *Network_Storage_Schedule `json:"intervalSchedule,omitempty" xmlrpc:"intervalSchedule,omitempty"` + + // The maximum number of IOPs selected for this volume. + Iops *string `json:"iops,omitempty" xmlrpc:"iops,omitempty"` + + // Determines whether a volume is ready to order snapshot space, or, if snapshot space is already available, to assign a snapshot schedule, or to take a manual snapshot. + IsReadyForSnapshot *bool `json:"isReadyForSnapshot,omitempty" xmlrpc:"isReadyForSnapshot,omitempty"` + + // Determines whether a volume is ready to have Hosts authorized to access it. This does not indicate whether another operation may be blocking, please refer to this volume's volumeStatus property for details. + IsReadyToMount *bool `json:"isReadyToMount,omitempty" xmlrpc:"isReadyToMount,omitempty"` + + // A count of relationship between a container volume and iSCSI LUNs. + IscsiLunCount *uint `json:"iscsiLunCount,omitempty" xmlrpc:"iscsiLunCount,omitempty"` + + // Relationship between a container volume and iSCSI LUNs. + IscsiLuns []Network_Storage `json:"iscsiLuns,omitempty" xmlrpc:"iscsiLuns,omitempty"` + + // A count of returns the target IP addresses of an iSCSI volume. + IscsiTargetIpAddressCount *uint `json:"iscsiTargetIpAddressCount,omitempty" xmlrpc:"iscsiTargetIpAddressCount,omitempty"` + + // Returns the target IP addresses of an iSCSI volume. + IscsiTargetIpAddresses []string `json:"iscsiTargetIpAddresses,omitempty" xmlrpc:"iscsiTargetIpAddresses,omitempty"` + + // The ID of the LUN volume. + LunId *string `json:"lunId,omitempty" xmlrpc:"lunId,omitempty"` + + // A count of the manually-created snapshots associated with this SoftLayer_Network_Storage volume. Does not support pagination by result limit and offset. + ManualSnapshotCount *uint `json:"manualSnapshotCount,omitempty" xmlrpc:"manualSnapshotCount,omitempty"` + + // The manually-created snapshots associated with this SoftLayer_Network_Storage volume. Does not support pagination by result limit and offset. + ManualSnapshots []Network_Storage `json:"manualSnapshots,omitempty" xmlrpc:"manualSnapshots,omitempty"` + + // A network storage volume's metric tracking object. This object records all periodic polled data available to this volume. + MetricTrackingObject *Metric_Tracking_Object `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"` + + // Whether or not a network storage volume may be mounted. + MountableFlag *string `json:"mountableFlag,omitempty" xmlrpc:"mountableFlag,omitempty"` + + // The current status of split or move operation as a part of volume duplication. + MoveAndSplitStatus *string `json:"moveAndSplitStatus,omitempty" xmlrpc:"moveAndSplitStatus,omitempty"` + + // A Storage account's type. 
Valid examples are "NAS", "LOCKBOX", "ISCSI", "EVAULT", and "HUB". + NasType *string `json:"nasType,omitempty" xmlrpc:"nasType,omitempty"` + + // Public notes related to a Storage volume. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // A count of the subscribers that will be notified for usage amount warnings and overages. + NotificationSubscriberCount *uint `json:"notificationSubscriberCount,omitempty" xmlrpc:"notificationSubscriberCount,omitempty"` + + // The subscribers that will be notified for usage amount warnings and overages. + NotificationSubscribers []Notification_User_Subscriber `json:"notificationSubscribers,omitempty" xmlrpc:"notificationSubscribers,omitempty"` + + // The name of the snapshot that this volume was duplicated from. + OriginalSnapshotName *string `json:"originalSnapshotName,omitempty" xmlrpc:"originalSnapshotName,omitempty"` + + // The name of the volume that this volume was duplicated from. + OriginalVolumeName *string `json:"originalVolumeName,omitempty" xmlrpc:"originalVolumeName,omitempty"` + + // The size (in GB) of the volume or LUN before any size expansion, or of the volume (before any possible size expansion) from which the duplicate volume or LUN was created. + OriginalVolumeSize *string `json:"originalVolumeSize,omitempty" xmlrpc:"originalVolumeSize,omitempty"` + + // A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type. + OsType *Network_Storage_Iscsi_OS_Type `json:"osType,omitempty" xmlrpc:"osType,omitempty"` + + // A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type ID. + OsTypeId *string `json:"osTypeId,omitempty" xmlrpc:"osTypeId,omitempty"` + + // A count of the volumes or snapshots partnered with a network storage volume in a parental role. + ParentPartnershipCount *uint `json:"parentPartnershipCount,omitempty" xmlrpc:"parentPartnershipCount,omitempty"` + + // The volumes or snapshots partnered with a network storage volume in a parental role. + ParentPartnerships []Network_Storage_Partnership `json:"parentPartnerships,omitempty" xmlrpc:"parentPartnerships,omitempty"` + + // The parent volume of a volume in a complex storage relationship. + ParentVolume *Network_Storage `json:"parentVolume,omitempty" xmlrpc:"parentVolume,omitempty"` + + // A count of the volumes or snapshots partnered with a network storage volume. + PartnershipCount *uint `json:"partnershipCount,omitempty" xmlrpc:"partnershipCount,omitempty"` + + // The volumes or snapshots partnered with a network storage volume. + Partnerships []Network_Storage_Partnership `json:"partnerships,omitempty" xmlrpc:"partnerships,omitempty"` + + // The password used to access a non-EVault Storage volume. This password is used to register the EVault server agent with the vault backup system. + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // A count of all permissions group(s) this volume is in. + PermissionsGroupCount *uint `json:"permissionsGroupCount,omitempty" xmlrpc:"permissionsGroupCount,omitempty"` + + // All permissions group(s) this volume is in. + PermissionsGroups []Network_Storage_Group `json:"permissionsGroups,omitempty" xmlrpc:"permissionsGroups,omitempty"` + + // The properties used to provide additional details about a network storage volume. + Properties []Network_Storage_Property `json:"properties,omitempty" xmlrpc:"properties,omitempty"` + + // A count of the properties used to provide additional details about a network storage volume. 
+ PropertyCount *uint `json:"propertyCount,omitempty" xmlrpc:"propertyCount,omitempty"` + + // The number of IOPs provisioned for this volume. + ProvisionedIops *string `json:"provisionedIops,omitempty" xmlrpc:"provisionedIops,omitempty"` + + // A count of the iSCSI LUN volumes being replicated by this network storage volume. + ReplicatingLunCount *uint `json:"replicatingLunCount,omitempty" xmlrpc:"replicatingLunCount,omitempty"` + + // The iSCSI LUN volumes being replicated by this network storage volume. + ReplicatingLuns []Network_Storage `json:"replicatingLuns,omitempty" xmlrpc:"replicatingLuns,omitempty"` + + // The network storage volume being replicated by a volume. + ReplicatingVolume *Network_Storage `json:"replicatingVolume,omitempty" xmlrpc:"replicatingVolume,omitempty"` + + // A count of the volume replication events. + ReplicationEventCount *uint `json:"replicationEventCount,omitempty" xmlrpc:"replicationEventCount,omitempty"` + + // The volume replication events. + ReplicationEvents []Network_Storage_Event `json:"replicationEvents,omitempty" xmlrpc:"replicationEvents,omitempty"` + + // A count of the network storage volumes configured to be replicants of a volume. + ReplicationPartnerCount *uint `json:"replicationPartnerCount,omitempty" xmlrpc:"replicationPartnerCount,omitempty"` + + // The network storage volumes configured to be replicants of a volume. + ReplicationPartners []Network_Storage `json:"replicationPartners,omitempty" xmlrpc:"replicationPartners,omitempty"` + + // The Replication Schedule associated with a network storage volume. + ReplicationSchedule *Network_Storage_Schedule `json:"replicationSchedule,omitempty" xmlrpc:"replicationSchedule,omitempty"` + + // The current replication status of a network storage volume. Indicates Failover or Failback status. + ReplicationStatus *string `json:"replicationStatus,omitempty" xmlrpc:"replicationStatus,omitempty"` + + // A count of the schedules which are associated with a network storage volume. + ScheduleCount *uint `json:"scheduleCount,omitempty" xmlrpc:"scheduleCount,omitempty"` + + // The schedules which are associated with a network storage volume. + Schedules []Network_Storage_Schedule `json:"schedules,omitempty" xmlrpc:"schedules,omitempty"` + + // Service Provider ID + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` + + // The network resource a Storage service is connected to. + ServiceResource *Network_Service_Resource `json:"serviceResource,omitempty" xmlrpc:"serviceResource,omitempty"` + + // The IP address of a Storage resource. + ServiceResourceBackendIpAddress *string `json:"serviceResourceBackendIpAddress,omitempty" xmlrpc:"serviceResourceBackendIpAddress,omitempty"` + + // The name of a Storage's network resource. + ServiceResourceName *string `json:"serviceResourceName,omitempty" xmlrpc:"serviceResourceName,omitempty"` + + // A volume's configured snapshot space size. + SnapshotCapacityGb *string `json:"snapshotCapacityGb,omitempty" xmlrpc:"snapshotCapacityGb,omitempty"` + + // A count of the snapshots associated with this SoftLayer_Network_Storage volume. + SnapshotCount *uint `json:"snapshotCount,omitempty" xmlrpc:"snapshotCount,omitempty"` + + // The creation timestamp of the snapshot on the storage platform. + SnapshotCreationTimestamp *string `json:"snapshotCreationTimestamp,omitempty" xmlrpc:"snapshotCreationTimestamp,omitempty"` + + // The percentage of used snapshot space after which to delete automated snapshots. 
+ SnapshotDeletionThresholdPercentage *string `json:"snapshotDeletionThresholdPercentage,omitempty" xmlrpc:"snapshotDeletionThresholdPercentage,omitempty"` + + // The snapshot size in bytes. + SnapshotSizeBytes *string `json:"snapshotSizeBytes,omitempty" xmlrpc:"snapshotSizeBytes,omitempty"` + + // A volume's available snapshot reservation space. + SnapshotSpaceAvailable *string `json:"snapshotSpaceAvailable,omitempty" xmlrpc:"snapshotSpaceAvailable,omitempty"` + + // The snapshots associated with this SoftLayer_Network_Storage volume. + Snapshots []Network_Storage `json:"snapshots,omitempty" xmlrpc:"snapshots,omitempty"` + + // no documentation yet + StaasVersion *string `json:"staasVersion,omitempty" xmlrpc:"staasVersion,omitempty"` + + // A count of the network storage groups this volume is attached to. + StorageGroupCount *uint `json:"storageGroupCount,omitempty" xmlrpc:"storageGroupCount,omitempty"` + + // The network storage groups this volume is attached to. + StorageGroups []Network_Storage_Group `json:"storageGroups,omitempty" xmlrpc:"storageGroups,omitempty"` + + // no documentation yet + StorageTierLevel *string `json:"storageTierLevel,omitempty" xmlrpc:"storageTierLevel,omitempty"` + + // A description of the Storage object. + StorageType *Network_Storage_Type `json:"storageType,omitempty" xmlrpc:"storageType,omitempty"` + + // A storage object's type. + StorageTypeId *string `json:"storageTypeId,omitempty" xmlrpc:"storageTypeId,omitempty"` + + // The amount of space used by the volume. + TotalBytesUsed *string `json:"totalBytesUsed,omitempty" xmlrpc:"totalBytesUsed,omitempty"` + + // The total snapshot retention count of all schedules on this network storage volume. + TotalScheduleSnapshotRetentionCount *uint `json:"totalScheduleSnapshotRetentionCount,omitempty" xmlrpc:"totalScheduleSnapshotRetentionCount,omitempty"` + + // This flag indicates whether this storage type is upgradable or not. + UpgradableFlag *bool `json:"upgradableFlag,omitempty" xmlrpc:"upgradableFlag,omitempty"` + + // The usage notification for SL Storage services. + UsageNotification *Notification `json:"usageNotification,omitempty" xmlrpc:"usageNotification,omitempty"` + + // The username used to access a non-EVault Storage volume. This username is used to register the EVault server agent with the vault backup system. + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` + + // The type of network storage service. + VendorName *string `json:"vendorName,omitempty" xmlrpc:"vendorName,omitempty"` + + // When applicable, the virtual guest associated with a Storage service. + VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"` + + // The username and password history for a Storage service. + VolumeHistory []Network_Storage_History `json:"volumeHistory,omitempty" xmlrpc:"volumeHistory,omitempty"` + + // A count of the username and password history for a Storage service. + VolumeHistoryCount *uint `json:"volumeHistoryCount,omitempty" xmlrpc:"volumeHistoryCount,omitempty"` + + // The current status of a network storage volume. + VolumeStatus *string `json:"volumeStatus,omitempty" xmlrpc:"volumeStatus,omitempty"` + + // The account username and password for the EVault webCC interface. + WebccAccount *Account_Password `json:"webccAccount,omitempty" xmlrpc:"webccAccount,omitempty"` + + // The Weekly Schedule which is associated with this network storage volume. 
+ WeeklySchedule *Network_Storage_Schedule `json:"weeklySchedule,omitempty" xmlrpc:"weeklySchedule,omitempty"` +} + +// no documentation yet +type Network_Storage_Allowed_Host struct { + Entity + + // The account to which this allowed host belongs to. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of the SoftLayer_Network_Storage_Group objects this SoftLayer_Network_Storage_Allowed_Host is present in. + AssignedGroupCount *uint `json:"assignedGroupCount,omitempty" xmlrpc:"assignedGroupCount,omitempty"` + + // The SoftLayer_Network_Storage_Group objects this SoftLayer_Network_Storage_Allowed_Host is present in. + AssignedGroups []Network_Storage_Group `json:"assignedGroups,omitempty" xmlrpc:"assignedGroups,omitempty"` + + // A count of the SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. + AssignedIscsiVolumeCount *uint `json:"assignedIscsiVolumeCount,omitempty" xmlrpc:"assignedIscsiVolumeCount,omitempty"` + + // The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. + AssignedIscsiVolumes []Network_Storage `json:"assignedIscsiVolumes,omitempty" xmlrpc:"assignedIscsiVolumes,omitempty"` + + // A count of the SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. + AssignedNfsVolumeCount *uint `json:"assignedNfsVolumeCount,omitempty" xmlrpc:"assignedNfsVolumeCount,omitempty"` + + // The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. + AssignedNfsVolumes []Network_Storage `json:"assignedNfsVolumes,omitempty" xmlrpc:"assignedNfsVolumes,omitempty"` + + // A count of the SoftLayer_Network_Storage primary volumes whose replicas are allowed access. + AssignedReplicationVolumeCount *uint `json:"assignedReplicationVolumeCount,omitempty" xmlrpc:"assignedReplicationVolumeCount,omitempty"` + + // The SoftLayer_Network_Storage primary volumes whose replicas are allowed access. + AssignedReplicationVolumes []Network_Storage `json:"assignedReplicationVolumes,omitempty" xmlrpc:"assignedReplicationVolumes,omitempty"` + + // A count of the SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. + AssignedVolumeCount *uint `json:"assignedVolumeCount,omitempty" xmlrpc:"assignedVolumeCount,omitempty"` + + // The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. + AssignedVolumes []Network_Storage `json:"assignedVolumes,omitempty" xmlrpc:"assignedVolumes,omitempty"` + + // The SoftLayer_Network_Storage_Credential this allowed host uses. + Credential *Network_Storage_Credential `json:"credential,omitempty" xmlrpc:"credential,omitempty"` + + // The credential this allowed host will use + CredentialId *int `json:"credentialId,omitempty" xmlrpc:"credentialId,omitempty"` + + // The internal identifier of the igroup + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The name of allowed host, usually an IQN or other identifier + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` + + // no documentation yet + ResourceTableName *string `json:"resourceTableName,omitempty" xmlrpc:"resourceTableName,omitempty"` + + // Connections to a target with a source IP in this subnet prefix are allowed. 
+ SourceSubnet *string `json:"sourceSubnet,omitempty" xmlrpc:"sourceSubnet,omitempty"` +} + +// no documentation yet +type Network_Storage_Allowed_Host_Hardware struct { + Network_Storage_Allowed_Host + + // The SoftLayer_Account object which this SoftLayer_Network_Storage_Allowed_Host belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The SoftLayer_Hardware object which this SoftLayer_Network_Storage_Allowed_Host is referencing. + Resource *Hardware `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Network_Storage_Allowed_Host_IpAddress struct { + Network_Storage_Allowed_Host + + // The SoftLayer_Account object which this SoftLayer_Network_Storage_Allowed_Host belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The SoftLayer_Network_Subnet_IpAddress object which this SoftLayer_Network_Storage_Allowed_Host is referencing. + Resource *Network_Subnet_IpAddress `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Network_Storage_Allowed_Host_Subnet struct { + Network_Storage_Allowed_Host + + // The SoftLayer_Account object which this SoftLayer_Network_Storage_Allowed_Host belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The SoftLayer_Network_Subnet object which this SoftLayer_Network_Storage_Allowed_Host is referencing. + Resource *Network_Subnet `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Network_Storage_Allowed_Host_VirtualGuest struct { + Network_Storage_Allowed_Host + + // The SoftLayer_Account object which this SoftLayer_Network_Storage_Allowed_Host belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The SoftLayer_Virtual_Guest object which this SoftLayer_Network_Storage_Allowed_Host is referencing. + Resource *Virtual_Guest `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The SoftLayer_Network_Storage_Backup contains general information regarding a Storage backup service such as account id, username, maximum capacity, password, Storage's product type and the server id. +type Network_Storage_Backup struct { + Network_Storage + + // Peak number of bytes used in the vault for the current billing cycle. + CurrentCyclePeakUsage *uint `json:"currentCyclePeakUsage,omitempty" xmlrpc:"currentCyclePeakUsage,omitempty"` + + // Peak number of bytes used in the vault for the previous billing cycle. + PreviousCyclePeakUsage *uint `json:"previousCyclePeakUsage,omitempty" xmlrpc:"previousCyclePeakUsage,omitempty"` +} + +// The SoftLayer_Network_Storage_Backup_Evault contains general information regarding an EVault Storage service such as account id, username, maximum capacity, password, Storage's product type and the server id. +type Network_Storage_Backup_Evault struct { + Network_Storage_Backup +} + +// The SoftLayer_Network_Storage_Backup_Evault_Version6 contains the same properties as the SoftLayer_Network_Storage_Backup_Evault. Additional properties available for the EVault Storage type: softwareComponent, totalBytesUsed, backupJobDetails, restoreJobDetails and agentStatuses +type Network_Storage_Backup_Evault_Version6 struct { + Network_Storage_Backup_Evault + + // A count of statuses (most of the time will be one status) for the agent tied to the EVault Storage services. 
+	AgentStatusCount *uint `json:"agentStatusCount,omitempty" xmlrpc:"agentStatusCount,omitempty"`
+
+	// Statuses (most of the time will be one status) for the agent tied to the EVault Storage services.
+	AgentStatuses []Container_Network_Storage_Evault_WebCc_AgentStatus `json:"agentStatuses,omitempty" xmlrpc:"agentStatuses,omitempty"`
+
+	// A count of all of the backup jobs for the EVault Storage account.
+	BackupJobDetailCount *uint `json:"backupJobDetailCount,omitempty" xmlrpc:"backupJobDetailCount,omitempty"`
+
+	// All of the backup jobs for the EVault Storage account.
+	BackupJobDetails []Container_Network_Storage_Evault_WebCc_JobDetails `json:"backupJobDetails,omitempty" xmlrpc:"backupJobDetails,omitempty"`
+
+	// A count of the billing items for plugins tied to the EVault Storage service.
+	PluginBillingItemCount *uint `json:"pluginBillingItemCount,omitempty" xmlrpc:"pluginBillingItemCount,omitempty"`
+
+	// The billing items for plugins tied to the EVault Storage service.
+	PluginBillingItems []Billing_Item `json:"pluginBillingItems,omitempty" xmlrpc:"pluginBillingItems,omitempty"`
+
+	// A count of all of the restore jobs for the EVault Storage account.
+	RestoreJobDetailCount *uint `json:"restoreJobDetailCount,omitempty" xmlrpc:"restoreJobDetailCount,omitempty"`
+
+	// All of the restore jobs for the EVault Storage account.
+	RestoreJobDetails []Container_Network_Storage_Evault_WebCc_JobDetails `json:"restoreJobDetails,omitempty" xmlrpc:"restoreJobDetails,omitempty"`
+
+	// The software component for the EVault base client.
+	SoftwareComponent *Software_Component `json:"softwareComponent,omitempty" xmlrpc:"softwareComponent,omitempty"`
+
+	// A count of the tasks for the EVault Storage service.
+	TaskCount *uint `json:"taskCount,omitempty" xmlrpc:"taskCount,omitempty"`
+
+	// Retrieve the task information for the EVault Storage service.
+	Tasks []Container_Network_Storage_Evault_Vault_Task `json:"tasks,omitempty" xmlrpc:"tasks,omitempty"`
+}
+
+// The SoftLayer_Network_Storage_Credential data type will give you an overview of the usernames that are currently attached to your storage device.
+type Network_Storage_Credential struct {
+	Entity
+
+	// This is the account that the storage credential is tied to.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// This is the account id associated with the volume.
+	AccountId *string `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// This is the date that the record was created in the table.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// This is the date that the record was last updated in the table.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// This is the id of the type of credential that this object represents.
+	NasCredentialTypeId *int `json:"nasCredentialTypeId,omitempty" xmlrpc:"nasCredentialTypeId,omitempty"`
+
+	// These are the SoftLayer_Network_Storage_Allowed_Host entries that this credential is assigned to.
+	NetworkStorageAllowedHosts *Network_Storage_Allowed_Host `json:"networkStorageAllowedHosts,omitempty" xmlrpc:"networkStorageAllowedHosts,omitempty"`
+
+	// This is the password associated with the volume.
+	Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"`
+
+	// These are the types of storage that the credential can be assigned to.
+	Type *Network_Storage_Credential_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// This is the username associated with the volume.
+	Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+
+	// A count of these are the SoftLayer_Network_Storage volumes that this credential is assigned to.
+	VolumeCount *uint `json:"volumeCount,omitempty" xmlrpc:"volumeCount,omitempty"`
+
+	// These are the SoftLayer_Network_Storage volumes that this credential is assigned to.
+	Volumes []Network_Storage `json:"volumes,omitempty" xmlrpc:"volumes,omitempty"`
+}
+
+// no documentation yet
+type Network_Storage_Credential_Type struct {
+	Entity
+
+	// The date a credential type was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// A short description of the credential type.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The key name of the credential type.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// The date a credential type was last modified.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// The human readable name of the credential type.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Network_Storage_Daily_Usage struct {
+	Entity
+
+	// no documentation yet
+	BytesUsed *uint `json:"bytesUsed,omitempty" xmlrpc:"bytesUsed,omitempty"`
+
+	// no documentation yet
+	CdnHttpBandwidth *uint `json:"cdnHttpBandwidth,omitempty" xmlrpc:"cdnHttpBandwidth,omitempty"`
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	NasVolume *Network_Storage `json:"nasVolume,omitempty" xmlrpc:"nasVolume,omitempty"`
+
+	// no documentation yet
+	NasVolumeId *int `json:"nasVolumeId,omitempty" xmlrpc:"nasVolumeId,omitempty"`
+
+	// no documentation yet
+	PublicBandwidthOut *uint `json:"publicBandwidthOut,omitempty" xmlrpc:"publicBandwidthOut,omitempty"`
+}
+
+// Storage volumes can create various events to keep track of what has occurred to the volume. Events provide an audit trail that can be used to verify that various tasks have occurred, such as snapshots created by a schedule or remote replication synchronization.
+type Network_Storage_Event struct {
+	Entity
+
+	// The date an event was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The message text for an event.
+	Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"`
+
+	// A schedule that is associated with an event. Not all events will have a schedule.
+	Schedule *Network_Storage_Schedule `json:"schedule,omitempty" xmlrpc:"schedule,omitempty"`
+
+	// An identifier for the schedule which is associated with an event.
+	ScheduleId *int `json:"scheduleId,omitempty" xmlrpc:"scheduleId,omitempty"`
+
+	// A Storage volume's event type. The type provides a standardized definition for an event.
+	Type *Network_Storage_Event_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// An identifier for the type of an event.
+	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+
+	// The associated volume for an event.
+	Volume *Network_Storage `json:"volume,omitempty" xmlrpc:"volume,omitempty"`
+
+	// The volume id which an event is associated with.
+ VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"` +} + +// no documentation yet +type Network_Storage_Event_Type struct { + Entity + + // no documentation yet + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Storage_Group struct { + Entity + + // The SoftLayer_Account which owns this group. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The account ID which owns this group + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The friendly name of this group + Alias *string `json:"alias,omitempty" xmlrpc:"alias,omitempty"` + + // A count of the allowed hosts list for this group. + AllowedHostCount *uint `json:"allowedHostCount,omitempty" xmlrpc:"allowedHostCount,omitempty"` + + // The allowed hosts list for this group. + AllowedHosts []Network_Storage_Allowed_Host `json:"allowedHosts,omitempty" xmlrpc:"allowedHosts,omitempty"` + + // A count of the network storage volumes this group is attached to. + AttachedVolumeCount *uint `json:"attachedVolumeCount,omitempty" xmlrpc:"attachedVolumeCount,omitempty"` + + // The network storage volumes this group is attached to. + AttachedVolumes []Network_Storage `json:"attachedVolumes,omitempty" xmlrpc:"attachedVolumes,omitempty"` + + // The date this group was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The type which defines this group. + GroupType *Network_Storage_Group_Type `json:"groupType,omitempty" xmlrpc:"groupType,omitempty"` + + // The SoftLayer_Network_Storage_Group_Type which describes this group. + GroupTypeId *int `json:"groupTypeId,omitempty" xmlrpc:"groupTypeId,omitempty"` + + // The internal identifier of the group + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The OS Type this group is configured for. + OsType *Network_Storage_Iscsi_OS_Type `json:"osType,omitempty" xmlrpc:"osType,omitempty"` + + // A SoftLayer_Network_Storage_OS_Type Operating System designation that this group was created for. + OsTypeId *int `json:"osTypeId,omitempty" xmlrpc:"osTypeId,omitempty"` + + // The network resource this group is created on. + ServiceResource *Network_Service_Resource `json:"serviceResource,omitempty" xmlrpc:"serviceResource,omitempty"` + + // A SoftLayer_Network_Service_Resource that this group was created on. + ServiceResourceId *int `json:"serviceResourceId,omitempty" xmlrpc:"serviceResourceId,omitempty"` +} + +// no documentation yet +type Network_Storage_Group_Iscsi struct { + Network_Storage_Group +} + +// no documentation yet +type Network_Storage_Group_Nfs struct { + Network_Storage_Group +} + +// no documentation yet +type Network_Storage_Group_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Network_Storage_History contains the username/password past history for Storage services except Evault. Information such as the username, passwords, notes and the date of the password change may be retrieved. 
+type Network_Storage_History struct { + Entity + + // The account that the Storage services belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // Date the password was changed. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The Storage service that the password history belongs to. + NasVolume *Network_Storage `json:"nasVolume,omitempty" xmlrpc:"nasVolume,omitempty"` + + // Past notes for the Storage service. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // Password for the Storage service that was used in the past. + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // Username for the Storage service. + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// The SoftLayer_Network_Storage_Hub data type models Virtual Server type Storage storage offerings. +type Network_Storage_Hub struct { + Network_Storage + + // A count of the billing items tied to a Storage service's bandwidth usage. + BandwidthBillingItemCount *uint `json:"bandwidthBillingItemCount,omitempty" xmlrpc:"bandwidthBillingItemCount,omitempty"` + + // The billing items tied to a Storage service's bandwidth usage. + BandwidthBillingItems []Billing_Item `json:"bandwidthBillingItems,omitempty" xmlrpc:"bandwidthBillingItems,omitempty"` +} + +// no documentation yet +type Network_Storage_Hub_Cleversafe_Account struct { + Entity + + // SoftLayer account to which an IBM Cloud Object Storage account belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The ID of the SoftLayer_Account which this IBM Cloud Object Storage account is associated with. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // An associated parent billing item which is active. Includes billing items which are scheduled to be cancelled in the future. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // An associated parent billing item which has been cancelled. + CancelledBillingItem *Billing_Item `json:"cancelledBillingItem,omitempty" xmlrpc:"cancelledBillingItem,omitempty"` + + // A count of credentials used for generating an AWS signature. Max of 2. + CredentialCount *uint `json:"credentialCount,omitempty" xmlrpc:"credentialCount,omitempty"` + + // Credentials used for generating an AWS signature. Max of 2. + Credentials []Network_Storage_Credential `json:"credentials,omitempty" xmlrpc:"credentials,omitempty"` + + // The IMS ID of an IBM Cloud Object Storage account. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Provides an interface to various metrics relating to the usage of an IBM Cloud Object Storage account. + MetricTrackingObject *Metric_Tracking_Object `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"` + + // A user-defined field of notes. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // Human readable identifier of IBM Cloud Object Storage accounts. + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` + + // Unique identifier for an IBM Cloud Object Storage account. 
+ Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"` +} + +// no documentation yet +type Network_Storage_Hub_Swift struct { + Network_Storage_Hub + + // A count of + StorageNodeCount *uint `json:"storageNodeCount,omitempty" xmlrpc:"storageNodeCount,omitempty"` + + // no documentation yet + StorageNodes []Network_Service_Resource `json:"storageNodes,omitempty" xmlrpc:"storageNodes,omitempty"` +} + +// no documentation yet +type Network_Storage_Hub_Swift_Container struct { + Network_Storage_Hub_Swift +} + +// no documentation yet +type Network_Storage_Hub_Swift_Share struct { + Entity +} + +// no documentation yet +type Network_Storage_Hub_Swift_Version1 struct { + Network_Storage_Hub_Swift +} + +// The iscsi data type provides access to additional information about an iscsi volume such as the snapshot capacity limit and replication partners. +type Network_Storage_Iscsi struct { + Network_Storage +} + +// The iscsi EqualLogic Version 3 data type provides access to additional information about an iscsi volume such as the available snapshot reserve space. +type Network_Storage_Iscsi_EqualLogic_Version3 struct { + Network_Storage_Iscsi +} + +// An iscsi replicant receives incoming data from an associated iscsi volume. While the replicant is not in failover mode it will not be mountable. Upon failover the replicant can be mounted and used as a normal volume. It is suggested to only do this as part of a disaster recovery plan. +type Network_Storage_Iscsi_EqualLogic_Version3_Replicant struct { + Network_Storage_Iscsi_EqualLogic_Version3 + + // When a replicant is in the process of synchronizing with the parent volume this flag will be true. + FailbackInProgressFlag *bool `json:"failbackInProgressFlag,omitempty" xmlrpc:"failbackInProgressFlag,omitempty"` + + // The volume name for an iscsi replicant. + VolumeName *string `json:"volumeName,omitempty" xmlrpc:"volumeName,omitempty"` +} + +// An iscsi snapshot is a point-in-time view of the data on an associated iscsi volume. Iscsi snapshots use a copy-on-write technology to minimize the amount of snapshot space used. When a snapshot is initially created it will use no snapshot space. At the time data changes on a volume which existed when a snapshot was created the original data will be saved in the associated volume's snapshot reserve space. +// +// As a snapshot is created offline it must be set mountable in order to mount it via an iscsi initiator service. +type Network_Storage_Iscsi_EqualLogic_Version3_Snapshot struct { + Network_Storage_Iscsi_EqualLogic_Version3 + + // If applicable, the schedule which was executed to create a snapshot. + CreationSchedule *Network_Storage_Schedule `json:"creationSchedule,omitempty" xmlrpc:"creationSchedule,omitempty"` + + // The volume name for an iscsi snapshot. + VolumeName *string `json:"volumeName,omitempty" xmlrpc:"volumeName,omitempty"` +} + +// no documentation yet +type Network_Storage_Iscsi_OS_Type struct { + Entity + + // The date this OS type record was created. 
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The description of this OS type + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The internal identifier of the OS type selection + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The key name of this OS type + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The name of this OS type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Storage_MassDataMigration_CrossRegion_Country_Xref struct { + Entity + + // SoftLayer_Locale_Country Id. + Country *Locale_Country `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + CountryId *int `json:"countryId,omitempty" xmlrpc:"countryId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Location Group ID of CleverSafe cross region. + LocationGroup *Location_Group `json:"locationGroup,omitempty" xmlrpc:"locationGroup,omitempty"` + + // no documentation yet + LocationGroupId *int `json:"locationGroupId,omitempty" xmlrpc:"locationGroupId,omitempty"` +} + +// The SoftLayer_Network_Storage_MassDataMigration_Request data type contains information on a single Mass Data Migration request. Creation of these requests is limited to SoftLayer customers through the SoftLayer Customer Portal. +type Network_Storage_MassDataMigration_Request struct { + Entity + + // The account to which the request belongs. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The account id of the request. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of the active tickets that are attached to the MDMS request. + ActiveTicketCount *uint `json:"activeTicketCount,omitempty" xmlrpc:"activeTicketCount,omitempty"` + + // The active tickets that are attached to the MDMS request. + ActiveTickets []Ticket `json:"activeTickets,omitempty" xmlrpc:"activeTickets,omitempty"` + + // The customer address where the device is shipped to. + Address *Account_Address `json:"address,omitempty" xmlrpc:"address,omitempty"` + + // The address id of address assigned to this request. + AddressId *int `json:"addressId,omitempty" xmlrpc:"addressId,omitempty"` + + // An associated parent billing item which is active. Includes billing items which are scheduled to be cancelled in the future. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The employee user who created the request. + CreateEmployee *User_Employee `json:"createEmployee,omitempty" xmlrpc:"createEmployee,omitempty"` + + // The customer user who created the request. + CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"` + + // The create user id of the request. + CreateUserId *int `json:"createUserId,omitempty" xmlrpc:"createUserId,omitempty"` + + // The device configurations. + DeviceConfiguration *Network_Storage_MassDataMigration_Request_DeviceConfiguration `json:"deviceConfiguration,omitempty" xmlrpc:"deviceConfiguration,omitempty"` + + // The end date of the request. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The unique id of the request. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of the key contacts for this requests. 
+	KeyContactCount *uint `json:"keyContactCount,omitempty" xmlrpc:"keyContactCount,omitempty"`
+
+	// The key contacts for this request.
+	KeyContacts []Network_Storage_MassDataMigration_Request_KeyContact `json:"keyContacts,omitempty" xmlrpc:"keyContacts,omitempty"`
+
+	// The employee who last modified the request.
+	ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"`
+
+	// The customer user who last modified the request.
+	ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"`
+
+	// The modify user id of the request.
+	ModifyUserId *int `json:"modifyUserId,omitempty" xmlrpc:"modifyUserId,omitempty"`
+
+	// The name of the request.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// A count of the shipments of the request.
+	ShipmentCount *uint `json:"shipmentCount,omitempty" xmlrpc:"shipmentCount,omitempty"`
+
+	// The shipments of the request.
+	Shipments []Account_Shipment `json:"shipments,omitempty" xmlrpc:"shipments,omitempty"`
+
+	// The start date of the request.
+	StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+	// The status of the request.
+	Status *Network_Storage_MassDataMigration_Request_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// The status id of the request.
+	StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+	// Ticket that is attached to this mass data migration request.
+	Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"`
+
+	// A count of all tickets that are attached to the mass data migration request.
+	TicketCount *uint `json:"ticketCount,omitempty" xmlrpc:"ticketCount,omitempty"`
+
+	// All tickets that are attached to the mass data migration request.
+	Tickets []Ticket `json:"tickets,omitempty" xmlrpc:"tickets,omitempty"`
+}
+
+// The SoftLayer_Network_Storage_MassDataMigration_Request_DeviceConfiguration data type contains settings, such as networking and the COS account, which need to be configured on the device for a Mass Data Migration Request.
+type Network_Storage_MassDataMigration_Request_DeviceConfiguration struct {
+	Entity
+
+	// The account id.
+	CosAccountId *int `json:"cosAccountId,omitempty" xmlrpc:"cosAccountId,omitempty"`
+
+	// The Cloud Object Storage bucket.
+	CosBucket *string `json:"cosBucket,omitempty" xmlrpc:"cosBucket,omitempty"`
+
+	// The eth1 gateway for connecting to the private network in the datacenter.
+	Eth1Gateway *string `json:"eth1Gateway,omitempty" xmlrpc:"eth1Gateway,omitempty"`
+
+	// The eth1 IP address for connecting to the private network in the datacenter.
+	Eth1IpAddress *string `json:"eth1IpAddress,omitempty" xmlrpc:"eth1IpAddress,omitempty"`
+
+	// The eth1 netmask for connecting to the private network in the datacenter.
+	Eth1Netmask *string `json:"eth1Netmask,omitempty" xmlrpc:"eth1Netmask,omitempty"`
+
+	// The eth3 gateway for connecting to the private network at the customer's location.
+	Eth3Gateway *string `json:"eth3Gateway,omitempty" xmlrpc:"eth3Gateway,omitempty"`
+
+	// The eth3 IP address for connecting to the private network at the customer's location.
+	Eth3IpAddress *string `json:"eth3IpAddress,omitempty" xmlrpc:"eth3IpAddress,omitempty"`
+
+	// The eth3 netmask for connecting to the private network at the customer's location.
+	Eth3Netmask *string `json:"eth3Netmask,omitempty" xmlrpc:"eth3Netmask,omitempty"`
+
+	// The unique id of the device configuration.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The password for configuring network share.
+ Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // The pool lock password for configuring network share. + PoolLockPassword *string `json:"poolLockPassword,omitempty" xmlrpc:"poolLockPassword,omitempty"` + + // The request this device configurations belongs to. + Request *Network_Storage_MassDataMigration_Request `json:"request,omitempty" xmlrpc:"request,omitempty"` + + // The request id. + RequestId *int `json:"requestId,omitempty" xmlrpc:"requestId,omitempty"` + + // The Cloud Object Storage bucket URL. + S3Url *string `json:"s3Url,omitempty" xmlrpc:"s3Url,omitempty"` + + // The name of network share. + ShareName *string `json:"shareName,omitempty" xmlrpc:"shareName,omitempty"` + + // The storage account to use for this request. + StorageAccount *Network_Storage_Hub_Cleversafe_Account `json:"storageAccount,omitempty" xmlrpc:"storageAccount,omitempty"` + + // The username for configuring network share. + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// The SoftLayer_Network_Storage_MassDataMigration_Request_KeyContact data type contains name, email, and phone for key contact at customer location who will handle Mass Data Migration. +type Network_Storage_MassDataMigration_Request_KeyContact struct { + Entity + + // The request this key contact belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // An account number that is linked to a KeyContact. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The date a KeyContact was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // KeyContact's Email Id. + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // The unique id of the key contact. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date a KeyContact was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // KeyContact's Name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A phone number assigned to a KeyContact. + Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"` + + // The request this key contact belongs to. + Request *Network_Storage_MassDataMigration_Request `json:"request,omitempty" xmlrpc:"request,omitempty"` + + // A request id that is linked to a KeyContact. + RequestId *int `json:"requestId,omitempty" xmlrpc:"requestId,omitempty"` +} + +// The SoftLayer_Network_Storage_MassDataMigration_Request_Status data type contains general information relating to the statuses to which a Mass Data Migration Request may be set. +type Network_Storage_MassDataMigration_Request_Status struct { + Entity + + // The description of the request status. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The unique id of the request status. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The unique keyname of the request status. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The name of the request status. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Network_Storage_Nas contains general information regarding a NAS Storage service such as account id, username, password, maximum capacity, Storage's product type and capacity. 
+type Network_Storage_Nas struct { + Network_Storage + + // no documentation yet + RecentBytesUsed *Network_Storage_Daily_Usage `json:"recentBytesUsed,omitempty" xmlrpc:"recentBytesUsed,omitempty"` +} + +// The SoftLayer_Network_Storage_OpenStack_Object data type models OpenStack specific object storage objects. These storages authenticate through Keystone to access Swift. +type Network_Storage_OpenStack_Object struct { + Network_Storage + + // A count of the billing item tied to an OpenStack Object Storage's bandwidth service. + BandwidthBillingItemCount *uint `json:"bandwidthBillingItemCount,omitempty" xmlrpc:"bandwidthBillingItemCount,omitempty"` + + // The billing item tied to an OpenStack Object Storage's bandwidth service. + BandwidthBillingItems []Billing_Item `json:"bandwidthBillingItems,omitempty" xmlrpc:"bandwidthBillingItems,omitempty"` +} + +// A network storage partnership is used to link multiple volumes to each other. These partnerships describe replication hierarchies or link volume snapshots to their associated storage volume. +type Network_Storage_Partnership struct { + Entity + + // The date a partnership was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The date a partnership was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The associated child volume for a partnership. + PartnerVolume *Network_Storage `json:"partnerVolume,omitempty" xmlrpc:"partnerVolume,omitempty"` + + // The child volume id which a partnership is associated with. + PartnerVolumeId *int `json:"partnerVolumeId,omitempty" xmlrpc:"partnerVolumeId,omitempty"` + + // The type provides a standardized definition for a partnership. + Type *Network_Storage_Partnership_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The associated parent volume for a partnership. + Volume *Network_Storage `json:"volume,omitempty" xmlrpc:"volume,omitempty"` + + // The volume id which a partnership is associated with. + VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"` +} + +// A network storage partnership type is used to define the link between two volumes. +type Network_Storage_Partnership_Type struct { + Entity + + // A type's description, for example 'ISCSI snapshot partnership'. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A type's key name, for example 'ISCSI_SNAPSHOT'. + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // A type's name, for example 'ISCSI Snapshot'. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// A property provides additional information about a volume which it is assigned to. This information can range from "Mountable" flags to utilized snapshot space. +type Network_Storage_Property struct { + Entity + + // The date a property was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The date a property was last modified; + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The type provides a standardized definition for a property. + Type *Network_Storage_Property_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The value of a property. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` + + // The associated volume for a property. + Volume *Network_Storage `json:"volume,omitempty" xmlrpc:"volume,omitempty"` + + // The volume id which a property is associated with. 
+	VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"`
+}
+
+// The storage property types provide standard definitions for properties which can be used with any type of Storage offering. The properties provide additional information about a volume which they are assigned to.
+type Network_Storage_Property_Type struct {
+	Entity
+
+	// A type's description, for example 'Determines whether the volume is currently mountable'.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// A type's keyname, for example 'MOUNTABLE'.
+	Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"`
+
+	// A type's name, for example 'Mountable'.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Network_Storage_Replicant struct {
+	Network_Storage
+
+	// When a replicant is in the process of synchronizing with the parent volume this flag will be true.
+	FailbackInProgressFlag *string `json:"failbackInProgressFlag,omitempty" xmlrpc:"failbackInProgressFlag,omitempty"`
+
+	// The volume name for a replicant.
+	VolumeName *string `json:"volumeName,omitempty" xmlrpc:"volumeName,omitempty"`
+}
+
+// Schedules can be created for select Storage services, such as iscsi. These schedules are used to perform various tasks such as scheduling snapshots or synchronizing replicants.
+type Network_Storage_Schedule struct {
+	Entity
+
+	// A flag which determines if a schedule is active.
+	Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+	// The date a schedule was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The day parameter of this schedule.
+	Day *string `json:"day,omitempty" xmlrpc:"day,omitempty"`
+
+	// The day of the month parameter of this schedule.
+	DayOfMonth *string `json:"dayOfMonth,omitempty" xmlrpc:"dayOfMonth,omitempty"`
+
+	// The day of the week parameter of this schedule.
+	DayOfWeek *string `json:"dayOfWeek,omitempty" xmlrpc:"dayOfWeek,omitempty"`
+
+	// A count of events which have been created as the result of a schedule execution.
+	EventCount *uint `json:"eventCount,omitempty" xmlrpc:"eventCount,omitempty"`
+
+	// Events which have been created as the result of a schedule execution.
+	Events []Network_Storage_Event `json:"events,omitempty" xmlrpc:"events,omitempty"`
+
+	// The hour parameter of this schedule.
+	Hour *string `json:"hour,omitempty" xmlrpc:"hour,omitempty"`
+
+	// A schedule's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The minute parameter of this schedule.
+	Minute *string `json:"minute,omitempty" xmlrpc:"minute,omitempty"`
+
+	// The date a schedule was last modified.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// The month of the year parameter of this schedule.
+	MonthOfYear *string `json:"monthOfYear,omitempty" xmlrpc:"monthOfYear,omitempty"`
+
+	// A schedule's name, for example 'Daily'.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// The associated partnership for a schedule.
+	Partnership *Network_Storage_Partnership `json:"partnership,omitempty" xmlrpc:"partnership,omitempty"`
+
+	// The partnership id which a schedule is associated with.
+	PartnershipId *int `json:"partnershipId,omitempty" xmlrpc:"partnershipId,omitempty"`
+
+	// Properties used for configuration of a schedule.
+ Properties []Network_Storage_Schedule_Property `json:"properties,omitempty" xmlrpc:"properties,omitempty"` + + // A count of properties used for configuration of a schedule. + PropertyCount *uint `json:"propertyCount,omitempty" xmlrpc:"propertyCount,omitempty"` + + // A count of replica snapshots which have been created as the result of this schedule's execution. + ReplicaSnapshotCount *uint `json:"replicaSnapshotCount,omitempty" xmlrpc:"replicaSnapshotCount,omitempty"` + + // Replica snapshots which have been created as the result of this schedule's execution. + ReplicaSnapshots []Network_Storage `json:"replicaSnapshots,omitempty" xmlrpc:"replicaSnapshots,omitempty"` + + // The number of snapshots this schedule is configured to retain. + RetentionCount *string `json:"retentionCount,omitempty" xmlrpc:"retentionCount,omitempty"` + + // The minute parameter of this schedule. + Second *string `json:"second,omitempty" xmlrpc:"second,omitempty"` + + // A count of snapshots which have been created as the result of this schedule's execution. + SnapshotCount *uint `json:"snapshotCount,omitempty" xmlrpc:"snapshotCount,omitempty"` + + // Snapshots which have been created as the result of this schedule's execution. + Snapshots []Network_Storage `json:"snapshots,omitempty" xmlrpc:"snapshots,omitempty"` + + // The type provides a standardized definition for a schedule. + Type *Network_Storage_Schedule_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The type id which a schedule is associated with. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // The associated volume for a schedule. + Volume *Network_Storage `json:"volume,omitempty" xmlrpc:"volume,omitempty"` + + // The volume id which a schedule is associated with. + VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"` +} + +// Schedule properties provide attributes such as start date, end date, interval, and other properties to a storage schedule. +type Network_Storage_Schedule_Property struct { + Entity + + // The date a schedule property was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A schedule property's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date a schedule property was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The associated schedule for a property. + Schedule *Network_Storage_Schedule `json:"schedule,omitempty" xmlrpc:"schedule,omitempty"` + + // The type provides a standardized definition for a property. + Type *Network_Storage_Schedule_Property_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // An identifier for the type of a property. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // The value of a property. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// A schedule property type is used to allow for a standardized method of defining network storage schedules. +type Network_Storage_Schedule_Property_Type struct { + Entity + + // A type's description, for example 'Date for the schedule to start.'. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A schedule property type's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A schedule property type's key name, for example 'START_DATE'. 
+ Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // A schedule property type's name, for example 'Start Date'. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The type of Storage volume type which a property type may be associated with. + NasType *string `json:"nasType,omitempty" xmlrpc:"nasType,omitempty"` +} + +// A schedule type is used to define what a schedule was created to do. When creating a schedule to take snapshots of a volume, the 'Snapshot' schedule type would be used. +type Network_Storage_Schedule_Type struct { + Entity + + // A schedule type's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A schedule type's key name, for example 'SNAPSHOT'. + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // A schedule type's name, for example 'Snapshot'. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Storage_Snapshot struct { + Network_Storage + + // If applicable, the schedule which was executed to create a snapshot. + CreationSchedule *Network_Storage_Schedule `json:"creationSchedule,omitempty" xmlrpc:"creationSchedule,omitempty"` + + // The volume name for the snapshot. + VolumeName *string `json:"volumeName,omitempty" xmlrpc:"volumeName,omitempty"` +} + +// The SoftLayer_Network_Storage_Type contains a description of the associated SoftLayer_Network_Storage object. +type Network_Storage_Type struct { + Entity + + // Human readable description for the associated SoftLayer_Network_Storage object. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // ID which corresponds with storageTypeId on storage objects. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Machine readable description code for the associated SoftLayer_Network_Storage object. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A count of the SoftLayer_Network_Storage object that uses this type. + VolumeCount *uint `json:"volumeCount,omitempty" xmlrpc:"volumeCount,omitempty"` + + // The SoftLayer_Network_Storage object that uses this type. + Volumes []Network_Storage `json:"volumes,omitempty" xmlrpc:"volumes,omitempty"` +} + +// The SoftLayer_Network_Subnet data type contains general information relating to a single SoftLayer subnet. Personal information in this type such as names, addresses, and phone numbers are assigned to the account only and not to users belonging to the account. +type Network_Subnet struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // If present, the active registration for this subnet. + ActiveRegistration *Network_Subnet_Registration `json:"activeRegistration,omitempty" xmlrpc:"activeRegistration,omitempty"` + + // All the swip transactions associated with a subnet that are still active. + ActiveSwipTransaction *Network_Subnet_Swip_Transaction `json:"activeSwipTransaction,omitempty" xmlrpc:"activeSwipTransaction,omitempty"` + + // The billing item for a subnet. + ActiveTransaction *Provisioning_Version1_Transaction `json:"activeTransaction,omitempty" xmlrpc:"activeTransaction,omitempty"` + + // Identifier which distinguishes whether the subnet is public or private address space. 
+ AddressSpace *string `json:"addressSpace,omitempty" xmlrpc:"addressSpace,omitempty"` + + // The SoftLayer_Network_Storage_Allowed_Host information to connect this Subnet to Network Storage supporting access control lists. + AllowedHost *Network_Storage_Allowed_Host `json:"allowedHost,omitempty" xmlrpc:"allowedHost,omitempty"` + + // The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. + AllowedNetworkStorage []Network_Storage `json:"allowedNetworkStorage,omitempty" xmlrpc:"allowedNetworkStorage,omitempty"` + + // A count of the SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. + AllowedNetworkStorageCount *uint `json:"allowedNetworkStorageCount,omitempty" xmlrpc:"allowedNetworkStorageCount,omitempty"` + + // A count of the SoftLayer_Network_Storage objects whose Replica that this SoftLayer_Hardware has access to. + AllowedNetworkStorageReplicaCount *uint `json:"allowedNetworkStorageReplicaCount,omitempty" xmlrpc:"allowedNetworkStorageReplicaCount,omitempty"` + + // The SoftLayer_Network_Storage objects whose Replica that this SoftLayer_Hardware has access to. + AllowedNetworkStorageReplicas []Network_Storage `json:"allowedNetworkStorageReplicas,omitempty" xmlrpc:"allowedNetworkStorageReplicas,omitempty"` + + // The billing item for a subnet. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // A count of + BoundDescendantCount *uint `json:"boundDescendantCount,omitempty" xmlrpc:"boundDescendantCount,omitempty"` + + // no documentation yet + BoundDescendants []Network_Subnet `json:"boundDescendants,omitempty" xmlrpc:"boundDescendants,omitempty"` + + // A count of + BoundRouterCount *uint `json:"boundRouterCount,omitempty" xmlrpc:"boundRouterCount,omitempty"` + + // Whether or not this subnet is associated with a router. Subnets that are not associated with a router cannot be routed. + BoundRouterFlag *bool `json:"boundRouterFlag,omitempty" xmlrpc:"boundRouterFlag,omitempty"` + + // no documentation yet + BoundRouters []Hardware `json:"boundRouters,omitempty" xmlrpc:"boundRouters,omitempty"` + + // The last IP address in a subnet is the subnet's broadcast address. This is an IP address that will broadcast network requests to the entire subnet and may not be assigned to a network interface. + BroadcastAddress *string `json:"broadcastAddress,omitempty" xmlrpc:"broadcastAddress,omitempty"` + + // no documentation yet + Children []Network_Subnet `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // A subnet's Classless Inter-Domain Routing prefix. This is a number between 0 and 32 signifying the number of bits in a subnet's netmask. These bits separate a subnet's network address from it's host addresses. It performs the same function as the ''netmask'' property, but is represented as an integer. + Cidr *int `json:"cidr,omitempty" xmlrpc:"cidr,omitempty"` + + // The data center this subnet may be routed within. 
+ Datacenter *Location_Datacenter `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // A count of + DescendantCount *uint `json:"descendantCount,omitempty" xmlrpc:"descendantCount,omitempty"` + + // no documentation yet + Descendants []Network_Subnet `json:"descendants,omitempty" xmlrpc:"descendants,omitempty"` + + // no documentation yet + DisplayLabel *string `json:"displayLabel,omitempty" xmlrpc:"displayLabel,omitempty"` + + // A static routed ip address + EndPointIpAddress *Network_Subnet_IpAddress `json:"endPointIpAddress,omitempty" xmlrpc:"endPointIpAddress,omitempty"` + + // A subnet's gateway address. This is an IP address that belongs to the router on the subnet and may not be assigned to a network interface. + Gateway *string `json:"gateway,omitempty" xmlrpc:"gateway,omitempty"` + + // no documentation yet + GlobalIpRecord *Network_Subnet_IpAddress_Global `json:"globalIpRecord,omitempty" xmlrpc:"globalIpRecord,omitempty"` + + // The hardware using IP addresses on this subnet. + Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // A count of the hardware using IP addresses on this subnet. + HardwareCount *uint `json:"hardwareCount,omitempty" xmlrpc:"hardwareCount,omitempty"` + + // A subnet's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of all the ip addresses associated with a subnet. + IpAddressCount *uint `json:"ipAddressCount,omitempty" xmlrpc:"ipAddressCount,omitempty"` + + // All the ip addresses associated with a subnet. + IpAddresses []Network_Subnet_IpAddress `json:"ipAddresses,omitempty" xmlrpc:"ipAddresses,omitempty"` + + // no documentation yet + IsCustomerOwned *bool `json:"isCustomerOwned,omitempty" xmlrpc:"isCustomerOwned,omitempty"` + + // no documentation yet + IsCustomerRoutable *bool `json:"isCustomerRoutable,omitempty" xmlrpc:"isCustomerRoutable,omitempty"` + + // The last time this subnet was last modified + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A bitmask in dotted-quad format that is used to separate a subnet's network address from it's host addresses. This performs the same function as the ''cidr'' property, but is expressed in a string format. + Netmask *string `json:"netmask,omitempty" xmlrpc:"netmask,omitempty"` + + // The upstream network component firewall. + NetworkComponentFirewall *Network_Component_Firewall `json:"networkComponentFirewall,omitempty" xmlrpc:"networkComponentFirewall,omitempty"` + + // The Private Network identifier this subnet is within, if applicable. + NetworkId *int `json:"networkId,omitempty" xmlrpc:"networkId,omitempty"` + + // A subnet's network identifier. This is the first IP address of a subnet and may not be assigned to a network interface. + NetworkIdentifier *string `json:"networkIdentifier,omitempty" xmlrpc:"networkIdentifier,omitempty"` + + // A count of + NetworkProtectionAddressCount *uint `json:"networkProtectionAddressCount,omitempty" xmlrpc:"networkProtectionAddressCount,omitempty"` + + // no documentation yet + NetworkProtectionAddresses []Network_Protection_Address `json:"networkProtectionAddresses,omitempty" xmlrpc:"networkProtectionAddresses,omitempty"` + + // A count of iPSec network tunnels that have access to a private subnet. + NetworkTunnelContextCount *uint `json:"networkTunnelContextCount,omitempty" xmlrpc:"networkTunnelContextCount,omitempty"` + + // IPSec network tunnels that have access to a private subnet. 
+ NetworkTunnelContexts []Network_Tunnel_Module_Context `json:"networkTunnelContexts,omitempty" xmlrpc:"networkTunnelContexts,omitempty"` + + // The VLAN object that a subnet is associated with. + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // A subnet's associated VLAN's internal identifier. + NetworkVlanId *int `json:"networkVlanId,omitempty" xmlrpc:"networkVlanId,omitempty"` + + // This is the note field. + Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"` + + // The pod in which this subnet resides. + PodName *string `json:"podName,omitempty" xmlrpc:"podName,omitempty"` + + // A count of + ProtectedIpAddressCount *uint `json:"protectedIpAddressCount,omitempty" xmlrpc:"protectedIpAddressCount,omitempty"` + + // no documentation yet + ProtectedIpAddresses []Network_Subnet_IpAddress `json:"protectedIpAddresses,omitempty" xmlrpc:"protectedIpAddresses,omitempty"` + + // no documentation yet + RegionalInternetRegistry *Network_Regional_Internet_Registry `json:"regionalInternetRegistry,omitempty" xmlrpc:"regionalInternetRegistry,omitempty"` + + // A count of all registrations that have been created for this subnet. + RegistrationCount *uint `json:"registrationCount,omitempty" xmlrpc:"registrationCount,omitempty"` + + // All registrations that have been created for this subnet. + Registrations []Network_Subnet_Registration `json:"registrations,omitempty" xmlrpc:"registrations,omitempty"` + + // The reverse DNS domain associated with this subnet. + ReverseDomain *Dns_Domain `json:"reverseDomain,omitempty" xmlrpc:"reverseDomain,omitempty"` + + // An identifier of the role the subnet is within. Roles dictate how a subnet may be used. + RoleKeyName *string `json:"roleKeyName,omitempty" xmlrpc:"roleKeyName,omitempty"` + + // The name of the role the subnet is within. Roles dictate how a subnet may be used. + RoleName *string `json:"roleName,omitempty" xmlrpc:"roleName,omitempty"` + + // The identifier for the type of route then subnet is currently configured for. + RoutingTypeKeyName *string `json:"routingTypeKeyName,omitempty" xmlrpc:"routingTypeKeyName,omitempty"` + + // The name for the type of route then subnet is currently configured for. + RoutingTypeName *string `json:"routingTypeName,omitempty" xmlrpc:"routingTypeName,omitempty"` + + // A subnet can be one of several types. PRIMARY, ADDITIONAL_PRIMARY, SECONDARY, ROUTED_TO_VLAN, SECONDARY_ON_VLAN, and STATIC_IP_ROUTED. The type determines the order in which many subnets are sorted in the SoftLayer customer portal. This groups subnets of similar type together. + SortOrder *string `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"` + + // A subnet can be one of several types. PRIMARY, ADDITIONAL_PRIMARY, SECONDARY, ROUTED_TO_VLAN, SECONDARY_ON_VLAN, STORAGE_NETWORK, and STATIC_IP_ROUTED. A "PRIMARY" subnet is the primary network bound to a VLAN within the softlayer network. An "ADDITIONAL_PRIMARY" subnet is bound to a network VLAN to augment the pool of available primary IP addresses that may be assigned to a server. A "SECONDARY" subnet is any of the secondary subnet's bound to a VLAN interface. A "ROUTED_TO_VLAN" subnet is a portable subnet that can be routed to any server on a vlan. A "SECONDARY_ON_VLAN" subnet also doesn't exist as a VLAN interface, but is routed directly to a VLAN instead of a single IP address by SoftLayer's routers. + SubnetType *string `json:"subnetType,omitempty" xmlrpc:"subnetType,omitempty"` + + // All the swip transactions associated with a subnet. 
+ SwipTransaction []Network_Subnet_Swip_Transaction `json:"swipTransaction,omitempty" xmlrpc:"swipTransaction,omitempty"` + + // A count of all the swip transactions associated with a subnet. + SwipTransactionCount *uint `json:"swipTransactionCount,omitempty" xmlrpc:"swipTransactionCount,omitempty"` + + // The number of IP addresses contained within this subnet. + TotalIpAddresses *Float64 `json:"totalIpAddresses,omitempty" xmlrpc:"totalIpAddresses,omitempty"` + + // A count of + UnboundDescendantCount *uint `json:"unboundDescendantCount,omitempty" xmlrpc:"unboundDescendantCount,omitempty"` + + // no documentation yet + UnboundDescendants []Network_Subnet `json:"unboundDescendants,omitempty" xmlrpc:"unboundDescendants,omitempty"` + + // The number of IP addresses that can be addressed within this subnet. For IPv4 subnets with a CIDR value of at most 30, a discount of 3 is taken from the total number of IP addresses for the subnet's unusable network, gateway and broadcast IP addresses. For IPv6 subnets with a CIDR value of at most 126, a discount of 2 is taken for the subnet's network and gateway IP addresses. + UsableIpAddressCount *Float64 `json:"usableIpAddressCount,omitempty" xmlrpc:"usableIpAddressCount,omitempty"` + + // Provides the total number of utilized IP addresses on this subnet. The primary consumer of IP addresses are compute resources, which can consume more than one address. This value is only supported for primary subnet types. + UtilizedIpAddressCount *uint `json:"utilizedIpAddressCount,omitempty" xmlrpc:"utilizedIpAddressCount,omitempty"` + + // This is the Internet Protocol version. Current values may be either 4 or 6. + Version *int `json:"version,omitempty" xmlrpc:"version,omitempty"` + + // A count of the Virtual Servers using IP addresses on this subnet. + VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"` + + // The Virtual Servers using IP addresses on this subnet. + VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"` +} + +// The SoftLayer_Network_Subnet_IpAddress data type contains general information relating to a single SoftLayer IPv4 address. +type Network_Subnet_IpAddress struct { + Entity + + // The SoftLayer_Network_Storage_Allowed_Host information to connect this IP Address to Network Storage supporting access control lists. + AllowedHost *Network_Storage_Allowed_Host `json:"allowedHost,omitempty" xmlrpc:"allowedHost,omitempty"` + + // The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. + AllowedNetworkStorage []Network_Storage `json:"allowedNetworkStorage,omitempty" xmlrpc:"allowedNetworkStorage,omitempty"` + + // A count of the SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. + AllowedNetworkStorageCount *uint `json:"allowedNetworkStorageCount,omitempty" xmlrpc:"allowedNetworkStorageCount,omitempty"` + + // A count of the SoftLayer_Network_Storage objects whose Replica that this SoftLayer_Hardware has access to. + AllowedNetworkStorageReplicaCount *uint `json:"allowedNetworkStorageReplicaCount,omitempty" xmlrpc:"allowedNetworkStorageReplicaCount,omitempty"` + + // The SoftLayer_Network_Storage objects whose Replica that this SoftLayer_Hardware has access to. + AllowedNetworkStorageReplicas []Network_Storage `json:"allowedNetworkStorageReplicas,omitempty" xmlrpc:"allowedNetworkStorageReplicas,omitempty"` + + // The application delivery controller using this address. 
+ ApplicationDeliveryController *Network_Application_Delivery_Controller `json:"applicationDeliveryController,omitempty" xmlrpc:"applicationDeliveryController,omitempty"` + + // A count of an IPSec network tunnel's address translations. These translations use a SoftLayer ip address from an assigned static NAT subnet to deliver the packets to the remote (customer) destination. + ContextTunnelTranslationCount *uint `json:"contextTunnelTranslationCount,omitempty" xmlrpc:"contextTunnelTranslationCount,omitempty"` + + // An IPSec network tunnel's address translations. These translations use a SoftLayer ip address from an assigned static NAT subnet to deliver the packets to the remote (customer) destination. + ContextTunnelTranslations []Network_Tunnel_Module_Context_Address_Translation `json:"contextTunnelTranslations,omitempty" xmlrpc:"contextTunnelTranslations,omitempty"` + + // A count of all the subnets routed to an IP address. + EndpointSubnetCount *uint `json:"endpointSubnetCount,omitempty" xmlrpc:"endpointSubnetCount,omitempty"` + + // All the subnets routed to an IP address. + EndpointSubnets []Network_Subnet `json:"endpointSubnets,omitempty" xmlrpc:"endpointSubnets,omitempty"` + + // A network component that is statically routed to an IP address. + GuestNetworkComponent *Virtual_Guest_Network_Component `json:"guestNetworkComponent,omitempty" xmlrpc:"guestNetworkComponent,omitempty"` + + // A network component that is statically routed to an IP address. + GuestNetworkComponentBinding *Virtual_Guest_Network_Component_IpAddress `json:"guestNetworkComponentBinding,omitempty" xmlrpc:"guestNetworkComponentBinding,omitempty"` + + // A server that this IP address is routed to. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // An IP's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // An IP address expressed in dotted quad format. + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // Indicates if an IP address is reserved to be used as the network broadcast address and cannot be assigned to a network interface + IsBroadcast *bool `json:"isBroadcast,omitempty" xmlrpc:"isBroadcast,omitempty"` + + // Indicates if an IP address is reserved to a gateway and cannot be assigned to a network interface + IsGateway *bool `json:"isGateway,omitempty" xmlrpc:"isGateway,omitempty"` + + // Indicates if an IP address is reserved to a network address and cannot be assigned to a network interface + IsNetwork *bool `json:"isNetwork,omitempty" xmlrpc:"isNetwork,omitempty"` + + // Indicates if an IP address is reserved and cannot be assigned to a network interface + IsReserved *bool `json:"isReserved,omitempty" xmlrpc:"isReserved,omitempty"` + + // A network component that is statically routed to an IP address. + NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` + + // An IP address' user defined note. + Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"` + + // The network gateway appliance using this address as the private IP address. 
+ PrivateNetworkGateway *Network_Gateway `json:"privateNetworkGateway,omitempty" xmlrpc:"privateNetworkGateway,omitempty"` + + // no documentation yet + ProtectionAddress []Network_Protection_Address `json:"protectionAddress,omitempty" xmlrpc:"protectionAddress,omitempty"` + + // A count of + ProtectionAddressCount *uint `json:"protectionAddressCount,omitempty" xmlrpc:"protectionAddressCount,omitempty"` + + // The network gateway appliance using this address as the public IP address. + PublicNetworkGateway *Network_Gateway `json:"publicNetworkGateway,omitempty" xmlrpc:"publicNetworkGateway,omitempty"` + + // An IPMI-based management network component of the IP address. + RemoteManagementNetworkComponent *Network_Component `json:"remoteManagementNetworkComponent,omitempty" xmlrpc:"remoteManagementNetworkComponent,omitempty"` + + // An IP address' associated subnet. + Subnet *Network_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` + + // An IP address' subnet id. + SubnetId *int `json:"subnetId,omitempty" xmlrpc:"subnetId,omitempty"` + + // All events for this IP address stored in the datacenter syslogs from the last 24 hours + SyslogEventsOneDay []Network_Logging_Syslog `json:"syslogEventsOneDay,omitempty" xmlrpc:"syslogEventsOneDay,omitempty"` + + // A count of all events for this IP address stored in the datacenter syslogs from the last 24 hours + SyslogEventsOneDayCount *uint `json:"syslogEventsOneDayCount,omitempty" xmlrpc:"syslogEventsOneDayCount,omitempty"` + + // A count of all events for this IP address stored in the datacenter syslogs from the last 7 days + SyslogEventsSevenDayCount *uint `json:"syslogEventsSevenDayCount,omitempty" xmlrpc:"syslogEventsSevenDayCount,omitempty"` + + // All events for this IP address stored in the datacenter syslogs from the last 7 days + SyslogEventsSevenDays []Network_Logging_Syslog `json:"syslogEventsSevenDays,omitempty" xmlrpc:"syslogEventsSevenDays,omitempty"` + + // Top Ten network datacenter syslog events, grouped by destination port, for the last 24 hours + TopTenSyslogEventsByDestinationPortOneDay []Network_Logging_Syslog `json:"topTenSyslogEventsByDestinationPortOneDay,omitempty" xmlrpc:"topTenSyslogEventsByDestinationPortOneDay,omitempty"` + + // A count of top Ten network datacenter syslog events, grouped by destination port, for the last 24 hours + TopTenSyslogEventsByDestinationPortOneDayCount *uint `json:"topTenSyslogEventsByDestinationPortOneDayCount,omitempty" xmlrpc:"topTenSyslogEventsByDestinationPortOneDayCount,omitempty"` + + // A count of top Ten network datacenter syslog events, grouped by destination port, for the last 7 days + TopTenSyslogEventsByDestinationPortSevenDayCount *uint `json:"topTenSyslogEventsByDestinationPortSevenDayCount,omitempty" xmlrpc:"topTenSyslogEventsByDestinationPortSevenDayCount,omitempty"` + + // Top Ten network datacenter syslog events, grouped by destination port, for the last 7 days + TopTenSyslogEventsByDestinationPortSevenDays []Network_Logging_Syslog `json:"topTenSyslogEventsByDestinationPortSevenDays,omitempty" xmlrpc:"topTenSyslogEventsByDestinationPortSevenDays,omitempty"` + + // Top Ten network datacenter syslog events, grouped by source port, for the last 24 hours + TopTenSyslogEventsByProtocolsOneDay []Network_Logging_Syslog `json:"topTenSyslogEventsByProtocolsOneDay,omitempty" xmlrpc:"topTenSyslogEventsByProtocolsOneDay,omitempty"` + + // A count of top Ten network datacenter syslog events, grouped by source port, for the last 24 hours + TopTenSyslogEventsByProtocolsOneDayCount 
*uint `json:"topTenSyslogEventsByProtocolsOneDayCount,omitempty" xmlrpc:"topTenSyslogEventsByProtocolsOneDayCount,omitempty"` + + // A count of top Ten network datacenter syslog events, grouped by source port, for the last 7 days + TopTenSyslogEventsByProtocolsSevenDayCount *uint `json:"topTenSyslogEventsByProtocolsSevenDayCount,omitempty" xmlrpc:"topTenSyslogEventsByProtocolsSevenDayCount,omitempty"` + + // Top Ten network datacenter syslog events, grouped by source port, for the last 7 days + TopTenSyslogEventsByProtocolsSevenDays []Network_Logging_Syslog `json:"topTenSyslogEventsByProtocolsSevenDays,omitempty" xmlrpc:"topTenSyslogEventsByProtocolsSevenDays,omitempty"` + + // Top Ten network datacenter syslog events, grouped by source ip address, for the last 24 hours + TopTenSyslogEventsBySourceIpOneDay []Network_Logging_Syslog `json:"topTenSyslogEventsBySourceIpOneDay,omitempty" xmlrpc:"topTenSyslogEventsBySourceIpOneDay,omitempty"` + + // A count of top Ten network datacenter syslog events, grouped by source ip address, for the last 24 hours + TopTenSyslogEventsBySourceIpOneDayCount *uint `json:"topTenSyslogEventsBySourceIpOneDayCount,omitempty" xmlrpc:"topTenSyslogEventsBySourceIpOneDayCount,omitempty"` + + // A count of top Ten network datacenter syslog events, grouped by source ip address, for the last 7 days + TopTenSyslogEventsBySourceIpSevenDayCount *uint `json:"topTenSyslogEventsBySourceIpSevenDayCount,omitempty" xmlrpc:"topTenSyslogEventsBySourceIpSevenDayCount,omitempty"` + + // Top Ten network datacenter syslog events, grouped by source ip address, for the last 7 days + TopTenSyslogEventsBySourceIpSevenDays []Network_Logging_Syslog `json:"topTenSyslogEventsBySourceIpSevenDays,omitempty" xmlrpc:"topTenSyslogEventsBySourceIpSevenDays,omitempty"` + + // Top Ten network datacenter syslog events, grouped by source port, for the last 24 hours + TopTenSyslogEventsBySourcePortOneDay []Network_Logging_Syslog `json:"topTenSyslogEventsBySourcePortOneDay,omitempty" xmlrpc:"topTenSyslogEventsBySourcePortOneDay,omitempty"` + + // A count of top Ten network datacenter syslog events, grouped by source port, for the last 24 hours + TopTenSyslogEventsBySourcePortOneDayCount *uint `json:"topTenSyslogEventsBySourcePortOneDayCount,omitempty" xmlrpc:"topTenSyslogEventsBySourcePortOneDayCount,omitempty"` + + // A count of top Ten network datacenter syslog events, grouped by source port, for the last 7 days + TopTenSyslogEventsBySourcePortSevenDayCount *uint `json:"topTenSyslogEventsBySourcePortSevenDayCount,omitempty" xmlrpc:"topTenSyslogEventsBySourcePortSevenDayCount,omitempty"` + + // Top Ten network datacenter syslog events, grouped by source port, for the last 7 days + TopTenSyslogEventsBySourcePortSevenDays []Network_Logging_Syslog `json:"topTenSyslogEventsBySourcePortSevenDays,omitempty" xmlrpc:"topTenSyslogEventsBySourcePortSevenDays,omitempty"` + + // A virtual guest that this IP address is routed to. + VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"` + + // A count of virtual licenses allocated for an IP Address. + VirtualLicenseCount *uint `json:"virtualLicenseCount,omitempty" xmlrpc:"virtualLicenseCount,omitempty"` + + // Virtual licenses allocated for an IP Address. 
+ VirtualLicenses []Software_VirtualLicense `json:"virtualLicenses,omitempty" xmlrpc:"virtualLicenses,omitempty"` +} + +// no documentation yet +type Network_Subnet_IpAddress_Global struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The active transaction associated with this Global IP. + ActiveTransaction *Provisioning_Version1_Transaction `json:"activeTransaction,omitempty" xmlrpc:"activeTransaction,omitempty"` + + // The billing item for this Global IP. + BillingItem *Billing_Item_Network_Subnet_IpAddress_Global `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // A Global IP Address' associated description + Description *int `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + DestinationIpAddress *Network_Subnet_IpAddress `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"` + + // A Global IP Address' associated [[SoftLayer_Network_Subnet_IpAddress|ipAddress]] ID + DestinationIpAddressId *int `json:"destinationIpAddressId,omitempty" xmlrpc:"destinationIpAddressId,omitempty"` + + // A Global IP Address' unique identifier + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + IpAddress *Network_Subnet_IpAddress `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // A Global IP Address' associated [[SoftLayer_Account|account]] ID + IpAddressId *int `json:"ipAddressId,omitempty" xmlrpc:"ipAddressId,omitempty"` + + // A Global IP Address' associated type [[SoftLayer_Network_Subnet_IpAddress_Global_Type|id]] ID + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// The SoftLayer_Network_Subnet_IpAddress data type contains general information relating to a single SoftLayer IPv6 address. +type Network_Subnet_IpAddress_Version6 struct { + Network_Subnet_IpAddress + + // The network gateway appliance using this address as the public IPv6 address. + PublicVersion6NetworkGateway *Network_Gateway `json:"publicVersion6NetworkGateway,omitempty" xmlrpc:"publicVersion6NetworkGateway,omitempty"` +} + +// The subnet registration data type contains general information relating to a single subnet registration instance. These registration instances can be updated to reflect changes, and will record the changes in the [[SoftLayer_Network_Subnet_Registration_Event|events]]. +type Network_Subnet_Registration struct { + Entity + + // The account that this registration belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The registration object's associated [[SoftLayer_Account|account]] id + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The CIDR prefix for the registered subnet + Cidr *int `json:"cidr,omitempty" xmlrpc:"cidr,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of the cross-reference records that tie the [[SoftLayer_Account_Regional_Registry_Detail]] objects to the registration object. + DetailReferenceCount *uint `json:"detailReferenceCount,omitempty" xmlrpc:"detailReferenceCount,omitempty"` + + // The cross-reference records that tie the [[SoftLayer_Account_Regional_Registry_Detail]] objects to the registration object. + DetailReferences []Network_Subnet_Registration_Details `json:"detailReferences,omitempty" xmlrpc:"detailReferences,omitempty"` + + // A count of the related registration events. 
+ EventCount *uint `json:"eventCount,omitempty" xmlrpc:"eventCount,omitempty"` + + // The related registration events. + Events []Network_Subnet_Registration_Event `json:"events,omitempty" xmlrpc:"events,omitempty"` + + // Unique ID of the registration object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The "network" detail object. + NetworkDetail *Account_Regional_Registry_Detail `json:"networkDetail,omitempty" xmlrpc:"networkDetail,omitempty"` + + // The RIR-specific handle or name of the registered subnet. This field is read-only. + NetworkHandle *string `json:"networkHandle,omitempty" xmlrpc:"networkHandle,omitempty"` + + // The base IP address of the registered subnet + NetworkIdentifier *string `json:"networkIdentifier,omitempty" xmlrpc:"networkIdentifier,omitempty"` + + // The "person" detail object. + PersonDetail *Account_Regional_Registry_Detail `json:"personDetail,omitempty" xmlrpc:"personDetail,omitempty"` + + // The related Regional Internet Registry. + RegionalInternetRegistry *Network_Regional_Internet_Registry `json:"regionalInternetRegistry,omitempty" xmlrpc:"regionalInternetRegistry,omitempty"` + + // The RIR handle that this registration object belongs to. This field may not be populated until the registration is complete. + RegionalInternetRegistryHandle *Account_Rwhois_Handle `json:"regionalInternetRegistryHandle,omitempty" xmlrpc:"regionalInternetRegistryHandle,omitempty"` + + // The registration object's associated [[SoftLayer_Account_Rwhois_Handle|RIR handle]] id + RegionalInternetRegistryHandleId *int `json:"regionalInternetRegistryHandleId,omitempty" xmlrpc:"regionalInternetRegistryHandleId,omitempty"` + + // The registration object's associated [[SoftLayer_Network_Regional_Internet_Registry|RIR]] id + RegionalInternetRegistryId *int `json:"regionalInternetRegistryId,omitempty" xmlrpc:"regionalInternetRegistryId,omitempty"` + + // The status of this registration. + Status *Network_Subnet_Registration_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The registration object's associated [[SoftLayer_Network_Subnet_Registration_Status|status]] id + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // The subnet that this registration pertains to. + Subnet *Network_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` +} + +// APNIC-specific registration object. For more detail see [[SoftLayer_Network_Subnet_Registration (type)|SoftLayer_Network_Subnet_Registration]]. +type Network_Subnet_Registration_Apnic struct { + Network_Subnet_Registration +} + +// ARIN-specific registration object. For more detail see [[SoftLayer_Network_Subnet_Registration (type)|SoftLayer_Network_Subnet_Registration]]. +type Network_Subnet_Registration_Arin struct { + Network_Subnet_Registration +} + +// The SoftLayer_Network_Subnet_Registration_Details objects are used to relate [[SoftLayer_Account_Regional_Registry_Detail]] objects to a [[SoftLayer_Network_Subnet_Registration]] object. This allows for easy reuse of registration details. It is important to note that only one detail object per type may be associated to a registration object. +type Network_Subnet_Registration_Details struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The related [[SoftLayer_Account_Regional_Registry_Detail|detail object]]. 
+ Detail *Account_Regional_Registry_Detail `json:"detail,omitempty" xmlrpc:"detail,omitempty"` + + // Numeric ID of the related [[SoftLayer_Account_Regional_Registry_Detail]] object + DetailId *int `json:"detailId,omitempty" xmlrpc:"detailId,omitempty"` + + // Unique numeric ID of the object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The related [[SoftLayer_Network_Subnet_Registration|registration object]]. + Registration *Network_Subnet_Registration `json:"registration,omitempty" xmlrpc:"registration,omitempty"` + + // Numeric ID of the related [[SoftLayer_Network_Subnet_Registration]] object + RegistrationId *int `json:"registrationId,omitempty" xmlrpc:"registrationId,omitempty"` +} + +// Each time a [[SoftLayer_Network_Subnet_Registration|subnet registration]] object is created or modified, the system will generate an event for it. Additional actions that would create an event include RIR responses and error cases. * +type Network_Subnet_Registration_Event struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Unique numeric ID of the event object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A string message indicating what took place during this event + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The registration this event pertains to. + Registration *Network_Subnet_Registration `json:"registration,omitempty" xmlrpc:"registration,omitempty"` + + // The numeric ID of the related [[SoftLayer_Network_Subnet_Registration]] object + RegistrationId *int `json:"registrationId,omitempty" xmlrpc:"registrationId,omitempty"` + + // The type of this event. + Type *Network_Subnet_Registration_Event_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The numeric ID of the associated [[SoftLayer_Network_Subnet_Registration_Event_Type|event type]] object + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// Subnet Registration Event Type objects describe the nature of a [[SoftLayer_Network_Subnet_Registration_Event]] +// +// The standard values for these objects are as follows:
+// * REGISTRATION_CREATED - Indicates that the registration has been created
+// * REGISTRATION_UPDATED - Indicates that the registration has been updated
+// * REGISTRATION_CANCELLED - Indicates that the registration has been cancelled
+// * RIR_RESPONSE - Indicates that an action taken against the RIR has produced a response. More details will be provided in the event message.
+// * ERROR - Indicates that an error has been encountered. More details will be provided in the event message.
+// * NOTE - An employee or other system has entered a note regarding the registration. The note content will be provided in the event message.
    +type Network_Subnet_Registration_Event_Type struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Unique numeric ID of the event type object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Code-friendly string name of the event type + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Human-readable name of the event type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// RIPE-specific registration object. For more detail see [[SoftLayer_Network_Subnet_Registration (type)|SoftLayer_Network_Subnet_Registration]]. +type Network_Subnet_Registration_Ripe struct { + Network_Subnet_Registration +} + +// Subnet Registration Status objects describe the current status of a subnet registration. +// +// The standard values for these objects are as follows:
+// * OPEN - Indicates that the registration object is new and has yet to be submitted to the RIR
+// * PENDING - Indicates that the registration object has been submitted to the RIR and is awaiting response
+// * COMPLETE - Indicates that the RIR action has completed
+// * DELETED - Indicates that the registration object has been gracefully removed and is no longer valid
+// * CANCELLED - Indicates that the registration object has been abruptly removed and is no longer valid
    +type Network_Subnet_Registration_Status struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Unique numeric ID of the status object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Code-friendly string name of the status + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Human-readable name of the status + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Every SoftLayer customer account has contact information associated with it for reverse WHOIS purposes. An account's RWHOIS data, modeled by the SoftLayer_Network_Subnet_Rwhois_Data data type, is used by SoftLayer's reverse WHOIS server as well as for SWIP transactions. SoftLayer's reverse WHOIS servers respond to WHOIS queries for IP addresses belonging to a customer's servers, returning this RWHOIS data. +// +// A SoftLayer customer's RWHOIS data may not necessarily match their account or portal users' contact information. +type Network_Subnet_Rwhois_Data struct { + Entity + + // An email address associated with an account's RWHOIS data that is responsible for responding to network abuse queries about malicious traffic coming from your servers' IP addresses. + AbuseEmail *string `json:"abuseEmail,omitempty" xmlrpc:"abuseEmail,omitempty"` + + // The SoftLayer customer account associated with this reverse WHOIS data. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // An account's RWHOIS data's associated account identifier. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The first line of the mailing address associated with an account's RWHOIS data. + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // The second line of the mailing address associated with an account's RWHOIS data. + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // The city of the mailing address associated with an account's RWHOIS data. + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // The company name associated with an account's RWHOIS data. + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // A two-letter abbreviation of the country of the mailing address associated with an account's RWHOIS data. + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // The date an account's RWHOIS data was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The first name associated with an account's RWHOIS data. + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // An account's RWHOIS data's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The last name associated with an account's RWHOIS data. + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // The date an account's RWHOIS data was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The postal code of the mailing address associated with an account's RWHOIS data. + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // Whether an account's RWHOIS data refers to a private residence or not. 
+ PrivateResidenceFlag *bool `json:"privateResidenceFlag,omitempty" xmlrpc:"privateResidenceFlag,omitempty"` + + // A two-letter abbreviation of the state of the mailing address associated with an account's RWHOIS data. If an account does not reside in a province then this is typically blank. + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// The SoftLayer_Network_Subnet_Swip_Transaction data type contains basic information tracked at SoftLayer to allow automation of Swip creation, update, and removal requests. A specific transaction is attached to an accountId and a subnetId. This also contains a "Status Name" which tells the customer what the transaction is doing: +// +// +// * REQUEST QUEUED: Request is queued up to be sent to ARIN +// * REQUEST SENT: The email request has been sent to ARIN +// * REQUEST CONFIRMED: ARIN has confirmed that the request is good, and should be available in 24 hours +// * OK: The subnet has been checked with WHOIS and it the SWIP transaction has completed correctly +// * REMOVE QUEUED: A subnet is queued to be removed from ARIN's systems +// * REMOVE SENT: The removal email request has been sent to ARIN +// * REMOVE CONFIRMED: ARIN has confirmed that the removal request is good, and the subnet should be clear in WHOIS in 24 hours +// * DELETED: This specific SWIP Transaction has been removed from ARIN and is no longer in effect +// * SOFTLAYER MANUALLY PROCESSING: Sometimes a request doesn't go through correctly and has to be manually processed by SoftLayer. This may take some time. +type Network_Subnet_Swip_Transaction struct { + Entity + + // The Account whose RWHOIS data was used to SWIP this subnet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A SWIP transaction's unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A Name describing which state a SWIP transaction is in. + StatusName *string `json:"statusName,omitempty" xmlrpc:"statusName,omitempty"` + + // The subnet that this SWIP transaction was created for. + Subnet *Network_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` + + // ID Number of the Subnet for this SWIP transaction. + SubnetId *int `json:"subnetId,omitempty" xmlrpc:"subnetId,omitempty"` +} + +// no documentation yet +type Network_TippingPointReporting struct { + Entity +} + +// The SoftLayer_Network_Tunnel_Module_Context data type contains general information relating to a single SoftLayer network tunnel. The SoftLayer_Network_Tunnel_Module_Context is useful to gather information such as related customer subnets (remote) and internal subnets (local) associated with the network tunnel as well as other information needed to manage the network tunnel. Account and billing information related to the network tunnel can also be retrieved. +type Network_Tunnel_Module_Context struct { + Entity + + // The account that a network tunnel belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A network tunnel's account identifier. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The transaction that is currently applying configurations for the network tunnel. + ActiveTransaction *Provisioning_Version1_Transaction `json:"activeTransaction,omitempty" xmlrpc:"activeTransaction,omitempty"` + + // A count of a network tunnel's address translations. 
+ AddressTranslationCount *uint `json:"addressTranslationCount,omitempty" xmlrpc:"addressTranslationCount,omitempty"` + + // A network tunnel's address translations. + AddressTranslations []Network_Tunnel_Module_Context_Address_Translation `json:"addressTranslations,omitempty" xmlrpc:"addressTranslations,omitempty"` + + // A flag used to specify when advanced configurations, complex configurations that require manual setup, are being applied to network devices for a network tunnel. When the flag is set to true (1), a network tunnel cannot be configured through the management portal nor the API. + AdvancedConfigurationFlag *int `json:"advancedConfigurationFlag,omitempty" xmlrpc:"advancedConfigurationFlag,omitempty"` + + // A count of subnets that provide access to SoftLayer services such as the management portal and the SoftLayer API. + AllAvailableServiceSubnetCount *uint `json:"allAvailableServiceSubnetCount,omitempty" xmlrpc:"allAvailableServiceSubnetCount,omitempty"` + + // Subnets that provide access to SoftLayer services such as the management portal and the SoftLayer API. + AllAvailableServiceSubnets []Network_Subnet `json:"allAvailableServiceSubnets,omitempty" xmlrpc:"allAvailableServiceSubnets,omitempty"` + + // The current billing item for network tunnel. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The date a network tunnel was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The remote end of a network tunnel. This end of the network tunnel resides on an outside network and will be sending and receiving the IPSec packets. + CustomerPeerIpAddress *string `json:"customerPeerIpAddress,omitempty" xmlrpc:"customerPeerIpAddress,omitempty"` + + // A count of remote subnets that are allowed access through a network tunnel. + CustomerSubnetCount *uint `json:"customerSubnetCount,omitempty" xmlrpc:"customerSubnetCount,omitempty"` + + // Remote subnets that are allowed access through a network tunnel. + CustomerSubnets []Network_Customer_Subnet `json:"customerSubnets,omitempty" xmlrpc:"customerSubnets,omitempty"` + + // The datacenter location for one end of the network tunnel that allows access to account's private subnets. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // The name giving to a network tunnel by a user. + FriendlyName *string `json:"friendlyName,omitempty" xmlrpc:"friendlyName,omitempty"` + + // A network tunnel's unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The local end of a network tunnel. This end of the network tunnel resides on the SoftLayer networks and allows access to remote end of the tunnel to subnets on SoftLayer networks. + InternalPeerIpAddress *string `json:"internalPeerIpAddress,omitempty" xmlrpc:"internalPeerIpAddress,omitempty"` + + // A count of private subnets that can be accessed through the network tunnel. + InternalSubnetCount *uint `json:"internalSubnetCount,omitempty" xmlrpc:"internalSubnetCount,omitempty"` + + // Private subnets that can be accessed through the network tunnel. + InternalSubnets []Network_Subnet `json:"internalSubnets,omitempty" xmlrpc:"internalSubnets,omitempty"` + + // The date a network tunnel was last modified. + // + // NOTE: This date should NOT be used to determine when the network tunnel configurations were last applied to the network device. 
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A network tunnel's unique name used on the network device. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Authentication used to generate keys for protecting the negotiations for a network tunnel. + PhaseOneAuthentication *string `json:"phaseOneAuthentication,omitempty" xmlrpc:"phaseOneAuthentication,omitempty"` + + // Determines the strength of the key used in the key exchange process. The higher the group number the stronger the key is and the more secure it is. However, processing time will increase as the strength of the key increases. Both peers in the must use the Diffie-Hellman Group. + PhaseOneDiffieHellmanGroup *int `json:"phaseOneDiffieHellmanGroup,omitempty" xmlrpc:"phaseOneDiffieHellmanGroup,omitempty"` + + // Encryption used to generate keys for protecting the negotiations for a network tunnel. + PhaseOneEncryption *string `json:"phaseOneEncryption,omitempty" xmlrpc:"phaseOneEncryption,omitempty"` + + // Amount of time (in seconds) allowed to pass before the encryption key expires. A new key is generated without interrupting service. Valid times are from 120 to 172800 seconds. + PhaseOneKeylife *int `json:"phaseOneKeylife,omitempty" xmlrpc:"phaseOneKeylife,omitempty"` + + // The authentication used in phase 2 proposal negotiation process. + PhaseTwoAuthentication *string `json:"phaseTwoAuthentication,omitempty" xmlrpc:"phaseTwoAuthentication,omitempty"` + + // Determines the strength of the key used in the key exchange process. The higher the group number the stronger the key is and the more secure it is. However, processing time will increase as the strength of the key increases. Both peers must use the Diffie-Hellman Group. + PhaseTwoDiffieHellmanGroup *int `json:"phaseTwoDiffieHellmanGroup,omitempty" xmlrpc:"phaseTwoDiffieHellmanGroup,omitempty"` + + // The encryption used in phase 2 proposal negotiation process. + PhaseTwoEncryption *string `json:"phaseTwoEncryption,omitempty" xmlrpc:"phaseTwoEncryption,omitempty"` + + // Amount of time (in seconds) allowed to pass before the encryption key expires. A new key is generated without interrupting service. Valid times are from 120 to 172800 seconds. + PhaseTwoKeylife *int `json:"phaseTwoKeylife,omitempty" xmlrpc:"phaseTwoKeylife,omitempty"` + + // Determines if the generated keys are made from previous keys. When PFS is specified, a Diffie-Hellman exchange occurs each time a new security association is negotiated. + PhaseTwoPerfectForwardSecrecy *int `json:"phaseTwoPerfectForwardSecrecy,omitempty" xmlrpc:"phaseTwoPerfectForwardSecrecy,omitempty"` + + // A key used so that peers authenticate each other. This key is hashed by using the phase one encryption and phase one authentication. + PresharedKey *string `json:"presharedKey,omitempty" xmlrpc:"presharedKey,omitempty"` + + // A count of service subnets that can be access through the network tunnel. + ServiceSubnetCount *uint `json:"serviceSubnetCount,omitempty" xmlrpc:"serviceSubnetCount,omitempty"` + + // Service subnets that can be access through the network tunnel. + ServiceSubnets []Network_Subnet `json:"serviceSubnets,omitempty" xmlrpc:"serviceSubnets,omitempty"` + + // A count of subnets used for a network tunnel's address translations. + StaticRouteSubnetCount *uint `json:"staticRouteSubnetCount,omitempty" xmlrpc:"staticRouteSubnetCount,omitempty"` + + // Subnets used for a network tunnel's address translations. 
+ StaticRouteSubnets []Network_Subnet `json:"staticRouteSubnets,omitempty" xmlrpc:"staticRouteSubnets,omitempty"` + + // The transaction history for this network tunnel. + TransactionHistory []Provisioning_Version1_Transaction `json:"transactionHistory,omitempty" xmlrpc:"transactionHistory,omitempty"` + + // A count of the transaction history for this network tunnel. + TransactionHistoryCount *uint `json:"transactionHistoryCount,omitempty" xmlrpc:"transactionHistoryCount,omitempty"` +} + +// The SoftLayer_Network_Tunnel_Module_Context_Address_Translation data type contains general information relating to a single address translation. Information such as notes, ip addresses, along with record information, and network tunnel data may be retrieved. +type Network_Tunnel_Module_Context_Address_Translation struct { + Entity + + // The ip address record that will receive the encrypted traffic. + CustomerIpAddress *string `json:"customerIpAddress,omitempty" xmlrpc:"customerIpAddress,omitempty"` + + // The unique identifier for the ip address record that will receive the encrypted traffic. + CustomerIpAddressId *int `json:"customerIpAddressId,omitempty" xmlrpc:"customerIpAddressId,omitempty"` + + // The ip address record for the ip that will receive the encrypted traffic from the IPSec network tunnel. + CustomerIpAddressRecord *Network_Customer_Subnet_IpAddress `json:"customerIpAddressRecord,omitempty" xmlrpc:"customerIpAddressRecord,omitempty"` + + // An address translation's unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The ip address record that will deliver the encrypted traffic. + InternalIpAddress *string `json:"internalIpAddress,omitempty" xmlrpc:"internalIpAddress,omitempty"` + + // The unique identifier for the ip address record that will deliver the encrypted traffic. + InternalIpAddressId *int `json:"internalIpAddressId,omitempty" xmlrpc:"internalIpAddressId,omitempty"` + + // The ip address record for the ip that will deliver the encrypted traffic from the IPSec network tunnel. + InternalIpAddressRecord *Network_Subnet_IpAddress `json:"internalIpAddressRecord,omitempty" xmlrpc:"internalIpAddressRecord,omitempty"` + + // The IPSec network tunnel an address translation belongs to. + NetworkTunnelContext *Network_Tunnel_Module_Context `json:"networkTunnelContext,omitempty" xmlrpc:"networkTunnelContext,omitempty"` + + // An address translation's network tunnel identifier. + NetworkTunnelContextId *int `json:"networkTunnelContextId,omitempty" xmlrpc:"networkTunnelContextId,omitempty"` + + // A name or description given to an address translation to help identify the address translation. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` +} + +// The SoftLayer_Network_Vlan data type models a single VLAN within SoftLayer's public and private networks. a Virtual LAN is a structure that associates network interfaces on routers, switches, and servers in different locations to act as if they were on the same local network broadcast domain. VLANs are a central part of the SoftLayer network. They can determine how new IP subnets are routed and how individual servers communicate to each other. +type Network_Vlan struct { + Entity + + // The SoftLayer customer account associated with a VLAN. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The internal identifier of the SoftLayer customer account that a VLAN is associated with. 
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of a VLAN's additional primary subnets. These are used to extend the number of servers attached to the VLAN by adding more ip addresses to the primary IP address pool. + AdditionalPrimarySubnetCount *uint `json:"additionalPrimarySubnetCount,omitempty" xmlrpc:"additionalPrimarySubnetCount,omitempty"` + + // A VLAN's additional primary subnets. These are used to extend the number of servers attached to the VLAN by adding more ip addresses to the primary IP address pool. + AdditionalPrimarySubnets []Network_Subnet `json:"additionalPrimarySubnets,omitempty" xmlrpc:"additionalPrimarySubnets,omitempty"` + + // The gateway this VLAN is inside of. + AttachedNetworkGateway *Network_Gateway `json:"attachedNetworkGateway,omitempty" xmlrpc:"attachedNetworkGateway,omitempty"` + + // Whether or not this VLAN is inside a gateway. + AttachedNetworkGatewayFlag *bool `json:"attachedNetworkGatewayFlag,omitempty" xmlrpc:"attachedNetworkGatewayFlag,omitempty"` + + // The inside VLAN record if this VLAN is inside a network gateway. + AttachedNetworkGatewayVlan *Network_Gateway_Vlan `json:"attachedNetworkGatewayVlan,omitempty" xmlrpc:"attachedNetworkGatewayVlan,omitempty"` + + // The billing item for a network vlan. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // A flag indicating that a network vlan is on a Hardware Firewall (Dedicated). + DedicatedFirewallFlag *int `json:"dedicatedFirewallFlag,omitempty" xmlrpc:"dedicatedFirewallFlag,omitempty"` + + // The extension router that a VLAN is associated with. + ExtensionRouter *Hardware_Router `json:"extensionRouter,omitempty" xmlrpc:"extensionRouter,omitempty"` + + // A count of a firewalled Vlan's network components. + FirewallGuestNetworkComponentCount *uint `json:"firewallGuestNetworkComponentCount,omitempty" xmlrpc:"firewallGuestNetworkComponentCount,omitempty"` + + // A firewalled Vlan's network components. + FirewallGuestNetworkComponents []Network_Component_Firewall `json:"firewallGuestNetworkComponents,omitempty" xmlrpc:"firewallGuestNetworkComponents,omitempty"` + + // A count of a firewalled vlan's inbound/outbound interfaces. + FirewallInterfaceCount *uint `json:"firewallInterfaceCount,omitempty" xmlrpc:"firewallInterfaceCount,omitempty"` + + // A firewalled vlan's inbound/outbound interfaces. + FirewallInterfaces []Network_Firewall_Module_Context_Interface `json:"firewallInterfaces,omitempty" xmlrpc:"firewallInterfaces,omitempty"` + + // A count of a firewalled Vlan's network components. + FirewallNetworkComponentCount *uint `json:"firewallNetworkComponentCount,omitempty" xmlrpc:"firewallNetworkComponentCount,omitempty"` + + // A firewalled Vlan's network components. + FirewallNetworkComponents []Network_Component_Firewall `json:"firewallNetworkComponents,omitempty" xmlrpc:"firewallNetworkComponents,omitempty"` + + // A count of the currently running rule set of a firewalled VLAN. + FirewallRuleCount *uint `json:"firewallRuleCount,omitempty" xmlrpc:"firewallRuleCount,omitempty"` + + // The currently running rule set of a firewalled VLAN. + FirewallRules []Network_Vlan_Firewall_Rule `json:"firewallRules,omitempty" xmlrpc:"firewallRules,omitempty"` + + // A count of the networking components that are connected to a VLAN. + GuestNetworkComponentCount *uint `json:"guestNetworkComponentCount,omitempty" xmlrpc:"guestNetworkComponentCount,omitempty"` + + // The networking components that are connected to a VLAN. 
+ GuestNetworkComponents []Virtual_Guest_Network_Component `json:"guestNetworkComponents,omitempty" xmlrpc:"guestNetworkComponents,omitempty"` + + // All of the hardware that exists on a VLAN. Hardware is associated with a VLAN by its networking components. + Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // A count of all of the hardware that exists on a VLAN. Hardware is associated with a VLAN by its networking components. + HardwareCount *uint `json:"hardwareCount,omitempty" xmlrpc:"hardwareCount,omitempty"` + + // no documentation yet + HighAvailabilityFirewallFlag *bool `json:"highAvailabilityFirewallFlag,omitempty" xmlrpc:"highAvailabilityFirewallFlag,omitempty"` + + // A VLAN's internal identifier. This should not be confused with the ''vlanNumber'' property, which is used in network configuration. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A flag indicating that a vlan can be assigned to a host that has local disk functionality. + LocalDiskStorageCapabilityFlag *bool `json:"localDiskStorageCapabilityFlag,omitempty" xmlrpc:"localDiskStorageCapabilityFlag,omitempty"` + + // The date a VLAN was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The optional name for this VLAN + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The network in which this VLAN resides. + Network *Network `json:"network,omitempty" xmlrpc:"network,omitempty"` + + // A count of the networking components that are connected to a VLAN. + NetworkComponentCount *uint `json:"networkComponentCount,omitempty" xmlrpc:"networkComponentCount,omitempty"` + + // A count of the network components that are connected to this VLAN through a trunk. + NetworkComponentTrunkCount *uint `json:"networkComponentTrunkCount,omitempty" xmlrpc:"networkComponentTrunkCount,omitempty"` + + // The network components that are connected to this VLAN through a trunk. + NetworkComponentTrunks []Network_Component_Network_Vlan_Trunk `json:"networkComponentTrunks,omitempty" xmlrpc:"networkComponentTrunks,omitempty"` + + // The networking components that are connected to a VLAN. + NetworkComponents []Network_Component `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"` + + // Identifier to denote whether a VLAN is used for public or private connectivity. + NetworkSpace *string `json:"networkSpace,omitempty" xmlrpc:"networkSpace,omitempty"` + + // The Hardware Firewall (Dedicated) for a network vlan. + NetworkVlanFirewall *Network_Vlan_Firewall `json:"networkVlanFirewall,omitempty" xmlrpc:"networkVlanFirewall,omitempty"` + + // The note for this vlan. + Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"` + + // The primary router that a VLAN is associated with. Every SoftLayer VLAN is connected to more than one router for greater network redundancy. + PrimaryRouter *Hardware_Router `json:"primaryRouter,omitempty" xmlrpc:"primaryRouter,omitempty"` + + // A VLAN's primary subnet. Each VLAN has at least one subnet, usually the subnet that is assigned to a server or new IP address block when it's purchased. + PrimarySubnet *Network_Subnet `json:"primarySubnet,omitempty" xmlrpc:"primarySubnet,omitempty"` + + // A count of + PrimarySubnetCount *uint `json:"primarySubnetCount,omitempty" xmlrpc:"primarySubnetCount,omitempty"` + + // The internal identifier of the primary subnet addressed on a VLAN. 
+ PrimarySubnetId *int `json:"primarySubnetId,omitempty" xmlrpc:"primarySubnetId,omitempty"` + + // A VLAN's primary IPv6 subnet. Some VLAN's may not have a primary IPv6 subnet. + PrimarySubnetVersion6 *Network_Subnet `json:"primarySubnetVersion6,omitempty" xmlrpc:"primarySubnetVersion6,omitempty"` + + // no documentation yet + PrimarySubnets []Network_Subnet `json:"primarySubnets,omitempty" xmlrpc:"primarySubnets,omitempty"` + + // A count of the gateways this VLAN is the private VLAN of. + PrivateNetworkGatewayCount *uint `json:"privateNetworkGatewayCount,omitempty" xmlrpc:"privateNetworkGatewayCount,omitempty"` + + // The gateways this VLAN is the private VLAN of. + PrivateNetworkGateways []Network_Gateway `json:"privateNetworkGateways,omitempty" xmlrpc:"privateNetworkGateways,omitempty"` + + // A count of + ProtectedIpAddressCount *uint `json:"protectedIpAddressCount,omitempty" xmlrpc:"protectedIpAddressCount,omitempty"` + + // no documentation yet + ProtectedIpAddresses []Network_Subnet_IpAddress `json:"protectedIpAddresses,omitempty" xmlrpc:"protectedIpAddresses,omitempty"` + + // A count of the gateways this VLAN is the public VLAN of. + PublicNetworkGatewayCount *uint `json:"publicNetworkGatewayCount,omitempty" xmlrpc:"publicNetworkGatewayCount,omitempty"` + + // The gateways this VLAN is the public VLAN of. + PublicNetworkGateways []Network_Gateway `json:"publicNetworkGateways,omitempty" xmlrpc:"publicNetworkGateways,omitempty"` + + // A flag indicating that a vlan can be assigned to a host that has SAN disk functionality. + SanStorageCapabilityFlag *bool `json:"sanStorageCapabilityFlag,omitempty" xmlrpc:"sanStorageCapabilityFlag,omitempty"` + + // A count of collection of scale VLANs this VLAN applies to. + ScaleVlanCount *uint `json:"scaleVlanCount,omitempty" xmlrpc:"scaleVlanCount,omitempty"` + + // Collection of scale VLANs this VLAN applies to. + ScaleVlans []Scale_Network_Vlan `json:"scaleVlans,omitempty" xmlrpc:"scaleVlans,omitempty"` + + // The secondary router that a VLAN is associated with. Every SoftLayer VLAN is connected to more than one router for greater network redundancy. + SecondaryRouter *Hardware `json:"secondaryRouter,omitempty" xmlrpc:"secondaryRouter,omitempty"` + + // A count of the subnets that exist as secondary interfaces on a VLAN + SecondarySubnetCount *uint `json:"secondarySubnetCount,omitempty" xmlrpc:"secondarySubnetCount,omitempty"` + + // The subnets that exist as secondary interfaces on a VLAN + SecondarySubnets []Network_Subnet `json:"secondarySubnets,omitempty" xmlrpc:"secondarySubnets,omitempty"` + + // A count of all of the subnets that exist as VLAN interfaces. + SubnetCount *uint `json:"subnetCount,omitempty" xmlrpc:"subnetCount,omitempty"` + + // All of the subnets that exist as VLAN interfaces. + Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"` + + // A count of references to all tags for this VLAN. + TagReferenceCount *uint `json:"tagReferenceCount,omitempty" xmlrpc:"tagReferenceCount,omitempty"` + + // References to all tags for this VLAN. + TagReferences []Tag_Reference `json:"tagReferences,omitempty" xmlrpc:"tagReferences,omitempty"` + + // The number of primary IP addresses in a VLAN. + TotalPrimaryIpAddressCount *uint `json:"totalPrimaryIpAddressCount,omitempty" xmlrpc:"totalPrimaryIpAddressCount,omitempty"` + + // The type of this VLAN. + Type *Network_Vlan_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // A count of all of the Virtual Servers that are connected to a VLAN. 
+ VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"` + + // All of the Virtual Servers that are connected to a VLAN. + VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"` + + // A VLAN's number as recorded within the SoftLayer network. This is configured directly on SoftLayer's networking equipment and should not be confused with a VLAN's ''id'' property. + VlanNumber *int `json:"vlanNumber,omitempty" xmlrpc:"vlanNumber,omitempty"` +} + +// The SoftLayer_Network_Vlan_Firewall data type contains general information relating to a single SoftLayer VLAN firewall. This is the object which ties the running rules to a specific downstream server. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. +type Network_Vlan_Firewall struct { + Entity + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A flag to indicate if the firewall is in administrative bypass mode. In other words, no rules are being applied to the traffic coming through. + AdministrativeBypassFlag *string `json:"administrativeBypassFlag,omitempty" xmlrpc:"administrativeBypassFlag,omitempty"` + + // A firewall's allotted bandwidth (measured in GB). + BandwidthAllocation *Float64 `json:"bandwidthAllocation,omitempty" xmlrpc:"bandwidthAllocation,omitempty"` + + // The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this firewall is attached to. + BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"` + + // A count of the raw bandwidth usage data for the current billing cycle. One object will be returned for each network this firewall is attached to. + BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"` + + // The raw private bandwidth usage data for the current billing cycle. + BillingCyclePrivateBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"` + + // The raw public bandwidth usage data for the current billing cycle. + BillingCyclePublicBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"` + + // The billing item for a Hardware Firewall (Dedicated). + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // Administrative bypass request status. + BypassRequestStatus *string `json:"bypassRequestStatus,omitempty" xmlrpc:"bypassRequestStatus,omitempty"` + + // Whether or not this firewall can be directly logged in to. + CustomerManagedFlag *bool `json:"customerManagedFlag,omitempty" xmlrpc:"customerManagedFlag,omitempty"` + + // The datacenter that the firewall resides in. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // The firewall device type. + FirewallType *string `json:"firewallType,omitempty" xmlrpc:"firewallType,omitempty"` + + // A name reflecting the hostname and domain of the firewall. This is created from the combined values of the firewall's logical name and vlan number automatically, and thus can not be edited directly. 
+ FullyQualifiedDomainName *string `json:"fullyQualifiedDomainName,omitempty" xmlrpc:"fullyQualifiedDomainName,omitempty"` + + // A firewall's unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The credentials to log in to a firewall device. This is only present for dedicated appliances. + ManagementCredentials *Software_Component_Password `json:"managementCredentials,omitempty" xmlrpc:"managementCredentials,omitempty"` + + // A firewall's metric tracking object. + MetricTrackingObject *Metric_Tracking_Object `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"` + + // The metric tracking object ID for this firewall. + MetricTrackingObjectId *int `json:"metricTrackingObjectId,omitempty" xmlrpc:"metricTrackingObjectId,omitempty"` + + // A count of the update requests made for this firewall. + NetworkFirewallUpdateRequestCount *uint `json:"networkFirewallUpdateRequestCount,omitempty" xmlrpc:"networkFirewallUpdateRequestCount,omitempty"` + + // The update requests made for this firewall. + NetworkFirewallUpdateRequests []Network_Firewall_Update_Request `json:"networkFirewallUpdateRequests,omitempty" xmlrpc:"networkFirewallUpdateRequests,omitempty"` + + // The gateway associated with this firewall, if any. + NetworkGateway *Network_Gateway `json:"networkGateway,omitempty" xmlrpc:"networkGateway,omitempty"` + + // The VLAN object that a firewall is associated with and protecting. + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // A count of the VLAN objects that a firewall is associated with and protecting. + NetworkVlanCount *uint `json:"networkVlanCount,omitempty" xmlrpc:"networkVlanCount,omitempty"` + + // The VLAN objects that a firewall is associated with and protecting. + NetworkVlans []Network_Vlan `json:"networkVlans,omitempty" xmlrpc:"networkVlans,omitempty"` + + // A firewall's primary IP address. This field will be the IP shown when doing network traces and reverse DNS and is a read-only property. + PrimaryIpAddress *string `json:"primaryIpAddress,omitempty" xmlrpc:"primaryIpAddress,omitempty"` + + // A count of the currently running rule set of this network component firewall. + RuleCount *uint `json:"ruleCount,omitempty" xmlrpc:"ruleCount,omitempty"` + + // The currently running rule set of this network component firewall. + Rules []Network_Vlan_Firewall_Rule `json:"rules,omitempty" xmlrpc:"rules,omitempty"` + + // A count of + TagReferenceCount *uint `json:"tagReferenceCount,omitempty" xmlrpc:"tagReferenceCount,omitempty"` + + // no documentation yet + TagReferences []Tag_Reference `json:"tagReferences,omitempty" xmlrpc:"tagReferences,omitempty"` + + // A firewall's associated upgrade request object, if any. + UpgradeRequest *Product_Upgrade_Request `json:"upgradeRequest,omitempty" xmlrpc:"upgradeRequest,omitempty"` +} + +// A SoftLayer_Network_Component_Firewall_Rule object type represents a currently running firewall rule and contains relative information. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. +type Network_Vlan_Firewall_Rule struct { + Entity + + // The action that the rule is to take [permit or deny]. + Action *string `json:"action,omitempty" xmlrpc:"action,omitempty"` + + // The destination IP address considered for determining rule application. 
+ DestinationIpAddress *string `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"` + + // The CIDR is used for determining rule application. This value will + DestinationIpCidr *int `json:"destinationIpCidr,omitempty" xmlrpc:"destinationIpCidr,omitempty"` + + // The destination IP subnet mask considered for determining rule application. + DestinationIpSubnetMask *string `json:"destinationIpSubnetMask,omitempty" xmlrpc:"destinationIpSubnetMask,omitempty"` + + // The ending (upper end of range) destination port considered for determining rule application. + DestinationPortRangeEnd *int `json:"destinationPortRangeEnd,omitempty" xmlrpc:"destinationPortRangeEnd,omitempty"` + + // The starting (lower end of range) destination port considered for determining rule application. + DestinationPortRangeStart *int `json:"destinationPortRangeStart,omitempty" xmlrpc:"destinationPortRangeStart,omitempty"` + + // The rule's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The network component firewall that this rule belongs to. + NetworkComponentFirewall *Network_Component_Firewall `json:"networkComponentFirewall,omitempty" xmlrpc:"networkComponentFirewall,omitempty"` + + // The notes field for the rule. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The numeric value describing the order in which the rule should be applied. + OrderValue *int `json:"orderValue,omitempty" xmlrpc:"orderValue,omitempty"` + + // The protocol considered for determining rule application. + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // The source IP address considered for determining rule application. + SourceIpAddress *string `json:"sourceIpAddress,omitempty" xmlrpc:"sourceIpAddress,omitempty"` + + // The CIDR is used for determining rule application. This value will + SourceIpCidr *int `json:"sourceIpCidr,omitempty" xmlrpc:"sourceIpCidr,omitempty"` + + // The source IP subnet mask considered for determining rule application. + SourceIpSubnetMask *string `json:"sourceIpSubnetMask,omitempty" xmlrpc:"sourceIpSubnetMask,omitempty"` + + // Current status of the network component firewall. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // Whether this rule is an IPv4 rule or an IPv6 rule. If + Version *int `json:"version,omitempty" xmlrpc:"version,omitempty"` +} + +// no documentation yet +type Network_Vlan_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/notification.go b/vendor/github.com/softlayer/softlayer-go/datatypes/notification.go new file mode 100644 index 000000000000..c18a8b886e5c --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/notification.go @@ -0,0 +1,676 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// Details provided for the notification are basic. Details such as the related preferences, name and keyname for the notification can be retrieved. The keyname property for the notification can be used to refer to a notification when integrating into the SoftLayer Notification system. The name property can be used more for display purposes.
+type Notification struct {
+ Entity
+
+ // Unique identifier for the notification.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Name that can be used by external systems to refer to a notification.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // Friendly name for the notification.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // A count of the preferences related to the notification. These preferences are configurable and optional for subscribers to use.
+ PreferenceCount *uint `json:"preferenceCount,omitempty" xmlrpc:"preferenceCount,omitempty"`
+
+ // The preferences related to the notification. These preferences are configurable and optional for subscribers to use.
+ Preferences []Notification_Preference `json:"preferences,omitempty" xmlrpc:"preferences,omitempty"`
+
+ // A count of the required preferences related to the notification. While configurable, the subscriber does not have the option whether to use the preference.
+ RequiredPreferenceCount *uint `json:"requiredPreferenceCount,omitempty" xmlrpc:"requiredPreferenceCount,omitempty"`
+
+ // The required preferences related to the notification. While configurable, the subscriber does not have the option whether to use the preference.
+ RequiredPreferences []Notification_Preference `json:"requiredPreferences,omitempty" xmlrpc:"requiredPreferences,omitempty"`
+}
+
+// Provides details for the delivery methods available.
+type Notification_Delivery_Method struct {
+ Entity
+
+ // Determines if the delivery method is still used by the system.
+ Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+ // Description used for the delivery method.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // Unique identifier for the various notification delivery methods.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Name that can be used by external systems to refer to a delivery method.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // Friendly name used for the delivery method.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// This is an extension of the SoftLayer_Notification class. These are implementation details specific to those notifications which can be subscribed to and received on a mobile device.
+type Notification_Mobile struct {
+ Notification
+}
+
+// no documentation yet
+type Notification_Occurrence_Account struct {
+ Entity
+
+ // no documentation yet
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // no documentation yet
+ Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+ // no documentation yet
+ LastNotificationUpdate *Notification_Occurrence_Update `json:"lastNotificationUpdate,omitempty" xmlrpc:"lastNotificationUpdate,omitempty"`
+
+ // no documentation yet
+ NotificationOccurrenceEvent *Notification_Occurrence_Event `json:"notificationOccurrenceEvent,omitempty" xmlrpc:"notificationOccurrenceEvent,omitempty"`
+}
+
+// no documentation yet
+type Notification_Occurrence_Event struct {
+ Entity
+
+ // Indicates whether or not this event has been acknowledged by the user.
+ AcknowledgedFlag *bool `json:"acknowledgedFlag,omitempty" xmlrpc:"acknowledgedFlag,omitempty"`
+
+ // A count of a collection of attachments for this event which provide supplementary information to impacted users; some examples are RFO (Reason For Outage) and root cause analysis documents.
+ AttachmentCount *uint `json:"attachmentCount,omitempty" xmlrpc:"attachmentCount,omitempty"`
+
+ // A collection of attachments for this event which provide supplementary information to impacted users; some examples are RFO (Reason For Outage) and root cause analysis documents.
+ Attachments []Notification_Occurrence_Event_Attachment `json:"attachments,omitempty" xmlrpc:"attachments,omitempty"`
+
+ // When this event will end.
+ EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+ // The first update for this event.
+ FirstUpdate *Notification_Occurrence_Update `json:"firstUpdate,omitempty" xmlrpc:"firstUpdate,omitempty"`
+
+ // Unique identifier for this event.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A count of a collection of accounts impacted by this event. Each impacted account record relates directly to a [[SoftLayer_Account]].
+ ImpactedAccountCount *uint `json:"impactedAccountCount,omitempty" xmlrpc:"impactedAccountCount,omitempty"`
+
+ // A collection of accounts impacted by this event. Each impacted account record relates directly to a [[SoftLayer_Account]].
+ ImpactedAccounts []Notification_Occurrence_Account `json:"impactedAccounts,omitempty" xmlrpc:"impactedAccounts,omitempty"`
+
+ // A count of a collection of resources impacted by this event. Each record will relate to some physical resource that the user has access to such as [[SoftLayer_Hardware]] or [[SoftLayer_Virtual_Guest]].
+ ImpactedResourceCount *uint `json:"impactedResourceCount,omitempty" xmlrpc:"impactedResourceCount,omitempty"`
+
+ // A collection of resources impacted by this event. Each record will relate to some physical resource that the user has access to such as [[SoftLayer_Hardware]] or [[SoftLayer_Virtual_Guest]].
+ ImpactedResources []Notification_Occurrence_Resource `json:"impactedResources,omitempty" xmlrpc:"impactedResources,omitempty"`
+
+ // A count of a collection of users impacted by this event. Each impacted user record relates directly to a [[SoftLayer_User_Customer]].
+ ImpactedUserCount *uint `json:"impactedUserCount,omitempty" xmlrpc:"impactedUserCount,omitempty"`
+
+ // A collection of users impacted by this event. Each impacted user record relates directly to a [[SoftLayer_User_Customer]].
+ ImpactedUsers []Notification_Occurrence_User `json:"impactedUsers,omitempty" xmlrpc:"impactedUsers,omitempty"`
+
+ // Latest count of users impacted by this event.
+ LastImpactedUserCount *int `json:"lastImpactedUserCount,omitempty" xmlrpc:"lastImpactedUserCount,omitempty"`
+
+ // The last update for this event.
+ LastUpdate *Notification_Occurrence_Update `json:"lastUpdate,omitempty" xmlrpc:"lastUpdate,omitempty"`
+
+ // When this event was last updated.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The type of event such as planned or unplanned maintenance.
+ NotificationOccurrenceEventType *Notification_Occurrence_Event_Type `json:"notificationOccurrenceEventType,omitempty" xmlrpc:"notificationOccurrenceEventType,omitempty"`
+
+ // no documentation yet
+ RecoveryTime *int `json:"recoveryTime,omitempty" xmlrpc:"recoveryTime,omitempty"`
+
+ // When this event started.
+ StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+ // no documentation yet
+ StatusCode *Notification_Occurrence_Status_Code `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"`
+
+ // Brief description of this event.
+ Subject *string `json:"subject,omitempty" xmlrpc:"subject,omitempty"`
+
+ // Details of this event.
+ Summary *string `json:"summary,omitempty" xmlrpc:"summary,omitempty"`
+
+ // Unique identifier for the [[SoftLayer_Ticket]] associated with this event.
+ SystemTicketId *int `json:"systemTicketId,omitempty" xmlrpc:"systemTicketId,omitempty"`
+
+ // A count of all updates for this event.
+ UpdateCount *uint `json:"updateCount,omitempty" xmlrpc:"updateCount,omitempty"`
+
+ // All updates for this event.
+ Updates []Notification_Occurrence_Update `json:"updates,omitempty" xmlrpc:"updates,omitempty"`
+}
+
+// SoftLayer events can have files attached to them by a SoftLayer employee. Attaching a file to an event is a way to provide supplementary information such as an RFO (reason for outage) document or root cause analysis. The SoftLayer_Notification_Occurrence_Event_Attachment data type models a single file attached to an event.
+type Notification_Occurrence_Event_Attachment struct {
+ Entity
+
+ // The date the file was attached to the event.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The name of the file attached to the event.
+ FileName *string `json:"fileName,omitempty" xmlrpc:"fileName,omitempty"`
+
+ // The size of the file, measured in bytes.
+ FileSize *string `json:"fileSize,omitempty" xmlrpc:"fileSize,omitempty"`
+
+ // An event attachment's unique identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ NotificationOccurrenceEvent *Notification_Occurrence_Event `json:"notificationOccurrenceEvent,omitempty" xmlrpc:"notificationOccurrenceEvent,omitempty"`
+
+ // The unique event identifier that the file is attached to.
+ NotificationOccurrenceEventId *int `json:"notificationOccurrenceEventId,omitempty" xmlrpc:"notificationOccurrenceEventId,omitempty"`
+}
+
+// This represents the type of SoftLayer_Notification_Occurrence_Event.
+type Notification_Occurrence_Event_Type struct {
+ Entity
+
+ // The friendly unique identifier for this event type.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+}
+
+// This type contains general information relating to any hardware or services that may be impacted by a SoftLayer_Notification_Occurrence_Event.
+type Notification_Occurrence_Resource struct {
+ Entity
+
+ // no documentation yet
+ Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+ // A label which gives some background as to what piece of the impacted infrastructure this record refers to.
+ FilterLabel *string `json:"filterLabel,omitempty" xmlrpc:"filterLabel,omitempty"`
+
+ // The associated event.
+ NotificationOccurrenceEvent *Notification_Occurrence_Event `json:"notificationOccurrenceEvent,omitempty" xmlrpc:"notificationOccurrenceEvent,omitempty"`
+
+ // The unique identifier for the associated [[SoftLayer_Notification_Occurrence_Event]].
+ NotificationOccurrenceEventId *int `json:"notificationOccurrenceEventId,omitempty" xmlrpc:"notificationOccurrenceEventId,omitempty"`
+
+ // The physical resource.
+ Resource *Entity `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+ // The unique identifier for the [[SoftLayer_Account]] associated with this resource.
+ ResourceAccountId *int `json:"resourceAccountId,omitempty" xmlrpc:"resourceAccountId,omitempty"`
+
+ // no documentation yet
+ ResourceName *string `json:"resourceName,omitempty" xmlrpc:"resourceName,omitempty"`
+
+ // The unique identifier for the physical resource that is associated with this record.
+ ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// This type contains general information related to a [[SoftLayer_Hardware]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]].
+type Notification_Occurrence_Resource_Hardware struct {
+ Notification_Occurrence_Resource
+
+ // no documentation yet
+ Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"`
+
+ // no documentation yet
+ PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"`
+
+ // no documentation yet
+ PublicIp *string `json:"publicIp,omitempty" xmlrpc:"publicIp,omitempty"`
+
+ // no documentation yet
+ ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"`
+}
+
+// This type contains general information related to a [[SoftLayer_Network_Application_Delivery_Controller]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]].
+type Notification_Occurrence_Resource_Network_Application_Delivery_Controller struct {
+ Notification_Occurrence_Resource
+
+ // no documentation yet
+ Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"`
+
+ // no documentation yet
+ PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"`
+
+ // no documentation yet
+ PublicIp *string `json:"publicIp,omitempty" xmlrpc:"publicIp,omitempty"`
+
+ // no documentation yet
+ ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"`
+}
+
+// This type contains general information related to a [[SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]].
+type Notification_Occurrence_Resource_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PublicIp *string `json:"publicIp,omitempty" xmlrpc:"publicIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Network_Storage_Iscsi_EqualLogic]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. +type Notification_Occurrence_Resource_Network_Storage_Iscsi_EqualLogic struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Network_Storage_Iscsi_NetApp]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. +type Notification_Occurrence_Resource_Network_Storage_Iscsi_NetApp struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Network_Storage_Lockbox]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. +type Notification_Occurrence_Resource_Network_Storage_Lockbox struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Network_Storage_Nas]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. +type Notification_Occurrence_Resource_Network_Storage_Nas struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Network_Storage_NetApp_Volume]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. 
+type Notification_Occurrence_Resource_Network_Storage_NetApp_Volume struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Network_Storage_NetApp_Volume_Replicant_Iscsi]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. +type Notification_Occurrence_Resource_Network_Storage_NetApp_Volume_Replicant_Iscsi struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Network_Storage_NetApp_Volume_Replicant_Nas]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. +type Notification_Occurrence_Resource_Network_Storage_NetApp_Volume_Replicant_Nas struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Virtual_Guest]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. 
+type Notification_Occurrence_Resource_Virtual struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"` + + // no documentation yet + PublicIp *string `json:"publicIp,omitempty" xmlrpc:"publicIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// no documentation yet +type Notification_Occurrence_Status_Code struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Notification_Occurrence_Update struct { + Entity + + // no documentation yet + Contents *string `json:"contents,omitempty" xmlrpc:"contents,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Employee *User_Employee `json:"employee,omitempty" xmlrpc:"employee,omitempty"` + + // no documentation yet + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // no documentation yet + NotificationOccurrenceEvent *Notification_Occurrence_Event `json:"notificationOccurrenceEvent,omitempty" xmlrpc:"notificationOccurrenceEvent,omitempty"` + + // no documentation yet + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// This type contains general information relating to a user that may be impacted by a [[SoftLayer_Notification_Occurrence_Event]]. +type Notification_Occurrence_User struct { + Entity + + // no documentation yet + AcknowledgedFlag *int `json:"acknowledgedFlag,omitempty" xmlrpc:"acknowledgedFlag,omitempty"` + + // no documentation yet + Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of a collection of resources impacted by the associated event. + ImpactedResourceCount *uint `json:"impactedResourceCount,omitempty" xmlrpc:"impactedResourceCount,omitempty"` + + // A collection of resources impacted by the associated event. + ImpactedResources []Notification_Occurrence_Resource `json:"impactedResources,omitempty" xmlrpc:"impactedResources,omitempty"` + + // The associated event. + NotificationOccurrenceEvent *Notification_Occurrence_Event `json:"notificationOccurrenceEvent,omitempty" xmlrpc:"notificationOccurrenceEvent,omitempty"` + + // The impacted user. + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // no documentation yet + UsrRecordId *int `json:"usrRecordId,omitempty" xmlrpc:"usrRecordId,omitempty"` +} + +// Retrieve details for preferences. Preferences are used to allow the subscriber to modify their subscription in various ways. Details such as friendly name, keyname maximum and minimum values can be retrieved. These provide details to help configure subscriber preferences correctly. +type Notification_Preference struct { + Entity + + // A description of what the preference is used for. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Unique identifier for the notification preference. 
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Name that can be used by external systems to refer to preference. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // Largest value allowed for the preference. + MaximumValue *string `json:"maximumValue,omitempty" xmlrpc:"maximumValue,omitempty"` + + // Smallest value allowed for the preference. + MinimumValue *string `json:"minimumValue,omitempty" xmlrpc:"minimumValue,omitempty"` + + // Friendly name for the notification. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The unit of measure used for the preference's value, minimum and maximum as well. + Units *string `json:"units,omitempty" xmlrpc:"units,omitempty"` + + // Default value used when setting up preferences for a new subscriber. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Notification_Subscriber struct { + Entity + + // no documentation yet + Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of + DeliveryMethodCount *uint `json:"deliveryMethodCount,omitempty" xmlrpc:"deliveryMethodCount,omitempty"` + + // no documentation yet + DeliveryMethods []Notification_Subscriber_Delivery_Method `json:"deliveryMethods,omitempty" xmlrpc:"deliveryMethods,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Notification *Notification `json:"notification,omitempty" xmlrpc:"notification,omitempty"` + + // no documentation yet + NotificationId *int `json:"notificationId,omitempty" xmlrpc:"notificationId,omitempty"` + + // no documentation yet + NotificationSubscriberTypeId *int `json:"notificationSubscriberTypeId,omitempty" xmlrpc:"notificationSubscriberTypeId,omitempty"` + + // no documentation yet + NotificationSubscriberTypeResourceId *int `json:"notificationSubscriberTypeResourceId,omitempty" xmlrpc:"notificationSubscriberTypeResourceId,omitempty"` +} + +// no documentation yet +type Notification_Subscriber_Customer struct { + Notification_Subscriber + + // no documentation yet + SubscriberRecord *User_Customer `json:"subscriberRecord,omitempty" xmlrpc:"subscriberRecord,omitempty"` +} + +// Provides details for the subscriber's delivery methods. +type Notification_Subscriber_Delivery_Method struct { + Entity + + // Indicates the subscriber's delivery method availability for notifications. + Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"` + + // Date the subscriber's delivery method was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Date the subscriber's delivery method was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + NotificationDeliveryMethod *Notification_Delivery_Method `json:"notificationDeliveryMethod,omitempty" xmlrpc:"notificationDeliveryMethod,omitempty"` + + // Identifier for the notification delivery method. 
+ NotificationDeliveryMethodId *int `json:"notificationDeliveryMethodId,omitempty" xmlrpc:"notificationDeliveryMethodId,omitempty"` + + // no documentation yet + NotificationSubscriber *Notification_Subscriber `json:"notificationSubscriber,omitempty" xmlrpc:"notificationSubscriber,omitempty"` + + // Identifier for the subscriber. + NotificationSubscriberId *int `json:"notificationSubscriberId,omitempty" xmlrpc:"notificationSubscriberId,omitempty"` +} + +// A notification subscriber will have details pertaining to the subscriber's notification subscription. You can receive details such as preferences, details of the preferences, delivery methods and the delivery methods for the subscriber. +// +// NOTE: There are preferences and delivery methods that cannot be modified. Also, there are some subscriptions that are required. +type Notification_User_Subscriber struct { + Entity + + // The current status of the subscription. + Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"` + + // A count of the delivery methods used to send the subscribed notification. + DeliveryMethodCount *uint `json:"deliveryMethodCount,omitempty" xmlrpc:"deliveryMethodCount,omitempty"` + + // The delivery methods used to send the subscribed notification. + DeliveryMethods []Notification_Delivery_Method `json:"deliveryMethods,omitempty" xmlrpc:"deliveryMethods,omitempty"` + + // Unique identifier of the subscriber that will receive the alerts. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Notification subscribed to. + Notification *Notification `json:"notification,omitempty" xmlrpc:"notification,omitempty"` + + // Unique identifier of the notification subscribed to. + NotificationId *int `json:"notificationId,omitempty" xmlrpc:"notificationId,omitempty"` + + // A count of associated subscriber preferences used for the notification subscription. For example, preferences include number of deliveries (limit) and threshold. + PreferenceCount *uint `json:"preferenceCount,omitempty" xmlrpc:"preferenceCount,omitempty"` + + // Associated subscriber preferences used for the notification subscription. For example, preferences include number of deliveries (limit) and threshold. + Preferences []Notification_User_Subscriber_Preference `json:"preferences,omitempty" xmlrpc:"preferences,omitempty"` + + // A count of preference details such as description, minimum and maximum limits, default value and unit of measure. + PreferencesDetailCount *uint `json:"preferencesDetailCount,omitempty" xmlrpc:"preferencesDetailCount,omitempty"` + + // Preference details such as description, minimum and maximum limits, default value and unit of measure. + PreferencesDetails []Notification_Preference `json:"preferencesDetails,omitempty" xmlrpc:"preferencesDetails,omitempty"` + + // The subscriber id to resource id mapping. + ResourceRecord *Notification_User_Subscriber_Resource `json:"resourceRecord,omitempty" xmlrpc:"resourceRecord,omitempty"` + + // User record for the subscription. + UserRecord *User_Customer `json:"userRecord,omitempty" xmlrpc:"userRecord,omitempty"` + + // Unique identifier of the user the subscription is for. + UserRecordId *int `json:"userRecordId,omitempty" xmlrpc:"userRecordId,omitempty"` +} + +// A notification subscriber will have details pertaining to the subscriber's notification subscription. You can receive details such as preferences, details of the preferences, delivery methods and the delivery methods for the subscriber. 
+//
+// NOTE: There are preferences and delivery methods that cannot be modified. Also, there are some subscriptions that are required.
+type Notification_User_Subscriber_Billing struct {
+ Notification_User_Subscriber
+}
+
+// Provides mapping details of how the subscriber's notification will be delivered. This maps the subscriber's id with all the delivery method ids used to deliver the notification.
+type Notification_User_Subscriber_Delivery_Method struct {
+ Entity
+
+ // Determines if the delivery method is active for the user.
+ Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+ // Provides details for the method used to deliver the notification (email, sms, ticket).
+ DeliveryMethod *Notification_Delivery_Method `json:"deliveryMethod,omitempty" xmlrpc:"deliveryMethod,omitempty"`
+
+ // Unique identifier of the method used to deliver notification.
+ NotificationMethodId *int `json:"notificationMethodId,omitempty" xmlrpc:"notificationMethodId,omitempty"`
+
+ // The Subscriber information tied to the delivery method.
+ NotificationUserSubscriber *Notification_User_Subscriber `json:"notificationUserSubscriber,omitempty" xmlrpc:"notificationUserSubscriber,omitempty"`
+
+ // Unique identifier of the subscriber tied to the delivery method.
+ NotificationUserSubscriberId *int `json:"notificationUserSubscriberId,omitempty" xmlrpc:"notificationUserSubscriberId,omitempty"`
+}
+
+// A notification subscriber will have details pertaining to the subscriber's notification subscription. You can receive details such as preferences, details of the preferences, delivery methods and the delivery methods for the subscriber.
+//
+// NOTE: There are preferences and delivery methods that cannot be modified. Also, there are some subscriptions that are required.
+type Notification_User_Subscriber_Mobile struct {
+ Notification_User_Subscriber
+}
+
+// Preferences are settings that can be modified to change the behavior of the subscription. For example, modify the limit preference to only receive notifications 10 times instead of 1 during a billing cycle.
+//
+// NOTE: Some preferences have certain restrictions on values that can be set.
+type Notification_User_Subscriber_Preference struct {
+ Entity
+
+ // Details such as name, keyname, minimum and maximum values for the preference.
+ DefaultPreference *Notification_Preference `json:"defaultPreference,omitempty" xmlrpc:"defaultPreference,omitempty"`
+
+ // Unique identifier for the subscriber's preferences.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Unique identifier of the default preference that the subscriber preference is based on. For example, if no preferences are supplied during the creation of a subscriber, the default values are pulled using this property.
+ NotificationPreferenceId *int `json:"notificationPreferenceId,omitempty" xmlrpc:"notificationPreferenceId,omitempty"`
+
+ // Details of the subscriber tied to the preference.
+ NotificationUserSubscriber *Notification_User_Subscriber `json:"notificationUserSubscriber,omitempty" xmlrpc:"notificationUserSubscriber,omitempty"`
+
+ // Unique identifier of the subscriber tied to the subscriber preference.
+ NotificationUserSubscriberId *int `json:"notificationUserSubscriberId,omitempty" xmlrpc:"notificationUserSubscriberId,omitempty"`
+
+ // The user supplied value to "override" the "default" preference's value.
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// Retrieve identifier cross-reference information.
SoftLayer_Notification_User_Subscriber_Resource provides the resource table id and subscriber id relation. The resource table id is the id of the service the subscriber receives alerts for. This resource table id could be the unique identifier for a Storage Evault service, Global Load Balancer or CDN service. +type Notification_User_Subscriber_Resource struct { + Entity + + // The Subscriber information tied to the resource service. + NotificationUserSubscriber *Notification_User_Subscriber `json:"notificationUserSubscriber,omitempty" xmlrpc:"notificationUserSubscriber,omitempty"` + + // Unique identifier of the subscriber that will receive the alerts for the resource subscribed to a notification. + NotificationUserSubscriberId *int `json:"notificationUserSubscriberId,omitempty" xmlrpc:"notificationUserSubscriberId,omitempty"` + + // Unique identifier for a SoftLayer service that is subscribed to a notification. Currently, the SoftLayer services that can be subscribed to notifications are: + // + // Storage EVault CDN Global Load Balancer + // + // + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/policy.go b/vendor/github.com/softlayer/softlayer-go/datatypes/policy.go new file mode 100644 index 000000000000..03d5ac62a5a7 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/policy.go @@ -0,0 +1,29 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Billing_Oder_Quote data type records acceptance of policy documents for a quote. +type Policy_Document_Acceptance_Quote struct { + Entity + + // no documentation yet + Resource *Billing_Order_Quote `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/product.go b/vendor/github.com/softlayer/softlayer-go/datatypes/product.go new file mode 100644 index 000000000000..8e4cd1459f0b --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/product.go @@ -0,0 +1,1895 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// A Catalog is defined as a set of prices for products that SoftLayer offers for sale. 
These prices are organized into packages which represent the different servers and services that SoftLayer offers. +type Product_Catalog struct { + Entity + + // A count of brands using this Catalog + BrandCount *uint `json:"brandCount,omitempty" xmlrpc:"brandCount,omitempty"` + + // Brands using this Catalog + Brands []Brand `json:"brands,omitempty" xmlrpc:"brands,omitempty"` + + // A count of packages available in this catalog + PackageCount *uint `json:"packageCount,omitempty" xmlrpc:"packageCount,omitempty"` + + // Packages available in this catalog + Packages []Product_Package `json:"packages,omitempty" xmlrpc:"packages,omitempty"` + + // A count of prices available in this catalog + PriceCount *uint `json:"priceCount,omitempty" xmlrpc:"priceCount,omitempty"` + + // Prices available in this catalog + Prices []Product_Item_Price `json:"prices,omitempty" xmlrpc:"prices,omitempty"` + + // A count of products available in catalog + ProductCount *uint `json:"productCount,omitempty" xmlrpc:"productCount,omitempty"` + + // Products available in catalog + Products []Product_Item `json:"products,omitempty" xmlrpc:"products,omitempty"` +} + +// The SoftLayer_Product_Catalog_Item_Price type assigns an Item Price to a Catalog. This relation defines the composition of Item Prices in a Catalog. +type Product_Catalog_Item_Price struct { + Entity + + // Catalog being assigned + Catalog *Product_Catalog `json:"catalog,omitempty" xmlrpc:"catalog,omitempty"` + + // The id of the Catalog the Item Price is part of. + CatalogId *int `json:"catalogId,omitempty" xmlrpc:"catalogId,omitempty"` + + // The time the Item Price was defined in the Catalog + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The time the Item Price was changed for the Catalog + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Price being assigned + Price *Product_Item_Price `json:"price,omitempty" xmlrpc:"price,omitempty"` + + // The id of the Item Price that is part of the Catalog. + PriceId *int `json:"priceId,omitempty" xmlrpc:"priceId,omitempty"` +} + +// The SoftLayer_Product_Group data type contains product group relationship. +type Product_Group struct { + Entity + + // The name of the product group. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Product_Item data type contains general information relating to a single SoftLayer product. +type Product_Item struct { + Entity + + // A count of + ActivePresaleEventCount *uint `json:"activePresaleEventCount,omitempty" xmlrpc:"activePresaleEventCount,omitempty"` + + // no documentation yet + ActivePresaleEvents []Sales_Presale_Event `json:"activePresaleEvents,omitempty" xmlrpc:"activePresaleEvents,omitempty"` + + // A count of active usage based prices. + ActiveUsagePriceCount *uint `json:"activeUsagePriceCount,omitempty" xmlrpc:"activeUsagePriceCount,omitempty"` + + // Active usage based prices. + ActiveUsagePrices []Product_Item_Price `json:"activeUsagePrices,omitempty" xmlrpc:"activeUsagePrices,omitempty"` + + // A count of the attribute values for a product item. These are additional properties that give extra information about the product being sold. + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // The attribute values for a product item. These are additional properties that give extra information about the product being sold. 
+ Attributes []Product_Item_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // A count of attributes that govern when an item may no longer be available. + AvailabilityAttributeCount *uint `json:"availabilityAttributeCount,omitempty" xmlrpc:"availabilityAttributeCount,omitempty"` + + // Attributes that govern when an item may no longer be available. + AvailabilityAttributes []Product_Item_Attribute `json:"availabilityAttributes,omitempty" xmlrpc:"availabilityAttributes,omitempty"` + + // An item's special billing type, if applicable. + BillingType *string `json:"billingType,omitempty" xmlrpc:"billingType,omitempty"` + + // An item's included product item references. Some items have other items included in them that we specifically detail. They are here called Bundled Items. An example is Plesk unlimited. It has a bundled item labeled 'SiteBuilder'. These are the SoftLayer_Product_Item_Bundles objects. See the SoftLayer_Product_Item::bundleItems property for the bundle of SoftLayer_Product_Item objects. + Bundle []Product_Item_Bundles `json:"bundle,omitempty" xmlrpc:"bundle,omitempty"` + + // A count of an item's included product item references. Some items have other items included in them that we specifically detail. They are here called Bundled Items. An example is Plesk unlimited. It has a bundled item labeled 'SiteBuilder'. These are the SoftLayer_Product_Item_Bundles objects. See the SoftLayer_Product_Item::bundleItems property for the bundle of SoftLayer_Product_Item objects. + BundleCount *uint `json:"bundleCount,omitempty" xmlrpc:"bundleCount,omitempty"` + + // A count of an item's included products. Some items have other items included in them that we specifically detail. They are here called Bundled Items. An example is Plesk unlimited. It has a bundled item labeled 'SiteBuilder'. These are the SoftLayer_Product_Item objects. + BundleItemCount *uint `json:"bundleItemCount,omitempty" xmlrpc:"bundleItemCount,omitempty"` + + // An item's included products. Some items have other items included in them that we specifically detail. They are here called Bundled Items. An example is Plesk unlimited. It has a bundled item labeled 'SiteBuilder'. These are the SoftLayer_Product_Item objects. + BundleItems []Product_Item `json:"bundleItems,omitempty" xmlrpc:"bundleItems,omitempty"` + + // Some Product Items have capacity information, such as RAM, bandwidth, and others. This provides the numerical representation of the capacity given in the description of this product item. + Capacity *Float64 `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"` + + // When the product capacity is best described as a range, this holds the ceiling of the range. + CapacityMaximum *string `json:"capacityMaximum,omitempty" xmlrpc:"capacityMaximum,omitempty"` + + // When the product capacity is best described as a range, this holds the floor of the range. + CapacityMinimum *string `json:"capacityMinimum,omitempty" xmlrpc:"capacityMinimum,omitempty"` + + // This flag indicates that this product is restricted by a capacity on a related product. + CapacityRestrictedProductFlag *bool `json:"capacityRestrictedProductFlag,omitempty" xmlrpc:"capacityRestrictedProductFlag,omitempty"` + + // An item's associated item categories. + Categories []Product_Item_Category `json:"categories,omitempty" xmlrpc:"categories,omitempty"` + + // A count of an item's associated item categories.
+ CategoryCount *uint `json:"categoryCount,omitempty" xmlrpc:"categoryCount,omitempty"` + + // A count of the configuration templates which can be used during provisioning of that product. + ConfigurationTemplateCount *uint `json:"configurationTemplateCount,omitempty" xmlrpc:"configurationTemplateCount,omitempty"` + + // Some product items have configuration templates which can be used during provisioning of that product. + ConfigurationTemplates []Configuration_Template `json:"configurationTemplates,omitempty" xmlrpc:"configurationTemplates,omitempty"` + + // An item's conflicts. For example, McAfee LinuxShield cannot be ordered with Windows. It was not meant for that operating system and as such is a conflict. + Conflicts []Product_Item_Resource_Conflict `json:"conflicts,omitempty" xmlrpc:"conflicts,omitempty"` + + // This flag indicates that this product is restricted by the number of cores on the compute instance. This is deprecated. Use [[SoftLayer_Product_Item/getCapacityRestrictedProductFlag|getCapacityRestrictedProductFlag]] + CoreRestrictedItemFlag *bool `json:"coreRestrictedItemFlag,omitempty" xmlrpc:"coreRestrictedItemFlag,omitempty"` + + // A product's description + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Some product items have a downgrade path. This is the first product item in the downgrade path. + DowngradeItem *Product_Item `json:"downgradeItem,omitempty" xmlrpc:"downgradeItem,omitempty"` + + // A count of the product items in an item's downgrade path. + DowngradeItemCount *uint `json:"downgradeItemCount,omitempty" xmlrpc:"downgradeItemCount,omitempty"` + + // Some product items have a downgrade path. These are those product items. + DowngradeItems []Product_Item `json:"downgradeItems,omitempty" xmlrpc:"downgradeItems,omitempty"` + + // An item's category conflicts. For example, 10 Gbps redundant network functionality cannot be ordered with a secondary GPU and as such is a conflict. + GlobalCategoryConflicts []Product_Item_Resource_Conflict `json:"globalCategoryConflicts,omitempty" xmlrpc:"globalCategoryConflicts,omitempty"` + + // The generic hardware component that this item represents. + HardwareGenericComponentModel *Hardware_Component_Model_Generic `json:"hardwareGenericComponentModel,omitempty" xmlrpc:"hardwareGenericComponentModel,omitempty"` + + // no documentation yet + HideFromPortalFlag *bool `json:"hideFromPortalFlag,omitempty" xmlrpc:"hideFromPortalFlag,omitempty"` + + // A product's internal identification number + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // DEPRECATED. An item's inventory status per datacenter. + Inventory []Product_Package_Inventory `json:"inventory,omitempty" xmlrpc:"inventory,omitempty"` + + // DEPRECATED. A count of an item's inventory status per datacenter. + InventoryCount *uint `json:"inventoryCount,omitempty" xmlrpc:"inventoryCount,omitempty"` + + // Flag to indicate the server product is engineered for a multi-server solution. (Deprecated) + IsEngineeredServerProduct *bool `json:"isEngineeredServerProduct,omitempty" xmlrpc:"isEngineeredServerProduct,omitempty"` + + // An item's primary item category. + ItemCategory *Product_Item_Category `json:"itemCategory,omitempty" xmlrpc:"itemCategory,omitempty"` + + // A product's tax category internal identification number + ItemTaxCategoryId *int `json:"itemTaxCategoryId,omitempty" xmlrpc:"itemTaxCategoryId,omitempty"` + + // A unique key name for the product.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + LocalDiskFlag *bool `json:"localDiskFlag,omitempty" xmlrpc:"localDiskFlag,omitempty"` + + // An item's location conflicts. For example, Dual Path network functionality cannot be ordered in WDC and as such is a conflict. + LocationConflicts []Product_Item_Resource_Conflict `json:"locationConflicts,omitempty" xmlrpc:"locationConflicts,omitempty"` + + // Detailed product description + LongDescription *string `json:"longDescription,omitempty" xmlrpc:"longDescription,omitempty"` + + // no documentation yet + ObjectStorageClusterGeolocationType *string `json:"objectStorageClusterGeolocationType,omitempty" xmlrpc:"objectStorageClusterGeolocationType,omitempty"` + + // no documentation yet + ObjectStorageItemFlag *bool `json:"objectStorageItemFlag,omitempty" xmlrpc:"objectStorageItemFlag,omitempty"` + + // no documentation yet + ObjectStorageServiceClass *string `json:"objectStorageServiceClass,omitempty" xmlrpc:"objectStorageServiceClass,omitempty"` + + // A count of a collection of all the SoftLayer_Product_Package(s) in which this item exists. + PackageCount *uint `json:"packageCount,omitempty" xmlrpc:"packageCount,omitempty"` + + // A collection of all the SoftLayer_Product_Package(s) in which this item exists. + Packages []Product_Package `json:"packages,omitempty" xmlrpc:"packages,omitempty"` + + // The number of cores that a processor has. + PhysicalCoreCapacity *string `json:"physicalCoreCapacity,omitempty" xmlrpc:"physicalCoreCapacity,omitempty"` + + // A count of + PresaleEventCount *uint `json:"presaleEventCount,omitempty" xmlrpc:"presaleEventCount,omitempty"` + + // no documentation yet + PresaleEvents []Sales_Presale_Event `json:"presaleEvents,omitempty" xmlrpc:"presaleEvents,omitempty"` + + // A count of a product item's prices. + PriceCount *uint `json:"priceCount,omitempty" xmlrpc:"priceCount,omitempty"` + + // A product item's prices. + Prices []Product_Item_Price `json:"prices,omitempty" xmlrpc:"prices,omitempty"` + + // If an item must be ordered with another item, it will have a requirement item here. + Requirements []Product_Item_Requirement `json:"requirements,omitempty" xmlrpc:"requirements,omitempty"` + + // A count of an item's rules. This includes the requirements and conflicts to resources that an item has. + RuleCount *uint `json:"ruleCount,omitempty" xmlrpc:"ruleCount,omitempty"` + + // An item's rules. This includes the requirements and conflicts to resources that an item has. + Rules []Product_Item_Rule `json:"rules,omitempty" xmlrpc:"rules,omitempty"` + + // The SoftLayer_Software_Description tied to this item. This will only be populated for software items. + SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"` + + // The unique identifier of the SoftLayer_Software_Description tied to this item. + SoftwareDescriptionId *int `json:"softwareDescriptionId,omitempty" xmlrpc:"softwareDescriptionId,omitempty"` + + // An item's tax category, if applicable. + TaxCategory *Product_Item_Tax_Category `json:"taxCategory,omitempty" xmlrpc:"taxCategory,omitempty"` + + // A count of third-party policy assignments for this product. + ThirdPartyPolicyAssignmentCount *uint `json:"thirdPartyPolicyAssignmentCount,omitempty" xmlrpc:"thirdPartyPolicyAssignmentCount,omitempty"` + + // Third-party policy assignments for this product. 
+ ThirdPartyPolicyAssignments []Product_Item_Policy_Assignment `json:"thirdPartyPolicyAssignments,omitempty" xmlrpc:"thirdPartyPolicyAssignments,omitempty"` + + // The 3rd party vendor for a support subscription item. (Deprecated) + ThirdPartySupportVendor *string `json:"thirdPartySupportVendor,omitempty" xmlrpc:"thirdPartySupportVendor,omitempty"` + + // The total number of physical processing cores (excluding virtual cores / hyperthreads) for this server. + TotalPhysicalCoreCapacity *int `json:"totalPhysicalCoreCapacity,omitempty" xmlrpc:"totalPhysicalCoreCapacity,omitempty"` + + // Shows the total number of cores. This is deprecated. Use [[SoftLayer_Product_Item/getCapacity|getCapacity]] for guest_core products and [[SoftLayer_Product_Item/getTotalPhysicalCoreCapacity|getTotalPhysicalCoreCapacity]] for server products + TotalPhysicalCoreCount *int `json:"totalPhysicalCoreCount,omitempty" xmlrpc:"totalPhysicalCoreCount,omitempty"` + + // The total number of processors for this server. + TotalProcessorCapacity *int `json:"totalProcessorCapacity,omitempty" xmlrpc:"totalProcessorCapacity,omitempty"` + + // The unit of measurement that a product item is measured in. + Units *string `json:"units,omitempty" xmlrpc:"units,omitempty"` + + // Some product items have an upgrade path. This is the next product item in the upgrade path. + UpgradeItem *Product_Item `json:"upgradeItem,omitempty" xmlrpc:"upgradeItem,omitempty"` + + // A count of some product items have an upgrade path. These are those upgrade product items. + UpgradeItemCount *uint `json:"upgradeItemCount,omitempty" xmlrpc:"upgradeItemCount,omitempty"` + + // A products upgrade item's internal identification number + UpgradeItemId *int `json:"upgradeItemId,omitempty" xmlrpc:"upgradeItemId,omitempty"` + + // Some product items have an upgrade path. These are those upgrade product items. + UpgradeItems []Product_Item `json:"upgradeItems,omitempty" xmlrpc:"upgradeItems,omitempty"` +} + +// The [[SoftLayer_Product_Item_Attribute]] data type allows us to describe a [[SoftLayer_Product_Item]] by attaching specific attributes, which may dictate how it interacts with other products and services. Most, if not all, of these attributes are geared towards internal usage, so customers should rarely be concerned with them. +type Product_Item_Attribute struct { + Entity + + // This represents the attribute type of this product attribute. + AttributeType *Product_Item_Attribute_Type `json:"attributeType,omitempty" xmlrpc:"attributeType,omitempty"` + + // This represents the attribute type's key name of this product attribute. + AttributeTypeKeyName *string `json:"attributeTypeKeyName,omitempty" xmlrpc:"attributeTypeKeyName,omitempty"` + + // This is the primary key value for the product attribute. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // This represents the product that an attribute is tied to. + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // This is a foreign key value for the [[SoftLayer_Product_Item_Attribute_Type]]. + ItemAttributeTypeId *int `json:"itemAttributeTypeId,omitempty" xmlrpc:"itemAttributeTypeId,omitempty"` + + // This is a foreign key value for the [[SoftLayer_Product_Item]]. + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // This is the value for the attribute. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The [[SoftLayer_Product_Item_Attribute_Type]] data type defines the available type of product attributes that are available. 
This allows for convenient reference to a [[SoftLayer_Product_Item_Attribute|product attribute]] by a unique key name value. +type Product_Item_Attribute_Type struct { + Entity + + // This is the unique identifier of the attribute type. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // This is the user-friendly readable name of the attribute type. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Product_Item_Billing_Type data type models special billing types for non-monthly billed items in the SoftLayer product catalog. +type Product_Item_Billing_Type struct { + Entity + + // A keyword describing a SoftLayer product item billing type. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Product_Item_Bundles data type contains item-to-price cross references. It relates a category, price, and item to a bundle. Match bundle ids to see all items and prices in a particular bundle. +type Product_Item_Bundles struct { + Entity + + // Item in bundle. + BundleItem *Product_Item `json:"bundleItem,omitempty" xmlrpc:"bundleItem,omitempty"` + + // Identifier for bundle. + BundleItemId *int `json:"bundleItemId,omitempty" xmlrpc:"bundleItemId,omitempty"` + + // Category bundle falls in. + Category *Product_Item_Category `json:"category,omitempty" xmlrpc:"category,omitempty"` + + // Identifier for record. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Price of item in bundle + ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"` + + // Identifier for price. + ItemPriceId *int `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"` +} + +// The SoftLayer_Product_Item_Category data type contains general category information for prices. +type Product_Item_Category struct { + Entity + + // A count of the billing items associated with an account that share a category code with an item category's category code. + BillingItemCount *uint `json:"billingItemCount,omitempty" xmlrpc:"billingItemCount,omitempty"` + + // The billing items associated with an account that share a category code with an item category's category code. + BillingItems []Billing_Item `json:"billingItems,omitempty" xmlrpc:"billingItems,omitempty"` + + // The code used to identify this category. + CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"` + + // This invoice item's "item category group". + Group *Product_Item_Category_Group `json:"group,omitempty" xmlrpc:"group,omitempty"` + + // A count of a collection of service offering category groups. Each group contains a collection of items associated with this category. + GroupCount *uint `json:"groupCount,omitempty" xmlrpc:"groupCount,omitempty"` + + // A collection of service offering category groups. Each group contains a collection of items associated with this category. + Groups []Product_Package_Item_Category_Group `json:"groups,omitempty" xmlrpc:"groups,omitempty"` + + // Identifier for the category. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The friendly, descriptive name of the category as seen on the order forms and on invoices. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of any unique options associated with an item category. + OrderOptionCount *uint `json:"orderOptionCount,omitempty" xmlrpc:"orderOptionCount,omitempty"` + + // Any unique options associated with an item category.
+ OrderOptions []Product_Item_Category_Order_Option_Type `json:"orderOptions,omitempty" xmlrpc:"orderOptions,omitempty"` + + // A count of a list of configuration available in this category.' + PackageConfigurationCount *uint `json:"packageConfigurationCount,omitempty" xmlrpc:"packageConfigurationCount,omitempty"` + + // A list of configuration available in this category.' + PackageConfigurations []Product_Package_Order_Configuration `json:"packageConfigurations,omitempty" xmlrpc:"packageConfigurations,omitempty"` + + // A count of a list of preset configurations this category is used in.' + PresetConfigurationCount *uint `json:"presetConfigurationCount,omitempty" xmlrpc:"presetConfigurationCount,omitempty"` + + // A list of preset configurations this category is used in.' + PresetConfigurations []Product_Package_Preset_Configuration `json:"presetConfigurations,omitempty" xmlrpc:"presetConfigurations,omitempty"` + + // Quantity that can be ordered. If 0, it will inherit the quantity from the server quantity ordered. Otherwise it can be specified with the order separately + QuantityLimit *int `json:"quantityLimit,omitempty" xmlrpc:"quantityLimit,omitempty"` + + // A count of the questions that are associated with an item category. + QuestionCount *uint `json:"questionCount,omitempty" xmlrpc:"questionCount,omitempty"` + + // A count of the question references that are associated with an item category. + QuestionReferenceCount *uint `json:"questionReferenceCount,omitempty" xmlrpc:"questionReferenceCount,omitempty"` + + // The question references that are associated with an item category. + QuestionReferences []Product_Item_Category_Question_Xref `json:"questionReferences,omitempty" xmlrpc:"questionReferences,omitempty"` + + // The questions that are associated with an item category. + Questions []Product_Item_Category_Question `json:"questions,omitempty" xmlrpc:"questions,omitempty"` + + // The sort order of the category. It may be used to affect the order in which the category may appear in lists (on order forms and invoices). + SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"` +} + +// The SoftLayer_Product_Item_Category_Group data type contains general category group information. +type Product_Item_Category_Group struct { + Entity + + // identifier for category group. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The friendly, descriptive name of the category group as seen on the order forms and on invoices. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Product_Item_Category_Order_Option_Type data type contains options that can be applied to orders for prices. +type Product_Item_Category_Order_Option_Type struct { + Entity + + // An item category order type's description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // An item category order type's unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A simple description for an item category order type. + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // An item category order type's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The value of the item category type's option. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_Product_Item_Category_Question data type represents a single question to be answered by an end user. 
The question may or may not be required which can be located by looking at the 'required' property on the item category references. The answerValueExpression property is a regular expression that is used to validate the answer to the question. The description and valueExample properties can be used to get an idea of the type of answer that should be provided. +type Product_Item_Category_Question struct { + Entity + + // The type of answer expected. + AnswerValueExpression *string `json:"answerValueExpression,omitempty" xmlrpc:"answerValueExpression,omitempty"` + + // The description for the question. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The type of field that should be used in an HTML form to accept an answer from an end user. + FieldType *Product_Item_Category_Question_Field_Type `json:"fieldType,omitempty" xmlrpc:"fieldType,omitempty"` + + // The type of field to use. + FieldTypeId *int `json:"fieldTypeId,omitempty" xmlrpc:"fieldTypeId,omitempty"` + + // identifier for category. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of the link between an item category and an item category question. + ItemCategoryReferenceCount *uint `json:"itemCategoryReferenceCount,omitempty" xmlrpc:"itemCategoryReferenceCount,omitempty"` + + // The link between an item category and an item category question. + ItemCategoryReferences []Product_Item_Category_Question_Xref `json:"itemCategoryReferences,omitempty" xmlrpc:"itemCategoryReferences,omitempty"` + + // The keyname for the question. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The question for the category. + Question *string `json:"question,omitempty" xmlrpc:"question,omitempty"` + + // An example and/or explanation of what the answer for the question is expected to look like. + ValueExample *string `json:"valueExample,omitempty" xmlrpc:"valueExample,omitempty"` +} + +// The SoftLayer_Product_Item_Category_Question_Field_Type data type represents the recommended type of field that should be rendered on an HTML form. +type Product_Item_Category_Question_Field_Type struct { + Entity + + // Identifier for the question type. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Keyname for the question field type. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // Short name for the question field type. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Product_Item_Category_Question_Xref data type represents a link between an item category and an item category question. It also contains a 'required' field that designates if the question is required to be answered for the given item category. +type Product_Item_Category_Question_Xref struct { + Entity + + // Identifier for category question xref record. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The product item category that this reference points to. + ItemCategory *Product_Item_Category `json:"itemCategory,omitempty" xmlrpc:"itemCategory,omitempty"` + + // Identifier for item category. + ItemCategoryId *int `json:"itemCategoryId,omitempty" xmlrpc:"itemCategoryId,omitempty"` + + // Identifier for the question. + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // The item category question that this reference points to. + Question *Product_Item_Category_Question `json:"question,omitempty" xmlrpc:"question,omitempty"` + + // Identifier for the question. 
+ QuestionId *int `json:"questionId,omitempty" xmlrpc:"questionId,omitempty"` + + // Flag to indicate whether an answer is required for the question.. + Required *bool `json:"required,omitempty" xmlrpc:"required,omitempty"` +} + +// no documentation yet +type Product_Item_Link_ThePlanet struct { + Entity + + // no documentation yet + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // no documentation yet + ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"` +} + +// no documentation yet +type Product_Item_Overage_Price struct { + Entity + + // no documentation yet + DefaultOverageItem *Product_Item `json:"defaultOverageItem,omitempty" xmlrpc:"defaultOverageItem,omitempty"` + + // no documentation yet + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` +} + +// Represents the assignment of a policy to a product. The existence of a record means that the associated product is subject to the terms defined in the document content of the policy. +type Product_Item_Policy_Assignment struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The name of the assigned policy. + PolicyName *string `json:"policyName,omitempty" xmlrpc:"policyName,omitempty"` + + // The [[SoftLayer_Product_Item]] for this policy assignment. + Product *Product_Item `json:"product,omitempty" xmlrpc:"product,omitempty"` + + // no documentation yet + ProductId *int `json:"productId,omitempty" xmlrpc:"productId,omitempty"` +} + +// The SoftLayer_Product_Item_Price data type contains general information relating to a single SoftLayer product item price. You can find out what packages each price is in as well as which category under which this price is sold. All prices are returned in floating point values measured in US Dollars ($USD). +type Product_Item_Price struct { + Entity + + // A count of the account that the item price is restricted to. + AccountRestrictionCount *uint `json:"accountRestrictionCount,omitempty" xmlrpc:"accountRestrictionCount,omitempty"` + + // The account that the item price is restricted to. + AccountRestrictions []Product_Item_Price_Account_Restriction `json:"accountRestrictions,omitempty" xmlrpc:"accountRestrictions,omitempty"` + + // A count of + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // no documentation yet + Attributes []Product_Item_Price_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // Whether the price is for Big Data OS/Journal disks only. (Deprecated) + BigDataOsJournalDiskFlag *bool `json:"bigDataOsJournalDiskFlag,omitempty" xmlrpc:"bigDataOsJournalDiskFlag,omitempty"` + + // A count of cross reference for bundles + BundleReferenceCount *uint `json:"bundleReferenceCount,omitempty" xmlrpc:"bundleReferenceCount,omitempty"` + + // cross reference for bundles + BundleReferences []Product_Item_Bundles `json:"bundleReferences,omitempty" xmlrpc:"bundleReferences,omitempty"` + + // The maximum capacity value for which this price is suitable. + CapacityRestrictionMaximum *string `json:"capacityRestrictionMaximum,omitempty" xmlrpc:"capacityRestrictionMaximum,omitempty"` + + // The minimum capacity value for which this price is suitable. + CapacityRestrictionMinimum *string `json:"capacityRestrictionMinimum,omitempty" xmlrpc:"capacityRestrictionMinimum,omitempty"` + + // The type of capacity restriction by which this price must abide. 
+ CapacityRestrictionType *string `json:"capacityRestrictionType,omitempty" xmlrpc:"capacityRestrictionType,omitempty"` + + // All categories which this item is a member. + Categories []Product_Item_Category `json:"categories,omitempty" xmlrpc:"categories,omitempty"` + + // A count of all categories which this item is a member. + CategoryCount *uint `json:"categoryCount,omitempty" xmlrpc:"categoryCount,omitempty"` + + // This flag is used by the [[SoftLayer_Hardware::getUpgradeItems|getUpgradeItems]] method to indicate if a product price is used for the current billing item. + CurrentPriceFlag *bool `json:"currentPriceFlag,omitempty" xmlrpc:"currentPriceFlag,omitempty"` + + // Signifies pricing that is only available on a dedicated host virtual server order. + DedicatedHostInstanceFlag *bool `json:"dedicatedHostInstanceFlag,omitempty" xmlrpc:"dedicatedHostInstanceFlag,omitempty"` + + // Whether this price defines a software license for its product item. + DefinedSoftwareLicenseFlag *bool `json:"definedSoftwareLicenseFlag,omitempty" xmlrpc:"definedSoftwareLicenseFlag,omitempty"` + + // The hourly price for this item, should this item be part of an hourly pricing package. + HourlyRecurringFee *Float64 `json:"hourlyRecurringFee,omitempty" xmlrpc:"hourlyRecurringFee,omitempty"` + + // The unique identifier of a Product Item Price. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The product item a price is tied to. + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // The unique identifier for a product Item + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // The labor fee for a product item price. + LaborFee *Float64 `json:"laborFee,omitempty" xmlrpc:"laborFee,omitempty"` + + // The id of the [[SoftLayer_Location_Group_Pricing]] that this price is part of. If set to null, the price is considered a standard price, which can be used with any location when ordering. + // + // During order [[SoftLayer_Product_Order/verifyOrder|verification]] and [[SoftLayer_Product_Order/placeOrder|placement]], if a standard price is used, that price may be replaced with a location based price, which does not have this property set to null. The location based price must be part of a [[SoftLayer_Location_Group_Pricing]] that has the location being ordered in order for this to happen. + LocationGroupId *int `json:"locationGroupId,omitempty" xmlrpc:"locationGroupId,omitempty"` + + // On sale flag. + OnSaleFlag *bool `json:"onSaleFlag,omitempty" xmlrpc:"onSaleFlag,omitempty"` + + // The one time fee for a product item price. + OneTimeFee *Float64 `json:"oneTimeFee,omitempty" xmlrpc:"oneTimeFee,omitempty"` + + // A price's total tax amount of the one time fees (oneTimeFee, laborFee, and setupFee). This is only populated after the order is verified via SoftLayer_Product_Order::verifyOrder() + OneTimeFeeTax *Float64 `json:"oneTimeFeeTax,omitempty" xmlrpc:"oneTimeFeeTax,omitempty"` + + // Order options for the category that this price is associated with. + OrderOptions []Product_Item_Category_Order_Option_Type `json:"orderOptions,omitempty" xmlrpc:"orderOptions,omitempty"` + + // A count of + OrderPremiumCount *uint `json:"orderPremiumCount,omitempty" xmlrpc:"orderPremiumCount,omitempty"` + + // no documentation yet + OrderPremiums []Product_Item_Price_Premium `json:"orderPremiums,omitempty" xmlrpc:"orderPremiums,omitempty"` + + // A count of a price's packages under which this item is sold. 
+ PackageCount *uint `json:"packageCount,omitempty" xmlrpc:"packageCount,omitempty"` + + // A count of cross reference for packages + PackageReferenceCount *uint `json:"packageReferenceCount,omitempty" xmlrpc:"packageReferenceCount,omitempty"` + + // cross reference for packages + PackageReferences []Product_Package_Item_Prices `json:"packageReferences,omitempty" xmlrpc:"packageReferences,omitempty"` + + // A price's packages under which this item is sold. + Packages []Product_Package `json:"packages,omitempty" xmlrpc:"packages,omitempty"` + + // A count of a list of preset configurations this price is used in.' + PresetConfigurationCount *uint `json:"presetConfigurationCount,omitempty" xmlrpc:"presetConfigurationCount,omitempty"` + + // A list of preset configurations this price is used in.' + PresetConfigurations []Product_Package_Preset_Configuration `json:"presetConfigurations,omitempty" xmlrpc:"presetConfigurations,omitempty"` + + // The type keyname of this price which can be STANDARD or TIERED. + PriceType *string `json:"priceType,omitempty" xmlrpc:"priceType,omitempty"` + + // The pricing location group that this price is applicable for. Prices that have a pricing location group will only be available for ordering with the locations specified on the location group. + PricingLocationGroup *Location_Group_Pricing `json:"pricingLocationGroup,omitempty" xmlrpc:"pricingLocationGroup,omitempty"` + + // A recurring fee is a fee that happens every billing period. This fee is represented as a floating point decimal in US dollars ($USD). + ProratedRecurringFee *Float64 `json:"proratedRecurringFee,omitempty" xmlrpc:"proratedRecurringFee,omitempty"` + + // A price's tax amount of the recurring fee. This is only populated after the order is verified via SoftLayer_Product_Order::verifyOrder() + ProratedRecurringFeeTax *Float64 `json:"proratedRecurringFeeTax,omitempty" xmlrpc:"proratedRecurringFeeTax,omitempty"` + + // no documentation yet + Quantity *int `json:"quantity,omitempty" xmlrpc:"quantity,omitempty"` + + // A recurring fee is a fee that happens every billing period. This fee is represented as a floating point decimal in US dollars ($USD). + RecurringFee *Float64 `json:"recurringFee,omitempty" xmlrpc:"recurringFee,omitempty"` + + // A price's tax amount of the recurring fee. This is only populated after the order is verified via SoftLayer_Product_Order::verifyOrder() + RecurringFeeTax *Float64 `json:"recurringFeeTax,omitempty" xmlrpc:"recurringFeeTax,omitempty"` + + // The number of server cores required to order this item. This is deprecated. Use [[SoftLayer_Product_Item_Price/getCapacityRestrictionMinimum|getCapacityRestrictionMinimum]] and [[SoftLayer_Product_Item_Price/getCapacityRestrictionMaximum|getCapacityRestrictionMaximum]] + RequiredCoreCount *int `json:"requiredCoreCount,omitempty" xmlrpc:"requiredCoreCount,omitempty"` + + // The setup fee associated with a product item price. + SetupFee *Float64 `json:"setupFee,omitempty" xmlrpc:"setupFee,omitempty"` + + // Used for ordering items on sales orders. + Sort *int `json:"sort,omitempty" xmlrpc:"sort,omitempty"` + + // The minimum threshold for which this tiered usage price begins to apply. The unit for the price is defined by the item to which this belongs, see [[SoftLayer_Product_Item::$units]]. 
+ TierMinimumThreshold *int `json:"tierMinimumThreshold,omitempty" xmlrpc:"tierMinimumThreshold,omitempty"` + + // The rate for a usage based item + UsageRate *Float64 `json:"usageRate,omitempty" xmlrpc:"usageRate,omitempty"` +} + +// The SoftLayer_Product_Item_Price data type gives more information about the item price restrictions. An item price may be restricted to one or more accounts. If the item price is restricted to an account, only that account will see the restriction details. +type Product_Item_Price_Account_Restriction struct { + Entity + + // The account the item price is restricted to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The account id for the item price account restriction. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The unique identifier for the item price account restriction. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The item price that has the account restriction. + ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"` + + // The item price id for the item price account restriction. + ItemPriceId *int `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"` +} + +// no documentation yet +type Product_Item_Price_Attribute struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"` + + // no documentation yet + ItemPriceAttributeType *Product_Item_Price_Attribute_Type `json:"itemPriceAttributeType,omitempty" xmlrpc:"itemPriceAttributeType,omitempty"` + + // no documentation yet + ItemPriceAttributeTypeId *int `json:"itemPriceAttributeTypeId,omitempty" xmlrpc:"itemPriceAttributeTypeId,omitempty"` + + // no documentation yet + ItemPriceId *int `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"` + + // no documentation yet + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Product_Item_Price_Attribute_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` +} + +// no documentation yet +type Product_Item_Price_Premium struct { + Entity + + // no documentation yet + HourlyModifier *Float64 `json:"hourlyModifier,omitempty" xmlrpc:"hourlyModifier,omitempty"` + + // no documentation yet + ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"` + + // no documentation yet + ItemPriceId *int `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"` + + // no documentation yet + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // no documentation yet + MonthlyModifier *Float64 `json:"monthlyModifier,omitempty" xmlrpc:"monthlyModifier,omitempty"` + + // no documentation yet + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // no documentation yet + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` +} + +// The SoftLayer_Product_Item_Requirement data type contains information relating to what requirements, if any, exist for an item. The requiredItemId local property is the item id that is required. 
+type Product_Item_Requirement struct { + Entity + + // Identifier for this record. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Item requirement applies to. + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // This is the id of the item affected by the requirement. + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // This is a custom message to display to the user when this requirement shortfall arises. + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // The product containing the requirement. + Product *Product_Item `json:"product,omitempty" xmlrpc:"product,omitempty"` + + // This is the id of the item required. + RequiredItemId *int `json:"requiredItemId,omitempty" xmlrpc:"requiredItemId,omitempty"` +} + +// no documentation yet +type Product_Item_Resource_Conflict struct { + Entity + + // no documentation yet + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // The unique identifier of the item that contains the conflict. + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // An optional conflict message. + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // no documentation yet + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // The unique identifier of the service offering that is associated with the conflict. + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` + + // The unique identifier of the conflicting type. + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` +} + +// no documentation yet +type Product_Item_Resource_Conflict_Item struct { + Product_Item_Resource_Conflict + + // A product item that conflicts with another product item. + Resource *Product_Item `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Product_Item_Resource_Conflict_Item_Category struct { + Product_Item_Resource_Conflict + + // An item category that conflicts with a product item. + Resource *Product_Item_Category `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Product_Item_Resource_Conflict_Location struct { + Product_Item_Resource_Conflict + + // A location that conflicts with a product item. + Resource *Location `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The item rule data type represents a rule that must be followed when the item assigned to the rule is ordered. The type and operation applied to the resources of the rule will affect how the rule is checked during ordering. +type Product_Item_Rule struct { + Entity + + // The product item that a rule applies to. + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // A count of + ItemCategoryResourceCount *uint `json:"itemCategoryResourceCount,omitempty" xmlrpc:"itemCategoryResourceCount,omitempty"` + + // no documentation yet + ItemCategoryResources []Product_Item_Rule_Resource_Item_Category `json:"itemCategoryResources,omitempty" xmlrpc:"itemCategoryResources,omitempty"` + + // The unique identifier of the item that the rule applies to. 
+ ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // A count of + ItemResourceCount *uint `json:"itemResourceCount,omitempty" xmlrpc:"itemResourceCount,omitempty"` + + // no documentation yet + ItemResources []Product_Item_Rule_Resource_Item `json:"itemResources,omitempty" xmlrpc:"itemResources,omitempty"` + + // A count of + LocationResourceCount *uint `json:"locationResourceCount,omitempty" xmlrpc:"locationResourceCount,omitempty"` + + // no documentation yet + LocationResources []Product_Item_Rule_Resource_Location `json:"locationResources,omitempty" xmlrpc:"locationResources,omitempty"` + + // An optional message shown for when the rule is found to be invalid when ordering. + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // no documentation yet + Operation *string `json:"operation,omitempty" xmlrpc:"operation,omitempty"` + + // The package that a rule is applicable to when ordering. If no package exists, the rule applies to any package. + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // The unique identifier of the service offering that is associated with the rule. + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` + + // A count of + PermissionResourceCount *uint `json:"permissionResourceCount,omitempty" xmlrpc:"permissionResourceCount,omitempty"` + + // no documentation yet + PermissionResources []Product_Item_Rule_Resource_Permission `json:"permissionResources,omitempty" xmlrpc:"permissionResources,omitempty"` + + // A count of resources for this rule that are validated when ordering. + ResourceCount *uint `json:"resourceCount,omitempty" xmlrpc:"resourceCount,omitempty"` + + // Resources for this rule that are validated when ordering. + Resources []Product_Item_Rule_Resource `json:"resources,omitempty" xmlrpc:"resources,omitempty"` + + // The type a rule is. The type affects how the rule is validated when ordering. + Type *Product_Item_Rule_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The unique identifier of the type of resource rule. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// The item rule resource data type represents a resource that is part of an item rule. The item rule resource is used when its item rule is checked on an order. +type Product_Item_Rule_Resource struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The unique identifier of the resource. + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` + + // no documentation yet + Rule *Product_Item_Rule `json:"rule,omitempty" xmlrpc:"rule,omitempty"` + + // The unique identifier of the rule this resource is included in. + RuleId *int `json:"ruleId,omitempty" xmlrpc:"ruleId,omitempty"` +} + +// no documentation yet +type Product_Item_Rule_Resource_Item struct { + Product_Item_Rule_Resource + + // A product item that the associated rule applies to. + Resource *Product_Item `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Product_Item_Rule_Resource_Item_Category struct { + Product_Item_Rule_Resource + + // An item category that the associated rule applies to. + Resource *Product_Item_Category `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Product_Item_Rule_Resource_Location struct { + Product_Item_Rule_Resource + + // A location that the associated rule applies to. 
+ Resource *Location `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Product_Item_Rule_Resource_Permission struct { + Product_Item_Rule_Resource + + // A user permission that the associated rule applies to. + Resource *User_Customer_CustomerPermission_Permission `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The item rule type data type represents the type of an item rule. +type Product_Item_Rule_Type struct { + Entity + + // The identifier for the item rule type. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} + +// The SoftLayer_Product_Item_Server_Group data type details the type of compute service a [[SoftLayer_Product_Item (type)|SoftLayer_Product_Item]] or [[SoftLayer_Product_Package_Preset (type)|SoftLayer_Product_Package_Preset]] belongs to. +type Product_Item_Server_Group struct { + Entity + + // The server group's string identifier + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The server group's friendly name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Product_Item_Tax_Category data type contains the tax categories that are associated with products. +type Product_Item_Tax_Category struct { + Entity + + // An internal identifier for each tax category. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of + ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"` + + // no documentation yet + Items []Product_Item `json:"items,omitempty" xmlrpc:"items,omitempty"` + + // The key name of the tax category. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The name of the tax category. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The status of the tax category. + StatusFlag *int `json:"statusFlag,omitempty" xmlrpc:"statusFlag,omitempty"` +} + +// no documentation yet +type Product_Order struct { + Entity +} + +// The SoftLayer_Product_Package data type contains information about packages from which orders can be generated. Packages contain general information regarding what is in them, where they are currently sold, availability, and pricing. +type Product_Package struct { + Entity + + // A count of the preset configurations available only for the authenticated account and this package. + AccountRestrictedActivePresetCount *uint `json:"accountRestrictedActivePresetCount,omitempty" xmlrpc:"accountRestrictedActivePresetCount,omitempty"` + + // The preset configurations available only for the authenticated account and this package. + AccountRestrictedActivePresets []Product_Package_Preset `json:"accountRestrictedActivePresets,omitempty" xmlrpc:"accountRestrictedActivePresets,omitempty"` + + // The results from this call are similar to [[SoftLayer_Product_Package/getCategories|getCategories]], but these ONLY include account-restricted prices. Not all accounts have restricted pricing. + AccountRestrictedCategories []Product_Item_Category `json:"accountRestrictedCategories,omitempty" xmlrpc:"accountRestrictedCategories,omitempty"` + + // A count of the results from this call are similar to [[SoftLayer_Product_Package/getCategories|getCategories]], but these ONLY include account-restricted prices. Not all accounts have restricted pricing. 
+ AccountRestrictedCategoryCount *uint `json:"accountRestrictedCategoryCount,omitempty" xmlrpc:"accountRestrictedCategoryCount,omitempty"` + + // The flag to indicate if there are any restricted prices in a package for the currently-active account. + AccountRestrictedPricesFlag *bool `json:"accountRestrictedPricesFlag,omitempty" xmlrpc:"accountRestrictedPricesFlag,omitempty"` + + // A count of the available preset configurations for this package. + ActivePresetCount *uint `json:"activePresetCount,omitempty" xmlrpc:"activePresetCount,omitempty"` + + // The available preset configurations for this package. + ActivePresets []Product_Package_Preset `json:"activePresets,omitempty" xmlrpc:"activePresets,omitempty"` + + // A count of a collection of valid RAM items available for purchase in this package. + ActiveRamItemCount *uint `json:"activeRamItemCount,omitempty" xmlrpc:"activeRamItemCount,omitempty"` + + // A collection of valid RAM items available for purchase in this package. + ActiveRamItems []Product_Item `json:"activeRamItems,omitempty" xmlrpc:"activeRamItems,omitempty"` + + // A count of a collection of valid server items available for purchase in this package. + ActiveServerItemCount *uint `json:"activeServerItemCount,omitempty" xmlrpc:"activeServerItemCount,omitempty"` + + // A collection of valid server items available for purchase in this package. + ActiveServerItems []Product_Item `json:"activeServerItems,omitempty" xmlrpc:"activeServerItems,omitempty"` + + // A count of a collection of valid software items available for purchase in this package. + ActiveSoftwareItemCount *uint `json:"activeSoftwareItemCount,omitempty" xmlrpc:"activeSoftwareItemCount,omitempty"` + + // A collection of valid software items available for purchase in this package. + ActiveSoftwareItems []Product_Item `json:"activeSoftwareItems,omitempty" xmlrpc:"activeSoftwareItems,omitempty"` + + // A count of a collection of [[SoftLayer_Product_Item_Price]] objects for pay-as-you-go usage. + ActiveUsagePriceCount *uint `json:"activeUsagePriceCount,omitempty" xmlrpc:"activeUsagePriceCount,omitempty"` + + // A collection of [[SoftLayer_Product_Item_Price]] objects for pay-as-you-go usage. + ActiveUsagePrices []Product_Item_Price `json:"activeUsagePrices,omitempty" xmlrpc:"activeUsagePrices,omitempty"` + + // This flag indicates that the package is an additional service. + AdditionalServiceFlag *bool `json:"additionalServiceFlag,omitempty" xmlrpc:"additionalServiceFlag,omitempty"` + + // A count of + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // no documentation yet + Attributes []Product_Package_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // A count of a collection of valid locations for this package. (Deprecated - Use [[SoftLayer_Product_Package/getRegions|getRegions]]) + AvailableLocationCount *uint `json:"availableLocationCount,omitempty" xmlrpc:"availableLocationCount,omitempty"` + + // A collection of valid locations for this package. (Deprecated - Use [[SoftLayer_Product_Package/getRegions|getRegions]]) + AvailableLocations []Product_Package_Locations `json:"availableLocations,omitempty" xmlrpc:"availableLocations,omitempty"` + + // The maximum number of available disk storage units associated with the servers in a package. 
+ AvailableStorageUnits *uint `json:"availableStorageUnits,omitempty" xmlrpc:"availableStorageUnits,omitempty"` + + // This is a collection of categories ([[SoftLayer_Product_Item_Category]]) associated with a package which can be used for ordering. These categories have several objects prepopulated which are useful when determining the available products for purchase. The categories contain groups ([[SoftLayer_Product_Package_Item_Category_Group]]) that organize the products and prices by similar features. For example, operating systems will be grouped by their manufacturer and virtual server disks will be grouped by their disk type (SAN vs. local). Each group will contain prices ([[SoftLayer_Product_Item_Price]]) which you can use determine the cost of each product. Each price has a product ([[SoftLayer_Product_Item]]) which provides the name and other useful information about the server, service or software you may purchase. + Categories []Product_Item_Category `json:"categories,omitempty" xmlrpc:"categories,omitempty"` + + // The item categories associated with a package, including information detailing which item categories are required as part of a SoftLayer product order. + Configuration []Product_Package_Order_Configuration `json:"configuration,omitempty" xmlrpc:"configuration,omitempty"` + + // A count of the item categories associated with a package, including information detailing which item categories are required as part of a SoftLayer product order. + ConfigurationCount *uint `json:"configurationCount,omitempty" xmlrpc:"configurationCount,omitempty"` + + // A count of a collection of valid RAM items available for purchase in this package. + DefaultRamItemCount *uint `json:"defaultRamItemCount,omitempty" xmlrpc:"defaultRamItemCount,omitempty"` + + // A collection of valid RAM items available for purchase in this package. + DefaultRamItems []Product_Item `json:"defaultRamItems,omitempty" xmlrpc:"defaultRamItems,omitempty"` + + // A count of the package that represents a multi-server solution. (Deprecated) + DeploymentCount *uint `json:"deploymentCount,omitempty" xmlrpc:"deploymentCount,omitempty"` + + // The node type for a package in a solution deployment. + DeploymentNodeType *string `json:"deploymentNodeType,omitempty" xmlrpc:"deploymentNodeType,omitempty"` + + // A count of the packages that are allowed in a multi-server solution. (Deprecated) + DeploymentPackageCount *uint `json:"deploymentPackageCount,omitempty" xmlrpc:"deploymentPackageCount,omitempty"` + + // The packages that are allowed in a multi-server solution. (Deprecated) + DeploymentPackages []Product_Package `json:"deploymentPackages,omitempty" xmlrpc:"deploymentPackages,omitempty"` + + // The solution deployment type. + DeploymentType *string `json:"deploymentType,omitempty" xmlrpc:"deploymentType,omitempty"` + + // The package that represents a multi-server solution. (Deprecated) + Deployments []Product_Package `json:"deployments,omitempty" xmlrpc:"deployments,omitempty"` + + // A generic description of the processor type and count. This includes HTML, so you may want to strip these tags if you plan to use it. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // This flag indicates the package does not allow custom disk partitions. + DisallowCustomDiskPartitions *bool `json:"disallowCustomDiskPartitions,omitempty" xmlrpc:"disallowCustomDiskPartitions,omitempty"` + + // The Softlayer order step is optionally step-based. 
This returns the first SoftLayer_Product_Package_Order_Step in the step-based order process. + FirstOrderStep *Product_Package_Order_Step `json:"firstOrderStep,omitempty" xmlrpc:"firstOrderStep,omitempty"` + + // This is only needed for step-based order verification. We use this for the order forms, but it is not required. This step is the first SoftLayer_Product_Package_Step for this package. Use this for for filtering which item categories are returned as a part of SoftLayer_Product_Package_Order_Configuration. + FirstOrderStepId *int `json:"firstOrderStepId,omitempty" xmlrpc:"firstOrderStepId,omitempty"` + + // Whether the package is a specialized network gateway appliance package. + GatewayApplianceFlag *bool `json:"gatewayApplianceFlag,omitempty" xmlrpc:"gatewayApplianceFlag,omitempty"` + + // This flag indicates that the package supports GPUs. + GpuFlag *bool `json:"gpuFlag,omitempty" xmlrpc:"gpuFlag,omitempty"` + + // Determines whether the package contains prices that can be ordered hourly. + HourlyBillingAvailableFlag *bool `json:"hourlyBillingAvailableFlag,omitempty" xmlrpc:"hourlyBillingAvailableFlag,omitempty"` + + // Packages with this flag do not allow monthly orders. + HourlyOnlyOrders *bool `json:"hourlyOnlyOrders,omitempty" xmlrpc:"hourlyOnlyOrders,omitempty"` + + // A package's internal identifier. Everything regarding a SoftLayer_Product_Package is tied back to this id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + IsActive *int `json:"isActive,omitempty" xmlrpc:"isActive,omitempty"` + + // The item-item conflicts associated with a package. + ItemConflicts []Product_Item_Resource_Conflict `json:"itemConflicts,omitempty" xmlrpc:"itemConflicts,omitempty"` + + // A count of a collection of valid items available for purchase in this package. + ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"` + + // The item-location conflicts associated with a package. + ItemLocationConflicts []Product_Item_Resource_Conflict `json:"itemLocationConflicts,omitempty" xmlrpc:"itemLocationConflicts,omitempty"` + + // A count of a collection of SoftLayer_Product_Item_Prices that are valid for this package. + ItemPriceCount *uint `json:"itemPriceCount,omitempty" xmlrpc:"itemPriceCount,omitempty"` + + // A count of cross reference for item prices + ItemPriceReferenceCount *uint `json:"itemPriceReferenceCount,omitempty" xmlrpc:"itemPriceReferenceCount,omitempty"` + + // cross reference for item prices + ItemPriceReferences []Product_Package_Item_Prices `json:"itemPriceReferences,omitempty" xmlrpc:"itemPriceReferences,omitempty"` + + // A collection of SoftLayer_Product_Item_Prices that are valid for this package. + ItemPrices []Product_Item_Price `json:"itemPrices,omitempty" xmlrpc:"itemPrices,omitempty"` + + // A collection of valid items available for purchase in this package. + Items []Product_Item `json:"items,omitempty" xmlrpc:"items,omitempty"` + + // A unique key name for the package. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A count of a collection of valid locations for this package. (Deprecated - Use [[SoftLayer_Product_Package/getRegions|getRegions]]) + LocationCount *uint `json:"locationCount,omitempty" xmlrpc:"locationCount,omitempty"` + + // A collection of valid locations for this package. 
(Deprecated - Use [[SoftLayer_Product_Package/getRegions|getRegions]]) + Locations []Location `json:"locations,omitempty" xmlrpc:"locations,omitempty"` + + // The lowest server [[SoftLayer_Product_Item_Price]] related to this package. + LowestServerPrice *Product_Item_Price `json:"lowestServerPrice,omitempty" xmlrpc:"lowestServerPrice,omitempty"` + + // The maximum available network speed associated with the package. + MaximumPortSpeed *uint `json:"maximumPortSpeed,omitempty" xmlrpc:"maximumPortSpeed,omitempty"` + + // The minimum available network speed associated with the package. + MinimumPortSpeed *uint `json:"minimumPortSpeed,omitempty" xmlrpc:"minimumPortSpeed,omitempty"` + + // This flag indicates that this is a MongoDB engineered package. (Deprecated) + MongoDbEngineeredFlag *bool `json:"mongoDbEngineeredFlag,omitempty" xmlrpc:"mongoDbEngineeredFlag,omitempty"` + + // The description of the package. For server packages, this is usually a detailed description of processor type and count. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Whether the package is not in compliance with EU support. + NonEuCompliantFlag *bool `json:"nonEuCompliantFlag,omitempty" xmlrpc:"nonEuCompliantFlag,omitempty"` + + // A count of the premium price modifiers associated with the [[SoftLayer_Product_Item_Price]] and [[SoftLayer_Location]] objects in a package. + OrderPremiumCount *uint `json:"orderPremiumCount,omitempty" xmlrpc:"orderPremiumCount,omitempty"` + + // The premium price modifiers associated with the [[SoftLayer_Product_Item_Price]] and [[SoftLayer_Location]] objects in a package. + OrderPremiums []Product_Item_Price_Premium `json:"orderPremiums,omitempty" xmlrpc:"orderPremiums,omitempty"` + + // This flag indicates if the package may be available in PoP locations in addition to Datacenters. + PopLocationAvailabilityFlag *bool `json:"popLocationAvailabilityFlag,omitempty" xmlrpc:"popLocationAvailabilityFlag,omitempty"` + + // This flag indicates the package is pre-configured. (Deprecated) + PreconfiguredFlag *bool `json:"preconfiguredFlag,omitempty" xmlrpc:"preconfiguredFlag,omitempty"` + + // Whether the package requires the user to define a preset configuration. + PresetConfigurationRequiredFlag *bool `json:"presetConfigurationRequiredFlag,omitempty" xmlrpc:"presetConfigurationRequiredFlag,omitempty"` + + // Whether the package prevents the user from specifying a Vlan. + PreventVlanSelectionFlag *bool `json:"preventVlanSelectionFlag,omitempty" xmlrpc:"preventVlanSelectionFlag,omitempty"` + + // This flag indicates the package is for a private hosted cloud deployment. (Deprecated) + PrivateHostedCloudPackageFlag *bool `json:"privateHostedCloudPackageFlag,omitempty" xmlrpc:"privateHostedCloudPackageFlag,omitempty"` + + // The server role of the private hosted cloud deployment. (Deprecated) + PrivateHostedCloudPackageType *string `json:"privateHostedCloudPackageType,omitempty" xmlrpc:"privateHostedCloudPackageType,omitempty"` + + // Whether the package only has access to the private network. + PrivateNetworkOnlyFlag *bool `json:"privateNetworkOnlyFlag,omitempty" xmlrpc:"privateNetworkOnlyFlag,omitempty"` + + // Whether the package is a specialized mass storage QuantaStor package. (Deprecated) + QuantaStorPackageFlag *bool `json:"quantaStorPackageFlag,omitempty" xmlrpc:"quantaStorPackageFlag,omitempty"` + + // This flag indicates the package does not allow different disks with RAID. 
+ RaidDiskRestrictionFlag *bool `json:"raidDiskRestrictionFlag,omitempty" xmlrpc:"raidDiskRestrictionFlag,omitempty"` + + // This flag determines if the package contains a redundant power supply product. + RedundantPowerFlag *bool `json:"redundantPowerFlag,omitempty" xmlrpc:"redundantPowerFlag,omitempty"` + + // A count of the regional locations that a package is available in. + RegionCount *uint `json:"regionCount,omitempty" xmlrpc:"regionCount,omitempty"` + + // The regional locations that a package is available in. + Regions []Location_Region `json:"regions,omitempty" xmlrpc:"regions,omitempty"` + + // The resource group template that describes a multi-server solution. (Deprecated) + ResourceGroupTemplate *Resource_Group_Template `json:"resourceGroupTemplate,omitempty" xmlrpc:"resourceGroupTemplate,omitempty"` + + // This currently contains no information but is here for future use. + SubDescription *string `json:"subDescription,omitempty" xmlrpc:"subDescription,omitempty"` + + // The top level category code for this service offering. + TopLevelItemCategoryCode *string `json:"topLevelItemCategoryCode,omitempty" xmlrpc:"topLevelItemCategoryCode,omitempty"` + + // The type of service offering. This property can be used to help filter packages. + Type *Product_Package_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The server unit size this package will match to. + UnitSize *int `json:"unitSize,omitempty" xmlrpc:"unitSize,omitempty"` +} + +// no documentation yet +type Product_Package_Attribute struct { + Entity + + // no documentation yet + AttributeType *Product_Package_Attribute_Type `json:"attributeType,omitempty" xmlrpc:"attributeType,omitempty"` + + // no documentation yet + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // no documentation yet + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Product_Package_Attribute_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// This is deprecated. +type Product_Package_Inventory struct { + Entity + + // DEPRECATED. The number of units available for purchase in inventory for a single item in a single datacenter. + AvailableInventoryCount *int `json:"availableInventoryCount,omitempty" xmlrpc:"availableInventoryCount,omitempty"` + + // The product package item that is associated with an inventory record. + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // DEPRECATED. The unique identifier of the product item that an inventory record is associated with. + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // The datacenter that an inventory record is located in. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // DEPRECATED. The unique identifier of the datacenter that an inventory record is located in. + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // DEPRECATED. The date that an inventory record was last updated. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // DEPRECATED. Whether an inventory record is marked as "overstock". 
+ OverstockFlag *int `json:"overstockFlag,omitempty" xmlrpc:"overstockFlag,omitempty"` + + // The product package that is associated with an inventory record. + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // DEPRECATED. The unique identifier of the product package that an inventory record is associated with. + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` +} + +// This class is used to organize categories for a service offering. A service offering (usually) contains multiple categories (e.g., server, os, disk0, ram). This class allows us to organize the prices into related item category groups. +type Product_Package_Item_Category_Group struct { + Entity + + // no documentation yet + Category *Product_Item_Category `json:"category,omitempty" xmlrpc:"category,omitempty"` + + // The item category id associated with this group. + ItemCategoryId *int `json:"itemCategoryId,omitempty" xmlrpc:"itemCategoryId,omitempty"` + + // no documentation yet + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // The service offering id associated with this group. + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` + + // A count of + PriceCount *uint `json:"priceCount,omitempty" xmlrpc:"priceCount,omitempty"` + + // no documentation yet + Prices []Product_Item_Price `json:"prices,omitempty" xmlrpc:"prices,omitempty"` + + // The sort value for this group. + Sort *int `json:"sort,omitempty" xmlrpc:"sort,omitempty"` + + // An optional title associated with this group. E.g., for operating systems, this will be the manufacturer. + Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"` +} + +// The SoftLayer_Product_Package_Item_Prices contains price to package cross references Relates a category, price and item to a bundle. Match bundle ids to see all items and prices in a particular bundle. +type Product_Package_Item_Prices struct { + Entity + + // The unique identifier for SoftLayer_Product_Package_Item_Price. This is only needed as a reference. The important data is the itemPriceId property. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The item price to which this object belongs. The item price has details regarding cost for the item it belongs to. + ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"` + + // The SoftLayer_Product_Item_Price id. This value is to be used when placing orders. To get more information about this item price, go from the item price to the item description + ItemPriceId *int `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"` + + // The package to which this object belongs. + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // The Package ID to which this price reference belongs + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` +} + +// This data type is a cross-reference between the SoftLayer_Product_Package and the SoftLayer_Product_Item(s) that belong in the SoftLayer_Product_Package. +type Product_Package_Items struct { + Entity + + // The unique identifier for this object. It is not used anywhere but in this object. + Id *string `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The item to which this object belongs. + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // The SoftLayer_Product_Item id to which this instance of the object belongs. 
+ ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // The package to which this object belongs. + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // The SoftLayer_Product_Package id to which this instance of the object belongs. + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` +} + +// Most packages are available in many locations. This object describes that availability for each package. +type Product_Package_Locations struct { + Entity + + // This describes the availability of the package tied to this location. + DeliveryTimeInformation *string `json:"deliveryTimeInformation,omitempty" xmlrpc:"deliveryTimeInformation,omitempty"` + + // A simple flag which describes whether or not this location is available for this package. + IsAvailable *int `json:"isAvailable,omitempty" xmlrpc:"isAvailable,omitempty"` + + // The location to which this object belongs. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // The location id tied to this object. + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // The package to which this object belongs. + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // The SoftLayer_Product_Package ID tied to this object. + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` +} + +// This datatype describes the item categories that are required for each package to be ordered. For instance, for package 2, there will be many required categories. When submitting an order for a server, there must be at most 1 price for each category whose "isRequired" is set. Examples of required categories: - server - ram - bandwidth - disk0 +// +// There are others, but these are the main ones. For each required category, a SoftLayer_Product_Item_Price must be chosen that is valid for the package. +// +// +type Product_Package_Order_Configuration struct { + Entity + + // The error message displayed if the submitted order does not contain this item category, if it is required. + ErrorMessage *string `json:"errorMessage,omitempty" xmlrpc:"errorMessage,omitempty"` + + // The unique identifier for this object. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // This is a flag which tells SoftLayer_Product_Order::verifyOrder() whether or not this category is required. If this is set, then the order submitted must contain a SoftLayer_Product_Item_Price with this category as part of the order. + IsRequired *int `json:"isRequired,omitempty" xmlrpc:"isRequired,omitempty"` + + // The item category for this configuration instance. + ItemCategory *Product_Item_Category `json:"itemCategory,omitempty" xmlrpc:"itemCategory,omitempty"` + + // The SoftLayer_Product_Item_Category. + ItemCategoryId *int `json:"itemCategoryId,omitempty" xmlrpc:"itemCategoryId,omitempty"` + + // The order step ID for this particular option in the package. + OrderStepId *int `json:"orderStepId,omitempty" xmlrpc:"orderStepId,omitempty"` + + // The package to which this instance belongs. + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // The PackageId tied to this instance. + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` + + // This is an integer used to show the order in which each item Category should be displayed. This is merely the suggested order. + Sort *int `json:"sort,omitempty" xmlrpc:"sort,omitempty"` + + // The step to which this instance belongs. 
+ Step *Product_Package_Order_Step `json:"step,omitempty" xmlrpc:"step,omitempty"` +} + +// Each package has at least 1 step to the ordering process. For server orders, there are many. Each step has certain item categories which are displayed. This type describes the steps for each package. +type Product_Package_Order_Step struct { + Entity + + // The unique identifier for this object. It is not used anywhere but in this object. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of the next steps in the ordering process for the package tied to this object, including this step. + InclusivePreviousStepCount *uint `json:"inclusivePreviousStepCount,omitempty" xmlrpc:"inclusivePreviousStepCount,omitempty"` + + // The next steps in the ordering process for the package tied to this object, including this step. + InclusivePreviousSteps []Product_Package_Order_Step_Next `json:"inclusivePreviousSteps,omitempty" xmlrpc:"inclusivePreviousSteps,omitempty"` + + // A count of the next steps in the ordering process for the package tied to this object. + NextStepCount *uint `json:"nextStepCount,omitempty" xmlrpc:"nextStepCount,omitempty"` + + // The next steps in the ordering process for the package tied to this object. + NextSteps []Product_Package_Order_Step_Next `json:"nextSteps,omitempty" xmlrpc:"nextSteps,omitempty"` + + // A count of the item to which this object belongs. + PreviousStepCount *uint `json:"previousStepCount,omitempty" xmlrpc:"previousStepCount,omitempty"` + + // The item to which this object belongs. + PreviousSteps []Product_Package_Order_Step_Next `json:"previousSteps,omitempty" xmlrpc:"previousSteps,omitempty"` + + // The number of the step in the order process for this package. These are sequential and only needed for step-based ordering. + Step *string `json:"step,omitempty" xmlrpc:"step,omitempty"` +} + +// This datatype simply describes which steps are next in line for ordering. +type Product_Package_Order_Step_Next struct { + Entity + + // The unique identifier for this object. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The unique identifier for SoftLayer_Product_Package_Order_Step for the next step in the process. + NextOrderStepId *int `json:"nextOrderStepId,omitempty" xmlrpc:"nextOrderStepId,omitempty"` + + // The unique identifier for SoftLayer_Product_Package_Order_Step for the current step. + OrderStepId *int `json:"orderStepId,omitempty" xmlrpc:"orderStepId,omitempty"` + + // The SoftLayer_Product_Package_Order_Step to which this object belongs. + Step *Product_Package_Order_Step `json:"step,omitempty" xmlrpc:"step,omitempty"` +} + +// Package presets are used to simplify ordering by eliminating the need for price ids when submitting orders. +// +// Orders submitted with a preset id defined will use the prices included in the package preset. Prices submitted on an order with a preset id will replace the prices included in the package preset for that prices category. If the package preset has a fixed configuration flag (fixedConfigurationFlag) set then the prices included in the preset configuration cannot be replaced by prices submitted on the order. The only exception to the fixed configuration flag would be if a price submitted on the order is an account-restricted price for the same product item. 
+type Product_Package_Preset struct { + Entity + + // no documentation yet + AvailableStorageUnits *uint `json:"availableStorageUnits,omitempty" xmlrpc:"availableStorageUnits,omitempty"` + + // The item categories that are included in this package preset configuration. + Categories []Product_Item_Category `json:"categories,omitempty" xmlrpc:"categories,omitempty"` + + // A count of the item categories that are included in this package preset configuration. + CategoryCount *uint `json:"categoryCount,omitempty" xmlrpc:"categoryCount,omitempty"` + + // The compute family this configuration belongs to. + ComputeGroup *Product_Item_Server_Group `json:"computeGroup,omitempty" xmlrpc:"computeGroup,omitempty"` + + // The preset configuration (category and price). + Configuration []Product_Package_Preset_Configuration `json:"configuration,omitempty" xmlrpc:"configuration,omitempty"` + + // A count of the preset configuration (category and price). + ConfigurationCount *uint `json:"configurationCount,omitempty" xmlrpc:"configurationCount,omitempty"` + + // A description of the package preset. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // When true this preset is only allowed to upgrade/downgrade to other presets in the same compute family. + DisallowedComputeGroupUpgradeFlag *bool `json:"disallowedComputeGroupUpgradeFlag,omitempty" xmlrpc:"disallowedComputeGroupUpgradeFlag,omitempty"` + + // A package preset with this flag set will not allow the prices defined in the preset configuration to be overridden during order placement. + FixedConfigurationFlag *bool `json:"fixedConfigurationFlag,omitempty" xmlrpc:"fixedConfigurationFlag,omitempty"` + + // A preset's internal identifier. Everything regarding a SoftLayer_Product_Package_Preset is tied back to this id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The status of the package preset. + IsActive *string `json:"isActive,omitempty" xmlrpc:"isActive,omitempty"` + + // The key name of the package preset. For the base configuration of a package the preset key name is "DEFAULT". + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A count of the locations this preset configuration is available in. If empty the preset is available in all locations the package is available in. + LocationCount *uint `json:"locationCount,omitempty" xmlrpc:"locationCount,omitempty"` + + // The locations this preset configuration is available in. If empty the preset is available in all locations the package is available in. + Locations []Location `json:"locations,omitempty" xmlrpc:"locations,omitempty"` + + // The lowest server prices related to this package preset. + LowestPresetServerPrice *Product_Item_Price `json:"lowestPresetServerPrice,omitempty" xmlrpc:"lowestPresetServerPrice,omitempty"` + + // The name of the package preset. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The package this preset belongs to. + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // The item categories associated with a package preset, including information detailing which item categories are required as part of a SoftLayer product order. + PackageConfiguration []Product_Package_Order_Configuration `json:"packageConfiguration,omitempty" xmlrpc:"packageConfiguration,omitempty"` + + // A count of the item categories associated with a package preset, including information detailing which item categories are required as part of a SoftLayer product order. 
+ PackageConfigurationCount *uint `json:"packageConfigurationCount,omitempty" xmlrpc:"packageConfigurationCount,omitempty"` + + // The package id for the package this preset belongs to. + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` + + // A count of the item prices that are included in this package preset configuration. + PriceCount *uint `json:"priceCount,omitempty" xmlrpc:"priceCount,omitempty"` + + // The item prices that are included in this package preset configuration. + Prices []Product_Item_Price `json:"prices,omitempty" xmlrpc:"prices,omitempty"` + + // A count of describes how all disks in this preset will be configured. + StorageGroupTemplateArrayCount *uint `json:"storageGroupTemplateArrayCount,omitempty" xmlrpc:"storageGroupTemplateArrayCount,omitempty"` + + // Describes how all disks in this preset will be configured. + StorageGroupTemplateArrays []Configuration_Storage_Group_Template_Group `json:"storageGroupTemplateArrays,omitempty" xmlrpc:"storageGroupTemplateArrays,omitempty"` + + // The starting hourly price for this configuration. Additional options not defined in the preset may increase the cost. + TotalMinimumHourlyFee *Float64 `json:"totalMinimumHourlyFee,omitempty" xmlrpc:"totalMinimumHourlyFee,omitempty"` + + // The starting monthly price for this configuration. Additional options not defined in the preset may increase the cost. + TotalMinimumRecurringFee *Float64 `json:"totalMinimumRecurringFee,omitempty" xmlrpc:"totalMinimumRecurringFee,omitempty"` +} + +// Package preset attributes contain supplementary information for a package preset. +type Product_Package_Preset_Attribute struct { + Entity + + // no documentation yet + AttributeType *Product_Package_Preset_Attribute_Type `json:"attributeType,omitempty" xmlrpc:"attributeType,omitempty"` + + // The internal identifier of the type of attribute that a package preset attribute belongs to. + AttributeTypeId *int `json:"attributeTypeId,omitempty" xmlrpc:"attributeTypeId,omitempty"` + + // A package preset attribute's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Preset *Product_Package_Preset `json:"preset,omitempty" xmlrpc:"preset,omitempty"` + + // The internal identifier of the package preset an attribute belongs to. + PresetId *int `json:"presetId,omitempty" xmlrpc:"presetId,omitempty"` + + // A package preset's attribute value. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// SoftLayer_Product_Package_Preset_Attribute_Type models the type of attribute that can be assigned to a package preset. +type Product_Package_Preset_Attribute_Type struct { + Entity + + // A brief description of a package preset attribute type. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A package preset attribute type's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A package preset attribute type's key name. This is typically a shorter version of an attribute type's name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A package preset attribute type's name. 
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Product_Package_Preset_Configuration struct { + Entity + + // no documentation yet + Category *Product_Item_Category `json:"category,omitempty" xmlrpc:"category,omitempty"` + + // no documentation yet + PackagePreset *Product_Package_Preset `json:"packagePreset,omitempty" xmlrpc:"packagePreset,omitempty"` + + // no documentation yet + Price *Product_Item_Price `json:"price,omitempty" xmlrpc:"price,omitempty"` +} + +// The SoftLayer_Product_Package_Server data type contains summarized information for bare metal servers regarding pricing, processor stats, and feature sets. +type Product_Package_Server struct { + Entity + + // no documentation yet + Catalog *Product_Catalog `json:"catalog,omitempty" xmlrpc:"catalog,omitempty"` + + // The unique identifier of a [[SoftLayer_Product_Catalog]]. + CatalogId *int `json:"catalogId,omitempty" xmlrpc:"catalogId,omitempty"` + + // Comma-separated list of datacenter names this server is available in + Datacenters *string `json:"datacenters,omitempty" xmlrpc:"datacenters,omitempty"` + + // The minimum amount of RAM the server is configured with. + DefaultRamCapacity *Float64 `json:"defaultRamCapacity,omitempty" xmlrpc:"defaultRamCapacity,omitempty"` + + // Flag to indicate if the server configuration supports dual path network routing. + DualPathNetworkFlag *bool `json:"dualPathNetworkFlag,omitempty" xmlrpc:"dualPathNetworkFlag,omitempty"` + + // no documentation yet + FlexCoreServerFlag *bool `json:"flexCoreServerFlag,omitempty" xmlrpc:"flexCoreServerFlag,omitempty"` + + // Indicates whether or not the server contains a GPU. + GpuFlag *bool `json:"gpuFlag,omitempty" xmlrpc:"gpuFlag,omitempty"` + + // Flag to determine if a server is available for hourly billing. + HourlyBillingFlag *bool `json:"hourlyBillingFlag,omitempty" xmlrpc:"hourlyBillingFlag,omitempty"` + + // The unique identifier of a [[SoftLayer_Product_Package_Server]]. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // The unique identifier of a [[SoftLayer_Product_Item]]. + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // no documentation yet + ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"` + + // The unique identifier of a [[SoftLayer_Product_Item_Price]]. + ItemPriceId *int `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"` + + // The maximum number of hard drives the server can support. + MaximumDriveCount *int `json:"maximumDriveCount,omitempty" xmlrpc:"maximumDriveCount,omitempty"` + + // The maximum available network speed for the server. + MaximumPortSpeed *Float64 `json:"maximumPortSpeed,omitempty" xmlrpc:"maximumPortSpeed,omitempty"` + + // The maximum amount of RAM the server can support. + MaximumRamCapacity *Float64 `json:"maximumRamCapacity,omitempty" xmlrpc:"maximumRamCapacity,omitempty"` + + // The minimum available network speed for the server. + MinimumPortSpeed *Float64 `json:"minimumPortSpeed,omitempty" xmlrpc:"minimumPortSpeed,omitempty"` + + // no documentation yet + NetworkGatewayApplianceRoleFlag *bool `json:"networkGatewayApplianceRoleFlag,omitempty" xmlrpc:"networkGatewayApplianceRoleFlag,omitempty"` + + // DEPRECATED. Indicates whether or not the server is being sold as part of an outlet package. 
+ OutletFlag *bool `json:"outletFlag,omitempty" xmlrpc:"outletFlag,omitempty"` + + // no documentation yet + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // The unique identifier of a [[SoftLayer_Product_Package]]. + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` + + // The type of service offering/package. + PackageType *string `json:"packageType,omitempty" xmlrpc:"packageType,omitempty"` + + // Flag to indicate if the server is an IBM Power server. + PowerServerFlag *bool `json:"powerServerFlag,omitempty" xmlrpc:"powerServerFlag,omitempty"` + + // no documentation yet + Preset *Product_Package_Preset `json:"preset,omitempty" xmlrpc:"preset,omitempty"` + + // The unique identifier of a [[SoftLayer_Product_Package_Preset]]. + PresetId *int `json:"presetId,omitempty" xmlrpc:"presetId,omitempty"` + + // Indicates whether or not the server can only be configured with a private network. + PrivateNetworkOnlyFlag *bool `json:"privateNetworkOnlyFlag,omitempty" xmlrpc:"privateNetworkOnlyFlag,omitempty"` + + // The processor's bus speed. + ProcessorBusSpeed *string `json:"processorBusSpeed,omitempty" xmlrpc:"processorBusSpeed,omitempty"` + + // The amount of cache the processor has. + ProcessorCache *string `json:"processorCache,omitempty" xmlrpc:"processorCache,omitempty"` + + // The number of cores in each processor. + ProcessorCores *int `json:"processorCores,omitempty" xmlrpc:"processorCores,omitempty"` + + // The number of processors the server has. + ProcessorCount *int `json:"processorCount,omitempty" xmlrpc:"processorCount,omitempty"` + + // The manufacturer of the server's processor. + ProcessorManufacturer *string `json:"processorManufacturer,omitempty" xmlrpc:"processorManufacturer,omitempty"` + + // The model of the server's processor. + ProcessorModel *string `json:"processorModel,omitempty" xmlrpc:"processorModel,omitempty"` + + // The name of the server's processor. + ProcessorName *string `json:"processorName,omitempty" xmlrpc:"processorName,omitempty"` + + // The processor speed. + ProcessorSpeed *string `json:"processorSpeed,omitempty" xmlrpc:"processorSpeed,omitempty"` + + // The name of the server product. + ProductName *string `json:"productName,omitempty" xmlrpc:"productName,omitempty"` + + // Indicates whether or not the server has the capability to support a redundant power supply. + RedundantPowerFlag *bool `json:"redundantPowerFlag,omitempty" xmlrpc:"redundantPowerFlag,omitempty"` + + // Flag to indicate if the server is SAP certified. + SapCertifiedServerFlag *bool `json:"sapCertifiedServerFlag,omitempty" xmlrpc:"sapCertifiedServerFlag,omitempty"` + + // The hourly starting price for the server. This includes a sum of all the minimum required items, including RAM and hard drives. Not all servers are available hourly. + StartingHourlyPrice *Float64 `json:"startingHourlyPrice,omitempty" xmlrpc:"startingHourlyPrice,omitempty"` + + // The monthly starting price for the server. This includes a sum of all the minimum required items, including RAM and hard drives. + StartingMonthlyPrice *Float64 `json:"startingMonthlyPrice,omitempty" xmlrpc:"startingMonthlyPrice,omitempty"` + + // The total number of processor cores available for the server. + TotalCoreCount *int `json:"totalCoreCount,omitempty" xmlrpc:"totalCoreCount,omitempty"` + + // Flag to indicate if the server configuration supports TXT/TPM. + TxtTpmFlag *bool `json:"txtTpmFlag,omitempty" xmlrpc:"txtTpmFlag,omitempty"` + + // The size of the server. 
+ UnitSize *int `json:"unitSize,omitempty" xmlrpc:"unitSize,omitempty"` + + // Flag to indicate if the server is a VMware vSAN Node configuration. + VmwareVsanNodeFlag *bool `json:"vmwareVsanNodeFlag,omitempty" xmlrpc:"vmwareVsanNodeFlag,omitempty"` +} + +// The [[SoftLayer_Product_Package_Server_Option]] data type contains various data points associated with package servers that can be used in selection criteria. +type Product_Package_Server_Option struct { + Entity + + // The unique identifier of a Catalog. + CatalogId *int `json:"catalogId,omitempty" xmlrpc:"catalogId,omitempty"` + + // A description of the option. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The unique identifier of a Package Server Option. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The type of option. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The value of the option. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The [[SoftLayer_Product_Package_Type]] object indicates the type for a service offering (package). The type can be used to filter packages. For example, if you are looking for the package representing virtual servers, you can filter on the type's key name of '''VIRTUAL_SERVER_INSTANCE'''. For bare metal servers by core or CPU, filter on '''BARE_METAL_CORE''' or '''BARE_METAL_CPU''', respectively. +type Product_Package_Type struct { + Entity + + // The package type's unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The unique key name of the package type. Use this value when filtering. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The name of the package type. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of all the packages associated with the given package type. + PackageCount *uint `json:"packageCount,omitempty" xmlrpc:"packageCount,omitempty"` + + // All the packages associated with the given package type. + Packages []Product_Package `json:"packages,omitempty" xmlrpc:"packages,omitempty"` +} + +// The SoftLayer_Product_Upgrade_Request data type contains general information relating to a hardware, virtual server, or service upgrade. It also relates a [[SoftLayer_Billing_Order]] to a [[SoftLayer_Ticket]]. +type Product_Upgrade_Request struct { + Entity + + // The account that an order belongs to + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The unique internal id of a SoftLayer account + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // Indicates that the upgrade request has completed or has been cancelled. + CompletedFlag *bool `json:"completedFlag,omitempty" xmlrpc:"completedFlag,omitempty"` + + // The date an upgrade request was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The unique internal id of the last modified user + EmployeeId *int `json:"employeeId,omitempty" xmlrpc:"employeeId,omitempty"` + + // The unique internal id of the virtual server that an upgrade will be done on + GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"` + + // The unique internal id of the hardware that an upgrade will be done on + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // An upgrade request's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // This is the invoice associated with the upgrade request. 
For hourly servers or services, an invoice will not be available. + Invoice *Billing_Invoice `json:"invoice,omitempty" xmlrpc:"invoice,omitempty"` + + // The time that system admin starts working on the order item. This is used for upgrade orders. + MaintenanceStartTimeUtc *Time `json:"maintenanceStartTimeUtc,omitempty" xmlrpc:"maintenanceStartTimeUtc,omitempty"` + + // The date an upgrade request was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // An order record associated to the upgrade request + Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"` + + // The unique internal id of the order that an upgrade request is related to + OrderId *int `json:"orderId,omitempty" xmlrpc:"orderId,omitempty"` + + // The total amount of fees + OrderTotal *Float64 `json:"orderTotal,omitempty" xmlrpc:"orderTotal,omitempty"` + + // The prorated total amount of recurring fees + ProratedTotal *Float64 `json:"proratedTotal,omitempty" xmlrpc:"proratedTotal,omitempty"` + + // A server object associated with the upgrade request if any. + Server *Hardware `json:"server,omitempty" xmlrpc:"server,omitempty"` + + // The current status of the upgrade request. + Status *Product_Upgrade_Request_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The unique internal id of an upgrade status + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // The ticket that is used to coordinate the upgrade process. + Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"` + + // The unique internal id of the ticket related to an upgrade request + TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"` + + // The user that placed the order. + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // The unique internal id of the customer who placed the order + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` + + // A virtual server object associated with the upgrade request if any. + VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"` +} + +// The SoftLayer_Product_Upgrade_Request_Status data type contains detailed information relating to a hardware or software upgrade request. +type Product_Upgrade_Request_Status struct { + Entity + + // The detailed description of an upgrade request status. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // An internal identifier of an upgrade request status. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The name of an upgrade request status. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The status code of an upgrade request status. + StatusCode *string `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/provisioning.go b/vendor/github.com/softlayer/softlayer-go/datatypes/provisioning.go new file mode 100644 index 000000000000..e79fac22918c --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/provisioning.go @@ -0,0 +1,303 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Provisioning_Hook contains all the information needed to add a hook into a server/Virtual provision and os reload. +type Provisioning_Hook struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The ID of the account the script belongs to. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + HookType *Provisioning_Hook_Type `json:"hookType,omitempty" xmlrpc:"hookType,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The name of the hook. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The ID of the type of hook the script is identified as. Currently only CUSTOMER_PROVIDED_HOOK has been implemented. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // The endpoint that the script will be downloaded from (USERNAME AND PASSWORD SHOULD BE INCLUDED HERE). If the endpoint is HTTP, the script will only be downloaded. If the endpoint is HTTPS, the script will be downloaded and executed. + Uri *string `json:"uri,omitempty" xmlrpc:"uri,omitempty"` +} + +// no documentation yet +type Provisioning_Hook_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Provisioning_Maintenance_Classification represent a maintenance type for the specific hardware maintenance desired. +type Provisioning_Maintenance_Classification struct { + Entity + + // The id of the maintenance classification. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ItemCategories []Provisioning_Maintenance_Classification_Item_Category `json:"itemCategories,omitempty" xmlrpc:"itemCategories,omitempty"` + + // A count of + ItemCategoryCount *uint `json:"itemCategoryCount,omitempty" xmlrpc:"itemCategoryCount,omitempty"` + + // The number of slots required for the maintenance classification. + Slots *int `json:"slots,omitempty" xmlrpc:"slots,omitempty"` + + // The type or name of the maintenance classification. 
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Provisioning_Maintenance_Classification_Item_Category struct { + Entity + + // no documentation yet + ItemCategoryId *int `json:"itemCategoryId,omitempty" xmlrpc:"itemCategoryId,omitempty"` + + // no documentation yet + MaintenanceClassification *Provisioning_Maintenance_Classification `json:"maintenanceClassification,omitempty" xmlrpc:"maintenanceClassification,omitempty"` + + // no documentation yet + MaintenanceClassificationId *int `json:"maintenanceClassificationId,omitempty" xmlrpc:"maintenanceClassificationId,omitempty"` +} + +// The SoftLayer_Provisioning_Maintenance_Slots represent the available slots for a given maintenance window at a SoftLayer data center. +type Provisioning_Maintenance_Slots struct { + Entity + + // The available slots for a maintenance window. + AvailableSlots *int `json:"availableSlots,omitempty" xmlrpc:"availableSlots,omitempty"` +} + +// no documentation yet +type Provisioning_Maintenance_Ticket struct { + Entity + + // no documentation yet + AvailableSlots *Provisioning_Maintenance_Slots `json:"availableSlots,omitempty" xmlrpc:"availableSlots,omitempty"` + + // no documentation yet + MaintClassId *int `json:"maintClassId,omitempty" xmlrpc:"maintClassId,omitempty"` + + // no documentation yet + MaintWindowId *int `json:"maintWindowId,omitempty" xmlrpc:"maintWindowId,omitempty"` + + // no documentation yet + MaintenanceClass *Provisioning_Maintenance_Classification `json:"maintenanceClass,omitempty" xmlrpc:"maintenanceClass,omitempty"` + + // no documentation yet + MaintenanceDate *Time `json:"maintenanceDate,omitempty" xmlrpc:"maintenanceDate,omitempty"` + + // no documentation yet + Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"` + + // no documentation yet + TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"` +} + +// The SoftLayer_Provisioning_Maintenance_Window represent a time window that SoftLayer performs a hardware or software maintenance and upgrades. +type Provisioning_Maintenance_Window struct { + Entity + + // The date and time a maintenance window is scheduled to begin. + BeginDate *Time `json:"beginDate,omitempty" xmlrpc:"beginDate,omitempty"` + + // An ISO-8601 numeric representation of the day of the week that a maintenance window is performed. 1: Monday, 7: Sunday + DayOfWeek *int `json:"dayOfWeek,omitempty" xmlrpc:"dayOfWeek,omitempty"` + + // The date and time a maintenance window is scheduled to end. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // Id of the maintenance window + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // An internal identifier of the location (data center) record that a maintenance window takes place in. + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // An internal identifier of the datacenter timezone. + PortalTzId *int `json:"portalTzId,omitempty" xmlrpc:"portalTzId,omitempty"` +} + +// The SoftLayer_Provisioning_Version1_Transaction data type contains general information relating to a single SoftLayer hardware transaction. +// +// SoftLayer customers are unable to change their hardware transactions. +type Provisioning_Version1_Transaction struct { + Entity + + // The account that a transaction belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The date a transaction was created. 
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The amount of seconds that have elapsed since the transaction was last modified. + ElapsedSeconds *int `json:"elapsedSeconds,omitempty" xmlrpc:"elapsedSeconds,omitempty"` + + // The guest record for this transaction. + Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"` + + // A transaction's associated guest identification number. + GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"` + + // The hardware object for this transaction. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // A transaction's associated hardware identification number. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // A transaction's identifying number. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Loopback []Provisioning_Version1_Transaction `json:"loopback,omitempty" xmlrpc:"loopback,omitempty"` + + // A count of + LoopbackCount *uint `json:"loopbackCount,omitempty" xmlrpc:"loopbackCount,omitempty"` + + // The date a transaction was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A count of + PendingTransactionCount *uint `json:"pendingTransactionCount,omitempty" xmlrpc:"pendingTransactionCount,omitempty"` + + // no documentation yet + PendingTransactions []Provisioning_Version1_Transaction `json:"pendingTransactions,omitempty" xmlrpc:"pendingTransactions,omitempty"` + + // The date the transaction status was last modified. + StatusChangeDate *Time `json:"statusChangeDate,omitempty" xmlrpc:"statusChangeDate,omitempty"` + + // no documentation yet + TicketScheduledActionReference []Ticket_Attachment `json:"ticketScheduledActionReference,omitempty" xmlrpc:"ticketScheduledActionReference,omitempty"` + + // A count of + TicketScheduledActionReferenceCount *uint `json:"ticketScheduledActionReferenceCount,omitempty" xmlrpc:"ticketScheduledActionReferenceCount,omitempty"` + + // A transaction's group. This group object determines what type of service is being done on the hardware. + TransactionGroup *Provisioning_Version1_Transaction_Group `json:"transactionGroup,omitempty" xmlrpc:"transactionGroup,omitempty"` + + // A transaction's status. This status object determines the state it is in the transaction group. + TransactionStatus *Provisioning_Version1_Transaction_Status `json:"transactionStatus,omitempty" xmlrpc:"transactionStatus,omitempty"` +} + +// The SoftLayer_Provisioning_Version1_Transaction_Group data type contains general information relating to a single SoftLayer hardware transaction group. +// +// SoftLayer customers are unable to change their hardware transactions or the hardware transaction group. +type Provisioning_Version1_Transaction_Group struct { + Entity + + // Average time, in minutes, for this type of transaction to complete. Please note that this is only an estimate. + AverageTimeToComplete *Float64 `json:"averageTimeToComplete,omitempty" xmlrpc:"averageTimeToComplete,omitempty"` + + // A transaction group's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Provisioning_Version1_Transaction_History struct { + Entity + + // The finish date of a transaction history record. + FinishDate *Time `json:"finishDate,omitempty" xmlrpc:"finishDate,omitempty"` + + // The guest from where transaction history originates. 
+ Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"` + + // The guest ID associated with a transaction history. + GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"` + + // The hardware from where transaction history originates. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The hardware ID associated with a transaction history. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // The host ID associated with a transaction history. + HostId *int `json:"hostId,omitempty" xmlrpc:"hostId,omitempty"` + + // The ID associated with a transaction history. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The start date of a transaction history record. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` + + // The transaction from where transaction history originates. + Transaction *Provisioning_Version1_Transaction `json:"transaction,omitempty" xmlrpc:"transaction,omitempty"` + + // The transaction ID associated with a transaction history. + TransactionId *int `json:"transactionId,omitempty" xmlrpc:"transactionId,omitempty"` + + // The transaction status of a transaction history. + TransactionStatus *Provisioning_Version1_Transaction_Status `json:"transactionStatus,omitempty" xmlrpc:"transactionStatus,omitempty"` + + // The transaction status ID associated with a transaction history. + TransactionStatusId *int `json:"transactionStatusId,omitempty" xmlrpc:"transactionStatusId,omitempty"` +} + +// The SoftLayer_Provisioning_Version1_Transaction_Status data type contains general information relating to a single SoftLayer hardware transaction status. +// +// SoftLayer customers are unable to change their hardware transaction status. +type Provisioning_Version1_Transaction_Status struct { + Entity + + // Hardware transaction status average duration. + AverageDuration *Float64 `json:"averageDuration,omitempty" xmlrpc:"averageDuration,omitempty"` + + // Transaction status friendly name. + FriendlyName *string `json:"friendlyName,omitempty" xmlrpc:"friendlyName,omitempty"` + + // Transaction status name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of + NonCompletedTransactionCount *uint `json:"nonCompletedTransactionCount,omitempty" xmlrpc:"nonCompletedTransactionCount,omitempty"` + + // no documentation yet + NonCompletedTransactions []Provisioning_Version1_Transaction `json:"nonCompletedTransactions,omitempty" xmlrpc:"nonCompletedTransactions,omitempty"` +} + +// no documentation yet +type Provisioning_Version1_Transaction_SubnetMigration struct { + Provisioning_Version1_Transaction +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/resource.go b/vendor/github.com/softlayer/softlayer-go/datatypes/resource.go new file mode 100644 index 000000000000..f2a293ad2683 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/resource.go @@ -0,0 +1,413 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Resource_Configuration struct { + Entity +} + +// no documentation yet +type Resource_Group struct { + Entity + + // A count of a resource group's associated group ancestors. + AncestorGroupCount *uint `json:"ancestorGroupCount,omitempty" xmlrpc:"ancestorGroupCount,omitempty"` + + // A resource group's associated group ancestors. + AncestorGroups []Resource_Group `json:"ancestorGroups,omitempty" xmlrpc:"ancestorGroups,omitempty"` + + // A count of a resource group's associated attributes. + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // A resource group's associated attributes. + Attributes []Resource_Group_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // A resource group's creation date. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A resource group's description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A count of a resource group's associated hardware members. + HardwareMemberCount *uint `json:"hardwareMemberCount,omitempty" xmlrpc:"hardwareMemberCount,omitempty"` + + // A resource group's associated hardware members. + HardwareMembers []Resource_Group_Member `json:"hardwareMembers,omitempty" xmlrpc:"hardwareMembers,omitempty"` + + // A resource group's ID. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A resource group's keyname. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A count of a resource group's associated members. + MemberCount *uint `json:"memberCount,omitempty" xmlrpc:"memberCount,omitempty"` + + // A resource group's associated members. + Members []Resource_Group_Member `json:"members,omitempty" xmlrpc:"members,omitempty"` + + // A resource group's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A resource group's associated root resource group. + RootResourceGroup *Resource_Group `json:"rootResourceGroup,omitempty" xmlrpc:"rootResourceGroup,omitempty"` + + // no documentation yet + RootResourceGroupId *int `json:"rootResourceGroupId,omitempty" xmlrpc:"rootResourceGroupId,omitempty"` + + // A count of a resource group's associated subnet members. + SubnetMemberCount *uint `json:"subnetMemberCount,omitempty" xmlrpc:"subnetMemberCount,omitempty"` + + // A resource group's associated subnet members. + SubnetMembers []Resource_Group_Member `json:"subnetMembers,omitempty" xmlrpc:"subnetMembers,omitempty"` + + // A resource group's associated template. + Template *Resource_Group_Template `json:"template,omitempty" xmlrpc:"template,omitempty"` + + // A resource group's template ID. + TemplateId *int `json:"templateId,omitempty" xmlrpc:"templateId,omitempty"` + + // A count of a resource group's associated VLAN members. + VlanMemberCount *uint `json:"vlanMemberCount,omitempty" xmlrpc:"vlanMemberCount,omitempty"` + + // A resource group's associated VLAN members. + VlanMembers []Resource_Group_Member `json:"vlanMembers,omitempty" xmlrpc:"vlanMembers,omitempty"` +} + +// no documentation yet +type Resource_Group_Attribute struct { + Entity + + // A resource group attribute's creation date. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A resource group attribute's resource group. 
+ Group *Resource_Group `json:"group,omitempty" xmlrpc:"group,omitempty"` + + // A resource group attribute's ID. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A resource group attribute's type. + Type *Resource_Group_Attribute_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // A resource group attribute's value. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Resource_Group_Attribute_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Resource_Group_Descendant_Reference data type simplifies the link between one SoftLayer_Resource_Group_Member object and all of its parents. +// +// +type Resource_Group_Descendant_Reference struct { + Entity + + // no documentation yet + Group *Resource_Group `json:"group,omitempty" xmlrpc:"group,omitempty"` + + // no documentation yet + GroupMember *Resource_Group_Member `json:"groupMember,omitempty" xmlrpc:"groupMember,omitempty"` +} + +// no documentation yet +type Resource_Group_Member struct { + Entity + + // A count of a resource group member's associated attributes. + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // A resource group member's associated attributes. + Attributes []Resource_Group_Member_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // A resource group member's creation date. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of a resource group member's associated member descendants. + DescendantMemberCount *uint `json:"descendantMemberCount,omitempty" xmlrpc:"descendantMemberCount,omitempty"` + + // A resource group member's associated member descendants. + DescendantMembers []Resource_Group_Member `json:"descendantMembers,omitempty" xmlrpc:"descendantMembers,omitempty"` + + // A resource group member's resource group. + Group *Resource_Group `json:"group,omitempty" xmlrpc:"group,omitempty"` + + // A resource group member's ID. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of a resource group member's associated roles. + RoleCount *uint `json:"roleCount,omitempty" xmlrpc:"roleCount,omitempty"` + + // A resource group member's associated roles. + Roles []Resource_Group_Role `json:"roles,omitempty" xmlrpc:"roles,omitempty"` + + // A resource group member's status. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // A resource group member's type. + Type *Resource_Group_Member_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Attribute struct { + Entity + + // A resource group member attribute's creation date. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A resource group member attribute's ID. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A resource group member attribute's resource group member. + Member *Resource_Group_Member `json:"member,omitempty" xmlrpc:"member,omitempty"` + + // A resource group member attribute's type. 
+ Type *Resource_Group_Member_Attribute_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // A resource group member attribute's value. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Attribute_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_CloudStack_Version3_Cluster struct { + Resource_Group_Member + + // A resource group member's associated cluster. + Resource *Resource_Group `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_CloudStack_Version3_Pod struct { + Resource_Group_Member + + // A resource group member's associated pod. + Resource *Resource_Group `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_CloudStack_Version3_Zone struct { + Resource_Group_Member + + // A resource group member's associated zone. + Resource *Resource_Group `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Hardware struct { + Resource_Group_Member + + // A resource group member's associated hardware. + Resource *Hardware `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // A resource group hardware member's associated server arbiter-only state. + ServerArbiterOnly *Resource_Group_Member_Attribute `json:"serverArbiterOnly,omitempty" xmlrpc:"serverArbiterOnly,omitempty"` + + // A resource group hardware member's associated server hidden state. + ServerHidden *Resource_Group_Member_Attribute `json:"serverHidden,omitempty" xmlrpc:"serverHidden,omitempty"` + + // A resource group hardware member's associated server priority. + ServerPriority *Resource_Group_Member_Attribute `json:"serverPriority,omitempty" xmlrpc:"serverPriority,omitempty"` + + // A resource group hardware member's associated server slave delay (in seconds). + ServerSlaveDelay *Resource_Group_Member_Attribute `json:"serverSlaveDelay,omitempty" xmlrpc:"serverSlaveDelay,omitempty"` + + // A resource group hardware member's associated server tags (in JSON format). + ServerTags *Resource_Group_Member_Attribute `json:"serverTags,omitempty" xmlrpc:"serverTags,omitempty"` + + // A resource group hardware member's associated server vote count. + ServerVotes *Resource_Group_Member_Attribute `json:"serverVotes,omitempty" xmlrpc:"serverVotes,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Network_Storage struct { + Resource_Group_Member + + // A resource group member's associated network storage. + Resource *Network_Storage `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Network_Subnet struct { + Resource_Group_Member + + // A resource group member's associated network subnet. + Resource *Network_Subnet `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Network_Vlan struct { + Resource_Group_Member + + // A resource group member's associated network VLAN. 
+ Resource *Network_Vlan `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Resource_Group struct { + Resource_Group_Member + + // A resource group member's associated resource group. + Resource *Resource_Group `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Role_Link struct { + Entity + + // A resource group member's ID. + GroupMemberId *int `json:"groupMemberId,omitempty" xmlrpc:"groupMemberId,omitempty"` + + // A resource group's template role ID. + GroupTemplateRoleId *int `json:"groupTemplateRoleId,omitempty" xmlrpc:"groupTemplateRoleId,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Software_Component_Password struct { + Resource_Group_Member + + // A resource group member's associated software component password. + Resource *Software_Component_Password `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Type struct { + Entity + + // A resource group member's type description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A resource group member's type keyname. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Virtual_Host_Pool struct { + Resource_Group_Member +} + +// no documentation yet +type Resource_Group_Role struct { + Entity + + // A resource group role's description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A resource group role's ID. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A resource group role's keyname. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A count of a resource group's role. + MemberLinkCount *uint `json:"memberLinkCount,omitempty" xmlrpc:"memberLinkCount,omitempty"` + + // A resource group's role. + MemberLinks []Resource_Group_Member_Role_Link `json:"memberLinks,omitempty" xmlrpc:"memberLinks,omitempty"` +} + +// no documentation yet +type Resource_Group_Template struct { + Entity + + // no documentation yet + Children []Resource_Group_Template `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // A resource group template's description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A resource group template's keyname. 
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A count of + MemberCount *uint `json:"memberCount,omitempty" xmlrpc:"memberCount,omitempty"` + + // no documentation yet + Members []Resource_Group_Template_Member `json:"members,omitempty" xmlrpc:"members,omitempty"` + + // no documentation yet + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` +} + +// no documentation yet +type Resource_Group_Template_Member struct { + Entity + + // no documentation yet + MaxQuantity *int `json:"maxQuantity,omitempty" xmlrpc:"maxQuantity,omitempty"` + + // no documentation yet + MinQuantity *int `json:"minQuantity,omitempty" xmlrpc:"minQuantity,omitempty"` + + // no documentation yet + Role *Resource_Group_Role `json:"role,omitempty" xmlrpc:"role,omitempty"` + + // no documentation yet + RoleId *int `json:"roleId,omitempty" xmlrpc:"roleId,omitempty"` + + // no documentation yet + Template *Resource_Group_Template `json:"template,omitempty" xmlrpc:"template,omitempty"` + + // no documentation yet + TemplateId *int `json:"templateId,omitempty" xmlrpc:"templateId,omitempty"` +} + +// no documentation yet +type Resource_Metadata struct { + Entity +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/sales.go b/vendor/github.com/softlayer/softlayer-go/datatypes/sales.go new file mode 100644 index 000000000000..bf667aab4970 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/sales.go @@ -0,0 +1,62 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The presale event data types indicate the information regarding an individual presale event. The '''locationId''' will indicate the datacenter associated with the presale event. The '''itemId''' will indicate the product item associated with a particular presale event - however these are more rare. The '''startDate''' and '''endDate''' will provide information regarding when the presale event is available for use. At the end of the presale event, the server or services purchased will be available once approved and provisioned. +type Sales_Presale_Event struct { + Entity + + // A flag to indicate that the presale event is currently active. A presale event is active if the current time is between the start and end dates. + ActiveFlag *bool `json:"activeFlag,omitempty" xmlrpc:"activeFlag,omitempty"` + + // Description of the presale event. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // End date of the presale event. Orders can be approved and provisioned after this date. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // A flag to indicate that the presale event is expired. A presale event is expired if the current time is after the end date. + ExpiredFlag *bool `json:"expiredFlag,omitempty" xmlrpc:"expiredFlag,omitempty"` + + // Presale event unique identifier. 
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The [[SoftLayer_Product_Item]] associated with the presale event. + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // [[SoftLayer_Product_Item]] id associated with the presale event. + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // The [[SoftLayer_Location]] associated with the presale event. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // [[SoftLayer_Location]] id for the presale event. + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // A count of the orders ([[SoftLayer_Billing_Order]]) associated with this presale event that were created for the customer's account. + OrderCount *uint `json:"orderCount,omitempty" xmlrpc:"orderCount,omitempty"` + + // The orders ([[SoftLayer_Billing_Order]]) associated with this presale event that were created for the customer's account. + Orders []Billing_Order `json:"orders,omitempty" xmlrpc:"orders,omitempty"` + + // Start date of the presale event. Orders cannot be approved before this date. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/scale.go b/vendor/github.com/softlayer/softlayer-go/datatypes/scale.go new file mode 100644 index 000000000000..4ecc2a0f1d57 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/scale.go @@ -0,0 +1,559 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Scale_Asset struct { + Entity + + // When this asset was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // When set and true any edit that happens on this object, be it calling edit on this directly or setting as a child while editing a parent object, will end up being a deletion. + DeleteFlag *bool `json:"deleteFlag,omitempty" xmlrpc:"deleteFlag,omitempty"` + + // An asset's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The group this asset belongs to. + ScaleGroup *Scale_Group `json:"scaleGroup,omitempty" xmlrpc:"scaleGroup,omitempty"` + + // The identifier of the group this asset belongs to. + ScaleGroupId *int `json:"scaleGroupId,omitempty" xmlrpc:"scaleGroupId,omitempty"` +} + +// no documentation yet +type Scale_Asset_Hardware struct { + Scale_Asset + + // The hardware for this asset. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The identifier of the hardware for this asset. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` +} + +// no documentation yet +type Scale_Asset_Virtual_Guest struct { + Scale_Asset + + // The guest for this asset. 
+ VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"` + + // The identifier of the guest for this asset. + VirtualGuestId *int `json:"virtualGuestId,omitempty" xmlrpc:"virtualGuestId,omitempty"` +} + +// no documentation yet +type Scale_Group struct { + Entity + + // The account for this scaling group. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The identifier of the account assigned to this group. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // If this is true, this group will scale down members in a way to preserve the balance across VLANs. If there is ambiguity about which member to use to maintain balance, the terminationPolicy is used to resolve it. This is false by default and can only be set to true if there are multiple VLANs that are being balanced across. + BalancedTerminationFlag *bool `json:"balancedTerminationFlag,omitempty" xmlrpc:"balancedTerminationFlag,omitempty"` + + // The number of seconds this group will wait after lastActionDate before performing another action. Be advised, this can be overridden per policy. While strongly discouraged, a value of 0 effectively disables cooldown. + Cooldown *int `json:"cooldown,omitempty" xmlrpc:"cooldown,omitempty"` + + // When this group was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // This value is only available on the template for creating and editing a group. It will be null when retrieved. When this value is provided on create or edit, guests will be scaled up or down to meet this number. This number must be in the range provided by minimumMemberCount and maximumMemberCount. This value can only be present during create or edit when this group is active. Note, guests that are created as a result of this value can possibly be removed after cooldown by a policy. + DesiredMemberCount *int `json:"desiredMemberCount,omitempty" xmlrpc:"desiredMemberCount,omitempty"` + + // A group's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date of the last action on this group or its create date + LastActionDate *Time `json:"lastActionDate,omitempty" xmlrpc:"lastActionDate,omitempty"` + + // A count of collection of load balancers for this auto scale group. + LoadBalancerCount *uint `json:"loadBalancerCount,omitempty" xmlrpc:"loadBalancerCount,omitempty"` + + // Collection of load balancers for this auto scale group. + LoadBalancers []Scale_LoadBalancer `json:"loadBalancers,omitempty" xmlrpc:"loadBalancers,omitempty"` + + // A count of collection of log entries for this group. + LogCount *uint `json:"logCount,omitempty" xmlrpc:"logCount,omitempty"` + + // Collection of log entries for this group. + Logs []Scale_Group_Log `json:"logs,omitempty" xmlrpc:"logs,omitempty"` + + // The greatest number of virtual guest members that are allowed on this group. Any attempts to add a guest member will fail if it will result in the total guest member count of this group to be above this number. If this number is edited and is less than the current guest member count, guests will be removed to at least be no greater than this number. + MaximumMemberCount *int `json:"maximumMemberCount,omitempty" xmlrpc:"maximumMemberCount,omitempty"` + + // The fewest number of virtual guest members that are allowed on this group. Any attempts to remove a guest member will fail if it will result in the total guest member count of this group to be below this number. 
If this number is edited and is larger than the current guest member count, guests will be added to at least reach this number. + MinimumMemberCount *int `json:"minimumMemberCount,omitempty" xmlrpc:"minimumMemberCount,omitempty"` + + // When this group was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The name of this scale group. It must be unique on the account. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of collection of VLANs for this auto scale group. VLANs are optional. This can contain a public or private VLAN or both. When a single VLAN for a public/private type is given it can be a non-purchased VLAN only if the minimumMemberCount on the group is >= 1. This can also contain any number of public/private purchased VLANs and members are staggered across them when scaled up. + NetworkVlanCount *uint `json:"networkVlanCount,omitempty" xmlrpc:"networkVlanCount,omitempty"` + + // Collection of VLANs for this auto scale group. VLANs are optional. This can contain a public or private VLAN or both. When a single VLAN for a public/private type is given it can be a non-purchased VLAN only if the minimumMemberCount on the group is >= 1. This can also contain any number of public/private purchased VLANs and members are staggered across them when scaled up. + NetworkVlans []Scale_Network_Vlan `json:"networkVlans,omitempty" xmlrpc:"networkVlans,omitempty"` + + // Collection of policies for this group. This can be empty. + Policies []Scale_Policy `json:"policies,omitempty" xmlrpc:"policies,omitempty"` + + // A count of collection of policies for this group. This can be empty. + PolicyCount *uint `json:"policyCount,omitempty" xmlrpc:"policyCount,omitempty"` + + // The regional group for this scale group. + RegionalGroup *Location_Group_Regional `json:"regionalGroup,omitempty" xmlrpc:"regionalGroup,omitempty"` + + // The identifier of the regional group this scaling group is assigned to. + RegionalGroupId *int `json:"regionalGroupId,omitempty" xmlrpc:"regionalGroupId,omitempty"` + + // The status for this scale group. + Status *Scale_Group_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // If true, this group is suspended. + SuspendedFlag *bool `json:"suspendedFlag,omitempty" xmlrpc:"suspendedFlag,omitempty"` + + // The termination policy for this scaling group. + TerminationPolicy *Scale_Termination_Policy `json:"terminationPolicy,omitempty" xmlrpc:"terminationPolicy,omitempty"` + + // The termination policy for the group. This determines which member to choose to delete when scaling downwards. + TerminationPolicyId *int `json:"terminationPolicyId,omitempty" xmlrpc:"terminationPolicyId,omitempty"` + + // A count of collection of guests that have been pinned to this group. Guest assets are only used for certain trigger checks such as resource watches. They do not count towards the auto scaling guest counts of this group in anyway and are never automatically added or removed. + VirtualGuestAssetCount *uint `json:"virtualGuestAssetCount,omitempty" xmlrpc:"virtualGuestAssetCount,omitempty"` + + // Collection of guests that have been pinned to this group. Guest assets are only used for certain trigger checks such as resource watches. They do not count towards the auto scaling guest counts of this group in anyway and are never automatically added or removed. 
+ VirtualGuestAssets []Scale_Asset_Virtual_Guest `json:"virtualGuestAssets,omitempty" xmlrpc:"virtualGuestAssets,omitempty"` + + // A count of collection of guests that have been scaled with the group. When this group is active, the count of guests here is guaranteed to be between minimumMemberCount and maximumMemberCount inclusively. + VirtualGuestMemberCount *uint `json:"virtualGuestMemberCount,omitempty" xmlrpc:"virtualGuestMemberCount,omitempty"` + + // This is the template to create guest members with. This is the same template accepted by the createObject call on SoftLayer_Virtual_Guest with some caveats. The hostname provided will have an arbitrary value appended to it for each guest created. Also, hourlyBillingFlag cannot be false, and if the datacenter is provided it must be in the region of this group. Finally, VLANs cannot be provided for the template, it will use VLANs provided to this group instead. + // + // Note, if this template is edited on an existing group the previous template values are not kept and are not considered during termination. This means a group's guest members could effectively be a hybrid of multiple templates because this value was changed after some guest members were created but before others were created. + VirtualGuestMemberTemplate *Virtual_Guest `json:"virtualGuestMemberTemplate,omitempty" xmlrpc:"virtualGuestMemberTemplate,omitempty"` + + // Collection of guests that have been scaled with the group. When this group is active, the count of guests here is guaranteed to be between minimumMemberCount and maximumMemberCount inclusively. + VirtualGuestMembers []Scale_Member_Virtual_Guest `json:"virtualGuestMembers,omitempty" xmlrpc:"virtualGuestMembers,omitempty"` +} + +// no documentation yet +type Scale_Group_Log struct { + Entity + + // When this event occurred. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A textual description of what happened during this action. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // This log's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The group this log refers to. + ScaleGroup *Scale_Group `json:"scaleGroup,omitempty" xmlrpc:"scaleGroup,omitempty"` + + // The identifier of the group this log refers to. + ScaleGroupId *int `json:"scaleGroupId,omitempty" xmlrpc:"scaleGroupId,omitempty"` +} + +// no documentation yet +type Scale_Group_Status struct { + Entity + + // A status's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A status's programmatic name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A status's human-friendly name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Scale_LoadBalancer struct { + Entity + + // The percentage of connections allocated to this virtual server. + AllocationPercent *int `json:"allocationPercent,omitempty" xmlrpc:"allocationPercent,omitempty"` + + // When this load balancer configuration was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // When set and true any edit that happens on this object, be it calling edit on this directly or setting as a child while editing a parent object, will end up being a deletion. + DeleteFlag *bool `json:"deleteFlag,omitempty" xmlrpc:"deleteFlag,omitempty"` + + // The health check for this configuration. 
+ HealthCheck *Network_Application_Delivery_Controller_LoadBalancer_Health_Check `json:"healthCheck,omitempty" xmlrpc:"healthCheck,omitempty"` + + // The identifier for the health check of this load balancer configuration + HealthCheckId *int `json:"healthCheckId,omitempty" xmlrpc:"healthCheckId,omitempty"` + + // The load balancer configuration's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // When this load balancer configuration was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The port for this load balancer configuration. + Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"` + + // The routing method. + RoutingMethod *Network_Application_Delivery_Controller_LoadBalancer_Routing_Method `json:"routingMethod,omitempty" xmlrpc:"routingMethod,omitempty"` + + // The routing type. + RoutingType *Network_Application_Delivery_Controller_LoadBalancer_Routing_Type `json:"routingType,omitempty" xmlrpc:"routingType,omitempty"` + + // The group this load balancer configuration is for. + ScaleGroup *Scale_Group `json:"scaleGroup,omitempty" xmlrpc:"scaleGroup,omitempty"` + + // The identifier of the group this load balancer configuration applies to. + ScaleGroupId *int `json:"scaleGroupId,omitempty" xmlrpc:"scaleGroupId,omitempty"` + + // The ID of the virtual IP address. + VirtualIpAddressId *int `json:"virtualIpAddressId,omitempty" xmlrpc:"virtualIpAddressId,omitempty"` + + // The virtual server for this configuration. + VirtualServer *Network_Application_Delivery_Controller_LoadBalancer_VirtualServer `json:"virtualServer,omitempty" xmlrpc:"virtualServer,omitempty"` + + // The identifier of the virtual server this load balancer configuration uses. + VirtualServerId *int `json:"virtualServerId,omitempty" xmlrpc:"virtualServerId,omitempty"` + + // The port on the virtual server. + VirtualServerPort *int `json:"virtualServerPort,omitempty" xmlrpc:"virtualServerPort,omitempty"` +} + +// no documentation yet +type Scale_Member struct { + Entity + + // When this member was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A member's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The group this member belongs to. + ScaleGroup *Scale_Group `json:"scaleGroup,omitempty" xmlrpc:"scaleGroup,omitempty"` + + // The identifier of the group this member belongs to. + ScaleGroupId *int `json:"scaleGroupId,omitempty" xmlrpc:"scaleGroupId,omitempty"` +} + +// no documentation yet +type Scale_Member_Virtual_Guest struct { + Scale_Member + + // The guest for this member. + VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"` + + // The identifier of the guest for this member. + VirtualGuestId *int `json:"virtualGuestId,omitempty" xmlrpc:"virtualGuestId,omitempty"` +} + +// no documentation yet +type Scale_Network_Vlan struct { + Entity + + // When this network VLAN reference was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // When set and true any edit that happens on this object, be it calling edit on this directly or setting as a child while editing a parent object, will end up being a deletion. + DeleteFlag *bool `json:"deleteFlag,omitempty" xmlrpc:"deleteFlag,omitempty"` + + // The network VLAN reference's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The network VLAN to scale with. 
+ NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // The identifier for the VLAN to scale with. + NetworkVlanId *int `json:"networkVlanId,omitempty" xmlrpc:"networkVlanId,omitempty"` + + // The group this network VLAN is for. + ScaleGroup *Scale_Group `json:"scaleGroup,omitempty" xmlrpc:"scaleGroup,omitempty"` + + // The identifier of the group this network VLAN reference applies to. + ScaleGroupId *int `json:"scaleGroupId,omitempty" xmlrpc:"scaleGroupId,omitempty"` +} + +// no documentation yet +type Scale_Policy struct { + Entity + + // A count of the actions to perform upon any trigger hit. Currently this must be a single value. + ActionCount *uint `json:"actionCount,omitempty" xmlrpc:"actionCount,omitempty"` + + // The actions to perform upon any trigger hit. Currently this must be a single value. + Actions []Scale_Policy_Action `json:"actions,omitempty" xmlrpc:"actions,omitempty"` + + // The number of seconds this policy will wait after lastActionDate on group before performing another action. If not present, the group's cooldown value is used. + Cooldown *int `json:"cooldown,omitempty" xmlrpc:"cooldown,omitempty"` + + // When this policy was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // When set and true any edit that happens on this object, be it calling edit on this directly or setting as a child while editing a parent object, will end up being a deletion. + DeleteFlag *bool `json:"deleteFlag,omitempty" xmlrpc:"deleteFlag,omitempty"` + + // A policy's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // When this policy was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The name of this policy. It must be unique within the group. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of the one-time triggers to check for this group. + OneTimeTriggerCount *uint `json:"oneTimeTriggerCount,omitempty" xmlrpc:"oneTimeTriggerCount,omitempty"` + + // The one-time triggers to check for this group. + OneTimeTriggers []Scale_Policy_Trigger_OneTime `json:"oneTimeTriggers,omitempty" xmlrpc:"oneTimeTriggers,omitempty"` + + // A count of the repeating triggers to check for this group. + RepeatingTriggerCount *uint `json:"repeatingTriggerCount,omitempty" xmlrpc:"repeatingTriggerCount,omitempty"` + + // The repeating triggers to check for this group. + RepeatingTriggers []Scale_Policy_Trigger_Repeating `json:"repeatingTriggers,omitempty" xmlrpc:"repeatingTriggers,omitempty"` + + // A count of the resource-use triggers to check for this group. + ResourceUseTriggerCount *uint `json:"resourceUseTriggerCount,omitempty" xmlrpc:"resourceUseTriggerCount,omitempty"` + + // The resource-use triggers to check for this group. + ResourceUseTriggers []Scale_Policy_Trigger_ResourceUse `json:"resourceUseTriggers,omitempty" xmlrpc:"resourceUseTriggers,omitempty"` + + // A count of the scale actions to perform upon any trigger hit. Currently this must be a single value. + ScaleActionCount *uint `json:"scaleActionCount,omitempty" xmlrpc:"scaleActionCount,omitempty"` + + // The scale actions to perform upon any trigger hit. Currently this must be a single value. + ScaleActions []Scale_Policy_Action_Scale `json:"scaleActions,omitempty" xmlrpc:"scaleActions,omitempty"` + + // The group this policy is on. 
+ ScaleGroup *Scale_Group `json:"scaleGroup,omitempty" xmlrpc:"scaleGroup,omitempty"` + + // The identifier of the group this policy is on. + ScaleGroupId *int `json:"scaleGroupId,omitempty" xmlrpc:"scaleGroupId,omitempty"` + + // A count of the triggers to check for this group. + TriggerCount *uint `json:"triggerCount,omitempty" xmlrpc:"triggerCount,omitempty"` + + // The triggers to check for this group. + Triggers []Scale_Policy_Trigger `json:"triggers,omitempty" xmlrpc:"triggers,omitempty"` +} + +// no documentation yet +type Scale_Policy_Action struct { + Entity + + // When this action was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // When set and true any edit that happens on this object, be it calling edit on this directly or setting as a child while editing a parent object, will end up being a deletion. + DeleteFlag *bool `json:"deleteFlag,omitempty" xmlrpc:"deleteFlag,omitempty"` + + // An action's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // When this action was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The policy this action is on. + ScalePolicy *Scale_Policy `json:"scalePolicy,omitempty" xmlrpc:"scalePolicy,omitempty"` + + // The policy this action is on. + ScalePolicyId *int `json:"scalePolicyId,omitempty" xmlrpc:"scalePolicyId,omitempty"` + + // The type of action. + Type *Scale_Policy_Action_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The identifier of this action's type. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// no documentation yet +type Scale_Policy_Action_Scale struct { + Scale_Policy_Action + + // The number to scale by. This number has different meanings based on type. + Amount *int `json:"amount,omitempty" xmlrpc:"amount,omitempty"` + + // The type of scale to perform. Possible values: + // + // + // * ABSOLUTE - Force the group to be set at a specific number of group members. This may include scaling up or + // down or not at all. If the amount is outside of the min/max range of the group, an error occurs. + // * PERCENT - Scale the group up or down based on the positive or negative percentage given in amount. The + // number is a percent of the current group member count. Any extra percent after the decimal point is always ignored. If the resulting amount is zero, -1 or 1 is used depending upon whether the percentage was negative or positive respectively. + // * RELATIVE - Scale the group up or down by the positive or negative value given in amount. + ScaleType *string `json:"scaleType,omitempty" xmlrpc:"scaleType,omitempty"` +} + +// no documentation yet +type Scale_Policy_Action_Type struct { + Entity + + // This type's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // An action type's programmatic name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // An action type's human-friendly name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Scale_Policy_Trigger struct { + Entity + + // When this trigger was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // When set and true any edit that happens on this object, be it calling edit on this directly or setting as a child while editing a parent object, will end up being a deletion.
+ DeleteFlag *bool `json:"deleteFlag,omitempty" xmlrpc:"deleteFlag,omitempty"` + + // A trigger's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // When this trigger was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The policy this trigger is on. + ScalePolicy *Scale_Policy `json:"scalePolicy,omitempty" xmlrpc:"scalePolicy,omitempty"` + + // The policy this trigger is on. + ScalePolicyId *int `json:"scalePolicyId,omitempty" xmlrpc:"scalePolicyId,omitempty"` + + // The type of trigger. + Type *Scale_Policy_Trigger_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The type of trigger this is. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// no documentation yet +type Scale_Policy_Trigger_OneTime struct { + Scale_Policy_Trigger + + // The date to execute the policy. + Date *Time `json:"date,omitempty" xmlrpc:"date,omitempty"` +} + +// no documentation yet +type Scale_Policy_Trigger_Repeating struct { + Scale_Policy_Trigger + + // The cron-formatted schedule. This is run in the UTC timezone. + Schedule *string `json:"schedule,omitempty" xmlrpc:"schedule,omitempty"` +} + +// no documentation yet +type Scale_Policy_Trigger_ResourceUse struct { + Scale_Policy_Trigger + + // A count of the resource watches for this trigger. + WatchCount *uint `json:"watchCount,omitempty" xmlrpc:"watchCount,omitempty"` + + // The resource watches for this trigger. + Watches []Scale_Policy_Trigger_ResourceUse_Watch `json:"watches,omitempty" xmlrpc:"watches,omitempty"` +} + +// no documentation yet +type Scale_Policy_Trigger_ResourceUse_Watch struct { + Entity + + // The algorithm to use when aggregating and comparing. Currently, the only value that is accepted is EWMA (Exponential Weighted Moving Average). EWMA is the default value if no value is given. + Algorithm *string `json:"algorithm,omitempty" xmlrpc:"algorithm,omitempty"` + + // When this watch was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // When set and true any edit that happens on this object, be it calling edit on this directly or setting as a child while editing a parent object, will end up being a deletion. + DeleteFlag *bool `json:"deleteFlag,omitempty" xmlrpc:"deleteFlag,omitempty"` + + // A watch's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The metric to watch. Possible values: + // + // + // * host.cpu.percent - On a scale of 0 to 100, the percent CPU a guest is using. + // * host.network.backend.in and host.network.frontend.in - The network bytes-per-second incoming on the interface + // of either the frontend or backend network. + // * host.network.backend.out and host.network.frontend.out - The network bytes-per-second outgoing on the interface + // of either the frontend or backend network. + Metric *string `json:"metric,omitempty" xmlrpc:"metric,omitempty"` + + // When this watch was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The operator to use for comparison. The only two valid values are ">" and "<". + Operator *string `json:"operator,omitempty" xmlrpc:"operator,omitempty"` + + // The number of seconds the values are aggregated for when compared to value. If values are not retrieved steadily and consecutively for the length of this period, nothing is compared. + Period *int `json:"period,omitempty" xmlrpc:"period,omitempty"` + + // The trigger this watch is on.
+ ScalePolicyTrigger *Scale_Policy_Trigger_ResourceUse `json:"scalePolicyTrigger,omitempty" xmlrpc:"scalePolicyTrigger,omitempty"` + + // The trigger this watch is on. + ScalePolicyTriggerId *int `json:"scalePolicyTriggerId,omitempty" xmlrpc:"scalePolicyTriggerId,omitempty"` + + // The value to compare against. Although the value is a string, validation will be done on the value for restrictions (such as numeric-only) based on the metric. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Scale_Policy_Trigger_Type struct { + Entity + + // A trigger type's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A trigger type's programmatic name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A trigger type's human-friendly name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Scale_Termination_Policy struct { + Entity + + // A termination policy's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A termination policy's programmatic name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A termination policy's human-friendly name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/search.go b/vendor/github.com/softlayer/softlayer-go/datatypes/search.go new file mode 100644 index 000000000000..5afb44aacd58 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/search.go @@ -0,0 +1,26 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Search struct { + Entity +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/security.go b/vendor/github.com/softlayer/softlayer-go/datatypes/security.go new file mode 100644 index 000000000000..23a9a024e466 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/security.go @@ -0,0 +1,294 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Security_Certificate struct { + Entity + + // The number of services currently associated with the certificate. 
+ AssociatedServiceCount *int `json:"associatedServiceCount,omitempty" xmlrpc:"associatedServiceCount,omitempty"` + + // The certificate provided publicly to clients requesting identity credentials. This certificate is usually signed by a source trusted by the client or a signature chain can be established between this certificate and the trusted certificate. + // + // This property may only be modified when no services are associated. See associatedServiceCount. + Certificate *string `json:"certificate,omitempty" xmlrpc:"certificate,omitempty"` + + // The signing request used to request a certificate authority generate a signed certificate. + // + // This property may only be modified when no services are associated. See associatedServiceCount. + CertificateSigningRequest *string `json:"certificateSigningRequest,omitempty" xmlrpc:"certificateSigningRequest,omitempty"` + + // The common name (usually a domain name) encoded within the certificate. + // + // This property is read only. Changes made will be silently ignored. + CommonName *string `json:"commonName,omitempty" xmlrpc:"commonName,omitempty"` + + // The date the certificate _record_ was created. The contents of the certificate may have changed since the record was created, so this does not represent anything about the certificate itself. + // + // This property is read only. Changes made will be silently ignored. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The ID of the certificate record. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The intermediate certificate authorities certificate that completes the certificate chain for the issued certificate. Required when clients will only trust the root certificate. + // + // This property may only be modified when no services are associated. See associatedServiceCount. + IntermediateCertificate *string `json:"intermediateCertificate,omitempty" xmlrpc:"intermediateCertificate,omitempty"` + + // The size (number of bits) of the public key represented by the certificate. + KeySize *int `json:"keySize,omitempty" xmlrpc:"keySize,omitempty"` + + // A count of the load balancers virtual IP addresses currently associated with the certificate. + LoadBalancerVirtualIpAddressCount *uint `json:"loadBalancerVirtualIpAddressCount,omitempty" xmlrpc:"loadBalancerVirtualIpAddressCount,omitempty"` + + // The load balancers virtual IP addresses currently associated with the certificate. + LoadBalancerVirtualIpAddresses []Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"loadBalancerVirtualIpAddresses,omitempty" xmlrpc:"loadBalancerVirtualIpAddresses,omitempty"` + + // The date the certificate _record_ was last modified. The contents of the certificate may have changed since the record was created, so this does not represent anything about the certificate itself. + // + // This property is read only. Changes made will be silently ignored. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A note to help describe the certificate. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The organizational name encoded in the certificate. + // + // This property is read only. Changes made will be silently ignored. + OrganizationName *string `json:"organizationName,omitempty" xmlrpc:"organizationName,omitempty"` + + // The private key in the key/certificate pair. + // + // This property may only be modified when no services are associated. See associatedServiceCount.
+ PrivateKey *string `json:"privateKey,omitempty" xmlrpc:"privateKey,omitempty"` + + // The UTC timestamp representing the beginning of the certificate's validity + // + // This property is read only. Changes made will be silently ignored. + ValidityBegin *Time `json:"validityBegin,omitempty" xmlrpc:"validityBegin,omitempty"` + + // The number of days remaining in the validity period for the certificate. + // + // This property is read only. Changes made will be silently ignored. + ValidityDays *int `json:"validityDays,omitempty" xmlrpc:"validityDays,omitempty"` + + // The UTC timestamp representing the end of the certificate's validity period. + // + // This property is read only. Changes made will be silently ignored. + ValidityEnd *Time `json:"validityEnd,omitempty" xmlrpc:"validityEnd,omitempty"` +} + +// no documentation yet +type Security_Certificate_Entry struct { + Entity + + // The ID of the certificate record. + CertificateId *int `json:"certificateId,omitempty" xmlrpc:"certificateId,omitempty"` + + // The common name (usually a domain name) encoded within the certificate. + CommonName *string `json:"commonName,omitempty" xmlrpc:"commonName,omitempty"` + + // The size (number of bits) of the public key represented by the certificate. + KeySize *int `json:"keySize,omitempty" xmlrpc:"keySize,omitempty"` + + // The organizational name encoded in the certificate. + OrganizationName *string `json:"organizationName,omitempty" xmlrpc:"organizationName,omitempty"` + + // The UTC timestamp representing the beginning of the certificate's validity + ValidityBegin *Time `json:"validityBegin,omitempty" xmlrpc:"validityBegin,omitempty"` + + // The number of days remaining in the validity period for the certificate. + ValidityDays *int `json:"validityDays,omitempty" xmlrpc:"validityDays,omitempty"` + + // The UTC timestamp representing the end of the certificate's validity period. + ValidityEnd *Time `json:"validityEnd,omitempty" xmlrpc:"validityEnd,omitempty"` +} + +// SoftLayer_Security_Certificate_Request data type is used to harness your SSL certificate order to a Certificate Authority. This contains data that is required by a Certificate Authority to place an SSL certificate order. +type Security_Certificate_Request struct { + Entity + + // The account to which a SSL certificate request belongs. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // This is a reference to your SoftLayer account. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The email address of a person who will approve your SSL certificate order. This is usually an email address of your domain administrator. + ApproverEmailAddress *string `json:"approverEmailAddress,omitempty" xmlrpc:"approverEmailAddress,omitempty"` + + // The Certificate Authority name + CertificateAuthorityName *string `json:"certificateAuthorityName,omitempty" xmlrpc:"certificateAuthorityName,omitempty"` + + // A Certificate Signing Request (CSR) string + CertificateSigningRequest *string `json:"certificateSigningRequest,omitempty" xmlrpc:"certificateSigningRequest,omitempty"` + + // A domain name of a SSL certificate request + CommonName *string `json:"commonName,omitempty" xmlrpc:"commonName,omitempty"` + + // The date a SSL certificate request was created. 
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The date of your SSL certificate went into effect + EffectiveDate *Time `json:"effectiveDate,omitempty" xmlrpc:"effectiveDate,omitempty"` + + // The expiration date of your SSL certificate + ExpirationDate *Time `json:"expirationDate,omitempty" xmlrpc:"expirationDate,omitempty"` + + // The internal identifier of an SSL certificate request + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date a SSL certificate request was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The order contains the information related to a SSL certificate request. + Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"` + + // The associated order item for this SSL certificate request. + OrderItem *Billing_Order_Item `json:"orderItem,omitempty" xmlrpc:"orderItem,omitempty"` + + // The status of a SSL certificate request. + Status *Security_Certificate_Request_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // A status id reflecting the state of a SSL certificate request + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // The technical contact email address. + TechnicalContactEmailAddress *string `json:"technicalContactEmailAddress,omitempty" xmlrpc:"technicalContactEmailAddress,omitempty"` +} + +// Represents a server type that can be specified when ordering an SSL certificate. +type Security_Certificate_Request_ServerType struct { + Entity + + // The description of the certificate server type. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The internal identifier of the certificate server type. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The name of the certificate server type. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The value of the certificate server type. + Value *int `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// Represents the status of an SSL certificate request. +type Security_Certificate_Request_Status struct { + Entity + + // The description of a SSL certificate request status + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The internal identifier of an SSL certificate request status + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The status name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// SoftLayer_Security_Directory_Service_Host_Xref_Hardware extends the [[SoftLayer_Security_Directory_Service_Host_Xref]] data type to include hardware specific properties. +type Security_Directory_Service_Host_Xref_Hardware struct { + Entity + + // The hardware object. 
+ Host *Hardware `json:"host,omitempty" xmlrpc:"host,omitempty"` +} + +// The SoftLayer_Security_Level data type contains the security level restriction for the account +type Security_Level struct { + Entity + + // The unique name of the security level the account is under + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The name of the security level the account is under + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Encryption algorithm intended for use in SSL/TLS communications +type Security_SecureTransportCipher struct { + Entity + + // Unique identifier for the encryption algorithm + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} + +// Protocol intended for use in secure communications +type Security_SecureTransportProtocol struct { + Entity + + // Unique identifier for the protocol + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // List of the supported encryption ciphers + SupportedSecureTransportCiphers []Security_SecureTransportCipher `json:"supportedSecureTransportCiphers,omitempty" xmlrpc:"supportedSecureTransportCiphers,omitempty"` +} + +// no documentation yet +type Security_Ssh_Key struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A count of the image template groups that are linked to an SSH key. + BlockDeviceTemplateGroupCount *uint `json:"blockDeviceTemplateGroupCount,omitempty" xmlrpc:"blockDeviceTemplateGroupCount,omitempty"` + + // The image template groups that are linked to an SSH key. + BlockDeviceTemplateGroups []Virtual_Guest_Block_Device_Template_Group `json:"blockDeviceTemplateGroups,omitempty" xmlrpc:"blockDeviceTemplateGroups,omitempty"` + + // The date a ssh key was added. + // + // This property is read only. Changes made will be silently ignored. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A short sequence of bytes used to authenticate or lookup a longer ssh key. This will automatically be generated upon adding or modifying the ssh key. + // + // This property is read only. Changes made will be silently ignored. + Fingerprint *string `json:"fingerprint,omitempty" xmlrpc:"fingerprint,omitempty"` + + // The ID of the ssh key record. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The ssh key. + Key *string `json:"key,omitempty" xmlrpc:"key,omitempty"` + + // A descriptive name used to identify a ssh key. + Label *string `json:"label,omitempty" xmlrpc:"label,omitempty"` + + // The date a ssh key was last modified. + // + // This property is read only. Changes made will be silently ignored. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A small note about a ssh key to use at your discretion. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // A count of the OS root users that are linked to an SSH key. + SoftwarePasswordCount *uint `json:"softwarePasswordCount,omitempty" xmlrpc:"softwarePasswordCount,omitempty"` + + // The OS root users that are linked to an SSH key. 
+ SoftwarePasswords []Software_Component_Password `json:"softwarePasswords,omitempty" xmlrpc:"softwarePasswords,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/service.go b/vendor/github.com/softlayer/softlayer-go/datatypes/service.go new file mode 100644 index 000000000000..c73b1f48bd69 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/service.go @@ -0,0 +1,55 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Service_External_Resource is a placeholder that references a service being provided outside of the standard SoftLayer system. +type Service_External_Resource struct { + Entity + + // The customer account that is consuming the service. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The customer account that is consuming the related service. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The unique identifier in the service provider's system. + ExternalIdentifier *string `json:"externalIdentifier,omitempty" xmlrpc:"externalIdentifier,omitempty"` + + // An external resource's unique identifier in the SoftLayer system. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// no documentation yet +type Service_Provider struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/softlayer.go b/vendor/github.com/softlayer/softlayer-go/datatypes/softlayer.go new file mode 100644 index 000000000000..a1f8f0b8db2b --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/softlayer.go @@ -0,0 +1,104 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package datatypes + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +// Void is a dummy type for identifying void return values from methods +type Void int + +// Time type overrides the default json marshaler with the SoftLayer custom format +type Time struct { + time.Time +} + +func (r Time) String() string { + return r.Time.Format(time.RFC3339) +} + +// MarshalJSON returns the json encoding of the datatypes.Time receiver. This +// override is necessary to ensure datetimes are formatted in the way SoftLayer +// expects - that is, using the RFC3339 format, without nanoseconds. +func (r Time) MarshalJSON() ([]byte, error) { + return []byte(`"` + r.String() + `"`), nil +} + +// MarshalText returns a text encoding of the datatypes.Time receiver. This +// is mainly provided to complete what might be expected of a type that +// implements the Marshaler interface. +func (r Time) MarshalText() ([]byte, error) { + return []byte(r.String()), nil +} + +// FIXME: Need to have special unmarshaling of some values defined as float type +// in the metadata that actually come down as strings in the api. +// e.g. SoftLayer_Product_Item.capacity +// Float64 is a float type that deals with some of the oddities when +// unmarshalling from the SLAPI +// +// Code borrowed from https://github.com/sudorandom/softlayer-go/blob/master/slapi/types/float.go +type Float64 float64 + +// UnmarshalJSON statisied the json.Unmarshaler interface +func (f *Float64) UnmarshalJSON(data []byte) error { + + // Attempt parsing the float normally + v, err := strconv.ParseFloat(string(data), 64) + + // Attempt parsing the float as a string + if err != nil { + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { + return fmt.Errorf("malformed data") + } + + v, err = strconv.ParseFloat(string(data[1:len(data)-1]), 64) + if err != nil { + return err + } + } + *f = Float64(v) + return nil +} + +// Used to set the appropriate complexType field in the passed product order. +// Employs reflection to determine the type of the passed value and use it +// to derive the complexType to send to SoftLayer. +func SetComplexType(v interface{}) error { + orderDataPtr := reflect.ValueOf(v) + if orderDataPtr.Type().Name() != "" { + return errors.New("Did not pass a pointer to a product order.") + } + + orderDataValue := reflect.Indirect(reflect.ValueOf(v)) + orderDataType := orderDataValue.Type().Name() + if !strings.HasPrefix(orderDataType, "Container_Product_Order") { + return fmt.Errorf("Did not pass a pointer to a product order: %s", orderDataType) + } + + complexTypeField := orderDataValue.FieldByName("ComplexType") + complexType := "SoftLayer_" + orderDataType + complexTypeField.Set(reflect.ValueOf(&complexType)) + + return nil +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/software.go b/vendor/github.com/softlayer/softlayer-go/datatypes/software.go new file mode 100644 index 000000000000..6c6541f3f683 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/software.go @@ -0,0 +1,768 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
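The softlayer.go hunk above carries the hand-written helpers used by the generated types: Time marshals as RFC3339 without nanoseconds, Float64 tolerates values the API returns as quoted strings, and SetComplexType uses reflection to stamp the SoftLayer_Container_Product_Order_* complexType onto an order container before it is submitted. A minimal sketch of the first two, assuming the vendored import path above:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"

        "github.com/softlayer/softlayer-go/datatypes"
    )

    func main() {
        // datatypes.Time marshals as RFC3339 with no nanoseconds, the format the
        // SoftLayer API expects for datetimes.
        created := datatypes.Time{Time: time.Date(2020, 10, 13, 16, 26, 39, 0, time.UTC)}
        b, _ := json.Marshal(created)
        fmt.Println(string(b)) // "2020-10-13T16:26:39Z"

        // datatypes.Float64 accepts either a JSON number or a quoted string, which
        // is how some fields (e.g. SoftLayer_Product_Item.capacity) come back.
        var capacity datatypes.Float64
        _ = json.Unmarshal([]byte(`"2.5"`), &capacity)
        fmt.Println(float64(capacity)) // 2.5
    }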
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// SoftLayer_Software_AccountLicense is a class that represents software licenses that are tied only to a customer's account and not to any particular hardware, IP address, etc. +type Software_AccountLicense struct { + Entity + + // The customer account this Account License belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The ID of the SoftLayer Account to which this Account License belongs to. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The billing item for a software account license. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // Some Account Licenses have capacity information such as CPU specified in the units key. This provides the numerical representation of the capacity of the units. + Capacity *string `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"` + + // The License Key for this specific Account License. + Key *string `json:"key,omitempty" xmlrpc:"key,omitempty"` + + // The SoftLayer_Software_Description that this account license is for. + SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"` + + // The unit of measurement that an account license has the capacity of. + Units *string `json:"units,omitempty" xmlrpc:"units,omitempty"` +} + +// A SoftLayer_Software_Component ties the installation of a specific piece of software onto a specific piece of hardware. +// +// SoftLayer_Software_Component works with SoftLayer_Software_License and SoftLayer_Software_Description to tie this all together. +// +//
+//     • SoftLayer_Software_Component is the installation of a specific piece of software onto a specific piece of hardware in accordance to a software license.
+//       • SoftLayer_Software_License dictates when and how a specific piece of software may be installed onto a piece of hardware.
+//         • SoftLayer_Software_Description describes a specific piece of software which can be installed onto hardware in accordance with it's license agreement.
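In Go terms, the relationship described above surfaces as optional pointer fields: Software_Component (defined next) carries a SoftwareLicense, and Software_License (further down in this file) carries the SoftwareDescription. A minimal sketch of walking that chain, with nil checks at every hop since all of these relational properties may be absent:

    package example

    import "github.com/softlayer/softlayer-go/datatypes"

    // describeComponent walks the Component -> License -> Description chain via
    // the pointer-valued relational fields declared in this file.
    func describeComponent(c *datatypes.Software_Component) string {
        if c == nil || c.SoftwareLicense == nil || c.SoftwareLicense.SoftwareDescription == nil {
            return "unknown software"
        }
        if d := c.SoftwareLicense.SoftwareDescription; d.LongDescription != nil {
            // LongDescription is documented as "manufacturer, name and version".
            return *d.LongDescription
        }
        return "unknown software"
    }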
    +type Software_Component struct { + Entity + + // The average amount of time that a software component takes to install. + AverageInstallationDuration *uint `json:"averageInstallationDuration,omitempty" xmlrpc:"averageInstallationDuration,omitempty"` + + // The billing item for a software component. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The hardware this Software Component is installed upon. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // Hardware Identification Number for the server this Software Component is installed upon. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // An ID number identifying this Software Component (Software Installation) + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The manufacturer code that is needed to activate a license. + ManufacturerActivationCode *string `json:"manufacturerActivationCode,omitempty" xmlrpc:"manufacturerActivationCode,omitempty"` + + // A license key for this specific installation of software, if it is needed. + ManufacturerLicenseInstance *string `json:"manufacturerLicenseInstance,omitempty" xmlrpc:"manufacturerLicenseInstance,omitempty"` + + // A count of username/Password pairs used for access to this Software Installation. + PasswordCount *uint `json:"passwordCount,omitempty" xmlrpc:"passwordCount,omitempty"` + + // History Records for Software Passwords. + PasswordHistory []Software_Component_Password_History `json:"passwordHistory,omitempty" xmlrpc:"passwordHistory,omitempty"` + + // A count of history Records for Software Passwords. + PasswordHistoryCount *uint `json:"passwordHistoryCount,omitempty" xmlrpc:"passwordHistoryCount,omitempty"` + + // Username/Password pairs used for access to this Software Installation. + Passwords []Software_Component_Password `json:"passwords,omitempty" xmlrpc:"passwords,omitempty"` + + // The Software Description of this Software Component. + SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"` + + // The License this Software Component uses. + SoftwareLicense *Software_License `json:"softwareLicense,omitempty" xmlrpc:"softwareLicense,omitempty"` + + // The virtual guest this software component is installed upon. + VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"` +} + +// This object specifies a specific type of Software Component: An analytics instance. Analytics installations have a specific default ports and patterns for usernames and passwords. Defaults are initiated by this object. +type Software_Component_Analytics struct { + Software_Component +} + +// This object specifies a specific Software Component: An Urchin instance. Urchin installations have a specific default port (9999) and a pattern for usernames and passwords. Defaults are initiated by this object. +type Software_Component_Analytics_Urchin struct { + Software_Component_Analytics +} + +// This object specifies a specific type of Software Component: An Anti-virus/spyware instance. Anti-virus/spyware installations have specific properties and methods such as SoftLayer_Software_Component_AntivirusSpyware::updateAntivirusSpywarePolicy. Defaults are initiated by this object. +type Software_Component_AntivirusSpyware struct { + Software_Component +} + +// The SoftLayer_Software_Component_AntivirusSpyware_Mcafee represents a single anti-virus/spyware software component. 
+type Software_Component_AntivirusSpyware_Mcafee struct { + Software_Component_AntivirusSpyware +} + +// The SoftLayer_Software_Component_AntivirusSpyware_Mcafee_Epo_Version36 data type represents a single McAfee Secure anti-virus/spyware software component that uses the ePolicy Orchestrator version 3.6 backend. +type Software_Component_AntivirusSpyware_Mcafee_Epo_Version36 struct { + Software_Component_AntivirusSpyware_Mcafee + + // The virus scan agent details. + AgentDetails *McAfee_Epolicy_Orchestrator_Version36_Agent_Details `json:"agentDetails,omitempty" xmlrpc:"agentDetails,omitempty"` + + // The current anti-virus policy. + CurrentAntivirusPolicy *int `json:"currentAntivirusPolicy,omitempty" xmlrpc:"currentAntivirusPolicy,omitempty"` + + // The virus definition file version. + DataFileVersion *McAfee_Epolicy_Orchestrator_Version36_Product_Properties `json:"dataFileVersion,omitempty" xmlrpc:"dataFileVersion,omitempty"` + + // The version of ePolicy Orchestrator that the anti-virus/spyware client communicates with. + EpoVersion *string `json:"epoVersion,omitempty" xmlrpc:"epoVersion,omitempty"` + + // A count of the latest access protection events. + LatestAccessProtectionEventCount *uint `json:"latestAccessProtectionEventCount,omitempty" xmlrpc:"latestAccessProtectionEventCount,omitempty"` + + // The latest access protection events. + LatestAccessProtectionEvents []McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event_AccessProtection `json:"latestAccessProtectionEvents,omitempty" xmlrpc:"latestAccessProtectionEvents,omitempty"` + + // A count of the latest anti-virus events. + LatestAntivirusEventCount *uint `json:"latestAntivirusEventCount,omitempty" xmlrpc:"latestAntivirusEventCount,omitempty"` + + // The latest anti-virus events. + LatestAntivirusEvents []McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event `json:"latestAntivirusEvents,omitempty" xmlrpc:"latestAntivirusEvents,omitempty"` + + // A count of the latest spyware events. + LatestSpywareEventCount *uint `json:"latestSpywareEventCount,omitempty" xmlrpc:"latestSpywareEventCount,omitempty"` + + // The latest spyware events. + LatestSpywareEvents []McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event `json:"latestSpywareEvents,omitempty" xmlrpc:"latestSpywareEvents,omitempty"` + + // The current transaction status of a server. + TransactionStatus *string `json:"transactionStatus,omitempty" xmlrpc:"transactionStatus,omitempty"` +} + +// The SoftLayer_Software_Component_AntivirusSpyware_Mcafee_Epo_Version45 data type represents a single McAfee Secure anti-virus/spyware software component that uses the ePolicy Orchestrator version 4.5 backend. +type Software_Component_AntivirusSpyware_Mcafee_Epo_Version45 struct { + Software_Component_AntivirusSpyware_Mcafee + + // The virus scan agent details. + AgentDetails *McAfee_Epolicy_Orchestrator_Version45_Agent_Details `json:"agentDetails,omitempty" xmlrpc:"agentDetails,omitempty"` + + // The current anti-virus policy. + CurrentAntivirusPolicy *int `json:"currentAntivirusPolicy,omitempty" xmlrpc:"currentAntivirusPolicy,omitempty"` + + // The virus definition file version. + DataFileVersion *McAfee_Epolicy_Orchestrator_Version45_Product_Properties `json:"dataFileVersion,omitempty" xmlrpc:"dataFileVersion,omitempty"` + + // The version of ePolicy Orchestrator that the anti-virus/spyware client communicates with. + EpoVersion *string `json:"epoVersion,omitempty" xmlrpc:"epoVersion,omitempty"` + + // A count of the latest access protection events. 
+ LatestAccessProtectionEventCount *uint `json:"latestAccessProtectionEventCount,omitempty" xmlrpc:"latestAccessProtectionEventCount,omitempty"` + + // The latest access protection events. + LatestAccessProtectionEvents []McAfee_Epolicy_Orchestrator_Version45_Event `json:"latestAccessProtectionEvents,omitempty" xmlrpc:"latestAccessProtectionEvents,omitempty"` + + // A count of the latest anti-virus events. + LatestAntivirusEventCount *uint `json:"latestAntivirusEventCount,omitempty" xmlrpc:"latestAntivirusEventCount,omitempty"` + + // The latest anti-virus events. + LatestAntivirusEvents []McAfee_Epolicy_Orchestrator_Version45_Event `json:"latestAntivirusEvents,omitempty" xmlrpc:"latestAntivirusEvents,omitempty"` + + // A count of the latest spyware events + LatestSpywareEventCount *uint `json:"latestSpywareEventCount,omitempty" xmlrpc:"latestSpywareEventCount,omitempty"` + + // The latest spyware events + LatestSpywareEvents []McAfee_Epolicy_Orchestrator_Version45_Event `json:"latestSpywareEvents,omitempty" xmlrpc:"latestSpywareEvents,omitempty"` + + // The current transaction status of a server. + TransactionStatus *string `json:"transactionStatus,omitempty" xmlrpc:"transactionStatus,omitempty"` +} + +// This object specifies a specific type of Software Component: A control panel instance. Control panel installations have a specific default ports and patterns for usernames and passwords. Defaults are initiated by this object. +type Software_Component_ControlPanel struct { + Software_Component +} + +// This object specifies a specific Software Component: A cPanel instance. cPanel installations have a specific default port (2086) and a pattern for usernames and passwords. Defaults are initiated by this object. +type Software_Component_ControlPanel_Cpanel struct { + Software_Component +} + +// This object specifies a specific type of control panel Software Component: An Idera instance. +type Software_Component_ControlPanel_Idera struct { + Software_Component +} + +// This object specifies a specific type of Software Component: A Idera Server Backup instance. +type Software_Component_ControlPanel_Idera_ServerBackup struct { + Software_Component_ControlPanel_Idera +} + +// This object is a parent class for Microsoft Products, like Web Matrix +type Software_Component_ControlPanel_Microsoft struct { + Software_Component +} + +// This object specifies a specific Software Component: A WebPlatform instance. WebPlatform installations have a specific xml config with usernames and passwords. Defaults are initiated by this object. +type Software_Component_ControlPanel_Microsoft_WebPlatform struct { + Software_Component_ControlPanel_Microsoft +} + +// This object is a parent class for SWSoft Products, like Plesk +type Software_Component_ControlPanel_Parallels struct { + Software_Component +} + +// This object specifies a specific Software Component: A Plesk instance produced by SWSoft. SWSoft Plesk installations have a specific default port (8443) and a pattern for usernames and passwords. Defaults are initiated by this object. +type Software_Component_ControlPanel_Parallels_Plesk struct { + Software_Component_ControlPanel_Parallels +} + +// This object specifies a specific type of control panel Software Component: A R1soft instance. +type Software_Component_ControlPanel_R1soft struct { + Software_Component +} + +// This object specifies a specific type of Software Component: A R1soft continuous data protection instance. 
+type Software_Component_ControlPanel_R1soft_Cdp struct { + Software_Component_ControlPanel_R1soft +} + +// This object specifies a specific type of Software Component: A R1Soft Server Backup instance. +type Software_Component_ControlPanel_R1soft_ServerBackup struct { + Software_Component_ControlPanel_R1soft +} + +// This object is a parent class for SWSoft Products, like Plesk +type Software_Component_ControlPanel_Swsoft struct { + Software_Component +} + +// This object specifies a specific Software Component: A Helm instance produced by Webhost Automation. WEbhost Automation's Helm installations have a specific default port (8086) and a pattern for usernames and passwords. Defaults are initiated by this object. +type Software_Component_ControlPanel_WebhostAutomation struct { + Software_Component +} + +// This object specifies a specific type of Software Component: A Host Intrusion Protection System instance. +type Software_Component_HostIps struct { + Software_Component +} + +// The SoftLayer_Software_Component_HostIps_Mcafee represents a single host IPS software component. +type Software_Component_HostIps_Mcafee struct { + Software_Component_HostIps +} + +// The SoftLayer_Software_Component_HostIps_Mcafee_Epo_Version36_Hips data type represents a single McAfee Secure Host IPS software component that uses the ePolicy Orchestrator version 3.6 backend. +type Software_Component_HostIps_Mcafee_Epo_Version36_Hips struct { + Software_Component_HostIps_Mcafee + + // The host IPS agent details. + AgentDetails *McAfee_Epolicy_Orchestrator_Version36_Agent_Details `json:"agentDetails,omitempty" xmlrpc:"agentDetails,omitempty"` + + // A count of the names of the possible policy options for the application mode setting. + ApplicationModePolicyNameCount *uint `json:"applicationModePolicyNameCount,omitempty" xmlrpc:"applicationModePolicyNameCount,omitempty"` + + // The names of the possible policy options for the application mode setting. + ApplicationModePolicyNames []McAfee_Epolicy_Orchestrator_Version36_Policy_Object `json:"applicationModePolicyNames,omitempty" xmlrpc:"applicationModePolicyNames,omitempty"` + + // A count of the names of the possible policy options for the application rule set setting. + ApplicationRuleSetPolicyNameCount *uint `json:"applicationRuleSetPolicyNameCount,omitempty" xmlrpc:"applicationRuleSetPolicyNameCount,omitempty"` + + // The names of the possible policy options for the application rule set setting. + ApplicationRuleSetPolicyNames []McAfee_Epolicy_Orchestrator_Version36_Policy_Object `json:"applicationRuleSetPolicyNames,omitempty" xmlrpc:"applicationRuleSetPolicyNames,omitempty"` + + // A count of the names of the possible options for the enforcement policy setting. + EnforcementPolicyNameCount *uint `json:"enforcementPolicyNameCount,omitempty" xmlrpc:"enforcementPolicyNameCount,omitempty"` + + // The names of the possible options for the enforcement policy setting. + EnforcementPolicyNames []McAfee_Epolicy_Orchestrator_Version36_Policy_Object `json:"enforcementPolicyNames,omitempty" xmlrpc:"enforcementPolicyNames,omitempty"` + + // The version of ePolicy Orchestrator that the host IPS client communicates with. + EpoVersion *string `json:"epoVersion,omitempty" xmlrpc:"epoVersion,omitempty"` + + // A count of the names of the possible policy options for the firewall mode setting. 
+ FirewallModePolicyNameCount *uint `json:"firewallModePolicyNameCount,omitempty" xmlrpc:"firewallModePolicyNameCount,omitempty"` + + // The names of the possible policy options for the firewall mode setting. + FirewallModePolicyNames []McAfee_Epolicy_Orchestrator_Version36_Policy_Object `json:"firewallModePolicyNames,omitempty" xmlrpc:"firewallModePolicyNames,omitempty"` + + // A count of the names of the possible policy options for the firewall rule set setting. + FirewallRuleSetPolicyNameCount *uint `json:"firewallRuleSetPolicyNameCount,omitempty" xmlrpc:"firewallRuleSetPolicyNameCount,omitempty"` + + // The names of the possible policy options for the firewall rule set setting. + FirewallRuleSetPolicyNames []McAfee_Epolicy_Orchestrator_Version36_Policy_Object `json:"firewallRuleSetPolicyNames,omitempty" xmlrpc:"firewallRuleSetPolicyNames,omitempty"` + + // A count of the names of the possible policy options for the host IPS mode setting. + IpsModePolicyNameCount *uint `json:"ipsModePolicyNameCount,omitempty" xmlrpc:"ipsModePolicyNameCount,omitempty"` + + // The names of the possible policy options for the host IPS mode setting. + IpsModePolicyNames []McAfee_Epolicy_Orchestrator_Version36_Policy_Object `json:"ipsModePolicyNames,omitempty" xmlrpc:"ipsModePolicyNames,omitempty"` + + // A count of the names of the possible policy options for the host IPS protection setting. + IpsProtectionPolicyNameCount *uint `json:"ipsProtectionPolicyNameCount,omitempty" xmlrpc:"ipsProtectionPolicyNameCount,omitempty"` + + // The names of the possible policy options for the host IPS protection setting. + IpsProtectionPolicyNames []McAfee_Epolicy_Orchestrator_Version36_Policy_Object `json:"ipsProtectionPolicyNames,omitempty" xmlrpc:"ipsProtectionPolicyNames,omitempty"` + + // The current transaction status of a server. + TransactionStatus *string `json:"transactionStatus,omitempty" xmlrpc:"transactionStatus,omitempty"` +} + +// The SoftLayer_Software_Component_HostIps_Mcafee_Epo_Version36_Hips_Version6 data type represents a single McAfee Secure Host IPS software component for version 6 of the Host IPS client and uses the ePolicy Orchestrator version 3.6 backend. +type Software_Component_HostIps_Mcafee_Epo_Version36_Hips_Version6 struct { + Software_Component_HostIps_Mcafee_Epo_Version36_Hips + + // A count of the blocked application events for this software component. + BlockedApplicationEventCount *uint `json:"blockedApplicationEventCount,omitempty" xmlrpc:"blockedApplicationEventCount,omitempty"` + + // The blocked application events for this software component. + BlockedApplicationEvents []McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_BlockedApplicationEvent `json:"blockedApplicationEvents,omitempty" xmlrpc:"blockedApplicationEvents,omitempty"` + + // A count of the host IPS events for this software component. + IpsEventCount *uint `json:"ipsEventCount,omitempty" xmlrpc:"ipsEventCount,omitempty"` + + // The host IPS events for this software component. + IpsEvents []McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_IPSEvent `json:"ipsEvents,omitempty" xmlrpc:"ipsEvents,omitempty"` +} + +// The SoftLayer_Software_Component_HostIps_Mcafee_Epo_Version36_Hips_Version7 data type represents a single McAfee Secure Host IPS software component for version 7 of the Host IPS client and uses the ePolicy Orchestrator version 3.6 backend. 
+type Software_Component_HostIps_Mcafee_Epo_Version36_Hips_Version7 struct { + Software_Component_HostIps_Mcafee_Epo_Version36_Hips + + // A count of the blocked application events for this software component. + BlockedApplicationEventCount *uint `json:"blockedApplicationEventCount,omitempty" xmlrpc:"blockedApplicationEventCount,omitempty"` + + // The blocked application events for this software component. + BlockedApplicationEvents []McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_BlockedApplicationEvent `json:"blockedApplicationEvents,omitempty" xmlrpc:"blockedApplicationEvents,omitempty"` + + // A count of the host IPS events for this software component. + IpsEventCount *uint `json:"ipsEventCount,omitempty" xmlrpc:"ipsEventCount,omitempty"` + + // The host IPS events for this software component. + IpsEvents []McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_IPSEvent `json:"ipsEvents,omitempty" xmlrpc:"ipsEvents,omitempty"` +} + +// The SoftLayer_Software_Component_HostIps_Mcafee_Epo_Version45_Hips data type represents a single McAfee Secure Host IPS software component that uses the ePolicy Orchestrator version 4.5 backend. +type Software_Component_HostIps_Mcafee_Epo_Version45_Hips struct { + Software_Component_HostIps_Mcafee + + // The host IPS agent details. + AgentDetails *McAfee_Epolicy_Orchestrator_Version45_Agent_Details `json:"agentDetails,omitempty" xmlrpc:"agentDetails,omitempty"` + + // A count of the names of the possible policy options for the application mode setting. + ApplicationModePolicyNameCount *uint `json:"applicationModePolicyNameCount,omitempty" xmlrpc:"applicationModePolicyNameCount,omitempty"` + + // The names of the possible policy options for the application mode setting. + ApplicationModePolicyNames []McAfee_Epolicy_Orchestrator_Version45_Policy_Object `json:"applicationModePolicyNames,omitempty" xmlrpc:"applicationModePolicyNames,omitempty"` + + // A count of the names of the possible policy options for the application rule set setting. + ApplicationRuleSetPolicyNameCount *uint `json:"applicationRuleSetPolicyNameCount,omitempty" xmlrpc:"applicationRuleSetPolicyNameCount,omitempty"` + + // The names of the possible policy options for the application rule set setting. + ApplicationRuleSetPolicyNames []McAfee_Epolicy_Orchestrator_Version45_Policy_Object `json:"applicationRuleSetPolicyNames,omitempty" xmlrpc:"applicationRuleSetPolicyNames,omitempty"` + + // A count of the blocked application events for this software component. + BlockedApplicationEventCount *uint `json:"blockedApplicationEventCount,omitempty" xmlrpc:"blockedApplicationEventCount,omitempty"` + + // The blocked application events for this software component. + BlockedApplicationEvents []McAfee_Epolicy_Orchestrator_Version45_Event `json:"blockedApplicationEvents,omitempty" xmlrpc:"blockedApplicationEvents,omitempty"` + + // A count of the names of the possible options for the enforcement policy setting. + EnforcementPolicyNameCount *uint `json:"enforcementPolicyNameCount,omitempty" xmlrpc:"enforcementPolicyNameCount,omitempty"` + + // The names of the possible options for the enforcement policy setting. + EnforcementPolicyNames []McAfee_Epolicy_Orchestrator_Version45_Policy_Object `json:"enforcementPolicyNames,omitempty" xmlrpc:"enforcementPolicyNames,omitempty"` + + // The version of ePolicy Orchestrator that the host IPS client communicates with. 
+ EpoVersion *string `json:"epoVersion,omitempty" xmlrpc:"epoVersion,omitempty"` + + // A count of the names of the possible policy options for the firewall mode setting. + FirewallModePolicyNameCount *uint `json:"firewallModePolicyNameCount,omitempty" xmlrpc:"firewallModePolicyNameCount,omitempty"` + + // The names of the possible policy options for the firewall mode setting. + FirewallModePolicyNames []McAfee_Epolicy_Orchestrator_Version45_Policy_Object `json:"firewallModePolicyNames,omitempty" xmlrpc:"firewallModePolicyNames,omitempty"` + + // A count of the names of the possible policy options for the firewall rule set setting. + FirewallRuleSetPolicyNameCount *uint `json:"firewallRuleSetPolicyNameCount,omitempty" xmlrpc:"firewallRuleSetPolicyNameCount,omitempty"` + + // The names of the possible policy options for the firewall rule set setting. + FirewallRuleSetPolicyNames []McAfee_Epolicy_Orchestrator_Version45_Policy_Object `json:"firewallRuleSetPolicyNames,omitempty" xmlrpc:"firewallRuleSetPolicyNames,omitempty"` + + // A count of the host IPS events for this software component. + IpsEventCount *uint `json:"ipsEventCount,omitempty" xmlrpc:"ipsEventCount,omitempty"` + + // The host IPS events for this software component. + IpsEvents []McAfee_Epolicy_Orchestrator_Version45_Event `json:"ipsEvents,omitempty" xmlrpc:"ipsEvents,omitempty"` + + // A count of the names of the possible policy options for the host IPS mode setting. + IpsModePolicyNameCount *uint `json:"ipsModePolicyNameCount,omitempty" xmlrpc:"ipsModePolicyNameCount,omitempty"` + + // The names of the possible policy options for the host IPS mode setting. + IpsModePolicyNames []McAfee_Epolicy_Orchestrator_Version45_Policy_Object `json:"ipsModePolicyNames,omitempty" xmlrpc:"ipsModePolicyNames,omitempty"` + + // A count of the names of the possible policy options for the host IPS protection setting. + IpsProtectionPolicyNameCount *uint `json:"ipsProtectionPolicyNameCount,omitempty" xmlrpc:"ipsProtectionPolicyNameCount,omitempty"` + + // The names of the possible policy options for the host IPS protection setting. + IpsProtectionPolicyNames []McAfee_Epolicy_Orchestrator_Version45_Policy_Object `json:"ipsProtectionPolicyNames,omitempty" xmlrpc:"ipsProtectionPolicyNames,omitempty"` + + // The current transaction status of a server. + TransactionStatus *string `json:"transactionStatus,omitempty" xmlrpc:"transactionStatus,omitempty"` +} + +// The SoftLayer_Software_Component_HostIps_Mcafee_Epo_Version45_Hips_Version7 data type represents a single McAfee Secure Host IPS software component for version 7 of the Host IPS client and uses the ePolicy Orchestrator version 4.5 backend. +type Software_Component_HostIps_Mcafee_Epo_Version45_Hips_Version7 struct { + Software_Component_HostIps_Mcafee_Epo_Version45_Hips +} + +// The SoftLayer_Software_Component_HostIps_Mcafee_Epo_Version45_Hips_Version8 data type represents a single McAfee Secure Host IPS software component for version 8 of the Host IPS client and uses the ePolicy Orchestrator version 4.5 backend. +type Software_Component_HostIps_Mcafee_Epo_Version45_Hips_Version8 struct { + Software_Component_HostIps_Mcafee_Epo_Version45_Hips +} + +// SoftLayer_Software_Component_OperatingSystem extends the [[SoftLayer_Software_Component]] data type to include operating system specific properties. +type Software_Component_OperatingSystem struct { + Software_Component + + // The date in which the license for this software expires. 
+ LicenseExpirationDate *Time `json:"licenseExpirationDate,omitempty" xmlrpc:"licenseExpirationDate,omitempty"` + + // A count of an operating system's associated [[SoftLayer_Hardware_Component_Partition_Template|Partition Templates]] that can be used to configure a hardware drive. + PartitionTemplateCount *uint `json:"partitionTemplateCount,omitempty" xmlrpc:"partitionTemplateCount,omitempty"` + + // An operating system's associated [[SoftLayer_Hardware_Component_Partition_Template|Partition Templates]] that can be used to configure a hardware drive. + PartitionTemplates []Hardware_Component_Partition_Template `json:"partitionTemplates,omitempty" xmlrpc:"partitionTemplates,omitempty"` + + // An operating systems associated [[SoftLayer_Provisioning_Version1_Transaction_Group|Transaction Group]]. A transaction group is a list of operations that will occur during the installment of an operating system. + ReloadTransactionGroup *Provisioning_Version1_Transaction_Group `json:"reloadTransactionGroup,omitempty" xmlrpc:"reloadTransactionGroup,omitempty"` +} + +// This object specifies a specific type of Software Component: A package instance. +type Software_Component_Package struct { + Software_Component +} + +// This object specifies a specific type of Software Component: A package management instance. +type Software_Component_Package_Management struct { + Software_Component_Package +} + +// This object specifies a specific type of Software Component: A Ksplice instance. +type Software_Component_Package_Management_Ksplice struct { + Software_Component_Package_Management +} + +// This SoftLayer_Software_Component_Password data type contains a password for a specific software component instance. +type Software_Component_Password struct { + Entity + + // The date this username/password pair was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // An id number for this specific username/password pair. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date of the last modification to this username/password pair. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A note string stored for this username/password pair. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The password part of the username/password pair. + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // The application access port for the Software Component. + Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"` + + // The SoftLayer_Software_Component instance that this username/password pair is valid for. + Software *Software_Component `json:"software,omitempty" xmlrpc:"software,omitempty"` + + // An id number for the software component this username/password pair is valid for. + SoftwareId *int `json:"softwareId,omitempty" xmlrpc:"softwareId,omitempty"` + + // A count of sSH keys to be installed on the server during provisioning or an OS reload. + SshKeyCount *uint `json:"sshKeyCount,omitempty" xmlrpc:"sshKeyCount,omitempty"` + + // SSH keys to be installed on the server during provisioning or an OS reload. + SshKeys []Security_Ssh_Key `json:"sshKeys,omitempty" xmlrpc:"sshKeys,omitempty"` + + // The username part of the username/password pair. 
+ Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// This object allows you to find the history of password changes for a specific SoftLayer_Software Component +type Software_Component_Password_History struct { + Entity + + // The date this username/password pair was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A note string stored for this username/password pair. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The password part of this specific password history instance. + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // An installed and licensed instance of a piece of software + SoftwareComponent *Software_Component `json:"softwareComponent,omitempty" xmlrpc:"softwareComponent,omitempty"` + + // The id number for the Software Component this username/password pair is for. + SoftwareComponentId *int `json:"softwareComponentId,omitempty" xmlrpc:"softwareComponentId,omitempty"` + + // The username part of this specific password history instance. + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// This object specifies a specific type of Software Component: A security instance. Security installations have custom configurations for password requirements. +type Software_Component_Security struct { + Software_Component +} + +// This object specifies a specific Software Component: A SafeNet instance. SafeNet installations have custom configurations for password requirements. +type Software_Component_Security_SafeNet struct { + Software_Component_Security +} + +// This class holds a description for a specific installation of a Software Component. +// +// SoftLayer_Software_Licenses tie a Software Component (A specific installation on a piece of hardware) to it's description. +// +// The "Manufacturer" and "Name" properties of a SoftLayer_Software_Description are used by the framework to factory specific objects, objects that may have special methods for that specific piece of software, or objects that contain application specific data, such as default ports. For example, if you create a SoftLayer_Software_Component who's SoftLayer_Software_License points to the SoftLayer_Software_Description for "Swsoft" "Plesk", you'll actually get a SoftLayer_Software_Component_Swsoft_Plesk object. +type Software_Description struct { + Entity + + // A count of + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // no documentation yet + Attributes []Software_Description_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // The average amount of time that a software description takes to install. + AverageInstallationDuration *int `json:"averageInstallationDuration,omitempty" xmlrpc:"averageInstallationDuration,omitempty"` + + // A count of a list of the software descriptions that are compatible with this software description. + CompatibleSoftwareDescriptionCount *uint `json:"compatibleSoftwareDescriptionCount,omitempty" xmlrpc:"compatibleSoftwareDescriptionCount,omitempty"` + + // A list of the software descriptions that are compatible with this software description. + CompatibleSoftwareDescriptions []Software_Description `json:"compatibleSoftwareDescriptions,omitempty" xmlrpc:"compatibleSoftwareDescriptions,omitempty"` + + // This is set to '1' if this Software Description describes a Control Panel. 
+ ControlPanel *int `json:"controlPanel,omitempty" xmlrpc:"controlPanel,omitempty"` + + // A count of the feature attributes of a software description. + FeatureCount *uint `json:"featureCount,omitempty" xmlrpc:"featureCount,omitempty"` + + // The feature attributes of a software description. + Features []Software_Description_Feature `json:"features,omitempty" xmlrpc:"features,omitempty"` + + // An ID number to identify this Software Description. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The latest version of a software description. + LatestVersion []Software_Description `json:"latestVersion,omitempty" xmlrpc:"latestVersion,omitempty"` + + // A count of the latest version of a software description. + LatestVersionCount *uint `json:"latestVersionCount,omitempty" xmlrpc:"latestVersionCount,omitempty"` + + // The unit of measurement (day, month, or year) for license registration. Used in conjunction with licenseTermValue to determine overall license registration length of a new license. + LicenseTermUnit *string `json:"licenseTermUnit,omitempty" xmlrpc:"licenseTermUnit,omitempty"` + + // The number of units (licenseTermUnit) a new license is valid for at the time of registration. + LicenseTermValue *int `json:"licenseTermValue,omitempty" xmlrpc:"licenseTermValue,omitempty"` + + // The manufacturer, name and version of a piece of software. + LongDescription *string `json:"longDescription,omitempty" xmlrpc:"longDescription,omitempty"` + + // The name of the manufacturer for this specific piece of software. This name is used by SoftLayer_Software_Component to tailor make (factory) specific types of Software Components that know details like default ports. + Manufacturer *string `json:"manufacturer,omitempty" xmlrpc:"manufacturer,omitempty"` + + // The name of this specific piece of software. This name is used by SoftLayer_Software_Component to tailor make (factory) specific types of Software Components that know details like default ports. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // This is set to '1' if this Software Description describes an Operating System. + OperatingSystem *int `json:"operatingSystem,omitempty" xmlrpc:"operatingSystem,omitempty"` + + // A count of the various product items to which this software description is linked. + ProductItemCount *uint `json:"productItemCount,omitempty" xmlrpc:"productItemCount,omitempty"` + + // The various product items to which this software description is linked. + ProductItems []Product_Item `json:"productItems,omitempty" xmlrpc:"productItems,omitempty"` + + // This details the provisioning transaction group for this software. This is only valid for Operating System software. + ProvisionTransactionGroup *Provisioning_Version1_Transaction_Group `json:"provisionTransactionGroup,omitempty" xmlrpc:"provisionTransactionGroup,omitempty"` + + // A reference code is structured as three tokens separated by underscores. The first token represents the product, the second is the version of the product, and the third is whether the software is 32 or 64bit. + ReferenceCode *string `json:"referenceCode,omitempty" xmlrpc:"referenceCode,omitempty"` + + // The transaction group that a software description belongs to. A transaction group is a sequence of transactions that must be performed in a specific order for the installation of software. 
+ ReloadTransactionGroup *Provisioning_Version1_Transaction_Group `json:"reloadTransactionGroup,omitempty" xmlrpc:"reloadTransactionGroup,omitempty"` + + // The default user created for a given a software description. + RequiredUser *string `json:"requiredUser,omitempty" xmlrpc:"requiredUser,omitempty"` + + // A count of software Licenses that govern this Software Description. + SoftwareLicenseCount *uint `json:"softwareLicenseCount,omitempty" xmlrpc:"softwareLicenseCount,omitempty"` + + // Software Licenses that govern this Software Description. + SoftwareLicenses []Software_License `json:"softwareLicenses,omitempty" xmlrpc:"softwareLicenses,omitempty"` + + // A suggestion for an upgrade path from this Software Description + UpgradeSoftwareDescription *Software_Description `json:"upgradeSoftwareDescription,omitempty" xmlrpc:"upgradeSoftwareDescription,omitempty"` + + // Contains the ID of the suggested upgrade from this Software_Description to a more powerful software installation. + UpgradeSoftwareDescriptionId *int `json:"upgradeSoftwareDescriptionId,omitempty" xmlrpc:"upgradeSoftwareDescriptionId,omitempty"` + + // A suggestion for an upgrade path from this Software Description (Deprecated - Use upgradeSoftwareDescription) + UpgradeSwDesc *Software_Description `json:"upgradeSwDesc,omitempty" xmlrpc:"upgradeSwDesc,omitempty"` + + // Contains the ID of the suggested upgrade from this Software_Description to a more powerful software installation. (Deprecated - Use upgradeSoftwareDescriptionId) + UpgradeSwDescId *int `json:"upgradeSwDescId,omitempty" xmlrpc:"upgradeSwDescId,omitempty"` + + // A count of + ValidFilesystemTypeCount *uint `json:"validFilesystemTypeCount,omitempty" xmlrpc:"validFilesystemTypeCount,omitempty"` + + // no documentation yet + ValidFilesystemTypes []Configuration_Storage_Filesystem_Type `json:"validFilesystemTypes,omitempty" xmlrpc:"validFilesystemTypes,omitempty"` + + // The version of this specific piece of software. + Version *string `json:"version,omitempty" xmlrpc:"version,omitempty"` + + // This is set to '1' if this Software Description can be licensed to a Virtual Machine (an IP address). + VirtualLicense *int `json:"virtualLicense,omitempty" xmlrpc:"virtualLicense,omitempty"` + + // This is set to '1' if this Software Description a platform for hosting virtual servers. + VirtualizationPlatform *int `json:"virtualizationPlatform,omitempty" xmlrpc:"virtualizationPlatform,omitempty"` +} + +// The SoftLayer_Software_Description_Attribute data type represents an attributes associated with this software description. +type Software_Description_Attribute struct { + Entity + + // no documentation yet + SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"` + + // no documentation yet + Type *Software_Description_Attribute_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The value that was assigned to this attribute. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_Software_Description_Attribute_Type data type represents the type of an attribute. +type Software_Description_Attribute_Type struct { + Entity + + // The keyname for this attribute type. + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` +} + +// The SoftLayer_Software_Description_Feature data type represents a single software description feature. A feature may show up on more than one software description and can not be created, modified, or removed. 
+type Software_Description_Feature struct { + Entity + + // The unique identifier for a software description feature. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A unique name used to reference this software description feature. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The name of a software description feature. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The vendor that a software description feature belongs to. + Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"` +} + +// This class represents a software description's required user +type Software_Description_RequiredUser struct { + Entity + + // If the default password is set the user will be created with that password, otherwise a random password is generated. + DefaultPassword *string `json:"defaultPassword,omitempty" xmlrpc:"defaultPassword,omitempty"` + + // If this software has a required user (such as "root") this string contains it's name. + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// This class describes a specific type of license, like a Microsoft Windows Site License, a GPL license, or a license of another type. +type Software_License struct { + Entity + + // The account that owns this specific License instance. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // An ID number for this specific License type. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The account that owns this specific License instance. + Owner *Account `json:"owner,omitempty" xmlrpc:"owner,omitempty"` + + // A Description of the software that this license instance is valid for. + SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"` + + // The ID number of a Software Description that this specific license is valid for. + SoftwareDescriptionId *int `json:"softwareDescriptionId,omitempty" xmlrpc:"softwareDescriptionId,omitempty"` +} + +// SoftLayer_Software_VirtualLicense is the application class that handles a special type of Software License. Most software licenses are licensed to a specific hardware ID; virtual licenses are designed for virtual machines and therefore are assigned to an IP Address. Not all software packages can be "virtual licensed". +type Software_VirtualLicense struct { + Entity + + // The customer account this Virtual License belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The ID of the SoftLayer Account to which this Virtual License belongs to. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The billing item for a software virtual license. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The hardware record to which the software virtual license is assigned. + HostHardware *Hardware_Server `json:"hostHardware,omitempty" xmlrpc:"hostHardware,omitempty"` + + // The ID of the SoftLayer Hardware Server record to which this Virtual License belongs. + HostHardwareId *int `json:"hostHardwareId,omitempty" xmlrpc:"hostHardwareId,omitempty"` + + // An ID number for this Virtual License instance. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The specific IP address this Virtual License belongs to. + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // The IP Address record associated with a virtual license. 
+ IpAddressRecord *Network_Subnet_IpAddress `json:"ipAddressRecord,omitempty" xmlrpc:"ipAddressRecord,omitempty"` + + // The License Key for this specific Virtual License. + Key *string `json:"key,omitempty" xmlrpc:"key,omitempty"` + + // A "notes" string attached to this specific Virtual License. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The SoftLayer_Software_Description that this virtual license is for. + SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"` + + // The Software Description ID this Virtual License is for. + SoftwareDescriptionId *int `json:"softwareDescriptionId,omitempty" xmlrpc:"softwareDescriptionId,omitempty"` + + // The subnet this Virtual License's IP address belongs to. + Subnet *Network_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` + + // The ID of the SoftLayer Network Subnet this Virtual License belongs to. + SubnetId *int `json:"subnetId,omitempty" xmlrpc:"subnetId,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/survey.go b/vendor/github.com/softlayer/softlayer-go/datatypes/survey.go new file mode 100644 index 000000000000..243c10073e3e --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/survey.go @@ -0,0 +1,150 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Survey data type contains general information relating to a single SoftLayer survey. +type Survey struct { + Entity + + // A flag indicating if a survey can be taken. + Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"` + + // The date that a survey had originally started. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A survey's id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A survey's name or title. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of the questions for a survey. + QuestionCount *uint `json:"questionCount,omitempty" xmlrpc:"questionCount,omitempty"` + + // The questions for a survey. + Questions []Survey_Question `json:"questions,omitempty" xmlrpc:"questions,omitempty"` + + // The status of the survey + Status *Survey_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The status id of the survey. + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // The type of survey + Type *Survey_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The type id of the survey. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// The SoftLayer_Survey_Answer data type contains general information relating to a single SoftLayer survey answer. +type Survey_Answer struct { + Entity + + // A survey answer's answer that a user can response too. 
+ Answer *string `json:"answer,omitempty" xmlrpc:"answer,omitempty"` + + // A value indicating the order in when a survey answer will be displayed to a user. + AnswerOrder *int `json:"answerOrder,omitempty" xmlrpc:"answerOrder,omitempty"` + + // A survey answer's Id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The survey question that this answer belongs to. + SurveyQuestion *Survey_Question `json:"surveyQuestion,omitempty" xmlrpc:"surveyQuestion,omitempty"` + + // A survey answer's associated [[SoftLayer_Survey_Question|Survey Question]] Id. + SurveyQuestionId *int `json:"surveyQuestionId,omitempty" xmlrpc:"surveyQuestionId,omitempty"` +} + +// The SoftLayer_Survey_Question data type contains general information relating to a single SoftLayer survey question. +type Survey_Question struct { + Entity + + // A count of the possible answers for a survey question. + AnswerCount *uint `json:"answerCount,omitempty" xmlrpc:"answerCount,omitempty"` + + // The possible answers for a survey question. + Answers []Survey_Answer `json:"answers,omitempty" xmlrpc:"answers,omitempty"` + + // A survey question's Id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A flag indicating that a survey question requires a response. + IsRequired *int `json:"isRequired,omitempty" xmlrpc:"isRequired,omitempty"` + + // A flag indicating that a survey question can have multiple answers responded to. + MultiAnswer *int `json:"multiAnswer,omitempty" xmlrpc:"multiAnswer,omitempty"` + + // A survey question's question. + Question *string `json:"question,omitempty" xmlrpc:"question,omitempty"` + + // A value indicating the order in when a survey question will be asked. + QuestionOrder *int `json:"questionOrder,omitempty" xmlrpc:"questionOrder,omitempty"` + + // The survey that a question belongs to. + Survey *Survey `json:"survey,omitempty" xmlrpc:"survey,omitempty"` + + // A survey question's associated [[SoftLayer_Survey|Survey]] Id. + SurveyId *int `json:"surveyId,omitempty" xmlrpc:"surveyId,omitempty"` +} + +// The SoftLayer_Survey_Response data type contains general information relating to a single SoftLayer survey response. +type Survey_Response struct { + Entity + + // The user typed response for the [[SoftLayer_Survey_Answer|Survey Answer]] that a response is associated with. + OtherAnswer *string `json:"otherAnswer,omitempty" xmlrpc:"otherAnswer,omitempty"` + + // The survey answer that this response was to. + SurveyAnswer *Survey_Answer `json:"surveyAnswer,omitempty" xmlrpc:"surveyAnswer,omitempty"` + + // The Id of the [[SoftLayer_Survey_Answer|Survey Answer]] that a response was made for. + SurveyAnswerId *int `json:"surveyAnswerId,omitempty" xmlrpc:"surveyAnswerId,omitempty"` +} + +// The SoftLayer_Survey_Status data type contains survey status information. +type Survey_Status struct { + Entity + + // Description of a survey status + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a survey status + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Name of a survey status + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Survey_Type data type contains survey type information. 
+type Survey_Type struct { + Entity + + // Description of a survey type + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a survey type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Name of a survey type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/tag.go b/vendor/github.com/softlayer/softlayer-go/datatypes/tag.go new file mode 100644 index 000000000000..0e5a25a6265a --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/tag.go @@ -0,0 +1,157 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Tag data type is an optional type associated with hardware. The account ID that the tag is tied to, and the tag itself are stored in this data type. There is also a flag to denote whether the tag is internal or not. +type Tag struct { + Entity + + // The account to which the tag is tied. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // Account the tag belongs to. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // Unique identifier for a tag. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Indicates whether a tag is internal. + Internal *int `json:"internal,omitempty" xmlrpc:"internal,omitempty"` + + // Name of the tag. The characters permitted are A-Z, 0-9, whitespace, _ (underscore), + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of references that tie object to the tag. + ReferenceCount *uint `json:"referenceCount,omitempty" xmlrpc:"referenceCount,omitempty"` + + // References that tie object to the tag. 
+ References []Tag_Reference `json:"references,omitempty" xmlrpc:"references,omitempty"` +} + +// no documentation yet +type Tag_Reference struct { + Entity + + // no documentation yet + Customer *User_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"` + + // no documentation yet + EmpRecordId *int `json:"empRecordId,omitempty" xmlrpc:"empRecordId,omitempty"` + + // no documentation yet + Employee *User_Employee `json:"employee,omitempty" xmlrpc:"employee,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` + + // no documentation yet + Tag *Tag `json:"tag,omitempty" xmlrpc:"tag,omitempty"` + + // no documentation yet + TagId *int `json:"tagId,omitempty" xmlrpc:"tagId,omitempty"` + + // no documentation yet + TagType *Tag_Type `json:"tagType,omitempty" xmlrpc:"tagType,omitempty"` + + // no documentation yet + TagTypeId *int `json:"tagTypeId,omitempty" xmlrpc:"tagTypeId,omitempty"` + + // no documentation yet + UsrRecordId *int `json:"usrRecordId,omitempty" xmlrpc:"usrRecordId,omitempty"` +} + +// no documentation yet +type Tag_Reference_Hardware struct { + Tag_Reference + + // no documentation yet + Resource *Hardware `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Tag_Reference_Network_Application_Delivery_Controller struct { + Tag_Reference + + // no documentation yet + Resource *Network_Application_Delivery_Controller `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Tag_Reference_Network_Vlan struct { + Tag_Reference + + // no documentation yet + Resource *Network_Vlan `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Tag_Reference_Network_Vlan_Firewall struct { + Tag_Reference + + // no documentation yet + Resource *Network_Vlan_Firewall `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Tag_Reference_Resource_Group struct { + Tag_Reference + + // no documentation yet + Resource *Resource_Group `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Tag_Reference_Virtual_DedicatedHost struct { + Tag_Reference + + // no documentation yet + Resource *Virtual_DedicatedHost `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Tag_Reference_Virtual_Guest struct { + Tag_Reference + + // no documentation yet + Resource *Virtual_Guest `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Tag_Reference_Virtual_Guest_Block_Device_Template_Group struct { + Tag_Reference + + // no documentation yet + Resource *Virtual_Guest_Block_Device_Template_Group `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Tag_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/ticket.go b/vendor/github.com/softlayer/softlayer-go/datatypes/ticket.go new file mode 100644 index 000000000000..3abcc221ffe7 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/ticket.go @@ -0,0 +1,733 @@ +/** + * Copyright 2016 IBM Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Ticket data type models a single SoftLayer customer support or notification ticket. Each ticket object contains references to it's updates, the user it's assigned to, the SoftLayer department and employee that it's assigned to, and any hardware objects or attached files associated with the ticket. Tickets are described in further detail on the [[SoftLayer_Ticket]] service page. +// +// To create a support ticket execute the [[SoftLayer_Ticket::createStandardTicket|createStandardTicket]] or [[SoftLayer_Ticket::createAdministrativeTicket|createAdministrativeTicket]] methods in the SoftLayer_Ticket service. To create an upgrade ticket for the SoftLayer sales group execute the [[SoftLayer_Ticket::createUpgradeTicket|createUpgradeTicket]]. +type Ticket struct { + Entity + + // The SoftLayer customer account associated with a ticket. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // An internal identifier of the SoftLayer customer account that a ticket is associated with. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of + AssignedAgentCount *uint `json:"assignedAgentCount,omitempty" xmlrpc:"assignedAgentCount,omitempty"` + + // no documentation yet + AssignedAgents []User_Customer `json:"assignedAgents,omitempty" xmlrpc:"assignedAgents,omitempty"` + + // The portal user that a ticket is assigned to. + AssignedUser *User_Customer `json:"assignedUser,omitempty" xmlrpc:"assignedUser,omitempty"` + + // An internal identifier of the portal user that a ticket is assigned to. + AssignedUserId *int `json:"assignedUserId,omitempty" xmlrpc:"assignedUserId,omitempty"` + + // A count of the list of additional emails to notify when a ticket update is made. + AttachedAdditionalEmailCount *uint `json:"attachedAdditionalEmailCount,omitempty" xmlrpc:"attachedAdditionalEmailCount,omitempty"` + + // The list of additional emails to notify when a ticket update is made. + AttachedAdditionalEmails []User_Customer_AdditionalEmail `json:"attachedAdditionalEmails,omitempty" xmlrpc:"attachedAdditionalEmails,omitempty"` + + // A count of the Dedicated Hosts associated with a ticket. This is used in cases where a ticket is directly associated with one or more Dedicated Hosts. + AttachedDedicatedHostCount *uint `json:"attachedDedicatedHostCount,omitempty" xmlrpc:"attachedDedicatedHostCount,omitempty"` + + // The Dedicated Hosts associated with a ticket. This is used in cases where a ticket is directly associated with one or more Dedicated Hosts. + AttachedDedicatedHosts []Virtual_DedicatedHost `json:"attachedDedicatedHosts,omitempty" xmlrpc:"attachedDedicatedHosts,omitempty"` + + // A count of the files attached to a ticket. + AttachedFileCount *uint `json:"attachedFileCount,omitempty" xmlrpc:"attachedFileCount,omitempty"` + + // The files attached to a ticket. 
+ AttachedFiles []Ticket_Attachment_File `json:"attachedFiles,omitempty" xmlrpc:"attachedFiles,omitempty"` + + // The hardware associated with a ticket. This is used in cases where a ticket is directly associated with one or more pieces of hardware. + AttachedHardware []Hardware `json:"attachedHardware,omitempty" xmlrpc:"attachedHardware,omitempty"` + + // no documentation yet + AttachedHardwareCount *uint `json:"attachedHardwareCount,omitempty" xmlrpc:"attachedHardwareCount,omitempty"` + + // A count of + AttachedResourceCount *uint `json:"attachedResourceCount,omitempty" xmlrpc:"attachedResourceCount,omitempty"` + + // no documentation yet + AttachedResources []Ticket_Attachment `json:"attachedResources,omitempty" xmlrpc:"attachedResources,omitempty"` + + // A count of the virtual guests associated with a ticket. This is used in cases where a ticket is directly associated with one or more virtualized guests installations or Virtual Servers. + AttachedVirtualGuestCount *uint `json:"attachedVirtualGuestCount,omitempty" xmlrpc:"attachedVirtualGuestCount,omitempty"` + + // The virtual guests associated with a ticket. This is used in cases where a ticket is directly associated with one or more virtualized guests installations or Virtual Servers. + AttachedVirtualGuests []Virtual_Guest `json:"attachedVirtualGuests,omitempty" xmlrpc:"attachedVirtualGuests,omitempty"` + + // Ticket is waiting on a response from a customer flag. + AwaitingUserResponseFlag *bool `json:"awaitingUserResponseFlag,omitempty" xmlrpc:"awaitingUserResponseFlag,omitempty"` + + // Whether a ticket has a one-time charge associated with it. Standard tickets are free while administrative tickets typically cost $3 USD. + BillableFlag *bool `json:"billableFlag,omitempty" xmlrpc:"billableFlag,omitempty"` + + // A service cancellation request. + CancellationRequest *Billing_Item_Cancellation_Request `json:"cancellationRequest,omitempty" xmlrpc:"cancellationRequest,omitempty"` + + // no documentation yet + ChangeOwnerFlag *bool `json:"changeOwnerFlag,omitempty" xmlrpc:"changeOwnerFlag,omitempty"` + + // The date that a ticket was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of + EmployeeAttachmentCount *uint `json:"employeeAttachmentCount,omitempty" xmlrpc:"employeeAttachmentCount,omitempty"` + + // no documentation yet + EmployeeAttachments []User_Employee `json:"employeeAttachments,omitempty" xmlrpc:"employeeAttachments,omitempty"` + + // A ticket's associated EU compliant record + EuSupportedFlag *bool `json:"euSupportedFlag,omitempty" xmlrpc:"euSupportedFlag,omitempty"` + + // no documentation yet + EuSupportedLocationId *int `json:"euSupportedLocationId,omitempty" xmlrpc:"euSupportedLocationId,omitempty"` + + // Feedback left by a portal or API user on their experiences in a ticket. Final comments may be created after a ticket is closed. + FinalComments *string `json:"finalComments,omitempty" xmlrpc:"finalComments,omitempty"` + + // The first physical or virtual server attached to a ticket. + FirstAttachedResource *Ticket_Attachment `json:"firstAttachedResource,omitempty" xmlrpc:"firstAttachedResource,omitempty"` + + // The first update made to a ticket. This is typically the contents of a ticket when it's created. + FirstUpdate *Ticket_Update `json:"firstUpdate,omitempty" xmlrpc:"firstUpdate,omitempty"` + + // The SoftLayer department that a ticket is assigned to. 
+ Group *Ticket_Group `json:"group,omitempty" xmlrpc:"group,omitempty"` + + // The internal identifier of the SoftLayer department that a ticket is assigned to. + GroupId *int `json:"groupId,omitempty" xmlrpc:"groupId,omitempty"` + + // A ticket's internal identifier. Each ticket is defined by a unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of the invoice items associated with a ticket. Ticket based invoice items only exist when a ticket incurs a fee that has been invoiced. + InvoiceItemCount *uint `json:"invoiceItemCount,omitempty" xmlrpc:"invoiceItemCount,omitempty"` + + // The invoice items associated with a ticket. Ticket based invoice items only exist when a ticket incurs a fee that has been invoiced. + InvoiceItems []Billing_Invoice_Item `json:"invoiceItems,omitempty" xmlrpc:"invoiceItems,omitempty"` + + // no documentation yet + LastActivity *Ticket_Activity `json:"lastActivity,omitempty" xmlrpc:"lastActivity,omitempty"` + + // The date that a ticket was last modified. A modification does not necessarily mean that an update was added. + LastEditDate *Time `json:"lastEditDate,omitempty" xmlrpc:"lastEditDate,omitempty"` + + // The type of user who last edited or updated a ticket. This is either "EMPLOYEE" or "USER". + LastEditType *string `json:"lastEditType,omitempty" xmlrpc:"lastEditType,omitempty"` + + // no documentation yet + LastEditor *User_Interface `json:"lastEditor,omitempty" xmlrpc:"lastEditor,omitempty"` + + // The date that the last ticket update was made + LastResponseDate *Time `json:"lastResponseDate,omitempty" xmlrpc:"lastResponseDate,omitempty"` + + // The last update made to a ticket. + LastUpdate *Ticket_Update `json:"lastUpdate,omitempty" xmlrpc:"lastUpdate,omitempty"` + + // A timestamp of the last time the Ticket was viewed by the active user. + LastViewedDate *Time `json:"lastViewedDate,omitempty" xmlrpc:"lastViewedDate,omitempty"` + + // A ticket's associated location within the SoftLayer location hierarchy. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // The internal identifier of the location associated with a ticket. + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // The date that a ticket was last updated. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // True if there are new, unread updates to this ticket for the current user, False otherwise. + NewUpdatesFlag *bool `json:"newUpdatesFlag,omitempty" xmlrpc:"newUpdatesFlag,omitempty"` + + // Whether or not the user who owns a ticket is notified via email when a ticket is updated. + NotifyUserOnUpdateFlag *bool `json:"notifyUserOnUpdateFlag,omitempty" xmlrpc:"notifyUserOnUpdateFlag,omitempty"` + + // The IP address of the user who opened a ticket. + OriginatingIpAddress *string `json:"originatingIpAddress,omitempty" xmlrpc:"originatingIpAddress,omitempty"` + + // no documentation yet + Priority *int `json:"priority,omitempty" xmlrpc:"priority,omitempty"` + + // no documentation yet + ResponsibleBrandId *int `json:"responsibleBrandId,omitempty" xmlrpc:"responsibleBrandId,omitempty"` + + // A count of + ScheduledActionCount *uint `json:"scheduledActionCount,omitempty" xmlrpc:"scheduledActionCount,omitempty"` + + // no documentation yet + ScheduledActions []Provisioning_Version1_Transaction `json:"scheduledActions,omitempty" xmlrpc:"scheduledActions,omitempty"` + + // The amount of money in US Dollars ($USD) that a ticket has charged to an account. 
A ticket's administrative billing amount is a one time charge and only applies to administrative support tickets. + ServerAdministrationBillingAmount *int `json:"serverAdministrationBillingAmount,omitempty" xmlrpc:"serverAdministrationBillingAmount,omitempty"` + + // The invoice associated with a ticket. Only tickets with an associated administrative charge have an invoice. + ServerAdministrationBillingInvoice *Billing_Invoice `json:"serverAdministrationBillingInvoice,omitempty" xmlrpc:"serverAdministrationBillingInvoice,omitempty"` + + // The internal identifier of the invoice associated with a ticket's administrative charge. Only tickets with an administrative charge have an associated invoice. + ServerAdministrationBillingInvoiceId *int `json:"serverAdministrationBillingInvoiceId,omitempty" xmlrpc:"serverAdministrationBillingInvoiceId,omitempty"` + + // Whether a ticket is a standard or an administrative support ticket. Administrative support tickets typically incur a $3 USD charge. + ServerAdministrationFlag *int `json:"serverAdministrationFlag,omitempty" xmlrpc:"serverAdministrationFlag,omitempty"` + + // The refund invoice associated with a ticket. Only tickets with a refund applied in them have an associated refund invoice. + ServerAdministrationRefundInvoice *Billing_Invoice `json:"serverAdministrationRefundInvoice,omitempty" xmlrpc:"serverAdministrationRefundInvoice,omitempty"` + + // The internal identifier of the refund invoice associated with a ticket. Only tickets with an account refund associated with them have an associated refund invoice. + ServerAdministrationRefundInvoiceId *int `json:"serverAdministrationRefundInvoiceId,omitempty" xmlrpc:"serverAdministrationRefundInvoiceId,omitempty"` + + // no documentation yet + ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"` + + // no documentation yet + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` + + // A ticket's internal identifier at its service provider. Each ticket is defined by a unique identifier. + ServiceProviderResourceId *string `json:"serviceProviderResourceId,omitempty" xmlrpc:"serviceProviderResourceId,omitempty"` + + // no documentation yet + State []Ticket_State `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // A count of + StateCount *uint `json:"stateCount,omitempty" xmlrpc:"stateCount,omitempty"` + + // A ticket's status. + Status *Ticket_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // A ticket status' internal identifier. + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // A ticket's subject. Only standard support tickets have an associated subject. A standard support ticket's title corresponds with it's subject's name. + Subject *Ticket_Subject `json:"subject,omitempty" xmlrpc:"subject,omitempty"` + + // An internal identifier of the pre-set subject that a ticket is associated with. Standard support tickets have a subject set while administrative tickets have a null subject. A standard support ticket's title is the name of it's associated subject. + SubjectId *int `json:"subjectId,omitempty" xmlrpc:"subjectId,omitempty"` + + // A count of + TagReferenceCount *uint `json:"tagReferenceCount,omitempty" xmlrpc:"tagReferenceCount,omitempty"` + + // no documentation yet + TagReferences []Tag_Reference `json:"tagReferences,omitempty" xmlrpc:"tagReferences,omitempty"` + + // A ticket's title. 
This is typically a brief summary of the issue described in the ticket. + Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"` + + // no documentation yet + TotalUpdateCount *int `json:"totalUpdateCount,omitempty" xmlrpc:"totalUpdateCount,omitempty"` + + // A count of a ticket's updates. + UpdateCount *uint `json:"updateCount,omitempty" xmlrpc:"updateCount,omitempty"` + + // A ticket's updates. + Updates []Ticket_Update `json:"updates,omitempty" xmlrpc:"updates,omitempty"` + + // Whether a user is able to update a ticket. + UserEditableFlag *bool `json:"userEditableFlag,omitempty" xmlrpc:"userEditableFlag,omitempty"` +} + +// no documentation yet +type Ticket_Activity struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + CreateTimestamp *Time `json:"createTimestamp,omitempty" xmlrpc:"createTimestamp,omitempty"` + + // no documentation yet + Editor *User_Interface `json:"editor,omitempty" xmlrpc:"editor,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"` + + // no documentation yet + TicketUpdate *Ticket_Update `json:"ticketUpdate,omitempty" xmlrpc:"ticketUpdate,omitempty"` + + // no documentation yet + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// SoftLayer tickets have the ability to be associated with specific pieces of hardware in a customer's inventory. Attaching hardware to a ticket can greatly increase response time from SoftLayer for issues that are related to one or more specific servers on a customer's account. The SoftLayer_Ticket_Attachment_Hardware data type models the relationship between a piece of hardware and a ticket. Only one attachment record may exist per hardware item per ticket. +type Ticket_Attachment struct { + Entity + + // The internal identifier of an item that is attached to a ticket. + AttachmentId *int `json:"attachmentId,omitempty" xmlrpc:"attachmentId,omitempty"` + + // The date that an item was attached to a ticket. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A ticket attachment's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The ticket that an item is attached to. + Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"` + + // The internal identifier of the ticket that an item is attached to. + TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"` +} + +// no documentation yet +type Ticket_Attachment_Assigned_Agent struct { + Ticket_Attachment + + // no documentation yet + AssignedAgent *User_Customer `json:"assignedAgent,omitempty" xmlrpc:"assignedAgent,omitempty"` + + // The internal identifier of an assigned Agent that is attached to a ticket. + AssignedAgentId *int `json:"assignedAgentId,omitempty" xmlrpc:"assignedAgentId,omitempty"` + + // no documentation yet + Resource *User_Customer `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// This datatype contains tickets referenced from card change request +type Ticket_Attachment_CardChangeRequest struct { + Ticket_Attachment + + // The card change request that is attached to a ticket. + Resource *Billing_Payment_Card_ChangeRequest `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// SoftLayer tickets have the ability to be associated with specific dedicated hosts in a customer's inventory. 
Attaching a dedicated host to a ticket can greatly increase response time from SoftLayer for issues that are related to one or more specific hosts on a customer's account. The SoftLayer_Ticket_Attachment_Dedicated_Host data type models the relationship between a dedicated host and a ticket. Only one attachment record can exist per dedicated host item per ticket. +type Ticket_Attachment_Dedicated_Host struct { + Ticket_Attachment + + // The dedicated host that is attached to a ticket. + DedicatedHost *Virtual_DedicatedHost `json:"dedicatedHost,omitempty" xmlrpc:"dedicatedHost,omitempty"` + + // The internal identifier of the Dedicated Host that is attached to a ticket. + DedicatedHostId *int `json:"dedicatedHostId,omitempty" xmlrpc:"dedicatedHostId,omitempty"` + + // The Dedicated Host that is attached to a ticket. + Resource *Virtual_DedicatedHost `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// SoftLayer tickets can have files attached to them. Attaching a file to a ticket is a good way to report issues, provide documentation, and give examples of an issue. Both SoftLayer customers and employees have the ability to attach files to a ticket. The SoftLayer_Ticket_Attachment_File data type models a single file attached to a ticket. +type Ticket_Attachment_File struct { + Entity + + // The date a file was originally attached to a ticket. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The name of a file attached to a ticket. + FileName *string `json:"fileName,omitempty" xmlrpc:"fileName,omitempty"` + + // The size of a file attached to a ticket, measured in bytes. + FileSize *string `json:"fileSize,omitempty" xmlrpc:"fileSize,omitempty"` + + // A ticket file attachment's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date that a file attachment record was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"` + + // The internal identifier of the ticket that a file is attached to. + TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"` + + // The ticket update that a file is attached to. + Update *Ticket_Update `json:"update,omitempty" xmlrpc:"update,omitempty"` + + // The internal identifier of the ticket update the attached file is associated with. + UpdateId *int `json:"updateId,omitempty" xmlrpc:"updateId,omitempty"` + + // The internal identifier of the user that uploaded a ticket file attachment. This is only used when a file attachment's ''uploaderType'' is set to "USER". + UploaderId *string `json:"uploaderId,omitempty" xmlrpc:"uploaderId,omitempty"` + + // The type of user that attached a file to a ticket. This is either "USER" if the file was uploaded by a portal or API user or "EMPLOYEE" if the file was uploaded by a SoftLayer employee. + UploaderType *string `json:"uploaderType,omitempty" xmlrpc:"uploaderType,omitempty"` +} + +// SoftLayer tickets have the ability to be associated with specific pieces of hardware in a customer's inventory. Attaching hardware to a ticket can greatly increase response time from SoftLayer for issues that are related to one or more specific servers on a customer's account. The SoftLayer_Ticket_Attachment_Hardware data type models the relationship between a piece of hardware and a ticket. Only one attachment record may exist per hardware item per ticket. 
+type Ticket_Attachment_Hardware struct { + Ticket_Attachment + + // The hardware that is attached to a ticket. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The internal identifier of a piece of hardware that is attached to a ticket. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // The hardware that is attached to a ticket. + Resource *Hardware `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// This datatype contains tickets referenced from manual payments +type Ticket_Attachment_Manual_Payment struct { + Ticket_Attachment + + // The manual payment that is attached to a ticket. + Resource *Billing_Payment_Card_ManualPayment `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Ticket_Attachment_Network_Storage_Mass_Data_Migration struct { + Ticket_Attachment + + // The Mass Data Migration request that is attached to a ticket. + Request *Network_Storage_MassDataMigration_Request `json:"request,omitempty" xmlrpc:"request,omitempty"` + + // no documentation yet + RequestId *int `json:"requestId,omitempty" xmlrpc:"requestId,omitempty"` + + // The Mass Data Migration request that is attached to a ticket. + Resource *Network_Storage_MassDataMigration_Request `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Ticket_Attachment_Scheduled_Action struct { + Ticket_Attachment + + // no documentation yet + Resource *Provisioning_Version1_Transaction `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // The internal identifier of a scheduled action transaction that is attached to a ticket. + RunDate *Time `json:"runDate,omitempty" xmlrpc:"runDate,omitempty"` + + // no documentation yet + ScheduledAction *Provisioning_Version1_Transaction `json:"scheduledAction,omitempty" xmlrpc:"scheduledAction,omitempty"` + + // no documentation yet + Transaction *Provisioning_Version1_Transaction `json:"transaction,omitempty" xmlrpc:"transaction,omitempty"` + + // The internal identifier of a scheduled action transaction that is attached to a ticket. + TransactionId *int `json:"transactionId,omitempty" xmlrpc:"transactionId,omitempty"` +} + +// SoftLayer tickets have the ability to be associated with specific virtual guests in a customer's inventory. Attaching virtual guests to a ticket can greatly increase response time from SoftLayer for issues that are related to one or more specific servers on a customer's account. The SoftLayer_Ticket_Attachment_Virtual_Guest data type models the relationship between a virtual guest and a ticket. Only one attachment record may exist per virtual guest per ticket. +type Ticket_Attachment_Virtual_Guest struct { + Ticket_Attachment + + // The virtualized guest or CloudLayer Computing Instance that is attached to a ticket. + Resource *Virtual_Guest `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // The virtualized guest or CloudLayer Computing Instance that is attached to a ticket. + VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"` + + // The internal identifier of the virtualized guest or CloudLayer Computing Instance that is attached to a ticket. 
+ VirtualGuestId *int `json:"virtualGuestId,omitempty" xmlrpc:"virtualGuestId,omitempty"` +} + +// no documentation yet +type Ticket_Chat struct { + Entity + + // no documentation yet + Agent *User_Employee `json:"agent,omitempty" xmlrpc:"agent,omitempty"` + + // no documentation yet + Customer *User_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"` + + // no documentation yet + CustomerId *int `json:"customerId,omitempty" xmlrpc:"customerId,omitempty"` + + // no documentation yet + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // no documentation yet + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` + + // no documentation yet + TicketUpdate *Ticket_Update_Chat `json:"ticketUpdate,omitempty" xmlrpc:"ticketUpdate,omitempty"` + + // no documentation yet + Transcript *string `json:"transcript,omitempty" xmlrpc:"transcript,omitempty"` +} + +// no documentation yet +type Ticket_Chat_Liveperson struct { + Ticket_Chat +} + +// no documentation yet +type Ticket_Chat_TranscriptLine struct { + Entity + + // no documentation yet + Speaker *User_Interface `json:"speaker,omitempty" xmlrpc:"speaker,omitempty"` +} + +// no documentation yet +type Ticket_Chat_TranscriptLine_Customer struct { + Ticket_Chat_TranscriptLine +} + +// no documentation yet +type Ticket_Chat_TranscriptLine_Employee struct { + Ticket_Chat_TranscriptLine +} + +// no documentation yet +type Ticket_EuCompliance struct { + Entity + + // no documentation yet + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // no documentation yet + Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"` +} + +// SoftLayer tickets have the ability to be assigned to one of SoftLayer's internal departments. The department that a ticket is assigned to is modeled by the SoftLayer_Ticket_Group data type. Ticket groups help to ensure that the proper department is handling a ticket. Standard support tickets are created from a number of pre-determined subjects. These subjects help determine which group a standard ticket is assigned to. +type Ticket_Group struct { + Entity + + // The category that a ticket group belongs to. + Category *Ticket_Group_Category `json:"category,omitempty" xmlrpc:"category,omitempty"` + + // A ticket group's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A ticket group's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The internal identifier for the category that a ticket group belongs to. + TicketGroupCategoryId *int `json:"ticketGroupCategoryId,omitempty" xmlrpc:"ticketGroupCategoryId,omitempty"` +} + +// SoftLayer's support ticket groups represent the department at SoftLayer that is assigned to work one of your support tickets. Many departments are responsible for handling different types of tickets. These types of tickets are modeled in the SoftLayer_Ticket_Group_Category data type. Ticket group categories also help differentiate your tickets' issues in the SoftLayer customer portal. +type Ticket_Group_Category struct { + Entity + + // A ticket group category's unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A ticket group category's name. 
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Ticket_Priority struct { + Entity +} + +// no documentation yet +type Ticket_State struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + StateType *Ticket_State_Type `json:"stateType,omitempty" xmlrpc:"stateType,omitempty"` + + // no documentation yet + StateTypeId *int `json:"stateTypeId,omitempty" xmlrpc:"stateTypeId,omitempty"` + + // no documentation yet + Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"` + + // no documentation yet + TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"` +} + +// no documentation yet +type Ticket_State_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Ticket_Status data type models the state of a ticket as it is worked by SoftLayer and its customers. Tickets exist in one of three states: +// *'''OPEN''': Open tickets are considered unresolved issues by SoftLayer and can be assigned to a SoftLayer employee for work. Tickets created by portal or API users are created in the Open state. +// *'''ASSIGNED''': Assigned tickets are identical to open tickets, but are assigned to an individual SoftLayer employee. An assigned ticket is actively being worked by SoftLayer. +// *'''CLOSED''': Tickets are closed when the issue at hand is considered resolved. A SoftLayer employee can change a ticket's status from Closed to Open or Assigned if the need arises. +// +// +// A ticket usually goes from the Open to Assigned to Closed states during its life cycle. If a ticket is forwarded from one department to another it may change from the Assigned state back to Open until it is assigned to a member of the new department. +type Ticket_Status struct { + Entity + + // A ticket status' internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A ticket status' name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Ticket_Subject data type models one of the possible subjects that a standard support ticket may belong to. A basic support ticket's title matches it's corresponding subject's name. +type Ticket_Subject struct { + Entity + + // no documentation yet + Category *Ticket_Subject_Category `json:"category,omitempty" xmlrpc:"category,omitempty"` + + // The subject category id that this ticket subject belongs to. + CategoryId *int `json:"categoryId,omitempty" xmlrpc:"categoryId,omitempty"` + + // A child subject + Children []Ticket_Subject `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of a child subject + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // no documentation yet + Group *Ticket_Group `json:"group,omitempty" xmlrpc:"group,omitempty"` + + // A ticket subject's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A ticket subject's name. This name is used for a standard support ticket's title. 
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A parent subject + Parent *Ticket_Subject `json:"parent,omitempty" xmlrpc:"parent,omitempty"` + + // Specifies the parent subject id. + ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"` +} + +// SoftLayer_Ticket_Subject_Category groups ticket subjects into logical groups. +type Ticket_Subject_Category struct { + Entity + + // A unique identifier of a ticket subject category. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A ticket subject category name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of + SubjectCount *uint `json:"subjectCount,omitempty" xmlrpc:"subjectCount,omitempty"` + + // no documentation yet + Subjects []Ticket_Subject `json:"subjects,omitempty" xmlrpc:"subjects,omitempty"` +} + +// no documentation yet +type Ticket_Survey struct { + Entity +} + +// no documentation yet +type Ticket_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} + +// The SoftLayer_Ticket_Update type relates to a single update to a ticket, either by a customer or an employee. +type Ticket_Update struct { + Entity + + // no documentation yet + ChangeOwnerActivity *string `json:"changeOwnerActivity,omitempty" xmlrpc:"changeOwnerActivity,omitempty"` + + // The date a ticket update was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The user or SoftLayer employee who created a ticket update. + Editor *User_Interface `json:"editor,omitempty" xmlrpc:"editor,omitempty"` + + // The internal identifier of the SoftLayer portal or API user who created a ticket update. This is only used if a ticket update's ''editorType'' property is "USER". + EditorId *int `json:"editorId,omitempty" xmlrpc:"editorId,omitempty"` + + // The type of user who created a ticket update. This is either "USER" for an update created by a SoftLayer portal or API user, "EMPLOYEE" for an update created by a SoftLayer employee, or "AUTO" if a ticket update was generated automatically by SoftLayer's backend systems. + EditorType *string `json:"editorType,omitempty" xmlrpc:"editorType,omitempty"` + + // The contents of a ticket update. + Entry *string `json:"entry,omitempty" xmlrpc:"entry,omitempty"` + + // The files attached to a ticket update. + FileAttachment []Ticket_Attachment_File `json:"fileAttachment,omitempty" xmlrpc:"fileAttachment,omitempty"` + + // A count of the files attached to a ticket update. + FileAttachmentCount *uint `json:"fileAttachmentCount,omitempty" xmlrpc:"fileAttachmentCount,omitempty"` + + // A ticket update's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The ticket that a ticket update belongs to. + Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"` + + // The internal identifier of the ticket that a ticket update belongs to. + TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"` + + // The Type of update to this ticket + Type *Ticket_Update_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// A SoftLayer_Ticket_Update_Agent type models an update to a ticket made by an agent. +type Ticket_Update_Agent struct { + Ticket_Update +} + +// A SoftLayer_Ticket_Update_Chat is a chat between a customer and a customer service representative relating to a ticket. 
+type Ticket_Update_Chat struct { + Ticket_Update + + // The chat between the Customer and Agent + Chat *Ticket_Chat_Liveperson `json:"chat,omitempty" xmlrpc:"chat,omitempty"` +} + +// A SoftLayer_Ticket_Update_Customer is a single update made by a customer to a ticket. +type Ticket_Update_Customer struct { + Ticket_Update +} + +// The SoftLayer_Ticket_Update_Employee data type models an update to a ticket made by a SoftLayer employee. +type Ticket_Update_Employee struct { + Ticket_Update + + // A ticket update's response rating. Ticket updates posted by SoftLayer employees have the option of earning a rating from SoftLayer's customers. Ratings are based on a 1 - 5 scale, with one being a poor rating while 5 is a very high rating. This is only used if a ticket update's ''editorType'' property is "EMPLOYEE". + ResponseRating *int `json:"responseRating,omitempty" xmlrpc:"responseRating,omitempty"` +} + +// no documentation yet +type Ticket_Update_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Ticket *Ticket_Update `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/user.go b/vendor/github.com/softlayer/softlayer-go/datatypes/user.go new file mode 100644 index 000000000000..27253784a72c --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/user.go @@ -0,0 +1,1450 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// This class represents a login/logout sheet for facility visitors. +type User_Access_Facility_Log struct { + Entity + + // This is the account associated with the log entry. For users under a customer's account, it is the customer's account. For contractors and others visiting a colocation area, it is the account associated with the area they visited. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // This is the account associated with a log record. For a customer logging into a datacenter, this is the customer's account. For a contractor or any other guest logging into a customer's cabinet or colocation cage, this is the customer's account. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // This is the location of the facility. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // This is a short description of why the person is at the location. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // This is the colocation hardware that was visited. 
+ Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // no documentation yet + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // This is the type of person entering the facility. + LogType *User_Access_Facility_Log_Type `json:"logType,omitempty" xmlrpc:"logType,omitempty"` + + // This is the date and time the person arrived. + TimeIn *Time `json:"timeIn,omitempty" xmlrpc:"timeIn,omitempty"` + + // no documentation yet + TimeOut *Time `json:"timeOut,omitempty" xmlrpc:"timeOut,omitempty"` + + // no documentation yet + Visitor *Entity `json:"visitor,omitempty" xmlrpc:"visitor,omitempty"` +} + +// no documentation yet +type User_Access_Facility_Log_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// This class represents a facility visitor that is not an active employee or customer. +type User_Access_Facility_Visitor struct { + Entity + + // no documentation yet + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // no documentation yet + VisitorType *User_Access_Facility_Visitor_Type `json:"visitorType,omitempty" xmlrpc:"visitorType,omitempty"` +} + +// no documentation yet +type User_Access_Facility_Visitor_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_User_Customer data type contains general information relating to a single SoftLayer customer portal user. Personal information in this type such as names, addresses, and phone numbers are not necessarily associated with the customer account the user is assigned to. +type User_Customer struct { + User_Interface + + // The customer account that a user belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A portal user's associated [[SoftLayer_Account|customer account]] id. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of + ActionCount *uint `json:"actionCount,omitempty" xmlrpc:"actionCount,omitempty"` + + // no documentation yet + Actions []User_Permission_Action `json:"actions,omitempty" xmlrpc:"actions,omitempty"` + + // A count of a portal user's additional email addresses. These email addresses are contacted when updates are made to support tickets. + AdditionalEmailCount *uint `json:"additionalEmailCount,omitempty" xmlrpc:"additionalEmailCount,omitempty"` + + // A portal user's additional email addresses. These email addresses are contacted when updates are made to support tickets. 
+ AdditionalEmails []User_Customer_AdditionalEmail `json:"additionalEmails,omitempty" xmlrpc:"additionalEmails,omitempty"` + + // The first line of the mailing address belonging to a portal user. + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // The second line of the mailing address belonging to a portal user. + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // A portal user's AOL Instant Messenger screen name. + Aim *string `json:"aim,omitempty" xmlrpc:"aim,omitempty"` + + // A portal user's secondary phone number. + AlternatePhone *string `json:"alternatePhone,omitempty" xmlrpc:"alternatePhone,omitempty"` + + // A count of a portal user's API Authentication keys. There is a max limit of two API keys per user. + ApiAuthenticationKeyCount *uint `json:"apiAuthenticationKeyCount,omitempty" xmlrpc:"apiAuthenticationKeyCount,omitempty"` + + // A portal user's API Authentication keys. There is a max limit of two API keys per user. + ApiAuthenticationKeys []User_Customer_ApiAuthentication `json:"apiAuthenticationKeys,omitempty" xmlrpc:"apiAuthenticationKeys,omitempty"` + + // The authentication token used for logging into the SoftLayer customer portal. + AuthenticationToken *Container_User_Authentication_Token `json:"authenticationToken,omitempty" xmlrpc:"authenticationToken,omitempty"` + + // A count of the CDN accounts associated with a portal user. + CdnAccountCount *uint `json:"cdnAccountCount,omitempty" xmlrpc:"cdnAccountCount,omitempty"` + + // The CDN accounts associated with a portal user. + CdnAccounts []Network_ContentDelivery_Account `json:"cdnAccounts,omitempty" xmlrpc:"cdnAccounts,omitempty"` + + // A count of a portal user's child users. Some portal users may not have child users. + ChildUserCount *uint `json:"childUserCount,omitempty" xmlrpc:"childUserCount,omitempty"` + + // A portal user's child users. Some portal users may not have child users. + ChildUsers []User_Customer `json:"childUsers,omitempty" xmlrpc:"childUsers,omitempty"` + + // The city of the mailing address belonging to a portal user. + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // A count of an user's associated closed tickets. + ClosedTicketCount *uint `json:"closedTicketCount,omitempty" xmlrpc:"closedTicketCount,omitempty"` + + // An user's associated closed tickets. + ClosedTickets []Ticket `json:"closedTickets,omitempty" xmlrpc:"closedTickets,omitempty"` + + // A portal user's associated company. This may not be the same company as the customer that owns this portal user. + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // A two-letter abbreviation of the country in the mailing address belonging to a portal user. + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // The date a portal user's record was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Whether a portal user's time zone is affected by Daylight Savings Time. + DaylightSavingsTimeFlag *bool `json:"daylightSavingsTimeFlag,omitempty" xmlrpc:"daylightSavingsTimeFlag,omitempty"` + + // A count of the dedicated hosts to which the user has been granted access. + DedicatedHostCount *uint `json:"dedicatedHostCount,omitempty" xmlrpc:"dedicatedHostCount,omitempty"` + + // The dedicated hosts to which the user has been granted access. 
+ DedicatedHosts []Virtual_DedicatedHost `json:"dedicatedHosts,omitempty" xmlrpc:"dedicatedHosts,omitempty"` + + // Flag used to deny access to all hardware and cloud computing instances upon user creation. + DenyAllResourceAccessOnCreateFlag *bool `json:"denyAllResourceAccessOnCreateFlag,omitempty" xmlrpc:"denyAllResourceAccessOnCreateFlag,omitempty"` + + // no documentation yet + DisplayName *string `json:"displayName,omitempty" xmlrpc:"displayName,omitempty"` + + // A portal user's email address. + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // A count of the external authentication bindings that link an external identifier to a SoftLayer user. + ExternalBindingCount *uint `json:"externalBindingCount,omitempty" xmlrpc:"externalBindingCount,omitempty"` + + // The external authentication bindings that link an external identifier to a SoftLayer user. + ExternalBindings []User_External_Binding `json:"externalBindings,omitempty" xmlrpc:"externalBindings,omitempty"` + + // A portal user's first name. + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // A user's password for the SoftLayer forums, hashed for auto-login capability from the SoftLayer customer portal + ForumPasswordHash *string `json:"forumPasswordHash,omitempty" xmlrpc:"forumPasswordHash,omitempty"` + + // A portal user's accessible hardware. These permissions control which hardware a user has access to in the SoftLayer customer portal. + Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // A count of a portal user's accessible hardware. These permissions control which hardware a user has access to in the SoftLayer customer portal. + HardwareCount *uint `json:"hardwareCount,omitempty" xmlrpc:"hardwareCount,omitempty"` + + // A count of hardware notifications associated with this user. A hardware notification links a user to a piece of hardware, and that user will be notified if any monitors on that hardware fail, if the monitors have a status of 'Notify User'. + HardwareNotificationCount *uint `json:"hardwareNotificationCount,omitempty" xmlrpc:"hardwareNotificationCount,omitempty"` + + // Hardware notifications associated with this user. A hardware notification links a user to a piece of hardware, and that user will be notified if any monitors on that hardware fail, if the monitors have a status of 'Notify User'. + HardwareNotifications []User_Customer_Notification_Hardware `json:"hardwareNotifications,omitempty" xmlrpc:"hardwareNotifications,omitempty"` + + // Whether or not a user has acknowledged the support policy. + HasAcknowledgedSupportPolicyFlag *bool `json:"hasAcknowledgedSupportPolicyFlag,omitempty" xmlrpc:"hasAcknowledgedSupportPolicyFlag,omitempty"` + + // Permission granting the user access to all Dedicated Host devices on the account. + HasFullDedicatedHostAccessFlag *bool `json:"hasFullDedicatedHostAccessFlag,omitempty" xmlrpc:"hasFullDedicatedHostAccessFlag,omitempty"` + + // Whether or not a portal user has access to all hardware on their account. + HasFullHardwareAccessFlag *bool `json:"hasFullHardwareAccessFlag,omitempty" xmlrpc:"hasFullHardwareAccessFlag,omitempty"` + + // Whether or not a portal user has access to all hardware on their account. + HasFullVirtualGuestAccessFlag *bool `json:"hasFullVirtualGuestAccessFlag,omitempty" xmlrpc:"hasFullVirtualGuestAccessFlag,omitempty"` + + // A portal user's ICQ UIN. + Icq *string `json:"icq,omitempty" xmlrpc:"icq,omitempty"` + + // A portal user's internal identifying number. 
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The IP addresses or IP ranges from which a user may login to the SoftLayer customer portal. Specify subnets in CIDR format and separate multiple addresses and subnets by commas. You may combine IPv4 and IPv6 addresses and subnets, for example: 192.168.0.0/16,fe80:021b::0/64. + IpAddressRestriction *string `json:"ipAddressRestriction,omitempty" xmlrpc:"ipAddressRestriction,omitempty"` + + // no documentation yet + IsMasterUserFlag *bool `json:"isMasterUserFlag,omitempty" xmlrpc:"isMasterUserFlag,omitempty"` + + // A portal user's last name. + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // A count of + LayoutProfileCount *uint `json:"layoutProfileCount,omitempty" xmlrpc:"layoutProfileCount,omitempty"` + + // no documentation yet + LayoutProfiles []Layout_Profile `json:"layoutProfiles,omitempty" xmlrpc:"layoutProfiles,omitempty"` + + // The linked account integration mode + LinkedAccountIntegrationMode *string `json:"linkedAccountIntegrationMode,omitempty" xmlrpc:"linkedAccountIntegrationMode,omitempty"` + + // A user's locale. Locale holds user's language and region information. + Locale *Locale `json:"locale,omitempty" xmlrpc:"locale,omitempty"` + + // A portal user's associated [[SoftLayer_Locale|locale]] id. + LocaleId *int `json:"localeId,omitempty" xmlrpc:"localeId,omitempty"` + + // A count of a user's attempts to log into the SoftLayer customer portal. + LoginAttemptCount *uint `json:"loginAttemptCount,omitempty" xmlrpc:"loginAttemptCount,omitempty"` + + // A user's attempts to log into the SoftLayer customer portal. + LoginAttempts []User_Customer_Access_Authentication `json:"loginAttempts,omitempty" xmlrpc:"loginAttempts,omitempty"` + + // Determines if this portal user is managed by SAML federation. + ManagedByFederationFlag *bool `json:"managedByFederationFlag,omitempty" xmlrpc:"managedByFederationFlag,omitempty"` + + // Determines if this portal user is managed by IBMid federation. + ManagedByOpenIdConnectFlag *bool `json:"managedByOpenIdConnectFlag,omitempty" xmlrpc:"managedByOpenIdConnectFlag,omitempty"` + + // A count of a portal user's associated mobile device profiles. + MobileDeviceCount *uint `json:"mobileDeviceCount,omitempty" xmlrpc:"mobileDeviceCount,omitempty"` + + // A portal user's associated mobile device profiles. + MobileDevices []User_Customer_MobileDevice `json:"mobileDevices,omitempty" xmlrpc:"mobileDevices,omitempty"` + + // The date a portal user's record was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A portal user's MSN address. + Msn *string `json:"msn,omitempty" xmlrpc:"msn,omitempty"` + + // no documentation yet + NameId *string `json:"nameId,omitempty" xmlrpc:"nameId,omitempty"` + + // A count of notification subscription records for the user. + NotificationSubscriberCount *uint `json:"notificationSubscriberCount,omitempty" xmlrpc:"notificationSubscriberCount,omitempty"` + + // Notification subscription records for the user. + NotificationSubscribers []Notification_Subscriber `json:"notificationSubscribers,omitempty" xmlrpc:"notificationSubscribers,omitempty"` + + // A portal user's office phone number. 
+ OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // The BlueID username associated with this user, if the account is managed by OpenIDConnect / BlueID federation + OpenIdConnectUserName *string `json:"openIdConnectUserName,omitempty" xmlrpc:"openIdConnectUserName,omitempty"` + + // A count of a user's associated open tickets. + OpenTicketCount *uint `json:"openTicketCount,omitempty" xmlrpc:"openTicketCount,omitempty"` + + // A user's associated open tickets. + OpenTickets []Ticket `json:"openTickets,omitempty" xmlrpc:"openTickets,omitempty"` + + // A count of a portal user's vpn accessible subnets. + OverrideCount *uint `json:"overrideCount,omitempty" xmlrpc:"overrideCount,omitempty"` + + // A portal user's vpn accessible subnets. + Overrides []Network_Service_Vpn_Overrides `json:"overrides,omitempty" xmlrpc:"overrides,omitempty"` + + // A portal user's parent user. If a SoftLayer_User_Customer has a null parentId property then it doesn't have a parent user. + Parent *User_Customer `json:"parent,omitempty" xmlrpc:"parent,omitempty"` + + // A portal user's parent user. If a user's parentId is ''null'' then it doesn't have a parent user in the customer portal. + ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"` + + // The expiration date for the user's password + PasswordExpireDate *Time `json:"passwordExpireDate,omitempty" xmlrpc:"passwordExpireDate,omitempty"` + + // A count of a portal user's permissions. These permissions control that user's access to functions within the SoftLayer customer portal and API. + PermissionCount *uint `json:"permissionCount,omitempty" xmlrpc:"permissionCount,omitempty"` + + // no documentation yet + PermissionSystemVersion *int `json:"permissionSystemVersion,omitempty" xmlrpc:"permissionSystemVersion,omitempty"` + + // A portal user's permissions. These permissions control that user's access to functions within the SoftLayer customer portal and API. + Permissions []User_Customer_CustomerPermission_Permission `json:"permissions,omitempty" xmlrpc:"permissions,omitempty"` + + // The postal code of the mailing address belonging to a portal user. + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // Whether a portal user may connect to the SoftLayer private network via PPTP VPN or not. + PptpVpnAllowedFlag *bool `json:"pptpVpnAllowedFlag,omitempty" xmlrpc:"pptpVpnAllowedFlag,omitempty"` + + // A count of + PreferenceCount *uint `json:"preferenceCount,omitempty" xmlrpc:"preferenceCount,omitempty"` + + // no documentation yet + Preferences []User_Preference `json:"preferences,omitempty" xmlrpc:"preferences,omitempty"` + + // A count of + RoleCount *uint `json:"roleCount,omitempty" xmlrpc:"roleCount,omitempty"` + + // no documentation yet + Roles []User_Permission_Role `json:"roles,omitempty" xmlrpc:"roles,omitempty"` + + // no documentation yet + SalesforceUserLink *User_Customer_Link `json:"salesforceUserLink,omitempty" xmlrpc:"salesforceUserLink,omitempty"` + + // no documentation yet + SavedId *string `json:"savedId,omitempty" xmlrpc:"savedId,omitempty"` + + // Whether a user may change their security options (IP restriction, password expiration, or enforce security questions on login) which were pre-selected by their account's master user. 
+ SecondaryLoginManagementFlag *bool `json:"secondaryLoginManagementFlag,omitempty" xmlrpc:"secondaryLoginManagementFlag,omitempty"` + + // Whether a user is required to answer a security question when logging into the SoftLayer customer portal. + SecondaryLoginRequiredFlag *bool `json:"secondaryLoginRequiredFlag,omitempty" xmlrpc:"secondaryLoginRequiredFlag,omitempty"` + + // The date when a user's password was last updated. + SecondaryPasswordModifyDate *Time `json:"secondaryPasswordModifyDate,omitempty" xmlrpc:"secondaryPasswordModifyDate,omitempty"` + + // The number of days for which a user's password is active. + SecondaryPasswordTimeoutDays *int `json:"secondaryPasswordTimeoutDays,omitempty" xmlrpc:"secondaryPasswordTimeoutDays,omitempty"` + + // A count of a portal user's security question answers. Some portal users may not have security answers or may not be configured to require answering a security question on login. + SecurityAnswerCount *uint `json:"securityAnswerCount,omitempty" xmlrpc:"securityAnswerCount,omitempty"` + + // A portal user's security question answers. Some portal users may not have security answers or may not be configured to require answering a security question on login. + SecurityAnswers []User_Customer_Security_Answer `json:"securityAnswers,omitempty" xmlrpc:"securityAnswers,omitempty"` + + // A phone number that can receive SMS text messages for this portal user. + Sms *string `json:"sms,omitempty" xmlrpc:"sms,omitempty"` + + // Whether a portal user may connect to the SoftLayer private network via SSL VPN or not. + SslVpnAllowedFlag *bool `json:"sslVpnAllowedFlag,omitempty" xmlrpc:"sslVpnAllowedFlag,omitempty"` + + // A two-letter abbreviation of the state in the mailing address belonging to a portal user. If a user does not reside in a province then this is typically blank. + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // The date a portal users record's last status change. + StatusDate *Time `json:"statusDate,omitempty" xmlrpc:"statusDate,omitempty"` + + // A count of a user's notification subscription records. + SubscriberCount *uint `json:"subscriberCount,omitempty" xmlrpc:"subscriberCount,omitempty"` + + // A user's notification subscription records. + Subscribers []Notification_User_Subscriber `json:"subscribers,omitempty" xmlrpc:"subscribers,omitempty"` + + // A count of a user's successful attempts to log into the SoftLayer customer portal. + SuccessfulLoginCount *uint `json:"successfulLoginCount,omitempty" xmlrpc:"successfulLoginCount,omitempty"` + + // A user's successful attempts to log into the SoftLayer customer portal. + SuccessfulLogins []User_Customer_Access_Authentication `json:"successfulLogins,omitempty" xmlrpc:"successfulLogins,omitempty"` + + // Whether or not a user is required to acknowledge the support policy for portal access. + SupportPolicyAcknowledgementRequiredFlag *int `json:"supportPolicyAcknowledgementRequiredFlag,omitempty" xmlrpc:"supportPolicyAcknowledgementRequiredFlag,omitempty"` + + // A count of the surveys that a user has taken in the SoftLayer customer portal. + SurveyCount *uint `json:"surveyCount,omitempty" xmlrpc:"surveyCount,omitempty"` + + // Whether or not a user must take a brief survey the next time they log into the SoftLayer customer portal. + SurveyRequiredFlag *bool `json:"surveyRequiredFlag,omitempty" xmlrpc:"surveyRequiredFlag,omitempty"` + + // The surveys that a user has taken in the SoftLayer customer portal. 
+ Surveys []Survey `json:"surveys,omitempty" xmlrpc:"surveys,omitempty"` + + // A count of an user's associated tickets. + TicketCount *uint `json:"ticketCount,omitempty" xmlrpc:"ticketCount,omitempty"` + + // An user's associated tickets. + Tickets []Ticket `json:"tickets,omitempty" xmlrpc:"tickets,omitempty"` + + // A portal user's time zone. + Timezone *Locale_Timezone `json:"timezone,omitempty" xmlrpc:"timezone,omitempty"` + + // A portal user's time zone. + TimezoneId *int `json:"timezoneId,omitempty" xmlrpc:"timezoneId,omitempty"` + + // A count of a user's unsuccessful attempts to log into the SoftLayer customer portal. + UnsuccessfulLoginCount *uint `json:"unsuccessfulLoginCount,omitempty" xmlrpc:"unsuccessfulLoginCount,omitempty"` + + // A user's unsuccessful attempts to log into the SoftLayer customer portal. + UnsuccessfulLogins []User_Customer_Access_Authentication `json:"unsuccessfulLogins,omitempty" xmlrpc:"unsuccessfulLogins,omitempty"` + + // A count of + UserLinkCount *uint `json:"userLinkCount,omitempty" xmlrpc:"userLinkCount,omitempty"` + + // no documentation yet + UserLinks []User_Customer_Link `json:"userLinks,omitempty" xmlrpc:"userLinks,omitempty"` + + // A portal user's status, which controls overall access to the SoftLayer customer portal and VPN access to the private network. + UserStatus *User_Customer_Status `json:"userStatus,omitempty" xmlrpc:"userStatus,omitempty"` + + // A number reflecting the state of a portal user. + UserStatusId *int `json:"userStatusId,omitempty" xmlrpc:"userStatusId,omitempty"` + + // A portal user's username. + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` + + // The verification code from Bluemix BSS to save in the invitation + VerificationCode *string `json:"verificationCode,omitempty" xmlrpc:"verificationCode,omitempty"` + + // A count of a portal user's accessible CloudLayer Computing Instances. These permissions control which CloudLayer Computing Instances a user has access to in the SoftLayer customer portal. + VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"` + + // A portal user's accessible CloudLayer Computing Instances. These permissions control which CloudLayer Computing Instances a user has access to in the SoftLayer customer portal. + VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"` + + // Whether a portal user vpn subnets have been manual configured. + VpnManualConfig *bool `json:"vpnManualConfig,omitempty" xmlrpc:"vpnManualConfig,omitempty"` + + // A portal user's Yahoo! Chat name. + Yahoo *string `json:"yahoo,omitempty" xmlrpc:"yahoo,omitempty"` +} + +// SoftLayer_User_Customer_Access_Authentication models a single attempt to log into the SoftLayer customer portal. A SoftLayer_User_Customer_Access_Authentication record is created every time a user attempts to log into the portal. Use this service to audit your users' portal activity and diagnose potential security breaches of your SoftLayer portal accounts. +// +// Unsuccessful login attempts can be caused by an incorrect password, failing to answer or not answering a login security question if the user has them configured, or attempting to log in from an IP address outside of the user's IP address restriction list. +// +// SoftLayer employees periodically log into our customer portal as users to diagnose portal issues, verify settings and configuration, and to perform maintenance on your account or services. 
SoftLayer employees only log into customer accounts from the following IP ranges: +// * 2607:f0d0:1000::/48 +// * 2607:f0d0:2000::/48 +// * 2607:f0d0:3000::/48 +// * 66.228.118.67/32 +// * 66.228.118.86/32 +type User_Customer_Access_Authentication struct { + Entity + + // The date of an attempt to log into the SoftLayer customer portal. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The IP address of the user who attempted to log into the SoftLayer customer portal. + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // Whether an attempt to log into the SoftLayer customer portal was successful or not. + SuccessFlag *bool `json:"successFlag,omitempty" xmlrpc:"successFlag,omitempty"` + + // The user who has attempted to log into the SoftLayer customer portal. + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // The internal identifier of the user who attempted to log into the SoftLayer customer portal. + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` + + // The username used when attempting to log into the SoftLayer customer portal + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// The SoftLayer_User_Customer_AdditionalEmail data type contains the additional email for use in ticket update notifications. +type User_Customer_AdditionalEmail struct { + Entity + + // Email assigned to user for use in ticket update notifications. + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // The portal user that owns this additional email address. + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // An internal identifier for the portal user who this additional email belongs to. + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// The SoftLayer_User_Customer_ApiAuthentication type contains user's authentication key(s). +type User_Customer_ApiAuthentication struct { + Entity + + // The user's authentication key for API access. + AuthenticationKey *string `json:"authenticationKey,omitempty" xmlrpc:"authenticationKey,omitempty"` + + // The user's API authentication identifying number. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The IP addresses or IP ranges from which this user may access the SoftLayer API. Specify subnets in CIDR format and separate multiple addresses and subnets by commas. You may combine IPv4 and IPv6 addresses and subnets, for example: 192.168.0.0/16,fe80:021b::0/64. + IpAddressRestriction *string `json:"ipAddressRestriction,omitempty" xmlrpc:"ipAddressRestriction,omitempty"` + + // The user's authentication key modification date. + TimestampKey *int `json:"timestampKey,omitempty" xmlrpc:"timestampKey,omitempty"` + + // The user who owns the api authentication key. + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // The user's identifying number. + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// Each SoftLayer portal account is assigned a series of permissions that determine what access the user has to functions within the SoftLayer customer portal. This status is reflected in the SoftLayer_User_Customer_Status data type. Permissions differ from user status in that user status applies globally to the portal while user permissions are applied to specific portal functions. +type User_Customer_CustomerPermission_Permission struct { + Entity + + // A user permission's short name. 
+ Key *string `json:"key,omitempty" xmlrpc:"key,omitempty"` + + // A user permission's key name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A user permission's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_User_Customer_External_Binding data type contains general information for a single external binding. This includes the 3rd party vendor, type of binding, and a unique identifier and password that is used to authenticate against the 3rd party service. +type User_Customer_External_Binding struct { + User_External_Binding + + // The SoftLayer user that the external authentication binding belongs to. + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` +} + +// The SoftLayer_User_Customer_External_Binding_Attribute data type contains the value for a single attribute associated with an external binding. External binding attributes contain additional information about an external binding. An attribute can be generic or specific to a 3rd party vendor. For example these attributes relate to Verisign: +// *Credential Type +// *Credential State +// *Credential Expiration Date +// *Credential Last Update Date +type User_Customer_External_Binding_Attribute struct { + User_External_Binding_Attribute +} + +// The SoftLayer_User_Customer_External_Binding_Phone data type contains information about an external binding that uses a phone call, SMS or mobile app for 2 form factor authentication. The external binding information is used when a SoftLayer customer logs into the SoftLayer customer portal or VPN to authenticate them against a trusted 3rd party, in this case using a mobile phone, mobile phone application or land-line phone. +// +// SoftLayer users with an active external binding will be prohibited from using the API for security reasons. +type User_Customer_External_Binding_Phone struct { + User_Customer_External_Binding + + // The current external binding status. It can be "ACTIVE" or "BLOCKED". + BindingStatus *string `json:"bindingStatus,omitempty" xmlrpc:"bindingStatus,omitempty"` + + // no documentation yet + PinLength *string `json:"pinLength,omitempty" xmlrpc:"pinLength,omitempty"` +} + +// The SoftLayer_User_Customer_External_Binding_Totp data type contains information about a single time-based one time password external binding. The external binding information is used when a SoftLayer customer logs into the SoftLayer customer portal to authenticate them. +// +// The information provided by this external binding data type includes: +// * The type of credential +// * The current state of the credential +// ** Active +// ** Inactive +// +// +// SoftLayer users with an active external binding will be prohibited from using the API for security reasons. +type User_Customer_External_Binding_Totp struct { + User_Customer_External_Binding +} + +// The SoftLayer_User_Customer_External_Binding_Type data type contains information relating to a type of external authentication binding. It contains a user friendly name as well as a unique key name. +type User_Customer_External_Binding_Type struct { + User_External_Binding_Type +} + +// The SoftLayer_User_Customer_External_Binding_Vendor data type contains information for a single external binding vendor. This information includes a user friendly vendor name, a unique version of the vendor name, and a unique internal identifier that can be used when creating a new external binding. 
+type User_Customer_External_Binding_Vendor struct { + User_External_Binding_Vendor +} + +// The SoftLayer_User_Customer_External_Binding_Verisign data type contains information about a single VeriSign external binding. The external binding information is used when a SoftLayer customer logs into the SoftLayer customer portal to authenticate them against a 3rd party, in this case VeriSign. +// +// The information provided by the VeriSign external binding data type includes: +// * The type of credential +// * The current state of the credential +// ** Enabled +// ** Disabled +// ** Locked +// * The credential's expiration date +// * The last time the credential was updated +// +// +// SoftLayer users with an active external binding will be prohibited from using the API for security reasons. +type User_Customer_External_Binding_Verisign struct { + User_Customer_External_Binding + + // The date that a VeriSign credential expires. + CredentialExpirationDate *string `json:"credentialExpirationDate,omitempty" xmlrpc:"credentialExpirationDate,omitempty"` + + // The last time a VeriSign credential was updated. + CredentialLastUpdateDate *string `json:"credentialLastUpdateDate,omitempty" xmlrpc:"credentialLastUpdateDate,omitempty"` + + // The current state of a VeriSign credential. This can be 'Enabled', 'Disabled', or 'Locked'. + CredentialState *string `json:"credentialState,omitempty" xmlrpc:"credentialState,omitempty"` + + // The type of VeriSign credential. This can be either 'Hardware' or 'Software'. + CredentialType *string `json:"credentialType,omitempty" xmlrpc:"credentialType,omitempty"` +} + +// no documentation yet +type User_Customer_Invitation struct { + Entity + + // no documentation yet + Code *string `json:"code,omitempty" xmlrpc:"code,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + CreatorId *int `json:"creatorId,omitempty" xmlrpc:"creatorId,omitempty"` + + // no documentation yet + CreatorType *string `json:"creatorType,omitempty" xmlrpc:"creatorType,omitempty"` + + // no documentation yet + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // no documentation yet + ExistingBlueIdFlag *int `json:"existingBlueIdFlag,omitempty" xmlrpc:"existingBlueIdFlag,omitempty"` + + // no documentation yet + ExpirationDate *Time `json:"expirationDate,omitempty" xmlrpc:"expirationDate,omitempty"` + + // no documentation yet + IbmIdUsername *string `json:"ibmIdUsername,omitempty" xmlrpc:"ibmIdUsername,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + IsFederatedEmailDomainFlag *int `json:"isFederatedEmailDomainFlag,omitempty" xmlrpc:"isFederatedEmailDomainFlag,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + ResponseDate *Time `json:"responseDate,omitempty" xmlrpc:"responseDate,omitempty"` + + // no documentation yet + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // no documentation yet + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // no documentation yet + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// no documentation yet +type User_Customer_Link struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + DefaultFlag *int 
`json:"defaultFlag,omitempty" xmlrpc:"defaultFlag,omitempty"` + + // no documentation yet + DestinationUserAlphanumericId *string `json:"destinationUserAlphanumericId,omitempty" xmlrpc:"destinationUserAlphanumericId,omitempty"` + + // no documentation yet + DestinationUserId *int `json:"destinationUserId,omitempty" xmlrpc:"destinationUserId,omitempty"` + + // no documentation yet + IbmIdUniqueIdentifier *string `json:"ibmIdUniqueIdentifier,omitempty" xmlrpc:"ibmIdUniqueIdentifier,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"` + + // no documentation yet + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` + + // no documentation yet + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // no documentation yet + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// no documentation yet +type User_Customer_Link_ThePlanet struct { + User_Customer_Link +} + +// no documentation yet +type User_Customer_Link_VerifiedIBMidLinkCollection struct { + Entity + + // no documentation yet + BadLinksDifferentIUI []User_Customer_Link `json:"badLinksDifferentIUI,omitempty" xmlrpc:"badLinksDifferentIUI,omitempty"` + + // no documentation yet + BadLinksDifferentUsername []User_Customer_Link `json:"badLinksDifferentUsername,omitempty" xmlrpc:"badLinksDifferentUsername,omitempty"` + + // no documentation yet + GoodLinks []User_Customer_Link `json:"goodLinks,omitempty" xmlrpc:"goodLinks,omitempty"` +} + +// This class represents a mobile device belonging to a user. The device can be a phone, tablet, or possibly even some Android based net books. The purpose is to tie just enough info with the device and the user to enable push notifications through non-softlayer entities (Google, Apple, RIM). +type User_Customer_MobileDevice struct { + Entity + + // A count of notification subscriptions available to a mobile device. + AvailablePushNotificationSubscriptionCount *uint `json:"availablePushNotificationSubscriptionCount,omitempty" xmlrpc:"availablePushNotificationSubscriptionCount,omitempty"` + + // Notification subscriptions available to a mobile device. + AvailablePushNotificationSubscriptions []Notification `json:"availablePushNotificationSubscriptions,omitempty" xmlrpc:"availablePushNotificationSubscriptions,omitempty"` + + // Created date for the record. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The user this mobile device belongs to. + Customer *User_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"` + + // The device resolution formatted width x height + DisplayResolutionXxY *string `json:"displayResolutionXxY,omitempty" xmlrpc:"displayResolutionXxY,omitempty"` + + // Record Identifier + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Device type identifier. + MobileDeviceTypeId *int `json:"mobileDeviceTypeId,omitempty" xmlrpc:"mobileDeviceTypeId,omitempty"` + + // Mobile OS identifier. + MobileOperatingSystemId *int `json:"mobileOperatingSystemId,omitempty" xmlrpc:"mobileOperatingSystemId,omitempty"` + + // Device model number + ModelNumber *string `json:"modelNumber,omitempty" xmlrpc:"modelNumber,omitempty"` + + // Last modify date for the record. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The name of the device the user is using. 
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The operating system this device is using + OperatingSystem *User_Customer_MobileDevice_OperatingSystem `json:"operatingSystem,omitempty" xmlrpc:"operatingSystem,omitempty"` + + // Device phone number + PhoneNumber *string `json:"phoneNumber,omitempty" xmlrpc:"phoneNumber,omitempty"` + + // A count of notification subscriptions attached to a mobile device. + PushNotificationSubscriptionCount *uint `json:"pushNotificationSubscriptionCount,omitempty" xmlrpc:"pushNotificationSubscriptionCount,omitempty"` + + // Notification subscriptions attached to a mobile device. + PushNotificationSubscriptions []Notification_User_Subscriber `json:"pushNotificationSubscriptions,omitempty" xmlrpc:"pushNotificationSubscriptions,omitempty"` + + // Device serial number + SerialNumber *string `json:"serialNumber,omitempty" xmlrpc:"serialNumber,omitempty"` + + // The token that is provided by the mobile device. + Token *string `json:"token,omitempty" xmlrpc:"token,omitempty"` + + // The type of device this user is using + Type *User_Customer_MobileDevice_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // User Identifier + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// This class represents the mobile operating system installed on a user's registered mobile device. It assists us when determining how to get a push notification to the user. +type User_Customer_MobileDevice_OperatingSystem struct { + Entity + + // Build revision number of the operating system. + BuildVersion *int `json:"buildVersion,omitempty" xmlrpc:"buildVersion,omitempty"` + + // Create date of the record. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Description of the mobile operating system. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Identifier for the record. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Major revision number of the operating system. + MajorVersion *int `json:"majorVersion,omitempty" xmlrpc:"majorVersion,omitempty"` + + // Minor revision number of the operating system. + MinorVersion *int `json:"minorVersion,omitempty" xmlrpc:"minorVersion,omitempty"` + + // Modify date of the record. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Name of the mobile operating system. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Describes a supported class of mobile device. Here the word class is used in the context of classes of consumer electronic devices, the two most prominent examples being mobile phones and tablets. +type User_Customer_MobileDevice_Type struct { + Entity + + // Record create date. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A description of the device + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Identifier for the record. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Last modify date for record. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The common name of the device. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The Customer_Notification_Hardware object stores links between customers and the hardware devices they wish to monitor. 
This link is not enough, the user must be sure to also create SoftLayer_Network_Monitor_Version1_Query_Host instance with the response action set to "notify users" in order for the users linked to that hardware object to be notified on failure. +type User_Customer_Notification_Hardware struct { + Entity + + // The hardware object that will be monitored. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The ID of the Hardware object that is to be monitored. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // The unique identifier for this object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The user that will be notified when the associated hardware object fails a monitoring instance. + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // The ID of the SoftLayer_User_Customer object that represents the user to be notified on monitoring failure. + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// The SoftLayer_User_Customer_Notification_Virtual_Guest object stores links between customers and the virtual guests they wish to monitor. This link is not enough, the user must be sure to also create SoftLayer_Network_Monitor_Version1_Query_Host instance with the response action set to "notify users" in order for the users linked to that hardware object to be notified on failure. +type User_Customer_Notification_Virtual_Guest struct { + Entity + + // The virtual guest object that will be monitored. + Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"` + + // The ID of the virtual guest object that is to be monitored. + GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"` + + // The unique identifier for this object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The user that will be notified when the associated virtual guest object fails a monitoring instance. + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // The ID of the SoftLayer_User_Customer object that represents the user to be notified on monitoring failure. + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// no documentation yet +type User_Customer_OpenIdConnect struct { + User_Customer +} + +// no documentation yet +type User_Customer_Prospect struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A count of + AssignedEmployeeCount *uint `json:"assignedEmployeeCount,omitempty" xmlrpc:"assignedEmployeeCount,omitempty"` + + // no documentation yet + AssignedEmployees []User_Employee `json:"assignedEmployees,omitempty" xmlrpc:"assignedEmployees,omitempty"` + + // A count of + QuoteCount *uint `json:"quoteCount,omitempty" xmlrpc:"quoteCount,omitempty"` + + // no documentation yet + Quotes []Billing_Order_Quote `json:"quotes,omitempty" xmlrpc:"quotes,omitempty"` + + // no documentation yet + Type *User_Customer_Prospect_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// Contains user information for Service Provider Enrollment. +type User_Customer_Prospect_ServiceProvider_EnrollRequest struct { + Entity + + // Flag indicating whether or not applicant has accepted all current SSP agreements. 
+ AcceptAllAgreementsFlag *bool `json:"acceptAllAgreementsFlag,omitempty" xmlrpc:"acceptAllAgreementsFlag,omitempty"` + + // accountId of existing SoftLayer Customer + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // Service provider address1 + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // Service provider address2 + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // Credit card account number + CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"` + + // Credit card expiration month + CardExpirationMonth *string `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"` + + // Credit card expiration year + CardExpirationYear *string `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"` + + // Type of credit card being used + CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"` + + // Credit card verification number + CardVerificationNumber *string `json:"cardVerificationNumber,omitempty" xmlrpc:"cardVerificationNumber,omitempty"` + + // Service provider city + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Service provider company name + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // Catalyst company types. + CompanyType *Catalyst_Company_Type `json:"companyType,omitempty" xmlrpc:"companyType,omitempty"` + + // Id of the company type which best describes applicant's company + CompanyTypeId *int `json:"companyTypeId,omitempty" xmlrpc:"companyTypeId,omitempty"` + + // Service provider company url + CompanyUrl *string `json:"companyUrl,omitempty" xmlrpc:"companyUrl,omitempty"` + + // Service provider contact's email + ContactEmail *string `json:"contactEmail,omitempty" xmlrpc:"contactEmail,omitempty"` + + // Service provider contact's first name + ContactFirstName *string `json:"contactFirstName,omitempty" xmlrpc:"contactFirstName,omitempty"` + + // Service provider contact's last name + ContactLastName *string `json:"contactLastName,omitempty" xmlrpc:"contactLastName,omitempty"` + + // Service provider contact's Phone + ContactPhone *string `json:"contactPhone,omitempty" xmlrpc:"contactPhone,omitempty"` + + // Service provider country + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // Customer Prospect id + CustomerProspectId *int `json:"customerProspectId,omitempty" xmlrpc:"customerProspectId,omitempty"` + + // Id of the device fingerprint + DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"` + + // Service provider email + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // Indicates if customer has an existing SoftLayer account + ExistingCustomerFlag *bool `json:"existingCustomerFlag,omitempty" xmlrpc:"existingCustomerFlag,omitempty"` + + // Service provider first name + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // IBM partner world id + IbmPartnerWorldId *string `json:"ibmPartnerWorldId,omitempty" xmlrpc:"ibmPartnerWorldId,omitempty"` + + // Indicates if the customer is IBM partner world member + IbmPartnerWorldMemberFlag *bool `json:"ibmPartnerWorldMemberFlag,omitempty" xmlrpc:"ibmPartnerWorldMemberFlag,omitempty"` + + // Service provider last name + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // Flag indicating whether or not applicant acknowledged MSA + 
MasterAgreementCompleteFlag *bool `json:"masterAgreementCompleteFlag,omitempty" xmlrpc:"masterAgreementCompleteFlag,omitempty"` + + // Service provider office phone + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // Service provider postalCode + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // Flag indicating whether or not applicant acknowledged service provider addendum + ServiceProviderAddendumFlag *bool `json:"serviceProviderAddendumFlag,omitempty" xmlrpc:"serviceProviderAddendumFlag,omitempty"` + + // Service provider state + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // Survey responses + SurveyResponses []Survey_Response `json:"surveyResponses,omitempty" xmlrpc:"surveyResponses,omitempty"` + + // Applicant's VAT id, if one exists + VatId *string `json:"vatId,omitempty" xmlrpc:"vatId,omitempty"` +} + +// no documentation yet +type User_Customer_Prospect_Type struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_User_Customer_Security_Answer type contains user's answers to security questions. +type User_Customer_Security_Answer struct { + Entity + + // A user's answer. + Answer *string `json:"answer,omitempty" xmlrpc:"answer,omitempty"` + + // A user's answer identifying number. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The question the security answer is associated with. + Question *User_Security_Question `json:"question,omitempty" xmlrpc:"question,omitempty"` + + // A user's question identifying number. + QuestionId *int `json:"questionId,omitempty" xmlrpc:"questionId,omitempty"` + + // The user who the security answer belongs to. + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // A user's identifying number. + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// Each SoftLayer portal account is assigned a status code that determines how it's treated in the customer portal. This status is reflected in the SoftLayer_User_Customer_Status data type. Status differs from user permissions in that user status applies globally to the portal while user permissions are applied to specific portal functions. +type User_Customer_Status struct { + Entity + + // A user's status identifying number. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A user's status keyname + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A user's status. This can be either "Active" for user accounts with portal access, "Inactive" for users disabled by another portal user, "Disabled" for accounts turned off by SoftLayer, or "VPN Only" for user accounts with no access to the customer portal but VPN access to the private network. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// A SoftLayer_User_Employee models a single SoftLayer employee for the purposes of ticket updates created by SoftLayer employees. 
SoftLayer portal and API users cannot see individual employee names in ticket responses. SoftLayer employees can be assigned to customer accounts as a personal support representative. Employee names and email will be available if an employee is assigned to the account. +type User_Employee struct { + User_Interface + + // A count of + ActionCount *uint `json:"actionCount,omitempty" xmlrpc:"actionCount,omitempty"` + + // no documentation yet + Actions []User_Permission_Action `json:"actions,omitempty" xmlrpc:"actions,omitempty"` + + // no documentation yet + ChatTranscript []Ticket_Chat `json:"chatTranscript,omitempty" xmlrpc:"chatTranscript,omitempty"` + + // A count of + ChatTranscriptCount *uint `json:"chatTranscriptCount,omitempty" xmlrpc:"chatTranscriptCount,omitempty"` + + // no documentation yet + DisplayName *string `json:"displayName,omitempty" xmlrpc:"displayName,omitempty"` + + // A SoftLayer employee's email address. Email addresses are only visible to [[SoftLayer_Account|SoftLayer Accounts]] that are assigned to an employee + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // The department that a SoftLayer employee belongs to. + EmployeeDepartment *User_Employee_Department `json:"employeeDepartment,omitempty" xmlrpc:"employeeDepartment,omitempty"` + + // A SoftLayer employee's [[SoftLayer_User_Employee_Department|department]] id. + EmployeeDepartmentId *int `json:"employeeDepartmentId,omitempty" xmlrpc:"employeeDepartmentId,omitempty"` + + // A SoftLayer employee's first name. First names are only visible to [[SoftLayer_Account|SoftLayer Accounts]] that are assigned to an employee + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // A SoftLayer employee's last name. Last names are only visible to [[SoftLayer_Account|SoftLayer Accounts]] that are assigned to an employee + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // A count of + LayoutProfileCount *uint `json:"layoutProfileCount,omitempty" xmlrpc:"layoutProfileCount,omitempty"` + + // no documentation yet + LayoutProfiles []Layout_Profile `json:"layoutProfiles,omitempty" xmlrpc:"layoutProfiles,omitempty"` + + // no documentation yet + MetricTrackingObject *Metric_Tracking_Object `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"` + + // no documentation yet + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // A count of + RoleCount *uint `json:"roleCount,omitempty" xmlrpc:"roleCount,omitempty"` + + // no documentation yet + Roles []User_Permission_Role `json:"roles,omitempty" xmlrpc:"roles,omitempty"` + + // A count of + SecurityLevelCount *uint `json:"securityLevelCount,omitempty" xmlrpc:"securityLevelCount,omitempty"` + + // no documentation yet + SecurityLevels []Security_Level `json:"securityLevels,omitempty" xmlrpc:"securityLevels,omitempty"` + + // no documentation yet + TicketActivities []Ticket_Activity `json:"ticketActivities,omitempty" xmlrpc:"ticketActivities,omitempty"` + + // A count of + TicketActivityCount *uint `json:"ticketActivityCount,omitempty" xmlrpc:"ticketActivityCount,omitempty"` + + // A count of + TicketAttachmentReferenceCount *uint `json:"ticketAttachmentReferenceCount,omitempty" xmlrpc:"ticketAttachmentReferenceCount,omitempty"` + + // no documentation yet + TicketAttachmentReferences []Ticket_Attachment `json:"ticketAttachmentReferences,omitempty" xmlrpc:"ticketAttachmentReferences,omitempty"` + + // A representation of a SoftLayer employee's 
username. In all cases this should simply state "Employee". + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// SoftLayer_User_Employee_Department models a department within SoftLayer's internal employee hierarchy. Common departments include Support, Sales, Accounting, Development, Systems, and Networking. +type User_Employee_Department struct { + Entity + + // The name of one of SoftLayer's employee departments. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_User_External_Binding data type contains general information for a single external binding. This includes the 3rd party vendor, type of binding, and a unique identifier and password that is used to authenticate against the 3rd party service. +type User_External_Binding struct { + Entity + + // The flag that determines whether the external binding is active will be used for authentication or not. + Active *bool `json:"active,omitempty" xmlrpc:"active,omitempty"` + + // A count of attributes of an external authentication binding. + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // Attributes of an external authentication binding. + Attributes []User_External_Binding_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // Information regarding the billing item for external authentication. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The date that the external authentication binding was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The identifier used to identify this binding to an external authentication source. + ExternalId *string `json:"externalId,omitempty" xmlrpc:"externalId,omitempty"` + + // An external authentication binding's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // An optional note for identifying the external binding. + Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"` + + // The password used to authenticate the external id at an external authentication source. + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // The type of external authentication binding. + Type *User_External_Binding_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The [[SoftLayer_User_External_Binding_Type|type]] identifier of an external authentication binding. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // An external authentication binding's associated [[SoftLayer_User_Customer|user account]] id. + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` + + // The vendor of an external authentication binding. + Vendor *User_External_Binding_Vendor `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"` + + // The [[SoftLayer_User_External_Binding_Vendor|vendor]] identifier of an external authentication binding. + VendorId *int `json:"vendorId,omitempty" xmlrpc:"vendorId,omitempty"` +} + +// The SoftLayer_User_External_Binding_Attribute data type contains the value for a single attribute associated with an external binding. External binding attributes contain additional information about an external binding. An attribute can be generic or specific to a 3rd party vendor. 
For example these attributes relate to Verisign: +// *Credential Type +// *Credential State +// *Credential Expiration Date +// *Credential Last Update Date +type User_External_Binding_Attribute struct { + Entity + + // The external authentication binding an attribute belongs to. + ExternalBinding *User_External_Binding `json:"externalBinding,omitempty" xmlrpc:"externalBinding,omitempty"` + + // The value of an external binding attribute. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_User_External_Binding_Type data type contains information relating to a type of external authentication binding. It contains a user friendly name as well as a unique key name. +type User_External_Binding_Type struct { + Entity + + // The unique name used to identify a type of external authentication binding. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The user friendly name of a type of external authentication binding. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_User_External_Binding_Vendor data type contains information for a single external binding vendor. This information includes a user friendly vendor name, a unique version of the vendor name, and a unique internal identifier that can be used when creating a new external binding. +type User_External_Binding_Vendor struct { + Entity + + // The unique identifier for an external binding vendor. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A unique version of the name property. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The user friendly name of an external binding vendor. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// A SoftLayer_User_Interface represents a generic user instance within the SoftLayer API. The SoftLayer API uses SoftLayer_User_Interfaces in cases where a user object could be one of many types of users. Currently the [[SoftLayer_User_Customer]] and [[SoftLayer_User_Employee]] classes are abstracted by this type. +type User_Interface struct { + Entity +} + +// no documentation yet +type User_Permission_Action struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Key *string `json:"key,omitempty" xmlrpc:"key,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type User_Permission_Group struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A permission groups associated [[SoftLayer_Account|customer account]] id. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of + ActionCount *uint `json:"actionCount,omitempty" xmlrpc:"actionCount,omitempty"` + + // no documentation yet + Actions []User_Permission_Action `json:"actions,omitempty" xmlrpc:"actions,omitempty"` + + // The date the permission group record was created. 
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The description of the permission group. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The date the temporary group will be destroyed. + ExpirationDate *Time `json:"expirationDate,omitempty" xmlrpc:"expirationDate,omitempty"` + + // A permission groups internal identifying number. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date the permission group record was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The name of the permission group. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of + RoleCount *uint `json:"roleCount,omitempty" xmlrpc:"roleCount,omitempty"` + + // no documentation yet + Roles []User_Permission_Role `json:"roles,omitempty" xmlrpc:"roles,omitempty"` + + // The type of the permission group. + Type *User_Permission_Group_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The type of permission group. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// no documentation yet +type User_Permission_Group_Type struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of + GroupCount *uint `json:"groupCount,omitempty" xmlrpc:"groupCount,omitempty"` + + // no documentation yet + Groups []User_Permission_Group `json:"groups,omitempty" xmlrpc:"groups,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type User_Permission_Role struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A permission roles associated [[SoftLayer_Account|customer account]] id. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of + ActionCount *uint `json:"actionCount,omitempty" xmlrpc:"actionCount,omitempty"` + + // no documentation yet + Actions []User_Permission_Action `json:"actions,omitempty" xmlrpc:"actions,omitempty"` + + // The date the permission role record was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The description of the permission role. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A count of + GroupCount *uint `json:"groupCount,omitempty" xmlrpc:"groupCount,omitempty"` + + // no documentation yet + Groups []User_Permission_Group `json:"groups,omitempty" xmlrpc:"groups,omitempty"` + + // A permission roles internal identifying number. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date the permission role record was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The name of the permission role. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A flag showing if new users should be automatically added to this role. 
+ NewUserDefaultFlag *int `json:"newUserDefaultFlag,omitempty" xmlrpc:"newUserDefaultFlag,omitempty"` + + // A flag showing if the permission role was created by our internal system for a single user. If this flag is set only a single user can be assigned to this permission role and it can not be deleted. + SystemFlag *int `json:"systemFlag,omitempty" xmlrpc:"systemFlag,omitempty"` + + // A count of + UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"` + + // no documentation yet + Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"` +} + +// The SoftLayer_User_Preference data type contains a single user preference to a specific preference type. +type User_Preference struct { + Entity + + // Description of the user preference + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Type of user preference + Type *User_Preference_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The users current preference value + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_User_Preference_Type data type contains a single preference type including the accepted values. +type User_Preference_Type struct { + Entity + + // A description of the preference type + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The name of the preference type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // An example of accepted preference values + ValueExample *string `json:"valueExample,omitempty" xmlrpc:"valueExample,omitempty"` +} + +// The SoftLayer_User_Security_Question data type contains questions. +type User_Security_Question struct { + Entity + + // A security question's display order. + DisplayOrder *int `json:"displayOrder,omitempty" xmlrpc:"displayOrder,omitempty"` + + // A security question's internal identifying number. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A security question's question. + Question *string `json:"question,omitempty" xmlrpc:"question,omitempty"` + + // A security question's viewable flag. + Viewable *int `json:"viewable,omitempty" xmlrpc:"viewable,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/utility.go b/vendor/github.com/softlayer/softlayer-go/datatypes/utility.go new file mode 100644 index 000000000000..4711f0129229 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/utility.go @@ -0,0 +1,46 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// Generic utility class used for gathering graphing parameters and actually creating graphs. 
+type Utility_Bandwidth_Graph struct { + Entity +} + +// no documentation yet +type Utility_Network struct { + Entity +} + +// no documentation yet +type Utility_ObjectFilter struct { + Entity +} + +// no documentation yet +type Utility_ObjectFilter_Operation struct { + Entity +} + +// no documentation yet +type Utility_ObjectFilter_Operation_Option struct { + Entity +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/virtual.go b/vendor/github.com/softlayer/softlayer-go/datatypes/virtual.go new file mode 100644 index 000000000000..26fb4892264c --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/virtual.go @@ -0,0 +1,1379 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// This data type presents the structure for a dedicated host. The data type contains relational properties to distinguish a dedicated host and associate an account to it. +type Virtual_DedicatedHost struct { + Entity + + // The account that the dedicated host belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The dedicated host's associated account id. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The container that represents allocations on the dedicated host. + AllocationStatus *Container_Virtual_DedicatedHost_AllocationStatus `json:"allocationStatus,omitempty" xmlrpc:"allocationStatus,omitempty"` + + // The backend router behind dedicated host's pool of resources. + BackendRouter *Hardware_Router_Backend `json:"backendRouter,omitempty" xmlrpc:"backendRouter,omitempty"` + + // The billing item for the dedicated host. + BillingItem *Billing_Item_Virtual_DedicatedHost `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The capacity that the dedicated host's CPU allocation is restricted to. + CpuCount *int `json:"cpuCount,omitempty" xmlrpc:"cpuCount,omitempty"` + + // The date that the dedicated host was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The datacenter that the dedicated host resides in. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // The capacity that the dedicated host's disk allocation is restricted to. + DiskCapacity *int `json:"diskCapacity,omitempty" xmlrpc:"diskCapacity,omitempty"` + + // A count of the guests associated with the dedicated host. + GuestCount *uint `json:"guestCount,omitempty" xmlrpc:"guestCount,omitempty"` + + // The guests associated with the dedicated host. + Guests []Virtual_Guest `json:"guests,omitempty" xmlrpc:"guests,omitempty"` + + // The dedicated host's associated unique id. 
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of + InternalTagReferenceCount *uint `json:"internalTagReferenceCount,omitempty" xmlrpc:"internalTagReferenceCount,omitempty"` + + // no documentation yet + InternalTagReferences []Tag_Reference `json:"internalTagReferences,omitempty" xmlrpc:"internalTagReferences,omitempty"` + + // The capacity that the dedicated host's memory allocation is restricted to. + MemoryCapacity *int `json:"memoryCapacity,omitempty" xmlrpc:"memoryCapacity,omitempty"` + + // The date that the dedicated host was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The dedicated host's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A note of up to 1,000 characters about a dedicated host. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The container that represents PCI device allocations on the dedicated host. + PciDeviceAllocationStatus *Container_Virtual_DedicatedHost_Pci_Device_AllocationStatus `json:"pciDeviceAllocationStatus,omitempty" xmlrpc:"pciDeviceAllocationStatus,omitempty"` + + // A count of + TagReferenceCount *uint `json:"tagReferenceCount,omitempty" xmlrpc:"tagReferenceCount,omitempty"` + + // no documentation yet + TagReferences []Tag_Reference `json:"tagReferences,omitempty" xmlrpc:"tagReferences,omitempty"` +} + +// The virtual disk image data type presents the structure in which a virtual disk image will be presented. +// +// Virtual block devices are assigned to disk images. +type Virtual_Disk_Image struct { + Entity + + // The billing item for a virtual disk image. + BillingItem *Billing_Item_Virtual_Disk_Image `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // A count of the block devices that a disk image is attached to. Block devices connect computing instances to disk images. + BlockDeviceCount *uint `json:"blockDeviceCount,omitempty" xmlrpc:"blockDeviceCount,omitempty"` + + // The block devices that a disk image is attached to. Block devices connect computing instances to disk images. + BlockDevices []Virtual_Guest_Block_Device `json:"blockDevices,omitempty" xmlrpc:"blockDevices,omitempty"` + + // no documentation yet + BootableVolumeFlag *bool `json:"bootableVolumeFlag,omitempty" xmlrpc:"bootableVolumeFlag,omitempty"` + + // A disk image's size measured in gigabytes. + Capacity *int `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"` + + // A disk image's unique md5 checksum. + Checksum *string `json:"checksum,omitempty" xmlrpc:"checksum,omitempty"` + + // A count of + CoalescedDiskImageCount *uint `json:"coalescedDiskImageCount,omitempty" xmlrpc:"coalescedDiskImageCount,omitempty"` + + // no documentation yet + CoalescedDiskImages []Virtual_Disk_Image `json:"coalescedDiskImages,omitempty" xmlrpc:"coalescedDiskImages,omitempty"` + + // no documentation yet + CopyOnWriteFlag *bool `json:"copyOnWriteFlag,omitempty" xmlrpc:"copyOnWriteFlag,omitempty"` + + // The date a disk image was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A brief description of a virtual disk image. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A disk image's unique ID. 
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + LocalDiskFlag *bool `json:"localDiskFlag,omitempty" xmlrpc:"localDiskFlag,omitempty"` + + // Whether this disk image is meant for storage of custom user data supplied with a Cloud Computing Instance order. + MetadataFlag *bool `json:"metadataFlag,omitempty" xmlrpc:"metadataFlag,omitempty"` + + // The date a disk image was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A descriptive name used to identify a disk image to a user. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The ID of the the disk image that this disk image is based on, if applicable. + ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"` + + // A count of references to the software that resides on a disk image. + SoftwareReferenceCount *uint `json:"softwareReferenceCount,omitempty" xmlrpc:"softwareReferenceCount,omitempty"` + + // References to the software that resides on a disk image. + SoftwareReferences []Virtual_Disk_Image_Software `json:"softwareReferences,omitempty" xmlrpc:"softwareReferences,omitempty"` + + // The original disk image that the current disk image was cloned from. + SourceDiskImage *Virtual_Disk_Image `json:"sourceDiskImage,omitempty" xmlrpc:"sourceDiskImage,omitempty"` + + // The storage repository that a disk image resides in. + StorageRepository *Virtual_Storage_Repository `json:"storageRepository,omitempty" xmlrpc:"storageRepository,omitempty"` + + // The [[SoftLayer_Virtual_Storage_Repository|storage repository]] that a disk image is in. + StorageRepositoryId *int `json:"storageRepositoryId,omitempty" xmlrpc:"storageRepositoryId,omitempty"` + + // The type of storage repository that a disk image resides in. + StorageRepositoryType *Virtual_Storage_Repository_Type `json:"storageRepositoryType,omitempty" xmlrpc:"storageRepositoryType,omitempty"` + + // The template that attaches a disk image to a [[SoftLayer_Virtual_Guest_Block_Device_Template_Group|archive]]. + TemplateBlockDevice *Virtual_Guest_Block_Device_Template `json:"templateBlockDevice,omitempty" xmlrpc:"templateBlockDevice,omitempty"` + + // A virtual disk image's type. + Type *Virtual_Disk_Image_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // A disk image's [[SoftLayer_Virtual_Disk_Image_Type|type]] ID + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // The unit of storage in which the size of the image is measured. Defaults to "GB" for gigabytes. + Units *string `json:"units,omitempty" xmlrpc:"units,omitempty"` + + // A disk image's unique ID on a virtualization platform. + Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"` +} + +// A SoftLayer_Virtual_Disk_Image_Software record connects a computing instance's virtual disk images with software records. This can be useful if a disk image is directly associated with software such as operating systems. +type Virtual_Disk_Image_Software struct { + Entity + + // The virtual disk image that is associated with software. + DiskImage *Virtual_Disk_Image `json:"diskImage,omitempty" xmlrpc:"diskImage,omitempty"` + + // The unique identifier of a virtual disk image to software relationship. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of username/Password pairs used for access to a Software Installation. 
+ PasswordCount *uint `json:"passwordCount,omitempty" xmlrpc:"passwordCount,omitempty"` + + // Username/Password pairs used for access to a Software Installation. + Passwords []Virtual_Disk_Image_Software_Password `json:"passwords,omitempty" xmlrpc:"passwords,omitempty"` + + // The software associated with a virtual disk image. + SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"` + + // The unique identifier of the software that a virtual disk image is associated with. + SoftwareDescriptionId *int `json:"softwareDescriptionId,omitempty" xmlrpc:"softwareDescriptionId,omitempty"` +} + +// This SoftLayer_Virtual_Disk_Image_Software_Password data type contains a password for a specific virtual disk image software instance. +type Virtual_Disk_Image_Software_Password struct { + Entity + + // A virtual disk images' password. + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // The instance that this username/password pair is valid for. + Software *Virtual_Disk_Image_Software `json:"software,omitempty" xmlrpc:"software,omitempty"` + + // A virtual disk images' username. + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// SoftLayer_Virtual_Disk_Image_Type models the types of virtual disk images available to CloudLayer Computing Instances. Virtual disk image types describe if an image's data is preservable when upgraded, whether a disk contains a suspended virtual image, or if a disk contains crash dump information. +type Virtual_Disk_Image_Type struct { + Entity + + // A brief description of a virtual disk image type's function. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A virtual disk image type's key name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A virtual disk image type's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The virtual guest data type presents the structure in which all virtual guests will be presented. Internally, the structure supports various virtualization platforms with no change to external interaction. +// +// A guest, also known as a virtual server, represents an allocation of resources on a virtual host. +type Virtual_Guest struct { + Entity + + // The account that a virtual guest belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A computing instance's associated [[SoftLayer_Account|account]] id + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + AccountOwnedPoolFlag *bool `json:"accountOwnedPoolFlag,omitempty" xmlrpc:"accountOwnedPoolFlag,omitempty"` + + // A virtual guest's currently active network monitoring incidents. + ActiveNetworkMonitorIncident []Network_Monitor_Version1_Incident `json:"activeNetworkMonitorIncident,omitempty" xmlrpc:"activeNetworkMonitorIncident,omitempty"` + + // A count of a virtual guest's currently active network monitoring incidents. + ActiveNetworkMonitorIncidentCount *uint `json:"activeNetworkMonitorIncidentCount,omitempty" xmlrpc:"activeNetworkMonitorIncidentCount,omitempty"` + + // A count of + ActiveTicketCount *uint `json:"activeTicketCount,omitempty" xmlrpc:"activeTicketCount,omitempty"` + + // no documentation yet + ActiveTickets []Ticket `json:"activeTickets,omitempty" xmlrpc:"activeTickets,omitempty"` + + // A transaction that is still be performed on a cloud server. 
+ ActiveTransaction *Provisioning_Version1_Transaction `json:"activeTransaction,omitempty" xmlrpc:"activeTransaction,omitempty"`
+
+ // A count of any active transaction(s) that are currently running for the server (example: os reload).
+ ActiveTransactionCount *uint `json:"activeTransactionCount,omitempty" xmlrpc:"activeTransactionCount,omitempty"`
+
+ // Any active transaction(s) that are currently running for the server (example: os reload).
+ ActiveTransactions []Provisioning_Version1_Transaction `json:"activeTransactions,omitempty" xmlrpc:"activeTransactions,omitempty"`
+
+ // The SoftLayer_Network_Storage_Allowed_Host information to connect this Virtual Guest to Network Storage volumes that require access control lists.
+ AllowedHost *Network_Storage_Allowed_Host `json:"allowedHost,omitempty" xmlrpc:"allowedHost,omitempty"`
+
+ // The SoftLayer_Network_Storage objects that this SoftLayer_Virtual_Guest has access to.
+ AllowedNetworkStorage []Network_Storage `json:"allowedNetworkStorage,omitempty" xmlrpc:"allowedNetworkStorage,omitempty"`
+
+ // A count of the SoftLayer_Network_Storage objects that this SoftLayer_Virtual_Guest has access to.
+ AllowedNetworkStorageCount *uint `json:"allowedNetworkStorageCount,omitempty" xmlrpc:"allowedNetworkStorageCount,omitempty"`
+
+ // A count of the SoftLayer_Network_Storage objects whose Replica that this SoftLayer_Virtual_Guest has access to.
+ AllowedNetworkStorageReplicaCount *uint `json:"allowedNetworkStorageReplicaCount,omitempty" xmlrpc:"allowedNetworkStorageReplicaCount,omitempty"`
+
+ // The SoftLayer_Network_Storage objects whose Replica that this SoftLayer_Virtual_Guest has access to.
+ AllowedNetworkStorageReplicas []Network_Storage `json:"allowedNetworkStorageReplicas,omitempty" xmlrpc:"allowedNetworkStorageReplicas,omitempty"`
+
+ // An antivirus / spyware software component object.
+ AntivirusSpywareSoftwareComponent *Software_Component `json:"antivirusSpywareSoftwareComponent,omitempty" xmlrpc:"antivirusSpywareSoftwareComponent,omitempty"`
+
+ // no documentation yet
+ ApplicationDeliveryController *Network_Application_Delivery_Controller `json:"applicationDeliveryController,omitempty" xmlrpc:"applicationDeliveryController,omitempty"`
+
+ // A count of
+ AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"`
+
+ // no documentation yet
+ Attributes []Virtual_Guest_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"`
+
+ // An object that stores the maximum level for the monitoring query types and response types.
+ AvailableMonitoring []Network_Monitor_Version1_Query_Host_Stratum `json:"availableMonitoring,omitempty" xmlrpc:"availableMonitoring,omitempty"`
+
+ // A count of an object that stores the maximum level for the monitoring query types and response types.
+ AvailableMonitoringCount *uint `json:"availableMonitoringCount,omitempty" xmlrpc:"availableMonitoringCount,omitempty"`
+
+ // The average daily private bandwidth usage for the current billing cycle.
+ AverageDailyPrivateBandwidthUsage *Float64 `json:"averageDailyPrivateBandwidthUsage,omitempty" xmlrpc:"averageDailyPrivateBandwidthUsage,omitempty"`
+
+ // The average daily public bandwidth usage for the current billing cycle.
+ AverageDailyPublicBandwidthUsage *Float64 `json:"averageDailyPublicBandwidthUsage,omitempty" xmlrpc:"averageDailyPublicBandwidthUsage,omitempty"`
+
+ // A count of a guest's backend network components.
+ BackendNetworkComponentCount *uint `json:"backendNetworkComponentCount,omitempty" xmlrpc:"backendNetworkComponentCount,omitempty"` + + // A guests's backend network components. + BackendNetworkComponents []Virtual_Guest_Network_Component `json:"backendNetworkComponents,omitempty" xmlrpc:"backendNetworkComponents,omitempty"` + + // A count of a guest's backend or private router. + BackendRouterCount *uint `json:"backendRouterCount,omitempty" xmlrpc:"backendRouterCount,omitempty"` + + // A guest's backend or private router. + BackendRouters []Hardware `json:"backendRouters,omitempty" xmlrpc:"backendRouters,omitempty"` + + // A computing instance's allotted bandwidth (measured in GB). + BandwidthAllocation *Float64 `json:"bandwidthAllocation,omitempty" xmlrpc:"bandwidthAllocation,omitempty"` + + // A computing instance's allotted detail record. Allotment details link bandwidth allocation with allotments. + BandwidthAllotmentDetail *Network_Bandwidth_Version1_Allotment_Detail `json:"bandwidthAllotmentDetail,omitempty" xmlrpc:"bandwidthAllotmentDetail,omitempty"` + + // The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to. + BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"` + + // A count of the raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to. + BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"` + + // The raw private bandwidth usage data for the current billing cycle. + BillingCyclePrivateBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"` + + // The raw public bandwidth usage data for the current billing cycle. + BillingCyclePublicBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"` + + // The billing item for a CloudLayer Compute Instance. + BillingItem *Billing_Item_Virtual_Guest `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // Determines whether the instance is ineligible for cancellation because it is disconnected. + BlockCancelBecauseDisconnectedFlag *bool `json:"blockCancelBecauseDisconnectedFlag,omitempty" xmlrpc:"blockCancelBecauseDisconnectedFlag,omitempty"` + + // A count of a computing instance's block devices. Block devices link [[SoftLayer_Virtual_Disk_Image|disk images]] to computing instances. + BlockDeviceCount *uint `json:"blockDeviceCount,omitempty" xmlrpc:"blockDeviceCount,omitempty"` + + // The global identifier for the image template that was used to provision or reload a guest. + BlockDeviceTemplateGroup *Virtual_Guest_Block_Device_Template_Group `json:"blockDeviceTemplateGroup,omitempty" xmlrpc:"blockDeviceTemplateGroup,omitempty"` + + // A computing instance's block devices. Block devices link [[SoftLayer_Virtual_Disk_Image|disk images]] to computing instances. + BlockDevices []Virtual_Guest_Block_Device `json:"blockDevices,omitempty" xmlrpc:"blockDevices,omitempty"` + + // A flag indicating a computing instance's console IP address is assigned. 
+ ConsoleIpAddressFlag *bool `json:"consoleIpAddressFlag,omitempty" xmlrpc:"consoleIpAddressFlag,omitempty"` + + // A record containing information about a computing instance's console IP and port number. + ConsoleIpAddressRecord *Virtual_Guest_Network_Component_IpAddress `json:"consoleIpAddressRecord,omitempty" xmlrpc:"consoleIpAddressRecord,omitempty"` + + // A continuous data protection software component object. + ContinuousDataProtectionSoftwareComponent *Software_Component `json:"continuousDataProtectionSoftwareComponent,omitempty" xmlrpc:"continuousDataProtectionSoftwareComponent,omitempty"` + + // A guest's control panel. + ControlPanel *Software_Component `json:"controlPanel,omitempty" xmlrpc:"controlPanel,omitempty"` + + // The date a virtual computing instance was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // An object that provides commonly used bandwidth summary components for the current billing cycle. + CurrentBandwidthSummary *Metric_Tracking_Object_Bandwidth_Summary `json:"currentBandwidthSummary,omitempty" xmlrpc:"currentBandwidthSummary,omitempty"` + + // The datacenter that a virtual guest resides in. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // When true this flag specifies that a compute instance is to run on hosts that only have guests from the same account. + DedicatedAccountHostOnlyFlag *bool `json:"dedicatedAccountHostOnlyFlag,omitempty" xmlrpc:"dedicatedAccountHostOnlyFlag,omitempty"` + + // The dedicated host associated with this guest. + DedicatedHost *Virtual_DedicatedHost `json:"dedicatedHost,omitempty" xmlrpc:"dedicatedHost,omitempty"` + + // A computing instance's domain name + Domain *string `json:"domain,omitempty" xmlrpc:"domain,omitempty"` + + // A guest's associated EVault network storage service account. + EvaultNetworkStorage []Network_Storage `json:"evaultNetworkStorage,omitempty" xmlrpc:"evaultNetworkStorage,omitempty"` + + // A count of a guest's associated EVault network storage service account. + EvaultNetworkStorageCount *uint `json:"evaultNetworkStorageCount,omitempty" xmlrpc:"evaultNetworkStorageCount,omitempty"` + + // A computing instance's hardware firewall services. + FirewallServiceComponent *Network_Component_Firewall `json:"firewallServiceComponent,omitempty" xmlrpc:"firewallServiceComponent,omitempty"` + + // A count of a guest's frontend network components. + FrontendNetworkComponentCount *uint `json:"frontendNetworkComponentCount,omitempty" xmlrpc:"frontendNetworkComponentCount,omitempty"` + + // A guest's frontend network components. + FrontendNetworkComponents []Virtual_Guest_Network_Component `json:"frontendNetworkComponents,omitempty" xmlrpc:"frontendNetworkComponents,omitempty"` + + // A guest's frontend or public router. + FrontendRouters *Hardware `json:"frontendRouters,omitempty" xmlrpc:"frontendRouters,omitempty"` + + // A name reflecting the hostname and domain of the computing instance. + FullyQualifiedDomainName *string `json:"fullyQualifiedDomainName,omitempty" xmlrpc:"fullyQualifiedDomainName,omitempty"` + + // A guest's universally unique identifier. + GlobalIdentifier *string `json:"globalIdentifier,omitempty" xmlrpc:"globalIdentifier,omitempty"` + + // no documentation yet + GuestBootParameter *Virtual_Guest_Boot_Parameter `json:"guestBootParameter,omitempty" xmlrpc:"guestBootParameter,omitempty"` + + // The virtual host on which a virtual guest resides (available only on private clouds). 
+ Host *Virtual_Host `json:"host,omitempty" xmlrpc:"host,omitempty"` + + // A host IPS software component object. + HostIpsSoftwareComponent *Software_Component `json:"hostIpsSoftwareComponent,omitempty" xmlrpc:"hostIpsSoftwareComponent,omitempty"` + + // A virtual computing instance's hostname + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // Whether or not a computing instance is billed hourly instead of monthly. + HourlyBillingFlag *bool `json:"hourlyBillingFlag,omitempty" xmlrpc:"hourlyBillingFlag,omitempty"` + + // Unique ID for a computing instance. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The total private inbound bandwidth for this computing instance for the current billing cycle. + InboundPrivateBandwidthUsage *Float64 `json:"inboundPrivateBandwidthUsage,omitempty" xmlrpc:"inboundPrivateBandwidthUsage,omitempty"` + + // The total public inbound bandwidth for this computing instance for the current billing cycle. + InboundPublicBandwidthUsage *Float64 `json:"inboundPublicBandwidthUsage,omitempty" xmlrpc:"inboundPublicBandwidthUsage,omitempty"` + + // A count of + InternalTagReferenceCount *uint `json:"internalTagReferenceCount,omitempty" xmlrpc:"internalTagReferenceCount,omitempty"` + + // no documentation yet + InternalTagReferences []Tag_Reference `json:"internalTagReferences,omitempty" xmlrpc:"internalTagReferences,omitempty"` + + // The last known power state of a virtual guest in the event the guest is turned off outside of IMS or has gone offline. + LastKnownPowerState *Virtual_Guest_Power_State `json:"lastKnownPowerState,omitempty" xmlrpc:"lastKnownPowerState,omitempty"` + + // The last transaction that a cloud server's operating system was loaded. + LastOperatingSystemReload *Provisioning_Version1_Transaction `json:"lastOperatingSystemReload,omitempty" xmlrpc:"lastOperatingSystemReload,omitempty"` + + // no documentation yet + LastPowerStateId *int `json:"lastPowerStateId,omitempty" xmlrpc:"lastPowerStateId,omitempty"` + + // The last transaction a cloud server had performed. + LastTransaction *Provisioning_Version1_Transaction `json:"lastTransaction,omitempty" xmlrpc:"lastTransaction,omitempty"` + + // The last timestamp of when the guest was verified as a resident virtual machine on the host's hypervisor platform. + LastVerifiedDate *Time `json:"lastVerifiedDate,omitempty" xmlrpc:"lastVerifiedDate,omitempty"` + + // A virtual guest's latest network monitoring incident. + LatestNetworkMonitorIncident *Network_Monitor_Version1_Incident `json:"latestNetworkMonitorIncident,omitempty" xmlrpc:"latestNetworkMonitorIncident,omitempty"` + + // A flag indicating that the virtual guest has at least one disk which is local to the host it runs on. This does not include a SWAP device. + LocalDiskFlag *bool `json:"localDiskFlag,omitempty" xmlrpc:"localDiskFlag,omitempty"` + + // Where guest is located within SoftLayer's location hierarchy. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // A flag indicating that the virtual guest is a managed resource. + ManagedResourceFlag *bool `json:"managedResourceFlag,omitempty" xmlrpc:"managedResourceFlag,omitempty"` + + // The maximum amount of CPU resources a computing instance may utilize. + MaxCpu *int `json:"maxCpu,omitempty" xmlrpc:"maxCpu,omitempty"` + + // The unit of the maximum amount of CPU resources a computing instance may utilize. 
+ MaxCpuUnits *string `json:"maxCpuUnits,omitempty" xmlrpc:"maxCpuUnits,omitempty"`
+
+ // The maximum amount of memory a computing instance may utilize.
+ MaxMemory *int `json:"maxMemory,omitempty" xmlrpc:"maxMemory,omitempty"`
+
+ // The date of the most recent metric tracking poll performed.
+ MetricPollDate *Time `json:"metricPollDate,omitempty" xmlrpc:"metricPollDate,omitempty"`
+
+ // A guest's metric tracking object.
+ MetricTrackingObject *Metric_Tracking_Object `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"`
+
+ // The metric tracking object id for this guest.
+ MetricTrackingObjectId *int `json:"metricTrackingObjectId,omitempty" xmlrpc:"metricTrackingObjectId,omitempty"`
+
+ // The date a virtual computing instance was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // A count of
+ MonitoringAgentCount *uint `json:"monitoringAgentCount,omitempty" xmlrpc:"monitoringAgentCount,omitempty"`
+
+ // no documentation yet
+ MonitoringAgents []Monitoring_Agent `json:"monitoringAgents,omitempty" xmlrpc:"monitoringAgents,omitempty"`
+
+ // no documentation yet
+ MonitoringRobot *Monitoring_Robot `json:"monitoringRobot,omitempty" xmlrpc:"monitoringRobot,omitempty"`
+
+ // A virtual guest's network monitoring services.
+ MonitoringServiceComponent *Network_Monitor_Version1_Query_Host_Stratum `json:"monitoringServiceComponent,omitempty" xmlrpc:"monitoringServiceComponent,omitempty"`
+
+ // no documentation yet
+ MonitoringServiceEligibilityFlag *bool `json:"monitoringServiceEligibilityFlag,omitempty" xmlrpc:"monitoringServiceEligibilityFlag,omitempty"`
+
+ // no documentation yet
+ MonitoringServiceFlag *bool `json:"monitoringServiceFlag,omitempty" xmlrpc:"monitoringServiceFlag,omitempty"`
+
+ // The monitoring notification objects for this guest. Each object links this guest instance to a user account that will be notified if monitoring on this guest object fails
+ MonitoringUserNotification []User_Customer_Notification_Virtual_Guest `json:"monitoringUserNotification,omitempty" xmlrpc:"monitoringUserNotification,omitempty"`
+
+ // A count of the monitoring notification objects for this guest. Each object links this guest instance to a user account that will be notified if monitoring on this guest object fails
+ MonitoringUserNotificationCount *uint `json:"monitoringUserNotificationCount,omitempty" xmlrpc:"monitoringUserNotificationCount,omitempty"`
+
+ // A count of a guest's network components.
+ NetworkComponentCount *uint `json:"networkComponentCount,omitempty" xmlrpc:"networkComponentCount,omitempty"`
+
+ // A guest's network components.
+ NetworkComponents []Virtual_Guest_Network_Component `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"`
+
+ // A count of a guest's network monitors.
+ NetworkMonitorCount *uint `json:"networkMonitorCount,omitempty" xmlrpc:"networkMonitorCount,omitempty"`
+
+ // A count of all of a virtual guest's network monitoring incidents.
+ NetworkMonitorIncidentCount *uint `json:"networkMonitorIncidentCount,omitempty" xmlrpc:"networkMonitorIncidentCount,omitempty"`
+
+ // All of a virtual guest's network monitoring incidents.
+ NetworkMonitorIncidents []Network_Monitor_Version1_Incident `json:"networkMonitorIncidents,omitempty" xmlrpc:"networkMonitorIncidents,omitempty"`
+
+ // A guest's network monitors.
+ NetworkMonitors []Network_Monitor_Version1_Query_Host `json:"networkMonitors,omitempty" xmlrpc:"networkMonitors,omitempty"` + + // A guest's associated network storage accounts. + NetworkStorage []Network_Storage `json:"networkStorage,omitempty" xmlrpc:"networkStorage,omitempty"` + + // A count of a guest's associated network storage accounts. + NetworkStorageCount *uint `json:"networkStorageCount,omitempty" xmlrpc:"networkStorageCount,omitempty"` + + // A count of the network Vlans that a guest's network components are associated with. + NetworkVlanCount *uint `json:"networkVlanCount,omitempty" xmlrpc:"networkVlanCount,omitempty"` + + // The network Vlans that a guest's network components are associated with. + NetworkVlans []Network_Vlan `json:"networkVlans,omitempty" xmlrpc:"networkVlans,omitempty"` + + // A note of up to 1,000 characters about a virtual server. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // An open ticket requesting cancellation of this server, if one exists. + OpenCancellationTicket *Ticket `json:"openCancellationTicket,omitempty" xmlrpc:"openCancellationTicket,omitempty"` + + // A guest's operating system. + OperatingSystem *Software_Component_OperatingSystem `json:"operatingSystem,omitempty" xmlrpc:"operatingSystem,omitempty"` + + // A guest's operating system software description. + OperatingSystemReferenceCode *string `json:"operatingSystemReferenceCode,omitempty" xmlrpc:"operatingSystemReferenceCode,omitempty"` + + // The original package id provided with the order for a Cloud Computing Instance. + OrderedPackageId *string `json:"orderedPackageId,omitempty" xmlrpc:"orderedPackageId,omitempty"` + + // The total private outbound bandwidth for this computing instance for the current billing cycle. + OutboundPrivateBandwidthUsage *Float64 `json:"outboundPrivateBandwidthUsage,omitempty" xmlrpc:"outboundPrivateBandwidthUsage,omitempty"` + + // The total public outbound bandwidth for this computing instance for the current billing cycle. + OutboundPublicBandwidthUsage *Float64 `json:"outboundPublicBandwidthUsage,omitempty" xmlrpc:"outboundPublicBandwidthUsage,omitempty"` + + // Whether the bandwidth usage for this computing instance for the current billing cycle exceeds the allocation. + OverBandwidthAllocationFlag *int `json:"overBandwidthAllocationFlag,omitempty" xmlrpc:"overBandwidthAllocationFlag,omitempty"` + + // When true this virtual guest must be migrated using SoftLayer_Virtual_Guest::migrate. + PendingMigrationFlag *bool `json:"pendingMigrationFlag,omitempty" xmlrpc:"pendingMigrationFlag,omitempty"` + + // URI of the script to be downloaded and executed after installation is complete. This is deprecated in favor of supplementalCreateObjectOptions' postInstallScriptUri. + PostInstallScriptUri *string `json:"postInstallScriptUri,omitempty" xmlrpc:"postInstallScriptUri,omitempty"` + + // The current power state of a virtual guest. + PowerState *Virtual_Guest_Power_State `json:"powerState,omitempty" xmlrpc:"powerState,omitempty"` + + // A guest's primary private IP address. + PrimaryBackendIpAddress *string `json:"primaryBackendIpAddress,omitempty" xmlrpc:"primaryBackendIpAddress,omitempty"` + + // A guest's primary backend network component. + PrimaryBackendNetworkComponent *Virtual_Guest_Network_Component `json:"primaryBackendNetworkComponent,omitempty" xmlrpc:"primaryBackendNetworkComponent,omitempty"` + + // The guest's primary public IP address. 
+ PrimaryIpAddress *string `json:"primaryIpAddress,omitempty" xmlrpc:"primaryIpAddress,omitempty"` + + // A guest's primary public network component. + PrimaryNetworkComponent *Virtual_Guest_Network_Component `json:"primaryNetworkComponent,omitempty" xmlrpc:"primaryNetworkComponent,omitempty"` + + // Whether the computing instance only has access to the private network. + PrivateNetworkOnlyFlag *bool `json:"privateNetworkOnlyFlag,omitempty" xmlrpc:"privateNetworkOnlyFlag,omitempty"` + + // Whether the bandwidth usage for this computing instance for the current billing cycle is projected to exceed the allocation. + ProjectedOverBandwidthAllocationFlag *int `json:"projectedOverBandwidthAllocationFlag,omitempty" xmlrpc:"projectedOverBandwidthAllocationFlag,omitempty"` + + // The projected public outbound bandwidth for this computing instance for the current billing cycle. + ProjectedPublicBandwidthUsage *Float64 `json:"projectedPublicBandwidthUsage,omitempty" xmlrpc:"projectedPublicBandwidthUsage,omitempty"` + + // no documentation yet + ProvisionDate *Time `json:"provisionDate,omitempty" xmlrpc:"provisionDate,omitempty"` + + // A count of recent events that impact this computing instance. + RecentEventCount *uint `json:"recentEventCount,omitempty" xmlrpc:"recentEventCount,omitempty"` + + // Recent events that impact this computing instance. + RecentEvents []Notification_Occurrence_Event `json:"recentEvents,omitempty" xmlrpc:"recentEvents,omitempty"` + + // The regional group this guest is in. + RegionalGroup *Location_Group_Regional `json:"regionalGroup,omitempty" xmlrpc:"regionalGroup,omitempty"` + + // no documentation yet + RegionalInternetRegistry *Network_Regional_Internet_Registry `json:"regionalInternetRegistry,omitempty" xmlrpc:"regionalInternetRegistry,omitempty"` + + // A count of collection of scale assets this guest corresponds to. + ScaleAssetCount *uint `json:"scaleAssetCount,omitempty" xmlrpc:"scaleAssetCount,omitempty"` + + // Collection of scale assets this guest corresponds to. + ScaleAssets []Scale_Asset `json:"scaleAssets,omitempty" xmlrpc:"scaleAssets,omitempty"` + + // The scale member for this guest, if applicable. + ScaleMember *Scale_Member_Virtual_Guest `json:"scaleMember,omitempty" xmlrpc:"scaleMember,omitempty"` + + // Whether or not this guest is a member of a scale group and was automatically created as part of a scale group action. + ScaledFlag *bool `json:"scaledFlag,omitempty" xmlrpc:"scaledFlag,omitempty"` + + // A count of a guest's vulnerability scan requests. + SecurityScanRequestCount *uint `json:"securityScanRequestCount,omitempty" xmlrpc:"securityScanRequestCount,omitempty"` + + // A guest's vulnerability scan requests. + SecurityScanRequests []Network_Security_Scanner_Request `json:"securityScanRequests,omitempty" xmlrpc:"securityScanRequests,omitempty"` + + // The server room that a guest is located at. There may be more than one server room for every data center. + ServerRoom *Location `json:"serverRoom,omitempty" xmlrpc:"serverRoom,omitempty"` + + // A count of a guest's installed software. + SoftwareComponentCount *uint `json:"softwareComponentCount,omitempty" xmlrpc:"softwareComponentCount,omitempty"` + + // A guest's installed software. + SoftwareComponents []Software_Component `json:"softwareComponents,omitempty" xmlrpc:"softwareComponents,omitempty"` + + // A count of sSH keys to be installed on the server during provisioning or an OS reload. 
+ SshKeyCount *uint `json:"sshKeyCount,omitempty" xmlrpc:"sshKeyCount,omitempty"`
+
+ // SSH keys to be installed on the server during provisioning or an OS reload.
+ SshKeys []Security_Ssh_Key `json:"sshKeys,omitempty" xmlrpc:"sshKeys,omitempty"`
+
+ // The number of CPUs available to a computing instance upon startup.
+ StartCpus *int `json:"startCpus,omitempty" xmlrpc:"startCpus,omitempty"`
+
+ // A computing instance's status.
+ Status *Virtual_Guest_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // A computing instance's [[SoftLayer_Virtual_Guest_Status|status]] ID
+ StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+ // Extra options needed for [[SoftLayer_Virtual_Guest/createObject|createObject]] and [[SoftLayer_Virtual_Guest/createObjects|createObjects]].
+ SupplementalCreateObjectOptions *Virtual_Guest_SupplementalCreateObjectOptions `json:"supplementalCreateObjectOptions,omitempty" xmlrpc:"supplementalCreateObjectOptions,omitempty"`
+
+ // A count of
+ TagReferenceCount *uint `json:"tagReferenceCount,omitempty" xmlrpc:"tagReferenceCount,omitempty"`
+
+ // no documentation yet
+ TagReferences []Tag_Reference `json:"tagReferences,omitempty" xmlrpc:"tagReferences,omitempty"`
+
+ // Whether or not a computing instance is a Transient Instance.
+ TransientGuestFlag *bool `json:"transientGuestFlag,omitempty" xmlrpc:"transientGuestFlag,omitempty"`
+
+ // The endpoint used to notify customers their transient guest is terminating.
+ TransientWebhookURI *Virtual_Guest_Attribute `json:"transientWebhookURI,omitempty" xmlrpc:"transientWebhookURI,omitempty"`
+
+ // The type of this virtual guest.
+ Type *Virtual_Guest_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+ // Gives the type of guest categorized as PUBLIC, DEDICATED or PRIVATE.
+ TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+
+ // A computing instance's associated upgrade request object if any.
+ UpgradeRequest *Product_Upgrade_Request `json:"upgradeRequest,omitempty" xmlrpc:"upgradeRequest,omitempty"`
+
+ // A count of a list of users that have access to this computing instance.
+ UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"`
+
+ // A base64 encoded string containing custom user data for a Cloud Computing Instance order.
+ UserData []Virtual_Guest_Attribute `json:"userData,omitempty" xmlrpc:"userData,omitempty"`
+
+ // A count of a base64 encoded string containing custom user data for a Cloud Computing Instance order.
+ UserDataCount *uint `json:"userDataCount,omitempty" xmlrpc:"userDataCount,omitempty"`
+
+ // A list of users that have access to this computing instance.
+ Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"`
+
+ // Unique ID for a computing instance's record on a virtualization platform.
+ Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"`
+
+ // The name of the bandwidth allotment that a hardware belongs to.
+ VirtualRack *Network_Bandwidth_Version1_Allotment `json:"virtualRack,omitempty" xmlrpc:"virtualRack,omitempty"`
+
+ // The id of the bandwidth allotment that a computing instance belongs to.
+ VirtualRackId *int `json:"virtualRackId,omitempty" xmlrpc:"virtualRackId,omitempty"`
+
+ // The name of the bandwidth allotment that a computing instance belongs to.
+ VirtualRackName *string `json:"virtualRackName,omitempty" xmlrpc:"virtualRackName,omitempty"` +} + +// no documentation yet +type Virtual_Guest_Attribute struct { + Entity + + // no documentation yet + Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"` + + // no documentation yet + Type *Virtual_Guest_Attribute_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // A guest attribute's value. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Virtual_Guest_Attribute_Type struct { + Entity + + // no documentation yet + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Virtual_Guest_Attribute_UserData struct { + Virtual_Guest_Attribute +} + +// The block device data type presents the structure in which all block devices will be presented. A block device attaches a disk image to a guest. Internally, the structure supports various virtualization platforms with no change to external interaction. +// +// A guest, also known as a virtual server, represents an allocation of resources on a virtual host. +type Virtual_Guest_Block_Device struct { + Entity + + // A flag indicating if a block device can be booted from. + BootableFlag *int `json:"bootableFlag,omitempty" xmlrpc:"bootableFlag,omitempty"` + + // The date a block device was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A name used to identify a block device. + Device *string `json:"device,omitempty" xmlrpc:"device,omitempty"` + + // The disk image that a block device connects to in a computing instance. + DiskImage *Virtual_Disk_Image `json:"diskImage,omitempty" xmlrpc:"diskImage,omitempty"` + + // A block device [[SoftLayer_Virtual_Disk_Image|disk image]]'s unique ID. + DiskImageId *int `json:"diskImageId,omitempty" xmlrpc:"diskImageId,omitempty"` + + // The computing instance that this block device is attached to. + Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"` + + // The [[SoftLayer_Virtual_Guest|computing instance]] that a block device is associated with. + GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"` + + // A flag indicating if a block device can be plugged into a computing instance without having to shut down the instance. + HotPlugFlag *int `json:"hotPlugFlag,omitempty" xmlrpc:"hotPlugFlag,omitempty"` + + // A computing instance block device's unique ID. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The data a block device was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The writing mode that a virtual block device is mounted as, either "RO" for read-only mode or "RW" for read and write mode. + MountMode *string `json:"mountMode,omitempty" xmlrpc:"mountMode,omitempty"` + + // The type of device that a virtual block device is mounted as, either "Disk" for a directly connected storage disk or "CD" for devices that are mounted as optical drives.. + MountType *string `json:"mountType,omitempty" xmlrpc:"mountType,omitempty"` + + // no documentation yet + Status *Virtual_Guest_Block_Device_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The status of the device, either disconnected or connected + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // A block device's unique ID on a virtualization platform. 
+ Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"` +} + +// no documentation yet +type Virtual_Guest_Block_Device_Status struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The virtual block device template data type presents the structure in which all archived image templates are presented. +// +// A virtual block device template, also known as a image template, represents the image of a virtual guest instance. +type Virtual_Guest_Block_Device_Template struct { + Entity + + // A name that identifies a block device template. + Device *string `json:"device,omitempty" xmlrpc:"device,omitempty"` + + // A block device template's disk image. + DiskImage *Virtual_Disk_Image `json:"diskImage,omitempty" xmlrpc:"diskImage,omitempty"` + + // A block device template's [[SoftLayer_Virtual_Disk_Image|disk image]] ID. + DiskImageId *int `json:"diskImageId,omitempty" xmlrpc:"diskImageId,omitempty"` + + // The amount of disk space that a block device template is using. Use this number along with the units property to obtain the correct space used. + DiskSpace *Float64 `json:"diskSpace,omitempty" xmlrpc:"diskSpace,omitempty"` + + // A block device template's group. Several block device templates can be combined together into a group for archiving purposes. + Group *Virtual_Guest_Block_Device_Template_Group `json:"group,omitempty" xmlrpc:"group,omitempty"` + + // A block device template's [[SoftLayer_Virtual_Guest_Block_Device_Template_Group|group]] ID. + GroupId *int `json:"groupId,omitempty" xmlrpc:"groupId,omitempty"` + + // A block device template's unique ID. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The units that will be used with the disk space property to identify the amount of disk space used. + Units *string `json:"units,omitempty" xmlrpc:"units,omitempty"` +} + +// The virtual block device template group data type presents the structure in which a group of archived image templates will be presented. The structure consists of a parent template group which contain multiple child template group objects. Each child template group object represents the image template in a particular location. Unless editing/deleting a specific child template group object, it is best to use the parent object. +// +// A virtual block device template group, also known as an image template group, represents an image of a virtual guest instance. +type Virtual_Guest_Block_Device_Template_Group struct { + Entity + + // A block device template group's [[SoftLayer_Account|account]]. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A count of + AccountContactCount *uint `json:"accountContactCount,omitempty" xmlrpc:"accountContactCount,omitempty"` + + // no documentation yet + AccountContacts []Account_Contact `json:"accountContacts,omitempty" xmlrpc:"accountContacts,omitempty"` + + // A block device template group's [[SoftLayer_Account|account]] ID + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of the accounts which may have read-only access to an image template group. Will only be populated for parent template group objects. + AccountReferenceCount *uint `json:"accountReferenceCount,omitempty" xmlrpc:"accountReferenceCount,omitempty"` + + // The accounts which may have read-only access to an image template group. 
Will only be populated for parent template group objects. + AccountReferences []Virtual_Guest_Block_Device_Template_Group_Accounts `json:"accountReferences,omitempty" xmlrpc:"accountReferences,omitempty"` + + // A count of the block devices that are part of an image template group + BlockDeviceCount *uint `json:"blockDeviceCount,omitempty" xmlrpc:"blockDeviceCount,omitempty"` + + // The block devices that are part of an image template group + BlockDevices []Virtual_Guest_Block_Device_Template `json:"blockDevices,omitempty" xmlrpc:"blockDevices,omitempty"` + + // The total disk space of all images in a image template group. + BlockDevicesDiskSpaceTotal *Float64 `json:"blockDevicesDiskSpaceTotal,omitempty" xmlrpc:"blockDevicesDiskSpaceTotal,omitempty"` + + // A flag indicating that customer is providing the software licenses. + ByolFlag *bool `json:"byolFlag,omitempty" xmlrpc:"byolFlag,omitempty"` + + // The image template groups that are clones of an image template group. + Children []Virtual_Guest_Block_Device_Template_Group `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of the image template groups that are clones of an image template group. + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // The date a block device template group was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The location containing this image template group. Will only be populated for child template group objects. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // A count of a collection of locations containing a copy of this image template group. Will only be populated for parent template group objects. + DatacenterCount *uint `json:"datacenterCount,omitempty" xmlrpc:"datacenterCount,omitempty"` + + // A collection of locations containing a copy of this image template group. Will only be populated for parent template group objects. + Datacenters []Location `json:"datacenters,omitempty" xmlrpc:"datacenters,omitempty"` + + // A flag indicating if this is a flex image. + FlexImageFlag *bool `json:"flexImageFlag,omitempty" xmlrpc:"flexImageFlag,omitempty"` + + // An image template's universally unique identifier. + GlobalIdentifier *string `json:"globalIdentifier,omitempty" xmlrpc:"globalIdentifier,omitempty"` + + // A block device template group's unique ID. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The virtual disk image type of this template. Value will be populated on parent and child, but only supports object filtering on the parent. + ImageType *Virtual_Disk_Image_Type `json:"imageType,omitempty" xmlrpc:"imageType,omitempty"` + + // The virtual disk image type keyname (e.g. SYSTEM, DISK_CAPTURE, ISO, etc) of this template. Value will be populated on parent and child, but only supports object filtering on the parent. + ImageTypeKeyName *string `json:"imageTypeKeyName,omitempty" xmlrpc:"imageTypeKeyName,omitempty"` + + // A user definable and optional name of a block device template group. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A block device template group's user defined note. + Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"` + + // The image template group that another image template group was cloned from. 
+ Parent *Virtual_Guest_Block_Device_Template_Group `json:"parent,omitempty" xmlrpc:"parent,omitempty"` + + // A block device template group's [[SoftLayer_Virtual_Guest_Block_Device_Template_Group|parent]] ID. This will only be set when a template group is created from a previously existing template group + ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"` + + // no documentation yet + PublicFlag *int `json:"publicFlag,omitempty" xmlrpc:"publicFlag,omitempty"` + + // A count of the ssh keys to be implemented on the server when provisioned or reloaded from an image template group. + SshKeyCount *uint `json:"sshKeyCount,omitempty" xmlrpc:"sshKeyCount,omitempty"` + + // The ssh keys to be implemented on the server when provisioned or reloaded from an image template group. + SshKeys []Security_Ssh_Key `json:"sshKeys,omitempty" xmlrpc:"sshKeys,omitempty"` + + // A template group's status. + Status *Virtual_Guest_Block_Device_Template_Group_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // A block device template group's [[SoftLayer_Virtual_Guest_Block_Device_Template_Group_Status|status]] ID + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // The storage repository that an image template group resides on. + StorageRepository *Virtual_Storage_Repository `json:"storageRepository,omitempty" xmlrpc:"storageRepository,omitempty"` + + // A block device template group's user defined summary. + Summary *string `json:"summary,omitempty" xmlrpc:"summary,omitempty"` + + // A count of the tags associated with this image template group. + TagReferenceCount *uint `json:"tagReferenceCount,omitempty" xmlrpc:"tagReferenceCount,omitempty"` + + // The tags associated with this image template group. + TagReferences []Tag_Reference `json:"tagReferences,omitempty" xmlrpc:"tagReferences,omitempty"` + + // A transaction that is being performed on a image template group. + Transaction *Provisioning_Version1_Transaction `json:"transaction,omitempty" xmlrpc:"transaction,omitempty"` + + // A block device template group's [[SoftLayer_Provisioning_Version1_Transaction|transaction]] ID. This will only be set when there is a transaction being performed on the block device template group. + TransactionId *int `json:"transactionId,omitempty" xmlrpc:"transactionId,omitempty"` + + // A block device template group's [[SoftLayer_User|user]] ID + UserRecordId *int `json:"userRecordId,omitempty" xmlrpc:"userRecordId,omitempty"` +} + +// The SoftLayer_Virtual_Guest_Block_Device_Template_Group_Accounts data type represents the SoftLayer customer accounts which have access to provision CloudLayer Computing Instances from an image template group. +// +// All accounts other than the image template group owner have read-only access to that image template group. +// +// It is important to note that this data type should only exist to give accounts access to the parent template group object, not the child. All image template sharing between accounts should occur on the parent object. +type Virtual_Guest_Block_Device_Template_Group_Accounts struct { + Entity + + // The [[SoftLayer_Account|account]] that an image template group is shared with. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The [[SoftLayer_Account|account]] ID which will have access to an image. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The date access was granted to an account. 
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The [[SoftLayer_Virtual_Guest_Block_Device_Template_Group|image template group]] that is shared with an account. + Group *Virtual_Guest_Block_Device_Template_Group `json:"group,omitempty" xmlrpc:"group,omitempty"` + + // The [[SoftLayer_Virtual_Guest_Block_Device_Template_Group|group]] ID which access will be granted to. + GroupId *int `json:"groupId,omitempty" xmlrpc:"groupId,omitempty"` +} + +// The virtual block device template group status data type represents the current status of the image template. Depending upon the status, the image template can be used for provisioning or reloading. +// +// For an operating system reload, the image template will need to have a status of 'Active' or 'Deprecated'. For a provision, the image template will need to have a status of 'Active' +// +// +type Virtual_Guest_Block_Device_Template_Group_Status struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Virtual_Guest_Boot_Parameter struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"` + + // no documentation yet + GuestBootParameterType *Virtual_Guest_Boot_Parameter_Type `json:"guestBootParameterType,omitempty" xmlrpc:"guestBootParameterType,omitempty"` + + // no documentation yet + GuestBootParameterTypeId *int `json:"guestBootParameterTypeId,omitempty" xmlrpc:"guestBootParameterTypeId,omitempty"` + + // no documentation yet + GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` +} + +// Describes a virtual guest boot parameter. In this the word class is used in the context of arguments sent to cloud computing instances such as single user mode and boot into bash. +type Virtual_Guest_Boot_Parameter_Type struct { + Entity + + // Available boot options. + BootOption *string `json:"bootOption,omitempty" xmlrpc:"bootOption,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A description of the boot parameter + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Indentifier for record. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The key name of the boot parameter. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The common name of the boot parameter. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The virtual machine arguments + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The virtual guest network component data type presents the structure in which all computing instance network components are presented. Internally, the structure supports various virtualization platforms with no change to external interaction. 
+// +// A guest, also known as a virtual server, represents an allocation of resources on a virtual host. +type Virtual_Guest_Network_Component struct { + Entity + + // The date a computing instance's network component was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The computing instance that this network component exists on. + Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"` + + // The unique ID of the [[SoftLayer_Virtual_Guest|computing instance]] that this network component belongs to. + GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"` + + // no documentation yet + HighAvailabilityFirewallFlag *bool `json:"highAvailabilityFirewallFlag,omitempty" xmlrpc:"highAvailabilityFirewallFlag,omitempty"` + + // no documentation yet + IcpBinding *Virtual_Guest_Network_Component_IcpBinding `json:"icpBinding,omitempty" xmlrpc:"icpBinding,omitempty"` + + // A computing instance's network component's unique ID. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of the records of all IP addresses bound to a computing instance's network component. + IpAddressBindingCount *uint `json:"ipAddressBindingCount,omitempty" xmlrpc:"ipAddressBindingCount,omitempty"` + + // The records of all IP addresses bound to a computing instance's network component. + IpAddressBindings []Virtual_Guest_Network_Component_IpAddress `json:"ipAddressBindings,omitempty" xmlrpc:"ipAddressBindings,omitempty"` + + // A computing instance network component's unique MAC address. + MacAddress *string `json:"macAddress,omitempty" xmlrpc:"macAddress,omitempty"` + + // A computing instance network component's maximum allowed speed, measured in Mbit per second. ''maxSpeed'' is determined by the capabilities of the network interface and the port speed purchased on your SoftLayer computing instance. + MaxSpeed *int `json:"maxSpeed,omitempty" xmlrpc:"maxSpeed,omitempty"` + + // The date a computing instance's network component was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A computing instance network component's short name. This is usually ''eth''. Use this in conjunction with the ''port'' property to identify a network component. For instance, the "eth0" interface on a server has the network component name "eth" and port 0. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The upstream network component firewall. + NetworkComponentFirewall *Network_Component_Firewall `json:"networkComponentFirewall,omitempty" xmlrpc:"networkComponentFirewall,omitempty"` + + // A computing instance's network component's [[SoftLayer_Virtual_Network|network]] ID + NetworkId *int `json:"networkId,omitempty" xmlrpc:"networkId,omitempty"` + + // The VLAN that a computing instance network component's subnet is associated with. + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // A computing instance network component's port number. Most computing instances have more than one network interface. The port property separates these interfaces. Use this in conjunction with the ''name'' property to identify a network component. For instance, the "eth0" interface on a server has the network component name "eth" and port 0. + Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"` + + // A computing instance network component's primary IP address. 
+ PrimaryIpAddress *string `json:"primaryIpAddress,omitempty" xmlrpc:"primaryIpAddress,omitempty"` + + // no documentation yet + PrimaryIpAddressRecord *Network_Subnet_IpAddress `json:"primaryIpAddressRecord,omitempty" xmlrpc:"primaryIpAddressRecord,omitempty"` + + // A network component's subnet for its primary IP address + PrimarySubnet *Network_Subnet `json:"primarySubnet,omitempty" xmlrpc:"primarySubnet,omitempty"` + + // A network component's primary IPv6 IP address record. + PrimaryVersion6IpAddressRecord *Network_Subnet_IpAddress `json:"primaryVersion6IpAddressRecord,omitempty" xmlrpc:"primaryVersion6IpAddressRecord,omitempty"` + + // A network component's routers. + Router *Hardware_Router `json:"router,omitempty" xmlrpc:"router,omitempty"` + + // A count of the bindings associating security groups to this network component + SecurityGroupBindingCount *uint `json:"securityGroupBindingCount,omitempty" xmlrpc:"securityGroupBindingCount,omitempty"` + + // The bindings associating security groups to this network component + SecurityGroupBindings []Virtual_Network_SecurityGroup_NetworkComponentBinding `json:"securityGroupBindings,omitempty" xmlrpc:"securityGroupBindings,omitempty"` + + // A computing instance network component's speed, measured in Mbit per second. + Speed *int `json:"speed,omitempty" xmlrpc:"speed,omitempty"` + + // A computing instance network component's status. This can be one of four possible values: "ACTIVE", "DISABLED", "INACTIVE", or "ABUSE_DISCONNECT". "ACTIVE" network components are enabled and in use on a cloud instance. "ABUSE_DISCONNECT" status components have been administratively disabled by SoftLayer accounting or abuse. "DISABLED" components have been administratively disabled by you, the user. You should never see a network interface in MACWAIT state. If you happen to see one please contact SoftLayer support. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // A count of a network component's subnets. A subnet is a group of IP addresses + SubnetCount *uint `json:"subnetCount,omitempty" xmlrpc:"subnetCount,omitempty"` + + // A network component's subnets. A subnet is a group of IP addresses + Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"` + + // A computing instance's network component's unique ID on a virtualization platform. + Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"` +} + +// no documentation yet +type Virtual_Guest_Network_Component_IcpBinding struct { + Entity + + // no documentation yet + InterfaceId *string `json:"interfaceId,omitempty" xmlrpc:"interfaceId,omitempty"` + + // no documentation yet + IpAllocationId *string `json:"ipAllocationId,omitempty" xmlrpc:"ipAllocationId,omitempty"` + + // The network component associated with this object. + NetworkComponent *Virtual_Guest_Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` + + // The type of IP that this IP allocation id record references. Set to PRIMARY for the first servic port. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // no documentation yet + VpcId *string `json:"vpcId,omitempty" xmlrpc:"vpcId,omitempty"` +} + +// The SoftLayer_Virtual_Guest_Network_Component_IpAddress data type contains general information relating to the binding of a single network component to a single SoftLayer IP address. +type Virtual_Guest_Network_Component_IpAddress struct { + Entity + + // The IP address associated with this object's network component. 
+ IpAddress *Network_Subnet_IpAddress `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // The unique ID of the [[SoftLayer_Network_Subnet_ipAddress|ip address]] this virtual IP address is associated with. + IpAddressId *int `json:"ipAddressId,omitempty" xmlrpc:"ipAddressId,omitempty"` + + // The network component associated with this object's IP address. + NetworkComponent *Virtual_Guest_Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` + + // The port that a network component has reserved. This field is only required for some IP address types. + Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"` + + // The type of IP that this IP address record references. Some examples are PRIMARY for the network component's primary IP address and CONSOLE_PROXY which represents the IP information for logging into a computing instance's console. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The power state class provides a common set of values for which a guest's power state will be presented in the SoftLayer API. +type Virtual_Guest_Power_State struct { + Entity + + // The description of a power state + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The key name of a power state + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The name of a power state + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Virtual_Guest_Status struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Virtual_Guest_SupplementalCreateObjectOptions struct { + Entity + + // The mode used to boot the [[SoftLayer_Virtual_Guest]]. Supported values are 'PV' and 'HVM'. + BootMode *string `json:"bootMode,omitempty" xmlrpc:"bootMode,omitempty"` + + // When set the startCpus and maxMemory are defined by the flavor. If the flavor includes local storage blockDevice 0 is also defined by the flavor. When startCpus, maxMemory, or blockDevice 0 are also provided on the template object they are validated against the flavor provided. + FlavorKeyName *string `json:"flavorKeyName,omitempty" xmlrpc:"flavorKeyName,omitempty"` + + // When explicitly set to true, createObject(s) will fail unless the order is started automatically. This can be used by automated systems to fail an order that might otherwise require manual approval. For multi-guest orders via [[SoftLayer_Virtual_Guest/createObjects|createObjects]], this value must be the exact same for every item. + ImmediateApprovalOnlyFlag *bool `json:"immediateApprovalOnlyFlag,omitempty" xmlrpc:"immediateApprovalOnlyFlag,omitempty"` + + // URI of the script to be downloaded and executed after installation is complete. This can be different for each virtual guest when multiple are sent to [[SoftLayer_Virtual_Guest/createObjects|createObjects]]. 
+ PostInstallScriptUri *string `json:"postInstallScriptUri,omitempty" xmlrpc:"postInstallScriptUri,omitempty"` +} + +// SoftLayer_Virtual_Guest_Type models the type of a [[SoftLayer_Virtual_Guest]] (PUBLIC | DEDICATED | PRIVATE) +type Virtual_Guest_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Virtual_Guest_Vpc_IpAllocation struct { + Entity +} + +// no documentation yet +type Virtual_Guest_Vpc_Subnet struct { + Entity +} + +// The virtual host represents the platform on which virtual guests reside. At times a virtual host has no allocations on the physical server, however with many modern platforms it is a virtual machine with small CPU and Memory allocations that runs in the Control Domain. +type Virtual_Host struct { + Entity + + // The account which a virtual host belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A virtual host's associated account id + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // Boolean flag indicating whether this virtualization platform gets billed per guest rather than at a fixed rate. + BilledPerGuestFlag *bool `json:"billedPerGuestFlag,omitempty" xmlrpc:"billedPerGuestFlag,omitempty"` + + // Boolean flag indicating whether this virtualization platform gets billed per memory usage rather than at a fixed rate. + BilledPerMemoryUsageFlag *bool `json:"billedPerMemoryUsageFlag,omitempty" xmlrpc:"billedPerMemoryUsageFlag,omitempty"` + + // The date a virtual host was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A virtual host's description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The enabled flag specifies whether a virtual host can run guests. + EnabledFlag *int `json:"enabledFlag,omitempty" xmlrpc:"enabledFlag,omitempty"` + + // A count of the guests associated with a virtual host. + GuestCount *uint `json:"guestCount,omitempty" xmlrpc:"guestCount,omitempty"` + + // The guests associated with a virtual host. + Guests []Virtual_Guest `json:"guests,omitempty" xmlrpc:"guests,omitempty"` + + // The hardware record which a virtual host resides on. + Hardware *Hardware_Server `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // A hardware device which a virtual host resides. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // Unique ID for a virtual host. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The metric tracking object for this virtual host. + MetricTrackingObject *Metric_Tracking_Object `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"` + + // The date a virtual host was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A virtual host's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The amount of memory physically available for a virtual host. + PhysicalMemoryCapacity *int `json:"physicalMemoryCapacity,omitempty" xmlrpc:"physicalMemoryCapacity,omitempty"` + + // Unique ID for a virtual host's record on a virtualization platform. 
+ Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"` +} + +// The SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding data type contains general information for a single binding. A binding associates a [[SoftLayer_Virtual_Guest_Network_Component]] with a [[SoftLayer_Network_SecurityGroup]]. +type Virtual_Network_SecurityGroup_NetworkComponentBinding struct { + Entity + + // The unique ID for a binding. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + NetworkComponent *Virtual_Guest_Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` + + // The ID of the network component. + NetworkComponentId *int `json:"networkComponentId,omitempty" xmlrpc:"networkComponentId,omitempty"` + + // no documentation yet + SecurityGroup *Network_SecurityGroup `json:"securityGroup,omitempty" xmlrpc:"securityGroup,omitempty"` + + // The ID of the security group. + SecurityGroupId *int `json:"securityGroupId,omitempty" xmlrpc:"securityGroupId,omitempty"` +} + +// The SoftLayer_Virtual_Storage_Repository represents a web based storage system that can be accessed through many types of devices, interfaces, and other resources. +type Virtual_Storage_Repository struct { + Entity + + // The [[SoftLayer_Account|account]] that a storage repository belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The current billing item for a storage repository. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // A storage repositories capacity measured in Giga-Bytes (GB) + Capacity *Float64 `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"` + + // The datacenter that a virtual storage repository resides in. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // A storage repositories description that describes its purpose or contents + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A count of the [[SoftLayer_Virtual_Disk_Image|disk images]] that are in a storage repository. Disk images are the virtual hard drives for a virtual guest. + DiskImageCount *uint `json:"diskImageCount,omitempty" xmlrpc:"diskImageCount,omitempty"` + + // The [[SoftLayer_Virtual_Disk_Image|disk images]] that are in a storage repository. Disk images are the virtual hard drives for a virtual guest. + DiskImages []Virtual_Disk_Image `json:"diskImages,omitempty" xmlrpc:"diskImages,omitempty"` + + // A count of the computing instances that have disk images in a storage repository. + GuestCount *uint `json:"guestCount,omitempty" xmlrpc:"guestCount,omitempty"` + + // The computing instances that have disk images in a storage repository. + Guests []Virtual_Guest `json:"guests,omitempty" xmlrpc:"guests,omitempty"` + + // Unique ID for a storage repository. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + MetricTrackingObject *Metric_Tracking_Object_Virtual_Storage_Repository `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"` + + // A storage repositories name that describes its purpose or contents + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + PublicFlag *int `json:"publicFlag,omitempty" xmlrpc:"publicFlag,omitempty"` + + // The current billing item for a public storage repository. 
+ PublicImageBillingItem *Billing_Item `json:"publicImageBillingItem,omitempty" xmlrpc:"publicImageBillingItem,omitempty"` + + // A storage repository's [[SoftLayer_Virtual_Storage_Repository_Type|type]]. + Type *Virtual_Storage_Repository_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // A storage repositories [[SoftLayer_Virtual_Storage_Repository_Type|type]] ID + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// SoftLayer employs many different types of repositories that computing instances use as their storage volume. SoftLayer_Virtual_Storage_Repository_Type models a single storage type. Common types of storage repositories include networked file systems, logical volume management, and local disk volumes for swap and page file management. +type Virtual_Storage_Repository_Type struct { + Entity + + // A brief description os a storage repository type. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A storage repository type's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The storage repositories on a SoftLayer customer account that belong to this type. + StorageRepositories []Virtual_Storage_Repository `json:"storageRepositories,omitempty" xmlrpc:"storageRepositories,omitempty"` + + // A count of the storage repositories on a SoftLayer customer account that belong to this type. + StorageRepositoryCount *uint `json:"storageRepositoryCount,omitempty" xmlrpc:"storageRepositoryCount,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/filter/filters.go b/vendor/github.com/softlayer/softlayer-go/filter/filters.go new file mode 100644 index 000000000000..5f78040141cc --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/filter/filters.go @@ -0,0 +1,305 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// See reference at https://sldn.softlayer.com/article/object-filters. +// Examples in the README.md file and in the examples directory. +package filter + +import ( + "encoding/json" + "fmt" + "strings" +) + +type Filter struct { + Path string + Op string + Opts map[string]interface{} + Val interface{} +} + +type Filters []Filter + +// Returns an array of Filters that you can later call .Build() on. +func New(args ...Filter) Filters { + return args +} + +// This is like calling New().Build(). +// Returns a JSON string that can be used as the object filter. +func Build(args ...Filter) string { + filters := Filters{} + + for _, arg := range args { + filters = append(filters, arg) + } + + return filters.Build() +} + +// This creates a new Filter. The path is a dot-delimited path down +// to the attribute this filter is for. The second value parameter +// is optional. 
+func Path(path string, val ...interface{}) Filter { + if len(val) > 0 { + return Filter{Path: path, Val: val[0]} + } + + return Filter{Path: path} +} + +// Builds the filter string in JSON format +func (fs Filters) Build() string { + // Loops around filters, + // splitting path on '.' and looping around path pieces. + // Idea is to create a map/tree like map[string]interface{}. + // Every component in the path is a node to create in the tree. + // Once we get to the leaf, we set the operation. + // map[string]interface{}{"operation": op+" "+value} + // If Op is "", then just map[string]interface{}{"operation": value}. + // Afterwards, the Opts are traversed; []map[string]interface{}{} + // For every entry in Opts, we create one map, and append it to an array of maps. + // At the end, json.Marshal the whole thing. + result := map[string]interface{}{} + for _, filter := range fs { + if filter.Path == "" { + continue + } + + cursor := result + nodes := strings.Split(filter.Path, ".") + for len(nodes) > 1 { + branch := nodes[0] + if _, ok := cursor[branch]; !ok { + cursor[branch] = map[string]interface{}{} + } + cursor = cursor[branch].(map[string]interface{}) + nodes = nodes[1:len(nodes)] + } + + leaf := nodes[0] + if filter.Val != nil { + operation := filter.Val + if filter.Op != "" { + var format string + switch filter.Val.(type) { + case int: + format = "%d" + default: + format = "%s" + } + operation = filter.Op + " " + fmt.Sprintf(format, filter.Val) + } + + cursor[leaf] = map[string]interface{}{ + "operation": operation, + } + } + + if filter.Opts == nil { + continue + } + + options := []map[string]interface{}{} + for name, value := range filter.Opts { + options = append(options, map[string]interface{}{ + "name": name, + "value": value, + }) + } + + cursor[leaf] = map[string]interface{}{ + "operation": filter.Op, + "options": options, + } + } + + jsonStr, _ := json.Marshal(result) + return string(jsonStr) +} + +// Builds the filter string in JSON format +func (f Filter) Build() string { + return Build(f) +} + +// Add options to the filter. Can be chained for multiple options. 
+func (f Filter) Opt(name string, value interface{}) Filter { + if f.Opts == nil { + f.Opts = map[string]interface{}{} + } + + f.Opts[name] = value + return f +} + +// Set this filter to test if property is equal to the value +func (f Filter) Eq(val interface{}) Filter { + f.Op = "" + f.Val = val + return f +} + +// Set this filter to test if property is not equal to the value +func (f Filter) NotEq(val interface{}) Filter { + f.Op = "!=" + f.Val = val + return f +} + +// Set this filter to test if property is like the value +func (f Filter) Like(val interface{}) Filter { + f.Op = "~" + f.Val = val + return f +} + +// Set this filter to test if property is unlike value +func (f Filter) NotLike(val interface{}) Filter { + f.Op = "!~" + f.Val = val + return f +} + +// Set this filter to test if property is less than value +func (f Filter) LessThan(val interface{}) Filter { + f.Op = "<" + f.Val = val + return f +} + +// Set this filter to test if property is less than or equal to the value +func (f Filter) LessThanOrEqual(val interface{}) Filter { + f.Op = "<=" + f.Val = val + return f +} + +// Set this filter to test if property is greater than value +func (f Filter) GreaterThan(val interface{}) Filter { + f.Op = ">" + f.Val = val + return f +} + +// Set this filter to test if property is greater than or equal to value +func (f Filter) GreaterThanOrEqual(val interface{}) Filter { + f.Op = ">=" + f.Val = val + return f +} + +// Set this filter to test if property is null +func (f Filter) IsNull() Filter { + f.Op = "" + f.Val = "is null" + return f +} + +// Set this filter to test if property is not null +func (f Filter) NotNull() Filter { + f.Op = "" + f.Val = "not null" + return f +} + +// Set this filter to test if property contains the value +func (f Filter) Contains(val interface{}) Filter { + f.Op = "*=" + f.Val = val + return f +} + +// Set this filter to test if property does not contain the value +func (f Filter) NotContains(val interface{}) Filter { + f.Op = "!*=" + f.Val = val + return f +} + +// Set this filter to test if property starts with the value +func (f Filter) StartsWith(val interface{}) Filter { + f.Op = "^=" + f.Val = val + return f +} + +// Set this filter to test if property does not start with the value +func (f Filter) NotStartsWith(val interface{}) Filter { + f.Op = "!^=" + f.Val = val + return f +} + +// Set this filter to test if property ends with the value +func (f Filter) EndsWith(val interface{}) Filter { + f.Op = "$=" + f.Val = val + return f +} + +// Set this filter to test if property does not end with the value +func (f Filter) NotEndsWith(val interface{}) Filter { + f.Op = "!$=" + f.Val = val + return f +} + +// Set this filter to test if property is one of the values in args. +func (f Filter) In(args ...interface{}) Filter { + f.Op = "in" + values := []interface{}{} + for _, arg := range args { + values = append(values, arg) + } + + return f.Opt("data", values) +} + +// Set this filter to test if property has a date older than the value in days. +func (f Filter) DaysPast(val interface{}) Filter { + f.Op = ">= currentDate -" + f.Val = val + return f +} + +// Set this filter to test if property has the exact date as the value. +func (f Filter) Date(date string) Filter { + f.Op = "isDate" + f.Val = nil + return f.Opt("date", []string{date}) +} + +// Set this filter to test if property has a date before the value. 
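// A minimal usage sketch of the filter builder above (not taken from this patch;
// the property paths such as "virtualGuests.hostname" are illustrative assumptions).
// Chained predicates are turned into the nested JSON object filter that the Build
// comment describes; predicates with options (e.g. In) go through the "options" branch.
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/filter"
)

func main() {
	// Two predicates on the same object, combined into one filter document.
	objFilter := filter.Build(
		filter.Path("virtualGuests.hostname").StartsWith("web"),
		filter.Path("virtualGuests.maxMemory").GreaterThanOrEqual(2048),
	)
	fmt.Println(objFilter)
	// Expected (roughly):
	// {"virtualGuests":{"hostname":{"operation":"^= web"},"maxMemory":{"operation":">= 2048"}}}

	// An In predicate stores its values via Opt("data", ...), so Build emits an "options" array.
	fmt.Println(filter.Path("hardware.datacenter.name").In("dal10", "ams01").Build())
	// Expected (roughly):
	// {"hardware":{"datacenter":{"name":{"operation":"in","options":[{"name":"data","value":["dal10","ams01"]}]}}}}
}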
+func (f Filter) DateBefore(date string) Filter { + f.Op = "lessThanDate" + f.Val = nil + return f.Opt("date", []string{date}) +} + +// Set this filter to test if property has a date after the value. +func (f Filter) DateAfter(date string) Filter { + f.Op = "greaterThanDate" + f.Val = nil + return f.Opt("date", []string{date}) +} + +// Set this filter to test if property has a date between the values. +func (f Filter) DateBetween(start string, end string) Filter { + f.Op = "betweenDate" + f.Val = nil + return f.Opt("startDate", []string{start}).Opt("endDate", []string{end}) +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/account.go b/vendor/github.com/softlayer/softlayer-go/services/account.go new file mode 100644 index 000000000000..914275d47d6c --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/account.go @@ -0,0 +1,5459 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// The SoftLayer_Account data type contains general information relating to a single SoftLayer customer account. Personal information in this type such as names, addresses, and phone numbers are assigned to the account only and not to users belonging to the account. The SoftLayer_Account data type contains a number of relational properties that are used by the SoftLayer customer portal to quickly present a variety of account related services to it's users. +// +// SoftLayer customers are unable to change their company account information in the portal or the API. If you need to change this information please open a sales ticket in our customer portal and our account management staff will assist you. 
+type Account struct { + Session *session.Session + Options sl.Options +} + +// GetAccountService returns an instance of the Account SoftLayer service +func GetAccountService(sess *session.Session) Account { + return Account{Session: sess} +} + +func (r Account) Id(id int) Account { + r.Options.Id = &id + return r +} + +func (r Account) Mask(mask string) Account { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account) Filter(filter string) Account { + r.Options.Filter = filter + return r +} + +func (r Account) Limit(limit int) Account { + r.Options.Limit = &limit + return r +} + +func (r Account) Offset(offset int) Account { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account) ActivatePartner(accountId *string, hashCode *string) (resp datatypes.Account, err error) { + params := []interface{}{ + accountId, + hashCode, + } + err = r.Session.DoRequest("SoftLayer_Account", "activatePartner", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) AddAchInformation(achInformation *datatypes.Container_Billing_Info_Ach) (resp bool, err error) { + params := []interface{}{ + achInformation, + } + err = r.Session.DoRequest("SoftLayer_Account", "addAchInformation", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) AddReferralPartnerPaymentOption(paymentOption *datatypes.Container_Referral_Partner_Payment_Option) (resp bool, err error) { + params := []interface{}{ + paymentOption, + } + err = r.Session.DoRequest("SoftLayer_Account", "addReferralPartnerPaymentOption", params, &r.Options, &resp) + return +} + +// This method indicates whether or not Bandwidth Pooling updates are blocked for the account so the billing cycle can run. Generally, accounts are restricted from moving servers in or out of Bandwidth Pools from 12:00 CST on the day prior to billing, until the billing batch completes, sometime after midnight the day of actual billing for the account. +func (r Account) AreVdrUpdatesBlockedForBilling() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "areVdrUpdatesBlockedForBilling", nil, &r.Options, &resp) + return +} + +// Cancel the PayPal Payment Request process. During the process of submitting a PayPal payment request, the customer is redirected to PayPal to confirm the request. If the customer elects to cancel the payment from PayPal, they are returned to SoftLayer where the manual payment record is updated to a status of canceled. +func (r Account) CancelPayPalTransaction(token *string, payerId *string) (resp bool, err error) { + params := []interface{}{ + token, + payerId, + } + err = r.Session.DoRequest("SoftLayer_Account", "cancelPayPalTransaction", params, &r.Options, &resp) + return +} + +// Complete the PayPal Payment Request process and receive confirmation message. During the process of submitting a PayPal payment request, the customer is redirected to PayPal to confirm the request. Once confirmed, PayPal returns the customer to SoftLayer where an attempt is made to finalize the transaction. A status message regarding the attempt is returned to the calling function. 
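// A minimal sketch (not part of the vendored file) of the option helpers defined
// just above. The credentials are placeholders and session.New is the constructor
// documented in the softlayer-go README; no request is made here. The point shown:
// the helpers use value receivers, so chaining returns a modified copy, and Mask
// wraps a comma- or bracket-containing mask in "mask[...]" when the prefix is missing.
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("exampleUser", "exampleApiKey") // placeholder credentials
	acct := services.GetAccountService(sess)

	// Each option helper returns a modified copy; "acct" itself is left untouched.
	scoped := acct.Mask("id,companyName").Limit(25)

	// "id,companyName" contains a comma and lacks the "mask[" prefix,
	// so Mask stores it as "mask[id,companyName]".
	fmt.Println(scoped.Options.Mask)
	fmt.Println(acct.Options.Mask) // still empty: the base service value is unchanged
}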
+func (r Account) CompletePayPalTransaction(token *string, payerId *string) (resp string, err error) { + params := []interface{}{ + token, + payerId, + } + err = r.Session.DoRequest("SoftLayer_Account", "completePayPalTransaction", params, &r.Options, &resp) + return +} + +// Retrieve the number of hourly services on an account that are active, plus any pending orders with hourly services attached. +func (r Account) CountHourlyInstances() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "countHourlyInstances", nil, &r.Options, &resp) + return +} + +// Create a new Customer user record in the SoftLayer customer portal. This is a wrapper around the Customer::createObject call, please see the documentation of that API. This wrapper adds the feature of the "silentlyCreate" option, which bypasses the IBMid invitation email process. False (the default) goes through the IBMid invitation email process, which creates the IBMid/SoftLayer Single-Sign-On (SSO) user link when the invitation is accepted (meaning the email has been received, opened, and the link(s) inside the email have been clicked to complete the process). True will silently (no email) create the IBMid/SoftLayer user SSO link immediately. Either case will use the value in the template object 'email' field to indicate the IBMid to use. This can be the username or, if unique, the email address of an IBMid. In the silent case, the IBMid must already exist. In the non-silent invitation email case, the IBMid can be created during this flow, by specifying an email address to be used to create the IBMid.All the features and restrictions of createObject apply to this API as well. In addition, note that the "silentlyCreate" flag is ONLY valid for IBMid-authenticated accounts. +func (r Account) CreateUser(templateObject *datatypes.User_Customer, password *string, vpnPassword *string, silentlyCreateFlag *bool) (resp datatypes.User_Customer, err error) { + params := []interface{}{ + templateObject, + password, + vpnPassword, + silentlyCreateFlag, + } + err = r.Session.DoRequest("SoftLayer_Account", "createUser", params, &r.Options, &resp) + return +} + +//

+//     Warning: If you remove the EU Supported account flag, you are removing the restriction that limits Processing activities to EU personnel.
+//
+func (r Account) DisableEuSupport() (err error) {
+	var resp datatypes.Void
+	err = r.Session.DoRequest("SoftLayer_Account", "disableEuSupport", nil, &r.Options, &resp)
+	return
+}
+
+//
+//     If you select the EU Supported option, the most common Support issues will be limited to IBM Cloud staff located in the EU. In the event your issue requires non-EU expert assistance, it will be reviewed and approval given prior to any non-EU intervention. Additionally, in order to support and update the services, cross-border Processing of your data may still occur. Please ensure you take the necessary actions to allow this Processing, as detailed in the Cloud Service Terms. A standard Data Processing Addendum is available here.
+//
+//
+//     Important note (you will only see this once): Orders using the API will proceed without additional notifications. The terms related to selecting products, services, or locations outside the EU apply to API orders. Users you create and API keys you generate will have the ability to order products, services, and locations outside of the EU. It is your responsibility to educate anyone you grant access to your account on the consequences and requirements if they make a selection that is not in the EU Supported option. In order to meet EU Supported requirements, the current PPTP VPN solution will no longer be offered or supported.
+//
+//
+//     If PPTP has been selected as an option for any users in your account by itself (or in combination with another VPN offering), you will need to disable PPTP before selecting the EU Supported account feature. For more information on VPN changes, click here.
    +func (r Account) EnableEuSupport() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Account", "enableEuSupport", nil, &r.Options, &resp) + return +} + +// Retrieve An email address that is responsible for abuse and legal inquiries on behalf of an account. For instance, new legal and abuse tickets are sent to this address. +func (r Account) GetAbuseEmail() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAbuseEmail", nil, &r.Options, &resp) + return +} + +// Retrieve Email addresses that are responsible for abuse and legal inquiries on behalf of an account. For instance, new legal and abuse tickets are sent to these addresses. +func (r Account) GetAbuseEmails() (resp []datatypes.Account_AbuseEmail, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAbuseEmails", nil, &r.Options, &resp) + return +} + +// This method returns an array of SoftLayer_Container_Network_Storage_Evault_WebCc_JobDetails objects for the given start and end dates. Start and end dates should be be valid ISO 8601 dates. The backupStatus can be one of null, 'success', 'failed', or 'conflict'. The 'success' backupStatus returns jobs with a status of 'COMPLETED', the 'failed' backupStatus returns jobs with a status of 'FAILED', while the 'conflict' backupStatus will return jobs that are not 'COMPLETED' or 'FAILED'. +func (r Account) GetAccountBackupHistory(startDate *datatypes.Time, endDate *datatypes.Time, backupStatus *string) (resp []datatypes.Container_Network_Storage_Evault_WebCc_JobDetails, err error) { + params := []interface{}{ + startDate, + endDate, + backupStatus, + } + err = r.Session.DoRequest("SoftLayer_Account", "getAccountBackupHistory", params, &r.Options, &resp) + return +} + +// Retrieve The account contacts on an account. +func (r Account) GetAccountContacts() (resp []datatypes.Account_Contact, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAccountContacts", nil, &r.Options, &resp) + return +} + +// Retrieve The account software licenses owned by an account +func (r Account) GetAccountLicenses() (resp []datatypes.Software_AccountLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAccountLicenses", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetAccountLinks() (resp []datatypes.Account_Link, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAccountLinks", nil, &r.Options, &resp) + return +} + +// Retrieve An account's status presented in a more detailed data type. +func (r Account) GetAccountStatus() (resp datatypes.Account_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAccountStatus", nil, &r.Options, &resp) + return +} + +// This method pulls an account trait by its key. +func (r Account) GetAccountTraitValue(keyName *string) (resp string, err error) { + params := []interface{}{ + keyName, + } + err = r.Session.DoRequest("SoftLayer_Account", "getAccountTraitValue", params, &r.Options, &resp) + return +} + +// Retrieve The billing item associated with an account's monthly discount. 
+func (r Account) GetActiveAccountDiscountBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveAccountDiscountBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The active account software licenses owned by an account +func (r Account) GetActiveAccountLicenses() (resp []datatypes.Software_AccountLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveAccountLicenses", nil, &r.Options, &resp) + return +} + +// Retrieve The active address(es) that belong to an account. +func (r Account) GetActiveAddresses() (resp []datatypes.Account_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveAddresses", nil, &r.Options, &resp) + return +} + +// Return all currently active alarms on this account. Only alarms on hardware and virtual servers accessible to the current user will be returned. +func (r Account) GetActiveAlarms() (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveAlarms", nil, &r.Options, &resp) + return +} + +// Retrieve All billing agreements for an account +func (r Account) GetActiveBillingAgreements() (resp []datatypes.Account_Agreement, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveBillingAgreements", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetActiveCatalystEnrollment() (resp datatypes.Catalyst_Enrollment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveCatalystEnrollment", nil, &r.Options, &resp) + return +} + +// Retrieve The account's active top level colocation containers. +func (r Account) GetActiveColocationContainers() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveColocationContainers", nil, &r.Options, &resp) + return +} + +// Retrieve Account's currently active Flexible Credit enrollment. +func (r Account) GetActiveFlexibleCreditEnrollment() (resp datatypes.FlexibleCredit_Enrollment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveFlexibleCreditEnrollment", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetActiveNotificationSubscribers() (resp []datatypes.Notification_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveNotificationSubscribers", nil, &r.Options, &resp) + return +} + +// This is deprecated and will not return any results. +func (r Account) GetActiveOutletPackages() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveOutletPackages", nil, &r.Options, &resp) + return +} + +// This method will return the [[SoftLayer_Product_Package]] objects from which you can order a bare metal server, virtual server, service (such as CDN or Object Storage) or other software. Once you have the package you want to order from, you may query one of various endpoints from that package to get specific information about its products and pricing. See [[SoftLayer_Product_Package/getCategories|getCategories]] or [[SoftLayer_Product_Package/getItems|getItems]] for more information. +// +// Packages that have been retired will not appear in this result set. +func (r Account) GetActivePackages() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActivePackages", nil, &r.Options, &resp) + return +} + +// This method is deprecated and should not be used in production code. 
+// +// This method will return the [[SoftLayer_Product_Package]] objects from which you can order a bare metal server, virtual server, service (such as CDN or Object Storage) or other software filtered by an attribute type associated with the package. Once you have the package you want to order from, you may query one of various endpoints from that package to get specific information about its products and pricing. See [[SoftLayer_Product_Package/getCategories|getCategories]] or [[SoftLayer_Product_Package/getItems|getItems]] for more information. +func (r Account) GetActivePackagesByAttribute(attributeKeyName *string) (resp []datatypes.Product_Package, err error) { + params := []interface{}{ + attributeKeyName, + } + err = r.Session.DoRequest("SoftLayer_Account", "getActivePackagesByAttribute", params, &r.Options, &resp) + return +} + +// This method pulls all the active private hosted cloud packages. This will give you a basic description of the packages that are currently active and from which you can order private hosted cloud configurations. +func (r Account) GetActivePrivateHostedCloudPackages() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActivePrivateHostedCloudPackages", nil, &r.Options, &resp) + return +} + +// Retrieve An account's non-expired quotes. +func (r Account) GetActiveQuotes() (resp []datatypes.Billing_Order_Quote, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveQuotes", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual software licenses controlled by an account +func (r Account) GetActiveVirtualLicenses() (resp []datatypes.Software_VirtualLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveVirtualLicenses", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated load balancers. +func (r Account) GetAdcLoadBalancers() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAdcLoadBalancers", nil, &r.Options, &resp) + return +} + +// Retrieve All the address(es) that belong to an account. +func (r Account) GetAddresses() (resp []datatypes.Account_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve An affiliate identifier associated with the customer account. +func (r Account) GetAffiliateId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAffiliateId", nil, &r.Options, &resp) + return +} + +// Returns URL uptime data for your account +func (r Account) GetAggregatedUptimeGraph(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account", "getAggregatedUptimeGraph", params, &r.Options, &resp) + return +} + +// Retrieve The billing items that will be on an account's next invoice. +func (r Account) GetAllBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAllBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve The billing items that will be on an account's next invoice. 
+func (r Account) GetAllCommissionBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAllCommissionBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve The billing items that will be on an account's next invoice. +func (r Account) GetAllRecurringTopLevelBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAllRecurringTopLevelBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve The billing items that will be on an account's next invoice. Does not consider associated items. +func (r Account) GetAllRecurringTopLevelBillingItemsUnfiltered() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAllRecurringTopLevelBillingItemsUnfiltered", nil, &r.Options, &resp) + return +} + +// Retrieve The billing items that will be on an account's next invoice. +func (r Account) GetAllSubnetBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAllSubnetBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve All billing items of an account. +func (r Account) GetAllTopLevelBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAllTopLevelBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve The billing items that will be on an account's next invoice. Does not consider associated items. +func (r Account) GetAllTopLevelBillingItemsUnfiltered() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAllTopLevelBillingItemsUnfiltered", nil, &r.Options, &resp) + return +} + +// Retrieve Indicates whether this account is allowed to silently migrate to use IBMid Authentication. +func (r Account) GetAllowIbmIdSilentMigrationFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAllowIbmIdSilentMigrationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Flag indicating if this account can be linked with Bluemix. +func (r Account) GetAllowsBluemixAccountLinkingFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAllowsBluemixAccountLinkingFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) GetAlternateCreditCardData() (resp datatypes.Container_Account_Payment_Method_CreditCard, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAlternateCreditCardData", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated application delivery controller records. +func (r Account) GetApplicationDeliveryControllers() (resp []datatypes.Network_Application_Delivery_Controller, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getApplicationDeliveryControllers", nil, &r.Options, &resp) + return +} + +// Retrieve a single [[SoftLayer_Account_Attribute]] record by its [[SoftLayer_Account_Attribute_Type|types's]] key name. +func (r Account) GetAttributeByType(attributeType *string) (resp datatypes.Account_Attribute, err error) { + params := []interface{}{ + attributeType, + } + err = r.Session.DoRequest("SoftLayer_Account", "getAttributeByType", params, &r.Options, &resp) + return +} + +// Retrieve The account attribute values for a SoftLayer customer account. 
+func (r Account) GetAttributes() (resp []datatypes.Account_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAttributes", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) GetAuxiliaryNotifications() (resp []datatypes.Container_Utility_Message, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAuxiliaryNotifications", nil, &r.Options, &resp) + return +} + +// Retrieve The public network VLANs assigned to an account. +func (r Account) GetAvailablePublicNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAvailablePublicNetworkVlans", nil, &r.Options, &resp) + return +} + +// Returns the average disk space usage for all archive repositories. +func (r Account) GetAverageArchiveUsageMetricDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account", "getAverageArchiveUsageMetricDataByDate", params, &r.Options, &resp) + return +} + +// Returns the average disk space usage for all public repositories. +func (r Account) GetAveragePublicUsageMetricDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account", "getAveragePublicUsageMetricDataByDate", params, &r.Options, &resp) + return +} + +// Retrieve The account balance of a SoftLayer customer account. An account's balance is the amount of money owed to SoftLayer by the account holder, returned as a floating point number with two decimal places, measured in US Dollars ($USD). A negative account balance means the account holder has overpaid and is owed money by SoftLayer. +func (r Account) GetBalance() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBalance", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allotments for an account. +func (r Account) GetBandwidthAllotments() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBandwidthAllotments", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allotments for an account currently over allocation. +func (r Account) GetBandwidthAllotmentsOverAllocation() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBandwidthAllotmentsOverAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allotments for an account projected to go over allocation. +func (r Account) GetBandwidthAllotmentsProjectedOverAllocation() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBandwidthAllotmentsProjectedOverAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated bare metal server objects. 
+func (r Account) GetBareMetalInstances() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBareMetalInstances", nil, &r.Options, &resp) + return +} + +// Retrieve All billing agreements for an account +func (r Account) GetBillingAgreements() (resp []datatypes.Account_Agreement, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBillingAgreements", nil, &r.Options, &resp) + return +} + +// Retrieve An account's billing information. +func (r Account) GetBillingInfo() (resp datatypes.Billing_Info, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBillingInfo", nil, &r.Options, &resp) + return +} + +// Retrieve Private template group objects (parent and children) and the shared template group objects (parent only) for an account. +func (r Account) GetBlockDeviceTemplateGroups() (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBlockDeviceTemplateGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The Bluemix account link associated with this SoftLayer account, if one exists. +func (r Account) GetBluemixAccountLink() (resp datatypes.Account_Link_Bluemix, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBluemixAccountLink", nil, &r.Options, &resp) + return +} + +// Retrieve Returns true if this account is linked to IBM Bluemix, false if not. +func (r Account) GetBluemixLinkedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBluemixLinkedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetBrand() (resp datatypes.Brand, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBrand", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetBrandAccountFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBrandAccountFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The brand keyName. +func (r Account) GetBrandKeyName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBrandKeyName", nil, &r.Options, &resp) + return +} + +// Retrieve The Business Partner details for the account. Country Enterprise Code, Channel, Segment, Reseller Level. +func (r Account) GetBusinessPartner() (resp datatypes.Account_Business_Partner, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBusinessPartner", nil, &r.Options, &resp) + return +} + +// Retrieve Indicating whether this account can order additional Vlans. +func (r Account) GetCanOrderAdditionalVlansFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getCanOrderAdditionalVlansFlag", nil, &r.Options, &resp) + return +} + +// Retrieve An account's active carts. +func (r Account) GetCarts() (resp []datatypes.Billing_Order_Quote, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getCarts", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetCatalystEnrollments() (resp []datatypes.Catalyst_Enrollment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getCatalystEnrollments", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated CDN accounts. +func (r Account) GetCdnAccounts() (resp []datatypes.Network_ContentDelivery_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getCdnAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve All closed tickets associated with an account. 
+func (r Account) GetClosedTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getClosedTickets", nil, &r.Options, &resp) + return +} + +// This method returns a SoftLayer_Container_Account_Graph_Outputs containing a base64 string PNG image. The optional parameter, detailedGraph, can be passed to get a more detailed graph. +func (r Account) GetCurrentBackupStatisticsGraph(detailedGraph *bool) (resp datatypes.Container_Account_Graph_Outputs, err error) { + params := []interface{}{ + detailedGraph, + } + err = r.Session.DoRequest("SoftLayer_Account", "getCurrentBackupStatisticsGraph", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) GetCurrentTicketStatisticsGraph(detailedGraph *bool) (resp datatypes.Container_Account_Graph_Outputs, err error) { + params := []interface{}{ + detailedGraph, + } + err = r.Session.DoRequest("SoftLayer_Account", "getCurrentTicketStatisticsGraph", params, &r.Options, &resp) + return +} + +// Retrieve the user record of the user calling the SoftLayer API. +func (r Account) GetCurrentUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getCurrentUser", nil, &r.Options, &resp) + return +} + +// Retrieve Datacenters which contain subnets that the account has access to route. +func (r Account) GetDatacentersWithSubnetAllocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getDatacentersWithSubnetAllocations", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated virtual dedicated host objects. +func (r Account) GetDedicatedHosts() (resp []datatypes.Virtual_DedicatedHost, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getDedicatedHosts", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating whether payments are processed for this account. +func (r Account) GetDisablePaymentProcessingFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getDisablePaymentProcessingFlag", nil, &r.Options, &resp) + return +} + +// Retrieve disk usage data on a [[SoftLayer_Virtual_Guest|Cloud Computing Instance]] image for the time range you provide from the Metric Tracking Object System and Legacy Data Warehouse. Each data entry objects contain ''dateTime'' and ''counter'' properties. ''dateTime'' property indicates the time that the disk usage data was measured and ''counter'' property holds the disk usage in bytes. +func (r Account) GetDiskUsageMetricDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account", "getDiskUsageMetricDataByDate", params, &r.Options, &resp) + return +} + +// Retrieve disk usage data on a [[SoftLayer_Virtual_Guest|Cloud Computing Instance]] image for the time range you provide from the Legacy Data Warehouse. Each data entry objects contain ''dateTime'' and ''counter'' properties. ''dateTime'' property indicates the time that the disk usage data was measured and ''counter'' property holds the disk usage in bytes. 
+func (r Account) GetDiskUsageMetricDataFromLegacyByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account", "getDiskUsageMetricDataFromLegacyByDate", params, &r.Options, &resp) + return +} + +// Retrieve disk usage data on a [[SoftLayer_Virtual_Guest|Cloud Computing Instance]] image for the time range you provide from the Metric Tracking Object System. Each data entry object contains ''dateTime'' and ''counter'' properties. ''dateTime'' property indicates the time that the disk usage data was measured and ''counter'' property holds the disk usage in bytes. +func (r Account) GetDiskUsageMetricDataFromMetricTrackingObjectSystemByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account", "getDiskUsageMetricDataFromMetricTrackingObjectSystemByDate", params, &r.Options, &resp) + return +} + +// Returns a disk usage image based on disk usage specified by the input parameters. +func (r Account) GetDiskUsageMetricImageByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp datatypes.Container_Account_Graph_Outputs, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account", "getDiskUsageMetricImageByDate", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer employees that an account is assigned to. +func (r Account) GetDisplaySupportRepresentativeAssignments() (resp []datatypes.Account_Attachment_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getDisplaySupportRepresentativeAssignments", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetDomainRegistrations() (resp []datatypes.Dns_Domain_Registration, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getDomainRegistrations", nil, &r.Options, &resp) + return +} + +// Retrieve The DNS domains associated with an account. +func (r Account) GetDomains() (resp []datatypes.Dns_Domain, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getDomains", nil, &r.Options, &resp) + return +} + +// Retrieve The DNS domains associated with an account that were not created as a result of a secondary DNS zone transfer. +func (r Account) GetDomainsWithoutSecondaryDnsRecords() (resp []datatypes.Dns_Domain, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getDomainsWithoutSecondaryDnsRecords", nil, &r.Options, &resp) + return +} + +// Retrieve Boolean flag dictating whether or not this account has the EU Supported flag. This flag indicates that this account uses IBM Cloud services to process EU citizen's personal data. +func (r Account) GetEuSupportedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getEuSupportedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The total capacity of Legacy EVault Volumes on an account, in GB. +func (r Account) GetEvaultCapacityGB() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getEvaultCapacityGB", nil, &r.Options, &resp) + return +} + +// Retrieve An account's master EVault user. This is only used when an account has EVault service. 
+func (r Account) GetEvaultMasterUsers() (resp []datatypes.Account_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getEvaultMasterUsers", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated EVault storage volumes. +func (r Account) GetEvaultNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getEvaultNetworkStorage", nil, &r.Options, &resp) + return +} + +// This method will return a PDF of the specified report, with the specified period within the start and end dates. The pdfType must be one of 'snapshot', or 'historical'. Possible historicalType parameters are 'monthly', 'yearly', and 'quarterly'. Start and end dates should be in ISO 8601 date format. +func (r Account) GetExecutiveSummaryPdf(pdfType *string, historicalType *string, startDate *string, endDate *string) (resp []byte, err error) { + params := []interface{}{ + pdfType, + historicalType, + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account", "getExecutiveSummaryPdf", params, &r.Options, &resp) + return +} + +// Retrieve Stored security certificates that are expired (ie. SSL) +func (r Account) GetExpiredSecurityCertificates() (resp []datatypes.Security_Certificate, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getExpiredSecurityCertificates", nil, &r.Options, &resp) + return +} + +// Retrieve Logs of who entered a colocation area which is assigned to this account, or when a user under this account enters a datacenter. +func (r Account) GetFacilityLogs() (resp []datatypes.User_Access_Facility_Log, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getFacilityLogs", nil, &r.Options, &resp) + return +} + +// Retrieve All of the account's current and former Flexible Credit enrollments. +func (r Account) GetFlexibleCreditEnrollments() (resp []datatypes.FlexibleCredit_Enrollment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getFlexibleCreditEnrollments", nil, &r.Options, &resp) + return +} + +// This method will return a [[SoftLayer_Container_Account_Discount_Program]] object containing the Flexible Credit Program information for this account. To be considered an active participant, the account must have an enrollment record with a monthly credit amount set and the current date must be within the range defined by the enrollment and graduation date. The forNextBillCycle parameter can be set to true to return a SoftLayer_Container_Account_Discount_Program object with information with relation to the next bill cycle. The forNextBillCycle parameter defaults to false. Please note that all discount amount entries are reported as pre-tax amounts and the legacy tax fields in the [[SoftLayer_Container_Account_Discount_Program]] are deprecated. +func (r Account) GetFlexibleCreditProgramInfo(forNextBillCycle *bool) (resp datatypes.Container_Account_Discount_Program, err error) { + params := []interface{}{ + forNextBillCycle, + } + err = r.Session.DoRequest("SoftLayer_Account", "getFlexibleCreditProgramInfo", params, &r.Options, &resp) + return +} + +// Retrieve Timestamp representing the point in time when an account is required to link with PaaS. 
+func (r Account) GetForcePaasAccountLinkDate() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getForcePaasAccountLinkDate", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetGlobalIpRecords() (resp []datatypes.Network_Subnet_IpAddress_Global, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getGlobalIpRecords", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetGlobalIpv4Records() (resp []datatypes.Network_Subnet_IpAddress_Global, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getGlobalIpv4Records", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetGlobalIpv6Records() (resp []datatypes.Network_Subnet_IpAddress_Global, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getGlobalIpv6Records", nil, &r.Options, &resp) + return +} + +// Retrieve The global load balancer accounts for a softlayer customer account. +func (r Account) GetGlobalLoadBalancerAccounts() (resp []datatypes.Network_LoadBalancer_Global_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getGlobalLoadBalancerAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated hardware objects. +func (r Account) GetHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated hardware objects currently over bandwidth allocation. +func (r Account) GetHardwareOverBandwidthAllocation() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHardwareOverBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Return a collection of managed hardware pools. +func (r Account) GetHardwarePools() (resp []datatypes.Container_Hardware_Pool_Details, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHardwarePools", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated hardware objects projected to go over bandwidth allocation. +func (r Account) GetHardwareProjectedOverBandwidthAllocation() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHardwareProjectedOverBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware associated with an account that has the cPanel web hosting control panel installed. +func (r Account) GetHardwareWithCpanel() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithCpanel", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware associated with an account that has the Helm web hosting control panel installed. +func (r Account) GetHardwareWithHelm() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithHelm", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware associated with an account that has McAfee Secure software components. +func (r Account) GetHardwareWithMcafee() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithMcafee", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware associated with an account that has McAfee Secure AntiVirus for Redhat software components. 
+func (r Account) GetHardwareWithMcafeeAntivirusRedhat() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithMcafeeAntivirusRedhat", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware associated with an account that has McAfee Secure AntiVirus for Windows software components. +func (r Account) GetHardwareWithMcafeeAntivirusWindows() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithMcafeeAntivirusWindows", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware associated with an account that has McAfee Secure Intrusion Detection System software components. +func (r Account) GetHardwareWithMcafeeIntrusionDetectionSystem() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithMcafeeIntrusionDetectionSystem", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware associated with an account that has the Plesk web hosting control panel installed. +func (r Account) GetHardwareWithPlesk() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithPlesk", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware associated with an account that has the QuantaStor storage system installed. +func (r Account) GetHardwareWithQuantastor() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithQuantastor", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware associated with an account that has the Urchin web traffic analytics package installed. +func (r Account) GetHardwareWithUrchin() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithUrchin", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware associated with an account that is running a version of the Microsoft Windows operating system. +func (r Account) GetHardwareWithWindows() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithWindows", nil, &r.Options, &resp) + return +} + +// Retrieve Return 1 if one of the account's hardware has the EVault Bare Metal Server Restore Plugin otherwise 0. +func (r Account) GetHasEvaultBareMetalRestorePluginFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHasEvaultBareMetalRestorePluginFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Return 1 if one of the account's hardware has an installation of Idera Server Backup otherwise 0. +func (r Account) GetHasIderaBareMetalRestorePluginFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHasIderaBareMetalRestorePluginFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The number of orders in a PENDING status for a SoftLayer customer account. +func (r Account) GetHasPendingOrder() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHasPendingOrder", nil, &r.Options, &resp) + return +} + +// Retrieve Return 1 if one of the account's hardware has an installation of R1Soft CDP otherwise 0. 
+func (r Account) GetHasR1softBareMetalRestorePluginFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHasR1softBareMetalRestorePluginFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) GetHistoricalBackupGraph(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Container_Account_Graph_Outputs, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account", "getHistoricalBackupGraph", params, &r.Options, &resp) + return +} + +// This method will return a SoftLayer_Container_Account_Graph_Outputs object containing a base64 string PNG image of a line graph of bandwidth statistics given the start and end dates. The start and end dates should be valid ISO 8601 date formatted strings. +func (r Account) GetHistoricalBandwidthGraph(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Container_Account_Graph_Outputs, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account", "getHistoricalBandwidthGraph", params, &r.Options, &resp) + return +} + +// Given the start and end dates, this method will return a pie chart of ticket statistics in the form of SoftLayer_Container_Account_Graph_Outputs object with a base64 PNG string. If an error occurs the graphError parameter will be populated. Possible errors include: SoftLayer_Exception_Public Thrown if an invalid start or end date is provided. SoftLayer_Exception Thrown if there is an error connecting to HBase. SoftLayer_Exception Thrown if there is no data available for the specified date range. SoftLayer_Exception Thrown if there is an error retrieving data or generating the graph. +func (r Account) GetHistoricalTicketGraph(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Container_Account_Graph_Outputs, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account", "getHistoricalTicketGraph", params, &r.Options, &resp) + return +} + +// The graph image is returned as a base64 PNG string. Start and end dates should be formatted using the ISO 8601 date standard. If there is an error retrieving graph data or generating the graph string a graphError attribute will be returned. The graphError attribute may contain any of the following error messages: SoftLayer_Exception_Public Thrown if an invalid start or end date is provided. SoftLayer_Exception Thrown if there is an error connecting to HBase. SoftLayer_Exception Thrown if there is no data available for the specified date range. SoftLayer_Exception Thrown if there is an error retrieving data or generating the graph. +func (r Account) GetHistoricalUptimeGraph(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Container_Account_Graph_Outputs, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account", "getHistoricalUptimeGraph", params, &r.Options, &resp) + return +} + +// Retrieve An account's associated hourly bare metal server objects. +func (r Account) GetHourlyBareMetalInstances() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHourlyBareMetalInstances", nil, &r.Options, &resp) + return +} + +// Retrieve Hourly service billing items that will be on an account's next invoice. 
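A sketch of the historical graph calls, reusing the account value from the first example and assuming the "time" and datatypes imports; the one-month window is arbitrary. The returned container is documented above to hold a base64 PNG string, with a graph error populated on failure.

// bandwidthGraph requests last month's bandwidth line graph.
func bandwidthGraph(account services.Account) error {
	end := datatypes.Time{Time: time.Now()}
	start := datatypes.Time{Time: end.Time.AddDate(0, -1, 0)}
	graph, err := account.GetHistoricalBandwidthGraph(&start, &end)
	if err != nil {
		return err
	}
	// Inspect the container fields (base64 PNG image, graph error) as needed.
	fmt.Printf("%+v\n", graph)
	return nil
}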
+func (r Account) GetHourlyServiceBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHourlyServiceBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated hourly virtual guest objects. +func (r Account) GetHourlyVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHourlyVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated Virtual Storage volumes. +func (r Account) GetHubNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHubNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve Unique identifier for a customer used throughout IBM. +func (r Account) GetIbmCustomerNumber() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getIbmCustomerNumber", nil, &r.Options, &resp) + return +} + +// Retrieve Indicates whether this account requires IBMid authentication. +func (r Account) GetIbmIdAuthenticationRequiredFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getIbmIdAuthenticationRequiredFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Timestamp representing the point in time when an account is required to use IBMid authentication. +func (r Account) GetIbmIdMigrationExpirationTimestamp() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getIbmIdMigrationExpirationTimestamp", nil, &r.Options, &resp) + return +} + +// Retrieve An in progress request to switch billing systems. +func (r Account) GetInProgressExternalAccountSetup() (resp datatypes.Account_External_Setup, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getInProgressExternalAccountSetup", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetInternalNotes() (resp []datatypes.Account_Note, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getInternalNotes", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated billing invoices. +func (r Account) GetInvoices() (resp []datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getInvoices", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated iSCSI storage volumes. +func (r Account) GetIscsiNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getIscsiNetworkStorage", nil, &r.Options, &resp) + return +} + +// Computes the number of available public secondary IP addresses, aligned to a subnet size. +func (r Account) GetLargestAllowedSubnetCidr(numberOfHosts *int, locationId *int) (resp int, err error) { + params := []interface{}{ + numberOfHosts, + locationId, + } + err = r.Session.DoRequest("SoftLayer_Account", "getLargestAllowedSubnetCidr", params, &r.Options, &resp) + return +} + +// Retrieve The most recently canceled billing item. +func (r Account) GetLastCanceledBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastCanceledBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The most recent cancelled server billing item. 
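A short sketch of GetLargestAllowedSubnetCidr, reusing the account value from the first example; the host count is arbitrary and the location id is an explicit placeholder, since valid datacenter ids are not shown in this file.

// largestAllowedCidr asks how large a secondary public subnet could be
// ordered for 64 hosts at a given datacenter.
func largestAllowedCidr(account services.Account) (int, error) {
	locationID := 0 // placeholder: supply a real datacenter location id
	return account.GetLargestAllowedSubnetCidr(sl.Int(64), sl.Int(locationID))
}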
+func (r Account) GetLastCancelledServerBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastCancelledServerBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The five most recently closed abuse tickets associated with an account. +func (r Account) GetLastFiveClosedAbuseTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastFiveClosedAbuseTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The five most recently closed accounting tickets associated with an account. +func (r Account) GetLastFiveClosedAccountingTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastFiveClosedAccountingTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The five most recently closed tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account. +func (r Account) GetLastFiveClosedOtherTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastFiveClosedOtherTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The five most recently closed sales tickets associated with an account. +func (r Account) GetLastFiveClosedSalesTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastFiveClosedSalesTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The five most recently closed support tickets associated with an account. +func (r Account) GetLastFiveClosedSupportTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastFiveClosedSupportTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The five most recently closed tickets associated with an account. +func (r Account) GetLastFiveClosedTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastFiveClosedTickets", nil, &r.Options, &resp) + return +} + +// Retrieve An account's most recent billing date. +func (r Account) GetLatestBillDate() (resp datatypes.Time, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLatestBillDate", nil, &r.Options, &resp) + return +} + +// Retrieve An account's latest recurring invoice. +func (r Account) GetLatestRecurringInvoice() (resp datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLatestRecurringInvoice", nil, &r.Options, &resp) + return +} + +// Retrieve An account's latest recurring pending invoice. +func (r Account) GetLatestRecurringPendingInvoice() (resp datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLatestRecurringPendingInvoice", nil, &r.Options, &resp) + return +} + +// Retrieve The legacy bandwidth allotments for an account. +func (r Account) GetLegacyBandwidthAllotments() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLegacyBandwidthAllotments", nil, &r.Options, &resp) + return +} + +// Retrieve The total capacity of Legacy iSCSI Volumes on an account, in GB. +func (r Account) GetLegacyIscsiCapacityGB() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLegacyIscsiCapacityGB", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated load balancers. 
+func (r Account) GetLoadBalancers() (resp []datatypes.Network_LoadBalancer_VirtualIpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLoadBalancers", nil, &r.Options, &resp) + return +} + +// Retrieve The total capacity of Legacy lockbox Volumes on an account, in GB. +func (r Account) GetLockboxCapacityGB() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLockboxCapacityGB", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated Lockbox storage volumes. +func (r Account) GetLockboxNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLockboxNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetManualPaymentsUnderReview() (resp []datatypes.Billing_Payment_Card_ManualPayment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getManualPaymentsUnderReview", nil, &r.Options, &resp) + return +} + +// Retrieve An account's master user. +func (r Account) GetMasterUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getMasterUser", nil, &r.Options, &resp) + return +} + +// Retrieve An account's media transfer service requests. +func (r Account) GetMediaDataTransferRequests() (resp []datatypes.Account_Media_Data_Transfer_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getMediaDataTransferRequests", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated monthly bare metal server objects. +func (r Account) GetMonthlyBareMetalInstances() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getMonthlyBareMetalInstances", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated monthly virtual guest objects. +func (r Account) GetMonthlyVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getMonthlyVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated NAS storage volumes. +func (r Account) GetNasNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNasNetworkStorage", nil, &r.Options, &resp) + return +} + +// This returns a collection of active NetApp software account license keys. +func (r Account) GetNetAppActiveAccountLicenseKeys() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetAppActiveAccountLicenseKeys", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not this account can define their own networks. +func (r Account) GetNetworkCreationFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetworkCreationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve All network gateway devices on this account. +func (r Account) GetNetworkGateways() (resp []datatypes.Network_Gateway, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetworkGateways", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated network hardware. 
+func (r Account) GetNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetNetworkMessageDeliveryAccounts() (resp []datatypes.Network_Message_Delivery, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetworkMessageDeliveryAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve Hardware which is currently experiencing a service failure. +func (r Account) GetNetworkMonitorDownHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetworkMonitorDownHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Virtual guest which is currently experiencing a service failure. +func (r Account) GetNetworkMonitorDownVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetworkMonitorDownVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve Hardware which is currently recovering from a service failure. +func (r Account) GetNetworkMonitorRecoveringHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetworkMonitorRecoveringHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Virtual guest which is currently recovering from a service failure. +func (r Account) GetNetworkMonitorRecoveringVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetworkMonitorRecoveringVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve Hardware which is currently online. +func (r Account) GetNetworkMonitorUpHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetworkMonitorUpHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Virtual guest which is currently online. +func (r Account) GetNetworkMonitorUpVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetworkMonitorUpVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated storage volumes. This includes Lockbox, NAS, EVault, and iSCSI volumes. +func (r Account) GetNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve An account's Network Storage groups. +func (r Account) GetNetworkStorageGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetworkStorageGroups", nil, &r.Options, &resp) + return +} + +// Retrieve IPSec network tunnels for an account. +func (r Account) GetNetworkTunnelContexts() (resp []datatypes.Network_Tunnel_Module_Context, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetworkTunnelContexts", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not an account has automatic private VLAN spanning enabled. +func (r Account) GetNetworkVlanSpan() (resp datatypes.Account_Network_Vlan_Span, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetworkVlanSpan", nil, &r.Options, &resp) + return +} + +// Retrieve All network VLANs assigned to an account. 
+func (r Account) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNetworkVlans", nil, &r.Options, &resp) + return +} + +// Retrieve DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers for the next billing cycle. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date. +func (r Account) GetNextBillingPublicAllotmentHardwareBandwidthDetails() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextBillingPublicAllotmentHardwareBandwidthDetails", nil, &r.Options, &resp) + return +} + +// Return an account's next invoice in a Microsoft excel format. The "next invoice" is what a customer will be billed on their next invoice, assuming no changes are made. Currently this does not include Bandwidth Pooling charges. +func (r Account) GetNextInvoiceExcel(documentCreateDate *datatypes.Time) (resp []byte, err error) { + params := []interface{}{ + documentCreateDate, + } + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceExcel", params, &r.Options, &resp) + return +} + +// Retrieve The pre-tax total amount exempt from incubator credit for the account's next invoice. This field is now deprecated and will soon be removed. Please update all references to instead use nextInvoiceTotalAmount +func (r Account) GetNextInvoiceIncubatorExemptTotal() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceIncubatorExemptTotal", nil, &r.Options, &resp) + return +} + +// Return an account's next invoice in PDF format. The "next invoice" is what a customer will be billed on their next invoice, assuming no changes are made. Currently this does not include Bandwidth Pooling charges. +func (r Account) GetNextInvoicePdf(documentCreateDate *datatypes.Time) (resp []byte, err error) { + params := []interface{}{ + documentCreateDate, + } + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoicePdf", params, &r.Options, &resp) + return +} + +// Return an account's next invoice detailed portion in PDF format. The "next invoice" is what a customer will be billed on their next invoice, assuming no changes are made. Currently this does not include Bandwidth Pooling charges. +func (r Account) GetNextInvoicePdfDetailed(documentCreateDate *datatypes.Time) (resp []byte, err error) { + params := []interface{}{ + documentCreateDate, + } + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoicePdfDetailed", params, &r.Options, &resp) + return +} + +// Retrieve The billing items that will be on an account's next invoice. +func (r Account) GetNextInvoiceTopLevelBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTopLevelBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve The pre-tax total amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. 
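A sketch of the next-invoice preview calls, reusing the account value and imports from the earlier examples; the documentCreateDate here is simply the current time and the output file name is a placeholder.

// saveNextInvoicePdf writes a preview of the next invoice to disk.
func saveNextInvoicePdf(account services.Account) error {
	created := datatypes.Time{Time: time.Now()}
	pdf, err := account.GetNextInvoicePdf(&created)
	if err != nil {
		return err
	}
	return os.WriteFile("next-invoice.pdf", pdf, 0o600)
}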
+func (r Account) GetNextInvoiceTotalAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTotalAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total one-time charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. +func (r Account) GetNextInvoiceTotalOneTimeAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTotalOneTimeAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total one-time tax amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. +func (r Account) GetNextInvoiceTotalOneTimeTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTotalOneTimeTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total recurring charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. +func (r Account) GetNextInvoiceTotalRecurringAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTotalRecurringAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total recurring charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. +func (r Account) GetNextInvoiceTotalRecurringAmountBeforeAccountDiscount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTotalRecurringAmountBeforeAccountDiscount", nil, &r.Options, &resp) + return +} + +// Retrieve The total recurring tax amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. +func (r Account) GetNextInvoiceTotalRecurringTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTotalRecurringTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total recurring charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. +func (r Account) GetNextInvoiceTotalTaxableRecurringAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTotalTaxableRecurringAmount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) GetNextInvoiceZeroFeeItemCounts() (resp []datatypes.Container_Product_Item_Category_ZeroFee_Count, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceZeroFeeItemCounts", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetNotificationSubscribers() (resp []datatypes.Notification_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNotificationSubscribers", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Account object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Account service. You can only retrieve the account that your portal user is assigned to. 
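The next-invoice totals above are split into pre-tax one-time and recurring amounts plus their tax counterparts. A rough gross estimate can be assembled by adding the four, as in this sketch (reusing the account value from the first example; datatypes.Float64 converts directly to float64).

// estimateNextInvoice adds the pre-tax one-time and recurring amounts and
// their tax counterparts for a rough gross total of the next invoice.
func estimateNextInvoice(account services.Account) (float64, error) {
	oneTime, err := account.GetNextInvoiceTotalOneTimeAmount()
	if err != nil {
		return 0, err
	}
	oneTimeTax, err := account.GetNextInvoiceTotalOneTimeTaxAmount()
	if err != nil {
		return 0, err
	}
	recurring, err := account.GetNextInvoiceTotalRecurringAmount()
	if err != nil {
		return 0, err
	}
	recurringTax, err := account.GetNextInvoiceTotalRecurringTaxAmount()
	if err != nil {
		return 0, err
	}
	return float64(oneTime) + float64(oneTimeTax) +
		float64(recurring) + float64(recurringTax), nil
}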
+func (r Account) GetObject() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The open abuse tickets associated with an account. +func (r Account) GetOpenAbuseTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenAbuseTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The open accounting tickets associated with an account. +func (r Account) GetOpenAccountingTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenAccountingTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The open billing tickets associated with an account. +func (r Account) GetOpenBillingTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenBillingTickets", nil, &r.Options, &resp) + return +} + +// Retrieve An open ticket requesting cancellation of this server, if one exists. +func (r Account) GetOpenCancellationRequests() (resp []datatypes.Billing_Item_Cancellation_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenCancellationRequests", nil, &r.Options, &resp) + return +} + +// Retrieve The open tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account. +func (r Account) GetOpenOtherTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenOtherTickets", nil, &r.Options, &resp) + return +} + +// Retrieve An account's recurring invoices. +func (r Account) GetOpenRecurringInvoices() (resp []datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenRecurringInvoices", nil, &r.Options, &resp) + return +} + +// Retrieve The open sales tickets associated with an account. +func (r Account) GetOpenSalesTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenSalesTickets", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetOpenStackAccountLinks() (resp []datatypes.Account_Link, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenStackAccountLinks", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated Openstack related Object Storage accounts. +func (r Account) GetOpenStackObjectStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenStackObjectStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The open support tickets associated with an account. +func (r Account) GetOpenSupportTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenSupportTickets", nil, &r.Options, &resp) + return +} + +// Retrieve All open tickets associated with an account. +func (r Account) GetOpenTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenTickets", nil, &r.Options, &resp) + return +} + +// Retrieve All open tickets associated with an account last edited by an employee. +func (r Account) GetOpenTicketsWaitingOnCustomer() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenTicketsWaitingOnCustomer", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated billing orders excluding upgrades. 
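As the getObject comment above notes, GetObject returns the account tied to the calling portal user, so no id needs to be supplied. A sketch, reusing the account value from the first example and assuming the chainable Mask helper; the field names in the mask correspond to the Id, CompanyName, and Email properties on datatypes.Account.

// whoAmI fetches the calling user's account record, masked down to a few
// identifying fields.
func whoAmI(account services.Account) error {
	acct, err := account.Mask("id;companyName;email").GetObject()
	if err != nil {
		return err
	}
	if acct.Id != nil && acct.CompanyName != nil && acct.Email != nil {
		fmt.Printf("account %d: %s <%s>\n", *acct.Id, *acct.CompanyName, *acct.Email)
	}
	return nil
}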
+func (r Account) GetOrders() (resp []datatypes.Billing_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOrders", nil, &r.Options, &resp) + return +} + +// Retrieve The billing items that have no parent billing item. These are items that don't necessarily belong to a single server. +func (r Account) GetOrphanBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOrphanBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetOwnedBrands() (resp []datatypes.Brand, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOwnedBrands", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetOwnedHardwareGenericComponentModels() (resp []datatypes.Hardware_Component_Model_Generic, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOwnedHardwareGenericComponentModels", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetPaymentProcessors() (resp []datatypes.Billing_Payment_Processor, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPaymentProcessors", nil, &r.Options, &resp) + return +} + +// Before being approved for general use, a credit card must be approved by a SoftLayer agent. Once a credit card change request has been either approved or denied, the change request will no longer appear in the list of pending change requests. This method will return a list of all pending change requests as well as a portion of the data from the original request. +func (r Account) GetPendingCreditCardChangeRequestData() (resp []datatypes.Container_Account_Payment_Method_CreditCard, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPendingCreditCardChangeRequestData", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetPendingEvents() (resp []datatypes.Notification_Occurrence_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPendingEvents", nil, &r.Options, &resp) + return +} + +// Retrieve An account's latest open (pending) invoice. +func (r Account) GetPendingInvoice() (resp datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPendingInvoice", nil, &r.Options, &resp) + return +} + +// Retrieve A list of top-level invoice items that are on an account's currently pending invoice. +func (r Account) GetPendingInvoiceTopLevelItems() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPendingInvoiceTopLevelItems", nil, &r.Options, &resp) + return +} + +// Retrieve The total amount of an account's pending invoice, if one exists. +func (r Account) GetPendingInvoiceTotalAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPendingInvoiceTotalAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total one-time charges for an account's pending invoice, if one exists. In other words, it is the sum of one-time charges, setup fees, and labor fees. It does not include taxes. +func (r Account) GetPendingInvoiceTotalOneTimeAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPendingInvoiceTotalOneTimeAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The sum of all the taxes related to one time charges for an account's pending invoice, if one exists. 
+func (r Account) GetPendingInvoiceTotalOneTimeTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPendingInvoiceTotalOneTimeTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total recurring amount of an account's pending invoice, if one exists. +func (r Account) GetPendingInvoiceTotalRecurringAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPendingInvoiceTotalRecurringAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total amount of the recurring taxes on an account's pending invoice, if one exists. +func (r Account) GetPendingInvoiceTotalRecurringTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPendingInvoiceTotalRecurringTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve An account's permission groups. +func (r Account) GetPermissionGroups() (resp []datatypes.User_Permission_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPermissionGroups", nil, &r.Options, &resp) + return +} + +// Retrieve An account's user roles. +func (r Account) GetPermissionRoles() (resp []datatypes.User_Permission_Role, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPermissionRoles", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetPortableStorageVolumes() (resp []datatypes.Virtual_Disk_Image, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPortableStorageVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve Customer specified URIs that are downloaded onto a newly provisioned or reloaded server. If the URI is sent over https it will be executed directly on the server. +func (r Account) GetPostProvisioningHooks() (resp []datatypes.Provisioning_Hook, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPostProvisioningHooks", nil, &r.Options, &resp) + return +} + +// Retrieve Boolean flag dictating whether or not this account supports PPTP VPN Access. +func (r Account) GetPptpVpnAllowedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPptpVpnAllowedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated portal users with PPTP VPN access. +func (r Account) GetPptpVpnUsers() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPptpVpnUsers", nil, &r.Options, &resp) + return +} + +// Retrieve The total recurring amount for an accounts previous revenue. +func (r Account) GetPreviousRecurringRevenue() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPreviousRecurringRevenue", nil, &r.Options, &resp) + return +} + +// Retrieve The item price that an account is restricted to. +func (r Account) GetPriceRestrictions() (resp []datatypes.Product_Item_Price_Account_Restriction, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPriceRestrictions", nil, &r.Options, &resp) + return +} + +// Retrieve All priority one tickets associated with an account. +func (r Account) GetPriorityOneTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPriorityOneTickets", nil, &r.Options, &resp) + return +} + +// Retrieve DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers. 

+// The private inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date. +func (r Account) GetPrivateAllotmentHardwareBandwidthDetails() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPrivateAllotmentHardwareBandwidthDetails", nil, &r.Options, &resp) + return +} + +// Retrieve Private and shared template group objects (parent only) for an account. +func (r Account) GetPrivateBlockDeviceTemplateGroups() (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPrivateBlockDeviceTemplateGroups", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetPrivateIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPrivateIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The private network VLANs assigned to an account. +func (r Account) GetPrivateNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPrivateNetworkVlans", nil, &r.Options, &resp) + return +} + +// Retrieve All private subnets associated with an account. +func (r Account) GetPrivateSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPrivateSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve Boolean flag indicating whether or not this account is a Proof of Concept account. +func (r Account) GetProofOfConceptAccountFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getProofOfConceptAccountFlag", nil, &r.Options, &resp) + return +} + +// Retrieve DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date. +func (r Account) GetPublicAllotmentHardwareBandwidthDetails() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPublicAllotmentHardwareBandwidthDetails", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetPublicIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPublicIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The public network VLANs assigned to an account. +func (r Account) GetPublicNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPublicNetworkVlans", nil, &r.Options, &resp) + return +} + +// Retrieve All public network subnets associated with an account. +func (r Account) GetPublicSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPublicSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve An account's quotes.
+func (r Account) GetQuotes() (resp []datatypes.Billing_Order_Quote, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getQuotes", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetRecentEvents() (resp []datatypes.Notification_Occurrence_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getRecentEvents", nil, &r.Options, &resp) + return +} + +// Retrieve The Referral Partner for this account, if any. +func (r Account) GetReferralPartner() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getReferralPartner", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) GetReferralPartnerCommissionForecast() (resp []datatypes.Container_Referral_Partner_Commission, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getReferralPartnerCommissionForecast", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) GetReferralPartnerCommissionHistory() (resp []datatypes.Container_Referral_Partner_Commission, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getReferralPartnerCommissionHistory", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) GetReferralPartnerCommissionPending() (resp []datatypes.Container_Referral_Partner_Commission, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getReferralPartnerCommissionPending", nil, &r.Options, &resp) + return +} + +// Retrieve If this is a account is a referral partner, the accounts this referral partner has referred +func (r Account) GetReferredAccounts() (resp []datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getReferredAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetRegulatedWorkloads() (resp []datatypes.Legal_RegulatedWorkload, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getRegulatedWorkloads", nil, &r.Options, &resp) + return +} + +// Retrieve Remote management command requests for an account +func (r Account) GetRemoteManagementCommandRequests() (resp []datatypes.Hardware_Component_RemoteManagement_Command_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getRemoteManagementCommandRequests", nil, &r.Options, &resp) + return +} + +// Retrieve The Replication events for all Network Storage volumes on an account. +func (r Account) GetReplicationEvents() (resp []datatypes.Network_Storage_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getReplicationEvents", nil, &r.Options, &resp) + return +} + +// Retrieve Indicates whether newly created users under this account will be associated with IBMid via an email requiring a response, or not. +func (r Account) GetRequireSilentIBMidUserCreation() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getRequireSilentIBMidUserCreation", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated top-level resource groups. +func (r Account) GetResourceGroups() (resp []datatypes.Resource_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getResourceGroups", nil, &r.Options, &resp) + return +} + +// Retrieve All Routers that an accounts VLANs reside on +func (r Account) GetRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getRouters", nil, &r.Options, &resp) + return +} + +// Retrieve An account's reverse WHOIS data. This data is used when making SWIP requests. 
+func (r Account) GetRwhoisData() (resp datatypes.Network_Subnet_Rwhois_Data, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getRwhoisData", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetSalesforceAccountLink() (resp datatypes.Account_Link, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSalesforceAccountLink", nil, &r.Options, &resp) + return +} + +// Retrieve The SAML configuration for this account. +func (r Account) GetSamlAuthentication() (resp datatypes.Account_Authentication_Saml, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSamlAuthentication", nil, &r.Options, &resp) + return +} + +// Retrieve All scale groups on this account. +func (r Account) GetScaleGroups() (resp []datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getScaleGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The secondary DNS records for a SoftLayer customer account. +func (r Account) GetSecondaryDomains() (resp []datatypes.Dns_Secondary, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSecondaryDomains", nil, &r.Options, &resp) + return +} + +// Retrieve Stored security certificates (ie. SSL) +func (r Account) GetSecurityCertificates() (resp []datatypes.Security_Certificate, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSecurityCertificates", nil, &r.Options, &resp) + return +} + +// Retrieve The security groups belonging to this account. +func (r Account) GetSecurityGroups() (resp []datatypes.Network_SecurityGroup, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSecurityGroups", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetSecurityLevel() (resp datatypes.Security_Level, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSecurityLevel", nil, &r.Options, &resp) + return +} + +// Retrieve An account's vulnerability scan requests. +func (r Account) GetSecurityScanRequests() (resp []datatypes.Network_Security_Scanner_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSecurityScanRequests", nil, &r.Options, &resp) + return +} + +// Retrieve The service billing items that will be on an account's next invoice. +func (r Account) GetServiceBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getServiceBillingItems", nil, &r.Options, &resp) + return +} + +// This method returns the [[SoftLayer_Virtual_Guest_Block_Device_Template_Group]] objects that have been shared with this account +func (r Account) GetSharedBlockDeviceTemplateGroups() (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSharedBlockDeviceTemplateGroups", nil, &r.Options, &resp) + return +} + +// Retrieve Shipments that belong to the customer's account. +func (r Account) GetShipments() (resp []datatypes.Account_Shipment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getShipments", nil, &r.Options, &resp) + return +} + +// Retrieve Customer specified SSH keys that can be implemented onto a newly provisioned or reloaded server. +func (r Account) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSshKeys", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated portal users with SSL VPN access. 
+func (r Account) GetSslVpnUsers() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSslVpnUsers", nil, &r.Options, &resp) + return +} + +// Retrieve An account's virtual guest objects that are hosted on a user provisioned hypervisor. +func (r Account) GetStandardPoolVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getStandardPoolVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetSubnetRegistrationDetails() (resp []datatypes.Account_Regional_Registry_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSubnetRegistrationDetails", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetSubnetRegistrations() (resp []datatypes.Network_Subnet_Registration, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSubnetRegistrations", nil, &r.Options, &resp) + return +} + +// Retrieve All network subnets associated with an account. +func (r Account) GetSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer employees that an account is assigned to. +func (r Account) GetSupportRepresentatives() (resp []datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSupportRepresentatives", nil, &r.Options, &resp) + return +} + +// Retrieve The active support subscriptions for this account. +func (r Account) GetSupportSubscriptions() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSupportSubscriptions", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetSupportTier() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSupportTier", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating to suppress invoices. +func (r Account) GetSuppressInvoicesFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSuppressInvoicesFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetTags() (resp []datatypes.Tag, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getTags", nil, &r.Options, &resp) + return +} + +// This method will return a SoftLayer_Container_Account_Discount_Program object containing the Technology Incubator Program information for this account. To be considered an active participant, the account must have an enrollment record with a monthly credit amount set and the current date must be within the range defined by the enrollment and graduation date. The forNextBillCycle parameter can be set to true to return a SoftLayer_Container_Account_Discount_Program object with information with relation to the next bill cycle. The forNextBillCycle parameter defaults to false. +func (r Account) GetTechIncubatorProgramInfo(forNextBillCycle *bool) (resp datatypes.Container_Account_Discount_Program, err error) { + params := []interface{}{ + forNextBillCycle, + } + err = r.Session.DoRequest("SoftLayer_Account", "getTechIncubatorProgramInfo", params, &r.Options, &resp) + return +} + +// Returns multiple [[SoftLayer_Container_Policy_Acceptance]] that represent the acceptance status of the applicable third-party policies for this account. 
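A sketch of GetTechIncubatorProgramInfo, reusing the account value from the first example; per the comment above, forNextBillCycle defaults to false, so passing sl.Bool(true) previews the upcoming bill cycle instead.

// techIncubatorNextCycle pulls the Technology Incubator discount details for
// the next bill cycle.
func techIncubatorNextCycle(account services.Account) (datatypes.Container_Account_Discount_Program, error) {
	return account.GetTechIncubatorProgramInfo(sl.Bool(true))
}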
+func (r Account) GetThirdPartyPoliciesAcceptanceStatus() (resp []datatypes.Container_Policy_Acceptance, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getThirdPartyPoliciesAcceptanceStatus", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated tickets. +func (r Account) GetTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getTickets", nil, &r.Options, &resp) + return +} + +// Retrieve Tickets closed within the last 72 hours or last 10 tickets, whichever is less, associated with an account. +func (r Account) GetTicketsClosedInTheLastThreeDays() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getTicketsClosedInTheLastThreeDays", nil, &r.Options, &resp) + return +} + +// Retrieve Tickets closed today associated with an account. +func (r Account) GetTicketsClosedToday() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getTicketsClosedToday", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated Transcode account. +func (r Account) GetTranscodeAccounts() (resp []datatypes.Network_Media_Transcode_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getTranscodeAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated upgrade requests. +func (r Account) GetUpgradeRequests() (resp []datatypes.Product_Upgrade_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getUpgradeRequests", nil, &r.Options, &resp) + return +} + +// Retrieve An account's portal users. +func (r Account) GetUsers() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getUsers", nil, &r.Options, &resp) + return +} + +// Retrieve a list of valid (non-expired) security certificates without the sensitive certificate information. This allows non-privileged users to view and select security certificates when configuring associated services. +func (r Account) GetValidSecurityCertificateEntries() (resp []datatypes.Security_Certificate_Entry, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getValidSecurityCertificateEntries", nil, &r.Options, &resp) + return +} + +// Retrieve Stored security certificates that are not expired (ie. SSL) +func (r Account) GetValidSecurityCertificates() (resp []datatypes.Security_Certificate, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getValidSecurityCertificates", nil, &r.Options, &resp) + return +} + +// Retrieve Return 0 if vpn updates are currently in progress on this account otherwise 1. +func (r Account) GetVdrUpdatesInProgressFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVdrUpdatesInProgressFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth pooling for this account. +func (r Account) GetVirtualDedicatedRacks() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualDedicatedRacks", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated virtual server virtual disk images. +func (r Account) GetVirtualDiskImages() (resp []datatypes.Virtual_Disk_Image, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualDiskImages", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated virtual guest objects. 
+func (r Account) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated virtual guest objects currently over bandwidth allocation. +func (r Account) GetVirtualGuestsOverBandwidthAllocation() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsOverBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated virtual guest objects currently over bandwidth allocation. +func (r Account) GetVirtualGuestsProjectedOverBandwidthAllocation() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsProjectedOverBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that has the cPanel web hosting control panel installed. +func (r Account) GetVirtualGuestsWithCpanel() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithCpanel", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that have McAfee Secure software components. +func (r Account) GetVirtualGuestsWithMcafee() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithMcafee", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that have McAfee Secure AntiVirus for Redhat software components. +func (r Account) GetVirtualGuestsWithMcafeeAntivirusRedhat() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithMcafeeAntivirusRedhat", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that has McAfee Secure AntiVirus for Windows software components. +func (r Account) GetVirtualGuestsWithMcafeeAntivirusWindows() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithMcafeeAntivirusWindows", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that has McAfee Secure Intrusion Detection System software components. +func (r Account) GetVirtualGuestsWithMcafeeIntrusionDetectionSystem() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithMcafeeIntrusionDetectionSystem", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that has the Plesk web hosting control panel installed. +func (r Account) GetVirtualGuestsWithPlesk() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithPlesk", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that have the QuantaStor storage system installed. +func (r Account) GetVirtualGuestsWithQuantastor() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithQuantastor", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that has the Urchin web traffic analytics package installed. 
+func (r Account) GetVirtualGuestsWithUrchin() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithUrchin", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth pooling for this account. +func (r Account) GetVirtualPrivateRack() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualPrivateRack", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated virtual server archived storage repositories. +func (r Account) GetVirtualStorageArchiveRepositories() (resp []datatypes.Virtual_Storage_Repository, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualStorageArchiveRepositories", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated virtual server public storage repositories. +func (r Account) GetVirtualStoragePublicRepositories() (resp []datatypes.Virtual_Storage_Repository, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualStoragePublicRepositories", nil, &r.Options, &resp) + return +} + +// This returns a collection of active VMware software account license keys. +func (r Account) GetVmWareActiveAccountLicenseKeys() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVmWareActiveAccountLicenseKeys", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated VPC configured virtual guest objects. +func (r Account) GetVpcVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVpcVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve a list of an account's hardware's Windows Update status. This list includes which servers have available updates, which servers require rebooting due to updates, which servers have failed retrieving updates, and which servers have failed to communicate with the SoftLayer private Windows Software Update Services server. +func (r Account) GetWindowsUpdateStatus() (resp []datatypes.Container_Utility_Microsoft_Windows_UpdateServices_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getWindowsUpdateStatus", nil, &r.Options, &resp) + return +} + +// Determine if an account has an [[SoftLayer_Account_Attribute|attribute]] associated with it. hasAttribute() returns false if the attribute does not exist or if it does not have a value. +func (r Account) HasAttribute(attributeType *string) (resp bool, err error) { + params := []interface{}{ + attributeType, + } + err = r.Session.DoRequest("SoftLayer_Account", "hasAttribute", params, &r.Options, &resp) + return +} + +// This method will return the limit (number) of hourly services the account is allowed to have. +func (r Account) HourlyInstanceLimit() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "hourlyInstanceLimit", nil, &r.Options, &resp) + return +} + +// This method will return the limit (number) of hourly bare metal servers the account is allowed to have. +func (r Account) HourlyServerLimit() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "hourlyServerLimit", nil, &r.Options, &resp) + return +} + +// Returns true if this account is eligible for the local currency program, false otherwise. 
+func (r Account) IsEligibleForLocalCurrencyProgram() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "isEligibleForLocalCurrencyProgram", nil, &r.Options, &resp) + return +} + +// Returns true if this account is eligible to link with PaaS. False otherwise. +func (r Account) IsEligibleToLinkWithPaas() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "isEligibleToLinkWithPaas", nil, &r.Options, &resp) + return +} + +// This method will link this SoftLayer account with the provided external account. +func (r Account) LinkExternalAccount(externalAccountId *string, authorizationToken *string, externalServiceProviderKey *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + externalAccountId, + authorizationToken, + externalServiceProviderKey, + } + err = r.Session.DoRequest("SoftLayer_Account", "linkExternalAccount", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) RemoveAlternateCreditCard() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "removeAlternateCreditCard", nil, &r.Options, &resp) + return +} + +// Retrieve the record data associated with the submission of a Credit Card Change Request. SoftLayer customers are permitted to request a change in Credit Card information. Part of the process calls for an attempt by SoftLayer to submit a $1.00 charge to the financial institution backing the credit card as a means of verifying that the information provided in the change request is valid. The data associated with this change request is returned to the calling function. +// +// If the onlyChangeNicknameFlag parameter is set to true, the nickname of the credit card will be changed immediately without requiring approval by an agent. To change the nickname of the active payment method, pass the empty string for paymentRoleName. To change the nickname for the alternate credit card, pass ALTERNATE_CREDIT_CARD as the paymentRoleName. vatId must be set, but the value will not be used and the empty string is acceptable. +func (r Account) RequestCreditCardChange(request *datatypes.Billing_Payment_Card_ChangeRequest, vatId *string, paymentRoleName *string, onlyChangeNicknameFlag *bool) (resp datatypes.Billing_Payment_Card_ChangeRequest, err error) { + params := []interface{}{ + request, + vatId, + paymentRoleName, + onlyChangeNicknameFlag, + } + err = r.Session.DoRequest("SoftLayer_Account", "requestCreditCardChange", params, &r.Options, &resp) + return +} + +// Retrieve the record data associated with the submission of a Manual Payment Request. SoftLayer customers are permitted to request a manual one-time payment at a minimum amount of $2.00. Customers may submit a Credit Card Payment (Mastercard, Visa, American Express) or a PayPal payment. For Credit Card Payments, SoftLayer engages the credit card financial institution to submit the payment request. The financial institution's response and other data associated with the transaction are returned to the calling function. In the case of PayPal Payments, SoftLayer engages the PayPal system to initiate the PayPal payment sequence. The applicable data generated during the request is returned to the calling function.
+func (r Account) RequestManualPayment(request *datatypes.Billing_Payment_Card_ManualPayment) (resp datatypes.Billing_Payment_Card_ManualPayment, err error) { + params := []interface{}{ + request, + } + err = r.Session.DoRequest("SoftLayer_Account", "requestManualPayment", params, &r.Options, &resp) + return +} + +// Retrieve the record data associated with the submission of a Manual Payment Request for a manual payment using a credit card which is on file and does not require an approval process. Softlayer customers are permitted to request a manual one-time payment at a minimum amount of $2.00. Customers may use an existing Credit Card on file (Mastercard, Visa, American Express). SoftLayer engages the credit card financial institution to submit the payment request. The financial institution's response and other data associated with the transaction are returned to the calling function. The applicable data generated during the request is returned to the calling function. +func (r Account) RequestManualPaymentUsingCreditCardOnFile(amount *string, payWithAlternateCardFlag *bool, note *string) (resp datatypes.Billing_Payment_Card_ManualPayment, err error) { + params := []interface{}{ + amount, + payWithAlternateCardFlag, + note, + } + err = r.Session.DoRequest("SoftLayer_Account", "requestManualPaymentUsingCreditCardOnFile", params, &r.Options, &resp) + return +} + +// Set this account's abuse emails. Takes an array of email addresses as strings. +func (r Account) SetAbuseEmails(emails []string) (resp bool, err error) { + params := []interface{}{ + emails, + } + err = r.Session.DoRequest("SoftLayer_Account", "setAbuseEmails", params, &r.Options, &resp) + return +} + +// Set the total number of servers that are to be maintained in the given pool. When a server is ordered a new server will be put in the pool to replace the server that was removed to fill an order to maintain the desired pool availability quantity. +func (r Account) SetManagedPoolQuantity(poolKeyName *string, backendRouter *string, quantity *int) (resp []byte, err error) { + params := []interface{}{ + poolKeyName, + backendRouter, + quantity, + } + err = r.Session.DoRequest("SoftLayer_Account", "setManagedPoolQuantity", params, &r.Options, &resp) + return +} + +// Set the flag that enables or disables automatic private network VLAN spanning for a SoftLayer customer account. Enabling VLAN spanning allows an account's servers to talk on the same broadcast domain even if they reside within different private vlans. +func (r Account) SetVlanSpan(enabled *bool) (resp bool, err error) { + params := []interface{}{ + enabled, + } + err = r.Session.DoRequest("SoftLayer_Account", "setVlanSpan", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) SwapCreditCards() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "swapCreditCards", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) SyncCurrentUserPopulationWithPaas() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Account", "syncCurrentUserPopulationWithPaas", nil, &r.Options, &resp) + return +} + +// Some larger SoftLayer customer accounts may have servers and virtual servers on more subnets than SoftLayer's private network VPN devices can assign routes for. In those cases routes for individual servers and virtual servers may be assigned individually to an account's servers via this method. 
+// +// Always call this method to enable changes when manually configuring VPN subnet access. +func (r Account) UpdateVpnUsersForResource(objectId *int, objectType *string) (resp bool, err error) { + params := []interface{}{ + objectId, + objectType, + } + err = r.Session.DoRequest("SoftLayer_Account", "updateVpnUsersForResource", params, &r.Options, &resp) + return +} + +// This method will validate the following account fields. Included are the allowed characters for each field.
+//     Email Address*: letters, numbers, space, period, dash, parenthesis, exclamation point, at sign, ampersand, colon, comma, underscore, apostrophe, octothorpe.
+//
+//     Company Name*: alphabet, numbers, space, period, dash, octothorpe, forward slash, backward slash, comma, colon, at sign, ampersand, underscore, apostrophe, parenthesis, exclamation point. (Note: may not contain an email address)
+//     First Name*: alphabet, space, period, dash, comma, apostrophe.
+//     Last Name*: alphabet, space, period, dash, comma, apostrophe.
+//     Address 1*: alphabet, numbers, space, period, dash, octothorpe, forward slash, backward slash, comma, colon, at sign, ampersand, underscore, apostrophe.
+//     Address 2: alphabet, numbers, space, period, dash, octothorpe, forward slash, backward slash, comma, colon, at sign, ampersand, underscore, apostrophe.
+//     City*: alphabet, space, period, dash, apostrophe.
+//     State*: Required if country is US or Canada. Must be valid Alpha-2 ISO 3166-1 state code for that country.
+//     Postal Code*: alphabet, numbers, dash, space.
+//     Country*: alphabet, numbers. Must be valid Alpha-2 ISO 3166-1 country code.
+//     Office Phone*: alphabet, numbers, space, period, dash, parenthesis, plus sign.
+//     Alternate Phone: alphabet, numbers, space, period, dash, parenthesis, plus sign.
+//     Fax Phone: alphabet, numbers, space, period, dash, parenthesis, plus sign.
    +// * denotes a required field. +func (r Account) Validate(account *datatypes.Account) (resp []string, err error) { + params := []interface{}{ + account, + } + err = r.Session.DoRequest("SoftLayer_Account", "validate", params, &r.Options, &resp) + return +} + +// This method checks global and account specific requirements and returns true if the dollar amount entered is acceptable for this account and false otherwise. Please note the dollar amount is in USD. +func (r Account) ValidateManualPaymentAmount(amount *string) (resp bool, err error) { + params := []interface{}{ + amount, + } + err = r.Session.DoRequest("SoftLayer_Account", "validateManualPaymentAmount", params, &r.Options, &resp) + return +} + +// The SoftLayer_Account_Address data type contains information on an address associated with a SoftLayer account. +type Account_Address struct { + Session *session.Session + Options sl.Options +} + +// GetAccountAddressService returns an instance of the Account_Address SoftLayer service +func GetAccountAddressService(sess *session.Session) Account_Address { + return Account_Address{Session: sess} +} + +func (r Account_Address) Id(id int) Account_Address { + r.Options.Id = &id + return r +} + +func (r Account_Address) Mask(mask string) Account_Address { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Address) Filter(filter string) Account_Address { + r.Options.Filter = filter + return r +} + +func (r Account_Address) Limit(limit int) Account_Address { + r.Options.Limit = &limit + return r +} + +func (r Account_Address) Offset(offset int) Account_Address { + r.Options.Offset = &offset + return r +} + +// Create a new address record. The ''typeId'', ''accountId'', ''description'', ''address1'', ''city'', ''state'', ''country'', and ''postalCode'' properties in the templateObject parameter are required properties and may not be null or empty. Users will be restricted to creating addresses for their account. +func (r Account_Address) CreateObject(templateObject *datatypes.Account_Address) (resp datatypes.Account_Address, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Address", "createObject", params, &r.Options, &resp) + return +} + +// Edit the properties of an address record by passing in a modified instance of a SoftLayer_Account_Address object. Users will be restricted to modifying addresses for their account. +func (r Account_Address) EditObject(templateObject *datatypes.Account_Address) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Address", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account to which this address belongs. +func (r Account_Address) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve a list of SoftLayer datacenter addresses. +func (r Account_Address) GetAllDataCenters() (resp []datatypes.Account_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getAllDataCenters", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who created this address. 
+func (r Account_Address) GetCreateUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getCreateUser", nil, &r.Options, &resp) + return +} + +// Retrieve The location of this address. +func (r Account_Address) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve The employee who last modified this address. +func (r Account_Address) GetModifyEmployee() (resp datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getModifyEmployee", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who last modified this address. +func (r Account_Address) GetModifyUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getModifyUser", nil, &r.Options, &resp) + return +} + +// Retrieve a list of SoftLayer datacenter addresses. +func (r Account_Address) GetNetworkAddress(name *string) (resp []datatypes.Account_Address, err error) { + params := []interface{}{ + name, + } + err = r.Session.DoRequest("SoftLayer_Account_Address", "getNetworkAddress", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Address) GetObject() (resp datatypes.Account_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve An account address' type. +func (r Account_Address) GetType() (resp datatypes.Account_Address_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Address_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountAddressTypeService returns an instance of the Account_Address_Type SoftLayer service +func GetAccountAddressTypeService(sess *session.Session) Account_Address_Type { + return Account_Address_Type{Session: sess} +} + +func (r Account_Address_Type) Id(id int) Account_Address_Type { + r.Options.Id = &id + return r +} + +func (r Account_Address_Type) Mask(mask string) Account_Address_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Address_Type) Filter(filter string) Account_Address_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Address_Type) Limit(limit int) Account_Address_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Address_Type) Offset(offset int) Account_Address_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Address_Type) GetObject() (resp datatypes.Account_Address_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address_Type", "getObject", nil, &r.Options, &resp) + return +} + +// This service allows for a unique identifier to be associated to an existing customer account. 
+type Account_Affiliation struct { + Session *session.Session + Options sl.Options +} + +// GetAccountAffiliationService returns an instance of the Account_Affiliation SoftLayer service +func GetAccountAffiliationService(sess *session.Session) Account_Affiliation { + return Account_Affiliation{Session: sess} +} + +func (r Account_Affiliation) Id(id int) Account_Affiliation { + r.Options.Id = &id + return r +} + +func (r Account_Affiliation) Mask(mask string) Account_Affiliation { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Affiliation) Filter(filter string) Account_Affiliation { + r.Options.Filter = filter + return r +} + +func (r Account_Affiliation) Limit(limit int) Account_Affiliation { + r.Options.Limit = &limit + return r +} + +func (r Account_Affiliation) Offset(offset int) Account_Affiliation { + r.Options.Offset = &offset + return r +} + +// Create a new affiliation to associate with an existing account. +func (r Account_Affiliation) CreateObject(templateObject *datatypes.Account_Affiliation) (resp datatypes.Account_Affiliation, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Affiliation", "createObject", params, &r.Options, &resp) + return +} + +// deleteObject permanently removes an account affiliation +func (r Account_Affiliation) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Affiliation", "deleteObject", nil, &r.Options, &resp) + return +} + +// Edit an affiliation that is associated to an existing account. +func (r Account_Affiliation) EditObject(templateObject *datatypes.Account_Affiliation) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Affiliation", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account that an affiliation belongs to. +func (r Account_Affiliation) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Affiliation", "getAccount", nil, &r.Options, &resp) + return +} + +// Get account affiliation information associated with affiliate id. 
+func (r Account_Affiliation) GetAccountAffiliationsByAffiliateId(affiliateId *string) (resp []datatypes.Account_Affiliation, err error) { + params := []interface{}{ + affiliateId, + } + err = r.Session.DoRequest("SoftLayer_Account_Affiliation", "getAccountAffiliationsByAffiliateId", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Affiliation) GetObject() (resp datatypes.Account_Affiliation, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Affiliation", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Agreement struct { + Session *session.Session + Options sl.Options +} + +// GetAccountAgreementService returns an instance of the Account_Agreement SoftLayer service +func GetAccountAgreementService(sess *session.Session) Account_Agreement { + return Account_Agreement{Session: sess} +} + +func (r Account_Agreement) Id(id int) Account_Agreement { + r.Options.Id = &id + return r +} + +func (r Account_Agreement) Mask(mask string) Account_Agreement { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Agreement) Filter(filter string) Account_Agreement { + r.Options.Filter = filter + return r +} + +func (r Account_Agreement) Limit(limit int) Account_Agreement { + r.Options.Limit = &limit + return r +} + +func (r Account_Agreement) Offset(offset int) Account_Agreement { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Account_Agreement) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Agreement", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The type of agreement. +func (r Account_Agreement) GetAgreementType() (resp datatypes.Account_Agreement_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Agreement", "getAgreementType", nil, &r.Options, &resp) + return +} + +// Retrieve The files attached to an agreement. +func (r Account_Agreement) GetAttachedBillingAgreementFiles() (resp []datatypes.Account_MasterServiceAgreement, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Agreement", "getAttachedBillingAgreementFiles", nil, &r.Options, &resp) + return +} + +// Retrieve The billing items associated with an agreement. +func (r Account_Agreement) GetBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Agreement", "getBillingItems", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Agreement) GetObject() (resp datatypes.Account_Agreement, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Agreement", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The status of the agreement. +func (r Account_Agreement) GetStatus() (resp datatypes.Account_Agreement_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Agreement", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The top level billing item associated with an agreement. +func (r Account_Agreement) GetTopLevelBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Agreement", "getTopLevelBillingItems", nil, &r.Options, &resp) + return +} + +// Account authentication has many different settings that can be set. This class allows the customer or employee to set these settigns. 
+type Account_Authentication_Attribute struct { + Session *session.Session + Options sl.Options +} + +// GetAccountAuthenticationAttributeService returns an instance of the Account_Authentication_Attribute SoftLayer service +func GetAccountAuthenticationAttributeService(sess *session.Session) Account_Authentication_Attribute { + return Account_Authentication_Attribute{Session: sess} +} + +func (r Account_Authentication_Attribute) Id(id int) Account_Authentication_Attribute { + r.Options.Id = &id + return r +} + +func (r Account_Authentication_Attribute) Mask(mask string) Account_Authentication_Attribute { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Authentication_Attribute) Filter(filter string) Account_Authentication_Attribute { + r.Options.Filter = filter + return r +} + +func (r Account_Authentication_Attribute) Limit(limit int) Account_Authentication_Attribute { + r.Options.Limit = &limit + return r +} + +func (r Account_Authentication_Attribute) Offset(offset int) Account_Authentication_Attribute { + r.Options.Offset = &offset + return r +} + +// Retrieve The SoftLayer customer account. +func (r Account_Authentication_Attribute) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Attribute", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer account authentication that has an attribute. +func (r Account_Authentication_Attribute) GetAuthenticationRecord() (resp datatypes.Account_Authentication_Saml, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Attribute", "getAuthenticationRecord", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Authentication_Attribute) GetObject() (resp datatypes.Account_Authentication_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Attribute", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The type of attribute assigned to a SoftLayer account authentication. +func (r Account_Authentication_Attribute) GetType() (resp datatypes.Account_Authentication_Attribute_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Attribute", "getType", nil, &r.Options, &resp) + return +} + +// SoftLayer_Account_Authentication_Attribute_Type models the type of attribute that can be assigned to a SoftLayer customer account authentication. 
+type Account_Authentication_Attribute_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountAuthenticationAttributeTypeService returns an instance of the Account_Authentication_Attribute_Type SoftLayer service +func GetAccountAuthenticationAttributeTypeService(sess *session.Session) Account_Authentication_Attribute_Type { + return Account_Authentication_Attribute_Type{Session: sess} +} + +func (r Account_Authentication_Attribute_Type) Id(id int) Account_Authentication_Attribute_Type { + r.Options.Id = &id + return r +} + +func (r Account_Authentication_Attribute_Type) Mask(mask string) Account_Authentication_Attribute_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Authentication_Attribute_Type) Filter(filter string) Account_Authentication_Attribute_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Authentication_Attribute_Type) Limit(limit int) Account_Authentication_Attribute_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Authentication_Attribute_Type) Offset(offset int) Account_Authentication_Attribute_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Authentication_Attribute_Type) GetAllObjects() (resp []datatypes.Account_Attribute_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Attribute_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Authentication_Attribute_Type) GetObject() (resp datatypes.Account_Authentication_Attribute_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Attribute_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Authentication_Saml struct { + Session *session.Session + Options sl.Options +} + +// GetAccountAuthenticationSamlService returns an instance of the Account_Authentication_Saml SoftLayer service +func GetAccountAuthenticationSamlService(sess *session.Session) Account_Authentication_Saml { + return Account_Authentication_Saml{Session: sess} +} + +func (r Account_Authentication_Saml) Id(id int) Account_Authentication_Saml { + r.Options.Id = &id + return r +} + +func (r Account_Authentication_Saml) Mask(mask string) Account_Authentication_Saml { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Authentication_Saml) Filter(filter string) Account_Authentication_Saml { + r.Options.Filter = filter + return r +} + +func (r Account_Authentication_Saml) Limit(limit int) Account_Authentication_Saml { + r.Options.Limit = &limit + return r +} + +func (r Account_Authentication_Saml) Offset(offset int) Account_Authentication_Saml { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Authentication_Saml) CreateObject(templateObject *datatypes.Account_Authentication_Saml) (resp datatypes.Account_Authentication_Saml, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Saml", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Authentication_Saml) DeleteObject() (resp bool, err error) { + err = 
r.Session.DoRequest("SoftLayer_Account_Authentication_Saml", "deleteObject", nil, &r.Options, &resp) + return +} + +// Edit the object by passing in a modified instance of the object +func (r Account_Authentication_Saml) EditObject(templateObject *datatypes.Account_Authentication_Saml) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Saml", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account associated with this saml configuration. +func (r Account_Authentication_Saml) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Saml", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The saml attribute values for a SoftLayer customer account. +func (r Account_Authentication_Saml) GetAttributes() (resp []datatypes.Account_Authentication_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Saml", "getAttributes", nil, &r.Options, &resp) + return +} + +// This method will return the service provider metadata in XML format. +func (r Account_Authentication_Saml) GetMetadata() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Saml", "getMetadata", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Authentication_Saml) GetObject() (resp datatypes.Account_Authentication_Saml, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Saml", "getObject", nil, &r.Options, &resp) + return +} + +// Contains business partner details associated with an account. Country Enterprise Identifier (CEID), Channel ID, Segment ID and Reseller Level. +type Account_Business_Partner struct { + Session *session.Session + Options sl.Options +} + +// GetAccountBusinessPartnerService returns an instance of the Account_Business_Partner SoftLayer service +func GetAccountBusinessPartnerService(sess *session.Session) Account_Business_Partner { + return Account_Business_Partner{Session: sess} +} + +func (r Account_Business_Partner) Id(id int) Account_Business_Partner { + r.Options.Id = &id + return r +} + +func (r Account_Business_Partner) Mask(mask string) Account_Business_Partner { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Business_Partner) Filter(filter string) Account_Business_Partner { + r.Options.Filter = filter + return r +} + +func (r Account_Business_Partner) Limit(limit int) Account_Business_Partner { + r.Options.Limit = &limit + return r +} + +func (r Account_Business_Partner) Offset(offset int) Account_Business_Partner { + r.Options.Offset = &offset + return r +} + +// Retrieve Account associated with the business partner data +func (r Account_Business_Partner) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Business_Partner", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve Channel indicator used to categorize business partner revenue. 
+func (r Account_Business_Partner) GetChannel() (resp datatypes.Business_Partner_Channel, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Business_Partner", "getChannel", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Business_Partner) GetObject() (resp datatypes.Account_Business_Partner, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Business_Partner", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Segment indicator used to categorize business partner revenue. +func (r Account_Business_Partner) GetSegment() (resp datatypes.Business_Partner_Segment, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Business_Partner", "getSegment", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Contact struct { + Session *session.Session + Options sl.Options +} + +// GetAccountContactService returns an instance of the Account_Contact SoftLayer service +func GetAccountContactService(sess *session.Session) Account_Contact { + return Account_Contact{Session: sess} +} + +func (r Account_Contact) Id(id int) Account_Contact { + r.Options.Id = &id + return r +} + +func (r Account_Contact) Mask(mask string) Account_Contact { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Contact) Filter(filter string) Account_Contact { + r.Options.Filter = filter + return r +} + +func (r Account_Contact) Limit(limit int) Account_Contact { + r.Options.Limit = &limit + return r +} + +func (r Account_Contact) Offset(offset int) Account_Contact { + r.Options.Offset = &offset + return r +} + +// This method creates an account contact. The accountId is fixed, other properties can be set during creation. The typeId indicates the SoftLayer_Account_Contact_Type for the contact. This method returns the SoftLayer_Account_Contact object that is created. +func (r Account_Contact) CreateObject(templateObject *datatypes.Account_Contact) (resp datatypes.Account_Contact, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Contact", "createObject", params, &r.Options, &resp) + return +} + +// deleteObject permanently removes an account contact +func (r Account_Contact) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Contact", "deleteObject", nil, &r.Options, &resp) + return +} + +// This method allows you to modify an account contact. Only master users are permitted to modify an account contact. +func (r Account_Contact) EditObject(templateObject *datatypes.Account_Contact) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Contact", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Contact) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Contact", "getAccount", nil, &r.Options, &resp) + return +} + +// This method will return an array of SoftLayer_Account_Contact_Type objects which can be used when creating or editing an account contact. 
+func (r Account_Contact) GetAllContactTypes() (resp []datatypes.Account_Contact_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Contact", "getAllContactTypes", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Contact) GetObject() (resp datatypes.Account_Contact, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Contact", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Contact) GetType() (resp datatypes.Account_Contact_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Contact", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_External_Setup struct { + Session *session.Session + Options sl.Options +} + +// GetAccountExternalSetupService returns an instance of the Account_External_Setup SoftLayer service +func GetAccountExternalSetupService(sess *session.Session) Account_External_Setup { + return Account_External_Setup{Session: sess} +} + +func (r Account_External_Setup) Id(id int) Account_External_Setup { + r.Options.Id = &id + return r +} + +func (r Account_External_Setup) Mask(mask string) Account_External_Setup { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_External_Setup) Filter(filter string) Account_External_Setup { + r.Options.Filter = filter + return r +} + +func (r Account_External_Setup) Limit(limit int) Account_External_Setup { + r.Options.Limit = &limit + return r +} + +func (r Account_External_Setup) Offset(offset int) Account_External_Setup { + r.Options.Offset = &offset + return r +} + +// Calling this method signals that the account with the provided account id is ready to be billed by the external billing system. +func (r Account_External_Setup) FinalizeExternalBillingForAccount(accountId *int) (resp datatypes.Container_Account_External_Setup_ProvisioningHoldLifted, err error) { + params := []interface{}{ + accountId, + } + err = r.Session.DoRequest("SoftLayer_Account_External_Setup", "finalizeExternalBillingForAccount", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_External_Setup) GetObject() (resp datatypes.Account_External_Setup, err error) { + err = r.Session.DoRequest("SoftLayer_Account_External_Setup", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The transaction information related to verifying the customer credit card. 
+func (r Account_External_Setup) GetVerifyCardTransaction() (resp datatypes.Billing_Payment_Card_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Account_External_Setup", "getVerifyCardTransaction", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Historical_Report struct { + Session *session.Session + Options sl.Options +} + +// GetAccountHistoricalReportService returns an instance of the Account_Historical_Report SoftLayer service +func GetAccountHistoricalReportService(sess *session.Session) Account_Historical_Report { + return Account_Historical_Report{Session: sess} +} + +func (r Account_Historical_Report) Id(id int) Account_Historical_Report { + r.Options.Id = &id + return r +} + +func (r Account_Historical_Report) Mask(mask string) Account_Historical_Report { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Historical_Report) Filter(filter string) Account_Historical_Report { + r.Options.Filter = filter + return r +} + +func (r Account_Historical_Report) Limit(limit int) Account_Historical_Report { + r.Options.Limit = &limit + return r +} + +func (r Account_Historical_Report) Offset(offset int) Account_Historical_Report { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Historical_Report) GetAccountHostUptimeGraphData(startDate *string, endDate *string) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getAccountHostUptimeGraphData", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Historical_Report) GetAccountHostUptimeSummary(startDateTime *string, endDateTime *string) (resp datatypes.Container_Account_Historical_Summary, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getAccountHostUptimeSummary", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Historical_Report) GetAccountUrlUptimeGraphData(startDate *string, endDate *string) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getAccountUrlUptimeGraphData", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Historical_Report) GetAccountUrlUptimeSummary(startDateTime *string, endDateTime *string) (resp datatypes.Container_Account_Historical_Summary, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getAccountUrlUptimeSummary", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Historical_Report) GetHostUptimeDetail(configurationValueId *int, startDateTime *string, endDateTime *string) (resp datatypes.Container_Account_Historical_Summary_Detail, err error) { + params := []interface{}{ + configurationValueId, + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getHostUptimeDetail", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Historical_Report) GetHostUptimeGraphData(configurationValueId *int, startDate *string, endDate *string) (resp datatypes.Container_Graph, 
err error) { + params := []interface{}{ + configurationValueId, + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getHostUptimeGraphData", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Historical_Report) GetUrlUptimeDetail(configurationValueId *int, startDateTime *string, endDateTime *string) (resp datatypes.Container_Account_Historical_Summary_Detail, err error) { + params := []interface{}{ + configurationValueId, + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getUrlUptimeDetail", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Historical_Report) GetUrlUptimeGraphData(configurationValueId *int, startDate *string, endDate *string) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + configurationValueId, + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getUrlUptimeGraphData", params, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Internal_Ibm struct { + Session *session.Session + Options sl.Options +} + +// GetAccountInternalIbmService returns an instance of the Account_Internal_Ibm SoftLayer service +func GetAccountInternalIbmService(sess *session.Session) Account_Internal_Ibm { + return Account_Internal_Ibm{Session: sess} +} + +func (r Account_Internal_Ibm) Id(id int) Account_Internal_Ibm { + r.Options.Id = &id + return r +} + +func (r Account_Internal_Ibm) Mask(mask string) Account_Internal_Ibm { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Internal_Ibm) Filter(filter string) Account_Internal_Ibm { + r.Options.Filter = filter + return r +} + +func (r Account_Internal_Ibm) Limit(limit int) Account_Internal_Ibm { + r.Options.Limit = &limit + return r +} + +func (r Account_Internal_Ibm) Offset(offset int) Account_Internal_Ibm { + r.Options.Offset = &offset + return r +} + +// Validates request and, if the request is approved, returns a list of allowed uses for an automatically created IBMer IaaS account. +func (r Account_Internal_Ibm) GetAccountTypes() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Internal_Ibm", "getAccountTypes", nil, &r.Options, &resp) + return +} + +// Gets the URL used to perform manager validation. +func (r Account_Internal_Ibm) GetAuthorizationUrl(requestId *int) (resp string, err error) { + params := []interface{}{ + requestId, + } + err = r.Session.DoRequest("SoftLayer_Account_Internal_Ibm", "getAuthorizationUrl", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Internal_Ibm) GetBmsCountryList() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Internal_Ibm", "getBmsCountryList", nil, &r.Options, &resp) + return +} + +// Exchanges a code for a token during manager validation. +func (r Account_Internal_Ibm) GetEmployeeAccessToken(unverifiedAuthenticationCode *string) (resp string, err error) { + params := []interface{}{ + unverifiedAuthenticationCode, + } + err = r.Session.DoRequest("SoftLayer_Account_Internal_Ibm", "getEmployeeAccessToken", params, &r.Options, &resp) + return +} + +// After validating the requesting user through the access token, generates a container with the relevant request information and returns it. 
+func (r Account_Internal_Ibm) GetManagerPreview(requestId *int, accessToken *string) (resp datatypes.Container_Account_Internal_Ibm_Request, err error) { + params := []interface{}{ + requestId, + accessToken, + } + err = r.Session.DoRequest("SoftLayer_Account_Internal_Ibm", "getManagerPreview", params, &r.Options, &resp) + return +} + +// Checks for an existing request which would block an IBMer from submitting a new request. Such a request could be denied, approved, or awaiting manager action. +func (r Account_Internal_Ibm) HasExistingRequest(employeeUid *string, managerUid *string) (resp bool, err error) { + params := []interface{}{ + employeeUid, + managerUid, + } + err = r.Session.DoRequest("SoftLayer_Account_Internal_Ibm", "hasExistingRequest", params, &r.Options, &resp) + return +} + +// Applies manager approval to a pending internal IBM account request. If cost recovery is already configured, this will create an account. If not, this will remind the internal team to configure cost recovery and create the account when possible. +func (r Account_Internal_Ibm) ManagerApprove(requestId *int, accessToken *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + requestId, + accessToken, + } + err = r.Session.DoRequest("SoftLayer_Account_Internal_Ibm", "managerApprove", params, &r.Options, &resp) + return +} + +// Denies a pending request and prevents additional requests from the same applicant for as long as the manager remains the same. +func (r Account_Internal_Ibm) ManagerDeny(requestId *int, accessToken *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + requestId, + accessToken, + } + err = r.Session.DoRequest("SoftLayer_Account_Internal_Ibm", "managerDeny", params, &r.Options, &resp) + return +} + +// Validates request and kicks off the approval process. 
+func (r Account_Internal_Ibm) RequestAccount(requestContainer *datatypes.Container_Account_Internal_Ibm_Request) (err error) { + var resp datatypes.Void + params := []interface{}{ + requestContainer, + } + err = r.Session.DoRequest("SoftLayer_Account_Internal_Ibm", "requestAccount", params, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Link_Bluemix struct { + Session *session.Session + Options sl.Options +} + +// GetAccountLinkBluemixService returns an instance of the Account_Link_Bluemix SoftLayer service +func GetAccountLinkBluemixService(sess *session.Session) Account_Link_Bluemix { + return Account_Link_Bluemix{Session: sess} +} + +func (r Account_Link_Bluemix) Id(id int) Account_Link_Bluemix { + r.Options.Id = &id + return r +} + +func (r Account_Link_Bluemix) Mask(mask string) Account_Link_Bluemix { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Link_Bluemix) Filter(filter string) Account_Link_Bluemix { + r.Options.Filter = filter + return r +} + +func (r Account_Link_Bluemix) Limit(limit int) Account_Link_Bluemix { + r.Options.Limit = &limit + return r +} + +func (r Account_Link_Bluemix) Offset(offset int) Account_Link_Bluemix { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Link_Bluemix) GetObject() (resp datatypes.Account_Link_Bluemix, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Link_Bluemix", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Link_Bluemix) GetSupportTierType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Link_Bluemix", "getSupportTierType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Link_OpenStack struct { + Session *session.Session + Options sl.Options +} + +// GetAccountLinkOpenStackService returns an instance of the Account_Link_OpenStack SoftLayer service +func GetAccountLinkOpenStackService(sess *session.Session) Account_Link_OpenStack { + return Account_Link_OpenStack{Session: sess} +} + +func (r Account_Link_OpenStack) Id(id int) Account_Link_OpenStack { + r.Options.Id = &id + return r +} + +func (r Account_Link_OpenStack) Mask(mask string) Account_Link_OpenStack { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Link_OpenStack) Filter(filter string) Account_Link_OpenStack { + r.Options.Filter = filter + return r +} + +func (r Account_Link_OpenStack) Limit(limit int) Account_Link_OpenStack { + r.Options.Limit = &limit + return r +} + +func (r Account_Link_OpenStack) Offset(offset int) Account_Link_OpenStack { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Link_OpenStack) CreateOSDomain(request *datatypes.Account_Link_OpenStack_LinkRequest) (resp datatypes.Account_Link_OpenStack_DomainCreationDetails, err error) { + params := []interface{}{ + request, + } + err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "createOSDomain", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Link_OpenStack) CreateOSProject(request *datatypes.Account_Link_OpenStack_LinkRequest) (resp datatypes.Account_Link_OpenStack_ProjectCreationDetails, err error) { + params := []interface{}{ + request, + } + 
err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "createOSProject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Link_OpenStack) DeleteOSDomain(domainId *string) (resp bool, err error) { + params := []interface{}{ + domainId, + } + err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "deleteOSDomain", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Link_OpenStack) DeleteOSProject(projectId *string) (resp bool, err error) { + params := []interface{}{ + projectId, + } + err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "deleteOSProject", params, &r.Options, &resp) + return +} + +// deleteObject permanently removes an account link and all of it's associated keystone data (including users for the associated project). '''This cannot be undone.''' Be wary of running this method. If you remove an account link in error you will need to re-create it by creating a new SoftLayer_Account_Link_OpenStack object. +func (r Account_Link_OpenStack) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Link_OpenStack) GetOSProject(projectId *string) (resp datatypes.Account_Link_OpenStack_ProjectDetails, err error) { + params := []interface{}{ + projectId, + } + err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "getOSProject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Link_OpenStack) GetObject() (resp datatypes.Account_Link_OpenStack, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Link_OpenStack) ListOSProjects() (resp []datatypes.Account_Link_OpenStack_ProjectDetails, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "listOSProjects", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Account_Lockdown_Request data type holds information on API requests from brand customers. +type Account_Lockdown_Request struct { + Session *session.Session + Options sl.Options +} + +// GetAccountLockdownRequestService returns an instance of the Account_Lockdown_Request SoftLayer service +func GetAccountLockdownRequestService(sess *session.Session) Account_Lockdown_Request { + return Account_Lockdown_Request{Session: sess} +} + +func (r Account_Lockdown_Request) Id(id int) Account_Lockdown_Request { + r.Options.Id = &id + return r +} + +func (r Account_Lockdown_Request) Mask(mask string) Account_Lockdown_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Lockdown_Request) Filter(filter string) Account_Lockdown_Request { + r.Options.Filter = filter + return r +} + +func (r Account_Lockdown_Request) Limit(limit int) Account_Lockdown_Request { + r.Options.Limit = &limit + return r +} + +func (r Account_Lockdown_Request) Offset(offset int) Account_Lockdown_Request { + r.Options.Offset = &offset + return r +} + +// Will cancel a lockdown request scheduled in the future. Once canceled, the lockdown request cannot be reconciled and new requests must be made for subsequent actions on the account. 
+func (r Account_Lockdown_Request) CancelRequest() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Account_Lockdown_Request", "cancelRequest", nil, &r.Options, &resp) + return +} + +// Takes the original lockdown request ID, and an optional disable date. If no date is passed with the API call, the account will be disabled immediately. Otherwise, the account will be disabled on the date given. All hardware will be reclaimed and all accounts permanently disabled. +func (r Account_Lockdown_Request) DisableLockedAccount(disableDate *string) (resp int, err error) { + params := []interface{}{ + disableDate, + } + err = r.Session.DoRequest("SoftLayer_Account_Lockdown_Request", "disableLockedAccount", params, &r.Options, &resp) + return +} + +// Takes an account ID and an optional disconnect date. If no disconnect date is passed into the API call, the account disconnection will happen immediately. Otherwise, the account disconnection will happen on the date given. A brand account request ID will be returned and will then be updated when the disconnection occurs. +func (r Account_Lockdown_Request) DisconnectCompute(accountId *int, disconnectDate *string) (resp int, err error) { + params := []interface{}{ + accountId, + disconnectDate, + } + err = r.Session.DoRequest("SoftLayer_Account_Lockdown_Request", "disconnectCompute", params, &r.Options, &resp) + return +} + +// Provides a history of an account's lockdown requests and their status. +func (r Account_Lockdown_Request) GetAccountHistory(accountId *int) (resp []datatypes.Account_Lockdown_Request, err error) { + params := []interface{}{ + accountId, + } + err = r.Session.DoRequest("SoftLayer_Account_Lockdown_Request", "getAccountHistory", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Lockdown_Request) GetObject() (resp datatypes.Account_Lockdown_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Lockdown_Request", "getObject", nil, &r.Options, &resp) + return +} + +// Takes the original disconnected lockdown event ID, and an optional reconnect date. If no reconnect date is passed with the API call, the account reconnection will happen immediately. Otherwise, the account reconnection will happen on the date given. The associated lockdown event will be unlocked and closed at that time. 
+func (r Account_Lockdown_Request) ReconnectCompute(reconnectDate *string) (resp int, err error) { + params := []interface{}{ + reconnectDate, + } + err = r.Session.DoRequest("SoftLayer_Account_Lockdown_Request", "reconnectCompute", params, &r.Options, &resp) + return +} + +// no documentation yet +type Account_MasterServiceAgreement struct { + Session *session.Session + Options sl.Options +} + +// GetAccountMasterServiceAgreementService returns an instance of the Account_MasterServiceAgreement SoftLayer service +func GetAccountMasterServiceAgreementService(sess *session.Session) Account_MasterServiceAgreement { + return Account_MasterServiceAgreement{Session: sess} +} + +func (r Account_MasterServiceAgreement) Id(id int) Account_MasterServiceAgreement { + r.Options.Id = &id + return r +} + +func (r Account_MasterServiceAgreement) Mask(mask string) Account_MasterServiceAgreement { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_MasterServiceAgreement) Filter(filter string) Account_MasterServiceAgreement { + r.Options.Filter = filter + return r +} + +func (r Account_MasterServiceAgreement) Limit(limit int) Account_MasterServiceAgreement { + r.Options.Limit = &limit + return r +} + +func (r Account_MasterServiceAgreement) Offset(offset int) Account_MasterServiceAgreement { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Account_MasterServiceAgreement) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_MasterServiceAgreement", "getAccount", nil, &r.Options, &resp) + return +} + +// Gets a File Entity container with the user's account's current MSA PDF. Gets a translation if one is available. Otherwise, gets the master document. +func (r Account_MasterServiceAgreement) GetFile() (resp datatypes.Container_Utility_File_Entity, err error) { + err = r.Session.DoRequest("SoftLayer_Account_MasterServiceAgreement", "getFile", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_MasterServiceAgreement) GetObject() (resp datatypes.Account_MasterServiceAgreement, err error) { + err = r.Session.DoRequest("SoftLayer_Account_MasterServiceAgreement", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Account_Media data type contains information on a single piece of media associated with a Data Transfer Service request. +type Account_Media struct { + Session *session.Session + Options sl.Options +} + +// GetAccountMediaService returns an instance of the Account_Media SoftLayer service +func GetAccountMediaService(sess *session.Session) Account_Media { + return Account_Media{Session: sess} +} + +func (r Account_Media) Id(id int) Account_Media { + r.Options.Id = &id + return r +} + +func (r Account_Media) Mask(mask string) Account_Media { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Media) Filter(filter string) Account_Media { + r.Options.Filter = filter + return r +} + +func (r Account_Media) Limit(limit int) Account_Media { + r.Options.Limit = &limit + return r +} + +func (r Account_Media) Offset(offset int) Account_Media { + r.Options.Offset = &offset + return r +} + +// Edit the properties of a media record by passing in a modified instance of a SoftLayer_Account_Media object. 
+func (r Account_Media) EditObject(templateObject *datatypes.Account_Media) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Media", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account to which the media belongs. +func (r Account_Media) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve a list of supported media types for SoftLayer's Data Transfer Service. +func (r Account_Media) GetAllMediaTypes() (resp []datatypes.Account_Media_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media", "getAllMediaTypes", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who created the media object. +func (r Account_Media) GetCreateUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media", "getCreateUser", nil, &r.Options, &resp) + return +} + +// Retrieve The datacenter where the media resides. +func (r Account_Media) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve The employee who last modified the media. +func (r Account_Media) GetModifyEmployee() (resp datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media", "getModifyEmployee", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who last modified the media. +func (r Account_Media) GetModifyUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media", "getModifyUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Media) GetObject() (resp datatypes.Account_Media, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The request to which the media belongs. +func (r Account_Media) GetRequest() (resp datatypes.Account_Media_Data_Transfer_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media", "getRequest", nil, &r.Options, &resp) + return +} + +// Retrieve The media's type. +func (r Account_Media) GetType() (resp datatypes.Account_Media_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's associated EVault network storage service account. +func (r Account_Media) GetVolume() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media", "getVolume", nil, &r.Options, &resp) + return +} + +// Remove media from a SoftLayer account's list of media. The media record is not deleted. +func (r Account_Media) RemoveMediaFromList(mediaTemplate *datatypes.Account_Media) (resp int, err error) { + params := []interface{}{ + mediaTemplate, + } + err = r.Session.DoRequest("SoftLayer_Account_Media", "removeMediaFromList", params, &r.Options, &resp) + return +} + +// The SoftLayer_Account_Media_Data_Transfer_Request data type contains information on a single Data Transfer Service request. Creation of these requests is limited to SoftLayer customers through the SoftLayer Customer Portal. 
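+
+// Illustrative usage sketch, not part of the generated SoftLayer bindings:
+// it shows how the Id/Mask/Limit option helpers defined above are typically
+// chained before a request is issued. The media id (1234) and the mask
+// properties ("id,createDate") are assumed placeholder values.
+func ExampleAccountMediaUsage(sess *session.Session) (datatypes.Account_Media, []datatypes.Account_Media_Type, error) {
+	svc := GetAccountMediaService(sess)
+
+	// Select a single media record by id and trim the response with an object
+	// mask; Mask() wraps a comma-separated list into "mask[...]" automatically.
+	media, err := svc.Id(1234).Mask("id,createDate").GetObject()
+	if err != nil {
+		return datatypes.Account_Media{}, nil, err
+	}
+
+	// A fresh receiver for an unrelated call: list the supported media types.
+	mediaTypes, err := GetAccountMediaService(sess).GetAllMediaTypes()
+	return media, mediaTypes, err
+}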
+type Account_Media_Data_Transfer_Request struct { + Session *session.Session + Options sl.Options +} + +// GetAccountMediaDataTransferRequestService returns an instance of the Account_Media_Data_Transfer_Request SoftLayer service +func GetAccountMediaDataTransferRequestService(sess *session.Session) Account_Media_Data_Transfer_Request { + return Account_Media_Data_Transfer_Request{Session: sess} +} + +func (r Account_Media_Data_Transfer_Request) Id(id int) Account_Media_Data_Transfer_Request { + r.Options.Id = &id + return r +} + +func (r Account_Media_Data_Transfer_Request) Mask(mask string) Account_Media_Data_Transfer_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Media_Data_Transfer_Request) Filter(filter string) Account_Media_Data_Transfer_Request { + r.Options.Filter = filter + return r +} + +func (r Account_Media_Data_Transfer_Request) Limit(limit int) Account_Media_Data_Transfer_Request { + r.Options.Limit = &limit + return r +} + +func (r Account_Media_Data_Transfer_Request) Offset(offset int) Account_Media_Data_Transfer_Request { + r.Options.Offset = &offset + return r +} + +// Edit the properties of a data transfer request record by passing in a modified instance of a SoftLayer_Account_Media_Data_Transfer_Request object. +func (r Account_Media_Data_Transfer_Request) EditObject(templateObject *datatypes.Account_Media_Data_Transfer_Request) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account to which the request belongs. +func (r Account_Media_Data_Transfer_Request) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The active tickets that are attached to the data transfer request. +func (r Account_Media_Data_Transfer_Request) GetActiveTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getActiveTickets", nil, &r.Options, &resp) + return +} + +// Retrieves a list of all the possible statuses to which a request may be set. +func (r Account_Media_Data_Transfer_Request) GetAllRequestStatuses() (resp []datatypes.Account_Media_Data_Transfer_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getAllRequestStatuses", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for the original request. +func (r Account_Media_Data_Transfer_Request) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who created the request. +func (r Account_Media_Data_Transfer_Request) GetCreateUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getCreateUser", nil, &r.Options, &resp) + return +} + +// Retrieve The media of the request. 
+func (r Account_Media_Data_Transfer_Request) GetMedia() (resp datatypes.Account_Media, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getMedia", nil, &r.Options, &resp) + return +} + +// Retrieve The employee who last modified the request. +func (r Account_Media_Data_Transfer_Request) GetModifyEmployee() (resp datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getModifyEmployee", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who last modified the request. +func (r Account_Media_Data_Transfer_Request) GetModifyUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getModifyUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Media_Data_Transfer_Request) GetObject() (resp datatypes.Account_Media_Data_Transfer_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The shipments of the request. +func (r Account_Media_Data_Transfer_Request) GetShipments() (resp []datatypes.Account_Shipment, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getShipments", nil, &r.Options, &resp) + return +} + +// Retrieve The status of the request. +func (r Account_Media_Data_Transfer_Request) GetStatus() (resp datatypes.Account_Media_Data_Transfer_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve All tickets that are attached to the data transfer request. +func (r Account_Media_Data_Transfer_Request) GetTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getTickets", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Note struct { + Session *session.Session + Options sl.Options +} + +// GetAccountNoteService returns an instance of the Account_Note SoftLayer service +func GetAccountNoteService(sess *session.Session) Account_Note { + return Account_Note{Session: sess} +} + +func (r Account_Note) Id(id int) Account_Note { + r.Options.Id = &id + return r +} + +func (r Account_Note) Mask(mask string) Account_Note { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Note) Filter(filter string) Account_Note { + r.Options.Filter = filter + return r +} + +func (r Account_Note) Limit(limit int) Account_Note { + r.Options.Limit = &limit + return r +} + +func (r Account_Note) Offset(offset int) Account_Note { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Note) CreateObject(templateObject *datatypes.Account_Note) (resp datatypes.Account_Note, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Note", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Note) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Note) EditObject(templateObject *datatypes.Account_Note) (resp bool, err error) { + 
params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Note", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Note) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Note) GetCustomer() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note", "getCustomer", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Note) GetNoteHistory() (resp []datatypes.Account_Note_History, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note", "getNoteHistory", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Note) GetNoteType() (resp datatypes.Account_Note_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note", "getNoteType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Note) GetObject() (resp datatypes.Account_Note, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Note_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountNoteTypeService returns an instance of the Account_Note_Type SoftLayer service +func GetAccountNoteTypeService(sess *session.Session) Account_Note_Type { + return Account_Note_Type{Session: sess} +} + +func (r Account_Note_Type) Id(id int) Account_Note_Type { + r.Options.Id = &id + return r +} + +func (r Account_Note_Type) Mask(mask string) Account_Note_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Note_Type) Filter(filter string) Account_Note_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Note_Type) Limit(limit int) Account_Note_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Note_Type) Offset(offset int) Account_Note_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Note_Type) CreateObject(templateObject *datatypes.Account_Note_Type) (resp datatypes.Account_Note_Type, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Note_Type", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Note_Type) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note_Type", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Note_Type) EditObject(templateObject *datatypes.Account_Note_Type) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Note_Type", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Note_Type) GetAllObjects() (resp []datatypes.Account_Note_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Note_Type) GetObject() (resp datatypes.Account_Note_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Partner_Referral_Prospect struct { + 
Session *session.Session + Options sl.Options +} + +// GetAccountPartnerReferralProspectService returns an instance of the Account_Partner_Referral_Prospect SoftLayer service +func GetAccountPartnerReferralProspectService(sess *session.Session) Account_Partner_Referral_Prospect { + return Account_Partner_Referral_Prospect{Session: sess} +} + +func (r Account_Partner_Referral_Prospect) Id(id int) Account_Partner_Referral_Prospect { + r.Options.Id = &id + return r +} + +func (r Account_Partner_Referral_Prospect) Mask(mask string) Account_Partner_Referral_Prospect { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Partner_Referral_Prospect) Filter(filter string) Account_Partner_Referral_Prospect { + r.Options.Filter = filter + return r +} + +func (r Account_Partner_Referral_Prospect) Limit(limit int) Account_Partner_Referral_Prospect { + r.Options.Limit = &limit + return r +} + +func (r Account_Partner_Referral_Prospect) Offset(offset int) Account_Partner_Referral_Prospect { + r.Options.Offset = &offset + return r +} + +// Create a new Referral Partner Prospect +func (r Account_Partner_Referral_Prospect) CreateProspect(templateObject *datatypes.Container_Referral_Partner_Prospect, commit *bool) (resp datatypes.Account_Partner_Referral_Prospect, err error) { + params := []interface{}{ + templateObject, + commit, + } + err = r.Session.DoRequest("SoftLayer_Account_Partner_Referral_Prospect", "createProspect", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Partner_Referral_Prospect) GetObject() (resp datatypes.Account_Partner_Referral_Prospect, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Partner_Referral_Prospect", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieves Questions for a Referral Partner Survey +func (r Account_Partner_Referral_Prospect) GetSurveyQuestions() (resp []datatypes.Survey_Question, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Partner_Referral_Prospect", "getSurveyQuestions", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Account_Password contains username, passwords and notes for services that may require for external applications such the Webcc interface for the EVault Storage service. +type Account_Password struct { + Session *session.Session + Options sl.Options +} + +// GetAccountPasswordService returns an instance of the Account_Password SoftLayer service +func GetAccountPasswordService(sess *session.Session) Account_Password { + return Account_Password{Session: sess} +} + +func (r Account_Password) Id(id int) Account_Password { + r.Options.Id = &id + return r +} + +func (r Account_Password) Mask(mask string) Account_Password { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Password) Filter(filter string) Account_Password { + r.Options.Filter = filter + return r +} + +func (r Account_Password) Limit(limit int) Account_Password { + r.Options.Limit = &limit + return r +} + +func (r Account_Password) Offset(offset int) Account_Password { + r.Options.Offset = &offset + return r +} + +// The password and/or notes may be modified. Modifying the EVault passwords here will also update the password the Webcc interface will use. 
+func (r Account_Password) EditObject(templateObject *datatypes.Account_Password) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Password", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Password) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Password", "getAccount", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Account_Password object whose ID corresponds to the ID number of the init parameter passed to the SoftLayer_Account_Password service. +func (r Account_Password) GetObject() (resp datatypes.Account_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Password", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The service that an account/password combination is tied to. +func (r Account_Password) GetType() (resp datatypes.Account_Password_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Password", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_PersonalData_RemoveRequestReview struct { + Session *session.Session + Options sl.Options +} + +// GetAccountPersonalDataRemoveRequestReviewService returns an instance of the Account_PersonalData_RemoveRequestReview SoftLayer service +func GetAccountPersonalDataRemoveRequestReviewService(sess *session.Session) Account_PersonalData_RemoveRequestReview { + return Account_PersonalData_RemoveRequestReview{Session: sess} +} + +func (r Account_PersonalData_RemoveRequestReview) Id(id int) Account_PersonalData_RemoveRequestReview { + r.Options.Id = &id + return r +} + +func (r Account_PersonalData_RemoveRequestReview) Mask(mask string) Account_PersonalData_RemoveRequestReview { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_PersonalData_RemoveRequestReview) Filter(filter string) Account_PersonalData_RemoveRequestReview { + r.Options.Filter = filter + return r +} + +func (r Account_PersonalData_RemoveRequestReview) Limit(limit int) Account_PersonalData_RemoveRequestReview { + r.Options.Limit = &limit + return r +} + +func (r Account_PersonalData_RemoveRequestReview) Offset(offset int) Account_PersonalData_RemoveRequestReview { + r.Options.Offset = &offset + return r +} + +// Approve a personal information removal request. +func (r Account_PersonalData_RemoveRequestReview) ApproveRequest(requestId *int, accessToken *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + requestId, + accessToken, + } + err = r.Session.DoRequest("SoftLayer_Account_PersonalData_RemoveRequestReview", "approveRequest", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_PersonalData_RemoveRequestReview) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_PersonalData_RemoveRequestReview", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_PersonalData_RemoveRequestReview) GetApprovedFlag() (resp datatypes.Account_PersonalData_RemoveRequestReview, err error) { + err = r.Session.DoRequest("SoftLayer_Account_PersonalData_RemoveRequestReview", "getApprovedFlag", nil, &r.Options, &resp) + return +} + +// Gets the redirect URL for GDPR removal review. 
+func (r Account_PersonalData_RemoveRequestReview) GetAuthorizationUrl() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account_PersonalData_RemoveRequestReview", "getAuthorizationUrl", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_PersonalData_RemoveRequestReview) GetObject() (resp datatypes.Account_PersonalData_RemoveRequestReview, err error) { + err = r.Session.DoRequest("SoftLayer_Account_PersonalData_RemoveRequestReview", "getObject", nil, &r.Options, &resp) + return +} + +// Gets information removal requests to review. +func (r Account_PersonalData_RemoveRequestReview) GetPendingRequests(accessToken *string) (resp []datatypes.Container_Account_PersonalInformation, err error) { + params := []interface{}{ + accessToken, + } + err = r.Session.DoRequest("SoftLayer_Account_PersonalData_RemoveRequestReview", "getPendingRequests", params, &r.Options, &resp) + return +} + +// Retrieves an access token. +func (r Account_PersonalData_RemoveRequestReview) GetReviewerAccessToken(code *string) (resp string, err error) { + params := []interface{}{ + code, + } + err = r.Session.DoRequest("SoftLayer_Account_PersonalData_RemoveRequestReview", "getReviewerAccessToken", params, &r.Options, &resp) + return +} + +// Finds a reviewer's email using the access token +func (r Account_PersonalData_RemoveRequestReview) GetReviewerEmailFromAccessToken(accessToken *string) (resp string, err error) { + params := []interface{}{ + accessToken, + } + err = r.Session.DoRequest("SoftLayer_Account_PersonalData_RemoveRequestReview", "getReviewerEmailFromAccessToken", params, &r.Options, &resp) + return +} + +// no documentation yet +type Account_ProofOfConcept struct { + Session *session.Session + Options sl.Options +} + +// GetAccountProofOfConceptService returns an instance of the Account_ProofOfConcept SoftLayer service +func GetAccountProofOfConceptService(sess *session.Session) Account_ProofOfConcept { + return Account_ProofOfConcept{Session: sess} +} + +func (r Account_ProofOfConcept) Id(id int) Account_ProofOfConcept { + r.Options.Id = &id + return r +} + +func (r Account_ProofOfConcept) Mask(mask string) Account_ProofOfConcept { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_ProofOfConcept) Filter(filter string) Account_ProofOfConcept { + r.Options.Filter = filter + return r +} + +func (r Account_ProofOfConcept) Limit(limit int) Account_ProofOfConcept { + r.Options.Limit = &limit + return r +} + +func (r Account_ProofOfConcept) Offset(offset int) Account_ProofOfConcept { + r.Options.Offset = &offset + return r +} + +// Allows a verified reviewer to approve a request +func (r Account_ProofOfConcept) ApproveReview(requestId *int, accessToken *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + requestId, + accessToken, + } + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept", "approveReview", params, &r.Options, &resp) + return +} + +// Allows verified reviewer to deny a request +func (r Account_ProofOfConcept) DenyReview(requestId *int, accessToken *string, reason *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + requestId, + accessToken, + reason, + } + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept", "denyReview", params, &r.Options, &resp) + return +} + +// Returns URL used to authenticate reviewers +func (r 
Account_ProofOfConcept) GetAuthenticationUrl(targetPage *string) (resp string, err error) { + params := []interface{}{ + targetPage, + } + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept", "getAuthenticationUrl", params, &r.Options, &resp) + return +} + +// Retrieves a list of requests that are pending review in the specified regions +func (r Account_ProofOfConcept) GetRequestsPendingIntegratedOfferingTeamReview(accessToken *string) (resp []datatypes.Container_Account_ProofOfConcept_Review_Summary, err error) { + params := []interface{}{ + accessToken, + } + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept", "getRequestsPendingIntegratedOfferingTeamReview", params, &r.Options, &resp) + return +} + +// Retrieves a list of requests that are pending over threshold review +func (r Account_ProofOfConcept) GetRequestsPendingOverThresholdReview(accessToken *string) (resp []datatypes.Container_Account_ProofOfConcept_Review_Summary, err error) { + params := []interface{}{ + accessToken, + } + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept", "getRequestsPendingOverThresholdReview", params, &r.Options, &resp) + return +} + +// Exchanges a code for a token during reviewer validation. +func (r Account_ProofOfConcept) GetReviewerAccessToken(unverifiedAuthenticationCode *string) (resp string, err error) { + params := []interface{}{ + unverifiedAuthenticationCode, + } + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept", "getReviewerAccessToken", params, &r.Options, &resp) + return +} + +// Finds a reviewer's email using the access token +func (r Account_ProofOfConcept) GetReviewerEmailFromAccessToken(accessToken *string) (resp string, err error) { + params := []interface{}{ + accessToken, + } + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept", "getReviewerEmailFromAccessToken", params, &r.Options, &resp) + return +} + +// Allows authorized IBMer to pull all the details of a single proof of concept account request. +func (r Account_ProofOfConcept) GetSubmittedRequest(requestId *int) (resp datatypes.Container_Account_ProofOfConcept_Review, err error) { + params := []interface{}{ + requestId, + } + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept", "getSubmittedRequest", params, &r.Options, &resp) + return +} + +// Allows authorized IBMer to retrieve a list summarizing all previously submitted proof of concept requests. +// +// Note that the proof of concept system is for internal IBM employees only and is not applicable to users outside the IBM organization. 
+func (r Account_ProofOfConcept) GetSubmittedRequests(email *string, sortOrder *string) (resp []datatypes.Container_Account_ProofOfConcept_Review_Summary, err error) { + params := []interface{}{ + email, + sortOrder, + } + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept", "getSubmittedRequests", params, &r.Options, &resp) + return +} + +// Gets email address users can use to ask for help/support +func (r Account_ProofOfConcept) GetSupportEmailAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept", "getSupportEmailAddress", nil, &r.Options, &resp) + return +} + +// Indicates whether or not a reviewer belongs to the integrated offering team +func (r Account_ProofOfConcept) IsIntegratedOfferingTeamReviewer(emailAddress *string) (resp bool, err error) { + params := []interface{}{ + emailAddress, + } + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept", "isIntegratedOfferingTeamReviewer", params, &r.Options, &resp) + return +} + +// Indicates whether or not a reviewer belongs to the threshold team. +func (r Account_ProofOfConcept) IsOverThresholdReviewer(emailAddress *string) (resp bool, err error) { + params := []interface{}{ + emailAddress, + } + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept", "isOverThresholdReviewer", params, &r.Options, &resp) + return +} + +// Allows authorized IBMer's to apply for a proof of concept account using account team funding. Requests will be reviewed by multiple internal teams before an account is created. +// +// Note that the proof of concept system is for internal IBM employees only and is not applicable to users outside the IBM organization. +func (r Account_ProofOfConcept) RequestAccountTeamFundedAccount(request *datatypes.Container_Account_ProofOfConcept_Request_AccountFunded) (resp datatypes.Container_Account_ProofOfConcept_Review_Summary, err error) { + params := []interface{}{ + request, + } + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept", "requestAccountTeamFundedAccount", params, &r.Options, &resp) + return +} + +// Allows authorized IBMer's to apply for a proof of concept account using global funding. Requests will be reviewed by multiple internal teams before an account is created. +// +// Note that the proof of concept system is for internal IBM employees only and is not applicable to users outside the IBM organization. +func (r Account_ProofOfConcept) RequestGlobalFundedAccount(request *datatypes.Container_Account_ProofOfConcept_Request_GlobalFunded) (resp datatypes.Container_Account_ProofOfConcept_Review_Summary, err error) { + params := []interface{}{ + request, + } + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept", "requestGlobalFundedAccount", params, &r.Options, &resp) + return +} + +// Verifies that a potential reviewer is an approved internal IBM employee +func (r Account_ProofOfConcept) VerifyReviewer(requestId *int, reviewerEmailAddress *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + requestId, + reviewerEmailAddress, + } + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept", "verifyReviewer", params, &r.Options, &resp) + return +} + +// This class represents a Proof of Concept account approver. 
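+
+// Illustrative reviewer-flow sketch, not part of the generated SoftLayer
+// bindings: it chains the calls documented above. An authentication code is
+// exchanged for an access token, the requests pending integrated offering
+// team review are listed, and one request is approved by id. The code and the
+// request id are caller-supplied placeholders.
+func ExampleProofOfConceptReview(sess *session.Session, authCode string, requestId int) error {
+	svc := GetAccountProofOfConceptService(sess)
+
+	// Exchange the unverified authentication code for a reviewer access token.
+	token, err := svc.GetReviewerAccessToken(&authCode)
+	if err != nil {
+		return err
+	}
+
+	// List the requests awaiting integrated offering team review (the summary
+	// containers are not inspected further in this sketch).
+	if _, err := svc.GetRequestsPendingIntegratedOfferingTeamReview(&token); err != nil {
+		return err
+	}
+
+	// Approve the chosen request; DenyReview would be used to reject it instead.
+	return svc.ApproveReview(&requestId, &token)
+}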
+type Account_ProofOfConcept_Approver struct { + Session *session.Session + Options sl.Options +} + +// GetAccountProofOfConceptApproverService returns an instance of the Account_ProofOfConcept_Approver SoftLayer service +func GetAccountProofOfConceptApproverService(sess *session.Session) Account_ProofOfConcept_Approver { + return Account_ProofOfConcept_Approver{Session: sess} +} + +func (r Account_ProofOfConcept_Approver) Id(id int) Account_ProofOfConcept_Approver { + r.Options.Id = &id + return r +} + +func (r Account_ProofOfConcept_Approver) Mask(mask string) Account_ProofOfConcept_Approver { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_ProofOfConcept_Approver) Filter(filter string) Account_ProofOfConcept_Approver { + r.Options.Filter = filter + return r +} + +func (r Account_ProofOfConcept_Approver) Limit(limit int) Account_ProofOfConcept_Approver { + r.Options.Limit = &limit + return r +} + +func (r Account_ProofOfConcept_Approver) Offset(offset int) Account_ProofOfConcept_Approver { + r.Options.Offset = &offset + return r +} + +// Retrieves a list of reviewers +func (r Account_ProofOfConcept_Approver) GetAllObjects() (resp []datatypes.Account_ProofOfConcept_Approver, err error) { + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept_Approver", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_ProofOfConcept_Approver) GetObject() (resp datatypes.Account_ProofOfConcept_Approver, err error) { + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept_Approver", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_ProofOfConcept_Approver) GetRole() (resp datatypes.Account_ProofOfConcept_Approver_Role, err error) { + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept_Approver", "getRole", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_ProofOfConcept_Approver) GetType() (resp datatypes.Account_ProofOfConcept_Approver_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept_Approver", "getType", nil, &r.Options, &resp) + return +} + +// This class represents a Proof of Concept account approver type. The current roles are Primary and Backup approvers. 
+type Account_ProofOfConcept_Approver_Role struct { + Session *session.Session + Options sl.Options +} + +// GetAccountProofOfConceptApproverRoleService returns an instance of the Account_ProofOfConcept_Approver_Role SoftLayer service +func GetAccountProofOfConceptApproverRoleService(sess *session.Session) Account_ProofOfConcept_Approver_Role { + return Account_ProofOfConcept_Approver_Role{Session: sess} +} + +func (r Account_ProofOfConcept_Approver_Role) Id(id int) Account_ProofOfConcept_Approver_Role { + r.Options.Id = &id + return r +} + +func (r Account_ProofOfConcept_Approver_Role) Mask(mask string) Account_ProofOfConcept_Approver_Role { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_ProofOfConcept_Approver_Role) Filter(filter string) Account_ProofOfConcept_Approver_Role { + r.Options.Filter = filter + return r +} + +func (r Account_ProofOfConcept_Approver_Role) Limit(limit int) Account_ProofOfConcept_Approver_Role { + r.Options.Limit = &limit + return r +} + +func (r Account_ProofOfConcept_Approver_Role) Offset(offset int) Account_ProofOfConcept_Approver_Role { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_ProofOfConcept_Approver_Role) GetObject() (resp datatypes.Account_ProofOfConcept_Approver_Role, err error) { + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept_Approver_Role", "getObject", nil, &r.Options, &resp) + return +} + +// This class represents a Proof of Concept account approver type. +type Account_ProofOfConcept_Approver_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountProofOfConceptApproverTypeService returns an instance of the Account_ProofOfConcept_Approver_Type SoftLayer service +func GetAccountProofOfConceptApproverTypeService(sess *session.Session) Account_ProofOfConcept_Approver_Type { + return Account_ProofOfConcept_Approver_Type{Session: sess} +} + +func (r Account_ProofOfConcept_Approver_Type) Id(id int) Account_ProofOfConcept_Approver_Type { + r.Options.Id = &id + return r +} + +func (r Account_ProofOfConcept_Approver_Type) Mask(mask string) Account_ProofOfConcept_Approver_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_ProofOfConcept_Approver_Type) Filter(filter string) Account_ProofOfConcept_Approver_Type { + r.Options.Filter = filter + return r +} + +func (r Account_ProofOfConcept_Approver_Type) Limit(limit int) Account_ProofOfConcept_Approver_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_ProofOfConcept_Approver_Type) Offset(offset int) Account_ProofOfConcept_Approver_Type { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Account_ProofOfConcept_Approver_Type) GetApprovers() (resp []datatypes.Account_ProofOfConcept_Approver, err error) { + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept_Approver_Type", "getApprovers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_ProofOfConcept_Approver_Type) GetObject() (resp datatypes.Account_ProofOfConcept_Approver_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept_Approver_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_ProofOfConcept_Funding_Type struct { + 
Session *session.Session + Options sl.Options +} + +// GetAccountProofOfConceptFundingTypeService returns an instance of the Account_ProofOfConcept_Funding_Type SoftLayer service +func GetAccountProofOfConceptFundingTypeService(sess *session.Session) Account_ProofOfConcept_Funding_Type { + return Account_ProofOfConcept_Funding_Type{Session: sess} +} + +func (r Account_ProofOfConcept_Funding_Type) Id(id int) Account_ProofOfConcept_Funding_Type { + r.Options.Id = &id + return r +} + +func (r Account_ProofOfConcept_Funding_Type) Mask(mask string) Account_ProofOfConcept_Funding_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_ProofOfConcept_Funding_Type) Filter(filter string) Account_ProofOfConcept_Funding_Type { + r.Options.Filter = filter + return r +} + +func (r Account_ProofOfConcept_Funding_Type) Limit(limit int) Account_ProofOfConcept_Funding_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_ProofOfConcept_Funding_Type) Offset(offset int) Account_ProofOfConcept_Funding_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_ProofOfConcept_Funding_Type) GetAllObjects() (resp []datatypes.Account_ProofOfConcept_Funding_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept_Funding_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_ProofOfConcept_Funding_Type) GetApproverTypes() (resp []datatypes.Account_ProofOfConcept_Approver_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept_Funding_Type", "getApproverTypes", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_ProofOfConcept_Funding_Type) GetApprovers() (resp []datatypes.Account_ProofOfConcept_Approver, err error) { + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept_Funding_Type", "getApprovers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_ProofOfConcept_Funding_Type) GetObject() (resp datatypes.Account_ProofOfConcept_Funding_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_ProofOfConcept_Funding_Type", "getObject", nil, &r.Options, &resp) + return +} + +// +// +// +// +// +type Account_Regional_Registry_Detail struct { + Session *session.Session + Options sl.Options +} + +// GetAccountRegionalRegistryDetailService returns an instance of the Account_Regional_Registry_Detail SoftLayer service +func GetAccountRegionalRegistryDetailService(sess *session.Session) Account_Regional_Registry_Detail { + return Account_Regional_Registry_Detail{Session: sess} +} + +func (r Account_Regional_Registry_Detail) Id(id int) Account_Regional_Registry_Detail { + r.Options.Id = &id + return r +} + +func (r Account_Regional_Registry_Detail) Mask(mask string) Account_Regional_Registry_Detail { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Regional_Registry_Detail) Filter(filter string) Account_Regional_Registry_Detail { + r.Options.Filter = filter + return r +} + +func (r Account_Regional_Registry_Detail) Limit(limit int) Account_Regional_Registry_Detail { + r.Options.Limit = &limit + return r +} + +func (r Account_Regional_Registry_Detail) Offset(offset int) Account_Regional_Registry_Detail { + r.Options.Offset 
= &offset + return r +} + +// This method will create a new SoftLayer_Account_Regional_Registry_Detail object. +// +// Input - [[SoftLayer_Account_Regional_Registry_Detail (type)|SoftLayer_Account_Regional_Registry_Detail]]
    • detailTypeId
      The [[SoftLayer_Account_Regional_Registry_Detail_Type|type id]] of this detail object
      • Required
      • Type - integer
    • regionalInternetRegistryHandleId
      The id of the [[SoftLayer_Account_Rwhois_Handle|RWhois handle]] object. This is only to be used for detailed registrations, where a subnet is registered to an organization. The associated handle will be required to be a valid organization object id at the relevant registry. In this case, the detail object will only be valid for the registry the organization belongs to.
      • Optional
      • Type - integer
    +func (r Account_Regional_Registry_Detail) CreateObject(templateObject *datatypes.Account_Regional_Registry_Detail) (resp datatypes.Account_Regional_Registry_Detail, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "createObject", params, &r.Options, &resp) + return +} + +// This method will delete an existing SoftLayer_Account_Regional_Registry_Detail object. +func (r Account_Regional_Registry_Detail) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "deleteObject", nil, &r.Options, &resp) + return +} + +// This method will edit an existing SoftLayer_Account_Regional_Registry_Detail object. For more detail, see [[SoftLayer_Account_Regional_Registry_Detail::createObject|createObject]]. +func (r Account_Regional_Registry_Detail) EditObject(templateObject *datatypes.Account_Regional_Registry_Detail) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account that this detail object belongs to. +func (r Account_Regional_Registry_Detail) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The associated type of this detail object. +func (r Account_Regional_Registry_Detail) GetDetailType() (resp datatypes.Account_Regional_Registry_Detail_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "getDetailType", nil, &r.Options, &resp) + return +} + +// Retrieve References to the [[SoftLayer_Network_Subnet_Registration|registration objects]] that consume this detail object. +func (r Account_Regional_Registry_Detail) GetDetails() (resp []datatypes.Network_Subnet_Registration_Details, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "getDetails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Regional_Registry_Detail) GetObject() (resp datatypes.Account_Regional_Registry_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The individual properties that define this detail object's values. +func (r Account_Regional_Registry_Detail) GetProperties() (resp []datatypes.Account_Regional_Registry_Detail_Property, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "getProperties", nil, &r.Options, &resp) + return +} + +// Retrieve The associated RWhois handle of this detail object. Used only when detailed reassignments are necessary. +func (r Account_Regional_Registry_Detail) GetRegionalInternetRegistryHandle() (resp datatypes.Account_Rwhois_Handle, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "getRegionalInternetRegistryHandle", nil, &r.Options, &resp) + return +} + +// This method will create a bulk transaction to update any registrations that reference this detail object. It should only be called from a child class such as [[SoftLayer_Account_Regional_Registry_Detail_Person]] or [[SoftLayer_Account_Regional_Registry_Detail_Network]]. The registrations should be in the Open or Registration_Complete status. 
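+
+// Illustrative sketch, not part of the generated SoftLayer bindings: a
+// minimal createObject call following the input documentation above, where
+// only detailTypeId is required. The DetailTypeId field name on the datatype
+// is an assumption based on that property name.
+func ExampleCreateRegistryDetail(sess *session.Session, detailTypeId int) (datatypes.Account_Regional_Registry_Detail, error) {
+	template := datatypes.Account_Regional_Registry_Detail{
+		// Id of an existing SoftLayer_Account_Regional_Registry_Detail_Type.
+		DetailTypeId: &detailTypeId,
+	}
+	return GetAccountRegionalRegistryDetailService(sess).CreateObject(&template)
+}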
+func (r Account_Regional_Registry_Detail) UpdateReferencedRegistrations() (resp datatypes.Container_Network_Subnet_Registration_TransactionDetails, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "updateReferencedRegistrations", nil, &r.Options, &resp) + return +} + +// Subnet registration properties are used to define various attributes of the [[SoftLayer_Account_Regional_Registry_Detail|detail objects]]. These properties are defined by the [[SoftLayer_Account_Regional_Registry_Detail_Property_Type]] objects, which describe the available value formats. +type Account_Regional_Registry_Detail_Property struct { + Session *session.Session + Options sl.Options +} + +// GetAccountRegionalRegistryDetailPropertyService returns an instance of the Account_Regional_Registry_Detail_Property SoftLayer service +func GetAccountRegionalRegistryDetailPropertyService(sess *session.Session) Account_Regional_Registry_Detail_Property { + return Account_Regional_Registry_Detail_Property{Session: sess} +} + +func (r Account_Regional_Registry_Detail_Property) Id(id int) Account_Regional_Registry_Detail_Property { + r.Options.Id = &id + return r +} + +func (r Account_Regional_Registry_Detail_Property) Mask(mask string) Account_Regional_Registry_Detail_Property { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Regional_Registry_Detail_Property) Filter(filter string) Account_Regional_Registry_Detail_Property { + r.Options.Filter = filter + return r +} + +func (r Account_Regional_Registry_Detail_Property) Limit(limit int) Account_Regional_Registry_Detail_Property { + r.Options.Limit = &limit + return r +} + +func (r Account_Regional_Registry_Detail_Property) Offset(offset int) Account_Regional_Registry_Detail_Property { + r.Options.Offset = &offset + return r +} + +// This method will create a new SoftLayer_Account_Regional_Registry_Detail_Property object. +// +// Input - [[SoftLayer_Account_Regional_Registry_Detail_Property (type)|SoftLayer_Account_Regional_Registry_Detail_Property]]
    • registrationDetailId
      The numeric ID of the [[SoftLayer_Account_Regional_Registry_Detail|detail object]] this property belongs to
      • Required
      • Type - integer
    • propertyTypeId
      The numeric ID of the associated [[SoftLayer_Account_Regional_Registry_Detail_Property_Type]] object
      • Required
      • Type - integer
    • sequencePosition
      When more than one property of the same type exists on a detail object, this value determines the position in that collection. This can be thought of more as a sort order.
      • Required
      • Type - integer
    • value
      The actual value of the property.
      • Required
      • Type - string
    +func (r Account_Regional_Registry_Detail_Property) CreateObject(templateObject *datatypes.Account_Regional_Registry_Detail_Property) (resp datatypes.Account_Regional_Registry_Detail_Property, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "createObject", params, &r.Options, &resp) + return +} + +// Edit multiple [[SoftLayer_Account_Regional_Registry_Detail_Property]] objects. +func (r Account_Regional_Registry_Detail_Property) CreateObjects(templateObjects []datatypes.Account_Regional_Registry_Detail_Property) (resp []datatypes.Account_Regional_Registry_Detail_Property, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "createObjects", params, &r.Options, &resp) + return +} + +// This method will delete an existing SoftLayer_Account_Regional_Registry_Detail_Property object. +func (r Account_Regional_Registry_Detail_Property) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "deleteObject", nil, &r.Options, &resp) + return +} + +// This method will edit an existing SoftLayer_Account_Regional_Registry_Detail_Property object. For more detail, see [[SoftLayer_Account_Regional_Registry_Detail_Property::createObject|createObject]]. +func (r Account_Regional_Registry_Detail_Property) EditObject(templateObject *datatypes.Account_Regional_Registry_Detail_Property) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "editObject", params, &r.Options, &resp) + return +} + +// Edit multiple [[SoftLayer_Account_Regional_Registry_Detail_Property]] objects. +func (r Account_Regional_Registry_Detail_Property) EditObjects(templateObjects []datatypes.Account_Regional_Registry_Detail_Property) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "editObjects", params, &r.Options, &resp) + return +} + +// Retrieve The [[SoftLayer_Account_Regional_Registry_Detail]] object this property belongs to +func (r Account_Regional_Registry_Detail_Property) GetDetail() (resp datatypes.Account_Regional_Registry_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "getDetail", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Regional_Registry_Detail_Property) GetObject() (resp datatypes.Account_Regional_Registry_Detail_Property, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The [[SoftLayer_Account_Regional_Registry_Detail_Property_Type]] object this property belongs to +func (r Account_Regional_Registry_Detail_Property) GetPropertyType() (resp datatypes.Account_Regional_Registry_Detail_Property_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "getPropertyType", nil, &r.Options, &resp) + return +} + +// Subnet Registration Detail Property Type objects describe the nature of a [[SoftLayer_Account_Regional_Registry_Detail_Property]] object. These types use [http://php.net/pcre.pattern.php Perl-Compatible Regular Expressions] to validate the value of a property object. 
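+
+// Illustrative sketch, not part of the generated SoftLayer bindings: it
+// mirrors the input documentation above, where registrationDetailId,
+// propertyTypeId, sequencePosition and value are all required. The Go field
+// names on the datatype are assumptions based on those property names.
+func ExampleCreateDetailProperty(sess *session.Session, detailId int, propertyTypeId int, value string) (datatypes.Account_Regional_Registry_Detail_Property, error) {
+	sequence := 0 // position among properties of the same type on the detail
+	template := datatypes.Account_Regional_Registry_Detail_Property{
+		RegistrationDetailId: &detailId,
+		PropertyTypeId:       &propertyTypeId,
+		SequencePosition:     &sequence,
+		Value:                &value,
+	}
+	return GetAccountRegionalRegistryDetailPropertyService(sess).CreateObject(&template)
+}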
+type Account_Regional_Registry_Detail_Property_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountRegionalRegistryDetailPropertyTypeService returns an instance of the Account_Regional_Registry_Detail_Property_Type SoftLayer service +func GetAccountRegionalRegistryDetailPropertyTypeService(sess *session.Session) Account_Regional_Registry_Detail_Property_Type { + return Account_Regional_Registry_Detail_Property_Type{Session: sess} +} + +func (r Account_Regional_Registry_Detail_Property_Type) Id(id int) Account_Regional_Registry_Detail_Property_Type { + r.Options.Id = &id + return r +} + +func (r Account_Regional_Registry_Detail_Property_Type) Mask(mask string) Account_Regional_Registry_Detail_Property_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Regional_Registry_Detail_Property_Type) Filter(filter string) Account_Regional_Registry_Detail_Property_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Regional_Registry_Detail_Property_Type) Limit(limit int) Account_Regional_Registry_Detail_Property_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Regional_Registry_Detail_Property_Type) Offset(offset int) Account_Regional_Registry_Detail_Property_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Regional_Registry_Detail_Property_Type) GetAllObjects() (resp []datatypes.Account_Regional_Registry_Detail_Property_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Regional_Registry_Detail_Property_Type) GetObject() (resp datatypes.Account_Regional_Registry_Detail_Property_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property_Type", "getObject", nil, &r.Options, &resp) + return +} + +// Subnet Registration Detail Type objects describe the nature of a [[SoftLayer_Account_Regional_Registry_Detail]] object. +// +// The standard values for these objects are as follows:
    • NETWORK - The detail object represents the information for a [[SoftLayer_Network_Subnet|subnet]]
    • NETWORK6 - The detail object represents the information for an [[SoftLayer_Network_Subnet_Version6|IPv6 subnet]]
    • PERSON - The detail object represents the information for a customer with the RIR
    +type Account_Regional_Registry_Detail_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountRegionalRegistryDetailTypeService returns an instance of the Account_Regional_Registry_Detail_Type SoftLayer service +func GetAccountRegionalRegistryDetailTypeService(sess *session.Session) Account_Regional_Registry_Detail_Type { + return Account_Regional_Registry_Detail_Type{Session: sess} +} + +func (r Account_Regional_Registry_Detail_Type) Id(id int) Account_Regional_Registry_Detail_Type { + r.Options.Id = &id + return r +} + +func (r Account_Regional_Registry_Detail_Type) Mask(mask string) Account_Regional_Registry_Detail_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Regional_Registry_Detail_Type) Filter(filter string) Account_Regional_Registry_Detail_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Regional_Registry_Detail_Type) Limit(limit int) Account_Regional_Registry_Detail_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Regional_Registry_Detail_Type) Offset(offset int) Account_Regional_Registry_Detail_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Regional_Registry_Detail_Type) GetAllObjects() (resp []datatypes.Account_Regional_Registry_Detail_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Regional_Registry_Detail_Type) GetObject() (resp datatypes.Account_Regional_Registry_Detail_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Reports_Request struct { + Session *session.Session + Options sl.Options +} + +// GetAccountReportsRequestService returns an instance of the Account_Reports_Request SoftLayer service +func GetAccountReportsRequestService(sess *session.Session) Account_Reports_Request { + return Account_Reports_Request{Session: sess} +} + +func (r Account_Reports_Request) Id(id int) Account_Reports_Request { + r.Options.Id = &id + return r +} + +func (r Account_Reports_Request) Mask(mask string) Account_Reports_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Reports_Request) Filter(filter string) Account_Reports_Request { + r.Options.Filter = filter + return r +} + +func (r Account_Reports_Request) Limit(limit int) Account_Reports_Request { + r.Options.Limit = &limit + return r +} + +func (r Account_Reports_Request) Offset(offset int) Account_Reports_Request { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Reports_Request) CreateRequest(contact *datatypes.Account_Contact, reason *string, reportType *string) (resp datatypes.Account_Reports_Request, err error) { + params := []interface{}{ + contact, + reason, + reportType, + } + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "createRequest", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Reports_Request) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", 
"getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve A request's corresponding external contact, if one exists. +func (r Account_Reports_Request) GetAccountContact() (resp datatypes.Account_Contact, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "getAccountContact", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Reports_Request) GetAllObjects() (resp datatypes.Account_Reports_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Reports_Request) GetObject() (resp datatypes.Account_Reports_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Type of the report customer is requesting for. +func (r Account_Reports_Request) GetReportType() (resp datatypes.Compliance_Report_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "getReportType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Reports_Request) GetRequestByRequestKey(requestKey *string) (resp datatypes.Account_Reports_Request, err error) { + params := []interface{}{ + requestKey, + } + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "getRequestByRequestKey", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Reports_Request) GetTicket() (resp datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "getTicket", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user that initiated a report request. +func (r Account_Reports_Request) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "getUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Reports_Request) SendReportEmail(request *datatypes.Account_Reports_Request) (resp bool, err error) { + params := []interface{}{ + request, + } + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "sendReportEmail", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Reports_Request) UpdateTicketOnDecline(request *datatypes.Account_Reports_Request) (resp bool, err error) { + params := []interface{}{ + request, + } + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "updateTicketOnDecline", params, &r.Options, &resp) + return +} + +// The SoftLayer_Account_Shipment data type contains information relating to a shipment. Basic information such as addresses, the shipment courier, and any tracking information for as shipment is accessible with this data type. 
+type Account_Shipment struct { + Session *session.Session + Options sl.Options +} + +// GetAccountShipmentService returns an instance of the Account_Shipment SoftLayer service +func GetAccountShipmentService(sess *session.Session) Account_Shipment { + return Account_Shipment{Session: sess} +} + +func (r Account_Shipment) Id(id int) Account_Shipment { + r.Options.Id = &id + return r +} + +func (r Account_Shipment) Mask(mask string) Account_Shipment { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Shipment) Filter(filter string) Account_Shipment { + r.Options.Filter = filter + return r +} + +func (r Account_Shipment) Limit(limit int) Account_Shipment { + r.Options.Limit = &limit + return r +} + +func (r Account_Shipment) Offset(offset int) Account_Shipment { + r.Options.Offset = &offset + return r +} + +// Edit the properties of a shipment record by passing in a modified instance of a SoftLayer_Account_Shipment object. +func (r Account_Shipment) EditObject(templateObject *datatypes.Account_Shipment) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account to which the shipment belongs. +func (r Account_Shipment) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve a list of available shipping couriers. +func (r Account_Shipment) GetAllCouriers() (resp []datatypes.Auxiliary_Shipping_Courier, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getAllCouriers", nil, &r.Options, &resp) + return +} + +// Retrieve a list of available shipping couriers. +func (r Account_Shipment) GetAllCouriersByType(courierTypeKeyName *string) (resp []datatypes.Auxiliary_Shipping_Courier, err error) { + params := []interface{}{ + courierTypeKeyName, + } + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getAllCouriersByType", params, &r.Options, &resp) + return +} + +// Retrieve a a list of shipment statuses. +func (r Account_Shipment) GetAllShipmentStatuses() (resp []datatypes.Account_Shipment_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getAllShipmentStatuses", nil, &r.Options, &resp) + return +} + +// Retrieve a a list of shipment types. +func (r Account_Shipment) GetAllShipmentTypes() (resp []datatypes.Account_Shipment_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getAllShipmentTypes", nil, &r.Options, &resp) + return +} + +// Retrieve The courier handling the shipment. +func (r Account_Shipment) GetCourier() (resp datatypes.Auxiliary_Shipping_Courier, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getCourier", nil, &r.Options, &resp) + return +} + +// Retrieve The employee who created the shipment. +func (r Account_Shipment) GetCreateEmployee() (resp datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getCreateEmployee", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who created the shipment. 
+func (r Account_Shipment) GetCreateUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getCreateUser", nil, &r.Options, &resp) + return +} + +// Retrieve The address at which the shipment is received. +func (r Account_Shipment) GetDestinationAddress() (resp datatypes.Account_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getDestinationAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The employee who last modified the shipment. +func (r Account_Shipment) GetModifyEmployee() (resp datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getModifyEmployee", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who last modified the shipment. +func (r Account_Shipment) GetModifyUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getModifyUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Shipment) GetObject() (resp datatypes.Account_Shipment, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The address from which the shipment is sent. +func (r Account_Shipment) GetOriginationAddress() (resp datatypes.Account_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getOriginationAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The items in the shipment. +func (r Account_Shipment) GetShipmentItems() (resp []datatypes.Account_Shipment_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getShipmentItems", nil, &r.Options, &resp) + return +} + +// Retrieve The status of the shipment. +func (r Account_Shipment) GetStatus() (resp datatypes.Account_Shipment_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The tracking data for the shipment. +func (r Account_Shipment) GetTrackingData() (resp []datatypes.Account_Shipment_Tracking_Data, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getTrackingData", nil, &r.Options, &resp) + return +} + +// Retrieve The type of shipment (e.g. for Data Transfer Service or Colocation Service). +func (r Account_Shipment) GetType() (resp datatypes.Account_Shipment_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getType", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Account_Shipment_Item data type contains information relating to a shipment's item. Basic information such as addresses, the shipment courier, and any tracking information for as shipment is accessible with this data type. 
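A short usage sketch for the shipment service defined above, reusing the imports and session setup from the earlier sketch; the shipment id is a placeholder:

// Fetch the status and tracking data for one shipment by id.
shipmentSvc := services.GetAccountShipmentService(sess).Id(1234) // placeholder id

status, err := shipmentSvc.GetStatus()
if err != nil {
	log.Fatal(err)
}
fmt.Printf("shipment status: %+v\n", status)

tracking, err := shipmentSvc.GetTrackingData()
if err != nil {
	log.Fatal(err)
}
fmt.Printf("%d tracking records\n", len(tracking))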
+type Account_Shipment_Item struct { + Session *session.Session + Options sl.Options +} + +// GetAccountShipmentItemService returns an instance of the Account_Shipment_Item SoftLayer service +func GetAccountShipmentItemService(sess *session.Session) Account_Shipment_Item { + return Account_Shipment_Item{Session: sess} +} + +func (r Account_Shipment_Item) Id(id int) Account_Shipment_Item { + r.Options.Id = &id + return r +} + +func (r Account_Shipment_Item) Mask(mask string) Account_Shipment_Item { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Shipment_Item) Filter(filter string) Account_Shipment_Item { + r.Options.Filter = filter + return r +} + +func (r Account_Shipment_Item) Limit(limit int) Account_Shipment_Item { + r.Options.Limit = &limit + return r +} + +func (r Account_Shipment_Item) Offset(offset int) Account_Shipment_Item { + r.Options.Offset = &offset + return r +} + +// Edit the properties of a shipment record by passing in a modified instance of a SoftLayer_Account_Shipment_Item object. +func (r Account_Shipment_Item) EditObject(templateObject *datatypes.Account_Shipment_Item) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Item", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Shipment_Item) GetObject() (resp datatypes.Account_Shipment_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Item", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The shipment to which this item belongs. +func (r Account_Shipment_Item) GetShipment() (resp datatypes.Account_Shipment, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Item", "getShipment", nil, &r.Options, &resp) + return +} + +// Retrieve The type of this shipment item. 
+func (r Account_Shipment_Item) GetShipmentItemType() (resp datatypes.Account_Shipment_Item_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Item", "getShipmentItemType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Shipment_Item_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountShipmentItemTypeService returns an instance of the Account_Shipment_Item_Type SoftLayer service +func GetAccountShipmentItemTypeService(sess *session.Session) Account_Shipment_Item_Type { + return Account_Shipment_Item_Type{Session: sess} +} + +func (r Account_Shipment_Item_Type) Id(id int) Account_Shipment_Item_Type { + r.Options.Id = &id + return r +} + +func (r Account_Shipment_Item_Type) Mask(mask string) Account_Shipment_Item_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Shipment_Item_Type) Filter(filter string) Account_Shipment_Item_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Shipment_Item_Type) Limit(limit int) Account_Shipment_Item_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Shipment_Item_Type) Offset(offset int) Account_Shipment_Item_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Shipment_Item_Type) GetObject() (resp datatypes.Account_Shipment_Item_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Item_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Shipment_Resource_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountShipmentResourceTypeService returns an instance of the Account_Shipment_Resource_Type SoftLayer service +func GetAccountShipmentResourceTypeService(sess *session.Session) Account_Shipment_Resource_Type { + return Account_Shipment_Resource_Type{Session: sess} +} + +func (r Account_Shipment_Resource_Type) Id(id int) Account_Shipment_Resource_Type { + r.Options.Id = &id + return r +} + +func (r Account_Shipment_Resource_Type) Mask(mask string) Account_Shipment_Resource_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Shipment_Resource_Type) Filter(filter string) Account_Shipment_Resource_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Shipment_Resource_Type) Limit(limit int) Account_Shipment_Resource_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Shipment_Resource_Type) Offset(offset int) Account_Shipment_Resource_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Shipment_Resource_Type) GetObject() (resp datatypes.Account_Shipment_Resource_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Resource_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Shipment_Status struct { + Session *session.Session + Options sl.Options +} + +// GetAccountShipmentStatusService returns an instance of the Account_Shipment_Status SoftLayer service +func GetAccountShipmentStatusService(sess *session.Session) Account_Shipment_Status { + return Account_Shipment_Status{Session: sess} +} + +func (r Account_Shipment_Status) Id(id int) Account_Shipment_Status { + 
r.Options.Id = &id + return r +} + +func (r Account_Shipment_Status) Mask(mask string) Account_Shipment_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Shipment_Status) Filter(filter string) Account_Shipment_Status { + r.Options.Filter = filter + return r +} + +func (r Account_Shipment_Status) Limit(limit int) Account_Shipment_Status { + r.Options.Limit = &limit + return r +} + +func (r Account_Shipment_Status) Offset(offset int) Account_Shipment_Status { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Shipment_Status) GetObject() (resp datatypes.Account_Shipment_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Status", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Account_Shipment_Tracking_Data data type contains information on a single piece of tracking information pertaining to a shipment. This tracking information tracking numbers by which the shipment may be tracked through the shipping courier. +type Account_Shipment_Tracking_Data struct { + Session *session.Session + Options sl.Options +} + +// GetAccountShipmentTrackingDataService returns an instance of the Account_Shipment_Tracking_Data SoftLayer service +func GetAccountShipmentTrackingDataService(sess *session.Session) Account_Shipment_Tracking_Data { + return Account_Shipment_Tracking_Data{Session: sess} +} + +func (r Account_Shipment_Tracking_Data) Id(id int) Account_Shipment_Tracking_Data { + r.Options.Id = &id + return r +} + +func (r Account_Shipment_Tracking_Data) Mask(mask string) Account_Shipment_Tracking_Data { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Shipment_Tracking_Data) Filter(filter string) Account_Shipment_Tracking_Data { + r.Options.Filter = filter + return r +} + +func (r Account_Shipment_Tracking_Data) Limit(limit int) Account_Shipment_Tracking_Data { + r.Options.Limit = &limit + return r +} + +func (r Account_Shipment_Tracking_Data) Offset(offset int) Account_Shipment_Tracking_Data { + r.Options.Offset = &offset + return r +} + +// Create a new shipment tracking data. The ''shipmentId'', ''sequence'', and ''trackingData'' properties in the templateObject parameter are required parameters to create a tracking data record. +func (r Account_Shipment_Tracking_Data) CreateObject(templateObject *datatypes.Account_Shipment_Tracking_Data) (resp datatypes.Account_Shipment_Tracking_Data, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "createObject", params, &r.Options, &resp) + return +} + +// Create a new shipment tracking data. The ''shipmentId'', ''sequence'', and ''trackingData'' properties of each templateObject in the templateObjects array are required parameters to create a tracking data record. 
+func (r Account_Shipment_Tracking_Data) CreateObjects(templateObjects []datatypes.Account_Shipment_Tracking_Data) (resp []datatypes.Account_Shipment_Tracking_Data, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "createObjects", params, &r.Options, &resp) + return +} + +// deleteObject permanently removes a shipment tracking datum (number) +func (r Account_Shipment_Tracking_Data) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "deleteObject", nil, &r.Options, &resp) + return +} + +// Edit the properties of a tracking data record by passing in a modified instance of a SoftLayer_Account_Shipment_Tracking_Data object. +func (r Account_Shipment_Tracking_Data) EditObject(templateObject *datatypes.Account_Shipment_Tracking_Data) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The employee who created the tracking datum. +func (r Account_Shipment_Tracking_Data) GetCreateEmployee() (resp datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "getCreateEmployee", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who created the tracking datum. +func (r Account_Shipment_Tracking_Data) GetCreateUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "getCreateUser", nil, &r.Options, &resp) + return +} + +// Retrieve The employee who last modified the tracking datum. +func (r Account_Shipment_Tracking_Data) GetModifyEmployee() (resp datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "getModifyEmployee", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who last modified the tracking datum. +func (r Account_Shipment_Tracking_Data) GetModifyUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "getModifyUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Shipment_Tracking_Data) GetObject() (resp datatypes.Account_Shipment_Tracking_Data, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The shipment of the tracking datum. 
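A companion sketch for the tracking-data service, again assuming the earlier session setup and a placeholder id; note that the create calls above require shipmentId, sequence, and trackingData to be set on the template object (the exact Go field names live in the datatypes package):

trackingSvc := services.GetAccountShipmentTrackingDataService(sess)

// Look up a single tracking datum by its id (placeholder value).
datum, err := trackingSvc.Id(5678).GetObject()
if err != nil {
	log.Fatal(err)
}
fmt.Printf("tracking datum: %+v\n", datum)

// deleteObject permanently removes the tracking number, so guard it accordingly.
deleted, err := trackingSvc.Id(5678).DeleteObject()
if err != nil {
	log.Fatal(err)
}
fmt.Println("deleted:", deleted)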
+func (r Account_Shipment_Tracking_Data) GetShipment() (resp datatypes.Account_Shipment, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "getShipment", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Shipment_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountShipmentTypeService returns an instance of the Account_Shipment_Type SoftLayer service +func GetAccountShipmentTypeService(sess *session.Session) Account_Shipment_Type { + return Account_Shipment_Type{Session: sess} +} + +func (r Account_Shipment_Type) Id(id int) Account_Shipment_Type { + r.Options.Id = &id + return r +} + +func (r Account_Shipment_Type) Mask(mask string) Account_Shipment_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Shipment_Type) Filter(filter string) Account_Shipment_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Shipment_Type) Limit(limit int) Account_Shipment_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Shipment_Type) Offset(offset int) Account_Shipment_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Shipment_Type) GetObject() (resp datatypes.Account_Shipment_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Type", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/auxiliary.go b/vendor/github.com/softlayer/softlayer-go/services/auxiliary.go new file mode 100644 index 000000000000..59c183e901e3 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/auxiliary.go @@ -0,0 +1,729 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Auxiliary_Marketing_Event struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryMarketingEventService returns an instance of the Auxiliary_Marketing_Event SoftLayer service +func GetAuxiliaryMarketingEventService(sess *session.Session) Auxiliary_Marketing_Event { + return Auxiliary_Marketing_Event{Session: sess} +} + +func (r Auxiliary_Marketing_Event) Id(id int) Auxiliary_Marketing_Event { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Marketing_Event) Mask(mask string) Auxiliary_Marketing_Event { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Marketing_Event) Filter(filter string) Auxiliary_Marketing_Event { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Marketing_Event) Limit(limit int) Auxiliary_Marketing_Event { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Marketing_Event) Offset(offset int) Auxiliary_Marketing_Event { + r.Options.Offset = &offset + return r +} + +// This method will return a collection of SoftLayer_Auxiliary_Marketing_Event objects ordered in ascending order by start date. +func (r Auxiliary_Marketing_Event) GetMarketingEvents() (resp []datatypes.Auxiliary_Marketing_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Marketing_Event", "getMarketingEvents", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Auxiliary_Marketing_Event) GetObject() (resp datatypes.Auxiliary_Marketing_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Marketing_Event", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Network_Status struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryNetworkStatusService returns an instance of the Auxiliary_Network_Status SoftLayer service +func GetAuxiliaryNetworkStatusService(sess *session.Session) Auxiliary_Network_Status { + return Auxiliary_Network_Status{Session: sess} +} + +func (r Auxiliary_Network_Status) Id(id int) Auxiliary_Network_Status { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Network_Status) Mask(mask string) Auxiliary_Network_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Network_Status) Filter(filter string) Auxiliary_Network_Status { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Network_Status) Limit(limit int) Auxiliary_Network_Status { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Network_Status) Offset(offset int) Auxiliary_Network_Status { + r.Options.Offset = &offset + return r +} + +// Return the current network status of and latency information for a given target from numerous points around the world. 
Valid Targets: +// * ALL +// * NETWORK_DALLAS +// * NETWORK_SEATTLE +// * NETWORK_PUBLIC +// * NETWORK_PUBLIC_DALLAS +// * NETWORK_PUBLIC_SEATTLE +// * NETWORK_PUBLIC_WDC +// * NETWORK_PRIVATE +// * NETWORK_PRIVATE_DALLAS +// * NETWORK_PRIVATE_SEATTLE +// * NETWORK_PRIVATE_WDC +func (r Auxiliary_Network_Status) GetNetworkStatus(target *string) (resp []datatypes.Container_Auxiliary_Network_Status_Reading, err error) { + params := []interface{}{ + target, + } + err = r.Session.DoRequest("SoftLayer_Auxiliary_Network_Status", "getNetworkStatus", params, &r.Options, &resp) + return +} + +// A SoftLayer_Auxiliary_Notification_Emergency data object represents a notification event being broadcast to the SoftLayer customer base. It is used to provide information regarding outages or current known issues. +type Auxiliary_Notification_Emergency struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryNotificationEmergencyService returns an instance of the Auxiliary_Notification_Emergency SoftLayer service +func GetAuxiliaryNotificationEmergencyService(sess *session.Session) Auxiliary_Notification_Emergency { + return Auxiliary_Notification_Emergency{Session: sess} +} + +func (r Auxiliary_Notification_Emergency) Id(id int) Auxiliary_Notification_Emergency { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Notification_Emergency) Mask(mask string) Auxiliary_Notification_Emergency { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Notification_Emergency) Filter(filter string) Auxiliary_Notification_Emergency { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Notification_Emergency) Limit(limit int) Auxiliary_Notification_Emergency { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Notification_Emergency) Offset(offset int) Auxiliary_Notification_Emergency { + r.Options.Offset = &offset + return r +} + +// Retrieve an array of SoftLayer_Auxiliary_Notification_Emergency data types, which contain all notification events regardless of status. +func (r Auxiliary_Notification_Emergency) GetAllObjects() (resp []datatypes.Auxiliary_Notification_Emergency, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Notification_Emergency", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve an array of SoftLayer_Auxiliary_Notification_Emergency data types, which contain all current notification events. +func (r Auxiliary_Notification_Emergency) GetCurrentNotifications() (resp []datatypes.Auxiliary_Notification_Emergency, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Notification_Emergency", "getCurrentNotifications", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Auxiliary_Notification_Emergency object, it can be used to check for current notifications being broadcast by SoftLayer. +func (r Auxiliary_Notification_Emergency) GetObject() (resp datatypes.Auxiliary_Notification_Emergency, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Notification_Emergency", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The signature of the SoftLayer employee department associated with this notification. 
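GetNetworkStatus above takes one of the listed target names as a string pointer; the sketch below builds it with the sl helper package and assumes the earlier session setup, with "ALL" taken from the target list:

readings, err := services.GetAuxiliaryNetworkStatusService(sess).
	GetNetworkStatus(sl.String("ALL"))
if err != nil {
	log.Fatal(err)
}
for _, reading := range readings {
	fmt.Printf("%+v\n", reading)
}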
+func (r Auxiliary_Notification_Emergency) GetSignature() (resp datatypes.Auxiliary_Notification_Emergency_Signature, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Notification_Emergency", "getSignature", nil, &r.Options, &resp) + return +} + +// Retrieve The status of this notification. +func (r Auxiliary_Notification_Emergency) GetStatus() (resp datatypes.Auxiliary_Notification_Emergency_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Notification_Emergency", "getStatus", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Press_Release struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryPressReleaseService returns an instance of the Auxiliary_Press_Release SoftLayer service +func GetAuxiliaryPressReleaseService(sess *session.Session) Auxiliary_Press_Release { + return Auxiliary_Press_Release{Session: sess} +} + +func (r Auxiliary_Press_Release) Id(id int) Auxiliary_Press_Release { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Press_Release) Mask(mask string) Auxiliary_Press_Release { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Press_Release) Filter(filter string) Auxiliary_Press_Release { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Press_Release) Limit(limit int) Auxiliary_Press_Release { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Press_Release) Offset(offset int) Auxiliary_Press_Release { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Auxiliary_Press_Release) GetAbout() (resp []datatypes.Auxiliary_Press_Release_About_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getAbout", nil, &r.Options, &resp) + return +} + +// Retrieve an array of SoftLayer_Auxiliary_Press_Release data types, which contain all press releases. +func (r Auxiliary_Press_Release) GetAllObjects() (resp []datatypes.Auxiliary_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Auxiliary_Press_Release) GetContacts() (resp []datatypes.Auxiliary_Press_Release_Contact_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getContacts", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Auxiliary_Press_Release) GetMediaPartners() (resp []datatypes.Auxiliary_Press_Release_Media_Partner_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getMediaPartners", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Auxiliary_Press_Release object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service. +func (r Auxiliary_Press_Release) GetObject() (resp datatypes.Auxiliary_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Auxiliary_Press_Release) GetPressReleaseContent() (resp datatypes.Auxiliary_Press_Release_Content, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getPressReleaseContent", nil, &r.Options, &resp) + return +} + +// Retrieve an array of SoftLayer_Auxiliary_Press_Release data types, which contain all press releases. 
+func (r Auxiliary_Press_Release) GetRenderedPressRelease() (resp []datatypes.Auxiliary_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getRenderedPressRelease", nil, &r.Options, &resp) + return +} + +// Retrieve an array of SoftLayer_Auxiliary_Press_Release data types, which contain all press releases for a given year and or result limit. +func (r Auxiliary_Press_Release) GetRenderedPressReleases(resultLimit *string, year *string) (resp []datatypes.Auxiliary_Press_Release, err error) { + params := []interface{}{ + resultLimit, + year, + } + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getRenderedPressReleases", params, &r.Options, &resp) + return +} + +// Retrieve an array of SoftLayer_Auxiliary_Press_Release data types, which have the website highlight flag set. +func (r Auxiliary_Press_Release) GetWebsiteHighlightPressReleases() (resp []datatypes.Auxiliary_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getWebsiteHighlightPressReleases", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Press_Release_About struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryPressReleaseAboutService returns an instance of the Auxiliary_Press_Release_About SoftLayer service +func GetAuxiliaryPressReleaseAboutService(sess *session.Session) Auxiliary_Press_Release_About { + return Auxiliary_Press_Release_About{Session: sess} +} + +func (r Auxiliary_Press_Release_About) Id(id int) Auxiliary_Press_Release_About { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Press_Release_About) Mask(mask string) Auxiliary_Press_Release_About { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Press_Release_About) Filter(filter string) Auxiliary_Press_Release_About { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Press_Release_About) Limit(limit int) Auxiliary_Press_Release_About { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Press_Release_About) Offset(offset int) Auxiliary_Press_Release_About { + r.Options.Offset = &offset + return r +} + +// getObject retrieves the SoftLayer_Auxiliary_Press_Release_About object whose about id number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service. 
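GetRenderedPressReleases above takes both its result limit and year as string pointers; a brief sketch, assuming the earlier session setup, with placeholder values whose expected formats should be confirmed against the SoftLayer API documentation:

releases, err := services.GetAuxiliaryPressReleaseService(sess).
	GetRenderedPressReleases(sl.String("10"), sl.String("2016"))
if err != nil {
	log.Fatal(err)
}
fmt.Printf("fetched %d press releases\n", len(releases))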
+func (r Auxiliary_Press_Release_About) GetObject() (resp datatypes.Auxiliary_Press_Release_About, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_About", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Press_Release_About_Press_Release struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryPressReleaseAboutPressReleaseService returns an instance of the Auxiliary_Press_Release_About_Press_Release SoftLayer service +func GetAuxiliaryPressReleaseAboutPressReleaseService(sess *session.Session) Auxiliary_Press_Release_About_Press_Release { + return Auxiliary_Press_Release_About_Press_Release{Session: sess} +} + +func (r Auxiliary_Press_Release_About_Press_Release) Id(id int) Auxiliary_Press_Release_About_Press_Release { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Press_Release_About_Press_Release) Mask(mask string) Auxiliary_Press_Release_About_Press_Release { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Press_Release_About_Press_Release) Filter(filter string) Auxiliary_Press_Release_About_Press_Release { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Press_Release_About_Press_Release) Limit(limit int) Auxiliary_Press_Release_About_Press_Release { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Press_Release_About_Press_Release) Offset(offset int) Auxiliary_Press_Release_About_Press_Release { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Auxiliary_Press_Release_About_Press_Release) GetAboutParagraphs() (resp []datatypes.Auxiliary_Press_Release_About, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_About_Press_Release", "getAboutParagraphs", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Auxiliary_Press_Release_About_Press_Release object whose contact id number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service. 
+func (r Auxiliary_Press_Release_About_Press_Release) GetObject() (resp datatypes.Auxiliary_Press_Release_About_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_About_Press_Release", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Auxiliary_Press_Release_About_Press_Release) GetPressReleases() (resp []datatypes.Auxiliary_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_About_Press_Release", "getPressReleases", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Press_Release_Contact struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryPressReleaseContactService returns an instance of the Auxiliary_Press_Release_Contact SoftLayer service +func GetAuxiliaryPressReleaseContactService(sess *session.Session) Auxiliary_Press_Release_Contact { + return Auxiliary_Press_Release_Contact{Session: sess} +} + +func (r Auxiliary_Press_Release_Contact) Id(id int) Auxiliary_Press_Release_Contact { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Press_Release_Contact) Mask(mask string) Auxiliary_Press_Release_Contact { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Press_Release_Contact) Filter(filter string) Auxiliary_Press_Release_Contact { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Press_Release_Contact) Limit(limit int) Auxiliary_Press_Release_Contact { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Press_Release_Contact) Offset(offset int) Auxiliary_Press_Release_Contact { + r.Options.Offset = &offset + return r +} + +// getObject retrieves the SoftLayer_Auxiliary_Press_Release_Contact object whose contact id number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service. 
+func (r Auxiliary_Press_Release_Contact) GetObject() (resp datatypes.Auxiliary_Press_Release_Contact, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Contact", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Press_Release_Contact_Press_Release struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryPressReleaseContactPressReleaseService returns an instance of the Auxiliary_Press_Release_Contact_Press_Release SoftLayer service +func GetAuxiliaryPressReleaseContactPressReleaseService(sess *session.Session) Auxiliary_Press_Release_Contact_Press_Release { + return Auxiliary_Press_Release_Contact_Press_Release{Session: sess} +} + +func (r Auxiliary_Press_Release_Contact_Press_Release) Id(id int) Auxiliary_Press_Release_Contact_Press_Release { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Press_Release_Contact_Press_Release) Mask(mask string) Auxiliary_Press_Release_Contact_Press_Release { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Press_Release_Contact_Press_Release) Filter(filter string) Auxiliary_Press_Release_Contact_Press_Release { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Press_Release_Contact_Press_Release) Limit(limit int) Auxiliary_Press_Release_Contact_Press_Release { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Press_Release_Contact_Press_Release) Offset(offset int) Auxiliary_Press_Release_Contact_Press_Release { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Auxiliary_Press_Release_Contact_Press_Release) GetContacts() (resp []datatypes.Auxiliary_Press_Release_Contact, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Contact_Press_Release", "getContacts", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Auxiliary_Press_Release_Contact object whose contact id number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service. 
+func (r Auxiliary_Press_Release_Contact_Press_Release) GetObject() (resp datatypes.Auxiliary_Press_Release_Contact_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Contact_Press_Release", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Auxiliary_Press_Release_Contact_Press_Release) GetPressReleases() (resp []datatypes.Auxiliary_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Contact_Press_Release", "getPressReleases", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Press_Release_Content struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryPressReleaseContentService returns an instance of the Auxiliary_Press_Release_Content SoftLayer service +func GetAuxiliaryPressReleaseContentService(sess *session.Session) Auxiliary_Press_Release_Content { + return Auxiliary_Press_Release_Content{Session: sess} +} + +func (r Auxiliary_Press_Release_Content) Id(id int) Auxiliary_Press_Release_Content { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Press_Release_Content) Mask(mask string) Auxiliary_Press_Release_Content { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Press_Release_Content) Filter(filter string) Auxiliary_Press_Release_Content { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Press_Release_Content) Limit(limit int) Auxiliary_Press_Release_Content { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Press_Release_Content) Offset(offset int) Auxiliary_Press_Release_Content { + r.Options.Offset = &offset + return r +} + +// getObject retrieves the SoftLayer_Auxiliary_Press_Release_Content object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service. 
+func (r Auxiliary_Press_Release_Content) GetObject() (resp datatypes.Auxiliary_Press_Release_Content, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Content", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Press_Release_Media_Partner struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryPressReleaseMediaPartnerService returns an instance of the Auxiliary_Press_Release_Media_Partner SoftLayer service +func GetAuxiliaryPressReleaseMediaPartnerService(sess *session.Session) Auxiliary_Press_Release_Media_Partner { + return Auxiliary_Press_Release_Media_Partner{Session: sess} +} + +func (r Auxiliary_Press_Release_Media_Partner) Id(id int) Auxiliary_Press_Release_Media_Partner { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Press_Release_Media_Partner) Mask(mask string) Auxiliary_Press_Release_Media_Partner { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Press_Release_Media_Partner) Filter(filter string) Auxiliary_Press_Release_Media_Partner { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Press_Release_Media_Partner) Limit(limit int) Auxiliary_Press_Release_Media_Partner { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Press_Release_Media_Partner) Offset(offset int) Auxiliary_Press_Release_Media_Partner { + r.Options.Offset = &offset + return r +} + +// getObject retrieves the SoftLayer_Auxiliary_Press_Release_Contact object whose contact id number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service. +func (r Auxiliary_Press_Release_Media_Partner) GetObject() (resp datatypes.Auxiliary_Press_Release_Media_Partner, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Media_Partner", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Press_Release_Media_Partner_Press_Release struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryPressReleaseMediaPartnerPressReleaseService returns an instance of the Auxiliary_Press_Release_Media_Partner_Press_Release SoftLayer service +func GetAuxiliaryPressReleaseMediaPartnerPressReleaseService(sess *session.Session) Auxiliary_Press_Release_Media_Partner_Press_Release { + return Auxiliary_Press_Release_Media_Partner_Press_Release{Session: sess} +} + +func (r Auxiliary_Press_Release_Media_Partner_Press_Release) Id(id int) Auxiliary_Press_Release_Media_Partner_Press_Release { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Press_Release_Media_Partner_Press_Release) Mask(mask string) Auxiliary_Press_Release_Media_Partner_Press_Release { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Press_Release_Media_Partner_Press_Release) Filter(filter string) Auxiliary_Press_Release_Media_Partner_Press_Release { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Press_Release_Media_Partner_Press_Release) Limit(limit int) Auxiliary_Press_Release_Media_Partner_Press_Release { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Press_Release_Media_Partner_Press_Release) Offset(offset int) Auxiliary_Press_Release_Media_Partner_Press_Release { + 
r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Auxiliary_Press_Release_Media_Partner_Press_Release) GetMediaPartners() (resp []datatypes.Auxiliary_Press_Release_Media_Partner, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Media_Partner_Press_Release", "getMediaPartners", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Auxiliary_Press_Release_Media_Partner_Press_Release object whose media partner id number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service. +func (r Auxiliary_Press_Release_Media_Partner_Press_Release) GetObject() (resp datatypes.Auxiliary_Press_Release_Media_Partner_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Media_Partner_Press_Release", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Auxiliary_Press_Release_Media_Partner_Press_Release) GetPressReleases() (resp []datatypes.Auxiliary_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Media_Partner_Press_Release", "getPressReleases", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Shipping_Courier_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryShippingCourierTypeService returns an instance of the Auxiliary_Shipping_Courier_Type SoftLayer service +func GetAuxiliaryShippingCourierTypeService(sess *session.Session) Auxiliary_Shipping_Courier_Type { + return Auxiliary_Shipping_Courier_Type{Session: sess} +} + +func (r Auxiliary_Shipping_Courier_Type) Id(id int) Auxiliary_Shipping_Courier_Type { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Shipping_Courier_Type) Mask(mask string) Auxiliary_Shipping_Courier_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Shipping_Courier_Type) Filter(filter string) Auxiliary_Shipping_Courier_Type { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Shipping_Courier_Type) Limit(limit int) Auxiliary_Shipping_Courier_Type { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Shipping_Courier_Type) Offset(offset int) Auxiliary_Shipping_Courier_Type { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Auxiliary_Shipping_Courier_Type) GetCourier() (resp []datatypes.Auxiliary_Shipping_Courier, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Shipping_Courier_Type", "getCourier", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Auxiliary_Shipping_Courier_Type) GetObject() (resp datatypes.Auxiliary_Shipping_Courier_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Shipping_Courier_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Auxiliary_Shipping_Courier_Type) GetTypeByKeyName(keyName *string) (resp datatypes.Auxiliary_Shipping_Courier_Type, err error) { + params := []interface{}{ + keyName, + } + err = r.Session.DoRequest("SoftLayer_Auxiliary_Shipping_Courier_Type", "getTypeByKeyName", params, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/billing.go b/vendor/github.com/softlayer/softlayer-go/services/billing.go new file mode 100644 index 000000000000..bb334b3f8ac2 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/billing.go 
@@ -0,0 +1,2794 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Billing_Currency struct { + Session *session.Session + Options sl.Options +} + +// GetBillingCurrencyService returns an instance of the Billing_Currency SoftLayer service +func GetBillingCurrencyService(sess *session.Session) Billing_Currency { + return Billing_Currency{Session: sess} +} + +func (r Billing_Currency) Id(id int) Billing_Currency { + r.Options.Id = &id + return r +} + +func (r Billing_Currency) Mask(mask string) Billing_Currency { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Currency) Filter(filter string) Billing_Currency { + r.Options.Filter = filter + return r +} + +func (r Billing_Currency) Limit(limit int) Billing_Currency { + r.Options.Limit = &limit + return r +} + +func (r Billing_Currency) Offset(offset int) Billing_Currency { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Billing_Currency) GetAllObjects() (resp []datatypes.Billing_Currency, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The current exchange rate +func (r Billing_Currency) GetCurrentExchangeRate() (resp datatypes.Billing_Currency_ExchangeRate, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency", "getCurrentExchangeRate", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Currency) GetObject() (resp datatypes.Billing_Currency, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Currency) GetPrice(price *datatypes.Float64, formatOptions *datatypes.Container_Billing_Currency_Format) (resp string, err error) { + params := []interface{}{ + price, + formatOptions, + } + err = r.Session.DoRequest("SoftLayer_Billing_Currency", "getPrice", params, &r.Options, &resp) + return +} + +// The SoftLayer_Billing_Currency_Country data type maps what currencies are valid for specific countries. US Dollars are valid from any country, but other currencies are only available to customers in certain countries. 
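A short sketch against the currency service defined above, assuming the earlier session setup; listing currencies and reading the current exchange rate are both parameterless calls:

currencySvc := services.GetBillingCurrencyService(sess)

currencies, err := currencySvc.GetAllObjects()
if err != nil {
	log.Fatal(err)
}
fmt.Printf("%d currencies available\n", len(currencies))

rate, err := currencySvc.GetCurrentExchangeRate()
if err != nil {
	log.Fatal(err)
}
fmt.Printf("current exchange rate: %+v\n", rate)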
+type Billing_Currency_Country struct { + Session *session.Session + Options sl.Options +} + +// GetBillingCurrencyCountryService returns an instance of the Billing_Currency_Country SoftLayer service +func GetBillingCurrencyCountryService(sess *session.Session) Billing_Currency_Country { + return Billing_Currency_Country{Session: sess} +} + +func (r Billing_Currency_Country) Id(id int) Billing_Currency_Country { + r.Options.Id = &id + return r +} + +func (r Billing_Currency_Country) Mask(mask string) Billing_Currency_Country { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Currency_Country) Filter(filter string) Billing_Currency_Country { + r.Options.Filter = filter + return r +} + +func (r Billing_Currency_Country) Limit(limit int) Billing_Currency_Country { + r.Options.Limit = &limit + return r +} + +func (r Billing_Currency_Country) Offset(offset int) Billing_Currency_Country { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Billing_Currency_Country) GetCountriesWithListOfEligibleCurrencies() (resp []datatypes.Container_Billing_Currency_Country, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency_Country", "getCountriesWithListOfEligibleCurrencies", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Currency_Country) GetObject() (resp datatypes.Billing_Currency_Country, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency_Country", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Billing_Currency_ExchangeRate struct { + Session *session.Session + Options sl.Options +} + +// GetBillingCurrencyExchangeRateService returns an instance of the Billing_Currency_ExchangeRate SoftLayer service +func GetBillingCurrencyExchangeRateService(sess *session.Session) Billing_Currency_ExchangeRate { + return Billing_Currency_ExchangeRate{Session: sess} +} + +func (r Billing_Currency_ExchangeRate) Id(id int) Billing_Currency_ExchangeRate { + r.Options.Id = &id + return r +} + +func (r Billing_Currency_ExchangeRate) Mask(mask string) Billing_Currency_ExchangeRate { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Currency_ExchangeRate) Filter(filter string) Billing_Currency_ExchangeRate { + r.Options.Filter = filter + return r +} + +func (r Billing_Currency_ExchangeRate) Limit(limit int) Billing_Currency_ExchangeRate { + r.Options.Limit = &limit + return r +} + +func (r Billing_Currency_ExchangeRate) Offset(offset int) Billing_Currency_ExchangeRate { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Billing_Currency_ExchangeRate) GetAllCurrencyExchangeRates(stringDate *string) (resp []datatypes.Billing_Currency_ExchangeRate, err error) { + params := []interface{}{ + stringDate, + } + err = r.Session.DoRequest("SoftLayer_Billing_Currency_ExchangeRate", "getAllCurrencyExchangeRates", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Currency_ExchangeRate) GetCurrencies() (resp []datatypes.Billing_Currency, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency_ExchangeRate", "getCurrencies", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Currency_ExchangeRate) 
GetExchangeRate(to *string, from *string, effectiveDate *datatypes.Time) (resp datatypes.Billing_Currency_ExchangeRate, err error) { + params := []interface{}{ + to, + from, + effectiveDate, + } + err = r.Session.DoRequest("SoftLayer_Billing_Currency_ExchangeRate", "getExchangeRate", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Currency_ExchangeRate) GetFundingCurrency() (resp datatypes.Billing_Currency, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency_ExchangeRate", "getFundingCurrency", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Currency_ExchangeRate) GetLocalCurrency() (resp datatypes.Billing_Currency, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency_ExchangeRate", "getLocalCurrency", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Currency_ExchangeRate) GetObject() (resp datatypes.Billing_Currency_ExchangeRate, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency_ExchangeRate", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Currency_ExchangeRate) GetPrice(price *datatypes.Float64, formatOptions *datatypes.Container_Billing_Currency_Format) (resp string, err error) { + params := []interface{}{ + price, + formatOptions, + } + err = r.Session.DoRequest("SoftLayer_Billing_Currency_ExchangeRate", "getPrice", params, &r.Options, &resp) + return +} + +// Every SoftLayer customer account has billing specific information which is kept in the SoftLayer_Billing_Info data type. This information is used by the SoftLayer accounting group when sending invoices and making billing inquiries. +type Billing_Info struct { + Session *session.Session + Options sl.Options +} + +// GetBillingInfoService returns an instance of the Billing_Info SoftLayer service +func GetBillingInfoService(sess *session.Session) Billing_Info { + return Billing_Info{Session: sess} +} + +func (r Billing_Info) Id(id int) Billing_Info { + r.Options.Id = &id + return r +} + +func (r Billing_Info) Mask(mask string) Billing_Info { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Info) Filter(filter string) Billing_Info { + r.Options.Filter = filter + return r +} + +func (r Billing_Info) Limit(limit int) Billing_Info { + r.Options.Limit = &limit + return r +} + +func (r Billing_Info) Offset(offset int) Billing_Info { + r.Options.Offset = &offset + return r +} + +// Retrieve The SoftLayer customer account associated with this billing information. +func (r Billing_Info) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Info", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Info) GetAchInformation() (resp []datatypes.Billing_Info_Ach, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Info", "getAchInformation", nil, &r.Options, &resp) + return +} + +// Retrieve Currency to be used by this customer account. +func (r Billing_Info) GetCurrency() (resp datatypes.Billing_Currency, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Info", "getCurrency", nil, &r.Options, &resp) + return +} + +// Retrieve Information related to an account's current and previous billing cycles. 
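GetExchangeRate above takes every argument as a pointer, which is typical for these generated methods. A short sketch of how a caller would pass literals through addressable locals; treating a nil effectiveDate as "use the current rate" is an assumption about the SoftLayer API rather than something this file states.

    package main

    import (
        "fmt"
        "log"

        "github.com/softlayer/softlayer-go/services"
        "github.com/softlayer/softlayer-go/session"
    )

    func main() {
        sess := session.New() // credentials resolved from the environment

        // Pointer parameters are filled from addressable locals.
        to, from := "EUR", "USD"
        rate, err := services.GetBillingCurrencyExchangeRateService(sess).
            GetExchangeRate(&to, &from, nil) // nil effectiveDate: assumed "current"
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%+v\n", rate)
    }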
+func (r Billing_Info) GetCurrentBillingCycle() (resp datatypes.Billing_Info_Cycle, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Info", "getCurrentBillingCycle", nil, &r.Options, &resp) + return +} + +// Retrieve The date on which an account was last billed. +func (r Billing_Info) GetLastBillDate() (resp datatypes.Time, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Info", "getLastBillDate", nil, &r.Options, &resp) + return +} + +// Retrieve The date on which an account will be billed next. +func (r Billing_Info) GetNextBillDate() (resp datatypes.Time, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Info", "getNextBillDate", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Billing_Info object whose data corresponds to the account to which your portal user is tied. +func (r Billing_Info) GetObject() (resp datatypes.Billing_Info, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Info", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Billing_Invoice data type contains general information relating to an individual invoice applied to a SoftLayer customer account. Personal information in this type such as names, addresses, and phone numbers are taken from the account's contact information at the time the invoice is generated. +type Billing_Invoice struct { + Session *session.Session + Options sl.Options +} + +// GetBillingInvoiceService returns an instance of the Billing_Invoice SoftLayer service +func GetBillingInvoiceService(sess *session.Session) Billing_Invoice { + return Billing_Invoice{Session: sess} +} + +func (r Billing_Invoice) Id(id int) Billing_Invoice { + r.Options.Id = &id + return r +} + +func (r Billing_Invoice) Mask(mask string) Billing_Invoice { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Invoice) Filter(filter string) Billing_Invoice { + r.Options.Filter = filter + return r +} + +func (r Billing_Invoice) Limit(limit int) Billing_Invoice { + r.Options.Limit = &limit + return r +} + +func (r Billing_Invoice) Offset(offset int) Billing_Invoice { + r.Options.Offset = &offset + return r +} + +// Create a transaction to email PDF and/or Excel invoice links to the requesting user's email address. You must have a PDF reader installed in order to view these files. +func (r Billing_Invoice) EmailInvoices(options *datatypes.Container_Billing_Invoice_Email) (err error) { + var resp datatypes.Void + params := []interface{}{ + options, + } + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "emailInvoices", params, &r.Options, &resp) + return +} + +// Retrieve The account that an invoice belongs to. +func (r Billing_Invoice) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve This is the amount of this invoice. +func (r Billing_Invoice) GetAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getAmount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Invoice) GetBrandAtInvoiceCreation() (resp datatypes.Brand, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getBrandAtInvoiceCreation", nil, &r.Options, &resp) + return +} + +// Retrieve A flag that will reflect whether the detailed version of the pdf has been generated. 
+func (r Billing_Invoice) GetDetailedPdfGeneratedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getDetailedPdfGeneratedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve a Microsoft Excel spreadsheet of a SoftLayer invoice. You must have a Microsoft Excel reader installed in order to view these invoice files. +func (r Billing_Invoice) GetExcel() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getExcel", nil, &r.Options, &resp) + return +} + +// Retrieve A list of top-level invoice items that are on the currently pending invoice. +func (r Billing_Invoice) GetInvoiceTopLevelItems() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getInvoiceTopLevelItems", nil, &r.Options, &resp) + return +} + +// Retrieve The total amount of this invoice. +func (r Billing_Invoice) GetInvoiceTotalAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getInvoiceTotalAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total one-time charges for this invoice. This is the sum of one-time charges + setup fees + labor fees. This does not include taxes. +func (r Billing_Invoice) GetInvoiceTotalOneTimeAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getInvoiceTotalOneTimeAmount", nil, &r.Options, &resp) + return +} + +// Retrieve A sum of all the taxes related to one time charges for this invoice. +func (r Billing_Invoice) GetInvoiceTotalOneTimeTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getInvoiceTotalOneTimeTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total amount of this invoice. This does not include taxes. +func (r Billing_Invoice) GetInvoiceTotalPreTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getInvoiceTotalPreTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total Recurring amount of this invoice. This amount does not include taxes or one time charges. +func (r Billing_Invoice) GetInvoiceTotalRecurringAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getInvoiceTotalRecurringAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total amount of the recurring taxes on this invoice. +func (r Billing_Invoice) GetInvoiceTotalRecurringTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getInvoiceTotalRecurringTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The items that belong to this invoice. +func (r Billing_Invoice) GetItems() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getItems", nil, &r.Options, &resp) + return +} + +// Retrieve Exchange rate used for billing this invoice. +func (r Billing_Invoice) GetLocalCurrencyExchangeRate() (resp datatypes.Billing_Currency_ExchangeRate, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getLocalCurrencyExchangeRate", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Billing_Invoice object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Billing_Invoice service. You can only retrieve invoices that are assigned to your portal user's account. 
+func (r Billing_Invoice) GetObject() (resp datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve This is the total payment made on this invoice. +func (r Billing_Invoice) GetPayment() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPayment", nil, &r.Options, &resp) + return +} + +// Retrieve The payments for the invoice. +func (r Billing_Invoice) GetPayments() (resp []datatypes.Billing_Invoice_Receivable_Payment, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPayments", nil, &r.Options, &resp) + return +} + +// Retrieve a PDF record of a SoftLayer invoice. SoftLayer keeps PDF records of all closed invoices for customer retrieval from the portal and API. You must have a PDF reader installed in order to view these invoice files. +func (r Billing_Invoice) GetPdf() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPdf", nil, &r.Options, &resp) + return +} + +// Retrieve a PDF record of a SoftLayer detailed invoice summary. SoftLayer keeps PDF records of all closed invoices for customer retrieval from the portal and API. You must have a PDF reader installed in order to view these files. +func (r Billing_Invoice) GetPdfDetailed() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPdfDetailed", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Invoice) GetPdfDetailedFilename() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPdfDetailedFilename", nil, &r.Options, &resp) + return +} + +// Retrieve the size of a PDF record of a SoftLayer invoice. SoftLayer keeps PDF records of all closed invoices for customer retrieval from the portal and API. +func (r Billing_Invoice) GetPdfFileSize() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPdfFileSize", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Invoice) GetPdfFilename() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPdfFilename", nil, &r.Options, &resp) + return +} + +// Retrieve a Microsoft Excel record of a SoftLayer invoice. SoftLayer generates Microsoft Excel records of all closed invoices for customer retrieval from the portal and API. You must have a Microsoft Excel reader installed in order to view these invoice files. +func (r Billing_Invoice) GetPreliminaryExcel() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPreliminaryExcel", nil, &r.Options, &resp) + return +} + +// Retrieve a PDF record of a SoftLayer invoice. SoftLayer keeps PDF records of all closed invoices for customer retrieval from the portal and API. You must have a PDF reader installed in order to view these invoice files. +func (r Billing_Invoice) GetPreliminaryPdf() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPreliminaryPdf", nil, &r.Options, &resp) + return +} + +// Retrieve a PDF record of the detailed version of a SoftLayer invoice. SoftLayer keeps PDF records of all closed invoices for customer retrieval from the portal and API. 
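The document getters above (GetPdf, GetExcel, and their preliminary/detailed variants) operate on whichever invoice was selected with Id(), which becomes the init parameter of the call. A sketch that saves one invoice's PDF to disk; the invoice id is a placeholder, and it assumes the returned []byte is the decoded document, ready to write as-is.

    package main

    import (
        "io/ioutil"
        "log"

        "github.com/softlayer/softlayer-go/services"
        "github.com/softlayer/softlayer-go/session"
    )

    func main() {
        sess := session.New()

        // 12345678 is a placeholder invoice id.
        svc := services.GetBillingInvoiceService(sess).Id(12345678)

        pdf, err := svc.GetPdf()
        if err != nil {
            log.Fatal(err)
        }
        if err := ioutil.WriteFile("invoice.pdf", pdf, 0600); err != nil {
            log.Fatal(err)
        }
    }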
+func (r Billing_Invoice) GetPreliminaryPdfDetailed() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPreliminaryPdfDetailed", nil, &r.Options, &resp) + return +} + +// Retrieve This is the seller's tax registration. +func (r Billing_Invoice) GetSellerRegistration() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getSellerRegistration", nil, &r.Options, &resp) + return +} + +// Retrieve This is the tax information that applies to tax auditing. This is the official tax record for this invoice. +func (r Billing_Invoice) GetTaxInfo() (resp datatypes.Billing_Invoice_Tax_Info, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getTaxInfo", nil, &r.Options, &resp) + return +} + +// Retrieve This is the set of tax information for any tax calculation for this invoice. Note that not all of these are necessarily official, so use the taxInfo key to get the final information. +func (r Billing_Invoice) GetTaxInfoHistory() (resp []datatypes.Billing_Invoice_Tax_Info, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getTaxInfoHistory", nil, &r.Options, &resp) + return +} + +// Retrieve This is a message explaining the tax treatment for this invoice. +func (r Billing_Invoice) GetTaxMessage() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getTaxMessage", nil, &r.Options, &resp) + return +} + +// Retrieve This is the strategy used to calculate tax on this invoice. +func (r Billing_Invoice) GetTaxType() (resp datatypes.Billing_Invoice_Tax_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getTaxType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Invoice) GetXlsFilename() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getXlsFilename", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Invoice) GetZeroFeeItemCounts() (resp []datatypes.Container_Product_Item_Category_ZeroFee_Count, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getZeroFeeItemCounts", nil, &r.Options, &resp) + return +} + +// Each billing invoice item makes up a record within an invoice. This provides you with a detailed record of everything related to an invoice item. When you are billed, our system takes active billing items and creates an invoice. These invoice items are a copy of your active billing items, and make up the contents of your invoice. 
+type Billing_Invoice_Item struct { + Session *session.Session + Options sl.Options +} + +// GetBillingInvoiceItemService returns an instance of the Billing_Invoice_Item SoftLayer service +func GetBillingInvoiceItemService(sess *session.Session) Billing_Invoice_Item { + return Billing_Invoice_Item{Session: sess} +} + +func (r Billing_Invoice_Item) Id(id int) Billing_Invoice_Item { + r.Options.Id = &id + return r +} + +func (r Billing_Invoice_Item) Mask(mask string) Billing_Invoice_Item { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Invoice_Item) Filter(filter string) Billing_Invoice_Item { + r.Options.Filter = filter + return r +} + +func (r Billing_Invoice_Item) Limit(limit int) Billing_Invoice_Item { + r.Options.Limit = &limit + return r +} + +func (r Billing_Invoice_Item) Offset(offset int) Billing_Invoice_Item { + r.Options.Offset = &offset + return r +} + +// Retrieve An Invoice Item's associated child invoice items. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. +func (r Billing_Invoice_Item) GetAssociatedChildren() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getAssociatedChildren", nil, &r.Options, &resp) + return +} + +// Retrieve An Invoice Item's associated invoice item. If this is populated, it means this is an orphaned invoice item, but logically belongs to the associated invoice item. +func (r Billing_Invoice_Item) GetAssociatedInvoiceItem() (resp datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getAssociatedInvoiceItem", nil, &r.Options, &resp) + return +} + +// Retrieve An Invoice Item's billing item, from which this item was generated. +func (r Billing_Invoice_Item) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve This invoice item's "item category". +func (r Billing_Invoice_Item) GetCategory() (resp datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getCategory", nil, &r.Options, &resp) + return +} + +// Retrieve An Invoice Item's child invoice items. Only parent invoice items have children. For instance, a server invoice item will have children. +func (r Billing_Invoice_Item) GetChildren() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getChildren", nil, &r.Options, &resp) + return +} + +// Retrieve An Invoice Item's associated child invoice items, excluding some items with a $0.00 recurring fee. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. +func (r Billing_Invoice_Item) GetFilteredAssociatedChildren() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getFilteredAssociatedChildren", nil, &r.Options, &resp) + return +} + +// Retrieve Indicating whether this invoice item is billed on an hourly basis. 
+func (r Billing_Invoice_Item) GetHourlyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getHourlyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The invoice to which this item belongs. +func (r Billing_Invoice_Item) GetInvoice() (resp datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getInvoice", nil, &r.Options, &resp) + return +} + +// Retrieve An invoice item's location, if one exists.' +func (r Billing_Invoice_Item) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve An Invoice Item's associated child invoice items, excluding ALL items with a $0.00 recurring fee. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. +func (r Billing_Invoice_Item) GetNonZeroAssociatedChildren() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getNonZeroAssociatedChildren", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Billing_Invoice_Item object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Billing_Invoice_Item service. You can only retrieve the items tied to the account that your portal user is assigned to. +func (r Billing_Invoice_Item) GetObject() (resp datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Every item tied to a server should have a parent invoice item which is the server line item. This is how we associate items to a server. +func (r Billing_Invoice_Item) GetParent() (resp datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getParent", nil, &r.Options, &resp) + return +} + +// Retrieve The entry in the product catalog that a invoice item is based upon. +func (r Billing_Invoice_Item) GetProduct() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getProduct", nil, &r.Options, &resp) + return +} + +// Retrieve A string representing the name of parent level product group of an invoice item. +func (r Billing_Invoice_Item) GetTopLevelProductGroupName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getTopLevelProductGroupName", nil, &r.Options, &resp) + return +} + +// Retrieve An invoice Item's total, including any child invoice items if they exist. +func (r Billing_Invoice_Item) GetTotalOneTimeAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getTotalOneTimeAmount", nil, &r.Options, &resp) + return +} + +// Retrieve An invoice Item's total, including any child invoice items if they exist. +func (r Billing_Invoice_Item) GetTotalOneTimeTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getTotalOneTimeTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve An invoice Item's total, including any child invoice items if they exist. 
+func (r Billing_Invoice_Item) GetTotalRecurringAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getTotalRecurringAmount", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's total, including any child billing items if they exist.' +func (r Billing_Invoice_Item) GetTotalRecurringTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getTotalRecurringTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve Indicating whether this invoice item is for the usage charge. +func (r Billing_Invoice_Item) GetUsageChargeFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getUsageChargeFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Billing_Invoice_Next struct { + Session *session.Session + Options sl.Options +} + +// GetBillingInvoiceNextService returns an instance of the Billing_Invoice_Next SoftLayer service +func GetBillingInvoiceNextService(sess *session.Session) Billing_Invoice_Next { + return Billing_Invoice_Next{Session: sess} +} + +func (r Billing_Invoice_Next) Id(id int) Billing_Invoice_Next { + r.Options.Id = &id + return r +} + +func (r Billing_Invoice_Next) Mask(mask string) Billing_Invoice_Next { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Invoice_Next) Filter(filter string) Billing_Invoice_Next { + r.Options.Filter = filter + return r +} + +func (r Billing_Invoice_Next) Limit(limit int) Billing_Invoice_Next { + r.Options.Limit = &limit + return r +} + +func (r Billing_Invoice_Next) Offset(offset int) Billing_Invoice_Next { + r.Options.Offset = &offset + return r +} + +// Return an account's next invoice in a Microsoft excel format. +func (r Billing_Invoice_Next) GetExcel(documentCreateDate *datatypes.Time) (resp []byte, err error) { + params := []interface{}{ + documentCreateDate, + } + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Next", "getExcel", params, &r.Options, &resp) + return +} + +// Return an account's next invoice in PDF format. +func (r Billing_Invoice_Next) GetPdf(documentCreateDate *datatypes.Time) (resp []byte, err error) { + params := []interface{}{ + documentCreateDate, + } + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Next", "getPdf", params, &r.Options, &resp) + return +} + +// Return an account's next invoice detailed portion in PDF format. +func (r Billing_Invoice_Next) GetPdfDetailed(documentCreateDate *datatypes.Time) (resp []byte, err error) { + params := []interface{}{ + documentCreateDate, + } + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Next", "getPdfDetailed", params, &r.Options, &resp) + return +} + +// The invoice tax status data type models a single status or state that an invoice can reflect in regard to an integration with a third-party tax calculation service. 
+type Billing_Invoice_Tax_Status struct { + Session *session.Session + Options sl.Options +} + +// GetBillingInvoiceTaxStatusService returns an instance of the Billing_Invoice_Tax_Status SoftLayer service +func GetBillingInvoiceTaxStatusService(sess *session.Session) Billing_Invoice_Tax_Status { + return Billing_Invoice_Tax_Status{Session: sess} +} + +func (r Billing_Invoice_Tax_Status) Id(id int) Billing_Invoice_Tax_Status { + r.Options.Id = &id + return r +} + +func (r Billing_Invoice_Tax_Status) Mask(mask string) Billing_Invoice_Tax_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Invoice_Tax_Status) Filter(filter string) Billing_Invoice_Tax_Status { + r.Options.Filter = filter + return r +} + +func (r Billing_Invoice_Tax_Status) Limit(limit int) Billing_Invoice_Tax_Status { + r.Options.Limit = &limit + return r +} + +func (r Billing_Invoice_Tax_Status) Offset(offset int) Billing_Invoice_Tax_Status { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Billing_Invoice_Tax_Status) GetAllObjects() (resp []datatypes.Billing_Invoice_Tax_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Tax_Status", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Invoice_Tax_Status) GetObject() (resp datatypes.Billing_Invoice_Tax_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Tax_Status", "getObject", nil, &r.Options, &resp) + return +} + +// The invoice tax type data type models a single strategy for handling tax calculations. +type Billing_Invoice_Tax_Type struct { + Session *session.Session + Options sl.Options +} + +// GetBillingInvoiceTaxTypeService returns an instance of the Billing_Invoice_Tax_Type SoftLayer service +func GetBillingInvoiceTaxTypeService(sess *session.Session) Billing_Invoice_Tax_Type { + return Billing_Invoice_Tax_Type{Session: sess} +} + +func (r Billing_Invoice_Tax_Type) Id(id int) Billing_Invoice_Tax_Type { + r.Options.Id = &id + return r +} + +func (r Billing_Invoice_Tax_Type) Mask(mask string) Billing_Invoice_Tax_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Invoice_Tax_Type) Filter(filter string) Billing_Invoice_Tax_Type { + r.Options.Filter = filter + return r +} + +func (r Billing_Invoice_Tax_Type) Limit(limit int) Billing_Invoice_Tax_Type { + r.Options.Limit = &limit + return r +} + +func (r Billing_Invoice_Tax_Type) Offset(offset int) Billing_Invoice_Tax_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Billing_Invoice_Tax_Type) GetAllObjects() (resp []datatypes.Billing_Invoice_Tax_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Tax_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Invoice_Tax_Type) GetObject() (resp datatypes.Billing_Invoice_Tax_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Tax_Type", "getObject", nil, &r.Options, &resp) + return +} + +// Every individual item that a SoftLayer customer is billed for is recorded in the SoftLayer_Billing_Item data type. 
Billing items range from server chassis to hard drives to control panels, bandwidth quota upgrades and port upgrade charges. Softlayer [[SoftLayer_Billing_Invoice|invoices]] are generated from the cost of a customer's billing items. Billing items are copied from the product catalog as they're ordered by customers to create a reference between an account and the billable items they own. +// +// Billing items exist in a tree relationship. Items are associated with each other by parent/child relationships. Component items such as CPU's, RAM, and software each have a parent billing item for the server chassis they're associated with. Billing Items with a null parent item do not have an associated parent item. +type Billing_Item struct { + Session *session.Session + Options sl.Options +} + +// GetBillingItemService returns an instance of the Billing_Item SoftLayer service +func GetBillingItemService(sess *session.Session) Billing_Item { + return Billing_Item{Session: sess} +} + +func (r Billing_Item) Id(id int) Billing_Item { + r.Options.Id = &id + return r +} + +func (r Billing_Item) Mask(mask string) Billing_Item { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Item) Filter(filter string) Billing_Item { + r.Options.Filter = filter + return r +} + +func (r Billing_Item) Limit(limit int) Billing_Item { + r.Options.Limit = &limit + return r +} + +func (r Billing_Item) Offset(offset int) Billing_Item { + r.Options.Offset = &offset + return r +} + +// Cancel the resource or service for a billing Item. By default the billing item will be cancelled immediately and reclaim of the resource will begin shortly. Setting the "cancelImmediately" property to false will delay the cancellation until the next bill date. +// +// +// * The reason parameter could be from the list below: +// * "No longer needed" +// * "Business closing down" +// * "Server / Upgrade Costs" +// * "Migrating to larger server" +// * "Migrating to smaller server" +// * "Migrating to a different SoftLayer datacenter" +// * "Network performance / latency" +// * "Support response / timing" +// * "Sales process / upgrades" +// * "Moving to competitor" +func (r Billing_Item) CancelItem(cancelImmediately *bool, cancelAssociatedBillingItems *bool, reason *string, customerNote *string) (resp bool, err error) { + params := []interface{}{ + cancelImmediately, + cancelAssociatedBillingItems, + reason, + customerNote, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item", "cancelItem", params, &r.Options, &resp) + return +} + +// Cancel the resource or service (excluding bare metal servers) for a billing Item. The billing item will be cancelled immediately and reclaim of the resource will begin shortly. +func (r Billing_Item) CancelService() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "cancelService", nil, &r.Options, &resp) + return +} + +// Cancel the resource or service for a billing Item +func (r Billing_Item) CancelServiceOnAnniversaryDate() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "cancelServiceOnAnniversaryDate", nil, &r.Options, &resp) + return +} + +// Retrieve The account that a billing item belongs to. 
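CancelItem above takes all four arguments as pointers, and the reason string is expected to be one of the fixed values listed in its comment. A hedged sketch of an immediate cancellation; the billing item id and the customer note are placeholders.

    package main

    import (
        "fmt"
        "log"

        "github.com/softlayer/softlayer-go/services"
        "github.com/softlayer/softlayer-go/session"
    )

    func main() {
        sess := session.New()

        // The reason must match one of the documented strings above.
        cancelImmediately := true
        cancelAssociated := false
        reason := "No longer needed"
        note := "cancelled from an API example"

        // 1234567 is a placeholder billing item id set via Id().
        ok, err := services.GetBillingItemService(sess).
            Id(1234567).
            CancelItem(&cancelImmediately, &cancelAssociated, &reason, &note)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("cancellation accepted:", ok)
    }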
+func (r Billing_Item) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item) GetActiveAgreement() (resp datatypes.Account_Agreement, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveAgreement", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the billing item is under an active agreement. +func (r Billing_Item) GetActiveAgreementFlag() (resp datatypes.Account_Agreement, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveAgreementFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's active associated child billing items. This includes "floating" items that are not necessarily child items of this billing item. +func (r Billing_Item) GetActiveAssociatedChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveAssociatedChildren", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item) GetActiveAssociatedGuestDiskBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveAssociatedGuestDiskBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's active bundled billing items. +func (r Billing_Item) GetActiveBundledItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveBundledItems", nil, &r.Options, &resp) + return +} + +// Retrieve A service cancellation request item that corresponds to the billing item. +func (r Billing_Item) GetActiveCancellationItem() (resp datatypes.Billing_Item_Cancellation_Request_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveCancellationItem", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's active child billing items. +func (r Billing_Item) GetActiveChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveChildren", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item) GetActiveFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item) GetActiveSparePoolAssociatedGuestDiskBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveSparePoolAssociatedGuestDiskBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's spare pool bundled billing items. +func (r Billing_Item) GetActiveSparePoolBundledItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveSparePoolBundledItems", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's associated parent. This is to be used for billing items that are "floating", and therefore are not child items of any parent billing item. If it is desired to associate an item to another, populate this with the SoftLayer_Billing_Item ID of that associated parent item. +func (r Billing_Item) GetAssociatedBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getAssociatedBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve A history of billing items which a billing item has been associated with. 
+func (r Billing_Item) GetAssociatedBillingItemHistory() (resp []datatypes.Billing_Item_Association_History, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getAssociatedBillingItemHistory", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's associated child billing items. This includes "floating" items that are not necessarily child billing items of this billing item. +func (r Billing_Item) GetAssociatedChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getAssociatedChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's associated parent billing item. This object will be the same as the parent billing item if parentId is set. +func (r Billing_Item) GetAssociatedParent() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getAssociatedParent", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item) GetAvailableMatchingVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getAvailableMatchingVlans", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allocation for a billing item. +func (r Billing_Item) GetBandwidthAllocation() (resp datatypes.Network_Bandwidth_Version1_Allocation, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's recurring child items that have once been billed and are scheduled to be billed in the future. +func (r Billing_Item) GetBillableChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getBillableChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's bundled billing items +func (r Billing_Item) GetBundleItems() (resp []datatypes.Product_Item_Bundles, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getBundleItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's bundled billing items' +func (r Billing_Item) GetBundledItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getBundledItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's active child billing items. +func (r Billing_Item) GetCanceledChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getCanceledChildren", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item's cancellation reason. +func (r Billing_Item) GetCancellationReason() (resp datatypes.Billing_Item_Cancellation_Reason, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getCancellationReason", nil, &r.Options, &resp) + return +} + +// Retrieve This will return any cancellation requests that are associated with this billing item. +func (r Billing_Item) GetCancellationRequests() (resp []datatypes.Billing_Item_Cancellation_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getCancellationRequests", nil, &r.Options, &resp) + return +} + +// Retrieve The item category to which the billing item's item belongs. 
+func (r Billing_Item) GetCategory() (resp datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getCategory", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's child billing items' +func (r Billing_Item) GetChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's active child billing items. +func (r Billing_Item) GetChildrenWithActiveAgreement() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getChildrenWithActiveAgreement", nil, &r.Options, &resp) + return +} + +// Retrieve For product items which have a downgrade path defined, this will return those product items. +func (r Billing_Item) GetDowngradeItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getDowngradeItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's associated child billing items, excluding some items with a $0.00 recurring fee. +func (r Billing_Item) GetFilteredNextInvoiceChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getFilteredNextInvoiceChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A flag that will reflect whether this billing item is billed on an hourly basis or not. +func (r Billing_Item) GetHourlyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getHourlyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Invoice items associated with this billing item +func (r Billing_Item) GetInvoiceItem() (resp datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getInvoiceItem", nil, &r.Options, &resp) + return +} + +// Retrieve All invoice items associated with the billing item +func (r Billing_Item) GetInvoiceItems() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getInvoiceItems", nil, &r.Options, &resp) + return +} + +// Retrieve The entry in the SoftLayer product catalog that a billing item is based upon. +func (r Billing_Item) GetItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getItem", nil, &r.Options, &resp) + return +} + +// Retrieve The location of the billing item. Some billing items have physical properties such as the server itself. For items such as these, we provide location information. +func (r Billing_Item) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's child billing items and associated items' +func (r Billing_Item) GetNextInvoiceChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getNextInvoiceChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's total, including any child billing items if they exist.' +func (r Billing_Item) GetNextInvoiceTotalOneTimeAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getNextInvoiceTotalOneTimeAmount", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's total, including any child billing items if they exist.' 
+func (r Billing_Item) GetNextInvoiceTotalOneTimeTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getNextInvoiceTotalOneTimeTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's total, including any child billing items and associated billing items if they exist.' +func (r Billing_Item) GetNextInvoiceTotalRecurringAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getNextInvoiceTotalRecurringAmount", nil, &r.Options, &resp) + return +} + +// Retrieve This is deprecated and will always be zero. Because tax is calculated in real-time, previewing the next recurring invoice is pre-tax only. +func (r Billing_Item) GetNextInvoiceTotalRecurringTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getNextInvoiceTotalRecurringTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's associated child billing items, excluding ALL items with a $0.00 recurring fee. +func (r Billing_Item) GetNonZeroNextInvoiceChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getNonZeroNextInvoiceChildren", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Billing_Item object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Billing_Item service. You can only retrieve billing items tied to the account that your portal user is assigned to. Billing items are an account's items of billable items. There are "parent" billing items and "child" billing items. The server billing item is generally referred to as a parent billing item. The items tied to a server, such as ram, harddrives, and operating systems are considered "child" billing items. +func (r Billing_Item) GetObject() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's original order item. Simply a reference to the original order from which this billing item was created. +func (r Billing_Item) GetOrderItem() (resp datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getOrderItem", nil, &r.Options, &resp) + return +} + +// Retrieve The original physical location for this billing item--may differ from current. +func (r Billing_Item) GetOriginalLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getOriginalLocation", nil, &r.Options, &resp) + return +} + +// Retrieve The package under which this billing item was sold. A Package is the general grouping of products as seen on our order forms. +func (r Billing_Item) GetPackage() (resp datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getPackage", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's parent item. If a billing item has no parent item then this value is null. +func (r Billing_Item) GetParent() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getParent", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's parent item. If a billing item has no parent item then this value is null. 
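Because billing items form a parent/child tree, a single GetObject call with an object mask can return the children alongside the parent instead of issuing one request per relation. A sketch under the assumption that the generated datatypes.Billing_Item exposes Id, Description and Children fields (the standard SoftLayer_Billing_Item properties); the item id is a placeholder.

    package main

    import (
        "fmt"
        "log"

        "github.com/softlayer/softlayer-go/services"
        "github.com/softlayer/softlayer-go/session"
    )

    func main() {
        sess := session.New()

        // The nested mask pulls each child's id and description in one call.
        item, err := services.GetBillingItemService(sess).
            Id(1234567).
            Mask("id,description,children[id,description]").
            GetObject()
        if err != nil {
            log.Fatal(err)
        }
        for _, child := range item.Children {
            if child.Id != nil && child.Description != nil {
                fmt.Println(*child.Id, *child.Description)
            }
        }
    }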
+func (r Billing_Item) GetParentVirtualGuestBillingItem() (resp datatypes.Billing_Item_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getParentVirtualGuestBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates whether a billing item is scheduled to be canceled or not. +func (r Billing_Item) GetPendingCancellationFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getPendingCancellationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The new order item that will replace this billing item. +func (r Billing_Item) GetPendingOrderItem() (resp datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getPendingOrderItem", nil, &r.Options, &resp) + return +} + +// Retrieve Provisioning transaction for this billing item +func (r Billing_Item) GetProvisionTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getProvisionTransaction", nil, &r.Options, &resp) + return +} + +// This service returns billing items of a specified category code. This service should be used to retrieve billing items that you wish to cancel. Some billing items can be canceled via [[SoftLayer_Security_Certificate_Request|service cancellation]] service. +// +// In order to find billing items for cancellation, use [[SoftLayer_Product_Item_Category::getValidCancelableServiceItemCategories|product categories]] service to retrieve category codes that are eligible for cancellation. +func (r Billing_Item) GetServiceBillingItemsByCategory(categoryCode *string, includeZeroRecurringFee *bool) (resp []datatypes.Billing_Item, err error) { + params := []interface{}{ + categoryCode, + includeZeroRecurringFee, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getServiceBillingItemsByCategory", params, &r.Options, &resp) + return +} + +// Retrieve A friendly description of software component +func (r Billing_Item) GetSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getSoftwareDescription", nil, &r.Options, &resp) + return +} + +// Retrieve Billing items whose product item has an upgrade path defined in our system will return the next product item in the upgrade path. +func (r Billing_Item) GetUpgradeItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getUpgradeItem", nil, &r.Options, &resp) + return +} + +// Retrieve Billing items whose product item has an upgrade path defined in our system will return all the product items in the upgrade path. +func (r Billing_Item) GetUpgradeItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getUpgradeItems", nil, &r.Options, &resp) + return +} + +// Remove the association from a billing item. +func (r Billing_Item) RemoveAssociationId() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "removeAssociationId", nil, &r.Options, &resp) + return +} + +// Set an associated billing item to an orphan billing item. Associations allow you to tie an "orphaned" billing item, any non-server billing item that doesn't have a parent item such as secondary IP subnets or StorageLayer accounts, to a server billing item. You may only set an association for an orphan to a server. 
You cannot associate a server to an orphan if either the server or orphan billing item has a cancellation date set. +func (r Billing_Item) SetAssociationId(associatedId *int) (resp bool, err error) { + params := []interface{}{ + associatedId, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item", "setAssociationId", params, &r.Options, &resp) + return +} + +// Void a previously made cancellation for a service +func (r Billing_Item) VoidCancelService() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "voidCancelService", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Billing_Item_Cancellation_Reason data type contains cancellation reasons. +type Billing_Item_Cancellation_Reason struct { + Session *session.Session + Options sl.Options +} + +// GetBillingItemCancellationReasonService returns an instance of the Billing_Item_Cancellation_Reason SoftLayer service +func GetBillingItemCancellationReasonService(sess *session.Session) Billing_Item_Cancellation_Reason { + return Billing_Item_Cancellation_Reason{Session: sess} +} + +func (r Billing_Item_Cancellation_Reason) Id(id int) Billing_Item_Cancellation_Reason { + r.Options.Id = &id + return r +} + +func (r Billing_Item_Cancellation_Reason) Mask(mask string) Billing_Item_Cancellation_Reason { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Item_Cancellation_Reason) Filter(filter string) Billing_Item_Cancellation_Reason { + r.Options.Filter = filter + return r +} + +func (r Billing_Item_Cancellation_Reason) Limit(limit int) Billing_Item_Cancellation_Reason { + r.Options.Limit = &limit + return r +} + +func (r Billing_Item_Cancellation_Reason) Offset(offset int) Billing_Item_Cancellation_Reason { + r.Options.Offset = &offset + return r +} + +// getAllCancellationReasons() retrieves a list of all cancellation reasons that a server/service may be assigned to. +func (r Billing_Item_Cancellation_Reason) GetAllCancellationReasons() (resp []datatypes.Billing_Item_Cancellation_Reason, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason", "getAllCancellationReasons", nil, &r.Options, &resp) + return +} + +// Retrieve A billing cancellation reason category. +func (r Billing_Item_Cancellation_Reason) GetBillingCancellationReasonCategory() (resp datatypes.Billing_Item_Cancellation_Reason_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason", "getBillingCancellationReasonCategory", nil, &r.Options, &resp) + return +} + +// Retrieve The corresponding billing items having the specific cancellation reason.
+func (r Billing_Item_Cancellation_Reason) GetBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason", "getBillingItems", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Item_Cancellation_Reason) GetObject() (resp datatypes.Billing_Item_Cancellation_Reason, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item_Cancellation_Reason) GetTranslatedReason() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason", "getTranslatedReason", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Billing_Item_Cancellation_Reason_Category data type contains cancellation reason categories. +type Billing_Item_Cancellation_Reason_Category struct { + Session *session.Session + Options sl.Options +} + +// GetBillingItemCancellationReasonCategoryService returns an instance of the Billing_Item_Cancellation_Reason_Category SoftLayer service +func GetBillingItemCancellationReasonCategoryService(sess *session.Session) Billing_Item_Cancellation_Reason_Category { + return Billing_Item_Cancellation_Reason_Category{Session: sess} +} + +func (r Billing_Item_Cancellation_Reason_Category) Id(id int) Billing_Item_Cancellation_Reason_Category { + r.Options.Id = &id + return r +} + +func (r Billing_Item_Cancellation_Reason_Category) Mask(mask string) Billing_Item_Cancellation_Reason_Category { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Item_Cancellation_Reason_Category) Filter(filter string) Billing_Item_Cancellation_Reason_Category { + r.Options.Filter = filter + return r +} + +func (r Billing_Item_Cancellation_Reason_Category) Limit(limit int) Billing_Item_Cancellation_Reason_Category { + r.Options.Limit = &limit + return r +} + +func (r Billing_Item_Cancellation_Reason_Category) Offset(offset int) Billing_Item_Cancellation_Reason_Category { + r.Options.Offset = &offset + return r +} + +// getAllCancellationReasonCategories() retrieves a list of all cancellation reason categories +func (r Billing_Item_Cancellation_Reason_Category) GetAllCancellationReasonCategories() (resp []datatypes.Billing_Item_Cancellation_Reason_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason_Category", "getAllCancellationReasonCategories", nil, &r.Options, &resp) + return +} + +// Retrieve The corresponding billing cancellation reasons having the specific billing cancellation reason category. +func (r Billing_Item_Cancellation_Reason_Category) GetBillingCancellationReasons() (resp []datatypes.Billing_Item_Cancellation_Reason, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason_Category", "getBillingCancellationReasons", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Item_Cancellation_Reason_Category) GetObject() (resp datatypes.Billing_Item_Cancellation_Reason_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason_Category", "getObject", nil, &r.Options, &resp) + return +} + +// SoftLayer_Billing_Item_Cancellation_Request data type is used to cancel service billing items. 
+type Billing_Item_Cancellation_Request struct { + Session *session.Session + Options sl.Options +} + +// GetBillingItemCancellationRequestService returns an instance of the Billing_Item_Cancellation_Request SoftLayer service +func GetBillingItemCancellationRequestService(sess *session.Session) Billing_Item_Cancellation_Request { + return Billing_Item_Cancellation_Request{Session: sess} +} + +func (r Billing_Item_Cancellation_Request) Id(id int) Billing_Item_Cancellation_Request { + r.Options.Id = &id + return r +} + +func (r Billing_Item_Cancellation_Request) Mask(mask string) Billing_Item_Cancellation_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Item_Cancellation_Request) Filter(filter string) Billing_Item_Cancellation_Request { + r.Options.Filter = filter + return r +} + +func (r Billing_Item_Cancellation_Request) Limit(limit int) Billing_Item_Cancellation_Request { + r.Options.Limit = &limit + return r +} + +func (r Billing_Item_Cancellation_Request) Offset(offset int) Billing_Item_Cancellation_Request { + r.Options.Offset = &offset + return r +} + +// This method creates a service cancellation request. +// +// You need to have "Cancel Services" privilege to create a cancellation request. You have to provide at least one SoftLayer_Billing_Item_Cancellation_Request_Item in the "items" property. Make sure the billing item's category code belongs to the cancelable product codes. You can retrieve the cancelable product category via the [[SoftLayer_Product_Item_Category::getValidCancelableServiceItemCategories|product category]] service. +func (r Billing_Item_Cancellation_Request) CreateObject(templateObject *datatypes.Billing_Item_Cancellation_Request) (resp datatypes.Billing_Item_Cancellation_Request, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "createObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer account that a service cancellation request belongs to. +func (r Billing_Item_Cancellation_Request) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getAccount", nil, &r.Options, &resp) + return +} + +// This method returns all service cancellation requests. +// +// Make sure to include the "resultLimit" in the SOAP request header for a quicker response. If no result limit header is passed, it will return the latest 25 results by default. +func (r Billing_Item_Cancellation_Request) GetAllCancellationRequests() (resp []datatypes.Billing_Item_Cancellation_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getAllCancellationRequests", nil, &r.Options, &resp) + return +} + +// Services can be canceled 2 or 3 days prior to your next bill date. This service returns the time by which a cancellation request submission is permitted in the current billing cycle. If the current time is past the cutoff date, this will return the next earliest cancellation cutoff date.
+// +// Available category codes are: service, server +func (r Billing_Item_Cancellation_Request) GetCancellationCutoffDate(accountId *int, categoryCode *string) (resp datatypes.Time, err error) { + params := []interface{}{ + accountId, + categoryCode, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getCancellationCutoffDate", params, &r.Options, &resp) + return +} + +// Retrieve A collection of service cancellation items. +func (r Billing_Item_Cancellation_Request) GetItems() (resp []datatypes.Billing_Item_Cancellation_Request_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getItems", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Billing_Item_Cancellation_Request object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Billing_Item_Cancellation_Request service. You can only retrieve cancellation request records that are assigned to your SoftLayer account. +func (r Billing_Item_Cancellation_Request) GetObject() (resp datatypes.Billing_Item_Cancellation_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The status of a service cancellation request. +func (r Billing_Item_Cancellation_Request) GetStatus() (resp datatypes.Billing_Item_Cancellation_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The ticket that is associated with the service cancellation request. +func (r Billing_Item_Cancellation_Request) GetTicket() (resp datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getTicket", nil, &r.Options, &resp) + return +} + +// Retrieve The user that initiated a service cancellation request. +func (r Billing_Item_Cancellation_Request) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getUser", nil, &r.Options, &resp) + return +} + +// This method removes a cancellation item from a cancellation request that is in "Pending" or "Approved" status. +func (r Billing_Item_Cancellation_Request) RemoveCancellationItem(itemId *int) (resp bool, err error) { + params := []interface{}{ + itemId, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "removeCancellationItem", params, &r.Options, &resp) + return +} + +// This method examined if a billing item is eligible for cancellation. It checks if the billing item you provided is already in your existing cancellation request. +func (r Billing_Item_Cancellation_Request) ValidateBillingItemForCancellation(billingItemId *int) (resp bool, err error) { + params := []interface{}{ + billingItemId, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "validateBillingItemForCancellation", params, &r.Options, &resp) + return +} + +// This method voids a service cancellation request in "Pending" or "Approved" status. 
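+//
+// Illustrative usage, not part of the generated client (requestID and sess
+// are assumed):
+//
+//	closeTicket := true
+//	voided, err := services.GetBillingItemCancellationRequestService(sess).
+//		Id(requestID).
+//		Void(&closeTicket)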
+func (r Billing_Item_Cancellation_Request) Void(closeRelatedTicketFlag *bool) (resp bool, err error) { + params := []interface{}{ + closeRelatedTicketFlag, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "void", params, &r.Options, &resp) + return +} + +// no documentation yet +type Billing_Item_Virtual_DedicatedHost struct { + Session *session.Session + Options sl.Options +} + +// GetBillingItemVirtualDedicatedHostService returns an instance of the Billing_Item_Virtual_DedicatedHost SoftLayer service +func GetBillingItemVirtualDedicatedHostService(sess *session.Session) Billing_Item_Virtual_DedicatedHost { + return Billing_Item_Virtual_DedicatedHost{Session: sess} +} + +func (r Billing_Item_Virtual_DedicatedHost) Id(id int) Billing_Item_Virtual_DedicatedHost { + r.Options.Id = &id + return r +} + +func (r Billing_Item_Virtual_DedicatedHost) Mask(mask string) Billing_Item_Virtual_DedicatedHost { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Item_Virtual_DedicatedHost) Filter(filter string) Billing_Item_Virtual_DedicatedHost { + r.Options.Filter = filter + return r +} + +func (r Billing_Item_Virtual_DedicatedHost) Limit(limit int) Billing_Item_Virtual_DedicatedHost { + r.Options.Limit = &limit + return r +} + +func (r Billing_Item_Virtual_DedicatedHost) Offset(offset int) Billing_Item_Virtual_DedicatedHost { + r.Options.Offset = &offset + return r +} + +// Cancel the resource or service for a billing Item. By default the billing item will be cancelled immediately and reclaim of the resource will begin shortly. Setting the "cancelImmediately" property to false will delay the cancellation until the next bill date. +// +// +// * The reason parameter could be from the list below: +// * "No longer needed" +// * "Business closing down" +// * "Server / Upgrade Costs" +// * "Migrating to larger server" +// * "Migrating to smaller server" +// * "Migrating to a different SoftLayer datacenter" +// * "Network performance / latency" +// * "Support response / timing" +// * "Sales process / upgrades" +// * "Moving to competitor" +func (r Billing_Item_Virtual_DedicatedHost) CancelItem(cancelImmediately *bool, cancelAssociatedBillingItems *bool, reason *string, customerNote *string) (resp bool, err error) { + params := []interface{}{ + cancelImmediately, + cancelAssociatedBillingItems, + reason, + customerNote, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "cancelItem", params, &r.Options, &resp) + return +} + +// Cancel the resource or service (excluding bare metal servers) for a billing Item. The billing item will be cancelled immediately and reclaim of the resource will begin shortly. +func (r Billing_Item_Virtual_DedicatedHost) CancelService() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "cancelService", nil, &r.Options, &resp) + return +} + +// Cancel the resource or service for a billing Item +func (r Billing_Item_Virtual_DedicatedHost) CancelServiceOnAnniversaryDate() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "cancelServiceOnAnniversaryDate", nil, &r.Options, &resp) + return +} + +// Retrieve The account that a billing item belongs to. 
+func (r Billing_Item_Virtual_DedicatedHost) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item_Virtual_DedicatedHost) GetActiveAgreement() (resp datatypes.Account_Agreement, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveAgreement", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the billing item is under an active agreement. +func (r Billing_Item_Virtual_DedicatedHost) GetActiveAgreementFlag() (resp datatypes.Account_Agreement, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveAgreementFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's active associated child billing items. This includes "floating" items that are not necessarily child items of this billing item. +func (r Billing_Item_Virtual_DedicatedHost) GetActiveAssociatedChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveAssociatedChildren", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item_Virtual_DedicatedHost) GetActiveAssociatedGuestDiskBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveAssociatedGuestDiskBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's active bundled billing items. +func (r Billing_Item_Virtual_DedicatedHost) GetActiveBundledItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveBundledItems", nil, &r.Options, &resp) + return +} + +// Retrieve A service cancellation request item that corresponds to the billing item. +func (r Billing_Item_Virtual_DedicatedHost) GetActiveCancellationItem() (resp datatypes.Billing_Item_Cancellation_Request_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveCancellationItem", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's active child billing items. +func (r Billing_Item_Virtual_DedicatedHost) GetActiveChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveChildren", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item_Virtual_DedicatedHost) GetActiveFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item_Virtual_DedicatedHost) GetActiveSparePoolAssociatedGuestDiskBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveSparePoolAssociatedGuestDiskBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's spare pool bundled billing items. +func (r Billing_Item_Virtual_DedicatedHost) GetActiveSparePoolBundledItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveSparePoolBundledItems", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's associated parent. 
This is to be used for billing items that are "floating", and therefore are not child items of any parent billing item. If it is desired to associate an item to another, populate this with the SoftLayer_Billing_Item ID of that associated parent item. +func (r Billing_Item_Virtual_DedicatedHost) GetAssociatedBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getAssociatedBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve A history of billing items which a billing item has been associated with. +func (r Billing_Item_Virtual_DedicatedHost) GetAssociatedBillingItemHistory() (resp []datatypes.Billing_Item_Association_History, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getAssociatedBillingItemHistory", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's associated child billing items. This includes "floating" items that are not necessarily child billing items of this billing item. +func (r Billing_Item_Virtual_DedicatedHost) GetAssociatedChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getAssociatedChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's associated parent billing item. This object will be the same as the parent billing item if parentId is set. +func (r Billing_Item_Virtual_DedicatedHost) GetAssociatedParent() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getAssociatedParent", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item_Virtual_DedicatedHost) GetAvailableMatchingVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getAvailableMatchingVlans", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allocation for a billing item. +func (r Billing_Item_Virtual_DedicatedHost) GetBandwidthAllocation() (resp datatypes.Network_Bandwidth_Version1_Allocation, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's recurring child items that have once been billed and are scheduled to be billed in the future. +func (r Billing_Item_Virtual_DedicatedHost) GetBillableChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getBillableChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's bundled billing items +func (r Billing_Item_Virtual_DedicatedHost) GetBundleItems() (resp []datatypes.Product_Item_Bundles, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getBundleItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's bundled billing items' +func (r Billing_Item_Virtual_DedicatedHost) GetBundledItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getBundledItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's active child billing items. 
+func (r Billing_Item_Virtual_DedicatedHost) GetCanceledChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getCanceledChildren", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item's cancellation reason. +func (r Billing_Item_Virtual_DedicatedHost) GetCancellationReason() (resp datatypes.Billing_Item_Cancellation_Reason, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getCancellationReason", nil, &r.Options, &resp) + return +} + +// Retrieve This will return any cancellation requests that are associated with this billing item. +func (r Billing_Item_Virtual_DedicatedHost) GetCancellationRequests() (resp []datatypes.Billing_Item_Cancellation_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getCancellationRequests", nil, &r.Options, &resp) + return +} + +// Retrieve The item category to which the billing item's item belongs. +func (r Billing_Item_Virtual_DedicatedHost) GetCategory() (resp datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getCategory", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's child billing items' +func (r Billing_Item_Virtual_DedicatedHost) GetChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's active child billing items. +func (r Billing_Item_Virtual_DedicatedHost) GetChildrenWithActiveAgreement() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getChildrenWithActiveAgreement", nil, &r.Options, &resp) + return +} + +// Retrieve For product items which have a downgrade path defined, this will return those product items. +func (r Billing_Item_Virtual_DedicatedHost) GetDowngradeItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getDowngradeItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's associated child billing items, excluding some items with a $0.00 recurring fee. +func (r Billing_Item_Virtual_DedicatedHost) GetFilteredNextInvoiceChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getFilteredNextInvoiceChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A flag that will reflect whether this billing item is billed on an hourly basis or not. 
+func (r Billing_Item_Virtual_DedicatedHost) GetHourlyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getHourlyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Invoice items associated with this billing item +func (r Billing_Item_Virtual_DedicatedHost) GetInvoiceItem() (resp datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getInvoiceItem", nil, &r.Options, &resp) + return +} + +// Retrieve All invoice items associated with the billing item +func (r Billing_Item_Virtual_DedicatedHost) GetInvoiceItems() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getInvoiceItems", nil, &r.Options, &resp) + return +} + +// Retrieve The entry in the SoftLayer product catalog that a billing item is based upon. +func (r Billing_Item_Virtual_DedicatedHost) GetItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getItem", nil, &r.Options, &resp) + return +} + +// Retrieve The location of the billing item. Some billing items have physical properties such as the server itself. For items such as these, we provide location information. +func (r Billing_Item_Virtual_DedicatedHost) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's child billing items and associated items' +func (r Billing_Item_Virtual_DedicatedHost) GetNextInvoiceChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getNextInvoiceChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's total, including any child billing items if they exist.' +func (r Billing_Item_Virtual_DedicatedHost) GetNextInvoiceTotalOneTimeAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getNextInvoiceTotalOneTimeAmount", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's total, including any child billing items if they exist.' +func (r Billing_Item_Virtual_DedicatedHost) GetNextInvoiceTotalOneTimeTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getNextInvoiceTotalOneTimeTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's total, including any child billing items and associated billing items if they exist.' +func (r Billing_Item_Virtual_DedicatedHost) GetNextInvoiceTotalRecurringAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getNextInvoiceTotalRecurringAmount", nil, &r.Options, &resp) + return +} + +// Retrieve This is deprecated and will always be zero. Because tax is calculated in real-time, previewing the next recurring invoice is pre-tax only. +func (r Billing_Item_Virtual_DedicatedHost) GetNextInvoiceTotalRecurringTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getNextInvoiceTotalRecurringTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's associated child billing items, excluding ALL items with a $0.00 recurring fee. 
+func (r Billing_Item_Virtual_DedicatedHost) GetNonZeroNextInvoiceChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getNonZeroNextInvoiceChildren", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Item_Virtual_DedicatedHost) GetObject() (resp datatypes.Billing_Item_Virtual_DedicatedHost, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's original order item. Simply a reference to the original order from which this billing item was created. +func (r Billing_Item_Virtual_DedicatedHost) GetOrderItem() (resp datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getOrderItem", nil, &r.Options, &resp) + return +} + +// Retrieve The original physical location for this billing item--may differ from current. +func (r Billing_Item_Virtual_DedicatedHost) GetOriginalLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getOriginalLocation", nil, &r.Options, &resp) + return +} + +// Retrieve The package under which this billing item was sold. A Package is the general grouping of products as seen on our order forms. +func (r Billing_Item_Virtual_DedicatedHost) GetPackage() (resp datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getPackage", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's parent item. If a billing item has no parent item then this value is null. +func (r Billing_Item_Virtual_DedicatedHost) GetParent() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getParent", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's parent item. If a billing item has no parent item then this value is null. +func (r Billing_Item_Virtual_DedicatedHost) GetParentVirtualGuestBillingItem() (resp datatypes.Billing_Item_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getParentVirtualGuestBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates whether a billing item is scheduled to be canceled or not. +func (r Billing_Item_Virtual_DedicatedHost) GetPendingCancellationFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getPendingCancellationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The new order item that will replace this billing item. +func (r Billing_Item_Virtual_DedicatedHost) GetPendingOrderItem() (resp datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getPendingOrderItem", nil, &r.Options, &resp) + return +} + +// Retrieve Provisioning transaction for this billing item +func (r Billing_Item_Virtual_DedicatedHost) GetProvisionTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getProvisionTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve The resource for a virtual dedicated host billing item. 
+func (r Billing_Item_Virtual_DedicatedHost) GetResource() (resp datatypes.Virtual_DedicatedHost, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getResource", nil, &r.Options, &resp) + return +} + +// This service returns billing items of a specified category code. This service should be used to retrieve billing items that you wish to cancel. Some billing items can be canceled via the [[SoftLayer_Security_Certificate_Request|service cancellation]] service. +// +// In order to find billing items for cancellation, use the [[SoftLayer_Product_Item_Category::getValidCancelableServiceItemCategories|product categories]] service to retrieve category codes that are eligible for cancellation. +func (r Billing_Item_Virtual_DedicatedHost) GetServiceBillingItemsByCategory(categoryCode *string, includeZeroRecurringFee *bool) (resp []datatypes.Billing_Item, err error) { + params := []interface{}{ + categoryCode, + includeZeroRecurringFee, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getServiceBillingItemsByCategory", params, &r.Options, &resp) + return +} + +// Retrieve A friendly description of the software component +func (r Billing_Item_Virtual_DedicatedHost) GetSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getSoftwareDescription", nil, &r.Options, &resp) + return +} + +// Retrieve Billing items whose product item has an upgrade path defined in our system will return the next product item in the upgrade path. +func (r Billing_Item_Virtual_DedicatedHost) GetUpgradeItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getUpgradeItem", nil, &r.Options, &resp) + return +} + +// Retrieve Billing items whose product item has an upgrade path defined in our system will return all the product items in the upgrade path. +func (r Billing_Item_Virtual_DedicatedHost) GetUpgradeItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getUpgradeItems", nil, &r.Options, &resp) + return +} + +// Remove the association from a billing item. +func (r Billing_Item_Virtual_DedicatedHost) RemoveAssociationId() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "removeAssociationId", nil, &r.Options, &resp) + return +} + +// Set an associated billing item to an orphan billing item. Associations allow you to tie an "orphaned" billing item, any non-server billing item that doesn't have a parent item such as secondary IP subnets or StorageLayer accounts, to a server billing item. You may only set an association for an orphan to a server. You cannot associate a server to an orphan if either the server or the orphan billing item has a cancellation date set.
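+//
+// Illustrative usage, not part of the generated client. The service is
+// assumed to be scoped to the orphan billing item's id, and the argument is
+// assumed to be the server billing item's id:
+//
+//	serverBillingItemID := 987654
+//	ok, err := services.GetBillingItemVirtualDedicatedHostService(sess).
+//		Id(orphanBillingItemID).
+//		SetAssociationId(&serverBillingItemID)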
+func (r Billing_Item_Virtual_DedicatedHost) SetAssociationId(associatedId *int) (resp bool, err error) { + params := []interface{}{ + associatedId, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "setAssociationId", params, &r.Options, &resp) + return +} + +// Void a previously made cancellation for a service +func (r Billing_Item_Virtual_DedicatedHost) VoidCancelService() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "voidCancelService", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Billing_Order data type contains general information relating to an individual order applied to a SoftLayer customer account or to a new customer. Personal information in this type such as names, addresses, and phone numbers are taken from the account's contact information at the time the order is generated for existing SoftLayer customer. +type Billing_Order struct { + Session *session.Session + Options sl.Options +} + +// GetBillingOrderService returns an instance of the Billing_Order SoftLayer service +func GetBillingOrderService(sess *session.Session) Billing_Order { + return Billing_Order{Session: sess} +} + +func (r Billing_Order) Id(id int) Billing_Order { + r.Options.Id = &id + return r +} + +func (r Billing_Order) Mask(mask string) Billing_Order { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Order) Filter(filter string) Billing_Order { + r.Options.Filter = filter + return r +} + +func (r Billing_Order) Limit(limit int) Billing_Order { + r.Options.Limit = &limit + return r +} + +func (r Billing_Order) Offset(offset int) Billing_Order { + r.Options.Offset = &offset + return r +} + +// When an order has been modified, the customer will need to approve the changes. This method will allow the customer to approve the changes. +func (r Billing_Order) ApproveModifiedOrder() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "approveModifiedOrder", nil, &r.Options, &resp) + return +} + +// Retrieve The [[SoftLayer_Account|account]] to which an order belongs. +func (r Billing_Order) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getAccount", nil, &r.Options, &resp) + return +} + +// This will get all billing orders for your account. +func (r Billing_Order) GetAllObjects() (resp []datatypes.Billing_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Order) GetBrand() (resp datatypes.Brand, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getBrand", nil, &r.Options, &resp) + return +} + +// Retrieve A cart is similar to a quote, except that it can be continually modified by the customer and does not have locked-in prices. Not all orders will have a cart associated with them. See [[SoftLayer_Billing_Order_Cart]] for more information. 
+func (r Billing_Order) GetCart() (resp datatypes.Billing_Order_Cart, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getCart", nil, &r.Options, &resp) + return +} + +// Retrieve The [[SoftLayer_Billing_Order_Item (type)|order items]] that are core restricted +func (r Billing_Order) GetCoreRestrictedItems() (resp []datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getCoreRestrictedItems", nil, &r.Options, &resp) + return +} + +// Retrieve All credit card transactions associated with this order. If this order was not placed with a credit card, this will be empty. +func (r Billing_Order) GetCreditCardTransactions() (resp []datatypes.Billing_Payment_Card_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getCreditCardTransactions", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Order) GetExchangeRate() (resp datatypes.Billing_Currency_ExchangeRate, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getExchangeRate", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Order) GetInitialInvoice() (resp datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getInitialInvoice", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Billing_Order_items included in an order. +func (r Billing_Order) GetItems() (resp []datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getItems", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Billing_Order object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Billing_Order service. You can only retrieve orders that are assigned to your portal user's account. +func (r Billing_Order) GetObject() (resp datatypes.Billing_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Order) GetOrderApprovalDate() (resp datatypes.Time, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderApprovalDate", nil, &r.Options, &resp) + return +} + +// Retrieve An order's non-server items total monthly fee. +func (r Billing_Order) GetOrderNonServerMonthlyAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderNonServerMonthlyAmount", nil, &r.Options, &resp) + return +} + +// Retrieve An order's server items total monthly fee. +func (r Billing_Order) GetOrderServerMonthlyAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderServerMonthlyAmount", nil, &r.Options, &resp) + return +} + +// Get a list of [[SoftLayer_Container_Billing_Order_Status]] objects. +func (r Billing_Order) GetOrderStatuses() (resp []datatypes.Container_Billing_Order_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderStatuses", nil, &r.Options, &resp) + return +} + +// Retrieve An order's top level items. This normally includes the server line item and any non-server additional services such as NAS or ISCSI. +func (r Billing_Order) GetOrderTopLevelItems() (resp []datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTopLevelItems", nil, &r.Options, &resp) + return +} + +// Retrieve This amount represents the order's initial charge including set up fee and taxes. 
+func (r Billing_Order) GetOrderTotalAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalAmount", nil, &r.Options, &resp) + return +} + +// Retrieve An order's total one time amount summing all the set up fees, the labor fees and the one time fees. Taxes will be applied for non-tax-exempt. This amount represents the initial fees that will be charged. +func (r Billing_Order) GetOrderTotalOneTime() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalOneTime", nil, &r.Options, &resp) + return +} + +// Retrieve An order's total one time amount. This amount represents the initial fees before tax. +func (r Billing_Order) GetOrderTotalOneTimeAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalOneTimeAmount", nil, &r.Options, &resp) + return +} + +// Retrieve An order's total one time tax amount. This amount represents the tax that will be applied to the total charge, if the SoftLayer_Account tied to a SoftLayer_Billing_Order is a taxable account. +func (r Billing_Order) GetOrderTotalOneTimeTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalOneTimeTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve An order's total recurring amount. Taxes will be applied for non-tax-exempt. This amount represents the fees that will be charged on a recurring (usually monthly) basis. +func (r Billing_Order) GetOrderTotalRecurring() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalRecurring", nil, &r.Options, &resp) + return +} + +// Retrieve An order's total recurring amount. This amount represents the fees that will be charged on a recurring (usually monthly) basis. +func (r Billing_Order) GetOrderTotalRecurringAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalRecurringAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total tax amount of the recurring fees, if the SoftLayer_Account tied to a SoftLayer_Billing_Order is a taxable account. +func (r Billing_Order) GetOrderTotalRecurringTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalRecurringTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve An order's total setup fee. +func (r Billing_Order) GetOrderTotalSetupAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalSetupAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The type of an order. This lets you know where this order was generated from. +func (r Billing_Order) GetOrderType() (resp datatypes.Billing_Order_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderType", nil, &r.Options, &resp) + return +} + +// Retrieve All PayPal transactions associated with this order. If this order was not placed with PayPal, this will be empty. +func (r Billing_Order) GetPaypalTransactions() (resp []datatypes.Billing_Payment_PayPal_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getPaypalTransactions", nil, &r.Options, &resp) + return +} + +// Retrieve a PDF record of a SoftLayer quote. If the order is not a quote, an error will be thrown. 
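+//
+// Illustrative usage, not part of the generated client: the helper name,
+// orderID and sess are assumed, and the standard library os package is used
+// to write the returned bytes to disk.
+//
+//	func downloadQuotePdf(sess *session.Session, orderID int) error {
+//		svc := services.GetBillingOrderService(sess).Id(orderID)
+//		name, err := svc.GetPdfFilename()
+//		if err != nil {
+//			return err
+//		}
+//		pdf, err := svc.GetPdf()
+//		if err != nil {
+//			return err
+//		}
+//		return os.WriteFile(name, pdf, 0o644)
+//	}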
+func (r Billing_Order) GetPdf() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getPdf", nil, &r.Options, &resp) + return +} + +// Retrieve the default filename of an order PDF. +func (r Billing_Order) GetPdfFilename() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getPdfFilename", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Order) GetPresaleEvent() (resp datatypes.Sales_Presale_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getPresaleEvent", nil, &r.Options, &resp) + return +} + +// Retrieve The quote of an order. This quote holds information about its expiration date, creation date, name and status. This information is tied to an order having the status 'QUOTE' +func (r Billing_Order) GetQuote() (resp datatypes.Billing_Order_Quote, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getQuote", nil, &r.Options, &resp) + return +} + +// Generate an [[SoftLayer_Container_Product_Order|order container]] from a billing order. This will take into account promotions, reseller status, estimated taxes and all other standard order verification processes. +func (r Billing_Order) GetRecalculatedOrderContainer(message *string, ignoreDiscountsFlag *bool) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + message, + ignoreDiscountsFlag, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getRecalculatedOrderContainer", params, &r.Options, &resp) + return +} + +// Generate a [[SoftLayer_Container_Product_Order_Receipt]] object with all the order information. +func (r Billing_Order) GetReceipt() (resp datatypes.Container_Product_Order_Receipt, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getReceipt", nil, &r.Options, &resp) + return +} + +// Retrieve The Referral Partner who referred this order. (Only necessary for new customer orders) +func (r Billing_Order) GetReferralPartner() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getReferralPartner", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates an order is an upgrade. +func (r Billing_Order) GetUpgradeRequestFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getUpgradeRequestFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_User_Customer object tied to an order. +func (r Billing_Order) GetUserRecord() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getUserRecord", nil, &r.Options, &resp) + return +} + +// When an order has been modified, it will contain a status indicating so. This method checks that status and also verifies that the active user's account is the same as the account on the order. 
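+//
+// Illustrative usage together with ApproveModifiedOrder, not part of the
+// generated client (orderID and sess are assumed):
+//
+//	svc := services.GetBillingOrderService(sess).Id(orderID)
+//	pending, err := svc.IsPendingEditApproval()
+//	if err == nil && pending {
+//		_, err = svc.ApproveModifiedOrder()
+//	}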
+func (r Billing_Order) IsPendingEditApproval() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "isPendingEditApproval", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Billing_Order_Cart struct { + Session *session.Session + Options sl.Options +} + +// GetBillingOrderCartService returns an instance of the Billing_Order_Cart SoftLayer service +func GetBillingOrderCartService(sess *session.Session) Billing_Order_Cart { + return Billing_Order_Cart{Session: sess} +} + +func (r Billing_Order_Cart) Id(id int) Billing_Order_Cart { + r.Options.Id = &id + return r +} + +func (r Billing_Order_Cart) Mask(mask string) Billing_Order_Cart { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Order_Cart) Filter(filter string) Billing_Order_Cart { + r.Options.Filter = filter + return r +} + +func (r Billing_Order_Cart) Limit(limit int) Billing_Order_Cart { + r.Options.Limit = &limit + return r +} + +func (r Billing_Order_Cart) Offset(offset int) Billing_Order_Cart { + r.Options.Offset = &offset + return r +} + +// This method is used to transfer an anonymous quote to the active user and associated account. An anonymous quote is one that was created by a user without being authenticated. If a quote was created anonymously and then the customer attempts to access that anonymous quote via the API (which requires authentication), the customer will be unable to retrieve the quote due to the security restrictions in place. By providing the ability for a customer to claim a quote, s/he will be able to pull the anonymous quote onto his/her account and successfully view the quote. +// +// To claim a quote, both the quote id and the quote key (the 32-character random string) must be provided. +func (r Billing_Order_Cart) Claim(quoteKey *string, quoteId *int) (resp datatypes.Billing_Order_Quote, err error) { + params := []interface{}{ + quoteKey, + quoteId, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "claim", params, &r.Options, &resp) + return +} + +// When creating a new cart, the order data is sent through SoftLayer_Product_Order::verifyOrder to make sure that the cart contains valid data. If an issue is found with the order, an exception will be thrown and you will receive the same response as if SoftLayer_Product_Order::verifyOrder were called directly. Once the order verification is complete, the cart will be created. +// +// The response is the new cart id. +func (r Billing_Order_Cart) CreateCart(orderData *datatypes.Container_Product_Order) (resp int, err error) { + params := []interface{}{ + orderData, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "createCart", params, &r.Options, &resp) + return +} + +// If a cart is no longer needed, it can be deleted using this service. Once a cart has been deleted, it cannot be retrieved again. +func (r Billing_Order_Cart) DeleteCart() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "deleteCart", nil, &r.Options, &resp) + return +} + +// Account master users and sub-users in the SoftLayer customer portal can delete the quote of an order. +func (r Billing_Order_Cart) DeleteQuote() (resp datatypes.Billing_Order_Quote, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "deleteQuote", nil, &r.Options, &resp) + return +} + +// Retrieve A quote's corresponding account. 
+func (r Billing_Order_Cart) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve a valid cart record of a SoftLayer order. +func (r Billing_Order_Cart) GetCartByCartKey(cartKey *string) (resp datatypes.Billing_Order_Cart, err error) { + params := []interface{}{ + cartKey, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getCartByCartKey", params, &r.Options, &resp) + return +} + +// Retrieve Indicates whether the owner of the quote chosen to no longer be contacted. +func (r Billing_Order_Cart) GetDoNotContactFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getDoNotContactFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Order_Cart) GetObject() (resp datatypes.Billing_Order_Cart, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve This order contains the records for which products were selected for this quote. +func (r Billing_Order_Cart) GetOrder() (resp datatypes.Billing_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getOrder", nil, &r.Options, &resp) + return +} + +// Retrieve These are all the orders that were created from this quote. +func (r Billing_Order_Cart) GetOrdersFromQuote() (resp []datatypes.Billing_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getOrdersFromQuote", nil, &r.Options, &resp) + return +} + +// Retrieve a PDF copy of the cart. +func (r Billing_Order_Cart) GetPdf() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getPdf", nil, &r.Options, &resp) + return +} + +// This method will return a [[SoftLayer_Billing_Order_Quote]] that is identified by the quote key specified. If you do not have access to the quote or it does not exist, an exception will be thrown indicating so. +func (r Billing_Order_Cart) GetQuoteByQuoteKey(quoteKey *string) (resp datatypes.Billing_Order_Quote, err error) { + params := []interface{}{ + quoteKey, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getQuoteByQuoteKey", params, &r.Options, &resp) + return +} + +// This method allows the customer to retrieve a saved cart and put it in a format that's suitable to be sent to SoftLayer_Billing_Order_Cart::createCart to create a new cart or to SoftLayer_Billing_Order_Cart::updateCart to update an existing cart. +func (r Billing_Order_Cart) GetRecalculatedOrderContainer(orderData *datatypes.Container_Product_Order, orderBeingPlacedFlag *bool) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + orderData, + orderBeingPlacedFlag, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getRecalculatedOrderContainer", params, &r.Options, &resp) + return +} + +// Use this method for placing server orders and additional services orders. The same applies for this as with verifyOrder. Send in the SoftLayer_Container_Product_Order_Hardware_Server for server orders. In addition to verifying the order, placeOrder() also makes an initial authorization on the SoftLayer_Account tied to this order, if a credit card is on file. If the account tied to this order is a paypal customer, an URL will also be returned to the customer. After placing the order, you must go to this URL to finish the authorization process. 
This tells paypal that you indeed want to place the order. After going to this URL, it will direct you back to a SoftLayer webpage that tells us you have finished the process. After this, it will go to sales for final approval. +func (r Billing_Order_Cart) PlaceOrder(orderData interface{}) (resp datatypes.Container_Product_Order_Receipt, err error) { + err = datatypes.SetComplexType(orderData) + if err != nil { + return + } + params := []interface{}{ + orderData, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "placeOrder", params, &r.Options, &resp) + return +} + +// Use this method for placing server quotes and additional services quotes. The same applies for this as with verifyOrder. Send in the SoftLayer_Container_Product_Order_Hardware_Server for server quotes. In addition to verifying the quote, placeQuote() also makes an initial authorization on the SoftLayer_Account tied to this order, if a credit card is on file. If the account tied to this order is a paypal customer, an URL will also be returned to the customer. After placing the order, you must go to this URL to finish the authorization process. This tells paypal that you indeed want to place the order. After going to this URL, it will direct you back to a SoftLayer webpage that tells us you have finished the process. +func (r Billing_Order_Cart) PlaceQuote(orderData *datatypes.Container_Product_Order) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + orderData, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "placeQuote", params, &r.Options, &resp) + return +} + +// Account master users and sub-users in the SoftLayer customer portal can save the quote of an order to avoid its deletion after 5 days or its expiration after 2 days. +func (r Billing_Order_Cart) SaveQuote() (resp datatypes.Billing_Order_Quote, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "saveQuote", nil, &r.Options, &resp) + return +} + +// Like SoftLayer_Billing_Order_Cart::createCart, the order data will be sent through SoftLayer_Product_Order::verifyOrder to make sure that the updated cart information is valid. Once it has been verified, the new order data will be saved. +// +// This will return the cart id. +func (r Billing_Order_Cart) UpdateCart(orderData *datatypes.Container_Product_Order) (resp int, err error) { + params := []interface{}{ + orderData, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "updateCart", params, &r.Options, &resp) + return +} + +// Use this method for placing server orders and additional services orders. The same applies for this as with verifyOrder. Send in the SoftLayer_Container_Product_Order_Hardware_Server for server orders. In addition to verifying the order, placeOrder() also makes an initial authorization on the SoftLayer_Account tied to this order, if a credit card is on file. If the account tied to this order is a paypal customer, an URL will also be returned to the customer. After placing the order, you must go to this URL to finish the authorization process. This tells paypal that you indeed want to place the order. After going to this URL, it will direct you back to a SoftLayer webpage that tells us you have finished the process. After this, it will go to sales for final approval. 
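+//
+// Illustrative usage, not part of the generated client: sess is assumed, and
+// orderData is assumed to be a previously assembled
+// *datatypes.Container_Product_Order (or a more specific container such as a
+// hardware server order container).
+//
+//	verified, err := services.GetBillingOrderCartService(sess).VerifyOrder(orderData)
+//	if err != nil {
+//		// the order container failed verification
+//	}
+//	_ = verified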
+func (r Billing_Order_Cart) VerifyOrder(orderData interface{}) (resp datatypes.Container_Product_Order, err error) { + err = datatypes.SetComplexType(orderData) + if err != nil { + return + } + params := []interface{}{ + orderData, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "verifyOrder", params, &r.Options, &resp) + return +} + +// Withdraws the users acceptance of the GDPR terms. +func (r Billing_Order_Cart) WithdrawGdprAcceptance() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "withdrawGdprAcceptance", nil, &r.Options, &resp) + return +} + +// Every individual item that a SoftLayer customer is billed for is recorded in the SoftLayer_Billing_Item data type. Billing items range from server chassis to hard drives to control panels, bandwidth quota upgrades and port upgrade charges. Softlayer [[SoftLayer_Billing_Invoice|invoices]] are generated from the cost of a customer's billing items. Billing items are copied from the product catalog as they're ordered by customers to create a reference between an account and the billable items they own. +// +// Billing items exist in a tree relationship. Items are associated with each other by parent/child relationships. Component items such as CPU's, RAM, and software each have a parent billing item for the server chassis they're associated with. Billing Items with a null parent item do not have an associated parent item. +type Billing_Order_Item struct { + Session *session.Session + Options sl.Options +} + +// GetBillingOrderItemService returns an instance of the Billing_Order_Item SoftLayer service +func GetBillingOrderItemService(sess *session.Session) Billing_Order_Item { + return Billing_Order_Item{Session: sess} +} + +func (r Billing_Order_Item) Id(id int) Billing_Order_Item { + r.Options.Id = &id + return r +} + +func (r Billing_Order_Item) Mask(mask string) Billing_Order_Item { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Order_Item) Filter(filter string) Billing_Order_Item { + r.Options.Filter = filter + return r +} + +func (r Billing_Order_Item) Limit(limit int) Billing_Order_Item { + r.Options.Limit = &limit + return r +} + +func (r Billing_Order_Item) Offset(offset int) Billing_Order_Item { + r.Options.Offset = &offset + return r +} + +// Retrieve The SoftLayer_Billing_Item tied to the order item. +func (r Billing_Order_Item) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The other items included with an ordered item. +func (r Billing_Order_Item) GetBundledItems() (resp []datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getBundledItems", nil, &r.Options, &resp) + return +} + +// Retrieve The item category tied to an order item. +func (r Billing_Order_Item) GetCategory() (resp datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getCategory", nil, &r.Options, &resp) + return +} + +// Retrieve The child order items for an order item. All server order items should have children. These children are considered a part of the server. 
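+//
+// Illustrative usage, not part of the generated client (serverOrderItemID and
+// sess are assumed):
+//
+//	children, err := services.GetBillingOrderItemService(sess).
+//		Id(serverOrderItemID).
+//		GetChildren()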
+func (r Billing_Order_Item) GetChildren() (resp []datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's universally unique identifier. +func (r Billing_Order_Item) GetGlobalIdentifier() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getGlobalIdentifier", nil, &r.Options, &resp) + return +} + +// Retrieve The component type tied to an order item. All hardware-specific items should have a generic hardware component. +func (r Billing_Order_Item) GetHardwareGenericComponent() (resp datatypes.Hardware_Component_Model_Generic, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getHardwareGenericComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Product_Item tied to an order item. The item is the actual definition of the product being sold. +func (r Billing_Order_Item) GetItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getItem", nil, &r.Options, &resp) + return +} + +// Retrieve This is an item's category answers. +func (r Billing_Order_Item) GetItemCategoryAnswers() (resp []datatypes.Billing_Order_Item_Category_Answer, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getItemCategoryAnswers", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Product_Item_Price tied to an order item. The item price object describes the cost of an item. +func (r Billing_Order_Item) GetItemPrice() (resp datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getItemPrice", nil, &r.Options, &resp) + return +} + +// Retrieve The location of an ordered item. This is usually the same as the server it is being ordered with. Otherwise it describes the location of the additional service being ordered. +func (r Billing_Order_Item) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Order_Item) GetNextOrderChildren() (resp []datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getNextOrderChildren", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Billing_Item object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Billing_Item service. You can only retrieve billing items tied to the account that your portal user is assigned to. Billing items are an account's items of billable items. There are "parent" billing items and "child" billing items. The server billing item is generally referred to as a parent billing item. The items tied to a server, such as ram, harddrives, and operating systems are considered "child" billing items. +func (r Billing_Order_Item) GetObject() (resp datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve This is only populated when an upgrade order is placed. The old billing item represents what the billing was before the upgrade happened. 
+func (r Billing_Order_Item) GetOldBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getOldBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The order to which this item belongs. The order contains all the information related to the items included in an order +func (r Billing_Order_Item) GetOrder() (resp datatypes.Billing_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getOrder", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Order_Item) GetOrderApprovalDate() (resp datatypes.Time, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getOrderApprovalDate", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Product_Package an order item is a part of. +func (r Billing_Order_Item) GetPackage() (resp datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getPackage", nil, &r.Options, &resp) + return +} + +// Retrieve The parent order item ID for an item. Items that are associated with a server will have a parent. The parent will be the server item itself. +func (r Billing_Order_Item) GetParent() (resp datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getParent", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Product_Package_Preset related to this order item. +func (r Billing_Order_Item) GetPreset() (resp datatypes.Product_Package_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getPreset", nil, &r.Options, &resp) + return +} + +// Retrieve A count of power supplies contained within this SoftLayer_Billing_Order +func (r Billing_Order_Item) GetRedundantPowerSupplyCount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getRedundantPowerSupplyCount", nil, &r.Options, &resp) + return +} + +// Retrieve For ordered items that are software items, a full description of that software can be found with this property. +func (r Billing_Order_Item) GetSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getSoftwareDescription", nil, &r.Options, &resp) + return +} + +// Retrieve The drive storage groups that are attached to this billing order item. +func (r Billing_Order_Item) GetStorageGroups() (resp []datatypes.Configuration_Storage_Group_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getStorageGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The recurring fee of an ordered item. This amount represents the fees that will be charged on a recurring (usually monthly) basis. +func (r Billing_Order_Item) GetTotalRecurringAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getTotalRecurringAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The next SoftLayer_Product_Item in the upgrade path for this order item. +func (r Billing_Order_Item) GetUpgradeItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getUpgradeItem", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Billing_Oder_Quote data type contains general information relating to an individual order applied to a SoftLayer customer account or to a new customer. 
Personal information in this type such as names, addresses, and phone numbers are taken from the account's contact information at the time the quote is generated for existing SoftLayer customer. +type Billing_Order_Quote struct { + Session *session.Session + Options sl.Options +} + +// GetBillingOrderQuoteService returns an instance of the Billing_Order_Quote SoftLayer service +func GetBillingOrderQuoteService(sess *session.Session) Billing_Order_Quote { + return Billing_Order_Quote{Session: sess} +} + +func (r Billing_Order_Quote) Id(id int) Billing_Order_Quote { + r.Options.Id = &id + return r +} + +func (r Billing_Order_Quote) Mask(mask string) Billing_Order_Quote { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Order_Quote) Filter(filter string) Billing_Order_Quote { + r.Options.Filter = filter + return r +} + +func (r Billing_Order_Quote) Limit(limit int) Billing_Order_Quote { + r.Options.Limit = &limit + return r +} + +func (r Billing_Order_Quote) Offset(offset int) Billing_Order_Quote { + r.Options.Offset = &offset + return r +} + +// This method is used to transfer an anonymous quote to the active user and associated account. An anonymous quote is one that was created by a user without being authenticated. If a quote was created anonymously and then the customer attempts to access that anonymous quote via the API (which requires authentication), the customer will be unable to retrieve the quote due to the security restrictions in place. By providing the ability for a customer to claim a quote, s/he will be able to pull the anonymous quote onto his/her account and successfully view the quote. +// +// To claim a quote, both the quote id and the quote key (the 32-character random string) must be provided. +func (r Billing_Order_Quote) Claim(quoteKey *string, quoteId *int) (resp datatypes.Billing_Order_Quote, err error) { + params := []interface{}{ + quoteKey, + quoteId, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "claim", params, &r.Options, &resp) + return +} + +// Account master users and sub-users in the SoftLayer customer portal can delete the quote of an order. +func (r Billing_Order_Quote) DeleteQuote() (resp datatypes.Billing_Order_Quote, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "deleteQuote", nil, &r.Options, &resp) + return +} + +// Retrieve A quote's corresponding account. +func (r Billing_Order_Quote) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve Indicates whether the owner of the quote chosen to no longer be contacted. +func (r Billing_Order_Quote) GetDoNotContactFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "getDoNotContactFlag", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Billing_Order_Quote object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Billing_Order_Quote service. You can only retrieve quotes that are assigned to your portal user's account. 
+func (r Billing_Order_Quote) GetObject() (resp datatypes.Billing_Order_Quote, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve This order contains the records for which products were selected for this quote. +func (r Billing_Order_Quote) GetOrder() (resp datatypes.Billing_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "getOrder", nil, &r.Options, &resp) + return +} + +// Retrieve These are all the orders that were created from this quote. +func (r Billing_Order_Quote) GetOrdersFromQuote() (resp []datatypes.Billing_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "getOrdersFromQuote", nil, &r.Options, &resp) + return +} + +// Retrieve a PDF record of a SoftLayer quoted order. SoftLayer keeps PDF records of all quoted orders for customer retrieval from the portal and API. You must have a PDF reader installed in order to view these quoted order files. +func (r Billing_Order_Quote) GetPdf() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "getPdf", nil, &r.Options, &resp) + return +} + +// This method will return a [[SoftLayer_Billing_Order_Quote]] that is identified by the quote key specified. If you do not have access to the quote or it does not exist, an exception will be thrown indicating so. +func (r Billing_Order_Quote) GetQuoteByQuoteKey(quoteKey *string) (resp datatypes.Billing_Order_Quote, err error) { + params := []interface{}{ + quoteKey, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "getQuoteByQuoteKey", params, &r.Options, &resp) + return +} + +// Generate an [[SoftLayer_Container_Product_Order|order container]] from the previously-created quote. This will take into account promotions, reseller status, estimated taxes and all other standard order verification processes. +func (r Billing_Order_Quote) GetRecalculatedOrderContainer(userOrderData *datatypes.Container_Product_Order, orderBeingPlacedFlag *bool) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + userOrderData, + orderBeingPlacedFlag, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "getRecalculatedOrderContainer", params, &r.Options, &resp) + return +} + +// Use this method for placing server orders and additional services orders. The same applies for this as with verifyOrder. Send in the SoftLayer_Container_Product_Order_Hardware_Server for server orders. In addition to verifying the order, placeOrder() also makes an initial authorization on the SoftLayer_Account tied to this order, if a credit card is on file. If the account tied to this order is a paypal customer, an URL will also be returned to the customer. After placing the order, you must go to this URL to finish the authorization process. This tells paypal that you indeed want to place the order. After going to this URL, it will direct you back to a SoftLayer webpage that tells us you have finished the process. After this, it will go to sales for final approval. +func (r Billing_Order_Quote) PlaceOrder(orderData interface{}) (resp datatypes.Container_Product_Order_Receipt, err error) { + err = datatypes.SetComplexType(orderData) + if err != nil { + return + } + params := []interface{}{ + orderData, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "placeOrder", params, &r.Options, &resp) + return +} + +// Use this method for placing server quotes and additional services quotes. 
The same applies for this as with verifyOrder. Send in the SoftLayer_Container_Product_Order_Hardware_Server for server quotes. In addition to verifying the quote, placeQuote() also makes an initial authorization on the SoftLayer_Account tied to this order, if a credit card is on file. If the account tied to this order is a paypal customer, an URL will also be returned to the customer. After placing the order, you must go to this URL to finish the authorization process. This tells paypal that you indeed want to place the order. After going to this URL, it will direct you back to a SoftLayer webpage that tells us you have finished the process. +func (r Billing_Order_Quote) PlaceQuote(orderData *datatypes.Container_Product_Order) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + orderData, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "placeQuote", params, &r.Options, &resp) + return +} + +// Account master users and sub-users in the SoftLayer customer portal can save the quote of an order to avoid its deletion after 5 days or its expiration after 2 days. +func (r Billing_Order_Quote) SaveQuote() (resp datatypes.Billing_Order_Quote, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "saveQuote", nil, &r.Options, &resp) + return +} + +// Use this method for placing server orders and additional services orders. The same applies for this as with verifyOrder. Send in the SoftLayer_Container_Product_Order_Hardware_Server for server orders. In addition to verifying the order, placeOrder() also makes an initial authorization on the SoftLayer_Account tied to this order, if a credit card is on file. If the account tied to this order is a paypal customer, an URL will also be returned to the customer. After placing the order, you must go to this URL to finish the authorization process. This tells paypal that you indeed want to place the order. After going to this URL, it will direct you back to a SoftLayer webpage that tells us you have finished the process. After this, it will go to sales for final approval. +func (r Billing_Order_Quote) VerifyOrder(orderData interface{}) (resp datatypes.Container_Product_Order, err error) { + err = datatypes.SetComplexType(orderData) + if err != nil { + return + } + params := []interface{}{ + orderData, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "verifyOrder", params, &r.Options, &resp) + return +} + +// Withdraws the users acceptance of the GDPR terms. +func (r Billing_Order_Quote) WithdrawGdprAcceptance() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "withdrawGdprAcceptance", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/brand.go b/vendor/github.com/softlayer/softlayer-go/services/brand.go new file mode 100644 index 000000000000..447a51e4d805 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/brand.go @@ -0,0 +1,375 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// The SoftLayer_Brand data type contains brand information relating to the single SoftLayer customer account. +// +// SoftLayer customers are unable to change their brand information in the portal or the API. +type Brand struct { + Session *session.Session + Options sl.Options +} + +// GetBrandService returns an instance of the Brand SoftLayer service +func GetBrandService(sess *session.Session) Brand { + return Brand{Session: sess} +} + +func (r Brand) Id(id int) Brand { + r.Options.Id = &id + return r +} + +func (r Brand) Mask(mask string) Brand { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Brand) Filter(filter string) Brand { + r.Options.Filter = filter + return r +} + +func (r Brand) Limit(limit int) Brand { + r.Options.Limit = &limit + return r +} + +func (r Brand) Offset(offset int) Brand { + r.Options.Offset = &offset + return r +} + +// Create a new customer account record. +func (r Brand) CreateCustomerAccount(account *datatypes.Account, bypassDuplicateAccountCheck *bool) (resp datatypes.Account, err error) { + params := []interface{}{ + account, + bypassDuplicateAccountCheck, + } + err = r.Session.DoRequest("SoftLayer_Brand", "createCustomerAccount", params, &r.Options, &resp) + return +} + +// Create a new brand record. +func (r Brand) CreateObject(templateObject *datatypes.Brand) (resp datatypes.Brand, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Brand", "createObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Brand) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve All accounts owned by the brand. +func (r Brand) GetAllOwnedAccounts() (resp []datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getAllOwnedAccounts", nil, &r.Options, &resp) + return +} + +// (DEPRECATED) Use [[SoftLayer_Ticket_Subject::getAllObjects]] method. +func (r Brand) GetAllTicketSubjects(account *datatypes.Account) (resp []datatypes.Ticket_Subject, err error) { + params := []interface{}{ + account, + } + err = r.Session.DoRequest("SoftLayer_Brand", "getAllTicketSubjects", params, &r.Options, &resp) + return +} + +// Retrieve This flag indicates if creation of accounts is allowed. +func (r Brand) GetAllowAccountCreationFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getAllowAccountCreationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Business Partner details for the brand. Country Enterprise Code, Channel, Segment, Reseller Level. +func (r Brand) GetBusinessPartner() (resp datatypes.Brand_Business_Partner, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getBusinessPartner", nil, &r.Options, &resp) + return +} + +// Retrieve Flag indicating if the brand is a business partner. 
+func (r Brand) GetBusinessPartnerFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getBusinessPartnerFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The Product Catalog for the Brand +func (r Brand) GetCatalog() (resp datatypes.Product_Catalog, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getCatalog", nil, &r.Options, &resp) + return +} + +// Retrieve the contact information for the brand such as the corporate or support contact. This will include the contact name, telephone number, fax number, email address, and mailing address of the contact. +func (r Brand) GetContactInformation() (resp []datatypes.Brand_Contact, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getContactInformation", nil, &r.Options, &resp) + return +} + +// Retrieve The contacts for the brand. +func (r Brand) GetContacts() (resp []datatypes.Brand_Contact, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getContacts", nil, &r.Options, &resp) + return +} + +// Retrieve This references relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on this brand for customers that live in Great Britain. +func (r Brand) GetCustomerCountryLocationRestrictions() (resp []datatypes.Brand_Restriction_Location_CustomerCountry, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getCustomerCountryLocationRestrictions", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Brand) GetDistributor() (resp datatypes.Brand, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getDistributor", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Brand) GetDistributorChildFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getDistributorChildFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Brand) GetDistributorFlag() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getDistributorFlag", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated hardware objects. +func (r Brand) GetHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Brand) GetHasAgentSupportFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getHasAgentSupportFlag", nil, &r.Options, &resp) + return +} + +// Get the payment processor merchant name. +func (r Brand) GetMerchantName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getMerchantName", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Brand) GetObject() (resp datatypes.Brand, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Brand) GetOpenTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getOpenTickets", nil, &r.Options, &resp) + return +} + +// Retrieve Active accounts owned by the brand. 
+func (r Brand) GetOwnedAccounts() (resp []datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getOwnedAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Brand) GetSecurityLevel() (resp datatypes.Security_Level, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getSecurityLevel", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Brand) GetTicketGroups() (resp []datatypes.Ticket_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getTicketGroups", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Brand) GetTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getTickets", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Brand) GetToken(userId *int) (resp string, err error) { + params := []interface{}{ + userId, + } + err = r.Session.DoRequest("SoftLayer_Brand", "getToken", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Brand) GetUsers() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getUsers", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated virtual guest objects. +func (r Brand) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Brand", "getVirtualGuests", nil, &r.Options, &resp) + return +} + +// Contains business partner details associated with a brand. Country Enterprise Identifier (CEID), Channel ID, Segment ID and Reseller Level. +type Brand_Business_Partner struct { + Session *session.Session + Options sl.Options +} + +// GetBrandBusinessPartnerService returns an instance of the Brand_Business_Partner SoftLayer service +func GetBrandBusinessPartnerService(sess *session.Session) Brand_Business_Partner { + return Brand_Business_Partner{Session: sess} +} + +func (r Brand_Business_Partner) Id(id int) Brand_Business_Partner { + r.Options.Id = &id + return r +} + +func (r Brand_Business_Partner) Mask(mask string) Brand_Business_Partner { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Brand_Business_Partner) Filter(filter string) Brand_Business_Partner { + r.Options.Filter = filter + return r +} + +func (r Brand_Business_Partner) Limit(limit int) Brand_Business_Partner { + r.Options.Limit = &limit + return r +} + +func (r Brand_Business_Partner) Offset(offset int) Brand_Business_Partner { + r.Options.Offset = &offset + return r +} + +// Retrieve Brand associated with the business partner data +func (r Brand_Business_Partner) GetBrand() (resp datatypes.Brand, err error) { + err = r.Session.DoRequest("SoftLayer_Brand_Business_Partner", "getBrand", nil, &r.Options, &resp) + return +} + +// Retrieve Channel indicator used to categorize business partner revenue. +func (r Brand_Business_Partner) GetChannel() (resp datatypes.Business_Partner_Channel, err error) { + err = r.Session.DoRequest("SoftLayer_Brand_Business_Partner", "getChannel", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Brand_Business_Partner) GetObject() (resp datatypes.Brand_Business_Partner, err error) { + err = r.Session.DoRequest("SoftLayer_Brand_Business_Partner", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Segment indicator used to categorize business partner revenue. 
+func (r Brand_Business_Partner) GetSegment() (resp datatypes.Business_Partner_Segment, err error) { + err = r.Session.DoRequest("SoftLayer_Brand_Business_Partner", "getSegment", nil, &r.Options, &resp) + return +} + +// The [[SoftLayer_Brand_Restriction_Location_CustomerCountry]] data type defines the relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on the SoftLayer US brand for customers that live in Great Britain. +type Brand_Restriction_Location_CustomerCountry struct { + Session *session.Session + Options sl.Options +} + +// GetBrandRestrictionLocationCustomerCountryService returns an instance of the Brand_Restriction_Location_CustomerCountry SoftLayer service +func GetBrandRestrictionLocationCustomerCountryService(sess *session.Session) Brand_Restriction_Location_CustomerCountry { + return Brand_Restriction_Location_CustomerCountry{Session: sess} +} + +func (r Brand_Restriction_Location_CustomerCountry) Id(id int) Brand_Restriction_Location_CustomerCountry { + r.Options.Id = &id + return r +} + +func (r Brand_Restriction_Location_CustomerCountry) Mask(mask string) Brand_Restriction_Location_CustomerCountry { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Brand_Restriction_Location_CustomerCountry) Filter(filter string) Brand_Restriction_Location_CustomerCountry { + r.Options.Filter = filter + return r +} + +func (r Brand_Restriction_Location_CustomerCountry) Limit(limit int) Brand_Restriction_Location_CustomerCountry { + r.Options.Limit = &limit + return r +} + +func (r Brand_Restriction_Location_CustomerCountry) Offset(offset int) Brand_Restriction_Location_CustomerCountry { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Brand_Restriction_Location_CustomerCountry) GetAllObjects() (resp []datatypes.Brand_Restriction_Location_CustomerCountry, err error) { + err = r.Session.DoRequest("SoftLayer_Brand_Restriction_Location_CustomerCountry", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve This references the brand that has a brand-location-country restriction setup. +func (r Brand_Restriction_Location_CustomerCountry) GetBrand() (resp datatypes.Brand, err error) { + err = r.Session.DoRequest("SoftLayer_Brand_Restriction_Location_CustomerCountry", "getBrand", nil, &r.Options, &resp) + return +} + +// Retrieve This references the datacenter that has a brand-location-country restriction setup. For example, if a datacenter is listed with a restriction for Canada, a Canadian customer may not be eligible to order services at that location. 
+func (r Brand_Restriction_Location_CustomerCountry) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Brand_Restriction_Location_CustomerCountry", "getLocation", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Brand_Restriction_Location_CustomerCountry) GetObject() (resp datatypes.Brand_Restriction_Location_CustomerCountry, err error) { + err = r.Session.DoRequest("SoftLayer_Brand_Restriction_Location_CustomerCountry", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/business.go b/vendor/github.com/softlayer/softlayer-go/services/business.go new file mode 100644 index 000000000000..bfe53c02a349 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/business.go @@ -0,0 +1,122 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// Contains business partner channel information +type Business_Partner_Channel struct { + Session *session.Session + Options sl.Options +} + +// GetBusinessPartnerChannelService returns an instance of the Business_Partner_Channel SoftLayer service +func GetBusinessPartnerChannelService(sess *session.Session) Business_Partner_Channel { + return Business_Partner_Channel{Session: sess} +} + +func (r Business_Partner_Channel) Id(id int) Business_Partner_Channel { + r.Options.Id = &id + return r +} + +func (r Business_Partner_Channel) Mask(mask string) Business_Partner_Channel { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Business_Partner_Channel) Filter(filter string) Business_Partner_Channel { + r.Options.Filter = filter + return r +} + +func (r Business_Partner_Channel) Limit(limit int) Business_Partner_Channel { + r.Options.Limit = &limit + return r +} + +func (r Business_Partner_Channel) Offset(offset int) Business_Partner_Channel { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Business_Partner_Channel) GetObject() (resp datatypes.Business_Partner_Channel, err error) { + err = r.Session.DoRequest("SoftLayer_Business_Partner_Channel", "getObject", nil, &r.Options, &resp) + return +} + +// Contains business partner segment information +type Business_Partner_Segment struct { + Session *session.Session + Options sl.Options +} + +// GetBusinessPartnerSegmentService returns an instance of the Business_Partner_Segment SoftLayer service +func GetBusinessPartnerSegmentService(sess *session.Session) Business_Partner_Segment { + return Business_Partner_Segment{Session: sess} +} + +func (r Business_Partner_Segment) Id(id int) Business_Partner_Segment { + 
r.Options.Id = &id + return r +} + +func (r Business_Partner_Segment) Mask(mask string) Business_Partner_Segment { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Business_Partner_Segment) Filter(filter string) Business_Partner_Segment { + r.Options.Filter = filter + return r +} + +func (r Business_Partner_Segment) Limit(limit int) Business_Partner_Segment { + r.Options.Limit = &limit + return r +} + +func (r Business_Partner_Segment) Offset(offset int) Business_Partner_Segment { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Business_Partner_Segment) GetObject() (resp datatypes.Business_Partner_Segment, err error) { + err = r.Session.DoRequest("SoftLayer_Business_Partner_Segment", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/catalyst.go b/vendor/github.com/softlayer/softlayer-go/services/catalyst.go new file mode 100644 index 000000000000..12242da8a391 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/catalyst.go @@ -0,0 +1,207 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Catalyst_Company_Type struct { + Session *session.Session + Options sl.Options +} + +// GetCatalystCompanyTypeService returns an instance of the Catalyst_Company_Type SoftLayer service +func GetCatalystCompanyTypeService(sess *session.Session) Catalyst_Company_Type { + return Catalyst_Company_Type{Session: sess} +} + +func (r Catalyst_Company_Type) Id(id int) Catalyst_Company_Type { + r.Options.Id = &id + return r +} + +func (r Catalyst_Company_Type) Mask(mask string) Catalyst_Company_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Catalyst_Company_Type) Filter(filter string) Catalyst_Company_Type { + r.Options.Filter = filter + return r +} + +func (r Catalyst_Company_Type) Limit(limit int) Catalyst_Company_Type { + r.Options.Limit = &limit + return r +} + +func (r Catalyst_Company_Type) Offset(offset int) Catalyst_Company_Type { + r.Options.Offset = &offset + return r +} + +// <<.create_object > li > div { padding-top: .5em; padding-bottom: .5em} +// createObject() enables the creation of servers on an account. This +// method is a simplified alternative to interacting with the ordering system directly. +// +// +// In order to create a server, a template object must be sent in with a few required +// values. 
+//
+// When this method returns an order will have been placed for a server of the specified configuration.
+//
+// To determine when the server is available you can poll the server via [[SoftLayer_Hardware/getObject|getObject]],
+// checking the provisionDate property. When provisionDate is not null, the server will be ready. Be sure to use
+// the globalIdentifier as your initialization parameter.
+//
+// Warning: Servers created via this method will incur charges on your account. For testing input parameters see
+// [[SoftLayer_Hardware/generateOrderTemplate|generateOrderTemplate]].
+//
+// Input - [[SoftLayer_Hardware (type)|SoftLayer_Hardware]]
+//
+// * hostname - Hostname for the server. Required. Type - string.
+// * domain - Domain for the server. Required. Type - string.
+// * processorCoreAmount - The number of logical CPU cores to allocate. Required. Type - int.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+// * memoryCapacity - The amount of memory to allocate in gigabytes. Required. Type - int.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+// * hourlyBillingFlag - Specifies the billing type for the server. Required. Type - boolean.
+//   When true the server will be billed on hourly usage, otherwise it will be billed on a monthly basis.
+// * operatingSystemReferenceCode - An identifier for the operating system to provision the server with.
+//   Required. Type - string. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+// * datacenter.name - Specifies which datacenter the server is to be provisioned in. Required. Type - string.
+//   The datacenter property is a [[SoftLayer_Location (type)|location]] structure with the name field set.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   Example: {"datacenter": {"name": "dal05"}}
+// * networkComponents.maxSpeed - Specifies the connection speed for the server's network components.
+//   Optional. Type - int. Default - The highest available zero cost port speed will be used.
+//   The networkComponents property is an array with a single [[SoftLayer_Network_Component (type)|network component]]
+//   structure. The maxSpeed property must be set to specify the network uplink speed, in megabits per second, of the server.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   Example: {"networkComponents": [{"maxSpeed": 1000}]}
+// * networkComponents.redundancyEnabledFlag - Specifies whether or not the server's network components should be in
+//   redundancy groups. Optional. Type - bool. Default - false.
+//   The networkComponents property is an array with a single [[SoftLayer_Network_Component (type)|network component]]
+//   structure. When the redundancyEnabledFlag property is true the server's network components will be in redundancy groups.
+//   Example: {"networkComponents": [{"redundancyEnabledFlag": false}]}
+// * privateNetworkOnlyFlag - Specifies whether or not the server only has access to the private network.
+//   Optional. Type - boolean. Default - false.
+//   When true this flag specifies that a server is to only have access to the private network.
+// * primaryNetworkComponent.networkVlan.id - Specifies the network vlan which is to be used for the frontend interface
+//   of the server. Optional. Type - int.
+//   The primaryNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]] structure with the
+//   networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure. The id property must be set
+//   to specify the frontend network vlan of the server.
+//   Example: {"primaryNetworkComponent": {"networkVlan": {"id": 1}}}
+// * primaryBackendNetworkComponent.networkVlan.id - Specifies the network vlan which is to be used for the backend
+//   interface of the server. Optional. Type - int.
+//   The primaryBackendNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]] structure
+//   with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure. The id property must
+//   be set to specify the backend network vlan of the server.
+//   Example: {"primaryBackendNetworkComponent": {"networkVlan": {"id": 2}}}
+// * fixedConfigurationPreset.keyName - Optional. Type - string.
+//   The fixedConfigurationPreset property is a [[SoftLayer_Product_Package_Preset (type)|fixed configuration preset]]
+//   structure. The keyName property must be set to specify the preset to use.
+//   If a fixed configuration preset is used, the processorCoreAmount, memoryCapacity and hardDrives properties must not be set.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   Example: {"fixedConfigurationPreset": {"keyName": "SOME_KEY_NAME"}}
+// * userData.value - Arbitrary data to be made available to the server. Optional. Type - string.
+//   The userData property is an array with a single [[SoftLayer_Hardware_Attribute (type)|attribute]] structure with the
+//   value property set to an arbitrary value. This value can be retrieved via the
+//   [[SoftLayer_Resource_Metadata/getUserMetadata|getUserMetadata]] method from a request originating from the server.
+//   This is primarily useful for providing data to software that may be on the server and configured to execute upon first boot.
+//   Example: {"userData": [{"value": "someValue"}]}
+// * hardDrives - Hard drive settings for the server. Optional. Type - SoftLayer_Hardware_Component.
+//   Default - The largest available capacity for a zero cost primary disk will be used.
+//   The hardDrives property is an array of [[SoftLayer_Hardware_Component (type)|hardware component]] structures.
+//   Each hard drive must specify the capacity property.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   Example: {"hardDrives": [{"capacity": 500}]}
+// * sshKeys - SSH keys to install on the server upon provisioning. Optional.
+//   Type - array of [[SoftLayer_Security_Ssh_Key (type)|SoftLayer_Security_Ssh_Key]].
+//   The sshKeys property is an array of [[SoftLayer_Security_Ssh_Key (type)|SSH Key]] structures with the id property set
+//   to the value of an existing SSH key.
+//   To create a new SSH key, call [[SoftLayer_Security_Ssh_Key/createObject|createObject]] on the [[SoftLayer_Security_Ssh_Key]] service.
+//   To obtain a list of existing SSH keys, call [[SoftLayer_Account/getSshKeys|getSshKeys]] on the [[SoftLayer_Account]] service.
+//   Example: {"sshKeys": [{"id": 123}]}
+// * postInstallScriptUri - Specifies the uri location of the script to be downloaded and run after installation is complete.
+//   Optional. Type - string.
+//
+// REST Example:
    +// curl -X POST -d '{ +// "parameters":[ +// { +// "hostname": "host1", +// "domain": "example.com", +// "processorCoreAmount": 2, +// "memoryCapacity": 2, +// "hourlyBillingFlag": true, +// "operatingSystemReferenceCode": "UBUNTU_LATEST" +// } +// ] +// }' https://api.softlayer.com/rest/v3/SoftLayer_Hardware.json +// +// HTTP/1.1 201 Created +// Location: https://api.softlayer.com/rest/v3/SoftLayer_Hardware/f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5/getObject +// +// +// { +// "accountId": 232298, +// "bareMetalInstanceFlag": null, +// "domain": "example.com", +// "hardwareStatusId": null, +// "hostname": "host1", +// "id": null, +// "serviceProviderId": null, +// "serviceProviderResourceId": null, +// "globalIdentifier": "f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5", +// "hourlyBillingFlag": true, +// "memoryCapacity": 2, +// "operatingSystemReferenceCode": "UBUNTU_LATEST", +// "processorCoreAmount": 2 +// } +// +func (r Hardware) CreateObject(templateObject *datatypes.Hardware) (resp datatypes.Hardware, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "createObject", params, &r.Options, &resp) + return +} + +// +// This method will cancel a server effective immediately. For servers billed hourly, the charges will stop immediately after the method returns. +func (r Hardware) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "deleteObject", nil, &r.Options, &resp) + return +} + +// Delete software component passwords. +func (r Hardware) DeleteSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "deleteSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Edit the properties of a software component password such as the username, password, and notes. +func (r Hardware) EditSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "editSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Download and run remote script from uri on the hardware. +func (r Hardware) ExecuteRemoteScript(uri *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + uri, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "executeRemoteScript", params, &r.Options, &resp) + return +} + +// The '''findByIpAddress''' method finds hardware using its primary public or private IP address. IP addresses that have a secondary subnet tied to the hardware will not return the hardware - alternate means of locating the hardware must be used (see '''Associated Methods'''). If no hardware is found, no errors are generated and no data is returned. +func (r Hardware) FindByIpAddress(ipAddress *string) (resp datatypes.Hardware, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "findByIpAddress", params, &r.Options, &resp) + return +} + +// +// Obtain an [[SoftLayer_Container_Product_Order_Hardware_Server (type)|order container]] that can be sent to [[SoftLayer_Product_Order/verifyOrder|verifyOrder]] or [[SoftLayer_Product_Order/placeOrder|placeOrder]]. +// +// +// This is primarily useful when there is a necessity to confirm the price which will be charged for an order. 
+// +// +// See [[SoftLayer_Hardware/createObject|createObject]] for specifics on the requirements of the template object parameter. +func (r Hardware) GenerateOrderTemplate(templateObject *datatypes.Hardware) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "generateOrderTemplate", params, &r.Options, &resp) + return +} + +// Retrieve The account associated with a piece of hardware. +func (r Hardware) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's active physical components. +func (r Hardware) GetActiveComponents() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getActiveComponents", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's active network monitoring incidents. +func (r Hardware) GetActiveNetworkMonitorIncident() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getActiveNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// The '''getAlarmHistory''' method retrieves a detailed history for the monitoring alarm. When calling this method, a start and end date for the history to be retrieved must be entered. +func (r Hardware) GetAlarmHistory(startDate *datatypes.Time, endDate *datatypes.Time, alarmId *string) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + alarmId, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getAlarmHistory", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware) GetAllPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getAllPowerComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Allowed_Host information to connect this server to Network Storage volumes that require access control lists. +func (r Hardware) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getAllowedHost", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. +func (r Hardware) GetAllowedNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getAllowedNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects whose Replica that this SoftLayer_Hardware has access to. +func (r Hardware) GetAllowedNetworkStorageReplicas() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getAllowedNetworkStorageReplicas", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding an antivirus/spyware software component object. +func (r Hardware) GetAntivirusSpywareSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getAntivirusSpywareSoftwareComponent", nil, &r.Options, &resp) + return +} + +// This method is retrieve a list of SoftLayer_Network_Storage volumes that are authorized access to this SoftLayer_Hardware. 
+func (r Hardware) GetAttachedNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getAttachedNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's specific attributes. +func (r Hardware) GetAttributes() (resp []datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getAttributes", nil, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that can be authorized to this SoftLayer_Hardware. +func (r Hardware) GetAvailableNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getAvailableNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve The average daily public bandwidth usage for the current billing cycle. +func (r Hardware) GetAverageDailyPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getAverageDailyPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// The '''getBackendIncomingBandwidth''' method retrieves the amount of incoming private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes. +func (r Hardware) GetBackendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getBackendIncomingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's back-end or private network components. +func (r Hardware) GetBackendNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getBackendNetworkComponents", nil, &r.Options, &resp) + return +} + +// The '''getBackendOutgoingBandwidth''' method retrieves the amount of outgoing private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes. +func (r Hardware) GetBackendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getBackendOutgoingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A hardware's backend or private router. +func (r Hardware) GetBackendRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getBackendRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth (measured in GB). +func (r Hardware) GetBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted detail record. Allotment details link bandwidth allocation with allotments. 
+func (r Hardware) GetBandwidthAllotmentDetail() (resp datatypes.Network_Bandwidth_Version1_Allotment_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getBandwidthAllotmentDetail", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's benchmark certifications. +func (r Hardware) GetBenchmarkCertifications() (resp []datatypes.Hardware_Benchmark_Certification, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getBenchmarkCertifications", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for a server. +func (r Hardware) GetBillingItem() (resp datatypes.Billing_Item_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that a billing item exists. +func (r Hardware) GetBillingItemFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getBillingItemFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether the hardware is ineligible for cancellation because it is disconnected. +func (r Hardware) GetBlockCancelBecauseDisconnectedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getBlockCancelBecauseDisconnectedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Status indicating whether or not a piece of hardware has business continuance insurance. +func (r Hardware) GetBusinessContinuanceInsuranceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getBusinessContinuanceInsuranceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Child hardware. +func (r Hardware) GetChildrenHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getChildrenHardware", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware) GetComponentDetailsXML() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getComponentDetailsXML", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's components. +func (r Hardware) GetComponents() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getComponents", nil, &r.Options, &resp) + return +} + +// Retrieve A continuous data protection/server backup software component object. +func (r Hardware) GetContinuousDataProtectionSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getContinuousDataProtectionSoftwareComponent", nil, &r.Options, &resp) + return +} + +// +// There are many options that may be provided while ordering a server, this method can be used to determine what these options are. +// +// +// Detailed information on the return value can be found on the data type page for [[SoftLayer_Container_Hardware_Configuration (type)]]. +func (r Hardware) GetCreateObjectOptions() (resp datatypes.Container_Hardware_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getCreateObjectOptions", nil, &r.Options, &resp) + return +} + +// Retrieve The current billable public outbound bandwidth for this hardware for the current billing cycle. 
+func (r Hardware) GetCurrentBillableBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getCurrentBillableBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Get the billing detail for this instance for the current billing period. This does not include bandwidth usage. +func (r Hardware) GetCurrentBillingDetail() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getCurrentBillingDetail", nil, &r.Options, &resp) + return +} + +// The '''getCurrentBillingTotal''' method retrieves the total bill amount in US Dollars ($) for the current billing period. In addition to the total bill amount, the billing detail also includes all bandwidth used up to the point the method is called on the piece of hardware. +func (r Hardware) GetCurrentBillingTotal() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getCurrentBillingTotal", nil, &r.Options, &resp) + return +} + +// The '''getDailyAverage''' method calculates the average daily network traffic used by the selected server. Using the required parameter ''dateTime'' to enter a start and end date, the user retrieves this average, measure in gigabytes (GB) for the specified date range. When entering parameters, only the month, day and year are required - time entries are omitted as this method defaults the time to midnight in order to account for the entire day. +func (r Hardware) GetDailyAverage(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getDailyAverage", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the datacenter in which a piece of hardware resides. +func (r Hardware) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the datacenter in which a piece of hardware resides. +func (r Hardware) GetDatacenterName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDatacenterName", nil, &r.Options, &resp) + return +} + +// Retrieve Number of day(s) a server have been in spare pool. +func (r Hardware) GetDaysInSparePool() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDaysInSparePool", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware that has uplink network connections to a piece of hardware. +func (r Hardware) GetDownlinkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDownlinkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware that has uplink network connections to a piece of hardware. +func (r Hardware) GetDownlinkNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDownlinkNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all servers attached to a piece of network hardware. +func (r Hardware) GetDownlinkServers() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDownlinkServers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all virtual guests attached to a piece of network hardware. 
+func (r Hardware) GetDownlinkVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDownlinkVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware downstream from a network device. +func (r Hardware) GetDownstreamHardwareBindings() (resp []datatypes.Network_Component_Uplink_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDownstreamHardwareBindings", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware downstream from the selected piece of hardware. +func (r Hardware) GetDownstreamNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDownstreamNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware with monitoring warnings or errors that are downstream from the selected piece of hardware. +func (r Hardware) GetDownstreamNetworkHardwareWithIncidents() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDownstreamNetworkHardwareWithIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all servers attached downstream to a piece of network hardware. +func (r Hardware) GetDownstreamServers() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDownstreamServers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all virtual guests attached to a piece of network hardware. +func (r Hardware) GetDownstreamVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDownstreamVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The drive controllers contained within a piece of hardware. +func (r Hardware) GetDriveControllers() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDriveControllers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated EVault network storage service account. +func (r Hardware) GetEvaultNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getEvaultNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's firewall services. +func (r Hardware) GetFirewallServiceComponent() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getFirewallServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Defines the fixed components in a fixed configuration bare metal server. +func (r Hardware) GetFixedConfigurationPreset() (resp datatypes.Product_Package_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getFixedConfigurationPreset", nil, &r.Options, &resp) + return +} + +// The '''getFrontendIncomingBandwidth''' method retrieves the amount of incoming public network traffic used by a server between the given start and end date parameters. When entering the ''dateTime'' parameter, only the month, day and year of the start and end dates are required - the time (hour, minute and second) are set to midnight by default and cannot be changed. The amount of bandwidth retrieved is measured in gigabytes (GB). 
+func (r Hardware) GetFrontendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getFrontendIncomingBandwidth", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's front-end or public network components.
+func (r Hardware) GetFrontendNetworkComponents() (resp []datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getFrontendNetworkComponents", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getFrontendOutgoingBandwidth''' method retrieves the amount of outgoing public network traffic used by a server between the given start and end date parameters. The ''dateTime'' parameter requires only the day, month and year to be entered - the time (hour, minute and second) is set to midnight by default in order to gather the data for the entire start and end date indicated in the parameter. The amount of bandwidth retrieved is measured in gigabytes (GB).
+func (r Hardware) GetFrontendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getFrontendOutgoingBandwidth", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's frontend or public router.
+func (r Hardware) GetFrontendRouters() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getFrontendRouters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's universally unique identifier.
+func (r Hardware) GetGlobalIdentifier() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getGlobalIdentifier", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The hard drives contained within a piece of hardware.
+func (r Hardware) GetHardDrives() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getHardDrives", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The chassis that a piece of hardware is housed in.
+func (r Hardware) GetHardwareChassis() (resp datatypes.Hardware_Chassis, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getHardwareChassis", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's function.
+func (r Hardware) GetHardwareFunction() (resp datatypes.Hardware_Function, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getHardwareFunction", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's function.
+func (r Hardware) GetHardwareFunctionDescription() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getHardwareFunctionDescription", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's status.
+func (r Hardware) GetHardwareStatus() (resp datatypes.Hardware_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getHardwareStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Determine if the hardware object has TPM enabled.
+func (r Hardware) GetHasTrustedPlatformModuleBillingItemFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getHasTrustedPlatformModuleBillingItemFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a host IPS software component object.
+func (r Hardware) GetHostIpsSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getHostIpsSoftwareComponent", nil, &r.Options, &resp) + return +} + +// The '''getHourlyBandwidth''' method retrieves all bandwidth updates hourly for the specified hardware. Because the potential number of data points can become excessive, the method limits the user to obtain data in 24-hour intervals. The required ''dateTime'' parameter is used as the starting point for the query and will be calculated for the 24-hour period starting with the specified date and time. For example, entering a parameter of +// +// '02/01/2008 0:00' +// +// results in a return of all bandwidth data for the entire day of February 1, 2008, as 0:00 specifies a midnight start date. Please note that the time entered should be completed using a 24-hour clock (military time, astronomical time). +// +// For data spanning more than a single 24-hour period, refer to the getBandwidthData function on the metricTrackingObject for the piece of hardware. +func (r Hardware) GetHourlyBandwidth(mode *string, day *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + mode, + day, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getHourlyBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A server's hourly billing status. +func (r Hardware) GetHourlyBillingFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getHourlyBillingFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The sum of all the inbound network traffic data for the last 30 days. +func (r Hardware) GetInboundBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getInboundBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public inbound bandwidth for this hardware for the current billing cycle. +func (r Hardware) GetInboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getInboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the last transaction a server performed. +func (r Hardware) GetLastTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getLastTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's latest network monitoring incident. +func (r Hardware) GetLatestNetworkMonitorIncident() (resp datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getLatestNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// Retrieve Where a piece of hardware is located within SoftLayer's location hierarchy. +func (r Hardware) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware) GetLocationPathString() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getLocationPathString", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a lockbox account associated with a server. 
+func (r Hardware) GetLockboxNetworkStorage() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getLockboxNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the hardware is a managed resource. +func (r Hardware) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's memory. +func (r Hardware) GetMemory() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMemory", nil, &r.Options, &resp) + return +} + +// Retrieve The amount of memory a piece of hardware has, measured in gigabytes. +func (r Hardware) GetMemoryCapacity() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMemoryCapacity", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's metric tracking object. +func (r Hardware) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object_HardwareServer, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Returns open monitoring alarms for a given time period +func (r Hardware) GetMonitoringActiveAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getMonitoringActiveAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the monitoring agents associated with a piece of hardware. +func (r Hardware) GetMonitoringAgents() (resp []datatypes.Monitoring_Agent, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMonitoringAgents", nil, &r.Options, &resp) + return +} + +// Returns closed monitoring alarms for a given time period +func (r Hardware) GetMonitoringClosedAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getMonitoringClosedAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's monitoring robot. +func (r Hardware) GetMonitoringRobot() (resp datatypes.Monitoring_Robot, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMonitoringRobot", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitoring services. +func (r Hardware) GetMonitoringServiceComponent() (resp datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMonitoringServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The monitoring service flag eligibility status for a piece of hardware. +func (r Hardware) GetMonitoringServiceEligibilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMonitoringServiceEligibilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The service flag status for a piece of hardware. 
+func (r Hardware) GetMonitoringServiceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMonitoringServiceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's motherboard. +func (r Hardware) GetMotherboard() (resp datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMotherboard", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network cards. +func (r Hardware) GetNetworkCards() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkCards", nil, &r.Options, &resp) + return +} + +// Retrieve Returns a hardware's network components. +func (r Hardware) GetNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The gateway member if this device is part of a network gateway. +func (r Hardware) GetNetworkGatewayMember() (resp datatypes.Network_Gateway_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkGatewayMember", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not this device is part of a network gateway. +func (r Hardware) GetNetworkGatewayMemberFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkGatewayMemberFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's network management IP address. +func (r Hardware) GetNetworkManagementIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkManagementIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve All servers with failed monitoring that are attached downstream to a piece of hardware. +func (r Hardware) GetNetworkMonitorAttachedDownHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkMonitorAttachedDownHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Virtual guests that are attached downstream to a hardware that have failed monitoring +func (r Hardware) GetNetworkMonitorAttachedDownVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkMonitorAttachedDownVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The status of all of a piece of hardware's network monitoring incidents. +func (r Hardware) GetNetworkMonitorIncidents() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkMonitorIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitors. +func (r Hardware) GetNetworkMonitors() (resp []datatypes.Network_Monitor_Version1_Query_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkMonitors", nil, &r.Options, &resp) + return +} + +// Retrieve The value of a hardware's network status attribute. +func (r Hardware) GetNetworkStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's related network status attribute. 
+func (r Hardware) GetNetworkStatusAttribute() (resp datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkStatusAttribute", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated network storage service account. +func (r Hardware) GetNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The network virtual LANs (VLANs) associated with a piece of hardware's network components. +func (r Hardware) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkVlans", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth for the next billing cycle (measured in GB). +func (r Hardware) GetNextBillingCycleBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNextBillingCycleBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware) GetNotesHistory() (resp []datatypes.Hardware_Note, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNotesHistory", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Hardware object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Hardware service. You can only retrieve the account that your portal user is assigned to. +func (r Hardware) GetObject() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's operating system. +func (r Hardware) GetOperatingSystem() (resp datatypes.Software_Component_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getOperatingSystem", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's operating system software description. +func (r Hardware) GetOperatingSystemReferenceCode() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getOperatingSystemReferenceCode", nil, &r.Options, &resp) + return +} + +// Retrieve The sum of all the outbound network traffic data for the last 30 days. +func (r Hardware) GetOutboundBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getOutboundBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware) GetOutboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getOutboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Blade Bay +func (r Hardware) GetParentBay() (resp datatypes.Hardware_Blade, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getParentBay", nil, &r.Options, &resp) + return +} + +// Retrieve Parent Hardware. +func (r Hardware) GetParentHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getParentHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the Point of Presence (PoP) location in which a piece of hardware resides. 
+func (r Hardware) GetPointOfPresenceLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getPointOfPresenceLocation", nil, &r.Options, &resp) + return +} + +// Retrieve The power components for a hardware object. +func (r Hardware) GetPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getPowerComponents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's power supply. +func (r Hardware) GetPowerSupply() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getPowerSupply", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's primary private IP address. +func (r Hardware) GetPrimaryBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getPrimaryBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's primary back-end network component. +func (r Hardware) GetPrimaryBackendNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getPrimaryBackendNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's primary public IP address. +func (r Hardware) GetPrimaryIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getPrimaryIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's primary public network component. +func (r Hardware) GetPrimaryNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getPrimaryNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's private network bandwidth usage over the specified timeframe. If no timeframe is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPrivateBandwidthGraphImage returns a PNG image measuring 827 pixels by 293 pixels. +func (r Hardware) GetPrivateBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getPrivateBandwidthData", params, &r.Options, &resp) + return +} + +// Retrieve Whether the hardware only has access to the private network. +func (r Hardware) GetPrivateNetworkOnlyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getPrivateNetworkOnlyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The total number of processor cores, summed from all processors that are attached to a piece of hardware +func (r Hardware) GetProcessorCoreAmount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getProcessorCoreAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total number of physical processor cores, summed from all processors that are attached to a piece of hardware +func (r Hardware) GetProcessorPhysicalCoreAmount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getProcessorPhysicalCoreAmount", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's processors. 
+func (r Hardware) GetProcessors() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getProcessors", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's public network bandwidth usage over the specified timeframe. If no timeframe is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPublicBandwidthGraphImage returns a PNG image measuring 827 pixels by 293 pixels. +func (r Hardware) GetPublicBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getPublicBandwidthData", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware) GetRack() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getRack", nil, &r.Options, &resp) + return +} + +// Retrieve The RAID controllers contained within a piece of hardware. +func (r Hardware) GetRaidControllers() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getRaidControllers", nil, &r.Options, &resp) + return +} + +// Retrieve Recent events that impact this hardware. +func (r Hardware) GetRecentEvents() (resp []datatypes.Notification_Occurrence_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getRecentEvents", nil, &r.Options, &resp) + return +} + +// Retrieve User credentials to issue commands and/or interact with the server's remote management card. +func (r Hardware) GetRemoteManagementAccounts() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getRemoteManagementAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's associated remote management component. This is normally IPMI. +func (r Hardware) GetRemoteManagementComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getRemoteManagementComponent", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware) GetResourceConfigurations() (resp []datatypes.Hardware_Resource_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getResourceConfigurations", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware) GetResourceGroupMemberReferences() (resp []datatypes.Resource_Group_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getResourceGroupMemberReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware) GetResourceGroupRoles() (resp []datatypes.Resource_Group_Role, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getResourceGroupRoles", nil, &r.Options, &resp) + return +} + +// Retrieve The resource groups in which this hardware is a member. +func (r Hardware) GetResourceGroups() (resp []datatypes.Resource_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getResourceGroups", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's routers. +func (r Hardware) GetRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getRouters", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of scale assets this hardware corresponds to. 
+func (r Hardware) GetScaleAssets() (resp []datatypes.Scale_Asset, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getScaleAssets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's vulnerability scan requests.
+func (r Hardware) GetSecurityScanRequests() (resp []datatypes.Network_Security_Scanner_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getSecurityScanRequests", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getSensorData''' method retrieves a server's hardware state via its internal sensors. Remote sensor data is transmitted to the SoftLayer API by way of the server's remote management card. Sensor data measures various information, including system temperatures, voltages and other local server settings. Sensor data is cached for 30 seconds; calls made to this method for the same server within 30 seconds of each other will result in the same data being returned. To ensure that the data retrieved reflects a fresh snapshot, make calls more than 30 seconds apart.
+func (r Hardware) GetSensorData() (resp []datatypes.Container_RemoteManagement_SensorReading, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getSensorData", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getSensorDataWithGraphs''' method retrieves the raw data returned from the server's remote management card. Along with raw data, graphs for the CPU and system temperatures and fan speeds are also returned. For more details on what information is returned, refer to the ''getSensorData'' method.
+func (r Hardware) GetSensorDataWithGraphs() (resp datatypes.Container_RemoteManagement_SensorReadingsWithGraphs, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getSensorDataWithGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getServerFanSpeedGraphs''' method retrieves the server's fan speeds and displays the speeds using tachometer graphs. Data used to construct these graphs is retrieved from the server's remote management card. Each graph returned will have an associated title.
+func (r Hardware) GetServerFanSpeedGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorSpeed, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getServerFanSpeedGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getServerPowerState''' method retrieves the power state for the selected server. The server's power status is retrieved from its remote management card. This method returns "on" for a server that has been powered on, or "off" for servers that are powered off.
+func (r Hardware) GetServerPowerState() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getServerPowerState", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the server room in which the hardware is located.
+func (r Hardware) GetServerRoom() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getServerRoom", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getServerTemperatureGraphs''' method retrieves the server's temperatures and displays the various temperatures using thermometer graphs. Temperatures retrieved are CPU temperature(s) and system temperatures. Data used to construct the graphs is retrieved from the server's remote management card. All graphs returned will have an associated title.
+func (r Hardware) GetServerTemperatureGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorTemperature, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getServerTemperatureGraphs", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the piece of hardware's service provider. +func (r Hardware) GetServiceProvider() (resp datatypes.Service_Provider, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getServiceProvider", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's installed software. +func (r Hardware) GetSoftwareComponents() (resp []datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getSoftwareComponents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for a spare pool server. +func (r Hardware) GetSparePoolBillingItem() (resp datatypes.Billing_Item_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getSparePoolBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve SSH keys to be installed on the server during provisioning or an OS reload. +func (r Hardware) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getSshKeys", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware) GetStorageNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getStorageNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware) GetTagReferences() (resp []datatypes.Tag_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getTagReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware) GetTopLevelLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getTopLevelLocation", nil, &r.Options, &resp) + return +} + +// +// This method will query transaction history for a piece of hardware. +func (r Hardware) GetTransactionHistory() (resp []datatypes.Provisioning_Version1_Transaction_History, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getTransactionHistory", nil, &r.Options, &resp) + return +} + +// Retrieve a list of upgradeable items available to this piece of hardware. Currently, getUpgradeItemPrices retrieves upgrades available for a server's memory, hard drives, network port speed, bandwidth allocation and GPUs. +func (r Hardware) GetUpgradeItemPrices() (resp []datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getUpgradeItemPrices", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated upgrade request object, if any. +func (r Hardware) GetUpgradeRequest() (resp datatypes.Product_Upgrade_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getUpgradeRequest", nil, &r.Options, &resp) + return +} + +// Retrieve The network device connected to a piece of hardware. +func (r Hardware) GetUplinkHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getUplinkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the network component that is one level higher than a piece of hardware on the network infrastructure. 
+func (r Hardware) GetUplinkNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getUplinkNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve An array containing a single string of custom user data for a hardware order. Max size is 16 kb. +func (r Hardware) GetUserData() (resp []datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getUserData", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis for a piece of hardware. +func (r Hardware) GetVirtualChassis() (resp datatypes.Hardware_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualChassis", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis siblings for a piece of hardware. +func (r Hardware) GetVirtualChassisSiblings() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualChassisSiblings", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's virtual host record. +func (r Hardware) GetVirtualHost() (resp datatypes.Virtual_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualHost", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's virtual software licenses. +func (r Hardware) GetVirtualLicenses() (resp []datatypes.Software_VirtualLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualLicenses", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the bandwidth allotment to which a piece of hardware belongs. +func (r Hardware) GetVirtualRack() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualRack", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the bandwidth allotment belonging to a piece of hardware. +func (r Hardware) GetVirtualRackId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualRackId", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the bandwidth allotment belonging to a piece of hardware. +func (r Hardware) GetVirtualRackName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualRackName", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's virtualization platform software. +func (r Hardware) GetVirtualizationPlatform() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualizationPlatform", nil, &r.Options, &resp) + return +} + +// The '''importVirtualHost''' method attempts to import the host record for the virtualization platform running on a server. +func (r Hardware) ImportVirtualHost() (resp datatypes.Virtual_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "importVirtualHost", nil, &r.Options, &resp) + return +} + +// The '''isPingable''' method issues a ping command to the selected server and returns the result of the ping command. This boolean return value displays ''true'' upon successful ping or ''false'' for a failed ping. +func (r Hardware) IsPingable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "isPingable", nil, &r.Options, &resp) + return +} + +// Issues a ping command to the server and returns the ping response. 
+func (r Hardware) Ping() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "ping", nil, &r.Options, &resp)
+	return
+}
+
+// The '''powerCycle''' method completes a power off and power on of the server successively in one command. The power cycle command is equivalent to unplugging the server from the power strip and then plugging the server back in. '''This method should only be used when all other options have been exhausted'''. Additional remote management commands may not be executed if this command was successfully issued within the last 20 minutes to avoid server failure. Remote management commands include:
+//
+// rebootSoft rebootHard powerOn powerOff powerCycle
+//
+//
+func (r Hardware) PowerCycle() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "powerCycle", nil, &r.Options, &resp)
+	return
+}
+
+// This method will power off the server via the server's remote management card.
+func (r Hardware) PowerOff() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "powerOff", nil, &r.Options, &resp)
+	return
+}
+
+// The '''powerOn''' method powers on a server via its remote management card. This boolean return value returns ''true'' upon successful execution and ''false'' if unsuccessful. Other remote management commands may not be issued if this command was successfully completed within the last 20 minutes to avoid server failure. Remote management commands include:
+//
+// rebootSoft rebootHard powerOn powerOff powerCycle
+//
+//
+func (r Hardware) PowerOn() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "powerOn", nil, &r.Options, &resp)
+	return
+}
+
+// The '''rebootDefault''' method attempts to reboot the server by issuing a soft reboot, or reset, command to the server's remote management card. If the reset attempt is unsuccessful, a power cycle command will be issued via the power strip. The power cycle command is equivalent to unplugging the server from the power strip and then plugging the server back in. If the reset was successful within the last 20 minutes, another remote management command cannot be completed to avoid server failure. Remote management commands include:
+//
+// rebootSoft rebootHard powerOn powerOff powerCycle
+//
+//
+func (r Hardware) RebootDefault() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "rebootDefault", nil, &r.Options, &resp)
+	return
+}
+
+// The '''rebootHard''' method reboots the server by issuing a cycle command to the server's remote management card. A hard reboot is equivalent to pressing the ''Reset'' button on a server - it is issued immediately and will not allow processes to shut down prior to the reboot. Completing a hard reboot may initiate system disk checks upon server reboot, causing the boot up to take longer than normally expected.
+//
+// Remote management commands are unable to be executed if a reboot has been issued successfully within the last 20 minutes to avoid server failure. Remote management commands include:
+//
+// rebootSoft rebootHard powerOn powerOff powerCycle
+//
+//
+func (r Hardware) RebootHard() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "rebootHard", nil, &r.Options, &resp)
+	return
+}
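A minimal sketch of how the remote-management calls above might be driven, checking the power state first and respecting the 20-minute lockout the comments describe. The credentials, the hardware id, and the choice of rebootDefault are illustrative assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("myuser", "my-api-key") // assumed credentials
	hw := services.GetHardwareService(sess).Id(123456) // hypothetical hardware id

	// Read the power state from the remote management card before issuing a command.
	state, err := hw.GetServerPowerState()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("power state:", state)

	// rebootDefault tries a soft reset first and falls back to a power cycle.
	// Per the comments above, avoid issuing further remote-management commands
	// (rebootSoft, rebootHard, powerOn, powerOff, powerCycle) for roughly 20 minutes afterwards.
	if state == "on" {
		ok, err := hw.RebootDefault()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("reboot issued:", ok)
	}
}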
+
+// The '''rebootSoft''' method reboots the server by issuing a reset command to the server's remote management card via soft reboot. When executing a soft reboot, servers allow all processes to shut down completely before rebooting. Remote management commands are unable to be issued within 20 minutes of issuing a successful soft reboot in order to avoid server failure. Remote management commands include:
+//
+// rebootSoft rebootHard powerOn powerOff powerCycle
+//
+//
+func (r Hardware) RebootSoft() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "rebootSoft", nil, &r.Options, &resp)
+	return
+}
+
+// This method is used to remove access to a SoftLayer_Network_Storage volume that supports host- or network-level access control.
+func (r Hardware) RemoveAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		networkStorageTemplateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware", "removeAccessToNetworkStorage", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to remove access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control.
+func (r Hardware) RemoveAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		networkStorageTemplateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware", "removeAccessToNetworkStorageList", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Hardware) SetTags(tags *string) (resp bool, err error) {
+	params := []interface{}{
+		tags,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware", "setTags", params, &r.Options, &resp)
+	return
+}
+
+// This method will update the root IPMI password on this SoftLayer_Hardware.
+func (r Hardware) UpdateIpmiPassword(password *string) (resp bool, err error) {
+	params := []interface{}{
+		password,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware", "updateIpmiPassword", params, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Hardware_Benchmark_Certification data type contains general information relating to a single SoftLayer hardware benchmark certification document.
+type Hardware_Benchmark_Certification struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetHardwareBenchmarkCertificationService returns an instance of the Hardware_Benchmark_Certification SoftLayer service
+func GetHardwareBenchmarkCertificationService(sess *session.Session) Hardware_Benchmark_Certification {
+	return Hardware_Benchmark_Certification{Session: sess}
+}
+
+func (r Hardware_Benchmark_Certification) Id(id int) Hardware_Benchmark_Certification {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Hardware_Benchmark_Certification) Mask(mask string) Hardware_Benchmark_Certification {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Hardware_Benchmark_Certification) Filter(filter string) Hardware_Benchmark_Certification {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Hardware_Benchmark_Certification) Limit(limit int) Hardware_Benchmark_Certification {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Hardware_Benchmark_Certification) Offset(offset int) Hardware_Benchmark_Certification {
+	r.Options.Offset = &offset
+	return r
+}
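The Id, Mask, Filter, Limit and Offset setters above follow the builder pattern used by every service in this file; note that Mask wraps a bare property list in mask[...] whenever it contains a comma or bracket and is not already prefixed. A minimal usage sketch, with an assumed certification id and an illustrative object mask:

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("myuser", "my-api-key") // assumed credentials

	// 123 is a hypothetical benchmark certification id.
	// "id,hardware[hostname]" has no "mask[" prefix and contains a comma,
	// so Mask sends it as "mask[id,hardware[hostname]]".
	svc := services.GetHardwareBenchmarkCertificationService(sess).
		Id(123).
		Mask("id,hardware[hostname]")

	cert, err := svc.GetObject()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cert)
}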
+
+// Retrieve Information regarding a benchmark certification result's associated SoftLayer customer account.
+func (r Hardware_Benchmark_Certification) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Benchmark_Certification", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the piece of hardware on which a benchmark certification test was performed.
+func (r Hardware_Benchmark_Certification) GetHardware() (resp datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Benchmark_Certification", "getHardware", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Hardware_Benchmark_Certification object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Hardware_Benchmark_Certification service.
+func (r Hardware_Benchmark_Certification) GetObject() (resp datatypes.Hardware_Benchmark_Certification, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Benchmark_Certification", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// The "getResultFile" method attempts to retrieve the file associated with a benchmark certification result, if such a file exists. If no file exists for the benchmark certification result, calling this method throws an exception.
+func (r Hardware_Benchmark_Certification) GetResultFile() (resp []byte, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Benchmark_Certification", "getResultFile", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Hardware_Blade struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetHardwareBladeService returns an instance of the Hardware_Blade SoftLayer service
+func GetHardwareBladeService(sess *session.Session) Hardware_Blade {
+	return Hardware_Blade{Session: sess}
+}
+
+func (r Hardware_Blade) Id(id int) Hardware_Blade {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Hardware_Blade) Mask(mask string) Hardware_Blade {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Hardware_Blade) Filter(filter string) Hardware_Blade {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Hardware_Blade) Limit(limit int) Hardware_Blade {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Hardware_Blade) Offset(offset int) Hardware_Blade {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve
+func (r Hardware_Blade) GetHardwareChild() (resp datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Blade", "getHardwareChild", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_Blade) GetHardwareParent() (resp datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Blade", "getHardwareParent", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Hardware_Blade) GetObject() (resp datatypes.Hardware_Blade, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Blade", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Hardware_Component_Model data type contains general information relating to a single SoftLayer component model. A component model represents a vendor specific representation of a hardware component. Every piece of hardware on a server will have a specific hardware component model.
+type Hardware_Component_Model struct { + Session *session.Session + Options sl.Options +} + +// GetHardwareComponentModelService returns an instance of the Hardware_Component_Model SoftLayer service +func GetHardwareComponentModelService(sess *session.Session) Hardware_Component_Model { + return Hardware_Component_Model{Session: sess} +} + +func (r Hardware_Component_Model) Id(id int) Hardware_Component_Model { + r.Options.Id = &id + return r +} + +func (r Hardware_Component_Model) Mask(mask string) Hardware_Component_Model { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Hardware_Component_Model) Filter(filter string) Hardware_Component_Model { + r.Options.Filter = filter + return r +} + +func (r Hardware_Component_Model) Limit(limit int) Hardware_Component_Model { + r.Options.Limit = &limit + return r +} + +func (r Hardware_Component_Model) Offset(offset int) Hardware_Component_Model { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Hardware_Component_Model) GetArchitectureType() (resp datatypes.Hardware_Component_Model_Architecture_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getArchitectureType", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Component_Model) GetAttributes() (resp []datatypes.Hardware_Component_Model_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Component_Model) GetCompatibleArrayTypes() (resp []datatypes.Configuration_Storage_Group_Array_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getCompatibleArrayTypes", nil, &r.Options, &resp) + return +} + +// Retrieve All the component models that are compatible with a hardware component model. +func (r Hardware_Component_Model) GetCompatibleChildComponentModels() (resp []datatypes.Hardware_Component_Model, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getCompatibleChildComponentModels", nil, &r.Options, &resp) + return +} + +// Retrieve All the component models that a hardware component model is compatible with. +func (r Hardware_Component_Model) GetCompatibleParentComponentModels() (resp []datatypes.Hardware_Component_Model, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getCompatibleParentComponentModels", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Component_Model) GetFirmwareQuantity() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getFirmwareQuantity", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Component_Model) GetFirmwares() (resp []datatypes.Hardware_Component_Firmware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getFirmwares", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware component model's physical components in inventory. +func (r Hardware_Component_Model) GetHardwareComponents() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getHardwareComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The non-vendor specific generic component model for a hardware component model. 
+func (r Hardware_Component_Model) GetHardwareGenericComponentModel() (resp datatypes.Hardware_Component_Model_Generic, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getHardwareGenericComponentModel", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Component_Model) GetInfinibandCompatibleAttribute() (resp datatypes.Hardware_Component_Model_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getInfinibandCompatibleAttribute", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Component_Model) GetIsFlexSkuCompatible() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getIsFlexSkuCompatible", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Component_Model) GetIsInfinibandCompatible() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getIsInfinibandCompatible", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Hardware_Component_Model object. +func (r Hardware_Component_Model) GetObject() (resp datatypes.Hardware_Component_Model, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve A motherboard's average reboot time. +func (r Hardware_Component_Model) GetRebootTime() (resp datatypes.Hardware_Component_Motherboard_Reboot_Time, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getRebootTime", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware component model's type. +func (r Hardware_Component_Model) GetType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The types of attributes that are allowed for a given hardware component model. +func (r Hardware_Component_Model) GetValidAttributeTypes() (resp []datatypes.Hardware_Component_Model_Attribute_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getValidAttributeTypes", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Hardware_Component_Partition_OperatingSystem data type contains general information relating to a single SoftLayer Operating System Partition Template. 
+type Hardware_Component_Partition_OperatingSystem struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetHardwareComponentPartitionOperatingSystemService returns an instance of the Hardware_Component_Partition_OperatingSystem SoftLayer service
+func GetHardwareComponentPartitionOperatingSystemService(sess *session.Session) Hardware_Component_Partition_OperatingSystem {
+	return Hardware_Component_Partition_OperatingSystem{Session: sess}
+}
+
+func (r Hardware_Component_Partition_OperatingSystem) Id(id int) Hardware_Component_Partition_OperatingSystem {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Hardware_Component_Partition_OperatingSystem) Mask(mask string) Hardware_Component_Partition_OperatingSystem {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Hardware_Component_Partition_OperatingSystem) Filter(filter string) Hardware_Component_Partition_OperatingSystem {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Hardware_Component_Partition_OperatingSystem) Limit(limit int) Hardware_Component_Partition_OperatingSystem {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Hardware_Component_Partition_OperatingSystem) Offset(offset int) Hardware_Component_Partition_OperatingSystem {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Hardware_Component_Partition_OperatingSystem) GetAllObjects() (resp []datatypes.Hardware_Component_Partition_OperatingSystem, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_OperatingSystem", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getByDescription''' method retrieves all possible partition templates based on the description (required parameter) entered when calling the method. The description is typically the operating system's name. Currently recognized values include 'linux', 'windows', 'freebsd', and 'Debian'.
+func (r Hardware_Component_Partition_OperatingSystem) GetByDescription(description *string) (resp datatypes.Hardware_Component_Partition_OperatingSystem, err error) {
+	params := []interface{}{
+		description,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_OperatingSystem", "getByDescription", params, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Hardware_Component_Partition_OperatingSystem object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Hardware_Component_Partition_OperatingSystem service.
+func (r Hardware_Component_Partition_OperatingSystem) GetObject() (resp datatypes.Hardware_Component_Partition_OperatingSystem, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_OperatingSystem", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding an operating system's [[SoftLayer_Hardware_Component_Partition_Template|Partition Templates]].
+func (r Hardware_Component_Partition_OperatingSystem) GetPartitionTemplates() (resp []datatypes.Hardware_Component_Partition_Template, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_OperatingSystem", "getPartitionTemplates", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Hardware_Component_Partition_Template data type contains general information relating to a single SoftLayer partition template.
Partition templates group 1 or more partition configurations that can be used to predefine how a hard drive's partitions will be configured. +type Hardware_Component_Partition_Template struct { + Session *session.Session + Options sl.Options +} + +// GetHardwareComponentPartitionTemplateService returns an instance of the Hardware_Component_Partition_Template SoftLayer service +func GetHardwareComponentPartitionTemplateService(sess *session.Session) Hardware_Component_Partition_Template { + return Hardware_Component_Partition_Template{Session: sess} +} + +func (r Hardware_Component_Partition_Template) Id(id int) Hardware_Component_Partition_Template { + r.Options.Id = &id + return r +} + +func (r Hardware_Component_Partition_Template) Mask(mask string) Hardware_Component_Partition_Template { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Hardware_Component_Partition_Template) Filter(filter string) Hardware_Component_Partition_Template { + r.Options.Filter = filter + return r +} + +func (r Hardware_Component_Partition_Template) Limit(limit int) Hardware_Component_Partition_Template { + r.Options.Limit = &limit + return r +} + +func (r Hardware_Component_Partition_Template) Offset(offset int) Hardware_Component_Partition_Template { + r.Options.Offset = &offset + return r +} + +// Retrieve A partition template's associated [[SoftLayer_Account|Account]]. +func (r Hardware_Component_Partition_Template) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_Template", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve An individual partition for a partition template. This is identical to 'partitionTemplatePartition' except this will sort unix partitions. +func (r Hardware_Component_Partition_Template) GetData() (resp []datatypes.Hardware_Component_Partition_Template_Partition, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_Template", "getData", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Component_Partition_Template) GetExpireDate() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_Template", "getExpireDate", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Hardware_Component_Partition_Template object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Hardware_Component_Partition_Template service. You can only retrieve the partition templates that your account created or the templates predefined by SoftLayer. +func (r Hardware_Component_Partition_Template) GetObject() (resp datatypes.Hardware_Component_Partition_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_Template", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve A partition template's associated [[SoftLayer_Hardware_Component_Partition_OperatingSystem|Operating System]]. +func (r Hardware_Component_Partition_Template) GetPartitionOperatingSystem() (resp datatypes.Hardware_Component_Partition_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_Template", "getPartitionOperatingSystem", nil, &r.Options, &resp) + return +} + +// Retrieve An individual partition for a partition template. 
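A sketch combining the two partition services above: look up an operating-system partition record by description, then list the partition templates predefined for it. The 'linux' value comes from the getByDescription comment; the credentials, the nil check, and feeding the record's id back in as the init parameter are assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("myuser", "my-api-key") // assumed credentials

	osSvc := services.GetHardwareComponentPartitionOperatingSystemService(sess)
	osRec, err := osSvc.GetByDescription(sl.String("linux"))
	if err != nil {
		log.Fatal(err)
	}
	if osRec.Id == nil {
		log.Fatal("no partition operating system record returned")
	}

	// List the partition templates defined for that operating system record.
	templates, err := osSvc.Id(*osRec.Id).GetPartitionTemplates()
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range templates {
		fmt.Printf("%+v\n", t)
	}
}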
+func (r Hardware_Component_Partition_Template) GetPartitionTemplatePartition() (resp []datatypes.Hardware_Component_Partition_Template_Partition, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_Template", "getPartitionTemplatePartition", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Hardware_Router data type contains general information relating to a single SoftLayer router. +type Hardware_Router struct { + Session *session.Session + Options sl.Options +} + +// GetHardwareRouterService returns an instance of the Hardware_Router SoftLayer service +func GetHardwareRouterService(sess *session.Session) Hardware_Router { + return Hardware_Router{Session: sess} +} + +func (r Hardware_Router) Id(id int) Hardware_Router { + r.Options.Id = &id + return r +} + +func (r Hardware_Router) Mask(mask string) Hardware_Router { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Hardware_Router) Filter(filter string) Hardware_Router { + r.Options.Filter = filter + return r +} + +func (r Hardware_Router) Limit(limit int) Hardware_Router { + r.Options.Limit = &limit + return r +} + +func (r Hardware_Router) Offset(offset int) Hardware_Router { + r.Options.Offset = &offset + return r +} + +// This method is used to allow access to a SoftLayer_Network_Storage volume that supports host- or network-level access control. +func (r Hardware_Router) AllowAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "allowAccessToNetworkStorage", params, &r.Options, &resp) + return +} + +// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control. +func (r Hardware_Router) AllowAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "allowAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// Captures a Flex Image of the hard disk on the physical machine, based on the capture template parameter. Returns the image template group containing the disk image. +func (r Hardware_Router) CaptureImage(captureTemplate *datatypes.Container_Disk_Image_Capture_Template) (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + params := []interface{}{ + captureTemplate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "captureImage", params, &r.Options, &resp) + return +} + +// Returns monitoring alarm detailed history +func (r Hardware_Router) CloseAlarm(alarmId *string) (resp bool, err error) { + params := []interface{}{ + alarmId, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "closeAlarm", params, &r.Options, &resp) + return +} + +// +// +// createObject() enables the creation of servers on an account. This +// method is a simplified alternative to interacting with the ordering system directly. +// +// +// In order to create a server, a template object must be sent in with a few required +// values. +// +// +// When this method returns an order will have been placed for a server of the specified configuration. 
+// +// +// To determine when the server is available you can poll the server via [[SoftLayer_Hardware/getObject|getObject]], +// checking the provisionDate property. +// When provisionDate is not null, the server will be ready. Be sure to use the globalIdentifier +// as your initialization parameter. +// +// +// Warning: Servers created via this method will incur charges on your account. For testing input parameters see [[SoftLayer_Hardware/generateOrderTemplate|generateOrderTemplate]]. +// +// +// Input - [[SoftLayer_Hardware (type)|SoftLayer_Hardware]] +//
+// * hostname
+//   Hostname for the server. Required. Type - string.
+//
+// * domain
+//   Domain for the server. Required. Type - string.
+//
+// * processorCoreAmount
+//   The number of logical CPU cores to allocate. Required. Type - int.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// * memoryCapacity
+//   The amount of memory to allocate in gigabytes. Required. Type - int.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// * hourlyBillingFlag
+//   Specifies the billing type for the server. Required. Type - boolean.
+//   When true the server will be billed on hourly usage, otherwise it will be billed on a monthly basis.
+//
+// * operatingSystemReferenceCode
+//   An identifier for the operating system to provision the server with. Required. Type - string.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// * datacenter.name
+//   Specifies which datacenter the server is to be provisioned in. Required. Type - string.
+//   The datacenter property is a [[SoftLayer_Location (type)|location]] structure with the name field set.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   {"datacenter": {"name": "dal05"}}
+//
+// * networkComponents.maxSpeed
+//   Specifies the connection speed for the server's network components. Optional. Type - int.
+//   Default - The highest available zero cost port speed will be used.
+//   Description - The networkComponents property is an array with a single
+//   [[SoftLayer_Network_Component (type)|network component]] structure. The maxSpeed property must be
+//   set to specify the network uplink speed, in megabits per second, of the server.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   {"networkComponents": [{"maxSpeed": 1000}]}
+//
+// * networkComponents.redundancyEnabledFlag
+//   Specifies whether or not the server's network components should be in redundancy groups.
+//   Optional. Type - bool. Default - false.
+//   Description - The networkComponents property is an array with a single
+//   [[SoftLayer_Network_Component (type)|network component]] structure. When the redundancyEnabledFlag
+//   property is true the server's network components will be in redundancy groups.
+//   {"networkComponents": [{"redundancyEnabledFlag": false}]}
+//
+// * privateNetworkOnlyFlag
+//   Specifies whether or not the server only has access to the private network.
+//   Optional. Type - boolean. Default - false.
+//   When true this flag specifies that a server is to only have access to the private network.
+//
+// * primaryNetworkComponent.networkVlan.id
+//   Specifies the network vlan which is to be used for the frontend interface of the server.
+//   Optional. Type - int.
+//   Description - The primaryNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]]
+//   structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure.
+//   The id property must be set to specify the frontend network vlan of the server.
+//   {"primaryNetworkComponent": {"networkVlan": {"id": 1}}}
+//
+// * primaryBackendNetworkComponent.networkVlan.id
+//   Specifies the network vlan which is to be used for the backend interface of the server.
+//   Optional. Type - int.
+//   Description - The primaryBackendNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]]
+//   structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure.
+//   The id property must be set to specify the backend network vlan of the server.
+//   {"primaryBackendNetworkComponent": {"networkVlan": {"id": 2}}}
+//
+// * fixedConfigurationPreset.keyName
+//   Optional. Type - string.
+//   Description - The fixedConfigurationPreset property is a [[SoftLayer_Product_Package_Preset (type)|fixed configuration preset]]
+//   structure. The keyName property must be set to specify preset to use.
+//   If a fixed configuration preset is used processorCoreAmount, memoryCapacity and hardDrives
+//   properties must not be set.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   {"fixedConfigurationPreset": {"keyName": "SOME_KEY_NAME"}}
+//
+// * userData.value
+//   Arbitrary data to be made available to the server. Optional. Type - string.
+//   Description - The userData property is an array with a single [[SoftLayer_Hardware_Attribute (type)|attribute]]
+//   structure with the value property set to an arbitrary value.
+//   This value can be retrieved via the [[SoftLayer_Resource_Metadata/getUserMetadata|getUserMetadata]] method
+//   from a request originating from the server. This is primarily useful for providing data to software
+//   that may be on the server and configured to execute upon first boot.
+//   {"userData": [{"value": "someValue"}]}
+//
+// * hardDrives
+//   Hard drive settings for the server. Optional. Type - SoftLayer_Hardware_Component.
+//   Default - The largest available capacity for a zero cost primary disk will be used.
+//   Description - The hardDrives property is an array of [[SoftLayer_Hardware_Component (type)|hardware component]]
+//   structures. Each hard drive must specify the capacity property.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   {"hardDrives": [{"capacity": 500}]}
+//
+// * sshKeys
+//   SSH keys to install on the server upon provisioning.
+//   Optional. Type - array of [[SoftLayer_Security_Ssh_Key (type)|SoftLayer_Security_Ssh_Key]].
+//   Description - The sshKeys property is an array of [[SoftLayer_Security_Ssh_Key (type)|SSH Key]] structures
+//   with the id property set to the value of an existing SSH key.
+//   To create a new SSH key, call [[SoftLayer_Security_Ssh_Key/createObject|createObject]] on the
+//   [[SoftLayer_Security_Ssh_Key]] service.
+//   To obtain a list of existing SSH keys, call [[SoftLayer_Account/getSshKeys|getSshKeys]] on the
+//   [[SoftLayer_Account]] service.
+//   {"sshKeys": [{"id": 123}]}
+//
+// * postInstallScriptUri
+//   Specifies the uri location of the script to be downloaded and run after installation is complete.
+//   Optional. Type - string.
+//
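+// A rough Go sketch of the same request, added here for illustration and not part of the upstream
+// SoftLayer documentation: it builds the template described above and submits it through the
+// Hardware_Router wrapper defined in this file. The credentials and values are placeholders, the
+// field names follow the template properties listed above, and creating the object places a real,
+// billable order.
+//
+//     sess := session.New("username", "apiKey")
+//     cores, mem := uint(2), uint(2)
+//     template := datatypes.Hardware{
+//         Hostname:                     sl.String("host1"),
+//         Domain:                       sl.String("example.com"),
+//         ProcessorCoreAmount:          &cores,
+//         MemoryCapacity:               &mem,
+//         HourlyBillingFlag:            sl.Bool(true),
+//         OperatingSystemReferenceCode: sl.String("UBUNTU_LATEST"),
+//         Datacenter:                   &datatypes.Location{Name: sl.String("dal05")},
+//     }
+//     created, err := services.GetHardwareRouterService(sess).CreateObject(&template)
+//     // Poll [[SoftLayer_Hardware/getObject|getObject]] using created.GlobalIdentifier until
+//     // provisionDate is set.
+//
+// REST Example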

    +// curl -X POST -d '{ +// "parameters":[ +// { +// "hostname": "host1", +// "domain": "example.com", +// "processorCoreAmount": 2, +// "memoryCapacity": 2, +// "hourlyBillingFlag": true, +// "operatingSystemReferenceCode": "UBUNTU_LATEST" +// } +// ] +// }' https://api.softlayer.com/rest/v3/SoftLayer_Hardware.json +// +// HTTP/1.1 201 Created +// Location: https://api.softlayer.com/rest/v3/SoftLayer_Hardware/f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5/getObject +// +// +// { +// "accountId": 232298, +// "bareMetalInstanceFlag": null, +// "domain": "example.com", +// "hardwareStatusId": null, +// "hostname": "host1", +// "id": null, +// "serviceProviderId": null, +// "serviceProviderResourceId": null, +// "globalIdentifier": "f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5", +// "hourlyBillingFlag": true, +// "memoryCapacity": 2, +// "operatingSystemReferenceCode": "UBUNTU_LATEST", +// "processorCoreAmount": 2 +// } +// +func (r Hardware_Router) CreateObject(templateObject *datatypes.Hardware) (resp datatypes.Hardware, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "createObject", params, &r.Options, &resp) + return +} + +// +// This method will cancel a server effective immediately. For servers billed hourly, the charges will stop immediately after the method returns. +func (r Hardware_Router) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "deleteObject", nil, &r.Options, &resp) + return +} + +// Delete software component passwords. +func (r Hardware_Router) DeleteSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "deleteSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Edit the properties of a software component password such as the username, password, and notes. +func (r Hardware_Router) EditSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "editSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Download and run remote script from uri on the hardware. +func (r Hardware_Router) ExecuteRemoteScript(uri *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + uri, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "executeRemoteScript", params, &r.Options, &resp) + return +} + +// The '''findByIpAddress''' method finds hardware using its primary public or private IP address. IP addresses that have a secondary subnet tied to the hardware will not return the hardware - alternate means of locating the hardware must be used (see '''Associated Methods'''). If no hardware is found, no errors are generated and no data is returned. +func (r Hardware_Router) FindByIpAddress(ipAddress *string) (resp datatypes.Hardware, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "findByIpAddress", params, &r.Options, &resp) + return +} + +// +// Obtain an [[SoftLayer_Container_Product_Order_Hardware_Server (type)|order container]] that can be sent to [[SoftLayer_Product_Order/verifyOrder|verifyOrder]] or [[SoftLayer_Product_Order/placeOrder|placeOrder]]. 
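+// Editor's sketch, not upstream documentation: reusing the hypothetical sess and template from the
+// createObject example above, the generated container can be price-checked before an order is
+// actually placed. This assumes the Product_Order wrapper from this same package.
+//
+//     order, err := services.GetHardwareRouterService(sess).GenerateOrderTemplate(&template)
+//     if err != nil { /* handle error */ }
+//     priced, err := services.GetProductOrderService(sess).VerifyOrder(&order)
+//     _ = priced // review the verified pricing before calling placeOrder
+//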
+// +// +// This is primarily useful when there is a necessity to confirm the price which will be charged for an order. +// +// +// See [[SoftLayer_Hardware/createObject|createObject]] for specifics on the requirements of the template object parameter. +func (r Hardware_Router) GenerateOrderTemplate(templateObject *datatypes.Hardware) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "generateOrderTemplate", params, &r.Options, &resp) + return +} + +// Retrieve The account associated with a piece of hardware. +func (r Hardware_Router) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's active physical components. +func (r Hardware_Router) GetActiveComponents() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getActiveComponents", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's active network monitoring incidents. +func (r Hardware_Router) GetActiveNetworkMonitorIncident() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getActiveNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// The '''getAlarmHistory''' method retrieves a detailed history for the monitoring alarm. When calling this method, a start and end date for the history to be retrieved must be entered. +func (r Hardware_Router) GetAlarmHistory(startDate *datatypes.Time, endDate *datatypes.Time, alarmId *string) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + alarmId, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAlarmHistory", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Router) GetAllPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAllPowerComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Allowed_Host information to connect this server to Network Storage volumes that require access control lists. +func (r Hardware_Router) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAllowedHost", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. +func (r Hardware_Router) GetAllowedNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAllowedNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects whose Replica that this SoftLayer_Hardware has access to. +func (r Hardware_Router) GetAllowedNetworkStorageReplicas() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAllowedNetworkStorageReplicas", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding an antivirus/spyware software component object. 
+func (r Hardware_Router) GetAntivirusSpywareSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAntivirusSpywareSoftwareComponent", nil, &r.Options, &resp) + return +} + +// This method is retrieve a list of SoftLayer_Network_Storage volumes that are authorized access to this SoftLayer_Hardware. +func (r Hardware_Router) GetAttachedNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAttachedNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's specific attributes. +func (r Hardware_Router) GetAttributes() (resp []datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAttributes", nil, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that can be authorized to this SoftLayer_Hardware. +func (r Hardware_Router) GetAvailableNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAvailableNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve The average daily public bandwidth usage for the current billing cycle. +func (r Hardware_Router) GetAverageDailyPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAverageDailyPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// The '''getBackendIncomingBandwidth''' method retrieves the amount of incoming private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes. +func (r Hardware_Router) GetBackendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBackendIncomingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's back-end or private network components. +func (r Hardware_Router) GetBackendNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBackendNetworkComponents", nil, &r.Options, &resp) + return +} + +// The '''getBackendOutgoingBandwidth''' method retrieves the amount of outgoing private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes. +func (r Hardware_Router) GetBackendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBackendOutgoingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A hardware's backend or private router. 
+func (r Hardware_Router) GetBackendRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBackendRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth (measured in GB). +func (r Hardware_Router) GetBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted detail record. Allotment details link bandwidth allocation with allotments. +func (r Hardware_Router) GetBandwidthAllotmentDetail() (resp datatypes.Network_Bandwidth_Version1_Allotment_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBandwidthAllotmentDetail", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's benchmark certifications. +func (r Hardware_Router) GetBenchmarkCertifications() (resp []datatypes.Hardware_Benchmark_Certification, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBenchmarkCertifications", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for a server. +func (r Hardware_Router) GetBillingItem() (resp datatypes.Billing_Item_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that a billing item exists. +func (r Hardware_Router) GetBillingItemFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBillingItemFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether the hardware is ineligible for cancellation because it is disconnected. +func (r Hardware_Router) GetBlockCancelBecauseDisconnectedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBlockCancelBecauseDisconnectedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Associated subnets for a router object. +func (r Hardware_Router) GetBoundSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBoundSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve Status indicating whether or not a piece of hardware has business continuance insurance. +func (r Hardware_Router) GetBusinessContinuanceInsuranceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBusinessContinuanceInsuranceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Child hardware. +func (r Hardware_Router) GetChildrenHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getChildrenHardware", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_Router) GetComponentDetailsXML() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getComponentDetailsXML", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's components. +func (r Hardware_Router) GetComponents() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getComponents", nil, &r.Options, &resp) + return +} + +// Retrieve A continuous data protection/server backup software component object. 
+func (r Hardware_Router) GetContinuousDataProtectionSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getContinuousDataProtectionSoftwareComponent", nil, &r.Options, &resp) + return +} + +// +// There are many options that may be provided while ordering a server, this method can be used to determine what these options are. +// +// +// Detailed information on the return value can be found on the data type page for [[SoftLayer_Container_Hardware_Configuration (type)]]. +func (r Hardware_Router) GetCreateObjectOptions() (resp datatypes.Container_Hardware_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getCreateObjectOptions", nil, &r.Options, &resp) + return +} + +// Retrieve The current billable public outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_Router) GetCurrentBillableBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getCurrentBillableBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Get the billing detail for this instance for the current billing period. This does not include bandwidth usage. +func (r Hardware_Router) GetCurrentBillingDetail() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getCurrentBillingDetail", nil, &r.Options, &resp) + return +} + +// The '''getCurrentBillingTotal''' method retrieves the total bill amount in US Dollars ($) for the current billing period. In addition to the total bill amount, the billing detail also includes all bandwidth used up to the point the method is called on the piece of hardware. +func (r Hardware_Router) GetCurrentBillingTotal() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getCurrentBillingTotal", nil, &r.Options, &resp) + return +} + +// The '''getDailyAverage''' method calculates the average daily network traffic used by the selected server. Using the required parameter ''dateTime'' to enter a start and end date, the user retrieves this average, measure in gigabytes (GB) for the specified date range. When entering parameters, only the month, day and year are required - time entries are omitted as this method defaults the time to midnight in order to account for the entire day. +func (r Hardware_Router) GetDailyAverage(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDailyAverage", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the datacenter in which a piece of hardware resides. +func (r Hardware_Router) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the datacenter in which a piece of hardware resides. +func (r Hardware_Router) GetDatacenterName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDatacenterName", nil, &r.Options, &resp) + return +} + +// Retrieve Number of day(s) a server have been in spare pool. 
+func (r Hardware_Router) GetDaysInSparePool() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDaysInSparePool", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware that has uplink network connections to a piece of hardware. +func (r Hardware_Router) GetDownlinkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownlinkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware that has uplink network connections to a piece of hardware. +func (r Hardware_Router) GetDownlinkNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownlinkNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all servers attached to a piece of network hardware. +func (r Hardware_Router) GetDownlinkServers() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownlinkServers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all virtual guests attached to a piece of network hardware. +func (r Hardware_Router) GetDownlinkVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownlinkVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware downstream from a network device. +func (r Hardware_Router) GetDownstreamHardwareBindings() (resp []datatypes.Network_Component_Uplink_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownstreamHardwareBindings", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware downstream from the selected piece of hardware. +func (r Hardware_Router) GetDownstreamNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownstreamNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware with monitoring warnings or errors that are downstream from the selected piece of hardware. +func (r Hardware_Router) GetDownstreamNetworkHardwareWithIncidents() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownstreamNetworkHardwareWithIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all servers attached downstream to a piece of network hardware. +func (r Hardware_Router) GetDownstreamServers() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownstreamServers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all virtual guests attached to a piece of network hardware. +func (r Hardware_Router) GetDownstreamVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownstreamVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The drive controllers contained within a piece of hardware. +func (r Hardware_Router) GetDriveControllers() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDriveControllers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated EVault network storage service account. 
+func (r Hardware_Router) GetEvaultNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getEvaultNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's firewall services. +func (r Hardware_Router) GetFirewallServiceComponent() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getFirewallServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Defines the fixed components in a fixed configuration bare metal server. +func (r Hardware_Router) GetFixedConfigurationPreset() (resp datatypes.Product_Package_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getFixedConfigurationPreset", nil, &r.Options, &resp) + return +} + +// The '''getFrontendIncomingBandwidth''' method retrieves the amount of incoming public network traffic used by a server between the given start and end date parameters. When entering the ''dateTime'' parameter, only the month, day and year of the start and end dates are required - the time (hour, minute and second) are set to midnight by default and cannot be changed. The amount of bandwidth retrieved is measured in gigabytes (GB). +func (r Hardware_Router) GetFrontendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getFrontendIncomingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's front-end or public network components. +func (r Hardware_Router) GetFrontendNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getFrontendNetworkComponents", nil, &r.Options, &resp) + return +} + +// The '''getFrontendOutgoingBandwidth''' method retrieves the amount of outgoing public network traffic used by a server between the given start and end date parameters. The ''dateTime'' parameter requires only the day, month and year to be entered - the time (hour, minute and second) are set to midnight be default in order to gather the data for the entire start and end date indicated in the parameter. The amount of bandwidth retrieved is measured in gigabytes (GB). +func (r Hardware_Router) GetFrontendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getFrontendOutgoingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A hardware's frontend or public router. +func (r Hardware_Router) GetFrontendRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getFrontendRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's universally unique identifier. +func (r Hardware_Router) GetGlobalIdentifier() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getGlobalIdentifier", nil, &r.Options, &resp) + return +} + +// Retrieve The hard drives contained within a piece of hardware. 
+func (r Hardware_Router) GetHardDrives() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHardDrives", nil, &r.Options, &resp) + return +} + +// Retrieve The chassis that a piece of hardware is housed in. +func (r Hardware_Router) GetHardwareChassis() (resp datatypes.Hardware_Chassis, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHardwareChassis", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's function. +func (r Hardware_Router) GetHardwareFunction() (resp datatypes.Hardware_Function, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHardwareFunction", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's function. +func (r Hardware_Router) GetHardwareFunctionDescription() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHardwareFunctionDescription", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's status. +func (r Hardware_Router) GetHardwareStatus() (resp datatypes.Hardware_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHardwareStatus", nil, &r.Options, &resp) + return +} + +// Retrieve Determine in hardware object has TPM enabled. +func (r Hardware_Router) GetHasTrustedPlatformModuleBillingItemFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHasTrustedPlatformModuleBillingItemFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a host IPS software component object. +func (r Hardware_Router) GetHostIpsSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHostIpsSoftwareComponent", nil, &r.Options, &resp) + return +} + +// The '''getHourlyBandwidth''' method retrieves all bandwidth updates hourly for the specified hardware. Because the potential number of data points can become excessive, the method limits the user to obtain data in 24-hour intervals. The required ''dateTime'' parameter is used as the starting point for the query and will be calculated for the 24-hour period starting with the specified date and time. For example, entering a parameter of +// +// '02/01/2008 0:00' +// +// results in a return of all bandwidth data for the entire day of February 1, 2008, as 0:00 specifies a midnight start date. Please note that the time entered should be completed using a 24-hour clock (military time, astronomical time). +// +// For data spanning more than a single 24-hour period, refer to the getBandwidthData function on the metricTrackingObject for the piece of hardware. +func (r Hardware_Router) GetHourlyBandwidth(mode *string, day *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + mode, + day, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHourlyBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A server's hourly billing status. +func (r Hardware_Router) GetHourlyBillingFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHourlyBillingFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The sum of all the inbound network traffic data for the last 30 days. 
+func (r Hardware_Router) GetInboundBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getInboundBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public inbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_Router) GetInboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getInboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the last transaction a server performed. +func (r Hardware_Router) GetLastTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getLastTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's latest network monitoring incident. +func (r Hardware_Router) GetLatestNetworkMonitorIncident() (resp datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getLatestNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that a VLAN on the router can be assigned to a host that has local disk functionality. +func (r Hardware_Router) GetLocalDiskStorageCapabilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getLocalDiskStorageCapabilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Where a piece of hardware is located within SoftLayer's location hierarchy. +func (r Hardware_Router) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Router) GetLocationPathString() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getLocationPathString", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a lockbox account associated with a server. +func (r Hardware_Router) GetLockboxNetworkStorage() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getLockboxNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the hardware is a managed resource. +func (r Hardware_Router) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's memory. +func (r Hardware_Router) GetMemory() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMemory", nil, &r.Options, &resp) + return +} + +// Retrieve The amount of memory a piece of hardware has, measured in gigabytes. +func (r Hardware_Router) GetMemoryCapacity() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMemoryCapacity", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's metric tracking object. 
+func (r Hardware_Router) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object_HardwareServer, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Returns open monitoring alarms for a given time period +func (r Hardware_Router) GetMonitoringActiveAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMonitoringActiveAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the monitoring agents associated with a piece of hardware. +func (r Hardware_Router) GetMonitoringAgents() (resp []datatypes.Monitoring_Agent, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMonitoringAgents", nil, &r.Options, &resp) + return +} + +// Returns closed monitoring alarms for a given time period +func (r Hardware_Router) GetMonitoringClosedAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMonitoringClosedAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's monitoring robot. +func (r Hardware_Router) GetMonitoringRobot() (resp datatypes.Monitoring_Robot, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMonitoringRobot", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitoring services. +func (r Hardware_Router) GetMonitoringServiceComponent() (resp datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMonitoringServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The monitoring service flag eligibility status for a piece of hardware. +func (r Hardware_Router) GetMonitoringServiceEligibilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMonitoringServiceEligibilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The service flag status for a piece of hardware. +func (r Hardware_Router) GetMonitoringServiceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMonitoringServiceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's motherboard. +func (r Hardware_Router) GetMotherboard() (resp datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMotherboard", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network cards. +func (r Hardware_Router) GetNetworkCards() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkCards", nil, &r.Options, &resp) + return +} + +// Retrieve Returns a hardware's network components. +func (r Hardware_Router) GetNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The gateway member if this device is part of a network gateway. 
+func (r Hardware_Router) GetNetworkGatewayMember() (resp datatypes.Network_Gateway_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkGatewayMember", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not this device is part of a network gateway. +func (r Hardware_Router) GetNetworkGatewayMemberFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkGatewayMemberFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's network management IP address. +func (r Hardware_Router) GetNetworkManagementIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkManagementIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve All servers with failed monitoring that are attached downstream to a piece of hardware. +func (r Hardware_Router) GetNetworkMonitorAttachedDownHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkMonitorAttachedDownHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Virtual guests that are attached downstream to a hardware that have failed monitoring +func (r Hardware_Router) GetNetworkMonitorAttachedDownVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkMonitorAttachedDownVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The status of all of a piece of hardware's network monitoring incidents. +func (r Hardware_Router) GetNetworkMonitorIncidents() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkMonitorIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitors. +func (r Hardware_Router) GetNetworkMonitors() (resp []datatypes.Network_Monitor_Version1_Query_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkMonitors", nil, &r.Options, &resp) + return +} + +// Retrieve The value of a hardware's network status attribute. +func (r Hardware_Router) GetNetworkStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's related network status attribute. +func (r Hardware_Router) GetNetworkStatusAttribute() (resp datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkStatusAttribute", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated network storage service account. +func (r Hardware_Router) GetNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The network virtual LANs (VLANs) associated with a piece of hardware's network components. +func (r Hardware_Router) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkVlans", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth for the next billing cycle (measured in GB). 
+func (r Hardware_Router) GetNextBillingCycleBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNextBillingCycleBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Router) GetNotesHistory() (resp []datatypes.Hardware_Note, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNotesHistory", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_Router) GetObject() (resp datatypes.Hardware_Router, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's operating system. +func (r Hardware_Router) GetOperatingSystem() (resp datatypes.Software_Component_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getOperatingSystem", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's operating system software description. +func (r Hardware_Router) GetOperatingSystemReferenceCode() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getOperatingSystemReferenceCode", nil, &r.Options, &resp) + return +} + +// Retrieve The sum of all the outbound network traffic data for the last 30 days. +func (r Hardware_Router) GetOutboundBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getOutboundBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_Router) GetOutboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getOutboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Blade Bay +func (r Hardware_Router) GetParentBay() (resp datatypes.Hardware_Blade, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getParentBay", nil, &r.Options, &resp) + return +} + +// Retrieve Parent Hardware. +func (r Hardware_Router) GetParentHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getParentHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the Point of Presence (PoP) location in which a piece of hardware resides. +func (r Hardware_Router) GetPointOfPresenceLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPointOfPresenceLocation", nil, &r.Options, &resp) + return +} + +// Retrieve The power components for a hardware object. +func (r Hardware_Router) GetPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPowerComponents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's power supply. +func (r Hardware_Router) GetPowerSupply() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPowerSupply", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's primary private IP address. 
+func (r Hardware_Router) GetPrimaryBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPrimaryBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's primary back-end network component. +func (r Hardware_Router) GetPrimaryBackendNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPrimaryBackendNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's primary public IP address. +func (r Hardware_Router) GetPrimaryIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPrimaryIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's primary public network component. +func (r Hardware_Router) GetPrimaryNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPrimaryNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's private network bandwidth usage over the specified timeframe. If no timeframe is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPrivateBandwidthGraphImage returns a PNG image measuring 827 pixels by 293 pixels. +func (r Hardware_Router) GetPrivateBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPrivateBandwidthData", params, &r.Options, &resp) + return +} + +// Retrieve Whether the hardware only has access to the private network. +func (r Hardware_Router) GetPrivateNetworkOnlyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPrivateNetworkOnlyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The total number of processor cores, summed from all processors that are attached to a piece of hardware +func (r Hardware_Router) GetProcessorCoreAmount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getProcessorCoreAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total number of physical processor cores, summed from all processors that are attached to a piece of hardware +func (r Hardware_Router) GetProcessorPhysicalCoreAmount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getProcessorPhysicalCoreAmount", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's processors. +func (r Hardware_Router) GetProcessors() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getProcessors", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's public network bandwidth usage over the specified timeframe. If no timeframe is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPublicBandwidthGraphImage returns a PNG image measuring 827 pixels by 293 pixels. 
+func (r Hardware_Router) GetPublicBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPublicBandwidthData", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Router) GetRack() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getRack", nil, &r.Options, &resp) + return +} + +// Retrieve The RAID controllers contained within a piece of hardware. +func (r Hardware_Router) GetRaidControllers() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getRaidControllers", nil, &r.Options, &resp) + return +} + +// Retrieve Recent events that impact this hardware. +func (r Hardware_Router) GetRecentEvents() (resp []datatypes.Notification_Occurrence_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getRecentEvents", nil, &r.Options, &resp) + return +} + +// Retrieve User credentials to issue commands and/or interact with the server's remote management card. +func (r Hardware_Router) GetRemoteManagementAccounts() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getRemoteManagementAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's associated remote management component. This is normally IPMI. +func (r Hardware_Router) GetRemoteManagementComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getRemoteManagementComponent", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Router) GetResourceConfigurations() (resp []datatypes.Hardware_Resource_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getResourceConfigurations", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Router) GetResourceGroupMemberReferences() (resp []datatypes.Resource_Group_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getResourceGroupMemberReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Router) GetResourceGroupRoles() (resp []datatypes.Resource_Group_Role, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getResourceGroupRoles", nil, &r.Options, &resp) + return +} + +// Retrieve The resource groups in which this hardware is a member. +func (r Hardware_Router) GetResourceGroups() (resp []datatypes.Resource_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getResourceGroups", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's routers. +func (r Hardware_Router) GetRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that a VLAN on the router can be assigned to a host that has SAN disk functionality. +func (r Hardware_Router) GetSanStorageCapabilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getSanStorageCapabilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of scale assets this hardware corresponds to. 
+func (r Hardware_Router) GetScaleAssets() (resp []datatypes.Scale_Asset, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getScaleAssets", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's vulnerability scan requests. +func (r Hardware_Router) GetSecurityScanRequests() (resp []datatypes.Network_Security_Scanner_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getSecurityScanRequests", nil, &r.Options, &resp) + return +} + +// The '''getSensorData''' method retrieves a server's hardware state via its internal sensors. Remote sensor data is transmitted to the SoftLayer API by way of the server's remote management card. Sensor data measures various information, including system temperatures, voltages and other local server settings. Sensor data is cached for 30 second; calls made to this method for the same server within 30 seconds of each other will result in the same data being returned. To ensure that the data retrieved retrieves snapshot of varied data, make calls greater than 30 seconds apart. +func (r Hardware_Router) GetSensorData() (resp []datatypes.Container_RemoteManagement_SensorReading, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getSensorData", nil, &r.Options, &resp) + return +} + +// The '''getSensorDataWithGraphs''' method retrieves the raw data returned from the server's remote management card. Along with raw data, graphs for the CPU and system temperatures and fan speeds are also returned. For more details on what information is returned, refer to the ''getSensorData'' method. +func (r Hardware_Router) GetSensorDataWithGraphs() (resp datatypes.Container_RemoteManagement_SensorReadingsWithGraphs, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getSensorDataWithGraphs", nil, &r.Options, &resp) + return +} + +// The '''getServerFanSpeedGraphs''' method retrieves the server's fan speeds and displays the speeds using tachometer graphs. data used to construct these graphs is retrieved from the server's remote management card. Each graph returned will have an associated title. +func (r Hardware_Router) GetServerFanSpeedGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorSpeed, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getServerFanSpeedGraphs", nil, &r.Options, &resp) + return +} + +// The '''getPowerState''' method retrieves the power state for the selected server. The server's power status is retrieved from its remote management card. This method returns "on", for a server that has been powered on, or "off" for servers powered off. +func (r Hardware_Router) GetServerPowerState() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getServerPowerState", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the server room in which the hardware is located. +func (r Hardware_Router) GetServerRoom() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getServerRoom", nil, &r.Options, &resp) + return +} + +// The '''getServerTemperatureGraphs''' retrieves the server's temperatures and displays the various temperatures using thermometer graphs. Temperatures retrieved are CPU temperature(s) and system temperatures. Data used to construct the graphs is retrieved from the server's remote management card. All graphs returned will have an associated title. 
+func (r Hardware_Router) GetServerTemperatureGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorTemperature, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getServerTemperatureGraphs", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the piece of hardware's service provider. +func (r Hardware_Router) GetServiceProvider() (resp datatypes.Service_Provider, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getServiceProvider", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's installed software. +func (r Hardware_Router) GetSoftwareComponents() (resp []datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getSoftwareComponents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for a spare pool server. +func (r Hardware_Router) GetSparePoolBillingItem() (resp datatypes.Billing_Item_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getSparePoolBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve SSH keys to be installed on the server during provisioning or an OS reload. +func (r Hardware_Router) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getSshKeys", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Router) GetStorageNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getStorageNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Router) GetTagReferences() (resp []datatypes.Tag_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getTagReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Router) GetTopLevelLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getTopLevelLocation", nil, &r.Options, &resp) + return +} + +// +// This method will query transaction history for a piece of hardware. +func (r Hardware_Router) GetTransactionHistory() (resp []datatypes.Provisioning_Version1_Transaction_History, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getTransactionHistory", nil, &r.Options, &resp) + return +} + +// Retrieve a list of upgradeable items available to this piece of hardware. Currently, getUpgradeItemPrices retrieves upgrades available for a server's memory, hard drives, network port speed, bandwidth allocation and GPUs. +func (r Hardware_Router) GetUpgradeItemPrices() (resp []datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getUpgradeItemPrices", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated upgrade request object, if any. +func (r Hardware_Router) GetUpgradeRequest() (resp datatypes.Product_Upgrade_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getUpgradeRequest", nil, &r.Options, &resp) + return +} + +// Retrieve The network device connected to a piece of hardware. 
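As an aside, the bandwidth wrappers above (GetPrivateBandwidthData, GetPublicBandwidthData) take the start and end of the window as *int. The following is a purely illustrative sketch, not part of this patch; it assumes the usual Get…Service constructor from this package and that the integers are Unix timestamps:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("myUser", "myApiKey")                     // assumed credentials
	router := services.GetHardwareRouterService(sess).Id(123456) // hypothetical hardware id

	// Last 24 hours; Unix timestamps are assumed for the *int window parameters.
	end := int(time.Now().Unix())
	start := int(time.Now().Add(-24 * time.Hour).Unix())

	data, err := router.GetPrivateBandwidthData(&start, &end)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("got %d tracking samples\n", len(data))
}
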
+func (r Hardware_Router) GetUplinkHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getUplinkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the network component that is one level higher than a piece of hardware on the network infrastructure. +func (r Hardware_Router) GetUplinkNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getUplinkNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve An array containing a single string of custom user data for a hardware order. Max size is 16 kb. +func (r Hardware_Router) GetUserData() (resp []datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getUserData", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis for a piece of hardware. +func (r Hardware_Router) GetVirtualChassis() (resp datatypes.Hardware_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualChassis", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis siblings for a piece of hardware. +func (r Hardware_Router) GetVirtualChassisSiblings() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualChassisSiblings", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's virtual host record. +func (r Hardware_Router) GetVirtualHost() (resp datatypes.Virtual_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualHost", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's virtual software licenses. +func (r Hardware_Router) GetVirtualLicenses() (resp []datatypes.Software_VirtualLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualLicenses", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the bandwidth allotment to which a piece of hardware belongs. +func (r Hardware_Router) GetVirtualRack() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualRack", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the bandwidth allotment belonging to a piece of hardware. +func (r Hardware_Router) GetVirtualRackId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualRackId", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the bandwidth allotment belonging to a piece of hardware. +func (r Hardware_Router) GetVirtualRackName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualRackName", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's virtualization platform software. +func (r Hardware_Router) GetVirtualizationPlatform() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualizationPlatform", nil, &r.Options, &resp) + return +} + +// The '''importVirtualHost''' method attempts to import the host record for the virtualization platform running on a server. 
+func (r Hardware_Router) ImportVirtualHost() (resp datatypes.Virtual_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "importVirtualHost", nil, &r.Options, &resp) + return +} + +// The '''isPingable''' method issues a ping command to the selected server and returns the result of the ping command. This boolean return value displays ''true'' upon successful ping or ''false'' for a failed ping. +func (r Hardware_Router) IsPingable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "isPingable", nil, &r.Options, &resp) + return +} + +// Issues a ping command to the server and returns the ping response. +func (r Hardware_Router) Ping() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "ping", nil, &r.Options, &resp) + return +} + +// The '''powerCycle''' method completes a power off and power on of the server successively in one command. The power cycle command is equivalent to unplugging the server from the power strip and then plugging the server back in. '''This method should only be used when all other options have been exhausted'''. Additional remote management commands may not be executed if this command was successfully issued within the last 20 minutes to avoid server failure. Remote management commands include: +// +// rebootSoft rebootHard powerOn powerOff powerCycle +// +// +func (r Hardware_Router) PowerCycle() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "powerCycle", nil, &r.Options, &resp) + return +} + +// This method will power off the server via the server's remote management card. +func (r Hardware_Router) PowerOff() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "powerOff", nil, &r.Options, &resp) + return +} + +// The '''powerOn''' method powers on a server via its remote management card. This boolean return value returns ''true'' upon successful execution and ''false'' if unsuccessful. Other remote management commands may not be issued in this command was successfully completed within the last 20 minutes to avoid server failure. Remote management commands include: +// +// rebootSoft rebootHard powerOn powerOff powerCycle +// +// +func (r Hardware_Router) PowerOn() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "powerOn", nil, &r.Options, &resp) + return +} + +// The '''rebootDefault''' method attempts to reboot the server by issuing a soft reboot, or reset, command to the server's remote management card. if the reset attempt is unsuccessful, a power cycle command will be issued via the power strip. The power cycle command is equivalent to unplugging the server from the power strip and then plugging the server back in. If the reset was successful within the last 20 minutes, another remote management command cannot be completed to avoid server failure. Remote management commands include: +// +// rebootSoft rebootHard powerOn powerOff powerCycle +// +// +func (r Hardware_Router) RebootDefault() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "rebootDefault", nil, &r.Options, &resp) + return +} + +// The '''rebootHard''' method reboots the server by issuing a cycle command to the server's remote management card. A hard reboot is equivalent to pressing the ''Reset'' button on a server - it is issued immediately and will not allow processes to shut down prior to the reboot. 
Completing a hard reboot may initiate system disk checks upon server reboot, causing the boot up to take longer than normally expected. +// +// Remote management commands are unable to be executed if a reboot has been issued successfully within the last 20 minutes to avoid server failure. Remote management commands include: +// +// rebootSoft rebootHard powerOn powerOff powerCycle +// +// +func (r Hardware_Router) RebootHard() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "rebootHard", nil, &r.Options, &resp) + return +} + +// The '''rebootSoft''' method reboots the server by issuing a reset command to the server's remote management card via soft reboot. When executing a soft reboot, servers allow all processes to shut down completely before rebooting. Remote management commands are unable to be issued within 20 minutes of issuing a successful soft reboot in order to avoid server failure. Remote management commands include: +// +// rebootSoft rebootHard powerOn powerOff powerCycle +// +// +func (r Hardware_Router) RebootSoft() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "rebootSoft", nil, &r.Options, &resp) + return +} + +// This method is used to remove access to s SoftLayer_Network_Storage volumes that supports host- or network-level access control. +func (r Hardware_Router) RemoveAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "removeAccessToNetworkStorage", params, &r.Options, &resp) + return +} + +// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control. +func (r Hardware_Router) RemoveAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "removeAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_Router) SetTags(tags *string) (resp bool, err error) { + params := []interface{}{ + tags, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "setTags", params, &r.Options, &resp) + return +} + +// This method will update the root IPMI password on this SoftLayer_Hardware. 
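The remote-management wrappers above share one shape: no request body, a boolean or string response, and the 20-minute lockout described in the comments. A minimal, purely illustrative sketch (not part of this patch) that checks the power state before issuing a soft reboot, with session setup and identifiers assumed:

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("myUser", "myApiKey")                     // assumed credentials
	router := services.GetHardwareRouterService(sess).Id(123456) // hypothetical hardware id

	// getServerPowerState returns "on" or "off", per the doc comment above.
	state, err := router.GetServerPowerState()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("power state:", state)

	// Only reboot a powered-on device, and remember the 20-minute lockout on
	// further remote management commands described above.
	if state == "on" {
		ok, err := router.RebootSoft()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("soft reboot issued:", ok)
	}
}
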
+func (r Hardware_Router) UpdateIpmiPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "updateIpmiPassword", params, &r.Options, &resp) + return +} + +// no documentation yet +type Hardware_SecurityModule struct { + Session *session.Session + Options sl.Options +} + +// GetHardwareSecurityModuleService returns an instance of the Hardware_SecurityModule SoftLayer service +func GetHardwareSecurityModuleService(sess *session.Session) Hardware_SecurityModule { + return Hardware_SecurityModule{Session: sess} +} + +func (r Hardware_SecurityModule) Id(id int) Hardware_SecurityModule { + r.Options.Id = &id + return r +} + +func (r Hardware_SecurityModule) Mask(mask string) Hardware_SecurityModule { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Hardware_SecurityModule) Filter(filter string) Hardware_SecurityModule { + r.Options.Filter = filter + return r +} + +func (r Hardware_SecurityModule) Limit(limit int) Hardware_SecurityModule { + r.Options.Limit = &limit + return r +} + +func (r Hardware_SecurityModule) Offset(offset int) Hardware_SecurityModule { + r.Options.Offset = &offset + return r +} + +// Activates the private network port +func (r Hardware_SecurityModule) ActivatePrivatePort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "activatePrivatePort", nil, &r.Options, &resp) + return +} + +// Activates the public network port +func (r Hardware_SecurityModule) ActivatePublicPort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "activatePublicPort", nil, &r.Options, &resp) + return +} + +// This method is used to allow access to a SoftLayer_Network_Storage volume that supports host- or network-level access control. +func (r Hardware_SecurityModule) AllowAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "allowAccessToNetworkStorage", params, &r.Options, &resp) + return +} + +// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control. +func (r Hardware_SecurityModule) AllowAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "allowAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// The Rescue Kernel is designed to provide you with the ability to bring a server online in order to troubleshoot system problems that would normally only be resolved by an OS Reload. The correct Rescue Kernel will be selected based upon the currently installed operating system. When the rescue kernel process is initiated, the server will shutdown and reboot on to the public network with the same IP's assigned to the server to allow for remote connections. It will bring your server offline for approximately 10 minutes while the rescue is in progress. The root/administrator password will be the same as what is listed in the portal for the server. 
+func (r Hardware_SecurityModule) BootToRescueLayer(noOsBootEnvironment *string) (resp bool, err error) { + params := []interface{}{ + noOsBootEnvironment, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "bootToRescueLayer", params, &r.Options, &resp) + return +} + +// Captures a Flex Image of the hard disk on the physical machine, based on the capture template parameter. Returns the image template group containing the disk image. +func (r Hardware_SecurityModule) CaptureImage(captureTemplate *datatypes.Container_Disk_Image_Capture_Template) (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + params := []interface{}{ + captureTemplate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "captureImage", params, &r.Options, &resp) + return +} + +// Returns monitoring alarm detailed history +func (r Hardware_SecurityModule) CloseAlarm(alarmId *string) (resp bool, err error) { + params := []interface{}{ + alarmId, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "closeAlarm", params, &r.Options, &resp) + return +} + +// You can launch firmware updates by selecting from your server list. It will bring your server offline for approximately 20 minutes while the updates are in progress. +// +// In the event of a hardware failure during this test our datacenter engineers will be notified of the problem automatically. They will then replace any failed components to bring your server back online, and will be contacting you to ensure that impact on your server is minimal. +func (r Hardware_SecurityModule) CreateFirmwareUpdateTransaction(ipmi *int, raidController *int, bios *int, harddrive *int) (resp bool, err error) { + params := []interface{}{ + ipmi, + raidController, + bios, + harddrive, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "createFirmwareUpdateTransaction", params, &r.Options, &resp) + return +} + +// +// +// createObject() enables the creation of servers on an account. This +// method is a simplified alternative to interacting with the ordering system directly. +// +// +// In order to create a server, a template object must be sent in with a few required +// values. +// +// +// When this method returns an order will have been placed for a server of the specified configuration. +// +// +// To determine when the server is available you can poll the server via [[SoftLayer_Hardware/getObject|getObject]], +// checking the provisionDate property. +// When provisionDate is not null, the server will be ready. Be sure to use the globalIdentifier +// as your initialization parameter. +// +// +// Warning: Servers created via this method will incur charges on your account. For testing input parameters see [[SoftLayer_Hardware/generateOrderTemplate|generateOrderTemplate]]. +// +// +// Input - [[SoftLayer_Hardware (type)|SoftLayer_Hardware]] +//
+// * hostname - Hostname for the server. Required. Type - string.
+//
+// * domain - Domain for the server. Required. Type - string.
+//
+// * processorCoreAmount - The number of logical CPU cores to allocate. Required. Type - int.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// * memoryCapacity - The amount of memory to allocate in gigabytes. Required. Type - int.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// * hourlyBillingFlag - Specifies the billing type for the server. Required. Type - boolean.
+//   When true the server will be billed on hourly usage, otherwise it will be billed on a monthly basis.
+//
+// * operatingSystemReferenceCode - An identifier for the operating system to provision the server with.
+//   Required. Type - string.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// * datacenter.name - Specifies which datacenter the server is to be provisioned in. Required. Type - string.
+//   The datacenter property is a [[SoftLayer_Location (type)|location]] structure with the name field set.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   {"datacenter": {"name": "dal05"}}
+//
+// * networkComponents.maxSpeed - Specifies the connection speed for the server's network components.
+//   Optional. Type - int. Default - The highest available zero cost port speed will be used.
+//   Description - The networkComponents property is an array with a single
+//   [[SoftLayer_Network_Component (type)|network component]] structure. The maxSpeed property must be set
+//   to specify the network uplink speed, in megabits per second, of the server.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   {"networkComponents": [{"maxSpeed": 1000}]}
+//
+// * networkComponents.redundancyEnabledFlag - Specifies whether or not the server's network components
+//   should be in redundancy groups. Optional. Type - bool. Default - false.
+//   Description - The networkComponents property is an array with a single
+//   [[SoftLayer_Network_Component (type)|network component]] structure. When the redundancyEnabledFlag
+//   property is true the server's network components will be in redundancy groups.
+//   {"networkComponents": [{"redundancyEnabledFlag": false}]}
+//
+// * privateNetworkOnlyFlag - Specifies whether or not the server only has access to the private network.
+//   Optional. Type - boolean. Default - false. When true this flag specifies that a server is to only
+//   have access to the private network.
+//
+// * primaryNetworkComponent.networkVlan.id - Specifies the network vlan which is to be used for the
+//   frontend interface of the server. Optional. Type - int.
+//   Description - The primaryNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]]
+//   structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure.
+//   The id property must be set to specify the frontend network vlan of the server.
+//   {"primaryNetworkComponent": {"networkVlan": {"id": 1}}}
+//
+// * primaryBackendNetworkComponent.networkVlan.id - Specifies the network vlan which is to be used for the
+//   backend interface of the server. Optional. Type - int.
+//   Description - The primaryBackendNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]]
+//   structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure.
+//   The id property must be set to specify the backend network vlan of the server.
+//   {"primaryBackendNetworkComponent": {"networkVlan": {"id": 2}}}
+//
+// * fixedConfigurationPreset.keyName - Optional. Type - string.
+//   Description - The fixedConfigurationPreset property is a [[SoftLayer_Product_Package_Preset (type)|fixed configuration preset]]
+//   structure. The keyName property must be set to specify preset to use. If a fixed configuration preset
+//   is used processorCoreAmount, memoryCapacity and hardDrives properties must not be set.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   {"fixedConfigurationPreset": {"keyName": "SOME_KEY_NAME"}}
+//
+// * userData.value - Arbitrary data to be made available to the server. Optional. Type - string.
+//   Description - The userData property is an array with a single [[SoftLayer_Hardware_Attribute (type)|attribute]]
+//   structure with the value property set to an arbitrary value. This value can be retrieved via the
+//   [[SoftLayer_Resource_Metadata/getUserMetadata|getUserMetadata]] method from a request originating from
+//   the server. This is primarily useful for providing data to software that may be on the server and
+//   configured to execute upon first boot.
+//   {"userData": [{"value": "someValue"}]}
+//
+// * hardDrives - Hard drive settings for the server. Optional. Type - SoftLayer_Hardware_Component.
+//   Default - The largest available capacity for a zero cost primary disk will be used.
+//   Description - The hardDrives property is an array of [[SoftLayer_Hardware_Component (type)|hardware component]]
+//   structures. Each hard drive must specify the capacity property.
+//   See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   {"hardDrives": [{"capacity": 500}]}
+//
+// * sshKeys - SSH keys to install on the server upon provisioning. Optional.
+//   Type - array of [[SoftLayer_Security_Ssh_Key (type)|SoftLayer_Security_Ssh_Key]].
+//   Description - The sshKeys property is an array of [[SoftLayer_Security_Ssh_Key (type)|SSH Key]] structures
+//   with the id property set to the value of an existing SSH key.
+//   To create a new SSH key, call [[SoftLayer_Security_Ssh_Key/createObject|createObject]] on the
+//   [[SoftLayer_Security_Ssh_Key]] service.
+//   To obtain a list of existing SSH keys, call [[SoftLayer_Account/getSshKeys|getSshKeys]] on the
+//   [[SoftLayer_Account]] service.
+//   {"sshKeys": [{"id": 123}]}
+//
+// * postInstallScriptUri - Specifies the uri location of the script to be downloaded and run after
+//   installation is complete. Optional. Type - string.
+//
+//
+// REST Example
+//
    +// curl -X POST -d '{ +// "parameters":[ +// { +// "hostname": "host1", +// "domain": "example.com", +// "processorCoreAmount": 2, +// "memoryCapacity": 2, +// "hourlyBillingFlag": true, +// "operatingSystemReferenceCode": "UBUNTU_LATEST" +// } +// ] +// }' https://api.softlayer.com/rest/v3/SoftLayer_Hardware.json +// +// HTTP/1.1 201 Created +// Location: https://api.softlayer.com/rest/v3/SoftLayer_Hardware/f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5/getObject +// +// +// { +// "accountId": 232298, +// "bareMetalInstanceFlag": null, +// "domain": "example.com", +// "hardwareStatusId": null, +// "hostname": "host1", +// "id": null, +// "serviceProviderId": null, +// "serviceProviderResourceId": null, +// "globalIdentifier": "f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5", +// "hourlyBillingFlag": true, +// "memoryCapacity": 2, +// "operatingSystemReferenceCode": "UBUNTU_LATEST", +// "processorCoreAmount": 2 +// } +// +func (r Hardware_SecurityModule) CreateObject(templateObject *datatypes.Hardware_SecurityModule) (resp datatypes.Hardware_SecurityModule, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule) CreatePostSoftwareInstallTransaction(installCodes []string, returnBoolean *bool) (resp bool, err error) { + params := []interface{}{ + installCodes, + returnBoolean, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "createPostSoftwareInstallTransaction", params, &r.Options, &resp) + return +} + +// +// This method will cancel a server effective immediately. For servers billed hourly, the charges will stop immediately after the method returns. +func (r Hardware_SecurityModule) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "deleteObject", nil, &r.Options, &resp) + return +} + +// Delete software component passwords. +func (r Hardware_SecurityModule) DeleteSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "deleteSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Edit a server's properties +func (r Hardware_SecurityModule) EditObject(templateObject *datatypes.Hardware_Server) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "editObject", params, &r.Options, &resp) + return +} + +// Edit the properties of a software component password such as the username, password, and notes. +func (r Hardware_SecurityModule) EditSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "editSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Download and run remote script from uri on the hardware. 
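For comparison with the REST example above, the following is a minimal, purely illustrative Go sketch of the same kind of order through the generated client. It is not part of this patch; the template field names are assumed to mirror the documented SoftLayer_Hardware properties as generated in the datatypes package:

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("myUser", "myApiKey") // assumed credentials
	svc := services.GetHardwareSecurityModuleService(sess)

	// Values mirror the REST example above; field names are assumed from the
	// generated datatypes package.
	hostname := "host1"
	domain := "example.com"
	osCode := "UBUNTU_LATEST"
	hourly := true

	var template datatypes.Hardware_SecurityModule
	template.Hostname = &hostname
	template.Domain = &domain
	template.OperatingSystemReferenceCode = &osCode
	template.HourlyBillingFlag = &hourly
	// processorCoreAmount, memoryCapacity and datacenter.name are also required
	// (see the property list above); set the corresponding pointer fields the same way.

	// Warning, per the doc comment above: this places a real order and incurs charges.
	created, err := svc.CreateObject(&template)
	if err != nil {
		log.Fatal(err)
	}

	// Poll getObject with the returned globalIdentifier until provisionDate is set.
	fmt.Printf("order accepted: %+v\n", created)
}
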
+func (r Hardware_SecurityModule) ExecuteRemoteScript(uri *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + uri, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "executeRemoteScript", params, &r.Options, &resp) + return +} + +// The '''findByIpAddress''' method finds hardware using its primary public or private IP address. IP addresses that have a secondary subnet tied to the hardware will not return the hardware - alternate means of locating the hardware must be used (see '''Associated Methods'''). If no hardware is found, no errors are generated and no data is returned. +func (r Hardware_SecurityModule) FindByIpAddress(ipAddress *string) (resp datatypes.Hardware, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "findByIpAddress", params, &r.Options, &resp) + return +} + +// +// Obtain an [[SoftLayer_Container_Product_Order_Hardware_Server (type)|order container]] that can be sent to [[SoftLayer_Product_Order/verifyOrder|verifyOrder]] or [[SoftLayer_Product_Order/placeOrder|placeOrder]]. +// +// +// This is primarily useful when there is a necessity to confirm the price which will be charged for an order. +// +// +// See [[SoftLayer_Hardware/createObject|createObject]] for specifics on the requirements of the template object parameter. +func (r Hardware_SecurityModule) GenerateOrderTemplate(templateObject *datatypes.Hardware) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "generateOrderTemplate", params, &r.Options, &resp) + return +} + +// Retrieve The account associated with a piece of hardware. +func (r Hardware_SecurityModule) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's active physical components. +func (r Hardware_SecurityModule) GetActiveComponents() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getActiveComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a server's attached network firewall. +func (r Hardware_SecurityModule) GetActiveNetworkFirewallBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getActiveNetworkFirewallBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's active network monitoring incidents. +func (r Hardware_SecurityModule) GetActiveNetworkMonitorIncident() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getActiveNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetActiveTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getActiveTickets", nil, &r.Options, &resp) + return +} + +// Retrieve Transaction currently running for server. 
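As the generateOrderTemplate comment above notes, it is the safe way to confirm pricing before an order is placed, and findByIpAddress locates existing hardware by its primary IP. A purely illustrative sketch (not part of this patch), with a deliberately partial template and a hypothetical IP address:

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("myUser", "myApiKey") // assumed credentials
	svc := services.GetHardwareSecurityModuleService(sess)

	// Build the same kind of template createObject expects (only two of the
	// required properties are shown here), then ask for the order container
	// that can be fed to verifyOrder or placeOrder.
	hostname, domain := "host1", "example.com"
	hwTemplate := datatypes.Hardware{
		Hostname: &hostname,
		Domain:   &domain,
	}
	container, err := svc.GenerateOrderTemplate(&hwTemplate)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("order container for verifyOrder/placeOrder: %+v\n", container)

	// Locate existing hardware by its primary public or private IP address.
	ip := "203.0.113.10" // hypothetical address
	hw, err := svc.FindByIpAddress(&ip)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found hardware: %+v\n", hw)
}
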
+func (r Hardware_SecurityModule) GetActiveTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getActiveTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve Any active transaction(s) that are currently running for the server (example: os reload). +func (r Hardware_SecurityModule) GetActiveTransactions() (resp []datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getActiveTransactions", nil, &r.Options, &resp) + return +} + +// The '''getAlarmHistory''' method retrieves a detailed history for the monitoring alarm. When calling this method, a start and end date for the history to be retrieved must be entered. +func (r Hardware_SecurityModule) GetAlarmHistory(startDate *datatypes.Time, endDate *datatypes.Time, alarmId *string) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + alarmId, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAlarmHistory", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetAllPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAllPowerComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Allowed_Host information to connect this server to Network Storage volumes that require access control lists. +func (r Hardware_SecurityModule) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAllowedHost", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. +func (r Hardware_SecurityModule) GetAllowedNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAllowedNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects whose Replica that this SoftLayer_Hardware has access to. +func (r Hardware_SecurityModule) GetAllowedNetworkStorageReplicas() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAllowedNetworkStorageReplicas", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding an antivirus/spyware software component object. +func (r Hardware_SecurityModule) GetAntivirusSpywareSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAntivirusSpywareSoftwareComponent", nil, &r.Options, &resp) + return +} + +// This method is retrieve a list of SoftLayer_Network_Storage volumes that are authorized access to this SoftLayer_Hardware. +func (r Hardware_SecurityModule) GetAttachedNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAttachedNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's specific attributes. 
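Several of the wrappers above take optional filter-style arguments as pointers, getAttachedNetworkStorages(nasType) being a typical case. A purely illustrative sketch (not part of this patch); "ISCSI" is an assumed value for nasType, so consult the SoftLayer documentation for the accepted set:

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("myUser", "myApiKey")                              // assumed credentials
	svc := services.GetHardwareSecurityModuleService(sess).Id(123456)     // hypothetical hardware id

	// nasType is passed as a *string.
	nasType := "ISCSI"
	volumes, err := svc.GetAttachedNetworkStorages(&nasType)
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range volumes {
		fmt.Printf("%+v\n", v)
	}
}
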
+func (r Hardware_SecurityModule) GetAttributes() (resp []datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve An object that stores the maximum level for the monitoring query types and response types. +func (r Hardware_SecurityModule) GetAvailableMonitoring() (resp []datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAvailableMonitoring", nil, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that can be authorized to this SoftLayer_Hardware. +func (r Hardware_SecurityModule) GetAvailableNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAvailableNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve The average daily total bandwidth usage for the current billing cycle. +func (r Hardware_SecurityModule) GetAverageDailyBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAverageDailyBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The average daily private bandwidth usage for the current billing cycle. +func (r Hardware_SecurityModule) GetAverageDailyPrivateBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAverageDailyPrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The average daily public bandwidth usage for the current billing cycle. +func (r Hardware_SecurityModule) GetAverageDailyPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAverageDailyPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Use this method to return an array of private bandwidth utilization records between a given date range. +// +// This method represents the NEW version of getFrontendBandwidthUse +func (r Hardware_SecurityModule) GetBackendBandwidthUsage(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBackendBandwidthUsage", params, &r.Options, &resp) + return +} + +// Use this method to return an array of private bandwidth utilization records between a given date range. +func (r Hardware_SecurityModule) GetBackendBandwidthUse(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Network_Bandwidth_Version1_Usage_Detail, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBackendBandwidthUse", params, &r.Options, &resp) + return +} + +// The '''getBackendIncomingBandwidth''' method retrieves the amount of incoming private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes. 
+func (r Hardware_SecurityModule) GetBackendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBackendIncomingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's back-end or private network components. +func (r Hardware_SecurityModule) GetBackendNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBackendNetworkComponents", nil, &r.Options, &resp) + return +} + +// The '''getBackendOutgoingBandwidth''' method retrieves the amount of outgoing private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes. +func (r Hardware_SecurityModule) GetBackendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBackendOutgoingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A hardware's backend or private router. +func (r Hardware_SecurityModule) GetBackendRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBackendRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth (measured in GB). +func (r Hardware_SecurityModule) GetBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted detail record. Allotment details link bandwidth allocation with allotments. +func (r Hardware_SecurityModule) GetBandwidthAllotmentDetail() (resp datatypes.Network_Bandwidth_Version1_Allotment_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBandwidthAllotmentDetail", nil, &r.Options, &resp) + return +} + +// Retrieve a collection of bandwidth data from an individual public or private network tracking object. Data is ideal if you with to employ your own traffic storage and graphing systems. +func (r Hardware_SecurityModule) GetBandwidthForDateRange(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBandwidthForDateRange", params, &r.Options, &resp) + return +} + +// Use this method when needing a bandwidth image for a single server. It will gather the correct input parameters for the generic graphing utility automatically based on the snapshot specified. Use the $draw flag to suppress the generation of the actual binary PNG image. 
+func (r Hardware_SecurityModule) GetBandwidthImage(networkType *string, snapshotRange *string, draw *bool, dateSpecified *datatypes.Time, dateSpecifiedEnd *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + networkType, + snapshotRange, + draw, + dateSpecified, + dateSpecifiedEnd, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBandwidthImage", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's benchmark certifications. +func (r Hardware_SecurityModule) GetBenchmarkCertifications() (resp []datatypes.Hardware_Benchmark_Certification, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBenchmarkCertifications", nil, &r.Options, &resp) + return +} + +// Retrieve The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to. +func (r Hardware_SecurityModule) GetBillingCycleBandwidthUsage() (resp []datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBillingCycleBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The raw private bandwidth usage data for the current billing cycle. +func (r Hardware_SecurityModule) GetBillingCyclePrivateBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBillingCyclePrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The raw public bandwidth usage data for the current billing cycle. +func (r Hardware_SecurityModule) GetBillingCyclePublicBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBillingCyclePublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for a server. +func (r Hardware_SecurityModule) GetBillingItem() (resp datatypes.Billing_Item_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that a billing item exists. +func (r Hardware_SecurityModule) GetBillingItemFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBillingItemFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Determine if BIOS password should be left as null. +func (r Hardware_SecurityModule) GetBiosPasswordNullFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBiosPasswordNullFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether the hardware is ineligible for cancellation because it is disconnected. +func (r Hardware_SecurityModule) GetBlockCancelBecauseDisconnectedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBlockCancelBecauseDisconnectedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Status indicating whether or not a piece of hardware has business continuance insurance. +func (r Hardware_SecurityModule) GetBusinessContinuanceInsuranceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBusinessContinuanceInsuranceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Child hardware. 
+func (r Hardware_SecurityModule) GetChildrenHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getChildrenHardware", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule) GetComponentDetailsXML() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getComponentDetailsXML", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's components. +func (r Hardware_SecurityModule) GetComponents() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getComponents", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetContainsSolidStateDrivesFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getContainsSolidStateDrivesFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A continuous data protection/server backup software component object. +func (r Hardware_SecurityModule) GetContinuousDataProtectionSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getContinuousDataProtectionSoftwareComponent", nil, &r.Options, &resp) + return +} + +// Retrieve A server's control panel. +func (r Hardware_SecurityModule) GetControlPanel() (resp datatypes.Software_Component_ControlPanel, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getControlPanel", nil, &r.Options, &resp) + return +} + +// Retrieve The total cost of a server, measured in US Dollars ($USD). +func (r Hardware_SecurityModule) GetCost() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCost", nil, &r.Options, &resp) + return +} + +// +// There are many options that may be provided while ordering a server, this method can be used to determine what these options are. +// +// +// Detailed information on the return value can be found on the data type page for [[SoftLayer_Container_Hardware_Configuration (type)]]. +func (r Hardware_SecurityModule) GetCreateObjectOptions() (resp datatypes.Container_Hardware_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCreateObjectOptions", nil, &r.Options, &resp) + return +} + +// Retrieve An object that provides commonly used bandwidth summary components for the current billing cycle. +func (r Hardware_SecurityModule) GetCurrentBandwidthSummary() (resp datatypes.Metric_Tracking_Object_Bandwidth_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCurrentBandwidthSummary", nil, &r.Options, &resp) + return +} + +// Attempt to retrieve the file associated with the current benchmark certification result, if such a file exists. If there is no file for this benchmark certification result, calling this method throws an exception. +func (r Hardware_SecurityModule) GetCurrentBenchmarkCertificationResultFile() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCurrentBenchmarkCertificationResultFile", nil, &r.Options, &resp) + return +} + +// Retrieve The current billable public outbound bandwidth for this hardware for the current billing cycle. 
+func (r Hardware_SecurityModule) GetCurrentBillableBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCurrentBillableBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Get the billing detail for this instance for the current billing period. This does not include bandwidth usage. +func (r Hardware_SecurityModule) GetCurrentBillingDetail() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCurrentBillingDetail", nil, &r.Options, &resp) + return +} + +// The '''getCurrentBillingTotal''' method retrieves the total bill amount in US Dollars ($) for the current billing period. In addition to the total bill amount, the billing detail also includes all bandwidth used up to the point the method is called on the piece of hardware. +func (r Hardware_SecurityModule) GetCurrentBillingTotal() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCurrentBillingTotal", nil, &r.Options, &resp) + return +} + +// Retrieve bandwidth graph by date. +func (r Hardware_SecurityModule) GetCustomBandwidthDataByDate(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + graphData, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCustomBandwidthDataByDate", params, &r.Options, &resp) + return +} + +// Retrieve Indicates if a server has a Customer Installed OS +func (r Hardware_SecurityModule) GetCustomerInstalledOperatingSystemFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCustomerInstalledOperatingSystemFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Indicates if a server is a customer owned device. +func (r Hardware_SecurityModule) GetCustomerOwnedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCustomerOwnedFlag", nil, &r.Options, &resp) + return +} + +// The '''getDailyAverage''' method calculates the average daily network traffic used by the selected server. Using the required parameter ''dateTime'' to enter a start and end date, the user retrieves this average, measure in gigabytes (GB) for the specified date range. When entering parameters, only the month, day and year are required - time entries are omitted as this method defaults the time to midnight in order to account for the entire day. +func (r Hardware_SecurityModule) GetDailyAverage(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDailyAverage", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the datacenter in which a piece of hardware resides. +func (r Hardware_SecurityModule) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the datacenter in which a piece of hardware resides. +func (r Hardware_SecurityModule) GetDatacenterName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDatacenterName", nil, &r.Options, &resp) + return +} + +// Retrieve Number of day(s) a server have been in spare pool. 
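The bandwidth helpers above take their start and end dates as *datatypes.Time and, as the comments note, only the year, month and day are significant. A purely illustrative sketch (not part of this patch), assuming datatypes.Time wraps time.Time as in the generated package:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("myUser", "myApiKey")                          // assumed credentials
	svc := services.GetHardwareSecurityModuleService(sess).Id(123456) // hypothetical hardware id

	// Last 30 days; the time-of-day portion is ignored by the API.
	end := datatypes.Time{Time: time.Now()}
	start := datatypes.Time{Time: end.AddDate(0, 0, -30)}

	avg, err := svc.GetDailyAverage(&start, &end)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("average daily traffic (GB): %v\n", avg)
}
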
+func (r Hardware_SecurityModule) GetDaysInSparePool() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDaysInSparePool", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware that has uplink network connections to a piece of hardware. +func (r Hardware_SecurityModule) GetDownlinkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownlinkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware that has uplink network connections to a piece of hardware. +func (r Hardware_SecurityModule) GetDownlinkNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownlinkNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all servers attached to a piece of network hardware. +func (r Hardware_SecurityModule) GetDownlinkServers() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownlinkServers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all virtual guests attached to a piece of network hardware. +func (r Hardware_SecurityModule) GetDownlinkVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownlinkVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware downstream from a network device. +func (r Hardware_SecurityModule) GetDownstreamHardwareBindings() (resp []datatypes.Network_Component_Uplink_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownstreamHardwareBindings", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware downstream from the selected piece of hardware. +func (r Hardware_SecurityModule) GetDownstreamNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownstreamNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware with monitoring warnings or errors that are downstream from the selected piece of hardware. +func (r Hardware_SecurityModule) GetDownstreamNetworkHardwareWithIncidents() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownstreamNetworkHardwareWithIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all servers attached downstream to a piece of network hardware. +func (r Hardware_SecurityModule) GetDownstreamServers() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownstreamServers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all virtual guests attached to a piece of network hardware. +func (r Hardware_SecurityModule) GetDownstreamVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownstreamVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The drive controllers contained within a piece of hardware. 
+func (r Hardware_SecurityModule) GetDriveControllers() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDriveControllers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated EVault network storage service account. +func (r Hardware_SecurityModule) GetEvaultNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getEvaultNetworkStorage", nil, &r.Options, &resp) + return +} + +// Get the subnets associated with this server that are protectable by a network component firewall. +func (r Hardware_SecurityModule) GetFirewallProtectableSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFirewallProtectableSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's firewall services. +func (r Hardware_SecurityModule) GetFirewallServiceComponent() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFirewallServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Defines the fixed components in a fixed configuration bare metal server. +func (r Hardware_SecurityModule) GetFixedConfigurationPreset() (resp datatypes.Product_Package_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFixedConfigurationPreset", nil, &r.Options, &resp) + return +} + +// Use this method to return an array of public bandwidth utilization records between a given date range. +// +// This method represents the NEW version of getFrontendBandwidthUse +func (r Hardware_SecurityModule) GetFrontendBandwidthUsage(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFrontendBandwidthUsage", params, &r.Options, &resp) + return +} + +// Use this method to return an array of public bandwidth utilization records between a given date range. +func (r Hardware_SecurityModule) GetFrontendBandwidthUse(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Network_Bandwidth_Version1_Usage_Detail, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFrontendBandwidthUse", params, &r.Options, &resp) + return +} + +// The '''getFrontendIncomingBandwidth''' method retrieves the amount of incoming public network traffic used by a server between the given start and end date parameters. When entering the ''dateTime'' parameter, only the month, day and year of the start and end dates are required - the time (hour, minute and second) are set to midnight by default and cannot be changed. The amount of bandwidth retrieved is measured in gigabytes (GB). +func (r Hardware_SecurityModule) GetFrontendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFrontendIncomingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's front-end or public network components. 
+func (r Hardware_SecurityModule) GetFrontendNetworkComponents() (resp []datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFrontendNetworkComponents", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getFrontendOutgoingBandwidth''' method retrieves the amount of outgoing public network traffic used by a server between the given start and end date parameters. The ''dateTime'' parameter requires only the day, month and year to be entered - the time (hour, minute and second) is set to midnight by default in order to gather the data for the entire start and end date indicated in the parameter. The amount of bandwidth retrieved is measured in gigabytes (GB).
+func (r Hardware_SecurityModule) GetFrontendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFrontendOutgoingBandwidth", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's frontend or public routers.
+func (r Hardware_SecurityModule) GetFrontendRouters() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFrontendRouters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's universally unique identifier.
+func (r Hardware_SecurityModule) GetGlobalIdentifier() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getGlobalIdentifier", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The hard drives contained within a piece of hardware.
+func (r Hardware_SecurityModule) GetHardDrives() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHardDrives", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a server by searching for the primary IP address.
+func (r Hardware_SecurityModule) GetHardwareByIpAddress(ipAddress *string) (resp datatypes.Hardware_Server, err error) {
+	params := []interface{}{
+		ipAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHardwareByIpAddress", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The chassis that a piece of hardware is housed in.
+func (r Hardware_SecurityModule) GetHardwareChassis() (resp datatypes.Hardware_Chassis, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHardwareChassis", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's function.
+func (r Hardware_SecurityModule) GetHardwareFunction() (resp datatypes.Hardware_Function, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHardwareFunction", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's function description.
+func (r Hardware_SecurityModule) GetHardwareFunctionDescription() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHardwareFunctionDescription", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's status.
+func (r Hardware_SecurityModule) GetHardwareStatus() (resp datatypes.Hardware_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHardwareStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Determine if hardware has a Single Root IO Virtualization (SR-IOV) billing item.
+func (r Hardware_SecurityModule) GetHasSingleRootVirtualizationBillingItemFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHasSingleRootVirtualizationBillingItemFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Determine if hardware object has TPM enabled.
+func (r Hardware_SecurityModule) GetHasTrustedPlatformModuleBillingItemFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHasTrustedPlatformModuleBillingItemFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a host IPS software component object.
+func (r Hardware_SecurityModule) GetHostIpsSoftwareComponent() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHostIpsSoftwareComponent", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getHourlyBandwidth''' method retrieves all bandwidth updates hourly for the specified hardware. Because the potential number of data points can become excessive, the method limits the user to obtain data in 24-hour intervals. The required ''dateTime'' parameter is used as the starting point for the query and will be calculated for the 24-hour period starting with the specified date and time. For example, entering a parameter of
+//
+// '02/01/2008 0:00'
+//
+// results in a return of all bandwidth data for the entire day of February 1, 2008, as 0:00 specifies a midnight start date. Please note that the time entered should be completed using a 24-hour clock (military time, astronomical time).
+//
+// For data spanning more than a single 24-hour period, refer to the getBandwidthData function on the metricTrackingObject for the piece of hardware.
+func (r Hardware_SecurityModule) GetHourlyBandwidth(mode *string, day *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		mode,
+		day,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHourlyBandwidth", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A server's hourly billing status.
+func (r Hardware_SecurityModule) GetHourlyBillingFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHourlyBillingFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The sum of all the inbound network traffic data for the last 30 days.
+func (r Hardware_SecurityModule) GetInboundBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getInboundBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total private inbound bandwidth for this hardware for the current billing cycle.
+func (r Hardware_SecurityModule) GetInboundPrivateBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getInboundPrivateBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total public inbound bandwidth for this hardware for the current billing cycle.
+func (r Hardware_SecurityModule) GetInboundPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getInboundPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Determine if hardware object has the IBM_CLOUD_READY_NODE_CERTIFIED attribute.
+func (r Hardware_SecurityModule) GetIsCloudReadyNodeCertified() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getIsCloudReadyNodeCertified", nil, &r.Options, &resp) + return +} + +// Return a collection of SoftLayer_Item_Price objects from a collection of SoftLayer_Software_Description +func (r Hardware_SecurityModule) GetItemPricesFromSoftwareDescriptions(softwareDescriptions []datatypes.Software_Description, includeTranslationsFlag *bool, returnAllPricesFlag *bool) (resp []datatypes.Product_Item, err error) { + params := []interface{}{ + softwareDescriptions, + includeTranslationsFlag, + returnAllPricesFlag, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getItemPricesFromSoftwareDescriptions", params, &r.Options, &resp) + return +} + +// Retrieve The last transaction that a server's operating system was loaded. +func (r Hardware_SecurityModule) GetLastOperatingSystemReload() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getLastOperatingSystemReload", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the last transaction a server performed. +func (r Hardware_SecurityModule) GetLastTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getLastTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's latest network monitoring incident. +func (r Hardware_SecurityModule) GetLatestNetworkMonitorIncident() (resp datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getLatestNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// Retrieve Where a piece of hardware is located within SoftLayer's location hierarchy. +func (r Hardware_SecurityModule) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetLocationPathString() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getLocationPathString", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a lockbox account associated with a server. +func (r Hardware_SecurityModule) GetLockboxNetworkStorage() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getLockboxNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the hardware is a managed resource. +func (r Hardware_SecurityModule) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve the remote management network component attached with this server. +func (r Hardware_SecurityModule) GetManagementNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getManagementNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's memory. 
+func (r Hardware_SecurityModule) GetMemory() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMemory", nil, &r.Options, &resp) + return +} + +// Retrieve The amount of memory a piece of hardware has, measured in gigabytes. +func (r Hardware_SecurityModule) GetMemoryCapacity() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMemoryCapacity", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's metric tracking object. +func (r Hardware_SecurityModule) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object_HardwareServer, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Retrieve The metric tracking object id for this server. +func (r Hardware_SecurityModule) GetMetricTrackingObjectId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMetricTrackingObjectId", nil, &r.Options, &resp) + return +} + +// Returns open monitoring alarms for a given time period +func (r Hardware_SecurityModule) GetMonitoringActiveAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringActiveAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the monitoring agents associated with a piece of hardware. +func (r Hardware_SecurityModule) GetMonitoringAgents() (resp []datatypes.Monitoring_Agent, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringAgents", nil, &r.Options, &resp) + return +} + +// Returns closed monitoring alarms for a given time period +func (r Hardware_SecurityModule) GetMonitoringClosedAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringClosedAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's monitoring robot. +func (r Hardware_SecurityModule) GetMonitoringRobot() (resp datatypes.Monitoring_Robot, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringRobot", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitoring services. +func (r Hardware_SecurityModule) GetMonitoringServiceComponent() (resp datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The monitoring service flag eligibility status for a piece of hardware. +func (r Hardware_SecurityModule) GetMonitoringServiceEligibilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringServiceEligibilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The service flag status for a piece of hardware. 
+func (r Hardware_SecurityModule) GetMonitoringServiceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringServiceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The monitoring notification objects for this hardware. Each object links this hardware instance to a user account that will be notified if monitoring on this hardware object fails +func (r Hardware_SecurityModule) GetMonitoringUserNotification() (resp []datatypes.User_Customer_Notification_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringUserNotification", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's motherboard. +func (r Hardware_SecurityModule) GetMotherboard() (resp datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMotherboard", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network cards. +func (r Hardware_SecurityModule) GetNetworkCards() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkCards", nil, &r.Options, &resp) + return +} + +// Get the IP addresses associated with this server that are protectable by a network component firewall. Note, this may not return all values for IPv6 subnets for this server. Please use getFirewallProtectableSubnets to get all protectable subnets. +func (r Hardware_SecurityModule) GetNetworkComponentFirewallProtectableIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkComponentFirewallProtectableIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve Returns a hardware's network components. +func (r Hardware_SecurityModule) GetNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The gateway member if this device is part of a network gateway. +func (r Hardware_SecurityModule) GetNetworkGatewayMember() (resp datatypes.Network_Gateway_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkGatewayMember", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not this device is part of a network gateway. +func (r Hardware_SecurityModule) GetNetworkGatewayMemberFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkGatewayMemberFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's network management IP address. +func (r Hardware_SecurityModule) GetNetworkManagementIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkManagementIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve All servers with failed monitoring that are attached downstream to a piece of hardware. 
+func (r Hardware_SecurityModule) GetNetworkMonitorAttachedDownHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkMonitorAttachedDownHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Virtual guests that are attached downstream to a hardware that have failed monitoring +func (r Hardware_SecurityModule) GetNetworkMonitorAttachedDownVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkMonitorAttachedDownVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The status of all of a piece of hardware's network monitoring incidents. +func (r Hardware_SecurityModule) GetNetworkMonitorIncidents() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkMonitorIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitors. +func (r Hardware_SecurityModule) GetNetworkMonitors() (resp []datatypes.Network_Monitor_Version1_Query_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkMonitors", nil, &r.Options, &resp) + return +} + +// Retrieve The value of a hardware's network status attribute. +func (r Hardware_SecurityModule) GetNetworkStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's related network status attribute. +func (r Hardware_SecurityModule) GetNetworkStatusAttribute() (resp datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkStatusAttribute", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated network storage service account. +func (r Hardware_SecurityModule) GetNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The network virtual LANs (VLANs) associated with a piece of hardware's network components. +func (r Hardware_SecurityModule) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkVlans", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth for the next billing cycle (measured in GB). +func (r Hardware_SecurityModule) GetNextBillingCycleBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNextBillingCycleBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetNotesHistory() (resp []datatypes.Hardware_Note, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNotesHistory", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule) GetObject() (resp datatypes.Hardware_SecurityModule, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve An open ticket requesting cancellation of this server, if one exists. 
+func (r Hardware_SecurityModule) GetOpenCancellationTicket() (resp datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getOpenCancellationTicket", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's operating system. +func (r Hardware_SecurityModule) GetOperatingSystem() (resp datatypes.Software_Component_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getOperatingSystem", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's operating system software description. +func (r Hardware_SecurityModule) GetOperatingSystemReferenceCode() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getOperatingSystemReferenceCode", nil, &r.Options, &resp) + return +} + +// Retrieve The sum of all the outbound network traffic data for the last 30 days. +func (r Hardware_SecurityModule) GetOutboundBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getOutboundBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total private outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_SecurityModule) GetOutboundPrivateBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getOutboundPrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_SecurityModule) GetOutboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getOutboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the bandwidth usage for this hardware for the current billing cycle exceeds the allocation. +func (r Hardware_SecurityModule) GetOverBandwidthAllocationFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getOverBandwidthAllocationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve a server's hardware state via its internal sensors. Remote sensor data is transmitted to the SoftLayer API by way of the server's remote management card. Sensor data measures system temperatures, voltages, and other local server settings. Sensor data is cached for 30 seconds. Calls made to getSensorData for the same server within 30 seconds of each other will return the same data. Subsequent calls will return new data once the cache expires. +func (r Hardware_SecurityModule) GetPMInfo() (resp []datatypes.Container_RemoteManagement_PmInfo, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPMInfo", nil, &r.Options, &resp) + return +} + +// Retrieve Blade Bay +func (r Hardware_SecurityModule) GetParentBay() (resp datatypes.Hardware_Blade, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getParentBay", nil, &r.Options, &resp) + return +} + +// Retrieve Parent Hardware. +func (r Hardware_SecurityModule) GetParentHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getParentHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the Point of Presence (PoP) location in which a piece of hardware resides. 
+func (r Hardware_SecurityModule) GetPointOfPresenceLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPointOfPresenceLocation", nil, &r.Options, &resp) + return +} + +// Retrieve The power components for a hardware object. +func (r Hardware_SecurityModule) GetPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPowerComponents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's power supply. +func (r Hardware_SecurityModule) GetPowerSupply() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPowerSupply", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's primary private IP address. +func (r Hardware_SecurityModule) GetPrimaryBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrimaryBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's primary back-end network component. +func (r Hardware_SecurityModule) GetPrimaryBackendNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrimaryBackendNetworkComponent", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule) GetPrimaryDriveSize() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrimaryDriveSize", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's primary public IP address. +func (r Hardware_SecurityModule) GetPrimaryIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrimaryIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's primary public network component. +func (r Hardware_SecurityModule) GetPrimaryNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrimaryNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's private network bandwidth usage over the specified timeframe. If no timeframe is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPrivateBandwidthGraphImage returns a PNG image measuring 827 pixels by 293 pixels. +func (r Hardware_SecurityModule) GetPrivateBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateBandwidthData", params, &r.Options, &resp) + return +} + +// Retrieve a brief summary of a server's private network bandwidth usage. getPrivateBandwidthDataSummary retrieves a server's bandwidth allocation for its billing period, its estimated usage during its billing period, and an estimation of how much bandwidth it will use during its billing period based on its current usage. A server's projected bandwidth usage increases in accuracy as it progresses through its billing period. 
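+//
+// Editorial usage sketch (not part of the generated SoftLayer documentation):
+// the summary is returned as a datatypes.Container_Network_Bandwidth_Data_Summary
+// value; since its fields are generated pointers, the simplest inspection is to
+// print the whole container. Assuming svc is a Hardware_SecurityModule service
+// handle already scoped with .Id(id):
+//
+//	summary, err := svc.GetPrivateBandwidthDataSummary()
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Printf("private bandwidth summary: %+v\n", summary)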
+func (r Hardware_SecurityModule) GetPrivateBandwidthDataSummary() (resp datatypes.Container_Network_Bandwidth_Data_Summary, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateBandwidthDataSummary", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a graph of a server's private network bandwidth usage over the specified time frame. If no time frame is specified then getPrivateBandwidthGraphImage retrieves the last 24 hours of private bandwidth usage. getPrivateBandwidthGraphImage returns a PNG image.
+func (r Hardware_SecurityModule) GetPrivateBandwidthGraphImage(startTime *string, endTime *string) (resp []byte, err error) {
+	params := []interface{}{
+		startTime,
+		endTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateBandwidthGraphImage", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A server's primary private IP address.
+func (r Hardware_SecurityModule) GetPrivateIpAddress() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateIpAddress", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the private network component attached with this server.
+func (r Hardware_SecurityModule) GetPrivateNetworkComponent() (resp datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateNetworkComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether the hardware only has access to the private network.
+func (r Hardware_SecurityModule) GetPrivateNetworkOnlyFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateNetworkOnlyFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the backend VLAN for the primary IP address of the server.
+func (r Hardware_SecurityModule) GetPrivateVlan() (resp datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateVlan", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a backend network VLAN by searching for an IP address.
+func (r Hardware_SecurityModule) GetPrivateVlanByIpAddress(ipAddress *string) (resp datatypes.Network_Vlan, err error) {
+	params := []interface{}{
+		ipAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateVlanByIpAddress", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total number of processor cores, summed from all processors that are attached to a piece of hardware.
+func (r Hardware_SecurityModule) GetProcessorCoreAmount() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getProcessorCoreAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total number of physical processor cores, summed from all processors that are attached to a piece of hardware.
+func (r Hardware_SecurityModule) GetProcessorPhysicalCoreAmount() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getProcessorPhysicalCoreAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's processors.
+func (r Hardware_SecurityModule) GetProcessors() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getProcessors", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether the bandwidth usage for this hardware for the current billing cycle is projected to exceed the allocation.
+func (r Hardware_SecurityModule) GetProjectedOverBandwidthAllocationFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getProjectedOverBandwidthAllocationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The projected public outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_SecurityModule) GetProjectedPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getProjectedPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule) GetProvisionDate() (resp datatypes.Time, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getProvisionDate", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's public network bandwidth usage over the specified timeframe. If no timeframe is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPublicBandwidthGraphImage returns a PNG image measuring 827 pixels by 293 pixels. +func (r Hardware_SecurityModule) GetPublicBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPublicBandwidthData", params, &r.Options, &resp) + return +} + +// Retrieve a brief summary of a server's public network bandwidth usage. getPublicBandwidthDataSummary retrieves a server's bandwidth allocation for its billing period, its estimated usage during its billing period, and an estimation of how much bandwidth it will use during its billing period based on its current usage. A server's projected bandwidth usage increases in accuracy as it progresses through its billing period. +func (r Hardware_SecurityModule) GetPublicBandwidthDataSummary() (resp datatypes.Container_Network_Bandwidth_Data_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPublicBandwidthDataSummary", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's public network bandwidth usage over the specified time frame. If no time frame is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPublicBandwidthGraphImage returns a PNG image measuring 827 pixels by 293 pixels. THIS METHOD GENERATES GRAPHS BASED ON THE NEW DATA WAREHOUSE REPOSITORY. +func (r Hardware_SecurityModule) GetPublicBandwidthGraphImage(startTime *datatypes.Time, endTime *datatypes.Time) (resp []byte, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPublicBandwidthGraphImage", params, &r.Options, &resp) + return +} + +// Retrieve the total number of bytes used by a server over a specified time period via the data warehouse tracking objects for this hardware. +func (r Hardware_SecurityModule) GetPublicBandwidthTotal(startTime *int, endTime *int) (resp uint, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPublicBandwidthTotal", params, &r.Options, &resp) + return +} + +// Retrieve a SoftLayer server's public network component. Some servers are only connected to the private network and may not have a public network component. In that case getPublicNetworkComponent returns a null object. 
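+//
+// Editorial usage sketch (not part of the generated SoftLayer documentation):
+// because a private-network-only server has no public component, the "null
+// object" arrives in Go as a zero-valued datatypes.Network_Component, so check
+// a pointer field such as Id before using it (field name assumed from the
+// generated datatypes package). Assuming svc is an Id-scoped service handle:
+//
+//	comp, err := svc.GetPublicNetworkComponent()
+//	if err != nil {
+//		return err
+//	}
+//	if comp.Id == nil {
+//		fmt.Println("server is private-network only; no public component")
+//	}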
+func (r Hardware_SecurityModule) GetPublicNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPublicNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve the frontend VLAN for the primary IP address of the server +func (r Hardware_SecurityModule) GetPublicVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPublicVlan", nil, &r.Options, &resp) + return +} + +// Retrieve the frontend network Vlan by searching the hostname of a server +func (r Hardware_SecurityModule) GetPublicVlanByHostname(hostname *string) (resp datatypes.Network_Vlan, err error) { + params := []interface{}{ + hostname, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPublicVlanByHostname", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetRack() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRack", nil, &r.Options, &resp) + return +} + +// Retrieve The RAID controllers contained within a piece of hardware. +func (r Hardware_SecurityModule) GetRaidControllers() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRaidControllers", nil, &r.Options, &resp) + return +} + +// Retrieve Determine if hardware object is vSan Ready Node. +func (r Hardware_SecurityModule) GetReadyNodeFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getReadyNodeFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Recent events that impact this hardware. +func (r Hardware_SecurityModule) GetRecentEvents() (resp []datatypes.Notification_Occurrence_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRecentEvents", nil, &r.Options, &resp) + return +} + +// Retrieve The last five commands issued to the server's remote management card. +func (r Hardware_SecurityModule) GetRecentRemoteManagementCommands() (resp []datatypes.Hardware_Component_RemoteManagement_Command_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRecentRemoteManagementCommands", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetRegionalInternetRegistry() (resp datatypes.Network_Regional_Internet_Registry, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRegionalInternetRegistry", nil, &r.Options, &resp) + return +} + +// Retrieve A server's remote management card. +func (r Hardware_SecurityModule) GetRemoteManagement() (resp datatypes.Hardware_Component_RemoteManagement, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRemoteManagement", nil, &r.Options, &resp) + return +} + +// Retrieve User credentials to issue commands and/or interact with the server's remote management card. +func (r Hardware_SecurityModule) GetRemoteManagementAccounts() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRemoteManagementAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's associated remote management component. This is normally IPMI. 
+func (r Hardware_SecurityModule) GetRemoteManagementComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRemoteManagementComponent", nil, &r.Options, &resp) + return +} + +// Retrieve User(s) who have access to issue commands and/or interact with the server's remote management card. +func (r Hardware_SecurityModule) GetRemoteManagementUsers() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRemoteManagementUsers", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetResourceConfigurations() (resp []datatypes.Hardware_Resource_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getResourceConfigurations", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetResourceGroupMemberReferences() (resp []datatypes.Resource_Group_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getResourceGroupMemberReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetResourceGroupRoles() (resp []datatypes.Resource_Group_Role, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getResourceGroupRoles", nil, &r.Options, &resp) + return +} + +// Retrieve The resource groups in which this hardware is a member. +func (r Hardware_SecurityModule) GetResourceGroups() (resp []datatypes.Resource_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getResourceGroups", nil, &r.Options, &resp) + return +} + +// Retrieve the reverse domain records associated with this server. +func (r Hardware_SecurityModule) GetReverseDomainRecords() (resp []datatypes.Dns_Domain, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getReverseDomainRecords", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's routers. +func (r Hardware_SecurityModule) GetRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRouters", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of scale assets this hardware corresponds to. +func (r Hardware_SecurityModule) GetScaleAssets() (resp []datatypes.Scale_Asset, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getScaleAssets", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's vulnerability scan requests. +func (r Hardware_SecurityModule) GetSecurityScanRequests() (resp []datatypes.Network_Security_Scanner_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getSecurityScanRequests", nil, &r.Options, &resp) + return +} + +// Retrieve a server's hardware state via its internal sensors. Remote sensor data is transmitted to the SoftLayer API by way of the server's remote management card. Sensor data measures system temperatures, voltages, and other local server settings. Sensor data is cached for 30 seconds. Calls made to getSensorData for the same server within 30 seconds of each other will return the same data. Subsequent calls will return new data once the cache expires. 
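+//
+// Editorial usage sketch (not part of the generated SoftLayer documentation):
+// the readings come back as a slice of
+// datatypes.Container_RemoteManagement_SensorReading values, and because of
+// the 30-second cache described above, polling faster than that simply repeats
+// the same data. Assuming svc is an Id-scoped service handle:
+//
+//	readings, err := svc.GetSensorData()
+//	if err != nil {
+//		return err
+//	}
+//	for _, reading := range readings {
+//		fmt.Printf("%+v\n", reading) // temperatures, voltages, fan speeds, etc.
+//	}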
+func (r Hardware_SecurityModule) GetSensorData() (resp []datatypes.Container_RemoteManagement_SensorReading, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getSensorData", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieves the raw data returned from the server's remote management card. For more details of what is returned please refer to the getSensorData method. Along with the raw data, graphs for the CPU and system temperatures and fan speeds are also returned.
+func (r Hardware_SecurityModule) GetSensorDataWithGraphs() (resp datatypes.Container_RemoteManagement_SensorReadingsWithGraphs, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getSensorDataWithGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a server's hardware components, software, and network components. getServerDetails is an aggregation function that combines the results of [[SoftLayer_Hardware_Server::getComponents]], [[SoftLayer_Hardware_Server::getSoftware]], and [[SoftLayer_Hardware_Server::getNetworkComponents]] in a single container.
+func (r Hardware_SecurityModule) GetServerDetails() (resp datatypes.Container_Hardware_Server_Details, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getServerDetails", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the server's fan speeds and display them using tachometer graphs. Data used to construct graphs is retrieved from the server's remote management card. All graphs returned will have a title associated with them.
+func (r Hardware_SecurityModule) GetServerFanSpeedGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorSpeed, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getServerFanSpeedGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieves the power state for the server. The server's power status is retrieved from its remote management card. This will return 'on' or 'off'.
+func (r Hardware_SecurityModule) GetServerPowerState() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getServerPowerState", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the server room in which the hardware is located.
+func (r Hardware_SecurityModule) GetServerRoom() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getServerRoom", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the server's temperatures and display them using thermometer graphs. Temperatures retrieved are CPU(s) and system temperatures. Data used to construct graphs is retrieved from the server's remote management card. All graphs returned will have a title associated with them.
+func (r Hardware_SecurityModule) GetServerTemperatureGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorTemperature, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getServerTemperatureGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the piece of hardware's service provider.
+func (r Hardware_SecurityModule) GetServiceProvider() (resp datatypes.Service_Provider, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getServiceProvider", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's installed software.
+func (r Hardware_SecurityModule) GetSoftwareComponents() (resp []datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getSoftwareComponents", nil, &r.Options, &resp) + return +} + +// Retrieve Determine if hardware object has Software Guard Extension (SGX) enabled. +func (r Hardware_SecurityModule) GetSoftwareGuardExtensionEnabled() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getSoftwareGuardExtensionEnabled", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for a spare pool server. +func (r Hardware_SecurityModule) GetSparePoolBillingItem() (resp datatypes.Billing_Item_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getSparePoolBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve SSH keys to be installed on the server during provisioning or an OS reload. +func (r Hardware_SecurityModule) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getSshKeys", nil, &r.Options, &resp) + return +} + +// Retrieve A server's remote management card used for statistics. +func (r Hardware_SecurityModule) GetStatisticsRemoteManagement() (resp datatypes.Hardware_Component_RemoteManagement, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getStatisticsRemoteManagement", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetStorageNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getStorageNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetTagReferences() (resp []datatypes.Tag_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getTagReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetTopLevelLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getTopLevelLocation", nil, &r.Options, &resp) + return +} + +// +// This method will query transaction history for a piece of hardware. +func (r Hardware_SecurityModule) GetTransactionHistory() (resp []datatypes.Provisioning_Version1_Transaction_History, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getTransactionHistory", nil, &r.Options, &resp) + return +} + +// Retrieve a list of upgradeable items available to this piece of hardware. Currently, getUpgradeItemPrices retrieves upgrades available for a server's memory, hard drives, network port speed, bandwidth allocation and GPUs. +func (r Hardware_SecurityModule) GetUpgradeItemPrices() (resp []datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getUpgradeItemPrices", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated upgrade request object, if any. +func (r Hardware_SecurityModule) GetUpgradeRequest() (resp datatypes.Product_Upgrade_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getUpgradeRequest", nil, &r.Options, &resp) + return +} + +// Retrieve The network device connected to a piece of hardware. 
+func (r Hardware_SecurityModule) GetUplinkHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getUplinkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the network component that is one level higher than a piece of hardware on the network infrastructure. +func (r Hardware_SecurityModule) GetUplinkNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getUplinkNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve An array containing a single string of custom user data for a hardware order. Max size is 16 kb. +func (r Hardware_SecurityModule) GetUserData() (resp []datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getUserData", nil, &r.Options, &resp) + return +} + +// Retrieve A list of users that have access to this computing instance. +func (r Hardware_SecurityModule) GetUsers() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getUsers", nil, &r.Options, &resp) + return +} + +// This method will return the list of block device template groups that are valid to the host. For instance, it will only retrieve FLEX images. +func (r Hardware_SecurityModule) GetValidBlockDeviceTemplateGroups(visibility *string) (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + params := []interface{}{ + visibility, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getValidBlockDeviceTemplateGroups", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis for a piece of hardware. +func (r Hardware_SecurityModule) GetVirtualChassis() (resp datatypes.Hardware_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualChassis", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis siblings for a piece of hardware. +func (r Hardware_SecurityModule) GetVirtualChassisSiblings() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualChassisSiblings", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware server's virtual servers. +func (r Hardware_SecurityModule) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's virtual host record. +func (r Hardware_SecurityModule) GetVirtualHost() (resp datatypes.Virtual_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualHost", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's virtual software licenses. +func (r Hardware_SecurityModule) GetVirtualLicenses() (resp []datatypes.Software_VirtualLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualLicenses", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the bandwidth allotment to which a piece of hardware belongs. 
+func (r Hardware_SecurityModule) GetVirtualRack() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualRack", nil, &r.Options, &resp) + return +} + +// Retrieve The id of the bandwidth allotment belonging to a piece of hardware. +func (r Hardware_SecurityModule) GetVirtualRackId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualRackId", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the bandwidth allotment belonging to a piece of hardware. +func (r Hardware_SecurityModule) GetVirtualRackName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualRackName", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's virtualization platform software. +func (r Hardware_SecurityModule) GetVirtualizationPlatform() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualizationPlatform", nil, &r.Options, &resp) + return +} + +// Retrieve a list of Windows updates available for a server from the local SoftLayer Windows Server Update Services (WSUS) server. Windows servers provisioned by SoftLayer are configured to use the local WSUS server via the private network by default. +func (r Hardware_SecurityModule) GetWindowsUpdateAvailableUpdates() (resp []datatypes.Container_Utility_Microsoft_Windows_UpdateServices_UpdateItem, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getWindowsUpdateAvailableUpdates", nil, &r.Options, &resp) + return +} + +// Retrieve a list of Windows updates installed on a server as reported by the local SoftLayer Windows Server Update Services (WSUS) server. Windows servers provisioned by SoftLayer are configured to use the local WSUS server via the private network by default. +func (r Hardware_SecurityModule) GetWindowsUpdateInstalledUpdates() (resp []datatypes.Container_Utility_Microsoft_Windows_UpdateServices_UpdateItem, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getWindowsUpdateInstalledUpdates", nil, &r.Options, &resp) + return +} + +// This method returns an update status record for this server. That record will specify if the server is missing updates, or has updates that must be reinstalled or require a reboot to go into effect. +func (r Hardware_SecurityModule) GetWindowsUpdateStatus() (resp datatypes.Container_Utility_Microsoft_Windows_UpdateServices_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getWindowsUpdateStatus", nil, &r.Options, &resp) + return +} + +// The '''importVirtualHost''' method attempts to import the host record for the virtualization platform running on a server. +func (r Hardware_SecurityModule) ImportVirtualHost() (resp datatypes.Virtual_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "importVirtualHost", nil, &r.Options, &resp) + return +} + +// Idera Bare Metal Server Restore is a backup agent designed specifically for making full system restores from backups made with Idera Server Backup. 
+func (r Hardware_SecurityModule) InitiateIderaBareMetalRestore() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "initiateIderaBareMetalRestore", nil, &r.Options, &resp) + return +} + +// R1Soft Bare Metal Server Restore is an R1Soft disk agent designed specifically for making full system restores from backups made with R1Soft CDP Server backup. +func (r Hardware_SecurityModule) InitiateR1SoftBareMetalRestore() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "initiateR1SoftBareMetalRestore", nil, &r.Options, &resp) + return +} + +// Issues a ping command and returns the success (true) or failure (false) of the ping command. +func (r Hardware_SecurityModule) IsBackendPingable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "isBackendPingable", nil, &r.Options, &resp) + return +} + +// Issues a ping command and returns the success (true) or failure (false) of the ping command. +func (r Hardware_SecurityModule) IsPingable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "isPingable", nil, &r.Options, &resp) + return +} + +// Determine if the server runs any version of the Microsoft Windows operating systems. Returns ''true'' if it does and ''false'' otherwise. +func (r Hardware_SecurityModule) IsWindowsServer() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "isWindowsServer", nil, &r.Options, &resp) + return +} + +// You can launch firmware updates by selecting from your server list. It will bring your server offline for approximately 20 minutes while the updates are in progress. +// +// In the event of a hardware failure during this test our datacenter engineers will be notified of the problem automatically. They will then replace any failed components to bring your server back online, and will be contacting you to ensure that impact on your server is minimal. +func (r Hardware_SecurityModule) MassFirmwareUpdate(hardwareIds []int, ipmi *bool, raidController *bool, bios *bool, harddrive *bool) (resp []datatypes.Container_Hardware_Server_Request, err error) { + params := []interface{}{ + hardwareIds, + ipmi, + raidController, + bios, + harddrive, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "massFirmwareUpdate", params, &r.Options, &resp) + return +} + +// Reloads current or customer specified operating system configuration. +// +// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server. +// +// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the server to the current specifications on record. +// +// The reload will take AT MINIMUM 66 minutes. 
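+//
+// A minimal usage sketch (not part of the generated bindings): it assumes an authenticated
+// *session.Session named sess, placeholder hardware ids, the sl helper package for pointer
+// values, and the package's GetHardwareSecurityModuleService constructor. Passing
+// sl.String("FORCE") skips the confirmation-token round trip described above.
+//
+//	svc := services.GetHardwareSecurityModuleService(sess)
+//	resp, err := svc.MassReloadOperatingSystem([]string{"1234567", "7654321"}, sl.String("FORCE"), nil)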
+func (r Hardware_SecurityModule) MassReloadOperatingSystem(hardwareIds []string, token *string, config *datatypes.Container_Hardware_Server_Configuration) (resp string, err error) { + params := []interface{}{ + hardwareIds, + token, + config, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "massReloadOperatingSystem", params, &r.Options, &resp) + return +} + +// The ability to place multiple bare metal servers in a state where they are powered down and ports closed yet still allocated to the customer as a part of the Spare Pool program. +func (r Hardware_SecurityModule) MassSparePool(hardwareIds []string, action *string, newOrder *bool) (resp []datatypes.Container_Hardware_Server_Request, err error) { + params := []interface{}{ + hardwareIds, + action, + newOrder, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "massSparePool", params, &r.Options, &resp) + return +} + +// Issues a ping command to the server and returns the ping response. +func (r Hardware_SecurityModule) Ping() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "ping", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule) PopulateServerRam(ramSerialString *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + ramSerialString, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "populateServerRam", params, &r.Options, &resp) + return +} + +// Power off then power on the server via powerstrip. The power cycle command is equivalent to unplugging the server from the powerstrip and then plugging the server back into the powerstrip. This should only be used as a last resort. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_SecurityModule) PowerCycle() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "powerCycle", nil, &r.Options, &resp) + return +} + +// This method will power off the server via the server's remote management card. +func (r Hardware_SecurityModule) PowerOff() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "powerOff", nil, &r.Options, &resp) + return +} + +// Power on server via its remote management card. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_SecurityModule) PowerOn() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "powerOn", nil, &r.Options, &resp) + return +} + +// Attempts to reboot the server by issuing a reset (soft reboot) command to the server's remote management card. If the reset (soft reboot) attempt is unsuccessful, a power cycle command will be issued via the powerstrip. The power cycle command is equivalent to unplugging the server from the powerstrip and then plugging the server back into the powerstrip. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. 
+func (r Hardware_SecurityModule) RebootDefault() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "rebootDefault", nil, &r.Options, &resp) + return +} + +// Reboot the server by issuing a cycle command to the server's remote management card. This is equivalent to pressing the 'Reset' button on the server. This command is issued immediately and will not wait for processes to shut down. After this command is issued, the server may take a few moments to boot up as the server may run system disk checks. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_SecurityModule) RebootHard() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "rebootHard", nil, &r.Options, &resp) + return +} + +// Reboot the server by issuing a reset command to the server's remote management card. This is a graceful reboot. The server will allow all processes to shut down gracefully before rebooting. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_SecurityModule) RebootSoft() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "rebootSoft", nil, &r.Options, &resp) + return +} + +// Reloads current operating system configuration. +// +// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server. +// +// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the server to the current specifications on record. +// +// The reload will take AT MINIMUM 66 minutes. +func (r Hardware_SecurityModule) ReloadCurrentOperatingSystemConfiguration(token *string) (resp string, err error) { + params := []interface{}{ + token, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "reloadCurrentOperatingSystemConfiguration", params, &r.Options, &resp) + return +} + +// Reloads current or customer specified operating system configuration. +// +// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server. +// +// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the server to the current specifications on record. +// +// The reload will take AT MINIMUM 66 minutes. 
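+//
+// A minimal usage sketch of the confirmation flow (not part of the generated bindings):
+// it assumes an authenticated *session.Session named sess, a placeholder server id in
+// hardwareID, and the package's GetHardwareSecurityModuleService constructor.
+//
+//	svc := services.GetHardwareSecurityModuleService(sess).Id(hardwareID)
+//	token, err := svc.ReloadOperatingSystem(nil, nil) // first call returns a confirmation token
+//	if err == nil {
+//		_, err = svc.ReloadOperatingSystem(&token, nil) // second call, made within 10 minutes, confirms the reload
+//	}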
+func (r Hardware_SecurityModule) ReloadOperatingSystem(token *string, config *datatypes.Container_Hardware_Server_Configuration) (resp string, err error) { + params := []interface{}{ + token, + config, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "reloadOperatingSystem", params, &r.Options, &resp) + return +} + +// This method is used to remove access to a SoftLayer_Network_Storage volume that supports host- or network-level access control. +func (r Hardware_SecurityModule) RemoveAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "removeAccessToNetworkStorage", params, &r.Options, &resp) + return +} + +// This method is used to remove access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control. +func (r Hardware_SecurityModule) RemoveAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "removeAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// You can launch a new Passmark hardware test by selecting from your server list. It will bring your server offline for approximately 20 minutes while the testing is in progress, and will publish a certificate with the results to your hardware details page. +// +// While the hard drives are tested for the initial deployment, the Passmark Certificate utility will not test the hard drives on your live server. This is to ensure that no data is overwritten. If you would like to test the server's hard drives, you can have the full Passmark suite installed to your server free of charge through a new Support ticket. +// +// While the test itself does not overwrite any data on the server, it is recommended that you make full off-server backups of all data prior to launching the test. The Passmark hardware test is designed to force any latent hardware issues to the surface, so hardware failure is possible. +// +// In the event of a hardware failure during this test our datacenter engineers will be notified of the problem automatically. They will then replace any failed components to bring your server back online, and will be contacting you to ensure that impact on your server is minimal. +func (r Hardware_SecurityModule) RunPassmarkCertificationBenchmark() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "runPassmarkCertificationBenchmark", nil, &r.Options, &resp) + return +} + +// Changes the password that we have stored in our database for a server's Operating System +func (r Hardware_SecurityModule) SetOperatingSystemPassword(newPassword *string) (resp bool, err error) { + params := []interface{}{ + newPassword, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "setOperatingSystemPassword", params, &r.Options, &resp) + return +} + +// Sets the private network interface speed to the new speed. Speed values can only be 0 (Disconnect), 10, 100, 1000, and 10000. The new speed must be equal to or less than the max speed of the interface. +// +// It will take less than a minute to update the switch port speed. The server uplink will not be operational again until the server interface speed is updated. 
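+//
+// A minimal usage sketch (not part of the generated bindings), assuming an authenticated
+// *session.Session named sess, a placeholder server id in hardwareID, the sl helper package
+// for pointer values, and the package's GetHardwareSecurityModuleService constructor:
+//
+//	svc := services.GetHardwareSecurityModuleService(sess).Id(hardwareID)
+//	ok, err := svc.SetPrivateNetworkInterfaceSpeed(sl.Int(1000)) // 1000 Mbps; sl.Int(0) disconnects the port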
+func (r Hardware_SecurityModule) SetPrivateNetworkInterfaceSpeed(newSpeed *int) (resp bool, err error) { + params := []interface{}{ + newSpeed, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "setPrivateNetworkInterfaceSpeed", params, &r.Options, &resp) + return +} + +// Sets the public network interface speed to the new speed. Speed values can only be 0 (Disconnect), 10, 100, 1000, and 10000. The new speed must be equal to or less than the max speed of the interface. +// +// It will take less than a minute to update the switch port speed. The server uplink will not be operational again until the server interface speed is updated. +func (r Hardware_SecurityModule) SetPublicNetworkInterfaceSpeed(newSpeed *int) (resp bool, err error) { + params := []interface{}{ + newSpeed, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "setPublicNetworkInterfaceSpeed", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule) SetTags(tags *string) (resp bool, err error) { + params := []interface{}{ + tags, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "setTags", params, &r.Options, &resp) + return +} + +// Sets the data that will be written to the configuration drive. +func (r Hardware_SecurityModule) SetUserMetadata(metadata []string) (resp []datatypes.Hardware_Attribute, err error) { + params := []interface{}{ + metadata, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "setUserMetadata", params, &r.Options, &resp) + return +} + +// Shuts down the private network port +func (r Hardware_SecurityModule) ShutdownPrivatePort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "shutdownPrivatePort", nil, &r.Options, &resp) + return +} + +// Shuts down the public network port +func (r Hardware_SecurityModule) ShutdownPublicPort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "shutdownPublicPort", nil, &r.Options, &resp) + return +} + +// The ability to place bare metal servers in a state where they are powered down, and ports closed yet still allocated to the customer as a part of the Spare Pool program. +func (r Hardware_SecurityModule) SparePool(action *string, newOrder *bool) (resp bool, err error) { + params := []interface{}{ + action, + newOrder, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "sparePool", params, &r.Options, &resp) + return +} + +// This method will update the root IPMI password on this SoftLayer_Hardware. 
+func (r Hardware_SecurityModule) UpdateIpmiPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "updateIpmiPassword", params, &r.Options, &resp) + return +} + +// Validates a collection of partitions for an operating system +func (r Hardware_SecurityModule) ValidatePartitionsForOperatingSystem(operatingSystem *datatypes.Software_Description, partitions []datatypes.Hardware_Component_Partition) (resp bool, err error) { + params := []interface{}{ + operatingSystem, + partitions, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "validatePartitionsForOperatingSystem", params, &r.Options, &resp) + return +} + +// no documentation yet +type Hardware_SecurityModule750 struct { + Session *session.Session + Options sl.Options +} + +// GetHardwareSecurityModule750Service returns an instance of the Hardware_SecurityModule750 SoftLayer service +func GetHardwareSecurityModule750Service(sess *session.Session) Hardware_SecurityModule750 { + return Hardware_SecurityModule750{Session: sess} +} + +func (r Hardware_SecurityModule750) Id(id int) Hardware_SecurityModule750 { + r.Options.Id = &id + return r +} + +func (r Hardware_SecurityModule750) Mask(mask string) Hardware_SecurityModule750 { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Hardware_SecurityModule750) Filter(filter string) Hardware_SecurityModule750 { + r.Options.Filter = filter + return r +} + +func (r Hardware_SecurityModule750) Limit(limit int) Hardware_SecurityModule750 { + r.Options.Limit = &limit + return r +} + +func (r Hardware_SecurityModule750) Offset(offset int) Hardware_SecurityModule750 { + r.Options.Offset = &offset + return r +} + +// Activates the private network port +func (r Hardware_SecurityModule750) ActivatePrivatePort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "activatePrivatePort", nil, &r.Options, &resp) + return +} + +// Activates the public network port +func (r Hardware_SecurityModule750) ActivatePublicPort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "activatePublicPort", nil, &r.Options, &resp) + return +} + +// This method is used to allow access to a SoftLayer_Network_Storage volume that supports host- or network-level access control. +func (r Hardware_SecurityModule750) AllowAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "allowAccessToNetworkStorage", params, &r.Options, &resp) + return +} + +// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control. 
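+//
+// A minimal usage sketch (not part of the generated bindings), assuming an authenticated
+// *session.Session named sess, placeholder hardware and volume ids, and the sl helper
+// package for pointer values:
+//
+//	svc := services.GetHardwareSecurityModule750Service(sess).Id(hardwareID)
+//	volumes := []datatypes.Network_Storage{{Id: sl.Int(111111)}, {Id: sl.Int(222222)}}
+//	ok, err := svc.AllowAccessToNetworkStorageList(volumes)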
+func (r Hardware_SecurityModule750) AllowAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "allowAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// The Rescue Kernel is designed to provide you with the ability to bring a server online in order to troubleshoot system problems that would normally only be resolved by an OS Reload. The correct Rescue Kernel will be selected based upon the currently installed operating system. When the rescue kernel process is initiated, the server will shut down and reboot onto the public network with the same IPs assigned to the server to allow for remote connections. It will bring your server offline for approximately 10 minutes while the rescue is in progress. The root/administrator password will be the same as what is listed in the portal for the server. +func (r Hardware_SecurityModule750) BootToRescueLayer(noOsBootEnvironment *string) (resp bool, err error) { + params := []interface{}{ + noOsBootEnvironment, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "bootToRescueLayer", params, &r.Options, &resp) + return +} + +// Captures a Flex Image of the hard disk on the physical machine, based on the capture template parameter. Returns the image template group containing the disk image. +func (r Hardware_SecurityModule750) CaptureImage(captureTemplate *datatypes.Container_Disk_Image_Capture_Template) (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + params := []interface{}{ + captureTemplate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "captureImage", params, &r.Options, &resp) + return +} + +// Closes the specified monitoring alarm. +func (r Hardware_SecurityModule750) CloseAlarm(alarmId *string) (resp bool, err error) { + params := []interface{}{ + alarmId, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "closeAlarm", params, &r.Options, &resp) + return +} + +// You can launch firmware updates by selecting from your server list. It will bring your server offline for approximately 20 minutes while the updates are in progress. +// +// In the event of a hardware failure during this test our datacenter engineers will be notified of the problem automatically. They will then replace any failed components to bring your server back online, and will be contacting you to ensure that impact on your server is minimal. +func (r Hardware_SecurityModule750) CreateFirmwareUpdateTransaction(ipmi *int, raidController *int, bios *int, harddrive *int) (resp bool, err error) { + params := []interface{}{ + ipmi, + raidController, + bios, + harddrive, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "createFirmwareUpdateTransaction", params, &r.Options, &resp) + return +} + +// +// +// createObject() enables the creation of servers on an account. This +// method is a simplified alternative to interacting with the ordering system directly. +// +// +// In order to create a server, a template object must be sent in with a few required +// values. +// +// +// When this method returns, an order will have been placed for a server of the specified configuration. +// +// +// To determine when the server is available you can poll the server via [[SoftLayer_Hardware/getObject|getObject]], +// checking the provisionDate property. 
+// When provisionDate is not null, the server will be ready. Be sure to use the globalIdentifier +// as your initialization parameter. +// +// +// Warning: Servers created via this method will incur charges on your account. For testing input parameters see [[SoftLayer_Hardware/generateOrderTemplate|generateOrderTemplate]]. +// +// +// Input - [[SoftLayer_Hardware (type)|SoftLayer_Hardware]] +//
+// * hostname
+//   Hostname for the server.
+//   - Required
+//   - Type - string
+//
+// * domain
+//   Domain for the server.
+//   - Required
+//   - Type - string
+//
+// * processorCoreAmount
+//   The number of logical CPU cores to allocate.
+//   - Required
+//   - Type - int
+//   - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// * memoryCapacity
+//   The amount of memory to allocate in gigabytes.
+//   - Required
+//   - Type - int
+//   - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// * hourlyBillingFlag
+//   Specifies the billing type for the server.
+//   - Required
+//   - Type - boolean
+//   - When true the server will be billed on hourly usage, otherwise it will be billed on a monthly basis.
+//
+// * operatingSystemReferenceCode
+//   An identifier for the operating system to provision the server with.
+//   - Required
+//   - Type - string
+//   - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// * datacenter.name
+//   Specifies which datacenter the server is to be provisioned in.
+//   - Required
+//   - Type - string
+//   - The datacenter property is a [[SoftLayer_Location (type)|location]] structure with the name field set.
+//   - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//   {
+//       "datacenter": {
+//           "name": "dal05"
+//       }
+//   }
+//
+// * networkComponents.maxSpeed
+//   Specifies the connection speed for the server's network components.
+//   - Optional
+//   - Type - int
+//   - Default - The highest available zero cost port speed will be used.
+//   - Description - The networkComponents property is an array with a single [[SoftLayer_Network_Component (type)|network component]] structure. The maxSpeed property must be set to specify the network uplink speed, in megabits per second, of the server.
+//   - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//   {
+//       "networkComponents": [
+//           {
+//               "maxSpeed": 1000
+//           }
+//       ]
+//   }
+//
+// * networkComponents.redundancyEnabledFlag
+//   Specifies whether or not the server's network components should be in redundancy groups.
+//   - Optional
+//   - Type - bool
+//   - Default - false
+//   - Description - The networkComponents property is an array with a single [[SoftLayer_Network_Component (type)|network component]] structure. When the redundancyEnabledFlag property is true the server's network components will be in redundancy groups.
+//
+//   {
+//       "networkComponents": [
+//           {
+//               "redundancyEnabledFlag": false
+//           }
+//       ]
+//   }
+//
+// * privateNetworkOnlyFlag
+//   Specifies whether or not the server only has access to the private network.
+//   - Optional
+//   - Type - boolean
+//   - Default - false
+//   - When true this flag specifies that a server is to only have access to the private network.
+//
+// * primaryNetworkComponent.networkVlan.id
+//   Specifies the network vlan which is to be used for the frontend interface of the server.
+//   - Optional
+//   - Type - int
+//   - Description - The primaryNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]] structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure. The id property must be set to specify the frontend network vlan of the server.
+//
+//   {
+//       "primaryNetworkComponent": {
+//           "networkVlan": {
+//               "id": 1
+//           }
+//       }
+//   }
+//
+// * primaryBackendNetworkComponent.networkVlan.id
+//   Specifies the network vlan which is to be used for the backend interface of the server.
+//   - Optional
+//   - Type - int
+//   - Description - The primaryBackendNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]] structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure. The id property must be set to specify the backend network vlan of the server.
+//
+//   {
+//       "primaryBackendNetworkComponent": {
+//           "networkVlan": {
+//               "id": 2
+//           }
+//       }
+//   }
+//
+// * fixedConfigurationPreset.keyName
+//   - Optional
+//   - Type - string
+//   - Description - The fixedConfigurationPreset property is a [[SoftLayer_Product_Package_Preset (type)|fixed configuration preset]] structure. The keyName property must be set to specify the preset to use.
+//   - If a fixed configuration preset is used, the processorCoreAmount, memoryCapacity and hardDrives properties must not be set.
+//   - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//   {
+//       "fixedConfigurationPreset": {
+//           "keyName": "SOME_KEY_NAME"
+//       }
+//   }
+//
+// * userData.value
+//   Arbitrary data to be made available to the server.
+//   - Optional
+//   - Type - string
+//   - Description - The userData property is an array with a single [[SoftLayer_Hardware_Attribute (type)|attribute]] structure with the value property set to an arbitrary value.
+//   - This value can be retrieved via the [[SoftLayer_Resource_Metadata/getUserMetadata|getUserMetadata]] method from a request originating from the server. This is primarily useful for providing data to software that may be on the server and configured to execute upon first boot.
+//
+//   {
+//       "userData": [
+//           {
+//               "value": "someValue"
+//           }
+//       ]
+//   }
+//
+// * hardDrives
+//   Hard drive settings for the server.
+//   - Optional
+//   - Type - SoftLayer_Hardware_Component
+//   - Default - The largest available capacity for a zero cost primary disk will be used.
+//   - Description - The hardDrives property is an array of [[SoftLayer_Hardware_Component (type)|hardware component]] structures.
+//   - Each hard drive must specify the capacity property.
+//   - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//   {
+//       "hardDrives": [
+//           {
+//               "capacity": 500
+//           }
+//       ]
+//   }
+//
+// * sshKeys
+//   SSH keys to install on the server upon provisioning.
+//   - Optional
+//   - Type - array of [[SoftLayer_Security_Ssh_Key (type)|SoftLayer_Security_Ssh_Key]]
+//   - Description - The sshKeys property is an array of [[SoftLayer_Security_Ssh_Key (type)|SSH Key]] structures with the id property set to the value of an existing SSH key.
+//   - To create a new SSH key, call [[SoftLayer_Security_Ssh_Key/createObject|createObject]] on the [[SoftLayer_Security_Ssh_Key]] service.
+//   - To obtain a list of existing SSH keys, call [[SoftLayer_Account/getSshKeys|getSshKeys]] on the [[SoftLayer_Account]] service.
+//
+//   {
+//       "sshKeys": [
+//           {
+//               "id": 123
+//           }
+//       ]
+//   }
+//
+// * postInstallScriptUri
+//   Specifies the uri location of the script to be downloaded and run after installation is complete.
+//   - Optional
+//   - Type - string
+//
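+//
+// A minimal Go sketch of the same order shown in the REST example below (not part of the
+// generated bindings). It assumes an authenticated *session.Session named sess and the sl
+// helper package for pointer values; fields promoted from the embedded Hardware type are
+// set directly, and the remaining required properties listed above (for example
+// processorCoreAmount and memoryCapacity) would be set the same way.
+//
+//	template := datatypes.Hardware_SecurityModule750{}
+//	template.Hostname = sl.String("host1")
+//	template.Domain = sl.String("example.com")
+//	template.HourlyBillingFlag = sl.Bool(true)
+//	template.OperatingSystemReferenceCode = sl.String("UBUNTU_LATEST")
+//	template.Datacenter = &datatypes.Location{Name: sl.String("dal05")}
+//
+//	svc := services.GetHardwareSecurityModule750Service(sess)
+//	order, err := svc.CreateObject(&template)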
+//
+// REST Example
+//
    +// curl -X POST -d '{ +// "parameters":[ +// { +// "hostname": "host1", +// "domain": "example.com", +// "processorCoreAmount": 2, +// "memoryCapacity": 2, +// "hourlyBillingFlag": true, +// "operatingSystemReferenceCode": "UBUNTU_LATEST" +// } +// ] +// }' https://api.softlayer.com/rest/v3/SoftLayer_Hardware.json +// +// HTTP/1.1 201 Created +// Location: https://api.softlayer.com/rest/v3/SoftLayer_Hardware/f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5/getObject +// +// +// { +// "accountId": 232298, +// "bareMetalInstanceFlag": null, +// "domain": "example.com", +// "hardwareStatusId": null, +// "hostname": "host1", +// "id": null, +// "serviceProviderId": null, +// "serviceProviderResourceId": null, +// "globalIdentifier": "f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5", +// "hourlyBillingFlag": true, +// "memoryCapacity": 2, +// "operatingSystemReferenceCode": "UBUNTU_LATEST", +// "processorCoreAmount": 2 +// } +// +func (r Hardware_SecurityModule750) CreateObject(templateObject *datatypes.Hardware_SecurityModule750) (resp datatypes.Hardware_SecurityModule750, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule750) CreatePostSoftwareInstallTransaction(installCodes []string, returnBoolean *bool) (resp bool, err error) { + params := []interface{}{ + installCodes, + returnBoolean, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "createPostSoftwareInstallTransaction", params, &r.Options, &resp) + return +} + +// +// This method will cancel a server effective immediately. For servers billed hourly, the charges will stop immediately after the method returns. +func (r Hardware_SecurityModule750) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "deleteObject", nil, &r.Options, &resp) + return +} + +// Delete software component passwords. +func (r Hardware_SecurityModule750) DeleteSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "deleteSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Edit a server's properties +func (r Hardware_SecurityModule750) EditObject(templateObject *datatypes.Hardware_Server) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "editObject", params, &r.Options, &resp) + return +} + +// Edit the properties of a software component password such as the username, password, and notes. +func (r Hardware_SecurityModule750) EditSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "editSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Download and run remote script from uri on the hardware. 
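+//
+// A minimal usage sketch (not part of the generated bindings), assuming an authenticated
+// *session.Session named sess, a placeholder server id in hardwareID, and a placeholder
+// script URL:
+//
+//	svc := services.GetHardwareSecurityModule750Service(sess).Id(hardwareID)
+//	err := svc.ExecuteRemoteScript(sl.String("https://example.com/post-install.sh"))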
+func (r Hardware_SecurityModule750) ExecuteRemoteScript(uri *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + uri, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "executeRemoteScript", params, &r.Options, &resp) + return +} + +// The '''findByIpAddress''' method finds hardware using its primary public or private IP address. IP addresses that have a secondary subnet tied to the hardware will not return the hardware - alternate means of locating the hardware must be used (see '''Associated Methods'''). If no hardware is found, no errors are generated and no data is returned. +func (r Hardware_SecurityModule750) FindByIpAddress(ipAddress *string) (resp datatypes.Hardware, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "findByIpAddress", params, &r.Options, &resp) + return +} + +// +// Obtain an [[SoftLayer_Container_Product_Order_Hardware_Server (type)|order container]] that can be sent to [[SoftLayer_Product_Order/verifyOrder|verifyOrder]] or [[SoftLayer_Product_Order/placeOrder|placeOrder]]. +// +// +// This is primarily useful when there is a necessity to confirm the price which will be charged for an order. +// +// +// See [[SoftLayer_Hardware/createObject|createObject]] for specifics on the requirements of the template object parameter. +func (r Hardware_SecurityModule750) GenerateOrderTemplate(templateObject *datatypes.Hardware) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "generateOrderTemplate", params, &r.Options, &resp) + return +} + +// Retrieve The account associated with a piece of hardware. +func (r Hardware_SecurityModule750) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's active physical components. +func (r Hardware_SecurityModule750) GetActiveComponents() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getActiveComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a server's attached network firewall. +func (r Hardware_SecurityModule750) GetActiveNetworkFirewallBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getActiveNetworkFirewallBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's active network monitoring incidents. +func (r Hardware_SecurityModule750) GetActiveNetworkMonitorIncident() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getActiveNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule750) GetActiveTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getActiveTickets", nil, &r.Options, &resp) + return +} + +// Retrieve Transaction currently running for server. 
+func (r Hardware_SecurityModule750) GetActiveTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getActiveTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve Any active transaction(s) that are currently running for the server (example: os reload). +func (r Hardware_SecurityModule750) GetActiveTransactions() (resp []datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getActiveTransactions", nil, &r.Options, &resp) + return +} + +// The '''getAlarmHistory''' method retrieves a detailed history for the monitoring alarm. When calling this method, a start and end date for the history to be retrieved must be entered. +func (r Hardware_SecurityModule750) GetAlarmHistory(startDate *datatypes.Time, endDate *datatypes.Time, alarmId *string) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + alarmId, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getAlarmHistory", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule750) GetAllPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getAllPowerComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Allowed_Host information to connect this server to Network Storage volumes that require access control lists. +func (r Hardware_SecurityModule750) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getAllowedHost", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. +func (r Hardware_SecurityModule750) GetAllowedNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getAllowedNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects whose replicas this SoftLayer_Hardware has access to. +func (r Hardware_SecurityModule750) GetAllowedNetworkStorageReplicas() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getAllowedNetworkStorageReplicas", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding an antivirus/spyware software component object. +func (r Hardware_SecurityModule750) GetAntivirusSpywareSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getAntivirusSpywareSoftwareComponent", nil, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that are authorized to this SoftLayer_Hardware. +func (r Hardware_SecurityModule750) GetAttachedNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getAttachedNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's specific attributes. 
+func (r Hardware_SecurityModule750) GetAttributes() (resp []datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve An object that stores the maximum level for the monitoring query types and response types. +func (r Hardware_SecurityModule750) GetAvailableMonitoring() (resp []datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getAvailableMonitoring", nil, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that can be authorized to this SoftLayer_Hardware. +func (r Hardware_SecurityModule750) GetAvailableNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getAvailableNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve The average daily total bandwidth usage for the current billing cycle. +func (r Hardware_SecurityModule750) GetAverageDailyBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getAverageDailyBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The average daily private bandwidth usage for the current billing cycle. +func (r Hardware_SecurityModule750) GetAverageDailyPrivateBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getAverageDailyPrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The average daily public bandwidth usage for the current billing cycle. +func (r Hardware_SecurityModule750) GetAverageDailyPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getAverageDailyPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Use this method to return an array of private bandwidth utilization records between a given date range. +// +// This method represents the NEW version of getBackendBandwidthUse +func (r Hardware_SecurityModule750) GetBackendBandwidthUsage(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBackendBandwidthUsage", params, &r.Options, &resp) + return +} + +// Use this method to return an array of private bandwidth utilization records between a given date range. +func (r Hardware_SecurityModule750) GetBackendBandwidthUse(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Network_Bandwidth_Version1_Usage_Detail, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBackendBandwidthUse", params, &r.Options, &resp) + return +} + +// The '''getBackendIncomingBandwidth''' method retrieves the amount of incoming private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes. 
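+//
+// A minimal usage sketch (not part of the generated bindings), assuming an authenticated
+// *session.Session named sess, a placeholder server id in hardwareID, the standard library
+// time package, and that datatypes.Time wraps time.Time as elsewhere in this SDK; only the
+// date portion of each value is significant:
+//
+//	svc := services.GetHardwareSecurityModule750Service(sess).Id(hardwareID)
+//	start := datatypes.Time{Time: time.Date(2020, time.September, 1, 0, 0, 0, 0, time.UTC)}
+//	end := datatypes.Time{Time: time.Date(2020, time.September, 30, 0, 0, 0, 0, time.UTC)}
+//	gigabytes, err := svc.GetBackendIncomingBandwidth(&start, &end)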
+func (r Hardware_SecurityModule750) GetBackendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBackendIncomingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's back-end or private network components. +func (r Hardware_SecurityModule750) GetBackendNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBackendNetworkComponents", nil, &r.Options, &resp) + return +} + +// The '''getBackendOutgoingBandwidth''' method retrieves the amount of outgoing private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes. +func (r Hardware_SecurityModule750) GetBackendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBackendOutgoingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A hardware's backend or private router. +func (r Hardware_SecurityModule750) GetBackendRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBackendRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth (measured in GB). +func (r Hardware_SecurityModule750) GetBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted detail record. Allotment details link bandwidth allocation with allotments. +func (r Hardware_SecurityModule750) GetBandwidthAllotmentDetail() (resp datatypes.Network_Bandwidth_Version1_Allotment_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBandwidthAllotmentDetail", nil, &r.Options, &resp) + return +} + +// Retrieve a collection of bandwidth data from an individual public or private network tracking object. Data is ideal if you wish to employ your own traffic storage and graphing systems. +func (r Hardware_SecurityModule750) GetBandwidthForDateRange(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBandwidthForDateRange", params, &r.Options, &resp) + return +} + +// Use this method when needing a bandwidth image for a single server. It will gather the correct input parameters for the generic graphing utility automatically based on the snapshot specified. Use the $draw flag to suppress the generation of the actual binary PNG image. 
+func (r Hardware_SecurityModule750) GetBandwidthImage(networkType *string, snapshotRange *string, draw *bool, dateSpecified *datatypes.Time, dateSpecifiedEnd *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + networkType, + snapshotRange, + draw, + dateSpecified, + dateSpecifiedEnd, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBandwidthImage", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's benchmark certifications. +func (r Hardware_SecurityModule750) GetBenchmarkCertifications() (resp []datatypes.Hardware_Benchmark_Certification, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBenchmarkCertifications", nil, &r.Options, &resp) + return +} + +// Retrieve The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to. +func (r Hardware_SecurityModule750) GetBillingCycleBandwidthUsage() (resp []datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBillingCycleBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The raw private bandwidth usage data for the current billing cycle. +func (r Hardware_SecurityModule750) GetBillingCyclePrivateBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBillingCyclePrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The raw public bandwidth usage data for the current billing cycle. +func (r Hardware_SecurityModule750) GetBillingCyclePublicBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBillingCyclePublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for a server. +func (r Hardware_SecurityModule750) GetBillingItem() (resp datatypes.Billing_Item_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that a billing item exists. +func (r Hardware_SecurityModule750) GetBillingItemFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBillingItemFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Determine if BIOS password should be left as null. +func (r Hardware_SecurityModule750) GetBiosPasswordNullFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBiosPasswordNullFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether the hardware is ineligible for cancellation because it is disconnected. +func (r Hardware_SecurityModule750) GetBlockCancelBecauseDisconnectedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBlockCancelBecauseDisconnectedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Status indicating whether or not a piece of hardware has business continuance insurance. +func (r Hardware_SecurityModule750) GetBusinessContinuanceInsuranceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getBusinessContinuanceInsuranceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Child hardware. 
+func (r Hardware_SecurityModule750) GetChildrenHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getChildrenHardware", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule750) GetComponentDetailsXML() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getComponentDetailsXML", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's components. +func (r Hardware_SecurityModule750) GetComponents() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getComponents", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule750) GetContainsSolidStateDrivesFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getContainsSolidStateDrivesFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A continuous data protection/server backup software component object. +func (r Hardware_SecurityModule750) GetContinuousDataProtectionSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getContinuousDataProtectionSoftwareComponent", nil, &r.Options, &resp) + return +} + +// Retrieve A server's control panel. +func (r Hardware_SecurityModule750) GetControlPanel() (resp datatypes.Software_Component_ControlPanel, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getControlPanel", nil, &r.Options, &resp) + return +} + +// Retrieve The total cost of a server, measured in US Dollars ($USD). +func (r Hardware_SecurityModule750) GetCost() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getCost", nil, &r.Options, &resp) + return +} + +// +// There are many options that may be provided while ordering a server; this method can be used to determine what these options are. +// +// +// Detailed information on the return value can be found on the data type page for [[SoftLayer_Container_Hardware_Configuration (type)]]. +func (r Hardware_SecurityModule750) GetCreateObjectOptions() (resp datatypes.Container_Hardware_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getCreateObjectOptions", nil, &r.Options, &resp) + return +} + +// Retrieve An object that provides commonly used bandwidth summary components for the current billing cycle. +func (r Hardware_SecurityModule750) GetCurrentBandwidthSummary() (resp datatypes.Metric_Tracking_Object_Bandwidth_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getCurrentBandwidthSummary", nil, &r.Options, &resp) + return +} + +// Attempt to retrieve the file associated with the current benchmark certification result, if such a file exists. If there is no file for this benchmark certification result, calling this method throws an exception. +func (r Hardware_SecurityModule750) GetCurrentBenchmarkCertificationResultFile() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getCurrentBenchmarkCertificationResultFile", nil, &r.Options, &resp) + return +} + +// Retrieve The current billable public outbound bandwidth for this hardware for the current billing cycle. 
+func (r Hardware_SecurityModule750) GetCurrentBillableBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getCurrentBillableBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Get the billing detail for this instance for the current billing period. This does not include bandwidth usage.
+func (r Hardware_SecurityModule750) GetCurrentBillingDetail() (resp []datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getCurrentBillingDetail", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getCurrentBillingTotal''' method retrieves the total bill amount in US Dollars ($) for the current billing period. In addition to the total bill amount, the billing detail also includes all bandwidth used up to the point the method is called on the piece of hardware.
+func (r Hardware_SecurityModule750) GetCurrentBillingTotal() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getCurrentBillingTotal", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve bandwidth graph by date.
+func (r Hardware_SecurityModule750) GetCustomBandwidthDataByDate(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) {
+	params := []interface{}{
+		graphData,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getCustomBandwidthDataByDate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates if a server has a Customer Installed OS.
+func (r Hardware_SecurityModule750) GetCustomerInstalledOperatingSystemFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getCustomerInstalledOperatingSystemFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates if a server is a customer owned device.
+func (r Hardware_SecurityModule750) GetCustomerOwnedFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getCustomerOwnedFlag", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getDailyAverage''' method calculates the average daily network traffic used by the selected server. Using the required parameter ''dateTime'' to enter a start and end date, the user retrieves this average, measured in gigabytes (GB), for the specified date range. When entering parameters, only the month, day and year are required - time entries are omitted as this method defaults the time to midnight in order to account for the entire day.
+func (r Hardware_SecurityModule750) GetDailyAverage(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getDailyAverage", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the datacenter in which a piece of hardware resides.
+func (r Hardware_SecurityModule750) GetDatacenter() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getDatacenter", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The name of the datacenter in which a piece of hardware resides.
+func (r Hardware_SecurityModule750) GetDatacenterName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getDatacenterName", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Number of day(s) a server has been in spare pool.
+func (r Hardware_SecurityModule750) GetDaysInSparePool() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getDaysInSparePool", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware that has uplink network connections to a piece of hardware. +func (r Hardware_SecurityModule750) GetDownlinkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getDownlinkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware that has uplink network connections to a piece of hardware. +func (r Hardware_SecurityModule750) GetDownlinkNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getDownlinkNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all servers attached to a piece of network hardware. +func (r Hardware_SecurityModule750) GetDownlinkServers() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getDownlinkServers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all virtual guests attached to a piece of network hardware. +func (r Hardware_SecurityModule750) GetDownlinkVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getDownlinkVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware downstream from a network device. +func (r Hardware_SecurityModule750) GetDownstreamHardwareBindings() (resp []datatypes.Network_Component_Uplink_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getDownstreamHardwareBindings", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware downstream from the selected piece of hardware. +func (r Hardware_SecurityModule750) GetDownstreamNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getDownstreamNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware with monitoring warnings or errors that are downstream from the selected piece of hardware. +func (r Hardware_SecurityModule750) GetDownstreamNetworkHardwareWithIncidents() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getDownstreamNetworkHardwareWithIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all servers attached downstream to a piece of network hardware. +func (r Hardware_SecurityModule750) GetDownstreamServers() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getDownstreamServers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all virtual guests attached to a piece of network hardware. +func (r Hardware_SecurityModule750) GetDownstreamVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getDownstreamVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The drive controllers contained within a piece of hardware. 
+func (r Hardware_SecurityModule750) GetDriveControllers() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getDriveControllers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated EVault network storage service account. +func (r Hardware_SecurityModule750) GetEvaultNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getEvaultNetworkStorage", nil, &r.Options, &resp) + return +} + +// Get the subnets associated with this server that are protectable by a network component firewall. +func (r Hardware_SecurityModule750) GetFirewallProtectableSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getFirewallProtectableSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's firewall services. +func (r Hardware_SecurityModule750) GetFirewallServiceComponent() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getFirewallServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Defines the fixed components in a fixed configuration bare metal server. +func (r Hardware_SecurityModule750) GetFixedConfigurationPreset() (resp datatypes.Product_Package_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getFixedConfigurationPreset", nil, &r.Options, &resp) + return +} + +// Use this method to return an array of public bandwidth utilization records between a given date range. +// +// This method represents the NEW version of getFrontendBandwidthUse +func (r Hardware_SecurityModule750) GetFrontendBandwidthUsage(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getFrontendBandwidthUsage", params, &r.Options, &resp) + return +} + +// Use this method to return an array of public bandwidth utilization records between a given date range. +func (r Hardware_SecurityModule750) GetFrontendBandwidthUse(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Network_Bandwidth_Version1_Usage_Detail, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getFrontendBandwidthUse", params, &r.Options, &resp) + return +} + +// The '''getFrontendIncomingBandwidth''' method retrieves the amount of incoming public network traffic used by a server between the given start and end date parameters. When entering the ''dateTime'' parameter, only the month, day and year of the start and end dates are required - the time (hour, minute and second) are set to midnight by default and cannot be changed. The amount of bandwidth retrieved is measured in gigabytes (GB). +func (r Hardware_SecurityModule750) GetFrontendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getFrontendIncomingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's front-end or public network components. 
+func (r Hardware_SecurityModule750) GetFrontendNetworkComponents() (resp []datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getFrontendNetworkComponents", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getFrontendOutgoingBandwidth''' method retrieves the amount of outgoing public network traffic used by a server between the given start and end date parameters. The ''dateTime'' parameter requires only the day, month and year to be entered - the time (hour, minute and second) are set to midnight by default in order to gather the data for the entire start and end date indicated in the parameter. The amount of bandwidth retrieved is measured in gigabytes (GB).
+func (r Hardware_SecurityModule750) GetFrontendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getFrontendOutgoingBandwidth", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's frontend or public router.
+func (r Hardware_SecurityModule750) GetFrontendRouters() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getFrontendRouters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's universally unique identifier.
+func (r Hardware_SecurityModule750) GetGlobalIdentifier() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getGlobalIdentifier", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The hard drives contained within a piece of hardware.
+func (r Hardware_SecurityModule750) GetHardDrives() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getHardDrives", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a server by searching for the primary IP address.
+func (r Hardware_SecurityModule750) GetHardwareByIpAddress(ipAddress *string) (resp datatypes.Hardware_Server, err error) {
+	params := []interface{}{
+		ipAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getHardwareByIpAddress", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The chassis that a piece of hardware is housed in.
+func (r Hardware_SecurityModule750) GetHardwareChassis() (resp datatypes.Hardware_Chassis, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getHardwareChassis", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's function.
+func (r Hardware_SecurityModule750) GetHardwareFunction() (resp datatypes.Hardware_Function, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getHardwareFunction", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's function.
+func (r Hardware_SecurityModule750) GetHardwareFunctionDescription() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getHardwareFunctionDescription", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's status.
+func (r Hardware_SecurityModule750) GetHardwareStatus() (resp datatypes.Hardware_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getHardwareStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Determine if hardware has Single Root IO Virtualization (SR-IOV) billing item.
+func (r Hardware_SecurityModule750) GetHasSingleRootVirtualizationBillingItemFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getHasSingleRootVirtualizationBillingItemFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Determine if hardware object has TPM enabled.
+func (r Hardware_SecurityModule750) GetHasTrustedPlatformModuleBillingItemFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getHasTrustedPlatformModuleBillingItemFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a host IPS software component object.
+func (r Hardware_SecurityModule750) GetHostIpsSoftwareComponent() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getHostIpsSoftwareComponent", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getHourlyBandwidth''' method retrieves all bandwidth updates hourly for the specified hardware. Because the potential number of data points can become excessive, the method limits the user to obtain data in 24-hour intervals. The required ''dateTime'' parameter is used as the starting point for the query and will be calculated for the 24-hour period starting with the specified date and time. For example, entering a parameter of
+//
+// '02/01/2008 0:00'
+//
+// results in a return of all bandwidth data for the entire day of February 1, 2008, as 0:00 specifies a midnight start date. Please note that the time entered should be completed using a 24-hour clock (military time, astronomical time).
+//
+// For data spanning more than a single 24-hour period, refer to the getBandwidthData function on the metricTrackingObject for the piece of hardware.
+func (r Hardware_SecurityModule750) GetHourlyBandwidth(mode *string, day *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		mode,
+		day,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getHourlyBandwidth", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A server's hourly billing status.
+func (r Hardware_SecurityModule750) GetHourlyBillingFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getHourlyBillingFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The sum of all the inbound network traffic data for the last 30 days.
+func (r Hardware_SecurityModule750) GetInboundBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getInboundBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total private inbound bandwidth for this hardware for the current billing cycle.
+func (r Hardware_SecurityModule750) GetInboundPrivateBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getInboundPrivateBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total public inbound bandwidth for this hardware for the current billing cycle.
+func (r Hardware_SecurityModule750) GetInboundPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getInboundPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Determine if hardware object has the IBM_CLOUD_READY_NODE_CERTIFIED attribute.
+func (r Hardware_SecurityModule750) GetIsCloudReadyNodeCertified() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getIsCloudReadyNodeCertified", nil, &r.Options, &resp) + return +} + +// Return a collection of SoftLayer_Item_Price objects from a collection of SoftLayer_Software_Description +func (r Hardware_SecurityModule750) GetItemPricesFromSoftwareDescriptions(softwareDescriptions []datatypes.Software_Description, includeTranslationsFlag *bool, returnAllPricesFlag *bool) (resp []datatypes.Product_Item, err error) { + params := []interface{}{ + softwareDescriptions, + includeTranslationsFlag, + returnAllPricesFlag, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getItemPricesFromSoftwareDescriptions", params, &r.Options, &resp) + return +} + +// Retrieve The last transaction that a server's operating system was loaded. +func (r Hardware_SecurityModule750) GetLastOperatingSystemReload() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getLastOperatingSystemReload", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the last transaction a server performed. +func (r Hardware_SecurityModule750) GetLastTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getLastTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's latest network monitoring incident. +func (r Hardware_SecurityModule750) GetLatestNetworkMonitorIncident() (resp datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getLatestNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// Retrieve Where a piece of hardware is located within SoftLayer's location hierarchy. +func (r Hardware_SecurityModule750) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule750) GetLocationPathString() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getLocationPathString", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a lockbox account associated with a server. +func (r Hardware_SecurityModule750) GetLockboxNetworkStorage() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getLockboxNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the hardware is a managed resource. +func (r Hardware_SecurityModule750) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve the remote management network component attached with this server. +func (r Hardware_SecurityModule750) GetManagementNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getManagementNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's memory. 
+func (r Hardware_SecurityModule750) GetMemory() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getMemory", nil, &r.Options, &resp) + return +} + +// Retrieve The amount of memory a piece of hardware has, measured in gigabytes. +func (r Hardware_SecurityModule750) GetMemoryCapacity() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getMemoryCapacity", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's metric tracking object. +func (r Hardware_SecurityModule750) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object_HardwareServer, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Retrieve The metric tracking object id for this server. +func (r Hardware_SecurityModule750) GetMetricTrackingObjectId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getMetricTrackingObjectId", nil, &r.Options, &resp) + return +} + +// Returns open monitoring alarms for a given time period +func (r Hardware_SecurityModule750) GetMonitoringActiveAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getMonitoringActiveAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the monitoring agents associated with a piece of hardware. +func (r Hardware_SecurityModule750) GetMonitoringAgents() (resp []datatypes.Monitoring_Agent, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getMonitoringAgents", nil, &r.Options, &resp) + return +} + +// Returns closed monitoring alarms for a given time period +func (r Hardware_SecurityModule750) GetMonitoringClosedAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getMonitoringClosedAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's monitoring robot. +func (r Hardware_SecurityModule750) GetMonitoringRobot() (resp datatypes.Monitoring_Robot, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getMonitoringRobot", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitoring services. +func (r Hardware_SecurityModule750) GetMonitoringServiceComponent() (resp datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getMonitoringServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The monitoring service flag eligibility status for a piece of hardware. +func (r Hardware_SecurityModule750) GetMonitoringServiceEligibilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getMonitoringServiceEligibilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The service flag status for a piece of hardware. 
+func (r Hardware_SecurityModule750) GetMonitoringServiceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getMonitoringServiceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The monitoring notification objects for this hardware. Each object links this hardware instance to a user account that will be notified if monitoring on this hardware object fails +func (r Hardware_SecurityModule750) GetMonitoringUserNotification() (resp []datatypes.User_Customer_Notification_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getMonitoringUserNotification", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's motherboard. +func (r Hardware_SecurityModule750) GetMotherboard() (resp datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getMotherboard", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network cards. +func (r Hardware_SecurityModule750) GetNetworkCards() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNetworkCards", nil, &r.Options, &resp) + return +} + +// Get the IP addresses associated with this server that are protectable by a network component firewall. Note, this may not return all values for IPv6 subnets for this server. Please use getFirewallProtectableSubnets to get all protectable subnets. +func (r Hardware_SecurityModule750) GetNetworkComponentFirewallProtectableIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNetworkComponentFirewallProtectableIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve Returns a hardware's network components. +func (r Hardware_SecurityModule750) GetNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The gateway member if this device is part of a network gateway. +func (r Hardware_SecurityModule750) GetNetworkGatewayMember() (resp datatypes.Network_Gateway_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNetworkGatewayMember", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not this device is part of a network gateway. +func (r Hardware_SecurityModule750) GetNetworkGatewayMemberFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNetworkGatewayMemberFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's network management IP address. +func (r Hardware_SecurityModule750) GetNetworkManagementIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNetworkManagementIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve All servers with failed monitoring that are attached downstream to a piece of hardware. 
+func (r Hardware_SecurityModule750) GetNetworkMonitorAttachedDownHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNetworkMonitorAttachedDownHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Virtual guests that are attached downstream to a hardware that have failed monitoring +func (r Hardware_SecurityModule750) GetNetworkMonitorAttachedDownVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNetworkMonitorAttachedDownVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The status of all of a piece of hardware's network monitoring incidents. +func (r Hardware_SecurityModule750) GetNetworkMonitorIncidents() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNetworkMonitorIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitors. +func (r Hardware_SecurityModule750) GetNetworkMonitors() (resp []datatypes.Network_Monitor_Version1_Query_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNetworkMonitors", nil, &r.Options, &resp) + return +} + +// Retrieve The value of a hardware's network status attribute. +func (r Hardware_SecurityModule750) GetNetworkStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNetworkStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's related network status attribute. +func (r Hardware_SecurityModule750) GetNetworkStatusAttribute() (resp datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNetworkStatusAttribute", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated network storage service account. +func (r Hardware_SecurityModule750) GetNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The network virtual LANs (VLANs) associated with a piece of hardware's network components. +func (r Hardware_SecurityModule750) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNetworkVlans", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth for the next billing cycle (measured in GB). +func (r Hardware_SecurityModule750) GetNextBillingCycleBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNextBillingCycleBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule750) GetNotesHistory() (resp []datatypes.Hardware_Note, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getNotesHistory", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule750) GetObject() (resp datatypes.Hardware_SecurityModule750, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve An open ticket requesting cancellation of this server, if one exists. 
+func (r Hardware_SecurityModule750) GetOpenCancellationTicket() (resp datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getOpenCancellationTicket", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's operating system. +func (r Hardware_SecurityModule750) GetOperatingSystem() (resp datatypes.Software_Component_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getOperatingSystem", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's operating system software description. +func (r Hardware_SecurityModule750) GetOperatingSystemReferenceCode() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getOperatingSystemReferenceCode", nil, &r.Options, &resp) + return +} + +// Retrieve The sum of all the outbound network traffic data for the last 30 days. +func (r Hardware_SecurityModule750) GetOutboundBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getOutboundBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total private outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_SecurityModule750) GetOutboundPrivateBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getOutboundPrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_SecurityModule750) GetOutboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getOutboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the bandwidth usage for this hardware for the current billing cycle exceeds the allocation. +func (r Hardware_SecurityModule750) GetOverBandwidthAllocationFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getOverBandwidthAllocationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve a server's hardware state via its internal sensors. Remote sensor data is transmitted to the SoftLayer API by way of the server's remote management card. Sensor data measures system temperatures, voltages, and other local server settings. Sensor data is cached for 30 seconds. Calls made to getSensorData for the same server within 30 seconds of each other will return the same data. Subsequent calls will return new data once the cache expires. +func (r Hardware_SecurityModule750) GetPMInfo() (resp []datatypes.Container_RemoteManagement_PmInfo, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPMInfo", nil, &r.Options, &resp) + return +} + +// Retrieve Blade Bay +func (r Hardware_SecurityModule750) GetParentBay() (resp datatypes.Hardware_Blade, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getParentBay", nil, &r.Options, &resp) + return +} + +// Retrieve Parent Hardware. +func (r Hardware_SecurityModule750) GetParentHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getParentHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the Point of Presence (PoP) location in which a piece of hardware resides. 
+func (r Hardware_SecurityModule750) GetPointOfPresenceLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPointOfPresenceLocation", nil, &r.Options, &resp) + return +} + +// Retrieve The power components for a hardware object. +func (r Hardware_SecurityModule750) GetPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPowerComponents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's power supply. +func (r Hardware_SecurityModule750) GetPowerSupply() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPowerSupply", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's primary private IP address. +func (r Hardware_SecurityModule750) GetPrimaryBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPrimaryBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's primary back-end network component. +func (r Hardware_SecurityModule750) GetPrimaryBackendNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPrimaryBackendNetworkComponent", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule750) GetPrimaryDriveSize() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPrimaryDriveSize", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's primary public IP address. +func (r Hardware_SecurityModule750) GetPrimaryIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPrimaryIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's primary public network component. +func (r Hardware_SecurityModule750) GetPrimaryNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPrimaryNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's private network bandwidth usage over the specified timeframe. If no timeframe is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPrivateBandwidthGraphImage returns a PNG image measuring 827 pixels by 293 pixels. +func (r Hardware_SecurityModule750) GetPrivateBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPrivateBandwidthData", params, &r.Options, &resp) + return +} + +// Retrieve a brief summary of a server's private network bandwidth usage. getPrivateBandwidthDataSummary retrieves a server's bandwidth allocation for its billing period, its estimated usage during its billing period, and an estimation of how much bandwidth it will use during its billing period based on its current usage. A server's projected bandwidth usage increases in accuracy as it progresses through its billing period. 
+func (r Hardware_SecurityModule750) GetPrivateBandwidthDataSummary() (resp datatypes.Container_Network_Bandwidth_Data_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPrivateBandwidthDataSummary", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's private network bandwidth usage over the specified time frame. If no time frame is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPublicBandwidthGraphImage returns a PNG image +func (r Hardware_SecurityModule750) GetPrivateBandwidthGraphImage(startTime *string, endTime *string) (resp []byte, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPrivateBandwidthGraphImage", params, &r.Options, &resp) + return +} + +// Retrieve A server's primary private IP address. +func (r Hardware_SecurityModule750) GetPrivateIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPrivateIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve the private network component attached with this server. +func (r Hardware_SecurityModule750) GetPrivateNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPrivateNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the hardware only has access to the private network. +func (r Hardware_SecurityModule750) GetPrivateNetworkOnlyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPrivateNetworkOnlyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve the backend VLAN for the primary IP address of the server +func (r Hardware_SecurityModule750) GetPrivateVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPrivateVlan", nil, &r.Options, &resp) + return +} + +// Retrieve a backend network VLAN by searching for an IP address +func (r Hardware_SecurityModule750) GetPrivateVlanByIpAddress(ipAddress *string) (resp datatypes.Network_Vlan, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPrivateVlanByIpAddress", params, &r.Options, &resp) + return +} + +// Retrieve The total number of processor cores, summed from all processors that are attached to a piece of hardware +func (r Hardware_SecurityModule750) GetProcessorCoreAmount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getProcessorCoreAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total number of physical processor cores, summed from all processors that are attached to a piece of hardware +func (r Hardware_SecurityModule750) GetProcessorPhysicalCoreAmount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getProcessorPhysicalCoreAmount", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's processors. +func (r Hardware_SecurityModule750) GetProcessors() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getProcessors", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the bandwidth usage for this hardware for the current billing cycle is projected to exceed the allocation. 
+func (r Hardware_SecurityModule750) GetProjectedOverBandwidthAllocationFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getProjectedOverBandwidthAllocationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The projected public outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_SecurityModule750) GetProjectedPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getProjectedPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule750) GetProvisionDate() (resp datatypes.Time, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getProvisionDate", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's public network bandwidth usage over the specified timeframe. If no timeframe is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPublicBandwidthGraphImage returns a PNG image measuring 827 pixels by 293 pixels. +func (r Hardware_SecurityModule750) GetPublicBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPublicBandwidthData", params, &r.Options, &resp) + return +} + +// Retrieve a brief summary of a server's public network bandwidth usage. getPublicBandwidthDataSummary retrieves a server's bandwidth allocation for its billing period, its estimated usage during its billing period, and an estimation of how much bandwidth it will use during its billing period based on its current usage. A server's projected bandwidth usage increases in accuracy as it progresses through its billing period. +func (r Hardware_SecurityModule750) GetPublicBandwidthDataSummary() (resp datatypes.Container_Network_Bandwidth_Data_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPublicBandwidthDataSummary", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's public network bandwidth usage over the specified time frame. If no time frame is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPublicBandwidthGraphImage returns a PNG image measuring 827 pixels by 293 pixels. THIS METHOD GENERATES GRAPHS BASED ON THE NEW DATA WAREHOUSE REPOSITORY. +func (r Hardware_SecurityModule750) GetPublicBandwidthGraphImage(startTime *datatypes.Time, endTime *datatypes.Time) (resp []byte, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPublicBandwidthGraphImage", params, &r.Options, &resp) + return +} + +// Retrieve the total number of bytes used by a server over a specified time period via the data warehouse tracking objects for this hardware. +func (r Hardware_SecurityModule750) GetPublicBandwidthTotal(startTime *int, endTime *int) (resp uint, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPublicBandwidthTotal", params, &r.Options, &resp) + return +} + +// Retrieve a SoftLayer server's public network component. Some servers are only connected to the private network and may not have a public network component. 
In that case getPublicNetworkComponent returns a null object. +func (r Hardware_SecurityModule750) GetPublicNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPublicNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve the frontend VLAN for the primary IP address of the server +func (r Hardware_SecurityModule750) GetPublicVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPublicVlan", nil, &r.Options, &resp) + return +} + +// Retrieve the frontend network Vlan by searching the hostname of a server +func (r Hardware_SecurityModule750) GetPublicVlanByHostname(hostname *string) (resp datatypes.Network_Vlan, err error) { + params := []interface{}{ + hostname, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getPublicVlanByHostname", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule750) GetRack() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getRack", nil, &r.Options, &resp) + return +} + +// Retrieve The RAID controllers contained within a piece of hardware. +func (r Hardware_SecurityModule750) GetRaidControllers() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getRaidControllers", nil, &r.Options, &resp) + return +} + +// Retrieve Determine if hardware object is vSan Ready Node. +func (r Hardware_SecurityModule750) GetReadyNodeFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getReadyNodeFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Recent events that impact this hardware. +func (r Hardware_SecurityModule750) GetRecentEvents() (resp []datatypes.Notification_Occurrence_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getRecentEvents", nil, &r.Options, &resp) + return +} + +// Retrieve The last five commands issued to the server's remote management card. +func (r Hardware_SecurityModule750) GetRecentRemoteManagementCommands() (resp []datatypes.Hardware_Component_RemoteManagement_Command_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getRecentRemoteManagementCommands", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule750) GetRegionalInternetRegistry() (resp datatypes.Network_Regional_Internet_Registry, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getRegionalInternetRegistry", nil, &r.Options, &resp) + return +} + +// Retrieve A server's remote management card. +func (r Hardware_SecurityModule750) GetRemoteManagement() (resp datatypes.Hardware_Component_RemoteManagement, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getRemoteManagement", nil, &r.Options, &resp) + return +} + +// Retrieve User credentials to issue commands and/or interact with the server's remote management card. +func (r Hardware_SecurityModule750) GetRemoteManagementAccounts() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getRemoteManagementAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's associated remote management component. This is normally IPMI. 
+func (r Hardware_SecurityModule750) GetRemoteManagementComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getRemoteManagementComponent", nil, &r.Options, &resp) + return +} + +// Retrieve User(s) who have access to issue commands and/or interact with the server's remote management card. +func (r Hardware_SecurityModule750) GetRemoteManagementUsers() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getRemoteManagementUsers", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule750) GetResourceConfigurations() (resp []datatypes.Hardware_Resource_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getResourceConfigurations", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule750) GetResourceGroupMemberReferences() (resp []datatypes.Resource_Group_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getResourceGroupMemberReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule750) GetResourceGroupRoles() (resp []datatypes.Resource_Group_Role, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getResourceGroupRoles", nil, &r.Options, &resp) + return +} + +// Retrieve The resource groups in which this hardware is a member. +func (r Hardware_SecurityModule750) GetResourceGroups() (resp []datatypes.Resource_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getResourceGroups", nil, &r.Options, &resp) + return +} + +// Retrieve the reverse domain records associated with this server. +func (r Hardware_SecurityModule750) GetReverseDomainRecords() (resp []datatypes.Dns_Domain, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getReverseDomainRecords", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's routers. +func (r Hardware_SecurityModule750) GetRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getRouters", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of scale assets this hardware corresponds to. +func (r Hardware_SecurityModule750) GetScaleAssets() (resp []datatypes.Scale_Asset, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getScaleAssets", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's vulnerability scan requests. +func (r Hardware_SecurityModule750) GetSecurityScanRequests() (resp []datatypes.Network_Security_Scanner_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getSecurityScanRequests", nil, &r.Options, &resp) + return +} + +// Retrieve a server's hardware state via its internal sensors. Remote sensor data is transmitted to the SoftLayer API by way of the server's remote management card. Sensor data measures system temperatures, voltages, and other local server settings. Sensor data is cached for 30 seconds. Calls made to getSensorData for the same server within 30 seconds of each other will return the same data. Subsequent calls will return new data once the cache expires. 
+func (r Hardware_SecurityModule750) GetSensorData() (resp []datatypes.Container_RemoteManagement_SensorReading, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getSensorData", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieves the raw data returned from the server's remote management card. For more details of what is returned please refer to the getSensorData method. Along with the raw data, graphs for the cpu and system temperatures and fan speeds are also returned.
+func (r Hardware_SecurityModule750) GetSensorDataWithGraphs() (resp datatypes.Container_RemoteManagement_SensorReadingsWithGraphs, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getSensorDataWithGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a server's hardware components, software, and network components. getServerDetails is an aggregation function that combines the results of [[SoftLayer_Hardware_Server::getComponents]], [[SoftLayer_Hardware_Server::getSoftware]], and [[SoftLayer_Hardware_Server::getNetworkComponents]] in a single container.
+func (r Hardware_SecurityModule750) GetServerDetails() (resp datatypes.Container_Hardware_Server_Details, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getServerDetails", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the server's fan speeds and display them using tachometer graphs. Data used to construct graphs is retrieved from the server's remote management card. All graphs returned will have a title associated with them.
+func (r Hardware_SecurityModule750) GetServerFanSpeedGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorSpeed, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getServerFanSpeedGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieves the power state for the server. The server's power status is retrieved from its remote management card. This will return 'on' or 'off'.
+func (r Hardware_SecurityModule750) GetServerPowerState() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getServerPowerState", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the server room in which the hardware is located.
+func (r Hardware_SecurityModule750) GetServerRoom() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getServerRoom", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the server's temperatures and display them using thermometer graphs. Temperatures retrieved are CPU(s) and system temperatures. Data used to construct graphs is retrieved from the server's remote management card. All graphs returned will have a title associated with them.
+func (r Hardware_SecurityModule750) GetServerTemperatureGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorTemperature, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getServerTemperatureGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the piece of hardware's service provider.
+func (r Hardware_SecurityModule750) GetServiceProvider() (resp datatypes.Service_Provider, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getServiceProvider", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's installed software.
+func (r Hardware_SecurityModule750) GetSoftwareComponents() (resp []datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getSoftwareComponents", nil, &r.Options, &resp) + return +} + +// Retrieve Determine if hardware object has Software Guard Extension (SGX) enabled. +func (r Hardware_SecurityModule750) GetSoftwareGuardExtensionEnabled() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getSoftwareGuardExtensionEnabled", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for a spare pool server. +func (r Hardware_SecurityModule750) GetSparePoolBillingItem() (resp datatypes.Billing_Item_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getSparePoolBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve SSH keys to be installed on the server during provisioning or an OS reload. +func (r Hardware_SecurityModule750) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getSshKeys", nil, &r.Options, &resp) + return +} + +// Retrieve A server's remote management card used for statistics. +func (r Hardware_SecurityModule750) GetStatisticsRemoteManagement() (resp datatypes.Hardware_Component_RemoteManagement, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getStatisticsRemoteManagement", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule750) GetStorageNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getStorageNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule750) GetTagReferences() (resp []datatypes.Tag_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getTagReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule750) GetTopLevelLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getTopLevelLocation", nil, &r.Options, &resp) + return +} + +// +// This method will query transaction history for a piece of hardware. +func (r Hardware_SecurityModule750) GetTransactionHistory() (resp []datatypes.Provisioning_Version1_Transaction_History, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getTransactionHistory", nil, &r.Options, &resp) + return +} + +// Retrieve a list of upgradeable items available to this piece of hardware. Currently, getUpgradeItemPrices retrieves upgrades available for a server's memory, hard drives, network port speed, bandwidth allocation and GPUs. +func (r Hardware_SecurityModule750) GetUpgradeItemPrices() (resp []datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getUpgradeItemPrices", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated upgrade request object, if any. +func (r Hardware_SecurityModule750) GetUpgradeRequest() (resp datatypes.Product_Upgrade_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getUpgradeRequest", nil, &r.Options, &resp) + return +} + +// Retrieve The network device connected to a piece of hardware. 
+func (r Hardware_SecurityModule750) GetUplinkHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getUplinkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the network component that is one level higher than a piece of hardware on the network infrastructure. +func (r Hardware_SecurityModule750) GetUplinkNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getUplinkNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve An array containing a single string of custom user data for a hardware order. Max size is 16 kb. +func (r Hardware_SecurityModule750) GetUserData() (resp []datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getUserData", nil, &r.Options, &resp) + return +} + +// Retrieve A list of users that have access to this computing instance. +func (r Hardware_SecurityModule750) GetUsers() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getUsers", nil, &r.Options, &resp) + return +} + +// This method will return the list of block device template groups that are valid to the host. For instance, it will only retrieve FLEX images. +func (r Hardware_SecurityModule750) GetValidBlockDeviceTemplateGroups(visibility *string) (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + params := []interface{}{ + visibility, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getValidBlockDeviceTemplateGroups", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis for a piece of hardware. +func (r Hardware_SecurityModule750) GetVirtualChassis() (resp datatypes.Hardware_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getVirtualChassis", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis siblings for a piece of hardware. +func (r Hardware_SecurityModule750) GetVirtualChassisSiblings() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getVirtualChassisSiblings", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware server's virtual servers. +func (r Hardware_SecurityModule750) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's virtual host record. +func (r Hardware_SecurityModule750) GetVirtualHost() (resp datatypes.Virtual_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getVirtualHost", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's virtual software licenses. +func (r Hardware_SecurityModule750) GetVirtualLicenses() (resp []datatypes.Software_VirtualLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getVirtualLicenses", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the bandwidth allotment to which a piece of hardware belongs. 
+func (r Hardware_SecurityModule750) GetVirtualRack() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getVirtualRack", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the bandwidth allotment belonging to a piece of hardware. +func (r Hardware_SecurityModule750) GetVirtualRackId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getVirtualRackId", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the bandwidth allotment belonging to a piece of hardware. +func (r Hardware_SecurityModule750) GetVirtualRackName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getVirtualRackName", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's virtualization platform software. +func (r Hardware_SecurityModule750) GetVirtualizationPlatform() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getVirtualizationPlatform", nil, &r.Options, &resp) + return +} + +// Retrieve a list of Windows updates available for a server from the local SoftLayer Windows Server Update Services (WSUS) server. Windows servers provisioned by SoftLayer are configured to use the local WSUS server via the private network by default. +func (r Hardware_SecurityModule750) GetWindowsUpdateAvailableUpdates() (resp []datatypes.Container_Utility_Microsoft_Windows_UpdateServices_UpdateItem, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getWindowsUpdateAvailableUpdates", nil, &r.Options, &resp) + return +} + +// Retrieve a list of Windows updates installed on a server as reported by the local SoftLayer Windows Server Update Services (WSUS) server. Windows servers provisioned by SoftLayer are configured to use the local WSUS server via the private network by default. +func (r Hardware_SecurityModule750) GetWindowsUpdateInstalledUpdates() (resp []datatypes.Container_Utility_Microsoft_Windows_UpdateServices_UpdateItem, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getWindowsUpdateInstalledUpdates", nil, &r.Options, &resp) + return +} + +// This method returns an update status record for this server. That record will specify if the server is missing updates, or has updates that must be reinstalled or require a reboot to go into affect. +func (r Hardware_SecurityModule750) GetWindowsUpdateStatus() (resp datatypes.Container_Utility_Microsoft_Windows_UpdateServices_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "getWindowsUpdateStatus", nil, &r.Options, &resp) + return +} + +// The '''importVirtualHost''' method attempts to import the host record for the virtualization platform running on a server. +func (r Hardware_SecurityModule750) ImportVirtualHost() (resp datatypes.Virtual_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "importVirtualHost", nil, &r.Options, &resp) + return +} + +// Idera Bare Metal Server Restore is a backup agent designed specifically for making full system restores made with Idera Server Backup. 
+func (r Hardware_SecurityModule750) InitiateIderaBareMetalRestore() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "initiateIderaBareMetalRestore", nil, &r.Options, &resp) + return +} + +// R1Soft Bare Metal Server Restore is an R1Soft disk agent designed specifically for making full system restores made with R1Soft CDP Server backup. +func (r Hardware_SecurityModule750) InitiateR1SoftBareMetalRestore() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "initiateR1SoftBareMetalRestore", nil, &r.Options, &resp) + return +} + +// Issues a ping command and returns the success (true) or failure (false) of the ping command. +func (r Hardware_SecurityModule750) IsBackendPingable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "isBackendPingable", nil, &r.Options, &resp) + return +} + +// Issues a ping command and returns the success (true) or failure (false) of the ping command. +func (r Hardware_SecurityModule750) IsPingable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "isPingable", nil, &r.Options, &resp) + return +} + +// Determine if the server runs any version of the Microsoft Windows operating systems. Return ''true'' if it does and ''false if otherwise. +func (r Hardware_SecurityModule750) IsWindowsServer() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "isWindowsServer", nil, &r.Options, &resp) + return +} + +// You can launch firmware updates by selecting from your server list. It will bring your server offline for approximately 20 minutes while the updates are in progress. +// +// In the event of a hardware failure during this test our datacenter engineers will be notified of the problem automatically. They will then replace any failed components to bring your server back online, and will be contacting you to ensure that impact on your server is minimal. +func (r Hardware_SecurityModule750) MassFirmwareUpdate(hardwareIds []int, ipmi *bool, raidController *bool, bios *bool, harddrive *bool) (resp []datatypes.Container_Hardware_Server_Request, err error) { + params := []interface{}{ + hardwareIds, + ipmi, + raidController, + bios, + harddrive, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "massFirmwareUpdate", params, &r.Options, &resp) + return +} + +// Reloads current or customer specified operating system configuration. +// +// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server. +// +// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the server to the current specifications on record. +// +// The reload will take AT MINIMUM 66 minutes. 
+func (r Hardware_SecurityModule750) MassReloadOperatingSystem(hardwareIds []string, token *string, config *datatypes.Container_Hardware_Server_Configuration) (resp string, err error) { + params := []interface{}{ + hardwareIds, + token, + config, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "massReloadOperatingSystem", params, &r.Options, &resp) + return +} + +// The ability to place multiple bare metal servers in a state where they are powered down and ports closed yet still allocated to the customer as a part of the Spare Pool program. +func (r Hardware_SecurityModule750) MassSparePool(hardwareIds []string, action *string, newOrder *bool) (resp []datatypes.Container_Hardware_Server_Request, err error) { + params := []interface{}{ + hardwareIds, + action, + newOrder, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "massSparePool", params, &r.Options, &resp) + return +} + +// Issues a ping command to the server and returns the ping response. +func (r Hardware_SecurityModule750) Ping() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "ping", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule750) PopulateServerRam(ramSerialString *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + ramSerialString, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "populateServerRam", params, &r.Options, &resp) + return +} + +// Power off then power on the server via powerstrip. The power cycle command is equivalent to unplugging the server from the powerstrip and then plugging the server back into the powerstrip. This should only be used as a last resort. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_SecurityModule750) PowerCycle() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "powerCycle", nil, &r.Options, &resp) + return +} + +// This method will power off the server via the server's remote management card. +func (r Hardware_SecurityModule750) PowerOff() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "powerOff", nil, &r.Options, &resp) + return +} + +// Power on server via its remote management card. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_SecurityModule750) PowerOn() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "powerOn", nil, &r.Options, &resp) + return +} + +// Attempts to reboot the server by issuing a reset (soft reboot) command to the server's remote management card. If the reset (soft reboot) attempt is unsuccessful, a power cycle command will be issued via the powerstrip. The power cycle command is equivalent to unplugging the server from the powerstrip and then plugging the server back into the powerstrip. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. 
+func (r Hardware_SecurityModule750) RebootDefault() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "rebootDefault", nil, &r.Options, &resp) + return +} + +// Reboot the server by issuing a cycle command to the server's remote management card. This is equivalent to pressing the 'Reset' button on the server. This command is issued immediately and will not wait for processes to shutdown. After this command is issued, the server may take a few moments to boot up as server may run system disks checks. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_SecurityModule750) RebootHard() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "rebootHard", nil, &r.Options, &resp) + return +} + +// Reboot the server by issuing a reset command to the server's remote management card. This is a graceful reboot. The servers will allow all process to shutdown gracefully before rebooting. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_SecurityModule750) RebootSoft() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "rebootSoft", nil, &r.Options, &resp) + return +} + +// Reloads current operating system configuration. +// +// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server. +// +// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the server to the current specifications on record. +// +// The reload will take AT MINIMUM 66 minutes. +func (r Hardware_SecurityModule750) ReloadCurrentOperatingSystemConfiguration(token *string) (resp string, err error) { + params := []interface{}{ + token, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "reloadCurrentOperatingSystemConfiguration", params, &r.Options, &resp) + return +} + +// Reloads current or customer specified operating system configuration. +// +// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server. +// +// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the server to the current specifications on record. +// +// The reload will take AT MINIMUM 66 minutes. 
+func (r Hardware_SecurityModule750) ReloadOperatingSystem(token *string, config *datatypes.Container_Hardware_Server_Configuration) (resp string, err error) { + params := []interface{}{ + token, + config, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "reloadOperatingSystem", params, &r.Options, &resp) + return +} + +// This method is used to remove access to s SoftLayer_Network_Storage volumes that supports host- or network-level access control. +func (r Hardware_SecurityModule750) RemoveAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "removeAccessToNetworkStorage", params, &r.Options, &resp) + return +} + +// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control. +func (r Hardware_SecurityModule750) RemoveAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "removeAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// You can launch a new Passmark hardware test by selecting from your server list. It will bring your server offline for approximately 20 minutes while the testing is in progress, and will publish a certificate with the results to your hardware details page. +// +// While the hard drives are tested for the initial deployment, the Passmark Certificate utility will not test the hard drives on your live server. This is to ensure that no data is overwritten. If you would like to test the server's hard drives, you can have the full Passmark suite installed to your server free of charge through a new Support ticket. +// +// While the test itself does not overwrite any data on the server, it is recommended that you make full off-server backups of all data prior to launching the test. The Passmark hardware test is designed to force any latent hardware issues to the surface, so hardware failure is possible. +// +// In the event of a hardware failure during this test our datacenter engineers will be notified of the problem automatically. They will then replace any failed components to bring your server back online, and will be contacting you to ensure that impact on your server is minimal. +func (r Hardware_SecurityModule750) RunPassmarkCertificationBenchmark() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "runPassmarkCertificationBenchmark", nil, &r.Options, &resp) + return +} + +// Changes the password that we have stored in our database for a servers' Operating System +func (r Hardware_SecurityModule750) SetOperatingSystemPassword(newPassword *string) (resp bool, err error) { + params := []interface{}{ + newPassword, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "setOperatingSystemPassword", params, &r.Options, &resp) + return +} + +// Sets the private network interface speed to the new speed. Speed values can only be 0 (Disconnect), 10, 100, 1000, and 10000. The new speed must be equal to or less than the max speed of the interface. +// +// It will take less than a minute to update the switch port speed. The server uplink will not be operational again until the server interface speed is updated. 
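The confirmation protocol for the reload methods above is easy to miss: the first call without a token returns a token, and only a second call with that token (or the literal 'FORCE') actually starts the reload. A minimal sketch with placeholder credentials and hardware id, again assuming the generated constructor is named GetHardwareSecurityModule750Service per the Get<Type>Service pattern used by this package:

package main

import (
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials

	// Assumed constructor name; 123456 is a hypothetical hardware id.
	hsm := services.GetHardwareSecurityModule750Service(sess).Id(123456)

	// Step 1: call without a token to receive a confirmation token, which the
	// documentation says remains valid for 10 minutes.
	token, err := hsm.ReloadOperatingSystem(nil, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: call again with the token to start the reload of the current
	// configuration on record. Passing "FORCE" instead skips the confirmation.
	if _, err := hsm.ReloadOperatingSystem(&token, nil); err != nil {
		log.Fatal(err)
	}
	log.Println("operating system reload started")
}

As the documentation stresses, the reload formats the primary disk, so any data worth keeping must be backed up off the server before either step is taken.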
+func (r Hardware_SecurityModule750) SetPrivateNetworkInterfaceSpeed(newSpeed *int) (resp bool, err error) { + params := []interface{}{ + newSpeed, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "setPrivateNetworkInterfaceSpeed", params, &r.Options, &resp) + return +} + +// Sets the public network interface speed to the new speed. Speed values can only be 0 (Disconnect), 10, 100, 1000, and 10000. The new speed must be equal to or less than the max speed of the interface. +// +// It will take less than a minute to update the switch port speed. The server uplink will not be operational again until the server interface speed is updated. +func (r Hardware_SecurityModule750) SetPublicNetworkInterfaceSpeed(newSpeed *int) (resp bool, err error) { + params := []interface{}{ + newSpeed, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "setPublicNetworkInterfaceSpeed", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule750) SetTags(tags *string) (resp bool, err error) { + params := []interface{}{ + tags, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "setTags", params, &r.Options, &resp) + return +} + +// Sets the data that will be written to the configuration drive. +func (r Hardware_SecurityModule750) SetUserMetadata(metadata []string) (resp []datatypes.Hardware_Attribute, err error) { + params := []interface{}{ + metadata, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "setUserMetadata", params, &r.Options, &resp) + return +} + +// Shuts down the public network port +func (r Hardware_SecurityModule750) ShutdownPrivatePort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "shutdownPrivatePort", nil, &r.Options, &resp) + return +} + +// Shuts down the public network port +func (r Hardware_SecurityModule750) ShutdownPublicPort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "shutdownPublicPort", nil, &r.Options, &resp) + return +} + +// The ability to place bare metal servers in a state where they are powered down, and ports closed yet still allocated to the customer as a part of the Spare Pool program. +func (r Hardware_SecurityModule750) SparePool(action *string, newOrder *bool) (resp bool, err error) { + params := []interface{}{ + action, + newOrder, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "sparePool", params, &r.Options, &resp) + return +} + +// This method will update the root IPMI password on this SoftLayer_Hardware. +func (r Hardware_SecurityModule750) UpdateIpmiPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "updateIpmiPassword", params, &r.Options, &resp) + return +} + +// Validates a collection of partitions for an operating system +func (r Hardware_SecurityModule750) ValidatePartitionsForOperatingSystem(operatingSystem *datatypes.Software_Description, partitions []datatypes.Hardware_Component_Partition) (resp bool, err error) { + params := []interface{}{ + operatingSystem, + partitions, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule750", "validatePartitionsForOperatingSystem", params, &r.Options, &resp) + return +} + +// The SoftLayer_Hardware_Server data type contains general information relating to a single SoftLayer server. 
+type Hardware_Server struct { + Session *session.Session + Options sl.Options +} + +// GetHardwareServerService returns an instance of the Hardware_Server SoftLayer service +func GetHardwareServerService(sess *session.Session) Hardware_Server { + return Hardware_Server{Session: sess} +} + +func (r Hardware_Server) Id(id int) Hardware_Server { + r.Options.Id = &id + return r +} + +func (r Hardware_Server) Mask(mask string) Hardware_Server { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Hardware_Server) Filter(filter string) Hardware_Server { + r.Options.Filter = filter + return r +} + +func (r Hardware_Server) Limit(limit int) Hardware_Server { + r.Options.Limit = &limit + return r +} + +func (r Hardware_Server) Offset(offset int) Hardware_Server { + r.Options.Offset = &offset + return r +} + +// Activates the private network port +func (r Hardware_Server) ActivatePrivatePort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "activatePrivatePort", nil, &r.Options, &resp) + return +} + +// Activates the public network port +func (r Hardware_Server) ActivatePublicPort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "activatePublicPort", nil, &r.Options, &resp) + return +} + +// This method is used to allow access to a SoftLayer_Network_Storage volume that supports host- or network-level access control. +func (r Hardware_Server) AllowAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "allowAccessToNetworkStorage", params, &r.Options, &resp) + return +} + +// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control. +func (r Hardware_Server) AllowAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "allowAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// The Rescue Kernel is designed to provide you with the ability to bring a server online in order to troubleshoot system problems that would normally only be resolved by an OS Reload. The correct Rescue Kernel will be selected based upon the currently installed operating system. When the rescue kernel process is initiated, the server will shutdown and reboot on to the public network with the same IP's assigned to the server to allow for remote connections. It will bring your server offline for approximately 10 minutes while the rescue is in progress. The root/administrator password will be the same as what is listed in the portal for the server. +func (r Hardware_Server) BootToRescueLayer(noOsBootEnvironment *string) (resp bool, err error) { + params := []interface{}{ + noOsBootEnvironment, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "bootToRescueLayer", params, &r.Options, &resp) + return +} + +// Captures a Flex Image of the hard disk on the physical machine, based on the capture template parameter. Returns the image template group containing the disk image. 
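Because the option helpers above use value receivers and return the modified copy, they can be chained fluently before a request is sent. A minimal sketch with placeholder credentials and a hypothetical hardware id, using getDatacenterName (defined later in this file) as the example call; the object mask shown is only illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials

	// Id, Mask, Filter, Limit and Offset each return a copy of the service with
	// the option applied, so the chain below leaves the original value untouched.
	svc := services.GetHardwareServerService(sess).
		Id(123456).                        // hypothetical hardware id
		Mask("id;hostname;datacenterName") // illustrative simple object mask

	name, err := svc.GetDatacenterName()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("datacenter:", name)
}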
+func (r Hardware_Server) CaptureImage(captureTemplate *datatypes.Container_Disk_Image_Capture_Template) (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + params := []interface{}{ + captureTemplate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "captureImage", params, &r.Options, &resp) + return +} + +// Returns monitoring alarm detailed history +func (r Hardware_Server) CloseAlarm(alarmId *string) (resp bool, err error) { + params := []interface{}{ + alarmId, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "closeAlarm", params, &r.Options, &resp) + return +} + +// You can launch firmware updates by selecting from your server list. It will bring your server offline for approximately 20 minutes while the updates are in progress. +// +// In the event of a hardware failure during this test our datacenter engineers will be notified of the problem automatically. They will then replace any failed components to bring your server back online, and will be contacting you to ensure that impact on your server is minimal. +func (r Hardware_Server) CreateFirmwareUpdateTransaction(ipmi *int, raidController *int, bios *int, harddrive *int) (resp bool, err error) { + params := []interface{}{ + ipmi, + raidController, + bios, + harddrive, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "createFirmwareUpdateTransaction", params, &r.Options, &resp) + return +} + +// +// +// createObject() enables the creation of servers on an account. This +// method is a simplified alternative to interacting with the ordering system directly. +// +// +// In order to create a server, a template object must be sent in with a few required +// values. +// +// +// When this method returns an order will have been placed for a server of the specified configuration. +// +// +// To determine when the server is available you can poll the server via [[SoftLayer_Hardware/getObject|getObject]], +// checking the provisionDate property. +// When provisionDate is not null, the server will be ready. Be sure to use the globalIdentifier +// as your initialization parameter. +// +// +// Warning: Servers created via this method will incur charges on your account. For testing input parameters see [[SoftLayer_Hardware/generateOrderTemplate|generateOrderTemplate]]. +// +// +// Input - [[SoftLayer_Hardware (type)|SoftLayer_Hardware]] +//
+// * hostname - Hostname for the server. Required. Type - string.
+// * domain - Domain for the server. Required. Type - string.
+// * processorCoreAmount - The number of logical CPU cores to allocate. Required. Type - int. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+// * memoryCapacity - The amount of memory to allocate in gigabytes. Required. Type - int. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+// * hourlyBillingFlag - Specifies the billing type for the server. Required. Type - boolean. When true the server will be billed on hourly usage, otherwise it will be billed on a monthly basis.
+// * operatingSystemReferenceCode - An identifier for the operating system to provision the server with. Required. Type - string. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+// * datacenter.name - Specifies which datacenter the server is to be provisioned in. Required. Type - string. The datacenter property is a [[SoftLayer_Location (type)|location]] structure with the name field set. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options. Example: {"datacenter": {"name": "dal05"}}
+// * networkComponents.maxSpeed - Specifies the connection speed for the server's network components. Optional. Type - int. Default - the highest available zero cost port speed will be used. The networkComponents property is an array with a single [[SoftLayer_Network_Component (type)|network component]] structure; the maxSpeed property must be set to specify the network uplink speed, in megabits per second, of the server. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options. Example: {"networkComponents": [{"maxSpeed": 1000}]}
+// * networkComponents.redundancyEnabledFlag - Specifies whether or not the server's network components should be in redundancy groups. Optional. Type - bool. Default - false. The networkComponents property is an array with a single [[SoftLayer_Network_Component (type)|network component]] structure; when the redundancyEnabledFlag property is true the server's network components will be in redundancy groups. Example: {"networkComponents": [{"redundancyEnabledFlag": false}]}
+// * privateNetworkOnlyFlag - Specifies whether or not the server only has access to the private network. Optional. Type - boolean. Default - false. When true this flag specifies that a server is to only have access to the private network.
+// * primaryNetworkComponent.networkVlan.id - Specifies the network vlan which is to be used for the frontend interface of the server. Optional. Type - int. The primaryNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]] structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure; the id property must be set to specify the frontend network vlan of the server. Example: {"primaryNetworkComponent": {"networkVlan": {"id": 1}}}
+// * primaryBackendNetworkComponent.networkVlan.id - Specifies the network vlan which is to be used for the backend interface of the server. Optional. Type - int. The primaryBackendNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]] structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure; the id property must be set to specify the backend network vlan of the server. Example: {"primaryBackendNetworkComponent": {"networkVlan": {"id": 2}}}
+// * fixedConfigurationPreset.keyName - Optional. Type - string. The fixedConfigurationPreset property is a [[SoftLayer_Product_Package_Preset (type)|fixed configuration preset]] structure; the keyName property must be set to specify the preset to use. If a fixed configuration preset is used, the processorCoreAmount, memoryCapacity and hardDrives properties must not be set. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options. Example: {"fixedConfigurationPreset": {"keyName": "SOME_KEY_NAME"}}
+// * userData.value - Arbitrary data to be made available to the server. Optional. Type - string. The userData property is an array with a single [[SoftLayer_Hardware_Attribute (type)|attribute]] structure with the value property set to an arbitrary value. This value can be retrieved via the [[SoftLayer_Resource_Metadata/getUserMetadata|getUserMetadata]] method from a request originating from the server; this is primarily useful for providing data to software that may be on the server and configured to execute upon first boot. Example: {"userData": [{"value": "someValue"}]}
+// * hardDrives - Hard drive settings for the server. Optional. Type - SoftLayer_Hardware_Component. Default - the largest available capacity for a zero cost primary disk will be used. The hardDrives property is an array of [[SoftLayer_Hardware_Component (type)|hardware component]] structures; each hard drive must specify the capacity property. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options. Example: {"hardDrives": [{"capacity": 500}]}
+// * sshKeys - SSH keys to install on the server upon provisioning. Optional. Type - array of [[SoftLayer_Security_Ssh_Key (type)|SoftLayer_Security_Ssh_Key]]. The sshKeys property is an array of [[SoftLayer_Security_Ssh_Key (type)|SSH Key]] structures with the id property set to the value of an existing SSH key. To create a new SSH key, call [[SoftLayer_Security_Ssh_Key/createObject|createObject]] on the [[SoftLayer_Security_Ssh_Key]] service. To obtain a list of existing SSH keys, call [[SoftLayer_Account/getSshKeys|getSshKeys]] on the [[SoftLayer_Account]] service. Example: {"sshKeys": [{"id": 123}]}
+// * postInstallScriptUri - Specifies the uri location of the script to be downloaded and run after installation is complete. Optional. Type - string.
+//
+//
+// REST Example
+//
    +// curl -X POST -d '{ +// "parameters":[ +// { +// "hostname": "host1", +// "domain": "example.com", +// "processorCoreAmount": 2, +// "memoryCapacity": 2, +// "hourlyBillingFlag": true, +// "operatingSystemReferenceCode": "UBUNTU_LATEST" +// } +// ] +// }' https://api.softlayer.com/rest/v3/SoftLayer_Hardware.json +// +// HTTP/1.1 201 Created +// Location: https://api.softlayer.com/rest/v3/SoftLayer_Hardware/f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5/getObject +// +// +// { +// "accountId": 232298, +// "bareMetalInstanceFlag": null, +// "domain": "example.com", +// "hardwareStatusId": null, +// "hostname": "host1", +// "id": null, +// "serviceProviderId": null, +// "serviceProviderResourceId": null, +// "globalIdentifier": "f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5", +// "hourlyBillingFlag": true, +// "memoryCapacity": 2, +// "operatingSystemReferenceCode": "UBUNTU_LATEST", +// "processorCoreAmount": 2 +// } +// +func (r Hardware_Server) CreateObject(templateObject *datatypes.Hardware_Server) (resp datatypes.Hardware_Server, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_Server) CreatePostSoftwareInstallTransaction(installCodes []string, returnBoolean *bool) (resp bool, err error) { + params := []interface{}{ + installCodes, + returnBoolean, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "createPostSoftwareInstallTransaction", params, &r.Options, &resp) + return +} + +// +// This method will cancel a server effective immediately. For servers billed hourly, the charges will stop immediately after the method returns. +func (r Hardware_Server) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "deleteObject", nil, &r.Options, &resp) + return +} + +// Delete software component passwords. +func (r Hardware_Server) DeleteSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "deleteSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Edit a server's properties +func (r Hardware_Server) EditObject(templateObject *datatypes.Hardware_Server) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "editObject", params, &r.Options, &resp) + return +} + +// Edit the properties of a software component password such as the username, password, and notes. +func (r Hardware_Server) EditSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "editSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Download and run remote script from uri on the hardware. +func (r Hardware_Server) ExecuteRemoteScript(uri *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + uri, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "executeRemoteScript", params, &r.Options, &resp) + return +} + +// The '''findByIpAddress''' method finds hardware using its primary public or private IP address. 
IP addresses that have a secondary subnet tied to the hardware will not return the hardware - alternate means of locating the hardware must be used (see '''Associated Methods'''). If no hardware is found, no errors are generated and no data is returned. +func (r Hardware_Server) FindByIpAddress(ipAddress *string) (resp datatypes.Hardware, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "findByIpAddress", params, &r.Options, &resp) + return +} + +// +// Obtain an [[SoftLayer_Container_Product_Order_Hardware_Server (type)|order container]] that can be sent to [[SoftLayer_Product_Order/verifyOrder|verifyOrder]] or [[SoftLayer_Product_Order/placeOrder|placeOrder]]. +// +// +// This is primarily useful when there is a necessity to confirm the price which will be charged for an order. +// +// +// See [[SoftLayer_Hardware/createObject|createObject]] for specifics on the requirements of the template object parameter. +func (r Hardware_Server) GenerateOrderTemplate(templateObject *datatypes.Hardware) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "generateOrderTemplate", params, &r.Options, &resp) + return +} + +// Retrieve The account associated with a piece of hardware. +func (r Hardware_Server) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's active physical components. +func (r Hardware_Server) GetActiveComponents() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getActiveComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a server's attached network firewall. +func (r Hardware_Server) GetActiveNetworkFirewallBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getActiveNetworkFirewallBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's active network monitoring incidents. +func (r Hardware_Server) GetActiveNetworkMonitorIncident() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getActiveNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetActiveTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getActiveTickets", nil, &r.Options, &resp) + return +} + +// Retrieve Transaction currently running for server. +func (r Hardware_Server) GetActiveTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getActiveTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve Any active transaction(s) that are currently running for the server (example: os reload). +func (r Hardware_Server) GetActiveTransactions() (resp []datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getActiveTransactions", nil, &r.Options, &resp) + return +} + +// The '''getAlarmHistory''' method retrieves a detailed history for the monitoring alarm. When calling this method, a start and end date for the history to be retrieved must be entered. 
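To make the createObject flow above concrete, here is a hedged sketch that mirrors the REST example. The template field names and the sl.String/sl.Bool pointer helpers are taken from the generated datatypes and sl packages vendored alongside this file and should be verified there; credentials and ids are placeholders, and per the warning in the documentation a successful call places a billable order. Only part of the template is filled in; a real order also needs processorCoreAmount and memoryCapacity (or a fixedConfigurationPreset) plus a datacenter, as the parameter list above describes.

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials

	// Partial template mirroring the REST example; see the note above about the
	// additional required properties.
	template := datatypes.Hardware_Server{
		Hardware: datatypes.Hardware{
			Hostname:                     sl.String("host1"),
			Domain:                       sl.String("example.com"),
			HourlyBillingFlag:            sl.Bool(true),
			OperatingSystemReferenceCode: sl.String("UBUNTU_LATEST"),
		},
	}

	created, err := services.GetHardwareServerService(sess).CreateObject(&template)
	if err != nil {
		log.Fatal(err)
	}

	// The documentation recommends polling getObject by globalIdentifier until
	// provisionDate is populated to learn when the server is ready.
	if created.GlobalIdentifier != nil {
		fmt.Println("order placed, globalIdentifier:", *created.GlobalIdentifier)
	}
}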
+func (r Hardware_Server) GetAlarmHistory(startDate *datatypes.Time, endDate *datatypes.Time, alarmId *string) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + alarmId, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAlarmHistory", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetAllPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAllPowerComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Allowed_Host information to connect this server to Network Storage volumes that require access control lists. +func (r Hardware_Server) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAllowedHost", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. +func (r Hardware_Server) GetAllowedNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAllowedNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects whose Replica that this SoftLayer_Hardware has access to. +func (r Hardware_Server) GetAllowedNetworkStorageReplicas() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAllowedNetworkStorageReplicas", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding an antivirus/spyware software component object. +func (r Hardware_Server) GetAntivirusSpywareSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAntivirusSpywareSoftwareComponent", nil, &r.Options, &resp) + return +} + +// This method is retrieve a list of SoftLayer_Network_Storage volumes that are authorized access to this SoftLayer_Hardware. +func (r Hardware_Server) GetAttachedNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAttachedNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's specific attributes. +func (r Hardware_Server) GetAttributes() (resp []datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve An object that stores the maximum level for the monitoring query types and response types. +func (r Hardware_Server) GetAvailableMonitoring() (resp []datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAvailableMonitoring", nil, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that can be authorized to this SoftLayer_Hardware. +func (r Hardware_Server) GetAvailableNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAvailableNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve The average daily total bandwidth usage for the current billing cycle. 
+func (r Hardware_Server) GetAverageDailyBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAverageDailyBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The average daily private bandwidth usage for the current billing cycle. +func (r Hardware_Server) GetAverageDailyPrivateBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAverageDailyPrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The average daily public bandwidth usage for the current billing cycle. +func (r Hardware_Server) GetAverageDailyPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAverageDailyPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Use this method to return an array of private bandwidth utilization records between a given date range. +// +// This method represents the NEW version of getFrontendBandwidthUse +func (r Hardware_Server) GetBackendBandwidthUsage(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBackendBandwidthUsage", params, &r.Options, &resp) + return +} + +// Use this method to return an array of private bandwidth utilization records between a given date range. +func (r Hardware_Server) GetBackendBandwidthUse(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Network_Bandwidth_Version1_Usage_Detail, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBackendBandwidthUse", params, &r.Options, &resp) + return +} + +// The '''getBackendIncomingBandwidth''' method retrieves the amount of incoming private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes. +func (r Hardware_Server) GetBackendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBackendIncomingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's back-end or private network components. +func (r Hardware_Server) GetBackendNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBackendNetworkComponents", nil, &r.Options, &resp) + return +} + +// The '''getBackendOutgoingBandwidth''' method retrieves the amount of outgoing private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes. 
+func (r Hardware_Server) GetBackendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBackendOutgoingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A hardware's backend or private router. +func (r Hardware_Server) GetBackendRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBackendRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth (measured in GB). +func (r Hardware_Server) GetBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted detail record. Allotment details link bandwidth allocation with allotments. +func (r Hardware_Server) GetBandwidthAllotmentDetail() (resp datatypes.Network_Bandwidth_Version1_Allotment_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBandwidthAllotmentDetail", nil, &r.Options, &resp) + return +} + +// Retrieve a collection of bandwidth data from an individual public or private network tracking object. Data is ideal if you with to employ your own traffic storage and graphing systems. +func (r Hardware_Server) GetBandwidthForDateRange(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBandwidthForDateRange", params, &r.Options, &resp) + return +} + +// Use this method when needing a bandwidth image for a single server. It will gather the correct input parameters for the generic graphing utility automatically based on the snapshot specified. Use the $draw flag to suppress the generation of the actual binary PNG image. +func (r Hardware_Server) GetBandwidthImage(networkType *string, snapshotRange *string, draw *bool, dateSpecified *datatypes.Time, dateSpecifiedEnd *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + networkType, + snapshotRange, + draw, + dateSpecified, + dateSpecifiedEnd, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBandwidthImage", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's benchmark certifications. +func (r Hardware_Server) GetBenchmarkCertifications() (resp []datatypes.Hardware_Benchmark_Certification, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBenchmarkCertifications", nil, &r.Options, &resp) + return +} + +// Retrieve The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to. +func (r Hardware_Server) GetBillingCycleBandwidthUsage() (resp []datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBillingCycleBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The raw private bandwidth usage data for the current billing cycle. 
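The date-range bandwidth methods above only consider the month, day and year of their arguments. A hedged sketch of a call over the last week follows; it assumes datatypes.Time is the generated wrapper that embeds time.Time (check the vendored datatypes package), with placeholder credentials and a hypothetical hardware id.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY")           // placeholder credentials
	svc := services.GetHardwareServerService(sess).Id(123456)  // hypothetical hardware id

	// Assumption: datatypes.Time embeds time.Time. Only the date portion matters
	// to these methods; the time of day defaults to midnight server-side.
	end := datatypes.Time{Time: time.Now()}
	start := datatypes.Time{Time: time.Now().AddDate(0, 0, -7)}

	// Incoming private-network traffic for the range, measured in gigabytes.
	in, err := svc.GetBackendIncomingBandwidth(&start, &end)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("backend incoming bandwidth over the last week: %v GB\n", in)
}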
+func (r Hardware_Server) GetBillingCyclePrivateBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBillingCyclePrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The raw public bandwidth usage data for the current billing cycle. +func (r Hardware_Server) GetBillingCyclePublicBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBillingCyclePublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for a server. +func (r Hardware_Server) GetBillingItem() (resp datatypes.Billing_Item_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that a billing item exists. +func (r Hardware_Server) GetBillingItemFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBillingItemFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Determine if BIOS password should be left as null. +func (r Hardware_Server) GetBiosPasswordNullFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBiosPasswordNullFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether the hardware is ineligible for cancellation because it is disconnected. +func (r Hardware_Server) GetBlockCancelBecauseDisconnectedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBlockCancelBecauseDisconnectedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Status indicating whether or not a piece of hardware has business continuance insurance. +func (r Hardware_Server) GetBusinessContinuanceInsuranceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBusinessContinuanceInsuranceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Child hardware. +func (r Hardware_Server) GetChildrenHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getChildrenHardware", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_Server) GetComponentDetailsXML() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getComponentDetailsXML", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's components. +func (r Hardware_Server) GetComponents() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getComponents", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetContainsSolidStateDrivesFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getContainsSolidStateDrivesFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A continuous data protection/server backup software component object. +func (r Hardware_Server) GetContinuousDataProtectionSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getContinuousDataProtectionSoftwareComponent", nil, &r.Options, &resp) + return +} + +// Retrieve A server's control panel. 
+func (r Hardware_Server) GetControlPanel() (resp datatypes.Software_Component_ControlPanel, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getControlPanel", nil, &r.Options, &resp) + return +} + +// Retrieve The total cost of a server, measured in US Dollars ($USD). +func (r Hardware_Server) GetCost() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCost", nil, &r.Options, &resp) + return +} + +// +// There are many options that may be provided while ordering a server, this method can be used to determine what these options are. +// +// +// Detailed information on the return value can be found on the data type page for [[SoftLayer_Container_Hardware_Configuration (type)]]. +func (r Hardware_Server) GetCreateObjectOptions() (resp datatypes.Container_Hardware_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCreateObjectOptions", nil, &r.Options, &resp) + return +} + +// Retrieve An object that provides commonly used bandwidth summary components for the current billing cycle. +func (r Hardware_Server) GetCurrentBandwidthSummary() (resp datatypes.Metric_Tracking_Object_Bandwidth_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCurrentBandwidthSummary", nil, &r.Options, &resp) + return +} + +// Attempt to retrieve the file associated with the current benchmark certification result, if such a file exists. If there is no file for this benchmark certification result, calling this method throws an exception. +func (r Hardware_Server) GetCurrentBenchmarkCertificationResultFile() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCurrentBenchmarkCertificationResultFile", nil, &r.Options, &resp) + return +} + +// Retrieve The current billable public outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_Server) GetCurrentBillableBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCurrentBillableBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Get the billing detail for this instance for the current billing period. This does not include bandwidth usage. +func (r Hardware_Server) GetCurrentBillingDetail() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCurrentBillingDetail", nil, &r.Options, &resp) + return +} + +// The '''getCurrentBillingTotal''' method retrieves the total bill amount in US Dollars ($) for the current billing period. In addition to the total bill amount, the billing detail also includes all bandwidth used up to the point the method is called on the piece of hardware. +func (r Hardware_Server) GetCurrentBillingTotal() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCurrentBillingTotal", nil, &r.Options, &resp) + return +} + +// Retrieve bandwidth graph by date. 
+func (r Hardware_Server) GetCustomBandwidthDataByDate(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) {
+ params := []interface{}{
+ graphData,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCustomBandwidthDataByDate", params, &r.Options, &resp)
+ return
+}
+
+// Retrieve Indicates if a server has a Customer Installed OS
+func (r Hardware_Server) GetCustomerInstalledOperatingSystemFlag() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCustomerInstalledOperatingSystemFlag", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Indicates if a server is a customer owned device.
+func (r Hardware_Server) GetCustomerOwnedFlag() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCustomerOwnedFlag", nil, &r.Options, &resp)
+ return
+}
+
+// The '''getDailyAverage''' method calculates the average daily network traffic used by the selected server. Using the required parameter ''dateTime'' to enter a start and end date, the user retrieves this average, measured in gigabytes (GB), for the specified date range. When entering parameters, only the month, day and year are required - time entries are omitted as this method defaults the time to midnight in order to account for the entire day.
+func (r Hardware_Server) GetDailyAverage(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+ params := []interface{}{
+ startDate,
+ endDate,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDailyAverage", params, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding the datacenter in which a piece of hardware resides.
+func (r Hardware_Server) GetDatacenter() (resp datatypes.Location, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDatacenter", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve The name of the datacenter in which a piece of hardware resides.
+func (r Hardware_Server) GetDatacenterName() (resp string, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDatacenterName", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Number of day(s) a server has been in the spare pool.
+func (r Hardware_Server) GetDaysInSparePool() (resp int, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDaysInSparePool", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve All hardware that has uplink network connections to a piece of hardware.
+func (r Hardware_Server) GetDownlinkHardware() (resp []datatypes.Hardware, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownlinkHardware", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve All hardware that has uplink network connections to a piece of hardware.
+func (r Hardware_Server) GetDownlinkNetworkHardware() (resp []datatypes.Hardware, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownlinkNetworkHardware", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding all servers attached to a piece of network hardware.
+func (r Hardware_Server) GetDownlinkServers() (resp []datatypes.Hardware, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownlinkServers", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding all virtual guests attached to a piece of network hardware.
+func (r Hardware_Server) GetDownlinkVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownlinkVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware downstream from a network device. +func (r Hardware_Server) GetDownstreamHardwareBindings() (resp []datatypes.Network_Component_Uplink_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownstreamHardwareBindings", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware downstream from the selected piece of hardware. +func (r Hardware_Server) GetDownstreamNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownstreamNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware with monitoring warnings or errors that are downstream from the selected piece of hardware. +func (r Hardware_Server) GetDownstreamNetworkHardwareWithIncidents() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownstreamNetworkHardwareWithIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all servers attached downstream to a piece of network hardware. +func (r Hardware_Server) GetDownstreamServers() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownstreamServers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all virtual guests attached to a piece of network hardware. +func (r Hardware_Server) GetDownstreamVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownstreamVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The drive controllers contained within a piece of hardware. +func (r Hardware_Server) GetDriveControllers() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDriveControllers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated EVault network storage service account. +func (r Hardware_Server) GetEvaultNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getEvaultNetworkStorage", nil, &r.Options, &resp) + return +} + +// Get the subnets associated with this server that are protectable by a network component firewall. +func (r Hardware_Server) GetFirewallProtectableSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFirewallProtectableSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's firewall services. +func (r Hardware_Server) GetFirewallServiceComponent() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFirewallServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Defines the fixed components in a fixed configuration bare metal server. +func (r Hardware_Server) GetFixedConfigurationPreset() (resp datatypes.Product_Package_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFixedConfigurationPreset", nil, &r.Options, &resp) + return +} + +// Use this method to return an array of public bandwidth utilization records between a given date range. 
+//
+// This method represents the NEW version of getFrontendBandwidthUse
+func (r Hardware_Server) GetFrontendBandwidthUsage(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+ params := []interface{}{
+ startDate,
+ endDate,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFrontendBandwidthUsage", params, &r.Options, &resp)
+ return
+}
+
+// Use this method to return an array of public bandwidth utilization records between a given date range.
+func (r Hardware_Server) GetFrontendBandwidthUse(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Network_Bandwidth_Version1_Usage_Detail, err error) {
+ params := []interface{}{
+ startDate,
+ endDate,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFrontendBandwidthUse", params, &r.Options, &resp)
+ return
+}
+
+// The '''getFrontendIncomingBandwidth''' method retrieves the amount of incoming public network traffic used by a server between the given start and end date parameters. When entering the ''dateTime'' parameter, only the month, day and year of the start and end dates are required - the time (hour, minute and second) is set to midnight by default and cannot be changed. The amount of bandwidth retrieved is measured in gigabytes (GB).
+func (r Hardware_Server) GetFrontendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+ params := []interface{}{
+ startDate,
+ endDate,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFrontendIncomingBandwidth", params, &r.Options, &resp)
+ return
+}
+
+// Retrieve A piece of hardware's front-end or public network components.
+func (r Hardware_Server) GetFrontendNetworkComponents() (resp []datatypes.Network_Component, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFrontendNetworkComponents", nil, &r.Options, &resp)
+ return
+}
+
+// The '''getFrontendOutgoingBandwidth''' method retrieves the amount of outgoing public network traffic used by a server between the given start and end date parameters. The ''dateTime'' parameter requires only the day, month and year to be entered - the time (hour, minute and second) is set to midnight by default in order to gather the data for the entire start and end date indicated in the parameter. The amount of bandwidth retrieved is measured in gigabytes (GB).
+func (r Hardware_Server) GetFrontendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+ params := []interface{}{
+ startDate,
+ endDate,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFrontendOutgoingBandwidth", params, &r.Options, &resp)
+ return
+}
+
+// Retrieve A hardware's frontend or public router.
+func (r Hardware_Server) GetFrontendRouters() (resp []datatypes.Hardware, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFrontendRouters", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve A hardware's universally unique identifier.
+func (r Hardware_Server) GetGlobalIdentifier() (resp string, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getGlobalIdentifier", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve The hard drives contained within a piece of hardware.
+func (r Hardware_Server) GetHardDrives() (resp []datatypes.Hardware_Component, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHardDrives", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve a server by searching for the primary IP address.
+func (r Hardware_Server) GetHardwareByIpAddress(ipAddress *string) (resp datatypes.Hardware_Server, err error) {
+ params := []interface{}{
+ ipAddress,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHardwareByIpAddress", params, &r.Options, &resp)
+ return
+}
+
+// Retrieve The chassis that a piece of hardware is housed in.
+func (r Hardware_Server) GetHardwareChassis() (resp datatypes.Hardware_Chassis, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHardwareChassis", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve A hardware's function.
+func (r Hardware_Server) GetHardwareFunction() (resp datatypes.Hardware_Function, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHardwareFunction", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve A hardware's function.
+func (r Hardware_Server) GetHardwareFunctionDescription() (resp string, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHardwareFunctionDescription", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve A hardware's status.
+func (r Hardware_Server) GetHardwareStatus() (resp datatypes.Hardware_Status, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHardwareStatus", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Determine if hardware has a Single Root IO Virtualization (SR-IOV) billing item.
+func (r Hardware_Server) GetHasSingleRootVirtualizationBillingItemFlag() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHasSingleRootVirtualizationBillingItemFlag", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Determine if the hardware object has TPM enabled.
+func (r Hardware_Server) GetHasTrustedPlatformModuleBillingItemFlag() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHasTrustedPlatformModuleBillingItemFlag", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding a host IPS software component object.
+func (r Hardware_Server) GetHostIpsSoftwareComponent() (resp datatypes.Software_Component, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHostIpsSoftwareComponent", nil, &r.Options, &resp)
+ return
+}
+
+// The '''getHourlyBandwidth''' method retrieves all bandwidth updates hourly for the specified hardware. Because the potential number of data points can become excessive, the method limits the user to obtain data in 24-hour intervals. The required ''dateTime'' parameter is used as the starting point for the query and will be calculated for the 24-hour period starting with the specified date and time. For example, entering a parameter of
+//
+// '02/01/2008 0:00'
+//
+// results in a return of all bandwidth data for the entire day of February 1, 2008, as 0:00 specifies a midnight start date. Please note that the time entered should be completed using a 24-hour clock (military time, astronomical time).
+//
+// For data spanning more than a single 24-hour period, refer to the getBandwidthData function on the metricTrackingObject for the piece of hardware.
+func (r Hardware_Server) GetHourlyBandwidth(mode *string, day *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + mode, + day, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHourlyBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A server's hourly billing status. +func (r Hardware_Server) GetHourlyBillingFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHourlyBillingFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The sum of all the inbound network traffic data for the last 30 days. +func (r Hardware_Server) GetInboundBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getInboundBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total private inbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_Server) GetInboundPrivateBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getInboundPrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public inbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_Server) GetInboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getInboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Determine if hardware object has the IBM_CLOUD_READY_NODE_CERTIFIED attribute. +func (r Hardware_Server) GetIsCloudReadyNodeCertified() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getIsCloudReadyNodeCertified", nil, &r.Options, &resp) + return +} + +// Return a collection of SoftLayer_Item_Price objects from a collection of SoftLayer_Software_Description +func (r Hardware_Server) GetItemPricesFromSoftwareDescriptions(softwareDescriptions []datatypes.Software_Description, includeTranslationsFlag *bool, returnAllPricesFlag *bool) (resp []datatypes.Product_Item, err error) { + params := []interface{}{ + softwareDescriptions, + includeTranslationsFlag, + returnAllPricesFlag, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getItemPricesFromSoftwareDescriptions", params, &r.Options, &resp) + return +} + +// Retrieve The last transaction that a server's operating system was loaded. +func (r Hardware_Server) GetLastOperatingSystemReload() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getLastOperatingSystemReload", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the last transaction a server performed. +func (r Hardware_Server) GetLastTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getLastTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's latest network monitoring incident. +func (r Hardware_Server) GetLatestNetworkMonitorIncident() (resp datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getLatestNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// Retrieve Where a piece of hardware is located within SoftLayer's location hierarchy. 
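A hedged sketch of how GetHourlyBandwidth (defined above) might be called, reflecting the 24-hour window anchored at the supplied date that the generated comment describes. It is not part of the vendored diff; the credentials and server ID are placeholders, and the "public" mode string is an assumption since the generated documentation does not enumerate the accepted mode values.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials
	server := services.GetHardwareServerService(sess).Id(123456) // placeholder server ID

	// The day parameter anchors a 24-hour window starting at the given date;
	// midnight is used here explicitly, matching the documented behaviour.
	day := datatypes.Time{Time: time.Date(2020, time.October, 1, 0, 0, 0, 0, time.UTC)}
	mode := "public" // assumed mode string; not spelled out in the generated docs

	hourly, err := server.GetHourlyBandwidth(&mode, &day)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("got %d hourly data points for %s\n", len(hourly), day.Format("2006-01-02"))
}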
+func (r Hardware_Server) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetLocationPathString() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getLocationPathString", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a lockbox account associated with a server. +func (r Hardware_Server) GetLockboxNetworkStorage() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getLockboxNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the hardware is a managed resource. +func (r Hardware_Server) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve the remote management network component attached with this server. +func (r Hardware_Server) GetManagementNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getManagementNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's memory. +func (r Hardware_Server) GetMemory() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMemory", nil, &r.Options, &resp) + return +} + +// Retrieve The amount of memory a piece of hardware has, measured in gigabytes. +func (r Hardware_Server) GetMemoryCapacity() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMemoryCapacity", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's metric tracking object. +func (r Hardware_Server) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object_HardwareServer, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Retrieve The metric tracking object id for this server. +func (r Hardware_Server) GetMetricTrackingObjectId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMetricTrackingObjectId", nil, &r.Options, &resp) + return +} + +// Returns open monitoring alarms for a given time period +func (r Hardware_Server) GetMonitoringActiveAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringActiveAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the monitoring agents associated with a piece of hardware. 
+func (r Hardware_Server) GetMonitoringAgents() (resp []datatypes.Monitoring_Agent, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringAgents", nil, &r.Options, &resp) + return +} + +// Returns closed monitoring alarms for a given time period +func (r Hardware_Server) GetMonitoringClosedAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringClosedAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's monitoring robot. +func (r Hardware_Server) GetMonitoringRobot() (resp datatypes.Monitoring_Robot, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringRobot", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitoring services. +func (r Hardware_Server) GetMonitoringServiceComponent() (resp datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The monitoring service flag eligibility status for a piece of hardware. +func (r Hardware_Server) GetMonitoringServiceEligibilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringServiceEligibilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The service flag status for a piece of hardware. +func (r Hardware_Server) GetMonitoringServiceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringServiceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The monitoring notification objects for this hardware. Each object links this hardware instance to a user account that will be notified if monitoring on this hardware object fails +func (r Hardware_Server) GetMonitoringUserNotification() (resp []datatypes.User_Customer_Notification_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringUserNotification", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's motherboard. +func (r Hardware_Server) GetMotherboard() (resp datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMotherboard", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network cards. +func (r Hardware_Server) GetNetworkCards() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkCards", nil, &r.Options, &resp) + return +} + +// Get the IP addresses associated with this server that are protectable by a network component firewall. Note, this may not return all values for IPv6 subnets for this server. Please use getFirewallProtectableSubnets to get all protectable subnets. +func (r Hardware_Server) GetNetworkComponentFirewallProtectableIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkComponentFirewallProtectableIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve Returns a hardware's network components. 
+func (r Hardware_Server) GetNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The gateway member if this device is part of a network gateway. +func (r Hardware_Server) GetNetworkGatewayMember() (resp datatypes.Network_Gateway_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkGatewayMember", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not this device is part of a network gateway. +func (r Hardware_Server) GetNetworkGatewayMemberFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkGatewayMemberFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's network management IP address. +func (r Hardware_Server) GetNetworkManagementIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkManagementIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve All servers with failed monitoring that are attached downstream to a piece of hardware. +func (r Hardware_Server) GetNetworkMonitorAttachedDownHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkMonitorAttachedDownHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Virtual guests that are attached downstream to a hardware that have failed monitoring +func (r Hardware_Server) GetNetworkMonitorAttachedDownVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkMonitorAttachedDownVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The status of all of a piece of hardware's network monitoring incidents. +func (r Hardware_Server) GetNetworkMonitorIncidents() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkMonitorIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitors. +func (r Hardware_Server) GetNetworkMonitors() (resp []datatypes.Network_Monitor_Version1_Query_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkMonitors", nil, &r.Options, &resp) + return +} + +// Retrieve The value of a hardware's network status attribute. +func (r Hardware_Server) GetNetworkStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's related network status attribute. +func (r Hardware_Server) GetNetworkStatusAttribute() (resp datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkStatusAttribute", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated network storage service account. +func (r Hardware_Server) GetNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The network virtual LANs (VLANs) associated with a piece of hardware's network components. 
+func (r Hardware_Server) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkVlans", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth for the next billing cycle (measured in GB). +func (r Hardware_Server) GetNextBillingCycleBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNextBillingCycleBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetNotesHistory() (resp []datatypes.Hardware_Note, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNotesHistory", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Hardware_Server object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Hardware service. You can only retrieve servers from the account that your portal user is assigned to. +func (r Hardware_Server) GetObject() (resp datatypes.Hardware_Server, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve An open ticket requesting cancellation of this server, if one exists. +func (r Hardware_Server) GetOpenCancellationTicket() (resp datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getOpenCancellationTicket", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's operating system. +func (r Hardware_Server) GetOperatingSystem() (resp datatypes.Software_Component_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getOperatingSystem", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's operating system software description. +func (r Hardware_Server) GetOperatingSystemReferenceCode() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getOperatingSystemReferenceCode", nil, &r.Options, &resp) + return +} + +// Retrieve The sum of all the outbound network traffic data for the last 30 days. +func (r Hardware_Server) GetOutboundBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getOutboundBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total private outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_Server) GetOutboundPrivateBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getOutboundPrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_Server) GetOutboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getOutboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the bandwidth usage for this hardware for the current billing cycle exceeds the allocation. +func (r Hardware_Server) GetOverBandwidthAllocationFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getOverBandwidthAllocationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve a server's hardware state via its internal sensors. Remote sensor data is transmitted to the SoftLayer API by way of the server's remote management card. 
Sensor data measures system temperatures, voltages, and other local server settings. Sensor data is cached for 30 seconds. Calls made to getSensorData for the same server within 30 seconds of each other will return the same data. Subsequent calls will return new data once the cache expires. +func (r Hardware_Server) GetPMInfo() (resp []datatypes.Container_RemoteManagement_PmInfo, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPMInfo", nil, &r.Options, &resp) + return +} + +// Retrieve Blade Bay +func (r Hardware_Server) GetParentBay() (resp datatypes.Hardware_Blade, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getParentBay", nil, &r.Options, &resp) + return +} + +// Retrieve Parent Hardware. +func (r Hardware_Server) GetParentHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getParentHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the Point of Presence (PoP) location in which a piece of hardware resides. +func (r Hardware_Server) GetPointOfPresenceLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPointOfPresenceLocation", nil, &r.Options, &resp) + return +} + +// Retrieve The power components for a hardware object. +func (r Hardware_Server) GetPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPowerComponents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's power supply. +func (r Hardware_Server) GetPowerSupply() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPowerSupply", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's primary private IP address. +func (r Hardware_Server) GetPrimaryBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrimaryBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's primary back-end network component. +func (r Hardware_Server) GetPrimaryBackendNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrimaryBackendNetworkComponent", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_Server) GetPrimaryDriveSize() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrimaryDriveSize", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's primary public IP address. +func (r Hardware_Server) GetPrimaryIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrimaryIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's primary public network component. +func (r Hardware_Server) GetPrimaryNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrimaryNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's private network bandwidth usage over the specified timeframe. If no timeframe is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPrivateBandwidthGraphImage returns a PNG image measuring 827 pixels by 293 pixels. 
+func (r Hardware_Server) GetPrivateBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateBandwidthData", params, &r.Options, &resp) + return +} + +// Retrieve a brief summary of a server's private network bandwidth usage. getPrivateBandwidthDataSummary retrieves a server's bandwidth allocation for its billing period, its estimated usage during its billing period, and an estimation of how much bandwidth it will use during its billing period based on its current usage. A server's projected bandwidth usage increases in accuracy as it progresses through its billing period. +func (r Hardware_Server) GetPrivateBandwidthDataSummary() (resp datatypes.Container_Network_Bandwidth_Data_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateBandwidthDataSummary", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's private network bandwidth usage over the specified time frame. If no time frame is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPublicBandwidthGraphImage returns a PNG image +func (r Hardware_Server) GetPrivateBandwidthGraphImage(startTime *string, endTime *string) (resp []byte, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateBandwidthGraphImage", params, &r.Options, &resp) + return +} + +// Retrieve A server's primary private IP address. +func (r Hardware_Server) GetPrivateIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve the private network component attached with this server. +func (r Hardware_Server) GetPrivateNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the hardware only has access to the private network. 
+func (r Hardware_Server) GetPrivateNetworkOnlyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateNetworkOnlyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve the backend VLAN for the primary IP address of the server +func (r Hardware_Server) GetPrivateVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateVlan", nil, &r.Options, &resp) + return +} + +// Retrieve a backend network VLAN by searching for an IP address +func (r Hardware_Server) GetPrivateVlanByIpAddress(ipAddress *string) (resp datatypes.Network_Vlan, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateVlanByIpAddress", params, &r.Options, &resp) + return +} + +// Retrieve The total number of processor cores, summed from all processors that are attached to a piece of hardware +func (r Hardware_Server) GetProcessorCoreAmount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getProcessorCoreAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total number of physical processor cores, summed from all processors that are attached to a piece of hardware +func (r Hardware_Server) GetProcessorPhysicalCoreAmount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getProcessorPhysicalCoreAmount", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's processors. +func (r Hardware_Server) GetProcessors() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getProcessors", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the bandwidth usage for this hardware for the current billing cycle is projected to exceed the allocation. +func (r Hardware_Server) GetProjectedOverBandwidthAllocationFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getProjectedOverBandwidthAllocationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The projected public outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_Server) GetProjectedPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getProjectedPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_Server) GetProvisionDate() (resp datatypes.Time, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getProvisionDate", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's public network bandwidth usage over the specified timeframe. If no timeframe is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPublicBandwidthGraphImage returns a PNG image measuring 827 pixels by 293 pixels. +func (r Hardware_Server) GetPublicBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPublicBandwidthData", params, &r.Options, &resp) + return +} + +// Retrieve a brief summary of a server's public network bandwidth usage. 
getPublicBandwidthDataSummary retrieves a server's bandwidth allocation for its billing period, its estimated usage during its billing period, and an estimation of how much bandwidth it will use during its billing period based on its current usage. A server's projected bandwidth usage increases in accuracy as it progresses through its billing period. +func (r Hardware_Server) GetPublicBandwidthDataSummary() (resp datatypes.Container_Network_Bandwidth_Data_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPublicBandwidthDataSummary", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's public network bandwidth usage over the specified time frame. If no time frame is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPublicBandwidthGraphImage returns a PNG image measuring 827 pixels by 293 pixels. THIS METHOD GENERATES GRAPHS BASED ON THE NEW DATA WAREHOUSE REPOSITORY. +func (r Hardware_Server) GetPublicBandwidthGraphImage(startTime *datatypes.Time, endTime *datatypes.Time) (resp []byte, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPublicBandwidthGraphImage", params, &r.Options, &resp) + return +} + +// Retrieve the total number of bytes used by a server over a specified time period via the data warehouse tracking objects for this hardware. +func (r Hardware_Server) GetPublicBandwidthTotal(startTime *int, endTime *int) (resp uint, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPublicBandwidthTotal", params, &r.Options, &resp) + return +} + +// Retrieve a SoftLayer server's public network component. Some servers are only connected to the private network and may not have a public network component. In that case getPublicNetworkComponent returns a null object. +func (r Hardware_Server) GetPublicNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPublicNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve the frontend VLAN for the primary IP address of the server +func (r Hardware_Server) GetPublicVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPublicVlan", nil, &r.Options, &resp) + return +} + +// Retrieve the frontend network Vlan by searching the hostname of a server +func (r Hardware_Server) GetPublicVlanByHostname(hostname *string) (resp datatypes.Network_Vlan, err error) { + params := []interface{}{ + hostname, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPublicVlanByHostname", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetRack() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRack", nil, &r.Options, &resp) + return +} + +// Retrieve The RAID controllers contained within a piece of hardware. +func (r Hardware_Server) GetRaidControllers() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRaidControllers", nil, &r.Options, &resp) + return +} + +// Retrieve Determine if hardware object is vSan Ready Node. 
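To make the bandwidth-summary and graph-image methods above concrete, here is an illustrative sketch; it is not part of the vendored file. The credentials, the server ID, and the output filename are placeholders, and it assumes only the vendored session, services, and datatypes packages.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"time"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials
	server := services.GetHardwareServerService(sess).Id(123456) // placeholder server ID

	// Billing-cycle summary: allocation, usage so far, and projected usage.
	summary, err := server.GetPublicBandwidthDataSummary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("public bandwidth summary: %+v\n", summary)

	// PNG graph of the last week of public bandwidth, written to disk.
	start := datatypes.Time{Time: time.Now().AddDate(0, 0, -7)}
	end := datatypes.Time{Time: time.Now()}
	png, err := server.GetPublicBandwidthGraphImage(&start, &end)
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("public-bandwidth.png", png, 0644); err != nil {
		log.Fatal(err)
	}
}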
+func (r Hardware_Server) GetReadyNodeFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getReadyNodeFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Recent events that impact this hardware. +func (r Hardware_Server) GetRecentEvents() (resp []datatypes.Notification_Occurrence_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRecentEvents", nil, &r.Options, &resp) + return +} + +// Retrieve The last five commands issued to the server's remote management card. +func (r Hardware_Server) GetRecentRemoteManagementCommands() (resp []datatypes.Hardware_Component_RemoteManagement_Command_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRecentRemoteManagementCommands", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetRegionalInternetRegistry() (resp datatypes.Network_Regional_Internet_Registry, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRegionalInternetRegistry", nil, &r.Options, &resp) + return +} + +// Retrieve A server's remote management card. +func (r Hardware_Server) GetRemoteManagement() (resp datatypes.Hardware_Component_RemoteManagement, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRemoteManagement", nil, &r.Options, &resp) + return +} + +// Retrieve User credentials to issue commands and/or interact with the server's remote management card. +func (r Hardware_Server) GetRemoteManagementAccounts() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRemoteManagementAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's associated remote management component. This is normally IPMI. +func (r Hardware_Server) GetRemoteManagementComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRemoteManagementComponent", nil, &r.Options, &resp) + return +} + +// Retrieve User(s) who have access to issue commands and/or interact with the server's remote management card. +func (r Hardware_Server) GetRemoteManagementUsers() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRemoteManagementUsers", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetResourceConfigurations() (resp []datatypes.Hardware_Resource_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getResourceConfigurations", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetResourceGroupMemberReferences() (resp []datatypes.Resource_Group_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getResourceGroupMemberReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetResourceGroupRoles() (resp []datatypes.Resource_Group_Role, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getResourceGroupRoles", nil, &r.Options, &resp) + return +} + +// Retrieve The resource groups in which this hardware is a member. +func (r Hardware_Server) GetResourceGroups() (resp []datatypes.Resource_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getResourceGroups", nil, &r.Options, &resp) + return +} + +// Retrieve the reverse domain records associated with this server. 
+func (r Hardware_Server) GetReverseDomainRecords() (resp []datatypes.Dns_Domain, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getReverseDomainRecords", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve A hardware's routers.
+func (r Hardware_Server) GetRouters() (resp []datatypes.Hardware, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRouters", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Collection of scale assets this hardware corresponds to.
+func (r Hardware_Server) GetScaleAssets() (resp []datatypes.Scale_Asset, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getScaleAssets", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding a piece of hardware's vulnerability scan requests.
+func (r Hardware_Server) GetSecurityScanRequests() (resp []datatypes.Network_Security_Scanner_Request, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getSecurityScanRequests", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve a server's hardware state via its internal sensors. Remote sensor data is transmitted to the SoftLayer API by way of the server's remote management card. Sensor data measures system temperatures, voltages, and other local server settings. Sensor data is cached for 30 seconds. Calls made to getSensorData for the same server within 30 seconds of each other will return the same data. Subsequent calls will return new data once the cache expires.
+func (r Hardware_Server) GetSensorData() (resp []datatypes.Container_RemoteManagement_SensorReading, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getSensorData", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieves the raw data returned from the server's remote management card. For more details of what is returned please refer to the getSensorData method. Along with the raw data, graphs for the CPU and system temperatures and fan speeds are also returned.
+func (r Hardware_Server) GetSensorDataWithGraphs() (resp datatypes.Container_RemoteManagement_SensorReadingsWithGraphs, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getSensorDataWithGraphs", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve a server's hardware components, software, and network components. getServerDetails is an aggregation function that combines the results of [[SoftLayer_Hardware_Server::getComponents]], [[SoftLayer_Hardware_Server::getSoftware]], and [[SoftLayer_Hardware_Server::getNetworkComponents]] in a single container.
+func (r Hardware_Server) GetServerDetails() (resp datatypes.Container_Hardware_Server_Details, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getServerDetails", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve the server's fan speeds and display them using tachometer graphs. Data used to construct graphs is retrieved from the server's remote management card. All graphs returned will have a title associated with them.
+func (r Hardware_Server) GetServerFanSpeedGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorSpeed, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getServerFanSpeedGraphs", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieves the power state for the server. The server's power status is retrieved from its remote management card. This will return 'on' or 'off'.
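As a usage illustration for GetSensorData and the 30-second server-side cache noted above, the following sketch polls the sensor readings once. It is an editorial example rather than part of the vendored diff; the credentials and server ID are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials
	server := services.GetHardwareServerService(sess).Id(123456) // placeholder server ID

	// Readings are served from a 30-second cache on the SoftLayer side, so
	// polling more frequently than that just returns the same snapshot.
	readings, err := server.GetSensorData()
	if err != nil {
		log.Fatal(err)
	}
	for _, reading := range readings {
		fmt.Printf("%+v\n", reading)
	}
}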
+func (r Hardware_Server) GetServerPowerState() (resp string, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getServerPowerState", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding the server room in which the hardware is located.
+func (r Hardware_Server) GetServerRoom() (resp datatypes.Location, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getServerRoom", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve the server's temperatures and display them using thermometer graphs. Temperatures retrieved are CPU(s) and system temperatures. Data used to construct graphs is retrieved from the server's remote management card. All graphs returned will have a title associated with them.
+func (r Hardware_Server) GetServerTemperatureGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorTemperature, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getServerTemperatureGraphs", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding the piece of hardware's service provider.
+func (r Hardware_Server) GetServiceProvider() (resp datatypes.Service_Provider, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getServiceProvider", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding a piece of hardware's installed software.
+func (r Hardware_Server) GetSoftwareComponents() (resp []datatypes.Software_Component, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getSoftwareComponents", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Determine if hardware object has Software Guard Extension (SGX) enabled.
+func (r Hardware_Server) GetSoftwareGuardExtensionEnabled() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getSoftwareGuardExtensionEnabled", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding the billing item for a spare pool server.
+func (r Hardware_Server) GetSparePoolBillingItem() (resp datatypes.Billing_Item_Hardware, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getSparePoolBillingItem", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve SSH keys to be installed on the server during provisioning or an OS reload.
+func (r Hardware_Server) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getSshKeys", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve A server's remote management card used for statistics.
+func (r Hardware_Server) GetStatisticsRemoteManagement() (resp datatypes.Hardware_Component_RemoteManagement, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getStatisticsRemoteManagement", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetStorageNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getStorageNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetTagReferences() (resp []datatypes.Tag_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getTagReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetTopLevelLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getTopLevelLocation", nil, &r.Options, &resp) + return +} + +// +// This method will query transaction history for a piece of hardware. +func (r Hardware_Server) GetTransactionHistory() (resp []datatypes.Provisioning_Version1_Transaction_History, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getTransactionHistory", nil, &r.Options, &resp) + return +} + +// Retrieve a list of upgradeable items available to this piece of hardware. Currently, getUpgradeItemPrices retrieves upgrades available for a server's memory, hard drives, network port speed, bandwidth allocation and GPUs. +func (r Hardware_Server) GetUpgradeItemPrices() (resp []datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getUpgradeItemPrices", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated upgrade request object, if any. +func (r Hardware_Server) GetUpgradeRequest() (resp datatypes.Product_Upgrade_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getUpgradeRequest", nil, &r.Options, &resp) + return +} + +// Retrieve The network device connected to a piece of hardware. +func (r Hardware_Server) GetUplinkHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getUplinkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the network component that is one level higher than a piece of hardware on the network infrastructure. +func (r Hardware_Server) GetUplinkNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getUplinkNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve An array containing a single string of custom user data for a hardware order. Max size is 16 kb. +func (r Hardware_Server) GetUserData() (resp []datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getUserData", nil, &r.Options, &resp) + return +} + +// Retrieve A list of users that have access to this computing instance. +func (r Hardware_Server) GetUsers() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getUsers", nil, &r.Options, &resp) + return +} + +// This method will return the list of block device template groups that are valid to the host. For instance, it will only retrieve FLEX images. 
+func (r Hardware_Server) GetValidBlockDeviceTemplateGroups(visibility *string) (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + params := []interface{}{ + visibility, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getValidBlockDeviceTemplateGroups", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis for a piece of hardware. +func (r Hardware_Server) GetVirtualChassis() (resp datatypes.Hardware_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualChassis", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis siblings for a piece of hardware. +func (r Hardware_Server) GetVirtualChassisSiblings() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualChassisSiblings", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware server's virtual servers. +func (r Hardware_Server) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's virtual host record. +func (r Hardware_Server) GetVirtualHost() (resp datatypes.Virtual_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualHost", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's virtual software licenses. +func (r Hardware_Server) GetVirtualLicenses() (resp []datatypes.Software_VirtualLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualLicenses", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the bandwidth allotment to which a piece of hardware belongs. +func (r Hardware_Server) GetVirtualRack() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualRack", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the bandwidth allotment belonging to a piece of hardware. +func (r Hardware_Server) GetVirtualRackId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualRackId", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the bandwidth allotment belonging to a piece of hardware. +func (r Hardware_Server) GetVirtualRackName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualRackName", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's virtualization platform software. +func (r Hardware_Server) GetVirtualizationPlatform() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualizationPlatform", nil, &r.Options, &resp) + return +} + +// Retrieve a list of Windows updates available for a server from the local SoftLayer Windows Server Update Services (WSUS) server. Windows servers provisioned by SoftLayer are configured to use the local WSUS server via the private network by default. 
+func (r Hardware_Server) GetWindowsUpdateAvailableUpdates() (resp []datatypes.Container_Utility_Microsoft_Windows_UpdateServices_UpdateItem, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getWindowsUpdateAvailableUpdates", nil, &r.Options, &resp) + return +} + +// Retrieve a list of Windows updates installed on a server as reported by the local SoftLayer Windows Server Update Services (WSUS) server. Windows servers provisioned by SoftLayer are configured to use the local WSUS server via the private network by default. +func (r Hardware_Server) GetWindowsUpdateInstalledUpdates() (resp []datatypes.Container_Utility_Microsoft_Windows_UpdateServices_UpdateItem, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getWindowsUpdateInstalledUpdates", nil, &r.Options, &resp) + return +} + +// This method returns an update status record for this server. That record will specify if the server is missing updates, or has updates that must be reinstalled or require a reboot to go into effect. +func (r Hardware_Server) GetWindowsUpdateStatus() (resp datatypes.Container_Utility_Microsoft_Windows_UpdateServices_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getWindowsUpdateStatus", nil, &r.Options, &resp) + return +} + +// The '''importVirtualHost''' method attempts to import the host record for the virtualization platform running on a server. +func (r Hardware_Server) ImportVirtualHost() (resp datatypes.Virtual_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "importVirtualHost", nil, &r.Options, &resp) + return +} + +// Idera Bare Metal Server Restore is a backup agent designed specifically for making full system restores made with Idera Server Backup. +func (r Hardware_Server) InitiateIderaBareMetalRestore() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "initiateIderaBareMetalRestore", nil, &r.Options, &resp) + return +} + +// R1Soft Bare Metal Server Restore is an R1Soft disk agent designed specifically for making full system restores made with R1Soft CDP Server backup. +func (r Hardware_Server) InitiateR1SoftBareMetalRestore() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "initiateR1SoftBareMetalRestore", nil, &r.Options, &resp) + return +} + +// Issues a ping command and returns the success (true) or failure (false) of the ping command. +func (r Hardware_Server) IsBackendPingable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "isBackendPingable", nil, &r.Options, &resp) + return +} + +// Issues a ping command and returns the success (true) or failure (false) of the ping command. +func (r Hardware_Server) IsPingable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "isPingable", nil, &r.Options, &resp) + return +} + +// Determine if the server runs any version of the Microsoft Windows operating systems. Return ''true'' if it does and ''false'' otherwise. +func (r Hardware_Server) IsWindowsServer() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "isWindowsServer", nil, &r.Options, &resp) + return +} + +// You can launch firmware updates by selecting from your server list. It will bring your server offline for approximately 20 minutes while the updates are in progress. +// +// In the event of a hardware failure during this test, our datacenter engineers will be notified of the problem automatically.
They will then replace any failed components to bring your server back online, and will be contacting you to ensure that impact on your server is minimal. +func (r Hardware_Server) MassFirmwareUpdate(hardwareIds []int, ipmi *bool, raidController *bool, bios *bool, harddrive *bool) (resp []datatypes.Container_Hardware_Server_Request, err error) { + params := []interface{}{ + hardwareIds, + ipmi, + raidController, + bios, + harddrive, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "massFirmwareUpdate", params, &r.Options, &resp) + return +} + +// Reloads current or customer specified operating system configuration. +// +// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server. +// +// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the server to the current specifications on record. +// +// The reload will take AT MINIMUM 66 minutes. +func (r Hardware_Server) MassReloadOperatingSystem(hardwareIds []string, token *string, config *datatypes.Container_Hardware_Server_Configuration) (resp string, err error) { + params := []interface{}{ + hardwareIds, + token, + config, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "massReloadOperatingSystem", params, &r.Options, &resp) + return +} + +// The ability to place multiple bare metal servers in a state where they are powered down and ports closed yet still allocated to the customer as a part of the Spare Pool program. +func (r Hardware_Server) MassSparePool(hardwareIds []string, action *string, newOrder *bool) (resp []datatypes.Container_Hardware_Server_Request, err error) { + params := []interface{}{ + hardwareIds, + action, + newOrder, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "massSparePool", params, &r.Options, &resp) + return +} + +// Issues a ping command to the server and returns the ping response. +func (r Hardware_Server) Ping() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "ping", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_Server) PopulateServerRam(ramSerialString *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + ramSerialString, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "populateServerRam", params, &r.Options, &resp) + return +} + +// Power off then power on the server via powerstrip. The power cycle command is equivalent to unplugging the server from the powerstrip and then plugging the server back into the powerstrip. This should only be used as a last resort. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_Server) PowerCycle() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "powerCycle", nil, &r.Options, &resp) + return +} + +// This method will power off the server via the server's remote management card. 
+func (r Hardware_Server) PowerOff() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "powerOff", nil, &r.Options, &resp) + return +} + +// Power on server via its remote management card. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_Server) PowerOn() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "powerOn", nil, &r.Options, &resp) + return +} + +// Attempts to reboot the server by issuing a reset (soft reboot) command to the server's remote management card. If the reset (soft reboot) attempt is unsuccessful, a power cycle command will be issued via the powerstrip. The power cycle command is equivalent to unplugging the server from the powerstrip and then plugging the server back into the powerstrip. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_Server) RebootDefault() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "rebootDefault", nil, &r.Options, &resp) + return +} + +// Reboot the server by issuing a cycle command to the server's remote management card. This is equivalent to pressing the 'Reset' button on the server. This command is issued immediately and will not wait for processes to shutdown. After this command is issued, the server may take a few moments to boot up as server may run system disks checks. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_Server) RebootHard() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "rebootHard", nil, &r.Options, &resp) + return +} + +// Reboot the server by issuing a reset command to the server's remote management card. This is a graceful reboot. The servers will allow all process to shutdown gracefully before rebooting. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_Server) RebootSoft() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "rebootSoft", nil, &r.Options, &resp) + return +} + +// Reloads current operating system configuration. +// +// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server. +// +// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the server to the current specifications on record. +// +// The reload will take AT MINIMUM 66 minutes. 
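// A minimal, illustrative sketch (not part of the generated bindings) of the
// reload confirmation protocol described above, using the
// ReloadCurrentOperatingSystemConfiguration call defined immediately below.
// The credentials and the hardware id are placeholders.
package main

import (
    "fmt"
    "log"

    "github.com/softlayer/softlayer-go/services"
    "github.com/softlayer/softlayer-go/session"
)

func main() {
    sess := session.New("set-username", "set-api-key")
    server := services.GetHardwareServerService(sess).Id(123456) // placeholder id

    // Calling the service without a token returns a confirmation token that
    // remains active for 10 minutes.
    token, err := server.ReloadCurrentOperatingSystemConfiguration(nil)
    if err != nil {
        log.Fatal(err)
    }

    // Passing that token back (or the literal "FORCE" to skip confirmation)
    // actually starts the reload.
    resp, err := server.ReloadCurrentOperatingSystemConfiguration(&token)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(resp)
}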
+func (r Hardware_Server) ReloadCurrentOperatingSystemConfiguration(token *string) (resp string, err error) { + params := []interface{}{ + token, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "reloadCurrentOperatingSystemConfiguration", params, &r.Options, &resp) + return +} + +// Reloads current or customer specified operating system configuration. +// +// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server. +// +// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the server to the current specifications on record. +// +// The reload will take AT MINIMUM 66 minutes. +func (r Hardware_Server) ReloadOperatingSystem(token *string, config *datatypes.Container_Hardware_Server_Configuration) (resp string, err error) { + params := []interface{}{ + token, + config, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "reloadOperatingSystem", params, &r.Options, &resp) + return +} + +// This method is used to remove access to a SoftLayer_Network_Storage volume that supports host- or network-level access control. +func (r Hardware_Server) RemoveAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "removeAccessToNetworkStorage", params, &r.Options, &resp) + return +} + +// This method is used to remove access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control. +func (r Hardware_Server) RemoveAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "removeAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// You can launch a new Passmark hardware test by selecting from your server list. It will bring your server offline for approximately 20 minutes while the testing is in progress, and will publish a certificate with the results to your hardware details page. +// +// While the hard drives are tested for the initial deployment, the Passmark Certificate utility will not test the hard drives on your live server. This is to ensure that no data is overwritten. If you would like to test the server's hard drives, you can have the full Passmark suite installed to your server free of charge through a new Support ticket. +// +// While the test itself does not overwrite any data on the server, it is recommended that you make full off-server backups of all data prior to launching the test. The Passmark hardware test is designed to force any latent hardware issues to the surface, so hardware failure is possible. +// +// In the event of a hardware failure during this test, our datacenter engineers will be notified of the problem automatically. They will then replace any failed components to bring your server back online, and will be contacting you to ensure that impact on your server is minimal.
+func (r Hardware_Server) RunPassmarkCertificationBenchmark() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "runPassmarkCertificationBenchmark", nil, &r.Options, &resp) + return +} + +// Changes the password that we have stored in our database for a server's Operating System +func (r Hardware_Server) SetOperatingSystemPassword(newPassword *string) (resp bool, err error) { + params := []interface{}{ + newPassword, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "setOperatingSystemPassword", params, &r.Options, &resp) + return +} + +// Sets the private network interface speed to the new speed. Speed values can only be 0 (Disconnect), 10, 100, 1000, and 10000. The new speed must be equal to or less than the max speed of the interface. +// +// It will take less than a minute to update the switch port speed. The server uplink will not be operational again until the server interface speed is updated. +func (r Hardware_Server) SetPrivateNetworkInterfaceSpeed(newSpeed *int) (resp bool, err error) { + params := []interface{}{ + newSpeed, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "setPrivateNetworkInterfaceSpeed", params, &r.Options, &resp) + return +} + +// Sets the public network interface speed to the new speed. Speed values can only be 0 (Disconnect), 10, 100, 1000, and 10000. The new speed must be equal to or less than the max speed of the interface. +// +// It will take less than a minute to update the switch port speed. The server uplink will not be operational again until the server interface speed is updated. +func (r Hardware_Server) SetPublicNetworkInterfaceSpeed(newSpeed *int) (resp bool, err error) { + params := []interface{}{ + newSpeed, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "setPublicNetworkInterfaceSpeed", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_Server) SetTags(tags *string) (resp bool, err error) { + params := []interface{}{ + tags, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "setTags", params, &r.Options, &resp) + return +} + +// Sets the data that will be written to the configuration drive. +func (r Hardware_Server) SetUserMetadata(metadata []string) (resp []datatypes.Hardware_Attribute, err error) { + params := []interface{}{ + metadata, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "setUserMetadata", params, &r.Options, &resp) + return +} + +// Shuts down the private network port +func (r Hardware_Server) ShutdownPrivatePort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "shutdownPrivatePort", nil, &r.Options, &resp) + return +} + +// Shuts down the public network port +func (r Hardware_Server) ShutdownPublicPort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "shutdownPublicPort", nil, &r.Options, &resp) + return +} + +// The ability to place bare metal servers in a state where they are powered down and ports closed yet still allocated to the customer as a part of the Spare Pool program. +func (r Hardware_Server) SparePool(action *string, newOrder *bool) (resp bool, err error) { + params := []interface{}{ + action, + newOrder, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "sparePool", params, &r.Options, &resp) + return +} + +// This method will update the root IPMI password on this SoftLayer_Hardware.
+func (r Hardware_Server) UpdateIpmiPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "updateIpmiPassword", params, &r.Options, &resp) + return +} + +// Validates a collection of partitions for an operating system +func (r Hardware_Server) ValidatePartitionsForOperatingSystem(operatingSystem *datatypes.Software_Description, partitions []datatypes.Hardware_Component_Partition) (resp bool, err error) { + params := []interface{}{ + operatingSystem, + partitions, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "validatePartitionsForOperatingSystem", params, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/layout.go b/vendor/github.com/softlayer/softlayer-go/services/layout.go new file mode 100644 index 000000000000..1446d32b8744 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/layout.go @@ -0,0 +1,512 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// The SoftLayer_Layout_Container contains definitions for default page layouts +type Layout_Container struct { + Session *session.Session + Options sl.Options +} + +// GetLayoutContainerService returns an instance of the Layout_Container SoftLayer service +func GetLayoutContainerService(sess *session.Session) Layout_Container { + return Layout_Container{Session: sess} +} + +func (r Layout_Container) Id(id int) Layout_Container { + r.Options.Id = &id + return r +} + +func (r Layout_Container) Mask(mask string) Layout_Container { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Layout_Container) Filter(filter string) Layout_Container { + r.Options.Filter = filter + return r +} + +func (r Layout_Container) Limit(limit int) Layout_Container { + r.Options.Limit = &limit + return r +} + +func (r Layout_Container) Offset(offset int) Layout_Container { + r.Options.Offset = &offset + return r +} + +// Use this method to retrieve all active layout containers that can be customized. 
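// A short, illustrative sketch (not part of the generated bindings) of the
// request-option helpers defined above: Id, Mask, Filter, Limit and Offset
// each return a copy of the service value with the option applied, so they
// can be chained before issuing a call such as the GetAllObjects call that
// follows. The credentials are placeholders.
package main

import (
    "fmt"
    "log"

    "github.com/softlayer/softlayer-go/services"
    "github.com/softlayer/softlayer-go/session"
)

func main() {
    sess := session.New("set-username", "set-api-key")

    // A bare property list is wrapped as "mask[...]" by the Mask helper when
    // it contains a comma or bracket, as shown in the helper above.
    svc := services.GetLayoutContainerService(sess).
        Mask("id,layoutContainerType").
        Limit(10).
        Offset(0)

    containers, err := svc.GetAllObjects()
    if err != nil {
        log.Fatal(err)
    }
    for _, c := range containers {
        fmt.Println(*c.Id)
    }
}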
+func (r Layout_Container) GetAllObjects() (resp []datatypes.Layout_Container, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Container", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The type of the layout container object +func (r Layout_Container) GetLayoutContainerType() (resp datatypes.Layout_Container_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Container", "getLayoutContainerType", nil, &r.Options, &resp) + return +} + +// Retrieve The layout items assigned to this layout container +func (r Layout_Container) GetLayoutItems() (resp []datatypes.Layout_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Container", "getLayoutItems", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Layout_Container) GetObject() (resp datatypes.Layout_Container, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Container", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Layout_Item contains definitions for default layout items +type Layout_Item struct { + Session *session.Session + Options sl.Options +} + +// GetLayoutItemService returns an instance of the Layout_Item SoftLayer service +func GetLayoutItemService(sess *session.Session) Layout_Item { + return Layout_Item{Session: sess} +} + +func (r Layout_Item) Id(id int) Layout_Item { + r.Options.Id = &id + return r +} + +func (r Layout_Item) Mask(mask string) Layout_Item { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Layout_Item) Filter(filter string) Layout_Item { + r.Options.Filter = filter + return r +} + +func (r Layout_Item) Limit(limit int) Layout_Item { + r.Options.Limit = &limit + return r +} + +func (r Layout_Item) Offset(offset int) Layout_Item { + r.Options.Offset = &offset + return r +} + +// Retrieve The layout preferences assigned to this layout item +func (r Layout_Item) GetLayoutItemPreferences() (resp []datatypes.Layout_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Item", "getLayoutItemPreferences", nil, &r.Options, &resp) + return +} + +// Retrieve The type of the layout item object +func (r Layout_Item) GetLayoutItemType() (resp datatypes.Layout_Item_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Item", "getLayoutItemType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Layout_Item) GetObject() (resp datatypes.Layout_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Item", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Layout_Profile contains the definition of the layout profile +type Layout_Profile struct { + Session *session.Session + Options sl.Options +} + +// GetLayoutProfileService returns an instance of the Layout_Profile SoftLayer service +func GetLayoutProfileService(sess *session.Session) Layout_Profile { + return Layout_Profile{Session: sess} +} + +func (r Layout_Profile) Id(id int) Layout_Profile { + r.Options.Id = &id + return r +} + +func (r Layout_Profile) Mask(mask string) Layout_Profile { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Layout_Profile) Filter(filter string) Layout_Profile { + r.Options.Filter = filter + return r +} + +func (r Layout_Profile) Limit(limit int) Layout_Profile { + 
r.Options.Limit = &limit + return r +} + +func (r Layout_Profile) Offset(offset int) Layout_Profile { + r.Options.Offset = &offset + return r +} + +// This method creates a new layout profile object. +func (r Layout_Profile) CreateObject(templateObject *datatypes.Layout_Profile) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "createObject", params, &r.Options, &resp) + return +} + +// This method deletes an existing layout profile and associated custom preferences +func (r Layout_Profile) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "deleteObject", nil, &r.Options, &resp) + return +} + +// This method edits an existing layout profile object by passing in a modified instance of the object. +func (r Layout_Profile) EditObject(templateObject *datatypes.Layout_Profile) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile) GetLayoutContainers() (resp []datatypes.Layout_Container, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "getLayoutContainers", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile) GetLayoutPreferences() (resp []datatypes.Layout_Profile_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "getLayoutPreferences", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Layout_Profile) GetObject() (resp datatypes.Layout_Profile, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "getObject", nil, &r.Options, &resp) + return +} + +// This method modifies an existing associated [[SoftLayer_Layout_Profile_Preference]] object. If the preference object being modified is a default value object, a new record is created to override the default value. +// +// Only preferences that are assigned to a profile may be updated. Attempts to update a non-existent preference object will result in an exception being thrown. +func (r Layout_Profile) ModifyPreference(templateObject *datatypes.Layout_Profile_Preference) (resp datatypes.Layout_Profile_Preference, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "modifyPreference", params, &r.Options, &resp) + return +} + +// Using this method, multiple [[SoftLayer_Layout_Profile_Preference]] objects may be updated at once. +// +// Refer to [[SoftLayer_Layout_Profile::modifyPreference()]] for more information. 
+func (r Layout_Profile) ModifyPreferences(layoutPreferenceObjects []datatypes.Layout_Profile_Preference) (resp []datatypes.Layout_Profile_Preference, err error) { + params := []interface{}{ + layoutPreferenceObjects, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "modifyPreferences", params, &r.Options, &resp) + return +} + +// no documentation yet +type Layout_Profile_Containers struct { + Session *session.Session + Options sl.Options +} + +// GetLayoutProfileContainersService returns an instance of the Layout_Profile_Containers SoftLayer service +func GetLayoutProfileContainersService(sess *session.Session) Layout_Profile_Containers { + return Layout_Profile_Containers{Session: sess} +} + +func (r Layout_Profile_Containers) Id(id int) Layout_Profile_Containers { + r.Options.Id = &id + return r +} + +func (r Layout_Profile_Containers) Mask(mask string) Layout_Profile_Containers { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Layout_Profile_Containers) Filter(filter string) Layout_Profile_Containers { + r.Options.Filter = filter + return r +} + +func (r Layout_Profile_Containers) Limit(limit int) Layout_Profile_Containers { + r.Options.Limit = &limit + return r +} + +func (r Layout_Profile_Containers) Offset(offset int) Layout_Profile_Containers { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Layout_Profile_Containers) CreateObject(templateObject *datatypes.Layout_Profile_Containers) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Containers", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Layout_Profile_Containers) EditObject(templateObject *datatypes.Layout_Profile_Containers) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Containers", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The container to be contained +func (r Layout_Profile_Containers) GetLayoutContainerType() (resp datatypes.Layout_Container, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Containers", "getLayoutContainerType", nil, &r.Options, &resp) + return +} + +// Retrieve The profile containing this container +func (r Layout_Profile_Containers) GetLayoutProfile() (resp datatypes.Layout_Profile, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Containers", "getLayoutProfile", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Layout_Profile_Containers) GetObject() (resp datatypes.Layout_Profile_Containers, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Containers", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Layout_Profile_Customer struct { + Session *session.Session + Options sl.Options +} + +// GetLayoutProfileCustomerService returns an instance of the Layout_Profile_Customer SoftLayer service +func GetLayoutProfileCustomerService(sess *session.Session) Layout_Profile_Customer { + return Layout_Profile_Customer{Session: sess} +} + +func (r Layout_Profile_Customer) Id(id int) Layout_Profile_Customer { + r.Options.Id = &id + return r +} + +func (r Layout_Profile_Customer) Mask(mask string) Layout_Profile_Customer { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || 
strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Layout_Profile_Customer) Filter(filter string) Layout_Profile_Customer { + r.Options.Filter = filter + return r +} + +func (r Layout_Profile_Customer) Limit(limit int) Layout_Profile_Customer { + r.Options.Limit = &limit + return r +} + +func (r Layout_Profile_Customer) Offset(offset int) Layout_Profile_Customer { + r.Options.Offset = &offset + return r +} + +// This method creates a new layout profile object. +func (r Layout_Profile_Customer) CreateObject(templateObject *datatypes.Layout_Profile) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "createObject", params, &r.Options, &resp) + return +} + +// This method deletes an existing layout profile and associated custom preferences +func (r Layout_Profile_Customer) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "deleteObject", nil, &r.Options, &resp) + return +} + +// This method edits an existing layout profile object by passing in a modified instance of the object. +func (r Layout_Profile_Customer) EditObject(templateObject *datatypes.Layout_Profile) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile_Customer) GetLayoutContainers() (resp []datatypes.Layout_Container, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "getLayoutContainers", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile_Customer) GetLayoutPreferences() (resp []datatypes.Layout_Profile_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "getLayoutPreferences", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Layout_Profile_Customer) GetObject() (resp datatypes.Layout_Profile_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile_Customer) GetUserRecord() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "getUserRecord", nil, &r.Options, &resp) + return +} + +// This method modifies an existing associated [[SoftLayer_Layout_Profile_Preference]] object. If the preference object being modified is a default value object, a new record is created to override the default value. +// +// Only preferences that are assigned to a profile may be updated. Attempts to update a non-existent preference object will result in an exception being thrown. +func (r Layout_Profile_Customer) ModifyPreference(templateObject *datatypes.Layout_Profile_Preference) (resp datatypes.Layout_Profile_Preference, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "modifyPreference", params, &r.Options, &resp) + return +} + +// Using this method, multiple [[SoftLayer_Layout_Profile_Preference]] objects may be updated at once. +// +// Refer to [[SoftLayer_Layout_Profile::modifyPreference()]] for more information. 
+func (r Layout_Profile_Customer) ModifyPreferences(layoutPreferenceObjects []datatypes.Layout_Profile_Preference) (resp []datatypes.Layout_Profile_Preference, err error) { + params := []interface{}{ + layoutPreferenceObjects, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "modifyPreferences", params, &r.Options, &resp) + return +} + +// The SoftLayer_Layout_Profile_Preference contains definitions for layout preferences +type Layout_Profile_Preference struct { + Session *session.Session + Options sl.Options +} + +// GetLayoutProfilePreferenceService returns an instance of the Layout_Profile_Preference SoftLayer service +func GetLayoutProfilePreferenceService(sess *session.Session) Layout_Profile_Preference { + return Layout_Profile_Preference{Session: sess} +} + +func (r Layout_Profile_Preference) Id(id int) Layout_Profile_Preference { + r.Options.Id = &id + return r +} + +func (r Layout_Profile_Preference) Mask(mask string) Layout_Profile_Preference { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Layout_Profile_Preference) Filter(filter string) Layout_Profile_Preference { + r.Options.Filter = filter + return r +} + +func (r Layout_Profile_Preference) Limit(limit int) Layout_Profile_Preference { + r.Options.Limit = &limit + return r +} + +func (r Layout_Profile_Preference) Offset(offset int) Layout_Profile_Preference { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Layout_Profile_Preference) GetLayoutContainer() (resp datatypes.Layout_Container, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Preference", "getLayoutContainer", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile_Preference) GetLayoutItem() (resp datatypes.Layout_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Preference", "getLayoutItem", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile_Preference) GetLayoutPreference() (resp datatypes.Layout_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Preference", "getLayoutPreference", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile_Preference) GetLayoutProfile() (resp datatypes.Layout_Profile, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Preference", "getLayoutProfile", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Layout_Profile_Preference) GetObject() (resp datatypes.Layout_Profile_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Preference", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/locale.go b/vendor/github.com/softlayer/softlayer-go/services/locale.go new file mode 100644 index 000000000000..5a1b55b0fb58 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/locale.go @@ -0,0 +1,225 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Locale struct { + Session *session.Session + Options sl.Options +} + +// GetLocaleService returns an instance of the Locale SoftLayer service +func GetLocaleService(sess *session.Session) Locale { + return Locale{Session: sess} +} + +func (r Locale) Id(id int) Locale { + r.Options.Id = &id + return r +} + +func (r Locale) Mask(mask string) Locale { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Locale) Filter(filter string) Locale { + r.Options.Filter = filter + return r +} + +func (r Locale) Limit(limit int) Locale { + r.Options.Limit = &limit + return r +} + +func (r Locale) Offset(offset int) Locale { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Locale) GetClosestToLanguageTag(languageTag *string) (resp datatypes.Locale, err error) { + params := []interface{}{ + languageTag, + } + err = r.Session.DoRequest("SoftLayer_Locale", "getClosestToLanguageTag", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Locale) GetObject() (resp datatypes.Locale, err error) { + err = r.Session.DoRequest("SoftLayer_Locale", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Locale_Country struct { + Session *session.Session + Options sl.Options +} + +// GetLocaleCountryService returns an instance of the Locale_Country SoftLayer service +func GetLocaleCountryService(sess *session.Session) Locale_Country { + return Locale_Country{Session: sess} +} + +func (r Locale_Country) Id(id int) Locale_Country { + r.Options.Id = &id + return r +} + +func (r Locale_Country) Mask(mask string) Locale_Country { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Locale_Country) Filter(filter string) Locale_Country { + r.Options.Filter = filter + return r +} + +func (r Locale_Country) Limit(limit int) Locale_Country { + r.Options.Limit = &limit + return r +} + +func (r Locale_Country) Offset(offset int) Locale_Country { + r.Options.Offset = &offset + return r +} + +// This method is to get the collection of VAT country codes and VAT ID Regexes. +func (r Locale_Country) GetAllVatCountryCodesAndVatIdRegexes() (resp []datatypes.Container_Collection_Locale_VatCountryCodeAndFormat, err error) { + err = r.Session.DoRequest("SoftLayer_Locale_Country", "getAllVatCountryCodesAndVatIdRegexes", nil, &r.Options, &resp) + return +} + +// Use this method to retrieve a list of countries and locale information available to the current user. +func (r Locale_Country) GetAvailableCountries() (resp []datatypes.Locale_Country, err error) { + err = r.Session.DoRequest("SoftLayer_Locale_Country", "getAvailableCountries", nil, &r.Options, &resp) + return +} + +// Use this method to retrieve a list of countries and locale information such as country code and state/provinces. 
+func (r Locale_Country) GetCountries() (resp []datatypes.Locale_Country, err error) { + err = r.Session.DoRequest("SoftLayer_Locale_Country", "getCountries", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Locale_Country) GetCountriesAndStates(usFirstFlag *bool) (resp []datatypes.Container_Collection_Locale_CountryCode, err error) { + params := []interface{}{ + usFirstFlag, + } + err = r.Session.DoRequest("SoftLayer_Locale_Country", "getCountriesAndStates", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Locale_Country) GetObject() (resp datatypes.Locale_Country, err error) { + err = r.Session.DoRequest("SoftLayer_Locale_Country", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve States that belong to this country. +func (r Locale_Country) GetStates() (resp []datatypes.Locale_StateProvince, err error) { + err = r.Session.DoRequest("SoftLayer_Locale_Country", "getStates", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Locale_Country) IsEuropeanUnionCountry(iso2CountryCode *string) (resp bool, err error) { + params := []interface{}{ + iso2CountryCode, + } + err = r.Session.DoRequest("SoftLayer_Locale_Country", "isEuropeanUnionCountry", params, &r.Options, &resp) + return +} + +// Each User is assigned a timezone allowing for a precise local timestamp. +type Locale_Timezone struct { + Session *session.Session + Options sl.Options +} + +// GetLocaleTimezoneService returns an instance of the Locale_Timezone SoftLayer service +func GetLocaleTimezoneService(sess *session.Session) Locale_Timezone { + return Locale_Timezone{Session: sess} +} + +func (r Locale_Timezone) Id(id int) Locale_Timezone { + r.Options.Id = &id + return r +} + +func (r Locale_Timezone) Mask(mask string) Locale_Timezone { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Locale_Timezone) Filter(filter string) Locale_Timezone { + r.Options.Filter = filter + return r +} + +func (r Locale_Timezone) Limit(limit int) Locale_Timezone { + r.Options.Limit = &limit + return r +} + +func (r Locale_Timezone) Offset(offset int) Locale_Timezone { + r.Options.Offset = &offset + return r +} + +// Retrieve all timezone objects. +func (r Locale_Timezone) GetAllObjects() (resp []datatypes.Locale_Timezone, err error) { + err = r.Session.DoRequest("SoftLayer_Locale_Timezone", "getAllObjects", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Locale_Timezone object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Locale_Timezone service. +func (r Locale_Timezone) GetObject() (resp datatypes.Locale_Timezone, err error) { + err = r.Session.DoRequest("SoftLayer_Locale_Timezone", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/location.go b/vendor/github.com/softlayer/softlayer-go/services/location.go new file mode 100644 index 000000000000..00bb82fba132 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/location.go @@ -0,0 +1,884 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// Every piece of hardware and network connection owned by SoftLayer is tracked physically by location and stored in the SoftLayer_Location data type. SoftLayer locations exist in parent/child relationships, a convenient way to track equipment from it's city, datacenter, server room, rack, then slot. Network backbones are tied to datacenters only, not to a room, rack, or slot. +type Location struct { + Session *session.Session + Options sl.Options +} + +// GetLocationService returns an instance of the Location SoftLayer service +func GetLocationService(sess *session.Session) Location { + return Location{Session: sess} +} + +func (r Location) Id(id int) Location { + r.Options.Id = &id + return r +} + +func (r Location) Mask(mask string) Location { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Location) Filter(filter string) Location { + r.Options.Filter = filter + return r +} + +func (r Location) Limit(limit int) Location { + r.Options.Limit = &limit + return r +} + +func (r Location) Offset(offset int) Location { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Location) GetActivePresaleEvents() (resp []datatypes.Sales_Presale_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getActivePresaleEvents", nil, &r.Options, &resp) + return +} + +// Object Storage is only available in select datacenters. This method will return all the datacenters where object storage is available. +func (r Location) GetAvailableObjectStorageDatacenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getAvailableObjectStorageDatacenters", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location) GetBackboneDependents() (resp []datatypes.Network_Backbone_Location_Dependent, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getBackboneDependents", nil, &r.Options, &resp) + return +} + +// Retrieve all datacenter locations. SoftLayer's datacenters exist in various cities and each contain one or more server rooms which house network and server infrastructure. +func (r Location) GetDatacenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getDatacenters", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location) GetDatacentersWithVirtualImageStoreServiceResourceRecord() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getDatacentersWithVirtualImageStoreServiceResourceRecord", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating whether or not the datacenter/location is EU compliant. 
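// An illustrative sketch (not part of the generated bindings) that lists all
// datacenters using the GetDatacenters call defined above together with a
// trimmed object mask. The credentials are placeholders.
package main

import (
    "fmt"
    "log"

    "github.com/softlayer/softlayer-go/services"
    "github.com/softlayer/softlayer-go/session"
)

func main() {
    sess := session.New("set-username", "set-api-key")

    // Restrict the response to the id, name and longName properties.
    locationService := services.GetLocationService(sess).Mask("id,name,longName")

    datacenters, err := locationService.GetDatacenters()
    if err != nil {
        log.Fatal(err)
    }
    for _, dc := range datacenters {
        fmt.Printf("%d %s (%s)\n", *dc.Id, *dc.Name, *dc.LongName)
    }
}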
+func (r Location) GetEuCompliantFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getEuCompliantFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A location can be a member of 1 or more groups. This will show which groups to which a location belongs. +func (r Location) GetGroups() (resp []datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getGroups", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location) GetHardwareFirewalls() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getHardwareFirewalls", nil, &r.Options, &resp) + return +} + +// Retrieve A location's physical address. +func (r Location) GetLocationAddress() (resp datatypes.Account_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getLocationAddress", nil, &r.Options, &resp) + return +} + +// Retrieve A location's Dedicated Rack member +func (r Location) GetLocationReservationMember() (resp datatypes.Location_Reservation_Rack_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getLocationReservationMember", nil, &r.Options, &resp) + return +} + +// Retrieve The current locations status. +func (r Location) GetLocationStatus() (resp datatypes.Location_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getLocationStatus", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location) GetNetworkConfigurationAttribute() (resp datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getNetworkConfigurationAttribute", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location) GetObject() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The total number of users online using SoftLayer's PPTP VPN service for a location. +func (r Location) GetOnlinePptpVpnUserCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getOnlinePptpVpnUserCount", nil, &r.Options, &resp) + return +} + +// Retrieve The total number of users online using SoftLayer's SSL VPN service for a location. +func (r Location) GetOnlineSslVpnUserCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getOnlineSslVpnUserCount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location) GetPathString() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getPathString", nil, &r.Options, &resp) + return +} + +// Retrieve A location can be a member of 1 or more Price Groups. This will show which groups to which a location belongs. +func (r Location) GetPriceGroups() (resp []datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getPriceGroups", nil, &r.Options, &resp) + return +} + +// Retrieve A location can be a member of 1 or more regions. This will show which regions to which a location belongs. +func (r Location) GetRegions() (resp []datatypes.Location_Region, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getRegions", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location) GetTimezone() (resp datatypes.Locale_Timezone, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getTimezone", nil, &r.Options, &resp) + return +} + +// Retrieve A location can be a member of 1 Bandwidth Pooling Group. 
This will show which group to which a location belongs. +func (r Location) GetVdrGroup() (resp datatypes.Location_Group_Location_CrossReference, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getVdrGroup", nil, &r.Options, &resp) + return +} + +// Retrieve all datacenter locations. SoftLayer's datacenters exist in various cities and each contain one or more server rooms which house network and server infrastructure. +func (r Location) GetViewableDatacenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getViewableDatacenters", nil, &r.Options, &resp) + return +} + +// Retrieve all viewable pop and datacenter locations. +func (r Location) GetViewablePopsAndDataCenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getViewablePopsAndDataCenters", nil, &r.Options, &resp) + return +} + +// Retrieve all viewable network locations. +func (r Location) GetViewablepointOfPresence() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getViewablepointOfPresence", nil, &r.Options, &resp) + return +} + +// Retrieve all point of presence locations. +func (r Location) GetpointOfPresence() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getpointOfPresence", nil, &r.Options, &resp) + return +} + +// SoftLayer_Location_Datacenter extends the [[SoftLayer_Location]] data type to include datacenter-specific properties. +type Location_Datacenter struct { + Session *session.Session + Options sl.Options +} + +// GetLocationDatacenterService returns an instance of the Location_Datacenter SoftLayer service +func GetLocationDatacenterService(sess *session.Session) Location_Datacenter { + return Location_Datacenter{Session: sess} +} + +func (r Location_Datacenter) Id(id int) Location_Datacenter { + r.Options.Id = &id + return r +} + +func (r Location_Datacenter) Mask(mask string) Location_Datacenter { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Location_Datacenter) Filter(filter string) Location_Datacenter { + r.Options.Filter = filter + return r +} + +func (r Location_Datacenter) Limit(limit int) Location_Datacenter { + r.Options.Limit = &limit + return r +} + +func (r Location_Datacenter) Offset(offset int) Location_Datacenter { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Location_Datacenter) GetActiveItemPresaleEvents() (resp []datatypes.Sales_Presale_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getActiveItemPresaleEvents", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Datacenter) GetActivePresaleEvents() (resp []datatypes.Sales_Presale_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getActivePresaleEvents", nil, &r.Options, &resp) + return +} + +// Object Storage is only available in select datacenters. This method will return all the datacenters where object storage is available. 
+func (r Location_Datacenter) GetAvailableObjectStorageDatacenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getAvailableObjectStorageDatacenters", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Datacenter) GetBackboneDependents() (resp []datatypes.Network_Backbone_Location_Dependent, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getBackboneDependents", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Datacenter) GetBackendHardwareRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getBackendHardwareRouters", nil, &r.Options, &resp) + return +} + +// Retrieve Subnets which are directly bound to one or more routers in a given datacenter, and currently allow routing. +func (r Location_Datacenter) GetBoundSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getBoundSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve This references relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on this brand for customers that live in Great Britain. +func (r Location_Datacenter) GetBrandCountryRestrictions() (resp []datatypes.Brand_Restriction_Location_CustomerCountry, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getBrandCountryRestrictions", nil, &r.Options, &resp) + return +} + +// Retrieve all datacenter locations. SoftLayer's datacenters exist in various cities and each contain one or more server rooms which house network and server infrastructure. +func (r Location_Datacenter) GetDatacenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getDatacenters", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Datacenter) GetDatacentersWithVirtualImageStoreServiceResourceRecord() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getDatacentersWithVirtualImageStoreServiceResourceRecord", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating whether or not the datacenter/location is EU compliant. +func (r Location_Datacenter) GetEuCompliantFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getEuCompliantFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Datacenter) GetFrontendHardwareRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getFrontendHardwareRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A location can be a member of 1 or more groups. This will show which groups to which a location belongs. 
+func (r Location_Datacenter) GetGroups() (resp []datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getGroups", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Datacenter) GetHardwareFirewalls() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getHardwareFirewalls", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Datacenter) GetHardwareRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getHardwareRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A location's physical address. +func (r Location_Datacenter) GetLocationAddress() (resp datatypes.Account_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getLocationAddress", nil, &r.Options, &resp) + return +} + +// Retrieve A location's Dedicated Rack member +func (r Location_Datacenter) GetLocationReservationMember() (resp datatypes.Location_Reservation_Rack_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getLocationReservationMember", nil, &r.Options, &resp) + return +} + +// Retrieve The current locations status. +func (r Location_Datacenter) GetLocationStatus() (resp datatypes.Location_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getLocationStatus", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Datacenter) GetNetworkConfigurationAttribute() (resp datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getNetworkConfigurationAttribute", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Datacenter) GetObject() (resp datatypes.Location_Datacenter, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The total number of users online using SoftLayer's PPTP VPN service for a location. +func (r Location_Datacenter) GetOnlinePptpVpnUserCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getOnlinePptpVpnUserCount", nil, &r.Options, &resp) + return +} + +// Retrieve The total number of users online using SoftLayer's SSL VPN service for a location. +func (r Location_Datacenter) GetOnlineSslVpnUserCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getOnlineSslVpnUserCount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Datacenter) GetPathString() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getPathString", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Datacenter) GetPresaleEvents() (resp []datatypes.Sales_Presale_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getPresaleEvents", nil, &r.Options, &resp) + return +} + +// Retrieve A location can be a member of 1 or more Price Groups. This will show which groups to which a location belongs. +func (r Location_Datacenter) GetPriceGroups() (resp []datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getPriceGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The regional group this datacenter belongs to. 
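+//
+// Sketch of a typical call, assuming the same configured sess as above and a
+// placeholder datacenter id dcId:
+//
+//    dcSvc := services.GetLocationDatacenterService(sess).Id(dcId)
+//    regionalGroup, err := dcSvc.GetRegionalGroup()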
+func (r Location_Datacenter) GetRegionalGroup() (resp datatypes.Location_Group_Regional, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getRegionalGroup", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Datacenter) GetRegionalInternetRegistry() (resp datatypes.Network_Regional_Internet_Registry, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getRegionalInternetRegistry", nil, &r.Options, &resp) + return +} + +// Retrieve A location can be a member of 1 or more regions. This will show which regions to which a location belongs. +func (r Location_Datacenter) GetRegions() (resp []datatypes.Location_Region, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getRegions", nil, &r.Options, &resp) + return +} + +// Retrieve Retrieve all subnets that are eligible to be routed; those which the account has permission to associate with a vlan. +func (r Location_Datacenter) GetRoutableBoundSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getRoutableBoundSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a SoftLayer datacenter's last 48 hours of network activity. Statistics graphs show traffic outbound from a datacenter on top and inbound traffic on the bottom followed by a legend of the network services tracked in the graph. getStatisticsGraphImage returns a PNG image of variable width and height depending on the number of services reported in the image. +func (r Location_Datacenter) GetStatisticsGraphImage() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getStatisticsGraphImage", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Datacenter) GetTimezone() (resp datatypes.Locale_Timezone, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getTimezone", nil, &r.Options, &resp) + return +} + +// Retrieve A location can be a member of 1 Bandwidth Pooling Group. This will show which group to which a location belongs. +func (r Location_Datacenter) GetVdrGroup() (resp datatypes.Location_Group_Location_CrossReference, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getVdrGroup", nil, &r.Options, &resp) + return +} + +// Retrieve all datacenter locations. SoftLayer's datacenters exist in various cities and each contain one or more server rooms which house network and server infrastructure. +func (r Location_Datacenter) GetViewableDatacenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getViewableDatacenters", nil, &r.Options, &resp) + return +} + +// Retrieve all viewable pop and datacenter locations. +func (r Location_Datacenter) GetViewablePopsAndDataCenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getViewablePopsAndDataCenters", nil, &r.Options, &resp) + return +} + +// Retrieve all viewable network locations. +func (r Location_Datacenter) GetViewablepointOfPresence() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getViewablepointOfPresence", nil, &r.Options, &resp) + return +} + +// Retrieve all point of presence locations. 
+func (r Location_Datacenter) GetpointOfPresence() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getpointOfPresence", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Location_Group struct { + Session *session.Session + Options sl.Options +} + +// GetLocationGroupService returns an instance of the Location_Group SoftLayer service +func GetLocationGroupService(sess *session.Session) Location_Group { + return Location_Group{Session: sess} +} + +func (r Location_Group) Id(id int) Location_Group { + r.Options.Id = &id + return r +} + +func (r Location_Group) Mask(mask string) Location_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Location_Group) Filter(filter string) Location_Group { + r.Options.Filter = filter + return r +} + +func (r Location_Group) Limit(limit int) Location_Group { + r.Options.Limit = &limit + return r +} + +func (r Location_Group) Offset(offset int) Location_Group { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Location_Group) GetAllObjects() (resp []datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The type for this location group. +func (r Location_Group) GetLocationGroupType() (resp datatypes.Location_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group", "getLocationGroupType", nil, &r.Options, &resp) + return +} + +// Retrieve The locations in a group. +func (r Location_Group) GetLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group", "getLocations", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Group) GetObject() (resp datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Location_Group_Pricing struct { + Session *session.Session + Options sl.Options +} + +// GetLocationGroupPricingService returns an instance of the Location_Group_Pricing SoftLayer service +func GetLocationGroupPricingService(sess *session.Session) Location_Group_Pricing { + return Location_Group_Pricing{Session: sess} +} + +func (r Location_Group_Pricing) Id(id int) Location_Group_Pricing { + r.Options.Id = &id + return r +} + +func (r Location_Group_Pricing) Mask(mask string) Location_Group_Pricing { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Location_Group_Pricing) Filter(filter string) Location_Group_Pricing { + r.Options.Filter = filter + return r +} + +func (r Location_Group_Pricing) Limit(limit int) Location_Group_Pricing { + r.Options.Limit = &limit + return r +} + +func (r Location_Group_Pricing) Offset(offset int) Location_Group_Pricing { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Location_Group_Pricing) GetAllObjects() (resp []datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Pricing", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The type for this location group. 
+func (r Location_Group_Pricing) GetLocationGroupType() (resp datatypes.Location_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Pricing", "getLocationGroupType", nil, &r.Options, &resp) + return +} + +// Retrieve The locations in a group. +func (r Location_Group_Pricing) GetLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Pricing", "getLocations", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Group_Pricing) GetObject() (resp datatypes.Location_Group_Pricing, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Pricing", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The prices that this pricing location group limits. All of these prices will only be available in the locations defined by this pricing location group. +func (r Location_Group_Pricing) GetPrices() (resp []datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Pricing", "getPrices", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Location_Group_Regional struct { + Session *session.Session + Options sl.Options +} + +// GetLocationGroupRegionalService returns an instance of the Location_Group_Regional SoftLayer service +func GetLocationGroupRegionalService(sess *session.Session) Location_Group_Regional { + return Location_Group_Regional{Session: sess} +} + +func (r Location_Group_Regional) Id(id int) Location_Group_Regional { + r.Options.Id = &id + return r +} + +func (r Location_Group_Regional) Mask(mask string) Location_Group_Regional { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Location_Group_Regional) Filter(filter string) Location_Group_Regional { + r.Options.Filter = filter + return r +} + +func (r Location_Group_Regional) Limit(limit int) Location_Group_Regional { + r.Options.Limit = &limit + return r +} + +func (r Location_Group_Regional) Offset(offset int) Location_Group_Regional { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Location_Group_Regional) GetAllObjects() (resp []datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Regional", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The datacenters in a group. +func (r Location_Group_Regional) GetDatacenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Regional", "getDatacenters", nil, &r.Options, &resp) + return +} + +// Retrieve The type for this location group. +func (r Location_Group_Regional) GetLocationGroupType() (resp datatypes.Location_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Regional", "getLocationGroupType", nil, &r.Options, &resp) + return +} + +// Retrieve The locations in a group. +func (r Location_Group_Regional) GetLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Regional", "getLocations", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Group_Regional) GetObject() (resp datatypes.Location_Group_Regional, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Regional", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The preferred datacenters of a group. 
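+//
+// Illustrative call (sess is an assumed, already-configured *session.Session
+// and groupId a placeholder regional group id):
+//
+//    regional := services.GetLocationGroupRegionalService(sess).Id(groupId)
+//    preferred, err := regional.GetPreferredDatacenter()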
+func (r Location_Group_Regional) GetPreferredDatacenter() (resp datatypes.Location_Datacenter, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Regional", "getPreferredDatacenter", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Location_Reservation struct { + Session *session.Session + Options sl.Options +} + +// GetLocationReservationService returns an instance of the Location_Reservation SoftLayer service +func GetLocationReservationService(sess *session.Session) Location_Reservation { + return Location_Reservation{Session: sess} +} + +func (r Location_Reservation) Id(id int) Location_Reservation { + r.Options.Id = &id + return r +} + +func (r Location_Reservation) Mask(mask string) Location_Reservation { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Location_Reservation) Filter(filter string) Location_Reservation { + r.Options.Filter = filter + return r +} + +func (r Location_Reservation) Limit(limit int) Location_Reservation { + r.Options.Limit = &limit + return r +} + +func (r Location_Reservation) Offset(offset int) Location_Reservation { + r.Options.Offset = &offset + return r +} + +// Retrieve The account that a billing item belongs to. +func (r Location_Reservation) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Reservation) GetAccountReservations() (resp []datatypes.Location_Reservation, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation", "getAccountReservations", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allotment that the reservation belongs to. +func (r Location_Reservation) GetAllotment() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation", "getAllotment", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allotment that the reservation belongs to. +func (r Location_Reservation) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The datacenter location that the reservation belongs to. 
+func (r Location_Reservation) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve Rack information for the reservation +func (r Location_Reservation) GetLocationReservationRack() (resp datatypes.Location_Reservation_Rack, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation", "getLocationReservationRack", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Reservation) GetObject() (resp datatypes.Location_Reservation, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Location_Reservation_Rack struct { + Session *session.Session + Options sl.Options +} + +// GetLocationReservationRackService returns an instance of the Location_Reservation_Rack SoftLayer service +func GetLocationReservationRackService(sess *session.Session) Location_Reservation_Rack { + return Location_Reservation_Rack{Session: sess} +} + +func (r Location_Reservation_Rack) Id(id int) Location_Reservation_Rack { + r.Options.Id = &id + return r +} + +func (r Location_Reservation_Rack) Mask(mask string) Location_Reservation_Rack { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Location_Reservation_Rack) Filter(filter string) Location_Reservation_Rack { + r.Options.Filter = filter + return r +} + +func (r Location_Reservation_Rack) Limit(limit int) Location_Reservation_Rack { + r.Options.Limit = &limit + return r +} + +func (r Location_Reservation_Rack) Offset(offset int) Location_Reservation_Rack { + r.Options.Offset = &offset + return r +} + +// Retrieve The bandwidth allotment that the reservation belongs to. +func (r Location_Reservation_Rack) GetAllotment() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack", "getAllotment", nil, &r.Options, &resp) + return +} + +// Retrieve Members of the rack. 
+func (r Location_Reservation_Rack) GetChildren() (resp []datatypes.Location_Reservation_Rack_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack", "getChildren", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Reservation_Rack) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Reservation_Rack) GetLocationReservation() (resp datatypes.Location_Reservation, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack", "getLocationReservation", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Reservation_Rack) GetObject() (resp datatypes.Location_Reservation_Rack, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Location_Reservation_Rack_Member struct { + Session *session.Session + Options sl.Options +} + +// GetLocationReservationRackMemberService returns an instance of the Location_Reservation_Rack_Member SoftLayer service +func GetLocationReservationRackMemberService(sess *session.Session) Location_Reservation_Rack_Member { + return Location_Reservation_Rack_Member{Session: sess} +} + +func (r Location_Reservation_Rack_Member) Id(id int) Location_Reservation_Rack_Member { + r.Options.Id = &id + return r +} + +func (r Location_Reservation_Rack_Member) Mask(mask string) Location_Reservation_Rack_Member { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Location_Reservation_Rack_Member) Filter(filter string) Location_Reservation_Rack_Member { + r.Options.Filter = filter + return r +} + +func (r Location_Reservation_Rack_Member) Limit(limit int) Location_Reservation_Rack_Member { + r.Options.Limit = &limit + return r +} + +func (r Location_Reservation_Rack_Member) Offset(offset int) Location_Reservation_Rack_Member { + r.Options.Offset = &offset + return r +} + +// Retrieve Location relation for the rack member +func (r Location_Reservation_Rack_Member) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack_Member", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Reservation_Rack_Member) GetLocationReservationRack() (resp datatypes.Location_Reservation_Rack, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack_Member", "getLocationReservationRack", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Reservation_Rack_Member) GetObject() (resp datatypes.Location_Reservation_Rack_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack_Member", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/marketplace.go b/vendor/github.com/softlayer/softlayer-go/services/marketplace.go new file mode 100644 index 000000000000..27339a571f2d --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/marketplace.go @@ -0,0 +1,148 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Marketplace_Partner struct { + Session *session.Session + Options sl.Options +} + +// GetMarketplacePartnerService returns an instance of the Marketplace_Partner SoftLayer service +func GetMarketplacePartnerService(sess *session.Session) Marketplace_Partner { + return Marketplace_Partner{Session: sess} +} + +func (r Marketplace_Partner) Id(id int) Marketplace_Partner { + r.Options.Id = &id + return r +} + +func (r Marketplace_Partner) Mask(mask string) Marketplace_Partner { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Marketplace_Partner) Filter(filter string) Marketplace_Partner { + r.Options.Filter = filter + return r +} + +func (r Marketplace_Partner) Limit(limit int) Marketplace_Partner { + r.Options.Limit = &limit + return r +} + +func (r Marketplace_Partner) Offset(offset int) Marketplace_Partner { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Marketplace_Partner) GetAllObjects() (resp []datatypes.Marketplace_Partner, err error) { + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Marketplace_Partner) GetAllPublishedPartners(searchTerm *string) (resp []datatypes.Marketplace_Partner, err error) { + params := []interface{}{ + searchTerm, + } + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getAllPublishedPartners", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Marketplace_Partner) GetAttachments() (resp []datatypes.Marketplace_Partner_Attachment, err error) { + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getAttachments", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Marketplace_Partner) GetFeaturedPartners(non *bool) (resp []datatypes.Marketplace_Partner, err error) { + params := []interface{}{ + non, + } + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getFeaturedPartners", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Marketplace_Partner) GetFile(name *string) (resp datatypes.Marketplace_Partner_File, err error) { + params := []interface{}{ + name, + } + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getFile", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Marketplace_Partner) GetLogoMedium() (resp datatypes.Marketplace_Partner_Attachment, err error) { + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getLogoMedium", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Marketplace_Partner) GetLogoMediumTemp() (resp datatypes.Marketplace_Partner_Attachment, err error) { + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", 
"getLogoMediumTemp", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Marketplace_Partner) GetLogoSmall() (resp datatypes.Marketplace_Partner_Attachment, err error) { + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getLogoSmall", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Marketplace_Partner) GetLogoSmallTemp() (resp datatypes.Marketplace_Partner_Attachment, err error) { + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getLogoSmallTemp", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Marketplace_Partner) GetObject() (resp datatypes.Marketplace_Partner, err error) { + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Marketplace_Partner) GetPartnerByUrlIdentifier(urlIdentifier *string) (resp datatypes.Marketplace_Partner, err error) { + params := []interface{}{ + urlIdentifier, + } + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getPartnerByUrlIdentifier", params, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/metric.go b/vendor/github.com/softlayer/softlayer-go/services/metric.go new file mode 100644 index 000000000000..e2721cf4c295 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/metric.go @@ -0,0 +1,234 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// Metric tracking objects provides a common interface to all metrics provided by SoftLayer. These metrics range from network component traffic for a server to aggregated Bandwidth Pooling traffic and more. Every object within SoftLayer's range of objects that has data that can be tracked over time has an associated tracking object. Use the [[SoftLayer_Metric_Tracking_Object]] service to retrieve raw and graph data from a tracking object. 
+type Metric_Tracking_Object struct { + Session *session.Session + Options sl.Options +} + +// GetMetricTrackingObjectService returns an instance of the Metric_Tracking_Object SoftLayer service +func GetMetricTrackingObjectService(sess *session.Session) Metric_Tracking_Object { + return Metric_Tracking_Object{Session: sess} +} + +func (r Metric_Tracking_Object) Id(id int) Metric_Tracking_Object { + r.Options.Id = &id + return r +} + +func (r Metric_Tracking_Object) Mask(mask string) Metric_Tracking_Object { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Metric_Tracking_Object) Filter(filter string) Metric_Tracking_Object { + r.Options.Filter = filter + return r +} + +func (r Metric_Tracking_Object) Limit(limit int) Metric_Tracking_Object { + r.Options.Limit = &limit + return r +} + +func (r Metric_Tracking_Object) Offset(offset int) Metric_Tracking_Object { + r.Options.Offset = &offset + return r +} + +// Retrieve a PNG image of the last 24 hours of bandwidth usage of one of SoftLayer's network backbones. +func (r Metric_Tracking_Object) GetBackboneBandwidthGraph(graphTitle *string) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + graphTitle, + } + err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getBackboneBandwidthGraph", params, &r.Options, &resp) + return +} + +// Retrieve a collection of raw bandwidth data from an individual public or private network tracking object. Raw data is ideal if you with to employ your own traffic storage and graphing systems. +func (r Metric_Tracking_Object) GetBandwidthData(startDateTime *datatypes.Time, endDateTime *datatypes.Time, typ *string, rollupSeconds *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + typ, + rollupSeconds, + } + err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getBandwidthData", params, &r.Options, &resp) + return +} + +// Retrieve a PNG image of a bandwidth graph representing the bandwidth usage over time recorded by SofTLayer's bandwidth pollers. +func (r Metric_Tracking_Object) GetBandwidthGraph(startDateTime *datatypes.Time, endDateTime *datatypes.Time, graphType *string, fontSize *int, graphWidth *int, graphHeight *int, doNotShowTimeZone *bool) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + graphType, + fontSize, + graphWidth, + graphHeight, + doNotShowTimeZone, + } + err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getBandwidthGraph", params, &r.Options, &resp) + return +} + +// Retrieve the total amount of bandwidth recorded by a tracking object within the given date range. This method will only work on SoftLayer_Metric_Tracking_Object for SoftLayer_Hardware objects, and SoftLayer_Virtual_Guest objects. +func (r Metric_Tracking_Object) GetBandwidthTotal(startDateTime *datatypes.Time, endDateTime *datatypes.Time, direction *string, typ *string) (resp uint, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + direction, + typ, + } + err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getBandwidthTotal", params, &r.Options, &resp) + return +} + +// Returns a graph container instance that is populated with metric data for the tracking object. 
+func (r Metric_Tracking_Object) GetCustomGraphData(graphContainer *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + graphContainer, + } + err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getCustomGraphData", params, &r.Options, &resp) + return +} + +// Retrieve a collection of detailed metric data over a date range. Ideal if you want to employ your own graphing systems. Note not all metrics support this method. Those that do not return null. +func (r Metric_Tracking_Object) GetDetailsForDateRange(startDate *datatypes.Time, endDate *datatypes.Time, graphType []string) (resp []datatypes.Container_Metric_Tracking_Object_Details, err error) { + params := []interface{}{ + startDate, + endDate, + graphType, + } + err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getDetailsForDateRange", params, &r.Options, &resp) + return +} + +// Retrieve a PNG image of a metric in graph form. +func (r Metric_Tracking_Object) GetGraph(startDateTime *datatypes.Time, endDateTime *datatypes.Time, graphType []string) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + graphType, + } + err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getGraph", params, &r.Options, &resp) + return +} + +// Returns a collection of metric data types that can be retrieved for a metric tracking object. +func (r Metric_Tracking_Object) GetMetricDataTypes() (resp []datatypes.Container_Metric_Data_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getMetricDataTypes", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Metric_Tracking_Object object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Metric_Tracking_Object service. You can only tracking objects that are associated with your SoftLayer account or services. +func (r Metric_Tracking_Object) GetObject() (resp datatypes.Metric_Tracking_Object, err error) { + err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve a metric summary. Ideal if you want to employ your own graphing systems. Note not all metric types contain a summary. These return null. +func (r Metric_Tracking_Object) GetSummary(graphType *string) (resp datatypes.Container_Metric_Tracking_Object_Summary, err error) { + params := []interface{}{ + graphType, + } + err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getSummary", params, &r.Options, &resp) + return +} + +// Returns summarized metric data for the date range, metric type and summary period provided. +func (r Metric_Tracking_Object) GetSummaryData(startDateTime *datatypes.Time, endDateTime *datatypes.Time, validTypes []datatypes.Container_Metric_Data_Type, summaryPeriod *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + validTypes, + summaryPeriod, + } + err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getSummaryData", params, &r.Options, &resp) + return +} + +// Retrieve The type of data that a tracking object polls. +func (r Metric_Tracking_Object) GetType() (resp datatypes.Metric_Tracking_Object_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getType", nil, &r.Options, &resp) + return +} + +// This data type provides commonly used bandwidth summary components for the current billing cycle. 
+type Metric_Tracking_Object_Bandwidth_Summary struct { + Session *session.Session + Options sl.Options +} + +// GetMetricTrackingObjectBandwidthSummaryService returns an instance of the Metric_Tracking_Object_Bandwidth_Summary SoftLayer service +func GetMetricTrackingObjectBandwidthSummaryService(sess *session.Session) Metric_Tracking_Object_Bandwidth_Summary { + return Metric_Tracking_Object_Bandwidth_Summary{Session: sess} +} + +func (r Metric_Tracking_Object_Bandwidth_Summary) Id(id int) Metric_Tracking_Object_Bandwidth_Summary { + r.Options.Id = &id + return r +} + +func (r Metric_Tracking_Object_Bandwidth_Summary) Mask(mask string) Metric_Tracking_Object_Bandwidth_Summary { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Metric_Tracking_Object_Bandwidth_Summary) Filter(filter string) Metric_Tracking_Object_Bandwidth_Summary { + r.Options.Filter = filter + return r +} + +func (r Metric_Tracking_Object_Bandwidth_Summary) Limit(limit int) Metric_Tracking_Object_Bandwidth_Summary { + r.Options.Limit = &limit + return r +} + +func (r Metric_Tracking_Object_Bandwidth_Summary) Offset(offset int) Metric_Tracking_Object_Bandwidth_Summary { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Metric_Tracking_Object_Bandwidth_Summary) GetObject() (resp datatypes.Metric_Tracking_Object_Bandwidth_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object_Bandwidth_Summary", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/monitoring.go b/vendor/github.com/softlayer/softlayer-go/services/monitoring.go new file mode 100644 index 000000000000..2de43d7e96eb --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/monitoring.go @@ -0,0 +1,685 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// A monitoring agent object contains information describing the agent. 
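+//
+// Illustrative sketch (sess and agentId are assumed placeholders): check the
+// agent's status name and activate it if it is not reported as 'ACTIVE'.
+//
+//    agentSvc := services.GetMonitoringAgentService(sess).Id(agentId)
+//    statusName, err := agentSvc.GetStatusName()
+//    if err == nil && statusName != "ACTIVE" {
+//        _, err = agentSvc.Activate()
+//    }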
+type Monitoring_Agent struct { + Session *session.Session + Options sl.Options +} + +// GetMonitoringAgentService returns an instance of the Monitoring_Agent SoftLayer service +func GetMonitoringAgentService(sess *session.Session) Monitoring_Agent { + return Monitoring_Agent{Session: sess} +} + +func (r Monitoring_Agent) Id(id int) Monitoring_Agent { + r.Options.Id = &id + return r +} + +func (r Monitoring_Agent) Mask(mask string) Monitoring_Agent { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Monitoring_Agent) Filter(filter string) Monitoring_Agent { + r.Options.Filter = filter + return r +} + +func (r Monitoring_Agent) Limit(limit int) Monitoring_Agent { + r.Options.Limit = &limit + return r +} + +func (r Monitoring_Agent) Offset(offset int) Monitoring_Agent { + r.Options.Offset = &offset + return r +} + +// This method activates a SoftLayer_Monitoring_Agent. +func (r Monitoring_Agent) Activate() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "activate", nil, &r.Options, &resp) + return +} + +// This method is used to apply changes to a monitoring agent's configuration for SoftLayer_Configuration_Template_Section with the property sectionType that has a keyName of 'TEMPLATE_SECTION'. Configuration values that are passed in can be new or updated objects but must have a definitionId and profileId defined for both. Existing SoftLayer_Monitoring_Agent_Configuration_Value values can be retrieved as a property of the SoftLayer_Configuration_Template_Section_Definition's from the monitoring agent's configurationTemplate property. New values will follow the structure of SoftLayer_Monitoring_Agent_Configuration_Value. It returns a SoftLayer_Provisioning_Version1_Transaction object to track the progress of the update being applied. Some configuration sections act as a template which helps to create additional monitoring configurations. For instance, Core Resource monitoring agent lets you create monitoring configurations for different disk volumes or disk path. +func (r Monitoring_Agent) AddConfigurationProfile(configurationValues []datatypes.Monitoring_Agent_Configuration_Value) (resp datatypes.Provisioning_Version1_Transaction, err error) { + params := []interface{}{ + configurationValues, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "addConfigurationProfile", params, &r.Options, &resp) + return +} + +// This method creates a transaction used to apply changes to a monitoring agent's configuration for an array of SoftLayer_Configuration_Template_Section that have the property sectionType with a name of 'Fixed section'. Configuration values that are passed in can be new or updated objects but must have a configurationDefinitionId defined for both. Existing SoftLayer_Monitoring_Agent_Configuration_Value values can be retrieved as a property of the SoftLayer_Configuration_Template_Section_Definition from the monitoring agent's configurationTemplate property. New values will follow the structure of SoftLayer_Monitoring_Agent_Configuration_Value. This method returns a SoftLayer_Provisioning_Version1_Transaction object to track the progress of the update being applied. 
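+//
+// Call-shape sketch only (field-level edits to the configuration values are
+// omitted; sess and agentId are assumed placeholders):
+//
+//    agentSvc := services.GetMonitoringAgentService(sess).Id(agentId)
+//    values, err := agentSvc.GetConfigurationValues()
+//    // ... adjust the desired entries in values ...
+//    txn, err := agentSvc.ApplyConfigurationValues(values)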
+func (r Monitoring_Agent) ApplyConfigurationValues(configurationValues []datatypes.Monitoring_Agent_Configuration_Value) (resp datatypes.Provisioning_Version1_Transaction, err error) { + params := []interface{}{ + configurationValues, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "applyConfigurationValues", params, &r.Options, &resp) + return +} + +// This method will deactivate the monitoring agent, preventing it from generating any further alarms. +func (r Monitoring_Agent) Deactivate() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "deactivate", nil, &r.Options, &resp) + return +} + +// This method will remove a SoftLayer_Configuration_Template_Section_Profile from a SoftLayer_Configuration_Template_Section by passing in the sectionId of the profile object and identifier of the profile. This will execute the action immediately on the server and the SoftLayer_Configuration_Template_Section returning a boolean true if successful. +func (r Monitoring_Agent) DeleteConfigurationProfile(sectionId *int, profileId *int) (resp bool, err error) { + params := []interface{}{ + sectionId, + profileId, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "deleteConfigurationProfile", params, &r.Options, &resp) + return +} + +// Initialize a monitoring agent and deploy it with the SoftLayer_Configuration_Template with the same identifier as the $configurationTemplateId parameter. If the configuration template ID is not provided, the current configuration template will be used. When executing this method, the existing configuration values will be lost. If no configuration template identifier is provided, the current configuration template will be used. '''Warning''' Reporting data may be lost as a result of executing this method. +func (r Monitoring_Agent) DeployMonitoringAgent(configurationTemplateId *int) (resp datatypes.Provisioning_Version1_Transaction, err error) { + params := []interface{}{ + configurationTemplateId, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "deployMonitoringAgent", params, &r.Options, &resp) + return +} + +// This method retrieves an array of SoftLayer_Notification_User_Subscriber objects belonging to the SoftLayer_Monitoring_Agent which are able to receive alarm notifications. +func (r Monitoring_Agent) GetActiveAlarmSubscribers() (resp []datatypes.Notification_User_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getActiveAlarmSubscribers", nil, &r.Options, &resp) + return +} + +// Retrieve The current status of the corresponding agent +func (r Monitoring_Agent) GetAgentStatus() (resp datatypes.Monitoring_Agent_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getAgentStatus", nil, &r.Options, &resp) + return +} + +// This method returns an array of available SoftLayer_Configuration_Template objects for this monitoring agent. +func (r Monitoring_Agent) GetAvailableConfigurationTemplates() (resp []datatypes.Configuration_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getAvailableConfigurationTemplates", nil, &r.Options, &resp) + return +} + +// Returns an array of available configuration values that are specific to a server or a Virtual that this monitoring agent is running on. For example, invoking this method against "Network Traffic Monitoring Agent" will return all available network adapters on your system. 
+func (r Monitoring_Agent) GetAvailableConfigurationValues(configurationDefinitionId *int, configValues []datatypes.Monitoring_Agent_Configuration_Value) (resp []datatypes.Monitoring_Agent_Configuration_Value, err error) { + params := []interface{}{ + configurationDefinitionId, + configValues, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getAvailableConfigurationValues", params, &r.Options, &resp) + return +} + +// Retrieve All custom configuration profiles associated with the corresponding agent +func (r Monitoring_Agent) GetConfigurationProfiles() (resp []datatypes.Configuration_Template_Section_Profile, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getConfigurationProfiles", nil, &r.Options, &resp) + return +} + +// Retrieve A template of an agent's current configuration which contains information about the structure of the configuration values. +func (r Monitoring_Agent) GetConfigurationTemplate() (resp datatypes.Configuration_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getConfigurationTemplate", nil, &r.Options, &resp) + return +} + +// Retrieve The values associated with the corresponding Agent configuration. +func (r Monitoring_Agent) GetConfigurationValues() (resp []datatypes.Monitoring_Agent_Configuration_Value, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getConfigurationValues", nil, &r.Options, &resp) + return +} + +// This method returns an array of SoftLayer_User_Customer objects, representing those who are allowed to be used as alarm subscribers. +func (r Monitoring_Agent) GetEligibleAlarmSubscibers() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getEligibleAlarmSubscibers", nil, &r.Options, &resp) + return +} + +// This method returns a SoftLayer_Container_Bandwidth_GraphOutputs object containing a base64 PNG string graph of the provided configuration values for the given begin and end dates. +func (r Monitoring_Agent) GetGraph(configurationValues []datatypes.Monitoring_Agent_Configuration_Value, beginDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Container_Monitoring_Graph_Outputs, err error) { + params := []interface{}{ + configurationValues, + beginDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getGraph", params, &r.Options, &resp) + return +} + +// This method returns the metric data for each of the configuration values provided during the given time range. +func (r Monitoring_Agent) GetGraphData(metricDataTypes []datatypes.Container_Metric_Data_Type, startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + metricDataTypes, + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getGraphData", params, &r.Options, &resp) + return +} + +// Retrieve SoftLayer hardware related to the agent. +func (r Monitoring_Agent) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getHardware", nil, &r.Options, &resp) + return +} + +// This method retrieves a monitoring agent whose identifier corresponds to the value provided in the initialization parameter passed to the SoftLayer_Monitoring_Agent service. 
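+//
+// For example (with the same assumed sess and a placeholder agent id):
+//
+//    agent, err := services.GetMonitoringAgentService(sess).Id(agentId).GetObject()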
+func (r Monitoring_Agent) GetObject() (resp datatypes.Monitoring_Agent, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Contains general information relating to a single SoftLayer product. +func (r Monitoring_Agent) GetProductItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getProductItem", nil, &r.Options, &resp) + return +} + +// Retrieve A description for a specific installation of a Software Component +func (r Monitoring_Agent) GetSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getSoftwareDescription", nil, &r.Options, &resp) + return +} + +// Retrieve Monitoring agent status name. +func (r Monitoring_Agent) GetStatusName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getStatusName", nil, &r.Options, &resp) + return +} + +// Retrieve Softlayer_Virtual_Guest object related to the monitoring agent, which this virtual guest object and hardware is on the server of the running agent. +func (r Monitoring_Agent) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// Use of this method will allow removing active subscribers from the monitoring agent. The agent subscribers can be managed within the portal from the "Alarm Subscribers" tab of the monitoring agent configuration. +func (r Monitoring_Agent) RemoveActiveAlarmSubscriber(userRecordId *int) (resp bool, err error) { + params := []interface{}{ + userRecordId, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "removeActiveAlarmSubscriber", params, &r.Options, &resp) + return +} + +// Use of this method will allow removing all subscribers from the monitoring agent. The agent subscribers can be managed within the portal from the "Alarm Subscribers" tab of the monitoring agent configuration. +func (r Monitoring_Agent) RemoveAllAlarmSubscribers() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "removeAllAlarmSubscribers", nil, &r.Options, &resp) + return +} + +// This method restarts a monitoring agent and sets the agent's status to 'ACTIVE'. +func (r Monitoring_Agent) RestartMonitoringAgent() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "restartMonitoringAgent", nil, &r.Options, &resp) + return +} + +// This method assigns a user to receive the alerts generated by this SoftLayer_Monitoring_Agent. +func (r Monitoring_Agent) SetActiveAlarmSubscriber(userRecordId *int) (resp bool, err error) { + params := []interface{}{ + userRecordId, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "setActiveAlarmSubscriber", params, &r.Options, &resp) + return +} + +// The SoftLayer_Monitoring_Agent_Configuration_Template_Group class is consisted of configuration templates for agents in a monitoring package. 
+type Monitoring_Agent_Configuration_Template_Group struct { + Session *session.Session + Options sl.Options +} + +// GetMonitoringAgentConfigurationTemplateGroupService returns an instance of the Monitoring_Agent_Configuration_Template_Group SoftLayer service +func GetMonitoringAgentConfigurationTemplateGroupService(sess *session.Session) Monitoring_Agent_Configuration_Template_Group { + return Monitoring_Agent_Configuration_Template_Group{Session: sess} +} + +func (r Monitoring_Agent_Configuration_Template_Group) Id(id int) Monitoring_Agent_Configuration_Template_Group { + r.Options.Id = &id + return r +} + +func (r Monitoring_Agent_Configuration_Template_Group) Mask(mask string) Monitoring_Agent_Configuration_Template_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Monitoring_Agent_Configuration_Template_Group) Filter(filter string) Monitoring_Agent_Configuration_Template_Group { + r.Options.Filter = filter + return r +} + +func (r Monitoring_Agent_Configuration_Template_Group) Limit(limit int) Monitoring_Agent_Configuration_Template_Group { + r.Options.Limit = &limit + return r +} + +func (r Monitoring_Agent_Configuration_Template_Group) Offset(offset int) Monitoring_Agent_Configuration_Template_Group { + r.Options.Offset = &offset + return r +} + +// This method creates a SoftLayer_Monitoring_Agent_Configuration_Template_Group using the values provided in the template object. The template objects accountId will be overridden to use the active user's accountId as it shows on their associated SoftLayer_User_Customer object. +func (r Monitoring_Agent_Configuration_Template_Group) CreateObject(templateObject *datatypes.Monitoring_Agent_Configuration_Template_Group) (resp datatypes.Monitoring_Agent_Configuration_Template_Group, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "createObject", params, &r.Options, &resp) + return +} + +// Deletes a customer configuration template group. +func (r Monitoring_Agent_Configuration_Template_Group) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "deleteObject", nil, &r.Options, &resp) + return +} + +// This method edits an existing SoftLayer_Monitoring_Agent_Configuration_Template_Group using the values passed in the $object parameter. The $object parameter should use the same structure as a SoftLayer_Monitoring_Agent_Configuration_Template_Group object. 
+func (r Monitoring_Agent_Configuration_Template_Group) EditObject(templateObject *datatypes.Monitoring_Agent_Configuration_Template_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Template_Group) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Monitoring_Agent_Configuration_Template_Group) GetAllObjects() (resp []datatypes.Monitoring_Agent_Configuration_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "getAllObjects", nil, &r.Options, &resp) + return +} + +// This method retrieves an array of SoftLayer_Monitoring_Agent_Configuration_Template_Group objects that are available to the active user's account. The packageId parameter is not currently used. +func (r Monitoring_Agent_Configuration_Template_Group) GetConfigurationGroups(packageId *int) (resp []datatypes.Monitoring_Agent_Configuration_Template_Group, err error) { + params := []interface{}{ + packageId, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "getConfigurationGroups", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Template_Group) GetConfigurationTemplateReferences() (resp []datatypes.Monitoring_Agent_Configuration_Template_Group_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "getConfigurationTemplateReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Template_Group) GetConfigurationTemplates() (resp []datatypes.Configuration_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "getConfigurationTemplates", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Template_Group) GetItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "getItem", nil, &r.Options, &resp) + return +} + +// This method retrieves a monitoring agent configuration template group whose identifier corresponds to the value provided in the initialization parameter passed to the SoftLayer_Monitoring_Agent_Configuration_Template_Group service. +func (r Monitoring_Agent_Configuration_Template_Group) GetObject() (resp datatypes.Monitoring_Agent_Configuration_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "getObject", nil, &r.Options, &resp) + return +} + +// SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference class holds the reference information, essentially a SQL join, between a monitoring configuration group and agent configuration templates. 
+type Monitoring_Agent_Configuration_Template_Group_Reference struct { + Session *session.Session + Options sl.Options +} + +// GetMonitoringAgentConfigurationTemplateGroupReferenceService returns an instance of the Monitoring_Agent_Configuration_Template_Group_Reference SoftLayer service +func GetMonitoringAgentConfigurationTemplateGroupReferenceService(sess *session.Session) Monitoring_Agent_Configuration_Template_Group_Reference { + return Monitoring_Agent_Configuration_Template_Group_Reference{Session: sess} +} + +func (r Monitoring_Agent_Configuration_Template_Group_Reference) Id(id int) Monitoring_Agent_Configuration_Template_Group_Reference { + r.Options.Id = &id + return r +} + +func (r Monitoring_Agent_Configuration_Template_Group_Reference) Mask(mask string) Monitoring_Agent_Configuration_Template_Group_Reference { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Monitoring_Agent_Configuration_Template_Group_Reference) Filter(filter string) Monitoring_Agent_Configuration_Template_Group_Reference { + r.Options.Filter = filter + return r +} + +func (r Monitoring_Agent_Configuration_Template_Group_Reference) Limit(limit int) Monitoring_Agent_Configuration_Template_Group_Reference { + r.Options.Limit = &limit + return r +} + +func (r Monitoring_Agent_Configuration_Template_Group_Reference) Offset(offset int) Monitoring_Agent_Configuration_Template_Group_Reference { + r.Options.Offset = &offset + return r +} + +// This method creates a monitoring agent configuration template group reference by passing in an object with the SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference structure as the $templateObject parameter. +func (r Monitoring_Agent_Configuration_Template_Group_Reference) CreateObject(templateObject *datatypes.Monitoring_Agent_Configuration_Template_Group_Reference) (resp datatypes.Monitoring_Agent_Configuration_Template_Group_Reference, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "createObject", params, &r.Options, &resp) + return +} + +// This method creates monitoring agent configuration template group references by passing in an array of objects with the SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference structure as the $templateObjects parameter. Setting the $bulkCommit parameter to true will commit the changes in one transaction, false will commit after each object is created. +func (r Monitoring_Agent_Configuration_Template_Group_Reference) CreateObjects(templateObjects []datatypes.Monitoring_Agent_Configuration_Template_Group_Reference) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "createObjects", params, &r.Options, &resp) + return +} + +// This method updates a SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference record by passing in a modified instance of the object. 
+func (r Monitoring_Agent_Configuration_Template_Group_Reference) EditObject(templateObject *datatypes.Monitoring_Agent_Configuration_Template_Group_Reference) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "editObject", params, &r.Options, &resp) + return +} + +// This method updates a set of SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference records by passing in an array of modified instances of the objects. Setting the $bulkCommit parameter to true will commit the changes in one transaction, false will commit after each object is updated. +func (r Monitoring_Agent_Configuration_Template_Group_Reference) EditObjects(templateObjects []datatypes.Monitoring_Agent_Configuration_Template_Group_Reference) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "editObjects", params, &r.Options, &resp) + return +} + +// This method retrieves all SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference objects accessible to the active user. +func (r Monitoring_Agent_Configuration_Template_Group_Reference) GetAllObjects() (resp []datatypes.Monitoring_Agent_Configuration_Template_Group_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Template_Group_Reference) GetConfigurationTemplate() (resp datatypes.Configuration_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "getConfigurationTemplate", nil, &r.Options, &resp) + return +} + +// This method retrieves a monitoring agent configuration template group reference whose identifier corresponds to the value provided in the initialization parameter passed to the SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference service. 
+func (r Monitoring_Agent_Configuration_Template_Group_Reference) GetObject() (resp datatypes.Monitoring_Agent_Configuration_Template_Group_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Template_Group_Reference) GetTemplateGroup() (resp datatypes.Monitoring_Agent_Configuration_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "getTemplateGroup", nil, &r.Options, &resp) + return +} + +// Monitoring agent configuration value +type Monitoring_Agent_Configuration_Value struct { + Session *session.Session + Options sl.Options +} + +// GetMonitoringAgentConfigurationValueService returns an instance of the Monitoring_Agent_Configuration_Value SoftLayer service +func GetMonitoringAgentConfigurationValueService(sess *session.Session) Monitoring_Agent_Configuration_Value { + return Monitoring_Agent_Configuration_Value{Session: sess} +} + +func (r Monitoring_Agent_Configuration_Value) Id(id int) Monitoring_Agent_Configuration_Value { + r.Options.Id = &id + return r +} + +func (r Monitoring_Agent_Configuration_Value) Mask(mask string) Monitoring_Agent_Configuration_Value { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Monitoring_Agent_Configuration_Value) Filter(filter string) Monitoring_Agent_Configuration_Value { + r.Options.Filter = filter + return r +} + +func (r Monitoring_Agent_Configuration_Value) Limit(limit int) Monitoring_Agent_Configuration_Value { + r.Options.Limit = &limit + return r +} + +func (r Monitoring_Agent_Configuration_Value) Offset(offset int) Monitoring_Agent_Configuration_Value { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Value) GetDefinition() (resp datatypes.Configuration_Template_Section_Definition, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Value", "getDefinition", nil, &r.Options, &resp) + return +} + +// Retrieve The metric data type used to retrieve metric data currently being tracked. 
+func (r Monitoring_Agent_Configuration_Value) GetMetricDataType() (resp datatypes.Container_Metric_Data_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Value", "getMetricDataType", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Value) GetMonitoringAgent() (resp datatypes.Monitoring_Agent, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Value", "getMonitoringAgent", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Monitoring_Agent_Configuration_Value) GetObject() (resp datatypes.Monitoring_Agent_Configuration_Value, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Value", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Value) GetProfile() (resp datatypes.Configuration_Template_Section_Profile, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Value", "getProfile", nil, &r.Options, &resp) + return +} + +// Monitoring agent status +type Monitoring_Agent_Status struct { + Session *session.Session + Options sl.Options +} + +// GetMonitoringAgentStatusService returns an instance of the Monitoring_Agent_Status SoftLayer service +func GetMonitoringAgentStatusService(sess *session.Session) Monitoring_Agent_Status { + return Monitoring_Agent_Status{Session: sess} +} + +func (r Monitoring_Agent_Status) Id(id int) Monitoring_Agent_Status { + r.Options.Id = &id + return r +} + +func (r Monitoring_Agent_Status) Mask(mask string) Monitoring_Agent_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Monitoring_Agent_Status) Filter(filter string) Monitoring_Agent_Status { + r.Options.Filter = filter + return r +} + +func (r Monitoring_Agent_Status) Limit(limit int) Monitoring_Agent_Status { + r.Options.Limit = &limit + return r +} + +func (r Monitoring_Agent_Status) Offset(offset int) Monitoring_Agent_Status { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Monitoring_Agent_Status) GetObject() (resp datatypes.Monitoring_Agent_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Status", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Monitoring_Robot data type contains general information relating to a monitoring robot. 
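+//
+// Illustrative usage sketch (editorial note, not generated from SoftLayer metadata):
+// checking a robot's connectivity and clearing a "Limited Connectivity" status
+// might look like the following, where robotId is assumed to identify a robot on
+// the active account.
+//
+//	sess := session.New() // credentials resolved from env vars or config
+//	robot := services.GetMonitoringRobotService(sess).Id(robotId)
+//	ok, err := robot.CheckConnection()
+//	if err == nil && !ok {
+//		_, _ = robot.ResetStatus()
+//	}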
+type Monitoring_Robot struct { + Session *session.Session + Options sl.Options +} + +// GetMonitoringRobotService returns an instance of the Monitoring_Robot SoftLayer service +func GetMonitoringRobotService(sess *session.Session) Monitoring_Robot { + return Monitoring_Robot{Session: sess} +} + +func (r Monitoring_Robot) Id(id int) Monitoring_Robot { + r.Options.Id = &id + return r +} + +func (r Monitoring_Robot) Mask(mask string) Monitoring_Robot { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Monitoring_Robot) Filter(filter string) Monitoring_Robot { + r.Options.Filter = filter + return r +} + +func (r Monitoring_Robot) Limit(limit int) Monitoring_Robot { + r.Options.Limit = &limit + return r +} + +func (r Monitoring_Robot) Offset(offset int) Monitoring_Robot { + r.Options.Offset = &offset + return r +} + +// Checks if a monitoring robot can communicate with SoftLayer monitoring management system via the private network. +// +// TCP port 48000 - 48002 must be open on your server or your virtual server in order for this test to succeed. +func (r Monitoring_Robot) CheckConnection() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "checkConnection", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Monitoring_Robot) DeployMonitoringAgents(configurationTemplateGroup *datatypes.Monitoring_Agent_Configuration_Template_Group) (resp datatypes.Provisioning_Version1_Transaction, err error) { + params := []interface{}{ + configurationTemplateGroup, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "deployMonitoringAgents", params, &r.Options, &resp) + return +} + +// Retrieve The account associated with the corresponding robot. +func (r Monitoring_Robot) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "getAccount", nil, &r.Options, &resp) + return +} + +// Returns available configuration template groups for this monitoring agent. +func (r Monitoring_Robot) GetAvailableConfigurationGroups() (resp []datatypes.Monitoring_Agent_Configuration_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "getAvailableConfigurationGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The program (monitoring agent) that gets details of a system or application and reporting of the metric data and triggers alarms for predefined events. +func (r Monitoring_Robot) GetMonitoringAgents() (resp []datatypes.Monitoring_Agent, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "getMonitoringAgents", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Monitoring_Robot) GetObject() (resp datatypes.Monitoring_Robot, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The current status of the robot. +func (r Monitoring_Robot) GetRobotStatus() (resp datatypes.Monitoring_Robot_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "getRobotStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Software_Component that corresponds to the robot installation on the server. 
+func (r Monitoring_Robot) GetSoftwareComponent() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "getSoftwareComponent", nil, &r.Options, &resp)
+	return
+}
+
+// If our monitoring management system is not able to connect to your monitoring robot, it sets the robot status to "Limited Connectivity". Robots in this status will not be processed by our monitoring management system. You cannot manage monitoring agents either.
+//
+// Use this method to reset the monitoring robot status to "Active" to indicate the connection issue is resolved.
+func (r Monitoring_Robot) ResetStatus() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "resetStatus", nil, &r.Options, &resp)
+	return
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/services/network.go b/vendor/github.com/softlayer/softlayer-go/services/network.go
new file mode 100644
index 000000000000..20add187eae9
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/services/network.go
@@ -0,0 +1,15408 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package services
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+// no documentation yet
+type Network struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkService returns an instance of the Network SoftLayer service
+func GetNetworkService(sess *session.Session) Network {
+	return Network{Session: sess}
+}
+
+func (r Network) Id(id int) Network {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network) Mask(mask string) Network {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network) Filter(filter string) Network {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network) Limit(limit int) Network {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network) Offset(offset int) Network {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Provide a template containing the following properties to create a Network:
+// * networkIdentifier
+// * cidr
+// * name
+//
+//
+// The ``networkIdentifier`` must be an IP address within RFC 1918 blocks:
+// * 192.168.0.0/16
+// * 172.16.0.0/12
+// * 10.0.0.0/8
+// The ``cidr`` must be an integer between 16 and 24, inclusive. The ``networkIdentifier``/``cidr`` must represent a valid subnet specification. The ``name`` must not be empty, but otherwise can contain up to 50 characters of user specified information to identify the Network.
+//
+// The subnet specification of the Network bounds the IP address space which can be utilized and constrains the creation of Subnets within the Network.
+// +// Example networkIdentifier/CIDR combinations: +// * 192.168.0.0/16 +// * 192.168.0.0/17 +// * 172.16.0.0/16 +// * 172.31.0.0/16 +// * 10.0.0.0/16 +// * 10.255.0.0/16 +func (r Network) CreateObject(templateObject *datatypes.Network) (resp datatypes.Network, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network", "createObject", params, &r.Options, &resp) + return +} + +// Creation of a Subnet is necessary prior to provisioning compute resources into a Network. In order to create a Subnet, both a [[SoftLayer_Network_Subnet|Subnet]] and [[SoftLayer_Network_Pod|Pod]] must be specified. The Pod determines where the Subnet will be available for use by compute resources. +// +// Provide a Subnet template containing the following properties: +// * networkIdentifier +// * cidr +// The ``networkIdentifier`` must represent an IP address within that specified by the Network. The ``cidr`` must be an integer between 24 and 29, inclusive, and represent a subnet size smaller than the Network's. The ``networkIdentifier``/``cidr`` must represent a valid subnet specification. +// +// Provide a Pod template containing the following property: +// * name +// The ``name`` must represent a valid Pod e.g. sjc01.pod02. See [[SoftLayer_Network_Pod (type)]] for more information. +// +// The following constraints apply to Subnet creation: +// * It must fit within the bounds of the Network. +// * It must be no larger than /24 and no smaller than /29. +// * Its size must not equal that of the Network. This implies that a fully +// utilized Network will have a minimum of two Subnets. +// * The Pod must support the ability to create Networks by having the +// SUPPORTS_CUSTOMER_DEFINED_NETWORK capability. See [[SoftLayer_Network_Pod/getCapabilities]]. +func (r Network) CreateSubnet(subnet *datatypes.Network_Subnet, pod *datatypes.Network_Pod) (resp datatypes.Network_Subnet, err error) { + params := []interface{}{ + subnet, + pod, + } + err = r.Session.DoRequest("SoftLayer_Network", "createSubnet", params, &r.Options, &resp) + return +} + +// Remove the specified Network along with any Subnets. +func (r Network) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network", "deleteObject", nil, &r.Options, &resp) + return +} + +// +// +// Provide a Subnet template containing the following properties: +// * networkIdentifier +// * cidr +// The ``networkIdentifier`` must represent an IP address within that specified by the Network. The ``cidr`` must be an integer between 24 and 29, inclusive, and represent a subnet size smaller than the Network's. The ``networkIdentifier``/``cidr`` must represent a valid subnet specification. Or: +// * id +// The ``id`` must identify a Subnet in the Network. If the ``id`` is provided, the ``networkIdentifier``/``cidr`` will be ignored. +// +// Subnets may only be removed when no compute resources are utilizing them. +func (r Network) DeleteSubnet(subnet *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnet, + } + err = r.Session.DoRequest("SoftLayer_Network", "deleteSubnet", params, &r.Options, &resp) + return +} + +// Modify either the ``name`` or ``notes`` properties of a Network. 
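+//
+// Illustrative usage sketch (editorial note, not generated from SoftLayer metadata):
+// the createObject, createSubnet and editObject calls documented above might be
+// combined as follows, assuming valid credentials and a Pod name such as
+// "sjc01.pod02".
+//
+//	sess := session.New() // credentials resolved from env vars or config
+//	netSvc := services.GetNetworkService(sess)
+//	net, err := netSvc.CreateObject(&datatypes.Network{
+//		Name:              sl.String("example-net"),
+//		NetworkIdentifier: sl.String("10.0.0.0"),
+//		Cidr:              sl.Int(16),
+//	})
+//	if err == nil {
+//		subnet := &datatypes.Network_Subnet{
+//			NetworkIdentifier: sl.String("10.0.0.0"),
+//			Cidr:              sl.Int(24),
+//		}
+//		pod := &datatypes.Network_Pod{Name: sl.String("sjc01.pod02")}
+//		_, _ = netSvc.Id(*net.Id).CreateSubnet(subnet, pod)
+//		_, _ = netSvc.Id(*net.Id).EditObject(&datatypes.Network{Notes: sl.String("created via API")})
+//	}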
+func (r Network) EditObject(templateObject *datatypes.Network) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network) GetAllObjects() (resp []datatypes.Network, err error) { + err = r.Session.DoRequest("SoftLayer_Network", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The size of the Network specified in CIDR notation. Specified in conjunction with the ``networkIdentifier`` to describe the bounding subnet size for the Network. Required for creation. See [[SoftLayer_Network/createObject]] documentation for creation details. +func (r Network) GetCidr() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network", "getCidr", nil, &r.Options, &resp) + return +} + +// Retrieve A name for the Network. This is required during creation of a Network and is entirely user defined. +func (r Network) GetName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network", "getName", nil, &r.Options, &resp) + return +} + +// Retrieve The starting IP address of the Network. Specified in conjunction with the ``cidr`` property to specify the bounding IP address space for the Network. Required for creation. See [[SoftLayer_Network/createObject]] documentation for creation details. +func (r Network) GetNetworkIdentifier() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network", "getNetworkIdentifier", nil, &r.Options, &resp) + return +} + +// Retrieve Notes, or a description of the Network. This is entirely user defined. +func (r Network) GetNotes() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network", "getNotes", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network) GetObject() (resp datatypes.Network, err error) { + err = r.Session.DoRequest("SoftLayer_Network", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The Subnets within the Network. These represent the realized segments of the Network and reside within a [[SoftLayer_Network_Pod|Pod]]. A Subnet must be specified when provisioning a compute resource within a Network. +func (r Network) GetSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network", "getSubnets", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Application_Delivery_Controller data type models a single instance of an application delivery controller. Local properties are read only, except for a ''notes'' property, which can be used to describe your application delivery controller service. The type's relational properties provide more information to the service's function and login information to the controller's backend management if advanced view is enabled. 
+type Network_Application_Delivery_Controller struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkApplicationDeliveryControllerService returns an instance of the Network_Application_Delivery_Controller SoftLayer service
+func GetNetworkApplicationDeliveryControllerService(sess *session.Session) Network_Application_Delivery_Controller {
+	return Network_Application_Delivery_Controller{Session: sess}
+}
+
+func (r Network_Application_Delivery_Controller) Id(id int) Network_Application_Delivery_Controller {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Application_Delivery_Controller) Mask(mask string) Network_Application_Delivery_Controller {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Application_Delivery_Controller) Filter(filter string) Network_Application_Delivery_Controller {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Application_Delivery_Controller) Limit(limit int) Network_Application_Delivery_Controller {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Application_Delivery_Controller) Offset(offset int) Network_Application_Delivery_Controller {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Create or add to an application delivery controller based load balancer service. The loadBalancer parameter must have its ''name'', ''type'', ''sourcePort'', and ''virtualIpAddress'' properties populated. Changes are reflected immediately in the application delivery controller.
+func (r Network_Application_Delivery_Controller) CreateLiveLoadBalancer(loadBalancer *datatypes.Network_LoadBalancer_VirtualIpAddress) (resp bool, err error) {
+	params := []interface{}{
+		loadBalancer,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "createLiveLoadBalancer", params, &r.Options, &resp)
+	return
+}
+
+// Remove a virtual IP address from an application delivery controller based load balancer. Only the ''name'' property in the loadBalancer parameter must be populated. Changes are reflected immediately in the application delivery controller.
+func (r Network_Application_Delivery_Controller) DeleteLiveLoadBalancer(loadBalancer *datatypes.Network_LoadBalancer_VirtualIpAddress) (resp bool, err error) {
+	params := []interface{}{
+		loadBalancer,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "deleteLiveLoadBalancer", params, &r.Options, &resp)
+	return
+}
+
+// Remove an entire load balancer service, including all virtual IP addresses, from an application delivery controller based load balancer. The ''name'' property and the ''name'' property within the ''vip'' property of the service parameter must be provided. Changes are reflected immediately in the application delivery controller.
+func (r Network_Application_Delivery_Controller) DeleteLiveLoadBalancerService(service *datatypes.Network_LoadBalancer_Service) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		service,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "deleteLiveLoadBalancerService", params, &r.Options, &resp)
+	return
+}
+
+// Edit an application delivery controller record. Currently only a controller's notes property is editable.
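+//
+// Illustrative usage sketch (editorial note, not generated from SoftLayer metadata):
+// updating the notes of a controller identified by adcId might look like the
+// following, assuming a session built from valid API credentials.
+//
+//	sess := session.New() // credentials resolved from env vars or config
+//	adcSvc := services.GetNetworkApplicationDeliveryControllerService(sess)
+//	_, err := adcSvc.Id(adcId).EditObject(&datatypes.Network_Application_Delivery_Controller{
+//		Notes: sl.String("updated via API"),
+//	})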
+func (r Network_Application_Delivery_Controller) EditObject(templateObject *datatypes.Network_Application_Delivery_Controller) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer customer account that owns an application delivery controller record. +func (r Network_Application_Delivery_Controller) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The average daily public bandwidth usage for the current billing cycle. +func (r Network_Application_Delivery_Controller) GetAverageDailyPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getAverageDailyPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller) GetBandwidthDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time, networkType *string) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + networkType, + } + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getBandwidthDataByDate", params, &r.Options, &resp) + return +} + +// Use this method when needing a bandwidth image for a single application delivery controller. It will gather the correct input parameters for the generic graphing utility based on the date ranges +func (r Network_Application_Delivery_Controller) GetBandwidthImageByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time, networkType *string) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + networkType, + } + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getBandwidthImageByDate", params, &r.Options, &resp) + return +} + +// Retrieve The billing item for a Application Delivery Controller. +func (r Network_Application_Delivery_Controller) GetBillingItem() (resp datatypes.Billing_Item_Network_Application_Delivery_Controller, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve Previous configurations for an Application Delivery Controller. +func (r Network_Application_Delivery_Controller) GetConfigurationHistory() (resp []datatypes.Network_Application_Delivery_Controller_Configuration_History, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getConfigurationHistory", nil, &r.Options, &resp) + return +} + +// Retrieve bandwidth graph by date. +func (r Network_Application_Delivery_Controller) GetCustomBandwidthDataByDate(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + graphData, + } + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getCustomBandwidthDataByDate", params, &r.Options, &resp) + return +} + +// Retrieve The datacenter that the application delivery controller resides in. 
+func (r Network_Application_Delivery_Controller) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve A brief description of an application delivery controller record. +func (r Network_Application_Delivery_Controller) GetDescription() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getDescription", nil, &r.Options, &resp) + return +} + +// Retrieve The date in which the license for this application delivery controller will expire. +func (r Network_Application_Delivery_Controller) GetLicenseExpirationDate() (resp datatypes.Time, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getLicenseExpirationDate", nil, &r.Options, &resp) + return +} + +// Get the graph image for an application delivery controller service based on the supplied graph type and metric. The available graph types are: 'connections' and 'status', and the available metrics are: 'day', 'week' and 'month'. +// +// This method returns the raw binary image data. +func (r Network_Application_Delivery_Controller) GetLiveLoadBalancerServiceGraphImage(service *datatypes.Network_LoadBalancer_Service, graphType *string, metric *string) (resp []byte, err error) { + params := []interface{}{ + service, + graphType, + metric, + } + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getLiveLoadBalancerServiceGraphImage", params, &r.Options, &resp) + return +} + +// Retrieve The virtual IP address records that belong to an application delivery controller based load balancer. +func (r Network_Application_Delivery_Controller) GetLoadBalancers() (resp []datatypes.Network_LoadBalancer_VirtualIpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getLoadBalancers", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that this Application Delivery Controller is a managed resource. +func (r Network_Application_Delivery_Controller) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve An application delivery controller's management ip address. +func (r Network_Application_Delivery_Controller) GetManagementIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getManagementIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The network VLAN that an application delivery controller resides on. +func (r Network_Application_Delivery_Controller) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getNetworkVlan", nil, &r.Options, &resp) + return +} + +// Retrieve The network VLANs that an application delivery controller resides on. 
+func (r Network_Application_Delivery_Controller) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getNetworkVlans", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Application_Delivery_Controller object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Application_Delivery_Controller service. You can only retrieve application delivery controllers that are associated with your SoftLayer customer account. +func (r Network_Application_Delivery_Controller) GetObject() (resp datatypes.Network_Application_Delivery_Controller, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The total public outbound bandwidth for the current billing cycle. +func (r Network_Application_Delivery_Controller) GetOutboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getOutboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The password used to connect to an application delivery controller's management interface when it is operating in advanced view mode. +func (r Network_Application_Delivery_Controller) GetPassword() (resp datatypes.Software_Component_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getPassword", nil, &r.Options, &resp) + return +} + +// Retrieve An application delivery controller's primary public IP address. +func (r Network_Application_Delivery_Controller) GetPrimaryIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getPrimaryIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The projected public outbound bandwidth for the current billing cycle. +func (r Network_Application_Delivery_Controller) GetProjectedPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getProjectedPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve A network application controller's subnets. 
A subnet is a group of IP addresses
+func (r Network_Application_Delivery_Controller) GetSubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getSubnets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Application_Delivery_Controller) GetTagReferences() (resp []datatypes.Tag_Reference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getTagReferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Application_Delivery_Controller) GetType() (resp datatypes.Network_Application_Delivery_Controller_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getType", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Application_Delivery_Controller) GetVirtualIpAddresses() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getVirtualIpAddresses", nil, &r.Options, &resp)
+	return
+}
+
+// Restore an application delivery controller's base configuration state. The configuration will be set to what it was when initially provisioned.
+func (r Network_Application_Delivery_Controller) RestoreBaseConfiguration() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "restoreBaseConfiguration", nil, &r.Options, &resp)
+	return
+}
+
+// Restore an application delivery controller's configuration state.
+func (r Network_Application_Delivery_Controller) RestoreConfiguration(configurationHistoryId *int) (resp bool, err error) {
+	params := []interface{}{
+		configurationHistoryId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "restoreConfiguration", params, &r.Options, &resp)
+	return
+}
+
+// Save an application delivery controller's configuration state. The notes property for this method is optional.
+func (r Network_Application_Delivery_Controller) SaveCurrentConfiguration(notes *string) (resp datatypes.Network_Application_Delivery_Controller_Configuration_History, err error) {
+	params := []interface{}{
+		notes,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "saveCurrentConfiguration", params, &r.Options, &resp)
+	return
+}
+
+// Update the virtual IP address interface within an application delivery controller based load balancer identified by the ''name'' property in the loadBalancer parameter. You only need to set the properties in the loadBalancer parameter that you wish to change. Any virtual IP properties omitted or left empty are ignored. Changes are reflected immediately in the application delivery controller.
+func (r Network_Application_Delivery_Controller) UpdateLiveLoadBalancer(loadBalancer *datatypes.Network_LoadBalancer_VirtualIpAddress) (resp bool, err error) {
+	params := []interface{}{
+		loadBalancer,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "updateLiveLoadBalancer", params, &r.Options, &resp)
+	return
+}
+
+// Update the NetScaler VPX License.
+//
+// This service will create a transaction to update a NetScaler VPX License. After the license is updated the load balancer will reboot in order to apply the newly issued license.
+//
+// The load balancer will be unavailable during the reboot.
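+//
+// Illustrative usage sketch (editorial note, not generated from SoftLayer metadata):
+// triggering the license update for a controller identified by adcId; the returned
+// transaction describes the provisioning job, and err should be checked before
+// assuming the reboot has been scheduled.
+//
+//	sess := session.New() // credentials resolved from env vars or config
+//	adcSvc := services.GetNetworkApplicationDeliveryControllerService(sess)
+//	txn, err := adcSvc.Id(adcId).UpdateNetScalerLicense()
+//	_ = txn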
+func (r Network_Application_Delivery_Controller) UpdateNetScalerLicense() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "updateNetScalerLicense", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Application_Delivery_Controller_Configuration_History data type models a single instance of a configuration history entry for an application delivery controller. The configuration history entries are used to support creating backups of an application delivery controller's configuration state in order to restore them later if needed. +type Network_Application_Delivery_Controller_Configuration_History struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerConfigurationHistoryService returns an instance of the Network_Application_Delivery_Controller_Configuration_History SoftLayer service +func GetNetworkApplicationDeliveryControllerConfigurationHistoryService(sess *session.Session) Network_Application_Delivery_Controller_Configuration_History { + return Network_Application_Delivery_Controller_Configuration_History{Session: sess} +} + +func (r Network_Application_Delivery_Controller_Configuration_History) Id(id int) Network_Application_Delivery_Controller_Configuration_History { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_Configuration_History) Mask(mask string) Network_Application_Delivery_Controller_Configuration_History { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_Configuration_History) Filter(filter string) Network_Application_Delivery_Controller_Configuration_History { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_Configuration_History) Limit(limit int) Network_Application_Delivery_Controller_Configuration_History { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_Configuration_History) Offset(offset int) Network_Application_Delivery_Controller_Configuration_History { + r.Options.Offset = &offset + return r +} + +// deleteObject permanently removes a configuration history record +func (r Network_Application_Delivery_Controller_Configuration_History) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_Configuration_History", "deleteObject", nil, &r.Options, &resp) + return +} + +// Retrieve The application delivery controller that a configuration history record belongs to. 
+func (r Network_Application_Delivery_Controller_Configuration_History) GetController() (resp datatypes.Network_Application_Delivery_Controller, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_Configuration_History", "getController", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_Configuration_History) GetObject() (resp datatypes.Network_Application_Delivery_Controller_Configuration_History, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_Configuration_History", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerHealthAttributeService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerHealthAttributeService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute { + return Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) GetHealthCheck() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute", "getHealthCheck", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) GetType() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type, err error) { + err = 
r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerHealthAttributeTypeService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerHealthAttributeTypeService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type { + return Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type) GetAllObjects() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Health_Check struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerHealthCheckService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Health_Check SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerHealthCheckService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Health_Check { + return Network_Application_Delivery_Controller_LoadBalancer_Health_Check{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) Id(id 
int) Network_Application_Delivery_Controller_LoadBalancer_Health_Check { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Health_Check { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Health_Check { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Health_Check { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Health_Check { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) GetAttributes() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Check", "getAttributes", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Check", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of scale load balancers that use this health check. 
+func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) GetScaleLoadBalancers() (resp []datatypes.Scale_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Check", "getScaleLoadBalancers", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) GetServices() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Check", "getServices", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) GetType() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Check", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerHealthCheckTypeService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerHealthCheckTypeService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type { + return Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type) GetAllObjects() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type, err error) { + err = 
r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Routing_Method struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerRoutingMethodService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Routing_Method SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerRoutingMethodService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Routing_Method { + return Network_Application_Delivery_Controller_LoadBalancer_Routing_Method{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Method) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_Routing_Method { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Method) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Routing_Method { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Method) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Routing_Method { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Method) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Routing_Method { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Method) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Routing_Method { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Method) GetAllObjects() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Method, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Routing_Method", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Method) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Method, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Routing_Method", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Routing_Type struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerRoutingTypeService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Routing_Type SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerRoutingTypeService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Routing_Type { + return Network_Application_Delivery_Controller_LoadBalancer_Routing_Type{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Type) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_Routing_Type { + r.Options.Id = &id + return r +} + +func (r 
Network_Application_Delivery_Controller_LoadBalancer_Routing_Type) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Routing_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Type) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Routing_Type { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Type) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Routing_Type { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Type) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Routing_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Type) GetAllObjects() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Routing_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Type) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Routing_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Service struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerServiceService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Service SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerServiceService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Service { + return Network_Application_Delivery_Controller_LoadBalancer_Service{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_Service { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Service { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Service { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Service { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Service { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) DeleteObject() (err error) { + var resp datatypes.Void 
+ err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "deleteObject", nil, &r.Options, &resp) + return +} + +// Get the graph image for a load balancer service based on the supplied graph type and metric. The available graph types are: 'connections' and 'status', and the available metrics are: 'day', 'week' and 'month'. +// +// This method returns the raw binary image data. +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetGraphImage(graphType *string, metric *string) (resp []byte, err error) { + params := []interface{}{ + graphType, + metric, + } + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getGraphImage", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetGroupReferences() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group_CrossReference, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getGroupReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetGroups() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getGroups", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetHealthCheck() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getHealthCheck", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetHealthChecks() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getHealthChecks", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetIpAddress() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getIpAddress", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetServiceGroup() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getServiceGroup", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) ToggleStatus() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "toggleStatus", nil, &r.Options, &resp) + return +} + +// no 
documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Service_Group struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerServiceGroupService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Service_Group SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerServiceGroupService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Service_Group { + return Network_Application_Delivery_Controller_LoadBalancer_Service_Group{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_Service_Group { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Service_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Service_Group { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Service_Group { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Service_Group { + r.Options.Offset = &offset + return r +} + +// Get the graph image for a load balancer service group based on the supplied graph type and metric. The only available graph type currently is: 'connections', and the available metrics are: 'day', 'week' and 'month'. +// +// This method returns the raw binary image data. 
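// ---- Editor's illustrative sketch (not part of the vendored upstream file) ----
// getGraphImage on a service group, as documented just above, returns raw PNG
// bytes for the 'connections' graph over a 'day', 'week' or 'month' window; the
// sketch simply writes those bytes to disk. Import paths, session.New and the
// group id/filename are assumptions.
package main

import (
	"io/ioutil"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("my-user", "my-api-key") // placeholder credentials
	group := services.GetNetworkApplicationDeliveryControllerLoadBalancerServiceGroupService(sess).
		Id(4321) // hypothetical service group id

	graphType, metric := "connections", "week" // the only documented graph type; metric is day/week/month
	png, err := group.GetGraphImage(&graphType, &metric)
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("service-group-connections.png", png, 0644); err != nil {
		log.Fatal(err)
	}
}
// ---- end of sketch ----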
+func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetGraphImage(graphType *string, metric *string) (resp []byte, err error) { + params := []interface{}{ + graphType, + metric, + } + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getGraphImage", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetRoutingMethod() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Method, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getRoutingMethod", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetRoutingType() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getRoutingType", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetServiceReferences() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group_CrossReference, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getServiceReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetServices() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getServices", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetVirtualServer() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getVirtualServer", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetVirtualServers() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getVirtualServers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) KickAllConnections() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "kickAllConnections", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService returns an 
instance of the Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress { + return Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress { + r.Options.Offset = &offset + return r +} + +// Like any other API object, the load balancers can have their exposed properties edited by passing in a modified version of the object. The load balancer object also can modify its services in this way. Simply request the load balancer object you wish to edit, then modify the objects in the services array and pass the modified object to this function. WARNING: Services cannot be deleted in this manner, you must call deleteObject() on the service to physically remove them from the load balancer. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) EditObject(templateObject *datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual IP address's associated application delivery controller. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetApplicationDeliveryController() (resp datatypes.Network_Application_Delivery_Controller, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getApplicationDeliveryController", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual IP address's associated application delivery controllers. 
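// ---- Editor's illustrative sketch (not part of the vendored upstream file) ----
// The editObject warning above stresses that a load balancer's services cannot
// be removed by editing the virtual IP template; deleteObject must be called on
// the service itself, via the LoadBalancer_Service service defined earlier in
// this file. Import paths, session.New and the id are assumptions.
package main

import (
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("my-user", "my-api-key") // placeholder credentials

	// Physically remove a single service from the load balancer.
	svc := services.GetNetworkApplicationDeliveryControllerLoadBalancerServiceService(sess).
		Id(98765) // hypothetical load balancer service id
	if err := svc.DeleteObject(); err != nil {
		log.Fatal(err)
	}
}
// ---- end of sketch ----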
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetApplicationDeliveryControllers() (resp []datatypes.Network_Application_Delivery_Controller, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getApplicationDeliveryControllers", nil, &r.Options, &resp) + return +} + +// Yields a list of the SSL/TLS encryption ciphers that are currently supported on this virtual IP address instance. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetAvailableSecureTransportCiphers() (resp []datatypes.Security_SecureTransportCipher, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getAvailableSecureTransportCiphers", nil, &r.Options, &resp) + return +} + +// Yields a list of the secure communication protocols that are currently supported on this virtual IP address instance. The list of supported ciphers for each protocol is culled to match availability. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetAvailableSecureTransportProtocols() (resp []datatypes.Security_SecureTransportProtocol, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getAvailableSecureTransportProtocols", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for the load balancer virtual IP. This is only valid when dedicatedFlag is false. This is an independent virtual IP, and if canceled, will only affect the associated virtual IP. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for the load balancing device housing the virtual IP. This billing item represents a device which could contain other virtual IPs. Caution should be taken when canceling. This is only valid when dedicatedFlag is true. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetDedicatedBillingItem() (resp datatypes.Billing_Item_Network_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getDedicatedBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve Denotes whether the virtual IP is configured within a high availability cluster. 
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetHighAvailabilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getHighAvailabilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetIpAddress() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetLoadBalancerHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getLoadBalancerHardware", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the load balancer is a managed resource. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The list of security ciphers enabled for this virtual IP address +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetSecureTransportCiphers() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_SecureTransportCipher, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getSecureTransportCiphers", nil, &r.Options, &resp) + return +} + +// Retrieve The list of secure transport protocols enabled for this virtual IP address +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetSecureTransportProtocols() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_SecureTransportProtocol, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getSecureTransportProtocols", nil, &r.Options, &resp) + return +} + +// Retrieve The SSL certificate currently associated with the VIP. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetSecurityCertificate() (resp datatypes.Security_Certificate, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getSecurityCertificate", nil, &r.Options, &resp) + return +} + +// Retrieve The SSL certificate currently associated with the VIP. Provides chosen certificate visibility to unprivileged users. 
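// ---- Editor's illustrative sketch (not part of the vendored upstream file) ----
// startSsl (a little further down) should only be called once a certificate is
// associated with the virtual IP, so this sketch checks getSecurityCertificate
// first. Import paths, session.New, the VIP id, and the Id *int field on the
// certificate datatype are assumptions consistent with the rest of this
// generated package.
package main

import (
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("my-user", "my-api-key") // placeholder credentials
	vip := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess).
		Id(24680) // hypothetical VIP id

	cert, err := vip.GetSecurityCertificate()
	if err != nil {
		log.Fatal(err)
	}
	if cert.Id == nil { // assumed Id *int field; nil is read here as "no certificate"
		log.Fatal("no SSL certificate associated with this VIP; configure one before startSsl")
	}

	ok, err := vip.StartSsl()
	if err != nil || !ok {
		log.Fatalf("startSsl failed: ok=%v err=%v", ok, err)
	}
}
// ---- end of sketch ----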
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetSecurityCertificateEntry() (resp datatypes.Security_Certificate_Entry, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getSecurityCertificateEntry", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetVirtualServers() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getVirtualServers", nil, &r.Options, &resp) + return +} + +// Start SSL acceleration on all SSL virtual services (those with a type of HTTPS). This action should be taken only after configuring an SSL certificate for the virtual IP. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) StartSsl() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "startSsl", nil, &r.Options, &resp) + return +} + +// Stop SSL acceleration on all SSL virtual services (those with a type of HTTPS). +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) StopSsl() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "stopSsl", nil, &r.Options, &resp) + return +} + +// Upgrades the connection limit on the Virtual IP to Address to the next, higher connection limit of the same product. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) UpgradeConnectionLimit() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "upgradeConnectionLimit", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_VirtualServer struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerVirtualServerService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_VirtualServer SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerVirtualServerService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_VirtualServer { + return Network_Application_Delivery_Controller_LoadBalancer_VirtualServer{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_VirtualServer { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_VirtualServer { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_VirtualServer { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_VirtualServer { + r.Options.Limit = &limit + return r +} + +func (r 
Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_VirtualServer { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) DeleteObject() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) GetRoutingMethod() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Method, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "getRoutingMethod", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of scale load balancers this virtual server applies to. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) GetScaleLoadBalancers() (resp []datatypes.Scale_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "getScaleLoadBalancers", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) GetServiceGroups() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "getServiceGroups", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) GetVirtualIpAddress() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "getVirtualIpAddress", nil, &r.Options, &resp) + return +} + +// Start SSL acceleration on all SSL virtual services (those with a type of HTTPS). This action should be taken only after configuring an SSL certificate for the virtual IP. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) StartSsl() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "startSsl", nil, &r.Options, &resp) + return +} + +// Stop SSL acceleration on all SSL virtual services (those with a type of HTTPS). +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) StopSsl() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "stopSsl", nil, &r.Options, &resp) + return +} + +// A SoftLayer_Network_Backbone represents a single backbone connection from SoftLayer to the public Internet, from the Internet to the SoftLayer private network, or a link that connects the private networks between SoftLayer's datacenters. 
The SoftLayer_Network_Backbone data type is a collection of data associated with one of those connections. +type Network_Backbone struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkBackboneService returns an instance of the Network_Backbone SoftLayer service +func GetNetworkBackboneService(sess *session.Session) Network_Backbone { + return Network_Backbone{Session: sess} +} + +func (r Network_Backbone) Id(id int) Network_Backbone { + r.Options.Id = &id + return r +} + +func (r Network_Backbone) Mask(mask string) Network_Backbone { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Backbone) Filter(filter string) Network_Backbone { + r.Options.Filter = filter + return r +} + +func (r Network_Backbone) Limit(limit int) Network_Backbone { + r.Options.Limit = &limit + return r +} + +func (r Network_Backbone) Offset(offset int) Network_Backbone { + r.Options.Offset = &offset + return r +} + +// Retrieve a list of all SoftLayer backbone connections. Use this method if you need all backbones or don't know the id number of a specific backbone. +func (r Network_Backbone) GetAllBackbones() (resp []datatypes.Network_Backbone, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone", "getAllBackbones", nil, &r.Options, &resp) + return +} + +// Retrieve a list of all SoftLayer backbone connections for a location name. +func (r Network_Backbone) GetBackbonesForLocationName(locationName *string) (resp []datatypes.Network_Backbone, err error) { + params := []interface{}{ + locationName, + } + err = r.Session.DoRequest("SoftLayer_Network_Backbone", "getBackbonesForLocationName", params, &r.Options, &resp) + return +} + +// Retrieve a graph of a SoftLayer backbone's last 24 hours of activity. getGraphImage returns a PNG image measuring 827 pixels by 293 pixels. +func (r Network_Backbone) GetGraphImage() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone", "getGraphImage", nil, &r.Options, &resp) + return +} + +// Retrieve A backbone's status. +func (r Network_Backbone) GetHealth() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone", "getHealth", nil, &r.Options, &resp) + return +} + +// Retrieve Which of the SoftLayer datacenters a backbone is connected to. +func (r Network_Backbone) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve A backbone's primary network component. +func (r Network_Backbone) GetNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone", "getNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve an individual SoftLayer_Network_Backbone record. Use the getAllBackbones() method to retrieve a list of all SoftLayer network backbones. 
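// ---- Editor's illustrative sketch (not part of the vendored upstream file) ----
// As the comment above suggests, getAllBackbones is the entry point when no
// backbone id is known; getGraphImage then returns the last 24 hours of
// activity as an 827x293 PNG. Import paths, session.New, the output filename,
// and the Id *int field on the backbone datatype are assumptions.
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("my-user", "my-api-key") // placeholder credentials
	backbones := services.GetNetworkBackboneService(sess)

	all, err := backbones.GetAllBackbones()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d backbone connections\n", len(all))

	if len(all) > 0 && all[0].Id != nil { // assumed Id *int field
		png, err := backbones.Id(*all[0].Id).GetGraphImage()
		if err != nil {
			log.Fatal(err)
		}
		if err := ioutil.WriteFile("backbone-activity.png", png, 0644); err != nil {
			log.Fatal(err)
		}
	}
}
// ---- end of sketch ----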
+func (r Network_Backbone) GetObject() (resp datatypes.Network_Backbone, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Backbone_Location_Dependent struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkBackboneLocationDependentService returns an instance of the Network_Backbone_Location_Dependent SoftLayer service +func GetNetworkBackboneLocationDependentService(sess *session.Session) Network_Backbone_Location_Dependent { + return Network_Backbone_Location_Dependent{Session: sess} +} + +func (r Network_Backbone_Location_Dependent) Id(id int) Network_Backbone_Location_Dependent { + r.Options.Id = &id + return r +} + +func (r Network_Backbone_Location_Dependent) Mask(mask string) Network_Backbone_Location_Dependent { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Backbone_Location_Dependent) Filter(filter string) Network_Backbone_Location_Dependent { + r.Options.Filter = filter + return r +} + +func (r Network_Backbone_Location_Dependent) Limit(limit int) Network_Backbone_Location_Dependent { + r.Options.Limit = &limit + return r +} + +func (r Network_Backbone_Location_Dependent) Offset(offset int) Network_Backbone_Location_Dependent { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Backbone_Location_Dependent) GetAllObjects() (resp []datatypes.Network_Backbone_Location_Dependent, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone_Location_Dependent", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Backbone_Location_Dependent) GetDependentLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone_Location_Dependent", "getDependentLocation", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Backbone_Location_Dependent) GetObject() (resp datatypes.Network_Backbone_Location_Dependent, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone_Location_Dependent", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Backbone_Location_Dependent) GetSourceDependentsByName(locationName *string) (resp datatypes.Location, err error) { + params := []interface{}{ + locationName, + } + err = r.Session.DoRequest("SoftLayer_Network_Backbone_Location_Dependent", "getSourceDependentsByName", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Backbone_Location_Dependent) GetSourceLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone_Location_Dependent", "getSourceLocation", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Bandwidth_Version1_Allotment class provides methods and data structures necessary to work with an array of hardware objects associated with a single Bandwidth Pooling. 
+type Network_Bandwidth_Version1_Allotment struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkBandwidthVersion1AllotmentService returns an instance of the Network_Bandwidth_Version1_Allotment SoftLayer service +func GetNetworkBandwidthVersion1AllotmentService(sess *session.Session) Network_Bandwidth_Version1_Allotment { + return Network_Bandwidth_Version1_Allotment{Session: sess} +} + +func (r Network_Bandwidth_Version1_Allotment) Id(id int) Network_Bandwidth_Version1_Allotment { + r.Options.Id = &id + return r +} + +func (r Network_Bandwidth_Version1_Allotment) Mask(mask string) Network_Bandwidth_Version1_Allotment { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Bandwidth_Version1_Allotment) Filter(filter string) Network_Bandwidth_Version1_Allotment { + r.Options.Filter = filter + return r +} + +func (r Network_Bandwidth_Version1_Allotment) Limit(limit int) Network_Bandwidth_Version1_Allotment { + r.Options.Limit = &limit + return r +} + +func (r Network_Bandwidth_Version1_Allotment) Offset(offset int) Network_Bandwidth_Version1_Allotment { + r.Options.Offset = &offset + return r +} + +// Create a allotment for servers to pool bandwidth and avoid overages in billing if they use more than there allocated bandwidth. +func (r Network_Bandwidth_Version1_Allotment) CreateObject(templateObject *datatypes.Network_Bandwidth_Version1_Allotment) (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "createObject", params, &r.Options, &resp) + return +} + +// Edit a bandwidth allotment's local properties. Currently you may only change an allotment's name. Use the [[SoftLayer_Network_Bandwidth_Version1_Allotment::reassignServers|reassignServers()]] and [[SoftLayer_Network_Bandwidth_Version1_Allotment::unassignServers|unassignServers()]] methods to move servers in and out of your allotments. +func (r Network_Bandwidth_Version1_Allotment) EditObject(templateObject *datatypes.Network_Bandwidth_Version1_Allotment) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account associated with this virtual rack. +func (r Network_Bandwidth_Version1_Allotment) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allotment detail records associated with this virtual rack. +func (r Network_Bandwidth_Version1_Allotment) GetActiveDetails() (resp []datatypes.Network_Bandwidth_Version1_Allotment_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getActiveDetails", nil, &r.Options, &resp) + return +} + +// Retrieve The Application Delivery Controller contained within a virtual rack. 
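// ---- Editor's illustrative sketch (not part of the vendored upstream file) ----
// Per the editObject note above, only an allotment's name can be changed this
// way; servers move via reassignServers/unassignServers. Import paths,
// session.New, the allotment id, and the Name *string field on the allotment
// datatype are assumptions.
package main

import (
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("my-user", "my-api-key") // placeholder credentials
	allotment := services.GetNetworkBandwidthVersion1AllotmentService(sess).
		Id(1111) // hypothetical allotment id

	newName := "production-pool"
	template := datatypes.Network_Bandwidth_Version1_Allotment{Name: &newName} // assumed Name *string field

	ok, err := allotment.EditObject(&template)
	if err != nil || !ok {
		log.Fatalf("rename failed: ok=%v err=%v", ok, err)
	}
}
// ---- end of sketch ----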
+func (r Network_Bandwidth_Version1_Allotment) GetApplicationDeliveryControllers() (resp []datatypes.Network_Application_Delivery_Controller, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getApplicationDeliveryControllers", nil, &r.Options, &resp) + return +} + +// Retrieve The average daily public bandwidth usage for the current billing cycle. +func (r Network_Bandwidth_Version1_Allotment) GetAverageDailyPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getAverageDailyPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// This method recurses through all servers on a Bandwidth Pool for 24 hour time span starting at a given date/time. To get the private data set for all servers on a Bandwidth Pool from midnight Feb 1st, 2008 to 23:59 on Feb 1st, you would pass a parameter of '02/01/2008 0:00'. The ending date / time is calculated for you to prevent requesting data from the server for periods larger than 24 hours as this method requires processing a lot of data records and can get slow at times. +func (r Network_Bandwidth_Version1_Allotment) GetBackendBandwidthByHour(date *datatypes.Time) (resp []datatypes.Container_Network_Bandwidth_Version1_Usage, err error) { + params := []interface{}{ + date, + } + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBackendBandwidthByHour", params, &r.Options, &resp) + return +} + +// This method recurses through all servers on a Bandwidth Pool between the given start and end dates to retrieve public bandwidth data. +func (r Network_Bandwidth_Version1_Allotment) GetBackendBandwidthUse(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Network_Bandwidth_Version1_Usage_Detail, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBackendBandwidthUse", params, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allotment type of this virtual rack. +func (r Network_Bandwidth_Version1_Allotment) GetBandwidthAllotmentType() (resp datatypes.Network_Bandwidth_Version1_Allotment_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBandwidthAllotmentType", nil, &r.Options, &resp) + return +} + +// Retrieve a collection of bandwidth data from an individual public or private network tracking object. Data is ideal if you with to employ your own traffic storage and graphing systems. +func (r Network_Bandwidth_Version1_Allotment) GetBandwidthForDateRange(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBandwidthForDateRange", params, &r.Options, &resp) + return +} + +// This method recurses through all servers on a Bandwidth Pool for a given snapshot range, gathers the necessary parameters, and then calls the bandwidth graphing server. The return result is a container that includes the min and max dates for all servers to be used in the query, as well as an image in PNG format. This method uses the new and improved drawing routines which should return in a reasonable time frame now that the new backend data warehouse is used. 
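// ---- Editor's illustrative sketch (not part of the vendored upstream file) ----
// The get*BandwidthByHour comments above take only the start of a 24-hour
// window; the end is computed server-side. This sketch builds that start time,
// assuming datatypes.Time simply embeds time.Time as it does upstream; import
// paths, session.New and the allotment id are likewise assumptions.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("my-user", "my-api-key") // placeholder credentials
	allotment := services.GetNetworkBandwidthVersion1AllotmentService(sess).
		Id(1111) // hypothetical allotment id

	// Midnight on 1 Feb 2020 requests the private-network data set through 23:59 that day.
	start := datatypes.Time{Time: time.Date(2020, time.February, 1, 0, 0, 0, 0, time.UTC)}

	usage, err := allotment.GetBackendBandwidthByHour(&start)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("got %d hourly private-network usage records\n", len(usage))
}
// ---- end of sketch ----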
+func (r Network_Bandwidth_Version1_Allotment) GetBandwidthImage(networkType *string, snapshotRange *string, draw *bool, dateSpecified *datatypes.Time, dateSpecifiedEnd *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + networkType, + snapshotRange, + draw, + dateSpecified, + dateSpecifiedEnd, + } + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBandwidthImage", params, &r.Options, &resp) + return +} + +// Retrieve The bare metal server instances contained within a virtual rack. +func (r Network_Bandwidth_Version1_Allotment) GetBareMetalInstances() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBareMetalInstances", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual rack's raw bandwidth usage data for an account's current billing cycle. One object is returned for each network this server is attached to. +func (r Network_Bandwidth_Version1_Allotment) GetBillingCycleBandwidthUsage() (resp []datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBillingCycleBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual rack's raw private network bandwidth usage data for an account's current billing cycle. +func (r Network_Bandwidth_Version1_Allotment) GetBillingCyclePrivateBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBillingCyclePrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual rack's raw public network bandwidth usage data for an account's current billing cycle. +func (r Network_Bandwidth_Version1_Allotment) GetBillingCyclePublicBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBillingCyclePublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public bandwidth used in this virtual rack for an account's current billing cycle. +func (r Network_Bandwidth_Version1_Allotment) GetBillingCyclePublicUsageTotal() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBillingCyclePublicUsageTotal", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual rack's billing item. +func (r Network_Bandwidth_Version1_Allotment) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve An object that provides commonly used bandwidth summary components for the current billing cycle. +func (r Network_Bandwidth_Version1_Allotment) GetCurrentBandwidthSummary() (resp datatypes.Metric_Tracking_Object_Bandwidth_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getCurrentBandwidthSummary", nil, &r.Options, &resp) + return +} + +// Retrieve bandwidth graph by date. 
+func (r Network_Bandwidth_Version1_Allotment) GetCustomBandwidthDataByDate(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + graphData, + } + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getCustomBandwidthDataByDate", params, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allotment detail records associated with this virtual rack. +func (r Network_Bandwidth_Version1_Allotment) GetDetails() (resp []datatypes.Network_Bandwidth_Version1_Allotment_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getDetails", nil, &r.Options, &resp) + return +} + +// This method recurses through all servers on a Bandwidth Pool for 24 hour time span starting at a given date/time. To get the public data set for all servers on a Bandwidth Pool from midnight Feb 1st, 2008 to 23:59 on Feb 1st, you would pass a parameter of '02/01/2008 0:00'. The ending date / time is calculated for you to prevent requesting data from the server for periods larger than 24 hours as this method requires processing a lot of data records and can get slow at times. +func (r Network_Bandwidth_Version1_Allotment) GetFrontendBandwidthByHour(date *datatypes.Time) (resp []datatypes.Container_Network_Bandwidth_Version1_Usage, err error) { + params := []interface{}{ + date, + } + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getFrontendBandwidthByHour", params, &r.Options, &resp) + return +} + +// This method recurses through all servers on a Bandwidth Pool between the given start and end dates to retrieve private bandwidth data. +func (r Network_Bandwidth_Version1_Allotment) GetFrontendBandwidthUse(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Network_Bandwidth_Version1_Usage_Detail, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getFrontendBandwidthUse", params, &r.Options, &resp) + return +} + +// Retrieve The hardware contained within a virtual rack. +func (r Network_Bandwidth_Version1_Allotment) GetHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The total public inbound bandwidth used in this virtual rack for an account's current billing cycle. +func (r Network_Bandwidth_Version1_Allotment) GetInboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getInboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The location group associated with this virtual rack. +func (r Network_Bandwidth_Version1_Allotment) GetLocationGroup() (resp datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getLocationGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The managed bare metal server instances contained within a virtual rack. +func (r Network_Bandwidth_Version1_Allotment) GetManagedBareMetalInstances() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getManagedBareMetalInstances", nil, &r.Options, &resp) + return +} + +// Retrieve The managed hardware contained within a virtual rack. 
+func (r Network_Bandwidth_Version1_Allotment) GetManagedHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getManagedHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The managed Virtual Server contained within a virtual rack. +func (r Network_Bandwidth_Version1_Allotment) GetManagedVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getManagedVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual rack's metric tracking object. This object records all periodic polled data available to this rack. +func (r Network_Bandwidth_Version1_Allotment) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object_VirtualDedicatedRack, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Retrieve The metric tracking object id for this allotment. +func (r Network_Bandwidth_Version1_Allotment) GetMetricTrackingObjectId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getMetricTrackingObjectId", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Bandwidth_Version1_Allotment object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Hardware service. You can only retrieve an allotment associated with the account that your portal user is assigned to. +func (r Network_Bandwidth_Version1_Allotment) GetObject() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The total public outbound bandwidth used in this virtual rack for an account's current billing cycle. +func (r Network_Bandwidth_Version1_Allotment) GetOutboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getOutboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the bandwidth usage for this bandwidth pool for the current billing cycle exceeds the allocation. +func (r Network_Bandwidth_Version1_Allotment) GetOverBandwidthAllocationFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getOverBandwidthAllocationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The private network only hardware contained within a virtual rack. +func (r Network_Bandwidth_Version1_Allotment) GetPrivateNetworkOnlyHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getPrivateNetworkOnlyHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the bandwidth usage for this bandwidth pool for the current billing cycle is projected to exceed the allocation. +func (r Network_Bandwidth_Version1_Allotment) GetProjectedOverBandwidthAllocationFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getProjectedOverBandwidthAllocationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The projected public outbound bandwidth for this virtual server for the current billing cycle. 
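// ---- Editor's illustrative sketch (not part of the vendored upstream file) ----
// The allocation-flag getters above make it easy to poll whether a bandwidth
// pool has exceeded, or is projected to exceed, its allocation for the current
// billing cycle. Import paths, session.New, the allotment id, and the reading
// of a non-zero flag as "over allocation" are assumptions.
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("my-user", "my-api-key") // placeholder credentials
	allotment := services.GetNetworkBandwidthVersion1AllotmentService(sess).
		Id(1111) // hypothetical allotment id

	over, err := allotment.GetOverBandwidthAllocationFlag()
	if err != nil {
		log.Fatal(err)
	}
	projected, err := allotment.GetProjectedOverBandwidthAllocationFlag()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("over allocation: %v, projected over allocation: %v\n", over != 0, projected != 0)
}
// ---- end of sketch ----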
+func (r Network_Bandwidth_Version1_Allotment) GetProjectedPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getProjectedPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Bandwidth_Version1_Allotment) GetServiceProvider() (resp datatypes.Service_Provider, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getServiceProvider", nil, &r.Options, &resp) + return +} + +// Retrieve The combined allocated bandwidth for all servers in a virtual rack. +func (r Network_Bandwidth_Version1_Allotment) GetTotalBandwidthAllocated() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getTotalBandwidthAllocated", nil, &r.Options, &resp) + return +} + +// Gets the monthly recurring fee of a pooled server. +func (r Network_Bandwidth_Version1_Allotment) GetVdrMemberRecurringFee() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getVdrMemberRecurringFee", nil, &r.Options, &resp) + return +} + +// Retrieve The Virtual Server contained within a virtual rack. +func (r Network_Bandwidth_Version1_Allotment) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getVirtualGuests", nil, &r.Options, &resp) + return +} + +// This method will reassign a collection of SoftLayer hardware to a bandwidth allotment Bandwidth Pool. +func (r Network_Bandwidth_Version1_Allotment) ReassignServers(templateObjects []datatypes.Hardware, newAllotmentId *int) (resp bool, err error) { + params := []interface{}{ + templateObjects, + newAllotmentId, + } + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "reassignServers", params, &r.Options, &resp) + return +} + +// This will remove a bandwidth pooling from a customer's allotments by cancelling the billing item. All servers in that allotment will get moved to the account's vpr. +func (r Network_Bandwidth_Version1_Allotment) RequestVdrCancellation() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "requestVdrCancellation", nil, &r.Options, &resp) + return +} + +// This will move servers into a bandwidth pool, removing them from their previous bandwidth pool and optionally remove the bandwidth pool on completion. +func (r Network_Bandwidth_Version1_Allotment) RequestVdrContentUpdates(hardwareToAdd []datatypes.Hardware, hardwareToRemove []datatypes.Hardware, cloudsToAdd []datatypes.Virtual_Guest, cloudsToRemove []datatypes.Virtual_Guest, optionalAllotmentId *int, adcToAdd []datatypes.Network_Application_Delivery_Controller, adcToRemove []datatypes.Network_Application_Delivery_Controller) (resp bool, err error) { + params := []interface{}{ + hardwareToAdd, + hardwareToRemove, + cloudsToAdd, + cloudsToRemove, + optionalAllotmentId, + adcToAdd, + adcToRemove, + } + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "requestVdrContentUpdates", params, &r.Options, &resp) + return +} + +// This will update the bandwidth pool to the servers provided. Servers currently in the bandwidth pool not provided on update will be removed. Servers provided on update not currently in the bandwidth pool will be added. If all servers are removed, this removes the bandwidth pool on completion. 
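// ---- Editor's illustrative sketch (not part of the vendored upstream file) ----
// reassignServers, documented above, moves hardware into another bandwidth
// pool. Import paths, session.New, the ids, and the Id *int field on
// datatypes.Hardware are assumptions consistent with the rest of this
// generated package.
package main

import (
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("my-user", "my-api-key") // placeholder credentials
	allotment := services.GetNetworkBandwidthVersion1AllotmentService(sess).
		Id(1111) // hypothetical source allotment id

	hardwareID := 777777   // hypothetical server id
	newAllotmentID := 2222 // hypothetical target allotment id

	ok, err := allotment.ReassignServers([]datatypes.Hardware{{Id: &hardwareID}}, &newAllotmentID)
	if err != nil || !ok {
		log.Fatalf("reassignServers failed: ok=%v err=%v", ok, err)
	}
}
// ---- end of sketch ----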
+func (r Network_Bandwidth_Version1_Allotment) SetVdrContent(hardware []datatypes.Hardware, bareMetalServers []datatypes.Hardware, virtualServerInstance []datatypes.Virtual_Guest, adc []datatypes.Network_Application_Delivery_Controller, optionalAllotmentId *int) (resp bool, err error) { + params := []interface{}{ + hardware, + bareMetalServers, + virtualServerInstance, + adc, + optionalAllotmentId, + } + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "setVdrContent", params, &r.Options, &resp) + return +} + +// This method will reassign a collection of SoftLayer hardware to the virtual private rack +func (r Network_Bandwidth_Version1_Allotment) UnassignServers(templateObjects []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "unassignServers", params, &r.Options, &resp) + return +} + +// This method will void a pending server removal from this bandwidth pooling. Pass in the id of the hardware object or virtual guest you wish to update. Assuming that object is currently pending removal from the bandwidth pool at the start of the next billing cycle, the bandwidth pool member status will be restored and the pending cancellation removed. +func (r Network_Bandwidth_Version1_Allotment) VoidPendingServerMove(id *int, typ *string) (resp bool, err error) { + params := []interface{}{ + id, + typ, + } + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "voidPendingServerMove", params, &r.Options, &resp) + return +} + +// This method will void a pending cancellation on a bandwidth pool. Note however any servers that belonged to the rack will have to be restored individually using the method voidPendingServerMove($id, $type). +func (r Network_Bandwidth_Version1_Allotment) VoidPendingVdrCancellation() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "voidPendingVdrCancellation", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_CdnMarketplace_Account data type models an individual CDN account. CDN accounts contain the SoftLayer account ID of the customer, the vendor ID the account belongs to, the customer ID provided by the vendor, and a CDN account's status. 
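+//
+// Illustrative usage sketch (assumptions: sess is a configured *session.Session,
+// vendorName is the vendor's name string, and sl.String is the pointer helper from
+// the sl package; imports and error handling are abbreviated):
+//
+//	cdnAccount := services.GetNetworkCdnMarketplaceAccountService(sess)
+//	exists, err := cdnAccount.VerifyCdnAccountExists(sl.String(vendorName))
+//	if err != nil {
+//		// handle API error
+//	}
+//	_ = exists // true when a CDN account already exists for the vendor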
+type Network_CdnMarketplace_Account struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkCdnMarketplaceAccountService returns an instance of the Network_CdnMarketplace_Account SoftLayer service +func GetNetworkCdnMarketplaceAccountService(sess *session.Session) Network_CdnMarketplace_Account { + return Network_CdnMarketplace_Account{Session: sess} +} + +func (r Network_CdnMarketplace_Account) Id(id int) Network_CdnMarketplace_Account { + r.Options.Id = &id + return r +} + +func (r Network_CdnMarketplace_Account) Mask(mask string) Network_CdnMarketplace_Account { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_CdnMarketplace_Account) Filter(filter string) Network_CdnMarketplace_Account { + r.Options.Filter = filter + return r +} + +func (r Network_CdnMarketplace_Account) Limit(limit int) Network_CdnMarketplace_Account { + r.Options.Limit = &limit + return r +} + +func (r Network_CdnMarketplace_Account) Offset(offset int) Network_CdnMarketplace_Account { + r.Options.Offset = &offset + return r +} + +// Retrieve SoftLayer account to which the CDN account belongs. +func (r Network_CdnMarketplace_Account) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Account", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve An associated parent billing item which is active. +func (r Network_CdnMarketplace_Account) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Account", "getBillingItem", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Account) GetObject() (resp datatypes.Network_CdnMarketplace_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Account", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Account) VerifyCdnAccountExists(vendorName *string) (resp bool, err error) { + params := []interface{}{ + vendorName, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Account", "verifyCdnAccountExists", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_CdnMarketplace_Configuration_Behavior_Geoblocking struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkCdnMarketplaceConfigurationBehaviorGeoblockingService returns an instance of the Network_CdnMarketplace_Configuration_Behavior_Geoblocking SoftLayer service +func GetNetworkCdnMarketplaceConfigurationBehaviorGeoblockingService(sess *session.Session) Network_CdnMarketplace_Configuration_Behavior_Geoblocking { + return Network_CdnMarketplace_Configuration_Behavior_Geoblocking{Session: sess} +} + +func (r Network_CdnMarketplace_Configuration_Behavior_Geoblocking) Id(id int) Network_CdnMarketplace_Configuration_Behavior_Geoblocking { + r.Options.Id = &id + return r +} + +func (r Network_CdnMarketplace_Configuration_Behavior_Geoblocking) Mask(mask string) Network_CdnMarketplace_Configuration_Behavior_Geoblocking { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_CdnMarketplace_Configuration_Behavior_Geoblocking) Filter(filter string) 
Network_CdnMarketplace_Configuration_Behavior_Geoblocking { + r.Options.Filter = filter + return r +} + +func (r Network_CdnMarketplace_Configuration_Behavior_Geoblocking) Limit(limit int) Network_CdnMarketplace_Configuration_Behavior_Geoblocking { + r.Options.Limit = &limit + return r +} + +func (r Network_CdnMarketplace_Configuration_Behavior_Geoblocking) Offset(offset int) Network_CdnMarketplace_Configuration_Behavior_Geoblocking { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Behavior_Geoblocking) CreateGeoblocking(input *datatypes.Container_Network_CdnMarketplace_Configuration_Input) (resp datatypes.Network_CdnMarketplace_Configuration_Behavior_Geoblocking, err error) { + params := []interface{}{ + input, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Behavior_Geoblocking", "createGeoblocking", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Behavior_Geoblocking) DeleteGeoblocking(input *datatypes.Container_Network_CdnMarketplace_Configuration_Input) (resp datatypes.Network_CdnMarketplace_Utils_Response, err error) { + params := []interface{}{ + input, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Behavior_Geoblocking", "deleteGeoblocking", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Behavior_Geoblocking) GetGeoblocking(input *datatypes.Container_Network_CdnMarketplace_Configuration_Input) (resp datatypes.Network_CdnMarketplace_Configuration_Behavior_Geoblocking, err error) { + params := []interface{}{ + input, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Behavior_Geoblocking", "getGeoblocking", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Behavior_Geoblocking) GetGeoblockingAllowedTypesAndRegions(uniqueId *string) (resp datatypes.Network_CdnMarketplace_Configuration_Behavior_Geoblocking_Type, err error) { + params := []interface{}{ + uniqueId, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Behavior_Geoblocking", "getGeoblockingAllowedTypesAndRegions", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Behavior_Geoblocking) GetObject() (resp datatypes.Network_CdnMarketplace_Configuration_Behavior_Geoblocking, err error) { + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Behavior_Geoblocking", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Behavior_Geoblocking) UpdateGeoblocking(input *datatypes.Container_Network_CdnMarketplace_Configuration_Input) (resp datatypes.Network_CdnMarketplace_Configuration_Behavior_Geoblocking, err error) { + params := []interface{}{ + input, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Behavior_Geoblocking", "updateGeoblocking", params, &r.Options, &resp) + return +} + +// This data type models a purge event that occurs in caching server. It contains a reference to a mapping configuration, the path to execute the purge on, the status of the purge, and flag that enables saving the purge information for future use. 
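+//
+// Illustrative sketch (assumptions: sess is a configured *session.Session, uniqueId is
+// the unique id string of an existing domain mapping, and path is the cache path to
+// purge; both are placeholders for this example):
+//
+//	purgeSvc := services.GetNetworkCdnMarketplaceConfigurationCachePurgeService(sess)
+//	created, err := purgeSvc.CreatePurge(sl.String(uniqueId), sl.String(path))
+//	if err != nil {
+//		// handle API error
+//	}
+//	// later, check on the purge for the same mapping and path
+//	status, err := purgeSvc.GetPurgeStatus(sl.String(uniqueId), sl.String(path))
+//	_, _ = created, status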
+type Network_CdnMarketplace_Configuration_Cache_Purge struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkCdnMarketplaceConfigurationCachePurgeService returns an instance of the Network_CdnMarketplace_Configuration_Cache_Purge SoftLayer service +func GetNetworkCdnMarketplaceConfigurationCachePurgeService(sess *session.Session) Network_CdnMarketplace_Configuration_Cache_Purge { + return Network_CdnMarketplace_Configuration_Cache_Purge{Session: sess} +} + +func (r Network_CdnMarketplace_Configuration_Cache_Purge) Id(id int) Network_CdnMarketplace_Configuration_Cache_Purge { + r.Options.Id = &id + return r +} + +func (r Network_CdnMarketplace_Configuration_Cache_Purge) Mask(mask string) Network_CdnMarketplace_Configuration_Cache_Purge { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_CdnMarketplace_Configuration_Cache_Purge) Filter(filter string) Network_CdnMarketplace_Configuration_Cache_Purge { + r.Options.Filter = filter + return r +} + +func (r Network_CdnMarketplace_Configuration_Cache_Purge) Limit(limit int) Network_CdnMarketplace_Configuration_Cache_Purge { + r.Options.Limit = &limit + return r +} + +func (r Network_CdnMarketplace_Configuration_Cache_Purge) Offset(offset int) Network_CdnMarketplace_Configuration_Cache_Purge { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Cache_Purge) CreatePurge(uniqueId *string, path *string) (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Cache_Purge, err error) { + params := []interface{}{ + uniqueId, + path, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Cache_Purge", "createPurge", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Cache_Purge) GetObject() (resp datatypes.Network_CdnMarketplace_Configuration_Cache_Purge, err error) { + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Cache_Purge", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Cache_Purge) GetPurgeHistoryPerMapping(uniqueId *string, saved *int) (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Cache_Purge, err error) { + params := []interface{}{ + uniqueId, + saved, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Cache_Purge", "getPurgeHistoryPerMapping", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Cache_Purge) GetPurgeStatus(uniqueId *string, path *string) (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Cache_Purge, err error) { + params := []interface{}{ + uniqueId, + path, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Cache_Purge", "getPurgeStatus", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Cache_Purge) SaveOrUnsavePurgePath(uniqueId *string, path *string, saveOrUnsave *int) (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Cache_Purge, err error) { + params := []interface{}{ + uniqueId, + path, + saveOrUnsave, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Cache_Purge", "saveOrUnsavePurgePath", params, &r.Options, &resp) + return +} + +// 
This data type models a purge event that occurs repetitively and automatically in caching server after a set interval of time. A time to live instance contains a reference to a mapping configuration, the path to execute the purge on, the result of the purge, and the time interval after which the purge will be executed. +type Network_CdnMarketplace_Configuration_Cache_TimeToLive struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkCdnMarketplaceConfigurationCacheTimeToLiveService returns an instance of the Network_CdnMarketplace_Configuration_Cache_TimeToLive SoftLayer service +func GetNetworkCdnMarketplaceConfigurationCacheTimeToLiveService(sess *session.Session) Network_CdnMarketplace_Configuration_Cache_TimeToLive { + return Network_CdnMarketplace_Configuration_Cache_TimeToLive{Session: sess} +} + +func (r Network_CdnMarketplace_Configuration_Cache_TimeToLive) Id(id int) Network_CdnMarketplace_Configuration_Cache_TimeToLive { + r.Options.Id = &id + return r +} + +func (r Network_CdnMarketplace_Configuration_Cache_TimeToLive) Mask(mask string) Network_CdnMarketplace_Configuration_Cache_TimeToLive { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_CdnMarketplace_Configuration_Cache_TimeToLive) Filter(filter string) Network_CdnMarketplace_Configuration_Cache_TimeToLive { + r.Options.Filter = filter + return r +} + +func (r Network_CdnMarketplace_Configuration_Cache_TimeToLive) Limit(limit int) Network_CdnMarketplace_Configuration_Cache_TimeToLive { + r.Options.Limit = &limit + return r +} + +func (r Network_CdnMarketplace_Configuration_Cache_TimeToLive) Offset(offset int) Network_CdnMarketplace_Configuration_Cache_TimeToLive { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Cache_TimeToLive) CreateTimeToLive(uniqueId *string, pathName *string, ttl *string) (resp string, err error) { + params := []interface{}{ + uniqueId, + pathName, + ttl, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Cache_TimeToLive", "createTimeToLive", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Cache_TimeToLive) DeleteTimeToLive(uniqueId *string, pathName *string) (resp string, err error) { + params := []interface{}{ + uniqueId, + pathName, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Cache_TimeToLive", "deleteTimeToLive", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Cache_TimeToLive) GetObject() (resp datatypes.Network_CdnMarketplace_Configuration_Cache_TimeToLive, err error) { + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Cache_TimeToLive", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Cache_TimeToLive) ListTimeToLive(uniqueId *string) (resp []datatypes.Network_CdnMarketplace_Configuration_Cache_TimeToLive, err error) { + params := []interface{}{ + uniqueId, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Cache_TimeToLive", "listTimeToLive", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Cache_TimeToLive) UpdateTimeToLive(uniqueId *string, oldPath *string, newPath *string, 
oldTtl *string, newTtl *string) (resp string, err error) { + params := []interface{}{ + uniqueId, + oldPath, + newPath, + oldTtl, + newTtl, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Cache_TimeToLive", "updateTimeToLive", params, &r.Options, &resp) + return +} + +// This data type represents the mapping Configuration settings for enabling CDN services. Each instance contains a reference to a CDN account, and CDN configuration properties such as a domain, an origin host and its port, a cname we generate, a cname the vendor generates, and a status. Other properties include the type of content to be cached (static or dynamic), the origin type (a host server or an object storage account), and the protocol to be used for caching. +type Network_CdnMarketplace_Configuration_Mapping struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkCdnMarketplaceConfigurationMappingService returns an instance of the Network_CdnMarketplace_Configuration_Mapping SoftLayer service +func GetNetworkCdnMarketplaceConfigurationMappingService(sess *session.Session) Network_CdnMarketplace_Configuration_Mapping { + return Network_CdnMarketplace_Configuration_Mapping{Session: sess} +} + +func (r Network_CdnMarketplace_Configuration_Mapping) Id(id int) Network_CdnMarketplace_Configuration_Mapping { + r.Options.Id = &id + return r +} + +func (r Network_CdnMarketplace_Configuration_Mapping) Mask(mask string) Network_CdnMarketplace_Configuration_Mapping { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_CdnMarketplace_Configuration_Mapping) Filter(filter string) Network_CdnMarketplace_Configuration_Mapping { + r.Options.Filter = filter + return r +} + +func (r Network_CdnMarketplace_Configuration_Mapping) Limit(limit int) Network_CdnMarketplace_Configuration_Mapping { + r.Options.Limit = &limit + return r +} + +func (r Network_CdnMarketplace_Configuration_Mapping) Offset(offset int) Network_CdnMarketplace_Configuration_Mapping { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Mapping) CreateDomainMapping(input *datatypes.Container_Network_CdnMarketplace_Configuration_Input) (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Mapping, err error) { + params := []interface{}{ + input, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping", "createDomainMapping", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Mapping) DeleteDomainMapping(uniqueId *string) (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Mapping, err error) { + params := []interface{}{ + uniqueId, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping", "deleteDomainMapping", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Mapping) GetObject() (resp datatypes.Network_CdnMarketplace_Configuration_Mapping, err error) { + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Mapping) ListDomainMappingByUniqueId(uniqueId *string) (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Mapping, err error) { + 
params := []interface{}{ + uniqueId, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping", "listDomainMappingByUniqueId", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Mapping) ListDomainMappings() (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Mapping, err error) { + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping", "listDomainMappings", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Mapping) RetryHttpsActionRequest(uniqueId *string) (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Mapping, err error) { + params := []interface{}{ + uniqueId, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping", "retryHttpsActionRequest", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Mapping) StartDomainMapping(uniqueId *string) (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Mapping, err error) { + params := []interface{}{ + uniqueId, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping", "startDomainMapping", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Mapping) StopDomainMapping(uniqueId *string) (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Mapping, err error) { + params := []interface{}{ + uniqueId, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping", "stopDomainMapping", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Mapping) UpdateDomainMapping(input *datatypes.Container_Network_CdnMarketplace_Configuration_Input) (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Mapping, err error) { + params := []interface{}{ + input, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping", "updateDomainMapping", params, &r.Options, &resp) + return +} + +// Verifies the CNAME is Unique in the domain. The method will return true if CNAME is unique else returns false +func (r Network_CdnMarketplace_Configuration_Mapping) VerifyCname(cname *string) (resp bool, err error) { + params := []interface{}{ + cname, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping", "verifyCname", params, &r.Options, &resp) + return +} + +// Verifies the status of the domain mapping by calling the rest api; will update the status, cname, and vendorCName if necessary and will return the updated values. 
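+//
+// Illustrative sketch (assumptions: sess is a configured *session.Session and
+// mappingId is the mapping's unique id as an integer; note this call takes a *int,
+// unlike the *string unique ids accepted by the other mapping methods above):
+//
+//	mappingSvc := services.GetNetworkCdnMarketplaceConfigurationMappingService(sess)
+//	refreshed, err := mappingSvc.VerifyDomainMapping(sl.Int(mappingId))
+//	if err != nil {
+//		// handle API error
+//	}
+//	_ = refreshed // mapping containers with status, cname and vendorCName refreshed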
+func (r Network_CdnMarketplace_Configuration_Mapping) VerifyDomainMapping(uniqueId *int) (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Mapping, err error) { + params := []interface{}{ + uniqueId, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping", "verifyDomainMapping", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_CdnMarketplace_Configuration_Mapping_Path struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkCdnMarketplaceConfigurationMappingPathService returns an instance of the Network_CdnMarketplace_Configuration_Mapping_Path SoftLayer service +func GetNetworkCdnMarketplaceConfigurationMappingPathService(sess *session.Session) Network_CdnMarketplace_Configuration_Mapping_Path { + return Network_CdnMarketplace_Configuration_Mapping_Path{Session: sess} +} + +func (r Network_CdnMarketplace_Configuration_Mapping_Path) Id(id int) Network_CdnMarketplace_Configuration_Mapping_Path { + r.Options.Id = &id + return r +} + +func (r Network_CdnMarketplace_Configuration_Mapping_Path) Mask(mask string) Network_CdnMarketplace_Configuration_Mapping_Path { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_CdnMarketplace_Configuration_Mapping_Path) Filter(filter string) Network_CdnMarketplace_Configuration_Mapping_Path { + r.Options.Filter = filter + return r +} + +func (r Network_CdnMarketplace_Configuration_Mapping_Path) Limit(limit int) Network_CdnMarketplace_Configuration_Mapping_Path { + r.Options.Limit = &limit + return r +} + +func (r Network_CdnMarketplace_Configuration_Mapping_Path) Offset(offset int) Network_CdnMarketplace_Configuration_Mapping_Path { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Mapping_Path) CreateOriginPath(input *datatypes.Container_Network_CdnMarketplace_Configuration_Input) (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Mapping_Path, err error) { + params := []interface{}{ + input, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping_Path", "createOriginPath", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Mapping_Path) DeleteOriginPath(uniqueId *string, path *string) (resp string, err error) { + params := []interface{}{ + uniqueId, + path, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping_Path", "deleteOriginPath", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Mapping_Path) GetObject() (resp datatypes.Network_CdnMarketplace_Configuration_Mapping_Path, err error) { + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping_Path", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Mapping_Path) ListOriginPath(uniqueId *string) (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Mapping_Path, err error) { + params := []interface{}{ + uniqueId, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping_Path", "listOriginPath", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Configuration_Mapping_Path) UpdateOriginPath(input 
*datatypes.Container_Network_CdnMarketplace_Configuration_Input) (resp []datatypes.Container_Network_CdnMarketplace_Configuration_Mapping_Path, err error) { + params := []interface{}{ + input, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Configuration_Mapping_Path", "updateOriginPath", params, &r.Options, &resp) + return +} + +// This Metrics class provides methods to get CDN metrics based on account or mapping unique id. +type Network_CdnMarketplace_Metrics struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkCdnMarketplaceMetricsService returns an instance of the Network_CdnMarketplace_Metrics SoftLayer service +func GetNetworkCdnMarketplaceMetricsService(sess *session.Session) Network_CdnMarketplace_Metrics { + return Network_CdnMarketplace_Metrics{Session: sess} +} + +func (r Network_CdnMarketplace_Metrics) Id(id int) Network_CdnMarketplace_Metrics { + r.Options.Id = &id + return r +} + +func (r Network_CdnMarketplace_Metrics) Mask(mask string) Network_CdnMarketplace_Metrics { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_CdnMarketplace_Metrics) Filter(filter string) Network_CdnMarketplace_Metrics { + r.Options.Filter = filter + return r +} + +func (r Network_CdnMarketplace_Metrics) Limit(limit int) Network_CdnMarketplace_Metrics { + r.Options.Limit = &limit + return r +} + +func (r Network_CdnMarketplace_Metrics) Offset(offset int) Network_CdnMarketplace_Metrics { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_CdnMarketplace_Metrics) GetCustomerUsageMetrics(vendorName *string, startDate *int, endDate *int, frequency *string) (resp []datatypes.Container_Network_CdnMarketplace_Metrics, err error) { + params := []interface{}{ + vendorName, + startDate, + endDate, + frequency, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Metrics", "getCustomerUsageMetrics", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Metrics) GetMappingBandwidthByRegionMetrics(mappingUniqueId *string, startDate *int, endDate *int, frequency *string) (resp []datatypes.Container_Network_CdnMarketplace_Metrics, err error) { + params := []interface{}{ + mappingUniqueId, + startDate, + endDate, + frequency, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Metrics", "getMappingBandwidthByRegionMetrics", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Metrics) GetMappingBandwidthMetrics(mappingUniqueId *string, startDate *int, endDate *int, frequency *string) (resp []datatypes.Container_Network_CdnMarketplace_Metrics, err error) { + params := []interface{}{ + mappingUniqueId, + startDate, + endDate, + frequency, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Metrics", "getMappingBandwidthMetrics", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Metrics) GetMappingHitsByTypeMetrics(mappingUniqueId *string, startDate *int, endDate *int, frequency *string) (resp []datatypes.Container_Network_CdnMarketplace_Metrics, err error) { + params := []interface{}{ + mappingUniqueId, + startDate, + endDate, + frequency, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Metrics", "getMappingHitsByTypeMetrics", params, &r.Options, &resp) + return +} + +// no documentation yet +func 
(r Network_CdnMarketplace_Metrics) GetMappingHitsMetrics(mappingUniqueId *string, startDate *int, endDate *int, frequency *string) (resp []datatypes.Container_Network_CdnMarketplace_Metrics, err error) { + params := []interface{}{ + mappingUniqueId, + startDate, + endDate, + frequency, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Metrics", "getMappingHitsMetrics", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Metrics) GetMappingUsageMetrics(mappingUniqueId *string, startDate *int, endDate *int, frequency *string) (resp []datatypes.Container_Network_CdnMarketplace_Metrics, err error) { + params := []interface{}{ + mappingUniqueId, + startDate, + endDate, + frequency, + } + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Metrics", "getMappingUsageMetrics", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_CdnMarketplace_Vendor contains information regarding
+// a CDN Vendor. This class is associated with
 SoftLayer_Network_CdnMarketplace_Vendor_Attribute class. +type Network_CdnMarketplace_Vendor struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkCdnMarketplaceVendorService returns an instance of the Network_CdnMarketplace_Vendor SoftLayer service +func GetNetworkCdnMarketplaceVendorService(sess *session.Session) Network_CdnMarketplace_Vendor { + return Network_CdnMarketplace_Vendor{Session: sess} +} + +func (r Network_CdnMarketplace_Vendor) Id(id int) Network_CdnMarketplace_Vendor { + r.Options.Id = &id + return r +} + +func (r Network_CdnMarketplace_Vendor) Mask(mask string) Network_CdnMarketplace_Vendor { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_CdnMarketplace_Vendor) Filter(filter string) Network_CdnMarketplace_Vendor { + r.Options.Filter = filter + return r +} + +func (r Network_CdnMarketplace_Vendor) Limit(limit int) Network_CdnMarketplace_Vendor { + r.Options.Limit = &limit + return r +} + +func (r Network_CdnMarketplace_Vendor) Offset(offset int) Network_CdnMarketplace_Vendor { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_CdnMarketplace_Vendor) GetObject() (resp datatypes.Network_CdnMarketplace_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Vendor", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_CdnMarketplace_Vendor) ListVendors() (resp []datatypes.Container_Network_CdnMarketplace_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_Network_CdnMarketplace_Vendor", "listVendors", nil, &r.Options, &resp) + return +} + +// Every piece of hardware running in SoftLayer's datacenters connected to the public, private, or management networks (where applicable) have a corresponding network component. These network components are modeled by the SoftLayer_Network_Component data type. These data types reflect the servers' local ethernet and remote management interfaces. +type Network_Component struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkComponentService returns an instance of the Network_Component SoftLayer service +func GetNetworkComponentService(sess *session.Session) Network_Component { + return Network_Component{Session: sess} +} + +func (r Network_Component) Id(id int) Network_Component { + r.Options.Id = &id + return r +} + +func (r Network_Component) Mask(mask string) Network_Component { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Component) Filter(filter string) Network_Component { + r.Options.Filter = filter + return r +} + +func (r Network_Component) Limit(limit int) Network_Component { + r.Options.Limit = &limit + return r +} + +func (r Network_Component) Offset(offset int) Network_Component { + r.Options.Offset = &offset + return r +} + +// Add VLANs as trunks to a network component. The VLANs given must be assigned to your account, and on the router to which this network component is connected. The current native VLAN (networkVlanId/networkVlan) cannot be added as a trunk. This method should be called on a network component attached directly to customer assigned hardware, though all trunking operations will occur on the uplinkComponent. 
A current list of VLAN trunks for a network component on a customer server can be found at 'uplinkComponent->networkVlanTrunks'. +// +// This method returns an array of SoftLayer_Network_Vlans which were added as trunks. Any requested trunks which are already trunked will be silently ignored, and will not be returned. +// +// Configuration of network hardware is done asynchronously, do not depend on the return of this call as an indication that the newly trunked VLANs will be accessible. +func (r Network_Component) AddNetworkVlanTrunks(networkVlans []datatypes.Network_Vlan) (resp []datatypes.Network_Vlan, err error) { + params := []interface{}{ + networkVlans, + } + err = r.Session.DoRequest("SoftLayer_Network_Component", "addNetworkVlanTrunks", params, &r.Options, &resp) + return +} + +// This method will remove all VLANs trunked to this network component. The native VLAN (networkVlanId/networkVlan) will remain active, and cannot be removed via the API. Returns a list of SoftLayer_Network_Vlan objects for which the trunks were removed. +func (r Network_Component) ClearNetworkVlanTrunks() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "clearNetworkVlanTrunks", nil, &r.Options, &resp) + return +} + +// Retrieve Reboot/power (rebootDefault, rebootSoft, rebootHard, powerOn, powerOff and powerCycle) command currently executing by the server's remote management card. +func (r Network_Component) GetActiveCommand() (resp datatypes.Hardware_Component_RemoteManagement_Command_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getActiveCommand", nil, &r.Options, &resp) + return +} + +// Retrieve bandwidth graph by date. +func (r Network_Component) GetCustomBandwidthDataByDate(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + graphData, + } + err = r.Session.DoRequest("SoftLayer_Network_Component", "getCustomBandwidthDataByDate", params, &r.Options, &resp) + return +} + +// Retrieve The network component linking this object to a child device +func (r Network_Component) GetDownlinkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getDownlinkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The duplex mode of a network component. +func (r Network_Component) GetDuplexMode() (resp datatypes.Network_Component_Duplex_Mode, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getDuplexMode", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware that a network component resides in. +func (r Network_Component) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Component) GetHighAvailabilityFirewallFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getHighAvailabilityFirewallFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware switch's interface to the bandwidth pod. +func (r Network_Component) GetInterface() (resp datatypes.Network_Bandwidth_Version1_Interface, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getInterface", nil, &r.Options, &resp) + return +} + +// Retrieve The records of all IP addresses bound to a network component. 
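+//
+// The sketch below illustrates the VLAN trunking calls on this service
+// (AddNetworkVlanTrunks above and RemoveNetworkVlanTrunks further down), not this
+// retrieval method. Assumptions: sess is a configured *session.Session, componentId
+// identifies a network component on customer hardware, and vlanId is a VLAN already
+// assigned to the account and present on the connected router:
+//
+//	comp := services.GetNetworkComponentService(sess).Id(componentId)
+//	added, err := comp.AddNetworkVlanTrunks([]datatypes.Network_Vlan{{Id: sl.Int(vlanId)}})
+//	if err != nil {
+//		// handle API error
+//	}
+//	// trunking is applied asynchronously; confirm via the uplink component's
+//	// networkVlanTrunks relation rather than relying on this return value.
+//	_ = added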
+func (r Network_Component) GetIpAddressBindings() (resp []datatypes.Network_Component_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getIpAddressBindings", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Component) GetIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve Last reboot/power (rebootDefault, rebootSoft, rebootHard, powerOn, powerOff and powerCycle) command issued to the server's remote management card. +func (r Network_Component) GetLastCommand() (resp datatypes.Hardware_Component_RemoteManagement_Command_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getLastCommand", nil, &r.Options, &resp) + return +} + +// Retrieve The metric tracking object for this network component. +func (r Network_Component) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Retrieve The upstream network component firewall. +func (r Network_Component) GetNetworkComponentFirewall() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getNetworkComponentFirewall", nil, &r.Options, &resp) + return +} + +// Retrieve A network component's associated group. +func (r Network_Component) GetNetworkComponentGroup() (resp datatypes.Network_Component_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getNetworkComponentGroup", nil, &r.Options, &resp) + return +} + +// Retrieve All network devices in SoftLayer's network hierarchy that this device is connected to. +func (r Network_Component) GetNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The VLAN that a network component's subnet is associated with. +func (r Network_Component) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getNetworkVlan", nil, &r.Options, &resp) + return +} + +// Retrieve The VLANs that are trunked to this network component. +func (r Network_Component) GetNetworkVlanTrunks() (resp []datatypes.Network_Component_Network_Vlan_Trunk, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getNetworkVlanTrunks", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Component) GetObject() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getObject", nil, &r.Options, &resp) + return +} + +// +// **DEPRECATED - This operation will cease to function after April 4th, 2016 and will be removed from v3.2** +// Retrieve various network statistics. The network statistics are retrieved from the network device using snmpget. 
Below is a list of statistics retrieved: +// * Administrative Status +// * Operational Status +// * Maximum Transmission Unit +// * In Octets +// * Out Octets +// * In Unicast Packets +// * Out Unicast Packets +// * In Multicast Packets +// * Out Multicast Packets +func (r Network_Component) GetPortStatistics() (resp datatypes.Container_Network_Port_Statistic, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getPortStatistics", nil, &r.Options, &resp) + return +} + +// Retrieve The primary IPv4 Address record for a network component. +func (r Network_Component) GetPrimaryIpAddressRecord() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getPrimaryIpAddressRecord", nil, &r.Options, &resp) + return +} + +// Retrieve The subnet of the primary IP address assigned to this network component. +func (r Network_Component) GetPrimarySubnet() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getPrimarySubnet", nil, &r.Options, &resp) + return +} + +// Retrieve The primary IPv6 Address record for a network component. +func (r Network_Component) GetPrimaryVersion6IpAddressRecord() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getPrimaryVersion6IpAddressRecord", nil, &r.Options, &resp) + return +} + +// Retrieve The last five reboot/power (rebootDefault, rebootSoft, rebootHard, powerOn, powerOff and powerCycle) commands issued to the server's remote management card. +func (r Network_Component) GetRecentCommands() (resp []datatypes.Hardware_Component_RemoteManagement_Command_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getRecentCommands", nil, &r.Options, &resp) + return +} + +// Retrieve Indicates whether the network component is participating in a group of two or more components capable of being operationally redundant, if enabled. +func (r Network_Component) GetRedundancyCapableFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getRedundancyCapableFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Indicates whether the network component is participating in a group of two or more components which is actively providing link redundancy. +func (r Network_Component) GetRedundancyEnabledFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getRedundancyEnabledFlag", nil, &r.Options, &resp) + return +} + +// Retrieve User(s) credentials to issue commands and/or interact with the server's remote management card. +func (r Network_Component) GetRemoteManagementUsers() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getRemoteManagementUsers", nil, &r.Options, &resp) + return +} + +// Retrieve A network component's routers. +func (r Network_Component) GetRouter() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getRouter", nil, &r.Options, &resp) + return +} + +// Retrieve Whether a network component's primary ip address is from a storage network subnet or not. +func (r Network_Component) GetStorageNetworkFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getStorageNetworkFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A network component's subnets. 
A subnet is a group of IP addresses +func (r Network_Component) GetSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The network component linking this object to parent +func (r Network_Component) GetUplinkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getUplinkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The duplex mode of the uplink network component linking to this object +func (r Network_Component) GetUplinkDuplexMode() (resp datatypes.Network_Component_Duplex_Mode, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getUplinkDuplexMode", nil, &r.Options, &resp) + return +} + +// Remove one or more VLANs currently attached to the uplinkComponent of this networkComponent. The VLANs given must be assigned to your account, and on the router the network component is connected to. If any VLANs not currently trunked are given, they will be silently ignored. +// +// This method should be called on a network component attached directly to customer assigned hardware, though all trunking operations will occur on the uplinkComponent. A current list of VLAN trunks for a network component on a customer server can be found at 'uplinkComponent->networkVlanTrunks'. +// +// Configuration of network hardware is done asynchronously, do not depend on the return of this call as an indication that the removed VLANs will be inaccessible. +func (r Network_Component) RemoveNetworkVlanTrunks(networkVlans []datatypes.Network_Vlan) (resp []datatypes.Network_Vlan, err error) { + params := []interface{}{ + networkVlans, + } + err = r.Session.DoRequest("SoftLayer_Network_Component", "removeNetworkVlanTrunks", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Component_Firewall data type contains general information relating to a single SoftLayer network component firewall. This is the object which ties the running rules to a specific downstream server. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. 
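+//
+// Illustrative sketch (assumptions: sess is a configured *session.Session and
+// firewallId is the id of a network component firewall on the account):
+//
+//	fwSvc := services.GetNetworkComponentFirewallService(sess).Id(firewallId)
+//	rules, err := fwSvc.GetRules()
+//	if err != nil {
+//		// handle API error
+//	}
+//	for _, rule := range rules {
+//		_ = rule // inspect the currently running rule set
+//	}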
+type Network_Component_Firewall struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkComponentFirewallService returns an instance of the Network_Component_Firewall SoftLayer service +func GetNetworkComponentFirewallService(sess *session.Session) Network_Component_Firewall { + return Network_Component_Firewall{Session: sess} +} + +func (r Network_Component_Firewall) Id(id int) Network_Component_Firewall { + r.Options.Id = &id + return r +} + +func (r Network_Component_Firewall) Mask(mask string) Network_Component_Firewall { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Component_Firewall) Filter(filter string) Network_Component_Firewall { + r.Options.Filter = filter + return r +} + +func (r Network_Component_Firewall) Limit(limit int) Network_Component_Firewall { + r.Options.Limit = &limit + return r +} + +func (r Network_Component_Firewall) Offset(offset int) Network_Component_Firewall { + r.Options.Offset = &offset + return r +} + +// Retrieve The additional subnets linked to this network component firewall, that inherit rules from the host that the context slot is attached to. +func (r Network_Component_Firewall) GetApplyServerRuleSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getApplyServerRuleSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a Hardware Firewall (Dedicated). +func (r Network_Component_Firewall) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The network component of the guest virtual server that this network component firewall belongs to. +func (r Network_Component_Firewall) GetGuestNetworkComponent() (resp datatypes.Virtual_Guest_Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getGuestNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The network component of the switch interface that this network component firewall belongs to. +func (r Network_Component_Firewall) GetNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The update requests made for this firewall. +func (r Network_Component_Firewall) GetNetworkFirewallUpdateRequest() (resp []datatypes.Network_Firewall_Update_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getNetworkFirewallUpdateRequest", nil, &r.Options, &resp) + return +} + +// getObject returns a SoftLayer_Network_Firewall_Module_Context_Interface_AccessControlList_Network_Component object. You can only get objects for servers attached to your account that have a network firewall enabled. +func (r Network_Component_Firewall) GetObject() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The currently running rule set of this network component firewall. 
+func (r Network_Component_Firewall) GetRules() (resp []datatypes.Network_Component_Firewall_Rule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getRules", nil, &r.Options, &resp) + return +} + +// Retrieve The additional subnets linked to this network component firewall. +func (r Network_Component_Firewall) GetSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getSubnets", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_ContentDelivery_Account data type models an individual CDN account. CDN accounts contain references to the SoftLayer customer account they belong to, login credentials for upload services, and a CDN account's status. Please contact SoftLayer sales to purchase or cancel a CDN account +type Network_ContentDelivery_Account struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkContentDeliveryAccountService returns an instance of the Network_ContentDelivery_Account SoftLayer service +func GetNetworkContentDeliveryAccountService(sess *session.Session) Network_ContentDelivery_Account { + return Network_ContentDelivery_Account{Session: sess} +} + +func (r Network_ContentDelivery_Account) Id(id int) Network_ContentDelivery_Account { + r.Options.Id = &id + return r +} + +func (r Network_ContentDelivery_Account) Mask(mask string) Network_ContentDelivery_Account { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_ContentDelivery_Account) Filter(filter string) Network_ContentDelivery_Account { + r.Options.Filter = filter + return r +} + +func (r Network_ContentDelivery_Account) Limit(limit int) Network_ContentDelivery_Account { + r.Options.Limit = &limit + return r +} + +func (r Network_ContentDelivery_Account) Offset(offset int) Network_ContentDelivery_Account { + r.Options.Offset = &offset + return r +} + +// Internap servers attempts to validate a token before serving a protected content. SoftLayer customer does not need to invoke this method. Please refer to [[SoftLayer_Network_ContentDelivery_Authentication_Token|Authentication Token]] object for more details on Content Authentication Service. +func (r Network_ContentDelivery_Account) AuthenticateResourceRequest(parameter *datatypes.Container_Network_ContentDelivery_Authentication_Parameter) (resp bool, err error) { + params := []interface{}{ + parameter, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "authenticateResourceRequest", params, &r.Options, &resp) + return +} + +// You can further organize your contents on the CDN FTP server by creating sub directories. This method creates a directory on the CDN FTP server. A user must have CDN_FILE_MANAGE privilege to use this method. A directory name must be an absolute path and you can only create sub directories in /media folder. +func (r Network_ContentDelivery_Account) CreateDirectory(directoryName *string) (resp bool, err error) { + params := []interface{}{ + directoryName, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "createDirectory", params, &r.Options, &resp) + return +} + +// This method allows you to create a default CDN FTP user record on the ftp.cdnlayer.service.softlayer.com server. As with a CDN FTP user account, you can upload contents to the CDN host server through the SoftLayer private network. 
SoftLayer currently allows only one FTP user for each CDN account. Your default CDN FTP user record is created upon successful creation of a CDN account. You may not need to use this method at all. This is provided in support of the previous CDN customers. SoftLayer may offer multiple CDN FTP users for a single CDN account in the future. +// +// Optionally, you can provide a new password when invoking this method and a new password must follow the rules below: +// * ...must be between 8 and 20 characters long +// * ...must be an alphanumeric value +// * ...can contain these characters: - _ ! % # $ ^ & * +func (r Network_ContentDelivery_Account) CreateFtpUser(newPassword *string) (resp bool, err error) { + params := []interface{}{ + newPassword, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "createFtpUser", params, &r.Options, &resp) + return +} + +// With Origin Pull, content is pulled from your origin server as needed and then delivered to visitors. You do not need to upload your files to the CDN FTP: you can utilize the files that currently exist on your origin server. It will take 10 to 15 minutes for this to take effect after you create an Origin Pull rule. Origin Pull is only supported for HTTP protocol and you would continue to use the CDN FTP for Flash and Windows Media streaming services. +// +// A valid origin host can include a directory information. You may include an authentication username and password along with an origin host. If you set an authentication username and password, CDN servers will include "Authorization:" header in every request. You may use the "Authorization:" header to grant access to CDN servers or you may use it to distinguish CDN servers from normal visitors. Here is a list of valid origin hosts: +// * www.website.com +// * www.website.com/cdn_content +// * cdn_user:password@www.website.com +// * cdn_user:password@www.website.com/images +// +// +// An authentication username should be an alphanumeric string and allowed special characters are . - _
+// An authentication password should be an alphanumeric string and allowed special characters are . - _ ! # $ % ^ & *
    Both username and password must be between 3 to 10 characters long. +// +// CDN nodes will cache your contents and you can control cache lifetime by modifying your web server's configuration. This method also creates a FTP directory restriction upon successful Origin Pull set up. You will not be able to access /media/http directory since contents will be pulled from your origin server. An origin domain must be a valid domain name and it can contain path information. This can help you organize contents on your origin server. For example, you could set an origin domain as: mydomain.com/cdn_contents +// +// A CNAME record allows you to have a customized URL. You can get rid of your CDN account name from the URL. A valid CNAME for the Origin Pull method must point to .http.cdn.softlayer.net. +// +// There are 2 types of origin pull mappings. The one with a CNAME record or the one without a CNAME record and they work very differently. +// +// gzip is supported if your web server sends a proper gzip header. For more details, visit our [http://knowledgelayer.softlayer.com/topic/cdn KnowledgeLayer] +func (r Network_ContentDelivery_Account) CreateOriginPullMapping(mappingObject *datatypes.Container_Network_ContentDelivery_OriginPull_Mapping) (resp bool, err error) { + params := []interface{}{ + mappingObject, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "createOriginPullMapping", params, &r.Options, &resp) + return +} + +// This method is deprecated, please use [[[[SoftLayer_Network_ContentDelivery_Account::createOriginPullMapping|createOriginPullMapping]] method instead. +func (r Network_ContentDelivery_Account) CreateOriginPullRule(originDomain *string, cnameRecord *string) (resp bool, err error) { + params := []interface{}{ + originDomain, + cnameRecord, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "createOriginPullRule", params, &r.Options, &resp) + return +} + +// You need to specify a directory on your CDN FTP or on your origin host in which your secure content resides to enable the token authentication . It will take about about 30 minutes for a newly configured token authentication directory to take effect. +func (r Network_ContentDelivery_Account) CreateTokenAuthenticationDirectory(directory *string, mediaType *string) (resp bool, err error) { + params := []interface{}{ + directory, + mediaType, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "createTokenAuthenticationDirectory", params, &r.Options, &resp) + return +} + +// This method deletes your FTP user record on the ftp.cdnlayer.service.softlayer.com server. Refer to the service overview of [[SoftLayer_Network_ContentDelivery_Account::createFtpUser|createFtpUser]] method for more information on the CDN FTP server. +func (r Network_ContentDelivery_Account) DeleteFtpUser() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "deleteFtpUser", nil, &r.Options, &resp) + return +} + +// This method removes an Origin Pull domain rule. Once an Origin Pull rule is removed, you will be able to access the /media/http directory. It will take 10 to 15 minutes for this to take effect after you remove your Origin Pull rule. Cached contents on CDN POPs may live longer than 15 minutes. 
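+//
+// Illustrative sketch (assumptions: sess is a configured *session.Session,
+// cdnAccountId is the CDN account's id, and originMappingId is the id string of an
+// existing Origin Pull mapping):
+//
+//	cdn := services.GetNetworkContentDeliveryAccountService(sess).Id(cdnAccountId)
+//	removed, err := cdn.DeleteOriginPullRule(sl.String(originMappingId))
+//	if err != nil {
+//		// handle API error
+//	}
+//	_ = removed // allow 10 to 15 minutes, plus POP cache lifetime, for propagation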
+func (r Network_ContentDelivery_Account) DeleteOriginPullRule(originMappingId *string) (resp bool, err error) { + params := []interface{}{ + originMappingId, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "deleteOriginPullRule", params, &r.Options, &resp) + return +} + +// This method disables CDN access log. +func (r Network_ContentDelivery_Account) DisableLogging() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "disableLogging", nil, &r.Options, &resp) + return +} + +// This method enables CDN access log. +func (r Network_ContentDelivery_Account) EnableLogging() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "enableLogging", nil, &r.Options, &resp) + return +} + +// Retrieve The customer account that a CDN account belongs to. +func (r Network_ContentDelivery_Account) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getAccount", nil, &r.Options, &resp) + return +} + +// This method returns bandwidth data for each POP. [[SoftLayer_Container_Network_ContentDelivery_Bandwidth_PointsOfPresence_Summary|POP Bandwidth]] object contains a starting time, ending time, total bytes, POP name and bandwidth unit. +// +// POP bandwidth data is updated everyday at 22:50 CST (or CDT). It queries and stores POP data from the day before. It is a more resource intensive process than a regular CDN bandwidth update thus we run this once a day. Since the POP bandwidth data is delayed for a day, there is no correction process for POP data. The POP bandwidth is not associated with any billing process and is mainly used to generate a POP bandwidth graph. +func (r Network_ContentDelivery_Account) GetAllPopsBandwidthData(beginDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp []datatypes.Container_Network_ContentDelivery_Bandwidth_PointsOfPresence_Summary, err error) { + params := []interface{}{ + beginDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getAllPopsBandwidthData", params, &r.Options, &resp) + return +} + +// This method returns a bandwidth graph for every POP wrapped in [[SoftLayer_Container_Bandwidth_GraphOutputsExtended|Bandwidth Graph]] object. A POP bandwidth graph shows bandwidth consumption per each POP in a bar graph. [[SoftLayer_Container_Bandwidth_GraphOutputsExtended|Bandwidth Graph]] object contains a begin time, end time, title of the graph, binary date, in and outbound total bandwidth in bytes +func (r Network_ContentDelivery_Account) GetAllPopsBandwidthImage(title *string, beginDateTime *datatypes.Time, endDateTime *datatypes.Time, unit *string) (resp datatypes.Container_Bandwidth_GraphOutputsExtended, err error) { + params := []interface{}{ + title, + beginDateTime, + endDateTime, + unit, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getAllPopsBandwidthImage", params, &r.Options, &resp) + return +} + +// Retrieve The CDN account id that this CDN account is associated with. +func (r Network_ContentDelivery_Account) GetAssociatedCdnAccountId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getAssociatedCdnAccountId", nil, &r.Options, &resp) + return +} + +// Retrieve The IP addresses that are used for the content authentication service. 
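+//
+// A rough sketch of listing these records, reusing the sess and cdn setup from the origin pull sketch above:
+//
+//	addresses, err := cdn.GetAuthenticationIpAddresses()
+//	if err != nil {
+//		panic(err)
+//	}
+//	for _, a := range addresses {
+//		fmt.Printf("%+v\n", a) // ALLOW/DENY records for the content authentication service
+//	}
+//
+// The same records, ids included, are what you would hand back to the rearrangeAuthenticationIp method of the
+// SoftLayer_Network_ContentDelivery_Authentication_Address service (documented further below) to change their match order.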
+func (r Network_ContentDelivery_Account) GetAuthenticationIpAddresses() (resp []datatypes.Network_ContentDelivery_Authentication_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getAuthenticationIpAddresses", nil, &r.Options, &resp) + return +} + +// CDN servers will invoke a Web Service method to validate a content authentication token. This method returns all token validation web service endpoints set for a CDN account. You can override the default web service by calling [[SoftLayer_Network_ContentDelivery_Authentication_Token|setContentAuthenticationWsdl setContentAuthenticationWsdl]] method. +func (r Network_ContentDelivery_Account) GetAuthenticationServiceEndpoints() (resp []datatypes.Container_Network_ContentDelivery_Authentication_ServiceEndpoint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getAuthenticationServiceEndpoints", nil, &r.Options, &resp) + return +} + +// This method returns bandwidth data for a given time range. It returns an array of [[SoftLayer_Container_Network_ContentDelivery_Bandwidth_Summary|bandwidth summary]] objects. [[SoftLayer_Container_Network_ContentDelivery_Bandwidth_Summary|Bandwidth summary]] object contains a beginning time, ending time, and bandwidth in bytes. +// +// A Beginning and ending date parameters have to be a timestamp in "yyyy-mm-dd HH24:mi:ss" format and it assumes the time is in Central Standard Time (CST) or Central Daylight Time (CDT) time zone. CDN bandwidth data is stored in Greenwich Mean Time (GMT) internally and converts a beginning and ending time to GMT before querying. +// +// Unlike server bandwidth, CDN bandwidth returns total bytes consumed within an hour. For example, if you pass "2008-10-10 00:00:00" for a beginning time and "2008-10-10 05:00:00" for an ending time, your return value will have 6 elements of bandwidth summary objects. The first bandwidth summary object will have the total bytes consumed between 2008-10-10 00:00:00 and 2008-10-10 05:00:00. And the last object will have the bandwidth consumed between 2008-10-10 05:00:00 and 2008-10-10 00:59:59. The bandwidth data is updated at 10 minutes after every hour. The queried data is on a two hour time delay. The two hour delay is required to gather bandwidth data from each POP and that is the minimum delay required to create a feasible graph. It usually takes about 8 hours to reconcile all the data from every CDN POP. This hourly data is corrected after 24 hours if necessary. If you consume a large amount of bandwidth, your bandwidth data will be updated the next day. +func (r Network_ContentDelivery_Account) GetBandwidthData(beginDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp []datatypes.Container_Network_ContentDelivery_Bandwidth_Summary, err error) { + params := []interface{}{ + beginDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getBandwidthData", params, &r.Options, &resp) + return +} + +// This method returns bandwidth data for a given time range. It returns an array of [[SoftLayer_Container_Network_ContentDelivery_Report_Usage|bandwidth usage report]] objects. +// +// These will be first sorted by timestamp, and there will be one entry with that timestamp for each enabled region. The region type 'NONE' is provided only when non-region-specific data is returned. 
[[SoftLayer_Container_Network_ContentDelivery_Report_Usage|bandwidth usage report]] objects with a region will never contain non-region-specific data. Non-region-specific values are standardTotal and sslTotal; standardTotal is computed by adding the HTTP Large, Windows Media, Flash and Application Delivery Network bandwidth. The sslTotal is computed by adding the HTTP Large SSL bandwidth and the Application Delivery Network SSL bandwidth. +// +// A Beginning and ending date parameters have to be a timestamp in "yyyy-mm-dd HH24:mi:ss" format and it assumes the time is in Central Standard Time (CST) or Central Daylight Time (CDT) time zone. CDN bandwidth data is stored in Greenwich Mean Time (GMT) internally and converts a beginning and ending time to GMT before querying. +// +// Unlike server bandwidth, CDN bandwidth returns total bytes consumed within an hour. For example, if you pass "2008-10-10 00:00:00" for a beginning time and "2008-10-10 05:00:00" for an ending time, your return value will have 6 elements of bandwidth summary objects. The first bandwidth summary object will have the total bytes consumed between 2008-10-10 00:00:00 and 2008-10-10 05:00:00. And the last object will have the bandwidth consumed between 2008-10-10 05:00:00 and 2008-10-10 00:59:59. The bandwidth data is updated at 10 minutes after every hour. The queried data is on a two hour time delay. The two hour delay is required to gather bandwidth data from each POP and that is the minimum delay required to create a feasible graph. It usually takes about 8 hours to reconcile all the data from every CDN POP. This hourly data is corrected after 24 hours if necessary. If you consume a large amount of bandwidth, your bandwidth data will be updated the next day. +func (r Network_ContentDelivery_Account) GetBandwidthDataWithTypes(beginDateTime *datatypes.Time, endDateTime *datatypes.Time, period *string) (resp []datatypes.Container_Network_ContentDelivery_Report_Usage, err error) { + params := []interface{}{ + beginDateTime, + endDateTime, + period, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getBandwidthDataWithTypes", params, &r.Options, &resp) + return +} + +// This method returns a bandwidth graph wrapped in [[SoftLayer_Container_Bandwidth_GraphOutputsExtended|Bandwidth Graph]] object. [[SoftLayer_Container_Bandwidth_GraphOutputsExtended|Bandwidth Graph]] object contains a starting time, ending time, graph title, graph binary data, and in and outbound total bytes. +func (r Network_ContentDelivery_Account) GetBandwidthImage(title *string, beginDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputsExtended, err error) { + params := []interface{}{ + title, + beginDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getBandwidthImage", params, &r.Options, &resp) + return +} + +// Retrieve The current billing item for a CDN account. +func (r Network_ContentDelivery_Account) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The name of a CDN account. +func (r Network_ContentDelivery_Account) GetCdnAccountName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getCdnAccountName", nil, &r.Options, &resp) + return +} + +// Retrieve A brief note on a CDN account. 
+func (r Network_ContentDelivery_Account) GetCdnAccountNote() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getCdnAccountNote", nil, &r.Options, &resp) + return +} + +// Retrieve The solution type of a CDN account. +func (r Network_ContentDelivery_Account) GetCdnSolutionName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getCdnSolutionName", nil, &r.Options, &resp) + return +} + +// An origin pull mapping is a combination of your customer origin record and a CNAME (optional) record. You can now keep track of your customer origin records separate from your CNAME records. This service returns your customer origin records. +func (r Network_ContentDelivery_Account) GetCustomerOrigins(mediaType *string) (resp []datatypes.Container_Network_ContentDelivery_OriginPull_Mapping, err error) { + params := []interface{}{ + mediaType, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getCustomerOrigins", params, &r.Options, &resp) + return +} + +// Retrieve Indicates if CDN account is dependent on other service. If set, this CDN account is limited to these services: createOriginPullMapping, deleteOriginPullRule, getOriginPullMappingInformation, getCdnUrls, purgeCache, loadContent, manageHttpCompression +func (r Network_ContentDelivery_Account) GetDependantServiceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getDependantServiceFlag", nil, &r.Options, &resp) + return +} + +// This method returns an array of [[SoftLayer_Container_Network_Directory_Listing|Directory Listing]] objects. You must have CDN_FILE_MANAGE privilege and you can only retrieve directory information within /media directory. A [[SoftLayer_Container_Network_Directory_Listing|Directory Listing]] object contains type (indicating whether it is a file or a directory), name and file count if it is a directory. +func (r Network_ContentDelivery_Account) GetDirectoryInformation(directoryName *string) (resp []datatypes.Container_Network_Directory_Listing, err error) { + params := []interface{}{ + directoryName, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getDirectoryInformation", params, &r.Options, &resp) + return +} + +// This method returns disk space usage data for your CDN FTP. +func (r Network_ContentDelivery_Account) GetDiskSpaceUsageDataByDate(beginDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + beginDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getDiskSpaceUsageDataByDate", params, &r.Options, &resp) + return +} + +// This method returns a disk usage graph wrapped in [[SoftLayer_Container_Bandwidth_GraphOutputsExtended|Bandwidth Graph]] object. [[SoftLayer_Container_Bandwidth_GraphOutputsExtended|Bandwidth Graph]] object contains a starting time, ending time, graph title, graph binary data, and in and outbound total bytes. 
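+//
+// A rough sketch of fetching the graph for a date range and saving the image, reusing the cdn setup from the earlier
+// sketches; it additionally assumes the time and io/ioutil imports, that datatypes.Time wraps time.Time, and that the
+// returned container exposes the image bytes as GraphImage (an assumption):
+//
+//	begin := datatypes.Time{Time: time.Now().AddDate(0, -1, 0)}
+//	end := datatypes.Time{Time: time.Now()}
+//	graph, err := cdn.GetDiskSpaceUsageImageByDate(&begin, &end)
+//	if err != nil {
+//		panic(err)
+//	}
+//	if graph.GraphImage != nil {
+//		_ = ioutil.WriteFile("disk_usage_graph.png", *graph.GraphImage, 0644)
+//	}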
+func (r Network_ContentDelivery_Account) GetDiskSpaceUsageImageByDate(beginDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + beginDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getDiskSpaceUsageImageByDate", params, &r.Options, &resp) + return +} + +// This method returns your login credentials to the CDN FTP server (ftp.cdnlayer.service.softlayer.com server). You must have CDN_FILE_MANAGE privilege. Refer to the service overview of [[SoftLayer_Network_ContentDelivery_Account::createFtpUser|createFtpUser]] method for more information on the CDN FTP server. +// +// If you want to download raw log files, prefix the username with "LOGS-" (without quotes) when logging in. SoftLayer designed CDN accounts so they can have multiple CDN FTP users. However, this method returns the default CDN FTP user information: multi user support may be implemented in the future. +func (r Network_ContentDelivery_Account) GetFtpAttributes() (resp datatypes.Container_Network_Authentication_Data, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getFtpAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve Indicates if it is a legacy CDN or not +func (r Network_ContentDelivery_Account) GetLegacyCdnFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getLegacyCdnFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Indicates if CDN logging is enabled. +func (r Network_ContentDelivery_Account) GetLogEnabledFlag() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getLogEnabledFlag", nil, &r.Options, &resp) + return +} + +// This method returns CDN URLs for static file (http), Flash streaming (rtmp) and Window Media (mms) streaming services. You can generate your CDN URLs based on the information retrieved by this method. +func (r Network_ContentDelivery_Account) GetMediaUrls() (resp []datatypes.Container_Network_ContentDelivery_SupportedProtocol, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getMediaUrls", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_ContentDelivery_Account object whose ID number corresponds to the ID number of the initial parameter passed to the SoftLayer_Network_ContentDelivery_Account service. You can only retrieve CDN accounts assigned to your SoftLayer customer account. +func (r Network_ContentDelivery_Account) GetObject() (resp datatypes.Network_ContentDelivery_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getObject", nil, &r.Options, &resp) + return +} + +// This method returns a list of origin pull configuration data. +func (r Network_ContentDelivery_Account) GetOriginPullMappingInformation() (resp []datatypes.Container_Network_ContentDelivery_OriginPull_Mapping, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getOriginPullMappingInformation", nil, &r.Options, &resp) + return +} + +// This method returns CDN URLs that supports Origin Pull mappings. 
+func (r Network_ContentDelivery_Account) GetOriginPullSupportedMediaUrls() (resp []datatypes.Container_Network_ContentDelivery_SupportedProtocol, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getOriginPullSupportedMediaUrls", nil, &r.Options, &resp) + return +} + +// This method returns the domain name of your Origin Pull rule. It assumes you have already setup an Origin Pull rule. Otherwise, it will throw an exception. A returning value is the value of the first parameter (origin pull domain) you provided to [[SoftLayer_Network_ContentDelivery_Account::createOriginPullRule|createOriginPullRule]] method. See Error Handling section below for possible errors. +func (r Network_ContentDelivery_Account) GetOriginPullUrl() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getOriginPullUrl", nil, &r.Options, &resp) + return +} + +// This method returns an array of CDN POPs (Points of Presence) object. [[SoftLayer_Container_Network_ContentDelivery_PointsOfPresence|POP object]] object contains the POP id and name. +func (r Network_ContentDelivery_Account) GetPopNames() (resp []datatypes.Container_Network_ContentDelivery_PointsOfPresence, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getPopNames", nil, &r.Options, &resp) + return +} + +// Retrieve Indicates if customer is allowed to access the CDN provider's management portal. +func (r Network_ContentDelivery_Account) GetProviderPortalAccessFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getProviderPortalAccessFlag", nil, &r.Options, &resp) + return +} + +// This method returns your login credentials to the CDN provider portal. +func (r Network_ContentDelivery_Account) GetProviderPortalCredentials() (resp datatypes.Container_Network_Authentication_Data, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getProviderPortalCredentials", nil, &r.Options, &resp) + return +} + +// Retrieve A CDN account's status presented in a more detailed data type. +func (r Network_ContentDelivery_Account) GetStatus() (resp datatypes.Network_ContentDelivery_Account_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getStatus", nil, &r.Options, &resp) + return +} + +// This method returns all token authentication directories. +func (r Network_ContentDelivery_Account) GetTokenAuthenticationDirectories() (resp []datatypes.Container_Network_Directory_Listing, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getTokenAuthenticationDirectories", nil, &r.Options, &resp) + return +} + +// Retrieve Indicates if the token authentication service is enabled or not. +func (r Network_ContentDelivery_Account) GetTokenAuthenticationEnabledFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getTokenAuthenticationEnabledFlag", nil, &r.Options, &resp) + return +} + +// This method returns your login credentials to the public CDN FTP. 
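+//
+// A rough sketch, reusing the cdn setup from the earlier sketches; the returned Container_Network_Authentication_Data
+// is printed generically rather than by field name:
+//
+//	creds, err := cdn.GetVendorFtpAttributes()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Printf("%+v\n", creds) // login credentials for the public CDN FTP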
+func (r Network_ContentDelivery_Account) GetVendorFtpAttributes() (resp datatypes.Container_Network_Authentication_Data, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getVendorFtpAttributes", nil, &r.Options, &resp) + return +} + +// Whether you are using Origin Pull or POP Pull, your content will be transferred and cached on CDN POP (node) on the initial request. If you wish to load your content to all CDN POPs, you may use this service to accomplish that. Please keep in mind, it will take about 10 to 15 minutes to load content to all CDN POPs depending on the load. +// +// You can only specify 5 URLs at a time. +func (r Network_ContentDelivery_Account) LoadContent(objectUrls []string) (resp bool, err error) { + params := []interface{}{ + objectUrls, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "loadContent", params, &r.Options, &resp) + return +} + +// HTTP Compression is used to reduce the bandwidth used to deliver an object. You can specify list of content types that needs to be compressed. If you omit the content type parameter, these values will be used by default: +// * text/plain +// * text/html +// * text/css +// * application/x-javascript +// * text/javascript +// +// +// Note that files larger than 1MB will never be served with compression regardless of whether their content-type is enabled for compression. +func (r Network_ContentDelivery_Account) ManageHttpCompression(enableFlag *bool, mimeTypes []string) (resp bool, err error) { + params := []interface{}{ + enableFlag, + mimeTypes, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "manageHttpCompression", params, &r.Options, &resp) + return +} + +// CDN's cache mechanism works similar to that of web browsers. When CDN pulls a file from your origin server or from your CDN FTP directory for the first time, it creates a cache file on itself. CDN re-uses cache files to save trips to the origin server and thus it speeds up delivering content to visitors. This method removes cached objects on every server in the CDN network. If you see a stale content or a file that send an incorrect header, purging cache will correct the issue. CDN will pull a fresh content from your origin server or your CDN FTP. +// +// This method takes an array of URLs. A URL must be exact as it is being requested by clients. An example URLs may look like this: +// * http://.http.cdn.softlayer.net/mycdnname/some_file.txt +// +// +// If you created a CNAME that points to CDN host, use your CNAME URL instead. +// * http://image.mydomain.com/some_file.txt +// +// +// It takes approximately 3-5 minutes for the system to delete the requested object on every CDN server from submission . +func (r Network_ContentDelivery_Account) PurgeCache(objectUrls []string) (resp []datatypes.Container_Network_ContentDelivery_PurgeService_Response, err error) { + params := []interface{}{ + objectUrls, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "purgeCache", params, &r.Options, &resp) + return +} + +// If you want to turn off the token authentication, use this method to remove a directory from the token authentication directory. 
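+//
+// A rough sketch of turning token authentication on for a directory and later off again, reusing the cdn setup from
+// the earlier sketches; the directory path is a placeholder and "HTTP" is one of the media types listed elsewhere in
+// this file:
+//
+//	// Protect a directory with token authentication (takes roughly 30 minutes to apply).
+//	if _, err := cdn.CreateTokenAuthenticationDirectory(sl.String("/media/http/secure"), sl.String("HTTP")); err != nil {
+//		panic(err)
+//	}
+//
+//	// Later, turn token authentication off for the same directory.
+//	if _, err := cdn.RemoveAuthenticationDirectory(sl.String("/media/http/secure"), sl.String("HTTP")); err != nil {
+//		panic(err)
+//	}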
+func (r Network_ContentDelivery_Account) RemoveAuthenticationDirectory(directory *string, mediaType *string) (resp bool, err error) {
+	params := []interface{}{
+		directory,
+		mediaType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "removeAuthenticationDirectory", params, &r.Options, &resp)
+	return
+}
+
+// With this method, you can remove a file or a directory on the CDN FTP server. If a source name ends with a slash (/), this method assumes it is a directory. A source name must be an absolute path. It does not check to see if a file or directory exists before deletion. You can only remove files and directories that are in the /media folder. Be sure to catch an exception for the details of an error.
+func (r Network_ContentDelivery_Account) RemoveFile(source *string) (resp bool, err error) {
+	params := []interface{}{
+		source,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "removeFile", params, &r.Options, &resp)
+	return
+}
+
+// CDN servers will invoke a Web Service method to validate a content authentication token. CDN uses the default Web Service provided by SoftLayer to validate a token. A customer can use their own implementation of the token authentication Web Service. A valid SOAP WSDL will look similar to [https://manage.softlayer.com/CdnService/authenticationWsdlExample/wsdl this].
+func (r Network_ContentDelivery_Account) SetAuthenticationServiceEndpoint(webserviceEndpoint *string, protocol *string) (resp bool, err error) {
+	params := []interface{}{
+		webserviceEndpoint,
+		protocol,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "setAuthenticationServiceEndpoint", params, &r.Options, &resp)
+	return
+}
+
+// With a CDN FTP, you can upload content to the CDN host server. Once you upload content, it will be fetched by the CDN POP (Points of Presence) servers as needed.
+//
+// CDN supports three protocols: Flash streaming (rtmp), Windows Media streaming (mms) and HTTP. Once you log in to the CDN FTP server, you will see three directories under the /media directory. You have to upload your contents to the proper directory to use the different services. Refer to the [[SoftLayer_Network_ContentDelivery_Account|CDN Account]] service overview for details on the CDN FTP server. "gzip" is supported if you compress your content before uploading and you have to change its extension to ".gz". [[SoftLayer_Network_ContentDelivery_Account::createOriginPullRule|Origin Pull]] also supports "gzip" contents and you don't have to modify the file extension with Origin Pull. Once uploaded, your contents should be available almost immediately to visitors. However, it may take about 30 minutes to propagate files to the entire CDN network after uploading. For more details, visit our [http://knowledgelayer.softlayer.com/topic/cdn KnowledgeLayer]
+//
+// This method updates the password for your CDN FTP account on the ftp.cdnlayer.service.softlayer.com server. You must provide an alphanumeric value for a new password. The characters - _ ! % # $ ^ & * are allowed besides alphanumeric characters.
+func (r Network_ContentDelivery_Account) SetFtpPassword(newPassword *string) (resp bool, err error) {
+	params := []interface{}{
+		newPassword,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "setFtpPassword", params, &r.Options, &resp)
+	return
+}
+
+// This method allows you to edit the CDN account note. The maximum length for a CDN account note is 30 characters.
+func (r Network_ContentDelivery_Account) UpdateNote(note *string) (resp bool, err error) { + params := []interface{}{ + note, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "updateNote", params, &r.Options, &resp) + return +} + +// With this method, you can upload binary data to the CDN FTP server. This method supports files up to 20 Mega Bytes. You need to use the CDN FTP (ftp.cdnlayer.service.softlayer.com) to upload files larger than 20 MB. This method takes [[SoftLayer_Container_Utility_File_Attachment]] a first parameter. A target name must be an absolute path and you can only upload a file to a directory that is in /media folder. +func (r Network_ContentDelivery_Account) UploadStream(source *datatypes.Container_Utility_File_Attachment, target *string) (resp bool, err error) { + params := []interface{}{ + source, + target, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "uploadStream", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_ContentDelivery_Authentication_Address data type models an individual IP address that CDN allow or deny access from. +type Network_ContentDelivery_Authentication_Address struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkContentDeliveryAuthenticationAddressService returns an instance of the Network_ContentDelivery_Authentication_Address SoftLayer service +func GetNetworkContentDeliveryAuthenticationAddressService(sess *session.Session) Network_ContentDelivery_Authentication_Address { + return Network_ContentDelivery_Authentication_Address{Session: sess} +} + +func (r Network_ContentDelivery_Authentication_Address) Id(id int) Network_ContentDelivery_Authentication_Address { + r.Options.Id = &id + return r +} + +func (r Network_ContentDelivery_Authentication_Address) Mask(mask string) Network_ContentDelivery_Authentication_Address { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_ContentDelivery_Authentication_Address) Filter(filter string) Network_ContentDelivery_Authentication_Address { + r.Options.Filter = filter + return r +} + +func (r Network_ContentDelivery_Authentication_Address) Limit(limit int) Network_ContentDelivery_Authentication_Address { + r.Options.Limit = &limit + return r +} + +func (r Network_ContentDelivery_Authentication_Address) Offset(offset int) Network_ContentDelivery_Authentication_Address { + r.Options.Offset = &offset + return r +} + +// This method creates an authentication IP record. Required parameters are +// +// +// * cdnAccountId - A CDN account id that belongs to your SoftLayer Account +// * ipAddress - An IP address or a IP range +// * accessType- It can be "ALLOW" or "DENY" +func (r Network_ContentDelivery_Authentication_Address) CreateObject(templateObject *datatypes.Network_ContentDelivery_Authentication_Address) (resp datatypes.Network_ContentDelivery_Authentication_Address, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Address", "createObject", params, &r.Options, &resp) + return +} + +// This method deletes an authentication IP address. 
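+//
+// A rough sketch of creating a record and deleting it again, reusing the sess setup from the earlier sketches; the
+// CdnAccountId/IpAddress/AccessType/Id field names are assumptions taken from the parameter list above, not verified
+// against the datatypes package:
+//
+//	addrSvc := services.GetNetworkContentDeliveryAuthenticationAddressService(sess)
+//	created, err := addrSvc.CreateObject(&datatypes.Network_ContentDelivery_Authentication_Address{
+//		CdnAccountId: sl.Int(12345),             // assumed field name, placeholder id
+//		IpAddress:    sl.String("203.0.113.10"), // assumed field name
+//		AccessType:   sl.String("DENY"),         // assumed field name
+//	})
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	// Remove it later by id.
+//	if _, err := addrSvc.Id(*created.Id).DeleteObject(); err != nil {
+//		panic(err)
+//	}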
+func (r Network_ContentDelivery_Authentication_Address) DeleteObject() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Address", "deleteObject", nil, &r.Options, &resp)
+	return
+}
+
+// This method lets you edit an authentication IP object by passing a modified object.
+func (r Network_ContentDelivery_Authentication_Address) EditObject(templateObject *datatypes.Network_ContentDelivery_Authentication_Address) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Address", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Network_ContentDelivery_Authentication_Address object whose ID number corresponds to the ID number of the initial parameter passed to the SoftLayer_Network_ContentDelivery_Authentication_Address service. You can only retrieve authentication IP addresses assigned to one of your CDN accounts.
+func (r Network_ContentDelivery_Authentication_Address) GetObject() (resp datatypes.Network_ContentDelivery_Authentication_Address, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Address", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// The authentication IP address match occurs from the higher priority IP to the lower. This method is helpful if you want to modify the order (priority) of the authentication IP addresses. You can use this method instead of editing individual authentication IP addresses.
+//
+// You can retrieve authentication IP addresses using the [[SoftLayer_Network_ContentDelivery_Account::getAuthenticationIpAddresses|getAuthenticationIpAddresses]] method. Then, rearrange the authentication IP addresses and pass them to this method. When creating template objects as parameters, make sure to include the id of each authentication IP address. You must provide every authentication IP address. New priorities will be assigned to each authentication IP address in the order they are passed.
+func (r Network_ContentDelivery_Authentication_Address) RearrangeAuthenticationIp(cdnAccountId *int, templateObjects []datatypes.Network_ContentDelivery_Authentication_Address) (resp bool, err error) {
+	params := []interface{}{
+		cdnAccountId,
+		templateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Address", "rearrangeAuthenticationIp", params, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Network_ContentDelivery_Authentication_Token data type models a managed token for the CDN content authentication service.
+type Network_ContentDelivery_Authentication_Token struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkContentDeliveryAuthenticationTokenService returns an instance of the Network_ContentDelivery_Authentication_Token SoftLayer service +func GetNetworkContentDeliveryAuthenticationTokenService(sess *session.Session) Network_ContentDelivery_Authentication_Token { + return Network_ContentDelivery_Authentication_Token{Session: sess} +} + +func (r Network_ContentDelivery_Authentication_Token) Id(id int) Network_ContentDelivery_Authentication_Token { + r.Options.Id = &id + return r +} + +func (r Network_ContentDelivery_Authentication_Token) Mask(mask string) Network_ContentDelivery_Authentication_Token { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_ContentDelivery_Authentication_Token) Filter(filter string) Network_ContentDelivery_Authentication_Token { + r.Options.Filter = filter + return r +} + +func (r Network_ContentDelivery_Authentication_Token) Limit(limit int) Network_ContentDelivery_Authentication_Token { + r.Options.Limit = &limit + return r +} + +func (r Network_ContentDelivery_Authentication_Token) Offset(offset int) Network_ContentDelivery_Authentication_Token { + r.Options.Offset = &offset + return r +} + +// This method is deprecated! Use the [[SoftLayer_Network_ContentDelivery_Authentication_Token::getTimedToken|getTimedToken]] method. +// +// This method creates a managed authentication token. When passing a parameter, the only required value is your CDN account id which can be obtained from the [[SoftLayer_Account::getCdnAccounts|getCdnAccounts]] method. There are 3 optional parameters you can pass: +// +// +// * name - This helps you keep track of managed tokens. +// * referrer - If set, the token validation will check the client's referrer. Keep in mind, if a client doesn't have the referrer information, the token validation will fail. +// * clientIp - If set, the token validation will check the client's IP address. +// +// +func (r Network_ContentDelivery_Authentication_Token) CreateObject(templateObject *datatypes.Network_ContentDelivery_Authentication_Token) (resp datatypes.Network_ContentDelivery_Authentication_Token, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "createObject", params, &r.Options, &resp) + return +} + +// This method is deprecated! +// +// This method returns all managed tokens for a CDN account. +func (r Network_ContentDelivery_Authentication_Token) GetAllManagedTokens(cdnAccountId *int) (resp []datatypes.Network_ContentDelivery_Authentication_Token, err error) { + params := []interface{}{ + cdnAccountId, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "getAllManagedTokens", params, &r.Options, &resp) + return +} + +// This method is deprecated! +// +// getObject retrieves the SoftLayer_Network_ContentDelivery_Authentication_Token object whose ID number corresponds to the ID number of the initial parameter passed to the SoftLayer_Network_ContentDelivery_Authentication_Token service. You can only retrieve managed tokens assigned to one of your CDN account. 
+func (r Network_ContentDelivery_Authentication_Token) GetObject() (resp datatypes.Network_ContentDelivery_Authentication_Token, err error) { + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "getObject", nil, &r.Options, &resp) + return +} + +// This method returns an authentication token that expires after the seconds you specify. You can provide number of seconds to manage the token life. This parameter sets the expiration time for a token. A valid life time must be an integer between 60 and 604800 (1 week). A customer can also provide client ip and (or) referrer information. If used, a client from the same IP and referrer can view the protected contents. +// +// A valid IP address must be an IPv4 format or an IP block. if you want to block access from IP 211.37.0.0/16, you can enter "211.37." instead. IP blocks can be specified in the manner of "8bit times n". +// +// The referrer is the URL of the previous webpage from which a link was followed. A referrer should not include "http://" prefix and it can be maximum of 30 characters. +func (r Network_ContentDelivery_Authentication_Token) GetTimedToken(cdnAccountId *int, tokenLife *int, clientIp *string, referrer *string, mediaType *string) (resp string, err error) { + params := []interface{}{ + cdnAccountId, + tokenLife, + clientIp, + referrer, + mediaType, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "getTimedToken", params, &r.Options, &resp) + return +} + +// This method is deprecated! +// +// This method revokes all managed tokens belong to a CDN account. +func (r Network_ContentDelivery_Authentication_Token) RevokeAllManagedTokens(cdnAccountId *int) (resp bool, err error) { + params := []interface{}{ + cdnAccountId, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "revokeAllManagedTokens", params, &r.Options, &resp) + return +} + +// This method revokes all tokens belong to a CDN account. Valid media types are "HTTP", "FLASH" and "WM". +func (r Network_ContentDelivery_Authentication_Token) RevokeAllTokens(cdnAccountId *int, mediaType *string) (resp bool, err error) { + params := []interface{}{ + cdnAccountId, + mediaType, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "revokeAllTokens", params, &r.Options, &resp) + return +} + +// This method is deprecated! +// +// Revokes a managed token. If you revoke a token, the token will be removed from SoftLayer's system but it will not remove your content on CDN FTP. The content that requires token validation will not be available to the visitor who is using a revoked token. +func (r Network_ContentDelivery_Authentication_Token) RevokeManagedToken(cdnAccountId *int, token *string) (resp bool, err error) { + params := []interface{}{ + cdnAccountId, + token, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "revokeManagedToken", params, &r.Options, &resp) + return +} + +// This method is deprecated! 
+// +// Deletes multiple managed tokens +func (r Network_ContentDelivery_Authentication_Token) RevokeManagedTokens(templateObjects []datatypes.Network_ContentDelivery_Authentication_Token) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "revokeManagedTokens", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Customer_Subnet data type contains general information relating to a single customer subnet (remote). +type Network_Customer_Subnet struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkCustomerSubnetService returns an instance of the Network_Customer_Subnet SoftLayer service +func GetNetworkCustomerSubnetService(sess *session.Session) Network_Customer_Subnet { + return Network_Customer_Subnet{Session: sess} +} + +func (r Network_Customer_Subnet) Id(id int) Network_Customer_Subnet { + r.Options.Id = &id + return r +} + +func (r Network_Customer_Subnet) Mask(mask string) Network_Customer_Subnet { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Customer_Subnet) Filter(filter string) Network_Customer_Subnet { + r.Options.Filter = filter + return r +} + +func (r Network_Customer_Subnet) Limit(limit int) Network_Customer_Subnet { + r.Options.Limit = &limit + return r +} + +func (r Network_Customer_Subnet) Offset(offset int) Network_Customer_Subnet { + r.Options.Offset = &offset + return r +} + +// For IPSec network tunnels, customers can create their local subnets using this method. After the customer is created successfully, the customer subnet can then be added to the IPSec network tunnel. +func (r Network_Customer_Subnet) CreateObject(templateObject *datatypes.Network_Customer_Subnet) (resp datatypes.Network_Customer_Subnet, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Customer_Subnet", "createObject", params, &r.Options, &resp) + return +} + +// Retrieve All ip addresses associated with a subnet. +func (r Network_Customer_Subnet) GetIpAddresses() (resp []datatypes.Network_Customer_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Customer_Subnet", "getIpAddresses", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Customer_Subnet object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Customer_Subnet service. You can only retrieve the subnet whose account matches the account that your portal user is assigned to. +func (r Network_Customer_Subnet) GetObject() (resp datatypes.Network_Customer_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Customer_Subnet", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_DirectLink_Location presents a structure containing attributes of a Direct Link location, and its related object SoftLayer location. 
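+//
+// A rough sketch of listing the available locations, reusing the sess setup from the earlier sketches:
+//
+//	dlSvc := services.GetNetworkDirectLinkLocationService(sess)
+//	locations, err := dlSvc.GetAllObjects()
+//	if err != nil {
+//		panic(err)
+//	}
+//	for _, l := range locations {
+//		fmt.Printf("%+v\n", l)
+//	}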
+type Network_DirectLink_Location struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkDirectLinkLocationService returns an instance of the Network_DirectLink_Location SoftLayer service +func GetNetworkDirectLinkLocationService(sess *session.Session) Network_DirectLink_Location { + return Network_DirectLink_Location{Session: sess} +} + +func (r Network_DirectLink_Location) Id(id int) Network_DirectLink_Location { + r.Options.Id = &id + return r +} + +func (r Network_DirectLink_Location) Mask(mask string) Network_DirectLink_Location { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_DirectLink_Location) Filter(filter string) Network_DirectLink_Location { + r.Options.Filter = filter + return r +} + +func (r Network_DirectLink_Location) Limit(limit int) Network_DirectLink_Location { + r.Options.Limit = &limit + return r +} + +func (r Network_DirectLink_Location) Offset(offset int) Network_DirectLink_Location { + r.Options.Offset = &offset + return r +} + +// Return all existing Direct Link location. +func (r Network_DirectLink_Location) GetAllObjects() (resp []datatypes.Network_DirectLink_Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_DirectLink_Location", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The location of Direct Link facility. +func (r Network_DirectLink_Location) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_DirectLink_Location", "getLocation", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_DirectLink_Location) GetObject() (resp datatypes.Network_DirectLink_Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_DirectLink_Location", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The Id of Direct Link provider. +func (r Network_DirectLink_Location) GetProvider() (resp datatypes.Network_DirectLink_Provider, err error) { + err = r.Session.DoRequest("SoftLayer_Network_DirectLink_Location", "getProvider", nil, &r.Options, &resp) + return +} + +// Retrieve The Id of Direct Link service type. +func (r Network_DirectLink_Location) GetServiceType() (resp datatypes.Network_DirectLink_ServiceType, err error) { + err = r.Session.DoRequest("SoftLayer_Network_DirectLink_Location", "getServiceType", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_DirectLink_Provider presents a structure containing attributes of a Direct Link provider. 
+type Network_DirectLink_Provider struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkDirectLinkProviderService returns an instance of the Network_DirectLink_Provider SoftLayer service +func GetNetworkDirectLinkProviderService(sess *session.Session) Network_DirectLink_Provider { + return Network_DirectLink_Provider{Session: sess} +} + +func (r Network_DirectLink_Provider) Id(id int) Network_DirectLink_Provider { + r.Options.Id = &id + return r +} + +func (r Network_DirectLink_Provider) Mask(mask string) Network_DirectLink_Provider { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_DirectLink_Provider) Filter(filter string) Network_DirectLink_Provider { + r.Options.Filter = filter + return r +} + +func (r Network_DirectLink_Provider) Limit(limit int) Network_DirectLink_Provider { + r.Options.Limit = &limit + return r +} + +func (r Network_DirectLink_Provider) Offset(offset int) Network_DirectLink_Provider { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_DirectLink_Provider) GetObject() (resp datatypes.Network_DirectLink_Provider, err error) { + err = r.Session.DoRequest("SoftLayer_Network_DirectLink_Provider", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_DirectLink_ServiceType presents a structure containing attributes of a Direct Link Service Type. +type Network_DirectLink_ServiceType struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkDirectLinkServiceTypeService returns an instance of the Network_DirectLink_ServiceType SoftLayer service +func GetNetworkDirectLinkServiceTypeService(sess *session.Session) Network_DirectLink_ServiceType { + return Network_DirectLink_ServiceType{Session: sess} +} + +func (r Network_DirectLink_ServiceType) Id(id int) Network_DirectLink_ServiceType { + r.Options.Id = &id + return r +} + +func (r Network_DirectLink_ServiceType) Mask(mask string) Network_DirectLink_ServiceType { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_DirectLink_ServiceType) Filter(filter string) Network_DirectLink_ServiceType { + r.Options.Filter = filter + return r +} + +func (r Network_DirectLink_ServiceType) Limit(limit int) Network_DirectLink_ServiceType { + r.Options.Limit = &limit + return r +} + +func (r Network_DirectLink_ServiceType) Offset(offset int) Network_DirectLink_ServiceType { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_DirectLink_ServiceType) GetObject() (resp datatypes.Network_DirectLink_ServiceType, err error) { + err = r.Session.DoRequest("SoftLayer_Network_DirectLink_ServiceType", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Firewall_AccessControlList data type contains general information relating to a single SoftLayer firewall access to controll list. This is the object which ties the running rules to a specific context. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. 
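+//
+// A rough sketch of reading the currently running rules of one access control list, reusing the sess setup from the
+// earlier sketches; the id value is a placeholder:
+//
+//	aclSvc := services.GetNetworkFirewallAccessControlListService(sess).Id(67890)
+//	rules, err := aclSvc.GetRules()
+//	if err != nil {
+//		panic(err)
+//	}
+//	for _, rule := range rules {
+//		fmt.Printf("%+v\n", rule)
+//	}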
+type Network_Firewall_AccessControlList struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkFirewallAccessControlListService returns an instance of the Network_Firewall_AccessControlList SoftLayer service +func GetNetworkFirewallAccessControlListService(sess *session.Session) Network_Firewall_AccessControlList { + return Network_Firewall_AccessControlList{Session: sess} +} + +func (r Network_Firewall_AccessControlList) Id(id int) Network_Firewall_AccessControlList { + r.Options.Id = &id + return r +} + +func (r Network_Firewall_AccessControlList) Mask(mask string) Network_Firewall_AccessControlList { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Firewall_AccessControlList) Filter(filter string) Network_Firewall_AccessControlList { + r.Options.Filter = filter + return r +} + +func (r Network_Firewall_AccessControlList) Limit(limit int) Network_Firewall_AccessControlList { + r.Options.Limit = &limit + return r +} + +func (r Network_Firewall_AccessControlList) Offset(offset int) Network_Firewall_AccessControlList { + r.Options.Offset = &offset + return r +} + +// Retrieve The update requests made for this firewall. +func (r Network_Firewall_AccessControlList) GetNetworkFirewallUpdateRequests() (resp []datatypes.Network_Firewall_Update_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_AccessControlList", "getNetworkFirewallUpdateRequests", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Firewall_AccessControlList) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_AccessControlList", "getNetworkVlan", nil, &r.Options, &resp) + return +} + +// getObject returns a SoftLayer_Network_Firewall_AccessControlList object. You can only get objects for servers attached to your account that have a network firewall enabled. +func (r Network_Firewall_AccessControlList) GetObject() (resp datatypes.Network_Firewall_AccessControlList, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_AccessControlList", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The currently running rule set of this context access control list firewall. +func (r Network_Firewall_AccessControlList) GetRules() (resp []datatypes.Network_Vlan_Firewall_Rule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_AccessControlList", "getRules", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Firewall_Interface data type contains general information relating to a single SoftLayer firewall interface. This is the object which ties the firewall context access control list to a firewall. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. 
+type Network_Firewall_Interface struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkFirewallInterfaceService returns an instance of the Network_Firewall_Interface SoftLayer service +func GetNetworkFirewallInterfaceService(sess *session.Session) Network_Firewall_Interface { + return Network_Firewall_Interface{Session: sess} +} + +func (r Network_Firewall_Interface) Id(id int) Network_Firewall_Interface { + r.Options.Id = &id + return r +} + +func (r Network_Firewall_Interface) Mask(mask string) Network_Firewall_Interface { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Firewall_Interface) Filter(filter string) Network_Firewall_Interface { + r.Options.Filter = filter + return r +} + +func (r Network_Firewall_Interface) Limit(limit int) Network_Firewall_Interface { + r.Options.Limit = &limit + return r +} + +func (r Network_Firewall_Interface) Offset(offset int) Network_Firewall_Interface { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Network_Firewall_Interface) GetFirewallContextAccessControlLists() (resp []datatypes.Network_Firewall_AccessControlList, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Interface", "getFirewallContextAccessControlLists", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Firewall_Interface) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Interface", "getNetworkVlan", nil, &r.Options, &resp) + return +} + +// getObject returns a SoftLayer_Network_Firewall_Interface object. You can only get objects for servers attached to your account that have a network firewall enabled. 
+func (r Network_Firewall_Interface) GetObject() (resp datatypes.Network_Firewall_Interface, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Interface", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Firewall_Module_Context_Interface struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkFirewallModuleContextInterfaceService returns an instance of the Network_Firewall_Module_Context_Interface SoftLayer service +func GetNetworkFirewallModuleContextInterfaceService(sess *session.Session) Network_Firewall_Module_Context_Interface { + return Network_Firewall_Module_Context_Interface{Session: sess} +} + +func (r Network_Firewall_Module_Context_Interface) Id(id int) Network_Firewall_Module_Context_Interface { + r.Options.Id = &id + return r +} + +func (r Network_Firewall_Module_Context_Interface) Mask(mask string) Network_Firewall_Module_Context_Interface { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Firewall_Module_Context_Interface) Filter(filter string) Network_Firewall_Module_Context_Interface { + r.Options.Filter = filter + return r +} + +func (r Network_Firewall_Module_Context_Interface) Limit(limit int) Network_Firewall_Module_Context_Interface { + r.Options.Limit = &limit + return r +} + +func (r Network_Firewall_Module_Context_Interface) Offset(offset int) Network_Firewall_Module_Context_Interface { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Network_Firewall_Module_Context_Interface) GetFirewallContextAccessControlLists() (resp []datatypes.Network_Firewall_AccessControlList, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Module_Context_Interface", "getFirewallContextAccessControlLists", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Firewall_Module_Context_Interface) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Module_Context_Interface", "getNetworkVlan", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Firewall_Module_Context_Interface) GetObject() (resp datatypes.Network_Firewall_Module_Context_Interface, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Module_Context_Interface", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Firewall_Template type contains general information for a SoftLayer network firewall template. +// +// Firewall templates are recommend rule sets for use with SoftLayer Hardware Firewall (Dedicated). These optimized templates are designed to balance security restriction with application availability. The templates given may be altered to provide custom network security, or may be used as-is for basic security. At least one rule set MUST be applied for the firewall to block traffic. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. 
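+//
+// A rough sketch of pulling the recommended templates and inspecting the rules of one of them, reusing the sess setup
+// from the earlier sketches; the template id is a placeholder:
+//
+//	tmplSvc := services.GetNetworkFirewallTemplateService(sess)
+//	templates, err := tmplSvc.GetAllObjects()
+//	if err != nil {
+//		panic(err)
+//	}
+//	for _, t := range templates {
+//		fmt.Printf("%+v\n", t)
+//	}
+//
+//	// Rules of a specific template, selected by its id.
+//	rules, err := tmplSvc.Id(1).GetRules()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Printf("%d template rules\n", len(rules))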
+type Network_Firewall_Template struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkFirewallTemplateService returns an instance of the Network_Firewall_Template SoftLayer service +func GetNetworkFirewallTemplateService(sess *session.Session) Network_Firewall_Template { + return Network_Firewall_Template{Session: sess} +} + +func (r Network_Firewall_Template) Id(id int) Network_Firewall_Template { + r.Options.Id = &id + return r +} + +func (r Network_Firewall_Template) Mask(mask string) Network_Firewall_Template { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Firewall_Template) Filter(filter string) Network_Firewall_Template { + r.Options.Filter = filter + return r +} + +func (r Network_Firewall_Template) Limit(limit int) Network_Firewall_Template { + r.Options.Limit = &limit + return r +} + +func (r Network_Firewall_Template) Offset(offset int) Network_Firewall_Template { + r.Options.Offset = &offset + return r +} + +// Get all available firewall template objects. +// +// ''getAllObjects'' returns an array of SoftLayer_Network_Firewall_Template objects upon success. +func (r Network_Firewall_Template) GetAllObjects() (resp []datatypes.Network_Firewall_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Template", "getAllObjects", nil, &r.Options, &resp) + return +} + +// getObject returns a SoftLayer_Network_Firewall_Template object. You can retrieve all available firewall templates. getAllObjects returns an array of all available SoftLayer_Network_Firewall_Template objects. You can use these templates to generate a [[SoftLayer Network Firewall Update Request]]. +// +// @SLDNDocumentation Service See Also SoftLayer_Network_Firewall_Update_Request +func (r Network_Firewall_Template) GetObject() (resp datatypes.Network_Firewall_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Template", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The rule set that belongs to this firewall rules template. +func (r Network_Firewall_Template) GetRules() (resp []datatypes.Network_Firewall_Template_Rule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Template", "getRules", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Firewall_Update_Request data type contains information relating to a SoftLayer network firewall update request. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. 
+type Network_Firewall_Update_Request struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkFirewallUpdateRequestService returns an instance of the Network_Firewall_Update_Request SoftLayer service +func GetNetworkFirewallUpdateRequestService(sess *session.Session) Network_Firewall_Update_Request { + return Network_Firewall_Update_Request{Session: sess} +} + +func (r Network_Firewall_Update_Request) Id(id int) Network_Firewall_Update_Request { + r.Options.Id = &id + return r +} + +func (r Network_Firewall_Update_Request) Mask(mask string) Network_Firewall_Update_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Firewall_Update_Request) Filter(filter string) Network_Firewall_Update_Request { + r.Options.Filter = filter + return r +} + +func (r Network_Firewall_Update_Request) Limit(limit int) Network_Firewall_Update_Request { + r.Options.Limit = &limit + return r +} + +func (r Network_Firewall_Update_Request) Offset(offset int) Network_Firewall_Update_Request { + r.Options.Offset = &offset + return r +} + +// Create a new firewall update request. The SoftLayer_Network_Firewall_Update_Request object passed to this function must have at least one rule. +// +// ''createObject'' returns a Boolean ''true'' on successful object creation or ''false'' if your firewall update request was unable to be created. +func (r Network_Firewall_Update_Request) CreateObject(templateObject *datatypes.Network_Firewall_Update_Request) (resp datatypes.Network_Firewall_Update_Request, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "createObject", params, &r.Options, &resp) + return +} + +// Retrieve The user that authorized this firewall update request. +func (r Network_Firewall_Update_Request) GetAuthorizingUser() (resp datatypes.User_Interface, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "getAuthorizingUser", nil, &r.Options, &resp) + return +} + +// Get the possible attribute values for a firewall update request rule. These are the valid values which may be submitted as rule parameters for a firewall update request. +// +// ''getFirewallUpdateRequestRuleAttributes'' returns a SoftLayer_Container_Utility_Network_Firewall_Rule_Attribute object upon success. +func (r Network_Firewall_Update_Request) GetFirewallUpdateRequestRuleAttributes() (resp datatypes.Container_Utility_Network_Firewall_Rule_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "getFirewallUpdateRequestRuleAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve The downstream virtual server that the rule set will be applied to. +func (r Network_Firewall_Update_Request) GetGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "getGuest", nil, &r.Options, &resp) + return +} + +// Retrieve The downstream server that the rule set will be applied to. +func (r Network_Firewall_Update_Request) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The network component firewall that the rule set will be applied to. 
+func (r Network_Firewall_Update_Request) GetNetworkComponentFirewall() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "getNetworkComponentFirewall", nil, &r.Options, &resp) + return +} + +// ''getObject'' returns a SoftLayer_Network_Firewall_Update_Request object. You can only get historical objects for servers attached to your account that have a network firewall enabled. ''createObject'' inserts a new SoftLayer_Network_Firewall_Update_Request object. You can only insert requests for servers attached to your account that have a network firewall enabled. ''getFirewallUpdateRequestRuleAttributes'' Get the possible attribute values for a firewall update request rule. +func (r Network_Firewall_Update_Request) GetObject() (resp datatypes.Network_Firewall_Update_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The group of rules contained within the update request. +func (r Network_Firewall_Update_Request) GetRules() (resp []datatypes.Network_Firewall_Update_Request_Rule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "getRules", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Firewall_Update_Request) UpdateRuleNote(fwRule *datatypes.Network_Component_Firewall_Rule, note *string) (resp bool, err error) { + params := []interface{}{ + fwRule, + note, + } + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "updateRuleNote", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Firewall_Update_Request_Rule type contains information relating to a SoftLayer network firewall update request rule. This rule is a member of a [[SoftLayer Network Firewall Update Request]]. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. +type Network_Firewall_Update_Request_Rule struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkFirewallUpdateRequestRuleService returns an instance of the Network_Firewall_Update_Request_Rule SoftLayer service +func GetNetworkFirewallUpdateRequestRuleService(sess *session.Session) Network_Firewall_Update_Request_Rule { + return Network_Firewall_Update_Request_Rule{Session: sess} +} + +func (r Network_Firewall_Update_Request_Rule) Id(id int) Network_Firewall_Update_Request_Rule { + r.Options.Id = &id + return r +} + +func (r Network_Firewall_Update_Request_Rule) Mask(mask string) Network_Firewall_Update_Request_Rule { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Firewall_Update_Request_Rule) Filter(filter string) Network_Firewall_Update_Request_Rule { + r.Options.Filter = filter + return r +} + +func (r Network_Firewall_Update_Request_Rule) Limit(limit int) Network_Firewall_Update_Request_Rule { + r.Options.Limit = &limit + return r +} + +func (r Network_Firewall_Update_Request_Rule) Offset(offset int) Network_Firewall_Update_Request_Rule { + r.Options.Offset = &offset + return r +} + +// Create a new firewall update request. The SoftLayer_Network_Firewall_Update_Request object passed to this function must have at least one rule. 
+// +// ''createObject'' returns a Boolean ''true'' on successful object creation or ''false'' if your firewall update request was unable to be created. +func (r Network_Firewall_Update_Request_Rule) CreateObject(templateObject *datatypes.Network_Firewall_Update_Request_Rule) (resp datatypes.Network_Firewall_Update_Request_Rule, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request_Rule", "createObject", params, &r.Options, &resp) + return +} + +// Retrieve The update request that this rule belongs to. +func (r Network_Firewall_Update_Request_Rule) GetFirewallUpdateRequest() (resp datatypes.Network_Firewall_Update_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request_Rule", "getFirewallUpdateRequest", nil, &r.Options, &resp) + return +} + +// getObject returns a SoftLayer_Network_Firewall_Update_Request_Rule object. You can only get historical objects for servers attached to your account that have a network firewall enabled. createObject inserts a new SoftLayer_Network_Firewall_Update_Request_Rule object. Use the SoftLayer_Network_Firewall_Update_Request to create groups of rules for an update request. +func (r Network_Firewall_Update_Request_Rule) GetObject() (resp datatypes.Network_Firewall_Update_Request_Rule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request_Rule", "getObject", nil, &r.Options, &resp) + return +} + +// Validate the supplied firewall request rule against the object it will apply to. For IPv4 rules, pass in an instance of SoftLayer_Network_Firewall_Update_Request_Rule. For IPv6 rules, pass in an instance of SoftLayer_Network_Firewall_Update_Request_Rule_Version6. The ID of the applied-to object can either be applyToComponentId (an ID of a SoftLayer_Network_Component_Firewall) or applyToAclId (an ID of a SoftLayer_Network_Firewall_Module_Context_Interface_AccessControlList). One, and only one, of applyToComponentId and applyToAclId can be specified. +// +// If validation is successful, nothing is returned. If validation is unsuccessful, an exception is thrown explaining the nature of the validation error.
+func (r Network_Firewall_Update_Request_Rule) ValidateRule(rule *datatypes.Network_Firewall_Update_Request_Rule, applyToComponentId *int, applyToAclId *int) (err error) { + var resp datatypes.Void + params := []interface{}{ + rule, + applyToComponentId, + applyToAclId, + } + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request_Rule", "validateRule", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Gateway struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkGatewayService returns an instance of the Network_Gateway SoftLayer service +func GetNetworkGatewayService(sess *session.Session) Network_Gateway { + return Network_Gateway{Session: sess} +} + +func (r Network_Gateway) Id(id int) Network_Gateway { + r.Options.Id = &id + return r +} + +func (r Network_Gateway) Mask(mask string) Network_Gateway { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Gateway) Filter(filter string) Network_Gateway { + r.Options.Filter = filter + return r +} + +func (r Network_Gateway) Limit(limit int) Network_Gateway { + r.Options.Limit = &limit + return r +} + +func (r Network_Gateway) Offset(offset int) Network_Gateway { + r.Options.Offset = &offset + return r +} + +// Start the asynchronous process to bypass all VLANs. Any VLANs that are already bypassed will be ignored. The status field can be checked for progress. +func (r Network_Gateway) BypassAllVlans() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "bypassAllVlans", nil, &r.Options, &resp) + return +} + +// Start the asynchronous process to bypass the provided VLANs. The VLANs must already be attached. Any VLANs that are already bypassed will be ignored. The status field can be checked for progress. +func (r Network_Gateway) BypassVlans(vlans []datatypes.Network_Gateway_Vlan) (err error) { + var resp datatypes.Void + params := []interface{}{ + vlans, + } + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "bypassVlans", params, &r.Options, &resp) + return +} + +// Create and return a new gateway. This object can be created with any number of members or VLANs, but they all must be in the same pod. By creating a gateway with members and/or VLANs attached, it is the equivalent of individually calling their createObject methods except this will start a single asynchronous process to setup the gateway. The status of this process can be checked using the status field. +func (r Network_Gateway) CreateObject(templateObject *datatypes.Network_Gateway) (resp datatypes.Network_Gateway, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "createObject", params, &r.Options, &resp) + return +} + +// Edit this gateway. Currently, the only value that can be edited is the name. +func (r Network_Gateway) EditObject(templateObject *datatypes.Network_Gateway) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account for this gateway. +func (r Network_Gateway) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve All VLANs trunked to this gateway. 
+func (r Network_Gateway) GetInsideVlans() (resp []datatypes.Network_Gateway_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getInsideVlans", nil, &r.Options, &resp) + return +} + +// Retrieve The members for this gateway. +func (r Network_Gateway) GetMembers() (resp []datatypes.Network_Gateway_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getMembers", nil, &r.Options, &resp) + return +} + +// Retrieve The firewall associated with this gateway, if any. +func (r Network_Gateway) GetNetworkFirewall() (resp datatypes.Network_Vlan_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getNetworkFirewall", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not there is a firewall associated with this gateway. +func (r Network_Gateway) GetNetworkFirewallFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getNetworkFirewallFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Gateway) GetObject() (resp datatypes.Network_Gateway, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getObject", nil, &r.Options, &resp) + return +} + +// Get all VLANs that can become inside VLANs on this gateway. This means the VLAN must not already be an inside VLAN, on the same router as this gateway, not a gateway transit VLAN, and not firewalled. +func (r Network_Gateway) GetPossibleInsideVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getPossibleInsideVlans", nil, &r.Options, &resp) + return +} + +// Retrieve The private gateway IP address. +func (r Network_Gateway) GetPrivateIpAddress() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getPrivateIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The private VLAN for accessing this gateway. +func (r Network_Gateway) GetPrivateVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getPrivateVlan", nil, &r.Options, &resp) + return +} + +// Retrieve The public gateway IP address. +func (r Network_Gateway) GetPublicIpAddress() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getPublicIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The public gateway IPv6 address. +func (r Network_Gateway) GetPublicIpv6Address() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getPublicIpv6Address", nil, &r.Options, &resp) + return +} + +// Retrieve The public VLAN for accessing this gateway. +func (r Network_Gateway) GetPublicVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getPublicVlan", nil, &r.Options, &resp) + return +} + +// Retrieve The current status of the gateway. +func (r Network_Gateway) GetStatus() (resp datatypes.Network_Gateway_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getStatus", nil, &r.Options, &resp) + return +} + +// Start the asynchronous process to unbypass all VLANs. Any VLANs that are already unbypassed will be ignored. The status field can be checked for progress. 
+func (r Network_Gateway) UnbypassAllVlans() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "unbypassAllVlans", nil, &r.Options, &resp) + return +} + +// Start the asynchronous process to unbypass the provided VLANs. The VLANs must already be attached. Any VLANs that are already unbypassed will be ignored. The status field can be checked for progress. +func (r Network_Gateway) UnbypassVlans(vlans []datatypes.Network_Gateway_Vlan) (err error) { + var resp datatypes.Void + params := []interface{}{ + vlans, + } + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "unbypassVlans", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Gateway_Member struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkGatewayMemberService returns an instance of the Network_Gateway_Member SoftLayer service +func GetNetworkGatewayMemberService(sess *session.Session) Network_Gateway_Member { + return Network_Gateway_Member{Session: sess} +} + +func (r Network_Gateway_Member) Id(id int) Network_Gateway_Member { + r.Options.Id = &id + return r +} + +func (r Network_Gateway_Member) Mask(mask string) Network_Gateway_Member { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Gateway_Member) Filter(filter string) Network_Gateway_Member { + r.Options.Filter = filter + return r +} + +func (r Network_Gateway_Member) Limit(limit int) Network_Gateway_Member { + r.Options.Limit = &limit + return r +} + +func (r Network_Gateway_Member) Offset(offset int) Network_Gateway_Member { + r.Options.Offset = &offset + return r +} + +// Create a new hardware member on the gateway. This also asynchronously sets up the network for this member. Progress of this process can be monitored via the gateway status. All members created with this object must have no VLANs attached. +func (r Network_Gateway_Member) CreateObject(templateObject *datatypes.Network_Gateway_Member) (resp datatypes.Network_Gateway_Member, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Member", "createObject", params, &r.Options, &resp) + return +} + +// Create multiple new hardware members on the gateway. This also asynchronously sets up the network for the members. Progress of this process can be monitored via the gateway status. All members created with this object must have no VLANs attached. +func (r Network_Gateway_Member) CreateObjects(templateObjects []datatypes.Network_Gateway_Member) (resp []datatypes.Network_Gateway_Member, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Member", "createObjects", params, &r.Options, &resp) + return +} + +// Retrieve The device for this member. +func (r Network_Gateway_Member) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Member", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The gateway this member belongs to. 
+func (r Network_Gateway_Member) GetNetworkGateway() (resp datatypes.Network_Gateway, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Member", "getNetworkGateway", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Gateway_Member) GetObject() (resp datatypes.Network_Gateway_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Member", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Gateway_Status struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkGatewayStatusService returns an instance of the Network_Gateway_Status SoftLayer service +func GetNetworkGatewayStatusService(sess *session.Session) Network_Gateway_Status { + return Network_Gateway_Status{Session: sess} +} + +func (r Network_Gateway_Status) Id(id int) Network_Gateway_Status { + r.Options.Id = &id + return r +} + +func (r Network_Gateway_Status) Mask(mask string) Network_Gateway_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Gateway_Status) Filter(filter string) Network_Gateway_Status { + r.Options.Filter = filter + return r +} + +func (r Network_Gateway_Status) Limit(limit int) Network_Gateway_Status { + r.Options.Limit = &limit + return r +} + +func (r Network_Gateway_Status) Offset(offset int) Network_Gateway_Status { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Gateway_Status) GetObject() (resp datatypes.Network_Gateway_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Status", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Gateway_Vlan struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkGatewayVlanService returns an instance of the Network_Gateway_Vlan SoftLayer service +func GetNetworkGatewayVlanService(sess *session.Session) Network_Gateway_Vlan { + return Network_Gateway_Vlan{Session: sess} +} + +func (r Network_Gateway_Vlan) Id(id int) Network_Gateway_Vlan { + r.Options.Id = &id + return r +} + +func (r Network_Gateway_Vlan) Mask(mask string) Network_Gateway_Vlan { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Gateway_Vlan) Filter(filter string) Network_Gateway_Vlan { + r.Options.Filter = filter + return r +} + +func (r Network_Gateway_Vlan) Limit(limit int) Network_Gateway_Vlan { + r.Options.Limit = &limit + return r +} + +func (r Network_Gateway_Vlan) Offset(offset int) Network_Gateway_Vlan { + r.Options.Offset = &offset + return r +} + +// Start the asynchronous process to bypass/unroute the VLAN from this gateway. +func (r Network_Gateway_Vlan) Bypass() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "bypass", nil, &r.Options, &resp) + return +} + +// Create a new VLAN attachment. If the bypassFlag is false, this will also create an asynchronous process to route the VLAN through the gateway. 
+func (r Network_Gateway_Vlan) CreateObject(templateObject *datatypes.Network_Gateway_Vlan) (resp datatypes.Network_Gateway_Vlan, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "createObject", params, &r.Options, &resp) + return +} + +// Create multiple new VLAN attachments. If the bypassFlag is false, this will also create an asynchronous process to route the VLANs through the gateway. +func (r Network_Gateway_Vlan) CreateObjects(templateObjects []datatypes.Network_Gateway_Vlan) (resp []datatypes.Network_Gateway_Vlan, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "createObjects", params, &r.Options, &resp) + return +} + +// Start the asynchronous process to detach this VLAN from the gateway. +func (r Network_Gateway_Vlan) DeleteObject() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "deleteObject", nil, &r.Options, &resp) + return +} + +// Detach several VLANs. This will not detach them right away, but rather start an asynchronous process to detach. +func (r Network_Gateway_Vlan) DeleteObjects(templateObjects []datatypes.Network_Gateway_Vlan) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "deleteObjects", params, &r.Options, &resp) + return +} + +// Retrieve The gateway this VLAN is attached to. +func (r Network_Gateway_Vlan) GetNetworkGateway() (resp datatypes.Network_Gateway, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "getNetworkGateway", nil, &r.Options, &resp) + return +} + +// Retrieve The network VLAN record. +func (r Network_Gateway_Vlan) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "getNetworkVlan", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Gateway_Vlan) GetObject() (resp datatypes.Network_Gateway_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "getObject", nil, &r.Options, &resp) + return +} + +// Start the asynchronous process to route the VLAN to this gateway.
+func (r Network_Gateway_Vlan) Unbypass() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "unbypass", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Interconnect_Tenant struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkInterconnectTenantService returns an instance of the Network_Interconnect_Tenant SoftLayer service +func GetNetworkInterconnectTenantService(sess *session.Session) Network_Interconnect_Tenant { + return Network_Interconnect_Tenant{Session: sess} +} + +func (r Network_Interconnect_Tenant) Id(id int) Network_Interconnect_Tenant { + r.Options.Id = &id + return r +} + +func (r Network_Interconnect_Tenant) Mask(mask string) Network_Interconnect_Tenant { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Interconnect_Tenant) Filter(filter string) Network_Interconnect_Tenant { + r.Options.Filter = filter + return r +} + +func (r Network_Interconnect_Tenant) Limit(limit int) Network_Interconnect_Tenant { + r.Options.Limit = &limit + return r +} + +func (r Network_Interconnect_Tenant) Offset(offset int) Network_Interconnect_Tenant { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Interconnect_Tenant) GetAllObjects() (resp []datatypes.Network_Interconnect_Tenant, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Interconnect_Tenant", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Interconnect_Tenant) GetAllPortLabelsWithCurrentUsage(directLinkLocationId *int) (resp []string, err error) { + params := []interface{}{ + directLinkLocationId, + } + err = r.Session.DoRequest("SoftLayer_Network_Interconnect_Tenant", "getAllPortLabelsWithCurrentUsage", params, &r.Options, &resp) + return +} + +// Retrieve The billing item for a network interconnect. 
+func (r Network_Interconnect_Tenant) GetBillingItem() (resp datatypes.Billing_Item_Network_Interconnect, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Interconnect_Tenant", "getBillingItem", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Interconnect_Tenant) GetConnection(serviceKey *string) (resp string, err error) { + params := []interface{}{ + serviceKey, + } + err = r.Session.DoRequest("SoftLayer_Network_Interconnect_Tenant", "getConnection", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Interconnect_Tenant) GetDatacenterName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Interconnect_Tenant", "getDatacenterName", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Interconnect_Tenant) GetDirectLinkSpeeds(offeringType *string) (resp string, err error) { + params := []interface{}{ + offeringType, + } + err = r.Session.DoRequest("SoftLayer_Network_Interconnect_Tenant", "getDirectLinkSpeeds", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Interconnect_Tenant) GetNetworkZones() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Interconnect_Tenant", "getNetworkZones", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Interconnect_Tenant) GetObject() (resp datatypes.Network_Interconnect_Tenant, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Interconnect_Tenant", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Interconnect_Tenant) GetPortLabel() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Interconnect_Tenant", "getPortLabel", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Interconnect_Tenant) GetPorts(provider *string) (resp string, err error) { + params := []interface{}{ + provider, + } + err = r.Session.DoRequest("SoftLayer_Network_Interconnect_Tenant", "getPorts", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Interconnect_Tenant) GetServiceType() (resp datatypes.Network_DirectLink_ServiceType, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Interconnect_Tenant", "getServiceType", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Interconnect_Tenant) GetVendorName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Interconnect_Tenant", "getVendorName", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Interconnect_Tenant) GetZoneName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Interconnect_Tenant", "getZoneName", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Interconnect_Tenant) IsAdnAccount() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Interconnect_Tenant", "isAdnAccount", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_LBaaS_HealthMonitor type presents a structure containing attributes of a health monitor object associated with load balancer instance. Note that the relationship between backend (pool) and health monitor is N-to-1, especially that the pools object associated with a health monitor must have the same pair of protocol and port. 
+// Example: frontend FA: http, 80 - backend BA: tcp, 3456 - healthmonitor HM_tcp3456 frontend FB: https, 443 - backend BB: tcp, 3456 - healthmonitor HM_tcp3456 In the above example both backends BA and BB share the same healthmonitor HM_tcp3456 +type Network_LBaaS_HealthMonitor struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLBaaSHealthMonitorService returns an instance of the Network_LBaaS_HealthMonitor SoftLayer service +func GetNetworkLBaaSHealthMonitorService(sess *session.Session) Network_LBaaS_HealthMonitor { + return Network_LBaaS_HealthMonitor{Session: sess} +} + +func (r Network_LBaaS_HealthMonitor) Id(id int) Network_LBaaS_HealthMonitor { + r.Options.Id = &id + return r +} + +func (r Network_LBaaS_HealthMonitor) Mask(mask string) Network_LBaaS_HealthMonitor { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LBaaS_HealthMonitor) Filter(filter string) Network_LBaaS_HealthMonitor { + r.Options.Filter = filter + return r +} + +func (r Network_LBaaS_HealthMonitor) Limit(limit int) Network_LBaaS_HealthMonitor { + r.Options.Limit = &limit + return r +} + +func (r Network_LBaaS_HealthMonitor) Offset(offset int) Network_LBaaS_HealthMonitor { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_LBaaS_HealthMonitor) GetObject() (resp datatypes.Network_LBaaS_HealthMonitor, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_HealthMonitor", "getObject", nil, &r.Options, &resp) + return +} + +// Update a load balancer's health monitors and return the load balancer object with listeners (frontend), pools (backend), health monitor server instances (members) and datacenter populated +func (r Network_LBaaS_HealthMonitor) UpdateLoadBalancerHealthMonitors(loadBalancerUuid *string, healthMonitorConfigurations []datatypes.Network_LBaaS_LoadBalancerHealthMonitorConfiguration) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + loadBalancerUuid, + healthMonitorConfigurations, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_HealthMonitor", "updateLoadBalancerHealthMonitors", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_LBaaS_Listener type presents a data structure for a load balancer's listener, also called frontend.
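A minimal usage sketch of the listener service described above (not part of the vendored file), assuming an authenticated *session.Session. The protocol-configuration field names (FrontendProtocol, FrontendPort, BackendProtocol, BackendPort, LoadBalancingMethod) are assumptions based on the SLDN protocol-configuration container; only the service call itself comes from this patch.

package main

import (
	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// addHTTPListener creates one frontend/backend pair via updateLoadBalancerProtocols.
func addHTTPListener(sess *session.Session, lbUuid string) (datatypes.Network_LBaaS_LoadBalancer, error) {
	frontendProto, backendProto := "HTTP", "HTTP"
	frontendPort, backendPort := 80, 8080
	method := "ROUNDROBIN"

	cfg := datatypes.Network_LBaaS_LoadBalancerProtocolConfiguration{
		FrontendProtocol:    &frontendProto,
		FrontendPort:        &frontendPort,
		BackendProtocol:     &backendProto,
		BackendPort:         &backendPort,
		LoadBalancingMethod: &method,
	}

	// No ListenerUuid is set, so this creates a new frontend/backend pair rather
	// than updating an existing one (see the updateLoadBalancerProtocols note above).
	return services.GetNetworkLBaaSListenerService(sess).
		UpdateLoadBalancerProtocols(&lbUuid, []datatypes.Network_LBaaS_LoadBalancerProtocolConfiguration{cfg})
}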
+type Network_LBaaS_Listener struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLBaaSListenerService returns an instance of the Network_LBaaS_Listener SoftLayer service +func GetNetworkLBaaSListenerService(sess *session.Session) Network_LBaaS_Listener { + return Network_LBaaS_Listener{Session: sess} +} + +func (r Network_LBaaS_Listener) Id(id int) Network_LBaaS_Listener { + r.Options.Id = &id + return r +} + +func (r Network_LBaaS_Listener) Mask(mask string) Network_LBaaS_Listener { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LBaaS_Listener) Filter(filter string) Network_LBaaS_Listener { + r.Options.Filter = filter + return r +} + +func (r Network_LBaaS_Listener) Limit(limit int) Network_LBaaS_Listener { + r.Options.Limit = &limit + return r +} + +func (r Network_LBaaS_Listener) Offset(offset int) Network_LBaaS_Listener { + r.Options.Offset = &offset + return r +} + +// Delete load balancers front- and backend protocols and return load balancer object with listeners (frontend), pools (backend), server instances (members) and datacenter populated. +func (r Network_LBaaS_Listener) DeleteLoadBalancerProtocols(loadBalancerUuid *string, listenerUuids []string) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + loadBalancerUuid, + listenerUuids, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Listener", "deleteLoadBalancerProtocols", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_LBaaS_Listener) GetDefaultPool() (resp datatypes.Network_LBaaS_Pool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Listener", "getDefaultPool", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_LBaaS_Listener) GetObject() (resp datatypes.Network_LBaaS_Listener, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Listener", "getObject", nil, &r.Options, &resp) + return +} + +// Update (create) load balancers front- and backend protocols and return load balancer object with listeners (frontend), pools (backend), server instances (members) and datacenter populated. Note if a protocolConfiguration has no listenerUuid set, this function will create the specified front- and backend accordingly. Otherwise the given front- and backend will be updated with the new protocol and port. +func (r Network_LBaaS_Listener) UpdateLoadBalancerProtocols(loadBalancerUuid *string, protocolConfigurations []datatypes.Network_LBaaS_LoadBalancerProtocolConfiguration) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + loadBalancerUuid, + protocolConfigurations, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Listener", "updateLoadBalancerProtocols", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_LBaaS_LoadBalancer type presents a structure containing attributes of a load balancer, and its related objects including listeners, pools and members. 
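A short read-only sketch of the load balancer service documented above (not part of the vendored file), assuming an authenticated *session.Session; the Name field on the returned datatype is an assumption based on the SLDN properties.

package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// describeLoadBalancer fetches a load balancer by UUID and reports member health.
func describeLoadBalancer(sess *session.Session, uuid string) error {
	svc := services.GetNetworkLBaaSLoadBalancerService(sess)

	lb, err := svc.GetLoadBalancer(&uuid)
	if err != nil {
		return err
	}
	fmt.Printf("load balancer %s\n", *lb.Name)

	// Per-pool member health, as returned by getLoadBalancerMemberHealth.
	health, err := svc.GetLoadBalancerMemberHealth(&uuid)
	if err != nil {
		return err
	}
	fmt.Printf("%d backend pools reported health\n", len(health))
	return nil
}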
+type Network_LBaaS_LoadBalancer struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLBaaSLoadBalancerService returns an instance of the Network_LBaaS_LoadBalancer SoftLayer service +func GetNetworkLBaaSLoadBalancerService(sess *session.Session) Network_LBaaS_LoadBalancer { + return Network_LBaaS_LoadBalancer{Session: sess} +} + +func (r Network_LBaaS_LoadBalancer) Id(id int) Network_LBaaS_LoadBalancer { + r.Options.Id = &id + return r +} + +func (r Network_LBaaS_LoadBalancer) Mask(mask string) Network_LBaaS_LoadBalancer { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LBaaS_LoadBalancer) Filter(filter string) Network_LBaaS_LoadBalancer { + r.Options.Filter = filter + return r +} + +func (r Network_LBaaS_LoadBalancer) Limit(limit int) Network_LBaaS_LoadBalancer { + r.Options.Limit = &limit + return r +} + +func (r Network_LBaaS_LoadBalancer) Offset(offset int) Network_LBaaS_LoadBalancer { + r.Options.Offset = &offset + return r +} + +// Cancel a load balancer with the given uuid. The billing system will execute the deletion of load balancer and all objects associated with it such as load balancer appliances, listeners, pools and members in the background. +func (r Network_LBaaS_LoadBalancer) CancelLoadBalancer(uuid *string) (resp bool, err error) { + params := []interface{}{ + uuid, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "cancelLoadBalancer", params, &r.Options, &resp) + return +} + +// Return all existing load balancers +func (r Network_LBaaS_LoadBalancer) GetAllObjects() (resp []datatypes.Network_LBaaS_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve Datacenter, where load balancer is located. +func (r Network_LBaaS_LoadBalancer) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve Health monitors for the backend members. +func (r Network_LBaaS_LoadBalancer) GetHealthMonitors() (resp []datatypes.Network_LBaaS_HealthMonitor, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getHealthMonitors", nil, &r.Options, &resp) + return +} + +// Return listener time series datapoints. The time series data is available for Throughput, ConnectionRate and ActiveConnections. Throughput is in bits per second. The values are an average over the time range. The time series data is available for 1hour, 6hours, 12hours, 1day, 1week or 2weeks. +// +// +func (r Network_LBaaS_LoadBalancer) GetListenerTimeSeriesData(loadBalancerUuid *string, metricName *string, timeRange *string, listenerUuid *string) (resp []datatypes.Network_LBaaS_LoadBalancerMonitoringMetricDataPoint, err error) { + params := []interface{}{ + loadBalancerUuid, + metricName, + timeRange, + listenerUuid, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getListenerTimeSeriesData", params, &r.Options, &resp) + return +} + +// Retrieve Listeners assigned to load balancer. 
+func (r Network_LBaaS_LoadBalancer) GetListeners() (resp []datatypes.Network_LBaaS_Listener, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getListeners", nil, &r.Options, &resp) + return +} + +// Get the load balancer object with given uuid. +func (r Network_LBaaS_LoadBalancer) GetLoadBalancer(uuid *string) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + uuid, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getLoadBalancer", params, &r.Options, &resp) + return +} + +// Return load balancer members health +func (r Network_LBaaS_LoadBalancer) GetLoadBalancerMemberHealth(uuid *string) (resp []datatypes.Network_LBaaS_PoolMembersHealth, err error) { + params := []interface{}{ + uuid, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getLoadBalancerMemberHealth", params, &r.Options, &resp) + return +} + +// Return load balancers statistics such as total number of current sessions and total number of accumulated connections. +func (r Network_LBaaS_LoadBalancer) GetLoadBalancerStatistics(uuid *string) (resp datatypes.Network_LBaaS_LoadBalancerStatistics, err error) { + params := []interface{}{ + uuid, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getLoadBalancerStatistics", params, &r.Options, &resp) + return +} + +// Retrieve Members assigned to load balancer. +func (r Network_LBaaS_LoadBalancer) GetMembers() (resp []datatypes.Network_LBaaS_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getMembers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_LBaaS_LoadBalancer) GetObject() (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve list of preferred custom ciphers configured for the load balancer. +func (r Network_LBaaS_LoadBalancer) GetSslCiphers() (resp []datatypes.Network_LBaaS_SSLCipher, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getSslCiphers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_LBaaS_LoadBalancer) ServiceLoadBalancer(data *string) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + data, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "serviceLoadBalancer", params, &r.Options, &resp) + return +} + +// Update load balancer's description, and return the load balancer object containing all listeners, pools, members and datacenter. +func (r Network_LBaaS_LoadBalancer) UpdateLoadBalancer(uuid *string, newDescription *string) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + uuid, + newDescription, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "updateLoadBalancer", params, &r.Options, &resp) + return +} + +// Updates the load balancer with the new cipher list. All new connections going forward will use the new set of ciphers selected by the user. 
+func (r Network_LBaaS_LoadBalancer) UpdateSslCiphers(loadBalancerUuid *string, cipherList []int) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + loadBalancerUuid, + cipherList, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "updateSslCiphers", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_LBaaS_Member represents the backend member for a load balancer. It can be either a virtual server or a bare metal machine. +type Network_LBaaS_Member struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLBaaSMemberService returns an instance of the Network_LBaaS_Member SoftLayer service +func GetNetworkLBaaSMemberService(sess *session.Session) Network_LBaaS_Member { + return Network_LBaaS_Member{Session: sess} +} + +func (r Network_LBaaS_Member) Id(id int) Network_LBaaS_Member { + r.Options.Id = &id + return r +} + +func (r Network_LBaaS_Member) Mask(mask string) Network_LBaaS_Member { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LBaaS_Member) Filter(filter string) Network_LBaaS_Member { + r.Options.Filter = filter + return r +} + +func (r Network_LBaaS_Member) Limit(limit int) Network_LBaaS_Member { + r.Options.Limit = &limit + return r +} + +func (r Network_LBaaS_Member) Offset(offset int) Network_LBaaS_Member { + r.Options.Offset = &offset + return r +} + +// Add server instances as members to load balancer and return it with listeners, pools and members populated +func (r Network_LBaaS_Member) AddLoadBalancerMembers(loadBalancerUuid *string, serverInstances []datatypes.Network_LBaaS_LoadBalancerServerInstanceInfo) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + loadBalancerUuid, + serverInstances, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Member", "addLoadBalancerMembers", params, &r.Options, &resp) + return +} + +// Delete given members from load balancer and return load balancer object with listeners, pools and members populated +func (r Network_LBaaS_Member) DeleteLoadBalancerMembers(loadBalancerUuid *string, memberUuids []string) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + loadBalancerUuid, + memberUuids, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Member", "deleteLoadBalancerMembers", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_LBaaS_Member) GetObject() (resp datatypes.Network_LBaaS_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Member", "getObject", nil, &r.Options, &resp) + return +} + +// Update members weight and return load balancer object with listeners, pools and members populated +func (r Network_LBaaS_Member) UpdateLoadBalancerMembers(loadBalancerUuid *string, members []datatypes.Network_LBaaS_Member) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + loadBalancerUuid, + members, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Member", "updateLoadBalancerMembers", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_LBaaS_SSLCipher type presents a structure that contains attributes of load balancer cipher suites. 
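The sketch below (illustrative only, not part of the vendored file) shows how the cipher service pairs with the load balancer's updateSslCiphers call, assuming an authenticated *session.Session; the Id and Name fields on the cipher datatype are assumptions based on the SLDN properties.

package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// applyAllCiphers lists every supported cipher, then hands the whole id list
// back to the load balancer so new connections negotiate from it.
func applyAllCiphers(sess *session.Session, lbUuid string) error {
	ciphers, err := services.GetNetworkLBaaSSSLCipherService(sess).GetAllObjects()
	if err != nil {
		return err
	}

	ids := make([]int, 0, len(ciphers))
	for _, c := range ciphers {
		fmt.Println(*c.Name)
		ids = append(ids, *c.Id)
	}

	_, err = services.GetNetworkLBaaSLoadBalancerService(sess).UpdateSslCiphers(&lbUuid, ids)
	return err
}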
+// +// +type Network_LBaaS_SSLCipher struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLBaaSSSLCipherService returns an instance of the Network_LBaaS_SSLCipher SoftLayer service +func GetNetworkLBaaSSSLCipherService(sess *session.Session) Network_LBaaS_SSLCipher { + return Network_LBaaS_SSLCipher{Session: sess} +} + +func (r Network_LBaaS_SSLCipher) Id(id int) Network_LBaaS_SSLCipher { + r.Options.Id = &id + return r +} + +func (r Network_LBaaS_SSLCipher) Mask(mask string) Network_LBaaS_SSLCipher { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LBaaS_SSLCipher) Filter(filter string) Network_LBaaS_SSLCipher { + r.Options.Filter = filter + return r +} + +func (r Network_LBaaS_SSLCipher) Limit(limit int) Network_LBaaS_SSLCipher { + r.Options.Limit = &limit + return r +} + +func (r Network_LBaaS_SSLCipher) Offset(offset int) Network_LBaaS_SSLCipher { + r.Options.Offset = &offset + return r +} + +// Returns all supported cipher list +func (r Network_LBaaS_SSLCipher) GetAllObjects() (resp []datatypes.Network_LBaaS_SSLCipher, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_SSLCipher", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_LBaaS_SSLCipher) GetObject() (resp datatypes.Network_LBaaS_SSLCipher, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_SSLCipher", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_LoadBalancer_Global_Account data type contains the properties for a single global load balancer account. The properties you are able to edit are fallbackIp, loadBalanceTypeId, and notes. The hosts relational property can be used for creating and editing hosts that belong to the global load balancer account. The [[SoftLayer_Network_LoadBalancer_Global_Account::editObject|editObject]] method contains details on creating and edited hosts through the hosts relational property. 
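A minimal sketch of the editObject workflow described above (not part of the vendored file), assuming an authenticated *session.Session. The Go field names mirror the SLDN properties listed in the editObject documentation (hosts, destinationIp, destinationPort, weight, healthCheck, enabled) and should be treated as assumptions.

package main

import (
	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// addGlobalHost attaches one new host to an existing global load balancer account.
func addGlobalHost(sess *session.Session, accountId int, destinationIp string) (bool, error) {
	port, weight, enabled := 80, 25, 1
	check := "http"

	template := datatypes.Network_LoadBalancer_Global_Account{
		Hosts: []datatypes.Network_LoadBalancer_Global_Host{{
			// No Id is set, so editObject creates this host instead of updating one.
			DestinationIp:   &destinationIp,
			DestinationPort: &port,
			Weight:          &weight,
			HealthCheck:     &check,
			Enabled:         &enabled,
		}},
	}

	return services.GetNetworkLoadBalancerGlobalAccountService(sess).
		Id(accountId).
		EditObject(&template)
}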
+type Network_LoadBalancer_Global_Account struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLoadBalancerGlobalAccountService returns an instance of the Network_LoadBalancer_Global_Account SoftLayer service +func GetNetworkLoadBalancerGlobalAccountService(sess *session.Session) Network_LoadBalancer_Global_Account { + return Network_LoadBalancer_Global_Account{Session: sess} +} + +func (r Network_LoadBalancer_Global_Account) Id(id int) Network_LoadBalancer_Global_Account { + r.Options.Id = &id + return r +} + +func (r Network_LoadBalancer_Global_Account) Mask(mask string) Network_LoadBalancer_Global_Account { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LoadBalancer_Global_Account) Filter(filter string) Network_LoadBalancer_Global_Account { + r.Options.Filter = filter + return r +} + +func (r Network_LoadBalancer_Global_Account) Limit(limit int) Network_LoadBalancer_Global_Account { + r.Options.Limit = &limit + return r +} + +func (r Network_LoadBalancer_Global_Account) Offset(offset int) Network_LoadBalancer_Global_Account { + r.Options.Offset = &offset + return r +} + +// If your globally load balanced domain is hosted on the SoftLayer nameservers this method will add the required NS resource record to your DNS zone file and remove any A records that match the host portion of a global load balancer account hostname. A NS resource record is required to be able to use your SoftLayer global load balancer account. Please make sure the zone file for the hostname listed on your SoftLayer global load balancer account is setup prior to using this method. If your globally load balanced domain is hosted on any other nameservers this method will not be able to add the required NS record. +func (r Network_LoadBalancer_Global_Account) AddNsRecord() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "addNsRecord", nil, &r.Options, &resp) + return +} + +// Edit the properties of a global load balancer account by passing in a modified instance of the object. The global load balancer account properties you are able to edit are: fallback ip, load balance type id, and notes. Hosts that belong to your SoftLayer global load balancer account are created and modified through this method. An example templateObject that updates global load balancer account properties, updates the properties of a host, and adds a new host is shown below: +// +// +// * id: 2 +// * loadBalanceTypeId: 2 +// * notes: Notes updated +// * fallbackIp: 1.1.1.1 +// * hosts: +// ** id: 19 +// ** destinationIp: 2.2.2.2 +// ** weight: 25 +// ** healthCheck: http +// ** destinationPort: 80 +// ** enabled: 1

+// ** destinationIp: 3.3.3.3 +// ** weight: 25 +// ** healthCheck: http +// ** destinationPort: 80 +// ** enabled: 1 +// +// +// +// +// The first section contains the properties of the global load balancer account that will be updated, while the second section contains the elements of the 'hosts' property of the global load balancer account. The first host listed will have its properties updated because the 'id' property of the host is set, meaning the global load balancer host with an id of 19 will be updated. The second host listed will be created because it lacks the 'id' property. +// +// There is a limit to the maximum number of hosts that you are allowed to add; it is defined by the allowedNumberOfHosts property on the global load balancer account. The destination IP address of a host must be an IP address that belongs to your SoftLayer Account, or a local load balancer virtual IP address that belongs to your account. The destination IP address and destination port are required and must be provided when creating a host. +func (r Network_LoadBalancer_Global_Account) EditObject(templateObject *datatypes.Network_LoadBalancer_Global_Account) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve Your SoftLayer customer account. +func (r Network_LoadBalancer_Global_Account) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for a Global Load Balancer account. +func (r Network_LoadBalancer_Global_Account) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The hosts in the load balancing pool for a global load balancer account. +func (r Network_LoadBalancer_Global_Account) GetHosts() (resp []datatypes.Network_LoadBalancer_Global_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "getHosts", nil, &r.Options, &resp) + return +} + +// Retrieve The load balance method of a global load balancer account +func (r Network_LoadBalancer_Global_Account) GetLoadBalanceType() (resp datatypes.Network_LoadBalancer_Global_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "getLoadBalanceType", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the global load balancer is a managed resource. +func (r Network_LoadBalancer_Global_Account) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_LoadBalancer_Global_Account object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_LoadBalancer_Global_Account service. You can only retrieve a global load balancer account that is assigned to your SoftLayer customer account.
+func (r Network_LoadBalancer_Global_Account) GetObject() (resp datatypes.Network_LoadBalancer_Global_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "getObject", nil, &r.Options, &resp) + return +} + +// If your globally load balanced domain is hosted on the SoftLayer nameservers this method will remove the NS resource record from your DNS zone file. Removing the NS resource record will basically disable your global load balancer account since no DNS requests will be forwarded to the global load balancers. Any A records that were removed when the NS resource record was added will not be created for you. If your globally load balanced domain is hosted on any other nameservers this method will not be able to remove the required NS record. +func (r Network_LoadBalancer_Global_Account) RemoveNsRecord() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "removeNsRecord", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_LoadBalancer_Global_Host data type represents a single host that belongs to a global load balancer account's load balancing pool. +// +// The destination IP address of a host must be one that belongs to your SoftLayer customer account, or to a datacenter load balancer virtual ip that belongs to your SoftLayer customer account. The destination IP address and port of a global load balancer host is a required field and must exist during creation and can not be removed. The acceptable values for the health check type are 'none', 'http', and 'tcp'. The status property is updated in 5 minute intervals and the hits property is updated in 10 minute intervals. +// +// The order of the host is only important if you are using the 'failover' load balance method, and the weight is only important if you are using the 'weighted round robin' load balance method. +type Network_LoadBalancer_Global_Host struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLoadBalancerGlobalHostService returns an instance of the Network_LoadBalancer_Global_Host SoftLayer service +func GetNetworkLoadBalancerGlobalHostService(sess *session.Session) Network_LoadBalancer_Global_Host { + return Network_LoadBalancer_Global_Host{Session: sess} +} + +func (r Network_LoadBalancer_Global_Host) Id(id int) Network_LoadBalancer_Global_Host { + r.Options.Id = &id + return r +} + +func (r Network_LoadBalancer_Global_Host) Mask(mask string) Network_LoadBalancer_Global_Host { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LoadBalancer_Global_Host) Filter(filter string) Network_LoadBalancer_Global_Host { + r.Options.Filter = filter + return r +} + +func (r Network_LoadBalancer_Global_Host) Limit(limit int) Network_LoadBalancer_Global_Host { + r.Options.Limit = &limit + return r +} + +func (r Network_LoadBalancer_Global_Host) Offset(offset int) Network_LoadBalancer_Global_Host { + r.Options.Offset = &offset + return r +} + +// Remove a host from the load balancing pool of a global load balancer account. +func (r Network_LoadBalancer_Global_Host) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Host", "deleteObject", nil, &r.Options, &resp) + return +} + +// Retrieve The global load balancer account a host belongs to. 
+func (r Network_LoadBalancer_Global_Host) GetLoadBalancerAccount() (resp datatypes.Network_LoadBalancer_Global_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Host", "getLoadBalancerAccount", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_LoadBalancer_Global_Host object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_LoadBalancer_Global_Host service. You can only retrieve a global load balancer host that is assigned to your SoftLayer global load balancer account. +func (r Network_LoadBalancer_Global_Host) GetObject() (resp datatypes.Network_LoadBalancer_Global_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Host", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_LoadBalancer_Service data type contains all the information relating to a specific service (destination) on a particular load balancer. +// +// Information retained on the object itself is the source and destination of the service, routing type, weight, and whether or not the service is currently enabled. +type Network_LoadBalancer_Service struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLoadBalancerServiceService returns an instance of the Network_LoadBalancer_Service SoftLayer service +func GetNetworkLoadBalancerServiceService(sess *session.Session) Network_LoadBalancer_Service { + return Network_LoadBalancer_Service{Session: sess} +} + +func (r Network_LoadBalancer_Service) Id(id int) Network_LoadBalancer_Service { + r.Options.Id = &id + return r +} + +func (r Network_LoadBalancer_Service) Mask(mask string) Network_LoadBalancer_Service { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LoadBalancer_Service) Filter(filter string) Network_LoadBalancer_Service { + r.Options.Filter = filter + return r +} + +func (r Network_LoadBalancer_Service) Limit(limit int) Network_LoadBalancer_Service { + r.Options.Limit = &limit + return r +} + +func (r Network_LoadBalancer_Service) Offset(offset int) Network_LoadBalancer_Service { + r.Options.Offset = &offset + return r +} + +// Calling deleteObject on a particular server will remove it from the load balancer. This is the only way to remove a service from your load balancer. If you wish to remove a server, first call this function, then reload the virtualIpAddress object and edit the remaining services to reflect the other changes that you wish to make. +func (r Network_LoadBalancer_Service) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Service", "deleteObject", nil, &r.Options, &resp) + return +} + +// Get the graph image for a load balancer service based on the supplied graph type and metric. The available graph types are: 'connections' and 'status', and the available metrics are: 'day', 'week' and 'month'. +// +// This method returns the raw binary image data.
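+//
+// A usage sketch (not part of the generated SLDN documentation; it assumes sess
+// is an authenticated *session.Session, and the service ID and output file name
+// are placeholders):
+//
+//    svc := GetNetworkLoadBalancerServiceService(sess).Id(4321)
+//    img, err := svc.GetGraphImage(sl.String("connections"), sl.String("day"))
+//    if err == nil {
+//        // img holds the raw binary image data returned by the API
+//        _ = os.WriteFile("connections-day.img", img, 0644)
+//    }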
+func (r Network_LoadBalancer_Service) GetGraphImage(graphType *string, metric *string) (resp []byte, err error) { + params := []interface{}{ + graphType, + metric, + } + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Service", "getGraphImage", params, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_LoadBalancer_Service object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_LoadBalancer_Service service. You can only retrieve services on load balancers assigned to your account, and it is recommended that you simply retrieve the entire load balancer, as an individual service has no explicit purpose without its "siblings". +func (r Network_LoadBalancer_Service) GetObject() (resp datatypes.Network_LoadBalancer_Service, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Service", "getObject", nil, &r.Options, &resp) + return +} + +// Returns an array of SoftLayer_Container_Network_LoadBalancer_StatusEntry objects. A SoftLayer_Container_Network_LoadBalancer_StatusEntry object has two variables, "Label" and "Value" +// +// Calling this function executes a command on the physical load balancer itself, and therefore should be called infrequently. For a general idea of the load balancer service, use the "peakConnections" variable on the Type +// +// Possible values for "Label" are: +// +// +// * IP Address +// * Port +// * Server Status +// * Load Status +// * Current Connections +// * Total Hits +// +// +// Not all labels are guaranteed to be returned. +func (r Network_LoadBalancer_Service) GetStatus() (resp []datatypes.Container_Network_LoadBalancer_StatusEntry, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Service", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The load balancer that this service belongs to. +func (r Network_LoadBalancer_Service) GetVip() (resp datatypes.Network_LoadBalancer_VirtualIpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Service", "getVip", nil, &r.Options, &resp) + return +} + +// Calling resetPeakConnections will set the peakConnections variable to zero on this particular object. Peak connections will continue to increase normally after this method call, it will only temporarily reset the statistic to zero, until the next time it is polled. +func (r Network_LoadBalancer_Service) ResetPeakConnections() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Service", "resetPeakConnections", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_LoadBalancer_VirtualIpAddress data type contains all the information relating to a specific load balancer assigned to a customer account. +// +// Information retained on the object itself is the virtual IP address, load balancing method, and any notes that are related to the load balancer. There is also an array of SoftLayer_Network_LoadBalancer_Service objects, which represent the load balancer services, explained more fully in the SoftLayer_Network_LoadBalancer_Service documentation. 
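+//
+// A usage sketch (not part of the generated SLDN documentation; it assumes sess
+// is an authenticated *session.Session, and the ID and object mask below are
+// placeholders):
+//
+//    vipSvc := GetNetworkLoadBalancerVirtualIpAddressService(sess).Id(112233).Mask("services")
+//    vip, err := vipSvc.GetObject()
+//    if err != nil {
+//        // handle the SoftLayer API error
+//    }
+//    _ = vip // the returned object includes the relational properties named in the mask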
+type Network_LoadBalancer_VirtualIpAddress struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLoadBalancerVirtualIpAddressService returns an instance of the Network_LoadBalancer_VirtualIpAddress SoftLayer service +func GetNetworkLoadBalancerVirtualIpAddressService(sess *session.Session) Network_LoadBalancer_VirtualIpAddress { + return Network_LoadBalancer_VirtualIpAddress{Session: sess} +} + +func (r Network_LoadBalancer_VirtualIpAddress) Id(id int) Network_LoadBalancer_VirtualIpAddress { + r.Options.Id = &id + return r +} + +func (r Network_LoadBalancer_VirtualIpAddress) Mask(mask string) Network_LoadBalancer_VirtualIpAddress { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LoadBalancer_VirtualIpAddress) Filter(filter string) Network_LoadBalancer_VirtualIpAddress { + r.Options.Filter = filter + return r +} + +func (r Network_LoadBalancer_VirtualIpAddress) Limit(limit int) Network_LoadBalancer_VirtualIpAddress { + r.Options.Limit = &limit + return r +} + +func (r Network_LoadBalancer_VirtualIpAddress) Offset(offset int) Network_LoadBalancer_VirtualIpAddress { + r.Options.Offset = &offset + return r +} + +// Disable a Virtual IP Address, removing it from load balancer rotation and denying all connections to that IP address. +func (r Network_LoadBalancer_VirtualIpAddress) Disable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "disable", nil, &r.Options, &resp) + return +} + +// Like any other API object, the load balancers can have their exposed properties edited by passing in a modified version of the object. The load balancer object also can modify its services in this way. Simply request the load balancer object you wish to edit, then modify the objects in the services array and pass the modified object to this function. WARNING: Services cannot be deleted in this manner, you must call deleteObject() on the service to physically remove them from the load balancer. +func (r Network_LoadBalancer_VirtualIpAddress) EditObject(templateObject *datatypes.Network_LoadBalancer_VirtualIpAddress) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "editObject", params, &r.Options, &resp) + return +} + +// Enable a disabled Virtual IP Address, allowing connections back to the IP address. +func (r Network_LoadBalancer_VirtualIpAddress) Enable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "enable", nil, &r.Options, &resp) + return +} + +// Retrieve The account that owns this load balancer. +func (r Network_LoadBalancer_VirtualIpAddress) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for the Load Balancer. +func (r Network_LoadBalancer_VirtualIpAddress) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve If false, this VIP and associated services may be edited via the portal or the API. If true, you must configure this VIP manually on the device. 
+func (r Network_LoadBalancer_VirtualIpAddress) GetCustomerManagedFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "getCustomerManagedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the load balancer is a managed resource. +func (r Network_LoadBalancer_VirtualIpAddress) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_LoadBalancer_VirtualIpAddress object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_LoadBalancer_VirtualIpAddress service. You can only retrieve Load Balancers assigned to your account. +func (r Network_LoadBalancer_VirtualIpAddress) GetObject() (resp datatypes.Network_LoadBalancer_VirtualIpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve the services on this load balancer. +func (r Network_LoadBalancer_VirtualIpAddress) GetServices() (resp []datatypes.Network_LoadBalancer_Service, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "getServices", nil, &r.Options, &resp) + return +} + +// Quickly remove all active external connections to a Virtual IP Address. +func (r Network_LoadBalancer_VirtualIpAddress) KickAllConnections() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "kickAllConnections", nil, &r.Options, &resp) + return +} + +// Upgrades the connection limit on the VirtualIp and changes the billing item on your account to reflect the change. This function will only upgrade you to the next "level" of service. The next level follows this pattern (Current Level => Next Level): +// +// * 50 => 100 +// * 100 => 200 +// * 200 => 500 +// * 500 => 1000 +// * 1000 => 1200 +// * 1200 => 1500 +// * 1500 => 2000 +// * 2000 => 2500 +// * 2500 => 3000 +func (r Network_LoadBalancer_VirtualIpAddress) UpgradeConnectionLimit() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "upgradeConnectionLimit", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Media_Transcode_Account contains information regarding a transcode account.
+type Network_Media_Transcode_Account struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkMediaTranscodeAccountService returns an instance of the Network_Media_Transcode_Account SoftLayer service +func GetNetworkMediaTranscodeAccountService(sess *session.Session) Network_Media_Transcode_Account { + return Network_Media_Transcode_Account{Session: sess} +} + +func (r Network_Media_Transcode_Account) Id(id int) Network_Media_Transcode_Account { + r.Options.Id = &id + return r +} + +func (r Network_Media_Transcode_Account) Mask(mask string) Network_Media_Transcode_Account { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Media_Transcode_Account) Filter(filter string) Network_Media_Transcode_Account { + r.Options.Filter = filter + return r +} + +func (r Network_Media_Transcode_Account) Limit(limit int) Network_Media_Transcode_Account { + r.Options.Limit = &limit + return r +} + +func (r Network_Media_Transcode_Account) Offset(offset int) Network_Media_Transcode_Account { + r.Options.Offset = &offset + return r +} + +// With this method, you can create a transcode account. An individual SoftLayer account can have a single Transcode account. You have to pass your SoftLayer account id as a parameter. +func (r Network_Media_Transcode_Account) CreateTranscodeAccount() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "createTranscodeAccount", nil, &r.Options, &resp) + return +} + +// Note: This method is obsolete. Please use the [[SoftLayer_Network_Media_Transcode_Job::createObject|createObject]] method on the SoftLayer_Network_Media_Transcode_Job object instead. SoftLayer_Network_Media_Transcode_Job::createObject returns an object of a newly created Transcode Job. +// +// With this method, you can create a transcode job. +// +// The very first step of creating a transcode job is to upload your media files to the /in directory on your Transcode FTP space. Then, you have to pass a [[SoftLayer_Network_Media_Transcode_Job|Transcode job]] object as a parameter for this method. +// +// There are 4 required properties of SoftLayer_Network_Media_Transcode_Job object: transcodePresetName, transcodePresetGuid, inputFile, and outputFile. A transcode preset is a configuration that defines a certain media output. You can retrieve all the supported presets with the [[SoftLayer_Network_Media_Transcode_Account::getPresets|getPresets]] method. You can also use [[SoftLayer_Network_Media_Transcode_Account::getPresetDetail|getPresetDetail]] method to get more information on a preset. Use these two methods to determine appropriate values for "transcodePresetName" and "transcodePresetGuid" properties. For an "inputFile", you must specify a file that exists in the /in directory of your Transcode FTP space. An "outputFile" name will be used by the Transcode server for naming a transcoded file. An output file name must be in /out directory. If your outputFile name already exists in the /out directory, the Transcode server will append a file name with _n (an underscore and the total number of files with the identical name plus 1). +// +// The "name" property is optional and it can help you keep track of transcode jobs easily. "autoDeleteDuration" is another optional property that you can specify. It determines how soon your input file will be deleted.
If autoDeleteDuration is set to zero, your input file will be removed immediately after the last transcode job running on it is completed. A value for the autoDeleteDuration property is in seconds, and the maximum value is 259200 (3 days). +// +// An example SoftLayer_Network_Media_Transcode_Job parameter looks like this: +// +// +// * name: My transcoding +// * transcodePresetName: F4V 896kbps 640x352 16x9 29.97fps +// * transcodePresetGuid: {87E01268-C3E3-4A85-9701-052C9AC42BD4} +// * inputFile: /in/my_birthday.wmv +// * outputFile: /out/my_birthday_flash +// +// +// Notice that an output file does not have a file extension. The Transcode server will append a file extension based on an output format. A newly created transcode job will be in "Pending" status and it will be added to the Transcoding queue. You will receive a notification email whenever there is a status change on your transcode job. For example, when the Transcode server starts to process your transcode job, you will be notified via email. +// +// You can add up to 3 pending jobs at a time. Transcode jobs with any other status such as "Complete" or "Error" will not be counted toward your pending jobs. +// +// Once a job is complete, the Transcode server will place the output file into the /out directory along with a notification email. The files in the /out directory will be removed 3 days after they were created. You will need to use an FTP client to download transcoded files. +// +// +func (r Network_Media_Transcode_Account) CreateTranscodeJob(newJob *datatypes.Network_Media_Transcode_Job) (resp bool, err error) { + params := []interface{}{ + newJob, + } + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "createTranscodeJob", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer account information +func (r Network_Media_Transcode_Account) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getAccount", nil, &r.Options, &resp) + return +} + +// This method returns a collection of SoftLayer_Container_Network_Ftp_Directory objects. You can retrieve directory information for /in and /out directories. A [[SoftLayer_Container_Network_Directory_Listing|Directory Listing]] object contains a type (indicating whether it is a file or a directory), name and file count if it is a directory. +func (r Network_Media_Transcode_Account) GetDirectoryInformation(directoryName *string, extensionFilter *string) (resp []datatypes.Container_Network_Directory_Listing, err error) { + params := []interface{}{ + directoryName, + extensionFilter, + } + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getDirectoryInformation", params, &r.Options, &resp) + return +} + +// This method returns detailed information of a media file that resides in the Transcode FTP server. A [[SoftLayer_Container_Network_Media_Information|media information]] object contains media details such as file size, media format, frame rate, aspect ratio and so on. This information is merely for reference purposes. You should not rely on this data. Our library grabs small pieces of data from a media file to gather media details. This information may not be available for some files.
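+//
+// A usage sketch (not part of the generated SLDN documentation; it assumes sess
+// is an authenticated *session.Session and reuses the example file path from the
+// comments above):
+//
+//    acctSvc := GetNetworkMediaTranscodeAccountService(sess)
+//    detail, err := acctSvc.GetFileDetail(sl.String("/in/my_birthday.wmv"))
+//    if err != nil {
+//        // handle the SoftLayer API error
+//    }
+//    _ = detail // detail is a datatypes.Container_Network_Media_Information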
+func (r Network_Media_Transcode_Account) GetFileDetail(source *string) (resp datatypes.Container_Network_Media_Information, err error) { + params := []interface{}{ + source, + } + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getFileDetail", params, &r.Options, &resp) + return +} + +// This method returns your Transcode FTP login credentials to the transcode.service.softlayer.com server. +// +// The Transcode FTP server is available via the SoftLayer private network. There is no API method for uploading a file to the Transcode server, so you need to use an FTP client. You will have /in and /out directories on the Transcode FTP server. You will have read-write privileges for the /in directory and read-only privileges for the /out directory. All the files in both /in and /out directories will be deleted after 72 hours from the creation date. +func (r Network_Media_Transcode_Account) GetFtpAttributes() (resp datatypes.Container_Network_Authentication_Data, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getFtpAttributes", nil, &r.Options, &resp) + return +} + +// The getObject method retrieves the SoftLayer_Network_Media_Transcode_Account object whose ID number corresponds to the ID number of the initial parameter passed to the SoftLayer_Network_Media_Transcode_Account service. You can only retrieve a Transcode account assigned to your SoftLayer customer account. +func (r Network_Media_Transcode_Account) GetObject() (resp datatypes.Network_Media_Transcode_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getObject", nil, &r.Options, &resp) + return +} + +// This method returns an array of [[SoftLayer_Container_Network_Media_Transcode_Preset_Element|preset element]] objects. Each preset has its own collection of preset elements such as encoder, frame rate, aspect ratio and so on. Each element object has a default value for itself and an array of [[SoftLayer_Container_Network_Media_Transcode_Preset_Element_Option|element option]] objects. For example, the "Frame Rate" element for the "Windows Media 9 - Download - 1 Mbps - NTSC - Constrained VBR" preset has 19 element options, and a 15.0 frame rate is selected by default. Currently, you are not able to change the default value. Customizing these values may be possible in the future. +func (r Network_Media_Transcode_Account) GetPresetDetail(guid *string) (resp []datatypes.Container_Network_Media_Transcode_Preset_Element, err error) { + params := []interface{}{ + guid, + } + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getPresetDetail", params, &r.Options, &resp) + return +} + +// A transcode preset is a configuration that defines a certain media output. This method returns an array of transcoding preset objects supported by SoftLayer's Transcode server. Each [[SoftLayer_Container_Network_Media_Transcode_Preset|preset object]] contains a GUID property. You will need a GUID string when you create a new transcode job.
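+//
+// A usage sketch (not part of the generated SLDN documentation; it assumes sess
+// is an authenticated *session.Session):
+//
+//    presets, err := GetNetworkMediaTranscodeAccountService(sess).GetPresets()
+//    if err != nil {
+//        // handle the SoftLayer API error
+//    }
+//    // each entry describes one preset, including the GUID that a new
+//    // transcode job's transcodePresetGuid property expects
+//    _ = presets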
+func (r Network_Media_Transcode_Account) GetPresets() (resp []datatypes.Container_Network_Media_Transcode_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getPresets", nil, &r.Options, &resp) + return +} + +// Retrieve Transcode jobs +func (r Network_Media_Transcode_Account) GetTranscodeJobs() (resp []datatypes.Network_Media_Transcode_Job, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getTranscodeJobs", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Media_Transcode_Job contains information regarding a transcode job such as input file, output format, user id and so on. +type Network_Media_Transcode_Job struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkMediaTranscodeJobService returns an instance of the Network_Media_Transcode_Job SoftLayer service +func GetNetworkMediaTranscodeJobService(sess *session.Session) Network_Media_Transcode_Job { + return Network_Media_Transcode_Job{Session: sess} +} + +func (r Network_Media_Transcode_Job) Id(id int) Network_Media_Transcode_Job { + r.Options.Id = &id + return r +} + +func (r Network_Media_Transcode_Job) Mask(mask string) Network_Media_Transcode_Job { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Media_Transcode_Job) Filter(filter string) Network_Media_Transcode_Job { + r.Options.Filter = filter + return r +} + +func (r Network_Media_Transcode_Job) Limit(limit int) Network_Media_Transcode_Job { + r.Options.Limit = &limit + return r +} + +func (r Network_Media_Transcode_Job) Offset(offset int) Network_Media_Transcode_Job { + r.Options.Offset = &offset + return r +} + +// With this method, you can create a transcode job. +// +// The very first step of creating a transcode job is to upload your media files to the /in directory on your Transcode FTP space. Then, you have to pass a [[SoftLayer_Network_Media_Transcode_Job|Transcode job]] object as a parameter for this method. +// +// There are 4 required properties of SoftLayer_Network_Media_Transcode_Job object: transcodePresetName, transcodePresetGuid, inputFile, and outputFile. A transcode preset is a configuration that defines a certain media output. You can retrieve all the supported presets with the [[SoftLayer_Network_Media_Transcode_Account::getPresets|getPresets]] method. You can also use [[SoftLayer_Network_Media_Transcode_Account::getPresetDetail|getPresetDetail]] method to get more information on a preset. Use these two methods to determine appropriate values for "transcodePresetName" and "transcodePresetGuid" properties. For an "inputFile", you must specify a file that exists in the /in directory of your Transcode FTP space. An "outputFile" name will be used by the Transcode server for naming a transcoded file. An output file name must be in /out directory. If your outputFile name already exists in the /out directory, the Transcode server will append a file name with _n (an underscore and the total number of files with the identical name plus 1). +// +// The "name" property is optional and it can help you keep track of transcode jobs easily. "autoDeleteDuration" is another optional property that you can specify. It determines how soon your input file will be deleted. If autoDeleteDuration is set to zero, your input file will be removed immediately after the last transcode job running on it is completed. 
A value for the autoDeleteDuration property is in seconds, and the maximum value is 259200 (3 days). +// +// An example SoftLayer_Network_Media_Transcode_Job parameter looks like this: +// +// +// * name: My transcoding +// * transcodePresetName: F4V 896kbps 640x352 16x9 29.97fps +// * transcodePresetGuid: {87E01268-C3E3-4A85-9701-052C9AC42BD4} +// * inputFile: /in/my_birthday.wmv +// * outputFile: /out/my_birthday_flash +// +// +// Notice that an output file does not have a file extension. The Transcode server will append a file extension based on an output format. A newly created transcode job will be in "Pending" status and it will be added to the Transcoding queue. You will receive a notification email whenever there is a status change on your transcode job. For example, when the Transcode server starts to process your transcode job, you will be notified via email. +// +// You can add up to 3 pending jobs at a time. Transcode jobs with any other status such as "Complete" or "Error" will not be counted toward your pending jobs. +// +// Once a job is complete, the Transcode server will place the output file into the /out directory along with a notification email. The files in the /out directory will be removed 3 days after they were created. You will need to use an FTP client to download transcoded files. +// +// +func (r Network_Media_Transcode_Job) CreateObject(templateObject *datatypes.Network_Media_Transcode_Job) (resp datatypes.Network_Media_Transcode_Job, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job", "createObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Media_Transcode_Job) GetHistory() (resp []datatypes.Network_Media_Transcode_Job_History, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job", "getHistory", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Media_Transcode_Job) GetObject() (resp datatypes.Network_Media_Transcode_Job, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The transcode service account +func (r Network_Media_Transcode_Job) GetTranscodeAccount() (resp datatypes.Network_Media_Transcode_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job", "getTranscodeAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The status information of a transcode job +func (r Network_Media_Transcode_Job) GetTranscodeStatus() (resp datatypes.Network_Media_Transcode_Job_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job", "getTranscodeStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The status of a transcode job +func (r Network_Media_Transcode_Job) GetTranscodeStatusName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job", "getTranscodeStatusName", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer user that created the transcode job +func (r Network_Media_Transcode_Job) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job", "getUser", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Media_Transcode_Job_Status contains information on a transcode job status.
+type Network_Media_Transcode_Job_Status struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkMediaTranscodeJobStatusService returns an instance of the Network_Media_Transcode_Job_Status SoftLayer service +func GetNetworkMediaTranscodeJobStatusService(sess *session.Session) Network_Media_Transcode_Job_Status { + return Network_Media_Transcode_Job_Status{Session: sess} +} + +func (r Network_Media_Transcode_Job_Status) Id(id int) Network_Media_Transcode_Job_Status { + r.Options.Id = &id + return r +} + +func (r Network_Media_Transcode_Job_Status) Mask(mask string) Network_Media_Transcode_Job_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Media_Transcode_Job_Status) Filter(filter string) Network_Media_Transcode_Job_Status { + r.Options.Filter = filter + return r +} + +func (r Network_Media_Transcode_Job_Status) Limit(limit int) Network_Media_Transcode_Job_Status { + r.Options.Limit = &limit + return r +} + +func (r Network_Media_Transcode_Job_Status) Offset(offset int) Network_Media_Transcode_Job_Status { + r.Options.Offset = &offset + return r +} + +// This method returns all transcode job statuses. +func (r Network_Media_Transcode_Job_Status) GetAllStatuses() (resp []datatypes.Network_Media_Transcode_Job_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job_Status", "getAllStatuses", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Media_Transcode_Job_Status) GetObject() (resp datatypes.Network_Media_Transcode_Job_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job_Status", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Message_Delivery struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkMessageDeliveryService returns an instance of the Network_Message_Delivery SoftLayer service +func GetNetworkMessageDeliveryService(sess *session.Session) Network_Message_Delivery { + return Network_Message_Delivery{Session: sess} +} + +func (r Network_Message_Delivery) Id(id int) Network_Message_Delivery { + r.Options.Id = &id + return r +} + +func (r Network_Message_Delivery) Mask(mask string) Network_Message_Delivery { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Message_Delivery) Filter(filter string) Network_Message_Delivery { + r.Options.Filter = filter + return r +} + +func (r Network_Message_Delivery) Limit(limit int) Network_Message_Delivery { + r.Options.Limit = &limit + return r +} + +func (r Network_Message_Delivery) Offset(offset int) Network_Message_Delivery { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Message_Delivery) EditObject(templateObject *datatypes.Network_Message_Delivery) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer customer account that a network message delivery account belongs to. 
+func (r Network_Message_Delivery) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a network message delivery account. +func (r Network_Message_Delivery) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery", "getBillingItem", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery) GetObject() (resp datatypes.Network_Message_Delivery, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The message delivery type of a network message delivery account. +func (r Network_Message_Delivery) GetType() (resp datatypes.Network_Message_Delivery_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The vendor for a network message delivery account. +func (r Network_Message_Delivery) GetVendor() (resp datatypes.Network_Message_Delivery_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery", "getVendor", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Message_Delivery_Email_Sendgrid struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkMessageDeliveryEmailSendgridService returns an instance of the Network_Message_Delivery_Email_Sendgrid SoftLayer service +func GetNetworkMessageDeliveryEmailSendgridService(sess *session.Session) Network_Message_Delivery_Email_Sendgrid { + return Network_Message_Delivery_Email_Sendgrid{Session: sess} +} + +func (r Network_Message_Delivery_Email_Sendgrid) Id(id int) Network_Message_Delivery_Email_Sendgrid { + r.Options.Id = &id + return r +} + +func (r Network_Message_Delivery_Email_Sendgrid) Mask(mask string) Network_Message_Delivery_Email_Sendgrid { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Message_Delivery_Email_Sendgrid) Filter(filter string) Network_Message_Delivery_Email_Sendgrid { + r.Options.Filter = filter + return r +} + +func (r Network_Message_Delivery_Email_Sendgrid) Limit(limit int) Network_Message_Delivery_Email_Sendgrid { + r.Options.Limit = &limit + return r +} + +func (r Network_Message_Delivery_Email_Sendgrid) Offset(offset int) Network_Message_Delivery_Email_Sendgrid { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) AddUnsubscribeEmailAddress(emailAddress *string) (resp bool, err error) { + params := []interface{}{ + emailAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "addUnsubscribeEmailAddress", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) DeleteEmailListEntries(list *string, entries []string) (resp bool, err error) { + params := []interface{}{ + list, + entries, + } + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "deleteEmailListEntries", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) DisableSmtpAccess() (resp bool, err error) { + err = 
r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "disableSmtpAccess", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) EditObject(templateObject *datatypes.Network_Message_Delivery) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) EnableSmtpAccess() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "enableSmtpAccess", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer customer account that a network message delivery account belongs to. +func (r Network_Message_Delivery_Email_Sendgrid) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) GetAccountOverview() (resp datatypes.Container_Network_Message_Delivery_Email_Sendgrid_Account_Overview, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getAccountOverview", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a network message delivery account. +func (r Network_Message_Delivery_Email_Sendgrid) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getBillingItem", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) GetCategoryList() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getCategoryList", nil, &r.Options, &resp) + return +} + +// Retrieve The contact e-mail address used by SendGrid. +func (r Network_Message_Delivery_Email_Sendgrid) GetEmailAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getEmailAddress", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) GetEmailList(list *string) (resp []datatypes.Container_Network_Message_Delivery_Email_Sendgrid_List_Entry, err error) { + params := []interface{}{ + list, + } + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getEmailList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) GetObject() (resp datatypes.Network_Message_Delivery_Email_Sendgrid, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve A flag that determines if a SendGrid e-mail delivery account has access to send mail through the SendGrid SMTP server. 
+func (r Network_Message_Delivery_Email_Sendgrid) GetSmtpAccess() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getSmtpAccess", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) GetStatistics(options *datatypes.Container_Network_Message_Delivery_Email_Sendgrid_Statistics_Options) (resp []datatypes.Container_Network_Message_Delivery_Email_Sendgrid_Statistics, err error) { + params := []interface{}{ + options, + } + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getStatistics", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) GetStatisticsGraph(options *datatypes.Container_Network_Message_Delivery_Email_Sendgrid_Statistics_Options) (resp datatypes.Container_Network_Message_Delivery_Email_Sendgrid_Statistics_Graph, err error) { + params := []interface{}{ + options, + } + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getStatisticsGraph", params, &r.Options, &resp) + return +} + +// Retrieve The message delivery type of a network message delivery account. +func (r Network_Message_Delivery_Email_Sendgrid) GetType() (resp datatypes.Network_Message_Delivery_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The vendor for a network message delivery account. +func (r Network_Message_Delivery_Email_Sendgrid) GetVendor() (resp datatypes.Network_Message_Delivery_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getVendor", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) GetVendorPortalUrl() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getVendorPortalUrl", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) SendEmail(emailContainer *datatypes.Container_Network_Message_Delivery_Email) (resp bool, err error) { + params := []interface{}{ + emailContainer, + } + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "sendEmail", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) UpdateEmailAddress(emailAddress *string) (resp bool, err error) { + params := []interface{}{ + emailAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "updateEmailAddress", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Monitor struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkMonitorService returns an instance of the Network_Monitor SoftLayer service +func GetNetworkMonitorService(sess *session.Session) Network_Monitor { + return Network_Monitor{Session: sess} +} + +func (r Network_Monitor) Id(id int) Network_Monitor { + r.Options.Id = &id + return r +} + +func (r Network_Monitor) Mask(mask string) Network_Monitor { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Monitor) Filter(filter string) Network_Monitor { + r.Options.Filter = filter + return r +} + +func (r Network_Monitor) 
Limit(limit int) Network_Monitor { + r.Options.Limit = &limit + return r +} + +func (r Network_Monitor) Offset(offset int) Network_Monitor { + r.Options.Offset = &offset + return r +} + +// This will return an array of objects containing the ipaddresses. Using a string parameter, you can send a partial ipaddress to search within a given ipaddress. You can also set the maximum number of results using the resultLimit setting. +func (r Network_Monitor) GetIpAddressesByHardware(hardware *datatypes.Hardware, partialIpAddress *string) (resp []datatypes.Network_Subnet_IpAddress, err error) { + params := []interface{}{ + hardware, + partialIpAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Monitor", "getIpAddressesByHardware", params, &r.Options, &resp) + return +} + +// This will return an array of objects containing the ipaddresses. Using a string parameter, you can send a partial ipaddress to search within a given ipaddress. You can also set the maximum number of results using the resultLimit setting. +func (r Network_Monitor) GetIpAddressesByVirtualGuest(guest *datatypes.Virtual_Guest, partialIpAddress *string) (resp []datatypes.Network_Subnet_IpAddress, err error) { + params := []interface{}{ + guest, + partialIpAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Monitor", "getIpAddressesByVirtualGuest", params, &r.Options, &resp) + return +} + +// The Monitoring_Query_Host type represents a monitoring instance. It consists of a hardware ID to monitor, an IP address attached to that hardware ID, a method of monitoring, and what to do if the monitor ever fails. +type Network_Monitor_Version1_Query_Host struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkMonitorVersion1QueryHostService returns an instance of the Network_Monitor_Version1_Query_Host SoftLayer service +func GetNetworkMonitorVersion1QueryHostService(sess *session.Session) Network_Monitor_Version1_Query_Host { + return Network_Monitor_Version1_Query_Host{Session: sess} +} + +func (r Network_Monitor_Version1_Query_Host) Id(id int) Network_Monitor_Version1_Query_Host { + r.Options.Id = &id + return r +} + +func (r Network_Monitor_Version1_Query_Host) Mask(mask string) Network_Monitor_Version1_Query_Host { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Monitor_Version1_Query_Host) Filter(filter string) Network_Monitor_Version1_Query_Host { + r.Options.Filter = filter + return r +} + +func (r Network_Monitor_Version1_Query_Host) Limit(limit int) Network_Monitor_Version1_Query_Host { + r.Options.Limit = &limit + return r +} + +func (r Network_Monitor_Version1_Query_Host) Offset(offset int) Network_Monitor_Version1_Query_Host { + r.Options.Offset = &offset + return r +} + +// Passing an unsaved instance of a Query_Host object into this function will create the object and return the results to the user. +func (r Network_Monitor_Version1_Query_Host) CreateObject(templateObject *datatypes.Network_Monitor_Version1_Query_Host) (resp datatypes.Network_Monitor_Version1_Query_Host, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "createObject", params, &r.Options, &resp) + return +} + +// Passing in a collection of unsaved instances of Query_Host objects into this function will create all objects and return the results to the user.
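+//
+// A usage sketch (not part of the generated SLDN documentation; it assumes sess
+// is an authenticated *session.Session, the IDs and addresses are placeholders,
+// and the property names follow the SoftLayer_Network_Monitor_Version1_Query_Host
+// data type):
+//
+//    qhSvc := GetNetworkMonitorVersion1QueryHostService(sess)
+//    templates := []datatypes.Network_Monitor_Version1_Query_Host{
+//        {HardwareId: sl.Int(111111), IpAddress: sl.String("10.0.0.10"), QueryTypeId: sl.Int(1), ResponseActionId: sl.Int(1)},
+//        {HardwareId: sl.Int(222222), IpAddress: sl.String("10.0.0.11"), QueryTypeId: sl.Int(1), ResponseActionId: sl.Int(1)},
+//    }
+//    created, err := qhSvc.CreateObjects(templates)
+//    if err != nil {
+//        // handle the SoftLayer API error
+//    }
+//    _ = created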
+func (r Network_Monitor_Version1_Query_Host) CreateObjects(templateObjects []datatypes.Network_Monitor_Version1_Query_Host) (resp []datatypes.Network_Monitor_Version1_Query_Host, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "createObjects", params, &r.Options, &resp) + return +} + +// Like any other API object, the monitoring objects can be deleted by passing an instance of them into this function. The ID on the object must be set. +func (r Network_Monitor_Version1_Query_Host) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "deleteObject", nil, &r.Options, &resp) + return +} + +// Like any other API object, the monitoring objects can be deleted by passing an instance of them into this function. The ID on the object must be set. +func (r Network_Monitor_Version1_Query_Host) DeleteObjects(templateObjects []datatypes.Network_Monitor_Version1_Query_Host) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "deleteObjects", params, &r.Options, &resp) + return +} + +// Like any other API object, the monitoring objects can have their exposed properties edited by passing in a modified version of the object. +func (r Network_Monitor_Version1_Query_Host) EditObject(templateObject *datatypes.Network_Monitor_Version1_Query_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "editObject", params, &r.Options, &resp) + return +} + +// Like any other API object, the monitoring objects can have their exposed properties edited by passing in a modified version of the object. +func (r Network_Monitor_Version1_Query_Host) EditObjects(templateObjects []datatypes.Network_Monitor_Version1_Query_Host) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "editObjects", params, &r.Options, &resp) + return +} + +// This method returns all Query_Host objects associated with the passed in hardware ID as long as that hardware ID is owned by the current user's account. +// +// This behavior can also be accomplished by simply tapping networkMonitors on the Hardware_Server object. +func (r Network_Monitor_Version1_Query_Host) FindByHardwareId(hardwareId *int) (resp []datatypes.Network_Monitor_Version1_Query_Host, err error) { + params := []interface{}{ + hardwareId, + } + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "findByHardwareId", params, &r.Options, &resp) + return +} + +// Retrieve The hardware that is being monitored by this monitoring instance +func (r Network_Monitor_Version1_Query_Host) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The most recent result for this particular monitoring instance. 
+func (r Network_Monitor_Version1_Query_Host) GetLastResult() (resp datatypes.Network_Monitor_Version1_Query_Result, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "getLastResult", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Monitor_Version1_Query_Host object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Monitor_Version1_Query_Host service. You can only retrieve query hosts attached to hardware that belong to your account. +func (r Network_Monitor_Version1_Query_Host) GetObject() (resp datatypes.Network_Monitor_Version1_Query_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The type of monitoring query that is executed when this hardware is monitored. +func (r Network_Monitor_Version1_Query_Host) GetQueryType() (resp datatypes.Network_Monitor_Version1_Query_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "getQueryType", nil, &r.Options, &resp) + return +} + +// Retrieve The action taken when a monitor fails. +func (r Network_Monitor_Version1_Query_Host) GetResponseAction() (resp datatypes.Network_Monitor_Version1_Query_ResponseType, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "getResponseAction", nil, &r.Options, &resp) + return +} + +// The monitoring stratum type stores the maximum level of the various components of the monitoring system that a particular hardware object has access to. This object cannot be accessed by ID, and cannot be modified. The user can access this object through Hardware_Server->availableMonitoring. +// +// There are two values on this object that are important: +// # monitorLevel determines the highest level of SoftLayer_Network_Monitor_Version1_Query_Type object that can be placed in a monitoring instance on this server +// # responseLevel determines the highest level of SoftLayer_Network_Monitor_Version1_Query_ResponseType object that can be placed in a monitoring instance on this server +// +// +// Also note that the query type and response types are available through getAllQueryTypes and getAllResponseTypes, respectively. 
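+//
+// A usage sketch (not part of the generated SLDN documentation; it assumes sess
+// is an authenticated *session.Session):
+//
+//    stratumSvc := GetNetworkMonitorVersion1QueryHostStratumService(sess)
+//    queryTypes, err := stratumSvc.GetAllQueryTypes()
+//    if err != nil {
+//        // handle the SoftLayer API error
+//    }
+//    responseTypes, _ := stratumSvc.GetAllResponseTypes()
+//    _, _ = queryTypes, responseTypes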
+type Network_Monitor_Version1_Query_Host_Stratum struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkMonitorVersion1QueryHostStratumService returns an instance of the Network_Monitor_Version1_Query_Host_Stratum SoftLayer service +func GetNetworkMonitorVersion1QueryHostStratumService(sess *session.Session) Network_Monitor_Version1_Query_Host_Stratum { + return Network_Monitor_Version1_Query_Host_Stratum{Session: sess} +} + +func (r Network_Monitor_Version1_Query_Host_Stratum) Id(id int) Network_Monitor_Version1_Query_Host_Stratum { + r.Options.Id = &id + return r +} + +func (r Network_Monitor_Version1_Query_Host_Stratum) Mask(mask string) Network_Monitor_Version1_Query_Host_Stratum { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Monitor_Version1_Query_Host_Stratum) Filter(filter string) Network_Monitor_Version1_Query_Host_Stratum { + r.Options.Filter = filter + return r +} + +func (r Network_Monitor_Version1_Query_Host_Stratum) Limit(limit int) Network_Monitor_Version1_Query_Host_Stratum { + r.Options.Limit = &limit + return r +} + +func (r Network_Monitor_Version1_Query_Host_Stratum) Offset(offset int) Network_Monitor_Version1_Query_Host_Stratum { + r.Options.Offset = &offset + return r +} + +// Calling this function returns all possible query type objects. These objects are to be used to set the values on the SoftLayer_Network_Monitor_Version1_Query_Host when creating new monitoring instances. +func (r Network_Monitor_Version1_Query_Host_Stratum) GetAllQueryTypes() (resp []datatypes.Network_Monitor_Version1_Query_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host_Stratum", "getAllQueryTypes", nil, &r.Options, &resp) + return +} + +// Calling this function returns all possible response type objects. These objects are to be used to set the values on the SoftLayer_Network_Monitor_Version1_Query_Host when creating new monitoring instances. +func (r Network_Monitor_Version1_Query_Host_Stratum) GetAllResponseTypes() (resp []datatypes.Network_Monitor_Version1_Query_ResponseType, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host_Stratum", "getAllResponseTypes", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware object that these monitoring permissions apply to. +func (r Network_Monitor_Version1_Query_Host_Stratum) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host_Stratum", "getHardware", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Monitor_Version1_Query_Host_Stratum object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Monitor_Version1_Query_Host_Stratum service. You can only retrieve strata attached to hardware that belong to your account. +func (r Network_Monitor_Version1_Query_Host_Stratum) GetObject() (resp datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host_Stratum", "getObject", nil, &r.Options, &resp) + return +} + +// SoftLayer_Network_Pod refers to a portion of a data center that shares a Backend Customer Router (BCR) and usually a front-end counterpart known as a Frontend Customer Router (FCR).
A Pod primarily denotes a logical location within the network and the physical aspects that support networks. This is in contrast to representing a specific physical location. +// +// A ``Pod`` is identified by a ``name``, which is unique. A Pod name follows the format 'dddnn.podii', where 'ddd' is a data center code, 'nn' is the data center number, 'pod' is a literal string and 'ii' is a two-digit, left-zero-padded number which corresponds to a Backend Customer Router (BCR) of the desired data center. Examples: +// * dal09.pod01 = Dallas 9, Pod 1 (ie. bcr01) +// * sjc01.pod04 = San Jose 1, Pod 4 (ie. bcr04) +// * ams01.pod01 = Amsterdam 1, Pod 1 (ie. bcr01) +type Network_Pod struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkPodService returns an instance of the Network_Pod SoftLayer service +func GetNetworkPodService(sess *session.Session) Network_Pod { + return Network_Pod{Session: sess} +} + +func (r Network_Pod) Id(id int) Network_Pod { + r.Options.Id = &id + return r +} + +func (r Network_Pod) Mask(mask string) Network_Pod { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Pod) Filter(filter string) Network_Pod { + r.Options.Filter = filter + return r +} + +func (r Network_Pod) Limit(limit int) Network_Pod { + r.Options.Limit = &limit + return r +} + +func (r Network_Pod) Offset(offset int) Network_Pod { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Pod) GetAllObjects() (resp []datatypes.Network_Pod, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Pod", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Provides the list of capabilities a Pod fulfills. See [[SoftLayer_Network_Pod/listCapabilities]] for more information on capabilities. +func (r Network_Pod) GetCapabilities() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Pod", "getCapabilities", nil, &r.Options, &resp) + return +} + +// Set the initialization parameter to the ``name`` of the Pod to retrieve. +func (r Network_Pod) GetObject() (resp datatypes.Network_Pod, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Pod", "getObject", nil, &r.Options, &resp) + return +} + +// A capability is simply a string literal that denotes the availability of a feature. Capabilities are generally self-describing, but any additional details concerning the implications of a capability will be documented elsewhere; usually by the Service or Operation related to it. +func (r Network_Pod) ListCapabilities() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Pod", "listCapabilities", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_SecurityGroup data type contains general information for a single security group. A security group contains a set of IP filter [[SoftLayer_Network_SecurityGroup_Rule (type)|rules]] that define how to handle incoming (ingress) and outgoing (egress) traffic to both the public and private interfaces of a virtual server instance and a set of [[SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding (type)|bindings]] to associate virtual guest network components with the security group.
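+//
+// A usage sketch (not part of the generated SLDN documentation; it assumes sess
+// is an authenticated *session.Session, the group name, description and rule
+// values are placeholders, and the property names follow the
+// SoftLayer_Network_SecurityGroup and SoftLayer_Network_SecurityGroup_Rule data
+// types):
+//
+//    sgSvc := GetNetworkSecurityGroupService(sess)
+//    sg, err := sgSvc.CreateObject(&datatypes.Network_SecurityGroup{
+//        Name:        sl.String("allow-http"),
+//        Description: sl.String("example group"),
+//    })
+//    if err == nil && sg.Id != nil {
+//        _, _ = sgSvc.Id(*sg.Id).AddRules([]datatypes.Network_SecurityGroup_Rule{
+//            {Direction: sl.String("ingress"), Protocol: sl.String("tcp"), PortRangeMin: sl.Int(80), PortRangeMax: sl.Int(80)},
+//        })
+//    }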
+type Network_SecurityGroup struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSecurityGroupService returns an instance of the Network_SecurityGroup SoftLayer service +func GetNetworkSecurityGroupService(sess *session.Session) Network_SecurityGroup { + return Network_SecurityGroup{Session: sess} +} + +func (r Network_SecurityGroup) Id(id int) Network_SecurityGroup { + r.Options.Id = &id + return r +} + +func (r Network_SecurityGroup) Mask(mask string) Network_SecurityGroup { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_SecurityGroup) Filter(filter string) Network_SecurityGroup { + r.Options.Filter = filter + return r +} + +func (r Network_SecurityGroup) Limit(limit int) Network_SecurityGroup { + r.Options.Limit = &limit + return r +} + +func (r Network_SecurityGroup) Offset(offset int) Network_SecurityGroup { + r.Options.Offset = &offset + return r +} + +// Add new rules to a security group by sending in an array of template [[SoftLayer_Network_SecurityGroup_Rule (type)]] objects to be created. +func (r Network_SecurityGroup) AddRules(ruleTemplates []datatypes.Network_SecurityGroup_Rule) (resp datatypes.Network_SecurityGroup_RequestRules, err error) { + params := []interface{}{ + ruleTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "addRules", params, &r.Options, &resp) + return +} + +// Attach virtual guest network components to a security group by creating [[SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding (type)]] objects. +func (r Network_SecurityGroup) AttachNetworkComponents(networkComponentIds []int) (resp datatypes.Network_SecurityGroup_Request, err error) { + params := []interface{}{ + networkComponentIds, + } + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "attachNetworkComponents", params, &r.Options, &resp) + return +} + +// Create a new security group. +func (r Network_SecurityGroup) CreateObject(templateObject *datatypes.Network_SecurityGroup) (resp datatypes.Network_SecurityGroup, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "createObject", params, &r.Options, &resp) + return +} + +// Create new security groups. +func (r Network_SecurityGroup) CreateObjects(templateObjects []datatypes.Network_SecurityGroup) (resp []datatypes.Network_SecurityGroup, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "createObjects", params, &r.Options, &resp) + return +} + +// Delete a security group for an account. A security group cannot be deleted if any network components are attached or if the security group is a remote security group for a [[SoftLayer_Network_SecurityGroup_Rule (type)|rule]]. +func (r Network_SecurityGroup) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "deleteObject", nil, &r.Options, &resp) + return +} + +// Delete security groups for an account. A security group cannot be deleted if any network components are attached or if the security group is a remote security group for a [[SoftLayer_Network_SecurityGroup_Rule (type)|rule]]. 
+func (r Network_SecurityGroup) DeleteObjects(templateObjects []datatypes.Network_SecurityGroup) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "deleteObjects", params, &r.Options, &resp) + return +} + +// Detach virtual guest network components from a security group by deleting its [[SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding (type)]]. +func (r Network_SecurityGroup) DetachNetworkComponents(networkComponentIds []int) (resp datatypes.Network_SecurityGroup_Request, err error) { + params := []interface{}{ + networkComponentIds, + } + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "detachNetworkComponents", params, &r.Options, &resp) + return +} + +// Edit a security group. +func (r Network_SecurityGroup) EditObject(templateObject *datatypes.Network_SecurityGroup) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "editObject", params, &r.Options, &resp) + return +} + +// Edit security groups. +func (r Network_SecurityGroup) EditObjects(templateObjects []datatypes.Network_SecurityGroup) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "editObjects", params, &r.Options, &resp) + return +} + +// Edit rules that belong to the security group. An array of skeleton [[SoftLayer_Network_SecurityGroup_Rule]] objects must be sent in with only the properties defined that you want to change. To edit a property to null, send in -1 for integer properties and "" for string properties. Unchanged properties are left alone. +func (r Network_SecurityGroup) EditRules(ruleTemplates []datatypes.Network_SecurityGroup_Rule) (resp datatypes.Network_SecurityGroup_RequestRules, err error) { + params := []interface{}{ + ruleTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "editRules", params, &r.Options, &resp) + return +} + +// Retrieve The account this security group belongs to. +func (r Network_SecurityGroup) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_SecurityGroup) GetAllObjects() (resp []datatypes.Network_SecurityGroup, err error) { + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "getAllObjects", nil, &r.Options, &resp) + return +} + +// List the current security group limits +func (r Network_SecurityGroup) GetLimits() (resp []datatypes.Container_Network_SecurityGroup_Limit, err error) { + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "getLimits", nil, &r.Options, &resp) + return +} + +// Retrieve The network component bindings for this security group. 
+func (r Network_SecurityGroup) GetNetworkComponentBindings() (resp []datatypes.Virtual_Network_SecurityGroup_NetworkComponentBinding, err error) { + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "getNetworkComponentBindings", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_SecurityGroup) GetObject() (resp datatypes.Network_SecurityGroup, err error) { + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The order bindings for this security group +func (r Network_SecurityGroup) GetOrderBindings() (resp []datatypes.Network_SecurityGroup_OrderBinding, err error) { + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "getOrderBindings", nil, &r.Options, &resp) + return +} + +// Retrieve The rules for this security group. +func (r Network_SecurityGroup) GetRules() (resp []datatypes.Network_SecurityGroup_Rule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "getRules", nil, &r.Options, &resp) + return +} + +// List the data centers that currently support the use of security groups. +func (r Network_SecurityGroup) GetSupportedDataCenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "getSupportedDataCenters", nil, &r.Options, &resp) + return +} + +// Remove rules from a security group. +func (r Network_SecurityGroup) RemoveRules(ruleIds []int) (resp datatypes.Network_SecurityGroup_RequestRules, err error) { + params := []interface{}{ + ruleIds, + } + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "removeRules", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Security_Scanner_Request data type represents a single vulnerability scan request. It provides information on when the scan was created, last updated, and the current status. The status messages are as follows: +// *Scan Pending +// *Scan Processing +// *Scan Complete +// *Scan Cancelled +// *Generating Report. +type Network_Security_Scanner_Request struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSecurityScannerRequestService returns an instance of the Network_Security_Scanner_Request SoftLayer service +func GetNetworkSecurityScannerRequestService(sess *session.Session) Network_Security_Scanner_Request { + return Network_Security_Scanner_Request{Session: sess} +} + +func (r Network_Security_Scanner_Request) Id(id int) Network_Security_Scanner_Request { + r.Options.Id = &id + return r +} + +func (r Network_Security_Scanner_Request) Mask(mask string) Network_Security_Scanner_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Security_Scanner_Request) Filter(filter string) Network_Security_Scanner_Request { + r.Options.Filter = filter + return r +} + +func (r Network_Security_Scanner_Request) Limit(limit int) Network_Security_Scanner_Request { + r.Options.Limit = &limit + return r +} + +func (r Network_Security_Scanner_Request) Offset(offset int) Network_Security_Scanner_Request { + r.Options.Offset = &offset + return r +} + +// Create a new vulnerability scan request. New scan requests are picked up every five minutes, and the time to complete an actual scan may vary. Once the scan is finished, it can take up to another five minutes for the report to be generated and accessible. 
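+//
+// Illustrative sketch only (not part of the upstream SoftLayer documentation); it assumes
+// sess is an authenticated *session.Session, and the HardwareId/IpAddress template fields
+// and their values are placeholder assumptions about the data type:
+//
+//	scanService := services.GetNetworkSecurityScannerRequestService(sess)
+//	req, err := scanService.CreateObject(&datatypes.Network_Security_Scanner_Request{
+//		HardwareId: sl.Int(123456),
+//		IpAddress:  sl.String("203.0.113.10"),
+//	})
+//	// ...later, once the scan and report generation have finished:
+//	report, err := scanService.Id(*req.Id).GetReport()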
+func (r Network_Security_Scanner_Request) CreateObject(templateObject *datatypes.Network_Security_Scanner_Request) (resp datatypes.Network_Security_Scanner_Request, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "createObject", params, &r.Options, &resp) + return +} + +// Retrieve The account associated with a security scan request. +func (r Network_Security_Scanner_Request) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual guest a security scan is run against. +func (r Network_Security_Scanner_Request) GetGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "getGuest", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware a security scan is run against. +func (r Network_Security_Scanner_Request) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "getHardware", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Security_Scanner_Request object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Security_Scanner_Request service. You can only retrieve requests and reports that are assigned to your SoftLayer account. +func (r Network_Security_Scanner_Request) GetObject() (resp datatypes.Network_Security_Scanner_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "getObject", nil, &r.Options, &resp) + return +} + +// Get the vulnerability report for a scan request, formatted as HTML string. Previous scan reports are held indefinitely. +func (r Network_Security_Scanner_Request) GetReport() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "getReport", nil, &r.Options, &resp) + return +} + +// Retrieve Flag whether the requestor owns the hardware the scan was run on. This flag will return for hardware servers only, virtual servers will result in a null return even if you have a request out for them. +func (r Network_Security_Scanner_Request) GetRequestorOwnedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "getRequestorOwnedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A security scan request's status. +func (r Network_Security_Scanner_Request) GetStatus() (resp datatypes.Network_Security_Scanner_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "getStatus", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Service_Vpn_Overrides data type contains information relating user ids to subnet ids when VPN access is manually configured. It is essentially an entry in a 'white list' of subnets a SoftLayer portal VPN user may access. 
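+//
+// Illustrative sketch only (not part of the upstream SoftLayer documentation): whitelisting
+// a subnet for a portal VPN user. It assumes sess is an authenticated *session.Session, and
+// the UserId/SubnetId field names and values are placeholder assumptions about the data type:
+//
+//	overrides := []datatypes.Network_Service_Vpn_Overrides{{
+//		UserId:   sl.Int(111111),
+//		SubnetId: sl.Int(222222),
+//	}}
+//	ok, err := services.GetNetworkServiceVpnOverridesService(sess).CreateObjects(overrides)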
+type Network_Service_Vpn_Overrides struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkServiceVpnOverridesService returns an instance of the Network_Service_Vpn_Overrides SoftLayer service +func GetNetworkServiceVpnOverridesService(sess *session.Session) Network_Service_Vpn_Overrides { + return Network_Service_Vpn_Overrides{Session: sess} +} + +func (r Network_Service_Vpn_Overrides) Id(id int) Network_Service_Vpn_Overrides { + r.Options.Id = &id + return r +} + +func (r Network_Service_Vpn_Overrides) Mask(mask string) Network_Service_Vpn_Overrides { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Service_Vpn_Overrides) Filter(filter string) Network_Service_Vpn_Overrides { + r.Options.Filter = filter + return r +} + +func (r Network_Service_Vpn_Overrides) Limit(limit int) Network_Service_Vpn_Overrides { + r.Options.Limit = &limit + return r +} + +func (r Network_Service_Vpn_Overrides) Offset(offset int) Network_Service_Vpn_Overrides { + r.Options.Offset = &offset + return r +} + +// Create Softlayer portal user VPN overrides. +func (r Network_Service_Vpn_Overrides) CreateObjects(templateObjects []datatypes.Network_Service_Vpn_Overrides) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Service_Vpn_Overrides", "createObjects", params, &r.Options, &resp) + return +} + +// Use this method to delete a single SoftLayer portal VPN user subnet override. +func (r Network_Service_Vpn_Overrides) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Service_Vpn_Overrides", "deleteObject", nil, &r.Options, &resp) + return +} + +// Use this method to delete a collection of SoftLayer portal VPN user subnet overrides. +func (r Network_Service_Vpn_Overrides) DeleteObjects(templateObjects []datatypes.Network_Service_Vpn_Overrides) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Service_Vpn_Overrides", "deleteObjects", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Service_Vpn_Overrides) GetObject() (resp datatypes.Network_Service_Vpn_Overrides, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Service_Vpn_Overrides", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Subnet components accessible by a SoftLayer VPN portal user. +func (r Network_Service_Vpn_Overrides) GetSubnet() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Service_Vpn_Overrides", "getSubnet", nil, &r.Options, &resp) + return +} + +// Retrieve SoftLayer VPN portal user. +func (r Network_Service_Vpn_Overrides) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Service_Vpn_Overrides", "getUser", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Storage data type contains general information regarding a Storage product such as account id, access username and password, the Storage product type, and the server the Storage service is associated with. Currently, only EVault backup storage has an associated server. 
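+//
+// Illustrative sketch only (not part of the upstream SoftLayer documentation): fetching a
+// single volume by ID with an object mask. It assumes sess is an authenticated
+// *session.Session, and the volume ID and mask properties are placeholders:
+//
+//	vol, err := services.GetNetworkStorageService(sess).
+//		Id(12345678).
+//		Mask("id;username;capacityGb;storageType.keyName").
+//		GetObject()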
+type Network_Storage struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageService returns an instance of the Network_Storage SoftLayer service +func GetNetworkStorageService(sess *session.Session) Network_Storage { + return Network_Storage{Session: sess} +} + +func (r Network_Storage) Id(id int) Network_Storage { + r.Options.Id = &id + return r +} + +func (r Network_Storage) Mask(mask string) Network_Storage { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage) Filter(filter string) Network_Storage { + r.Options.Filter = filter + return r +} + +func (r Network_Storage) Limit(limit int) Network_Storage { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage) Offset(offset int) Network_Storage { + r.Options.Offset = &offset + return r +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. +func (r Network_Storage) AllowAccessFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromHardware", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) AllowAccessFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage) AllowAccessFromHost(typeClassName *string, hostId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + typeClassName, + hostId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromHost", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage volume will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage) AllowAccessFromHostList(hostObjectTemplates []datatypes.Container_Network_Storage_Host) (resp []datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hostObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromHostList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage will be listed in the allowedIpAddresses property of this storage volume. 
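+//
+// Illustrative sketch only (not part of the upstream SoftLayer documentation); it assumes
+// sess is an authenticated *session.Session, and the volume and IP address IDs are
+// placeholders:
+//
+//	ip := datatypes.Network_Subnet_IpAddress{Id: sl.Int(987654)}
+//	ok, err := services.GetNetworkStorageService(sess).
+//		Id(12345678).
+//		AllowAccessFromIpAddress(&ip)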
+func (r Network_Storage) AllowAccessFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromIpAddress", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) AllowAccessFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromIpAddressList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. +func (r Network_Storage) AllowAccessFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromSubnet", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) AllowAccessFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage) AllowAccessFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromVirtualGuest", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage) AllowAccessFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage replicant volume. +func (r Network_Storage) AllowAccessToReplicantFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromHardware", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Hardware objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationHardware property of this storage volume. 
+func (r Network_Storage) AllowAccessToReplicantFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromHardwareList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) AllowAccessToReplicantFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromIpAddress", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationIpAddresses property of this storage volume. +func (r Network_Storage) AllowAccessToReplicantFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromIpAddressList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Network_Subnet objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage replicant volume. +func (r Network_Storage) AllowAccessToReplicantFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromSubnet", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationSubnets property of this storage volume. +func (r Network_Storage) AllowAccessToReplicantFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage replicant volume. +func (r Network_Storage) AllowAccessToReplicantFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromVirtualGuest", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationVirtualGuests property of this storage volume. 
+func (r Network_Storage) AllowAccessToReplicantFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method will assign an existing credential to the current volume. The credential must have been created using the 'addNewCredential' method. The volume type must support an additional credential. +func (r Network_Storage) AssignCredential(username *string) (resp bool, err error) { + params := []interface{}{ + username, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "assignCredential", params, &r.Options, &resp) + return +} + +// This method will set up a new credential for the remote storage volume. The storage volume must support an additional credential. Once created, the credential will be automatically assigned to the current volume. If there are no volumes assigned to the credential it will be automatically deleted. +func (r Network_Storage) AssignNewCredential(typ *string) (resp datatypes.Network_Storage_Credential, err error) { + params := []interface{}{ + typ, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "assignNewCredential", params, &r.Options, &resp) + return +} + +// The method will change the password for the given Storage/Virtual Server Storage account. +func (r Network_Storage) ChangePassword(username *string, currentPassword *string, newPassword *string) (resp bool, err error) { + params := []interface{}{ + username, + currentPassword, + newPassword, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "changePassword", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} +// +// collectBandwidth() Retrieve the bandwidth usage for the current billing cycle. +func (r Network_Storage) CollectBandwidth(typ *string, startDate *datatypes.Time, endDate *datatypes.Time) (resp uint, err error) { + params := []interface{}{ + typ, + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "collectBandwidth", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} +// +// collectBytesUsed() retrieves the number of bytes capacity currently in use on a Storage account. +func (r Network_Storage) CollectBytesUsed() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "collectBytesUsed", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) CreateFolder(folder *string) (resp bool, err error) { + params := []interface{}{ + folder, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "createFolder", params, &r.Options, &resp) + return +} + +// The LUN ID only takes effect during the Host Authorization process. It is required to de-authorize all hosts before using this method. 
+func (r Network_Storage) CreateOrUpdateLunId(lunId *int) (resp datatypes.Network_Storage_Property, err error) {
+	params := []interface{}{
+		lunId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "createOrUpdateLunId", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) CreateSnapshot(notes *string) (resp datatypes.Network_Storage, err error) {
+	params := []interface{}{
+		notes,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "createSnapshot", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Delete all files within a Storage account. Depending on the type of Storage account, deleting either deletes files permanently or sends files to your account's recycle bin.
+//
+// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account, this method also determines if the files are in the account's recycle bin. If the files exist in the recycle bin, then they are permanently deleted.
+//
+// Please note, files cannot be restored once they are permanently deleted.
+func (r Network_Storage) DeleteAllFiles() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "deleteAllFiles", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Delete an individual file within a Storage account. Depending on the type of Storage account, deleting a file either deletes the file permanently or sends the file to your account's recycle bin.
+//
+// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account, this method also determines if the file is in the account's recycle bin. If the file exists in the recycle bin, then it is permanently deleted.
+//
+// Please note, a file cannot be restored once it is permanently deleted.
+func (r Network_Storage) DeleteFile(fileId *string) (resp bool, err error) {
+	params := []interface{}{
+		fileId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "deleteFile", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Delete multiple files within a Storage account. Depending on the type of Storage account, deleting either deletes files permanently or sends files to your account's recycle bin.
+//
+// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account, this method also determines if the files are in the account's recycle bin. If the files exist in the recycle bin, then they are permanently deleted.
+//
+// Please note, files cannot be restored once they are permanently deleted.
+func (r Network_Storage) DeleteFiles(fileIds []string) (resp bool, err error) {
+	params := []interface{}{
+		fileIds,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "deleteFiles", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) DeleteFolder(folder *string) (resp bool, err error) {
+	params := []interface{}{
+		folder,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "deleteFolder", params, &r.Options, &resp)
+	return
+}
+
+// Delete a network storage volume. '''This cannot be undone.''' At this time only network storage snapshots may be deleted with this method.
+//
+// ''deleteObject'' returns Boolean ''true'' on successful deletion or ''false'' if it was unable to remove a volume.
+func (r Network_Storage) DeleteObject() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "deleteObject", nil, &r.Options, &resp)
+	return
+}
+
+// This method is not valid for Legacy iSCSI Storage Volumes.
+//
+// Disable scheduled snapshots of this storage volume. Scheduling options include INTERVAL, HOURLY, DAILY and WEEKLY schedules.
+func (r Network_Storage) DisableSnapshots(scheduleType *string) (resp bool, err error) {
+	params := []interface{}{
+		scheduleType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "disableSnapshots", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Download a file from a Storage account. This method returns a file's details including the file's raw content.
+func (r Network_Storage) DownloadFile(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) {
+	params := []interface{}{
+		fileId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "downloadFile", params, &r.Options, &resp)
+	return
+}
+
+// This method will change the password of a credential created using the 'addNewCredential' method. If the credential exists on multiple storage volumes it will change for those volumes as well.
+func (r Network_Storage) EditCredential(username *string, newPassword *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+		newPassword,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "editCredential", params, &r.Options, &resp)
+	return
+}
+
+// The password and/or notes may be modified for the Storage service except EVault passwords and notes.
+func (r Network_Storage) EditObject(templateObject *datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// This method is not valid for Legacy iSCSI Storage Volumes.
+//
+// Enable scheduled snapshots of this storage volume. Scheduling options include HOURLY, DAILY and WEEKLY schedules. For HOURLY schedules, provide relevant data for $scheduleType, $retentionCount and $minute. For DAILY schedules, provide relevant data for $scheduleType, $retentionCount, $minute, and $hour. For WEEKLY schedules, provide relevant data for all parameters of this method.
+func (r Network_Storage) EnableSnapshots(scheduleType *string, retentionCount *int, minute *int, hour *int, dayOfWeek *string) (resp bool, err error) {
+	params := []interface{}{
+		scheduleType,
+		retentionCount,
+		minute,
+		hour,
+		dayOfWeek,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "enableSnapshots", params, &r.Options, &resp)
+	return
+}
+
+// Failback from a volume replicant. In order to fail back, the volume must have already been failed over to a replicant.
+func (r Network_Storage) FailbackFromReplicant() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "failbackFromReplicant", nil, &r.Options, &resp)
+	return
+}
+
+// Failover to a volume replicant. While the replicant is in use, the local NAS volume will not be available.
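+//
+// Illustrative sketch only (not part of the upstream SoftLayer documentation); it assumes
+// sess is an authenticated *session.Session, and the volume and replicant IDs are
+// placeholders:
+//
+//	volume := services.GetNetworkStorageService(sess).Id(12345678)
+//	_, err := volume.FailoverToReplicant(sl.Int(87654321))
+//	// ...after the primary volume is healthy again:
+//	_, err = volume.FailbackFromReplicant()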
+func (r Network_Storage) FailoverToReplicant(replicantId *int) (resp bool, err error) { + params := []interface{}{ + replicantId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "failoverToReplicant", params, &r.Options, &resp) + return +} + +// Retrieve The account that a Storage services belongs to. +func (r Network_Storage) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve Other usernames and passwords associated with a Storage volume. +func (r Network_Storage) GetAccountPassword() (resp datatypes.Account_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAccountPassword", nil, &r.Options, &resp) + return +} + +// Retrieve The currently active transactions on a network storage volume. +func (r Network_Storage) GetActiveTransactions() (resp []datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getActiveTransactions", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date for all files in a Storage account's root directory. This does not download file content. +func (r Network_Storage) GetAllFiles() (resp []datatypes.Container_Utility_File_Entity, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllFiles", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date for all files matching the filter's criteria in a Storage account's root directory. This does not download file content. +func (r Network_Storage) GetAllFilesByFilter(filter *datatypes.Container_Utility_File_Entity) (resp []datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + filter, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllFilesByFilter", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Hardware that can be authorized to this SoftLayer_Network_Storage. +func (r Network_Storage) GetAllowableHardware(filterHostname *string) (resp []datatypes.Hardware, err error) { + params := []interface{}{ + filterHostname, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowableHardware", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Subnet_IpAddress that can be authorized to this SoftLayer_Network_Storage. +func (r Network_Storage) GetAllowableIpAddresses(subnetId *int, filterIpAddress *string) (resp []datatypes.Network_Subnet_IpAddress, err error) { + params := []interface{}{ + subnetId, + filterIpAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowableIpAddresses", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Subnet that can be authorized to this SoftLayer_Network_Storage. +func (r Network_Storage) GetAllowableSubnets(filterNetworkIdentifier *string) (resp []datatypes.Network_Subnet, err error) { + params := []interface{}{ + filterNetworkIdentifier, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowableSubnets", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Virtual_Guest that can be authorized to this SoftLayer_Network_Storage. 
+func (r Network_Storage) GetAllowableVirtualGuests(filterHostname *string) (resp []datatypes.Virtual_Guest, err error) { + params := []interface{}{ + filterHostname, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowableVirtualGuests", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Hardware objects which are allowed access to this storage volume. +func (r Network_Storage) GetAllowedHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedHardware", nil, &r.Options, &resp) + return +} + +// Retrieves the total number of allowed hosts limit per volume. +func (r Network_Storage) GetAllowedHostsLimit() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedHostsLimit", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume. +func (r Network_Storage) GetAllowedIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Hardware objects which are allowed access to this storage volume's Replicant. +func (r Network_Storage) GetAllowedReplicationHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedReplicationHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume's Replicant. +func (r Network_Storage) GetAllowedReplicationIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedReplicationIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet objects which are allowed access to this storage volume's Replicant. +func (r Network_Storage) GetAllowedReplicationSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedReplicationSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Hardware objects which are allowed access to this storage volume's Replicant. +func (r Network_Storage) GetAllowedReplicationVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedReplicationVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet objects which are allowed access to this storage volume. +func (r Network_Storage) GetAllowedSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Virtual_Guest objects which are allowed access to this storage volume. +func (r Network_Storage) GetAllowedVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for a Storage volume. 
+func (r Network_Storage) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage) GetBillingItemCategory() (resp datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getBillingItemCategory", nil, &r.Options, &resp) + return +} + +// Retrieve network storage accounts by username and storage account type. Use this method if you wish to retrieve a storage record by username rather than by id. The ''type'' parameter must correspond to one of the available ''nasType'' values in the SoftLayer_Network_Storage data type. +func (r Network_Storage) GetByUsername(username *string, typ *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + username, + typ, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getByUsername", params, &r.Options, &resp) + return +} + +// Retrieve The amount of space used by the volume, in bytes. +func (r Network_Storage) GetBytesUsed() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getBytesUsed", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) GetCdnUrls() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_ContentDeliveryUrl, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getCdnUrls", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) GetClusterResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getClusterResource", nil, &r.Options, &resp) + return +} + +// Retrieve The schedule id which was executed to create a snapshot. +func (r Network_Storage) GetCreationScheduleId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getCreationScheduleId", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage) GetCredentials() (resp []datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getCredentials", nil, &r.Options, &resp) + return +} + +// Retrieve The Daily Schedule which is associated with this network storage volume. +func (r Network_Storage) GetDailySchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getDailySchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The events which have taken place on a network storage volume. +func (r Network_Storage) GetEvents() (resp []datatypes.Network_Storage_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getEvents", nil, &r.Options, &resp) + return +} + +// +// +// +func (r Network_Storage) GetFileBlockEncryptedLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFileBlockEncryptedLocations", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date of a file within a Storage account. This does not download file content. 
+func (r Network_Storage) GetFileByIdentifier(identifier *string) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + identifier, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFileByIdentifier", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve the file number of files in a Virtual Server Storage account's root directory. This does not include the files stored in the recycle bin. +func (r Network_Storage) GetFileCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFileCount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) GetFileList(folder *string, path *string) (resp []datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + folder, + path, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFileList", params, &r.Options, &resp) + return +} + +// Retrieve Retrieves the NFS Network Mount Address Name for a given File Storage Volume. +func (r Network_Storage) GetFileNetworkMountAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFileNetworkMountAddress", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve the number of files pending deletion in a Storage account's recycle bin. Files in an account's recycle bin may either be restored to the account's root directory or permanently deleted. +func (r Network_Storage) GetFilePendingDeleteCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFilePendingDeleteCount", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve a list of files that are pending deletion in a Storage account's recycle bin. Files in an account's recycle bin may either be restored to the account's root directory or permanently deleted. This method does not download file content. +func (r Network_Storage) GetFilesPendingDelete() (resp []datatypes.Container_Utility_File_Entity, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFilesPendingDelete", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) GetFolderList() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_Folder, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFolderList", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} +// +// getGraph() retrieves a Storage account's usage and returns a PNG graph image, title, and the minimum and maximum dates included in the graphed date range. Virtual Server storage accounts can also graph upload and download bandwidth usage. +func (r Network_Storage) GetGraph(startDate *datatypes.Time, endDate *datatypes.Time, typ *string) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + startDate, + endDate, + typ, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getGraph", params, &r.Options, &resp) + return +} + +// Retrieve When applicable, the hardware associated with a Storage service. 
+func (r Network_Storage) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage) GetHasEncryptionAtRest() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getHasEncryptionAtRest", nil, &r.Options, &resp) + return +} + +// Retrieve The Hourly Schedule which is associated with this network storage volume. +func (r Network_Storage) GetHourlySchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getHourlySchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The Interval Schedule which is associated with this network storage volume. +func (r Network_Storage) GetIntervalSchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getIntervalSchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The maximum number of IOPs selected for this volume. +func (r Network_Storage) GetIops() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getIops", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether a volume is ready to order snapshot space, or, if snapshot space is already available, to assign a snapshot schedule, or to take a manual snapshot. +func (r Network_Storage) GetIsReadyForSnapshot() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getIsReadyForSnapshot", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether a volume is ready to have Hosts authorized to access it. This does not indicate whether another operation may be blocking, please refer to this volume's volumeStatus property for details. +func (r Network_Storage) GetIsReadyToMount() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getIsReadyToMount", nil, &r.Options, &resp) + return +} + +// Retrieve Relationship between a container volume and iSCSI LUNs. +func (r Network_Storage) GetIscsiLuns() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getIscsiLuns", nil, &r.Options, &resp) + return +} + +// Retrieve Returns the target IP addresses of an iSCSI volume. +func (r Network_Storage) GetIscsiTargetIpAddresses() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getIscsiTargetIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The ID of the LUN volume. +func (r Network_Storage) GetLunId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getLunId", nil, &r.Options, &resp) + return +} + +// Retrieve The manually-created snapshots associated with this SoftLayer_Network_Storage volume. Does not support pagination by result limit and offset. +func (r Network_Storage) GetManualSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getManualSnapshots", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) GetMaximumExpansionSize() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getMaximumExpansionSize", nil, &r.Options, &resp) + return +} + +// Retrieve A network storage volume's metric tracking object. This object records all periodic polled data available to this volume. 
+func (r Network_Storage) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a network storage volume may be mounted. +func (r Network_Storage) GetMountableFlag() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getMountableFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The current status of split or move operation as a part of volume duplication. +func (r Network_Storage) GetMoveAndSplitStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getMoveAndSplitStatus", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) GetNetworkConnectionDetails() (resp datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) GetNetworkMountAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getNetworkMountAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The subscribers that will be notified for usage amount warnings and overages. +func (r Network_Storage) GetNotificationSubscribers() (resp []datatypes.Notification_User_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getNotificationSubscribers", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Storage object whose ID corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Storage service. +// +// Please use the associated methods in the [[SoftLayer_Network_Storage]] service to retrieve a Storage account's id. +func (r Network_Storage) GetObject() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) GetObjectStorageConnectionInformation() (resp []datatypes.Container_Network_Service_Resource_ObjectStorage_ConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getObjectStorageConnectionInformation", nil, &r.Options, &resp) + return +} + +// Retrieve network storage accounts by SoftLayer_Network_Storage_Credential object. Use this method if you wish to retrieve a storage record by a credential rather than by id. +func (r Network_Storage) GetObjectsByCredential(credentialObject *datatypes.Network_Storage_Credential) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + credentialObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getObjectsByCredential", params, &r.Options, &resp) + return +} + +// Retrieve The name of the snapshot that this volume was duplicated from. +func (r Network_Storage) GetOriginalSnapshotName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getOriginalSnapshotName", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the volume that this volume was duplicated from. 
+func (r Network_Storage) GetOriginalVolumeName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getOriginalVolumeName", nil, &r.Options, &resp) + return +} + +// Retrieve The size (in GB) of the volume or LUN before any size expansion, or of the volume (before any possible size expansion) from which the duplicate volume or LUN was created. +func (r Network_Storage) GetOriginalVolumeSize() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getOriginalVolumeSize", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type. +func (r Network_Storage) GetOsType() (resp datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getOsType", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type ID. +func (r Network_Storage) GetOsTypeId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getOsTypeId", nil, &r.Options, &resp) + return +} + +// Retrieve The volumes or snapshots partnered with a network storage volume in a parental role. +func (r Network_Storage) GetParentPartnerships() (resp []datatypes.Network_Storage_Partnership, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getParentPartnerships", nil, &r.Options, &resp) + return +} + +// Retrieve The parent volume of a volume in a complex storage relationship. +func (r Network_Storage) GetParentVolume() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getParentVolume", nil, &r.Options, &resp) + return +} + +// Retrieve The volumes or snapshots partnered with a network storage volume. +func (r Network_Storage) GetPartnerships() (resp []datatypes.Network_Storage_Partnership, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getPartnerships", nil, &r.Options, &resp) + return +} + +// Retrieve All permissions group(s) this volume is in. +func (r Network_Storage) GetPermissionsGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getPermissionsGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The properties used to provide additional details about a network storage volume. +func (r Network_Storage) GetProperties() (resp []datatypes.Network_Storage_Property, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getProperties", nil, &r.Options, &resp) + return +} + +// Retrieve The number of IOPs provisioned for this volume. +func (r Network_Storage) GetProvisionedIops() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getProvisionedIops", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve the details of a file that is pending deletion in a Storage account's a recycle bin. +func (r Network_Storage) GetRecycleBinFileByIdentifier(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + fileId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getRecycleBinFileByIdentifier", params, &r.Options, &resp) + return +} + +// Retrieves the remaining number of allowed hosts per volume. 
+func (r Network_Storage) GetRemainingAllowedHosts() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getRemainingAllowedHosts", nil, &r.Options, &resp) + return +} + +// Retrieves the remaining number of allowed hosts for a volume's replicant. +func (r Network_Storage) GetRemainingAllowedHostsForReplicant() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getRemainingAllowedHostsForReplicant", nil, &r.Options, &resp) + return +} + +// Retrieve The iSCSI LUN volumes being replicated by this network storage volume. +func (r Network_Storage) GetReplicatingLuns() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getReplicatingLuns", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage volume being replicated by a volume. +func (r Network_Storage) GetReplicatingVolume() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getReplicatingVolume", nil, &r.Options, &resp) + return +} + +// Retrieve The volume replication events. +func (r Network_Storage) GetReplicationEvents() (resp []datatypes.Network_Storage_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getReplicationEvents", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage volumes configured to be replicants of a volume. +func (r Network_Storage) GetReplicationPartners() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getReplicationPartners", nil, &r.Options, &resp) + return +} + +// Retrieve The Replication Schedule associated with a network storage volume. +func (r Network_Storage) GetReplicationSchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getReplicationSchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The current replication status of a network storage volume. Indicates Failover or Failback status. +func (r Network_Storage) GetReplicationStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getReplicationStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The schedules which are associated with a network storage volume. +func (r Network_Storage) GetSchedules() (resp []datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSchedules", nil, &r.Options, &resp) + return +} + +// Retrieve The network resource a Storage service is connected to. +func (r Network_Storage) GetServiceResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getServiceResource", nil, &r.Options, &resp) + return +} + +// Retrieve The IP address of a Storage resource. +func (r Network_Storage) GetServiceResourceBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getServiceResourceBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The name of a Storage's network resource. +func (r Network_Storage) GetServiceResourceName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getServiceResourceName", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured snapshot space size. 
+func (r Network_Storage) GetSnapshotCapacityGb() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSnapshotCapacityGb", nil, &r.Options, &resp) + return +} + +// Retrieve The creation timestamp of the snapshot on the storage platform. +func (r Network_Storage) GetSnapshotCreationTimestamp() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSnapshotCreationTimestamp", nil, &r.Options, &resp) + return +} + +// Retrieve The percentage of used snapshot space after which to delete automated snapshots. +func (r Network_Storage) GetSnapshotDeletionThresholdPercentage() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSnapshotDeletionThresholdPercentage", nil, &r.Options, &resp) + return +} + +// Retrieve The snapshot size in bytes. +func (r Network_Storage) GetSnapshotSizeBytes() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSnapshotSizeBytes", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's available snapshot reservation space. +func (r Network_Storage) GetSnapshotSpaceAvailable() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSnapshotSpaceAvailable", nil, &r.Options, &resp) + return +} + +// Retrieve The snapshots associated with this SoftLayer_Network_Storage volume. +func (r Network_Storage) GetSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSnapshots", nil, &r.Options, &resp) + return +} + +// Retrieves a list of snapshots for this SoftLayer_Network_Storage volume. This method works with the result limits and offset to support pagination. +func (r Network_Storage) GetSnapshotsForVolume() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSnapshotsForVolume", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage) GetStaasVersion() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getStaasVersion", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage groups this volume is attached to. +func (r Network_Storage) GetStorageGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getStorageGroups", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) GetStorageGroupsNetworkConnectionDetails() (resp []datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getStorageGroupsNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage) GetStorageTierLevel() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getStorageTierLevel", nil, &r.Options, &resp) + return +} + +// Retrieve A description of the Storage object. +func (r Network_Storage) GetStorageType() (resp datatypes.Network_Storage_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getStorageType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) GetTargetIpAddresses() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getTargetIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The amount of space used by the volume. 
+func (r Network_Storage) GetTotalBytesUsed() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getTotalBytesUsed", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total snapshot retention count of all schedules on this network storage volume.
+func (r Network_Storage) GetTotalScheduleSnapshotRetentionCount() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getTotalScheduleSnapshotRetentionCount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The usage notification for SL Storage services.
+func (r Network_Storage) GetUsageNotification() (resp datatypes.Notification, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getUsageNotification", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) GetValidReplicationTargetDatacenterLocations() (resp []datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getValidReplicationTargetDatacenterLocations", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The type of network storage service.
+func (r Network_Storage) GetVendorName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getVendorName", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve When applicable, the virtual guest associated with a Storage service.
+func (r Network_Storage) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getVirtualGuest", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns the parameters for cloning a volume.
+func (r Network_Storage) GetVolumeDuplicateParameters() (resp datatypes.Container_Network_Storage_VolumeDuplicateParameters, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getVolumeDuplicateParameters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The username and password history for a Storage service.
+func (r Network_Storage) GetVolumeHistory() (resp []datatypes.Network_Storage_History, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getVolumeHistory", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The current status of a network storage volume.
+func (r Network_Storage) GetVolumeStatus() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getVolumeStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account username and password for the EVault webCC interface.
+func (r Network_Storage) GetWebccAccount() (resp datatypes.Account_Password, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getWebccAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The Weekly Schedule which is associated with this network storage volume.
+func (r Network_Storage) GetWeeklySchedule() (resp datatypes.Network_Storage_Schedule, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getWeeklySchedule", nil, &r.Options, &resp)
+	return
+}
+
+// Immediate failover to a volume replicant. While the replicant is in use, the local NAS volume will not be available.
+func (r Network_Storage) ImmediateFailoverToReplicant(replicantId *int) (resp bool, err error) { + params := []interface{}{ + replicantId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "immediateFailoverToReplicant", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) IsBlockingOperationInProgress(exemptStatusKeyNames []string) (resp bool, err error) { + params := []interface{}{ + exemptStatusKeyNames, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "isBlockingOperationInProgress", params, &r.Options, &resp) + return +} + +// This method returns a boolean indicating whether the clone volume is ready for snapshot. +func (r Network_Storage) IsDuplicateReadyForSnapshot() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "isDuplicateReadyForSnapshot", nil, &r.Options, &resp) + return +} + +// This method returns a boolean indicating whether the clone volume is ready to mount. +func (r Network_Storage) IsDuplicateReadyToMount() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "isDuplicateReadyToMount", nil, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. +func (r Network_Storage) RemoveAccessFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromHardware", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. +func (r Network_Storage) RemoveAccessFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage) RemoveAccessFromHost(typeClassName *string, hostId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + typeClassName, + hostId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromHost", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. 
+func (r Network_Storage) RemoveAccessFromHostList(hostObjectTemplates []datatypes.Container_Network_Storage_Host) (resp []datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hostObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromHostList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage will be listed in the allowedIpAddresses property of this storage volume. +func (r Network_Storage) RemoveAccessFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromIpAddress", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) RemoveAccessFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromIpAddressList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) RemoveAccessFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromSubnet", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) RemoveAccessFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage) RemoveAccessFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromVirtualGuest", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage) RemoveAccessFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Hardware objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationHardware property of this storage volume. 
+func (r Network_Storage) RemoveAccessToReplicantFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessToReplicantFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationIpAddresses property of this storage volume. +func (r Network_Storage) RemoveAccessToReplicantFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessToReplicantFromIpAddressList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) RemoveAccessToReplicantFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessToReplicantFromSubnet", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationSubnets property of this storage volume. +func (r Network_Storage) RemoveAccessToReplicantFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessToReplicantFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationVirtualGuests property of this storage volume. +func (r Network_Storage) RemoveAccessToReplicantFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessToReplicantFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method will remove a credential from the current volume. The credential must have been created using the 'addNewCredential' method. +func (r Network_Storage) RemoveCredential(username *string) (resp bool, err error) { + params := []interface{}{ + username, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeCredential", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Restore an individual file so that it may be used as it was before it was deleted. +// +// If a file is deleted from a Virtual Server Storage account, the file is placed into the account's recycle bin and not permanently deleted. Therefore, restoreFile can be used to place the file back into your Virtual Server account's root directory. 
+func (r Network_Storage) RestoreFile(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) {
+	params := []interface{}{
+		fileId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "restoreFile", params, &r.Options, &resp)
+	return
+}
+
+// Restore the volume from a snapshot that was previously taken.
+func (r Network_Storage) RestoreFromSnapshot(snapshotId *int) (resp bool, err error) {
+	params := []interface{}{
+		snapshotId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "restoreFromSnapshot", params, &r.Options, &resp)
+	return
+}
+
+// The method will retrieve the password for the StorageLayer or Virtual Server Storage Account and email the password. The Storage Account passwords will be emailed to the master user. For Virtual Server Storage, the password will be sent to the email address used as the username.
+func (r Network_Storage) SendPasswordReminderEmail(username *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "sendPasswordReminderEmail", params, &r.Options, &resp)
+	return
+}
+
+// Enable or disable the mounting of a Storage volume. When mounting is enabled the Storage volume will be mountable or available for use.
+//
+// For Virtual Server volumes, disabling mounting will deny access to the Virtual Server Account, remove published material and deny all file interaction including uploads and downloads.
+//
+// Enabling or disabling mounting for Storage volumes is not possible if mounting has been disabled by SoftLayer or a parent account.
+func (r Network_Storage) SetMountable(mountable *bool) (resp bool, err error) {
+	params := []interface{}{
+		mountable,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "setMountable", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) SetSnapshotAllocation(capacityGb *int) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		capacityGb,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "setSnapshotAllocation", params, &r.Options, &resp)
+	return
+}
+
+// Upgrade the Storage volume to one of the upgradable packages (for example from 10 Gigs of EVault storage to 100 Gigs of EVault storage).
+func (r Network_Storage) UpgradeVolumeCapacity(itemId *int) (resp bool, err error) {
+	params := []interface{}{
+		itemId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "upgradeVolumeCapacity", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Upload a file to a Storage account's root directory. Once uploaded, this method returns a new file entity identifier for the uploaded file.
+//
+// The following properties are required in the ''file'' parameter.
+// *'''name''': The name of the file you wish to upload
+// *'''content''': The raw contents of the file you wish to upload.
+// *'''contentType''': The MIME-type of content that you wish to upload.
+func (r Network_Storage) UploadFile(file *datatypes.Container_Utility_File_Entity) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + file, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "uploadFile", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Allowed_Host struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageAllowedHostService returns an instance of the Network_Storage_Allowed_Host SoftLayer service +func GetNetworkStorageAllowedHostService(sess *session.Session) Network_Storage_Allowed_Host { + return Network_Storage_Allowed_Host{Session: sess} +} + +func (r Network_Storage_Allowed_Host) Id(id int) Network_Storage_Allowed_Host { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Allowed_Host) Mask(mask string) Network_Storage_Allowed_Host { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Allowed_Host) Filter(filter string) Network_Storage_Allowed_Host { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Allowed_Host) Limit(limit int) Network_Storage_Allowed_Host { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Allowed_Host) Offset(offset int) Network_Storage_Allowed_Host { + r.Options.Offset = &offset + return r +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Hardware object's id. +func (r Network_Storage_Allowed_Host) CreateFromHardware(hardwareId *int, iqn *string) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hardwareId, + iqn, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "createFromHardware", params, &r.Options, &resp) + return +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Network_Subnet_IpAddress object's id. +func (r Network_Storage_Allowed_Host) CreateFromIpAddress(ipAddressId *int, iqn *string) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + ipAddressId, + iqn, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "createFromIpAddress", params, &r.Options, &resp) + return +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Network_Subnet object's id. Allowed_Host objects created for SoftLayer_Network_Subnet objects do not support IQNs. +func (r Network_Storage_Allowed_Host) CreateFromSubnet(subnetId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "createFromSubnet", params, &r.Options, &resp) + return +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Virtual_Guest object's id. 
+func (r Network_Storage_Allowed_Host) CreateFromVirtualGuest(virtualGuestId *int, iqn *string) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + virtualGuestId, + iqn, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "createFromVirtualGuest", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host) EditObject(templateObject *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host) GetAllObjects() (resp []datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Group objects this SoftLayer_Network_Storage_Allowed_Host is present in. +func (r Network_Storage_Allowed_Host) GetAssignedGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "getAssignedGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. +func (r Network_Storage_Allowed_Host) GetAssignedIscsiVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "getAssignedIscsiVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. +func (r Network_Storage_Allowed_Host) GetAssignedNfsVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "getAssignedNfsVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage primary volumes whose replicas are allowed access. +func (r Network_Storage_Allowed_Host) GetAssignedReplicationVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "getAssignedReplicationVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. +func (r Network_Storage_Allowed_Host) GetAssignedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "getAssignedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Credential this allowed host uses. +func (r Network_Storage_Allowed_Host) GetCredential() (resp datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "getCredential", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host) GetObject() (resp datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Connections to a target with a source IP in this subnet prefix are allowed. 
+func (r Network_Storage_Allowed_Host) GetSourceSubnet() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "getSourceSubnet", nil, &r.Options, &resp) + return +} + +// Use this method to modify the credential password for a SoftLayer_Network_Storage_Allowed_Host object. +func (r Network_Storage_Allowed_Host) SetCredentialPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "setCredentialPassword", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Allowed_Host_Hardware struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageAllowedHostHardwareService returns an instance of the Network_Storage_Allowed_Host_Hardware SoftLayer service +func GetNetworkStorageAllowedHostHardwareService(sess *session.Session) Network_Storage_Allowed_Host_Hardware { + return Network_Storage_Allowed_Host_Hardware{Session: sess} +} + +func (r Network_Storage_Allowed_Host_Hardware) Id(id int) Network_Storage_Allowed_Host_Hardware { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Allowed_Host_Hardware) Mask(mask string) Network_Storage_Allowed_Host_Hardware { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Allowed_Host_Hardware) Filter(filter string) Network_Storage_Allowed_Host_Hardware { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Allowed_Host_Hardware) Limit(limit int) Network_Storage_Allowed_Host_Hardware { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Allowed_Host_Hardware) Offset(offset int) Network_Storage_Allowed_Host_Hardware { + r.Options.Offset = &offset + return r +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Hardware object's id. +func (r Network_Storage_Allowed_Host_Hardware) CreateFromHardware(hardwareId *int, iqn *string) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hardwareId, + iqn, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "createFromHardware", params, &r.Options, &resp) + return +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Network_Subnet_IpAddress object's id. +func (r Network_Storage_Allowed_Host_Hardware) CreateFromIpAddress(ipAddressId *int, iqn *string) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + ipAddressId, + iqn, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "createFromIpAddress", params, &r.Options, &resp) + return +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Network_Subnet object's id. Allowed_Host objects created for SoftLayer_Network_Subnet objects do not support IQNs. 
+func (r Network_Storage_Allowed_Host_Hardware) CreateFromSubnet(subnetId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "createFromSubnet", params, &r.Options, &resp) + return +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Virtual_Guest object's id. +func (r Network_Storage_Allowed_Host_Hardware) CreateFromVirtualGuest(virtualGuestId *int, iqn *string) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + virtualGuestId, + iqn, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "createFromVirtualGuest", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_Hardware) EditObject(templateObject *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Account object which this SoftLayer_Network_Storage_Allowed_Host belongs to. +func (r Network_Storage_Allowed_Host_Hardware) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_Hardware) GetAllObjects() (resp []datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Group objects this SoftLayer_Network_Storage_Allowed_Host is present in. +func (r Network_Storage_Allowed_Host_Hardware) GetAssignedGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getAssignedGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. +func (r Network_Storage_Allowed_Host_Hardware) GetAssignedIscsiVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getAssignedIscsiVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. +func (r Network_Storage_Allowed_Host_Hardware) GetAssignedNfsVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getAssignedNfsVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage primary volumes whose replicas are allowed access. +func (r Network_Storage_Allowed_Host_Hardware) GetAssignedReplicationVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getAssignedReplicationVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. 
+func (r Network_Storage_Allowed_Host_Hardware) GetAssignedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getAssignedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Credential this allowed host uses. +func (r Network_Storage_Allowed_Host_Hardware) GetCredential() (resp datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getCredential", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_Hardware) GetObject() (resp datatypes.Network_Storage_Allowed_Host_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Hardware object which this SoftLayer_Network_Storage_Allowed_Host is referencing. +func (r Network_Storage_Allowed_Host_Hardware) GetResource() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getResource", nil, &r.Options, &resp) + return +} + +// Retrieve Connections to a target with a source IP in this subnet prefix are allowed. +func (r Network_Storage_Allowed_Host_Hardware) GetSourceSubnet() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getSourceSubnet", nil, &r.Options, &resp) + return +} + +// Use this method to modify the credential password for a SoftLayer_Network_Storage_Allowed_Host object. +func (r Network_Storage_Allowed_Host_Hardware) SetCredentialPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "setCredentialPassword", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Allowed_Host_IpAddress struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageAllowedHostIpAddressService returns an instance of the Network_Storage_Allowed_Host_IpAddress SoftLayer service +func GetNetworkStorageAllowedHostIpAddressService(sess *session.Session) Network_Storage_Allowed_Host_IpAddress { + return Network_Storage_Allowed_Host_IpAddress{Session: sess} +} + +func (r Network_Storage_Allowed_Host_IpAddress) Id(id int) Network_Storage_Allowed_Host_IpAddress { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Allowed_Host_IpAddress) Mask(mask string) Network_Storage_Allowed_Host_IpAddress { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Allowed_Host_IpAddress) Filter(filter string) Network_Storage_Allowed_Host_IpAddress { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Allowed_Host_IpAddress) Limit(limit int) Network_Storage_Allowed_Host_IpAddress { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Allowed_Host_IpAddress) Offset(offset int) Network_Storage_Allowed_Host_IpAddress { + r.Options.Offset = &offset + return r +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Hardware object's id. 
+func (r Network_Storage_Allowed_Host_IpAddress) CreateFromHardware(hardwareId *int, iqn *string) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hardwareId, + iqn, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "createFromHardware", params, &r.Options, &resp) + return +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Network_Subnet_IpAddress object's id. +func (r Network_Storage_Allowed_Host_IpAddress) CreateFromIpAddress(ipAddressId *int, iqn *string) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + ipAddressId, + iqn, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "createFromIpAddress", params, &r.Options, &resp) + return +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Network_Subnet object's id. Allowed_Host objects created for SoftLayer_Network_Subnet objects do not support IQNs. +func (r Network_Storage_Allowed_Host_IpAddress) CreateFromSubnet(subnetId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "createFromSubnet", params, &r.Options, &resp) + return +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Virtual_Guest object's id. +func (r Network_Storage_Allowed_Host_IpAddress) CreateFromVirtualGuest(virtualGuestId *int, iqn *string) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + virtualGuestId, + iqn, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "createFromVirtualGuest", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_IpAddress) EditObject(templateObject *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Account object which this SoftLayer_Network_Storage_Allowed_Host belongs to. +func (r Network_Storage_Allowed_Host_IpAddress) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_IpAddress) GetAllObjects() (resp []datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Group objects this SoftLayer_Network_Storage_Allowed_Host is present in. +func (r Network_Storage_Allowed_Host_IpAddress) GetAssignedGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getAssignedGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. 
+func (r Network_Storage_Allowed_Host_IpAddress) GetAssignedIscsiVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getAssignedIscsiVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. +func (r Network_Storage_Allowed_Host_IpAddress) GetAssignedNfsVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getAssignedNfsVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage primary volumes whose replicas are allowed access. +func (r Network_Storage_Allowed_Host_IpAddress) GetAssignedReplicationVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getAssignedReplicationVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. +func (r Network_Storage_Allowed_Host_IpAddress) GetAssignedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getAssignedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Credential this allowed host uses. +func (r Network_Storage_Allowed_Host_IpAddress) GetCredential() (resp datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getCredential", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_IpAddress) GetObject() (resp datatypes.Network_Storage_Allowed_Host_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet_IpAddress object which this SoftLayer_Network_Storage_Allowed_Host is referencing. +func (r Network_Storage_Allowed_Host_IpAddress) GetResource() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getResource", nil, &r.Options, &resp) + return +} + +// Retrieve Connections to a target with a source IP in this subnet prefix are allowed. +func (r Network_Storage_Allowed_Host_IpAddress) GetSourceSubnet() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getSourceSubnet", nil, &r.Options, &resp) + return +} + +// Use this method to modify the credential password for a SoftLayer_Network_Storage_Allowed_Host object. 
+func (r Network_Storage_Allowed_Host_IpAddress) SetCredentialPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "setCredentialPassword", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Allowed_Host_Subnet struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageAllowedHostSubnetService returns an instance of the Network_Storage_Allowed_Host_Subnet SoftLayer service +func GetNetworkStorageAllowedHostSubnetService(sess *session.Session) Network_Storage_Allowed_Host_Subnet { + return Network_Storage_Allowed_Host_Subnet{Session: sess} +} + +func (r Network_Storage_Allowed_Host_Subnet) Id(id int) Network_Storage_Allowed_Host_Subnet { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Allowed_Host_Subnet) Mask(mask string) Network_Storage_Allowed_Host_Subnet { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Allowed_Host_Subnet) Filter(filter string) Network_Storage_Allowed_Host_Subnet { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Allowed_Host_Subnet) Limit(limit int) Network_Storage_Allowed_Host_Subnet { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Allowed_Host_Subnet) Offset(offset int) Network_Storage_Allowed_Host_Subnet { + r.Options.Offset = &offset + return r +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Hardware object's id. +func (r Network_Storage_Allowed_Host_Subnet) CreateFromHardware(hardwareId *int, iqn *string) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hardwareId, + iqn, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "createFromHardware", params, &r.Options, &resp) + return +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Network_Subnet_IpAddress object's id. +func (r Network_Storage_Allowed_Host_Subnet) CreateFromIpAddress(ipAddressId *int, iqn *string) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + ipAddressId, + iqn, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "createFromIpAddress", params, &r.Options, &resp) + return +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Network_Subnet object's id. Allowed_Host objects created for SoftLayer_Network_Subnet objects do not support IQNs. +func (r Network_Storage_Allowed_Host_Subnet) CreateFromSubnet(subnetId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "createFromSubnet", params, &r.Options, &resp) + return +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Virtual_Guest object's id. 
+func (r Network_Storage_Allowed_Host_Subnet) CreateFromVirtualGuest(virtualGuestId *int, iqn *string) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + virtualGuestId, + iqn, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "createFromVirtualGuest", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_Subnet) EditObject(templateObject *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Account object which this SoftLayer_Network_Storage_Allowed_Host belongs to. +func (r Network_Storage_Allowed_Host_Subnet) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_Subnet) GetAllObjects() (resp []datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Group objects this SoftLayer_Network_Storage_Allowed_Host is present in. +func (r Network_Storage_Allowed_Host_Subnet) GetAssignedGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getAssignedGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. +func (r Network_Storage_Allowed_Host_Subnet) GetAssignedIscsiVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getAssignedIscsiVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. +func (r Network_Storage_Allowed_Host_Subnet) GetAssignedNfsVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getAssignedNfsVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage primary volumes whose replicas are allowed access. +func (r Network_Storage_Allowed_Host_Subnet) GetAssignedReplicationVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getAssignedReplicationVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. +func (r Network_Storage_Allowed_Host_Subnet) GetAssignedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getAssignedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Credential this allowed host uses. 
+func (r Network_Storage_Allowed_Host_Subnet) GetCredential() (resp datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getCredential", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_Subnet) GetObject() (resp datatypes.Network_Storage_Allowed_Host_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet object which this SoftLayer_Network_Storage_Allowed_Host is referencing. +func (r Network_Storage_Allowed_Host_Subnet) GetResource() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getResource", nil, &r.Options, &resp) + return +} + +// Retrieve Connections to a target with a source IP in this subnet prefix are allowed. +func (r Network_Storage_Allowed_Host_Subnet) GetSourceSubnet() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getSourceSubnet", nil, &r.Options, &resp) + return +} + +// Use this method to modify the credential password for a SoftLayer_Network_Storage_Allowed_Host object. +func (r Network_Storage_Allowed_Host_Subnet) SetCredentialPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "setCredentialPassword", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Allowed_Host_VirtualGuest struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageAllowedHostVirtualGuestService returns an instance of the Network_Storage_Allowed_Host_VirtualGuest SoftLayer service +func GetNetworkStorageAllowedHostVirtualGuestService(sess *session.Session) Network_Storage_Allowed_Host_VirtualGuest { + return Network_Storage_Allowed_Host_VirtualGuest{Session: sess} +} + +func (r Network_Storage_Allowed_Host_VirtualGuest) Id(id int) Network_Storage_Allowed_Host_VirtualGuest { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Allowed_Host_VirtualGuest) Mask(mask string) Network_Storage_Allowed_Host_VirtualGuest { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Allowed_Host_VirtualGuest) Filter(filter string) Network_Storage_Allowed_Host_VirtualGuest { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Allowed_Host_VirtualGuest) Limit(limit int) Network_Storage_Allowed_Host_VirtualGuest { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Allowed_Host_VirtualGuest) Offset(offset int) Network_Storage_Allowed_Host_VirtualGuest { + r.Options.Offset = &offset + return r +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Hardware object's id. 
+func (r Network_Storage_Allowed_Host_VirtualGuest) CreateFromHardware(hardwareId *int, iqn *string) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hardwareId, + iqn, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "createFromHardware", params, &r.Options, &resp) + return +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Network_Subnet_IpAddress object's id. +func (r Network_Storage_Allowed_Host_VirtualGuest) CreateFromIpAddress(ipAddressId *int, iqn *string) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + ipAddressId, + iqn, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "createFromIpAddress", params, &r.Options, &resp) + return +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Network_Subnet object's id. Allowed_Host objects created for SoftLayer_Network_Subnet objects do not support IQNs. +func (r Network_Storage_Allowed_Host_VirtualGuest) CreateFromSubnet(subnetId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "createFromSubnet", params, &r.Options, &resp) + return +} + +// This method is used to create a new SoftLayer_Network_Storage_Allowed_Host using an existing SoftLayer_Virtual_Guest object's id. +func (r Network_Storage_Allowed_Host_VirtualGuest) CreateFromVirtualGuest(virtualGuestId *int, iqn *string) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + virtualGuestId, + iqn, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "createFromVirtualGuest", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_VirtualGuest) EditObject(templateObject *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Account object which this SoftLayer_Network_Storage_Allowed_Host belongs to. +func (r Network_Storage_Allowed_Host_VirtualGuest) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_VirtualGuest) GetAllObjects() (resp []datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Group objects this SoftLayer_Network_Storage_Allowed_Host is present in. +func (r Network_Storage_Allowed_Host_VirtualGuest) GetAssignedGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getAssignedGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. 
+func (r Network_Storage_Allowed_Host_VirtualGuest) GetAssignedIscsiVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getAssignedIscsiVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. +func (r Network_Storage_Allowed_Host_VirtualGuest) GetAssignedNfsVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getAssignedNfsVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage primary volumes whose replicas are allowed access. +func (r Network_Storage_Allowed_Host_VirtualGuest) GetAssignedReplicationVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getAssignedReplicationVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. +func (r Network_Storage_Allowed_Host_VirtualGuest) GetAssignedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getAssignedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Credential this allowed host uses. +func (r Network_Storage_Allowed_Host_VirtualGuest) GetCredential() (resp datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getCredential", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_VirtualGuest) GetObject() (resp datatypes.Network_Storage_Allowed_Host_VirtualGuest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Virtual_Guest object which this SoftLayer_Network_Storage_Allowed_Host is referencing. +func (r Network_Storage_Allowed_Host_VirtualGuest) GetResource() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getResource", nil, &r.Options, &resp) + return +} + +// Retrieve Connections to a target with a source IP in this subnet prefix are allowed. +func (r Network_Storage_Allowed_Host_VirtualGuest) GetSourceSubnet() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getSourceSubnet", nil, &r.Options, &resp) + return +} + +// Use this method to modify the credential password for a SoftLayer_Network_Storage_Allowed_Host object. +func (r Network_Storage_Allowed_Host_VirtualGuest) SetCredentialPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "setCredentialPassword", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Storage_Backup_Evault contains general information regarding an EVault Storage service such as account id, username, maximum capacity, password, Storage's product type and the server id. 
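+//
+// A minimal sketch of scoping this service to a particular volume (sess is assumed to be an
+// authenticated *session.Session; 123456 is a placeholder volume id):
+//
+//	svc := GetNetworkStorageBackupEvaultService(sess).Id(123456).Mask("id,username")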
+type Network_Storage_Backup_Evault struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageBackupEvaultService returns an instance of the Network_Storage_Backup_Evault SoftLayer service +func GetNetworkStorageBackupEvaultService(sess *session.Session) Network_Storage_Backup_Evault { + return Network_Storage_Backup_Evault{Session: sess} +} + +func (r Network_Storage_Backup_Evault) Id(id int) Network_Storage_Backup_Evault { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Backup_Evault) Mask(mask string) Network_Storage_Backup_Evault { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Backup_Evault) Filter(filter string) Network_Storage_Backup_Evault { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Backup_Evault) Limit(limit int) Network_Storage_Backup_Evault { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Backup_Evault) Offset(offset int) Network_Storage_Backup_Evault { + r.Options.Offset = &offset + return r +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromHardware", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) AllowAccessFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessFromHost(typeClassName *string, hostId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + typeClassName, + hostId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromHost", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage volume will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. 
+func (r Network_Storage_Backup_Evault) AllowAccessFromHostList(hostObjectTemplates []datatypes.Container_Network_Storage_Host) (resp []datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hostObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromHostList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage will be listed in the allowedIpAddresses property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromIpAddress", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) AllowAccessFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromIpAddressList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromSubnet", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) AllowAccessFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromVirtualGuest", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. 
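+//
+// A hedged sketch (assumes datatypes.Virtual_Guest exposes an Id pointer field; volumeId and the
+// guest ids are placeholders):
+//
+//	id1, id2 := 111111, 222222
+//	guests := []datatypes.Virtual_Guest{{Id: &id1}, {Id: &id2}}
+//	ok, err := GetNetworkStorageBackupEvaultService(sess).Id(volumeId).AllowAccessFromVirtualGuestList(guests)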
+func (r Network_Storage_Backup_Evault) AllowAccessFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage replicant volume. +func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromHardware", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Hardware objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationHardware property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromHardwareList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromIpAddress", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationIpAddresses property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromIpAddressList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Network_Subnet objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage replicant volume. +func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromSubnet", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationSubnets property of this storage volume. 
+func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage replicant volume. +func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromVirtualGuest", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationVirtualGuests property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method will assign an existing credential to the current volume. The credential must have been created using the 'addNewCredential' method. The volume type must support an additional credential. +func (r Network_Storage_Backup_Evault) AssignCredential(username *string) (resp bool, err error) { + params := []interface{}{ + username, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "assignCredential", params, &r.Options, &resp) + return +} + +// This method will set up a new credential for the remote storage volume. The storage volume must support an additional credential. Once created, the credential will be automatically assigned to the current volume. If there are no volumes assigned to the credential it will be automatically deleted. +func (r Network_Storage_Backup_Evault) AssignNewCredential(typ *string) (resp datatypes.Network_Storage_Credential, err error) { + params := []interface{}{ + typ, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "assignNewCredential", params, &r.Options, &resp) + return +} + +// The method will change the password for the given Storage/Virtual Server Storage account. +func (r Network_Storage_Backup_Evault) ChangePassword(username *string, currentPassword *string, newPassword *string) (resp bool, err error) { + params := []interface{}{ + username, + currentPassword, + newPassword, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "changePassword", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} +// +// collectBandwidth() Retrieve the bandwidth usage for the current billing cycle. 
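+//
+// Sketch (start and end are assumed to be *datatypes.Time values bounding the period of interest;
+// volumeId and the bandwidth type string are placeholders):
+//
+//	typ := "upload"
+//	usage, err := GetNetworkStorageBackupEvaultService(sess).Id(volumeId).CollectBandwidth(&typ, start, end)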
+func (r Network_Storage_Backup_Evault) CollectBandwidth(typ *string, startDate *datatypes.Time, endDate *datatypes.Time) (resp uint, err error) { + params := []interface{}{ + typ, + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "collectBandwidth", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} +// +// collectBytesUsed() retrieves the number of bytes capacity currently in use on a Storage account. +func (r Network_Storage_Backup_Evault) CollectBytesUsed() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "collectBytesUsed", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) CreateFolder(folder *string) (resp bool, err error) { + params := []interface{}{ + folder, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "createFolder", params, &r.Options, &resp) + return +} + +// The LUN ID only takes effect during the Host Authorization process. It is required to de-authorize all hosts before using this method. +func (r Network_Storage_Backup_Evault) CreateOrUpdateLunId(lunId *int) (resp datatypes.Network_Storage_Property, err error) { + params := []interface{}{ + lunId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "createOrUpdateLunId", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) CreateSnapshot(notes *string) (resp datatypes.Network_Storage, err error) { + params := []interface{}{ + notes, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "createSnapshot", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Delete all files within a Storage account. Depending on the type of Storage account, Deleting either deletes files permanently or sends files to your account's recycle bin. +// +// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account , this method also determines if the files are in the account's recycle bin. If the files exist in the recycle bin, then they are permanently deleted. +// +// Please note, files can not be restored once they are permanently deleted. +func (r Network_Storage_Backup_Evault) DeleteAllFiles() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "deleteAllFiles", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Delete an individual file within a Storage account. Depending on the type of Storage account, Deleting a file either deletes the file permanently or sends the file to your account's recycle bin. +// +// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account , this method also determines if the file is in the account's recycle bin. If the file exist in the recycle bin, then it is permanently deleted. +// +// Please note, a file can not be restored once it is permanently deleted. +func (r Network_Storage_Backup_Evault) DeleteFile(fileId *string) (resp bool, err error) { + params := []interface{}{ + fileId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "deleteFile", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Delete multiple files within a Storage account. 
Depending on the type of Storage account, Deleting either deletes files permanently or sends files to your account's recycle bin. +// +// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account , this method also determines if the files are in the account's recycle bin. If the files exist in the recycle bin, then they are permanently deleted. +// +// Please note, files can not be restored once they are permanently deleted. +func (r Network_Storage_Backup_Evault) DeleteFiles(fileIds []string) (resp bool, err error) { + params := []interface{}{ + fileIds, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "deleteFiles", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) DeleteFolder(folder *string) (resp bool, err error) { + params := []interface{}{ + folder, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "deleteFolder", params, &r.Options, &resp) + return +} + +// Delete a network storage volume. '''This cannot be undone.''' At this time only network storage snapshots may be deleted with this method. +// +// ''deleteObject'' returns Boolean ''true'' on successful deletion or ''false'' if it was unable to remove a volume; +func (r Network_Storage_Backup_Evault) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "deleteObject", nil, &r.Options, &resp) + return +} + +// This method can be used to help maintain the storage space on a vault. When a job is removed from the Webcc, the task and stored usage still exists on the vault. This method can be used to delete the associated task and its usage. +// +// All that is required for the use of the method is to pass in an integer array of task(s). +// +// +func (r Network_Storage_Backup_Evault) DeleteTasks(tasks []int) (resp bool, err error) { + params := []interface{}{ + tasks, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "deleteTasks", params, &r.Options, &resp) + return +} + +// This method is not valid for Legacy iSCSI Storage Volumes. +// +// Disable scheduled snapshots of this storage volume. Scheduling options include 'INTERVAL', HOURLY, DAILY and WEEKLY schedules. +func (r Network_Storage_Backup_Evault) DisableSnapshots(scheduleType *string) (resp bool, err error) { + params := []interface{}{ + scheduleType, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "disableSnapshots", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Download a file from a Storage account. This method returns a file's details including the file's raw content. +func (r Network_Storage_Backup_Evault) DownloadFile(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + fileId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "downloadFile", params, &r.Options, &resp) + return +} + +// This method will change the password of a credential created using the 'addNewCredential' method. If the credential exists on multiple storage volumes it will change for those volumes as well. 
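+//
+// Sketch (volumeId, the credential username, and the new password are placeholders):
+//
+//	user, pass := "SL01234-1", "n3w-p4ssw0rd"
+//	ok, err := GetNetworkStorageBackupEvaultService(sess).Id(volumeId).EditCredential(&user, &pass)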
+func (r Network_Storage_Backup_Evault) EditCredential(username *string, newPassword *string) (resp bool, err error) { + params := []interface{}{ + username, + newPassword, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "editCredential", params, &r.Options, &resp) + return +} + +// The password and/or notes may be modified for the Storage service except evault passwords and notes. +func (r Network_Storage_Backup_Evault) EditObject(templateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "editObject", params, &r.Options, &resp) + return +} + +// This method is not valid for Legacy iSCSI Storage Volumes. +// +// Enable scheduled snapshots of this storage volume. Scheduling options include HOURLY, DAILY and WEEKLY schedules. For HOURLY schedules, provide relevant data for $scheduleType, $retentionCount and $minute. For DAILY schedules, provide relevant data for $scheduleType, $retentionCount, $minute, and $hour. For WEEKLY schedules, provide relevant data for all parameters of this method. +func (r Network_Storage_Backup_Evault) EnableSnapshots(scheduleType *string, retentionCount *int, minute *int, hour *int, dayOfWeek *string) (resp bool, err error) { + params := []interface{}{ + scheduleType, + retentionCount, + minute, + hour, + dayOfWeek, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "enableSnapshots", params, &r.Options, &resp) + return +} + +// Failback from a volume replicant. In order to failback the volume must have already been failed over to a replicant. +func (r Network_Storage_Backup_Evault) FailbackFromReplicant() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "failbackFromReplicant", nil, &r.Options, &resp) + return +} + +// Failover to a volume replicant. During the time which the replicant is in use the local nas volume will not be available. +func (r Network_Storage_Backup_Evault) FailoverToReplicant(replicantId *int) (resp bool, err error) { + params := []interface{}{ + replicantId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "failoverToReplicant", params, &r.Options, &resp) + return +} + +// Retrieve The account that a Storage services belongs to. +func (r Network_Storage_Backup_Evault) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve Other usernames and passwords associated with a Storage volume. +func (r Network_Storage_Backup_Evault) GetAccountPassword() (resp datatypes.Account_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAccountPassword", nil, &r.Options, &resp) + return +} + +// Retrieve The currently active transactions on a network storage volume. +func (r Network_Storage_Backup_Evault) GetActiveTransactions() (resp []datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getActiveTransactions", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date for all files in a Storage account's root directory. This does not download file content. 
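+//
+// Sketch (volumeId is a placeholder; only file metadata is returned, not file content):
+//
+//	files, err := GetNetworkStorageBackupEvaultService(sess).Id(volumeId).GetAllFiles()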
+func (r Network_Storage_Backup_Evault) GetAllFiles() (resp []datatypes.Container_Utility_File_Entity, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllFiles", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date for all files matching the filter's criteria in a Storage account's root directory. This does not download file content. +func (r Network_Storage_Backup_Evault) GetAllFilesByFilter(filter *datatypes.Container_Utility_File_Entity) (resp []datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + filter, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllFilesByFilter", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Hardware that can be authorized to this SoftLayer_Network_Storage. +func (r Network_Storage_Backup_Evault) GetAllowableHardware(filterHostname *string) (resp []datatypes.Hardware, err error) { + params := []interface{}{ + filterHostname, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowableHardware", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Subnet_IpAddress that can be authorized to this SoftLayer_Network_Storage. +func (r Network_Storage_Backup_Evault) GetAllowableIpAddresses(subnetId *int, filterIpAddress *string) (resp []datatypes.Network_Subnet_IpAddress, err error) { + params := []interface{}{ + subnetId, + filterIpAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowableIpAddresses", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Subnet that can be authorized to this SoftLayer_Network_Storage. +func (r Network_Storage_Backup_Evault) GetAllowableSubnets(filterNetworkIdentifier *string) (resp []datatypes.Network_Subnet, err error) { + params := []interface{}{ + filterNetworkIdentifier, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowableSubnets", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Virtual_Guest that can be authorized to this SoftLayer_Network_Storage. +func (r Network_Storage_Backup_Evault) GetAllowableVirtualGuests(filterHostname *string) (resp []datatypes.Virtual_Guest, err error) { + params := []interface{}{ + filterHostname, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowableVirtualGuests", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Hardware objects which are allowed access to this storage volume. +func (r Network_Storage_Backup_Evault) GetAllowedHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedHardware", nil, &r.Options, &resp) + return +} + +// Retrieves the total number of allowed hosts limit per volume. +func (r Network_Storage_Backup_Evault) GetAllowedHostsLimit() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedHostsLimit", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume. 
+func (r Network_Storage_Backup_Evault) GetAllowedIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Hardware objects which are allowed access to this storage volume's Replicant. +func (r Network_Storage_Backup_Evault) GetAllowedReplicationHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedReplicationHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume's Replicant. +func (r Network_Storage_Backup_Evault) GetAllowedReplicationIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedReplicationIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet objects which are allowed access to this storage volume's Replicant. +func (r Network_Storage_Backup_Evault) GetAllowedReplicationSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedReplicationSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Hardware objects which are allowed access to this storage volume's Replicant. +func (r Network_Storage_Backup_Evault) GetAllowedReplicationVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedReplicationVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet objects which are allowed access to this storage volume. +func (r Network_Storage_Backup_Evault) GetAllowedSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Virtual_Guest objects which are allowed access to this storage volume. +func (r Network_Storage_Backup_Evault) GetAllowedVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for a Storage volume. +func (r Network_Storage_Backup_Evault) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Backup_Evault) GetBillingItemCategory() (resp datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getBillingItemCategory", nil, &r.Options, &resp) + return +} + +// Retrieve network storage accounts by username and storage account type. Use this method if you wish to retrieve a storage record by username rather than by id. The ''type'' parameter must correspond to one of the available ''nasType'' values in the SoftLayer_Network_Storage data type. 
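+//
+// Sketch (the username is a placeholder and "EVAULT" is assumed to be one of the valid nasType values):
+//
+//	user, nasType := "SL01234-1", "EVAULT"
+//	accounts, err := GetNetworkStorageBackupEvaultService(sess).GetByUsername(&user, &nasType)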
+func (r Network_Storage_Backup_Evault) GetByUsername(username *string, typ *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + username, + typ, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getByUsername", params, &r.Options, &resp) + return +} + +// Retrieve The amount of space used by the volume, in bytes. +func (r Network_Storage_Backup_Evault) GetBytesUsed() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getBytesUsed", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetCdnUrls() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_ContentDeliveryUrl, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getCdnUrls", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetClusterResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getClusterResource", nil, &r.Options, &resp) + return +} + +// Retrieve The schedule id which was executed to create a snapshot. +func (r Network_Storage_Backup_Evault) GetCreationScheduleId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getCreationScheduleId", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Backup_Evault) GetCredentials() (resp []datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getCredentials", nil, &r.Options, &resp) + return +} + +// Retrieve The Daily Schedule which is associated with this network storage volume. +func (r Network_Storage_Backup_Evault) GetDailySchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getDailySchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The events which have taken place on a network storage volume. +func (r Network_Storage_Backup_Evault) GetEvents() (resp []datatypes.Network_Storage_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getEvents", nil, &r.Options, &resp) + return +} + +// +// +// +func (r Network_Storage_Backup_Evault) GetFileBlockEncryptedLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFileBlockEncryptedLocations", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date of a file within a Storage account. This does not download file content. +func (r Network_Storage_Backup_Evault) GetFileByIdentifier(identifier *string) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + identifier, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFileByIdentifier", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve the file number of files in a Virtual Server Storage account's root directory. This does not include the files stored in the recycle bin. 
+func (r Network_Storage_Backup_Evault) GetFileCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFileCount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetFileList(folder *string, path *string) (resp []datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + folder, + path, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFileList", params, &r.Options, &resp) + return +} + +// Retrieve Retrieves the NFS Network Mount Address Name for a given File Storage Volume. +func (r Network_Storage_Backup_Evault) GetFileNetworkMountAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFileNetworkMountAddress", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve the number of files pending deletion in a Storage account's recycle bin. Files in an account's recycle bin may either be restored to the account's root directory or permanently deleted. +func (r Network_Storage_Backup_Evault) GetFilePendingDeleteCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFilePendingDeleteCount", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve a list of files that are pending deletion in a Storage account's recycle bin. Files in an account's recycle bin may either be restored to the account's root directory or permanently deleted. This method does not download file content. +func (r Network_Storage_Backup_Evault) GetFilesPendingDelete() (resp []datatypes.Container_Utility_File_Entity, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFilesPendingDelete", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetFolderList() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_Folder, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFolderList", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} +// +// getGraph() retrieves a Storage account's usage and returns a PNG graph image, title, and the minimum and maximum dates included in the graphed date range. Virtual Server storage accounts can also graph upload and download bandwidth usage. +func (r Network_Storage_Backup_Evault) GetGraph(startDate *datatypes.Time, endDate *datatypes.Time, typ *string) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + startDate, + endDate, + typ, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getGraph", params, &r.Options, &resp) + return +} + +// Retrieve When applicable, the hardware associated with a Storage service. +func (r Network_Storage_Backup_Evault) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve a list of hardware associated with a SoftLayer customer account, placing all hardware with associated EVault storage accounts at the beginning of the list. The return type is SoftLayer_Hardware_Server[] contains the results; the number of items returned in the result will be returned in the soap header (totalItems). ''getHardwareWithEvaultFirst'' is useful in situations where you wish to search for hardware and provide paginated output. 
+// +// +// +// +// +// Results are only returned for hardware belonging to the account of the user making the API call. +// +// This method drives the backup page of the SoftLayer customer portal. It serves a very specific function, but we have exposed it as it may prove useful for API developers too. +func (r Network_Storage_Backup_Evault) GetHardwareWithEvaultFirst(option *string, exactMatch *bool, criteria *string, mode *string) (resp []datatypes.Hardware, err error) { + params := []interface{}{ + option, + exactMatch, + criteria, + mode, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getHardwareWithEvaultFirst", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Backup_Evault) GetHasEncryptionAtRest() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getHasEncryptionAtRest", nil, &r.Options, &resp) + return +} + +// Retrieve The Hourly Schedule which is associated with this network storage volume. +func (r Network_Storage_Backup_Evault) GetHourlySchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getHourlySchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The Interval Schedule which is associated with this network storage volume. +func (r Network_Storage_Backup_Evault) GetIntervalSchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getIntervalSchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The maximum number of IOPs selected for this volume. +func (r Network_Storage_Backup_Evault) GetIops() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getIops", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether a volume is ready to order snapshot space, or, if snapshot space is already available, to assign a snapshot schedule, or to take a manual snapshot. +func (r Network_Storage_Backup_Evault) GetIsReadyForSnapshot() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getIsReadyForSnapshot", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether a volume is ready to have Hosts authorized to access it. This does not indicate whether another operation may be blocking, please refer to this volume's volumeStatus property for details. +func (r Network_Storage_Backup_Evault) GetIsReadyToMount() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getIsReadyToMount", nil, &r.Options, &resp) + return +} + +// Retrieve Relationship between a container volume and iSCSI LUNs. +func (r Network_Storage_Backup_Evault) GetIscsiLuns() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getIscsiLuns", nil, &r.Options, &resp) + return +} + +// Retrieve Returns the target IP addresses of an iSCSI volume. +func (r Network_Storage_Backup_Evault) GetIscsiTargetIpAddresses() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getIscsiTargetIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The ID of the LUN volume. 
+func (r Network_Storage_Backup_Evault) GetLunId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getLunId", nil, &r.Options, &resp) + return +} + +// Retrieve The manually-created snapshots associated with this SoftLayer_Network_Storage volume. Does not support pagination by result limit and offset. +func (r Network_Storage_Backup_Evault) GetManualSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getManualSnapshots", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetMaximumExpansionSize() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getMaximumExpansionSize", nil, &r.Options, &resp) + return +} + +// Retrieve A network storage volume's metric tracking object. This object records all periodic polled data available to this volume. +func (r Network_Storage_Backup_Evault) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a network storage volume may be mounted. +func (r Network_Storage_Backup_Evault) GetMountableFlag() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getMountableFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The current status of split or move operation as a part of volume duplication. +func (r Network_Storage_Backup_Evault) GetMoveAndSplitStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getMoveAndSplitStatus", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetNetworkConnectionDetails() (resp datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetNetworkMountAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getNetworkMountAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The subscribers that will be notified for usage amount warnings and overages. +func (r Network_Storage_Backup_Evault) GetNotificationSubscribers() (resp []datatypes.Notification_User_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getNotificationSubscribers", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Storage_Backup_Evault object whose ID corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Storage_Backup_Evault service. 
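+//
+// Sketch (123456 stands in for a real EVault volume id):
+//
+//	evault, err := GetNetworkStorageBackupEvaultService(sess).Id(123456).GetObject()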
+func (r Network_Storage_Backup_Evault) GetObject() (resp datatypes.Network_Storage_Backup_Evault, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetObjectStorageConnectionInformation() (resp []datatypes.Container_Network_Service_Resource_ObjectStorage_ConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getObjectStorageConnectionInformation", nil, &r.Options, &resp) + return +} + +// Retrieve network storage accounts by SoftLayer_Network_Storage_Credential object. Use this method if you wish to retrieve a storage record by a credential rather than by id. +func (r Network_Storage_Backup_Evault) GetObjectsByCredential(credentialObject *datatypes.Network_Storage_Credential) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + credentialObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getObjectsByCredential", params, &r.Options, &resp) + return +} + +// Retrieve The name of the snapshot that this volume was duplicated from. +func (r Network_Storage_Backup_Evault) GetOriginalSnapshotName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getOriginalSnapshotName", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the volume that this volume was duplicated from. +func (r Network_Storage_Backup_Evault) GetOriginalVolumeName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getOriginalVolumeName", nil, &r.Options, &resp) + return +} + +// Retrieve The size (in GB) of the volume or LUN before any size expansion, or of the volume (before any possible size expansion) from which the duplicate volume or LUN was created. +func (r Network_Storage_Backup_Evault) GetOriginalVolumeSize() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getOriginalVolumeSize", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type. +func (r Network_Storage_Backup_Evault) GetOsType() (resp datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getOsType", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type ID. +func (r Network_Storage_Backup_Evault) GetOsTypeId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getOsTypeId", nil, &r.Options, &resp) + return +} + +// Retrieve The volumes or snapshots partnered with a network storage volume in a parental role. +func (r Network_Storage_Backup_Evault) GetParentPartnerships() (resp []datatypes.Network_Storage_Partnership, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getParentPartnerships", nil, &r.Options, &resp) + return +} + +// Retrieve The parent volume of a volume in a complex storage relationship. +func (r Network_Storage_Backup_Evault) GetParentVolume() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getParentVolume", nil, &r.Options, &resp) + return +} + +// Retrieve The volumes or snapshots partnered with a network storage volume. 
+func (r Network_Storage_Backup_Evault) GetPartnerships() (resp []datatypes.Network_Storage_Partnership, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getPartnerships", nil, &r.Options, &resp) + return +} + +// Retrieve All permissions group(s) this volume is in. +func (r Network_Storage_Backup_Evault) GetPermissionsGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getPermissionsGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The properties used to provide additional details about a network storage volume. +func (r Network_Storage_Backup_Evault) GetProperties() (resp []datatypes.Network_Storage_Property, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getProperties", nil, &r.Options, &resp) + return +} + +// Retrieve The number of IOPs provisioned for this volume. +func (r Network_Storage_Backup_Evault) GetProvisionedIops() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getProvisionedIops", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve the details of a file that is pending deletion in a Storage account's a recycle bin. +func (r Network_Storage_Backup_Evault) GetRecycleBinFileByIdentifier(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + fileId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getRecycleBinFileByIdentifier", params, &r.Options, &resp) + return +} + +// Retrieves the remaining number of allowed hosts per volume. +func (r Network_Storage_Backup_Evault) GetRemainingAllowedHosts() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getRemainingAllowedHosts", nil, &r.Options, &resp) + return +} + +// Retrieves the remaining number of allowed hosts for a volume's replicant. +func (r Network_Storage_Backup_Evault) GetRemainingAllowedHostsForReplicant() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getRemainingAllowedHostsForReplicant", nil, &r.Options, &resp) + return +} + +// Retrieve The iSCSI LUN volumes being replicated by this network storage volume. +func (r Network_Storage_Backup_Evault) GetReplicatingLuns() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getReplicatingLuns", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage volume being replicated by a volume. +func (r Network_Storage_Backup_Evault) GetReplicatingVolume() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getReplicatingVolume", nil, &r.Options, &resp) + return +} + +// Retrieve The volume replication events. +func (r Network_Storage_Backup_Evault) GetReplicationEvents() (resp []datatypes.Network_Storage_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getReplicationEvents", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage volumes configured to be replicants of a volume. 
+func (r Network_Storage_Backup_Evault) GetReplicationPartners() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getReplicationPartners", nil, &r.Options, &resp) + return +} + +// Retrieve The Replication Schedule associated with a network storage volume. +func (r Network_Storage_Backup_Evault) GetReplicationSchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getReplicationSchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The current replication status of a network storage volume. Indicates Failover or Failback status. +func (r Network_Storage_Backup_Evault) GetReplicationStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getReplicationStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The schedules which are associated with a network storage volume. +func (r Network_Storage_Backup_Evault) GetSchedules() (resp []datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSchedules", nil, &r.Options, &resp) + return +} + +// Retrieve The network resource a Storage service is connected to. +func (r Network_Storage_Backup_Evault) GetServiceResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getServiceResource", nil, &r.Options, &resp) + return +} + +// Retrieve The IP address of a Storage resource. +func (r Network_Storage_Backup_Evault) GetServiceResourceBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getServiceResourceBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The name of a Storage's network resource. +func (r Network_Storage_Backup_Evault) GetServiceResourceName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getServiceResourceName", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured snapshot space size. +func (r Network_Storage_Backup_Evault) GetSnapshotCapacityGb() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSnapshotCapacityGb", nil, &r.Options, &resp) + return +} + +// Retrieve The creation timestamp of the snapshot on the storage platform. +func (r Network_Storage_Backup_Evault) GetSnapshotCreationTimestamp() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSnapshotCreationTimestamp", nil, &r.Options, &resp) + return +} + +// Retrieve The percentage of used snapshot space after which to delete automated snapshots. +func (r Network_Storage_Backup_Evault) GetSnapshotDeletionThresholdPercentage() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSnapshotDeletionThresholdPercentage", nil, &r.Options, &resp) + return +} + +// Retrieve The snapshot size in bytes. +func (r Network_Storage_Backup_Evault) GetSnapshotSizeBytes() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSnapshotSizeBytes", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's available snapshot reservation space. 
+func (r Network_Storage_Backup_Evault) GetSnapshotSpaceAvailable() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSnapshotSpaceAvailable", nil, &r.Options, &resp) + return +} + +// Retrieve The snapshots associated with this SoftLayer_Network_Storage volume. +func (r Network_Storage_Backup_Evault) GetSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSnapshots", nil, &r.Options, &resp) + return +} + +// Retrieves a list of snapshots for this SoftLayer_Network_Storage volume. This method works with the result limits and offset to support pagination. +func (r Network_Storage_Backup_Evault) GetSnapshotsForVolume() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSnapshotsForVolume", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Backup_Evault) GetStaasVersion() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getStaasVersion", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage groups this volume is attached to. +func (r Network_Storage_Backup_Evault) GetStorageGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getStorageGroups", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetStorageGroupsNetworkConnectionDetails() (resp []datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getStorageGroupsNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Backup_Evault) GetStorageTierLevel() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getStorageTierLevel", nil, &r.Options, &resp) + return +} + +// Retrieve A description of the Storage object. +func (r Network_Storage_Backup_Evault) GetStorageType() (resp datatypes.Network_Storage_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getStorageType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetTargetIpAddresses() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getTargetIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The amount of space used by the volume. +func (r Network_Storage_Backup_Evault) GetTotalBytesUsed() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getTotalBytesUsed", nil, &r.Options, &resp) + return +} + +// Retrieve The total snapshot retention count of all schedules on this network storage volume. +func (r Network_Storage_Backup_Evault) GetTotalScheduleSnapshotRetentionCount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getTotalScheduleSnapshotRetentionCount", nil, &r.Options, &resp) + return +} + +// Retrieve The usage notification for SL Storage services. 
+func (r Network_Storage_Backup_Evault) GetUsageNotification() (resp datatypes.Notification, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getUsageNotification", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetValidReplicationTargetDatacenterLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getValidReplicationTargetDatacenterLocations", nil, &r.Options, &resp) + return +} + +// Retrieve The type of network storage service. +func (r Network_Storage_Backup_Evault) GetVendorName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getVendorName", nil, &r.Options, &resp) + return +} + +// Retrieve When applicable, the virtual guest associated with a Storage service. +func (r Network_Storage_Backup_Evault) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// This method returns the parameters for cloning a volume +func (r Network_Storage_Backup_Evault) GetVolumeDuplicateParameters() (resp datatypes.Container_Network_Storage_VolumeDuplicateParameters, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getVolumeDuplicateParameters", nil, &r.Options, &resp) + return +} + +// Retrieve The username and password history for a Storage service. +func (r Network_Storage_Backup_Evault) GetVolumeHistory() (resp []datatypes.Network_Storage_History, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getVolumeHistory", nil, &r.Options, &resp) + return +} + +// Retrieve The current status of a network storage volume. +func (r Network_Storage_Backup_Evault) GetVolumeStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getVolumeStatus", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetWebCCAuthenticationDetails() (resp datatypes.Container_Network_Storage_Backup_Evault_WebCc_Authentication_Details, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getWebCCAuthenticationDetails", nil, &r.Options, &resp) + return +} + +// Retrieve The account username and password for the EVault webCC interface. +func (r Network_Storage_Backup_Evault) GetWebccAccount() (resp datatypes.Account_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getWebccAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The Weekly Schedule which is associated with this network storage volume. +func (r Network_Storage_Backup_Evault) GetWeeklySchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getWeeklySchedule", nil, &r.Options, &resp) + return +} + +// Immediate Failover to a volume replicant. During the time which the replicant is in use the local nas volume will not be available. 
+func (r Network_Storage_Backup_Evault) ImmediateFailoverToReplicant(replicantId *int) (resp bool, err error) { + params := []interface{}{ + replicantId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "immediateFailoverToReplicant", params, &r.Options, &resp) + return +} + +// Evault Bare Metal Restore is a special version of Rescue Kernel designed specifically for making full system restores made with Evault's BMR backup. This process works very similar to Rescue Kernel, except only the Evault restore program is available. The process takes approximately 10 minutes. Once completed you will be able to access your server to do a restore through VNC or your servers KVM-over-IP. IP information and credentials can be found on the hardware page of the customer portal. The Evault Application will be running automatically upon startup, and will walk you through the restore process. +func (r Network_Storage_Backup_Evault) InitiateBareMetalRestore() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "initiateBareMetalRestore", nil, &r.Options, &resp) + return +} + +// This method operates the same as the initiateBareMetalRestore() method. However, using this method, the Bare Metal Restore can be initiated on any Windows server under the account. +func (r Network_Storage_Backup_Evault) InitiateBareMetalRestoreForServer(hardwareId *int) (resp bool, err error) { + params := []interface{}{ + hardwareId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "initiateBareMetalRestoreForServer", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) IsBlockingOperationInProgress(exemptStatusKeyNames []string) (resp bool, err error) { + params := []interface{}{ + exemptStatusKeyNames, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "isBlockingOperationInProgress", params, &r.Options, &resp) + return +} + +// This method returns a boolean indicating whether the clone volume is ready for snapshot. +func (r Network_Storage_Backup_Evault) IsDuplicateReadyForSnapshot() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "isDuplicateReadyForSnapshot", nil, &r.Options, &resp) + return +} + +// This method returns a boolean indicating whether the clone volume is ready to mount. +func (r Network_Storage_Backup_Evault) IsDuplicateReadyToMount() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "isDuplicateReadyToMount", nil, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromHardware", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. 
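// Illustrative sketch (editor's note, not part of the vendored softlayer-go source): failing
// a volume over to its replicant and starting an EVault Bare Metal Restore, per the
// immediateFailoverToReplicant and initiateBareMetalRestoreForServer docs above. The
// credentials and the volume, replicant, and hardware ids are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials
	evault := services.GetNetworkStorageBackupEvaultService(sess).Id(12345) // placeholder volume id

	// While the replicant is in use the local volume is unavailable, so this call is
	// normally reserved for disaster-recovery scenarios.
	if _, err := evault.ImmediateFailoverToReplicant(sl.Int(67890)); err != nil { // placeholder replicant id
		log.Fatal(err)
	}

	// Boot a server under the account into the EVault restore environment.
	ok, err := evault.InitiateBareMetalRestoreForServer(sl.Int(24680)) // placeholder hardware id
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bare metal restore started:", ok)
}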
+func (r Network_Storage_Backup_Evault) RemoveAccessFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessFromHost(typeClassName *string, hostId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + typeClassName, + hostId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromHost", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessFromHostList(hostObjectTemplates []datatypes.Container_Network_Storage_Host) (resp []datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hostObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromHostList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage will be listed in the allowedIpAddresses property of this storage volume. 
+func (r Network_Storage_Backup_Evault) RemoveAccessFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromIpAddress", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) RemoveAccessFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromIpAddressList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) RemoveAccessFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromSubnet", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) RemoveAccessFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromVirtualGuest", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Hardware objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationHardware property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessToReplicantFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessToReplicantFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. 
The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationIpAddresses property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessToReplicantFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessToReplicantFromIpAddressList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) RemoveAccessToReplicantFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessToReplicantFromSubnet", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationSubnets property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessToReplicantFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessToReplicantFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationVirtualGuests property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessToReplicantFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessToReplicantFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method will remove a credential from the current volume. The credential must have been created using the 'addNewCredential' method. +func (r Network_Storage_Backup_Evault) RemoveCredential(username *string) (resp bool, err error) { + params := []interface{}{ + username, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeCredential", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Restore an individual file so that it may be used as it was before it was deleted. +// +// If a file is deleted from a Virtual Server Storage account, the file is placed into the account's recycle bin and not permanently deleted. Therefore, restoreFile can be used to place the file back into your Virtual Server account's root directory. +func (r Network_Storage_Backup_Evault) RestoreFile(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + fileId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "restoreFile", params, &r.Options, &resp) + return +} + +// Restore the volume from a snapshot that was previously taken. 
+func (r Network_Storage_Backup_Evault) RestoreFromSnapshot(snapshotId *int) (resp bool, err error) { + params := []interface{}{ + snapshotId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "restoreFromSnapshot", params, &r.Options, &resp) + return +} + +// The method will retrieve the password for the StorageLayer or Virtual Server Storage Account and email the password. The Storage Account passwords will be emailed to the master user. For Virtual Server Storage, the password will be sent to the email address used as the username. +func (r Network_Storage_Backup_Evault) SendPasswordReminderEmail(username *string) (resp bool, err error) { + params := []interface{}{ + username, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "sendPasswordReminderEmail", params, &r.Options, &resp) + return +} + +// Enable or disable the mounting of a Storage volume. When mounting is enabled the Storage volume will be mountable or available for use. +// +// For Virtual Server volumes, disabling mounting will deny access to the Virtual Server Account, remove published material and deny all file interaction including uploads and downloads. +// +// Enabling or disabling mounting for Storage volumes is not possible if mounting has been disabled by SoftLayer or a parent account. +func (r Network_Storage_Backup_Evault) SetMountable(mountable *bool) (resp bool, err error) { + params := []interface{}{ + mountable, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "setMountable", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) SetSnapshotAllocation(capacityGb *int) (err error) { + var resp datatypes.Void + params := []interface{}{ + capacityGb, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "setSnapshotAllocation", params, &r.Options, &resp) + return +} + +// Upgrade the Storage volume to one of the upgradable packages (for example from 10 Gigs of EVault storage to 100 Gigs of EVault storage). +func (r Network_Storage_Backup_Evault) UpgradeVolumeCapacity(itemId *int) (resp bool, err error) { + params := []interface{}{ + itemId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "upgradeVolumeCapacity", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Upload a file to a Storage account's root directory. Once uploaded, this method returns new file entity identifier for the upload file. +// +// The following properties are required in the ''file'' parameter. +// *'''name''': The name of the file you wish to upload +// *'''content''': The raw contents of the file you wish to upload. +// *'''contentType''': The MIME-type of content that you wish to upload. 
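// Illustrative sketch (editor's note, not part of the vendored softlayer-go source): uploading
// a file with the three properties the uploadFile doc above calls out (name, content,
// contentType); the uploadFile method itself follows immediately below. The
// Container_Utility_File_Entity field names and types here are assumptions about the
// generated datatypes, and the credentials and volume id are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials
	evault := services.GetNetworkStorageBackupEvaultService(sess).Id(12345) // placeholder volume id

	content := []byte("hello from softlayer-go")
	file := &datatypes.Container_Utility_File_Entity{
		Name:        sl.String("hello.txt"),  // required: file name
		Content:     &content,                // required: raw contents
		ContentType: sl.String("text/plain"), // required: MIME type
	}

	uploaded, err := evault.UploadFile(file)
	if err != nil {
		log.Fatal(err)
	}
	if uploaded.Name != nil {
		fmt.Println("uploaded:", *uploaded.Name)
	}
}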
+func (r Network_Storage_Backup_Evault) UploadFile(file *datatypes.Container_Utility_File_Entity) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + file, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "uploadFile", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Group struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageGroupService returns an instance of the Network_Storage_Group SoftLayer service +func GetNetworkStorageGroupService(sess *session.Session) Network_Storage_Group { + return Network_Storage_Group{Session: sess} +} + +func (r Network_Storage_Group) Id(id int) Network_Storage_Group { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Group) Mask(mask string) Network_Storage_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Group) Filter(filter string) Network_Storage_Group { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Group) Limit(limit int) Network_Storage_Group { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Group) Offset(offset int) Network_Storage_Group { + r.Options.Offset = &offset + return r +} + +// Use this method to attach a SoftLayer_Network_Storage_Allowed_Host object to this group. This will automatically enable access from this host to any SoftLayer_Network_Storage volumes currently attached to this group. +func (r Network_Storage_Group) AddAllowedHost(allowedHost *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + allowedHost, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "addAllowedHost", params, &r.Options, &resp) + return +} + +// Use this method to attach a SoftLayer_Network_Storage volume to this group. This will automatically enable access to this volume for any SoftLayer_Network_Storage_Allowed_Host objects currently attached to this group. +func (r Network_Storage_Group) AttachToVolume(volume *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + volume, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "attachToVolume", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group) CreateObject(templateObject *datatypes.Network_Storage_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group) EditObject(templateObject *datatypes.Network_Storage_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Account which owns this group. 
+func (r Network_Storage_Group) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getAccount", nil, &r.Options, &resp) + return +} + +// Use this method to retrieve all network storage groups. +func (r Network_Storage_Group) GetAllObjects() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The allowed hosts list for this group. +func (r Network_Storage_Group) GetAllowedHosts() (resp []datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getAllowedHosts", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage volumes this group is attached to. +func (r Network_Storage_Group) GetAttachedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getAttachedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The type which defines this group. +func (r Network_Storage_Group) GetGroupType() (resp datatypes.Network_Storage_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getGroupType", nil, &r.Options, &resp) + return +} + +// Use this method to retrieve network connection information for SoftLayer_Network_Storage_Allowed_Host objects within this group. +func (r Network_Storage_Group) GetNetworkConnectionDetails() (resp datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group) GetObject() (resp datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The OS Type this group is configured for. +func (r Network_Storage_Group) GetOsType() (resp datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getOsType", nil, &r.Options, &resp) + return +} + +// Retrieve The network resource this group is created on. +func (r Network_Storage_Group) GetServiceResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getServiceResource", nil, &r.Options, &resp) + return +} + +// Use this method to remove a SoftLayer_Network_Storage_Allowed_Host object from this group. This will automatically disable access from this host to any SoftLayer_Network_Storage volumes currently attached to this group. +func (r Network_Storage_Group) RemoveAllowedHost(allowedHost *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + allowedHost, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "removeAllowedHost", params, &r.Options, &resp) + return +} + +// Use this method to remove a SoftLayer_Network_Storage volume from this group. This will automatically disable access to this volume for any SoftLayer_Network_Storage_Allowed_Host objects currently attached to this group. 
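// Illustrative sketch (editor's note, not part of the vendored softlayer-go source): wiring an
// allowed host and a volume into a SoftLayer_Network_Storage_Group, per the addAllowedHost and
// attachToVolume docs above. It also shows that the generated Mask helper wraps a
// comma-separated property list in "mask[...]" automatically. The ids, credentials, and the
// mask property path are placeholder assumptions.
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials
	group := services.GetNetworkStorageGroupService(sess).Id(111) // placeholder group id

	// Grant a previously created allowed host access to every volume in the group.
	if _, err := group.AddAllowedHost(&datatypes.Network_Storage_Allowed_Host{Id: sl.Int(222)}); err != nil {
		log.Fatal(err)
	}

	// Attach a storage volume; all allowed hosts in the group gain access to it.
	if _, err := group.AttachToVolume(&datatypes.Network_Storage{Id: sl.Int(333)}); err != nil {
		log.Fatal(err)
	}

	// The mask contains a comma, so Mask() sends it as mask[id,groupType.name].
	g, err := group.Mask("id,groupType.name").GetObject()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("group: %+v\n", g)
}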
+func (r Network_Storage_Group) RemoveFromVolume(volume *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + volume, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "removeFromVolume", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Group_Iscsi struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageGroupIscsiService returns an instance of the Network_Storage_Group_Iscsi SoftLayer service +func GetNetworkStorageGroupIscsiService(sess *session.Session) Network_Storage_Group_Iscsi { + return Network_Storage_Group_Iscsi{Session: sess} +} + +func (r Network_Storage_Group_Iscsi) Id(id int) Network_Storage_Group_Iscsi { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Group_Iscsi) Mask(mask string) Network_Storage_Group_Iscsi { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Group_Iscsi) Filter(filter string) Network_Storage_Group_Iscsi { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Group_Iscsi) Limit(limit int) Network_Storage_Group_Iscsi { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Group_Iscsi) Offset(offset int) Network_Storage_Group_Iscsi { + r.Options.Offset = &offset + return r +} + +// Use this method to attach a SoftLayer_Network_Storage_Allowed_Host object to this group. This will automatically enable access from this host to any SoftLayer_Network_Storage volumes currently attached to this group. +func (r Network_Storage_Group_Iscsi) AddAllowedHost(allowedHost *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + allowedHost, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "addAllowedHost", params, &r.Options, &resp) + return +} + +// Use this method to attach a SoftLayer_Network_Storage volume to this group. This will automatically enable access to this volume for any SoftLayer_Network_Storage_Allowed_Host objects currently attached to this group. +func (r Network_Storage_Group_Iscsi) AttachToVolume(volume *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + volume, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "attachToVolume", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Iscsi) CreateObject(templateObject *datatypes.Network_Storage_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Iscsi) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Iscsi) EditObject(templateObject *datatypes.Network_Storage_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Account which owns this group. 
+func (r Network_Storage_Group_Iscsi) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getAccount", nil, &r.Options, &resp) + return +} + +// Use this method to retrieve all network storage groups. +func (r Network_Storage_Group_Iscsi) GetAllObjects() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The allowed hosts list for this group. +func (r Network_Storage_Group_Iscsi) GetAllowedHosts() (resp []datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getAllowedHosts", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage volumes this group is attached to. +func (r Network_Storage_Group_Iscsi) GetAttachedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getAttachedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The type which defines this group. +func (r Network_Storage_Group_Iscsi) GetGroupType() (resp datatypes.Network_Storage_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getGroupType", nil, &r.Options, &resp) + return +} + +// Use this method to retrieve network connection information for SoftLayer_Network_Storage_Allowed_Host objects within this group. +func (r Network_Storage_Group_Iscsi) GetNetworkConnectionDetails() (resp datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Iscsi) GetObject() (resp datatypes.Network_Storage_Group_Iscsi, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The OS Type this group is configured for. +func (r Network_Storage_Group_Iscsi) GetOsType() (resp datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getOsType", nil, &r.Options, &resp) + return +} + +// Retrieve The network resource this group is created on. +func (r Network_Storage_Group_Iscsi) GetServiceResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getServiceResource", nil, &r.Options, &resp) + return +} + +// Use this method to remove a SoftLayer_Network_Storage_Allowed_Host object from this group. This will automatically disable access from this host to any SoftLayer_Network_Storage volumes currently attached to this group. +func (r Network_Storage_Group_Iscsi) RemoveAllowedHost(allowedHost *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + allowedHost, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "removeAllowedHost", params, &r.Options, &resp) + return +} + +// Use this method to remove a SoftLayer_Network_Storage volume from this group. This will automatically disable access to this volume for any SoftLayer_Network_Storage_Allowed_Host objects currently attached to this group. 
+func (r Network_Storage_Group_Iscsi) RemoveFromVolume(volume *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + volume, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "removeFromVolume", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Group_Nfs struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageGroupNfsService returns an instance of the Network_Storage_Group_Nfs SoftLayer service +func GetNetworkStorageGroupNfsService(sess *session.Session) Network_Storage_Group_Nfs { + return Network_Storage_Group_Nfs{Session: sess} +} + +func (r Network_Storage_Group_Nfs) Id(id int) Network_Storage_Group_Nfs { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Group_Nfs) Mask(mask string) Network_Storage_Group_Nfs { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Group_Nfs) Filter(filter string) Network_Storage_Group_Nfs { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Group_Nfs) Limit(limit int) Network_Storage_Group_Nfs { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Group_Nfs) Offset(offset int) Network_Storage_Group_Nfs { + r.Options.Offset = &offset + return r +} + +// Use this method to attach a SoftLayer_Network_Storage_Allowed_Host object to this group. This will automatically enable access from this host to any SoftLayer_Network_Storage volumes currently attached to this group. +func (r Network_Storage_Group_Nfs) AddAllowedHost(allowedHost *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + allowedHost, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "addAllowedHost", params, &r.Options, &resp) + return +} + +// Use this method to attach a SoftLayer_Network_Storage volume to this group. This will automatically enable access to this volume for any SoftLayer_Network_Storage_Allowed_Host objects currently attached to this group. +func (r Network_Storage_Group_Nfs) AttachToVolume(volume *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + volume, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "attachToVolume", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Nfs) CreateObject(templateObject *datatypes.Network_Storage_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Nfs) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Nfs) EditObject(templateObject *datatypes.Network_Storage_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Account which owns this group. 
+func (r Network_Storage_Group_Nfs) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getAccount", nil, &r.Options, &resp) + return +} + +// Use this method to retrieve all network storage groups. +func (r Network_Storage_Group_Nfs) GetAllObjects() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The allowed hosts list for this group. +func (r Network_Storage_Group_Nfs) GetAllowedHosts() (resp []datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getAllowedHosts", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage volumes this group is attached to. +func (r Network_Storage_Group_Nfs) GetAttachedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getAttachedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The type which defines this group. +func (r Network_Storage_Group_Nfs) GetGroupType() (resp datatypes.Network_Storage_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getGroupType", nil, &r.Options, &resp) + return +} + +// Use this method to retrieve network connection information for SoftLayer_Network_Storage_Allowed_Host objects within this group. +func (r Network_Storage_Group_Nfs) GetNetworkConnectionDetails() (resp datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Nfs) GetObject() (resp datatypes.Network_Storage_Group_Nfs, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The OS Type this group is configured for. +func (r Network_Storage_Group_Nfs) GetOsType() (resp datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getOsType", nil, &r.Options, &resp) + return +} + +// Retrieve The network resource this group is created on. +func (r Network_Storage_Group_Nfs) GetServiceResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getServiceResource", nil, &r.Options, &resp) + return +} + +// Use this method to remove a SoftLayer_Network_Storage_Allowed_Host object from this group. This will automatically disable access from this host to any SoftLayer_Network_Storage volumes currently attached to this group. +func (r Network_Storage_Group_Nfs) RemoveAllowedHost(allowedHost *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + allowedHost, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "removeAllowedHost", params, &r.Options, &resp) + return +} + +// Use this method to remove a SoftLayer_Network_Storage volume from this group. This will automatically disable access to this volume for any SoftLayer_Network_Storage_Allowed_Host objects currently attached to this group. 
+func (r Network_Storage_Group_Nfs) RemoveFromVolume(volume *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + volume, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "removeFromVolume", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Group_Type struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageGroupTypeService returns an instance of the Network_Storage_Group_Type SoftLayer service +func GetNetworkStorageGroupTypeService(sess *session.Session) Network_Storage_Group_Type { + return Network_Storage_Group_Type{Session: sess} +} + +func (r Network_Storage_Group_Type) Id(id int) Network_Storage_Group_Type { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Group_Type) Mask(mask string) Network_Storage_Group_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Group_Type) Filter(filter string) Network_Storage_Group_Type { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Group_Type) Limit(limit int) Network_Storage_Group_Type { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Group_Type) Offset(offset int) Network_Storage_Group_Type { + r.Options.Offset = &offset + return r +} + +// Use this method to retrieve all storage group types available. +func (r Network_Storage_Group_Type) GetAllObjects() (resp []datatypes.Network_Storage_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Type) GetObject() (resp datatypes.Network_Storage_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Hub_Cleversafe_Account struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageHubCleversafeAccountService returns an instance of the Network_Storage_Hub_Cleversafe_Account SoftLayer service +func GetNetworkStorageHubCleversafeAccountService(sess *session.Session) Network_Storage_Hub_Cleversafe_Account { + return Network_Storage_Hub_Cleversafe_Account{Session: sess} +} + +func (r Network_Storage_Hub_Cleversafe_Account) Id(id int) Network_Storage_Hub_Cleversafe_Account { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Hub_Cleversafe_Account) Mask(mask string) Network_Storage_Hub_Cleversafe_Account { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Hub_Cleversafe_Account) Filter(filter string) Network_Storage_Hub_Cleversafe_Account { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Hub_Cleversafe_Account) Limit(limit int) Network_Storage_Hub_Cleversafe_Account { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Hub_Cleversafe_Account) Offset(offset int) Network_Storage_Hub_Cleversafe_Account { + r.Options.Offset = &offset + return r +} + +// Create credentials for an IBM Cloud Object Storage Account +func (r Network_Storage_Hub_Cleversafe_Account) CredentialCreate() (resp []datatypes.Network_Storage_Credential, err error) { + err = 
r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "credentialCreate", nil, &r.Options, &resp) + return +} + +// Delete a credential +func (r Network_Storage_Hub_Cleversafe_Account) CredentialDelete(credential *datatypes.Network_Storage_Credential) (resp bool, err error) { + params := []interface{}{ + credential, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "credentialDelete", params, &r.Options, &resp) + return +} + +// Retrieve SoftLayer account to which an IBM Cloud Object Storage account belongs to. +func (r Network_Storage_Hub_Cleversafe_Account) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Hub_Cleversafe_Account) GetAllObjects() (resp []datatypes.Network_Storage_Hub_Cleversafe_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve An associated parent billing item which is active. Includes billing items which are scheduled to be cancelled in the future. +func (r Network_Storage_Hub_Cleversafe_Account) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Get buckets +func (r Network_Storage_Hub_Cleversafe_Account) GetBuckets() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_Bucket, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getBuckets", nil, &r.Options, &resp) + return +} + +// Retrieve An associated parent billing item which has been cancelled. +func (r Network_Storage_Hub_Cleversafe_Account) GetCancelledBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getCancelledBillingItem", nil, &r.Options, &resp) + return +} + +// Returns the capacity usage for an IBM Cloud Object Storage account. +func (r Network_Storage_Hub_Cleversafe_Account) GetCapacityUsage() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getCapacityUsage", nil, &r.Options, &resp) + return +} + +// Returns credential limits for this IBM Cloud Object Storage account. +func (r Network_Storage_Hub_Cleversafe_Account) GetCredentialLimit() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getCredentialLimit", nil, &r.Options, &resp) + return +} + +// Retrieve Credentials used for generating an AWS signature. Max of 2. +func (r Network_Storage_Hub_Cleversafe_Account) GetCredentials() (resp []datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getCredentials", nil, &r.Options, &resp) + return +} + +// Returns a collection of endpoint URLs available to this IBM Cloud Object Storage account. 
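// Illustrative sketch (editor's note, not part of the vendored softlayer-go source): managing
// credentials on an IBM Cloud Object Storage (Cleversafe) account, per the credentialCreate,
// getCredentialLimit, and getCredentials docs above. The credentials and account id are
// placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials
	cos := services.GetNetworkStorageHubCleversafeAccountService(sess).Id(444) // placeholder COS account id

	limit, err := cos.GetCredentialLimit()
	if err != nil {
		log.Fatal(err)
	}

	existing, err := cos.GetCredentials()
	if err != nil {
		log.Fatal(err)
	}

	// The docs above note a maximum of two credentials per account.
	if len(existing) < limit {
		created, err := cos.CredentialCreate()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("created %d credential(s)\n", len(created))
	} else {
		fmt.Println("credential limit reached:", limit)
	}
}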
+func (r Network_Storage_Hub_Cleversafe_Account) GetEndpoints(accountId *int) (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_Endpoint, err error) { + params := []interface{}{ + accountId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getEndpoints", params, &r.Options, &resp) + return +} + +// Retrieve Provides an interface to various metrics relating to the usage of an IBM Cloud Object Storage account. +func (r Network_Storage_Hub_Cleversafe_Account) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Hub_Cleversafe_Account) GetObject() (resp datatypes.Network_Storage_Hub_Cleversafe_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Unique identifier for an IBM Cloud Object Storage account. +func (r Network_Storage_Hub_Cleversafe_Account) GetUuid() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getUuid", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Hub_Swift_Share struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageHubSwiftShareService returns an instance of the Network_Storage_Hub_Swift_Share SoftLayer service +func GetNetworkStorageHubSwiftShareService(sess *session.Session) Network_Storage_Hub_Swift_Share { + return Network_Storage_Hub_Swift_Share{Session: sess} +} + +func (r Network_Storage_Hub_Swift_Share) Id(id int) Network_Storage_Hub_Swift_Share { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Hub_Swift_Share) Mask(mask string) Network_Storage_Hub_Swift_Share { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Hub_Swift_Share) Filter(filter string) Network_Storage_Hub_Swift_Share { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Hub_Swift_Share) Limit(limit int) Network_Storage_Hub_Swift_Share { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Hub_Swift_Share) Offset(offset int) Network_Storage_Hub_Swift_Share { + r.Options.Offset = &offset + return r +} + +// This method returns a collection of container objects. +func (r Network_Storage_Hub_Swift_Share) GetContainerList() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_Folder, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Swift_Share", "getContainerList", nil, &r.Options, &resp) + return +} + +// This method returns a file object given the file's full name. +func (r Network_Storage_Hub_Swift_Share) GetFile(fileName *string, container *string) (resp datatypes.Container_Network_Storage_Hub_ObjectStorage_File, err error) { + params := []interface{}{ + fileName, + container, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Swift_Share", "getFile", params, &r.Options, &resp) + return +} + +// This method returns a collection of the file objects within a container and the given path. 
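// Illustrative sketch (editor's note, not part of the vendored softlayer-go source): listing
// containers and files on a SoftLayer_Network_Storage_Hub_Swift_Share, per the
// getContainerList and getFileList docs above (the getFileList method itself follows
// immediately below). The share id, container name, and path are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials
	share := services.GetNetworkStorageHubSwiftShareService(sess).Id(555) // placeholder share id

	containers, err := share.GetContainerList()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d container(s)\n", len(containers))

	files, err := share.GetFileList(sl.String("my-container"), sl.String("backups/")) // placeholder names
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d file(s)\n", len(files))
}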
+func (r Network_Storage_Hub_Swift_Share) GetFileList(container *string, path *string) (resp []datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + container, + path, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Swift_Share", "getFileList", params, &r.Options, &resp) + return +} + +// The iscsi data type provides access to additional information about an iscsi volume such as the snapshot capacity limit and replication partners. +type Network_Storage_Iscsi struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageIscsiService returns an instance of the Network_Storage_Iscsi SoftLayer service +func GetNetworkStorageIscsiService(sess *session.Session) Network_Storage_Iscsi { + return Network_Storage_Iscsi{Session: sess} +} + +func (r Network_Storage_Iscsi) Id(id int) Network_Storage_Iscsi { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Iscsi) Mask(mask string) Network_Storage_Iscsi { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Iscsi) Filter(filter string) Network_Storage_Iscsi { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Iscsi) Limit(limit int) Network_Storage_Iscsi { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Iscsi) Offset(offset int) Network_Storage_Iscsi { + r.Options.Offset = &offset + return r +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. +func (r Network_Storage_Iscsi) AllowAccessFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromHardware", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) AllowAccessFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage_Iscsi) AllowAccessFromHost(typeClassName *string, hostId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + typeClassName, + hostId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromHost", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage volume will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. 
+func (r Network_Storage_Iscsi) AllowAccessFromHostList(hostObjectTemplates []datatypes.Container_Network_Storage_Host) (resp []datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hostObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromHostList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) AllowAccessFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromIpAddress", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) AllowAccessFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromIpAddressList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. +func (r Network_Storage_Iscsi) AllowAccessFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromSubnet", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) AllowAccessFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage_Iscsi) AllowAccessFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromVirtualGuest", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage_Iscsi) AllowAccessFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage replicant volume. 
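// Illustrative sketch (editor's note, not part of the vendored softlayer-go source):
// authorizing hosts against an iSCSI volume with the access-control methods above.
// allowAccessFromHost takes the SoftLayer class name of the host plus its id; the class
// name shown is one of the documented options, and the ids are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials
	iscsi := services.GetNetworkStorageIscsiService(sess).Id(666) // placeholder volume id

	// Authorize a single virtual guest by class name and id.
	allowed, err := iscsi.AllowAccessFromHost(sl.String("SoftLayer_Virtual_Guest"), sl.Int(777))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("allowed host: %+v\n", allowed)

	// Or authorize several guests at once via the list form.
	guests := []datatypes.Virtual_Guest{{Id: sl.Int(777)}, {Id: sl.Int(888)}}
	if _, err := iscsi.AllowAccessFromVirtualGuestList(guests); err != nil {
		log.Fatal(err)
	}
}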
+func (r Network_Storage_Iscsi) AllowAccessToReplicantFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromHardware", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replica volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage replica volume. +func (r Network_Storage_Iscsi) AllowAccessToReplicantFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromHardwareList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) AllowAccessToReplicantFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromIpAddress", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage_Iscsi) AllowAccessToReplicantFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromIpAddressList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Network_Subnet objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage replicant volume. +func (r Network_Storage_Iscsi) AllowAccessToReplicantFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromSubnet", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationSubnets property of this storage volume. +func (r Network_Storage_Iscsi) AllowAccessToReplicantFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage replicant volume. 
+func (r Network_Storage_Iscsi) AllowAccessToReplicantFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromVirtualGuest", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage_Iscsi) AllowAccessToReplicantFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method will assign an existing credential to the current volume. The credential must have been created using the 'addNewCredential' method. The volume type must support an additional credential. +func (r Network_Storage_Iscsi) AssignCredential(username *string) (resp bool, err error) { + params := []interface{}{ + username, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "assignCredential", params, &r.Options, &resp) + return +} + +// This method will set up a new credential for the remote storage volume. The storage volume must support an additional credential. Once created, the credential will be automatically assigned to the current volume. If there are no volumes assigned to the credential it will be automatically deleted. +func (r Network_Storage_Iscsi) AssignNewCredential(typ *string) (resp datatypes.Network_Storage_Credential, err error) { + params := []interface{}{ + typ, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "assignNewCredential", params, &r.Options, &resp) + return +} + +// The method will change the password for the given Storage/Virtual Server Storage account. +func (r Network_Storage_Iscsi) ChangePassword(username *string, currentPassword *string, newPassword *string) (resp bool, err error) { + params := []interface{}{ + username, + currentPassword, + newPassword, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "changePassword", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} +// +// collectBandwidth() Retrieve the bandwidth usage for the current billing cycle. +func (r Network_Storage_Iscsi) CollectBandwidth(typ *string, startDate *datatypes.Time, endDate *datatypes.Time) (resp uint, err error) { + params := []interface{}{ + typ, + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "collectBandwidth", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} +// +// collectBytesUsed() retrieves the number of bytes capacity currently in use on a Storage account. 
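+
+// A minimal sketch of the credential calls above, assuming sess is a configured
+// *session.Session, volumeId identifies this volume, and the credential type and
+// password strings are placeholders accepted by the account's storage offering.
+//
+//	iscsi := GetNetworkStorageIscsiService(sess).Id(volumeId)
+//	cred, err := iscsi.AssignNewCredential(sl.String("ISCSI")) // creates and auto-assigns the credential
+//	if err == nil {
+//		// rotate the new credential's password; both password values are placeholders
+//		_, err = iscsi.ChangePassword(cred.Username, sl.String("currentPw"), sl.String("newPw"))
+//	}
+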
+func (r Network_Storage_Iscsi) CollectBytesUsed() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "collectBytesUsed", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) CreateFolder(folder *string) (resp bool, err error) { + params := []interface{}{ + folder, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "createFolder", params, &r.Options, &resp) + return +} + +// The LUN ID only takes effect during the Host Authorization process. It is required to de-authorize all hosts before using this method. +func (r Network_Storage_Iscsi) CreateOrUpdateLunId(lunId *int) (resp datatypes.Network_Storage_Property, err error) { + params := []interface{}{ + lunId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "createOrUpdateLunId", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) CreateSnapshot(notes *string) (resp datatypes.Network_Storage, err error) { + params := []interface{}{ + notes, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "createSnapshot", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Delete all files within a Storage account. Depending on the type of Storage account, Deleting either deletes files permanently or sends files to your account's recycle bin. +// +// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account , this method also determines if the files are in the account's recycle bin. If the files exist in the recycle bin, then they are permanently deleted. +// +// Please note, files can not be restored once they are permanently deleted. +func (r Network_Storage_Iscsi) DeleteAllFiles() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "deleteAllFiles", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Delete an individual file within a Storage account. Depending on the type of Storage account, Deleting a file either deletes the file permanently or sends the file to your account's recycle bin. +// +// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account , this method also determines if the file is in the account's recycle bin. If the file exist in the recycle bin, then it is permanently deleted. +// +// Please note, a file can not be restored once it is permanently deleted. +func (r Network_Storage_Iscsi) DeleteFile(fileId *string) (resp bool, err error) { + params := []interface{}{ + fileId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "deleteFile", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Delete multiple files within a Storage account. Depending on the type of Storage account, Deleting either deletes files permanently or sends files to your account's recycle bin. +// +// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account , this method also determines if the files are in the account's recycle bin. If the files exist in the recycle bin, then they are permanently deleted. +// +// Please note, files can not be restored once they are permanently deleted. 
+func (r Network_Storage_Iscsi) DeleteFiles(fileIds []string) (resp bool, err error) { + params := []interface{}{ + fileIds, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "deleteFiles", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) DeleteFolder(folder *string) (resp bool, err error) { + params := []interface{}{ + folder, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "deleteFolder", params, &r.Options, &resp) + return +} + +// Delete a network storage volume. '''This cannot be undone.''' At this time only network storage snapshots may be deleted with this method. +// +// ''deleteObject'' returns Boolean ''true'' on successful deletion or ''false'' if it was unable to remove a volume; +func (r Network_Storage_Iscsi) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "deleteObject", nil, &r.Options, &resp) + return +} + +// This method is not valid for Legacy iSCSI Storage Volumes. +// +// Disable scheduled snapshots of this storage volume. Scheduling options include 'INTERVAL', HOURLY, DAILY and WEEKLY schedules. +func (r Network_Storage_Iscsi) DisableSnapshots(scheduleType *string) (resp bool, err error) { + params := []interface{}{ + scheduleType, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "disableSnapshots", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Download a file from a Storage account. This method returns a file's details including the file's raw content. +func (r Network_Storage_Iscsi) DownloadFile(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + fileId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "downloadFile", params, &r.Options, &resp) + return +} + +// This method will change the password of a credential created using the 'addNewCredential' method. If the credential exists on multiple storage volumes it will change for those volumes as well. +func (r Network_Storage_Iscsi) EditCredential(username *string, newPassword *string) (resp bool, err error) { + params := []interface{}{ + username, + newPassword, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "editCredential", params, &r.Options, &resp) + return +} + +// The password and/or notes may be modified for the Storage service except evault passwords and notes. +func (r Network_Storage_Iscsi) EditObject(templateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "editObject", params, &r.Options, &resp) + return +} + +// This method is not valid for Legacy iSCSI Storage Volumes. +// +// Enable scheduled snapshots of this storage volume. Scheduling options include HOURLY, DAILY and WEEKLY schedules. For HOURLY schedules, provide relevant data for $scheduleType, $retentionCount and $minute. For DAILY schedules, provide relevant data for $scheduleType, $retentionCount, $minute, and $hour. For WEEKLY schedules, provide relevant data for all parameters of this method. 
+func (r Network_Storage_Iscsi) EnableSnapshots(scheduleType *string, retentionCount *int, minute *int, hour *int, dayOfWeek *string) (resp bool, err error) { + params := []interface{}{ + scheduleType, + retentionCount, + minute, + hour, + dayOfWeek, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "enableSnapshots", params, &r.Options, &resp) + return +} + +// Failback from a volume replicant. In order to failback the volume must have already been failed over to a replicant. +func (r Network_Storage_Iscsi) FailbackFromReplicant() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "failbackFromReplicant", nil, &r.Options, &resp) + return +} + +// Failover to a volume replicant. During the time which the replicant is in use the local nas volume will not be available. +func (r Network_Storage_Iscsi) FailoverToReplicant(replicantId *int) (resp bool, err error) { + params := []interface{}{ + replicantId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "failoverToReplicant", params, &r.Options, &resp) + return +} + +// Retrieve The account that a Storage services belongs to. +func (r Network_Storage_Iscsi) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve Other usernames and passwords associated with a Storage volume. +func (r Network_Storage_Iscsi) GetAccountPassword() (resp datatypes.Account_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAccountPassword", nil, &r.Options, &resp) + return +} + +// Retrieve The currently active transactions on a network storage volume. +func (r Network_Storage_Iscsi) GetActiveTransactions() (resp []datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getActiveTransactions", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date for all files in a Storage account's root directory. This does not download file content. +func (r Network_Storage_Iscsi) GetAllFiles() (resp []datatypes.Container_Utility_File_Entity, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllFiles", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date for all files matching the filter's criteria in a Storage account's root directory. This does not download file content. +func (r Network_Storage_Iscsi) GetAllFilesByFilter(filter *datatypes.Container_Utility_File_Entity) (resp []datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + filter, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllFilesByFilter", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Hardware that can be authorized to this SoftLayer_Network_Storage. +func (r Network_Storage_Iscsi) GetAllowableHardware(filterHostname *string) (resp []datatypes.Hardware, err error) { + params := []interface{}{ + filterHostname, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowableHardware", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Subnet_IpAddress that can be authorized to this SoftLayer_Network_Storage. 
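+
+// A minimal sketch of the snapshot-schedule call above: per the EnableSnapshots
+// documentation, a DAILY schedule needs scheduleType, retentionCount, minute and
+// hour, while dayOfWeek is only consulted for WEEKLY schedules. sess and volumeId
+// are assumed placeholders, set up as in the other sketches in this file.
+//
+//	iscsi := GetNetworkStorageIscsiService(sess).Id(volumeId)
+//	// Keep 7 daily snapshots, taken at 02:30; pass nil for the unused dayOfWeek.
+//	if ok, err := iscsi.EnableSnapshots(sl.String("DAILY"), sl.Int(7), sl.Int(30), sl.Int(2), nil); err != nil || !ok {
+//		// handle the error or the declined request
+//	}
+//	// The same schedule type can later be disabled again:
+//	// iscsi.DisableSnapshots(sl.String("DAILY"))
+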
+func (r Network_Storage_Iscsi) GetAllowableIpAddresses(subnetId *int, filterIpAddress *string) (resp []datatypes.Network_Subnet_IpAddress, err error) { + params := []interface{}{ + subnetId, + filterIpAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowableIpAddresses", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Subnet that can be authorized to this SoftLayer_Network_Storage. +func (r Network_Storage_Iscsi) GetAllowableSubnets(filterNetworkIdentifier *string) (resp []datatypes.Network_Subnet, err error) { + params := []interface{}{ + filterNetworkIdentifier, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowableSubnets", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Virtual_Guest that can be authorized to this SoftLayer_Network_Storage. +func (r Network_Storage_Iscsi) GetAllowableVirtualGuests(filterHostname *string) (resp []datatypes.Virtual_Guest, err error) { + params := []interface{}{ + filterHostname, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowableVirtualGuests", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Hardware objects which are allowed access to this storage volume. +func (r Network_Storage_Iscsi) GetAllowedHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedHardware", nil, &r.Options, &resp) + return +} + +// Retrieves the total number of allowed hosts limit per volume. +func (r Network_Storage_Iscsi) GetAllowedHostsLimit() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedHostsLimit", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume. +func (r Network_Storage_Iscsi) GetAllowedIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Hardware objects which are allowed access to this storage volume's Replicant. +func (r Network_Storage_Iscsi) GetAllowedReplicationHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedReplicationHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume's Replicant. +func (r Network_Storage_Iscsi) GetAllowedReplicationIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedReplicationIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet objects which are allowed access to this storage volume's Replicant. +func (r Network_Storage_Iscsi) GetAllowedReplicationSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedReplicationSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Hardware objects which are allowed access to this storage volume's Replicant. 
+func (r Network_Storage_Iscsi) GetAllowedReplicationVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedReplicationVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet objects which are allowed access to this storage volume. +func (r Network_Storage_Iscsi) GetAllowedSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Virtual_Guest objects which are allowed access to this storage volume. +func (r Network_Storage_Iscsi) GetAllowedVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for a Storage volume. +func (r Network_Storage_Iscsi) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Iscsi) GetBillingItemCategory() (resp datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getBillingItemCategory", nil, &r.Options, &resp) + return +} + +// Retrieve network storage accounts by username and storage account type. Use this method if you wish to retrieve a storage record by username rather than by id. The ''type'' parameter must correspond to one of the available ''nasType'' values in the SoftLayer_Network_Storage data type. +func (r Network_Storage_Iscsi) GetByUsername(username *string, typ *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + username, + typ, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getByUsername", params, &r.Options, &resp) + return +} + +// Retrieve The amount of space used by the volume, in bytes. +func (r Network_Storage_Iscsi) GetBytesUsed() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getBytesUsed", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetCdnUrls() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_ContentDeliveryUrl, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getCdnUrls", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetClusterResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getClusterResource", nil, &r.Options, &resp) + return +} + +// Retrieve The schedule id which was executed to create a snapshot. +func (r Network_Storage_Iscsi) GetCreationScheduleId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getCreationScheduleId", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Iscsi) GetCredentials() (resp []datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getCredentials", nil, &r.Options, &resp) + return +} + +// Retrieve The Daily Schedule which is associated with this network storage volume. 
+func (r Network_Storage_Iscsi) GetDailySchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getDailySchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The events which have taken place on a network storage volume. +func (r Network_Storage_Iscsi) GetEvents() (resp []datatypes.Network_Storage_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getEvents", nil, &r.Options, &resp) + return +} + +// +// +// +func (r Network_Storage_Iscsi) GetFileBlockEncryptedLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFileBlockEncryptedLocations", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date of a file within a Storage account. This does not download file content. +func (r Network_Storage_Iscsi) GetFileByIdentifier(identifier *string) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + identifier, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFileByIdentifier", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve the file number of files in a Virtual Server Storage account's root directory. This does not include the files stored in the recycle bin. +func (r Network_Storage_Iscsi) GetFileCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFileCount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetFileList(folder *string, path *string) (resp []datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + folder, + path, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFileList", params, &r.Options, &resp) + return +} + +// Retrieve Retrieves the NFS Network Mount Address Name for a given File Storage Volume. +func (r Network_Storage_Iscsi) GetFileNetworkMountAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFileNetworkMountAddress", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve the number of files pending deletion in a Storage account's recycle bin. Files in an account's recycle bin may either be restored to the account's root directory or permanently deleted. +func (r Network_Storage_Iscsi) GetFilePendingDeleteCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFilePendingDeleteCount", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve a list of files that are pending deletion in a Storage account's recycle bin. Files in an account's recycle bin may either be restored to the account's root directory or permanently deleted. This method does not download file content. 
+func (r Network_Storage_Iscsi) GetFilesPendingDelete() (resp []datatypes.Container_Utility_File_Entity, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFilesPendingDelete", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetFolderList() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_Folder, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFolderList", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} +// +// getGraph() retrieves a Storage account's usage and returns a PNG graph image, title, and the minimum and maximum dates included in the graphed date range. Virtual Server storage accounts can also graph upload and download bandwidth usage. +func (r Network_Storage_Iscsi) GetGraph(startDate *datatypes.Time, endDate *datatypes.Time, typ *string) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + startDate, + endDate, + typ, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getGraph", params, &r.Options, &resp) + return +} + +// Retrieve When applicable, the hardware associated with a Storage service. +func (r Network_Storage_Iscsi) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Iscsi) GetHasEncryptionAtRest() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getHasEncryptionAtRest", nil, &r.Options, &resp) + return +} + +// Retrieve The Hourly Schedule which is associated with this network storage volume. +func (r Network_Storage_Iscsi) GetHourlySchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getHourlySchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The Interval Schedule which is associated with this network storage volume. +func (r Network_Storage_Iscsi) GetIntervalSchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getIntervalSchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The maximum number of IOPs selected for this volume. +func (r Network_Storage_Iscsi) GetIops() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getIops", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether a volume is ready to order snapshot space, or, if snapshot space is already available, to assign a snapshot schedule, or to take a manual snapshot. +func (r Network_Storage_Iscsi) GetIsReadyForSnapshot() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getIsReadyForSnapshot", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether a volume is ready to have Hosts authorized to access it. This does not indicate whether another operation may be blocking, please refer to this volume's volumeStatus property for details. +func (r Network_Storage_Iscsi) GetIsReadyToMount() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getIsReadyToMount", nil, &r.Options, &resp) + return +} + +// Retrieve Relationship between a container volume and iSCSI LUNs. 
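+
+// A small sketch combining the readiness check above with createSnapshot: the
+// getIsReadyForSnapshot flag gates manual snapshots, and volumeStatus (see
+// getVolumeStatus further below) is where to look when another operation blocks.
+// sess and volumeId are placeholders as in the other sketches.
+//
+//	iscsi := GetNetworkStorageIscsiService(sess).Id(volumeId)
+//	if ready, err := iscsi.GetIsReadyForSnapshot(); err == nil && ready {
+//		// notes is free-form; the returned Network_Storage record describes the new snapshot
+//		if snap, err := iscsi.CreateSnapshot(sl.String("pre-maintenance snapshot")); err == nil {
+//			_ = snap
+//		}
+//	}
+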
+func (r Network_Storage_Iscsi) GetIscsiLuns() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getIscsiLuns", nil, &r.Options, &resp) + return +} + +// Retrieve Returns the target IP addresses of an iSCSI volume. +func (r Network_Storage_Iscsi) GetIscsiTargetIpAddresses() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getIscsiTargetIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The ID of the LUN volume. +func (r Network_Storage_Iscsi) GetLunId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getLunId", nil, &r.Options, &resp) + return +} + +// Retrieve The manually-created snapshots associated with this SoftLayer_Network_Storage volume. Does not support pagination by result limit and offset. +func (r Network_Storage_Iscsi) GetManualSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getManualSnapshots", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetMaximumExpansionSize() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getMaximumExpansionSize", nil, &r.Options, &resp) + return +} + +// Retrieve A network storage volume's metric tracking object. This object records all periodic polled data available to this volume. +func (r Network_Storage_Iscsi) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a network storage volume may be mounted. +func (r Network_Storage_Iscsi) GetMountableFlag() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getMountableFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The current status of split or move operation as a part of volume duplication. +func (r Network_Storage_Iscsi) GetMoveAndSplitStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getMoveAndSplitStatus", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetNetworkConnectionDetails() (resp datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetNetworkMountAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getNetworkMountAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The subscribers that will be notified for usage amount warnings and overages. 
+func (r Network_Storage_Iscsi) GetNotificationSubscribers() (resp []datatypes.Notification_User_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getNotificationSubscribers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetObject() (resp datatypes.Network_Storage_Iscsi, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetObjectStorageConnectionInformation() (resp []datatypes.Container_Network_Service_Resource_ObjectStorage_ConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getObjectStorageConnectionInformation", nil, &r.Options, &resp) + return +} + +// Retrieve network storage accounts by SoftLayer_Network_Storage_Credential object. Use this method if you wish to retrieve a storage record by a credential rather than by id. +func (r Network_Storage_Iscsi) GetObjectsByCredential(credentialObject *datatypes.Network_Storage_Credential) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + credentialObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getObjectsByCredential", params, &r.Options, &resp) + return +} + +// Retrieve The name of the snapshot that this volume was duplicated from. +func (r Network_Storage_Iscsi) GetOriginalSnapshotName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getOriginalSnapshotName", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the volume that this volume was duplicated from. +func (r Network_Storage_Iscsi) GetOriginalVolumeName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getOriginalVolumeName", nil, &r.Options, &resp) + return +} + +// Retrieve The size (in GB) of the volume or LUN before any size expansion, or of the volume (before any possible size expansion) from which the duplicate volume or LUN was created. +func (r Network_Storage_Iscsi) GetOriginalVolumeSize() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getOriginalVolumeSize", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type. +func (r Network_Storage_Iscsi) GetOsType() (resp datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getOsType", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type ID. +func (r Network_Storage_Iscsi) GetOsTypeId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getOsTypeId", nil, &r.Options, &resp) + return +} + +// Retrieve The volumes or snapshots partnered with a network storage volume in a parental role. +func (r Network_Storage_Iscsi) GetParentPartnerships() (resp []datatypes.Network_Storage_Partnership, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getParentPartnerships", nil, &r.Options, &resp) + return +} + +// Retrieve The parent volume of a volume in a complex storage relationship. 
+func (r Network_Storage_Iscsi) GetParentVolume() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getParentVolume", nil, &r.Options, &resp) + return +} + +// Retrieve The volumes or snapshots partnered with a network storage volume. +func (r Network_Storage_Iscsi) GetPartnerships() (resp []datatypes.Network_Storage_Partnership, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getPartnerships", nil, &r.Options, &resp) + return +} + +// Retrieve All permissions group(s) this volume is in. +func (r Network_Storage_Iscsi) GetPermissionsGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getPermissionsGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The properties used to provide additional details about a network storage volume. +func (r Network_Storage_Iscsi) GetProperties() (resp []datatypes.Network_Storage_Property, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getProperties", nil, &r.Options, &resp) + return +} + +// Retrieve The number of IOPs provisioned for this volume. +func (r Network_Storage_Iscsi) GetProvisionedIops() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getProvisionedIops", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve the details of a file that is pending deletion in a Storage account's a recycle bin. +func (r Network_Storage_Iscsi) GetRecycleBinFileByIdentifier(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + fileId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getRecycleBinFileByIdentifier", params, &r.Options, &resp) + return +} + +// Retrieves the remaining number of allowed hosts per volume. +func (r Network_Storage_Iscsi) GetRemainingAllowedHosts() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getRemainingAllowedHosts", nil, &r.Options, &resp) + return +} + +// Retrieves the remaining number of allowed hosts for a volume's replicant. +func (r Network_Storage_Iscsi) GetRemainingAllowedHostsForReplicant() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getRemainingAllowedHostsForReplicant", nil, &r.Options, &resp) + return +} + +// Retrieve The iSCSI LUN volumes being replicated by this network storage volume. +func (r Network_Storage_Iscsi) GetReplicatingLuns() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getReplicatingLuns", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage volume being replicated by a volume. +func (r Network_Storage_Iscsi) GetReplicatingVolume() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getReplicatingVolume", nil, &r.Options, &resp) + return +} + +// Retrieve The volume replication events. +func (r Network_Storage_Iscsi) GetReplicationEvents() (resp []datatypes.Network_Storage_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getReplicationEvents", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage volumes configured to be replicants of a volume. 
+func (r Network_Storage_Iscsi) GetReplicationPartners() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getReplicationPartners", nil, &r.Options, &resp) + return +} + +// Retrieve The Replication Schedule associated with a network storage volume. +func (r Network_Storage_Iscsi) GetReplicationSchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getReplicationSchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The current replication status of a network storage volume. Indicates Failover or Failback status. +func (r Network_Storage_Iscsi) GetReplicationStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getReplicationStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The schedules which are associated with a network storage volume. +func (r Network_Storage_Iscsi) GetSchedules() (resp []datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSchedules", nil, &r.Options, &resp) + return +} + +// Retrieve The network resource a Storage service is connected to. +func (r Network_Storage_Iscsi) GetServiceResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getServiceResource", nil, &r.Options, &resp) + return +} + +// Retrieve The IP address of a Storage resource. +func (r Network_Storage_Iscsi) GetServiceResourceBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getServiceResourceBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The name of a Storage's network resource. +func (r Network_Storage_Iscsi) GetServiceResourceName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getServiceResourceName", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured snapshot space size. +func (r Network_Storage_Iscsi) GetSnapshotCapacityGb() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSnapshotCapacityGb", nil, &r.Options, &resp) + return +} + +// Retrieve The creation timestamp of the snapshot on the storage platform. +func (r Network_Storage_Iscsi) GetSnapshotCreationTimestamp() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSnapshotCreationTimestamp", nil, &r.Options, &resp) + return +} + +// Retrieve The percentage of used snapshot space after which to delete automated snapshots. +func (r Network_Storage_Iscsi) GetSnapshotDeletionThresholdPercentage() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSnapshotDeletionThresholdPercentage", nil, &r.Options, &resp) + return +} + +// Retrieve The snapshot size in bytes. +func (r Network_Storage_Iscsi) GetSnapshotSizeBytes() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSnapshotSizeBytes", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's available snapshot reservation space. +func (r Network_Storage_Iscsi) GetSnapshotSpaceAvailable() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSnapshotSpaceAvailable", nil, &r.Options, &resp) + return +} + +// Retrieve The snapshots associated with this SoftLayer_Network_Storage volume. 
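+
+// A sketch of a failover round-trip using the replication accessors above together
+// with failoverToReplicant/failbackFromReplicant defined earlier in this section;
+// sess and volumeId are placeholders, and picking the first partner is illustrative
+// only.
+//
+//	iscsi := GetNetworkStorageIscsiService(sess).Id(volumeId)
+//	partners, err := iscsi.GetReplicationPartners()
+//	if err == nil && len(partners) > 0 {
+//		// the local volume is unavailable while failed over to the replicant
+//		if ok, err := iscsi.FailoverToReplicant(partners[0].Id); err == nil && ok {
+//			// ... and once the primary is healthy again:
+//			_, _ = iscsi.FailbackFromReplicant()
+//		}
+//	}
+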
+func (r Network_Storage_Iscsi) GetSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSnapshots", nil, &r.Options, &resp) + return +} + +// Retrieves a list of snapshots for this SoftLayer_Network_Storage volume. This method works with the result limits and offset to support pagination. +func (r Network_Storage_Iscsi) GetSnapshotsForVolume() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSnapshotsForVolume", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Iscsi) GetStaasVersion() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getStaasVersion", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage groups this volume is attached to. +func (r Network_Storage_Iscsi) GetStorageGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getStorageGroups", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetStorageGroupsNetworkConnectionDetails() (resp []datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getStorageGroupsNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Iscsi) GetStorageTierLevel() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getStorageTierLevel", nil, &r.Options, &resp) + return +} + +// Retrieve A description of the Storage object. +func (r Network_Storage_Iscsi) GetStorageType() (resp datatypes.Network_Storage_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getStorageType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetTargetIpAddresses() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getTargetIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The amount of space used by the volume. +func (r Network_Storage_Iscsi) GetTotalBytesUsed() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getTotalBytesUsed", nil, &r.Options, &resp) + return +} + +// Retrieve The total snapshot retention count of all schedules on this network storage volume. +func (r Network_Storage_Iscsi) GetTotalScheduleSnapshotRetentionCount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getTotalScheduleSnapshotRetentionCount", nil, &r.Options, &resp) + return +} + +// Retrieve The usage notification for SL Storage services. +func (r Network_Storage_Iscsi) GetUsageNotification() (resp datatypes.Notification, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getUsageNotification", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetValidReplicationTargetDatacenterLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getValidReplicationTargetDatacenterLocations", nil, &r.Options, &resp) + return +} + +// Retrieve The type of network storage service. 
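+
+// getSnapshotsForVolume above honors result limits and offsets, so pages can be
+// walked with the Limit/Offset option helpers, which are assumed to exist for this
+// service following the pattern shown for the other services in this file; sess,
+// volumeId and the page size are placeholders.
+//
+//	iscsi := GetNetworkStorageIscsiService(sess).Id(volumeId)
+//	const pageSize = 50
+//	for offset := 0; ; offset += pageSize {
+//		page, err := iscsi.Limit(pageSize).Offset(offset).GetSnapshotsForVolume()
+//		if err != nil {
+//			break
+//		}
+//		// ... process page ...
+//		if len(page) < pageSize {
+//			break // last page
+//		}
+//	}
+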
+func (r Network_Storage_Iscsi) GetVendorName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getVendorName", nil, &r.Options, &resp) + return +} + +// Retrieve When applicable, the virtual guest associated with a Storage service. +func (r Network_Storage_Iscsi) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// This method returns the parameters for cloning a volume +func (r Network_Storage_Iscsi) GetVolumeDuplicateParameters() (resp datatypes.Container_Network_Storage_VolumeDuplicateParameters, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getVolumeDuplicateParameters", nil, &r.Options, &resp) + return +} + +// Retrieve The username and password history for a Storage service. +func (r Network_Storage_Iscsi) GetVolumeHistory() (resp []datatypes.Network_Storage_History, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getVolumeHistory", nil, &r.Options, &resp) + return +} + +// Retrieve The current status of a network storage volume. +func (r Network_Storage_Iscsi) GetVolumeStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getVolumeStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The account username and password for the EVault webCC interface. +func (r Network_Storage_Iscsi) GetWebccAccount() (resp datatypes.Account_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getWebccAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The Weekly Schedule which is associated with this network storage volume. +func (r Network_Storage_Iscsi) GetWeeklySchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getWeeklySchedule", nil, &r.Options, &resp) + return +} + +// Immediate Failover to a volume replicant. During the time which the replicant is in use the local nas volume will not be available. +func (r Network_Storage_Iscsi) ImmediateFailoverToReplicant(replicantId *int) (resp bool, err error) { + params := []interface{}{ + replicantId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "immediateFailoverToReplicant", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) IsBlockingOperationInProgress(exemptStatusKeyNames []string) (resp bool, err error) { + params := []interface{}{ + exemptStatusKeyNames, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "isBlockingOperationInProgress", params, &r.Options, &resp) + return +} + +// This method returns a boolean indicating whether the clone volume is ready for snapshot. +func (r Network_Storage_Iscsi) IsDuplicateReadyForSnapshot() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "isDuplicateReadyForSnapshot", nil, &r.Options, &resp) + return +} + +// This method returns a boolean indicating whether the clone volume is ready to mount. +func (r Network_Storage_Iscsi) IsDuplicateReadyToMount() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "isDuplicateReadyToMount", nil, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. 
The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. +func (r Network_Storage_Iscsi) RemoveAccessFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromHardware", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. +func (r Network_Storage_Iscsi) RemoveAccessFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage_Iscsi) RemoveAccessFromHost(typeClassName *string, hostId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + typeClassName, + hostId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromHost", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. 
+func (r Network_Storage_Iscsi) RemoveAccessFromHostList(hostObjectTemplates []datatypes.Container_Network_Storage_Host) (resp []datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hostObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromHostList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) RemoveAccessFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromIpAddress", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) RemoveAccessFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromIpAddressList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) RemoveAccessFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromSubnet", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) RemoveAccessFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage_Iscsi) RemoveAccessFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromVirtualGuest", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage_Iscsi) RemoveAccessFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replica volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage replica volume. 
+func (r Network_Storage_Iscsi) RemoveAccessToReplicantFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessToReplicantFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replica volume. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage will be listed in the allowedIpAddresses property of this storage replica volume. +func (r Network_Storage_Iscsi) RemoveAccessToReplicantFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessToReplicantFromIpAddressList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) RemoveAccessToReplicantFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessToReplicantFromSubnet", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationSubnets property of this storage volume. +func (r Network_Storage_Iscsi) RemoveAccessToReplicantFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessToReplicantFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replica volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage replica volume. +func (r Network_Storage_Iscsi) RemoveAccessToReplicantFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessToReplicantFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method will remove a credential from the current volume. The credential must have been created using the 'addNewCredential' method. +func (r Network_Storage_Iscsi) RemoveCredential(username *string) (resp bool, err error) { + params := []interface{}{ + username, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeCredential", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Restore an individual file so that it may be used as it was before it was deleted. +// +// If a file is deleted from a Virtual Server Storage account, the file is placed into the account's recycle bin and not permanently deleted. Therefore, restoreFile can be used to place the file back into your Virtual Server account's root directory. 
+func (r Network_Storage_Iscsi) RestoreFile(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + fileId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "restoreFile", params, &r.Options, &resp) + return +} + +// Restore the volume from a snapshot that was previously taken. +func (r Network_Storage_Iscsi) RestoreFromSnapshot(snapshotId *int) (resp bool, err error) { + params := []interface{}{ + snapshotId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "restoreFromSnapshot", params, &r.Options, &resp) + return +} + +// The method will retrieve the password for the StorageLayer or Virtual Server Storage Account and email the password. The Storage Account passwords will be emailed to the master user. For Virtual Server Storage, the password will be sent to the email address used as the username. +func (r Network_Storage_Iscsi) SendPasswordReminderEmail(username *string) (resp bool, err error) { + params := []interface{}{ + username, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "sendPasswordReminderEmail", params, &r.Options, &resp) + return +} + +// Enable or disable the mounting of a Storage volume. When mounting is enabled the Storage volume will be mountable or available for use. +// +// For Virtual Server volumes, disabling mounting will deny access to the Virtual Server Account, remove published material and deny all file interaction including uploads and downloads. +// +// Enabling or disabling mounting for Storage volumes is not possible if mounting has been disabled by SoftLayer or a parent account. +func (r Network_Storage_Iscsi) SetMountable(mountable *bool) (resp bool, err error) { + params := []interface{}{ + mountable, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "setMountable", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) SetSnapshotAllocation(capacityGb *int) (err error) { + var resp datatypes.Void + params := []interface{}{ + capacityGb, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "setSnapshotAllocation", params, &r.Options, &resp) + return +} + +// Upgrade the Storage volume to one of the upgradable packages (for example from 10 Gigs of EVault storage to 100 Gigs of EVault storage). +func (r Network_Storage_Iscsi) UpgradeVolumeCapacity(itemId *int) (resp bool, err error) { + params := []interface{}{ + itemId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "upgradeVolumeCapacity", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Upload a file to a Storage account's root directory. Once uploaded, this method returns new file entity identifier for the upload file. +// +// The following properties are required in the ''file'' parameter. +// *'''name''': The name of the file you wish to upload +// *'''content''': The raw contents of the file you wish to upload. +// *'''contentType''': The MIME-type of content that you wish to upload. 
+func (r Network_Storage_Iscsi) UploadFile(file *datatypes.Container_Utility_File_Entity) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + file, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "uploadFile", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Iscsi_OS_Type struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageIscsiOSTypeService returns an instance of the Network_Storage_Iscsi_OS_Type SoftLayer service +func GetNetworkStorageIscsiOSTypeService(sess *session.Session) Network_Storage_Iscsi_OS_Type { + return Network_Storage_Iscsi_OS_Type{Session: sess} +} + +func (r Network_Storage_Iscsi_OS_Type) Id(id int) Network_Storage_Iscsi_OS_Type { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Iscsi_OS_Type) Mask(mask string) Network_Storage_Iscsi_OS_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Iscsi_OS_Type) Filter(filter string) Network_Storage_Iscsi_OS_Type { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Iscsi_OS_Type) Limit(limit int) Network_Storage_Iscsi_OS_Type { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Iscsi_OS_Type) Offset(offset int) Network_Storage_Iscsi_OS_Type { + r.Options.Offset = &offset + return r +} + +// Use this method to retrieve all iSCSI OS Types. +func (r Network_Storage_Iscsi_OS_Type) GetAllObjects() (resp []datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi_OS_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi_OS_Type) GetObject() (resp datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi_OS_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_MassDataMigration_CrossRegion_Country_Xref struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageMassDataMigrationCrossRegionCountryXrefService returns an instance of the Network_Storage_MassDataMigration_CrossRegion_Country_Xref SoftLayer service +func GetNetworkStorageMassDataMigrationCrossRegionCountryXrefService(sess *session.Session) Network_Storage_MassDataMigration_CrossRegion_Country_Xref { + return Network_Storage_MassDataMigration_CrossRegion_Country_Xref{Session: sess} +} + +func (r Network_Storage_MassDataMigration_CrossRegion_Country_Xref) Id(id int) Network_Storage_MassDataMigration_CrossRegion_Country_Xref { + r.Options.Id = &id + return r +} + +func (r Network_Storage_MassDataMigration_CrossRegion_Country_Xref) Mask(mask string) Network_Storage_MassDataMigration_CrossRegion_Country_Xref { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_MassDataMigration_CrossRegion_Country_Xref) Filter(filter string) Network_Storage_MassDataMigration_CrossRegion_Country_Xref { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_MassDataMigration_CrossRegion_Country_Xref) Limit(limit int) Network_Storage_MassDataMigration_CrossRegion_Country_Xref { + r.Options.Limit = &limit + return r +} + +func (r 
Network_Storage_MassDataMigration_CrossRegion_Country_Xref) Offset(offset int) Network_Storage_MassDataMigration_CrossRegion_Country_Xref { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Storage_MassDataMigration_CrossRegion_Country_Xref) GetAllObjects() (resp []datatypes.Network_Storage_MassDataMigration_CrossRegion_Country_Xref, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_CrossRegion_Country_Xref", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve SoftLayer_Locale_Country Id. +func (r Network_Storage_MassDataMigration_CrossRegion_Country_Xref) GetCountry() (resp datatypes.Locale_Country, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_CrossRegion_Country_Xref", "getCountry", nil, &r.Options, &resp) + return +} + +// Retrieve Location Group ID of CleverSafe cross region. +func (r Network_Storage_MassDataMigration_CrossRegion_Country_Xref) GetLocationGroup() (resp datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_CrossRegion_Country_Xref", "getLocationGroup", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_MassDataMigration_CrossRegion_Country_Xref) GetObject() (resp datatypes.Network_Storage_MassDataMigration_CrossRegion_Country_Xref, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_CrossRegion_Country_Xref", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Storage_MassDataMigration_Request data type contains information on a single Mass Data Migration request. Creation of these requests is limited to SoftLayer customers through the SoftLayer Customer Portal. +type Network_Storage_MassDataMigration_Request struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageMassDataMigrationRequestService returns an instance of the Network_Storage_MassDataMigration_Request SoftLayer service +func GetNetworkStorageMassDataMigrationRequestService(sess *session.Session) Network_Storage_MassDataMigration_Request { + return Network_Storage_MassDataMigration_Request{Session: sess} +} + +func (r Network_Storage_MassDataMigration_Request) Id(id int) Network_Storage_MassDataMigration_Request { + r.Options.Id = &id + return r +} + +func (r Network_Storage_MassDataMigration_Request) Mask(mask string) Network_Storage_MassDataMigration_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_MassDataMigration_Request) Filter(filter string) Network_Storage_MassDataMigration_Request { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_MassDataMigration_Request) Limit(limit int) Network_Storage_MassDataMigration_Request { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_MassDataMigration_Request) Offset(offset int) Network_Storage_MassDataMigration_Request { + r.Options.Offset = &offset + return r +} + +// Edit the properties of a Mass Data Migration request record by passing in a modified instance of a SoftLayer_Network_Storage_MassDataMigration_Request object. 
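+//
+// Illustrative sketch (not part of the generated code): editing a request by id.
+// sess and requestId are placeholders; only the fields being changed need to be
+// set on the template, and their exact names live in the datatypes package.
+//
+//	template := datatypes.Network_Storage_MassDataMigration_Request{
+//		// set only the properties that should change
+//	}
+//	ok, err := GetNetworkStorageMassDataMigrationRequestService(sess).Id(requestId).EditObject(&template)
+//	if err != nil || !ok {
+//		// handle the failed edit
+//	}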
+func (r Network_Storage_MassDataMigration_Request) EditObject(templateObject *datatypes.Network_Storage_MassDataMigration_Request) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account to which the request belongs. +func (r Network_Storage_MassDataMigration_Request) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The active tickets that are attached to the MDMS request. +func (r Network_Storage_MassDataMigration_Request) GetActiveTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getActiveTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The customer address where the device is shipped to. +func (r Network_Storage_MassDataMigration_Request) GetAddress() (resp datatypes.Account_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getAddress", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_MassDataMigration_Request) GetAllObjects() (resp []datatypes.Network_Storage_MassDataMigration_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieves a list of all the possible statuses to which a request may be set. +func (r Network_Storage_MassDataMigration_Request) GetAllRequestStatuses() (resp []datatypes.Network_Storage_MassDataMigration_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getAllRequestStatuses", nil, &r.Options, &resp) + return +} + +// Retrieve An associated parent billing item which is active. Includes billing items which are scheduled to be cancelled in the future. +func (r Network_Storage_MassDataMigration_Request) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The employee user who created the request. +func (r Network_Storage_MassDataMigration_Request) GetCreateEmployee() (resp datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getCreateEmployee", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who created the request. +func (r Network_Storage_MassDataMigration_Request) GetCreateUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getCreateUser", nil, &r.Options, &resp) + return +} + +// Retrieve The device configurations. +func (r Network_Storage_MassDataMigration_Request) GetDeviceConfiguration() (resp datatypes.Network_Storage_MassDataMigration_Request_DeviceConfiguration, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getDeviceConfiguration", nil, &r.Options, &resp) + return +} + +// Retrieve The key contacts for this requests. 
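+//
+// Illustrative sketch (assumption): listing the key contacts for a request, where
+// sess and requestId are placeholders supplied by the caller.
+//
+//	contacts, err := GetNetworkStorageMassDataMigrationRequestService(sess).Id(requestId).GetKeyContacts()
+//	if err != nil {
+//		// handle the SoftLayer API error
+//	}
+//	for _, c := range contacts {
+//		_ = c // each entry is a datatypes.Network_Storage_MassDataMigration_Request_KeyContact
+//	}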
+func (r Network_Storage_MassDataMigration_Request) GetKeyContacts() (resp []datatypes.Network_Storage_MassDataMigration_Request_KeyContact, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getKeyContacts", nil, &r.Options, &resp) + return +} + +// Retrieve The employee who last modified the request. +func (r Network_Storage_MassDataMigration_Request) GetModifyEmployee() (resp datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getModifyEmployee", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who last modified the request. +func (r Network_Storage_MassDataMigration_Request) GetModifyUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getModifyUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_MassDataMigration_Request) GetObject() (resp datatypes.Network_Storage_MassDataMigration_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getObject", nil, &r.Options, &resp) + return +} + +// Returns placeholder MDMS requests for any MDMS order pending approval. +func (r Network_Storage_MassDataMigration_Request) GetPendingRequests() (resp []datatypes.Network_Storage_MassDataMigration_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getPendingRequests", nil, &r.Options, &resp) + return +} + +// Retrieve The shipments of the request. +func (r Network_Storage_MassDataMigration_Request) GetShipments() (resp []datatypes.Account_Shipment, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getShipments", nil, &r.Options, &resp) + return +} + +// Retrieve The status of the request. +func (r Network_Storage_MassDataMigration_Request) GetStatus() (resp datatypes.Network_Storage_MassDataMigration_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve Ticket that is attached to this mass data migration request. +func (r Network_Storage_MassDataMigration_Request) GetTicket() (resp datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getTicket", nil, &r.Options, &resp) + return +} + +// Retrieve All tickets that are attached to the mass data migration request. +func (r Network_Storage_MassDataMigration_Request) GetTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request", "getTickets", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Storage_MassDataMigration_Request_KeyContact data type contains name, email, and phone for key contact at customer location who will handle Mass Data Migration. 
+type Network_Storage_MassDataMigration_Request_KeyContact struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageMassDataMigrationRequestKeyContactService returns an instance of the Network_Storage_MassDataMigration_Request_KeyContact SoftLayer service +func GetNetworkStorageMassDataMigrationRequestKeyContactService(sess *session.Session) Network_Storage_MassDataMigration_Request_KeyContact { + return Network_Storage_MassDataMigration_Request_KeyContact{Session: sess} +} + +func (r Network_Storage_MassDataMigration_Request_KeyContact) Id(id int) Network_Storage_MassDataMigration_Request_KeyContact { + r.Options.Id = &id + return r +} + +func (r Network_Storage_MassDataMigration_Request_KeyContact) Mask(mask string) Network_Storage_MassDataMigration_Request_KeyContact { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_MassDataMigration_Request_KeyContact) Filter(filter string) Network_Storage_MassDataMigration_Request_KeyContact { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_MassDataMigration_Request_KeyContact) Limit(limit int) Network_Storage_MassDataMigration_Request_KeyContact { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_MassDataMigration_Request_KeyContact) Offset(offset int) Network_Storage_MassDataMigration_Request_KeyContact { + r.Options.Offset = &offset + return r +} + +// Retrieve The request this key contact belongs to. +func (r Network_Storage_MassDataMigration_Request_KeyContact) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request_KeyContact", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_MassDataMigration_Request_KeyContact) GetObject() (resp datatypes.Network_Storage_MassDataMigration_Request_KeyContact, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request_KeyContact", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The request this key contact belongs to. +func (r Network_Storage_MassDataMigration_Request_KeyContact) GetRequest() (resp datatypes.Network_Storage_MassDataMigration_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request_KeyContact", "getRequest", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Storage_MassDataMigration_Request_Status data type contains general information relating to the statuses to which a Mass Data Migration Request may be set. 
+type Network_Storage_MassDataMigration_Request_Status struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageMassDataMigrationRequestStatusService returns an instance of the Network_Storage_MassDataMigration_Request_Status SoftLayer service +func GetNetworkStorageMassDataMigrationRequestStatusService(sess *session.Session) Network_Storage_MassDataMigration_Request_Status { + return Network_Storage_MassDataMigration_Request_Status{Session: sess} +} + +func (r Network_Storage_MassDataMigration_Request_Status) Id(id int) Network_Storage_MassDataMigration_Request_Status { + r.Options.Id = &id + return r +} + +func (r Network_Storage_MassDataMigration_Request_Status) Mask(mask string) Network_Storage_MassDataMigration_Request_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_MassDataMigration_Request_Status) Filter(filter string) Network_Storage_MassDataMigration_Request_Status { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_MassDataMigration_Request_Status) Limit(limit int) Network_Storage_MassDataMigration_Request_Status { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_MassDataMigration_Request_Status) Offset(offset int) Network_Storage_MassDataMigration_Request_Status { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Storage_MassDataMigration_Request_Status) GetObject() (resp datatypes.Network_Storage_MassDataMigration_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_MassDataMigration_Request_Status", "getObject", nil, &r.Options, &resp) + return +} + +// Schedules can be created for select Storage services, such as iscsi. These schedules are used to perform various tasks such as scheduling snapshots or synchronizing replicants. +type Network_Storage_Schedule struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageScheduleService returns an instance of the Network_Storage_Schedule SoftLayer service +func GetNetworkStorageScheduleService(sess *session.Session) Network_Storage_Schedule { + return Network_Storage_Schedule{Session: sess} +} + +func (r Network_Storage_Schedule) Id(id int) Network_Storage_Schedule { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Schedule) Mask(mask string) Network_Storage_Schedule { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Schedule) Filter(filter string) Network_Storage_Schedule { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Schedule) Limit(limit int) Network_Storage_Schedule { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Schedule) Offset(offset int) Network_Storage_Schedule { + r.Options.Offset = &offset + return r +} + +// Create a nas volume schedule +func (r Network_Storage_Schedule) CreateObject(templateObject *datatypes.Network_Storage_Schedule) (resp datatypes.Network_Storage_Schedule, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "createObject", params, &r.Options, &resp) + return +} + +// Delete a network storage schedule. 
'''This cannot be undone.''' ''deleteObject'' returns Boolean ''true'' on successful deletion or ''false'' if it was unable to remove a schedule; +func (r Network_Storage_Schedule) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "deleteObject", nil, &r.Options, &resp) + return +} + +// Edit a nas volume schedule +func (r Network_Storage_Schedule) EditObject(templateObject *datatypes.Network_Storage_Schedule) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The hour parameter of this schedule. +func (r Network_Storage_Schedule) GetDay() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getDay", nil, &r.Options, &resp) + return +} + +// Retrieve The day of the month parameter of this schedule. +func (r Network_Storage_Schedule) GetDayOfMonth() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getDayOfMonth", nil, &r.Options, &resp) + return +} + +// Retrieve The day of the week parameter of this schedule. +func (r Network_Storage_Schedule) GetDayOfWeek() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getDayOfWeek", nil, &r.Options, &resp) + return +} + +// Retrieve Events which have been created as the result of a schedule execution. +func (r Network_Storage_Schedule) GetEvents() (resp []datatypes.Network_Storage_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getEvents", nil, &r.Options, &resp) + return +} + +// Retrieve The hour parameter of this schedule. +func (r Network_Storage_Schedule) GetHour() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getHour", nil, &r.Options, &resp) + return +} + +// Retrieve The minute parameter of this schedule. +func (r Network_Storage_Schedule) GetMinute() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getMinute", nil, &r.Options, &resp) + return +} + +// Retrieve The month of the year parameter of this schedule. +func (r Network_Storage_Schedule) GetMonthOfYear() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getMonthOfYear", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Schedule) GetObject() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The associated partnership for a schedule. +func (r Network_Storage_Schedule) GetPartnership() (resp datatypes.Network_Storage_Partnership, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getPartnership", nil, &r.Options, &resp) + return +} + +// Retrieve Properties used for configuration of a schedule. +func (r Network_Storage_Schedule) GetProperties() (resp []datatypes.Network_Storage_Schedule_Property, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getProperties", nil, &r.Options, &resp) + return +} + +// Retrieve Replica snapshots which have been created as the result of this schedule's execution. 
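+//
+// Illustrative sketch (assumption): listing the replica snapshots produced by a
+// schedule. sess and scheduleId are placeholders.
+//
+//	snapshots, err := GetNetworkStorageScheduleService(sess).Id(scheduleId).GetReplicaSnapshots()
+//	if err != nil {
+//		// handle the SoftLayer API error
+//	}
+//	_ = snapshots // []datatypes.Network_Storage entries, one per replica snapshot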
+func (r Network_Storage_Schedule) GetReplicaSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getReplicaSnapshots", nil, &r.Options, &resp) + return +} + +// Retrieve The number of snapshots this schedule is configured to retain. +func (r Network_Storage_Schedule) GetRetentionCount() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getRetentionCount", nil, &r.Options, &resp) + return +} + +// Retrieve The minute parameter of this schedule. +func (r Network_Storage_Schedule) GetSecond() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getSecond", nil, &r.Options, &resp) + return +} + +// Retrieve Snapshots which have been created as the result of this schedule's execution. +func (r Network_Storage_Schedule) GetSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getSnapshots", nil, &r.Options, &resp) + return +} + +// Retrieve The type provides a standardized definition for a schedule. +func (r Network_Storage_Schedule) GetType() (resp datatypes.Network_Storage_Schedule_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The associated volume for a schedule. +func (r Network_Storage_Schedule) GetVolume() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getVolume", nil, &r.Options, &resp) + return +} + +// A schedule property type is used to allow for a standardized method of defining network storage schedules. +type Network_Storage_Schedule_Property_Type struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageSchedulePropertyTypeService returns an instance of the Network_Storage_Schedule_Property_Type SoftLayer service +func GetNetworkStorageSchedulePropertyTypeService(sess *session.Session) Network_Storage_Schedule_Property_Type { + return Network_Storage_Schedule_Property_Type{Session: sess} +} + +func (r Network_Storage_Schedule_Property_Type) Id(id int) Network_Storage_Schedule_Property_Type { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Schedule_Property_Type) Mask(mask string) Network_Storage_Schedule_Property_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Schedule_Property_Type) Filter(filter string) Network_Storage_Schedule_Property_Type { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Schedule_Property_Type) Limit(limit int) Network_Storage_Schedule_Property_Type { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Schedule_Property_Type) Offset(offset int) Network_Storage_Schedule_Property_Type { + r.Options.Offset = &offset + return r +} + +// Use this method to retrieve all network storage schedule property types. 
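+//
+// Illustrative sketch (assumption): enumerating every schedule property type with
+// an authenticated *session.Session named sess.
+//
+//	propertyTypes, err := GetNetworkStorageSchedulePropertyTypeService(sess).GetAllObjects()
+//	if err != nil {
+//		// handle the SoftLayer API error
+//	}
+//	fmt.Println(len(propertyTypes), "schedule property types")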
+func (r Network_Storage_Schedule_Property_Type) GetAllObjects() (resp []datatypes.Network_Storage_Schedule_Property_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule_Property_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Schedule_Property_Type) GetObject() (resp datatypes.Network_Storage_Schedule_Property_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule_Property_Type", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Subnet data type contains general information relating to a single SoftLayer subnet. Personal information in this type such as names, addresses, and phone numbers are assigned to the account only and not to users belonging to the account. +type Network_Subnet struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetService returns an instance of the Network_Subnet SoftLayer service +func GetNetworkSubnetService(sess *session.Session) Network_Subnet { + return Network_Subnet{Session: sess} +} + +func (r Network_Subnet) Id(id int) Network_Subnet { + r.Options.Id = &id + return r +} + +func (r Network_Subnet) Mask(mask string) Network_Subnet { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet) Filter(filter string) Network_Subnet { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet) Limit(limit int) Network_Subnet { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet) Offset(offset int) Network_Subnet { + r.Options.Offset = &offset + return r +} + +// This method is used to allow access to a SoftLayer_Network_Storage volume that supports host- or network-level access control. +func (r Network_Subnet) AllowAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "allowAccessToNetworkStorage", params, &r.Options, &resp) + return +} + +// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control. +func (r Network_Subnet) AllowAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "allowAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// Create the default PTR records for this subnet +func (r Network_Subnet) CreateReverseDomainRecords() (resp datatypes.Dns_Domain_Reverse, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "createReverseDomainRecords", nil, &r.Options, &resp) + return +} + +// This function is used to create a new transaction to modify a subnet route. Routes are updated in one to two minutes depending on the number of transactions that are pending for a router. +// +// Usage of this function is restricted and may only be called from authorized accounts. It is not available for general API users without justification and consent from a SoftLayer representative. 
+func (r Network_Subnet) CreateSubnetRouteUpdateTransaction(newEndPointIpAddress *string) (resp bool, err error) { + params := []interface{}{ + newEndPointIpAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "createSubnetRouteUpdateTransaction", params, &r.Options, &resp) + return +} + +// This function is used to create a new SoftLayer SWIP transaction to register your RWHOIS data with ARIN. SWIP transactions can only be initiated on subnets that contain more than 8 IP addresses. +func (r Network_Subnet) CreateSwipTransaction() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "createSwipTransaction", nil, &r.Options, &resp) + return +} + +// Edit the note for this subnet. +func (r Network_Subnet) EditNote(note *string) (resp bool, err error) { + params := []interface{}{ + note, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "editNote", params, &r.Options, &resp) + return +} + +// Retrieve a list of a SoftLayer customer's subnets along with their SWIP transaction statuses. This is a shortcut method that combines the SoftLayer_Network_Subnet retrieval methods along with [[object masks]] to retrieve their subnets' associated SWIP transactions as well. +// +// This is a special function built for SoftLayer's use on the SWIP section of the customer portal, but may also be useful for API users looking for the same data. +func (r Network_Subnet) FindAllSubnetsAndActiveSwipTransactionStatus() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "findAllSubnetsAndActiveSwipTransactionStatus", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve If present, the active registration for this subnet. +func (r Network_Subnet) GetActiveRegistration() (resp datatypes.Network_Subnet_Registration, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getActiveRegistration", nil, &r.Options, &resp) + return +} + +// Retrieve All the swip transactions associated with a subnet that are still active. +func (r Network_Subnet) GetActiveSwipTransaction() (resp datatypes.Network_Subnet_Swip_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getActiveSwipTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a subnet. +func (r Network_Subnet) GetActiveTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getActiveTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve Identifier which distinguishes whether the subnet is public or private address space. +func (r Network_Subnet) GetAddressSpace() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getAddressSpace", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Allowed_Host information to connect this Subnet to Network Storage supporting access control lists. +func (r Network_Subnet) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getAllowedHost", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. 
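+//
+// Illustrative sketch (assumption): listing the storage volumes a subnet has been
+// granted access to. sess and subnetId are placeholders.
+//
+//	allowed, err := GetNetworkSubnetService(sess).Id(subnetId).GetAllowedNetworkStorage()
+//	if err != nil {
+//		// handle the SoftLayer API error
+//	}
+//	_ = allowed // []datatypes.Network_Storage the subnet may mount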
+func (r Network_Subnet) GetAllowedNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getAllowedNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects whose replicas this SoftLayer_Hardware has access to. +func (r Network_Subnet) GetAllowedNetworkStorageReplicas() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getAllowedNetworkStorageReplicas", nil, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that are authorized for access by this SoftLayer_Network_Subnet. +func (r Network_Subnet) GetAttachedNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getAttachedNetworkStorages", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that can be authorized to this SoftLayer_Network_Subnet. +func (r Network_Subnet) GetAvailableNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getAvailableNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve The billing item for a subnet. +func (r Network_Subnet) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetBoundDescendants() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getBoundDescendants", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not this subnet is associated with a router. Subnets that are not associated with a router cannot be routed. +func (r Network_Subnet) GetBoundRouterFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getBoundRouterFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetBoundRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getBoundRouters", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetChildren() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getChildren", nil, &r.Options, &resp) + return +} + +// Retrieve The data center this subnet may be routed within.
+func (r Network_Subnet) GetDatacenter() (resp datatypes.Location_Datacenter, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetDescendants() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getDescendants", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetDisplayLabel() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getDisplayLabel", nil, &r.Options, &resp) + return +} + +// Retrieve A static routed ip address +func (r Network_Subnet) GetEndPointIpAddress() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getEndPointIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetGlobalIpRecord() (resp datatypes.Network_Subnet_IpAddress_Global, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getGlobalIpRecord", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware using IP addresses on this subnet. +func (r Network_Subnet) GetHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All the ip addresses associated with a subnet. +func (r Network_Subnet) GetIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The upstream network component firewall. +func (r Network_Subnet) GetNetworkComponentFirewall() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getNetworkComponentFirewall", nil, &r.Options, &resp) + return +} + +// Retrieve The Private Network identifier this subnet is within, if applicable. +func (r Network_Subnet) GetNetworkId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getNetworkId", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetNetworkProtectionAddresses() (resp []datatypes.Network_Protection_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getNetworkProtectionAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve IPSec network tunnels that have access to a private subnet. +func (r Network_Subnet) GetNetworkTunnelContexts() (resp []datatypes.Network_Tunnel_Module_Context, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getNetworkTunnelContexts", nil, &r.Options, &resp) + return +} + +// Retrieve The VLAN object that a subnet is associated with. +func (r Network_Subnet) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getNetworkVlan", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Subnet object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Subnet service. You can only retrieve the subnet whose vlan is associated with the account that you portal user is assigned to. +func (r Network_Subnet) GetObject() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The pod in which this subnet resides. 
+func (r Network_Subnet) GetPodName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getPodName", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetProtectedIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getProtectedIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetRegionalInternetRegistry() (resp datatypes.Network_Regional_Internet_Registry, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getRegionalInternetRegistry", nil, &r.Options, &resp) + return +} + +// Retrieve All registrations that have been created for this subnet. +func (r Network_Subnet) GetRegistrations() (resp []datatypes.Network_Subnet_Registration, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getRegistrations", nil, &r.Options, &resp) + return +} + +// Retrieve The reverse DNS domain associated with this subnet. +func (r Network_Subnet) GetReverseDomain() (resp datatypes.Dns_Domain, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getReverseDomain", nil, &r.Options, &resp) + return +} + +// Retrieve all reverse DNS records associated with a subnet. +func (r Network_Subnet) GetReverseDomainRecords() (resp []datatypes.Dns_Domain, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getReverseDomainRecords", nil, &r.Options, &resp) + return +} + +// Retrieve An identifier of the role the subnet is within. Roles dictate how a subnet may be used. +func (r Network_Subnet) GetRoleKeyName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getRoleKeyName", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the role the subnet is within. Roles dictate how a subnet may be used. +func (r Network_Subnet) GetRoleName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getRoleName", nil, &r.Options, &resp) + return +} + +// getRoutableEndpointIpAddresses retrieves valid routable endpoint IP addresses for a subnet. You may use any IP address in a portable subnet, but may not use the network identifier, gateway, or broadcast address of primary and secondary subnets on a VLAN. +func (r Network_Subnet) GetRoutableEndpointIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getRoutableEndpointIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The identifier for the type of route the subnet is currently configured for. +func (r Network_Subnet) GetRoutingTypeKeyName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getRoutingTypeKeyName", nil, &r.Options, &resp) + return +} + +// Retrieve The name for the type of route the subnet is currently configured for. +func (r Network_Subnet) GetRoutingTypeName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getRoutingTypeName", nil, &r.Options, &resp) + return +} + +// Retrieve the subnet associated with an IP address. You may only retrieve subnets assigned to your SoftLayer customer account.
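+//
+// Illustrative sketch (assumption): looking up the subnet that contains a given
+// IPv4 address. sess and the address literal are placeholders, and the object
+// mask is shown purely as an example and can be omitted.
+//
+//	subnet, err := GetNetworkSubnetService(sess).
+//		Mask("id;networkIdentifier;cidr").
+//		GetSubnetForIpAddress(sl.String("203.0.113.10"))
+//	if err != nil {
+//		// handle the SoftLayer API error
+//	}
+//	_ = subnet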
+func (r Network_Subnet) GetSubnetForIpAddress(ipAddress *string) (resp datatypes.Network_Subnet, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getSubnetForIpAddress", params, &r.Options, &resp) + return +} + +// Retrieve All the swip transactions associated with a subnet. +func (r Network_Subnet) GetSwipTransaction() (resp []datatypes.Network_Subnet_Swip_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getSwipTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetUnboundDescendants() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getUnboundDescendants", nil, &r.Options, &resp) + return +} + +// Retrieve Provides the total number of utilized IP addresses on this subnet. The primary consumer of IP addresses are compute resources, which can consume more than one address. This value is only supported for primary subnet types. +func (r Network_Subnet) GetUtilizedIpAddressCount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getUtilizedIpAddressCount", nil, &r.Options, &resp) + return +} + +// Retrieve The Virtual Servers using IP addresses on this subnet. +func (r Network_Subnet) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getVirtualGuests", nil, &r.Options, &resp) + return +} + +// This method is used to remove access to multiple SoftLayer_Network_Storage volumes +func (r Network_Subnet) RemoveAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "removeAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Subnet_IpAddress data type contains general information relating to a single SoftLayer IPv4 address. +type Network_Subnet_IpAddress struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetIpAddressService returns an instance of the Network_Subnet_IpAddress SoftLayer service +func GetNetworkSubnetIpAddressService(sess *session.Session) Network_Subnet_IpAddress { + return Network_Subnet_IpAddress{Session: sess} +} + +func (r Network_Subnet_IpAddress) Id(id int) Network_Subnet_IpAddress { + r.Options.Id = &id + return r +} + +func (r Network_Subnet_IpAddress) Mask(mask string) Network_Subnet_IpAddress { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet_IpAddress) Filter(filter string) Network_Subnet_IpAddress { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet_IpAddress) Limit(limit int) Network_Subnet_IpAddress { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet_IpAddress) Offset(offset int) Network_Subnet_IpAddress { + r.Options.Offset = &offset + return r +} + +// This method is used to allow access to a SoftLayer_Network_Storage volume that supports host- or network-level access control. 
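+//
+// Illustrative sketch (assumption): granting an IP address access to a single
+// storage volume. sess, ipAddressId, and storageId are placeholders; only the
+// volume's id is set on the template object here.
+//
+//	storage := datatypes.Network_Storage{Id: sl.Int(storageId)}
+//	ok, err := GetNetworkSubnetIpAddressService(sess).Id(ipAddressId).AllowAccessToNetworkStorage(&storage)
+//	if err != nil || !ok {
+//		// handle the failed authorization
+//	}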
+func (r Network_Subnet_IpAddress) AllowAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "allowAccessToNetworkStorage", params, &r.Options, &resp) + return +} + +// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control. +func (r Network_Subnet_IpAddress) AllowAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "allowAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// Edit a subnet IP address. +func (r Network_Subnet_IpAddress) EditObject(templateObject *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "editObject", params, &r.Options, &resp) + return +} + +// This function is used to edit multiple objects at the same time. +func (r Network_Subnet_IpAddress) EditObjects(templateObjects []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "editObjects", params, &r.Options, &resp) + return +} + +// Search for an IP address record by IPv4 address. +func (r Network_Subnet_IpAddress) FindByIpv4Address(ipAddress *string) (resp datatypes.Network_Subnet_IpAddress, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "findByIpv4Address", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Allowed_Host information to connect this IP Address to Network Storage supporting access control lists. +func (r Network_Subnet_IpAddress) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getAllowedHost", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. +func (r Network_Subnet_IpAddress) GetAllowedNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getAllowedNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects whose replicas this SoftLayer_Hardware has access to. +func (r Network_Subnet_IpAddress) GetAllowedNetworkStorageReplicas() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getAllowedNetworkStorageReplicas", nil, &r.Options, &resp) + return +} + +// Retrieve The application delivery controller using this address. +func (r Network_Subnet_IpAddress) GetApplicationDeliveryController() (resp datatypes.Network_Application_Delivery_Controller, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getApplicationDeliveryController", nil, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that are authorized for access by this SoftLayer_Network_Subnet_IpAddress.
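+//
+// Illustrative sketch (assumption): listing volumes of a given storage type that
+// are already attached to this IP address. sess and ipAddressId are placeholders,
+// and the "ISCSI" nasType value is an assumed example rather than an exhaustive list.
+//
+//	attached, err := GetNetworkSubnetIpAddressService(sess).Id(ipAddressId).GetAttachedNetworkStorages(sl.String("ISCSI"))
+//	if err != nil {
+//		// handle the SoftLayer API error
+//	}
+//	_ = attached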
+func (r Network_Subnet_IpAddress) GetAttachedNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getAttachedNetworkStorages", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that can be authorized to this SoftLayer_Network_Subnet_IpAddress. +func (r Network_Subnet_IpAddress) GetAvailableNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getAvailableNetworkStorages", params, &r.Options, &resp) + return +} + +// Search for an IP address record by IP address. +func (r Network_Subnet_IpAddress) GetByIpAddress(ipAddress *string) (resp datatypes.Network_Subnet_IpAddress, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getByIpAddress", params, &r.Options, &resp) + return +} + +// Retrieve An IPSec network tunnel's address translations. These translations use a SoftLayer ip address from an assigned static NAT subnet to deliver the packets to the remote (customer) destination. +func (r Network_Subnet_IpAddress) GetContextTunnelTranslations() (resp []datatypes.Network_Tunnel_Module_Context_Address_Translation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getContextTunnelTranslations", nil, &r.Options, &resp) + return +} + +// Retrieve All the subnets routed to an IP address. +func (r Network_Subnet_IpAddress) GetEndpointSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getEndpointSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve A network component that is statically routed to an IP address. +func (r Network_Subnet_IpAddress) GetGuestNetworkComponent() (resp datatypes.Virtual_Guest_Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getGuestNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve A network component that is statically routed to an IP address. +func (r Network_Subnet_IpAddress) GetGuestNetworkComponentBinding() (resp datatypes.Virtual_Guest_Network_Component_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getGuestNetworkComponentBinding", nil, &r.Options, &resp) + return +} + +// Retrieve A server that this IP address is routed to. +func (r Network_Subnet_IpAddress) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve A network component that is statically routed to an IP address. +func (r Network_Subnet_IpAddress) GetNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getNetworkComponent", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Subnet_IpAddress object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Subnet_IpAddress service. You can only retrieve the IP address whose subnet is associated with a VLAN that is associated with the account that your portal user is assigned to. 
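+//
+// Illustrative sketch (assumption): fetching a single IP address record by its id.
+// sess and ipAddressId are placeholders; the mask simply limits the returned
+// properties and can be omitted.
+//
+//	ip, err := GetNetworkSubnetIpAddressService(sess).Id(ipAddressId).Mask("id;ipAddress;isReserved").GetObject()
+//	if err != nil {
+//		// handle the SoftLayer API error
+//	}
+//	_ = ip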
+func (r Network_Subnet_IpAddress) GetObject() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The network gateway appliance using this address as the private IP address. +func (r Network_Subnet_IpAddress) GetPrivateNetworkGateway() (resp datatypes.Network_Gateway, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getPrivateNetworkGateway", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet_IpAddress) GetProtectionAddress() (resp []datatypes.Network_Protection_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getProtectionAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The network gateway appliance using this address as the public IP address. +func (r Network_Subnet_IpAddress) GetPublicNetworkGateway() (resp datatypes.Network_Gateway, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getPublicNetworkGateway", nil, &r.Options, &resp) + return +} + +// Retrieve An IPMI-based management network component of the IP address. +func (r Network_Subnet_IpAddress) GetRemoteManagementNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getRemoteManagementNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve An IP address' associated subnet. +func (r Network_Subnet_IpAddress) GetSubnet() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getSubnet", nil, &r.Options, &resp) + return +} + +// Retrieve All events for this IP address stored in the datacenter syslogs from the last 24 hours +func (r Network_Subnet_IpAddress) GetSyslogEventsOneDay() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getSyslogEventsOneDay", nil, &r.Options, &resp) + return +} + +// Retrieve All events for this IP address stored in the datacenter syslogs from the last 7 days +func (r Network_Subnet_IpAddress) GetSyslogEventsSevenDays() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getSyslogEventsSevenDays", nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by destination port, for the last 24 hours +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsByDestinationPortOneDay() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getTopTenSyslogEventsByDestinationPortOneDay", nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by destination port, for the last 7 days +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsByDestinationPortSevenDays() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getTopTenSyslogEventsByDestinationPortSevenDays", nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by source port, for the last 24 hours +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsByProtocolsOneDay() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getTopTenSyslogEventsByProtocolsOneDay", 
nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by source port, for the last 7 days +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsByProtocolsSevenDays() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getTopTenSyslogEventsByProtocolsSevenDays", nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by source ip address, for the last 24 hours +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsBySourceIpOneDay() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getTopTenSyslogEventsBySourceIpOneDay", nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by source ip address, for the last 7 days +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsBySourceIpSevenDays() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getTopTenSyslogEventsBySourceIpSevenDays", nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by source port, for the last 24 hours +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsBySourcePortOneDay() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getTopTenSyslogEventsBySourcePortOneDay", nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by source port, for the last 7 days +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsBySourcePortSevenDays() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getTopTenSyslogEventsBySourcePortSevenDays", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual guest that this IP address is routed to. +func (r Network_Subnet_IpAddress) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// Retrieve Virtual licenses allocated for an IP Address. 
+func (r Network_Subnet_IpAddress) GetVirtualLicenses() (resp []datatypes.Software_VirtualLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getVirtualLicenses", nil, &r.Options, &resp) + return +} + +// This method is used to remove access to multiple SoftLayer_Network_Storage volumes +func (r Network_Subnet_IpAddress) RemoveAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "removeAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Subnet_IpAddress_Global struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetIpAddressGlobalService returns an instance of the Network_Subnet_IpAddress_Global SoftLayer service +func GetNetworkSubnetIpAddressGlobalService(sess *session.Session) Network_Subnet_IpAddress_Global { + return Network_Subnet_IpAddress_Global{Session: sess} +} + +func (r Network_Subnet_IpAddress_Global) Id(id int) Network_Subnet_IpAddress_Global { + r.Options.Id = &id + return r +} + +func (r Network_Subnet_IpAddress_Global) Mask(mask string) Network_Subnet_IpAddress_Global { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet_IpAddress_Global) Filter(filter string) Network_Subnet_IpAddress_Global { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet_IpAddress_Global) Limit(limit int) Network_Subnet_IpAddress_Global { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet_IpAddress_Global) Offset(offset int) Network_Subnet_IpAddress_Global { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Network_Subnet_IpAddress_Global) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The active transaction associated with this Global IP. +func (r Network_Subnet_IpAddress_Global) GetActiveTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "getActiveTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for this Global IP. 
+func (r Network_Subnet_IpAddress_Global) GetBillingItem() (resp datatypes.Billing_Item_Network_Subnet_IpAddress_Global, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet_IpAddress_Global) GetDestinationIpAddress() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "getDestinationIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet_IpAddress_Global) GetIpAddress() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "getIpAddress", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Subnet_IpAddress_Global) GetObject() (resp datatypes.Network_Subnet_IpAddress_Global, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "getObject", nil, &r.Options, &resp) + return +} + +// This function is used to create a new transaction to modify a global IP route. Routes are updated in one to two minutes depending on the number of transactions that are pending for a router. +func (r Network_Subnet_IpAddress_Global) Route(newEndPointIpAddress *string) (resp datatypes.Provisioning_Version1_Transaction, err error) { + params := []interface{}{ + newEndPointIpAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "route", params, &r.Options, &resp) + return +} + +// This function is used to create a new transaction to unroute a global IP address. Routes are updated in one to two minutes depending on the number of transactions that are pending for a router. +func (r Network_Subnet_IpAddress_Global) Unroute() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "unroute", nil, &r.Options, &resp) + return +} + +// The subnet registration data type contains general information relating to a single subnet registration instance. These registration instances can be updated to reflect changes, and will record the changes in the [[SoftLayer_Network_Subnet_Registration_Event|events]]. +type Network_Subnet_Registration struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetRegistrationService returns an instance of the Network_Subnet_Registration SoftLayer service +func GetNetworkSubnetRegistrationService(sess *session.Session) Network_Subnet_Registration { + return Network_Subnet_Registration{Session: sess} +} + +func (r Network_Subnet_Registration) Id(id int) Network_Subnet_Registration { + r.Options.Id = &id + return r +} + +func (r Network_Subnet_Registration) Mask(mask string) Network_Subnet_Registration { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet_Registration) Filter(filter string) Network_Subnet_Registration { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet_Registration) Limit(limit int) Network_Subnet_Registration { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet_Registration) Offset(offset int) Network_Subnet_Registration { + r.Options.Offset = &offset + return r +} + +// This method will initiate the removal of a subnet registration. 
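// Route and Unroute above both return a Provisioning_Version1_Transaction rather than the updated
// record, because the change is applied asynchronously (one to two minutes, per the comments). A
// minimal sketch of re-pointing a global IP; the record id, the target address, and the credentials
// are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY")

	globalIP := services.GetNetworkSubnetIpAddressGlobalService(sess).Id(987654)

	// Route the global IP at a new endpoint address.
	newEndpoint := "198.51.100.10"
	txn, err := globalIP.Route(&newEndpoint)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("route transaction queued: %+v\n", txn)

	// To detach the address instead, Unroute() starts the reverse transaction:
	// txn, err = globalIP.Unroute()
}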
+func (r Network_Subnet_Registration) ClearRegistration() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "clearRegistration", nil, &r.Options, &resp) + return +} + +// This method will create a new SoftLayer_Network_Subnet_Registration object. +// +// Input - [[SoftLayer_Network_Subnet_Registration (type)|SoftLayer_Network_Subnet_Registration]]
+// * networkIdentifier
+// ** The base address of the [[SoftLayer_Network_Subnet|subnet]] being registered. This can be derived directly from the SoftLayer_Network_Subnet object's networkIdentifier property.
+// ** Required
+// ** Type - string
+// * cidr
+// ** The CIDR prefix of the [[SoftLayer_Network_Subnet|subnet]] being registered. This can be derived directly from the SoftLayer_Network_Subnet object's cidr property.
+// ** Required
+// ** Type - integer
    +func (r Network_Subnet_Registration) CreateObject(templateObject *datatypes.Network_Subnet_Registration) (resp datatypes.Network_Subnet_Registration, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "createObject", params, &r.Options, &resp) + return +} + +// This method will edit an existing SoftLayer_Network_Subnet_Registration object. For more detail, see [[SoftLayer_Network_Subnet_Registration::createObject|createObject]]. +func (r Network_Subnet_Registration) EditObject(templateObject *datatypes.Network_Subnet_Registration) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "editObject", params, &r.Options, &resp) + return +} + +// This method modifies a single registration by modifying the current [[SoftLayer_Network_Subnet_Registration_Details]] objects that are linked to that registration. +func (r Network_Subnet_Registration) EditRegistrationAttachedDetails(personObjectSkeleton *datatypes.Network_Subnet_Registration_Details, networkObjectSkeleton *datatypes.Network_Subnet_Registration_Details) (resp bool, err error) { + params := []interface{}{ + personObjectSkeleton, + networkObjectSkeleton, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "editRegistrationAttachedDetails", params, &r.Options, &resp) + return +} + +// Retrieve The account that this registration belongs to. +func (r Network_Subnet_Registration) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The cross-reference records that tie the [[SoftLayer_Account_Regional_Registry_Detail]] objects to the registration object. +func (r Network_Subnet_Registration) GetDetailReferences() (resp []datatypes.Network_Subnet_Registration_Details, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getDetailReferences", nil, &r.Options, &resp) + return +} + +// Retrieve The related registration events. +func (r Network_Subnet_Registration) GetEvents() (resp []datatypes.Network_Subnet_Registration_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getEvents", nil, &r.Options, &resp) + return +} + +// Retrieve The "network" detail object. +func (r Network_Subnet_Registration) GetNetworkDetail() (resp datatypes.Account_Regional_Registry_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getNetworkDetail", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Subnet_Registration) GetObject() (resp datatypes.Network_Subnet_Registration, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The "person" detail object. +func (r Network_Subnet_Registration) GetPersonDetail() (resp datatypes.Account_Regional_Registry_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getPersonDetail", nil, &r.Options, &resp) + return +} + +// Retrieve The related Regional Internet Registry. 
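// createObject above expects a template carrying the networkIdentifier and cidr documented in the
// parameter list. A minimal sketch follows; the Go field names (NetworkIdentifier, Cidr) follow the
// usual softlayer-go datatypes convention but are assumptions here, as are the subnet values and
// credentials.
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY")
	regSvc := services.GetNetworkSubnetRegistrationService(sess)

	networkIdentifier := "203.0.113.0" // base address of the subnet being registered
	cidr := 29                         // CIDR prefix of that subnet

	template := datatypes.Network_Subnet_Registration{
		NetworkIdentifier: &networkIdentifier,
		Cidr:              &cidr,
	}

	created, err := regSvc.CreateObject(&template)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("registration created: %+v\n", created)
}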
+func (r Network_Subnet_Registration) GetRegionalInternetRegistry() (resp datatypes.Network_Regional_Internet_Registry, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getRegionalInternetRegistry", nil, &r.Options, &resp) + return +} + +// Retrieve The RIR handle that this registration object belongs to. This field may not be populated until the registration is complete. +func (r Network_Subnet_Registration) GetRegionalInternetRegistryHandle() (resp datatypes.Account_Rwhois_Handle, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getRegionalInternetRegistryHandle", nil, &r.Options, &resp) + return +} + +// Retrieve The status of this registration. +func (r Network_Subnet_Registration) GetStatus() (resp datatypes.Network_Subnet_Registration_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The subnet that this registration pertains to. +func (r Network_Subnet_Registration) GetSubnet() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getSubnet", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Subnet_Registration_Details objects are used to relate [[SoftLayer_Account_Regional_Registry_Detail]] objects to a [[SoftLayer_Network_Subnet_Registration]] object. This allows for easy reuse of registration details. It is important to note that only one detail object per type may be associated to a registration object. +type Network_Subnet_Registration_Details struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetRegistrationDetailsService returns an instance of the Network_Subnet_Registration_Details SoftLayer service +func GetNetworkSubnetRegistrationDetailsService(sess *session.Session) Network_Subnet_Registration_Details { + return Network_Subnet_Registration_Details{Session: sess} +} + +func (r Network_Subnet_Registration_Details) Id(id int) Network_Subnet_Registration_Details { + r.Options.Id = &id + return r +} + +func (r Network_Subnet_Registration_Details) Mask(mask string) Network_Subnet_Registration_Details { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet_Registration_Details) Filter(filter string) Network_Subnet_Registration_Details { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet_Registration_Details) Limit(limit int) Network_Subnet_Registration_Details { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet_Registration_Details) Offset(offset int) Network_Subnet_Registration_Details { + r.Options.Offset = &offset + return r +} + +// This method will create a new SoftLayer_Network_Subnet_Registration_Details object. +// +// Input - [[SoftLayer_Network_Subnet_Registration_Details (type)|SoftLayer_Network_Subnet_Registration_Details]]
+// * detailId
+// ** The numeric ID of the [[SoftLayer_Account_Regional_Registry_Detail|detail]] object to relate.
+// ** Required
+// ** Type - integer
+// * registrationId
+// ** The numeric ID of the [[SoftLayer_Network_Subnet_Registration|registration]] object to relate.
+// ** Required
+// ** Type - integer
    +func (r Network_Subnet_Registration_Details) CreateObject(templateObject *datatypes.Network_Subnet_Registration_Details) (resp datatypes.Network_Subnet_Registration_Details, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration_Details", "createObject", params, &r.Options, &resp) + return +} + +// This method will delete an existing SoftLayer_Account_Regional_Registry_Detail object. +func (r Network_Subnet_Registration_Details) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration_Details", "deleteObject", nil, &r.Options, &resp) + return +} + +// Retrieve The related [[SoftLayer_Account_Regional_Registry_Detail|detail object]]. +func (r Network_Subnet_Registration_Details) GetDetail() (resp datatypes.Account_Regional_Registry_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration_Details", "getDetail", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Subnet_Registration_Details) GetObject() (resp datatypes.Network_Subnet_Registration_Details, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration_Details", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The related [[SoftLayer_Network_Subnet_Registration|registration object]]. +func (r Network_Subnet_Registration_Details) GetRegistration() (resp datatypes.Network_Subnet_Registration, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration_Details", "getRegistration", nil, &r.Options, &resp) + return +} + +// Subnet Registration Status objects describe the current status of a subnet registration. +// +// The standard values for these objects are as follows:
+// * OPEN - Indicates that the registration object is new and has yet to be submitted to the RIR
+// * PENDING - Indicates that the registration object has been submitted to the RIR and is awaiting response
+// * COMPLETE - Indicates that the RIR action has completed
+// * DELETED - Indicates that the registration object has been gracefully removed and is no longer valid
+// * CANCELLED - Indicates that the registration object has been abruptly removed and is no longer valid
    +type Network_Subnet_Registration_Status struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetRegistrationStatusService returns an instance of the Network_Subnet_Registration_Status SoftLayer service +func GetNetworkSubnetRegistrationStatusService(sess *session.Session) Network_Subnet_Registration_Status { + return Network_Subnet_Registration_Status{Session: sess} +} + +func (r Network_Subnet_Registration_Status) Id(id int) Network_Subnet_Registration_Status { + r.Options.Id = &id + return r +} + +func (r Network_Subnet_Registration_Status) Mask(mask string) Network_Subnet_Registration_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet_Registration_Status) Filter(filter string) Network_Subnet_Registration_Status { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet_Registration_Status) Limit(limit int) Network_Subnet_Registration_Status { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet_Registration_Status) Offset(offset int) Network_Subnet_Registration_Status { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Subnet_Registration_Status) GetAllObjects() (resp []datatypes.Network_Subnet_Registration_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration_Status", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Subnet_Registration_Status) GetObject() (resp datatypes.Network_Subnet_Registration_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration_Status", "getObject", nil, &r.Options, &resp) + return +} + +// Every SoftLayer customer account has contact information associated with it for reverse WHOIS purposes. An account's RWHOIS data, modeled by the SoftLayer_Network_Subnet_Rwhois_Data data type, is used by SoftLayer's reverse WHOIS server as well as for SWIP transactions. SoftLayer's reverse WHOIS servers respond to WHOIS queries for IP addresses belonging to a customer's servers, returning this RWHOIS data. +// +// A SoftLayer customer's RWHOIS data may not necessarily match their account or portal users' contact information. 
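// The status values enumerated above surface through getStatus on a registration (and getAllObjects
// on the status service). A minimal sketch of polling one registration; the registration id is a
// placeholder, and the KeyName field on the returned status is an assumption about the generated
// datatype.
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY")

	reg := services.GetNetworkSubnetRegistrationService(sess).Id(456789)
	status, err := reg.GetStatus()
	if err != nil {
		log.Fatal(err)
	}

	if status.KeyName != nil && *status.KeyName == "COMPLETE" {
		fmt.Println("RIR action has completed")
	} else {
		fmt.Printf("registration status: %+v\n", status)
	}
}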
+type Network_Subnet_Rwhois_Data struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetRwhoisDataService returns an instance of the Network_Subnet_Rwhois_Data SoftLayer service +func GetNetworkSubnetRwhoisDataService(sess *session.Session) Network_Subnet_Rwhois_Data { + return Network_Subnet_Rwhois_Data{Session: sess} +} + +func (r Network_Subnet_Rwhois_Data) Id(id int) Network_Subnet_Rwhois_Data { + r.Options.Id = &id + return r +} + +func (r Network_Subnet_Rwhois_Data) Mask(mask string) Network_Subnet_Rwhois_Data { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet_Rwhois_Data) Filter(filter string) Network_Subnet_Rwhois_Data { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet_Rwhois_Data) Limit(limit int) Network_Subnet_Rwhois_Data { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet_Rwhois_Data) Offset(offset int) Network_Subnet_Rwhois_Data { + r.Options.Offset = &offset + return r +} + +// Edit the RWHOIS record by passing in a modified version of the record object. All fields are editable. +func (r Network_Subnet_Rwhois_Data) EditObject(templateObject *datatypes.Network_Subnet_Rwhois_Data) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Rwhois_Data", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer customer account associated with this reverse WHOIS data. +func (r Network_Subnet_Rwhois_Data) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Rwhois_Data", "getAccount", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Subnet_Rwhois_Data object whose ID corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Subnet_Rwhois_Data service. +// +// The best way to get Rwhois Data for an account is through getRhwoisData on the Account service. +func (r Network_Subnet_Rwhois_Data) GetObject() (resp datatypes.Network_Subnet_Rwhois_Data, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Rwhois_Data", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Subnet_Swip_Transaction data type contains basic information tracked at SoftLayer to allow automation of Swip creation, update, and removal requests. A specific transaction is attached to an accountId and a subnetId. This also contains a "Status Name" which tells the customer what the transaction is doing: +// +// +// * REQUEST QUEUED: Request is queued up to be sent to ARIN +// * REQUEST SENT: The email request has been sent to ARIN +// * REQUEST CONFIRMED: ARIN has confirmed that the request is good, and should be available in 24 hours +// * OK: The subnet has been checked with WHOIS and it the SWIP transaction has completed correctly +// * REMOVE QUEUED: A subnet is queued to be removed from ARIN's systems +// * REMOVE SENT: The removal email request has been sent to ARIN +// * REMOVE CONFIRMED: ARIN has confirmed that the removal request is good, and the subnet should be clear in WHOIS in 24 hours +// * DELETED: This specific SWIP Transaction has been removed from ARIN and is no longer in effect +// * SOFTLAYER MANUALLY PROCESSING: Sometimes a request doesn't go through correctly and has to be manually processed by SoftLayer. This may take some time. 
+type Network_Subnet_Swip_Transaction struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetSwipTransactionService returns an instance of the Network_Subnet_Swip_Transaction SoftLayer service +func GetNetworkSubnetSwipTransactionService(sess *session.Session) Network_Subnet_Swip_Transaction { + return Network_Subnet_Swip_Transaction{Session: sess} +} + +func (r Network_Subnet_Swip_Transaction) Id(id int) Network_Subnet_Swip_Transaction { + r.Options.Id = &id + return r +} + +func (r Network_Subnet_Swip_Transaction) Mask(mask string) Network_Subnet_Swip_Transaction { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet_Swip_Transaction) Filter(filter string) Network_Subnet_Swip_Transaction { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet_Swip_Transaction) Limit(limit int) Network_Subnet_Swip_Transaction { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet_Swip_Transaction) Offset(offset int) Network_Subnet_Swip_Transaction { + r.Options.Offset = &offset + return r +} + +// This function will return an array of SoftLayer_Network_Subnet_Swip_Transaction objects, one for each SWIP that is currently in transaction with ARIN. This includes all swip registrations, swip removal requests, and SWIP objects that are currently OK. +func (r Network_Subnet_Swip_Transaction) FindMyTransactions() (resp []datatypes.Network_Subnet_Swip_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "findMyTransactions", nil, &r.Options, &resp) + return +} + +// Retrieve The Account whose RWHOIS data was used to SWIP this subnet +func (r Network_Subnet_Swip_Transaction) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "getAccount", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Subnet_Swip_Transaction object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Subnet_Swip_transaction service. You can only retrieve Swip transactions tied to the account. +func (r Network_Subnet_Swip_Transaction) GetObject() (resp datatypes.Network_Subnet_Swip_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The subnet that this SWIP transaction was created for. +func (r Network_Subnet_Swip_Transaction) GetSubnet() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "getSubnet", nil, &r.Options, &resp) + return +} + +// This method finds all subnets attached to your account that are in OK status and starts "DELETE" transactions with ARIN, allowing you to remove your SWIP registration information. +func (r Network_Subnet_Swip_Transaction) RemoveAllSubnetSwips() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "removeAllSubnetSwips", nil, &r.Options, &resp) + return +} + +// This function, when called on an instantiated SWIP transaction, will allow you to start a "DELETE" transaction with ARIN, allowing you to remove your SWIP registration information. 
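// findMyTransactions above returns every SWIP currently in transaction with ARIN, including records
// already in OK status, while updateAllSubnetSwips (further below) pushes refreshed RWHOIS data for
// subnets in OK status. A minimal sketch of inspecting and refreshing them; credentials are
// placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY")
	swipSvc := services.GetNetworkSubnetSwipTransactionService(sess)

	txns, err := swipSvc.FindMyTransactions()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("SWIP transactions on this account: %d\n", len(txns))

	// After updating the account's RWHOIS data, push the changes back out to ARIN.
	updated, err := swipSvc.UpdateAllSubnetSwips()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("subnets queued for SWIP update: %d\n", updated)
}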
+func (r Network_Subnet_Swip_Transaction) RemoveSwipData() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "removeSwipData", nil, &r.Options, &resp) + return +} + +// This function will allow you to update ARIN's registration data for a subnet to your current RWHOIS data. +func (r Network_Subnet_Swip_Transaction) ResendSwipData() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "resendSwipData", nil, &r.Options, &resp) + return +} + +// swipAllSubnets finds all subnets attached to your account and attempts to create a SWIP transaction for all subnets that do not already have a SWIP transaction in progress. +func (r Network_Subnet_Swip_Transaction) SwipAllSubnets() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "swipAllSubnets", nil, &r.Options, &resp) + return +} + +// This method finds all subnets attached to your account that are in "OK" status and updates their data with ARIN. Use this function after you have updated your RWHOIS data if you want to keep SWIP up to date. +func (r Network_Subnet_Swip_Transaction) UpdateAllSubnetSwips() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "updateAllSubnetSwips", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_TippingPointReporting struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkTippingPointReportingService returns an instance of the Network_TippingPointReporting SoftLayer service +func GetNetworkTippingPointReportingService(sess *session.Session) Network_TippingPointReporting { + return Network_TippingPointReporting{Session: sess} +} + +func (r Network_TippingPointReporting) Id(id int) Network_TippingPointReporting { + r.Options.Id = &id + return r +} + +func (r Network_TippingPointReporting) Mask(mask string) Network_TippingPointReporting { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_TippingPointReporting) Filter(filter string) Network_TippingPointReporting { + r.Options.Filter = filter + return r +} + +func (r Network_TippingPointReporting) Limit(limit int) Network_TippingPointReporting { + r.Options.Limit = &limit + return r +} + +func (r Network_TippingPointReporting) Offset(offset int) Network_TippingPointReporting { + r.Options.Offset = &offset + return r +} + +// This method, when given an attack signature ID (available in the return values of getReportForIpAddressOrSubnet and getSubnetReportForEntireAccount) and an IP Address and subnet mask, returns all attacks for that subnet in the specified time frame and direction. Once the results have been filtered, additional data is available, including starting and ending times for the attack, originating IP address and port, and destination IP address and port. +// +// CVE and Bugtraq information is not available at this level. 
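// The reporting calls on this service (just below) take their optional scalars as pointers, like the
// other methods in this package. A minimal sketch of pulling the top attack statistics via
// getMainStatistics, which documents a default list length of 5; the limit value and credentials are
// placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY")
	tpSvc := services.GetNetworkTippingPointReportingService(sess)

	// Ask for the top five attacks in each list.
	numberOfAttacks := 5
	stats, err := tpSvc.GetMainStatistics(&numberOfAttacks)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("returned %d statistics containers\n", len(stats))
}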
+func (r Network_TippingPointReporting) DrillDownAttack(signatureId *string, IpAddress *string, subnetMask *int, timeFrame *int, direction *string) (resp datatypes.Container_Network_IntrusionProtection_SubnetReport, err error) { + params := []interface{}{ + signatureId, + IpAddress, + subnetMask, + timeFrame, + direction, + } + err = r.Session.DoRequest("SoftLayer_Network_TippingPointReporting", "drillDownAttack", params, &r.Options, &resp) + return +} + +// This method returns the attack statistics for the current user's account and for the entire SoftLayer network. These attacks are recorded and monitored at the entry point to the network, and represent attacks in both directions. +// +// The data returned is: +// * Top attacks (by attack name) on datacenter Dal01 in the last hour (and last 24 hours) +// * Top attacks (by attack name) on IPs you own in the last hour (and last 24 hours) +// * Top IPs attacking IPs you own in the last hour (and last 24 hours) +// Each one of these lists can contain any number of items, the default is 5. The usable limit is less than 10, but setting the limit to an abnormally high value will effectively return all records. +// +// The data is returned as a collection of SoftLayer_Container_Network_IntrusionProtection_Statistics objects. +func (r Network_TippingPointReporting) GetMainStatistics(numberOfAttacks *int) (resp []datatypes.Container_Network_IntrusionProtection_Statistics, err error) { + params := []interface{}{ + numberOfAttacks, + } + err = r.Session.DoRequest("SoftLayer_Network_TippingPointReporting", "getMainStatistics", params, &r.Options, &resp) + return +} + +// This method expands on the getSubnetReportForEntireAccount method by offering the ability to filter by subnet or IP address. This method is identical to getSubnetReportForEntireAccount, but allows filtering by subnet. Like in the getSubnetReportForEntireAccount method, CVE and BugTraq IDs are provided, if available. +// +// This method should be called once an attack has been identified using getSubnetReportForEntireAccount (in which case "All Subnets" is the subnet) or getReportForIpAddressOrSubnet. +func (r Network_TippingPointReporting) GetReportForIpAddressOrSubnet(IpAddress *string, subnetMask *int, timeFrame *int, orderBy *string, orderDirection *string) (resp []datatypes.Container_Network_IntrusionProtection_SubnetReport, err error) { + params := []interface{}{ + IpAddress, + subnetMask, + timeFrame, + orderBy, + orderDirection, + } + err = r.Session.DoRequest("SoftLayer_Network_TippingPointReporting", "getReportForIpAddressOrSubnet", params, &r.Options, &resp) + return +} + +// This method returns specific attacks by name for all subnets on the current user's account. +// +// The data returned is stored in SoftLayer_Container_Network_IntrusionProtection_SubnetReport objects, with the "subnet" value set to "All Subnets" +// +// The data is separated into "Inbound" and "Outbound" traffic. A significant amount of outbound attack traffic could indicate that your servers have been compromised. +// +// The data returned includes Attack Count, attack name, extended attack description, and IDs that correspond with the BugTraq or CVE databases. 
BugTraq can be accessed at [http://www.securityfocus.com/vulnerabilities] The CVE database is located at [http://cve.mitre.org/find/index.html] +// +// For more detailed information, use the getReportForIpAddressOrSubnet method +func (r Network_TippingPointReporting) GetSubnetReportForEntireAccount(timeFrame *int, orderBy *string, orderDirection *string, returnSubnetGroups *bool) (resp []datatypes.Container_Network_IntrusionProtection_SubnetReport, err error) { + params := []interface{}{ + timeFrame, + orderBy, + orderDirection, + returnSubnetGroups, + } + err = r.Session.DoRequest("SoftLayer_Network_TippingPointReporting", "getSubnetReportForEntireAccount", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Tunnel_Module_Context data type contains general information relating to a single SoftLayer network tunnel. The SoftLayer_Network_Tunnel_Module_Context is useful to gather information such as related customer subnets (remote) and internal subnets (local) associated with the network tunnel as well as other information needed to manage the network tunnel. Account and billing information related to the network tunnel can also be retrieved. +type Network_Tunnel_Module_Context struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkTunnelModuleContextService returns an instance of the Network_Tunnel_Module_Context SoftLayer service +func GetNetworkTunnelModuleContextService(sess *session.Session) Network_Tunnel_Module_Context { + return Network_Tunnel_Module_Context{Session: sess} +} + +func (r Network_Tunnel_Module_Context) Id(id int) Network_Tunnel_Module_Context { + r.Options.Id = &id + return r +} + +func (r Network_Tunnel_Module_Context) Mask(mask string) Network_Tunnel_Module_Context { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Tunnel_Module_Context) Filter(filter string) Network_Tunnel_Module_Context { + r.Options.Filter = filter + return r +} + +func (r Network_Tunnel_Module_Context) Limit(limit int) Network_Tunnel_Module_Context { + r.Options.Limit = &limit + return r +} + +func (r Network_Tunnel_Module_Context) Offset(offset int) Network_Tunnel_Module_Context { + r.Options.Offset = &offset + return r +} + +// Associates a remote subnet to the network tunnel. When a remote subnet is associated, a network tunnel will allow the customer (remote) network to communicate with the private and service subnets on the SoftLayer network which are on the other end of this network tunnel. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the association described above to take effect. +func (r Network_Tunnel_Module_Context) AddCustomerSubnetToNetworkTunnel(subnetId *int) (resp bool, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "addCustomerSubnetToNetworkTunnel", params, &r.Options, &resp) + return +} + +// Associates a private subnet to the network tunnel. When a private subnet is associated, the network tunnel will allow the customer (remote) network to access the private subnet. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the association described above to take effect. 
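// Associating a subnet only stages the change; per the NOTE above, nothing takes effect until the
// tunnel's configuration is applied to the device. A minimal sketch of that two-step flow, assuming
// placeholder tunnel and subnet ids; applyConfigurationsToDevice is defined a little further below.
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY")
	tunnel := services.GetNetworkTunnelModuleContextService(sess).Id(112233)

	// Stage the customer (remote) subnet on the tunnel.
	subnetID := 445566
	if _, err := tunnel.AddCustomerSubnetToNetworkTunnel(&subnetID); err != nil {
		log.Fatal(err)
	}

	// Push the staged configuration to the network device.
	queued, err := tunnel.ApplyConfigurationsToDevice()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("configuration transaction queued: %t\n", queued)
}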
+func (r Network_Tunnel_Module_Context) AddPrivateSubnetToNetworkTunnel(subnetId *int) (resp bool, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "addPrivateSubnetToNetworkTunnel", params, &r.Options, &resp) + return +} + +// Associates a service subnet to the network tunnel. When a service subnet is associated, a network tunnel will allow the customer (remote) network to communicate with the private and service subnets on the SoftLayer network which are on the other end of this network tunnel. Service subnets provide access to SoftLayer services such as the customer management portal and the SoftLayer API. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the association described above to take effect. +func (r Network_Tunnel_Module_Context) AddServiceSubnetToNetworkTunnel(subnetId *int) (resp bool, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "addServiceSubnetToNetworkTunnel", params, &r.Options, &resp) + return +} + +// A transaction will be created to apply the IPSec network tunnel's configuration to SoftLayer network devices. During this time, an IPSec network tunnel cannot be modified in anyway. Only one network tunnel configuration transaction can be created. If a transaction has been created or is running, a new transaction cannot be created until the previous transaction completes. +func (r Network_Tunnel_Module_Context) ApplyConfigurationsToDevice() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "applyConfigurationsToDevice", nil, &r.Options, &resp) + return +} + +// Create an address translation for a network tunnel. +// +// To create an address translation, ip addresses from an assigned /30 static route subnet are used. Address translations deliver packets to a destination ip address that is on a customer (remote) subnet. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for an address translation to be created. +func (r Network_Tunnel_Module_Context) CreateAddressTranslation(translation *datatypes.Network_Tunnel_Module_Context_Address_Translation) (resp datatypes.Network_Tunnel_Module_Context_Address_Translation, err error) { + params := []interface{}{ + translation, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "createAddressTranslation", params, &r.Options, &resp) + return +} + +// This has the same functionality as the SoftLayer_Network_Tunnel_Module_Context::createAddressTranslation. However, it allows multiple translations to be passed in for creation. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the address translations to be created. +func (r Network_Tunnel_Module_Context) CreateAddressTranslations(translations []datatypes.Network_Tunnel_Module_Context_Address_Translation) (resp []datatypes.Network_Tunnel_Module_Context_Address_Translation, err error) { + params := []interface{}{ + translations, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "createAddressTranslations", params, &r.Options, &resp) + return +} + +// Remove an existing address translation from a network tunnel. +// +// Address translations deliver packets to a destination ip address that is on a customer subnet (remote). 
+// +// NOTE: A network tunnel's configurations must be applied to the network device in order for an address translation to be deleted. +func (r Network_Tunnel_Module_Context) DeleteAddressTranslation(translationId *int) (resp bool, err error) { + params := []interface{}{ + translationId, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "deleteAddressTranslation", params, &r.Options, &resp) + return +} + +// Provides all of the address translation configurations for an IPSec VPN tunnel in a text file +func (r Network_Tunnel_Module_Context) DownloadAddressTranslationConfigurations() (resp datatypes.Container_Utility_File_Entity, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "downloadAddressTranslationConfigurations", nil, &r.Options, &resp) + return +} + +// Provides all of the configurations for an IPSec VPN network tunnel in a text file +func (r Network_Tunnel_Module_Context) DownloadParameterConfigurations() (resp datatypes.Container_Utility_File_Entity, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "downloadParameterConfigurations", nil, &r.Options, &resp) + return +} + +// Edit name, source (SoftLayer IP) ip address and/or destination (Customer IP) ip address for an existing address translation for a network tunnel. +// +// Address translations deliver packets to a destination ip address that is on a customer (remote) subnet. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for an address translation to be created. +func (r Network_Tunnel_Module_Context) EditAddressTranslation(translation *datatypes.Network_Tunnel_Module_Context_Address_Translation) (resp datatypes.Network_Tunnel_Module_Context_Address_Translation, err error) { + params := []interface{}{ + translation, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "editAddressTranslation", params, &r.Options, &resp) + return +} + +// Edit name, source (SoftLayer IP) ip address and/or destination (Customer IP) ip address for existing address translations for a network tunnel. +// +// Address translations deliver packets to a destination ip address that is on a customer (remote) subnet. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for an address translation to be modified. +func (r Network_Tunnel_Module_Context) EditAddressTranslations(translations []datatypes.Network_Tunnel_Module_Context_Address_Translation) (resp []datatypes.Network_Tunnel_Module_Context_Address_Translation, err error) { + params := []interface{}{ + translations, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "editAddressTranslations", params, &r.Options, &resp) + return +} + +// Negotiation parameters for both phases one and two are editable. Here are the phase one and two parameters that can modified: +// +// +// *Phase One +// **Authentication +// ***Default value is set to MD5. +// ***Valid Options are: MD5, SHA1, SHA256. +// **Encryption +// ***Default value is set to 3DES. +// ***Valid Options are: DES, 3DES, AES128, AES192, AES256. +// **Diffie-Hellman Group +// ***Default value is set to 2. +// ***Valid Options are: 0 (None), 1, 2, 5. +// **Keylife +// ***Default value is set to 3600. +// ***Limits are: MIN = 120, MAX = 172800 +// **Preshared Key +// *Phase Two +// **Authentication +// ***Default value is set to MD5. +// ***Valid Options are: MD5, SHA1, SHA256. 
+// **Encryption +// ***Default value is set to 3DES. +// ***Valid Options are: DES, 3DES, AES128, AES192, AES256. +// **Diffie-Hellman Group +// ***Default value is set to 2. +// ***Valid Options are: 0 (None), 1, 2, 5. +// **Keylife +// ***Default value is set to 28800. +// ***Limits are: MIN = 120, MAX = 172800 +// **Perfect Forward Secrecy +// ***Valid Options are: Off = 0, On = 1. +// ***NOTE: If perfect forward secrecy is turned On (set to 1), then a phase 2 diffie-hellman group is required. +// +// +// The remote peer address for the network tunnel may also be modified if needed. Invalid options will not be accepted and will cause an exception to be thrown. There are properties that provide valid options and limits for each negotiation parameter. Those properties are as follows: +// * encryptionDefault +// * encryptionOptions +// * authenticationDefault +// * authenticationOptions +// * diffieHellmanGroupDefault +// * diffieHellmanGroupOptions +// * phaseOneKeylifeDefault +// * phaseTwoKeylifeDefault +// * keylifeLimits +// +// +// Configurations cannot be modified if a network tunnel's requires complex manual setups/configuration modifications by the SoftLayer Network department. If the former is required, the configurations for the network tunnel will be locked until the manual configurations are complete. A network tunnel's configurations are applied via a transaction. If a network tunnel configuration change transaction is currently running, the network tunnel's setting cannot be modified until the running transaction completes. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the modifications made to take effect. +func (r Network_Tunnel_Module_Context) EditObject(templateObject *datatypes.Network_Tunnel_Module_Context) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account that a network tunnel belongs to. +func (r Network_Tunnel_Module_Context) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The transaction that is currently applying configurations for the network tunnel. +func (r Network_Tunnel_Module_Context) GetActiveTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getActiveTransaction", nil, &r.Options, &resp) + return +} + +// The address translations will be returned. All the translations will be formatted so that the configurations can be copied into a host file. +// +// Format: +// +// {address translation SoftLayer IP Address} {address translation name} +func (r Network_Tunnel_Module_Context) GetAddressTranslationConfigurations() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getAddressTranslationConfigurations", nil, &r.Options, &resp) + return +} + +// Retrieve A network tunnel's address translations. 
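// Only one configuration transaction can run at a time (see applyConfigurationsToDevice above), so it
// is worth checking getActiveTransaction before editing or re-applying. A minimal sketch of that
// guard; the tunnel id is a placeholder and the Id-field check on the returned transaction is an
// assumption about the generated datatype.
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY")
	tunnel := services.GetNetworkTunnelModuleContextService(sess).Id(112233)

	txn, err := tunnel.GetActiveTransaction()
	if err != nil {
		log.Fatal(err)
	}
	if txn.Id != nil {
		fmt.Printf("configuration transaction %d is still running; try again later\n", *txn.Id)
		return
	}

	// No transaction in flight, so it is safe to apply (or edit) the tunnel configuration now.
	if _, err := tunnel.ApplyConfigurationsToDevice(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("configuration transaction queued")
}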
+func (r Network_Tunnel_Module_Context) GetAddressTranslations() (resp []datatypes.Network_Tunnel_Module_Context_Address_Translation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getAddressTranslations", nil, &r.Options, &resp) + return +} + +// Retrieve Subnets that provide access to SoftLayer services such as the management portal and the SoftLayer API. +func (r Network_Tunnel_Module_Context) GetAllAvailableServiceSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getAllAvailableServiceSubnets", nil, &r.Options, &resp) + return +} + +// The default authentication type used for both phases of the negotiation process. The default value is set to MD5. +func (r Network_Tunnel_Module_Context) GetAuthenticationDefault() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getAuthenticationDefault", nil, &r.Options, &resp) + return +} + +// Authentication options available for both phases of the negotiation process. +// +// The authentication options are as follows: +// * MD5 +// * SHA1 +// * SHA256 +func (r Network_Tunnel_Module_Context) GetAuthenticationOptions() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getAuthenticationOptions", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for network tunnel. +func (r Network_Tunnel_Module_Context) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve Remote subnets that are allowed access through a network tunnel. +func (r Network_Tunnel_Module_Context) GetCustomerSubnets() (resp []datatypes.Network_Customer_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getCustomerSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The datacenter location for one end of the network tunnel that allows access to account's private subnets. +func (r Network_Tunnel_Module_Context) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getDatacenter", nil, &r.Options, &resp) + return +} + +// The default Diffie-Hellman group used for both phases of the negotiation process. The default value is set to 2. +func (r Network_Tunnel_Module_Context) GetDiffieHellmanGroupDefault() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getDiffieHellmanGroupDefault", nil, &r.Options, &resp) + return +} + +// The Diffie-Hellman group options used for both phases of the negotiation process. +// +// The diffie-hellman group options are as follows: +// * 0 (None) +// * 1 +// * 2 +// * 5 +func (r Network_Tunnel_Module_Context) GetDiffieHellmanGroupOptions() (resp []int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getDiffieHellmanGroupOptions", nil, &r.Options, &resp) + return +} + +// The default encryption type used for both phases of the negotiation process. The default value is set to 3DES. +func (r Network_Tunnel_Module_Context) GetEncryptionDefault() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getEncryptionDefault", nil, &r.Options, &resp) + return +} + +// Encryption options available for both phases of the negotiation process. 
+// +// The valid encryption options are as follows: +// * DES +// * 3DES +// * AES128 +// * AES192 +// * AES256 +func (r Network_Tunnel_Module_Context) GetEncryptionOptions() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getEncryptionOptions", nil, &r.Options, &resp) + return +} + +// Retrieve Private subnets that can be accessed through the network tunnel. +func (r Network_Tunnel_Module_Context) GetInternalSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getInternalSubnets", nil, &r.Options, &resp) + return +} + +// The keylife limits. Keylife max limit is set to 120. Keylife min limit is set to 172800. +func (r Network_Tunnel_Module_Context) GetKeylifeLimits() (resp []int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getKeylifeLimits", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Tunnel_Module_Context object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Tunnel_Module_Context service. The IPSec network tunnel will be returned if it is associated with the account and the user has proper permission to manage network tunnels. +func (r Network_Tunnel_Module_Context) GetObject() (resp datatypes.Network_Tunnel_Module_Context, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getObject", nil, &r.Options, &resp) + return +} + +// All of the IPSec VPN tunnel's configurations will be returned. It will list all of phase one and two negotiation parameters. Both remote and local subnets will be provided as well. This is useful when the configurations need to be passed on to another team and/or company for internal network configuration. +func (r Network_Tunnel_Module_Context) GetParameterConfigurationsForCustomerView() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getParameterConfigurationsForCustomerView", nil, &r.Options, &resp) + return +} + +// The default phase 1 keylife used if a value is not provided. The default value is set to 3600. +func (r Network_Tunnel_Module_Context) GetPhaseOneKeylifeDefault() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getPhaseOneKeylifeDefault", nil, &r.Options, &resp) + return +} + +// The default phase 2 keylife used if a value is not provided. The default value is set to 28800. +func (r Network_Tunnel_Module_Context) GetPhaseTwoKeylifeDefault() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getPhaseTwoKeylifeDefault", nil, &r.Options, &resp) + return +} + +// Retrieve Service subnets that can be access through the network tunnel. +func (r Network_Tunnel_Module_Context) GetServiceSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getServiceSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve Subnets used for a network tunnel's address translations. +func (r Network_Tunnel_Module_Context) GetStaticRouteSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getStaticRouteSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The transaction history for this network tunnel. 
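// The option/default getters above expose the valid phase one and phase two negotiation values so
// they need not be hard-coded client side. A minimal sketch that prints what the API will accept
// before an editObject call; the tunnel id and credentials are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY")
	tunnel := services.GetNetworkTunnelModuleContextService(sess).Id(112233)

	encOptions, err := tunnel.GetEncryptionOptions()
	if err != nil {
		log.Fatal(err)
	}
	authOptions, err := tunnel.GetAuthenticationOptions()
	if err != nil {
		log.Fatal(err)
	}
	dhOptions, err := tunnel.GetDiffieHellmanGroupOptions()
	if err != nil {
		log.Fatal(err)
	}
	keylife, err := tunnel.GetKeylifeLimits()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("encryption:", encOptions)
	fmt.Println("authentication:", authOptions)
	fmt.Println("diffie-hellman groups:", dhOptions)
	fmt.Println("keylife limits:", keylife)
}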
+func (r Network_Tunnel_Module_Context) GetTransactionHistory() (resp []datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getTransactionHistory", nil, &r.Options, &resp) + return +} + +// Disassociate a customer subnet (remote) from a network tunnel. When a remote subnet is disassociated, that subnet will not able to communicate with private and service subnets on the SoftLayer network. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the disassociation described above to take effect. +func (r Network_Tunnel_Module_Context) RemoveCustomerSubnetFromNetworkTunnel(subnetId *int) (resp bool, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "removeCustomerSubnetFromNetworkTunnel", params, &r.Options, &resp) + return +} + +// Disassociate a private subnet from a network tunnel. When a private subnet is disassociated, the customer (remote) subnet on the other end of the tunnel will not able to communicate with the private subnet that was just disassociated. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the disassociation described above to take effect. +func (r Network_Tunnel_Module_Context) RemovePrivateSubnetFromNetworkTunnel(subnetId *int) (resp bool, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "removePrivateSubnetFromNetworkTunnel", params, &r.Options, &resp) + return +} + +// Disassociate a service subnet from a network tunnel. When a service subnet is disassociated, that customer (remote) subnet on the other end of the network tunnel will not able to communicate with that service subnet on the SoftLayer network. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the disassociation described above to take effect. +func (r Network_Tunnel_Module_Context) RemoveServiceSubnetFromNetworkTunnel(subnetId *int) (resp bool, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "removeServiceSubnetFromNetworkTunnel", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Vlan data type models a single VLAN within SoftLayer's public and private networks. a Virtual LAN is a structure that associates network interfaces on routers, switches, and servers in different locations to act as if they were on the same local network broadcast domain. VLANs are a central part of the SoftLayer network. They can determine how new IP subnets are routed and how individual servers communicate to each other. 
+type Network_Vlan struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkVlanService returns an instance of the Network_Vlan SoftLayer service +func GetNetworkVlanService(sess *session.Session) Network_Vlan { + return Network_Vlan{Session: sess} +} + +func (r Network_Vlan) Id(id int) Network_Vlan { + r.Options.Id = &id + return r +} + +func (r Network_Vlan) Mask(mask string) Network_Vlan { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Vlan) Filter(filter string) Network_Vlan { + r.Options.Filter = filter + return r +} + +func (r Network_Vlan) Limit(limit int) Network_Vlan { + r.Options.Limit = &limit + return r +} + +func (r Network_Vlan) Offset(offset int) Network_Vlan { + r.Options.Offset = &offset + return r +} + +// Edit a VLAN's properties +func (r Network_Vlan) EditObject(templateObject *datatypes.Network_Vlan) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer customer account associated with a VLAN. +func (r Network_Vlan) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve A VLAN's additional primary subnets. These are used to extend the number of servers attached to the VLAN by adding more ip addresses to the primary IP address pool. +func (r Network_Vlan) GetAdditionalPrimarySubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getAdditionalPrimarySubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The gateway this VLAN is inside of. +func (r Network_Vlan) GetAttachedNetworkGateway() (resp datatypes.Network_Gateway, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getAttachedNetworkGateway", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not this VLAN is inside a gateway. +func (r Network_Vlan) GetAttachedNetworkGatewayFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getAttachedNetworkGatewayFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The inside VLAN record if this VLAN is inside a network gateway. +func (r Network_Vlan) GetAttachedNetworkGatewayVlan() (resp datatypes.Network_Gateway_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getAttachedNetworkGatewayVlan", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a network vlan. +func (r Network_Vlan) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Get a set of reasons why this VLAN may not be cancelled. If the result is empty, this VLAN may be cancelled. +func (r Network_Vlan) GetCancelFailureReasons() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getCancelFailureReasons", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that a network vlan is on a Hardware Firewall (Dedicated). 
+func (r Network_Vlan) GetDedicatedFirewallFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getDedicatedFirewallFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The extension router that a VLAN is associated with. +func (r Network_Vlan) GetExtensionRouter() (resp datatypes.Hardware_Router, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getExtensionRouter", nil, &r.Options, &resp) + return +} + +// Retrieve A firewalled Vlan's network components. +func (r Network_Vlan) GetFirewallGuestNetworkComponents() (resp []datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getFirewallGuestNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve A firewalled vlan's inbound/outbound interfaces. +func (r Network_Vlan) GetFirewallInterfaces() (resp []datatypes.Network_Firewall_Module_Context_Interface, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getFirewallInterfaces", nil, &r.Options, &resp) + return +} + +// Retrieve A firewalled Vlan's network components. +func (r Network_Vlan) GetFirewallNetworkComponents() (resp []datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getFirewallNetworkComponents", nil, &r.Options, &resp) + return +} + +// Get the IP addresses associated with this server that are protectable by a network component firewall. Note, this may not return all values for IPv6 subnets for this VLAN. Please use getFirewallProtectableSubnets to get all protectable subnets. +func (r Network_Vlan) GetFirewallProtectableIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getFirewallProtectableIpAddresses", nil, &r.Options, &resp) + return +} + +// Get the subnets associated with this server that are protectable by a network component firewall. +func (r Network_Vlan) GetFirewallProtectableSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getFirewallProtectableSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The currently running rule set of a firewalled VLAN. +func (r Network_Vlan) GetFirewallRules() (resp []datatypes.Network_Vlan_Firewall_Rule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getFirewallRules", nil, &r.Options, &resp) + return +} + +// Retrieve The networking components that are connected to a VLAN. +func (r Network_Vlan) GetGuestNetworkComponents() (resp []datatypes.Virtual_Guest_Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getGuestNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve All of the hardware that exists on a VLAN. Hardware is associated with a VLAN by its networking components. +func (r Network_Vlan) GetHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Vlan) GetHighAvailabilityFirewallFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getHighAvailabilityFirewallFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that a vlan can be assigned to a host that has local disk functionality. 
+func (r Network_Vlan) GetLocalDiskStorageCapabilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getLocalDiskStorageCapabilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The network in which this VLAN resides. +func (r Network_Vlan) GetNetwork() (resp datatypes.Network, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getNetwork", nil, &r.Options, &resp) + return +} + +// Retrieve The network components that are connected to this VLAN through a trunk. +func (r Network_Vlan) GetNetworkComponentTrunks() (resp []datatypes.Network_Component_Network_Vlan_Trunk, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getNetworkComponentTrunks", nil, &r.Options, &resp) + return +} + +// Retrieve The networking components that are connected to a VLAN. +func (r Network_Vlan) GetNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve Identifier to denote whether a VLAN is used for public or private connectivity. +func (r Network_Vlan) GetNetworkSpace() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getNetworkSpace", nil, &r.Options, &resp) + return +} + +// Retrieve The Hardware Firewall (Dedicated) for a network vlan. +func (r Network_Vlan) GetNetworkVlanFirewall() (resp datatypes.Network_Vlan_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getNetworkVlanFirewall", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Vlan object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Vlan service. You can only retrieve VLANs that are associated with your SoftLayer customer account. +func (r Network_Vlan) GetObject() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The primary router that a VLAN is associated with. Every SoftLayer VLAN is connected to more than one router for greater network redundancy. +func (r Network_Vlan) GetPrimaryRouter() (resp datatypes.Hardware_Router, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPrimaryRouter", nil, &r.Options, &resp) + return +} + +// Retrieve A VLAN's primary subnet. Each VLAN has at least one subnet, usually the subnet that is assigned to a server or new IP address block when it's purchased. +func (r Network_Vlan) GetPrimarySubnet() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPrimarySubnet", nil, &r.Options, &resp) + return +} + +// Retrieve A VLAN's primary IPv6 subnet. Some VLAN's may not have a primary IPv6 subnet. +func (r Network_Vlan) GetPrimarySubnetVersion6() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPrimarySubnetVersion6", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Vlan) GetPrimarySubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPrimarySubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The gateways this VLAN is the private VLAN of. 
+func (r Network_Vlan) GetPrivateNetworkGateways() (resp []datatypes.Network_Gateway, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPrivateNetworkGateways", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a VLAN's associated private network VLAN. getPrivateVlan gathers its information by retrieving the private VLAN of a VLAN's primary hardware object.
+func (r Network_Vlan) GetPrivateVlan() (resp datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPrivateVlan", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the private network VLAN associated with an IP address.
+func (r Network_Vlan) GetPrivateVlanByIpAddress(ipAddress *string) (resp datatypes.Network_Vlan, err error) {
+	params := []interface{}{
+		ipAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPrivateVlanByIpAddress", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Vlan) GetProtectedIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getProtectedIpAddresses", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The gateways this VLAN is the public VLAN of.
+func (r Network_Vlan) GetPublicNetworkGateways() (resp []datatypes.Network_Gateway, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPublicNetworkGateways", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the VLAN that belongs to a server's public network interface, as described by a server's fully-qualified domain name. A server's FQDN is its hostname, followed by a period, then its domain name.
+func (r Network_Vlan) GetPublicVlanByFqdn(fqdn *string) (resp datatypes.Network_Vlan, err error) {
+	params := []interface{}{
+		fqdn,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPublicVlanByFqdn", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve all reverse DNS records associated with the subnets assigned to a VLAN.
+func (r Network_Vlan) GetReverseDomainRecords() (resp []datatypes.Dns_Domain, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getReverseDomainRecords", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A flag indicating that a vlan can be assigned to a host that has SAN disk functionality.
+func (r Network_Vlan) GetSanStorageCapabilityFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getSanStorageCapabilityFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Collection of scale VLANs this VLAN applies to.
+func (r Network_Vlan) GetScaleVlans() (resp []datatypes.Scale_Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getScaleVlans", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The secondary router that a VLAN is associated with. Every SoftLayer VLAN is connected to more than one router for greater network redundancy.
+func (r Network_Vlan) GetSecondaryRouter() (resp datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getSecondaryRouter", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The subnets that exist as secondary interfaces on a VLAN.
+func (r Network_Vlan) GetSecondarySubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getSecondarySubnets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All of the subnets that exist as VLAN interfaces.
+func (r Network_Vlan) GetSubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getSubnets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve References to all tags for this VLAN.
+func (r Network_Vlan) GetTagReferences() (resp []datatypes.Tag_Reference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getTagReferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The number of primary IP addresses in a VLAN.
+func (r Network_Vlan) GetTotalPrimaryIpAddressCount() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getTotalPrimaryIpAddressCount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The type of this VLAN.
+func (r Network_Vlan) GetType() (resp datatypes.Network_Vlan_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getType", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All of the Virtual Servers that are connected to a VLAN.
+func (r Network_Vlan) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the VLAN associated with an IP address via the IP's associated subnet.
+func (r Network_Vlan) GetVlanForIpAddress(ipAddress *string) (resp datatypes.Network_Vlan, err error) {
+	params := []interface{}{
+		ipAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getVlanForIpAddress", params, &r.Options, &resp)
+	return
+}
+
+// Tag a VLAN by passing in one or more tags separated by a comma. Tag references are cleared out every time this method is called. If your VLAN is already tagged, you will need to pass the current tags along with any new ones. To remove all tag references, pass an empty string. To remove one or more tags, omit them from the tag list.
+func (r Network_Vlan) SetTags(tags *string) (resp bool, err error) {
+	params := []interface{}{
+		tags,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "setTags", params, &r.Options, &resp)
+	return
+}
+
+// Update a VLAN's firewall to allow or disallow intra-VLAN communication.
+func (r Network_Vlan) UpdateFirewallIntraVlanCommunication(enabled *bool) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		enabled,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "updateFirewallIntraVlanCommunication", params, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Network_Vlan_Firewall data type contains general information relating to a single SoftLayer VLAN firewall. This is the object which ties the running rules to a specific downstream server. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request.
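+//
+// A short, assumed usage sketch for this service (the firewall id and the mask
+// properties are placeholders; an authenticated *session.Session named sess is
+// assumed):
+//
+//	fwSvc := GetNetworkVlanFirewallService(sess).Id(7890)
+//	fw, err := fwSvc.Mask("id;firewallType;datacenter[name]").GetObject()
+//	if err != nil {
+//		// handle the SoftLayer API error
+//	}
+//	_ = fw
+//	rules, err := fwSvc.GetRules()
+//	if err == nil {
+//		fmt.Println(len(rules))
+//	}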
+type Network_Vlan_Firewall struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkVlanFirewallService returns an instance of the Network_Vlan_Firewall SoftLayer service +func GetNetworkVlanFirewallService(sess *session.Session) Network_Vlan_Firewall { + return Network_Vlan_Firewall{Session: sess} +} + +func (r Network_Vlan_Firewall) Id(id int) Network_Vlan_Firewall { + r.Options.Id = &id + return r +} + +func (r Network_Vlan_Firewall) Mask(mask string) Network_Vlan_Firewall { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Vlan_Firewall) Filter(filter string) Network_Vlan_Firewall { + r.Options.Filter = filter + return r +} + +func (r Network_Vlan_Firewall) Limit(limit int) Network_Vlan_Firewall { + r.Options.Limit = &limit + return r +} + +func (r Network_Vlan_Firewall) Offset(offset int) Network_Vlan_Firewall { + r.Options.Offset = &offset + return r +} + +// Approve a request from technical support to bypass the firewall. Once approved, support will be able to route and unroute the VLAN on the firewall. +func (r Network_Vlan_Firewall) ApproveBypassRequest() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "approveBypassRequest", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Vlan_Firewall) GetAccountId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getAccountId", nil, &r.Options, &resp) + return +} + +// Retrieve A firewall's allotted bandwidth (measured in GB). +func (r Network_Vlan_Firewall) GetBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this firewall is attached to. +func (r Network_Vlan_Firewall) GetBillingCycleBandwidthUsage() (resp []datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getBillingCycleBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The raw private bandwidth usage data for the current billing cycle. +func (r Network_Vlan_Firewall) GetBillingCyclePrivateBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getBillingCyclePrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The raw public bandwidth usage data for the current billing cycle. +func (r Network_Vlan_Firewall) GetBillingCyclePublicBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getBillingCyclePublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a Hardware Firewall (Dedicated). +func (r Network_Vlan_Firewall) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve Administrative bypass request status. 
+func (r Network_Vlan_Firewall) GetBypassRequestStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getBypassRequestStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The datacenter that the firewall resides in. +func (r Network_Vlan_Firewall) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve The firewall device type. +func (r Network_Vlan_Firewall) GetFirewallType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getFirewallType", nil, &r.Options, &resp) + return +} + +// Retrieve A name reflecting the hostname and domain of the firewall. This is created from the combined values of the firewall's logical name and vlan number automatically, and thus can not be edited directly. +func (r Network_Vlan_Firewall) GetFullyQualifiedDomainName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getFullyQualifiedDomainName", nil, &r.Options, &resp) + return +} + +// Retrieve The credentials to log in to a firewall device. This is only present for dedicated appliances. +func (r Network_Vlan_Firewall) GetManagementCredentials() (resp datatypes.Software_Component_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getManagementCredentials", nil, &r.Options, &resp) + return +} + +// Retrieve A firewall's metric tracking object. +func (r Network_Vlan_Firewall) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Retrieve The metric tracking object ID for this firewall. +func (r Network_Vlan_Firewall) GetMetricTrackingObjectId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getMetricTrackingObjectId", nil, &r.Options, &resp) + return +} + +// Retrieve The update requests made for this firewall. +func (r Network_Vlan_Firewall) GetNetworkFirewallUpdateRequests() (resp []datatypes.Network_Firewall_Update_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getNetworkFirewallUpdateRequests", nil, &r.Options, &resp) + return +} + +// Retrieve The gateway associated with this firewall, if any. +func (r Network_Vlan_Firewall) GetNetworkGateway() (resp datatypes.Network_Gateway, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getNetworkGateway", nil, &r.Options, &resp) + return +} + +// Retrieve The VLAN object that a firewall is associated with and protecting. +func (r Network_Vlan_Firewall) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getNetworkVlan", nil, &r.Options, &resp) + return +} + +// Retrieve The VLAN objects that a firewall is associated with and protecting. +func (r Network_Vlan_Firewall) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getNetworkVlans", nil, &r.Options, &resp) + return +} + +// getObject returns a SoftLayer_Network_Vlan_Firewall object. You can only get objects for vlans attached to your account that have a network firewall enabled. 
+func (r Network_Vlan_Firewall) GetObject() (resp datatypes.Network_Vlan_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The currently running rule set of this network component firewall. +func (r Network_Vlan_Firewall) GetRules() (resp []datatypes.Network_Vlan_Firewall_Rule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getRules", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Vlan_Firewall) GetTagReferences() (resp []datatypes.Tag_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getTagReferences", nil, &r.Options, &resp) + return +} + +// Retrieve A firewall's associated upgrade request object, if any. +func (r Network_Vlan_Firewall) GetUpgradeRequest() (resp datatypes.Product_Upgrade_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getUpgradeRequest", nil, &r.Options, &resp) + return +} + +// Whether this firewall qualifies for High Availability upgrade. +func (r Network_Vlan_Firewall) IsHighAvailabilityUpgradeAvailable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "isHighAvailabilityUpgradeAvailable", nil, &r.Options, &resp) + return +} + +// Reject a request from technical support to bypass the firewall. Once rejected, IBM support will not be able to route and unroute the VLAN on the firewall. +func (r Network_Vlan_Firewall) RejectBypassRequest() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "rejectBypassRequest", nil, &r.Options, &resp) + return +} + +// This will completely reset the firewall to factory settings. If the firewall is not a dedicated appliance an error will occur. Note, this process is performed asynchronously. During the process all traffic will not be routed through the firewall. +func (r Network_Vlan_Firewall) RestoreDefaults() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "restoreDefaults", nil, &r.Options, &resp) + return +} + +// This method will associate a comma separated list of tags with this object. +func (r Network_Vlan_Firewall) SetTags(tags *string) (resp bool, err error) { + params := []interface{}{ + tags, + } + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "setTags", params, &r.Options, &resp) + return +} + +// Enable or disable route bypass for this context. If enabled, this will bypass the firewall entirely and all traffic will be routed directly to the host(s) behind it. If disabled, traffic will flow through the firewall normally. This feature is only available for Hardware Firewall (Dedicated) and dedicated appliances. 
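+//
+// A minimal sketch, assuming an authenticated session named sess and a
+// placeholder firewall id; the returned transaction carries out the change
+// asynchronously:
+//
+//	txn, err := GetNetworkVlanFirewallService(sess).
+//		Id(7890).
+//		UpdateRouteBypass(sl.Bool(true)) // true = bypass the firewall entirely
+//	if err != nil {
+//		// handle the SoftLayer API error
+//	}
+//	_ = txn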
+func (r Network_Vlan_Firewall) UpdateRouteBypass(bypass *bool) (resp datatypes.Provisioning_Version1_Transaction, err error) { + params := []interface{}{ + bypass, + } + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "updateRouteBypass", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Vlan_Type struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkVlanTypeService returns an instance of the Network_Vlan_Type SoftLayer service +func GetNetworkVlanTypeService(sess *session.Session) Network_Vlan_Type { + return Network_Vlan_Type{Session: sess} +} + +func (r Network_Vlan_Type) Id(id int) Network_Vlan_Type { + r.Options.Id = &id + return r +} + +func (r Network_Vlan_Type) Mask(mask string) Network_Vlan_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Vlan_Type) Filter(filter string) Network_Vlan_Type { + r.Options.Filter = filter + return r +} + +func (r Network_Vlan_Type) Limit(limit int) Network_Vlan_Type { + r.Options.Limit = &limit + return r +} + +func (r Network_Vlan_Type) Offset(offset int) Network_Vlan_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Vlan_Type) GetObject() (resp datatypes.Network_Vlan_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan_Type", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/notification.go b/vendor/github.com/softlayer/softlayer-go/services/notification.go new file mode 100644 index 000000000000..628ad256d57d --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/notification.go @@ -0,0 +1,892 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// Details provided for the notification are basic. Details such as the related preferences, name and keyname for the notification can be retrieved. The keyname property for the notification can be used to refer to a notification when integrating into the SoftLayer Notification system. The name property can used more for display purposes. 
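+//
+// A hedged sketch of listing the subscribable notifications (the mask
+// properties and the KeyName field are assumptions based on the keyname/name
+// properties described above; sess is an authenticated *session.Session):
+//
+//	notifications, err := GetNotificationService(sess).
+//		Mask("id;name;keyName").
+//		GetAllObjects()
+//	if err != nil {
+//		// handle the SoftLayer API error
+//	}
+//	for _, n := range notifications {
+//		if n.KeyName != nil {
+//			fmt.Println(*n.KeyName)
+//		}
+//	}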
+type Notification struct { + Session *session.Session + Options sl.Options +} + +// GetNotificationService returns an instance of the Notification SoftLayer service +func GetNotificationService(sess *session.Session) Notification { + return Notification{Session: sess} +} + +func (r Notification) Id(id int) Notification { + r.Options.Id = &id + return r +} + +func (r Notification) Mask(mask string) Notification { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Notification) Filter(filter string) Notification { + r.Options.Filter = filter + return r +} + +func (r Notification) Limit(limit int) Notification { + r.Options.Limit = &limit + return r +} + +func (r Notification) Offset(offset int) Notification { + r.Options.Offset = &offset + return r +} + +// Use this method to retrieve all active notifications that can be subscribed to. +func (r Notification) GetAllObjects() (resp []datatypes.Notification, err error) { + err = r.Session.DoRequest("SoftLayer_Notification", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Notification) GetObject() (resp datatypes.Notification, err error) { + err = r.Session.DoRequest("SoftLayer_Notification", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The preferences related to the notification. These are preferences are configurable and optional for subscribers to use. +func (r Notification) GetPreferences() (resp []datatypes.Notification_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Notification", "getPreferences", nil, &r.Options, &resp) + return +} + +// Retrieve The required preferences related to the notification. While configurable, the subscriber does not have the option whether to use the preference. +func (r Notification) GetRequiredPreferences() (resp []datatypes.Notification_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Notification", "getRequiredPreferences", nil, &r.Options, &resp) + return +} + +// This is an extension of the SoftLayer_Notification class. These are implementation details specific to those notifications which can be subscribed to and received on a mobile device. +type Notification_Mobile struct { + Session *session.Session + Options sl.Options +} + +// GetNotificationMobileService returns an instance of the Notification_Mobile SoftLayer service +func GetNotificationMobileService(sess *session.Session) Notification_Mobile { + return Notification_Mobile{Session: sess} +} + +func (r Notification_Mobile) Id(id int) Notification_Mobile { + r.Options.Id = &id + return r +} + +func (r Notification_Mobile) Mask(mask string) Notification_Mobile { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Notification_Mobile) Filter(filter string) Notification_Mobile { + r.Options.Filter = filter + return r +} + +func (r Notification_Mobile) Limit(limit int) Notification_Mobile { + r.Options.Limit = &limit + return r +} + +func (r Notification_Mobile) Offset(offset int) Notification_Mobile { + r.Options.Offset = &offset + return r +} + +// Create a new subscriber for a given resource. 
+func (r Notification_Mobile) CreateSubscriberForMobileDevice(keyName *string, resourceTableId *int, userRecordId *int) (resp bool, err error) {
+	params := []interface{}{
+		keyName,
+		resourceTableId,
+		userRecordId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Notification_Mobile", "createSubscriberForMobileDevice", params, &r.Options, &resp)
+	return
+}
+
+// Use this method to retrieve all active notifications that can be subscribed to.
+func (r Notification_Mobile) GetAllObjects() (resp []datatypes.Notification, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Mobile", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Notification_Mobile) GetObject() (resp datatypes.Notification_Mobile, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Mobile", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The preferences related to the notification. These preferences are configurable and optional for subscribers to use.
+func (r Notification_Mobile) GetPreferences() (resp []datatypes.Notification_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Mobile", "getPreferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The required preferences related to the notification. While configurable, the subscriber does not have the option of whether to use the preference.
+func (r Notification_Mobile) GetRequiredPreferences() (resp []datatypes.Notification_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Mobile", "getRequiredPreferences", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Notification_Occurrence_Event struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNotificationOccurrenceEventService returns an instance of the Notification_Occurrence_Event SoftLayer service
+func GetNotificationOccurrenceEventService(sess *session.Session) Notification_Occurrence_Event {
+	return Notification_Occurrence_Event{Session: sess}
+}
+
+func (r Notification_Occurrence_Event) Id(id int) Notification_Occurrence_Event {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Notification_Occurrence_Event) Mask(mask string) Notification_Occurrence_Event {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Notification_Occurrence_Event) Filter(filter string) Notification_Occurrence_Event {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Notification_Occurrence_Event) Limit(limit int) Notification_Occurrence_Event {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Notification_Occurrence_Event) Offset(offset int) Notification_Occurrence_Event {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Acknowledge this notification occurrence event for the current user.
+func (r Notification_Occurrence_Event) AcknowledgeNotification() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "acknowledgeNotification", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates whether or not this event has been acknowledged by the user.
+func (r Notification_Occurrence_Event) GetAcknowledgedFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getAcknowledgedFlag", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Notification_Occurrence_Event) GetAllObjects() (resp []datatypes.Notification_Occurrence_Event, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the contents of the file attached to a SoftLayer event by its given identifier.
+func (r Notification_Occurrence_Event) GetAttachedFile(attachmentId *int) (resp []byte, err error) {
+	params := []interface{}{
+		attachmentId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getAttachedFile", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A collection of attachments for this event which provide supplementary information to impacted users; some examples are RFO (Reason For Outage) and root cause analysis documents.
+func (r Notification_Occurrence_Event) GetAttachments() (resp []datatypes.Notification_Occurrence_Event_Attachment, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getAttachments", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The first update for this event.
+func (r Notification_Occurrence_Event) GetFirstUpdate() (resp datatypes.Notification_Occurrence_Update, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getFirstUpdate", nil, &r.Options, &resp)
+	return
+}
+
+// This method will return the number of impacted owned accounts associated with this event for the current user.
+func (r Notification_Occurrence_Event) GetImpactedAccountCount() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getImpactedAccountCount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A collection of accounts impacted by this event. Each impacted account record relates directly to a [[SoftLayer_Account]].
+func (r Notification_Occurrence_Event) GetImpactedAccounts() (resp []datatypes.Notification_Occurrence_Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getImpactedAccounts", nil, &r.Options, &resp)
+	return
+}
+
+// This method will return the number of impacted devices associated with this event for the current user.
+func (r Notification_Occurrence_Event) GetImpactedDeviceCount() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getImpactedDeviceCount", nil, &r.Options, &resp)
+	return
+}
+
+// This method will return a collection of SoftLayer_Notification_Occurrence_Resource objects which is a listing of the current user's impacted devices that are associated with this event.
+func (r Notification_Occurrence_Event) GetImpactedDevices() (resp []datatypes.Notification_Occurrence_Resource, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getImpactedDevices", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A collection of resources impacted by this event. Each record will relate to some physical resource that the user has access to, such as [[SoftLayer_Hardware]] or [[SoftLayer_Virtual_Guest]].
+func (r Notification_Occurrence_Event) GetImpactedResources() (resp []datatypes.Notification_Occurrence_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getImpactedResources", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of users impacted by this event. Each impacted user record relates directly to a [[SoftLayer_User_Customer]]. +func (r Notification_Occurrence_Event) GetImpactedUsers() (resp []datatypes.Notification_Occurrence_User, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getImpactedUsers", nil, &r.Options, &resp) + return +} + +// Retrieve The last update for this event. +func (r Notification_Occurrence_Event) GetLastUpdate() (resp datatypes.Notification_Occurrence_Update, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getLastUpdate", nil, &r.Options, &resp) + return +} + +// Retrieve The type of event such as planned or unplanned maintenance. +func (r Notification_Occurrence_Event) GetNotificationOccurrenceEventType() (resp datatypes.Notification_Occurrence_Event_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getNotificationOccurrenceEventType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Notification_Occurrence_Event) GetObject() (resp datatypes.Notification_Occurrence_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Notification_Occurrence_Event) GetStatusCode() (resp datatypes.Notification_Occurrence_Status_Code, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getStatusCode", nil, &r.Options, &resp) + return +} + +// Retrieve All updates for this event. +func (r Notification_Occurrence_Event) GetUpdates() (resp []datatypes.Notification_Occurrence_Update, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getUpdates", nil, &r.Options, &resp) + return +} + +// This type contains general information relating to a user that may be impacted by a [[SoftLayer_Notification_Occurrence_Event]]. 
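+//
+// As an assumed sketch, events can be listed and then acknowledged through the
+// Notification_Occurrence_Event service defined above (the mask properties and
+// the Id field access are illustrative; sess is an authenticated
+// *session.Session):
+//
+//	evtSvc := GetNotificationOccurrenceEventService(sess)
+//	events, err := evtSvc.Mask("id;statusCode[keyName]").GetAllObjects()
+//	if err != nil {
+//		// handle the SoftLayer API error
+//	}
+//	if len(events) > 0 && events[0].Id != nil {
+//		_, err = evtSvc.Id(*events[0].Id).AcknowledgeNotification()
+//	}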
+type Notification_Occurrence_User struct { + Session *session.Session + Options sl.Options +} + +// GetNotificationOccurrenceUserService returns an instance of the Notification_Occurrence_User SoftLayer service +func GetNotificationOccurrenceUserService(sess *session.Session) Notification_Occurrence_User { + return Notification_Occurrence_User{Session: sess} +} + +func (r Notification_Occurrence_User) Id(id int) Notification_Occurrence_User { + r.Options.Id = &id + return r +} + +func (r Notification_Occurrence_User) Mask(mask string) Notification_Occurrence_User { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Notification_Occurrence_User) Filter(filter string) Notification_Occurrence_User { + r.Options.Filter = filter + return r +} + +func (r Notification_Occurrence_User) Limit(limit int) Notification_Occurrence_User { + r.Options.Limit = &limit + return r +} + +func (r Notification_Occurrence_User) Offset(offset int) Notification_Occurrence_User { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Notification_Occurrence_User) Acknowledge() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_User", "acknowledge", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Notification_Occurrence_User) GetAllObjects() (resp []datatypes.Notification_Occurrence_User, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_User", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Notification_Occurrence_User) GetImpactedDeviceCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_User", "getImpactedDeviceCount", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of resources impacted by the associated event. +func (r Notification_Occurrence_User) GetImpactedResources() (resp []datatypes.Notification_Occurrence_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_User", "getImpactedResources", nil, &r.Options, &resp) + return +} + +// Retrieve The associated event. +func (r Notification_Occurrence_User) GetNotificationOccurrenceEvent() (resp datatypes.Notification_Occurrence_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_User", "getNotificationOccurrenceEvent", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Notification_Occurrence_User) GetObject() (resp datatypes.Notification_Occurrence_User, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_User", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The impacted user. +func (r Notification_Occurrence_User) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_User", "getUser", nil, &r.Options, &resp) + return +} + +// A notification subscriber will have details pertaining to the subscriber's notification subscription. You can receive details such as preferences, details of the preferences, delivery methods and the delivery methods for the subscriber. +// +// NOTE: There are preferences and delivery methods that cannot be modified. Also, there are some subscriptions that are required. 
+type Notification_User_Subscriber struct { + Session *session.Session + Options sl.Options +} + +// GetNotificationUserSubscriberService returns an instance of the Notification_User_Subscriber SoftLayer service +func GetNotificationUserSubscriberService(sess *session.Session) Notification_User_Subscriber { + return Notification_User_Subscriber{Session: sess} +} + +func (r Notification_User_Subscriber) Id(id int) Notification_User_Subscriber { + r.Options.Id = &id + return r +} + +func (r Notification_User_Subscriber) Mask(mask string) Notification_User_Subscriber { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Notification_User_Subscriber) Filter(filter string) Notification_User_Subscriber { + r.Options.Filter = filter + return r +} + +func (r Notification_User_Subscriber) Limit(limit int) Notification_User_Subscriber { + r.Options.Limit = &limit + return r +} + +func (r Notification_User_Subscriber) Offset(offset int) Notification_User_Subscriber { + r.Options.Offset = &offset + return r +} + +// Use the method to create a new subscription for a notification. This method is the entry method to the notification system. Certain properties are required to create a subscription while others are optional. +// +// The required property is the resourceRecord property which is type SoftLayer_Notification_User_Subscriber_Resource. For the resourceRecord property, the only property that needs to be populated is the resourceTableId. The resourceTableId is the unique identifier of a SoftLayer service to create the subscription for. For example, the unique identifier of the Storage Evault service to create the subscription on. +// +// Optional properties that can be set is the preferences property. The preference property is an array SoftLayer_Notification_User_Subscriber_Preference. By default, the system will populate the preferences with the default values if no preferences are passed in. The preferences passed in must be the preferences related to the notification subscribing to. The notification preferences and preference details (such as minimum and maximum values) can be retrieved using the SoftLayer_Notification service. The properties that need to be populated for preferences are the notificationPreferenceId and value. +// +// For example to create a subscriber for a Storage EVault service to be notified 15 times during a billing cycle and to be notified when the vault usage reaches 85% of its allowed capacity use the following structure: +// +// +// *userRecordId = 1111 +// *notificationId = 3 +// *resourceRecord +// **resourceTableId = 1234 +// *preferences[1] +// **notificationPreferenceId = 2 +// **value = 85 +// *preference[2] +// **notificationPreferenceId = 3 +// **value = 15 +// +// +func (r Notification_User_Subscriber) CreateObject(templateObject *datatypes.Notification_User_Subscriber) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "createObject", params, &r.Options, &resp) + return +} + +// The subscriber's subscription status can be "turned off" or "turned on" if the subscription is not required. +// +// Subscriber preferences may also be edited. To edit the preferences, you must pass in the id off the preferences to edit. Here is an example of structure to pass in. 
In this example, the structure will set the subscriber status to active and the threshold preference to 90 and the limit preference to 20 +// +// +// *id = 1111 +// *active = 1 +// *preferences[1] +// **id = 11 +// **value = 90 +// *preference[2] +// **id = 12 +// **value = 20 +func (r Notification_User_Subscriber) EditObject(templateObject *datatypes.Notification_User_Subscriber) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The delivery methods used to send the subscribed notification. +func (r Notification_User_Subscriber) GetDeliveryMethods() (resp []datatypes.Notification_Delivery_Method, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "getDeliveryMethods", nil, &r.Options, &resp) + return +} + +// Retrieve Notification subscribed to. +func (r Notification_User_Subscriber) GetNotification() (resp datatypes.Notification, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "getNotification", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Notification_User_Subscriber) GetObject() (resp datatypes.Notification_User_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Associated subscriber preferences used for the notification subscription. For example, preferences include number of deliveries (limit) and threshold. +func (r Notification_User_Subscriber) GetPreferences() (resp []datatypes.Notification_User_Subscriber_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "getPreferences", nil, &r.Options, &resp) + return +} + +// Retrieve Preference details such as description, minimum and maximum limits, default value and unit of measure. +func (r Notification_User_Subscriber) GetPreferencesDetails() (resp []datatypes.Notification_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "getPreferencesDetails", nil, &r.Options, &resp) + return +} + +// Retrieve The subscriber id to resource id mapping. +func (r Notification_User_Subscriber) GetResourceRecord() (resp datatypes.Notification_User_Subscriber_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "getResourceRecord", nil, &r.Options, &resp) + return +} + +// Retrieve User record for the subscription. +func (r Notification_User_Subscriber) GetUserRecord() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "getUserRecord", nil, &r.Options, &resp) + return +} + +// A notification subscriber will have details pertaining to the subscriber's notification subscription. You can receive details such as preferences, details of the preferences, delivery methods and the delivery methods for the subscriber. +// +// NOTE: There are preferences and delivery methods that cannot be modified. Also, there are some subscriptions that are required. 
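+//
+// A hedged Go sketch of the CreateObject template documented above for these
+// subscriber services (the struct field names and the string type of the
+// preference Value are assumptions mapped from the listed property names; the
+// numeric ids are the same illustrative values):
+//
+//	template := datatypes.Notification_User_Subscriber{
+//		UserRecordId:   sl.Int(1111),
+//		NotificationId: sl.Int(3),
+//		ResourceRecord: &datatypes.Notification_User_Subscriber_Resource{
+//			ResourceTableId: sl.Int(1234),
+//		},
+//		Preferences: []datatypes.Notification_User_Subscriber_Preference{
+//			{NotificationPreferenceId: sl.Int(2), Value: sl.String("85")},
+//			{NotificationPreferenceId: sl.Int(3), Value: sl.String("15")},
+//		},
+//	}
+//	ok, err := GetNotificationUserSubscriberService(sess).CreateObject(&template)
+//	_, _ = ok, err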
+type Notification_User_Subscriber_Billing struct { + Session *session.Session + Options sl.Options +} + +// GetNotificationUserSubscriberBillingService returns an instance of the Notification_User_Subscriber_Billing SoftLayer service +func GetNotificationUserSubscriberBillingService(sess *session.Session) Notification_User_Subscriber_Billing { + return Notification_User_Subscriber_Billing{Session: sess} +} + +func (r Notification_User_Subscriber_Billing) Id(id int) Notification_User_Subscriber_Billing { + r.Options.Id = &id + return r +} + +func (r Notification_User_Subscriber_Billing) Mask(mask string) Notification_User_Subscriber_Billing { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Notification_User_Subscriber_Billing) Filter(filter string) Notification_User_Subscriber_Billing { + r.Options.Filter = filter + return r +} + +func (r Notification_User_Subscriber_Billing) Limit(limit int) Notification_User_Subscriber_Billing { + r.Options.Limit = &limit + return r +} + +func (r Notification_User_Subscriber_Billing) Offset(offset int) Notification_User_Subscriber_Billing { + r.Options.Offset = &offset + return r +} + +// Use the method to create a new subscription for a notification. This method is the entry method to the notification system. Certain properties are required to create a subscription while others are optional. +// +// The required property is the resourceRecord property which is type SoftLayer_Notification_User_Subscriber_Resource. For the resourceRecord property, the only property that needs to be populated is the resourceTableId. The resourceTableId is the unique identifier of a SoftLayer service to create the subscription for. For example, the unique identifier of the Storage Evault service to create the subscription on. +// +// Optional properties that can be set is the preferences property. The preference property is an array SoftLayer_Notification_User_Subscriber_Preference. By default, the system will populate the preferences with the default values if no preferences are passed in. The preferences passed in must be the preferences related to the notification subscribing to. The notification preferences and preference details (such as minimum and maximum values) can be retrieved using the SoftLayer_Notification service. The properties that need to be populated for preferences are the notificationPreferenceId and value. +// +// For example to create a subscriber for a Storage EVault service to be notified 15 times during a billing cycle and to be notified when the vault usage reaches 85% of its allowed capacity use the following structure: +// +// +// *userRecordId = 1111 +// *notificationId = 3 +// *resourceRecord +// **resourceTableId = 1234 +// *preferences[1] +// **notificationPreferenceId = 2 +// **value = 85 +// *preference[2] +// **notificationPreferenceId = 3 +// **value = 15 +// +// +func (r Notification_User_Subscriber_Billing) CreateObject(templateObject *datatypes.Notification_User_Subscriber) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "createObject", params, &r.Options, &resp) + return +} + +// The subscriber's subscription status can be "turned off" or "turned on" if the subscription is not required. +// +// Subscriber preferences may also be edited. 
To edit the preferences, you must pass in the id off the preferences to edit. Here is an example of structure to pass in. In this example, the structure will set the subscriber status to active and the threshold preference to 90 and the limit preference to 20 +// +// +// *id = 1111 +// *active = 1 +// *preferences[1] +// **id = 11 +// **value = 90 +// *preference[2] +// **id = 12 +// **value = 20 +func (r Notification_User_Subscriber_Billing) EditObject(templateObject *datatypes.Notification_User_Subscriber) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The delivery methods used to send the subscribed notification. +func (r Notification_User_Subscriber_Billing) GetDeliveryMethods() (resp []datatypes.Notification_Delivery_Method, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "getDeliveryMethods", nil, &r.Options, &resp) + return +} + +// Retrieve Notification subscribed to. +func (r Notification_User_Subscriber_Billing) GetNotification() (resp datatypes.Notification, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "getNotification", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Notification_User_Subscriber_Billing) GetObject() (resp datatypes.Notification_User_Subscriber_Billing, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Associated subscriber preferences used for the notification subscription. For example, preferences include number of deliveries (limit) and threshold. +func (r Notification_User_Subscriber_Billing) GetPreferences() (resp []datatypes.Notification_User_Subscriber_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "getPreferences", nil, &r.Options, &resp) + return +} + +// Retrieve Preference details such as description, minimum and maximum limits, default value and unit of measure. +func (r Notification_User_Subscriber_Billing) GetPreferencesDetails() (resp []datatypes.Notification_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "getPreferencesDetails", nil, &r.Options, &resp) + return +} + +// Retrieve The subscriber id to resource id mapping. +func (r Notification_User_Subscriber_Billing) GetResourceRecord() (resp datatypes.Notification_User_Subscriber_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "getResourceRecord", nil, &r.Options, &resp) + return +} + +// Retrieve User record for the subscription. +func (r Notification_User_Subscriber_Billing) GetUserRecord() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "getUserRecord", nil, &r.Options, &resp) + return +} + +// A notification subscriber will have details pertaining to the subscriber's notification subscription. You can receive details such as preferences, details of the preferences, delivery methods and the delivery methods for the subscriber. +// +// NOTE: There are preferences and delivery methods that cannot be modified. Also, there are some subscriptions that are required. 
+type Notification_User_Subscriber_Mobile struct { + Session *session.Session + Options sl.Options +} + +// GetNotificationUserSubscriberMobileService returns an instance of the Notification_User_Subscriber_Mobile SoftLayer service +func GetNotificationUserSubscriberMobileService(sess *session.Session) Notification_User_Subscriber_Mobile { + return Notification_User_Subscriber_Mobile{Session: sess} +} + +func (r Notification_User_Subscriber_Mobile) Id(id int) Notification_User_Subscriber_Mobile { + r.Options.Id = &id + return r +} + +func (r Notification_User_Subscriber_Mobile) Mask(mask string) Notification_User_Subscriber_Mobile { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Notification_User_Subscriber_Mobile) Filter(filter string) Notification_User_Subscriber_Mobile { + r.Options.Filter = filter + return r +} + +func (r Notification_User_Subscriber_Mobile) Limit(limit int) Notification_User_Subscriber_Mobile { + r.Options.Limit = &limit + return r +} + +func (r Notification_User_Subscriber_Mobile) Offset(offset int) Notification_User_Subscriber_Mobile { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Notification_User_Subscriber_Mobile) ClearSnoozeTimer() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "clearSnoozeTimer", nil, &r.Options, &resp) + return +} + +// Use this method to create a new subscription for a notification. This method is the entry method to the notification system. Certain properties are required to create a subscription while others are optional. +// +// The required property is the resourceRecord property, which is of type SoftLayer_Notification_User_Subscriber_Resource. For the resourceRecord property, the only property that needs to be populated is the resourceTableId. The resourceTableId is the unique identifier of a SoftLayer service to create the subscription for. For example, the unique identifier of the Storage EVault service to create the subscription on. +// +// An optional property that can be set is the preferences property. The preferences property is an array of SoftLayer_Notification_User_Subscriber_Preference. By default, the system will populate the preferences with the default values if no preferences are passed in. The preferences passed in must be the preferences related to the notification being subscribed to. The notification preferences and preference details (such as minimum and maximum values) can be retrieved using the SoftLayer_Notification service. The properties that need to be populated for preferences are the notificationPreferenceId and value.
+// +// For example, to create a subscriber for a Storage EVault service to be notified 15 times during a billing cycle and to be notified when the vault usage reaches 85% of its allowed capacity, use the following structure: +// +// +// *userRecordId = 1111 +// *notificationId = 3 +// *resourceRecord +// **resourceTableId = 1234 +// *preferences[1] +// **notificationPreferenceId = 2 +// **value = 85 +// *preferences[2] +// **notificationPreferenceId = 3 +// **value = 15 +// +// +func (r Notification_User_Subscriber_Mobile) CreateObject(templateObject *datatypes.Notification_User_Subscriber) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "createObject", params, &r.Options, &resp) + return +} + +// The subscriber's subscription status can be "turned off" or "turned on" if the subscription is not required. +// +// Subscriber preferences may also be edited. To edit the preferences, you must pass in the id of the preferences to edit. Here is an example of the structure to pass in. In this example, the structure will set the subscriber status to active, the threshold preference to 90, and the limit preference to 20. +// +// +// *id = 1111 +// *active = 1 +// *preferences[1] +// **id = 11 +// **value = 90 +// *preferences[2] +// **id = 12 +// **value = 20 +func (r Notification_User_Subscriber_Mobile) EditObject(templateObject *datatypes.Notification_User_Subscriber) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The delivery methods used to send the subscribed notification. +func (r Notification_User_Subscriber_Mobile) GetDeliveryMethods() (resp []datatypes.Notification_Delivery_Method, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "getDeliveryMethods", nil, &r.Options, &resp) + return +} + +// Retrieve Notification subscribed to. +func (r Notification_User_Subscriber_Mobile) GetNotification() (resp datatypes.Notification, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "getNotification", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Notification_User_Subscriber_Mobile) GetObject() (resp datatypes.Notification_User_Subscriber_Mobile, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Associated subscriber preferences used for the notification subscription. For example, preferences include number of deliveries (limit) and threshold. +func (r Notification_User_Subscriber_Mobile) GetPreferences() (resp []datatypes.Notification_User_Subscriber_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "getPreferences", nil, &r.Options, &resp) + return +} + +// Retrieve Preference details such as description, minimum and maximum limits, default value and unit of measure. +func (r Notification_User_Subscriber_Mobile) GetPreferencesDetails() (resp []datatypes.Notification_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "getPreferencesDetails", nil, &r.Options, &resp) + return +} + +// Retrieve The subscriber id to resource id mapping.
+func (r Notification_User_Subscriber_Mobile) GetResourceRecord() (resp datatypes.Notification_User_Subscriber_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "getResourceRecord", nil, &r.Options, &resp) + return +} + +// Retrieve User record for the subscription. +func (r Notification_User_Subscriber_Mobile) GetUserRecord() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "getUserRecord", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Notification_User_Subscriber_Mobile) SetSnoozeTimer(start *int, end *int) (resp bool, err error) { + params := []interface{}{ + start, + end, + } + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "setSnoozeTimer", params, &r.Options, &resp) + return +} + +// Preferences are settings that can be modified to change the behavior of the subscription. For example, modify the limit preference to only receive notifications 10 times instead of 1 during a billing cycle. +// +// NOTE: Some preferences have certain restrictions on values that can be set. +type Notification_User_Subscriber_Preference struct { + Session *session.Session + Options sl.Options +} + +// GetNotificationUserSubscriberPreferenceService returns an instance of the Notification_User_Subscriber_Preference SoftLayer service +func GetNotificationUserSubscriberPreferenceService(sess *session.Session) Notification_User_Subscriber_Preference { + return Notification_User_Subscriber_Preference{Session: sess} +} + +func (r Notification_User_Subscriber_Preference) Id(id int) Notification_User_Subscriber_Preference { + r.Options.Id = &id + return r +} + +func (r Notification_User_Subscriber_Preference) Mask(mask string) Notification_User_Subscriber_Preference { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Notification_User_Subscriber_Preference) Filter(filter string) Notification_User_Subscriber_Preference { + r.Options.Filter = filter + return r +} + +func (r Notification_User_Subscriber_Preference) Limit(limit int) Notification_User_Subscriber_Preference { + r.Options.Limit = &limit + return r +} + +func (r Notification_User_Subscriber_Preference) Offset(offset int) Notification_User_Subscriber_Preference { + r.Options.Offset = &offset + return r +} + +// Use the method to create a new notification preference for a subscriber +func (r Notification_User_Subscriber_Preference) CreateObject(templateObject *datatypes.Notification_User_Subscriber_Preference) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Preference", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Notification_User_Subscriber_Preference) EditObjects(templateObjects []datatypes.Notification_User_Subscriber_Preference) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Preference", "editObjects", params, &r.Options, &resp) + return +} + +// Retrieve Details such name, keyname, minimum and maximum values for the preference. 
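+//
+// A minimal usage sketch (hypothetical preference id; sess is an existing *session.Session; not part of the generated API):
+//
+//	prefSvc := services.GetNotificationUserSubscriberPreferenceService(sess)
+//	defaultPref, err := prefSvc.Id(11).GetDefaultPreference()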
+func (r Notification_User_Subscriber_Preference) GetDefaultPreference() (resp datatypes.Notification_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Preference", "getDefaultPreference", nil, &r.Options, &resp) + return +} + +// Retrieve Details of the subscriber tied to the preference. +func (r Notification_User_Subscriber_Preference) GetNotificationUserSubscriber() (resp datatypes.Notification_User_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Preference", "getNotificationUserSubscriber", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Notification_User_Subscriber_Preference) GetObject() (resp datatypes.Notification_User_Subscriber_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Preference", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/product.go b/vendor/github.com/softlayer/softlayer-go/services/product.go new file mode 100644 index 000000000000..b627a458c173 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/product.go @@ -0,0 +1,2077 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// The SoftLayer_Product_Item_Category data type contains general category information for prices. +type Product_Item_Category struct { + Session *session.Session + Options sl.Options +} + +// GetProductItemCategoryService returns an instance of the Product_Item_Category SoftLayer service +func GetProductItemCategoryService(sess *session.Session) Product_Item_Category { + return Product_Item_Category{Session: sess} +} + +func (r Product_Item_Category) Id(id int) Product_Item_Category { + r.Options.Id = &id + return r +} + +func (r Product_Item_Category) Mask(mask string) Product_Item_Category { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Item_Category) Filter(filter string) Product_Item_Category { + r.Options.Filter = filter + return r +} + +func (r Product_Item_Category) Limit(limit int) Product_Item_Category { + r.Options.Limit = &limit + return r +} + +func (r Product_Item_Category) Offset(offset int) Product_Item_Category { + r.Options.Offset = &offset + return r +} + +// Returns a list of of active Items in the "Additional Services" package with their active prices for a given product item category and sorts them by price. 
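+//
+// A minimal usage sketch (hypothetical category id; sess is an existing *session.Session; not part of the generated API):
+//
+//	catSvc := services.GetProductItemCategoryService(sess)
+//	items, err := catSvc.Id(80).GetAdditionalProductsForCategory()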
+func (r Product_Item_Category) GetAdditionalProductsForCategory() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getAdditionalProductsForCategory", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Category) GetBandwidthCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getBandwidthCategories", nil, &r.Options, &resp) + return +} + +// Retrieve The billing items associated with an account that share a category code with an item category's category code. +func (r Product_Item_Category) GetBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getBillingItems", nil, &r.Options, &resp) + return +} + +// This method returns a collection of computing categories. These categories are also top level items in a service offering. +func (r Product_Item_Category) GetComputingCategories(resetCache *bool) (resp []datatypes.Product_Item_Category, err error) { + params := []interface{}{ + resetCache, + } + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getComputingCategories", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Category) GetCustomUsageRatesCategories(resetCache *bool) (resp []datatypes.Product_Item_Category, err error) { + params := []interface{}{ + resetCache, + } + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getCustomUsageRatesCategories", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Category) GetExternalResourceCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getExternalResourceCategories", nil, &r.Options, &resp) + return +} + +// Retrieve This invoice item's "item category group". +func (r Product_Item_Category) GetGroup() (resp datatypes.Product_Item_Category_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getGroup", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of service offering category groups. Each group contains a collection of items associated with this category. +func (r Product_Item_Category) GetGroups() (resp []datatypes.Product_Package_Item_Category_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getGroups", nil, &r.Options, &resp) + return +} + +// Each product item price must be tied to a category for it to be sold. These categories describe how a particular product item is sold. For example, the 250GB hard drive can be sold as disk0, disk1, ... disk11. There are different prices for this product item depending on which category it is. This keeps down the number of products in total. +func (r Product_Item_Category) GetObject() (resp datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Category) GetObjectStorageCategories(resetCache *bool) (resp []datatypes.Product_Item_Category, err error) { + params := []interface{}{ + resetCache, + } + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getObjectStorageCategories", params, &r.Options, &resp) + return +} + +// Retrieve Any unique options associated with an item category. 
+func (r Product_Item_Category) GetOrderOptions() (resp []datatypes.Product_Item_Category_Order_Option_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getOrderOptions", nil, &r.Options, &resp) + return +} + +// Retrieve A list of configuration available in this category.' +func (r Product_Item_Category) GetPackageConfigurations() (resp []datatypes.Product_Package_Order_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getPackageConfigurations", nil, &r.Options, &resp) + return +} + +// Retrieve A list of preset configurations this category is used in.' +func (r Product_Item_Category) GetPresetConfigurations() (resp []datatypes.Product_Package_Preset_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getPresetConfigurations", nil, &r.Options, &resp) + return +} + +// Retrieve The question references that are associated with an item category. +func (r Product_Item_Category) GetQuestionReferences() (resp []datatypes.Product_Item_Category_Question_Xref, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getQuestionReferences", nil, &r.Options, &resp) + return +} + +// Retrieve The questions that are associated with an item category. +func (r Product_Item_Category) GetQuestions() (resp []datatypes.Product_Item_Category_Question, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getQuestions", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Category) GetSoftwareCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getSoftwareCategories", nil, &r.Options, &resp) + return +} + +// This method returns a list of subnet categories. +func (r Product_Item_Category) GetSubnetCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getSubnetCategories", nil, &r.Options, &resp) + return +} + +// This method returns a collection of computing categories. These categories are also top level items in a service offering. +func (r Product_Item_Category) GetTopLevelCategories(resetCache *bool) (resp []datatypes.Product_Item_Category, err error) { + params := []interface{}{ + resetCache, + } + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getTopLevelCategories", params, &r.Options, &resp) + return +} + +// This method returns service product categories that can be canceled via API. You can use these categories to find the billing items you wish to cancel. +func (r Product_Item_Category) GetValidCancelableServiceItemCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getValidCancelableServiceItemCategories", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Category) GetVlanCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getVlanCategories", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Product_Item_Category_Group data type contains general category group information. 
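+//
+// A minimal usage sketch (hypothetical group id; sess is an existing *session.Session; not part of the generated API):
+//
+//	groupSvc := services.GetProductItemCategoryGroupService(sess)
+//	group, err := groupSvc.Id(123).GetObject()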
+type Product_Item_Category_Group struct { + Session *session.Session + Options sl.Options +} + +// GetProductItemCategoryGroupService returns an instance of the Product_Item_Category_Group SoftLayer service +func GetProductItemCategoryGroupService(sess *session.Session) Product_Item_Category_Group { + return Product_Item_Category_Group{Session: sess} +} + +func (r Product_Item_Category_Group) Id(id int) Product_Item_Category_Group { + r.Options.Id = &id + return r +} + +func (r Product_Item_Category_Group) Mask(mask string) Product_Item_Category_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Item_Category_Group) Filter(filter string) Product_Item_Category_Group { + r.Options.Filter = filter + return r +} + +func (r Product_Item_Category_Group) Limit(limit int) Product_Item_Category_Group { + r.Options.Limit = &limit + return r +} + +func (r Product_Item_Category_Group) Offset(offset int) Product_Item_Category_Group { + r.Options.Offset = &offset + return r +} + +// Each product item category must be tied to a category group. These category groups describe how a particular product item category is categorized. For example, the disk0, disk1, ... disk11 can be categorized as Server and Attached Services. There are different groups for each of this product item category depending on the function of the item product in the subject category. +func (r Product_Item_Category_Group) GetObject() (resp datatypes.Product_Item_Category_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category_Group", "getObject", nil, &r.Options, &resp) + return +} + +// Represents the assignment of a policy to a product. The existence of a record means that the associated product is subject to the terms defined in the document content of the policy. +type Product_Item_Policy_Assignment struct { + Session *session.Session + Options sl.Options +} + +// GetProductItemPolicyAssignmentService returns an instance of the Product_Item_Policy_Assignment SoftLayer service +func GetProductItemPolicyAssignmentService(sess *session.Session) Product_Item_Policy_Assignment { + return Product_Item_Policy_Assignment{Session: sess} +} + +func (r Product_Item_Policy_Assignment) Id(id int) Product_Item_Policy_Assignment { + r.Options.Id = &id + return r +} + +func (r Product_Item_Policy_Assignment) Mask(mask string) Product_Item_Policy_Assignment { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Item_Policy_Assignment) Filter(filter string) Product_Item_Policy_Assignment { + r.Options.Filter = filter + return r +} + +func (r Product_Item_Policy_Assignment) Limit(limit int) Product_Item_Policy_Assignment { + r.Options.Limit = &limit + return r +} + +func (r Product_Item_Policy_Assignment) Offset(offset int) Product_Item_Policy_Assignment { + r.Options.Offset = &offset + return r +} + +// Register the acceptance of the associated policy to product assignment, and link the created record to a Ticket. 
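+//
+// A minimal usage sketch (hypothetical assignment and ticket ids; sess is an existing *session.Session; not part of the generated API):
+//
+//	assignmentSvc := services.GetProductItemPolicyAssignmentService(sess)
+//	accepted, err := assignmentSvc.Id(100).AcceptFromTicket(sl.Int(987654))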
+func (r Product_Item_Policy_Assignment) AcceptFromTicket(ticketId *int) (resp bool, err error) { + params := []interface{}{ + ticketId, + } + err = r.Session.DoRequest("SoftLayer_Product_Item_Policy_Assignment", "acceptFromTicket", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Policy_Assignment) GetObject() (resp datatypes.Product_Item_Policy_Assignment, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Policy_Assignment", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve the binary contents of the associated PDF policy document. +func (r Product_Item_Policy_Assignment) GetPolicyDocumentContents() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Policy_Assignment", "getPolicyDocumentContents", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the assigned policy. +func (r Product_Item_Policy_Assignment) GetPolicyName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Policy_Assignment", "getPolicyName", nil, &r.Options, &resp) + return +} + +// Retrieve The [[SoftLayer_Product_Item]] for this policy assignment. +func (r Product_Item_Policy_Assignment) GetProduct() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Policy_Assignment", "getProduct", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Product_Item_Price data type contains general information relating to a single SoftLayer product item price. You can find out what packages each price is in as well as which category under which this price is sold. All prices are returned in floating point values measured in US Dollars ($USD). +type Product_Item_Price struct { + Session *session.Session + Options sl.Options +} + +// GetProductItemPriceService returns an instance of the Product_Item_Price SoftLayer service +func GetProductItemPriceService(sess *session.Session) Product_Item_Price { + return Product_Item_Price{Session: sess} +} + +func (r Product_Item_Price) Id(id int) Product_Item_Price { + r.Options.Id = &id + return r +} + +func (r Product_Item_Price) Mask(mask string) Product_Item_Price { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Item_Price) Filter(filter string) Product_Item_Price { + r.Options.Filter = filter + return r +} + +func (r Product_Item_Price) Limit(limit int) Product_Item_Price { + r.Options.Limit = &limit + return r +} + +func (r Product_Item_Price) Offset(offset int) Product_Item_Price { + r.Options.Offset = &offset + return r +} + +// Retrieve The account that the item price is restricted to. +func (r Product_Item_Price) GetAccountRestrictions() (resp []datatypes.Product_Item_Price_Account_Restriction, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getAccountRestrictions", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Item_Price) GetAttributes() (resp []datatypes.Product_Item_Price_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the price is for Big Data OS/Journal disks only. 
(Deprecated) +func (r Product_Item_Price) GetBigDataOsJournalDiskFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getBigDataOsJournalDiskFlag", nil, &r.Options, &resp) + return +} + +// Retrieve cross reference for bundles +func (r Product_Item_Price) GetBundleReferences() (resp []datatypes.Product_Item_Bundles, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getBundleReferences", nil, &r.Options, &resp) + return +} + +// Retrieve The maximum capacity value for which this price is suitable. +func (r Product_Item_Price) GetCapacityRestrictionMaximum() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getCapacityRestrictionMaximum", nil, &r.Options, &resp) + return +} + +// Retrieve The minimum capacity value for which this price is suitable. +func (r Product_Item_Price) GetCapacityRestrictionMinimum() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getCapacityRestrictionMinimum", nil, &r.Options, &resp) + return +} + +// Retrieve The type of capacity restriction by which this price must abide. +func (r Product_Item_Price) GetCapacityRestrictionType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getCapacityRestrictionType", nil, &r.Options, &resp) + return +} + +// Retrieve All categories which this item is a member. +func (r Product_Item_Price) GetCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getCategories", nil, &r.Options, &resp) + return +} + +// Retrieve Signifies pricing that is only available on a dedicated host virtual server order. +func (r Product_Item_Price) GetDedicatedHostInstanceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getDedicatedHostInstanceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether this price defines a software license for its product item. +func (r Product_Item_Price) GetDefinedSoftwareLicenseFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getDefinedSoftwareLicenseFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The product item a price is tied to. +func (r Product_Item_Price) GetItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getItem", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Price) GetObject() (resp datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Item_Price) GetOrderPremiums() (resp []datatypes.Product_Item_Price_Premium, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getOrderPremiums", nil, &r.Options, &resp) + return +} + +// Retrieve cross reference for packages +func (r Product_Item_Price) GetPackageReferences() (resp []datatypes.Product_Package_Item_Prices, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getPackageReferences", nil, &r.Options, &resp) + return +} + +// Retrieve A price's packages under which this item is sold. 
+func (r Product_Item_Price) GetPackages() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getPackages", nil, &r.Options, &resp) + return +} + +// Retrieve A list of preset configurations this price is used in.' +func (r Product_Item_Price) GetPresetConfigurations() (resp []datatypes.Product_Package_Preset_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getPresetConfigurations", nil, &r.Options, &resp) + return +} + +// Retrieve The type keyname of this price which can be STANDARD or TIERED. +func (r Product_Item_Price) GetPriceType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getPriceType", nil, &r.Options, &resp) + return +} + +// Retrieve The pricing location group that this price is applicable for. Prices that have a pricing location group will only be available for ordering with the locations specified on the location group. +func (r Product_Item_Price) GetPricingLocationGroup() (resp datatypes.Location_Group_Pricing, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getPricingLocationGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The number of server cores required to order this item. This is deprecated. Use [[SoftLayer_Product_Item_Price/getCapacityRestrictionMinimum|getCapacityRestrictionMinimum]] and [[SoftLayer_Product_Item_Price/getCapacityRestrictionMaximum|getCapacityRestrictionMaximum]] +func (r Product_Item_Price) GetRequiredCoreCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getRequiredCoreCount", nil, &r.Options, &resp) + return +} + +// Returns a collection of rate-based [[SoftLayer_Product_Item_Price]] objects associated with the [[SoftLayer_Product_Item]] objects and the [[SoftLayer_Location]] specified. The location is required to get the appropriate rate-based prices because the usage rates may vary from datacenter to datacenter. 
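+//
+// A minimal usage sketch (hypothetical location and item ids; sess is an existing *session.Session; not part of the generated API):
+//
+//	priceSvc := services.GetProductItemPriceService(sess)
+//	location := datatypes.Location{Id: sl.Int(138124)}
+//	items := []datatypes.Product_Item{{Id: sl.Int(4014)}}
+//	prices, err := priceSvc.GetUsageRatePrices(&location, items)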
+func (r Product_Item_Price) GetUsageRatePrices(location *datatypes.Location, items []datatypes.Product_Item) (resp []datatypes.Product_Item_Price, err error) { + params := []interface{}{ + location, + items, + } + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getUsageRatePrices", params, &r.Options, &resp) + return +} + +// no documentation yet +type Product_Item_Price_Premium struct { + Session *session.Session + Options sl.Options +} + +// GetProductItemPricePremiumService returns an instance of the Product_Item_Price_Premium SoftLayer service +func GetProductItemPricePremiumService(sess *session.Session) Product_Item_Price_Premium { + return Product_Item_Price_Premium{Session: sess} +} + +func (r Product_Item_Price_Premium) Id(id int) Product_Item_Price_Premium { + r.Options.Id = &id + return r +} + +func (r Product_Item_Price_Premium) Mask(mask string) Product_Item_Price_Premium { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Item_Price_Premium) Filter(filter string) Product_Item_Price_Premium { + r.Options.Filter = filter + return r +} + +func (r Product_Item_Price_Premium) Limit(limit int) Product_Item_Price_Premium { + r.Options.Limit = &limit + return r +} + +func (r Product_Item_Price_Premium) Offset(offset int) Product_Item_Price_Premium { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Product_Item_Price_Premium) GetItemPrice() (resp datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price_Premium", "getItemPrice", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Item_Price_Premium) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price_Premium", "getLocation", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Price_Premium) GetObject() (resp datatypes.Product_Item_Price_Premium, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price_Premium", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Item_Price_Premium) GetPackage() (resp datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price_Premium", "getPackage", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Product_Order struct { + Session *session.Session + Options sl.Options +} + +// GetProductOrderService returns an instance of the Product_Order SoftLayer service +func GetProductOrderService(sess *session.Session) Product_Order { + return Product_Order{Session: sess} +} + +func (r Product_Order) Id(id int) Product_Order { + r.Options.Id = &id + return r +} + +func (r Product_Order) Mask(mask string) Product_Order { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Order) Filter(filter string) Product_Order { + r.Options.Filter = filter + return r +} + +func (r Product_Order) Limit(limit int) Product_Order { + r.Options.Limit = &limit + return r +} + +func (r Product_Order) Offset(offset int) Product_Order { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Product_Order) CheckItemAvailability(itemPrices []datatypes.Product_Item_Price, accountId *int, availabilityTypeKeyNames []string) (resp 
bool, err error) { + params := []interface{}{ + itemPrices, + accountId, + availabilityTypeKeyNames, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "checkItemAvailability", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Order) CheckItemAvailabilityForImageTemplate(imageTemplateId *int, accountId *int, packageId *int, availabilityTypeKeyNames []string) (resp bool, err error) { + params := []interface{}{ + imageTemplateId, + accountId, + packageId, + availabilityTypeKeyNames, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "checkItemAvailabilityForImageTemplate", params, &r.Options, &resp) + return +} + +// Check order items for conflicts +func (r Product_Order) CheckItemConflicts(itemPrices []datatypes.Product_Item_Price) (resp bool, err error) { + params := []interface{}{ + itemPrices, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "checkItemConflicts", params, &r.Options, &resp) + return +} + +// This method simply returns a receipt for a previously finalized payment authorization from PayPal. The response matches the response returned from placeOrder when the order was originally placed with PayPal as the payment type. +func (r Product_Order) GetExternalPaymentAuthorizationReceipt(token *string, payerId *string) (resp datatypes.Container_Product_Order_Receipt, err error) { + params := []interface{}{ + token, + payerId, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "getExternalPaymentAuthorizationReceipt", params, &r.Options, &resp) + return +} + +// This method returns a collection of [[SoftLayer_Container_Product_Order_Network]] objects. This will contain the available networks that can be used when ordering services. +// +// If a location id is supplied, the list of networks will be trimmed down to only those that are available at that particular datacenter. +// +// If a package id is supplied, the list of public VLANs and subnets will be trimmed down to those that are available for that particular package. +// +// The account id is for internal use only and will be ignored when supplied by customers. +func (r Product_Order) GetNetworks(locationId *int, packageId *int, accountId *int) (resp []datatypes.Container_Product_Order_Network, err error) { + params := []interface{}{ + locationId, + packageId, + accountId, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "getNetworks", params, &r.Options, &resp) + return +} + +// When the account is on an external reseller brand, this service will provide a SoftLayer_Product_Order with the the pricing adjusted by the external reseller. +func (r Product_Order) GetResellerOrder(orderContainer *datatypes.Container_Product_Order) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + orderContainer, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "getResellerOrder", params, &r.Options, &resp) + return +} + +// Sometimes taxes cannot be calculated immediately, so we start the calculations and let them run in the background. This method will return the current progress and information related to a specific tax calculation, which allows real-time progress updates on tax calculations. 
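+//
+// A minimal usage sketch (the order hash is a placeholder for the value returned with a pending order; sess is an existing *session.Session; not part of the generated API):
+//
+//	orderSvc := services.GetProductOrderService(sess)
+//	taxStatus, err := orderSvc.GetTaxCalculationResult(sl.String("example-order-hash"))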
+func (r Product_Order) GetTaxCalculationResult(orderHash *string) (resp datatypes.Container_Tax_Cache, err error) { + params := []interface{}{ + orderHash, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "getTaxCalculationResult", params, &r.Options, &resp) + return +} + +// Return collections of public and private VLANs that are available during ordering. If a location ID is provided, the resulting VLANs will be limited to that location. If the Virtual Server package id (46) is provided, the VLANs will be narrowed down to those locations that contain routers with the VIRTUAL_IMAGE_STORE data attribute. +// +// For the selectedItems parameter, this is a comma-separated string of category codes and item values. For example: +// +//
+// - port_speed=10,guest_disk0=LOCAL_DISK
+// - port_speed=100,disk0=SAN_DISK
+// - port_speed=100,private_network_only=1,guest_disk0=LOCAL_DISK
+//
+// This parameter is used to narrow the available results down even further. It's not necessary when selecting a VLAN, but it will help avoid errors when attempting to place an order. The only acceptable category codes are:
+//
+// - port_speed
+// - A disk category, such as guest_disk0 or disk0, with values of either LOCAL_DISK or SAN_DISK
+// - private_network_only
+// - dual_path_network
    +// +// For most customers, it's sufficient to only provide the first 2 parameters. +func (r Product_Order) GetVlans(locationId *int, packageId *int, selectedItems *string, vlanIds []int, subnetIds []int, accountId *int, orderContainer *datatypes.Container_Product_Order, hardwareFirewallOrderedFlag *bool) (resp datatypes.Container_Product_Order_Network_Vlans, err error) { + params := []interface{}{ + locationId, + packageId, + selectedItems, + vlanIds, + subnetIds, + accountId, + orderContainer, + hardwareFirewallOrderedFlag, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "getVlans", params, &r.Options, &resp) + return +} + +// +// Use this method to place bare metal server, virtual server and additional service orders with SoftLayer. Upon success, your credit card or PayPal account will incur charges for the monthly order total (or prorated value if ordered mid billing cycle). If all products on the order are only billed hourly, you will be charged on your billing anniversary date, which occurs monthly on the day you ordered your first service with SoftLayer. For new customers, you are required to provide billing information when you place an order. For existing customers, the credit card on file will be charged. If you're a PayPal customer, a URL will be returned from the call to [[SoftLayer_Product_Order/placeOrder|placeOrder]] which is to be used to finish the authorization process. This authorization tells PayPal that you indeed want to place an order with SoftLayer. From PayPal's web site, you will be redirected back to SoftLayer for your order receipt.

+//
+// When an order is placed, your order will be in a "pending approval" state. When all internal checks pass, your order will be automatically approved. For orders that may need extra attention, a Sales representative will review the order and contact you if necessary. Once the order is approved, your server or service will be provisioned and available to you shortly thereafter. Depending on the type of server or service ordered, provisioning times will vary.
+//
+// Order Containers
+//
+// When placing API orders, it's important to order your server and services on the appropriate [[SoftLayer_Container_Product_Order (type)|order container]]. Failing to provide the correct container may delay your server or service from being provisioned in a timely manner. Some common order containers are included below.
+//
+// Note: SoftLayer_Container_Product_Order_ has been removed from the containers in the table below for readability.
+//
+// Product | Order container | Package type
+// ------- | --------------- | ------------
+// Bare metal server by CPU | [[SoftLayer_Container_Product_Order_Hardware_Server (type)|Hardware_Server]] | BARE_METAL_CPU
+// Bare metal server by core | [[SoftLayer_Container_Product_Order_Hardware_Server (type)|Hardware_Server]] | BARE_METAL_CORE
+// Virtual server | [[SoftLayer_Container_Product_Order_Virtual_Guest (type)|Virtual_Guest]] | VIRTUAL_SERVER_INSTANCE
+// DNS domain registration | [[SoftLayer_Container_Product_Order_Dns_Domain_Registration (type)|Dns_Domain_Registration]] | ADDITIONAL_SERVICES
+// Local & dedicated load balancers | [[SoftLayer_Container_Product_Order_Network_LoadBalancer (type)|Network_LoadBalancer]] | ADDITIONAL_SERVICES_LOAD_BALANCER
+// Content delivery network | [[SoftLayer_Container_Product_Order_Network_ContentDelivery_Account (type)|Network_ContentDelivery_Account]] | ADDITIONAL_SERVICES_CDN
+// Content delivery network Addon | [[SoftLayer_Container_Product_Order_Network_ContentDelivery_Account_Addon (type)|Network_ContentDelivery_Account_Addon]] | ADDITIONAL_SERVICES_CDN_ADDON
+// Hardware & software firewalls | [[SoftLayer_Container_Product_Order_Network_Protection_Firewall (type)|Network_Protection_Firewall]] | ADDITIONAL_SERVICES_FIREWALL
+// Dedicated firewall | [[SoftLayer_Container_Product_Order_Network_Protection_Firewall_Dedicated (type)|Network_Protection_Firewall_Dedicated]] | ADDITIONAL_SERVICES_FIREWALL
+// Object storage | [[SoftLayer_Container_Product_Order_Network_Storage_Object (type)|Network_Storage_Object]] | ADDITIONAL_SERVICES_OBJECT_STORAGE
+// Object storage (hub) | [[SoftLayer_Container_Product_Order_Network_Storage_Hub (type)|Network_Storage_Hub]] | ADDITIONAL_SERVICES_OBJECT_STORAGE
+// Network attached storage | [[SoftLayer_Container_Product_Order_Network_Storage_Nas (type)|Network_Storage_Nas]] | ADDITIONAL_SERVICES_NETWORK_ATTACHED_STORAGE
+// Iscsi storage | [[SoftLayer_Container_Product_Order_Network_Storage_Iscsi (type)|Network_Storage_Iscsi]] | ADDITIONAL_SERVICES_ISCSI_STORAGE
+// Evault | [[SoftLayer_Container_Product_Order_Network_Storage_Backup_Evault_Vault (type)|Network_Storage_Backup_Evault_Vault]] | ADDITIONAL_SERVICES
+// Evault Plugin | [[SoftLayer_Container_Product_Order_Network_Storage_Backup_Evault_Plugin (type)|Network_Storage_Backup_Evault_Plugin]] | ADDITIONAL_SERVICES
+// Application delivery appliance | [[SoftLayer_Container_Product_Order_Network_Application_Delivery_Controller (type)|Network_Application_Delivery_Controller]] | ADDITIONAL_SERVICES_APPLICATION_DELIVERY_APPLIANCE
+// Network subnet | [[SoftLayer_Container_Product_Order_Network_Subnet (type)|Network_Subnet]] | ADDITIONAL_SERVICES
+// Global IPv4 | [[SoftLayer_Container_Product_Order_Network_Subnet (type)|Network_Subnet]] | ADDITIONAL_SERVICES_GLOBAL_IP_ADDRESSES
+// Global IPv6 | [[SoftLayer_Container_Product_Order_Network_Subnet (type)|Network_Subnet]] | ADDITIONAL_SERVICES_GLOBAL_IP_ADDRESSES
+// Network VLAN | [[SoftLayer_Container_Product_Order_Network_Vlan (type)|Network_Vlan]] | ADDITIONAL_SERVICES_NETWORK_VLAN
+// Portable storage | [[SoftLayer_Container_Product_Order_Virtual_Disk_Image (type)|Virtual_Disk_Image]] | ADDITIONAL_SERVICES_PORTABLE_STORAGE
+// SSL certificate | [[SoftLayer_Container_Product_Order_Security_Certificate (type)|Security_Certificate]] | ADDITIONAL_SERVICES_SSL_CERTIFICATE
+// External authentication | [[SoftLayer_Container_Product_Order_User_Customer_External_Binding (type)|User_Customer_External_Binding]] | ADDITIONAL_SERVICES
+// Dedicated Host | [[SoftLayer_Container_Product_Order_Virtual_DedicatedHost (type)|Virtual_DedicatedHosts]] | DEDICATED_HOST
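+//
+// As a Go-side counterpart to the XML examples that follow, the sketch below builds one of the containers from the table above and submits it for verification; the same container can then be passed to placeOrder. It is illustrative only: the package id, location, price ids and host names are placeholders, sess is an existing *session.Session, and the field names are assumed to follow the softlayer-go datatypes package.
+//
+//	orderSvc := services.GetProductOrderService(sess)
+//	order := datatypes.Container_Product_Order_Virtual_Guest{
+//		Container_Product_Order: datatypes.Container_Product_Order{
+//			PackageId:        sl.Int(46),
+//			Location:         sl.String("DALLAS13"),
+//			Quantity:         sl.Int(1),
+//			UseHourlyPricing: sl.Bool(true),
+//			Prices:           []datatypes.Product_Item_Price{{Id: sl.Int(2159)}},
+//			VirtualGuests: []datatypes.Virtual_Guest{
+//				{Hostname: sl.String("server1"), Domain: sl.String("example.com")},
+//			},
+//		},
+//	}
+//	verified, err := orderSvc.VerifyOrder(&order)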
+//
+// Server example
+//
+// This example includes a single bare metal server being ordered with monthly billing.
+//
+// Warning: the price ids provided below may be outdated or unavailable, so you will need to determine the available prices from the bare metal server [[SoftLayer_Product_Package/getAllObjects|packages]], which have a [[SoftLayer_Product_Package_Type (type)|package type]] of '''BARE_METAL_CPU''' or '''BARE_METAL_CORE'''. You can get a full list of [[SoftLayer_Product_Package_Type/getAllObjects|package types]] to see other potentially available server packages.
+//
+// (XML request example omitted: the original markup did not survive extraction.)
+//

+//
+// Virtual server example
+//
+// This example includes 2 identical virtual servers (except for hostname) being ordered for hourly billing. It includes an optional image template id and VLAN data specified on the virtualGuest objects - primaryBackendNetworkComponent and primaryNetworkComponent.
+//
+// Warning: the price ids provided below may be outdated or unavailable, so you will need to determine the available prices from the virtual server [[SoftLayer_Product_Package/getAllObjects|package]], which has a [[SoftLayer_Product_Package_Type (type)|package type]] of '''VIRTUAL_SERVER_INSTANCE'''.
+//
+// (XML request example omitted: the original markup did not survive extraction.)
+//

+//
+// VLAN example
+//
+// Warning: the price ids provided below may be outdated or unavailable, so you will need to determine the available prices from the additional services [[SoftLayer_Product_Package/getAllObjects|package]], which has a [[SoftLayer_Product_Package_Type (type)|package type]] of '''ADDITIONAL_SERVICES'''. You can get a full list of [[SoftLayer_Product_Package_Type/getAllObjects|package types]] to find other available additional service packages.
+//
+// (XML request example omitted: the original markup did not survive extraction.)
+//

+//
+// Multiple products example
+//
+// This example includes a combination of the above examples in a single order. Note that all the configuration options for each individual order container are the same as above, except now we encapsulate each one within the orderContainers property on the base [[SoftLayer_Container_Product_Order (type)|order container]].
+//
+// Warning: not all products are available to be ordered with other products. For example, since SSL certificates require validation from a 3rd party, the approval process may take days or even weeks, and this would not be acceptable when you need your hourly virtual server right now. To better accommodate customers, we restrict several products to be ordered individually.
+//
+// (XML request example omitted: the original markup did not survive extraction.)
+//
+func (r Product_Order) PlaceOrder(orderData interface{}, saveAsQuote *bool) (resp datatypes.Container_Product_Order_Receipt, err error) { + err = datatypes.SetComplexType(orderData) + if err != nil { + return + } + params := []interface{}{ + orderData, + saveAsQuote, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "placeOrder", params, &r.Options, &resp) + return +} + +// Use this method for placing server quotes and additional services quotes. The same applies for this as with verifyOrder. Send in the SoftLayer_Container_Product_Order_Hardware_Server for server quotes. After placing the quote, you must go to this URL to finish the order process. After going to this URL, it will direct you back to a SoftLayer webpage that tells us you have finished the process. After this, it will go to sales for final approval. +func (r Product_Order) PlaceQuote(orderData *datatypes.Container_Product_Order) (resp datatypes.Container_Product_Order_Receipt, err error) { + params := []interface{}{ + orderData, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "placeQuote", params, &r.Options, &resp) + return +} + +// This method simply finalizes an authorization from PayPal. It tells SoftLayer that the customer has completed the PayPal process. This is ONLY needed if you, the customer, have your own API into PayPal and wish to automate authorizations from PayPal and our system. For most, this method will not be needed. Once an order is placed using placeOrder() for PayPal customers, a URL is given back to the customer. In it is the token and PayerID. If you want to systematically pay with PayPal, do so then call this method with the token and PayerID. +func (r Product_Order) ProcessExternalPaymentAuthorization(token *string, payerId *string) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + token, + payerId, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "processExternalPaymentAuthorization", params, &r.Options, &resp) + return +} + +// Get list of items that are required with the item prices provided +func (r Product_Order) RequiredItems(itemPrices []datatypes.Product_Item_Price) (resp []datatypes.Product_Item, err error) { + params := []interface{}{ + itemPrices, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "requiredItems", params, &r.Options, &resp) + return +} + +// This service is used to verify that an order meets all the necessary requirements to purchase a server, virtual server or service from SoftLayer. It will verify that the products requested do not conflict. For example, you cannot order a Windows firewall with a Linux operating system. It will also check to make sure you have provided all the products that are required for the [[SoftLayer_Product_Package_Order_Configuration (type)|package configuration]] associated with the [[SoftLayer_Product_Package|package id]] on each of the [[SoftLayer_Container_Product_Order (type)|order containers]] specified.

    +// +// This service returns the same container that was provided, but with additional information that can be used for debugging or validation. It will also contain pricing information (prorated if applicable) for each of the products on the order. If an exception occurs during verification, a container with the SoftLayer_Exception_Order exception type will be specified in the result.

    +// +// verifyOrder accepts the same [[SoftLayer_Container_Product_Order (type)|container types]] as placeOrder, so see [[SoftLayer_Product_Order/placeOrder|placeOrder]] for more details. +// +// +func (r Product_Order) VerifyOrder(orderData interface{}) (resp datatypes.Container_Product_Order, err error) { + err = datatypes.SetComplexType(orderData) + if err != nil { + return + } + params := []interface{}{ + orderData, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "verifyOrder", params, &r.Options, &resp) + return +} + +// The SoftLayer_Product_Package data type contains information about packages from which orders can be generated. Packages contain general information regarding what is in them, where they are currently sold, availability, and pricing. +type Product_Package struct { + Session *session.Session + Options sl.Options +} + +// GetProductPackageService returns an instance of the Product_Package SoftLayer service +func GetProductPackageService(sess *session.Session) Product_Package { + return Product_Package{Session: sess} +} + +func (r Product_Package) Id(id int) Product_Package { + r.Options.Id = &id + return r +} + +func (r Product_Package) Mask(mask string) Product_Package { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Package) Filter(filter string) Product_Package { + r.Options.Filter = filter + return r +} + +func (r Product_Package) Limit(limit int) Product_Package { + r.Options.Limit = &limit + return r +} + +func (r Product_Package) Offset(offset int) Product_Package { + r.Options.Offset = &offset + return r +} + +// Retrieve The preset configurations available only for the authenticated account and this package. +func (r Product_Package) GetAccountRestrictedActivePresets() (resp []datatypes.Product_Package_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAccountRestrictedActivePresets", nil, &r.Options, &resp) + return +} + +// Retrieve The results from this call are similar to [[SoftLayer_Product_Package/getCategories|getCategories]], but these ONLY include account-restricted prices. Not all accounts have restricted pricing. +func (r Product_Package) GetAccountRestrictedCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAccountRestrictedCategories", nil, &r.Options, &resp) + return +} + +// Retrieve The flag to indicate if there are any restricted prices in a package for the currently-active account. +func (r Product_Package) GetAccountRestrictedPricesFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAccountRestrictedPricesFlag", nil, &r.Options, &resp) + return +} + +// Return a list of Items in the package with their active prices. +func (r Product_Package) GetActiveItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActiveItems", nil, &r.Options, &resp) + return +} + +// This method is deprecated and should not be used in production code. +// +// This method will return the [[SoftLayer_Product_Package]] objects from which you can order a bare metal server, virtual server, service (such as CDN or Object Storage) or other software filtered by an attribute type associated with the package. 
Once you have the package you want to order from, you may query one of various endpoints from that package to get specific information about its products and pricing. See [[SoftLayer_Product_Package/getCategories|getCategories]] or [[SoftLayer_Product_Package/getItems|getItems]] for more information. +func (r Product_Package) GetActivePackagesByAttribute(attributeKeyName *string) (resp []datatypes.Product_Package, err error) { + params := []interface{}{ + attributeKeyName, + } + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActivePackagesByAttribute", params, &r.Options, &resp) + return +} + +// Retrieve The available preset configurations for this package. +func (r Product_Package) GetActivePresets() (resp []datatypes.Product_Package_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActivePresets", nil, &r.Options, &resp) + return +} + +// This method pulls all the active private hosted cloud packages. This will give you a basic description of the packages that are currently active and from which you can order private hosted cloud configurations. +func (r Product_Package) GetActivePrivateHostedCloudPackages() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActivePrivateHostedCloudPackages", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of valid RAM items available for purchase in this package. +func (r Product_Package) GetActiveRamItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActiveRamItems", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of valid server items available for purchase in this package. +func (r Product_Package) GetActiveServerItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActiveServerItems", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of valid software items available for purchase in this package. +func (r Product_Package) GetActiveSoftwareItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActiveSoftwareItems", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of [[SoftLayer_Product_Item_Price]] objects for pay-as-you-go usage. +func (r Product_Package) GetActiveUsagePrices() (resp []datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActiveUsagePrices", nil, &r.Options, &resp) + return +} + +// This method returns a collection of active usage rate [[SoftLayer_Product_Item_Price]] objects for the current package and specified datacenter. Optionally you can retrieve the active usage rate prices for a particular [[SoftLayer_Product_Item_Category]] by specifying a category code as the first parameter. This information is useful so that you can see "pay as you go" rates (if any) for the current package, location and optionally category. +func (r Product_Package) GetActiveUsageRatePrices(locationId *int, categoryCode *string) (resp []datatypes.Product_Item_Price, err error) { + params := []interface{}{ + locationId, + categoryCode, + } + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActiveUsageRatePrices", params, &r.Options, &resp) + return +} + +// Retrieve This flag indicates that the package is an additional service. 
+func (r Product_Package) GetAdditionalServiceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAdditionalServiceFlag", nil, &r.Options, &resp) + return +} + +// This method pulls all the active packages. This will give you a basic description of the packages that are currently active +func (r Product_Package) GetAllObjects() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Package) GetAttributes() (resp []datatypes.Product_Package_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of valid locations for this package. (Deprecated - Use [[SoftLayer_Product_Package/getRegions|getRegions]]) +func (r Product_Package) GetAvailableLocations() (resp []datatypes.Product_Package_Locations, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAvailableLocations", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package) GetAvailablePackagesForImageTemplate(imageTemplate *datatypes.Virtual_Guest_Block_Device_Template_Group) (resp []datatypes.Product_Package, err error) { + params := []interface{}{ + imageTemplate, + } + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAvailablePackagesForImageTemplate", params, &r.Options, &resp) + return +} + +// Retrieve The maximum number of available disk storage units associated with the servers in a package. +func (r Product_Package) GetAvailableStorageUnits() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAvailableStorageUnits", nil, &r.Options, &resp) + return +} + +// Retrieve This is a collection of categories ([[SoftLayer_Product_Item_Category]]) associated with a package which can be used for ordering. These categories have several objects prepopulated which are useful when determining the available products for purchase. The categories contain groups ([[SoftLayer_Product_Package_Item_Category_Group]]) that organize the products and prices by similar features. For example, operating systems will be grouped by their manufacturer and virtual server disks will be grouped by their disk type (SAN vs. local). Each group will contain prices ([[SoftLayer_Product_Item_Price]]) which you can use determine the cost of each product. Each price has a product ([[SoftLayer_Product_Item]]) which provides the name and other useful information about the server, service or software you may purchase. +func (r Product_Package) GetCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getCategories", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package) GetCdnItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getCdnItems", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package) GetCloudStorageItems(provider *int) (resp []datatypes.Product_Item, err error) { + params := []interface{}{ + provider, + } + err = r.Session.DoRequest("SoftLayer_Product_Package", "getCloudStorageItems", params, &r.Options, &resp) + return +} + +// Retrieve The item categories associated with a package, including information detailing which item categories are required as part of a SoftLayer product order. 
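The getCategories description above lays out a category → group → price → item hierarchy. A short sketch, assuming a placeholder package id, pulls that tree in one request via an object mask and prints the top level; the nested group/price/item property names in the mask follow that description.

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// listOrderingCategories prints the ordering categories of one package. The
// object mask pre-fetches the groups, prices and items described above so a
// single request returns the whole tree.
func listOrderingCategories(sess *session.Session, packageID int) {
	pkgSvc := services.GetProductPackageService(sess).Id(packageID)

	cats, err := pkgSvc.Mask("id,categoryCode,name,groups[prices[item]]").GetCategories()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range cats {
		fmt.Printf("%s (%s)\n", *c.Name, *c.CategoryCode)
	}
}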
+func (r Product_Package) GetConfiguration() (resp []datatypes.Product_Package_Order_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getConfiguration", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of valid RAM items available for purchase in this package. +func (r Product_Package) GetDefaultRamItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getDefaultRamItems", nil, &r.Options, &resp) + return +} + +// Retrieve The node type for a package in a solution deployment. +func (r Product_Package) GetDeploymentNodeType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getDeploymentNodeType", nil, &r.Options, &resp) + return +} + +// Retrieve The packages that are allowed in a multi-server solution. (Deprecated) +func (r Product_Package) GetDeploymentPackages() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getDeploymentPackages", nil, &r.Options, &resp) + return +} + +// Retrieve The solution deployment type. +func (r Product_Package) GetDeploymentType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getDeploymentType", nil, &r.Options, &resp) + return +} + +// Retrieve The package that represents a multi-server solution. (Deprecated) +func (r Product_Package) GetDeployments() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getDeployments", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates the package does not allow custom disk partitions. +func (r Product_Package) GetDisallowCustomDiskPartitions() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getDisallowCustomDiskPartitions", nil, &r.Options, &resp) + return +} + +// Retrieve The Softlayer order step is optionally step-based. This returns the first SoftLayer_Product_Package_Order_Step in the step-based order process. +func (r Product_Package) GetFirstOrderStep() (resp datatypes.Product_Package_Order_Step, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getFirstOrderStep", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the package is a specialized network gateway appliance package. +func (r Product_Package) GetGatewayApplianceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getGatewayApplianceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates that the package supports GPUs. +func (r Product_Package) GetGpuFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getGpuFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether the package contains prices that can be ordered hourly. +func (r Product_Package) GetHourlyBillingAvailableFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getHourlyBillingAvailableFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Packages with this flag do not allow monthly orders. +func (r Product_Package) GetHourlyOnlyOrders() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getHourlyOnlyOrders", nil, &r.Options, &resp) + return +} + +// Returns a collection of SoftLayer_Product_Item_Attribute_Type objects. 
These item attribute types specifically deal with when an item, SoftLayer_Product_Item, from the product catalog may no longer be available. The keynames for these attribute types start with 'UNAVAILABLE_AFTER_DATE_*', where the '*' may represent any string. For example, 'UNAVAILABLE_AFTER_DATE_NEW_ORDERS', signifies that the item is not available for new orders. There is a catch all attribute type, 'UNAVAILABLE_AFTER_DATE_ALL'. If an item has one of these availability attributes set, the value should be a valid date in MM/DD/YYYY, indicating the date after which the item will no longer be available. +func (r Product_Package) GetItemAvailabilityTypes() (resp []datatypes.Product_Item_Attribute_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItemAvailabilityTypes", nil, &r.Options, &resp) + return +} + +// Retrieve The item-item conflicts associated with a package. +func (r Product_Package) GetItemConflicts() (resp []datatypes.Product_Item_Resource_Conflict, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItemConflicts", nil, &r.Options, &resp) + return +} + +// Retrieve The item-location conflicts associated with a package. +func (r Product_Package) GetItemLocationConflicts() (resp []datatypes.Product_Item_Resource_Conflict, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItemLocationConflicts", nil, &r.Options, &resp) + return +} + +// Retrieve cross reference for item prices +func (r Product_Package) GetItemPriceReferences() (resp []datatypes.Product_Package_Item_Prices, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItemPriceReferences", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of SoftLayer_Product_Item_Prices that are valid for this package. +func (r Product_Package) GetItemPrices() (resp []datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItemPrices", nil, &r.Options, &resp) + return +} + +// Return a collection of SoftLayer_Item_Price objects from a collection of SoftLayer_Software_Description +func (r Product_Package) GetItemPricesFromSoftwareDescriptions(softwareDescriptions []datatypes.Software_Description, includeTranslationsFlag *bool, returnAllPricesFlag *bool) (resp []datatypes.Product_Item_Price, err error) { + params := []interface{}{ + softwareDescriptions, + includeTranslationsFlag, + returnAllPricesFlag, + } + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItemPricesFromSoftwareDescriptions", params, &r.Options, &resp) + return +} + +// Retrieve A collection of valid items available for purchase in this package. +func (r Product_Package) GetItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItems", nil, &r.Options, &resp) + return +} + +// Return a collection of [[SoftLayer_Product_Item]] objects from a [[SoftLayer_Virtual_Guest_Block_Device_Template_Group]] object +func (r Product_Package) GetItemsFromImageTemplate(imageTemplate *datatypes.Virtual_Guest_Block_Device_Template_Group) (resp []datatypes.Product_Item, err error) { + params := []interface{}{ + imageTemplate, + } + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItemsFromImageTemplate", params, &r.Options, &resp) + return +} + +// Retrieve A collection of valid locations for this package. 
(Deprecated - Use [[SoftLayer_Product_Package/getRegions|getRegions]]) +func (r Product_Package) GetLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getLocations", nil, &r.Options, &resp) + return +} + +// Retrieve The lowest server [[SoftLayer_Product_Item_Price]] related to this package. +func (r Product_Package) GetLowestServerPrice() (resp datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getLowestServerPrice", nil, &r.Options, &resp) + return +} + +// Retrieve The maximum available network speed associated with the package. +func (r Product_Package) GetMaximumPortSpeed() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getMaximumPortSpeed", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package) GetMessageQueueItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getMessageQueueItems", nil, &r.Options, &resp) + return +} + +// Retrieve The minimum available network speed associated with the package. +func (r Product_Package) GetMinimumPortSpeed() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getMinimumPortSpeed", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates that this is a MongoDB engineered package. (Deprecated) +func (r Product_Package) GetMongoDbEngineeredFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getMongoDbEngineeredFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the package is not in compliance with EU support. +func (r Product_Package) GetNonEuCompliantFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getNonEuCompliantFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package) GetObject() (resp datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getObject", nil, &r.Options, &resp) + return +} + +// This method will return a collection of [[SoftLayer_Container_Product_Order_Network_Storage_Hub_Datacenter]] objects which contain a datacenter location and all the associated active usage rate prices where object storage is available. This method is really only applicable to the object storage additional service package which has a [[SoftLayer_Product_Package_Type]] of '''ADDITIONAL_SERVICES_OBJECT_STORAGE'''. This information is useful so that you can see the "pay as you go" rates per datacenter. +func (r Product_Package) GetObjectStorageDatacenters() (resp []datatypes.Container_Product_Order_Network_Storage_Hub_Datacenter, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getObjectStorageDatacenters", nil, &r.Options, &resp) + return +} + +// This method will return a collection of [[SoftLayer_Container_Product_Order_Network_Storage_ObjectStorage_LocationGroup]] objects which contain a location group and all the associated active usage rate prices where object storage is available. This method is really only applicable to the object storage additional service package which has a [[SoftLayer_Product_Package_Type]] of '''ADDITIONAL_SERVICES_OBJECT_STORAGE'''. This information is useful so that you can see the "pay as you go" rates per location group. 
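As a rough sketch of the object-storage rate lookup just described: the package id here is a placeholder that would need to be resolved first (for example by filtering package types on ADDITIONAL_SERVICES_OBJECT_STORAGE), and only the count of returned datacenter containers is inspected.

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// objectStorageDatacenters reports where object storage can be ordered for the
// given package. objectStoragePackageID is a placeholder for the id of the
// ADDITIONAL_SERVICES_OBJECT_STORAGE package.
func objectStorageDatacenters(sess *session.Session, objectStoragePackageID int) {
	pkgSvc := services.GetProductPackageService(sess).Id(objectStoragePackageID)

	dcs, err := pkgSvc.GetObjectStorageDatacenters()
	if err != nil {
		log.Fatal(err)
	}
	// Each returned container pairs a datacenter location with its active
	// usage-rate prices.
	fmt.Printf("object storage is orderable in %d datacenters\n", len(dcs))
}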
+func (r Product_Package) GetObjectStorageLocationGroups() (resp []datatypes.Container_Product_Order_Network_Storage_ObjectStorage_LocationGroup, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getObjectStorageLocationGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The premium price modifiers associated with the [[SoftLayer_Product_Item_Price]] and [[SoftLayer_Location]] objects in a package. +func (r Product_Package) GetOrderPremiums() (resp []datatypes.Product_Item_Price_Premium, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getOrderPremiums", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates if the package may be available in PoP locations in addition to Datacenters. +func (r Product_Package) GetPopLocationAvailabilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getPopLocationAvailabilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates the package is pre-configured. (Deprecated) +func (r Product_Package) GetPreconfiguredFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getPreconfiguredFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the package requires the user to define a preset configuration. +func (r Product_Package) GetPresetConfigurationRequiredFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getPresetConfigurationRequiredFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the package prevents the user from specifying a Vlan. +func (r Product_Package) GetPreventVlanSelectionFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getPreventVlanSelectionFlag", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates the package is for a private hosted cloud deployment. (Deprecated) +func (r Product_Package) GetPrivateHostedCloudPackageFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getPrivateHostedCloudPackageFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The server role of the private hosted cloud deployment. (Deprecated) +func (r Product_Package) GetPrivateHostedCloudPackageType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getPrivateHostedCloudPackageType", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the package only has access to the private network. +func (r Product_Package) GetPrivateNetworkOnlyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getPrivateNetworkOnlyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the package is a specialized mass storage QuantaStor package. (Deprecated) +func (r Product_Package) GetQuantaStorPackageFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getQuantaStorPackageFlag", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates the package does not allow different disks with RAID. +func (r Product_Package) GetRaidDiskRestrictionFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getRaidDiskRestrictionFlag", nil, &r.Options, &resp) + return +} + +// Retrieve This flag determines if the package contains a redundant power supply product. 
+func (r Product_Package) GetRedundantPowerFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getRedundantPowerFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The regional locations that a package is available in. +func (r Product_Package) GetRegions() (resp []datatypes.Location_Region, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getRegions", nil, &r.Options, &resp) + return +} + +// Retrieve The resource group template that describes a multi-server solution. (Deprecated) +func (r Product_Package) GetResourceGroupTemplate() (resp datatypes.Resource_Group_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getResourceGroupTemplate", nil, &r.Options, &resp) + return +} + +// This call is similar to [[SoftLayer_Product_Package/getCategories|getCategories]], except that it does not include account-restricted pricing. Not all accounts have restricted pricing. +func (r Product_Package) GetStandardCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getStandardCategories", nil, &r.Options, &resp) + return +} + +// Retrieve The top level category code for this service offering. +func (r Product_Package) GetTopLevelItemCategoryCode() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getTopLevelItemCategoryCode", nil, &r.Options, &resp) + return +} + +// Retrieve The type of service offering. This property can be used to help filter packages. +func (r Product_Package) GetType() (resp datatypes.Product_Package_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getType", nil, &r.Options, &resp) + return +} + +// Package presets are used to simplify ordering by eliminating the need for price ids when submitting orders. +// +// Orders submitted with a preset id defined will use the prices included in the package preset. Prices submitted on an order with a preset id will replace the prices included in the package preset for that prices category. If the package preset has a fixed configuration flag (fixedConfigurationFlag) set then the prices included in the preset configuration cannot be replaced by prices submitted on the order. The only exception to the fixed configuration flag would be if a price submitted on the order is an account-restricted price for the same product item. 
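A hedged sketch of ordering against a preset as described above, using the VerifyOrder call defined earlier in this file so nothing is actually purchased. The package id, preset id and location are placeholders, GetProductOrderService is the constructor for the Product_Order service shown above, and the Container_Product_Order field names are assumed to mirror the SoftLayer order container properties (packageId, presetId, location, quantity).

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// verifyPresetOrder verifies, without placing, an order that relies on a package
// preset instead of individual price ids. All ids and the location are placeholders.
func verifyPresetOrder(sess *session.Session, packageID, presetID int) {
	location := "DALLAS13" // placeholder datacenter key name
	quantity := 1

	order := datatypes.Container_Product_Order{
		PackageId: &packageID,
		PresetId:  &presetID, // prices come from the preset configuration
		Location:  &location,
		Quantity:  &quantity,
	}

	orderSvc := services.GetProductOrderService(sess)
	if _, err := orderSvc.VerifyOrder(&order); err != nil {
		log.Fatal(err)
	}
	fmt.Println("preset order verified for package", packageID)
}

Verifying first is a cheap way to confirm the preset's configuration accepts the order before a real placeOrder call.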
+type Product_Package_Preset struct { + Session *session.Session + Options sl.Options +} + +// GetProductPackagePresetService returns an instance of the Product_Package_Preset SoftLayer service +func GetProductPackagePresetService(sess *session.Session) Product_Package_Preset { + return Product_Package_Preset{Session: sess} +} + +func (r Product_Package_Preset) Id(id int) Product_Package_Preset { + r.Options.Id = &id + return r +} + +func (r Product_Package_Preset) Mask(mask string) Product_Package_Preset { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Package_Preset) Filter(filter string) Product_Package_Preset { + r.Options.Filter = filter + return r +} + +func (r Product_Package_Preset) Limit(limit int) Product_Package_Preset { + r.Options.Limit = &limit + return r +} + +func (r Product_Package_Preset) Offset(offset int) Product_Package_Preset { + r.Options.Offset = &offset + return r +} + +// This method returns all the active package presets. +func (r Product_Package_Preset) GetAllObjects() (resp []datatypes.Product_Package_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Package_Preset) GetAvailableStorageUnits() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getAvailableStorageUnits", nil, &r.Options, &resp) + return +} + +// Retrieve The item categories that are included in this package preset configuration. +func (r Product_Package_Preset) GetCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getCategories", nil, &r.Options, &resp) + return +} + +// Retrieve The compute family this configuration belongs to. +func (r Product_Package_Preset) GetComputeGroup() (resp datatypes.Product_Item_Server_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getComputeGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The preset configuration (category and price). +func (r Product_Package_Preset) GetConfiguration() (resp []datatypes.Product_Package_Preset_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getConfiguration", nil, &r.Options, &resp) + return +} + +// Retrieve When true this preset is only allowed to upgrade/downgrade to other presets in the same compute family. +func (r Product_Package_Preset) GetDisallowedComputeGroupUpgradeFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getDisallowedComputeGroupUpgradeFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A package preset with this flag set will not allow the price's defined in the preset configuration to be overriden during order placement. +func (r Product_Package_Preset) GetFixedConfigurationFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getFixedConfigurationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The locations this preset configuration is available in. If empty the preset is available in all locations the package is available in. 
+func (r Product_Package_Preset) GetLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getLocations", nil, &r.Options, &resp) + return +} + +// Retrieve The lowest server prices related to this package preset. +func (r Product_Package_Preset) GetLowestPresetServerPrice() (resp datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getLowestPresetServerPrice", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package_Preset) GetObject() (resp datatypes.Product_Package_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The package this preset belongs to. +func (r Product_Package_Preset) GetPackage() (resp datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getPackage", nil, &r.Options, &resp) + return +} + +// Retrieve The item categories associated with a package preset, including information detailing which item categories are required as part of a SoftLayer product order. +func (r Product_Package_Preset) GetPackageConfiguration() (resp []datatypes.Product_Package_Order_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getPackageConfiguration", nil, &r.Options, &resp) + return +} + +// Retrieve The item prices that are included in this package preset configuration. +func (r Product_Package_Preset) GetPrices() (resp []datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getPrices", nil, &r.Options, &resp) + return +} + +// Retrieve Describes how all disks in this preset will be configured. +func (r Product_Package_Preset) GetStorageGroupTemplateArrays() (resp []datatypes.Configuration_Storage_Group_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getStorageGroupTemplateArrays", nil, &r.Options, &resp) + return +} + +// Retrieve The starting hourly price for this configuration. Additional options not defined in the preset may increase the cost. +func (r Product_Package_Preset) GetTotalMinimumHourlyFee() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getTotalMinimumHourlyFee", nil, &r.Options, &resp) + return +} + +// Retrieve The starting monthly price for this configuration. Additional options not defined in the preset may increase the cost. +func (r Product_Package_Preset) GetTotalMinimumRecurringFee() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getTotalMinimumRecurringFee", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Product_Package_Server data type contains summarized information for bare metal servers regarding pricing, processor stats, and feature sets. 
+type Product_Package_Server struct { + Session *session.Session + Options sl.Options +} + +// GetProductPackageServerService returns an instance of the Product_Package_Server SoftLayer service +func GetProductPackageServerService(sess *session.Session) Product_Package_Server { + return Product_Package_Server{Session: sess} +} + +func (r Product_Package_Server) Id(id int) Product_Package_Server { + r.Options.Id = &id + return r +} + +func (r Product_Package_Server) Mask(mask string) Product_Package_Server { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Package_Server) Filter(filter string) Product_Package_Server { + r.Options.Filter = filter + return r +} + +func (r Product_Package_Server) Limit(limit int) Product_Package_Server { + r.Options.Limit = &limit + return r +} + +func (r Product_Package_Server) Offset(offset int) Product_Package_Server { + r.Options.Offset = &offset + return r +} + +// This method will grab all the package servers. +func (r Product_Package_Server) GetAllObjects() (resp []datatypes.Product_Package_Server, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Package_Server) GetCatalog() (resp datatypes.Product_Catalog, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server", "getCatalog", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Package_Server) GetItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server", "getItem", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Package_Server) GetItemPrice() (resp datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server", "getItemPrice", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package_Server) GetObject() (resp datatypes.Product_Package_Server, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Package_Server) GetPackage() (resp datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server", "getPackage", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Package_Server) GetPreset() (resp datatypes.Product_Package_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server", "getPreset", nil, &r.Options, &resp) + return +} + +// The [[SoftLayer_Product_Package_Server_Option]] data type contains various data points associated with package servers that can be used in selection criteria. 
+type Product_Package_Server_Option struct { + Session *session.Session + Options sl.Options +} + +// GetProductPackageServerOptionService returns an instance of the Product_Package_Server_Option SoftLayer service +func GetProductPackageServerOptionService(sess *session.Session) Product_Package_Server_Option { + return Product_Package_Server_Option{Session: sess} +} + +func (r Product_Package_Server_Option) Id(id int) Product_Package_Server_Option { + r.Options.Id = &id + return r +} + +func (r Product_Package_Server_Option) Mask(mask string) Product_Package_Server_Option { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Package_Server_Option) Filter(filter string) Product_Package_Server_Option { + r.Options.Filter = filter + return r +} + +func (r Product_Package_Server_Option) Limit(limit int) Product_Package_Server_Option { + r.Options.Limit = &limit + return r +} + +func (r Product_Package_Server_Option) Offset(offset int) Product_Package_Server_Option { + r.Options.Offset = &offset + return r +} + +// This method will grab all the package server options. +func (r Product_Package_Server_Option) GetAllOptions() (resp []datatypes.Product_Package_Server_Option, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server_Option", "getAllOptions", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package_Server_Option) GetObject() (resp datatypes.Product_Package_Server_Option, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server_Option", "getObject", nil, &r.Options, &resp) + return +} + +// This method will grab all the package server options for the specified type. +func (r Product_Package_Server_Option) GetOptions(typ *string) (resp []datatypes.Product_Package_Server_Option, err error) { + params := []interface{}{ + typ, + } + err = r.Session.DoRequest("SoftLayer_Product_Package_Server_Option", "getOptions", params, &r.Options, &resp) + return +} + +// The [[SoftLayer_Product_Package_Type]] object indicates the type for a service offering (package). The type can be used to filter packages. For example, if you are looking for the package representing virtual servers, you can filter on the type's key name of '''VIRTUAL_SERVER_INSTANCE'''. For bare metal servers by core or CPU, filter on '''BARE_METAL_CORE''' or '''BARE_METAL_CPU''', respectively. 
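Following the filtering hint above, here is a sketch that narrows getAllObjects to one package type key name and then lists its packages. The raw objectFilter string is an assumption about the filter shape (the SDK's filter helpers could build it instead), and the key name is whatever the caller passes in.

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// packagesForType lists the packages behind one package type key name, for
// example VIRTUAL_SERVER_INSTANCE or BARE_METAL_CPU.
func packagesForType(sess *session.Session, keyName string) {
	typeSvc := services.GetProductPackageTypeService(sess)

	// Raw objectFilter limiting getAllObjects to the requested key name.
	objFilter := fmt.Sprintf(`{"keyName":{"operation":"%s"}}`, keyName)

	types, err := typeSvc.Filter(objFilter).GetAllObjects()
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range types {
		pkgs, err := typeSvc.Id(*t.Id).GetPackages()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: %d packages\n", *t.KeyName, len(pkgs))
	}
}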
+type Product_Package_Type struct { + Session *session.Session + Options sl.Options +} + +// GetProductPackageTypeService returns an instance of the Product_Package_Type SoftLayer service +func GetProductPackageTypeService(sess *session.Session) Product_Package_Type { + return Product_Package_Type{Session: sess} +} + +func (r Product_Package_Type) Id(id int) Product_Package_Type { + r.Options.Id = &id + return r +} + +func (r Product_Package_Type) Mask(mask string) Product_Package_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Package_Type) Filter(filter string) Product_Package_Type { + r.Options.Filter = filter + return r +} + +func (r Product_Package_Type) Limit(limit int) Product_Package_Type { + r.Options.Limit = &limit + return r +} + +func (r Product_Package_Type) Offset(offset int) Product_Package_Type { + r.Options.Offset = &offset + return r +} + +// This method will return all of the available package types. +func (r Product_Package_Type) GetAllObjects() (resp []datatypes.Product_Package_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package_Type) GetObject() (resp datatypes.Product_Package_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Type", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve All the packages associated with the given package type. +func (r Product_Package_Type) GetPackages() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Type", "getPackages", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Product_Upgrade_Request data type contains general information relating to a hardware, virtual server, or service upgrade. It also relates a [[SoftLayer_Billing_Order]] to a [[SoftLayer_Ticket]]. +type Product_Upgrade_Request struct { + Session *session.Session + Options sl.Options +} + +// GetProductUpgradeRequestService returns an instance of the Product_Upgrade_Request SoftLayer service +func GetProductUpgradeRequestService(sess *session.Session) Product_Upgrade_Request { + return Product_Upgrade_Request{Session: sess} +} + +func (r Product_Upgrade_Request) Id(id int) Product_Upgrade_Request { + r.Options.Id = &id + return r +} + +func (r Product_Upgrade_Request) Mask(mask string) Product_Upgrade_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Upgrade_Request) Filter(filter string) Product_Upgrade_Request { + r.Options.Filter = filter + return r +} + +func (r Product_Upgrade_Request) Limit(limit int) Product_Upgrade_Request { + r.Options.Limit = &limit + return r +} + +func (r Product_Upgrade_Request) Offset(offset int) Product_Upgrade_Request { + r.Options.Offset = &offset + return r +} + +// When a change is made to an upgrade by Sales, this method will approve the changes that were made. A customer must acknowledge the change and approve it so that the upgrade request can proceed. 
+func (r Product_Upgrade_Request) ApproveChanges() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "approveChanges", nil, &r.Options, &resp) + return +} + +// Retrieve The account that an order belongs to +func (r Product_Upgrade_Request) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve Indicates that the upgrade request has completed or has been cancelled. +func (r Product_Upgrade_Request) GetCompletedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getCompletedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve This is the invoice associated with the upgrade request. For hourly servers or services, an invoice will not be available. +func (r Product_Upgrade_Request) GetInvoice() (resp datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getInvoice", nil, &r.Options, &resp) + return +} + +// getObject retrieves a SoftLayer_Product_Upgrade_Request object on your account whose ID corresponds to the ID of the init parameter passed to the SoftLayer_Product_Upgrade_Request service. +func (r Product_Upgrade_Request) GetObject() (resp datatypes.Product_Upgrade_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve An order record associated to the upgrade request +func (r Product_Upgrade_Request) GetOrder() (resp datatypes.Billing_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getOrder", nil, &r.Options, &resp) + return +} + +// Retrieve A server object associated with the upgrade request if any. +func (r Product_Upgrade_Request) GetServer() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getServer", nil, &r.Options, &resp) + return +} + +// Retrieve The current status of the upgrade request. +func (r Product_Upgrade_Request) GetStatus() (resp datatypes.Product_Upgrade_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The ticket that is used to coordinate the upgrade process. +func (r Product_Upgrade_Request) GetTicket() (resp datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getTicket", nil, &r.Options, &resp) + return +} + +// Retrieve The user that placed the order. +func (r Product_Upgrade_Request) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getUser", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual server object associated with the upgrade request if any. +func (r Product_Upgrade_Request) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// In case an upgrade cannot be performed, the maintenance window needs to be updated to a future date. 
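A sketch of the approve-then-reschedule flow described in the last two comments. The upgrade request id and maintenance window id are placeholders, and datatypes.Time is assumed to embed a time.Time.

import (
	"fmt"
	"log"
	"time"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// rescheduleUpgrade approves pending Sales changes on an upgrade request and
// moves its maintenance start a week out. Both ids are placeholders.
func rescheduleUpgrade(sess *session.Session, upgradeRequestID, maintenanceWindowID int) {
	upgradeSvc := services.GetProductUpgradeRequestService(sess).Id(upgradeRequestID)

	if _, err := upgradeSvc.ApproveChanges(); err != nil {
		log.Fatal(err)
	}

	// Assumes datatypes.Time embeds time.Time.
	start := datatypes.Time{Time: time.Now().AddDate(0, 0, 7)}
	ok, err := upgradeSvc.UpdateMaintenanceWindow(&start, &maintenanceWindowID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("maintenance window updated:", ok)
}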
+func (r Product_Upgrade_Request) UpdateMaintenanceWindow(maintenanceStartTime *datatypes.Time, maintenanceWindowId *int) (resp bool, err error) { + params := []interface{}{ + maintenanceStartTime, + maintenanceWindowId, + } + err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "updateMaintenanceWindow", params, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/provisioning.go b/vendor/github.com/softlayer/softlayer-go/services/provisioning.go new file mode 100644 index 000000000000..dc426bbb285e --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/provisioning.go @@ -0,0 +1,564 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// The SoftLayer_Provisioning_Hook contains all the information needed to add a hook into a server/Virtual provision and os reload. +type Provisioning_Hook struct { + Session *session.Session + Options sl.Options +} + +// GetProvisioningHookService returns an instance of the Provisioning_Hook SoftLayer service +func GetProvisioningHookService(sess *session.Session) Provisioning_Hook { + return Provisioning_Hook{Session: sess} +} + +func (r Provisioning_Hook) Id(id int) Provisioning_Hook { + r.Options.Id = &id + return r +} + +func (r Provisioning_Hook) Mask(mask string) Provisioning_Hook { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Provisioning_Hook) Filter(filter string) Provisioning_Hook { + r.Options.Filter = filter + return r +} + +func (r Provisioning_Hook) Limit(limit int) Provisioning_Hook { + r.Options.Limit = &limit + return r +} + +func (r Provisioning_Hook) Offset(offset int) Provisioning_Hook { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Provisioning_Hook) CreateObject(templateObject *datatypes.Provisioning_Hook) (resp datatypes.Provisioning_Hook, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Provisioning_Hook", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Provisioning_Hook) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Hook", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Provisioning_Hook) EditObject(templateObject *datatypes.Provisioning_Hook) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Provisioning_Hook", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Provisioning_Hook) GetAccount() (resp datatypes.Account, err 
error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Hook", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Provisioning_Hook) GetHookType() (resp datatypes.Provisioning_Hook_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Hook", "getHookType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Provisioning_Hook) GetObject() (resp datatypes.Provisioning_Hook, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Hook", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Provisioning_Hook_Type struct { + Session *session.Session + Options sl.Options +} + +// GetProvisioningHookTypeService returns an instance of the Provisioning_Hook_Type SoftLayer service +func GetProvisioningHookTypeService(sess *session.Session) Provisioning_Hook_Type { + return Provisioning_Hook_Type{Session: sess} +} + +func (r Provisioning_Hook_Type) Id(id int) Provisioning_Hook_Type { + r.Options.Id = &id + return r +} + +func (r Provisioning_Hook_Type) Mask(mask string) Provisioning_Hook_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Provisioning_Hook_Type) Filter(filter string) Provisioning_Hook_Type { + r.Options.Filter = filter + return r +} + +func (r Provisioning_Hook_Type) Limit(limit int) Provisioning_Hook_Type { + r.Options.Limit = &limit + return r +} + +func (r Provisioning_Hook_Type) Offset(offset int) Provisioning_Hook_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Provisioning_Hook_Type) GetAllHookTypes() (resp []datatypes.Provisioning_Hook_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Hook_Type", "getAllHookTypes", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Provisioning_Hook_Type) GetObject() (resp datatypes.Provisioning_Hook_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Hook_Type", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Provisioning_Maintenance_Classification represent a maintenance type for the specific hardware maintenance desired. 
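For the SoftLayer_Provisioning_Hook service defined above, a sketch of registering a post-provision script URL as a hook. The Name, Uri and TypeId field names are assumptions based on the SoftLayer_Provisioning_Hook properties, the URL is a placeholder, and the hook type id is taken from getAllHookTypes.

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// createPostInstallHook registers a post-provision script URL as a hook.
// Name, Uri and TypeId are assumed field names; the URL is a placeholder.
func createPostInstallHook(sess *session.Session) {
	// Pick a hook type id from the catalogue of types.
	types, err := services.GetProvisioningHookTypeService(sess).GetAllHookTypes()
	if err != nil || len(types) == 0 {
		log.Fatal("no hook types available: ", err)
	}

	name := "post-install"
	uri := "https://example.com/post_install.sh"
	hook := datatypes.Provisioning_Hook{
		Name:   &name,
		Uri:    &uri,
		TypeId: types[0].Id,
	}

	created, err := services.GetProvisioningHookService(sess).CreateObject(&hook)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created hook", *created.Id)
}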
+type Provisioning_Maintenance_Classification struct { + Session *session.Session + Options sl.Options +} + +// GetProvisioningMaintenanceClassificationService returns an instance of the Provisioning_Maintenance_Classification SoftLayer service +func GetProvisioningMaintenanceClassificationService(sess *session.Session) Provisioning_Maintenance_Classification { + return Provisioning_Maintenance_Classification{Session: sess} +} + +func (r Provisioning_Maintenance_Classification) Id(id int) Provisioning_Maintenance_Classification { + r.Options.Id = &id + return r +} + +func (r Provisioning_Maintenance_Classification) Mask(mask string) Provisioning_Maintenance_Classification { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Provisioning_Maintenance_Classification) Filter(filter string) Provisioning_Maintenance_Classification { + r.Options.Filter = filter + return r +} + +func (r Provisioning_Maintenance_Classification) Limit(limit int) Provisioning_Maintenance_Classification { + r.Options.Limit = &limit + return r +} + +func (r Provisioning_Maintenance_Classification) Offset(offset int) Provisioning_Maintenance_Classification { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Provisioning_Maintenance_Classification) GetItemCategories() (resp []datatypes.Provisioning_Maintenance_Classification_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Classification", "getItemCategories", nil, &r.Options, &resp) + return +} + +// Retrieve an array of SoftLayer_Provisioning_Maintenance_Classification data types, which contain all maintenance classifications. +func (r Provisioning_Maintenance_Classification) GetMaintenanceClassification(maintenanceClassificationId *int) (resp []datatypes.Provisioning_Maintenance_Classification, err error) { + params := []interface{}{ + maintenanceClassificationId, + } + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Classification", "getMaintenanceClassification", params, &r.Options, &resp) + return +} + +// Retrieve an array of SoftLayer_Provisioning_Maintenance_Classification data types, which contain all maintenance classifications. 
+func (r Provisioning_Maintenance_Classification) GetMaintenanceClassificationsByItemCategory() (resp []datatypes.Provisioning_Maintenance_Classification_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Classification", "getMaintenanceClassificationsByItemCategory", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Provisioning_Maintenance_Classification) GetObject() (resp datatypes.Provisioning_Maintenance_Classification, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Classification", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Provisioning_Maintenance_Classification_Item_Category struct { + Session *session.Session + Options sl.Options +} + +// GetProvisioningMaintenanceClassificationItemCategoryService returns an instance of the Provisioning_Maintenance_Classification_Item_Category SoftLayer service +func GetProvisioningMaintenanceClassificationItemCategoryService(sess *session.Session) Provisioning_Maintenance_Classification_Item_Category { + return Provisioning_Maintenance_Classification_Item_Category{Session: sess} +} + +func (r Provisioning_Maintenance_Classification_Item_Category) Id(id int) Provisioning_Maintenance_Classification_Item_Category { + r.Options.Id = &id + return r +} + +func (r Provisioning_Maintenance_Classification_Item_Category) Mask(mask string) Provisioning_Maintenance_Classification_Item_Category { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Provisioning_Maintenance_Classification_Item_Category) Filter(filter string) Provisioning_Maintenance_Classification_Item_Category { + r.Options.Filter = filter + return r +} + +func (r Provisioning_Maintenance_Classification_Item_Category) Limit(limit int) Provisioning_Maintenance_Classification_Item_Category { + r.Options.Limit = &limit + return r +} + +func (r Provisioning_Maintenance_Classification_Item_Category) Offset(offset int) Provisioning_Maintenance_Classification_Item_Category { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Provisioning_Maintenance_Classification_Item_Category) GetMaintenanceClassification() (resp datatypes.Provisioning_Maintenance_Classification, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Classification_Item_Category", "getMaintenanceClassification", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Provisioning_Maintenance_Classification_Item_Category) GetObject() (resp datatypes.Provisioning_Maintenance_Classification_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Classification_Item_Category", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Provisioning_Maintenance_Slots represent the available slots for a given maintenance window at a SoftLayer data center. 
+type Provisioning_Maintenance_Slots struct { + Session *session.Session + Options sl.Options +} + +// GetProvisioningMaintenanceSlotsService returns an instance of the Provisioning_Maintenance_Slots SoftLayer service +func GetProvisioningMaintenanceSlotsService(sess *session.Session) Provisioning_Maintenance_Slots { + return Provisioning_Maintenance_Slots{Session: sess} +} + +func (r Provisioning_Maintenance_Slots) Id(id int) Provisioning_Maintenance_Slots { + r.Options.Id = &id + return r +} + +func (r Provisioning_Maintenance_Slots) Mask(mask string) Provisioning_Maintenance_Slots { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Provisioning_Maintenance_Slots) Filter(filter string) Provisioning_Maintenance_Slots { + r.Options.Filter = filter + return r +} + +func (r Provisioning_Maintenance_Slots) Limit(limit int) Provisioning_Maintenance_Slots { + r.Options.Limit = &limit + return r +} + +func (r Provisioning_Maintenance_Slots) Offset(offset int) Provisioning_Maintenance_Slots { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Provisioning_Maintenance_Slots) GetObject() (resp datatypes.Provisioning_Maintenance_Slots, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Slots", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Provisioning_Maintenance_Ticket struct { + Session *session.Session + Options sl.Options +} + +// GetProvisioningMaintenanceTicketService returns an instance of the Provisioning_Maintenance_Ticket SoftLayer service +func GetProvisioningMaintenanceTicketService(sess *session.Session) Provisioning_Maintenance_Ticket { + return Provisioning_Maintenance_Ticket{Session: sess} +} + +func (r Provisioning_Maintenance_Ticket) Id(id int) Provisioning_Maintenance_Ticket { + r.Options.Id = &id + return r +} + +func (r Provisioning_Maintenance_Ticket) Mask(mask string) Provisioning_Maintenance_Ticket { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Provisioning_Maintenance_Ticket) Filter(filter string) Provisioning_Maintenance_Ticket { + r.Options.Filter = filter + return r +} + +func (r Provisioning_Maintenance_Ticket) Limit(limit int) Provisioning_Maintenance_Ticket { + r.Options.Limit = &limit + return r +} + +func (r Provisioning_Maintenance_Ticket) Offset(offset int) Provisioning_Maintenance_Ticket { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Provisioning_Maintenance_Ticket) GetAvailableSlots() (resp datatypes.Provisioning_Maintenance_Slots, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Ticket", "getAvailableSlots", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Provisioning_Maintenance_Ticket) GetMaintenanceClass() (resp datatypes.Provisioning_Maintenance_Classification, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Ticket", "getMaintenanceClass", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Provisioning_Maintenance_Ticket) GetObject() (resp datatypes.Provisioning_Maintenance_Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Ticket", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r 
Provisioning_Maintenance_Ticket) GetTicket() (resp datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Ticket", "getTicket", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Provisioning_Maintenance_Window represent a time window that SoftLayer performs a hardware or software maintenance and upgrades. +type Provisioning_Maintenance_Window struct { + Session *session.Session + Options sl.Options +} + +// GetProvisioningMaintenanceWindowService returns an instance of the Provisioning_Maintenance_Window SoftLayer service +func GetProvisioningMaintenanceWindowService(sess *session.Session) Provisioning_Maintenance_Window { + return Provisioning_Maintenance_Window{Session: sess} +} + +func (r Provisioning_Maintenance_Window) Id(id int) Provisioning_Maintenance_Window { + r.Options.Id = &id + return r +} + +func (r Provisioning_Maintenance_Window) Mask(mask string) Provisioning_Maintenance_Window { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Provisioning_Maintenance_Window) Filter(filter string) Provisioning_Maintenance_Window { + r.Options.Filter = filter + return r +} + +func (r Provisioning_Maintenance_Window) Limit(limit int) Provisioning_Maintenance_Window { + r.Options.Limit = &limit + return r +} + +func (r Provisioning_Maintenance_Window) Offset(offset int) Provisioning_Maintenance_Window { + r.Options.Offset = &offset + return r +} + +// getMaintenceWindowForTicket() returns a boolean +func (r Provisioning_Maintenance_Window) AddCustomerUpgradeWindow(customerUpgradeWindow *datatypes.Container_Provisioning_Maintenance_Window) (resp bool, err error) { + params := []interface{}{ + customerUpgradeWindow, + } + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "addCustomerUpgradeWindow", params, &r.Options, &resp) + return +} + +// getMaintenanceClassifications() returns an object of maintenance classifications +func (r Provisioning_Maintenance_Window) GetMaintenanceClassifications() (resp []datatypes.Provisioning_Maintenance_Classification, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "getMaintenanceClassifications", nil, &r.Options, &resp) + return +} + +// getMaintenanceStartEndTime() returns a specific maintenance window +func (r Provisioning_Maintenance_Window) GetMaintenanceStartEndTime(ticketId *int) (resp datatypes.Provisioning_Maintenance_Window, err error) { + params := []interface{}{ + ticketId, + } + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "getMaintenanceStartEndTime", params, &r.Options, &resp) + return +} + +// getMaintenceWindowForTicket() returns a specific maintenance window +func (r Provisioning_Maintenance_Window) GetMaintenanceWindowForTicket(maintenanceWindowId *int) (resp []datatypes.Provisioning_Maintenance_Window, err error) { + params := []interface{}{ + maintenanceWindowId, + } + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "getMaintenanceWindowForTicket", params, &r.Options, &resp) + return +} + +// getMaintenanceWindowTicketsByTicketId() returns a list maintenance window ticket records by ticket id +func (r Provisioning_Maintenance_Window) GetMaintenanceWindowTicketsByTicketId(ticketId *int) (resp []datatypes.Provisioning_Maintenance_Ticket, err error) { + params := []interface{}{ + ticketId, + } + err = 
r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "getMaintenanceWindowTicketsByTicketId", params, &r.Options, &resp) + return +} + +// This method returns a list of available maintenance windows +func (r Provisioning_Maintenance_Window) GetMaintenanceWindows(beginDate *datatypes.Time, endDate *datatypes.Time, locationId *int, slotsNeeded *int) (resp []datatypes.Provisioning_Maintenance_Window, err error) { + params := []interface{}{ + beginDate, + endDate, + locationId, + slotsNeeded, + } + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "getMaintenanceWindows", params, &r.Options, &resp) + return +} + +// (DEPRECATED) Use [[SoftLayer_Provisioning_Maintenance_Window::getMaintenanceWindows|getMaintenanceWindows]] method. +func (r Provisioning_Maintenance_Window) GetMaintenceWindows(beginDate *datatypes.Time, endDate *datatypes.Time, locationId *int, slotsNeeded *int) (resp []datatypes.Provisioning_Maintenance_Window, err error) { + params := []interface{}{ + beginDate, + endDate, + locationId, + slotsNeeded, + } + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "getMaintenceWindows", params, &r.Options, &resp) + return +} + +// getMaintenceWindowForTicket() returns a boolean +func (r Provisioning_Maintenance_Window) UpdateCustomerUpgradeWindow(maintenanceStartTime *datatypes.Time, newMaintenanceWindowId *int, ticketId *int) (resp bool, err error) { + params := []interface{}{ + maintenanceStartTime, + newMaintenanceWindowId, + ticketId, + } + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "updateCustomerUpgradeWindow", params, &r.Options, &resp) + return +} + +// The SoftLayer_Provisioning_Version1_Transaction_Group data type contains general information relating to a single SoftLayer hardware transaction group. +// +// SoftLayer customers are unable to change their hardware transactions or the hardware transaction group. 
+type Provisioning_Version1_Transaction_Group struct { + Session *session.Session + Options sl.Options +} + +// GetProvisioningVersion1TransactionGroupService returns an instance of the Provisioning_Version1_Transaction_Group SoftLayer service +func GetProvisioningVersion1TransactionGroupService(sess *session.Session) Provisioning_Version1_Transaction_Group { + return Provisioning_Version1_Transaction_Group{Session: sess} +} + +func (r Provisioning_Version1_Transaction_Group) Id(id int) Provisioning_Version1_Transaction_Group { + r.Options.Id = &id + return r +} + +func (r Provisioning_Version1_Transaction_Group) Mask(mask string) Provisioning_Version1_Transaction_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Provisioning_Version1_Transaction_Group) Filter(filter string) Provisioning_Version1_Transaction_Group { + r.Options.Filter = filter + return r +} + +func (r Provisioning_Version1_Transaction_Group) Limit(limit int) Provisioning_Version1_Transaction_Group { + r.Options.Limit = &limit + return r +} + +func (r Provisioning_Version1_Transaction_Group) Offset(offset int) Provisioning_Version1_Transaction_Group { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Provisioning_Version1_Transaction_Group) GetAllObjects() (resp []datatypes.Provisioning_Version1_Transaction_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Version1_Transaction_Group", "getAllObjects", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Provisioning_Version1_Transaction_Group object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Provisioning_Version1_Transaction_Group service. +func (r Provisioning_Version1_Transaction_Group) GetObject() (resp datatypes.Provisioning_Version1_Transaction_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Version1_Transaction_Group", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/resource.go b/vendor/github.com/softlayer/softlayer-go/services/resource.go new file mode 100644 index 000000000000..47a77317cd42 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/resource.go @@ -0,0 +1,419 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Resource_Configuration struct { + Session *session.Session + Options sl.Options +} + +// GetResourceConfigurationService returns an instance of the Resource_Configuration SoftLayer service +func GetResourceConfigurationService(sess *session.Session) Resource_Configuration { + return Resource_Configuration{Session: sess} +} + +func (r Resource_Configuration) Id(id int) Resource_Configuration { + r.Options.Id = &id + return r +} + +func (r Resource_Configuration) Mask(mask string) Resource_Configuration { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Resource_Configuration) Filter(filter string) Resource_Configuration { + r.Options.Filter = filter + return r +} + +func (r Resource_Configuration) Limit(limit int) Resource_Configuration { + r.Options.Limit = &limit + return r +} + +func (r Resource_Configuration) Offset(offset int) Resource_Configuration { + r.Options.Offset = &offset + return r +} + +// The setOsPasswordFromEncrypted method is used to set the operating system password from a key/pair encrypted password signed by SoftLayer. +func (r Resource_Configuration) SetOsPasswordFromEncrypted(encryptedPassword *string) (resp bool, err error) { + params := []interface{}{ + encryptedPassword, + } + err = r.Session.DoRequest("SoftLayer_Resource_Configuration", "setOsPasswordFromEncrypted", params, &r.Options, &resp) + return +} + +// no documentation yet +type Resource_Group struct { + Session *session.Session + Options sl.Options +} + +// GetResourceGroupService returns an instance of the Resource_Group SoftLayer service +func GetResourceGroupService(sess *session.Session) Resource_Group { + return Resource_Group{Session: sess} +} + +func (r Resource_Group) Id(id int) Resource_Group { + r.Options.Id = &id + return r +} + +func (r Resource_Group) Mask(mask string) Resource_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Resource_Group) Filter(filter string) Resource_Group { + r.Options.Filter = filter + return r +} + +func (r Resource_Group) Limit(limit int) Resource_Group { + r.Options.Limit = &limit + return r +} + +func (r Resource_Group) Offset(offset int) Resource_Group { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Resource_Group) EditObject(templateObject *datatypes.Resource_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Resource_Group", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated group ancestors. +func (r Resource_Group) GetAncestorGroups() (resp []datatypes.Resource_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getAncestorGroups", nil, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated attributes. 
+func (r Resource_Group) GetAttributes() (resp []datatypes.Resource_Group_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated hardware members. +func (r Resource_Group) GetHardwareMembers() (resp []datatypes.Resource_Group_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getHardwareMembers", nil, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated members. +func (r Resource_Group) GetMembers() (resp []datatypes.Resource_Group_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getMembers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Resource_Group) GetObject() (resp datatypes.Resource_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated root resource group. +func (r Resource_Group) GetRootResourceGroup() (resp datatypes.Resource_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getRootResourceGroup", nil, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated subnet members. +func (r Resource_Group) GetSubnetMembers() (resp []datatypes.Resource_Group_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getSubnetMembers", nil, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated template. +func (r Resource_Group) GetTemplate() (resp datatypes.Resource_Group_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getTemplate", nil, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated VLAN members. 
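Before the remaining Resource_Group getters, a brief orientation sketch may help when skimming these generated bindings: every service exposes the same chainable Id/Mask/Filter/Limit/Offset helpers shown above, and the final method call is what actually issues the API request. The snippet below is illustrative only; the credentials and the resource group id are placeholders, and it assumes session.New from the sibling session package returns an authenticated *session.Session.

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	// Placeholder credentials; session.New is assumed to build the authenticated session.
	sess := session.New("SL_USERNAME", "SL_API_KEY")

	// The chainable helpers only populate r.Options; the trailing GetObject call performs
	// the request. Because the mask string below contains commas and brackets, the Mask
	// helper wraps it as "mask[id,members[id]]", matching the logic in the Mask methods above.
	// 123456 is a hypothetical resource group id.
	group, err := services.GetResourceGroupService(sess).
		Id(123456).
		Mask("id,members[id]").
		GetObject()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("resource group id:", *group.Id)
}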
+func (r Resource_Group) GetVlanMembers() (resp []datatypes.Resource_Group_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getVlanMembers", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Resource_Group_Template struct { + Session *session.Session + Options sl.Options +} + +// GetResourceGroupTemplateService returns an instance of the Resource_Group_Template SoftLayer service +func GetResourceGroupTemplateService(sess *session.Session) Resource_Group_Template { + return Resource_Group_Template{Session: sess} +} + +func (r Resource_Group_Template) Id(id int) Resource_Group_Template { + r.Options.Id = &id + return r +} + +func (r Resource_Group_Template) Mask(mask string) Resource_Group_Template { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Resource_Group_Template) Filter(filter string) Resource_Group_Template { + r.Options.Filter = filter + return r +} + +func (r Resource_Group_Template) Limit(limit int) Resource_Group_Template { + r.Options.Limit = &limit + return r +} + +func (r Resource_Group_Template) Offset(offset int) Resource_Group_Template { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Resource_Group_Template) GetAllObjects() (resp []datatypes.Resource_Group_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group_Template", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Resource_Group_Template) GetChildren() (resp []datatypes.Resource_Group_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group_Template", "getChildren", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Resource_Group_Template) GetMembers() (resp []datatypes.Resource_Group_Template_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group_Template", "getMembers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Resource_Group_Template) GetObject() (resp datatypes.Resource_Group_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group_Template", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Resource_Group_Template) GetPackage() (resp datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group_Template", "getPackage", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Resource_Metadata struct { + Session *session.Session + Options sl.Options +} + +// GetResourceMetadataService returns an instance of the Resource_Metadata SoftLayer service +func GetResourceMetadataService(sess *session.Session) Resource_Metadata { + return Resource_Metadata{Session: sess} +} + +func (r Resource_Metadata) Id(id int) Resource_Metadata { + r.Options.Id = &id + return r +} + +func (r Resource_Metadata) Mask(mask string) Resource_Metadata { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Resource_Metadata) Filter(filter string) Resource_Metadata { + r.Options.Filter = filter + return r +} + +func (r Resource_Metadata) Limit(limit int) Resource_Metadata { + r.Options.Limit = &limit + return r +} + +func (r Resource_Metadata) Offset(offset int) Resource_Metadata { + r.Options.Offset = &offset + return r +} + +// The getBackendMacAddresses 
method retrieves a list of backend MAC addresses for the resource +func (r Resource_Metadata) GetBackendMacAddresses() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getBackendMacAddresses", nil, &r.Options, &resp) + return +} + +// The getDatacenter method retrieves the name of the datacenter in which the resource is located. +func (r Resource_Metadata) GetDatacenter() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getDatacenter", nil, &r.Options, &resp) + return +} + +// The getDatacenterId method retrieves the ID for the datacenter in which the resource is located. +func (r Resource_Metadata) GetDatacenterId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getDatacenterId", nil, &r.Options, &resp) + return +} + +// The getDomain method retrieves the domain for the resource. +func (r Resource_Metadata) GetDomain() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getDomain", nil, &r.Options, &resp) + return +} + +// The getFrontendMacAddresses method retrieves a list of frontend MAC addresses for the resource +func (r Resource_Metadata) GetFrontendMacAddresses() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getFrontendMacAddresses", nil, &r.Options, &resp) + return +} + +// The getFullyQualifiedDomainName method provides the user with a combined return which includes the hostname and domain for the resource. Because this method returns multiple pieces of information, it avoids the need to use multiple methods to return the desired information. +func (r Resource_Metadata) GetFullyQualifiedDomainName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getFullyQualifiedDomainName", nil, &r.Options, &resp) + return +} + +// The getGlobalIdentifier method retrieves the globalIdentifier for the resource +func (r Resource_Metadata) GetGlobalIdentifier() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getGlobalIdentifier", nil, &r.Options, &resp) + return +} + +// The getHostname method retrieves the hostname for the resource. +func (r Resource_Metadata) GetHostname() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getHostname", nil, &r.Options, &resp) + return +} + +// The getId method retrieves the ID for the resource +func (r Resource_Metadata) GetId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getId", nil, &r.Options, &resp) + return +} + +// The getPrimaryBackendIpAddress method retrieves the primary backend IP address for the resource +func (r Resource_Metadata) GetPrimaryBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getPrimaryBackendIpAddress", nil, &r.Options, &resp) + return +} + +// The getPrimaryIpAddress method retrieves the primary IP address for the resource. For resources with a frontend network, the frontend IP address will be returned. For resources that have been provisioned with only a backend network, the backend IP address will be returned, as a frontend address will not exist. +func (r Resource_Metadata) GetPrimaryIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getPrimaryIpAddress", nil, &r.Options, &resp) + return +} + +// The getProvisionState method retrieves the provision state of the resource.
The provision state may be used to determine when it is considered safe to perform additional setup operations. The method returns 'PROCESSING' to indicate the provision is in progress and 'COMPLETE' when the provision is complete. +func (r Resource_Metadata) GetProvisionState() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getProvisionState", nil, &r.Options, &resp) + return +} + +// The getRouter method will return the router associated with a network component. When the router is redundant, the hostname of the redundant group will be returned, rather than the router hostname. +func (r Resource_Metadata) GetRouter(macAddress *string) (resp string, err error) { + params := []interface{}{ + macAddress, + } + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getRouter", params, &r.Options, &resp) + return +} + +// The getServiceResource method retrieves a specific service resource associated with the resource. Service resources are additional resources that may be used by this resource. +func (r Resource_Metadata) GetServiceResource(serviceName *string, index *int) (resp string, err error) { + params := []interface{}{ + serviceName, + index, + } + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getServiceResource", params, &r.Options, &resp) + return +} + +// The getServiceResources method retrieves all service resources associated with the resource. Service resources are additional resources that may be used by this resource. The output format is =
    for each service resource. +func (r Resource_Metadata) GetServiceResources() (resp []datatypes.Container_Resource_Metadata_ServiceResource, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getServiceResources", nil, &r.Options, &resp) + return +} + +// The getTags method retrieves all tags associated with the resource. Tags are single keywords assigned to a resource that assist the user in identifying the resource and its roles when performing a simple search. Tags are assigned by any user with access to the resource. +func (r Resource_Metadata) GetTags() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getTags", nil, &r.Options, &resp) + return +} + +// The getUserMetadata method retrieves metadata completed by users who interact with the resource. Metadata gathered using this method is unique to parameters set using the '''setUserMetadata''' method, which must be executed prior to completing this method. User metadata may also be provided while placing an order for a resource. +func (r Resource_Metadata) GetUserMetadata() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getUserMetadata", nil, &r.Options, &resp) + return +} + +// The getVlanIds method returns a list of VLAN IDs for the network component matching the provided MAC address associated with the resource. For each return, the native VLAN will appear first, followed by any trunked VLANs associated with the network component. +func (r Resource_Metadata) GetVlanIds(macAddress *string) (resp []int, err error) { + params := []interface{}{ + macAddress, + } + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getVlanIds", params, &r.Options, &resp) + return +} + +// The getVlans method returns a list of VLAN numbers for the network component matching the provided MAC address associated with the resource. For each return, the native VLAN will appear first, followed by any trunked VLANs associated with the network component. +func (r Resource_Metadata) GetVlans(macAddress *string) (resp []int, err error) { + params := []interface{}{ + macAddress, + } + err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getVlans", params, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/sales.go b/vendor/github.com/softlayer/softlayer-go/services/sales.go new file mode 100644 index 000000000000..4a0e777baad9 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/sales.go @@ -0,0 +1,112 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// The presale event data types indicate the information regarding an individual presale event. 
The '''locationId''' will indicate the datacenter associated with the presale event. The '''itemId''' will indicate the product item associated with a particular presale event - however these are more rare. The '''startDate''' and '''endDate''' will provide information regarding when the presale event is available for use. At the end of the presale event, the server or services purchased will be available once approved and provisioned. +type Sales_Presale_Event struct { + Session *session.Session + Options sl.Options +} + +// GetSalesPresaleEventService returns an instance of the Sales_Presale_Event SoftLayer service +func GetSalesPresaleEventService(sess *session.Session) Sales_Presale_Event { + return Sales_Presale_Event{Session: sess} +} + +func (r Sales_Presale_Event) Id(id int) Sales_Presale_Event { + r.Options.Id = &id + return r +} + +func (r Sales_Presale_Event) Mask(mask string) Sales_Presale_Event { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Sales_Presale_Event) Filter(filter string) Sales_Presale_Event { + r.Options.Filter = filter + return r +} + +func (r Sales_Presale_Event) Limit(limit int) Sales_Presale_Event { + r.Options.Limit = &limit + return r +} + +func (r Sales_Presale_Event) Offset(offset int) Sales_Presale_Event { + r.Options.Offset = &offset + return r +} + +// Retrieve A flag to indicate that the presale event is currently active. A presale event is active if the current time is between the start and end dates. +func (r Sales_Presale_Event) GetActiveFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Sales_Presale_Event", "getActiveFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Sales_Presale_Event) GetAllObjects() (resp []datatypes.Sales_Presale_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Sales_Presale_Event", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve A flag to indicate that the presale event is expired. A presale event is expired if the current time is after the end date. +func (r Sales_Presale_Event) GetExpiredFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Sales_Presale_Event", "getExpiredFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The [[SoftLayer_Product_Item]] associated with the presale event. +func (r Sales_Presale_Event) GetItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Sales_Presale_Event", "getItem", nil, &r.Options, &resp) + return +} + +// Retrieve The [[SoftLayer_Location]] associated with the presale event. +func (r Sales_Presale_Event) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Sales_Presale_Event", "getLocation", nil, &r.Options, &resp) + return +} + +// '''getObject''' retrieves the [[SoftLayer_Sales_Presale_Event]] object whose id number corresponds to the id number of the init parameter passed to the SoftLayer_Sales_Presale_Event service. Customers may only retrieve presale events that are currently active. +func (r Sales_Presale_Event) GetObject() (resp datatypes.Sales_Presale_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Sales_Presale_Event", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The orders ([[SoftLayer_Billing_Order]]) associated with this presale event that were created for the customer's account. 
+func (r Sales_Presale_Event) GetOrders() (resp []datatypes.Billing_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Sales_Presale_Event", "getOrders", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/scale.go b/vendor/github.com/softlayer/softlayer-go/services/scale.go new file mode 100644 index 000000000000..5962ef27dd58 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/scale.go @@ -0,0 +1,1668 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Scale_Asset struct { + Session *session.Session + Options sl.Options +} + +// GetScaleAssetService returns an instance of the Scale_Asset SoftLayer service +func GetScaleAssetService(sess *session.Session) Scale_Asset { + return Scale_Asset{Session: sess} +} + +func (r Scale_Asset) Id(id int) Scale_Asset { + r.Options.Id = &id + return r +} + +func (r Scale_Asset) Mask(mask string) Scale_Asset { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Asset) Filter(filter string) Scale_Asset { + r.Options.Filter = filter + return r +} + +func (r Scale_Asset) Limit(limit int) Scale_Asset { + r.Options.Limit = &limit + return r +} + +func (r Scale_Asset) Offset(offset int) Scale_Asset { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Asset) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Asset) GetObject() (resp datatypes.Scale_Asset, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The group this asset belongs to. 
+func (r Scale_Asset) GetScaleGroup() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset", "getScaleGroup", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Asset_Hardware struct { + Session *session.Session + Options sl.Options +} + +// GetScaleAssetHardwareService returns an instance of the Scale_Asset_Hardware SoftLayer service +func GetScaleAssetHardwareService(sess *session.Session) Scale_Asset_Hardware { + return Scale_Asset_Hardware{Session: sess} +} + +func (r Scale_Asset_Hardware) Id(id int) Scale_Asset_Hardware { + r.Options.Id = &id + return r +} + +func (r Scale_Asset_Hardware) Mask(mask string) Scale_Asset_Hardware { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Asset_Hardware) Filter(filter string) Scale_Asset_Hardware { + r.Options.Filter = filter + return r +} + +func (r Scale_Asset_Hardware) Limit(limit int) Scale_Asset_Hardware { + r.Options.Limit = &limit + return r +} + +func (r Scale_Asset_Hardware) Offset(offset int) Scale_Asset_Hardware { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Asset_Hardware) CreateObject(templateObject *datatypes.Scale_Asset_Hardware) (resp datatypes.Scale_Asset_Hardware, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Hardware", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Asset_Hardware) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Hardware", "deleteObject", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware for this asset. +func (r Scale_Asset_Hardware) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Hardware", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The identifier of the hardware for this asset. +func (r Scale_Asset_Hardware) GetHardwareId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Hardware", "getHardwareId", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Asset_Hardware) GetObject() (resp datatypes.Scale_Asset_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Hardware", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The group this asset belongs to. 
+func (r Scale_Asset_Hardware) GetScaleGroup() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Hardware", "getScaleGroup", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Asset_Virtual_Guest struct { + Session *session.Session + Options sl.Options +} + +// GetScaleAssetVirtualGuestService returns an instance of the Scale_Asset_Virtual_Guest SoftLayer service +func GetScaleAssetVirtualGuestService(sess *session.Session) Scale_Asset_Virtual_Guest { + return Scale_Asset_Virtual_Guest{Session: sess} +} + +func (r Scale_Asset_Virtual_Guest) Id(id int) Scale_Asset_Virtual_Guest { + r.Options.Id = &id + return r +} + +func (r Scale_Asset_Virtual_Guest) Mask(mask string) Scale_Asset_Virtual_Guest { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Asset_Virtual_Guest) Filter(filter string) Scale_Asset_Virtual_Guest { + r.Options.Filter = filter + return r +} + +func (r Scale_Asset_Virtual_Guest) Limit(limit int) Scale_Asset_Virtual_Guest { + r.Options.Limit = &limit + return r +} + +func (r Scale_Asset_Virtual_Guest) Offset(offset int) Scale_Asset_Virtual_Guest { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Asset_Virtual_Guest) CreateObject(templateObject *datatypes.Scale_Asset_Virtual_Guest) (resp datatypes.Scale_Asset_Virtual_Guest, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Virtual_Guest", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Asset_Virtual_Guest) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Virtual_Guest", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Asset_Virtual_Guest) GetObject() (resp datatypes.Scale_Asset_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Virtual_Guest", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The group this asset belongs to. +func (r Scale_Asset_Virtual_Guest) GetScaleGroup() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Virtual_Guest", "getScaleGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The guest for this asset. +func (r Scale_Asset_Virtual_Guest) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Virtual_Guest", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// Retrieve The identifier of the guest for this asset. 
+func (r Scale_Asset_Virtual_Guest) GetVirtualGuestId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Virtual_Guest", "getVirtualGuestId", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Group struct { + Session *session.Session + Options sl.Options +} + +// GetScaleGroupService returns an instance of the Scale_Group SoftLayer service +func GetScaleGroupService(sess *session.Session) Scale_Group { + return Scale_Group{Session: sess} +} + +func (r Scale_Group) Id(id int) Scale_Group { + r.Options.Id = &id + return r +} + +func (r Scale_Group) Mask(mask string) Scale_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Group) Filter(filter string) Scale_Group { + r.Options.Filter = filter + return r +} + +func (r Scale_Group) Limit(limit int) Scale_Group { + r.Options.Limit = &limit + return r +} + +func (r Scale_Group) Offset(offset int) Scale_Group { + r.Options.Offset = &offset + return r +} + +// Create a scale group. If minimumMemberCount is greater than zero or desiredMemberCount is present, guest members will be created right away. +func (r Scale_Group) CreateObject(templateObject *datatypes.Scale_Group) (resp datatypes.Scale_Group, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Group", "createObject", params, &r.Options, &resp) + return +} + +// Delete this group. This can only be done on an empty, active group. This means that minimumMemberCount must be 0 since it is the only way for a group to have no group members. To delete a group and all of its members at the same time, use forceDeleteObject. +func (r Scale_Group) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "deleteObject", nil, &r.Options, &resp) + return +} + +// Edit this group. The name can be edited at any time. The minimumMemberCount and maximumMemberCount fields can also be edited at any time provided they don't force a scale up or scale down to bring the group into the proper range. Otherwise, the group's status must be active to set those fields. If the group member count is less than the new minimumMemberCount and the group is active, it will scale up the group members to reach the new minimum. Similarly if the group member count is greater than the new maximumMemberCount and the group is active, it will scale down the group members to reach the new maximum. +// +// When editing an active group, a special field can be provided: desiredMemberCount. When given, the group members are automatically scaled up or down to reach that number. +func (r Scale_Group) EditObject(templateObject *datatypes.Scale_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Group", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Group) ForceDeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "forceDeleteObject", nil, &r.Options, &resp) + return +} + +// Retrieve The account for this scaling group. +func (r Scale_Group) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getAccount", nil, &r.Options, &resp) + return +} + +// This returns the number of hourly instances an account can add from this point. 
It is essentially the same as [[SoftLayer_Account/hourlyInstanceLimit|hourlyInstanceLimit]] minus existing hourly instances and ones spoken for as part of a scaling group (as determined by the group's maximum). This number can be used to help determine a maximum member count for a new group to ensure it won't go over the account limit. This can return a negative value if the current hourly instance count combined with the unused-but-possible count (based on other scale group maximums) is over the limit. +func (r Scale_Group) GetAvailableHourlyInstanceLimit() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getAvailableHourlyInstanceLimit", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Group) GetAvailableRegionalGroups() (resp []datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getAvailableRegionalGroups", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of load balancers for this auto scale group. +func (r Scale_Group) GetLoadBalancers() (resp []datatypes.Scale_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getLoadBalancers", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of log entries for this group. +func (r Scale_Group) GetLogs() (resp []datatypes.Scale_Group_Log, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getLogs", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of VLANs for this auto scale group. VLANs are optional. This can contain a public or private VLAN or both. When a single VLAN for a public/private type is given it can be a non-purchased VLAN only if the minimumMemberCount on the group is >= 1. This can also contain any number of public/private purchased VLANs and members are staggered across them when scaled up. +func (r Scale_Group) GetNetworkVlans() (resp []datatypes.Scale_Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getNetworkVlans", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Group) GetObject() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of policies for this group. This can be empty. +func (r Scale_Group) GetPolicies() (resp []datatypes.Scale_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getPolicies", nil, &r.Options, &resp) + return +} + +// Retrieve The regional group for this scale group. +func (r Scale_Group) GetRegionalGroup() (resp datatypes.Location_Group_Regional, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getRegionalGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The status for this scale group. +func (r Scale_Group) GetStatus() (resp datatypes.Scale_Group_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The termination policy for this scaling group. +func (r Scale_Group) GetTerminationPolicy() (resp datatypes.Scale_Termination_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getTerminationPolicy", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of guests that have been pinned to this group. Guest assets are only used for certain trigger checks such as resource watches. 
They do not count towards the auto scaling guest counts of this group in anyway and are never automatically added or removed. +func (r Scale_Group) GetVirtualGuestAssets() (resp []datatypes.Scale_Asset_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getVirtualGuestAssets", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of guests that have been scaled with the group. When this group is active, the count of guests here is guaranteed to be between minimumMemberCount and maximumMemberCount inclusively. +func (r Scale_Group) GetVirtualGuestMembers() (resp []datatypes.Scale_Member_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getVirtualGuestMembers", nil, &r.Options, &resp) + return +} + +// Resume this group. The group must be in a suspended status to do this. By doing this, the group's status will become active. +func (r Scale_Group) Resume() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Scale_Group", "resume", nil, &r.Options, &resp) + return +} + +// Scale this group up or down by the amount given. If the number is negative, the given amount of guest members are removed. Similarly, if the number is positive, the given amount of guest members are added. Note, this call will add or remove as much as asked for, but will NOT go beyond the limits set by minimumMemberCount and maximumMemberCount. The result is a collection of SoftLayer_Scale_Member instances that were either removed or added. This call can only be invoked on an active group and does not respect cooldown (i.e. even if in a cooldown period, the scaling will still occur). +func (r Scale_Group) Scale(delta *int) (resp []datatypes.Scale_Member, err error) { + params := []interface{}{ + delta, + } + err = r.Session.DoRequest("SoftLayer_Scale_Group", "scale", params, &r.Options, &resp) + return +} + +// Scale this group up or down to the number given. This call will add or remove as many guests as necessary, but will NOT go beyond the limits set by minimumMemberCount and maximumMemberCount. This call and its result are the equivalent of calling scale(number - virtualGuestMemberCount). This call can only be invoked on an active group and does not respect cooldown (i.e. even if in a cooldown period, the scaling will still occur). +func (r Scale_Group) ScaleTo(number *int) (resp []datatypes.Scale_Member, err error) { + params := []interface{}{ + number, + } + err = r.Session.DoRequest("SoftLayer_Scale_Group", "scaleTo", params, &r.Options, &resp) + return +} + +// Suspend this group. The group must be in an active status to do this. While suspended, a group cannot add or remove guest members for any reason. Changes to group settings that will cause a member to be added or deleted is also not allowed. 
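Before the suspend/resume helpers that follow, a short hedged sketch of the scale/scaleTo contract described above; it is illustrative only, the group id is hypothetical, the credentials are placeholders, and sl.Int is assumed to be the pointer helper from the sl package.

package main

import (
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials

	// 987654 is a hypothetical id of an active SoftLayer_Scale_Group.
	scaleGroup := services.GetScaleGroupService(sess).Id(987654)

	// scale(+2) adds at most two guest members and never exceeds maximumMemberCount;
	// a negative delta would remove members instead.
	added, err := scaleGroup.Scale(sl.Int(2))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("scale(+2) added %d member(s)", len(added))

	// scaleTo(5) behaves like scale(5 - current member count): it converges on the
	// requested size while staying inside the minimum/maximum bounds.
	changed, err := scaleGroup.ScaleTo(sl.Int(5))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("scaleTo(5) touched %d member(s)", len(changed))
}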
+func (r Scale_Group) Suspend() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Scale_Group", "suspend", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Group_Status struct { + Session *session.Session + Options sl.Options +} + +// GetScaleGroupStatusService returns an instance of the Scale_Group_Status SoftLayer service +func GetScaleGroupStatusService(sess *session.Session) Scale_Group_Status { + return Scale_Group_Status{Session: sess} +} + +func (r Scale_Group_Status) Id(id int) Scale_Group_Status { + r.Options.Id = &id + return r +} + +func (r Scale_Group_Status) Mask(mask string) Scale_Group_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Group_Status) Filter(filter string) Scale_Group_Status { + r.Options.Filter = filter + return r +} + +func (r Scale_Group_Status) Limit(limit int) Scale_Group_Status { + r.Options.Limit = &limit + return r +} + +func (r Scale_Group_Status) Offset(offset int) Scale_Group_Status { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Group_Status) GetAllObjects() (resp []datatypes.Scale_Group_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group_Status", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Group_Status) GetObject() (resp datatypes.Scale_Group_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group_Status", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_LoadBalancer struct { + Session *session.Session + Options sl.Options +} + +// GetScaleLoadBalancerService returns an instance of the Scale_LoadBalancer SoftLayer service +func GetScaleLoadBalancerService(sess *session.Session) Scale_LoadBalancer { + return Scale_LoadBalancer{Session: sess} +} + +func (r Scale_LoadBalancer) Id(id int) Scale_LoadBalancer { + r.Options.Id = &id + return r +} + +func (r Scale_LoadBalancer) Mask(mask string) Scale_LoadBalancer { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_LoadBalancer) Filter(filter string) Scale_LoadBalancer { + r.Options.Filter = filter + return r +} + +func (r Scale_LoadBalancer) Limit(limit int) Scale_LoadBalancer { + r.Options.Limit = &limit + return r +} + +func (r Scale_LoadBalancer) Offset(offset int) Scale_LoadBalancer { + r.Options.Offset = &offset + return r +} + +// Create a load balancer for a scale group. Once created, the configuration will be used to configure the load balancers for autoscaled members. +// +// If the given virtual server port exists for the given virtual IP address, it is reused here if all the other values match. Otherwise, the virtual server port will be created. +func (r Scale_LoadBalancer) CreateObject(templateObject *datatypes.Scale_LoadBalancer) (resp datatypes.Scale_LoadBalancer, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "createObject", params, &r.Options, &resp) + return +} + +// Delete this load balancer configuration. Note, this does not affect existing scaled members. Once deleted however, future scaled members will not be load balanced with this configuration. 
+func (r Scale_LoadBalancer) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "deleteObject", nil, &r.Options, &resp) + return +} + +// Edit this load balancer configuration. Note, this does not affect existing scaled members. Once edited however, future scaled members will be load balanced with this configuration. +func (r Scale_LoadBalancer) EditObject(templateObject *datatypes.Scale_LoadBalancer) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The percentage of connections allocated to this virtual server. +func (r Scale_LoadBalancer) GetAllocationPercent() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getAllocationPercent", nil, &r.Options, &resp) + return +} + +// Retrieve The health check for this configuration. +func (r Scale_LoadBalancer) GetHealthCheck() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getHealthCheck", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_LoadBalancer) GetObject() (resp datatypes.Scale_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The routing method. +func (r Scale_LoadBalancer) GetRoutingMethod() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Method, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getRoutingMethod", nil, &r.Options, &resp) + return +} + +// Retrieve The routing type. +func (r Scale_LoadBalancer) GetRoutingType() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getRoutingType", nil, &r.Options, &resp) + return +} + +// Retrieve The group this load balancer configuration is for. +func (r Scale_LoadBalancer) GetScaleGroup() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getScaleGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The ID of the virtual IP address. +func (r Scale_LoadBalancer) GetVirtualIpAddressId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getVirtualIpAddressId", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual server for this configuration. +func (r Scale_LoadBalancer) GetVirtualServer() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getVirtualServer", nil, &r.Options, &resp) + return +} + +// Retrieve The port on the virtual server. 
+func (r Scale_LoadBalancer) GetVirtualServerPort() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getVirtualServerPort", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Member struct { + Session *session.Session + Options sl.Options +} + +// GetScaleMemberService returns an instance of the Scale_Member SoftLayer service +func GetScaleMemberService(sess *session.Session) Scale_Member { + return Scale_Member{Session: sess} +} + +func (r Scale_Member) Id(id int) Scale_Member { + r.Options.Id = &id + return r +} + +func (r Scale_Member) Mask(mask string) Scale_Member { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Member) Filter(filter string) Scale_Member { + r.Options.Filter = filter + return r +} + +func (r Scale_Member) Limit(limit int) Scale_Member { + r.Options.Limit = &limit + return r +} + +func (r Scale_Member) Offset(offset int) Scale_Member { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Member) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Member) GetObject() (resp datatypes.Scale_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The group this member belongs to. +func (r Scale_Member) GetScaleGroup() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member", "getScaleGroup", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Member_Virtual_Guest struct { + Session *session.Session + Options sl.Options +} + +// GetScaleMemberVirtualGuestService returns an instance of the Scale_Member_Virtual_Guest SoftLayer service +func GetScaleMemberVirtualGuestService(sess *session.Session) Scale_Member_Virtual_Guest { + return Scale_Member_Virtual_Guest{Session: sess} +} + +func (r Scale_Member_Virtual_Guest) Id(id int) Scale_Member_Virtual_Guest { + r.Options.Id = &id + return r +} + +func (r Scale_Member_Virtual_Guest) Mask(mask string) Scale_Member_Virtual_Guest { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Member_Virtual_Guest) Filter(filter string) Scale_Member_Virtual_Guest { + r.Options.Filter = filter + return r +} + +func (r Scale_Member_Virtual_Guest) Limit(limit int) Scale_Member_Virtual_Guest { + r.Options.Limit = &limit + return r +} + +func (r Scale_Member_Virtual_Guest) Offset(offset int) Scale_Member_Virtual_Guest { + r.Options.Offset = &offset + return r +} + +// Delete this group member. Note, this can only be done on an active group when it wont cause the group to go below its minimumMemberCount. This is not the recommended way to delete members. Instead, users should invoke scale(-1) on SoftLayer_Scale_Group so it can choose the best guest member to remove. 
+func (r Scale_Member_Virtual_Guest) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member_Virtual_Guest", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Member_Virtual_Guest) GetObject() (resp datatypes.Scale_Member_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member_Virtual_Guest", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The group this member belongs to. +func (r Scale_Member_Virtual_Guest) GetScaleGroup() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member_Virtual_Guest", "getScaleGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The guest for this member. +func (r Scale_Member_Virtual_Guest) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member_Virtual_Guest", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// Retrieve The identifier of the guest for this member. +func (r Scale_Member_Virtual_Guest) GetVirtualGuestId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member_Virtual_Guest", "getVirtualGuestId", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Network_Vlan struct { + Session *session.Session + Options sl.Options +} + +// GetScaleNetworkVlanService returns an instance of the Scale_Network_Vlan SoftLayer service +func GetScaleNetworkVlanService(sess *session.Session) Scale_Network_Vlan { + return Scale_Network_Vlan{Session: sess} +} + +func (r Scale_Network_Vlan) Id(id int) Scale_Network_Vlan { + r.Options.Id = &id + return r +} + +func (r Scale_Network_Vlan) Mask(mask string) Scale_Network_Vlan { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Network_Vlan) Filter(filter string) Scale_Network_Vlan { + r.Options.Filter = filter + return r +} + +func (r Scale_Network_Vlan) Limit(limit int) Scale_Network_Vlan { + r.Options.Limit = &limit + return r +} + +func (r Scale_Network_Vlan) Offset(offset int) Scale_Network_Vlan { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Network_Vlan) CreateObject(templateObject *datatypes.Scale_Network_Vlan) (resp datatypes.Scale_Network_Vlan, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Network_Vlan", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Network_Vlan) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Network_Vlan", "deleteObject", nil, &r.Options, &resp) + return +} + +// Retrieve The network VLAN to scale with. +func (r Scale_Network_Vlan) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Network_Vlan", "getNetworkVlan", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Network_Vlan) GetObject() (resp datatypes.Scale_Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Network_Vlan", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The group this network VLAN is for. 
+func (r Scale_Network_Vlan) GetScaleGroup() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Network_Vlan", "getScaleGroup", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyService returns an instance of the Scale_Policy SoftLayer service +func GetScalePolicyService(sess *session.Session) Scale_Policy { + return Scale_Policy{Session: sess} +} + +func (r Scale_Policy) Id(id int) Scale_Policy { + r.Options.Id = &id + return r +} + +func (r Scale_Policy) Mask(mask string) Scale_Policy { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy) Filter(filter string) Scale_Policy { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy) Limit(limit int) Scale_Policy { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy) Offset(offset int) Scale_Policy { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy) CreateObject(templateObject *datatypes.Scale_Policy) (resp datatypes.Scale_Policy, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy) EditObject(templateObject *datatypes.Scale_Policy) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The actions to perform upon any trigger hit. Currently this must be a single value. +func (r Scale_Policy) GetActions() (resp []datatypes.Scale_Policy_Action, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getActions", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy) GetObject() (resp datatypes.Scale_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The one-time triggers to check for this group. +func (r Scale_Policy) GetOneTimeTriggers() (resp []datatypes.Scale_Policy_Trigger_OneTime, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getOneTimeTriggers", nil, &r.Options, &resp) + return +} + +// Retrieve The repeating triggers to check for this group. +func (r Scale_Policy) GetRepeatingTriggers() (resp []datatypes.Scale_Policy_Trigger_Repeating, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getRepeatingTriggers", nil, &r.Options, &resp) + return +} + +// Retrieve The resource-use triggers to check for this group. +func (r Scale_Policy) GetResourceUseTriggers() (resp []datatypes.Scale_Policy_Trigger_ResourceUse, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getResourceUseTriggers", nil, &r.Options, &resp) + return +} + +// Retrieve The scale actions to perform upon any trigger hit. Currently this must be a single value. 
+func (r Scale_Policy) GetScaleActions() (resp []datatypes.Scale_Policy_Action_Scale, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getScaleActions", nil, &r.Options, &resp) + return +} + +// Retrieve The group this policy is on. +func (r Scale_Policy) GetScaleGroup() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getScaleGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The triggers to check for this group. +func (r Scale_Policy) GetTriggers() (resp []datatypes.Scale_Policy_Trigger, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getTriggers", nil, &r.Options, &resp) + return +} + +// Manually trigger the actions on this policy. Returns members if the trigger has an effect, or an empty set of members if there is no effect. Sometimes this may not have an effect if the group is not active, in cooldown, or the result would violate the group range. If this call fails, the group is suspended, the failure logged, and a ticket is created. +func (r Scale_Policy) Trigger() (resp []datatypes.Scale_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "trigger", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Action struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyActionService returns an instance of the Scale_Policy_Action SoftLayer service +func GetScalePolicyActionService(sess *session.Session) Scale_Policy_Action { + return Scale_Policy_Action{Session: sess} +} + +func (r Scale_Policy_Action) Id(id int) Scale_Policy_Action { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Action) Mask(mask string) Scale_Policy_Action { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Action) Filter(filter string) Scale_Policy_Action { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Action) Limit(limit int) Scale_Policy_Action { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Action) Offset(offset int) Scale_Policy_Action { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Action) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Action) EditObject(templateObject *datatypes.Scale_Policy_Action) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Action) GetObject() (resp datatypes.Scale_Policy_Action, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The policy this action is on. +func (r Scale_Policy_Action) GetScalePolicy() (resp datatypes.Scale_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action", "getScalePolicy", nil, &r.Options, &resp) + return +} + +// Retrieve The type of action. 
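The Trigger call documented above can be exercised directly; an empty member slice simply means the trigger had no effect (inactive group, cooldown, or a result outside the group range). A short sketch, assuming placeholder credentials and a hypothetical policy ID:

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials

	// Manually fire the policy's actions and report how many members were affected.
	members, err := services.GetScalePolicyService(sess).Id(12345).Trigger() // hypothetical policy ID
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("trigger affected %d member(s)\n", len(members))
}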
+func (r Scale_Policy_Action) GetType() (resp datatypes.Scale_Policy_Action_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Action_Scale struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyActionScaleService returns an instance of the Scale_Policy_Action_Scale SoftLayer service +func GetScalePolicyActionScaleService(sess *session.Session) Scale_Policy_Action_Scale { + return Scale_Policy_Action_Scale{Session: sess} +} + +func (r Scale_Policy_Action_Scale) Id(id int) Scale_Policy_Action_Scale { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Action_Scale) Mask(mask string) Scale_Policy_Action_Scale { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Action_Scale) Filter(filter string) Scale_Policy_Action_Scale { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Action_Scale) Limit(limit int) Scale_Policy_Action_Scale { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Action_Scale) Offset(offset int) Scale_Policy_Action_Scale { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Action_Scale) CreateObject(templateObject *datatypes.Scale_Policy_Action_Scale) (resp datatypes.Scale_Policy_Action_Scale, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Scale", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Action_Scale) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Scale", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Action_Scale) EditObject(templateObject *datatypes.Scale_Policy_Action) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Scale", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Action_Scale) GetObject() (resp datatypes.Scale_Policy_Action_Scale, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Scale", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The policy this action is on. +func (r Scale_Policy_Action_Scale) GetScalePolicy() (resp datatypes.Scale_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Scale", "getScalePolicy", nil, &r.Options, &resp) + return +} + +// Retrieve The type of action. 
+func (r Scale_Policy_Action_Scale) GetType() (resp datatypes.Scale_Policy_Action_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Scale", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Action_Type struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyActionTypeService returns an instance of the Scale_Policy_Action_Type SoftLayer service +func GetScalePolicyActionTypeService(sess *session.Session) Scale_Policy_Action_Type { + return Scale_Policy_Action_Type{Session: sess} +} + +func (r Scale_Policy_Action_Type) Id(id int) Scale_Policy_Action_Type { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Action_Type) Mask(mask string) Scale_Policy_Action_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Action_Type) Filter(filter string) Scale_Policy_Action_Type { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Action_Type) Limit(limit int) Scale_Policy_Action_Type { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Action_Type) Offset(offset int) Scale_Policy_Action_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Action_Type) GetAllObjects() (resp []datatypes.Scale_Policy_Action_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Action_Type) GetObject() (resp datatypes.Scale_Policy_Action_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Trigger struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyTriggerService returns an instance of the Scale_Policy_Trigger SoftLayer service +func GetScalePolicyTriggerService(sess *session.Session) Scale_Policy_Trigger { + return Scale_Policy_Trigger{Session: sess} +} + +func (r Scale_Policy_Trigger) Id(id int) Scale_Policy_Trigger { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Trigger) Mask(mask string) Scale_Policy_Trigger { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Trigger) Filter(filter string) Scale_Policy_Trigger { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Trigger) Limit(limit int) Scale_Policy_Trigger { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Trigger) Offset(offset int) Scale_Policy_Trigger { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Trigger) CreateObject(templateObject *datatypes.Scale_Policy_Trigger) (resp datatypes.Scale_Policy_Trigger, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger) EditObject(templateObject 
*datatypes.Scale_Policy_Trigger) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger) GetObject() (resp datatypes.Scale_Policy_Trigger, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The policy this trigger is on. +func (r Scale_Policy_Trigger) GetScalePolicy() (resp datatypes.Scale_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger", "getScalePolicy", nil, &r.Options, &resp) + return +} + +// Retrieve The type of trigger. +func (r Scale_Policy_Trigger) GetType() (resp datatypes.Scale_Policy_Trigger_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Trigger_OneTime struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyTriggerOneTimeService returns an instance of the Scale_Policy_Trigger_OneTime SoftLayer service +func GetScalePolicyTriggerOneTimeService(sess *session.Session) Scale_Policy_Trigger_OneTime { + return Scale_Policy_Trigger_OneTime{Session: sess} +} + +func (r Scale_Policy_Trigger_OneTime) Id(id int) Scale_Policy_Trigger_OneTime { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Trigger_OneTime) Mask(mask string) Scale_Policy_Trigger_OneTime { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Trigger_OneTime) Filter(filter string) Scale_Policy_Trigger_OneTime { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Trigger_OneTime) Limit(limit int) Scale_Policy_Trigger_OneTime { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Trigger_OneTime) Offset(offset int) Scale_Policy_Trigger_OneTime { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Trigger_OneTime) CreateObject(templateObject *datatypes.Scale_Policy_Trigger_OneTime) (resp datatypes.Scale_Policy_Trigger_OneTime, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_OneTime", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_OneTime) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_OneTime", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_OneTime) EditObject(templateObject *datatypes.Scale_Policy_Trigger) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_OneTime", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_OneTime) GetObject() (resp datatypes.Scale_Policy_Trigger_OneTime, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_OneTime", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The policy this trigger is on. 
+func (r Scale_Policy_Trigger_OneTime) GetScalePolicy() (resp datatypes.Scale_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_OneTime", "getScalePolicy", nil, &r.Options, &resp) + return +} + +// Retrieve The type of trigger. +func (r Scale_Policy_Trigger_OneTime) GetType() (resp datatypes.Scale_Policy_Trigger_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_OneTime", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Trigger_Repeating struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyTriggerRepeatingService returns an instance of the Scale_Policy_Trigger_Repeating SoftLayer service +func GetScalePolicyTriggerRepeatingService(sess *session.Session) Scale_Policy_Trigger_Repeating { + return Scale_Policy_Trigger_Repeating{Session: sess} +} + +func (r Scale_Policy_Trigger_Repeating) Id(id int) Scale_Policy_Trigger_Repeating { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Trigger_Repeating) Mask(mask string) Scale_Policy_Trigger_Repeating { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Trigger_Repeating) Filter(filter string) Scale_Policy_Trigger_Repeating { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Trigger_Repeating) Limit(limit int) Scale_Policy_Trigger_Repeating { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Trigger_Repeating) Offset(offset int) Scale_Policy_Trigger_Repeating { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Trigger_Repeating) CreateObject(templateObject *datatypes.Scale_Policy_Trigger_Repeating) (resp datatypes.Scale_Policy_Trigger_Repeating, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Repeating", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_Repeating) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Repeating", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_Repeating) EditObject(templateObject *datatypes.Scale_Policy_Trigger) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Repeating", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_Repeating) GetObject() (resp datatypes.Scale_Policy_Trigger_Repeating, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Repeating", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The policy this trigger is on. +func (r Scale_Policy_Trigger_Repeating) GetScalePolicy() (resp datatypes.Scale_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Repeating", "getScalePolicy", nil, &r.Options, &resp) + return +} + +// Retrieve The type of trigger. 
+func (r Scale_Policy_Trigger_Repeating) GetType() (resp datatypes.Scale_Policy_Trigger_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Repeating", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_Repeating) ValidateCronExpression(expression *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + expression, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Repeating", "validateCronExpression", params, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Trigger_ResourceUse struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyTriggerResourceUseService returns an instance of the Scale_Policy_Trigger_ResourceUse SoftLayer service +func GetScalePolicyTriggerResourceUseService(sess *session.Session) Scale_Policy_Trigger_ResourceUse { + return Scale_Policy_Trigger_ResourceUse{Session: sess} +} + +func (r Scale_Policy_Trigger_ResourceUse) Id(id int) Scale_Policy_Trigger_ResourceUse { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Trigger_ResourceUse) Mask(mask string) Scale_Policy_Trigger_ResourceUse { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Trigger_ResourceUse) Filter(filter string) Scale_Policy_Trigger_ResourceUse { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Trigger_ResourceUse) Limit(limit int) Scale_Policy_Trigger_ResourceUse { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Trigger_ResourceUse) Offset(offset int) Scale_Policy_Trigger_ResourceUse { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse) CreateObject(templateObject *datatypes.Scale_Policy_Trigger_ResourceUse) (resp datatypes.Scale_Policy_Trigger_ResourceUse, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse) EditObject(templateObject *datatypes.Scale_Policy_Trigger) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse) GetObject() (resp datatypes.Scale_Policy_Trigger_ResourceUse, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The policy this trigger is on. +func (r Scale_Policy_Trigger_ResourceUse) GetScalePolicy() (resp datatypes.Scale_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse", "getScalePolicy", nil, &r.Options, &resp) + return +} + +// Retrieve The type of trigger. 
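ValidateCronExpression above returns only an error, so it can serve as a cheap pre-check before attaching a schedule to a repeating trigger. A minimal sketch with placeholder credentials and an example schedule string:

package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials

	expr := "0 2 * * *" // example schedule: 02:00 every day
	if err := services.GetScalePolicyTriggerRepeatingService(sess).ValidateCronExpression(&expr); err != nil {
		fmt.Println("cron expression rejected:", err)
		return
	}
	fmt.Println("cron expression accepted")
}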
+func (r Scale_Policy_Trigger_ResourceUse) GetType() (resp datatypes.Scale_Policy_Trigger_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The resource watches for this trigger. +func (r Scale_Policy_Trigger_ResourceUse) GetWatches() (resp []datatypes.Scale_Policy_Trigger_ResourceUse_Watch, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse", "getWatches", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Trigger_ResourceUse_Watch struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyTriggerResourceUseWatchService returns an instance of the Scale_Policy_Trigger_ResourceUse_Watch SoftLayer service +func GetScalePolicyTriggerResourceUseWatchService(sess *session.Session) Scale_Policy_Trigger_ResourceUse_Watch { + return Scale_Policy_Trigger_ResourceUse_Watch{Session: sess} +} + +func (r Scale_Policy_Trigger_ResourceUse_Watch) Id(id int) Scale_Policy_Trigger_ResourceUse_Watch { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Trigger_ResourceUse_Watch) Mask(mask string) Scale_Policy_Trigger_ResourceUse_Watch { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Trigger_ResourceUse_Watch) Filter(filter string) Scale_Policy_Trigger_ResourceUse_Watch { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Trigger_ResourceUse_Watch) Limit(limit int) Scale_Policy_Trigger_ResourceUse_Watch { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Trigger_ResourceUse_Watch) Offset(offset int) Scale_Policy_Trigger_ResourceUse_Watch { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse_Watch) CreateObject(templateObject *datatypes.Scale_Policy_Trigger_ResourceUse_Watch) (resp datatypes.Scale_Policy_Trigger_ResourceUse_Watch, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse_Watch) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse_Watch) EditObject(templateObject *datatypes.Scale_Policy_Trigger_ResourceUse_Watch) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse_Watch) GetAllPossibleAlgorithms() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "getAllPossibleAlgorithms", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse_Watch) GetAllPossibleMetrics() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "getAllPossibleMetrics", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse_Watch) GetAllPossibleOperators() (resp 
[]string, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "getAllPossibleOperators", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse_Watch) GetObject() (resp datatypes.Scale_Policy_Trigger_ResourceUse_Watch, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The trigger this watch is on. +func (r Scale_Policy_Trigger_ResourceUse_Watch) GetScalePolicyTrigger() (resp datatypes.Scale_Policy_Trigger_ResourceUse, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "getScalePolicyTrigger", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Trigger_Type struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyTriggerTypeService returns an instance of the Scale_Policy_Trigger_Type SoftLayer service +func GetScalePolicyTriggerTypeService(sess *session.Session) Scale_Policy_Trigger_Type { + return Scale_Policy_Trigger_Type{Session: sess} +} + +func (r Scale_Policy_Trigger_Type) Id(id int) Scale_Policy_Trigger_Type { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Trigger_Type) Mask(mask string) Scale_Policy_Trigger_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Trigger_Type) Filter(filter string) Scale_Policy_Trigger_Type { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Trigger_Type) Limit(limit int) Scale_Policy_Trigger_Type { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Trigger_Type) Offset(offset int) Scale_Policy_Trigger_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Trigger_Type) GetAllObjects() (resp []datatypes.Scale_Policy_Trigger_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_Type) GetObject() (resp datatypes.Scale_Policy_Trigger_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Termination_Policy struct { + Session *session.Session + Options sl.Options +} + +// GetScaleTerminationPolicyService returns an instance of the Scale_Termination_Policy SoftLayer service +func GetScaleTerminationPolicyService(sess *session.Session) Scale_Termination_Policy { + return Scale_Termination_Policy{Session: sess} +} + +func (r Scale_Termination_Policy) Id(id int) Scale_Termination_Policy { + r.Options.Id = &id + return r +} + +func (r Scale_Termination_Policy) Mask(mask string) Scale_Termination_Policy { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Termination_Policy) Filter(filter string) Scale_Termination_Policy { + r.Options.Filter = filter + return r +} + +func (r Scale_Termination_Policy) Limit(limit int) Scale_Termination_Policy { + r.Options.Limit = &limit + return r +} + +func (r Scale_Termination_Policy) Offset(offset int) Scale_Termination_Policy { + r.Options.Offset = &offset + return r +} + +// no 
documentation yet +func (r Scale_Termination_Policy) GetAllObjects() (resp []datatypes.Scale_Termination_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Termination_Policy", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Termination_Policy) GetObject() (resp datatypes.Scale_Termination_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Termination_Policy", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/search.go b/vendor/github.com/softlayer/softlayer-go/services/search.go new file mode 100644 index 000000000000..5720699901c0 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/search.go @@ -0,0 +1,122 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Search struct { + Session *session.Session + Options sl.Options +} + +// GetSearchService returns an instance of the Search SoftLayer service +func GetSearchService(sess *session.Session) Search { + return Search{Session: sess} +} + +func (r Search) Id(id int) Search { + r.Options.Id = &id + return r +} + +func (r Search) Mask(mask string) Search { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Search) Filter(filter string) Search { + r.Options.Filter = filter + return r +} + +func (r Search) Limit(limit int) Search { + r.Options.Limit = &limit + return r +} + +func (r Search) Offset(offset int) Search { + r.Options.Offset = &offset + return r +} + +// This method allows for searching for SoftLayer resources by simple terms and operators. Fields that are used for searching will be available at sldn.softlayer.com. It returns a collection or array of [[SoftLayer_Container_Search_Result (type)|SoftLayer_Container_Search_Result]] objects that have search metadata for each result and the resulting resource found. +// +// The advancedSearch() method recognizes the special _objectType: quantifier in search strings. See the documentation for the [[SoftLayer_Search/search|search()]] method on how to restrict searches using object types. +// +// The advancedSearch() method recognizes [[SoftLayer_Container_Search_ObjectType_Property (type)|object properties]], which can also be used to limit searches. Example: +// +// _objectType:Type_1 propertyA:value +// +// A search string can specify multiple properties, separated with spaces. 
Example: +// +// _objectType:Type_1 propertyA:value propertyB:value +// +// A collection of available object types and their properties can be retrieved by calling the [[SoftLayer_Search/getObjectTypes|getObjectTypes()]] method. +func (r Search) AdvancedSearch(searchString *string) (resp []datatypes.Container_Search_Result, err error) { + params := []interface{}{ + searchString, + } + err = r.Session.DoRequest("SoftLayer_Search", "advancedSearch", params, &r.Options, &resp) + return +} + +// This method returns a collection of [[SoftLayer_Container_Search_ObjectType (type)|SoftLayer_Container_Search_ObjectType]] containers that specify which indexed object types and properties are exposed for the current user. These object types can be used to discover searchable data and to create or validate object index search strings. +// +//

    Refer to the [[SoftLayer_Search/search|search()]] and [[SoftLayer_Search/advancedSearch|advancedSearch()]] methods for information on using object types and properties in search strings. +func (r Search) GetObjectTypes() (resp []datatypes.Container_Search_ObjectType, err error) { + err = r.Session.DoRequest("SoftLayer_Search", "getObjectTypes", nil, &r.Options, &resp) + return +} + +// This method allows for searching for SoftLayer resources by simple phrase. It returns a collection or array of [[SoftLayer_Container_Search_Result (type)|SoftLayer_Container_Search_Result]] objects that have search metadata for each result and the resulting resource found. +// +// This method recognizes the special _objectType: quantifier in search strings. This quantifier can be used to restrict a search to specific object types. Example usage: +// +// _objectType:Type_1 (other search terms...) +// +// A search string can specify multiple object types, separated by commas (no spaces are permitted between the type names). Example: +// +// _objectType:Type_1,Type_2,Type_3 (other search terms...) +// +// If the list of object types is prefixed with a hyphen or minus sign (-), then the specified types are excluded from the search. Example: +// +// _objectType:-Type_4,Type_5 (other search terms...) +// +// A collection of available object types can be retrieved by calling the [[SoftLayer_Search/getObjectTypes|getObjectTypes()]] method. +func (r Search) Search(searchString *string) (resp []datatypes.Container_Search_Result, err error) { + params := []interface{}{ + searchString, + } + err = r.Session.DoRequest("SoftLayer_Search", "search", params, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/security.go b/vendor/github.com/softlayer/softlayer-go/services/security.go new file mode 100644 index 000000000000..1a6fb22283e4 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/security.go @@ -0,0 +1,456 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Security_Certificate struct { + Session *session.Session + Options sl.Options +} + +// GetSecurityCertificateService returns an instance of the Security_Certificate SoftLayer service +func GetSecurityCertificateService(sess *session.Session) Security_Certificate { + return Security_Certificate{Session: sess} +} + +func (r Security_Certificate) Id(id int) Security_Certificate { + r.Options.Id = &id + return r +} + +func (r Security_Certificate) Mask(mask string) Security_Certificate { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Security_Certificate) Filter(filter string) Security_Certificate { + r.Options.Filter = filter + return r +} + +func (r Security_Certificate) Limit(limit int) Security_Certificate { + r.Options.Limit = &limit + return r +} + +func (r Security_Certificate) Offset(offset int) Security_Certificate { + r.Options.Offset = &offset + return r +} + +// Add a certificate to your account for your records, or for use with various services. Only the certificate and private key are usually required. If your issuer provided an intermediate certificate, you must also provide that certificate. Details will be extracted from the certificate. Validation will be performed between the certificate and the private key as well as the certificate and the intermediate certificate, if provided. +// +// The certificate signing request is not required, but can be provided for your records. +func (r Security_Certificate) CreateObject(templateObject *datatypes.Security_Certificate) (resp datatypes.Security_Certificate, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "createObject", params, &r.Options, &resp) + return +} + +// Remove a certificate from your account. You may not remove a certificate with associated services. +func (r Security_Certificate) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "deleteObject", nil, &r.Options, &resp) + return +} + +// Update a certificate. Modifications are restricted to the note and CSR if the are any services associated with the certificate. There are no modification restrictions for a certificate with no associated services. +func (r Security_Certificate) EditObject(templateObject *datatypes.Security_Certificate) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "editObject", params, &r.Options, &resp) + return +} + +// Locate certificates by their common name, traditionally a domain name. +func (r Security_Certificate) FindByCommonName(commonName *string) (resp []datatypes.Security_Certificate, err error) { + params := []interface{}{ + commonName, + } + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "findByCommonName", params, &r.Options, &resp) + return +} + +// Retrieve The number of services currently associated with the certificate. 
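FindByCommonName above looks certificates up by their common name, traditionally a domain. A compact sketch (not part of the generated file) with placeholder credentials and a hypothetical domain:

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials

	name := "www.example.com" // hypothetical common name
	certs, err := services.GetSecurityCertificateService(sess).FindByCommonName(&name)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d certificate(s) for %s\n", len(certs), name)
	for _, c := range certs {
		if c.Id != nil {
			fmt.Println(" - certificate id", *c.Id)
		}
	}
}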
+func (r Security_Certificate) GetAssociatedServiceCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "getAssociatedServiceCount", nil, &r.Options, &resp) + return +} + +// Retrieve The load balancers virtual IP addresses currently associated with the certificate. +func (r Security_Certificate) GetLoadBalancerVirtualIpAddresses() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "getLoadBalancerVirtualIpAddresses", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Security_Certificate) GetObject() (resp datatypes.Security_Certificate, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve the certificate in PEM (Privacy Enhanced Mail) format, which is a string containing all base64 encoded (DER) certificates delimited by -----BEGIN/END *----- clauses. +func (r Security_Certificate) GetPemFormat() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "getPemFormat", nil, &r.Options, &resp) + return +} + +// SoftLayer_Security_Certificate_Request data type is used to harness your SSL certificate order to a Certificate Authority. This contains data that is required by a Certificate Authority to place an SSL certificate order. +type Security_Certificate_Request struct { + Session *session.Session + Options sl.Options +} + +// GetSecurityCertificateRequestService returns an instance of the Security_Certificate_Request SoftLayer service +func GetSecurityCertificateRequestService(sess *session.Session) Security_Certificate_Request { + return Security_Certificate_Request{Session: sess} +} + +func (r Security_Certificate_Request) Id(id int) Security_Certificate_Request { + r.Options.Id = &id + return r +} + +func (r Security_Certificate_Request) Mask(mask string) Security_Certificate_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Security_Certificate_Request) Filter(filter string) Security_Certificate_Request { + r.Options.Filter = filter + return r +} + +func (r Security_Certificate_Request) Limit(limit int) Security_Certificate_Request { + r.Options.Limit = &limit + return r +} + +func (r Security_Certificate_Request) Offset(offset int) Security_Certificate_Request { + r.Options.Offset = &offset + return r +} + +// Cancels a pending SSL certificate order at the Certificate Authority +func (r Security_Certificate_Request) CancelSslOrder() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "cancelSslOrder", nil, &r.Options, &resp) + return +} + +// Retrieve The account to which a SSL certificate request belongs. +func (r Security_Certificate_Request) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getAccount", nil, &r.Options, &resp) + return +} + +// Gets the email domains that can be used to validate a certificate to a domain. 
+func (r Security_Certificate_Request) GetAdministratorEmailDomains(commonName *string) (resp []string, err error) { + params := []interface{}{ + commonName, + } + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getAdministratorEmailDomains", params, &r.Options, &resp) + return +} + +// Gets the email accounts that can be used to validate a certificate to a domain. +func (r Security_Certificate_Request) GetAdministratorEmailPrefixes() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getAdministratorEmailPrefixes", nil, &r.Options, &resp) + return +} + +// Retrieve The Certificate Authority name +func (r Security_Certificate_Request) GetCertificateAuthorityName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getCertificateAuthorityName", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Security_Certificate_Request) GetObject() (resp datatypes.Security_Certificate_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The order contains the information related to a SSL certificate request. +func (r Security_Certificate_Request) GetOrder() (resp datatypes.Billing_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getOrder", nil, &r.Options, &resp) + return +} + +// Retrieve The associated order item for this SSL certificate request. +func (r Security_Certificate_Request) GetOrderItem() (resp datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getOrderItem", nil, &r.Options, &resp) + return +} + +// Returns previous SSL certificate order data. You can use this data for to place a renewal order for a completed SSL certificate. +func (r Security_Certificate_Request) GetPreviousOrderData() (resp datatypes.Container_Product_Order_Security_Certificate, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getPreviousOrderData", nil, &r.Options, &resp) + return +} + +// Returns all the SSL certificate requests. +func (r Security_Certificate_Request) GetSslCertificateRequests(accountId *int) (resp []datatypes.Security_Certificate_Request, err error) { + params := []interface{}{ + accountId, + } + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getSslCertificateRequests", params, &r.Options, &resp) + return +} + +// Retrieve The status of a SSL certificate request. +func (r Security_Certificate_Request) GetStatus() (resp datatypes.Security_Certificate_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getStatus", nil, &r.Options, &resp) + return +} + +// A Certificate Authority sends out various emails to your domain administrator or your technical contact. Use this service to have these emails re-sent. +func (r Security_Certificate_Request) ResendEmail(emailType *string) (resp bool, err error) { + params := []interface{}{ + emailType, + } + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "resendEmail", params, &r.Options, &resp) + return +} + +// Allows you to validate a Certificate Signing Request (CSR) required for an SSL certificate with the certificate authority (CA). This method sends the CSR, the length of the subscription in months, the certificate type, and the server type for validation against requirements of the CA. 
Returns true if valid. +// +// More information on CSR generation can be found at: [http://en.wikipedia.org/wiki/Certificate_signing_request Wikipedia] [https://knowledge.verisign.com/support/ssl-certificates-support/index?page=content&id=AR235&actp=LIST&viewlocale=en_US VeriSign] +func (r Security_Certificate_Request) ValidateCsr(csr *string, validityMonths *int, itemId *int, serverType *string) (resp bool, err error) { + params := []interface{}{ + csr, + validityMonths, + itemId, + serverType, + } + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "validateCsr", params, &r.Options, &resp) + return +} + +// Represents a server type that can be specified when ordering an SSL certificate. +type Security_Certificate_Request_ServerType struct { + Session *session.Session + Options sl.Options +} + +// GetSecurityCertificateRequestServerTypeService returns an instance of the Security_Certificate_Request_ServerType SoftLayer service +func GetSecurityCertificateRequestServerTypeService(sess *session.Session) Security_Certificate_Request_ServerType { + return Security_Certificate_Request_ServerType{Session: sess} +} + +func (r Security_Certificate_Request_ServerType) Id(id int) Security_Certificate_Request_ServerType { + r.Options.Id = &id + return r +} + +func (r Security_Certificate_Request_ServerType) Mask(mask string) Security_Certificate_Request_ServerType { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Security_Certificate_Request_ServerType) Filter(filter string) Security_Certificate_Request_ServerType { + r.Options.Filter = filter + return r +} + +func (r Security_Certificate_Request_ServerType) Limit(limit int) Security_Certificate_Request_ServerType { + r.Options.Limit = &limit + return r +} + +func (r Security_Certificate_Request_ServerType) Offset(offset int) Security_Certificate_Request_ServerType { + r.Options.Offset = &offset + return r +} + +// Returns all SSL certificate server types, which are passed in on a [[SoftLayer_Container_Product_Order_Security_Certificate|certificate order]]. +func (r Security_Certificate_Request_ServerType) GetAllObjects() (resp []datatypes.Security_Certificate_Request_ServerType, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request_ServerType", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Security_Certificate_Request_ServerType) GetObject() (resp datatypes.Security_Certificate_Request_ServerType, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request_ServerType", "getObject", nil, &r.Options, &resp) + return +} + +// Represents the status of an SSL certificate request. 
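ValidateCsr above takes pointers for the CSR text, the subscription length in months, the certificate item ID, and the server type. A minimal sketch with placeholder credentials and hypothetical order details:

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials

	csr := "-----BEGIN CERTIFICATE REQUEST-----\n...\n-----END CERTIFICATE REQUEST-----" // placeholder CSR
	months := 12            // subscription length in months
	itemID := 123           // hypothetical certificate item ID
	serverType := "apache2" // hypothetical server type; valid values come from the ServerType service's GetAllObjects

	ok, err := services.GetSecurityCertificateRequestService(sess).ValidateCsr(&csr, &months, &itemID, &serverType)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("CSR accepted by the CA requirements check:", ok)
}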
+type Security_Certificate_Request_Status struct { + Session *session.Session + Options sl.Options +} + +// GetSecurityCertificateRequestStatusService returns an instance of the Security_Certificate_Request_Status SoftLayer service +func GetSecurityCertificateRequestStatusService(sess *session.Session) Security_Certificate_Request_Status { + return Security_Certificate_Request_Status{Session: sess} +} + +func (r Security_Certificate_Request_Status) Id(id int) Security_Certificate_Request_Status { + r.Options.Id = &id + return r +} + +func (r Security_Certificate_Request_Status) Mask(mask string) Security_Certificate_Request_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Security_Certificate_Request_Status) Filter(filter string) Security_Certificate_Request_Status { + r.Options.Filter = filter + return r +} + +func (r Security_Certificate_Request_Status) Limit(limit int) Security_Certificate_Request_Status { + r.Options.Limit = &limit + return r +} + +func (r Security_Certificate_Request_Status) Offset(offset int) Security_Certificate_Request_Status { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Security_Certificate_Request_Status) GetObject() (resp datatypes.Security_Certificate_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request_Status", "getObject", nil, &r.Options, &resp) + return +} + +// Returns all SSL certificate request status objects +func (r Security_Certificate_Request_Status) GetSslRequestStatuses() (resp []datatypes.Security_Certificate_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request_Status", "getSslRequestStatuses", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Security_Ssh_Key struct { + Session *session.Session + Options sl.Options +} + +// GetSecuritySshKeyService returns an instance of the Security_Ssh_Key SoftLayer service +func GetSecuritySshKeyService(sess *session.Session) Security_Ssh_Key { + return Security_Ssh_Key{Session: sess} +} + +func (r Security_Ssh_Key) Id(id int) Security_Ssh_Key { + r.Options.Id = &id + return r +} + +func (r Security_Ssh_Key) Mask(mask string) Security_Ssh_Key { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Security_Ssh_Key) Filter(filter string) Security_Ssh_Key { + r.Options.Filter = filter + return r +} + +func (r Security_Ssh_Key) Limit(limit int) Security_Ssh_Key { + r.Options.Limit = &limit + return r +} + +func (r Security_Ssh_Key) Offset(offset int) Security_Ssh_Key { + r.Options.Offset = &offset + return r +} + +// Add a ssh key to your account for use during server provisioning and os reloads. +func (r Security_Ssh_Key) CreateObject(templateObject *datatypes.Security_Ssh_Key) (resp datatypes.Security_Ssh_Key, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Security_Ssh_Key", "createObject", params, &r.Options, &resp) + return +} + +// Remove a ssh key from your account. +func (r Security_Ssh_Key) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Ssh_Key", "deleteObject", nil, &r.Options, &resp) + return +} + +// Update a ssh key. 
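CreateObject on the Security_Ssh_Key service above registers an SSH public key on the account for later provisioning and OS reloads. A sketch with placeholder credentials; the Label and Key fields on datatypes.Security_Ssh_Key are assumed field names here:

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("SL_USERNAME", "SL_API_KEY") // placeholder credentials

	label := "build-server"                       // hypothetical label
	pub := "ssh-ed25519 AAAA... user@example.com" // placeholder public key material

	// Label and Key are assumed field names on the Security_Ssh_Key data type.
	key, err := services.GetSecuritySshKeyService(sess).CreateObject(&datatypes.Security_Ssh_Key{
		Label: &label,
		Key:   &pub,
	})
	if err != nil {
		log.Fatal(err)
	}
	if key.Id != nil {
		fmt.Println("created ssh key", *key.Id)
	}
}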
+func (r Security_Ssh_Key) EditObject(templateObject *datatypes.Security_Ssh_Key) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Security_Ssh_Key", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Security_Ssh_Key) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Ssh_Key", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The image template groups that are linked to an SSH key. +func (r Security_Ssh_Key) GetBlockDeviceTemplateGroups() (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Ssh_Key", "getBlockDeviceTemplateGroups", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Security_Ssh_Key) GetObject() (resp datatypes.Security_Ssh_Key, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Ssh_Key", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The OS root users that are linked to an SSH key. +func (r Security_Ssh_Key) GetSoftwarePasswords() (resp []datatypes.Software_Component_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Ssh_Key", "getSoftwarePasswords", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/software.go b/vendor/github.com/softlayer/softlayer-go/services/software.go new file mode 100644 index 000000000000..7dc00688f8ac --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/software.go @@ -0,0 +1,797 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// SoftLayer_Software_AccountLicense is a class that represents software licenses that are tied only to a customer's account and not to any particular hardware, IP address, etc. 
+type Software_AccountLicense struct { + Session *session.Session + Options sl.Options +} + +// GetSoftwareAccountLicenseService returns an instance of the Software_AccountLicense SoftLayer service +func GetSoftwareAccountLicenseService(sess *session.Session) Software_AccountLicense { + return Software_AccountLicense{Session: sess} +} + +func (r Software_AccountLicense) Id(id int) Software_AccountLicense { + r.Options.Id = &id + return r +} + +func (r Software_AccountLicense) Mask(mask string) Software_AccountLicense { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Software_AccountLicense) Filter(filter string) Software_AccountLicense { + r.Options.Filter = filter + return r +} + +func (r Software_AccountLicense) Limit(limit int) Software_AccountLicense { + r.Options.Limit = &limit + return r +} + +func (r Software_AccountLicense) Offset(offset int) Software_AccountLicense { + r.Options.Offset = &offset + return r +} + +// Retrieve The customer account this Account License belongs to. +func (r Software_AccountLicense) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Software_AccountLicense", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_AccountLicense) GetAllObjects() (resp []datatypes.Software_AccountLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Software_AccountLicense", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a software account license. +func (r Software_AccountLicense) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Software_AccountLicense", "getBillingItem", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_AccountLicense) GetObject() (resp datatypes.Software_AccountLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Software_AccountLicense", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Software_Description that this account license is for. +func (r Software_AccountLicense) GetSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_AccountLicense", "getSoftwareDescription", nil, &r.Options, &resp) + return +} + +// A SoftLayer_Software_Component ties the installation of a specific piece of software onto a specific piece of hardware. +// +// SoftLayer_Software_Component works with SoftLayer_Software_License and SoftLayer_Software_Description to tie this all together. +// +//

+// * SoftLayer_Software_Component is the installation of a specific piece of software onto a specific piece of hardware in accordance with a software license.
+//   * SoftLayer_Software_License dictates when and how a specific piece of software may be installed onto a piece of hardware.
+//     * SoftLayer_Software_Description describes a specific piece of software which can be installed onto hardware in accordance with its license agreement.
    +type Software_Component struct { + Session *session.Session + Options sl.Options +} + +// GetSoftwareComponentService returns an instance of the Software_Component SoftLayer service +func GetSoftwareComponentService(sess *session.Session) Software_Component { + return Software_Component{Session: sess} +} + +func (r Software_Component) Id(id int) Software_Component { + r.Options.Id = &id + return r +} + +func (r Software_Component) Mask(mask string) Software_Component { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Software_Component) Filter(filter string) Software_Component { + r.Options.Filter = filter + return r +} + +func (r Software_Component) Limit(limit int) Software_Component { + r.Options.Limit = &limit + return r +} + +func (r Software_Component) Offset(offset int) Software_Component { + r.Options.Offset = &offset + return r +} + +// Retrieve The average amount of time that a software component takes to install. +func (r Software_Component) GetAverageInstallationDuration() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getAverageInstallationDuration", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a software component. +func (r Software_Component) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware this Software Component is installed upon. +func (r Software_Component) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getHardware", nil, &r.Options, &resp) + return +} + +// Attempt to retrieve the file associated with a software component. If the software component does not support downloading license files an exception will be thrown. +func (r Software_Component) GetLicenseFile() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getLicenseFile", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Software_Component object whose ID corresponds to the ID number of the init parameter passed to the SoftLayer_Software_Component service. +// +// The best way to get software components is through getSoftwareComponents from the Hardware service. +func (r Software_Component) GetObject() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve History Records for Software Passwords. +func (r Software_Component) GetPasswordHistory() (resp []datatypes.Software_Component_Password_History, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getPasswordHistory", nil, &r.Options, &resp) + return +} + +// Retrieve Username/Password pairs used for access to this Software Installation. +func (r Software_Component) GetPasswords() (resp []datatypes.Software_Component_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getPasswords", nil, &r.Options, &resp) + return +} + +// Retrieve The Software Description of this Software Component. 
+func (r Software_Component) GetSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getSoftwareDescription", nil, &r.Options, &resp) + return +} + +// Retrieve The License this Software Component uses. +func (r Software_Component) GetSoftwareLicense() (resp datatypes.Software_License, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getSoftwareLicense", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_Component) GetVendorSetUpConfiguration() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getVendorSetUpConfiguration", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual guest this software component is installed upon. +func (r Software_Component) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// This object specifies a specific type of Software Component: An Anti-virus/spyware instance. Anti-virus/spyware installations have specific properties and methods such as SoftLayer_Software_Component_AntivirusSpyware::updateAntivirusSpywarePolicy. Defaults are initiated by this object. +type Software_Component_AntivirusSpyware struct { + Session *session.Session + Options sl.Options +} + +// GetSoftwareComponentAntivirusSpywareService returns an instance of the Software_Component_AntivirusSpyware SoftLayer service +func GetSoftwareComponentAntivirusSpywareService(sess *session.Session) Software_Component_AntivirusSpyware { + return Software_Component_AntivirusSpyware{Session: sess} +} + +func (r Software_Component_AntivirusSpyware) Id(id int) Software_Component_AntivirusSpyware { + r.Options.Id = &id + return r +} + +func (r Software_Component_AntivirusSpyware) Mask(mask string) Software_Component_AntivirusSpyware { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Software_Component_AntivirusSpyware) Filter(filter string) Software_Component_AntivirusSpyware { + r.Options.Filter = filter + return r +} + +func (r Software_Component_AntivirusSpyware) Limit(limit int) Software_Component_AntivirusSpyware { + r.Options.Limit = &limit + return r +} + +func (r Software_Component_AntivirusSpyware) Offset(offset int) Software_Component_AntivirusSpyware { + r.Options.Offset = &offset + return r +} + +// Retrieve The average amount of time that a software component takes to install. +func (r Software_Component_AntivirusSpyware) GetAverageInstallationDuration() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getAverageInstallationDuration", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a software component. +func (r Software_Component_AntivirusSpyware) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware this Software Component is installed upon. 
+func (r Software_Component_AntivirusSpyware) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getHardware", nil, &r.Options, &resp) + return +} + +// Attempt to retrieve the file associated with a software component. If the software component does not support downloading license files an exception will be thrown. +func (r Software_Component_AntivirusSpyware) GetLicenseFile() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getLicenseFile", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_Component_AntivirusSpyware) GetObject() (resp datatypes.Software_Component_AntivirusSpyware, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve History Records for Software Passwords. +func (r Software_Component_AntivirusSpyware) GetPasswordHistory() (resp []datatypes.Software_Component_Password_History, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getPasswordHistory", nil, &r.Options, &resp) + return +} + +// Retrieve Username/Password pairs used for access to this Software Installation. +func (r Software_Component_AntivirusSpyware) GetPasswords() (resp []datatypes.Software_Component_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getPasswords", nil, &r.Options, &resp) + return +} + +// Retrieve The Software Description of this Software Component. +func (r Software_Component_AntivirusSpyware) GetSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getSoftwareDescription", nil, &r.Options, &resp) + return +} + +// Retrieve The License this Software Component uses. +func (r Software_Component_AntivirusSpyware) GetSoftwareLicense() (resp datatypes.Software_License, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getSoftwareLicense", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_Component_AntivirusSpyware) GetVendorSetUpConfiguration() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getVendorSetUpConfiguration", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual guest this software component is installed upon. +func (r Software_Component_AntivirusSpyware) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// Update an anti-virus/spyware policy. The policy options that it accepts are the following: +// *1 - Minimal +// *2 - Relaxed +// *3 - Default +// *4 - High +// *5 - Ultimate +func (r Software_Component_AntivirusSpyware) UpdateAntivirusSpywarePolicy(newPolicy *string, enforce *bool) (resp bool, err error) { + params := []interface{}{ + newPolicy, + enforce, + } + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "updateAntivirusSpywarePolicy", params, &r.Options, &resp) + return +} + +// This object specifies a specific type of Software Component: A Host Intrusion Protection System instance. 
+type Software_Component_HostIps struct { + Session *session.Session + Options sl.Options +} + +// GetSoftwareComponentHostIpsService returns an instance of the Software_Component_HostIps SoftLayer service +func GetSoftwareComponentHostIpsService(sess *session.Session) Software_Component_HostIps { + return Software_Component_HostIps{Session: sess} +} + +func (r Software_Component_HostIps) Id(id int) Software_Component_HostIps { + r.Options.Id = &id + return r +} + +func (r Software_Component_HostIps) Mask(mask string) Software_Component_HostIps { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Software_Component_HostIps) Filter(filter string) Software_Component_HostIps { + r.Options.Filter = filter + return r +} + +func (r Software_Component_HostIps) Limit(limit int) Software_Component_HostIps { + r.Options.Limit = &limit + return r +} + +func (r Software_Component_HostIps) Offset(offset int) Software_Component_HostIps { + r.Options.Offset = &offset + return r +} + +// Retrieve The average amount of time that a software component takes to install. +func (r Software_Component_HostIps) GetAverageInstallationDuration() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getAverageInstallationDuration", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a software component. +func (r Software_Component_HostIps) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Get the current Host IPS policies. +func (r Software_Component_HostIps) GetCurrentHostIpsPolicies() (resp []datatypes.Container_Software_Component_HostIps_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getCurrentHostIpsPolicies", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware this Software Component is installed upon. +func (r Software_Component_HostIps) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getHardware", nil, &r.Options, &resp) + return +} + +// Attempt to retrieve the file associated with a software component. If the software component does not support downloading license files an exception will be thrown. +func (r Software_Component_HostIps) GetLicenseFile() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getLicenseFile", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_Component_HostIps) GetObject() (resp datatypes.Software_Component_HostIps, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve History Records for Software Passwords. +func (r Software_Component_HostIps) GetPasswordHistory() (resp []datatypes.Software_Component_Password_History, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getPasswordHistory", nil, &r.Options, &resp) + return +} + +// Retrieve Username/Password pairs used for access to this Software Installation. 
+func (r Software_Component_HostIps) GetPasswords() (resp []datatypes.Software_Component_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getPasswords", nil, &r.Options, &resp) + return +} + +// Retrieve The Software Description of this Software Component. +func (r Software_Component_HostIps) GetSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getSoftwareDescription", nil, &r.Options, &resp) + return +} + +// Retrieve The License this Software Component uses. +func (r Software_Component_HostIps) GetSoftwareLicense() (resp datatypes.Software_License, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getSoftwareLicense", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_Component_HostIps) GetVendorSetUpConfiguration() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getVendorSetUpConfiguration", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual guest this software component is installed upon. +func (r Software_Component_HostIps) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// Update the Host IPS policies. To retrieve valid policy options you must use the provided relationships. +func (r Software_Component_HostIps) UpdateHipsPolicies(newIpsMode *string, newIpsProtection *string, newFirewallMode *string, newFirewallRuleset *string, newApplicationMode *string, newApplicationRuleset *string, newEnforcementPolicy *string) (resp bool, err error) { + params := []interface{}{ + newIpsMode, + newIpsProtection, + newFirewallMode, + newFirewallRuleset, + newApplicationMode, + newApplicationRuleset, + newEnforcementPolicy, + } + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "updateHipsPolicies", params, &r.Options, &resp) + return +} + +// This SoftLayer_Software_Component_Password data type contains a password for a specific software component instance. +type Software_Component_Password struct { + Session *session.Session + Options sl.Options +} + +// GetSoftwareComponentPasswordService returns an instance of the Software_Component_Password SoftLayer service +func GetSoftwareComponentPasswordService(sess *session.Session) Software_Component_Password { + return Software_Component_Password{Session: sess} +} + +func (r Software_Component_Password) Id(id int) Software_Component_Password { + r.Options.Id = &id + return r +} + +func (r Software_Component_Password) Mask(mask string) Software_Component_Password { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Software_Component_Password) Filter(filter string) Software_Component_Password { + r.Options.Filter = filter + return r +} + +func (r Software_Component_Password) Limit(limit int) Software_Component_Password { + r.Options.Limit = &limit + return r +} + +func (r Software_Component_Password) Offset(offset int) Software_Component_Password { + r.Options.Offset = &offset + return r +} + +// Create a password for a software component. 
+func (r Software_Component_Password) CreateObject(templateObject *datatypes.Software_Component_Password) (resp datatypes.Software_Component_Password, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "createObject", params, &r.Options, &resp) + return +} + +// Create more than one password for a software component. +func (r Software_Component_Password) CreateObjects(templateObjects []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "createObjects", params, &r.Options, &resp) + return +} + +// Delete a password from a software component. +func (r Software_Component_Password) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "deleteObject", nil, &r.Options, &resp) + return +} + +// Delete more than one passwords from a software component. +func (r Software_Component_Password) DeleteObjects(templateObjects []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "deleteObjects", params, &r.Options, &resp) + return +} + +// Edit the properties of a software component password such as the username, password, port, and notes. +func (r Software_Component_Password) EditObject(templateObject *datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "editObject", params, &r.Options, &resp) + return +} + +// Edit more than one password from a software component. +func (r Software_Component_Password) EditObjects(templateObjects []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "editObjects", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_Component_Password) GetObject() (resp datatypes.Software_Component_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Software_Component instance that this username/password pair is valid for. +func (r Software_Component_Password) GetSoftware() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "getSoftware", nil, &r.Options, &resp) + return +} + +// Retrieve SSH keys to be installed on the server during provisioning or an OS reload. +func (r Software_Component_Password) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "getSshKeys", nil, &r.Options, &resp) + return +} + +// This class holds a description for a specific installation of a Software Component. +// +// SoftLayer_Software_Licenses tie a Software Component (A specific installation on a piece of hardware) to it's description. +// +// The "Manufacturer" and "Name" properties of a SoftLayer_Software_Description are used by the framework to factory specific objects, objects that may have special methods for that specific piece of software, or objects that contain application specific data, such as default ports. 
For example, if you create a SoftLayer_Software_Component who's SoftLayer_Software_License points to the SoftLayer_Software_Description for "Swsoft" "Plesk", you'll actually get a SoftLayer_Software_Component_Swsoft_Plesk object. +type Software_Description struct { + Session *session.Session + Options sl.Options +} + +// GetSoftwareDescriptionService returns an instance of the Software_Description SoftLayer service +func GetSoftwareDescriptionService(sess *session.Session) Software_Description { + return Software_Description{Session: sess} +} + +func (r Software_Description) Id(id int) Software_Description { + r.Options.Id = &id + return r +} + +func (r Software_Description) Mask(mask string) Software_Description { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Software_Description) Filter(filter string) Software_Description { + r.Options.Filter = filter + return r +} + +func (r Software_Description) Limit(limit int) Software_Description { + r.Options.Limit = &limit + return r +} + +func (r Software_Description) Offset(offset int) Software_Description { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Software_Description) GetAllObjects() (resp []datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Software_Description) GetAttributes() (resp []datatypes.Software_Description_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve The average amount of time that a software description takes to install. +func (r Software_Description) GetAverageInstallationDuration() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getAverageInstallationDuration", nil, &r.Options, &resp) + return +} + +// Retrieve A list of the software descriptions that are compatible with this software description. +func (r Software_Description) GetCompatibleSoftwareDescriptions() (resp []datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getCompatibleSoftwareDescriptions", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_Description) GetCustomerOwnedLicenseDescriptions() (resp []datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getCustomerOwnedLicenseDescriptions", nil, &r.Options, &resp) + return +} + +// Retrieve The feature attributes of a software description. +func (r Software_Description) GetFeatures() (resp []datatypes.Software_Description_Feature, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getFeatures", nil, &r.Options, &resp) + return +} + +// Retrieve The latest version of a software description. 
+func (r Software_Description) GetLatestVersion() (resp []datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getLatestVersion", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_Description) GetObject() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The various product items to which this software description is linked. +func (r Software_Description) GetProductItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getProductItems", nil, &r.Options, &resp) + return +} + +// Retrieve This details the provisioning transaction group for this software. This is only valid for Operating System software. +func (r Software_Description) GetProvisionTransactionGroup() (resp datatypes.Provisioning_Version1_Transaction_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getProvisionTransactionGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The transaction group that a software description belongs to. A transaction group is a sequence of transactions that must be performed in a specific order for the installation of software. +func (r Software_Description) GetReloadTransactionGroup() (resp datatypes.Provisioning_Version1_Transaction_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getReloadTransactionGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The default user created for a given a software description. +func (r Software_Description) GetRequiredUser() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getRequiredUser", nil, &r.Options, &resp) + return +} + +// Retrieve Software Licenses that govern this Software Description. +func (r Software_Description) GetSoftwareLicenses() (resp []datatypes.Software_License, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getSoftwareLicenses", nil, &r.Options, &resp) + return +} + +// Retrieve A suggestion for an upgrade path from this Software Description +func (r Software_Description) GetUpgradeSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getUpgradeSoftwareDescription", nil, &r.Options, &resp) + return +} + +// Retrieve A suggestion for an upgrade path from this Software Description (Deprecated - Use upgradeSoftwareDescription) +func (r Software_Description) GetUpgradeSwDesc() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getUpgradeSwDesc", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Software_Description) GetValidFilesystemTypes() (resp []datatypes.Configuration_Storage_Filesystem_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Description", "getValidFilesystemTypes", nil, &r.Options, &resp) + return +} + +// SoftLayer_Software_VirtualLicense is the application class that handles a special type of Software License. Most software licenses are licensed to a specific hardware ID; virtual licenses are designed for virtual machines and therefore are assigned to an IP Address. Not all software packages can be "virtual licensed". 
+type Software_VirtualLicense struct { + Session *session.Session + Options sl.Options +} + +// GetSoftwareVirtualLicenseService returns an instance of the Software_VirtualLicense SoftLayer service +func GetSoftwareVirtualLicenseService(sess *session.Session) Software_VirtualLicense { + return Software_VirtualLicense{Session: sess} +} + +func (r Software_VirtualLicense) Id(id int) Software_VirtualLicense { + r.Options.Id = &id + return r +} + +func (r Software_VirtualLicense) Mask(mask string) Software_VirtualLicense { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Software_VirtualLicense) Filter(filter string) Software_VirtualLicense { + r.Options.Filter = filter + return r +} + +func (r Software_VirtualLicense) Limit(limit int) Software_VirtualLicense { + r.Options.Limit = &limit + return r +} + +func (r Software_VirtualLicense) Offset(offset int) Software_VirtualLicense { + r.Options.Offset = &offset + return r +} + +// Retrieve The customer account this Virtual License belongs to. +func (r Software_VirtualLicense) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a software virtual license. +func (r Software_VirtualLicense) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware record to which the software virtual license is assigned. +func (r Software_VirtualLicense) GetHostHardware() (resp datatypes.Hardware_Server, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getHostHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The IP Address record associated with a virtual license. +func (r Software_VirtualLicense) GetIpAddressRecord() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getIpAddressRecord", nil, &r.Options, &resp) + return +} + +// Attempt to retrieve the file associated with a virtual license, if such a file exists. If there is no file for this virtual license, calling this method will either throw an exception or return false. +func (r Software_VirtualLicense) GetLicenseFile() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getLicenseFile", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Software_VirtualLicense object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Software_VirtualLicense service. You can only retrieve Virtual Licenses assigned to your account number. +func (r Software_VirtualLicense) GetObject() (resp datatypes.Software_VirtualLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Software_Description that this virtual license is for. +func (r Software_VirtualLicense) GetSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getSoftwareDescription", nil, &r.Options, &resp) + return +} + +// Retrieve The subnet this Virtual License's IP address belongs to. 
+func (r Software_VirtualLicense) GetSubnet() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getSubnet", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/survey.go b/vendor/github.com/softlayer/softlayer-go/services/survey.go new file mode 100644 index 000000000000..aa956c8423bd --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/survey.go @@ -0,0 +1,112 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// The SoftLayer_Survey data type contains general information relating to a single SoftLayer survey. +type Survey struct { + Session *session.Session + Options sl.Options +} + +// GetSurveyService returns an instance of the Survey SoftLayer service +func GetSurveyService(sess *session.Session) Survey { + return Survey{Session: sess} +} + +func (r Survey) Id(id int) Survey { + r.Options.Id = &id + return r +} + +func (r Survey) Mask(mask string) Survey { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Survey) Filter(filter string) Survey { + r.Options.Filter = filter + return r +} + +func (r Survey) Limit(limit int) Survey { + r.Options.Limit = &limit + return r +} + +func (r Survey) Offset(offset int) Survey { + r.Options.Offset = &offset + return r +} + +// Provides survey details for the given type +func (r Survey) GetActiveSurveyByType(typ *string) (resp datatypes.Survey, err error) { + params := []interface{}{ + typ, + } + err = r.Session.DoRequest("SoftLayer_Survey", "getActiveSurveyByType", params, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Survey object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Survey service. You can only retrieve the survey that your portal user has taken. +func (r Survey) GetObject() (resp datatypes.Survey, err error) { + err = r.Session.DoRequest("SoftLayer_Survey", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The questions for a survey. 
+func (r Survey) GetQuestions() (resp []datatypes.Survey_Question, err error) { + err = r.Session.DoRequest("SoftLayer_Survey", "getQuestions", nil, &r.Options, &resp) + return +} + +// Retrieve The status of the survey +func (r Survey) GetStatus() (resp datatypes.Survey_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Survey", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The type of survey +func (r Survey) GetType() (resp datatypes.Survey_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Survey", "getType", nil, &r.Options, &resp) + return +} + +// Response to a SoftLayer survey's questions. +func (r Survey) TakeSurvey(responses []datatypes.Survey_Response) (resp bool, err error) { + params := []interface{}{ + responses, + } + err = r.Session.DoRequest("SoftLayer_Survey", "takeSurvey", params, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/tag.go b/vendor/github.com/softlayer/softlayer-go/services/tag.go new file mode 100644 index 000000000000..644ac1a77574 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/tag.go @@ -0,0 +1,123 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// The SoftLayer_Tag data type is an optional type associated with hardware. The account ID that the tag is tied to, and the tag itself are stored in this data type. There is also a flag to denote whether the tag is internal or not. +type Tag struct { + Session *session.Session + Options sl.Options +} + +// GetTagService returns an instance of the Tag SoftLayer service +func GetTagService(sess *session.Session) Tag { + return Tag{Session: sess} +} + +func (r Tag) Id(id int) Tag { + r.Options.Id = &id + return r +} + +func (r Tag) Mask(mask string) Tag { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Tag) Filter(filter string) Tag { + r.Options.Filter = filter + return r +} + +func (r Tag) Limit(limit int) Tag { + r.Options.Limit = &limit + return r +} + +func (r Tag) Offset(offset int) Tag { + r.Options.Offset = &offset + return r +} + +// This function is responsible for setting the Tags values. The internal flag is set to 0 if the user is a customer, and 1 otherwise. AccountId is set to the account bound to the user, and the tags name is set to the clean version of the tag inputted by the user. +func (r Tag) AutoComplete(tag *string) (resp []datatypes.Tag, err error) { + params := []interface{}{ + tag, + } + err = r.Session.DoRequest("SoftLayer_Tag", "autoComplete", params, &r.Options, &resp) + return +} + +// Retrieve The account to which the tag is tied. 
+func (r Tag) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Tag", "getAccount", nil, &r.Options, &resp) + return +} + +// Returns all tags of a given object type. +func (r Tag) GetAllTagTypes() (resp []datatypes.Tag_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Tag", "getAllTagTypes", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Tag) GetObject() (resp datatypes.Tag, err error) { + err = r.Session.DoRequest("SoftLayer_Tag", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve References that tie object to the tag. +func (r Tag) GetReferences() (resp []datatypes.Tag_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Tag", "getReferences", nil, &r.Options, &resp) + return +} + +// Returns the Tag object with a given name. The user types in the tag name and this method returns the tag with that name. +func (r Tag) GetTagByTagName(tagList *string) (resp []datatypes.Tag, err error) { + params := []interface{}{ + tagList, + } + err = r.Session.DoRequest("SoftLayer_Tag", "getTagByTagName", params, &r.Options, &resp) + return +} + +// Tag an object by passing in one or more tags separated by a comma. Tag references are cleared out every time this method is called. If your object is already tagged you will need to pass the current tags along with any new ones. To remove all tag references pass an empty string. To remove one or more tags omit them from the tag list. The characters permitted are A-Z, 0-9, whitespace, _ (underscore), - (hypen), . (period), and : (colon). All other characters will be stripped away. +func (r Tag) SetTags(tags *string, keyName *string, resourceTableId *int) (resp bool, err error) { + params := []interface{}{ + tags, + keyName, + resourceTableId, + } + err = r.Session.DoRequest("SoftLayer_Tag", "setTags", params, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/ticket.go b/vendor/github.com/softlayer/softlayer-go/services/ticket.go new file mode 100644 index 000000000000..42f310834dce --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/ticket.go @@ -0,0 +1,982 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// The SoftLayer_Ticket data type models a single SoftLayer customer support or notification ticket. Each ticket object contains references to it's updates, the user it's assigned to, the SoftLayer department and employee that it's assigned to, and any hardware objects or attached files associated with the ticket. Tickets are described in further detail on the [[SoftLayer_Ticket]] service page. 
+// +// To create a support ticket execute the [[SoftLayer_Ticket::createStandardTicket|createStandardTicket]] or [[SoftLayer_Ticket::createAdministrativeTicket|createAdministrativeTicket]] methods in the SoftLayer_Ticket service. To create an upgrade ticket for the SoftLayer sales group execute the [[SoftLayer_Ticket::createUpgradeTicket|createUpgradeTicket]]. +type Ticket struct { + Session *session.Session + Options sl.Options +} + +// GetTicketService returns an instance of the Ticket SoftLayer service +func GetTicketService(sess *session.Session) Ticket { + return Ticket{Session: sess} +} + +func (r Ticket) Id(id int) Ticket { + r.Options.Id = &id + return r +} + +func (r Ticket) Mask(mask string) Ticket { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Ticket) Filter(filter string) Ticket { + r.Options.Filter = filter + return r +} + +func (r Ticket) Limit(limit int) Ticket { + r.Options.Limit = &limit + return r +} + +func (r Ticket) Offset(offset int) Ticket { + r.Options.Offset = &offset + return r +} + +// +// +// +func (r Ticket) AddAssignedAgent(agentId *int) (err error) { + var resp datatypes.Void + params := []interface{}{ + agentId, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "addAssignedAgent", params, &r.Options, &resp) + return +} + +// Creates new additional emails for assigned user if new emails are provided. Attaches any newly created additional emails to ticket. +func (r Ticket) AddAttachedAdditionalEmails(emails []string) (resp bool, err error) { + params := []interface{}{ + emails, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "addAttachedAdditionalEmails", params, &r.Options, &resp) + return +} + +// Attach the given Dedicated Host to a SoftLayer ticket. An attachment provides an easy way for SoftLayer's employees to quickly look up your records in the case of specific issues. +func (r Ticket) AddAttachedDedicatedHost(dedicatedHostId *int) (resp datatypes.Ticket_Attachment_Dedicated_Host, err error) { + params := []interface{}{ + dedicatedHostId, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "addAttachedDedicatedHost", params, &r.Options, &resp) + return +} + +// Attach the given file to a SoftLayer ticket. A file attachment is a convenient way to submit non-textual error reports to SoftLayer employees in a ticket. File attachments to tickets must have a unique name. +func (r Ticket) AddAttachedFile(fileAttachment *datatypes.Container_Utility_File_Attachment) (resp datatypes.Ticket_Attachment_File, err error) { + params := []interface{}{ + fileAttachment, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "addAttachedFile", params, &r.Options, &resp) + return +} + +// Attach the given hardware to a SoftLayer ticket. A hardware attachment provides an easy way for SoftLayer's employees to quickly look up your hardware records in the case of hardware-specific issues. +func (r Ticket) AddAttachedHardware(hardwareId *int) (resp datatypes.Ticket_Attachment_Hardware, err error) { + params := []interface{}{ + hardwareId, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "addAttachedHardware", params, &r.Options, &resp) + return +} + +// Attach the given CloudLayer Computing Instance to a SoftLayer ticket. An attachment provides an easy way for SoftLayer's employees to quickly look up your records in the case of specific issues. 
+func (r Ticket) AddAttachedVirtualGuest(guestId *int, callCommit *bool) (resp datatypes.Ticket_Attachment_Virtual_Guest, err error) { + params := []interface{}{ + guestId, + callCommit, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "addAttachedVirtualGuest", params, &r.Options, &resp) + return +} + +// As part of the customer service process SoftLayer has provided a quick feedback mechanism for its customers to rate their overall experience with SoftLayer after a ticket is closed. addFinalComments() sets these comments for a ticket update made by a SoftLayer employee. Final comments may only be set on closed tickets, can only be set once, and may not exceed 4000 characters in length. Once the comments are set ''addFinalComments()'' returns a boolean true. +func (r Ticket) AddFinalComments(finalComments *string) (resp bool, err error) { + params := []interface{}{ + finalComments, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "addFinalComments", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Ticket) AddScheduledAlert(activationTime *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + activationTime, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "addScheduledAlert", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Ticket) AddScheduledAutoClose(activationTime *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + activationTime, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "addScheduledAutoClose", params, &r.Options, &resp) + return +} + +// Add an update to a ticket. A ticket update's entry has a maximum length of 4000 characters, so ''addUpdate()'' splits the ''entry'' property in the ''templateObject'' parameter into 3900 character blocks and creates one entry per 3900 character block. Once complete ''addUpdate()'' emails the ticket's owner and additional email addresses with an update message if the ticket's ''notifyUserOnUpdateFlag'' is set. If the ticket is a Legal or Abuse ticket, then the account's abuse emails are also notified when the updates are processed. Finally, ''addUpdate()'' returns an array of the newly created ticket updates. +func (r Ticket) AddUpdate(templateObject *datatypes.Ticket_Update, attachedFiles []datatypes.Container_Utility_File_Attachment) (resp []datatypes.Ticket_Update, err error) { + params := []interface{}{ + templateObject, + attachedFiles, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "addUpdate", params, &r.Options, &resp) + return +} + +// Create an administrative support ticket. Use an administrative ticket if you require SoftLayer's assistance managing your server or content. If you are experiencing an issue with SoftLayer's hardware, network, or services then please open a standard support ticket. +// +// Support tickets may only be created in the open state. The SoftLayer API defaults new ticket properties ''userEditableFlag'' to true, ''accountId'' to the id of the account that your API user belongs to, and ''statusId'' to 1001 (or "open"). You may not assign your new to ticket to users that your API user does not have access to. +// +// Once your ticket is created it is placed in a queue for SoftLayer employees to work. As they update the ticket new [[SoftLayer_Ticket_Update]] entries are added to the ticket object. +// +// Administrative support tickets add a one-time $3USD charge to your account. 
+func (r Ticket) CreateAdministrativeTicket(templateObject *datatypes.Ticket, contents *string, attachmentId *int, rootPassword *string, controlPanelPassword *string, accessPort *string, attachedFiles []datatypes.Container_Utility_File_Attachment, attachmentType *string) (resp datatypes.Ticket, err error) { + params := []interface{}{ + templateObject, + contents, + attachmentId, + rootPassword, + controlPanelPassword, + accessPort, + attachedFiles, + attachmentType, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "createAdministrativeTicket", params, &r.Options, &resp) + return +} + +// A cancel server request creates a ticket to cancel the resource on next bill date. The hardware ID parameter is required to determine which server is to be cancelled. NOTE: Hourly bare metal servers will be cancelled on next bill date. +// +// The reason parameter could be from the list below: +// * "No longer needed" +// * "Business closing down" +// * "Server / Upgrade Costs" +// * "Migrating to larger server" +// * "Migrating to smaller server" +// * "Migrating to a different SoftLayer datacenter" +// * "Network performance / latency" +// * "Support response / timing" +// * "Sales process / upgrades" +// * "Moving to competitor" +// +// +// The content parameter describes further the reason for cancelling the server. +func (r Ticket) CreateCancelServerTicket(attachmentId *int, reason *string, content *string, cancelAssociatedItems *bool, attachmentType *string) (resp datatypes.Ticket, err error) { + params := []interface{}{ + attachmentId, + reason, + content, + cancelAssociatedItems, + attachmentType, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "createCancelServerTicket", params, &r.Options, &resp) + return +} + +// A cancel service request creates a sales ticket. The hardware ID parameter is required to determine which server is to be cancelled. +// +// The reason parameter could be from the list below: +// * "No longer needed" +// * "Business closing down" +// * "Server / Upgrade Costs" +// * "Migrating to larger server" +// * "Migrating to smaller server" +// * "Migrating to a different SoftLayer datacenter" +// * "Network performance / latency" +// * "Support response / timing" +// * "Sales process / upgrades" +// * "Moving to competitor" +// +// +// The content parameter describes further the reason for cancelling service. +func (r Ticket) CreateCancelServiceTicket(attachmentId *int, reason *string, content *string, attachmentType *string) (resp datatypes.Ticket, err error) { + params := []interface{}{ + attachmentId, + reason, + content, + attachmentType, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "createCancelServiceTicket", params, &r.Options, &resp) + return +} + +// Create a standard support ticket. Use a standard support ticket if you need to work out a problem related to SoftLayer's hardware, network, or services. If you require SoftLayer's assistance managing your server or content then please open an administrative ticket. +// +// Support tickets may only be created in the open state. The SoftLayer API defaults new ticket properties ''userEditableFlag'' to true, ''accountId'' to the id of the account that your API user belongs to, and ''statusId'' to 1001 (or "open"). You may not assign your new to ticket to users that your API user does not have access to. +// +// Once your ticket is created it is placed in a queue for SoftLayer employees to work. As they update the ticket new [[SoftLayer_Ticket_Update]] entries are added to the ticket object. 
+func (r Ticket) CreateStandardTicket(templateObject *datatypes.Ticket, contents *string, attachmentId *int, rootPassword *string, controlPanelPassword *string, accessPort *string, attachedFiles []datatypes.Container_Utility_File_Attachment, attachmentType *string) (resp datatypes.Ticket, err error) { + params := []interface{}{ + templateObject, + contents, + attachmentId, + rootPassword, + controlPanelPassword, + accessPort, + attachedFiles, + attachmentType, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "createStandardTicket", params, &r.Options, &resp) + return +} + +// Create a ticket for the SoftLayer sales team to perform a hardware or service upgrade. Our sales team will work with you on upgrade feasibility and pricing and then send the upgrade ticket to the proper department to perform the actual upgrade. Service affecting upgrades, such as server hardware or CloudLayer Computing Instance upgrades that require the server powered down must have a two hour maintenance specified for our datacenter engineers to perform your upgrade. Account level upgrades, such as adding PPTP VPN users, CDNLayer accounts, and monitoring services are processed much faster and do not require a maintenance window. +func (r Ticket) CreateUpgradeTicket(attachmentId *int, genericUpgrade *string, upgradeMaintenanceWindow *string, details *string, attachmentType *string, title *string) (resp datatypes.Ticket, err error) { + params := []interface{}{ + attachmentId, + genericUpgrade, + upgradeMaintenanceWindow, + details, + attachmentType, + title, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "createUpgradeTicket", params, &r.Options, &resp) + return +} + +// Edit a SoftLayer ticket. The edit method is two-fold. You may either edit a ticket itself, add an update to a ticket, attach up to two files to a ticket, or perform all of these tasks. The SoftLayer API ignores changes made to the ''userEditableFlag'' and ''accountId'' properties. You may not assign a ticket to a user that your API account does not have access to. You may not enter a custom title for standard support tickets, buy may do so when editing an administrative ticket. Finally, you may not close a ticket using this method. Please contact SoftLayer if you need a ticket closed. +// +// If you need to only add an update to a ticket then please use the [[SoftLayer_Ticket::addUpdate|addUpdate]] method in this service. Likewise if you need to only attach a file to a ticket then use the [[SoftLayer_Ticket::addAttachedFile|addAttachedFile]] method. The edit method exists as a convenience if you need to perform all these tasks at once. +func (r Ticket) Edit(templateObject *datatypes.Ticket, contents *string, attachedFiles []datatypes.Container_Utility_File_Attachment) (resp datatypes.Ticket, err error) { + params := []interface{}{ + templateObject, + contents, + attachedFiles, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "edit", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer customer account associated with a ticket. +func (r Ticket) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getAccount", nil, &r.Options, &resp) + return +} + +// getAllTicketGroups() retrieves a list of all groups that a ticket may be assigned to. Ticket groups represent the internal department at SoftLayer who a ticket is assigned to. +// +// Every SoftLayer ticket has groupId and ticketGroup properties that correspond to one of the groups returned by getAllTicketGroups(). 
+func (r Ticket) GetAllTicketGroups() (resp []datatypes.Ticket_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getAllTicketGroups", nil, &r.Options, &resp) + return +} + +// getAllTicketStatuses() retrieves a list of all statuses that a ticket may exist in. Ticket status represent the current state of a ticket, usually "open", "assigned", and "closed". +// +// Every SoftLayer ticket has statusId and status properties that correspond to one of the statuses returned by getAllTicketStatuses(). +func (r Ticket) GetAllTicketStatuses() (resp []datatypes.Ticket_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getAllTicketStatuses", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Ticket) GetAssignedAgents() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getAssignedAgents", nil, &r.Options, &resp) + return +} + +// Retrieve The portal user that a ticket is assigned to. +func (r Ticket) GetAssignedUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getAssignedUser", nil, &r.Options, &resp) + return +} + +// Retrieve The list of additional emails to notify when a ticket update is made. +func (r Ticket) GetAttachedAdditionalEmails() (resp []datatypes.User_Customer_AdditionalEmail, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedAdditionalEmails", nil, &r.Options, &resp) + return +} + +// Retrieve The Dedicated Hosts associated with a ticket. This is used in cases where a ticket is directly associated with one or more Dedicated Hosts. +func (r Ticket) GetAttachedDedicatedHosts() (resp []datatypes.Virtual_DedicatedHost, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedDedicatedHosts", nil, &r.Options, &resp) + return +} + +// Retrieve the file attached to a SoftLayer ticket by it's given identifier. To retrieve a list of files attached to a ticket either call the SoftLayer_Ticket::getAttachedFiles method or call SoftLayer_Ticket::getObject with ''attachedFiles'' defined in an object mask. +func (r Ticket) GetAttachedFile(attachmentId *int) (resp []byte, err error) { + params := []interface{}{ + attachmentId, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedFile", params, &r.Options, &resp) + return +} + +// Retrieve The files attached to a ticket. +func (r Ticket) GetAttachedFiles() (resp []datatypes.Ticket_Attachment_File, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedFiles", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware associated with a ticket. This is used in cases where a ticket is directly associated with one or more pieces of hardware. +func (r Ticket) GetAttachedHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedHardware", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Ticket) GetAttachedHardwareCount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedHardwareCount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Ticket) GetAttachedResources() (resp []datatypes.Ticket_Attachment, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedResources", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual guests associated with a ticket. This is used in cases where a ticket is directly associated with one or more virtualized guests installations or Virtual Servers. 
+func (r Ticket) GetAttachedVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve Ticket is waiting on a response from a customer flag. +func (r Ticket) GetAwaitingUserResponseFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getAwaitingUserResponseFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A service cancellation request. +func (r Ticket) GetCancellationRequest() (resp datatypes.Billing_Item_Cancellation_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getCancellationRequest", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Ticket) GetEmployeeAttachments() (resp []datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getEmployeeAttachments", nil, &r.Options, &resp) + return +} + +// Retrieve A ticket's associated EU compliant record +func (r Ticket) GetEuSupportedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getEuSupportedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The first physical or virtual server attached to a ticket. +func (r Ticket) GetFirstAttachedResource() (resp datatypes.Ticket_Attachment, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getFirstAttachedResource", nil, &r.Options, &resp) + return +} + +// Retrieve The first update made to a ticket. This is typically the contents of a ticket when it's created. +func (r Ticket) GetFirstUpdate() (resp datatypes.Ticket_Update, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getFirstUpdate", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer department that a ticket is assigned to. +func (r Ticket) GetGroup() (resp datatypes.Ticket_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The invoice items associated with a ticket. Ticket based invoice items only exist when a ticket incurs a fee that has been invoiced. +func (r Ticket) GetInvoiceItems() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getInvoiceItems", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Ticket) GetLastActivity() (resp datatypes.Ticket_Activity, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getLastActivity", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Ticket) GetLastEditor() (resp datatypes.User_Interface, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getLastEditor", nil, &r.Options, &resp) + return +} + +// Retrieve The last update made to a ticket. +func (r Ticket) GetLastUpdate() (resp datatypes.Ticket_Update, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getLastUpdate", nil, &r.Options, &resp) + return +} + +// Retrieve A timestamp of the last time the Ticket was viewed by the active user. +func (r Ticket) GetLastViewedDate() (resp datatypes.Time, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getLastViewedDate", nil, &r.Options, &resp) + return +} + +// Retrieve A ticket's associated location within the SoftLayer location hierarchy. +func (r Ticket) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve True if there are new, unread updates to this ticket for the current user, False otherwise. 
+func (r Ticket) GetNewUpdatesFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getNewUpdatesFlag", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Ticket object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Ticket service. You can only retrieve tickets that are associated with your SoftLayer customer account. +func (r Ticket) GetObject() (resp datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Ticket) GetScheduledActions() (resp []datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getScheduledActions", nil, &r.Options, &resp) + return +} + +// Retrieve The invoice associated with a ticket. Only tickets with an associated administrative charge have an invoice. +func (r Ticket) GetServerAdministrationBillingInvoice() (resp datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getServerAdministrationBillingInvoice", nil, &r.Options, &resp) + return +} + +// Retrieve The refund invoice associated with a ticket. Only tickets with a refund applied in them have an associated refund invoice. +func (r Ticket) GetServerAdministrationRefundInvoice() (resp datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getServerAdministrationRefundInvoice", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Ticket) GetServiceProvider() (resp datatypes.Service_Provider, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getServiceProvider", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Ticket) GetState() (resp []datatypes.Ticket_State, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getState", nil, &r.Options, &resp) + return +} + +// Retrieve A ticket's status. +func (r Ticket) GetStatus() (resp datatypes.Ticket_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve A ticket's subject. Only standard support tickets have an associated subject. A standard support ticket's title corresponds with it's subject's name. +func (r Ticket) GetSubject() (resp datatypes.Ticket_Subject, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getSubject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Ticket) GetTagReferences() (resp []datatypes.Tag_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getTagReferences", nil, &r.Options, &resp) + return +} + +// Retrieve all tickets closed since a given date. +func (r Ticket) GetTicketsClosedSinceDate(closeDate *datatypes.Time) (resp []datatypes.Ticket, err error) { + params := []interface{}{ + closeDate, + } + err = r.Session.DoRequest("SoftLayer_Ticket", "getTicketsClosedSinceDate", params, &r.Options, &resp) + return +} + +// Retrieve A ticket's updates. +func (r Ticket) GetUpdates() (resp []datatypes.Ticket_Update, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getUpdates", nil, &r.Options, &resp) + return +} + +// Mark a ticket as viewed. All currently posted updates will be marked as viewed. The lastViewedDate property will be updated to the current time. 
+func (r Ticket) MarkAsViewed() (err error) {
+	var resp datatypes.Void
+	err = r.Session.DoRequest("SoftLayer_Ticket", "markAsViewed", nil, &r.Options, &resp)
+	return
+}
+
+//
+//
+//
+func (r Ticket) RemoveAssignedAgent(agentId *int) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		agentId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "removeAssignedAgent", params, &r.Options, &resp)
+	return
+}
+
+// removeAttachedAdditionalEmails() removes the specified email addresses from a ticket's notification list. If one of the provided email addresses is not attached to the ticket then ''removeAttachedAdditionalEmails()'' ignores it and continues to the next one. Once the email addresses are removed ''removeAttachedAdditionalEmails()'' returns a boolean true.
+func (r Ticket) RemoveAttachedAdditionalEmails(emails []string) (resp bool, err error) {
+	params := []interface{}{
+		emails,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "removeAttachedAdditionalEmails", params, &r.Options, &resp)
+	return
+}
+
+// Detach the given hardware from a SoftLayer ticket. Removing a hardware attachment may delay ticket processing time if the hardware removed is relevant to the ticket's issue. Return a boolean true upon successful hardware detachment.
+func (r Ticket) RemoveAttachedHardware(hardwareId *int) (resp bool, err error) {
+	params := []interface{}{
+		hardwareId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "removeAttachedHardware", params, &r.Options, &resp)
+	return
+}
+
+// Detach the given CloudLayer Computing Instance from a SoftLayer ticket. Removing an attachment may delay ticket processing time if the instance removed is relevant to the ticket's issue. Return a boolean true upon successful detachment.
+func (r Ticket) RemoveAttachedVirtualGuest(guestId *int) (resp bool, err error) {
+	params := []interface{}{
+		guestId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "removeAttachedVirtualGuest", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Ticket) RemoveScheduledAlert() (err error) {
+	var resp datatypes.Void
+	err = r.Session.DoRequest("SoftLayer_Ticket", "removeScheduledAlert", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Ticket) RemoveScheduledAutoClose() (err error) {
+	var resp datatypes.Void
+	err = r.Session.DoRequest("SoftLayer_Ticket", "removeScheduledAutoClose", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Ticket) SetTags(tags *string) (resp bool, err error) {
+	params := []interface{}{
+		tags,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "setTags", params, &r.Options, &resp)
+	return
+}
+
+// (DEPRECATED) Use the [[SoftLayer_Ticket_Survey::getPreference]] method.
+func (r Ticket) SurveyEligible() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "surveyEligible", nil, &r.Options, &resp)
+	return
+}
+
+// Creates new additional emails for the assigned user if new emails are provided. Attaches any newly created additional emails to the ticket. Removes any additional emails from a ticket that are not provided as part of $emails.
+func (r Ticket) UpdateAttachedAdditionalEmails(emails []string) (resp bool, err error) {
+	params := []interface{}{
+		emails,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "updateAttachedAdditionalEmails", params, &r.Options, &resp)
+	return
+}
+
+// SoftLayer tickets can have files attached to them. 
Attaching a file to a ticket is a good way to report issues, provide documentation, and give examples of an issue. Both SoftLayer customers and employees have the ability to attach files to a ticket. The SoftLayer_Ticket_Attachment_File data type models a single file attached to a ticket. +type Ticket_Attachment_File struct { + Session *session.Session + Options sl.Options +} + +// GetTicketAttachmentFileService returns an instance of the Ticket_Attachment_File SoftLayer service +func GetTicketAttachmentFileService(sess *session.Session) Ticket_Attachment_File { + return Ticket_Attachment_File{Session: sess} +} + +func (r Ticket_Attachment_File) Id(id int) Ticket_Attachment_File { + r.Options.Id = &id + return r +} + +func (r Ticket_Attachment_File) Mask(mask string) Ticket_Attachment_File { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Ticket_Attachment_File) Filter(filter string) Ticket_Attachment_File { + r.Options.Filter = filter + return r +} + +func (r Ticket_Attachment_File) Limit(limit int) Ticket_Attachment_File { + r.Options.Limit = &limit + return r +} + +func (r Ticket_Attachment_File) Offset(offset int) Ticket_Attachment_File { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Ticket_Attachment_File) GetExtensionWhitelist() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket_Attachment_File", "getExtensionWhitelist", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Ticket_Attachment_File) GetObject() (resp datatypes.Ticket_Attachment_File, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket_Attachment_File", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Ticket_Attachment_File) GetTicket() (resp datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket_Attachment_File", "getTicket", nil, &r.Options, &resp) + return +} + +// Retrieve The ticket that a file is attached to. 
+func (r Ticket_Attachment_File) GetUpdate() (resp datatypes.Ticket_Update, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket_Attachment_File", "getUpdate", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Ticket_Priority struct { + Session *session.Session + Options sl.Options +} + +// GetTicketPriorityService returns an instance of the Ticket_Priority SoftLayer service +func GetTicketPriorityService(sess *session.Session) Ticket_Priority { + return Ticket_Priority{Session: sess} +} + +func (r Ticket_Priority) Id(id int) Ticket_Priority { + r.Options.Id = &id + return r +} + +func (r Ticket_Priority) Mask(mask string) Ticket_Priority { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Ticket_Priority) Filter(filter string) Ticket_Priority { + r.Options.Filter = filter + return r +} + +func (r Ticket_Priority) Limit(limit int) Ticket_Priority { + r.Options.Limit = &limit + return r +} + +func (r Ticket_Priority) Offset(offset int) Ticket_Priority { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Ticket_Priority) GetPriorities() (resp []datatypes.Container_Ticket_Priority, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket_Priority", "getPriorities", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Ticket_Subject data type models one of the possible subjects that a standard support ticket may belong to. A basic support ticket's title matches it's corresponding subject's name. +type Ticket_Subject struct { + Session *session.Session + Options sl.Options +} + +// GetTicketSubjectService returns an instance of the Ticket_Subject SoftLayer service +func GetTicketSubjectService(sess *session.Session) Ticket_Subject { + return Ticket_Subject{Session: sess} +} + +func (r Ticket_Subject) Id(id int) Ticket_Subject { + r.Options.Id = &id + return r +} + +func (r Ticket_Subject) Mask(mask string) Ticket_Subject { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Ticket_Subject) Filter(filter string) Ticket_Subject { + r.Options.Filter = filter + return r +} + +func (r Ticket_Subject) Limit(limit int) Ticket_Subject { + r.Options.Limit = &limit + return r +} + +func (r Ticket_Subject) Offset(offset int) Ticket_Subject { + r.Options.Offset = &offset + return r +} + +// Retrieve all possible ticket subjects. The SoftLayer customer portal uses this method in the add standard support ticket form. 
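+//
+// A minimal usage sketch: the credentials are placeholders and the mask
+// properties ("id;name") are illustrative assumptions about the data type.
+//
+//	sess := session.New("username", "apiKey")
+//	subjects, err := services.GetTicketSubjectService(sess).Mask("id;name").GetAllObjects()
+//	if err != nil {
+//		// handle the API error
+//	}
+//	fmt.Println(len(subjects))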
+func (r Ticket_Subject) GetAllObjects() (resp []datatypes.Ticket_Subject, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Ticket_Subject) GetCategory() (resp datatypes.Ticket_Subject_Category, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject", "getCategory", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A child subject
+func (r Ticket_Subject) GetChildren() (resp []datatypes.Ticket_Subject, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject", "getChildren", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Ticket_Subject) GetGroup() (resp datatypes.Ticket_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject", "getGroup", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Ticket_Subject object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Ticket_Subject service.
+func (r Ticket_Subject) GetObject() (resp datatypes.Ticket_Subject, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A parent subject
+func (r Ticket_Subject) GetParent() (resp datatypes.Ticket_Subject, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject", "getParent", nil, &r.Options, &resp)
+	return
+}
+
+// SoftLayer maintains relationships between the generic subjects for standard administration and the top five commonly asked questions about these subjects. getTopFiveKnowledgeLayerQuestions() retrieves the top five questions and answers from the SoftLayer KnowledgeLayer related to the given ticket subject.
+func (r Ticket_Subject) GetTopFiveKnowledgeLayerQuestions() (resp []datatypes.Container_KnowledgeLayer_QuestionAnswer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject", "getTopFiveKnowledgeLayerQuestions", nil, &r.Options, &resp)
+	return
+}
+
+// SoftLayer_Ticket_Subject_Category groups ticket subjects into logical groups.
+type Ticket_Subject_Category struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetTicketSubjectCategoryService returns an instance of the Ticket_Subject_Category SoftLayer service
+func GetTicketSubjectCategoryService(sess *session.Session) Ticket_Subject_Category {
+	return Ticket_Subject_Category{Session: sess}
+}
+
+func (r Ticket_Subject_Category) Id(id int) Ticket_Subject_Category {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Ticket_Subject_Category) Mask(mask string) Ticket_Subject_Category {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Ticket_Subject_Category) Filter(filter string) Ticket_Subject_Category {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Ticket_Subject_Category) Limit(limit int) Ticket_Subject_Category {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Ticket_Subject_Category) Offset(offset int) Ticket_Subject_Category {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve all ticket subject categories. 
+func (r Ticket_Subject_Category) GetAllObjects() (resp []datatypes.Ticket_Subject_Category, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject_Category", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Ticket_Subject_Category) GetObject() (resp datatypes.Ticket_Subject_Category, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject_Category", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Ticket_Subject_Category) GetSubjects() (resp []datatypes.Ticket_Subject, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject_Category", "getSubjects", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Ticket_Survey struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetTicketSurveyService returns an instance of the Ticket_Survey SoftLayer service
+func GetTicketSurveyService(sess *session.Session) Ticket_Survey {
+	return Ticket_Survey{Session: sess}
+}
+
+func (r Ticket_Survey) Id(id int) Ticket_Survey {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Ticket_Survey) Mask(mask string) Ticket_Survey {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Ticket_Survey) Filter(filter string) Ticket_Survey {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Ticket_Survey) Limit(limit int) Ticket_Survey {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Ticket_Survey) Offset(offset int) Ticket_Survey {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Use this method to retrieve the ticket survey preferences. It will return your [[SoftLayer_Container_Ticket_Survey_Preference|survey preference]] which indicates if your account is applicable to receive a survey and if you're opted in. You can control your survey opt-in status via the [[SoftLayer_Ticket_Survey::optIn|opt-in]] or [[SoftLayer_Ticket_Survey::optOut|opt-out]] method.
+func (r Ticket_Survey) GetPreference() (resp datatypes.Container_Ticket_Survey_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Survey", "getPreference", nil, &r.Options, &resp)
+	return
+}
+
+// You will not receive a ticket survey if you are opted out. Use this method to opt back in if you wish to provide feedback to our support team. You may use the [[SoftLayer_Ticket_Survey::getPreference|getPreference]] method to check your current opt-in status.
+//
+// This method is deprecated. Use [[SoftLayer_User_Customer::changePreference]] instead.
+func (r Ticket_Survey) OptIn() (resp datatypes.Container_Ticket_Survey_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Survey", "optIn", nil, &r.Options, &resp)
+	return
+}
+
+// By default, customers will occasionally receive a ticket survey upon closing of a ticket. Use this method to opt out of it for the next 90 days. Ticket surveys may not be applicable for some customers. Use the [[SoftLayer_Ticket_Survey::getPreference|getPreference]] method to retrieve your survey preference. The "applicable" property of the [[SoftLayer_Container_Ticket_Survey_Preference|survey preference]] indicates if the survey is relevant to your account or not.
+//
+// This method is deprecated. Use [[SoftLayer_User_Customer::changePreference]] instead. 
+func (r Ticket_Survey) OptOut() (resp datatypes.Container_Ticket_Survey_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket_Survey", "optOut", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Ticket_Update_Employee data type models an update to a ticket made by a SoftLayer employee. +type Ticket_Update_Employee struct { + Session *session.Session + Options sl.Options +} + +// GetTicketUpdateEmployeeService returns an instance of the Ticket_Update_Employee SoftLayer service +func GetTicketUpdateEmployeeService(sess *session.Session) Ticket_Update_Employee { + return Ticket_Update_Employee{Session: sess} +} + +func (r Ticket_Update_Employee) Id(id int) Ticket_Update_Employee { + r.Options.Id = &id + return r +} + +func (r Ticket_Update_Employee) Mask(mask string) Ticket_Update_Employee { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Ticket_Update_Employee) Filter(filter string) Ticket_Update_Employee { + r.Options.Filter = filter + return r +} + +func (r Ticket_Update_Employee) Limit(limit int) Ticket_Update_Employee { + r.Options.Limit = &limit + return r +} + +func (r Ticket_Update_Employee) Offset(offset int) Ticket_Update_Employee { + r.Options.Offset = &offset + return r +} + +// As part of the customer service process SoftLayer has provided a quick feedback mechanism for its customers to rate the responses that its employees give on tickets. addResponseRating() sets the rating for a single ticket update made by a SoftLayer employee. Ticket ratings have the integer values 1 through 5, with 1 being the worst and 5 being the best. Once the rating is set ''addResponseRating()'' returns a boolean true. +func (r Ticket_Update_Employee) AddResponseRating(responseRating *int) (resp bool, err error) { + params := []interface{}{ + responseRating, + } + err = r.Session.DoRequest("SoftLayer_Ticket_Update_Employee", "addResponseRating", params, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Ticket_Update_Employee object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Ticket_Update_Employee service. You can only retrieve employee updates to tickets that your API account has access to. +func (r Ticket_Update_Employee) GetObject() (resp datatypes.Ticket_Update_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket_Update_Employee", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/user.go b/vendor/github.com/softlayer/softlayer-go/services/user.go new file mode 100644 index 000000000000..2951276e536b --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/user.go @@ -0,0 +1,4388 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// The SoftLayer_User_Customer data type contains general information relating to a single SoftLayer customer portal user. Personal information in this type such as names, addresses, and phone numbers are not necessarily associated with the customer account the user is assigned to. +type User_Customer struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerService returns an instance of the User_Customer SoftLayer service +func GetUserCustomerService(sess *session.Session) User_Customer { + return User_Customer{Session: sess} +} + +func (r User_Customer) Id(id int) User_Customer { + r.Options.Id = &id + return r +} + +func (r User_Customer) Mask(mask string) User_Customer { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer) Filter(filter string) User_Customer { + r.Options.Filter = filter + return r +} + +func (r User_Customer) Limit(limit int) User_Customer { + r.Options.Limit = &limit + return r +} + +func (r User_Customer) Offset(offset int) User_Customer { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Customer) AcknowledgeSupportPolicy() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_User_Customer", "acknowledgeSupportPolicy", nil, &r.Options, &resp) + return +} + +// Create a user's API authentication key, allowing that user access to query the SoftLayer API. addApiAuthenticationKey() returns the users new API key. Each portal user is allowed a maximum of two API keys. +func (r User_Customer) AddApiAuthenticationKey() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "addApiAuthenticationKey", nil, &r.Options, &resp) + return +} + +// Grants the user access to one or more dedicated host devices. The user will only be allowed to see and access devices in both the portal and the API to which they have been granted access. If the user's account has devices to which the user has not been granted access, then "not found" exceptions are thrown if the user attempts to access any of these devices. +// +// Users can assign device access to their child users, but not to themselves. An account's master has access to all devices on their customer account and can set dedicated host access for any of the other users on their account. +func (r User_Customer) AddBulkDedicatedHostAccess(dedicatedHostIds []int) (resp bool, err error) { + params := []interface{}{ + dedicatedHostIds, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "addBulkDedicatedHostAccess", params, &r.Options, &resp) + return +} + +// Add multiple hardware to a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware does not exist in the SoftLayer portal and returns "not found" exceptions in the API if the user doesn't have access to it. addBulkHardwareAccess() does not attempt to add hardware access if the given user already has access to that hardware object. +// +// Users can assign hardware access to their child users, but not to themselves. 
An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account. +func (r User_Customer) AddBulkHardwareAccess(hardwareIds []int) (resp bool, err error) { + params := []interface{}{ + hardwareIds, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "addBulkHardwareAccess", params, &r.Options, &resp) + return +} + +// Add multiple permissions to a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. addBulkPortalPermission() does not attempt to add permissions already assigned to the user. +// +// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account. +// +// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are removed based on the keyName property of the permission objects within the permissions parameter. +func (r User_Customer) AddBulkPortalPermission(permissions []datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) { + params := []interface{}{ + permissions, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "addBulkPortalPermission", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) AddBulkRoles(roles []datatypes.User_Permission_Role) (err error) { + var resp datatypes.Void + params := []interface{}{ + roles, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "addBulkRoles", params, &r.Options, &resp) + return +} + +// Add multiple CloudLayer Computing Instances to a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's CloudLayer Computing Instance objects a user has access to in the SoftLayer customer portal and API. CloudLayer Computing Instances do not exist in the SoftLayer portal and returns "not found" exceptions in the API if the user doesn't have access to it. addBulkVirtualGuestAccess() does not attempt to add CloudLayer Computing Instance access if the given user already has access to that CloudLayer Computing Instance object. +// +// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master has access to all CloudLayer Computing Instances on their customer account and can set CloudLayer Computing Instance access for any of the other users on their account. +func (r User_Customer) AddBulkVirtualGuestAccess(virtualGuestIds []int) (resp bool, err error) { + params := []interface{}{ + virtualGuestIds, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "addBulkVirtualGuestAccess", params, &r.Options, &resp) + return +} + +// Grants the user access to a single dedicated host device. The user will only be allowed to see and access devices in both the portal and the API to which they have been granted access. If the user's account has devices to which the user has not been granted access, then "not found" exceptions are thrown if the user attempts to access any of these devices. +// +// Users can assign device access to their child users, but not to themselves. An account's master has access to all devices on their customer account and can set dedicated host access for any of the other users on their account. 
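+//
+// A minimal usage sketch: the user and dedicated host IDs are placeholders.
+// The Id option supplies the init parameter, i.e. the id of the (child) user
+// whose access list is modified.
+//
+//	sess := session.New("username", "apiKey")
+//	dedicatedHostId := 111
+//	ok, err := services.GetUserCustomerService(sess).Id(222).AddDedicatedHostAccess(&dedicatedHostId)
+//	if err != nil {
+//		// handle the API error
+//	}
+//	fmt.Println(ok)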
+func (r User_Customer) AddDedicatedHostAccess(dedicatedHostId *int) (resp bool, err error) { + params := []interface{}{ + dedicatedHostId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "addDedicatedHostAccess", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) AddExternalBinding(externalBinding *datatypes.User_External_Binding) (resp datatypes.User_Customer_External_Binding, err error) { + params := []interface{}{ + externalBinding, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "addExternalBinding", params, &r.Options, &resp) + return +} + +// Add hardware to a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware does not exist in the SoftLayer portal and returns "not found" exceptions in the API if the user doesn't have access to it. If a user already has access to the hardware you're attempting to add then addHardwareAccess() returns true. +// +// Users can assign hardware access to their child users, but not to themselves. An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account. +func (r User_Customer) AddHardwareAccess(hardwareId *int) (resp bool, err error) { + params := []interface{}{ + hardwareId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "addHardwareAccess", params, &r.Options, &resp) + return +} + +// Create a notification subscription record for the user. If a subscription record exists for the notification, the record will be set to active, if currently inactive. +func (r User_Customer) AddNotificationSubscriber(notificationKeyName *string) (resp bool, err error) { + params := []interface{}{ + notificationKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "addNotificationSubscriber", params, &r.Options, &resp) + return +} + +// Add a permission to a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. If the user already has the permission you're attempting to add then addPortalPermission() returns true. +// +// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account. +// +// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are added based on the keyName property of the permission parameter. +func (r User_Customer) AddPortalPermission(permission *datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) { + params := []interface{}{ + permission, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "addPortalPermission", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) AddRole(role *datatypes.User_Permission_Role) (err error) { + var resp datatypes.Void + params := []interface{}{ + role, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "addRole", params, &r.Options, &resp) + return +} + +// Add a CloudLayer Computing Instance to a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's CloudLayer Computing Instance objects a user has access to in the SoftLayer customer portal and API. 
CloudLayer Computing Instances do not exist in the SoftLayer portal and returns "not found" exceptions in the API if the user doesn't have access to it. If a user already has access to the CloudLayer Computing Instance you're attempting to add then addVirtualGuestAccess() returns true. +// +// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master has access to all CloudLayer Computing Instances on their customer account and can set CloudLayer Computing Instance access for any of the other users on their account. +func (r User_Customer) AddVirtualGuestAccess(virtualGuestId *int) (resp bool, err error) { + params := []interface{}{ + virtualGuestId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "addVirtualGuestAccess", params, &r.Options, &resp) + return +} + +// Select a type of preference you would like to modify using [[SoftLayer_User_Customer::getPreferenceTypes|getPreferenceTypes]] and invoke this method using that preference type key name. +func (r User_Customer) ChangePreference(preferenceTypeKeyName *string, value *string) (resp []datatypes.User_Preference, err error) { + params := []interface{}{ + preferenceTypeKeyName, + value, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "changePreference", params, &r.Options, &resp) + return +} + +// This service checks the result of a previously requested external authentication. [[SoftLayer_Container_User_Customer_External_Binding_Phone|Phone external binding]] container can be used for this service. Make sure to set the [[SoftLayer_Container_User_Customer_External_Binding_Phone::authenticationToken|authenticationToken]] that is generated by [[SoftLayer_User_Customer|initiateExternalAuthentication]] service. +func (r User_Customer) CheckExternalAuthenticationStatus(authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + authenticationContainer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "checkExternalAuthenticationStatus", params, &r.Options, &resp) + return +} + +// Add a description here +// +// +func (r User_Customer) CheckPhoneFactorAuthenticationForPasswordSet(passwordSet *datatypes.Container_User_Customer_PasswordSet, authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp bool, err error) { + params := []interface{}{ + passwordSet, + authenticationContainer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "checkPhoneFactorAuthenticationForPasswordSet", params, &r.Options, &resp) + return +} + +// Create a new subscriber for a given resource. +func (r User_Customer) CreateNotificationSubscriber(keyName *string, resourceTableId *int) (resp bool, err error) { + params := []interface{}{ + keyName, + resourceTableId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "createNotificationSubscriber", params, &r.Options, &resp) + return +} + +// Create a new user in the SoftLayer customer portal. createObject() creates a user's portal record and adds them into the SoftLayer community forums. It is not possible to set up SLL or PPTP enable flags during object creation. These flags are ignored during object creation. You will need to make a subsequent call to edit object in order to enable VPN access. An account's master user and sub-users who have the User Manage permission can add new users. createObject() creates users with a default permission set. 
After adding a user it may be helpful to set their permissions and hardware access. +// +// Note, neither password nor vpnPassword parameters are required. +// +// Password When a new user is created, an email will be sent to the new user's email address with a link to a url that will allow the new user to create or change their password for the SoftLayer customer portal. +// +// If the password parameter is provided and is not null, then that value will be validated. If it is a valid password, then the user will be created with this password. This user will still receive a portal password email. It can be used within 24 hours to change their password, or it can be allowed to expire, and the password provided during user creation will remain as the user's password. +// +// If the password parameter is not provided or the value is null, the user must set their portal password using the link sent in email within 24 hours.  If the user fails to set their password within 24 hours, then a non-master user can use the "Reset Password" link on the login page of the portal to request a new email. A master user can use the link to retrieve a phone number to call to assist in resetting their password. +// +// The password parameter is ignored for VPN_ONLY users or for IBMid authenticated users. +// +// vpnPassword If the vpnPassword is provided, then the user's vpnPassword will be set to the provided password.  When creating a vpn only user, the vpnPassword MUST be supplied.  If the vpnPassword is not provided, then the user will need to use the portal to edit their profile and set the vpnPassword. +// +// +func (r User_Customer) CreateObject(templateObject *datatypes.User_Customer, password *string, vpnPassword *string) (resp datatypes.User_Customer, err error) { + params := []interface{}{ + templateObject, + password, + vpnPassword, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "createObject", params, &r.Options, &resp) + return +} + +// Create delivery methods for a notification that the user is subscribed to. Multiple delivery method keyNames can be supplied to create multiple delivery methods for the specified notification. Available delivery methods - 'EMAIL'. Available notifications - 'PLANNED_MAINTENANCE', 'UNPLANNED_INCIDENT'. +func (r User_Customer) CreateSubscriberDeliveryMethods(notificationKeyName *string, deliveryMethodKeyNames []string) (resp bool, err error) { + params := []interface{}{ + notificationKeyName, + deliveryMethodKeyNames, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "createSubscriberDeliveryMethods", params, &r.Options, &resp) + return +} + +// Create a new subscriber for a given resource. +func (r User_Customer) DeactivateNotificationSubscriber(keyName *string, resourceTableId *int) (resp bool, err error) { + params := []interface{}{ + keyName, + resourceTableId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "deactivateNotificationSubscriber", params, &r.Options, &resp) + return +} + +// Account master users and sub-users who have the User Manage permission in the SoftLayer customer portal can update other user's information. Use editObject() if you wish to edit a single user account. Users who do not have the User Manage permission can only update their own information. 
+func (r User_Customer) EditObject(templateObject *datatypes.User_Customer) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "editObject", params, &r.Options, &resp) + return +} + +// Account master users and sub-users who have the User Manage permission in the SoftLayer customer portal can update other user's information. Use editObjects() if you wish to edit multiple users at once. Users who do not have the User Manage permission can only update their own information. +func (r User_Customer) EditObjects(templateObjects []datatypes.User_Customer) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "editObjects", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) FindUserPreference(profileName *string, containerKeyname *string, preferenceKeyname *string) (resp []datatypes.Layout_Profile, err error) { + params := []interface{}{ + profileName, + containerKeyname, + preferenceKeyname, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "findUserPreference", params, &r.Options, &resp) + return +} + +// Retrieve The customer account that a user belongs to. +func (r User_Customer) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer) GetActions() (resp []datatypes.User_Permission_Action, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getActions", nil, &r.Options, &resp) + return +} + +// The getActiveExternalAuthenticationVendors method will return a list of available external vendors that a SoftLayer user can authenticate against. The list will only contain vendors for which the user has at least one active external binding. +func (r User_Customer) GetActiveExternalAuthenticationVendors() (resp []datatypes.Container_User_Customer_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getActiveExternalAuthenticationVendors", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's additional email addresses. These email addresses are contacted when updates are made to support tickets. +func (r User_Customer) GetAdditionalEmails() (resp []datatypes.User_Customer_AdditionalEmail, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getAdditionalEmails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) GetAllowedDedicatedHostIds() (resp []int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getAllowedDedicatedHostIds", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) GetAllowedHardwareIds() (resp []int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getAllowedHardwareIds", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) GetAllowedVirtualGuestIds() (resp []int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getAllowedVirtualGuestIds", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's API Authentication keys. There is a max limit of two API keys per user. 
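+//
+// A minimal usage sketch: the credentials and user id are placeholders. It
+// creates a key with AddApiAuthenticationKey and then lists the keys on the
+// same user record.
+//
+//	sess := session.New("username", "apiKey")
+//	svc := services.GetUserCustomerService(sess).Id(222)
+//	newKey, err := svc.AddApiAuthenticationKey()
+//	if err != nil {
+//		// handle the API error
+//	}
+//	keys, _ := svc.GetApiAuthenticationKeys() // error handling elided
+//	fmt.Println(newKey, len(keys))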
+func (r User_Customer) GetApiAuthenticationKeys() (resp []datatypes.User_Customer_ApiAuthentication, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getApiAuthenticationKeys", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) GetAuthenticationToken(token *datatypes.Container_User_Authentication_Token) (resp datatypes.Container_User_Authentication_Token, err error) { + params := []interface{}{ + token, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "getAuthenticationToken", params, &r.Options, &resp) + return +} + +// Retrieve The CDN accounts associated with a portal user. +func (r User_Customer) GetCdnAccounts() (resp []datatypes.Network_ContentDelivery_Account, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getCdnAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's child users. Some portal users may not have child users. +func (r User_Customer) GetChildUsers() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getChildUsers", nil, &r.Options, &resp) + return +} + +// Retrieve An user's associated closed tickets. +func (r User_Customer) GetClosedTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getClosedTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The dedicated hosts to which the user has been granted access. +func (r User_Customer) GetDedicatedHosts() (resp []datatypes.Virtual_DedicatedHost, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getDedicatedHosts", nil, &r.Options, &resp) + return +} + +// This API gets the default account for the OpenIdConnect identity that is linked to the current SoftLayer user identity. If there is no default present, the API returns null, except in the special case where we find one active user linked to the IBMid. In that case, we will set the link from the IBMid to that user as default, and return the account of which that user is a member. Invoke this only on IBMid-authenticated users. +func (r User_Customer) GetDefaultAccount(providerType *string) (resp datatypes.Account, err error) { + params := []interface{}{ + providerType, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "getDefaultAccount", params, &r.Options, &resp) + return +} + +// Retrieve The external authentication bindings that link an external identifier to a SoftLayer user. +func (r User_Customer) GetExternalBindings() (resp []datatypes.User_External_Binding, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getExternalBindings", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's accessible hardware. These permissions control which hardware a user has access to in the SoftLayer customer portal. +func (r User_Customer) GetHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve the number of servers that a portal user has access to. Portal users can have restrictions set to limit services for and to perform actions on hardware. You can set these permissions in the portal by clicking the "administrative" then "user admin" links. +func (r User_Customer) GetHardwareCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getHardwareCount", nil, &r.Options, &resp) + return +} + +// Retrieve Hardware notifications associated with this user. 
A hardware notification links a user to a piece of hardware, and that user will be notified if any monitors on that hardware fail, if the monitors have a status of 'Notify User'. +func (r User_Customer) GetHardwareNotifications() (resp []datatypes.User_Customer_Notification_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getHardwareNotifications", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a user has acknowledged the support policy. +func (r User_Customer) GetHasAcknowledgedSupportPolicyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getHasAcknowledgedSupportPolicyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Permission granting the user access to all Dedicated Host devices on the account. +func (r User_Customer) GetHasFullDedicatedHostAccessFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getHasFullDedicatedHostAccessFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a portal user has access to all hardware on their account. +func (r User_Customer) GetHasFullHardwareAccessFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getHasFullHardwareAccessFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a portal user has access to all hardware on their account. +func (r User_Customer) GetHasFullVirtualGuestAccessFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getHasFullVirtualGuestAccessFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) GetImpersonationToken() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getImpersonationToken", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer) GetLayoutProfiles() (resp []datatypes.Layout_Profile, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getLayoutProfiles", nil, &r.Options, &resp) + return +} + +// Retrieve A user's locale. Locale holds user's language and region information. +func (r User_Customer) GetLocale() (resp datatypes.Locale, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getLocale", nil, &r.Options, &resp) + return +} + +// Retrieve A user's attempts to log into the SoftLayer customer portal. +func (r User_Customer) GetLoginAttempts() (resp []datatypes.User_Customer_Access_Authentication, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getLoginAttempts", nil, &r.Options, &resp) + return +} + +// Attempt to authenticate a user to the SoftLayer customer portal using the provided authentication container. Depending on the specific type of authentication container that is used, this API will leverage the appropriate authentication protocol. If authentication is successful then the API returns a list of linked accounts for the user, a token containing the ID of the authenticated user and a hash key used by the SoftLayer customer portal to maintain authentication. 
+func (r User_Customer) GetLoginToken(request *datatypes.Container_Authentication_Request_Contract) (resp datatypes.Container_Authentication_Response_Common, err error) { + params := []interface{}{ + request, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "getLoginToken", params, &r.Options, &resp) + return +} + +// An OpenIdConnect identity, for example an IBMid, can be linked or mapped to one or more individual SoftLayer users, but no more than one SoftLayer user per account. This effectively links the OpenIdConnect identity to those accounts. This API returns a list of all the accounts for which there is a link between the OpenIdConnect identity and a SoftLayer user. Invoke this only on IBMid-authenticated users. +func (r User_Customer) GetMappedAccounts(providerType *string) (resp []datatypes.Account, err error) { + params := []interface{}{ + providerType, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "getMappedAccounts", params, &r.Options, &resp) + return +} + +// Retrieve A portal user's associated mobile device profiles. +func (r User_Customer) GetMobileDevices() (resp []datatypes.User_Customer_MobileDevice, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getMobileDevices", nil, &r.Options, &resp) + return +} + +// Retrieve Notification subscription records for the user. +func (r User_Customer) GetNotificationSubscribers() (resp []datatypes.Notification_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getNotificationSubscribers", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_User_Customer object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_User_Customer service. You can only retrieve users that are assigned to the customer account belonging to the user making the API call. +func (r User_Customer) GetObject() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getObject", nil, &r.Options, &resp) + return +} + +// This API returns a SoftLayer_Container_User_Customer_OpenIdConnect_MigrationState object containing the necessary information to determine what migration state the user is in. If the account is not OpenIdConnect authenticated, then an exception is thrown. +func (r User_Customer) GetOpenIdConnectMigrationState() (resp datatypes.Container_User_Customer_OpenIdConnect_MigrationState, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getOpenIdConnectMigrationState", nil, &r.Options, &resp) + return +} + +// Retrieve An user's associated open tickets. +func (r User_Customer) GetOpenTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getOpenTickets", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's vpn accessible subnets. +func (r User_Customer) GetOverrides() (resp []datatypes.Network_Service_Vpn_Overrides, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getOverrides", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's parent user. If a SoftLayer_User_Customer has a null parentId property then it doesn't have a parent user. +func (r User_Customer) GetParent() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getParent", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's permissions. These permissions control that user's access to functions within the SoftLayer customer portal and API. 
+func (r User_Customer) GetPermissions() (resp []datatypes.User_Customer_CustomerPermission_Permission, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getPermissions", nil, &r.Options, &resp) + return +} + +// Attempt to authenticate a username and password to the SoftLayer customer portal. Many portal user accounts are configured to require answering a security question on login. In this case getPortalLoginToken() also verifies the given security question ID and answer. If authentication is successful then the API returns a token containing the ID of the authenticated user and a hash key used by the SoftLayer customer portal to maintain authentication. +func (r User_Customer) GetPortalLoginToken(username *string, password *string, securityQuestionId *int, securityQuestionAnswer *string) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + username, + password, + securityQuestionId, + securityQuestionAnswer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "getPortalLoginToken", params, &r.Options, &resp) + return +} + +// Select a type of preference you would like to get using [[SoftLayer_User_Customer::getPreferenceTypes|getPreferenceTypes]] and invoke this method using that preference type key name. +func (r User_Customer) GetPreference(preferenceTypeKeyName *string) (resp datatypes.User_Preference, err error) { + params := []interface{}{ + preferenceTypeKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "getPreference", params, &r.Options, &resp) + return +} + +// Use any of the preference types to fetch or modify user preferences using [[SoftLayer_User_Customer::getPreference|getPreference]] or [[SoftLayer_User_Customer::changePreference|changePreference]], respectively. +func (r User_Customer) GetPreferenceTypes() (resp []datatypes.User_Preference_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getPreferenceTypes", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer) GetPreferences() (resp []datatypes.User_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getPreferences", nil, &r.Options, &resp) + return +} + +// Retrieve the authentication requirements for an outstanding password set/reset request. The password key provided to the user in an email generated by the [[SoftLayer_User_Customer::newUserPassword|newUserPassword]]. Password recovery keys are valid for 24 hours after they're generated. +func (r User_Customer) GetRequirementsForPasswordSet(passwordSet *datatypes.Container_User_Customer_PasswordSet) (resp datatypes.Container_User_Customer_PasswordSet, err error) { + params := []interface{}{ + passwordSet, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "getRequirementsForPasswordSet", params, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer) GetRoles() (resp []datatypes.User_Permission_Role, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getRoles", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer) GetSalesforceUserLink() (resp datatypes.User_Customer_Link, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getSalesforceUserLink", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's security question answers. Some portal users may not have security answers or may not be configured to require answering a security question on login. 
+func (r User_Customer) GetSecurityAnswers() (resp []datatypes.User_Customer_Security_Answer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getSecurityAnswers", nil, &r.Options, &resp) + return +} + +// Retrieve A user's notification subscription records. +func (r User_Customer) GetSubscribers() (resp []datatypes.Notification_User_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getSubscribers", nil, &r.Options, &resp) + return +} + +// Retrieve A user's successful attempts to log into the SoftLayer customer portal. +func (r User_Customer) GetSuccessfulLogins() (resp []datatypes.User_Customer_Access_Authentication, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getSuccessfulLogins", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a user is required to acknowledge the support policy for portal access. +func (r User_Customer) GetSupportPolicyAcknowledgementRequiredFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getSupportPolicyAcknowledgementRequiredFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) GetSupportPolicyDocument() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getSupportPolicyDocument", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) GetSupportPolicyName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getSupportPolicyName", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) GetSupportedLocales() (resp []datatypes.Locale, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getSupportedLocales", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a user must take a brief survey the next time they log into the SoftLayer customer portal. +func (r User_Customer) GetSurveyRequiredFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getSurveyRequiredFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The surveys that a user has taken in the SoftLayer customer portal. +func (r User_Customer) GetSurveys() (resp []datatypes.Survey, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getSurveys", nil, &r.Options, &resp) + return +} + +// Retrieve An user's associated tickets. +func (r User_Customer) GetTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getTickets", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's time zone. +func (r User_Customer) GetTimezone() (resp datatypes.Locale_Timezone, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getTimezone", nil, &r.Options, &resp) + return +} + +// Retrieve A user's unsuccessful attempts to log into the SoftLayer customer portal. +func (r User_Customer) GetUnsuccessfulLogins() (resp []datatypes.User_Customer_Access_Authentication, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getUnsuccessfulLogins", nil, &r.Options, &resp) + return +} + +// Retrieve a user object using a password token. When a new user is created or when a user has requested a password change using initiatePortalPasswordChange, they will have received an email that contains a url with a token. That token is used as the parameter for getUserIdForPasswordSet. 
+func (r User_Customer) GetUserIdForPasswordSet(key *string) (resp int, err error) { + params := []interface{}{ + key, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "getUserIdForPasswordSet", params, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer) GetUserLinks() (resp []datatypes.User_Customer_Link, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getUserLinks", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) GetUserPreferences(profileName *string, containerKeyname *string) (resp []datatypes.Layout_Profile, err error) { + params := []interface{}{ + profileName, + containerKeyname, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "getUserPreferences", params, &r.Options, &resp) + return +} + +// Retrieve A portal user's status, which controls overall access to the SoftLayer customer portal and VPN access to the private network. +func (r User_Customer) GetUserStatus() (resp datatypes.User_Customer_Status, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getUserStatus", nil, &r.Options, &resp) + return +} + +// Retrieve the number of CloudLayer Computing Instances that a portal user has access to. Portal users can have restrictions set to limit services for and to perform actions on CloudLayer Computing Instances. You can set these permissions in the portal by clicking the "administrative" then "user admin" links. +func (r User_Customer) GetVirtualGuestCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getVirtualGuestCount", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's accessible CloudLayer Computing Instances. These permissions control which CloudLayer Computing Instances a user has access to in the SoftLayer customer portal. +func (r User_Customer) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getVirtualGuests", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) InTerminalStatus() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "inTerminalStatus", nil, &r.Options, &resp) + return +} + +// The service initiates an external authentication with the given external authentication vendor. The authentication container and its content will be verified before an attempt is made to initiate an external authentication. [[SoftLayer_Container_User_Customer_External_Binding_Phone|Phone external binding]] container can be used for this service. +// +// This service returns a unique authentication request token. You can use [[SoftLayer_User_Customer::checkExternalAuthenticationStatus|checkExternalAuthenticationStatus]] service to check if the authentication request is complete or not. +func (r User_Customer) InitiateExternalAuthentication(authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp string, err error) { + params := []interface{}{ + authenticationContainer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "initiateExternalAuthentication", params, &r.Options, &resp) + return +} + +// Sends password change email to the user containing url that allows the user the change their password. This is the first step when a user wishes to change their password. The url that is generated contains a one-time use token that is valid for only 24-hours. 
+//
+// If this is a new master user who has never logged into the portal, then password reset will be initiated. Once a master user has logged into the portal, they must set up their security questions prior to logging out because master users are required to answer a security question during the password reset process. Should a master user not have security questions defined and not remember their password in order to define the security questions, then they will need to contact support at live chat or Revenue Services for assistance.
+//
+// For security reasons, the number of reset requests per username is limited within an undisclosed timeframe.
+func (r User_Customer) InitiatePortalPasswordChange(username *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "initiatePortalPasswordChange", params, &r.Options, &resp)
+	return
+}
+
+// A Brand Agent that has permissions to Add Customer Accounts will be able to request the password email be sent to the Master User of a Customer Account created by the same Brand as the agent making the request. For security reasons, the number of reset requests is limited within an undisclosed timeframe.
+func (r User_Customer) InitiatePortalPasswordChangeByBrandAgent(username *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "initiatePortalPasswordChangeByBrandAgent", params, &r.Options, &resp)
+	return
+}
+
+// Send an email invitation to a user to join a SoftLayer account and authenticate with OpenIdConnect. Throws an exception on error.
+func (r User_Customer) InviteUserToLinkOpenIdConnect(providerType *string) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		providerType,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "inviteUserToLinkOpenIdConnect", params, &r.Options, &resp)
+	return
+}
+
+// Portal users are considered master users if they don't have an associated parent user. The only users who don't have parent users are users whose username matches their SoftLayer account name. Master users have special permissions throughout the SoftLayer customer portal.
+func (r User_Customer) IsMasterUser() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "isMasterUser", nil, &r.Options, &resp)
+	return
+}
+
+// This method is deprecated! SoftLayer Community Forums no longer exist; therefore, any password verified will return false. In the future, this method will be completely removed.
+//
+// Determine if a string is the given user's login password to the SoftLayer community forums.
+func (r User_Customer) IsValidForumPassword(password *string) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		password,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "isValidForumPassword", params, &r.Options, &resp)
+	return
+}
+
+// Determine if a string is the given user's login password to the SoftLayer customer portal.
+func (r User_Customer) IsValidPortalPassword(password *string) (resp bool, err error) {
+	params := []interface{}{
+		password,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "isValidPortalPassword", params, &r.Options, &resp)
+	return
+}
+
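+// A minimal sketch of the password reset flow described above (illustrative only, not generated SoftLayer code): it assumes a *session.Session and the GetUserCustomerService constructor from this package. In practice the two steps run in separate invocations, since the key only reaches the user by email; the final processPasswordSetRequest call additionally needs the key and the new password wrapped in a Container_User_Customer_PasswordSet.
+func examplePasswordReset(sess *session.Session, username string, emailedKey string) (int, error) {
+	userService := GetUserCustomerService(sess)
+
+	// Step 1: ask SoftLayer to email the user a one-time password set URL.
+	if _, err := userService.InitiatePortalPasswordChange(sl.String(username)); err != nil {
+		return 0, err
+	}
+
+	// Step 2: the key embedded in that URL resolves to the user id against which
+	// processPasswordSetRequest is then invoked.
+	return userService.GetUserIdForPasswordSet(sl.String(emailedKey))
+}
+
+// The perform external authentication method will authenticate the given external authentication container with an external vendor.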
The authentication container and its contents will be verified before an attempt is made to authenticate the contents of the container with an external vendor. +func (r User_Customer) PerformExternalAuthentication(authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + authenticationContainer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "performExternalAuthentication", params, &r.Options, &resp) + return +} + +// Set the password for a user who has an outstanding password request. A user with an outstanding password request will have an unused and unexpired password key. The password key is part of the url provided to the user in the email sent to the user with information on how to set their password. The email was generated by the [[SoftLayer_User_Customer::initiatePortalPasswordRequest|initiatePortalPasswordRequest]] method. Password recovery keys are valid for 24 hours after they're generated. +// +// User portal passwords must match the following restrictions. Portal passwords must... +// * ...be over eight characters long. +// * ...be under twenty characters long. +// * ...contain at least one uppercase letter +// * ...contain at least one lowercase letter +// * ...contain at least one number +// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ + = +// * ...not match your username +// * ...not match your forum password +func (r User_Customer) ProcessPasswordSetRequest(passwordSet *datatypes.Container_User_Customer_PasswordSet, authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp bool, err error) { + params := []interface{}{ + passwordSet, + authenticationContainer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "processPasswordSetRequest", params, &r.Options, &resp) + return +} + +// Revoke access to all dedicated hosts on the account for this user. The user will only be allowed to see and access devices in both the portal and the API to which they have been granted access. If the user's account has devices to which the user has not been granted access or the access has been revoked, then "not found" exceptions are thrown if the user attempts to access any of these devices. If the current user does not have administrative privileges over this user, an inadequate permissions exception will get thrown. +// +// Users can call this function on child users, but not to themselves. An account's master has access to all users permissions on their account. +func (r User_Customer) RemoveAllDedicatedHostAccessForThisUser() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "removeAllDedicatedHostAccessForThisUser", nil, &r.Options, &resp) + return +} + +// Remove all hardware from a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. If the current user does not have administrative privileges over this user, an inadequate permissions exception will get thrown. +// +// Users can call this function on child users, but not to themselves. An account's master has access to all users permissions on their account. 
+func (r User_Customer) RemoveAllHardwareAccessForThisUser() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeAllHardwareAccessForThisUser", nil, &r.Options, &resp)
+	return
+}
+
+// Remove all cloud computing instances from a portal user's instance access list. A user's instance access list controls which of an account's computing instance objects a user has access to in the SoftLayer customer portal and API. If the current user does not have administrative privileges over this user, an inadequate permissions exception will get thrown.
+//
+// Users can call this function on child users, but not to themselves. An account's master has access to all users permissions on their account.
+func (r User_Customer) RemoveAllVirtualAccessForThisUser() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeAllVirtualAccessForThisUser", nil, &r.Options, &resp)
+	return
+}
+
+// Remove a user's API authentication key, removing that user's access to query the SoftLayer API.
+func (r User_Customer) RemoveApiAuthenticationKey(keyId *int) (resp bool, err error) {
+	params := []interface{}{
+		keyId,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeApiAuthenticationKey", params, &r.Options, &resp)
+	return
+}
+
+// Revokes access for the user to one or more dedicated host devices. The user will only be allowed to see and access devices in both the portal and the API to which they have been granted access. If the user's account has devices to which the user has not been granted access or the access has been revoked, then "not found" exceptions are thrown if the user attempts to access any of these devices.
+//
+// Users can assign device access to their child users, but not to themselves. An account's master has access to all devices on their customer account and can set dedicated host access for any of the other users on their account.
+//
+// If the user has full dedicatedHost access, then it will provide access to "ALL but passed in" dedicatedHost ids.
+func (r User_Customer) RemoveBulkDedicatedHostAccess(dedicatedHostIds []int) (resp bool, err error) {
+	params := []interface{}{
+		dedicatedHostIds,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeBulkDedicatedHostAccess", params, &r.Options, &resp)
+	return
+}
+
+// Remove multiple hardware from a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware that a user does not have access to does not appear in the SoftLayer portal and returns "not found" exceptions in the API. If a user does not have access to the hardware you're attempting to remove, then removeBulkHardwareAccess() returns true.
+//
+// Users can assign hardware access to their child users, but not to themselves. An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account.
+//
+// If the user has full hardware access, then it will provide access to "ALL but passed in" hardware ids.
+func (r User_Customer) RemoveBulkHardwareAccess(hardwareIds []int) (resp bool, err error) {
+	params := []interface{}{
+		hardwareIds,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeBulkHardwareAccess", params, &r.Options, &resp)
+	return
+}
+
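+// A minimal sketch of revoking hardware access in bulk (illustrative only): it assumes a *session.Session, the GetUserCustomerService constructor from this package, and the generated Id option setter; the ids are placeholders.
+func exampleRemoveBulkHardwareAccess(sess *session.Session, userId int, hardwareIds []int) (bool, error) {
+	// Only a user with administrative privileges over the target user
+	// (typically the account's master user) can edit another user's access list.
+	return GetUserCustomerService(sess).Id(userId).RemoveBulkHardwareAccess(hardwareIds)
+}
+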
+// Remove multiple permissions from a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. Removing a user's permission will affect that user's portal and API access. removePortalPermission() does not attempt to remove permissions that are not assigned to the user.
+//
+// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account.
+//
+// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are removed based on the keyName property of the permission objects within the permissions parameter.
+func (r User_Customer) RemoveBulkPortalPermission(permissions []datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) {
+	params := []interface{}{
+		permissions,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeBulkPortalPermission", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer) RemoveBulkRoles(roles []datatypes.User_Permission_Role) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		roles,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeBulkRoles", params, &r.Options, &resp)
+	return
+}
+
+// Remove multiple CloudLayer Computing Instances from a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's CloudLayer Computing Instance objects a user has access to in the SoftLayer customer portal and API. CloudLayer Computing Instances that a user does not have access to do not appear in the SoftLayer portal and return "not found" exceptions in the API. If a user does not have access to the CloudLayer Computing Instance you're attempting to remove, then removeBulkVirtualGuestAccess() returns true.
+//
+// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master has access to all CloudLayer Computing Instances on their customer account and can set instance access for any of the other users on their account.
+func (r User_Customer) RemoveBulkVirtualGuestAccess(virtualGuestIds []int) (resp bool, err error) {
+	params := []interface{}{
+		virtualGuestIds,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeBulkVirtualGuestAccess", params, &r.Options, &resp)
+	return
+}
+
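+// A minimal sketch of removing portal permissions by key name (illustrative only): it assumes a *session.Session, the GetUserCustomerService constructor from this package, and a KeyName field on the generated permission datatype matching the keyName property mentioned above.
+func exampleRemoveBulkPortalPermission(sess *session.Session, userId int, keyNames []string) (bool, error) {
+	permissions := make([]datatypes.User_Customer_CustomerPermission_Permission, 0, len(keyNames))
+	for _, keyName := range keyNames {
+		// Permissions are matched on keyName, so only that property is set here.
+		permissions = append(permissions, datatypes.User_Customer_CustomerPermission_Permission{
+			KeyName: sl.String(keyName),
+		})
+	}
+	return GetUserCustomerService(sess).Id(userId).RemoveBulkPortalPermission(permissions)
+}
+
+// Revokes access for the user to a single dedicated host device. The user will only be allowed to see and access devices in both the portal and the API to which they have been granted access. If the user's account has devices to which the user has not been granted access or the access has been revoked, then "not found" exceptions are thrown if the user attempts to access any of these devices.
+//
+// Users can assign device access to their child users, but not to themselves. An account's master has access to all devices on their customer account and can set dedicated host access for any of the other users on their account.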
+func (r User_Customer) RemoveDedicatedHostAccess(dedicatedHostId *int) (resp bool, err error) {
+	params := []interface{}{
+		dedicatedHostId,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeDedicatedHostAccess", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer) RemoveExternalBinding(externalBinding *datatypes.User_External_Binding) (resp bool, err error) {
+	params := []interface{}{
+		externalBinding,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeExternalBinding", params, &r.Options, &resp)
+	return
+}
+
+// Remove hardware from a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware that a user does not have access to does not appear in the SoftLayer portal and returns "not found" exceptions in the API. If a user does not have access to the hardware you're attempting to remove, then removeHardwareAccess() returns true.
+//
+// Users can assign hardware access to their child users, but not to themselves. An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account.
+func (r User_Customer) RemoveHardwareAccess(hardwareId *int) (resp bool, err error) {
+	params := []interface{}{
+		hardwareId,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeHardwareAccess", params, &r.Options, &resp)
+	return
+}
+
+// Remove a permission from a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. Removing a user's permission will affect that user's portal and API access. If the user does not have the permission you're attempting to remove then removePortalPermission() returns true.
+//
+// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account.
+//
+// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are removed based on the keyName property of the permission parameter.
+func (r User_Customer) RemovePortalPermission(permission *datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) {
+	params := []interface{}{
+		permission,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removePortalPermission", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer) RemoveRole(role *datatypes.User_Permission_Role) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		role,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeRole", params, &r.Options, &resp)
+	return
+}
+
+// Remove a CloudLayer Computing Instance from a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's computing instances a user has access to in the SoftLayer customer portal and API. CloudLayer Computing Instances that a user does not have access to do not appear in the SoftLayer portal and return "not found" exceptions in the API. If a user does not have access to the CloudLayer Computing Instance you're attempting to remove, then removeVirtualGuestAccess() returns true.
+// +// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master has access to all CloudLayer Computing Instances on their customer account and can set instance access for any of the other users on their account. +func (r User_Customer) RemoveVirtualGuestAccess(virtualGuestId *int) (resp bool, err error) { + params := []interface{}{ + virtualGuestId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "removeVirtualGuestAccess", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) SamlAuthenticate(accountId *string, samlResponse *string) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + accountId, + samlResponse, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "samlAuthenticate", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) SamlBeginAuthentication(accountId *int) (resp string, err error) { + params := []interface{}{ + accountId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "samlBeginAuthentication", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) SamlBeginLogout() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "samlBeginLogout", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) SamlLogout(samlResponse *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + samlResponse, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "samlLogout", params, &r.Options, &resp) + return +} + +// An OpenIdConnect identity, for example an IBMid, can be linked or mapped to one or more individual SoftLayer users, but no more than one per account. If an OpenIdConnect identity is mapped to multiple accounts in this manner, one such account should be identified as the default account for that identity. Invoke this only on IBMid-authenticated users. +func (r User_Customer) SetDefaultAccount(providerType *string, accountId *int) (resp datatypes.Account, err error) { + params := []interface{}{ + providerType, + accountId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "setDefaultAccount", params, &r.Options, &resp) + return +} + +// As master user, calling this api for the IBMid provider type when there is an existing IBMid for the email on the SL account will silently (without sending an invitation email) create a link for the IBMid. NOTE: If the SoftLayer user is already linked to IBMid, this call will fail. If the IBMid specified by the email of this user, is already used in a link to another user in this account, this call will fail. If there is already an open invitation from this SoftLayer user to this or any IBMid, this call will fail. If there is already an open invitation from some other SoftLayer user in this account to this IBMid, then this call will fail. +func (r User_Customer) SilentlyMigrateUserOpenIdConnect(providerType *string) (resp bool, err error) { + params := []interface{}{ + providerType, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "silentlyMigrateUserOpenIdConnect", params, &r.Options, &resp) + return +} + +// This method is deprecated! SoftLayer Community Forums no longer exist, therefore, this method will return false. In the future, this method will be completely removed. +// +// Update a user's password on the SoftLayer community forums. 
As with portal passwords, user forum passwords must match the following restrictions. Forum passwords must... +// * ...be over eight characters long. +// * ...be under twenty characters long. +// * ...contain at least one uppercase letter +// * ...contain at least one lowercase letter +// * ...contain at least one number +// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ + = +// * ...not match your username +// * ...not match your portal password +// Finally, users can only update their own password. +func (r User_Customer) UpdateForumPassword(password *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "updateForumPassword", params, &r.Options, &resp) + return +} + +// Update the active status for a notification that the user is subscribed to. A notification along with an active flag can be supplied to update the active status for a particular notification subscription. +func (r User_Customer) UpdateNotificationSubscriber(notificationKeyName *string, active *int) (resp bool, err error) { + params := []interface{}{ + notificationKeyName, + active, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "updateNotificationSubscriber", params, &r.Options, &resp) + return +} + +// Update a user's login security questions and answers on the SoftLayer customer portal. These questions and answers are used to optionally log into the SoftLayer customer portal using two-factor authentication. Each user must have three distinct questions set with a unique answer for each question, and each answer may only contain alphanumeric or the . , - _ ( ) [ ] : ; > < characters. Existing user security questions and answers are deleted before new ones are set, and users may only update their own security questions and answers. +func (r User_Customer) UpdateSecurityAnswers(questions []datatypes.User_Security_Question, answers []string) (resp bool, err error) { + params := []interface{}{ + questions, + answers, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "updateSecurityAnswers", params, &r.Options, &resp) + return +} + +// Update a delivery method for a notification that the user is subscribed to. A delivery method keyName along with an active flag can be supplied to update the active status of the delivery methods for the specified notification. Available delivery methods - 'EMAIL'. Available notifications - 'PLANNED_MAINTENANCE', 'UNPLANNED_INCIDENT'. +func (r User_Customer) UpdateSubscriberDeliveryMethod(notificationKeyName *string, deliveryMethodKeyNames []string, active *int) (resp bool, err error) { + params := []interface{}{ + notificationKeyName, + deliveryMethodKeyNames, + active, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "updateSubscriberDeliveryMethod", params, &r.Options, &resp) + return +} + +// Update a user's VPN password on the SoftLayer customer portal. As with portal passwords, VPN passwords must match the following restrictions. VPN passwords must... +// * ...be over eight characters long. +// * ...be under twenty characters long. +// * ...contain at least one uppercase letter +// * ...contain at least one lowercase letter +// * ...contain at least one number +// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ = +// * ...not match your username +// * ...not match your forum password +// Finally, users can only update their own VPN password. 
An account's master user can update any of their account users' VPN passwords. +func (r User_Customer) UpdateVpnPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "updateVpnPassword", params, &r.Options, &resp) + return +} + +// Always call this function to enable changes when manually configuring VPN subnet access. +func (r User_Customer) UpdateVpnUser() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "updateVpnUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) ValidateAuthenticationToken(authenticationToken *datatypes.Container_User_Authentication_Token) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + authenticationToken, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "validateAuthenticationToken", params, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_ApiAuthentication type contains user's authentication key(s). +type User_Customer_ApiAuthentication struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerApiAuthenticationService returns an instance of the User_Customer_ApiAuthentication SoftLayer service +func GetUserCustomerApiAuthenticationService(sess *session.Session) User_Customer_ApiAuthentication { + return User_Customer_ApiAuthentication{Session: sess} +} + +func (r User_Customer_ApiAuthentication) Id(id int) User_Customer_ApiAuthentication { + r.Options.Id = &id + return r +} + +func (r User_Customer_ApiAuthentication) Mask(mask string) User_Customer_ApiAuthentication { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_ApiAuthentication) Filter(filter string) User_Customer_ApiAuthentication { + r.Options.Filter = filter + return r +} + +func (r User_Customer_ApiAuthentication) Limit(limit int) User_Customer_ApiAuthentication { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_ApiAuthentication) Offset(offset int) User_Customer_ApiAuthentication { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Customer_ApiAuthentication) EditObject(templateObject *datatypes.User_Customer_ApiAuthentication) (resp datatypes.User_Customer_ApiAuthentication, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_ApiAuthentication", "editObject", params, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_User_Customer_ApiAuthentication object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_User_Customer_ApiAuthentication service. +func (r User_Customer_ApiAuthentication) GetObject() (resp datatypes.User_Customer_ApiAuthentication, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_ApiAuthentication", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The user who owns the api authentication key. +func (r User_Customer_ApiAuthentication) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_ApiAuthentication", "getUser", nil, &r.Options, &resp) + return +} + +// Each SoftLayer portal account is assigned a series of permissions that determine what access the user has to functions within the SoftLayer customer portal. 
This status is reflected in the SoftLayer_User_Customer_Status data type. Permissions differ from user status in that user status applies globally to the portal while user permissions are applied to specific portal functions. +type User_Customer_CustomerPermission_Permission struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerCustomerPermissionPermissionService returns an instance of the User_Customer_CustomerPermission_Permission SoftLayer service +func GetUserCustomerCustomerPermissionPermissionService(sess *session.Session) User_Customer_CustomerPermission_Permission { + return User_Customer_CustomerPermission_Permission{Session: sess} +} + +func (r User_Customer_CustomerPermission_Permission) Id(id int) User_Customer_CustomerPermission_Permission { + r.Options.Id = &id + return r +} + +func (r User_Customer_CustomerPermission_Permission) Mask(mask string) User_Customer_CustomerPermission_Permission { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_CustomerPermission_Permission) Filter(filter string) User_Customer_CustomerPermission_Permission { + r.Options.Filter = filter + return r +} + +func (r User_Customer_CustomerPermission_Permission) Limit(limit int) User_Customer_CustomerPermission_Permission { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_CustomerPermission_Permission) Offset(offset int) User_Customer_CustomerPermission_Permission { + r.Options.Offset = &offset + return r +} + +// Retrieve all available permissions. +func (r User_Customer_CustomerPermission_Permission) GetAllObjects() (resp []datatypes.User_Customer_CustomerPermission_Permission, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_CustomerPermission_Permission", "getAllObjects", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_User_Customer_CustomerPermission_Permission object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_User_Customer_CustomerPermission_Permission service. +func (r User_Customer_CustomerPermission_Permission) GetObject() (resp datatypes.User_Customer_CustomerPermission_Permission, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_CustomerPermission_Permission", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_External_Binding data type contains general information for a single external binding. This includes the 3rd party vendor, type of binding, and a unique identifier and password that is used to authenticate against the 3rd party service. 
+type User_Customer_External_Binding struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerExternalBindingService returns an instance of the User_Customer_External_Binding SoftLayer service +func GetUserCustomerExternalBindingService(sess *session.Session) User_Customer_External_Binding { + return User_Customer_External_Binding{Session: sess} +} + +func (r User_Customer_External_Binding) Id(id int) User_Customer_External_Binding { + r.Options.Id = &id + return r +} + +func (r User_Customer_External_Binding) Mask(mask string) User_Customer_External_Binding { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_External_Binding) Filter(filter string) User_Customer_External_Binding { + r.Options.Filter = filter + return r +} + +func (r User_Customer_External_Binding) Limit(limit int) User_Customer_External_Binding { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_External_Binding) Offset(offset int) User_Customer_External_Binding { + r.Options.Offset = &offset + return r +} + +// Delete an external authentication binding. If the external binding currently has an active billing item associated you will be prevented from deleting the binding. The alternative method to remove an external authentication binding is to use the service cancellation form. +func (r User_Customer_External_Binding) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "deleteObject", nil, &r.Options, &resp) + return +} + +// Disabling an external binding will allow you to keep the external binding on your SoftLayer account, but will not require you to authentication with our trusted 2 form factor vendor when logging into the SoftLayer customer portal. +// +// You may supply one of the following reason when you disable an external binding: +// *Unspecified +// *TemporarilyUnavailable +// *Lost +// *Stolen +func (r User_Customer_External_Binding) Disable(reason *string) (resp bool, err error) { + params := []interface{}{ + reason, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "disable", params, &r.Options, &resp) + return +} + +// Enabling an external binding will activate the binding on your account and require you to authenticate with our trusted 3rd party 2 form factor vendor when logging into the SoftLayer customer portal. +// +// Please note that API access will be disabled for users that have an active external binding. +func (r User_Customer_External_Binding) Enable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "enable", nil, &r.Options, &resp) + return +} + +// Retrieve Attributes of an external authentication binding. +func (r User_Customer_External_Binding) GetAttributes() (resp []datatypes.User_External_Binding_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for external authentication. +func (r User_Customer_External_Binding) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve An optional note for identifying the external binding. 
+func (r User_Customer_External_Binding) GetNote() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "getNote", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding) GetObject() (resp datatypes.User_Customer_External_Binding, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The type of external authentication binding. +func (r User_Customer_External_Binding) GetType() (resp datatypes.User_External_Binding_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer user that the external authentication binding belongs to. +func (r User_Customer_External_Binding) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "getUser", nil, &r.Options, &resp) + return +} + +// Retrieve The vendor of an external authentication binding. +func (r User_Customer_External_Binding) GetVendor() (resp datatypes.User_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "getVendor", nil, &r.Options, &resp) + return +} + +// Update the note of an external binding. The note is an optional property that is used to store information about a binding. +func (r User_Customer_External_Binding) UpdateNote(text *string) (resp bool, err error) { + params := []interface{}{ + text, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "updateNote", params, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_External_Binding_Phone data type contains information about an external binding that uses a phone call, SMS or mobile app for 2 form factor authentication. The external binding information is used when a SoftLayer customer logs into the SoftLayer customer portal or VPN to authenticate them against a trusted 3rd party, in this case using a mobile phone, mobile phone application or land-line phone. +// +// SoftLayer users with an active external binding will be prohibited from using the API for security reasons. 
+type User_Customer_External_Binding_Phone struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerExternalBindingPhoneService returns an instance of the User_Customer_External_Binding_Phone SoftLayer service +func GetUserCustomerExternalBindingPhoneService(sess *session.Session) User_Customer_External_Binding_Phone { + return User_Customer_External_Binding_Phone{Session: sess} +} + +func (r User_Customer_External_Binding_Phone) Id(id int) User_Customer_External_Binding_Phone { + r.Options.Id = &id + return r +} + +func (r User_Customer_External_Binding_Phone) Mask(mask string) User_Customer_External_Binding_Phone { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_External_Binding_Phone) Filter(filter string) User_Customer_External_Binding_Phone { + r.Options.Filter = filter + return r +} + +func (r User_Customer_External_Binding_Phone) Limit(limit int) User_Customer_External_Binding_Phone { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_External_Binding_Phone) Offset(offset int) User_Customer_External_Binding_Phone { + r.Options.Offset = &offset + return r +} + +// Return a phone validation result. +func (r User_Customer_External_Binding_Phone) CheckPhoneValidationResult(token *string) (resp bool, err error) { + params := []interface{}{ + token, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "checkPhoneValidationResult", params, &r.Options, &resp) + return +} + +// Delete an external authentication binding. If the external binding currently has an active billing item associated you will be prevented from deleting the binding. The alternative method to remove an external authentication binding is to use the service cancellation form. +func (r User_Customer_External_Binding_Phone) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "deleteObject", nil, &r.Options, &resp) + return +} + +// Disabling an external binding will allow you to keep the external binding on your SoftLayer account, but will not require you to authentication with our trusted 2 form factor vendor when logging into the SoftLayer customer portal. +// +// You may supply one of the following reason when you disable an external binding: +// *Unspecified +// *TemporarilyUnavailable +// *Lost +// *Stolen +func (r User_Customer_External_Binding_Phone) Disable(reason *string) (resp bool, err error) { + params := []interface{}{ + reason, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "disable", params, &r.Options, &resp) + return +} + +// Enabling an external binding will activate the binding on your account and require you to authenticate with our trusted 3rd party 2 form factor vendor when logging into the SoftLayer customer portal. +// +// Please note that API access will be disabled for users that have an active external binding. +func (r User_Customer_External_Binding_Phone) Enable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "enable", nil, &r.Options, &resp) + return +} + +// This service returns key names of all available authentication modes. See [[SoftLayer_Container_User_Customer_External_Binding_Phone_Mode|authentication mode]] container for details. 
+func (r User_Customer_External_Binding_Phone) GetAllAuthenticationModes() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getAllAuthenticationModes", nil, &r.Options, &resp) + return +} + +// This service returns key names of all available authentication modes. Refer to [[SoftLayer_User_Customer_External_Binding_Phone::getAllAuthenticationModes|getAllAuthenticationModes]] to retrieve authentication mode key names. +func (r User_Customer_External_Binding_Phone) GetAllAuthenticationPinModes(authenticationModeKeyName *string) (resp []string, err error) { + params := []interface{}{ + authenticationModeKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getAllAuthenticationPinModes", params, &r.Options, &resp) + return +} + +// Retrieve Attributes of an external authentication binding. +func (r User_Customer_External_Binding_Phone) GetAttributes() (resp []datatypes.User_External_Binding_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getAttributes", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding_Phone) GetAuthenticationMode() (resp datatypes.Container_User_Customer_External_Binding_Phone_Mode, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getAuthenticationMode", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for external authentication. +func (r User_Customer_External_Binding_Phone) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The current external binding status. It can be "ACTIVE" or "BLOCKED". +func (r User_Customer_External_Binding_Phone) GetBindingStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getBindingStatus", nil, &r.Options, &resp) + return +} + +// Retrieve An optional note for identifying the external binding. +func (r User_Customer_External_Binding_Phone) GetNote() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getNote", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding_Phone) GetObject() (resp datatypes.User_Customer_External_Binding_Phone, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getObject", nil, &r.Options, &resp) + return +} + +// Some vendor's mobile app requires an activation code. Use this method to get an activation data. 
+func (r User_Customer_External_Binding_Phone) GetPhoneAppActivationCode() (resp []datatypes.User_External_Binding_Attribute, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getPhoneAppActivationCode", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer_External_Binding_Phone) GetPhoneData() (resp []datatypes.Container_User_Data_Phone, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getPhoneData", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r User_Customer_External_Binding_Phone) GetPinLength() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getPinLength", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The type of external authentication binding.
+func (r User_Customer_External_Binding_Phone) GetType() (resp datatypes.User_External_Binding_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getType", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer user that the external authentication binding belongs to.
+func (r User_Customer_External_Binding_Phone) GetUser() (resp datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getUser", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The vendor of an external authentication binding.
+func (r User_Customer_External_Binding_Phone) GetVendor() (resp datatypes.User_External_Binding_Vendor, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getVendor", nil, &r.Options, &resp)
+	return
+}
+
+// Initiates a phone validation request and returns a unique token. Use [[SoftLayer_User_Customer_External_Binding_Phone::checkPhoneValidationResult|checkPhoneValidationResult]] to find the phone validation result.
+func (r User_Customer_External_Binding_Phone) RequestPhoneValidation(phoneData *datatypes.Container_User_Data_Phone) (resp string, err error) {
+	params := []interface{}{
+		phoneData,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "requestPhoneValidation", params, &r.Options, &resp)
+	return
+}
+
+// This service allows you to change your phone authentication mode. See [[SoftLayer_Container_User_Customer_External_Binding_Phone_Mode|authentication mode]] container for available modes.
+func (r User_Customer_External_Binding_Phone) UpdateAuthenticationMode(mode *datatypes.Container_User_Customer_External_Binding_Phone_Mode) (resp bool, err error) {
+	params := []interface{}{
+		mode,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "updateAuthenticationMode", params, &r.Options, &resp)
+	return
+}
+
+// Update the note of an external binding. The note is an optional property that is used to store information about a binding.
+func (r User_Customer_External_Binding_Phone) UpdateNote(text *string) (resp bool, err error) {
+	params := []interface{}{
+		text,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "updateNote", params, &r.Options, &resp)
+	return
+}
+
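+// A minimal sketch of the phone validation round trip (illustrative only): requestPhoneValidation returns a token that is later passed to checkPhoneValidationResult. It assumes a *session.Session, the GetUserCustomerExternalBindingPhoneService constructor above, and Container_User_Data_Phone fields named after the phone/phoneType parameters documented below.
+func exampleValidatePhone(sess *session.Session, bindingId int, number string) (bool, error) {
+	phoneBinding := GetUserCustomerExternalBindingPhoneService(sess).Id(bindingId)
+
+	phone := datatypes.Container_User_Data_Phone{
+		Phone:     sl.String(number),
+		PhoneType: sl.String("PRIMARY"),
+	}
+	token, err := phoneBinding.RequestPhoneValidation(&phone)
+	if err != nil {
+		return false, err
+	}
+
+	// After the call or text has been answered, check the outcome with the token.
+	return phoneBinding.CheckPhoneValidationResult(sl.String(token))
+}
+
+// Phone external binding supports a primary and a backup phone number. You can use this method to update your phone number used for phone authentication. You can provide an array of [[SoftLayer_Container_User_Data_Phone|User Phone]] objects. You have to mark one as the primary phone number by setting "phoneType" to "PRIMARY".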
+// +// +// *countryCode: Country code number for the phone number. Default: 1 (United States & Canada +1) +// *phone: Phone number that 2 Form Factor system will call or text for user authentication. +// The phone number format must match the format selected in the Country Code. +// *extension: Specify the extension that will be dialed after the call is answered. Digits, commas, *, and # +// are allowed. Commas can be used for a one second pause to navigate phone system menus. +// *phoneType: Specify the primary and backup phone number by setting this value to "PRIMARY" or "BACKUP". +// If omitted, it will be considered to be the primary phone number. If you are passing two Phone objects, you must specify the phone type of each phone number. +// +// +func (r User_Customer_External_Binding_Phone) UpdatePhone(phoneData []datatypes.Container_User_Data_Phone) (resp bool, err error) { + params := []interface{}{ + phoneData, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "updatePhone", params, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_External_Binding_Totp data type contains information about a single time-based one time password external binding. The external binding information is used when a SoftLayer customer logs into the SoftLayer customer portal to authenticate them. +// +// The information provided by this external binding data type includes: +// * The type of credential +// * The current state of the credential +// ** Active +// ** Inactive +// +// +// SoftLayer users with an active external binding will be prohibited from using the API for security reasons. +type User_Customer_External_Binding_Totp struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerExternalBindingTotpService returns an instance of the User_Customer_External_Binding_Totp SoftLayer service +func GetUserCustomerExternalBindingTotpService(sess *session.Session) User_Customer_External_Binding_Totp { + return User_Customer_External_Binding_Totp{Session: sess} +} + +func (r User_Customer_External_Binding_Totp) Id(id int) User_Customer_External_Binding_Totp { + r.Options.Id = &id + return r +} + +func (r User_Customer_External_Binding_Totp) Mask(mask string) User_Customer_External_Binding_Totp { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_External_Binding_Totp) Filter(filter string) User_Customer_External_Binding_Totp { + r.Options.Filter = filter + return r +} + +func (r User_Customer_External_Binding_Totp) Limit(limit int) User_Customer_External_Binding_Totp { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_External_Binding_Totp) Offset(offset int) User_Customer_External_Binding_Totp { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Customer_External_Binding_Totp) Activate() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "activate", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding_Totp) Deactivate() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "deactivate", nil, &r.Options, &resp) + return +} + +// Delete an external authentication binding. 
If the external binding currently has an active billing item associated you will be prevented from deleting the binding. The alternative method to remove an external authentication binding is to use the service cancellation form. +func (r User_Customer_External_Binding_Totp) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "deleteObject", nil, &r.Options, &resp) + return +} + +// Disabling an external binding will allow you to keep the external binding on your SoftLayer account, but will not require you to authentication with our trusted 2 form factor vendor when logging into the SoftLayer customer portal. +// +// You may supply one of the following reason when you disable an external binding: +// *Unspecified +// *TemporarilyUnavailable +// *Lost +// *Stolen +func (r User_Customer_External_Binding_Totp) Disable(reason *string) (resp bool, err error) { + params := []interface{}{ + reason, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "disable", params, &r.Options, &resp) + return +} + +// Enabling an external binding will activate the binding on your account and require you to authenticate with our trusted 3rd party 2 form factor vendor when logging into the SoftLayer customer portal. +// +// Please note that API access will be disabled for users that have an active external binding. +func (r User_Customer_External_Binding_Totp) Enable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "enable", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding_Totp) GenerateSecretKey() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "generateSecretKey", nil, &r.Options, &resp) + return +} + +// Retrieve Attributes of an external authentication binding. +func (r User_Customer_External_Binding_Totp) GetAttributes() (resp []datatypes.User_External_Binding_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for external authentication. +func (r User_Customer_External_Binding_Totp) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve An optional note for identifying the external binding. +func (r User_Customer_External_Binding_Totp) GetNote() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "getNote", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding_Totp) GetObject() (resp datatypes.User_Customer_External_Binding_Totp, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The type of external authentication binding. +func (r User_Customer_External_Binding_Totp) GetType() (resp datatypes.User_External_Binding_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer user that the external authentication binding belongs to. 
+func (r User_Customer_External_Binding_Totp) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "getUser", nil, &r.Options, &resp) + return +} + +// Retrieve The vendor of an external authentication binding. +func (r User_Customer_External_Binding_Totp) GetVendor() (resp datatypes.User_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "getVendor", nil, &r.Options, &resp) + return +} + +// Update the note of an external binding. The note is an optional property that is used to store information about a binding. +func (r User_Customer_External_Binding_Totp) UpdateNote(text *string) (resp bool, err error) { + params := []interface{}{ + text, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "updateNote", params, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_External_Binding_Vendor data type contains information for a single external binding vendor. This information includes a user friendly vendor name, a unique version of the vendor name, and a unique internal identifier that can be used when creating a new external binding. +type User_Customer_External_Binding_Vendor struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerExternalBindingVendorService returns an instance of the User_Customer_External_Binding_Vendor SoftLayer service +func GetUserCustomerExternalBindingVendorService(sess *session.Session) User_Customer_External_Binding_Vendor { + return User_Customer_External_Binding_Vendor{Session: sess} +} + +func (r User_Customer_External_Binding_Vendor) Id(id int) User_Customer_External_Binding_Vendor { + r.Options.Id = &id + return r +} + +func (r User_Customer_External_Binding_Vendor) Mask(mask string) User_Customer_External_Binding_Vendor { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_External_Binding_Vendor) Filter(filter string) User_Customer_External_Binding_Vendor { + r.Options.Filter = filter + return r +} + +func (r User_Customer_External_Binding_Vendor) Limit(limit int) User_Customer_External_Binding_Vendor { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_External_Binding_Vendor) Offset(offset int) User_Customer_External_Binding_Vendor { + r.Options.Offset = &offset + return r +} + +// getAllObjects() will return a list of the available external binding vendors that SoftLayer supports. Use this list to select the appropriate vendor when creating a new external binding. +func (r User_Customer_External_Binding_Vendor) GetAllObjects() (resp []datatypes.User_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Vendor", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding_Vendor) GetObject() (resp datatypes.User_Customer_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Vendor", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_External_Binding_Verisign data type contains information about a single VeriSign external binding. 
The external binding information is used when a SoftLayer customer logs into the SoftLayer customer portal to authenticate that customer against a 3rd party, in this case VeriSign.
+//
+// The information provided by the VeriSign external binding data type includes:
+// * The type of credential
+// * The current state of the credential
+// ** Enabled
+// ** Disabled
+// ** Locked
+// * The credential's expiration date
+// * The last time the credential was updated
+//
+//
+// SoftLayer users with an active external binding will be prohibited from using the API for security reasons.
+type User_Customer_External_Binding_Verisign struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetUserCustomerExternalBindingVerisignService returns an instance of the User_Customer_External_Binding_Verisign SoftLayer service
+func GetUserCustomerExternalBindingVerisignService(sess *session.Session) User_Customer_External_Binding_Verisign {
+	return User_Customer_External_Binding_Verisign{Session: sess}
+}
+
+func (r User_Customer_External_Binding_Verisign) Id(id int) User_Customer_External_Binding_Verisign {
+	r.Options.Id = &id
+	return r
+}
+
+func (r User_Customer_External_Binding_Verisign) Mask(mask string) User_Customer_External_Binding_Verisign {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r User_Customer_External_Binding_Verisign) Filter(filter string) User_Customer_External_Binding_Verisign {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r User_Customer_External_Binding_Verisign) Limit(limit int) User_Customer_External_Binding_Verisign {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r User_Customer_External_Binding_Verisign) Offset(offset int) User_Customer_External_Binding_Verisign {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Delete a VeriSign external binding. The only VeriSign external binding that can be deleted through this method is the free VeriSign external binding for the master user of a SoftLayer account. All other external bindings must be canceled using the SoftLayer service cancellation form.
+//
+// When a VeriSign external binding is deleted, the credential is deactivated in VeriSign's system for use on the SoftLayer site and the $0 billing item associated with the free VeriSign external binding is cancelled.
+func (r User_Customer_External_Binding_Verisign) DeleteObject() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "deleteObject", nil, &r.Options, &resp)
+	return
+}
+
+// Disabling an external binding will allow you to keep the external binding on your SoftLayer account, but will not require you to authenticate with our trusted two-factor authentication vendor when logging into the SoftLayer customer portal.
+//
+// You may supply one of the following reasons when you disable an external binding:
+// *Unspecified
+// *TemporarilyUnavailable
+// *Lost
+// *Stolen
+func (r User_Customer_External_Binding_Verisign) Disable(reason *string) (resp bool, err error) {
+	params := []interface{}{
+		reason,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "disable", params, &r.Options, &resp)
+	return
+}
+
+// Enabling an external binding will activate the binding on your account and require you to authenticate with our trusted 3rd party two-factor authentication vendor when logging into the SoftLayer customer portal.
+// +// Please note that API access will be disabled for users that have an active external binding. +func (r User_Customer_External_Binding_Verisign) Enable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "enable", nil, &r.Options, &resp) + return +} + +// An activation code is required when provisioning a new mobile credential from Verisign. This method will return the required activation code. +func (r User_Customer_External_Binding_Verisign) GetActivationCodeForMobileClient() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getActivationCodeForMobileClient", nil, &r.Options, &resp) + return +} + +// Retrieve Attributes of an external authentication binding. +func (r User_Customer_External_Binding_Verisign) GetAttributes() (resp []datatypes.User_External_Binding_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for external authentication. +func (r User_Customer_External_Binding_Verisign) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The date that a VeriSign credential expires. +func (r User_Customer_External_Binding_Verisign) GetCredentialExpirationDate() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getCredentialExpirationDate", nil, &r.Options, &resp) + return +} + +// Retrieve The last time a VeriSign credential was updated. +func (r User_Customer_External_Binding_Verisign) GetCredentialLastUpdateDate() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getCredentialLastUpdateDate", nil, &r.Options, &resp) + return +} + +// Retrieve The current state of a VeriSign credential. This can be 'Enabled', 'Disabled', or 'Locked'. +func (r User_Customer_External_Binding_Verisign) GetCredentialState() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getCredentialState", nil, &r.Options, &resp) + return +} + +// Retrieve The type of VeriSign credential. This can be either 'Hardware' or 'Software'. +func (r User_Customer_External_Binding_Verisign) GetCredentialType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getCredentialType", nil, &r.Options, &resp) + return +} + +// Retrieve An optional note for identifying the external binding. +func (r User_Customer_External_Binding_Verisign) GetNote() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getNote", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding_Verisign) GetObject() (resp datatypes.User_Customer_External_Binding_Verisign, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The type of external authentication binding. 
+func (r User_Customer_External_Binding_Verisign) GetType() (resp datatypes.User_External_Binding_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer user that the external authentication binding belongs to. +func (r User_Customer_External_Binding_Verisign) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getUser", nil, &r.Options, &resp) + return +} + +// Retrieve The vendor of an external authentication binding. +func (r User_Customer_External_Binding_Verisign) GetVendor() (resp datatypes.User_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getVendor", nil, &r.Options, &resp) + return +} + +// If a VeriSign credential becomes locked because of too many failed login attempts the unlock method can be used to unlock a VeriSign credential. As a security precaution a valid security code generated by the credential will be required before the credential is unlocked. +func (r User_Customer_External_Binding_Verisign) Unlock(securityCode *string) (resp bool, err error) { + params := []interface{}{ + securityCode, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "unlock", params, &r.Options, &resp) + return +} + +// Update the note of an external binding. The note is an optional property that is used to store information about a binding. +func (r User_Customer_External_Binding_Verisign) UpdateNote(text *string) (resp bool, err error) { + params := []interface{}{ + text, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "updateNote", params, &r.Options, &resp) + return +} + +// Validate the user id and VeriSign credential id used to create an external authentication binding. 
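+//
+// A minimal sketch (illustrative only; the id and credential values are
+// placeholders, and sl.Int / sl.String are assumed to be the usual pointer
+// helpers from the sl package):
+//
+//    verisign := services.GetUserCustomerExternalBindingVerisignService(sess)
+//    valid, err := verisign.ValidateCredentialId(sl.Int(portalUserId), sl.String(credentialId))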
+func (r User_Customer_External_Binding_Verisign) ValidateCredentialId(userId *int, externalId *string) (resp bool, err error) { + params := []interface{}{ + userId, + externalId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "validateCredentialId", params, &r.Options, &resp) + return +} + +// no documentation yet +type User_Customer_Invitation struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerInvitationService returns an instance of the User_Customer_Invitation SoftLayer service +func GetUserCustomerInvitationService(sess *session.Session) User_Customer_Invitation { + return User_Customer_Invitation{Session: sess} +} + +func (r User_Customer_Invitation) Id(id int) User_Customer_Invitation { + r.Options.Id = &id + return r +} + +func (r User_Customer_Invitation) Mask(mask string) User_Customer_Invitation { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_Invitation) Filter(filter string) User_Customer_Invitation { + r.Options.Filter = filter + return r +} + +func (r User_Customer_Invitation) Limit(limit int) User_Customer_Invitation { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_Invitation) Offset(offset int) User_Customer_Invitation { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Customer_Invitation) GetObject() (resp datatypes.User_Customer_Invitation, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Invitation", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer_Invitation) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Invitation", "getUser", nil, &r.Options, &resp) + return +} + +// This class represents a mobile device belonging to a user. The device can be a phone, tablet, or possibly even some Android based net books. The purpose is to tie just enough info with the device and the user to enable push notifications through non-softlayer entities (Google, Apple, RIM). +type User_Customer_MobileDevice struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerMobileDeviceService returns an instance of the User_Customer_MobileDevice SoftLayer service +func GetUserCustomerMobileDeviceService(sess *session.Session) User_Customer_MobileDevice { + return User_Customer_MobileDevice{Session: sess} +} + +func (r User_Customer_MobileDevice) Id(id int) User_Customer_MobileDevice { + r.Options.Id = &id + return r +} + +func (r User_Customer_MobileDevice) Mask(mask string) User_Customer_MobileDevice { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_MobileDevice) Filter(filter string) User_Customer_MobileDevice { + r.Options.Filter = filter + return r +} + +func (r User_Customer_MobileDevice) Limit(limit int) User_Customer_MobileDevice { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_MobileDevice) Offset(offset int) User_Customer_MobileDevice { + r.Options.Offset = &offset + return r +} + +// Create a new mobile device association for a user. 
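+//
+// A rough sketch (illustrative only; populate the template with whatever
+// fields of datatypes.User_Customer_MobileDevice describe your device):
+//
+//    template := datatypes.User_Customer_MobileDevice{}
+//    mobileService := services.GetUserCustomerMobileDeviceService(sess)
+//    device, err := mobileService.CreateObject(&template)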
+func (r User_Customer_MobileDevice) CreateObject(templateObject *datatypes.User_Customer_MobileDevice) (resp datatypes.User_Customer_MobileDevice, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "createObject", params, &r.Options, &resp) + return +} + +// Delete a mobile device association for a user. +func (r User_Customer_MobileDevice) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "deleteObject", nil, &r.Options, &resp) + return +} + +// Edit the object by passing in a modified instance of the object +func (r User_Customer_MobileDevice) EditObject(templateObject *datatypes.User_Customer_MobileDevice) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve Notification subscriptions available to a mobile device. +func (r User_Customer_MobileDevice) GetAvailablePushNotificationSubscriptions() (resp []datatypes.Notification, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "getAvailablePushNotificationSubscriptions", nil, &r.Options, &resp) + return +} + +// Retrieve The user this mobile device belongs to. +func (r User_Customer_MobileDevice) GetCustomer() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "getCustomer", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_MobileDevice) GetObject() (resp datatypes.User_Customer_MobileDevice, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The operating system this device is using +func (r User_Customer_MobileDevice) GetOperatingSystem() (resp datatypes.User_Customer_MobileDevice_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "getOperatingSystem", nil, &r.Options, &resp) + return +} + +// Retrieve Notification subscriptions attached to a mobile device. +func (r User_Customer_MobileDevice) GetPushNotificationSubscriptions() (resp []datatypes.Notification_User_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "getPushNotificationSubscriptions", nil, &r.Options, &resp) + return +} + +// Retrieve The type of device this user is using +func (r User_Customer_MobileDevice) GetType() (resp datatypes.User_Customer_MobileDevice_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "getType", nil, &r.Options, &resp) + return +} + +// This class represents the mobile operating system installed on a user's registered mobile device. It assists us when determining the how to get a push notification to the user. 
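+//
+// For example (illustrative only), the supported operating systems can be
+// listed through the matching service:
+//
+//    osService := services.GetUserCustomerMobileDeviceOperatingSystemService(sess)
+//    systems, err := osService.GetAllObjects()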
+type User_Customer_MobileDevice_OperatingSystem struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerMobileDeviceOperatingSystemService returns an instance of the User_Customer_MobileDevice_OperatingSystem SoftLayer service +func GetUserCustomerMobileDeviceOperatingSystemService(sess *session.Session) User_Customer_MobileDevice_OperatingSystem { + return User_Customer_MobileDevice_OperatingSystem{Session: sess} +} + +func (r User_Customer_MobileDevice_OperatingSystem) Id(id int) User_Customer_MobileDevice_OperatingSystem { + r.Options.Id = &id + return r +} + +func (r User_Customer_MobileDevice_OperatingSystem) Mask(mask string) User_Customer_MobileDevice_OperatingSystem { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_MobileDevice_OperatingSystem) Filter(filter string) User_Customer_MobileDevice_OperatingSystem { + r.Options.Filter = filter + return r +} + +func (r User_Customer_MobileDevice_OperatingSystem) Limit(limit int) User_Customer_MobileDevice_OperatingSystem { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_MobileDevice_OperatingSystem) Offset(offset int) User_Customer_MobileDevice_OperatingSystem { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Customer_MobileDevice_OperatingSystem) GetAllObjects() (resp []datatypes.User_Customer_MobileDevice_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice_OperatingSystem", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_MobileDevice_OperatingSystem) GetObject() (resp datatypes.User_Customer_MobileDevice_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice_OperatingSystem", "getObject", nil, &r.Options, &resp) + return +} + +// Describes a supported class of mobile device. In this the word class is used in the context of classes of consumer electronic devices, the two most prominent examples being mobile phones and tablets. 
+type User_Customer_MobileDevice_Type struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerMobileDeviceTypeService returns an instance of the User_Customer_MobileDevice_Type SoftLayer service +func GetUserCustomerMobileDeviceTypeService(sess *session.Session) User_Customer_MobileDevice_Type { + return User_Customer_MobileDevice_Type{Session: sess} +} + +func (r User_Customer_MobileDevice_Type) Id(id int) User_Customer_MobileDevice_Type { + r.Options.Id = &id + return r +} + +func (r User_Customer_MobileDevice_Type) Mask(mask string) User_Customer_MobileDevice_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_MobileDevice_Type) Filter(filter string) User_Customer_MobileDevice_Type { + r.Options.Filter = filter + return r +} + +func (r User_Customer_MobileDevice_Type) Limit(limit int) User_Customer_MobileDevice_Type { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_MobileDevice_Type) Offset(offset int) User_Customer_MobileDevice_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Customer_MobileDevice_Type) GetAllObjects() (resp []datatypes.User_Customer_MobileDevice_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_MobileDevice_Type) GetObject() (resp datatypes.User_Customer_MobileDevice_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice_Type", "getObject", nil, &r.Options, &resp) + return +} + +// The Customer_Notification_Hardware object stores links between customers and the hardware devices they wish to monitor. This link is not enough, the user must be sure to also create SoftLayer_Network_Monitor_Version1_Query_Host instance with the response action set to "notify users" in order for the users linked to that hardware object to be notified on failure. +type User_Customer_Notification_Hardware struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerNotificationHardwareService returns an instance of the User_Customer_Notification_Hardware SoftLayer service +func GetUserCustomerNotificationHardwareService(sess *session.Session) User_Customer_Notification_Hardware { + return User_Customer_Notification_Hardware{Session: sess} +} + +func (r User_Customer_Notification_Hardware) Id(id int) User_Customer_Notification_Hardware { + r.Options.Id = &id + return r +} + +func (r User_Customer_Notification_Hardware) Mask(mask string) User_Customer_Notification_Hardware { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_Notification_Hardware) Filter(filter string) User_Customer_Notification_Hardware { + r.Options.Filter = filter + return r +} + +func (r User_Customer_Notification_Hardware) Limit(limit int) User_Customer_Notification_Hardware { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_Notification_Hardware) Offset(offset int) User_Customer_Notification_Hardware { + r.Options.Offset = &offset + return r +} + +// Passing in an unsaved instances of a Customer_Notification_Hardware object into this function will create the object and return the results to the user. 
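+//
+// A minimal sketch (illustrative only; it assumes the data type's usual
+// UserId and HardwareId pointer fields and ids you already know):
+//
+//    link, err := services.GetUserCustomerNotificationHardwareService(sess).
+//        CreateObject(&datatypes.User_Customer_Notification_Hardware{
+//            UserId:     sl.Int(userId),
+//            HardwareId: sl.Int(hardwareId),
+//        })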
+func (r User_Customer_Notification_Hardware) CreateObject(templateObject *datatypes.User_Customer_Notification_Hardware) (resp datatypes.User_Customer_Notification_Hardware, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Hardware", "createObject", params, &r.Options, &resp) + return +} + +// Passing in a collection of unsaved instances of Customer_Notification_Hardware objects into this function will create all objects and return the results to the user. +func (r User_Customer_Notification_Hardware) CreateObjects(templateObjects []datatypes.User_Customer_Notification_Hardware) (resp []datatypes.Dns_Domain, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Hardware", "createObjects", params, &r.Options, &resp) + return +} + +// Like any other API object, the customer notification objects can be deleted by passing an instance of them into this function. The ID on the object must be set. +func (r User_Customer_Notification_Hardware) DeleteObjects(templateObjects []datatypes.User_Customer_Notification_Hardware) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Hardware", "deleteObjects", params, &r.Options, &resp) + return +} + +// This method returns all Customer_Notification_Hardware objects associated with the passed in hardware ID as long as that hardware ID is owned by the current user's account. +// +// This behavior can also be accomplished by simply tapping monitoringUserNotification on the Hardware_Server object. +func (r User_Customer_Notification_Hardware) FindByHardwareId(hardwareId *int) (resp []datatypes.User_Customer_Notification_Hardware, err error) { + params := []interface{}{ + hardwareId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Hardware", "findByHardwareId", params, &r.Options, &resp) + return +} + +// Retrieve The hardware object that will be monitored. +func (r User_Customer_Notification_Hardware) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Hardware", "getHardware", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_User_Customer_Notification_Hardware object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_User_Customer_Notification_Hardware service. You can only retrieve hardware notifications attached to hardware and users that belong to your account +func (r User_Customer_Notification_Hardware) GetObject() (resp datatypes.User_Customer_Notification_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Hardware", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The user that will be notified when the associated hardware object fails a monitoring instance. +func (r User_Customer_Notification_Hardware) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Hardware", "getUser", nil, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_Notification_Virtual_Guest object stores links between customers and the virtual guests they wish to monitor. 
This link is not enough, the user must be sure to also create SoftLayer_Network_Monitor_Version1_Query_Host instance with the response action set to "notify users" in order for the users linked to that hardware object to be notified on failure. +type User_Customer_Notification_Virtual_Guest struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerNotificationVirtualGuestService returns an instance of the User_Customer_Notification_Virtual_Guest SoftLayer service +func GetUserCustomerNotificationVirtualGuestService(sess *session.Session) User_Customer_Notification_Virtual_Guest { + return User_Customer_Notification_Virtual_Guest{Session: sess} +} + +func (r User_Customer_Notification_Virtual_Guest) Id(id int) User_Customer_Notification_Virtual_Guest { + r.Options.Id = &id + return r +} + +func (r User_Customer_Notification_Virtual_Guest) Mask(mask string) User_Customer_Notification_Virtual_Guest { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_Notification_Virtual_Guest) Filter(filter string) User_Customer_Notification_Virtual_Guest { + r.Options.Filter = filter + return r +} + +func (r User_Customer_Notification_Virtual_Guest) Limit(limit int) User_Customer_Notification_Virtual_Guest { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_Notification_Virtual_Guest) Offset(offset int) User_Customer_Notification_Virtual_Guest { + r.Options.Offset = &offset + return r +} + +// Passing in an unsaved instance of a SoftLayer_Customer_Notification_Virtual_Guest object into this function will create the object and return the results to the user. +func (r User_Customer_Notification_Virtual_Guest) CreateObject(templateObject *datatypes.User_Customer_Notification_Virtual_Guest) (resp datatypes.User_Customer_Notification_Virtual_Guest, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Virtual_Guest", "createObject", params, &r.Options, &resp) + return +} + +// Passing in a collection of unsaved instances of SoftLayer_Customer_Notification_Virtual_Guest objects into this function will create all objects and return the results to the user. +func (r User_Customer_Notification_Virtual_Guest) CreateObjects(templateObjects []datatypes.User_Customer_Notification_Virtual_Guest) (resp []datatypes.User_Customer_Notification_Virtual_Guest, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Virtual_Guest", "createObjects", params, &r.Options, &resp) + return +} + +// Like any other API object, the customer notification objects can be deleted by passing an instance of them into this function. The ID on the object must be set. +func (r User_Customer_Notification_Virtual_Guest) DeleteObjects(templateObjects []datatypes.User_Customer_Notification_Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Virtual_Guest", "deleteObjects", params, &r.Options, &resp) + return +} + +// This method returns all SoftLayer_User_Customer_Notification_Virtual_Guest objects associated with the passed in ID as long as that hardware ID is owned by the current user's account. 
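+//
+// Illustrative sketch (guestId is a placeholder; sl.Int is assumed to be the
+// usual pointer helper):
+//
+//    guestNotify := services.GetUserCustomerNotificationVirtualGuestService(sess)
+//    links, err := guestNotify.FindByGuestId(sl.Int(guestId))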
+// +// This behavior can also be accomplished by simply tapping monitoringUserNotification on the Virtual_Guest object. +func (r User_Customer_Notification_Virtual_Guest) FindByGuestId(id *int) (resp []datatypes.User_Customer_Notification_Virtual_Guest, err error) { + params := []interface{}{ + id, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Virtual_Guest", "findByGuestId", params, &r.Options, &resp) + return +} + +// Retrieve The virtual guest object that will be monitored. +func (r User_Customer_Notification_Virtual_Guest) GetGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Virtual_Guest", "getGuest", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_User_Customer_Notification_Virtual_Guest object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_User_Customer_Notification_Virtual_Guest service. You can only retrieve guest notifications attached to virtual guests and users that belong to your account +func (r User_Customer_Notification_Virtual_Guest) GetObject() (resp datatypes.User_Customer_Notification_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Virtual_Guest", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The user that will be notified when the associated virtual guest object fails a monitoring instance. +func (r User_Customer_Notification_Virtual_Guest) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Virtual_Guest", "getUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +type User_Customer_OpenIdConnect struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerOpenIdConnectService returns an instance of the User_Customer_OpenIdConnect SoftLayer service +func GetUserCustomerOpenIdConnectService(sess *session.Session) User_Customer_OpenIdConnect { + return User_Customer_OpenIdConnect{Session: sess} +} + +func (r User_Customer_OpenIdConnect) Id(id int) User_Customer_OpenIdConnect { + r.Options.Id = &id + return r +} + +func (r User_Customer_OpenIdConnect) Mask(mask string) User_Customer_OpenIdConnect { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_OpenIdConnect) Filter(filter string) User_Customer_OpenIdConnect { + r.Options.Filter = filter + return r +} + +func (r User_Customer_OpenIdConnect) Limit(limit int) User_Customer_OpenIdConnect { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_OpenIdConnect) Offset(offset int) User_Customer_OpenIdConnect { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) AcknowledgeSupportPolicy() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "acknowledgeSupportPolicy", nil, &r.Options, &resp) + return +} + +// Completes invitation process for an OpenIdConnect user created by Bluemix Unified User Console. 
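+//
+// Illustrative sketch (both arguments are placeholders supplied by the
+// invitation flow):
+//
+//    var userInfo datatypes.User_Customer // details of the invited user
+//    oidcService := services.GetUserCustomerOpenIdConnectService(sess)
+//    err := oidcService.ActivateOpenIdConnectUser(sl.String(verificationCode), &userInfo)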
+func (r User_Customer_OpenIdConnect) ActivateOpenIdConnectUser(verificationCode *string, userInfo *datatypes.User_Customer) (err error) { + var resp datatypes.Void + params := []interface{}{ + verificationCode, + userInfo, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "activateOpenIdConnectUser", params, &r.Options, &resp) + return +} + +// Create a user's API authentication key, allowing that user access to query the SoftLayer API. addApiAuthenticationKey() returns the users new API key. Each portal user is allowed a maximum of two API keys. +func (r User_Customer_OpenIdConnect) AddApiAuthenticationKey() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addApiAuthenticationKey", nil, &r.Options, &resp) + return +} + +// Grants the user access to one or more dedicated host devices. The user will only be allowed to see and access devices in both the portal and the API to which they have been granted access. If the user's account has devices to which the user has not been granted access, then "not found" exceptions are thrown if the user attempts to access any of these devices. +// +// Users can assign device access to their child users, but not to themselves. An account's master has access to all devices on their customer account and can set dedicated host access for any of the other users on their account. +func (r User_Customer_OpenIdConnect) AddBulkDedicatedHostAccess(dedicatedHostIds []int) (resp bool, err error) { + params := []interface{}{ + dedicatedHostIds, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addBulkDedicatedHostAccess", params, &r.Options, &resp) + return +} + +// Add multiple hardware to a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware does not exist in the SoftLayer portal and returns "not found" exceptions in the API if the user doesn't have access to it. addBulkHardwareAccess() does not attempt to add hardware access if the given user already has access to that hardware object. +// +// Users can assign hardware access to their child users, but not to themselves. An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account. +func (r User_Customer_OpenIdConnect) AddBulkHardwareAccess(hardwareIds []int) (resp bool, err error) { + params := []interface{}{ + hardwareIds, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addBulkHardwareAccess", params, &r.Options, &resp) + return +} + +// Add multiple permissions to a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. addBulkPortalPermission() does not attempt to add permissions already assigned to the user. +// +// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account. +// +// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are removed based on the keyName property of the permission objects within the permissions parameter. 
+func (r User_Customer_OpenIdConnect) AddBulkPortalPermission(permissions []datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) { + params := []interface{}{ + permissions, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addBulkPortalPermission", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) AddBulkRoles(roles []datatypes.User_Permission_Role) (err error) { + var resp datatypes.Void + params := []interface{}{ + roles, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addBulkRoles", params, &r.Options, &resp) + return +} + +// Add multiple CloudLayer Computing Instances to a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's CloudLayer Computing Instance objects a user has access to in the SoftLayer customer portal and API. CloudLayer Computing Instances do not exist in the SoftLayer portal and returns "not found" exceptions in the API if the user doesn't have access to it. addBulkVirtualGuestAccess() does not attempt to add CloudLayer Computing Instance access if the given user already has access to that CloudLayer Computing Instance object. +// +// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master has access to all CloudLayer Computing Instances on their customer account and can set CloudLayer Computing Instance access for any of the other users on their account. +func (r User_Customer_OpenIdConnect) AddBulkVirtualGuestAccess(virtualGuestIds []int) (resp bool, err error) { + params := []interface{}{ + virtualGuestIds, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addBulkVirtualGuestAccess", params, &r.Options, &resp) + return +} + +// Grants the user access to a single dedicated host device. The user will only be allowed to see and access devices in both the portal and the API to which they have been granted access. If the user's account has devices to which the user has not been granted access, then "not found" exceptions are thrown if the user attempts to access any of these devices. +// +// Users can assign device access to their child users, but not to themselves. An account's master has access to all devices on their customer account and can set dedicated host access for any of the other users on their account. +func (r User_Customer_OpenIdConnect) AddDedicatedHostAccess(dedicatedHostId *int) (resp bool, err error) { + params := []interface{}{ + dedicatedHostId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addDedicatedHostAccess", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) AddExternalBinding(externalBinding *datatypes.User_External_Binding) (resp datatypes.User_Customer_External_Binding, err error) { + params := []interface{}{ + externalBinding, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addExternalBinding", params, &r.Options, &resp) + return +} + +// Add hardware to a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware does not exist in the SoftLayer portal and returns "not found" exceptions in the API if the user doesn't have access to it. If a user already has access to the hardware you're attempting to add then addHardwareAccess() returns true. 
+// +// Users can assign hardware access to their child users, but not to themselves. An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account. +func (r User_Customer_OpenIdConnect) AddHardwareAccess(hardwareId *int) (resp bool, err error) { + params := []interface{}{ + hardwareId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addHardwareAccess", params, &r.Options, &resp) + return +} + +// Create a notification subscription record for the user. If a subscription record exists for the notification, the record will be set to active, if currently inactive. +func (r User_Customer_OpenIdConnect) AddNotificationSubscriber(notificationKeyName *string) (resp bool, err error) { + params := []interface{}{ + notificationKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addNotificationSubscriber", params, &r.Options, &resp) + return +} + +// Add a permission to a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. If the user already has the permission you're attempting to add then addPortalPermission() returns true. +// +// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account. +// +// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are added based on the keyName property of the permission parameter. +func (r User_Customer_OpenIdConnect) AddPortalPermission(permission *datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) { + params := []interface{}{ + permission, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addPortalPermission", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) AddRole(role *datatypes.User_Permission_Role) (err error) { + var resp datatypes.Void + params := []interface{}{ + role, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addRole", params, &r.Options, &resp) + return +} + +// Add a CloudLayer Computing Instance to a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's CloudLayer Computing Instance objects a user has access to in the SoftLayer customer portal and API. CloudLayer Computing Instances do not exist in the SoftLayer portal and returns "not found" exceptions in the API if the user doesn't have access to it. If a user already has access to the CloudLayer Computing Instance you're attempting to add then addVirtualGuestAccess() returns true. +// +// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master has access to all CloudLayer Computing Instances on their customer account and can set CloudLayer Computing Instance access for any of the other users on their account. 
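+//
+// Illustrative sketch (the ids are placeholders; sl.Int is assumed to be the
+// usual pointer helper):
+//
+//    ok, err := services.GetUserCustomerOpenIdConnectService(sess).
+//        Id(childUserId).
+//        AddVirtualGuestAccess(sl.Int(virtualGuestId))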
+func (r User_Customer_OpenIdConnect) AddVirtualGuestAccess(virtualGuestId *int) (resp bool, err error) { + params := []interface{}{ + virtualGuestId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addVirtualGuestAccess", params, &r.Options, &resp) + return +} + +// Select a type of preference you would like to modify using [[SoftLayer_User_Customer::getPreferenceTypes|getPreferenceTypes]] and invoke this method using that preference type key name. +func (r User_Customer_OpenIdConnect) ChangePreference(preferenceTypeKeyName *string, value *string) (resp []datatypes.User_Preference, err error) { + params := []interface{}{ + preferenceTypeKeyName, + value, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "changePreference", params, &r.Options, &resp) + return +} + +// This service checks the result of a previously requested external authentication. [[SoftLayer_Container_User_Customer_External_Binding_Phone|Phone external binding]] container can be used for this service. Make sure to set the [[SoftLayer_Container_User_Customer_External_Binding_Phone::authenticationToken|authenticationToken]] that is generated by [[SoftLayer_User_Customer|initiateExternalAuthentication]] service. +func (r User_Customer_OpenIdConnect) CheckExternalAuthenticationStatus(authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + authenticationContainer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "checkExternalAuthenticationStatus", params, &r.Options, &resp) + return +} + +// Add a description here +// +// +func (r User_Customer_OpenIdConnect) CheckPhoneFactorAuthenticationForPasswordSet(passwordSet *datatypes.Container_User_Customer_PasswordSet, authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp bool, err error) { + params := []interface{}{ + passwordSet, + authenticationContainer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "checkPhoneFactorAuthenticationForPasswordSet", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) CompleteInvitationAfterLogin(providerType *string, accessToken *string, emailRegistrationCode *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + providerType, + accessToken, + emailRegistrationCode, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "completeInvitationAfterLogin", params, &r.Options, &resp) + return +} + +// Create a new subscriber for a given resource. +func (r User_Customer_OpenIdConnect) CreateNotificationSubscriber(keyName *string, resourceTableId *int) (resp bool, err error) { + params := []interface{}{ + keyName, + resourceTableId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "createNotificationSubscriber", params, &r.Options, &resp) + return +} + +// Create a new user in the SoftLayer customer portal. createObject() creates a user's portal record and adds them into the SoftLayer community forums. It is not possible to set up SLL or PPTP enable flags during object creation. These flags are ignored during object creation. You will need to make a subsequent call to edit object in order to enable VPN access. An account's master user and sub-users who have the User Manage permission can add new users. createObject() creates users with a default permission set. 
After adding a user it may be helpful to set their permissions and hardware access. +// +// Note, neither password nor vpnPassword parameters are required. +// +// Password When a new user is created, an email will be sent to the new user's email address with a link to a url that will allow the new user to create or change their password for the SoftLayer customer portal. +// +// If the password parameter is provided and is not null, then that value will be validated. If it is a valid password, then the user will be created with this password. This user will still receive a portal password email. It can be used within 24 hours to change their password, or it can be allowed to expire, and the password provided during user creation will remain as the user's password. +// +// If the password parameter is not provided or the value is null, the user must set their portal password using the link sent in email within 24 hours.  If the user fails to set their password within 24 hours, then a non-master user can use the "Reset Password" link on the login page of the portal to request a new email. A master user can use the link to retrieve a phone number to call to assist in resetting their password. +// +// The password parameter is ignored for VPN_ONLY users or for IBMid authenticated users. +// +// vpnPassword If the vpnPassword is provided, then the user's vpnPassword will be set to the provided password.  When creating a vpn only user, the vpnPassword MUST be supplied.  If the vpnPassword is not provided, then the user will need to use the portal to edit their profile and set the vpnPassword. +// +// +func (r User_Customer_OpenIdConnect) CreateObject(templateObject *datatypes.User_Customer_OpenIdConnect, password *string, vpnPassword *string) (resp datatypes.User_Customer_OpenIdConnect, err error) { + params := []interface{}{ + templateObject, + password, + vpnPassword, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) CreateOpenIdConnectUserAndCompleteInvitation(providerType *string, user *datatypes.User_Customer, password *string, registrationCode *string) (resp string, err error) { + params := []interface{}{ + providerType, + user, + password, + registrationCode, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "createOpenIdConnectUserAndCompleteInvitation", params, &r.Options, &resp) + return +} + +// Create delivery methods for a notification that the user is subscribed to. Multiple delivery method keyNames can be supplied to create multiple delivery methods for the specified notification. Available delivery methods - 'EMAIL'. Available notifications - 'PLANNED_MAINTENANCE', 'UNPLANNED_INCIDENT'. +func (r User_Customer_OpenIdConnect) CreateSubscriberDeliveryMethods(notificationKeyName *string, deliveryMethodKeyNames []string) (resp bool, err error) { + params := []interface{}{ + notificationKeyName, + deliveryMethodKeyNames, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "createSubscriberDeliveryMethods", params, &r.Options, &resp) + return +} + +// Create a new subscriber for a given resource. 
+func (r User_Customer_OpenIdConnect) DeactivateNotificationSubscriber(keyName *string, resourceTableId *int) (resp bool, err error) { + params := []interface{}{ + keyName, + resourceTableId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "deactivateNotificationSubscriber", params, &r.Options, &resp) + return +} + +// Declines an invitation to link an OpenIdConnect identity to a SoftLayer (Atlas) identity and account. Note that this uses a registration code that is likely a one-time-use-only token, so if an invitation has already been processed (accepted or previously declined) it will not be possible to process it a second time. +func (r User_Customer_OpenIdConnect) DeclineInvitation(providerType *string, registrationCode *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + providerType, + registrationCode, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "declineInvitation", params, &r.Options, &resp) + return +} + +// Account master users and sub-users who have the User Manage permission in the SoftLayer customer portal can update other user's information. Use editObject() if you wish to edit a single user account. Users who do not have the User Manage permission can only update their own information. +func (r User_Customer_OpenIdConnect) EditObject(templateObject *datatypes.User_Customer) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "editObject", params, &r.Options, &resp) + return +} + +// Account master users and sub-users who have the User Manage permission in the SoftLayer customer portal can update other user's information. Use editObjects() if you wish to edit multiple users at once. Users who do not have the User Manage permission can only update their own information. +func (r User_Customer_OpenIdConnect) EditObjects(templateObjects []datatypes.User_Customer) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "editObjects", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) FindUserPreference(profileName *string, containerKeyname *string, preferenceKeyname *string) (resp []datatypes.Layout_Profile, err error) { + params := []interface{}{ + profileName, + containerKeyname, + preferenceKeyname, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "findUserPreference", params, &r.Options, &resp) + return +} + +// Retrieve The customer account that a user belongs to. +func (r User_Customer_OpenIdConnect) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer_OpenIdConnect) GetActions() (resp []datatypes.User_Permission_Action, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getActions", nil, &r.Options, &resp) + return +} + +// The getActiveExternalAuthenticationVendors method will return a list of available external vendors that a SoftLayer user can authenticate against. The list will only contain vendors for which the user has at least one active external binding. 
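+//
+// Illustrative sketch:
+//
+//    vendors, err := services.GetUserCustomerOpenIdConnectService(sess).
+//        Id(currentUserId).
+//        GetActiveExternalAuthenticationVendors()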
+func (r User_Customer_OpenIdConnect) GetActiveExternalAuthenticationVendors() (resp []datatypes.Container_User_Customer_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getActiveExternalAuthenticationVendors", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's additional email addresses. These email addresses are contacted when updates are made to support tickets. +func (r User_Customer_OpenIdConnect) GetAdditionalEmails() (resp []datatypes.User_Customer_AdditionalEmail, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getAdditionalEmails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetAllowedDedicatedHostIds() (resp []int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getAllowedDedicatedHostIds", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetAllowedHardwareIds() (resp []int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getAllowedHardwareIds", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetAllowedVirtualGuestIds() (resp []int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getAllowedVirtualGuestIds", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's API Authentication keys. There is a max limit of two API keys per user. +func (r User_Customer_OpenIdConnect) GetApiAuthenticationKeys() (resp []datatypes.User_Customer_ApiAuthentication, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getApiAuthenticationKeys", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetAuthenticationToken(token *datatypes.Container_User_Authentication_Token) (resp datatypes.Container_User_Authentication_Token, err error) { + params := []interface{}{ + token, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getAuthenticationToken", params, &r.Options, &resp) + return +} + +// Retrieve The CDN accounts associated with a portal user. +func (r User_Customer_OpenIdConnect) GetCdnAccounts() (resp []datatypes.Network_ContentDelivery_Account, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getCdnAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's child users. Some portal users may not have child users. +func (r User_Customer_OpenIdConnect) GetChildUsers() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getChildUsers", nil, &r.Options, &resp) + return +} + +// Retrieve An user's associated closed tickets. +func (r User_Customer_OpenIdConnect) GetClosedTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getClosedTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The dedicated hosts to which the user has been granted access. +func (r User_Customer_OpenIdConnect) GetDedicatedHosts() (resp []datatypes.Virtual_DedicatedHost, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getDedicatedHosts", nil, &r.Options, &resp) + return +} + +// This API gets the default account for the OpenIdConnect identity that is linked to the current SoftLayer user identity. 
If there is no default present, the API returns null, except in the special case where we find one active user linked to the IBMid. In that case, we will set the link from the IBMid to that user as default, and return the account of which that user is a member. Invoke this only on IBMid-authenticated users. +func (r User_Customer_OpenIdConnect) GetDefaultAccount(providerType *string) (resp datatypes.Account, err error) { + params := []interface{}{ + providerType, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getDefaultAccount", params, &r.Options, &resp) + return +} + +// Retrieve The external authentication bindings that link an external identifier to a SoftLayer user. +func (r User_Customer_OpenIdConnect) GetExternalBindings() (resp []datatypes.User_External_Binding, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getExternalBindings", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's accessible hardware. These permissions control which hardware a user has access to in the SoftLayer customer portal. +func (r User_Customer_OpenIdConnect) GetHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve the number of servers that a portal user has access to. Portal users can have restrictions set to limit services for and to perform actions on hardware. You can set these permissions in the portal by clicking the "administrative" then "user admin" links. +func (r User_Customer_OpenIdConnect) GetHardwareCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getHardwareCount", nil, &r.Options, &resp) + return +} + +// Retrieve Hardware notifications associated with this user. A hardware notification links a user to a piece of hardware, and that user will be notified if any monitors on that hardware fail, if the monitors have a status of 'Notify User'. +func (r User_Customer_OpenIdConnect) GetHardwareNotifications() (resp []datatypes.User_Customer_Notification_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getHardwareNotifications", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a user has acknowledged the support policy. +func (r User_Customer_OpenIdConnect) GetHasAcknowledgedSupportPolicyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getHasAcknowledgedSupportPolicyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Permission granting the user access to all Dedicated Host devices on the account. +func (r User_Customer_OpenIdConnect) GetHasFullDedicatedHostAccessFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getHasFullDedicatedHostAccessFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a portal user has access to all hardware on their account. +func (r User_Customer_OpenIdConnect) GetHasFullHardwareAccessFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getHasFullHardwareAccessFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a portal user has access to all hardware on their account. 
+func (r User_Customer_OpenIdConnect) GetHasFullVirtualGuestAccessFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getHasFullVirtualGuestAccessFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetImpersonationToken() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getImpersonationToken", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer_OpenIdConnect) GetLayoutProfiles() (resp []datatypes.Layout_Profile, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getLayoutProfiles", nil, &r.Options, &resp) + return +} + +// Retrieve A user's locale. Locale holds user's language and region information. +func (r User_Customer_OpenIdConnect) GetLocale() (resp datatypes.Locale, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getLocale", nil, &r.Options, &resp) + return +} + +// Validates a supplied OpenIdConnect access token to the SoftLayer customer portal and returns the default account name and id for the active user. An exception will be thrown if no matching customer is found. +func (r User_Customer_OpenIdConnect) GetLoginAccountInfoOpenIdConnect(providerType *string, accessToken *string) (resp datatypes.Container_User_Customer_OpenIdConnect_LoginAccountInfo, err error) { + params := []interface{}{ + providerType, + accessToken, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getLoginAccountInfoOpenIdConnect", params, &r.Options, &resp) + return +} + +// Retrieve A user's attempts to log into the SoftLayer customer portal. +func (r User_Customer_OpenIdConnect) GetLoginAttempts() (resp []datatypes.User_Customer_Access_Authentication, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getLoginAttempts", nil, &r.Options, &resp) + return +} + +// Attempt to authenticate a user to the SoftLayer customer portal using the provided authentication container. Depending on the specific type of authentication container that is used, this API will leverage the appropriate authentication protocol. If authentication is successful then the API returns a list of linked accounts for the user, a token containing the ID of the authenticated user and a hash key used by the SoftLayer customer portal to maintain authentication. +func (r User_Customer_OpenIdConnect) GetLoginToken(request *datatypes.Container_Authentication_Request_Contract) (resp datatypes.Container_Authentication_Response_Common, err error) { + params := []interface{}{ + request, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getLoginToken", params, &r.Options, &resp) + return +} + +// An OpenIdConnect identity, for example an IBMid, can be linked or mapped to one or more individual SoftLayer users, but no more than one SoftLayer user per account. This effectively links the OpenIdConnect identity to those accounts. This API returns a list of all the accounts for which there is a link between the OpenIdConnect identity and a SoftLayer user. Invoke this only on IBMid-authenticated users. 
+func (r User_Customer_OpenIdConnect) GetMappedAccounts(providerType *string) (resp []datatypes.Account, err error) {
+	params := []interface{}{
+		providerType,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getMappedAccounts", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's associated mobile device profiles.
+func (r User_Customer_OpenIdConnect) GetMobileDevices() (resp []datatypes.User_Customer_MobileDevice, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getMobileDevices", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Notification subscription records for the user.
+func (r User_Customer_OpenIdConnect) GetNotificationSubscribers() (resp []datatypes.Notification_Subscriber, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getNotificationSubscribers", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer_OpenIdConnect) GetObject() (resp datatypes.User_Customer_OpenIdConnect, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// This API returns a SoftLayer_Container_User_Customer_OpenIdConnect_MigrationState object containing the necessary information to determine what migration state the user is in. If the account is not OpenIdConnect authenticated, then an exception is thrown.
+func (r User_Customer_OpenIdConnect) GetOpenIdConnectMigrationState() (resp datatypes.Container_User_Customer_OpenIdConnect_MigrationState, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getOpenIdConnectMigrationState", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer_OpenIdConnect) GetOpenIdRegistrationInfoFromCode(providerType *string, registrationCode *string) (resp datatypes.Account_Authentication_OpenIdConnect_RegistrationInformation, err error) {
+	params := []interface{}{
+		providerType,
+		registrationCode,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getOpenIdRegistrationInfoFromCode", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A user's associated open tickets.
+func (r User_Customer_OpenIdConnect) GetOpenTickets() (resp []datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getOpenTickets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's VPN accessible subnets.
+func (r User_Customer_OpenIdConnect) GetOverrides() (resp []datatypes.Network_Service_Vpn_Overrides, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getOverrides", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's parent user. If a SoftLayer_User_Customer has a null parentId property then it doesn't have a parent user.
+func (r User_Customer_OpenIdConnect) GetParent() (resp datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getParent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's permissions. These permissions control that user's access to functions within the SoftLayer customer portal and API.
+func (r User_Customer_OpenIdConnect) GetPermissions() (resp []datatypes.User_Customer_CustomerPermission_Permission, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getPermissions", nil, &r.Options, &resp) + return +} + +// Attempt to authenticate a username and password to the SoftLayer customer portal. Many portal user accounts are configured to require answering a security question on login. In this case getPortalLoginToken() also verifies the given security question ID and answer. If authentication is successful then the API returns a token containing the ID of the authenticated user and a hash key used by the SoftLayer customer portal to maintain authentication. +func (r User_Customer_OpenIdConnect) GetPortalLoginToken(username *string, password *string, securityQuestionId *int, securityQuestionAnswer *string) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + username, + password, + securityQuestionId, + securityQuestionAnswer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getPortalLoginToken", params, &r.Options, &resp) + return +} + +// Attempt to authenticate a supplied OpenIdConnect access token to the SoftLayer customer portal. If authentication is successful then the API returns a token containing the ID of the authenticated user and a hash key used by the SoftLayer customer portal to maintain authentication. +func (r User_Customer_OpenIdConnect) GetPortalLoginTokenOpenIdConnect(providerType *string, accessToken *string, accountId *int, securityQuestionId *int, securityQuestionAnswer *string) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + providerType, + accessToken, + accountId, + securityQuestionId, + securityQuestionAnswer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getPortalLoginTokenOpenIdConnect", params, &r.Options, &resp) + return +} + +// Select a type of preference you would like to get using [[SoftLayer_User_Customer::getPreferenceTypes|getPreferenceTypes]] and invoke this method using that preference type key name. +func (r User_Customer_OpenIdConnect) GetPreference(preferenceTypeKeyName *string) (resp datatypes.User_Preference, err error) { + params := []interface{}{ + preferenceTypeKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getPreference", params, &r.Options, &resp) + return +} + +// Use any of the preference types to fetch or modify user preferences using [[SoftLayer_User_Customer::getPreference|getPreference]] or [[SoftLayer_User_Customer::changePreference|changePreference]], respectively. +func (r User_Customer_OpenIdConnect) GetPreferenceTypes() (resp []datatypes.User_Preference_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getPreferenceTypes", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer_OpenIdConnect) GetPreferences() (resp []datatypes.User_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getPreferences", nil, &r.Options, &resp) + return +} + +// Retrieve the authentication requirements for an outstanding password set/reset request. The password key provided to the user in an email generated by the [[SoftLayer_User_Customer::newUserPassword|newUserPassword]]. Password recovery keys are valid for 24 hours after they're generated. 
+func (r User_Customer_OpenIdConnect) GetRequirementsForPasswordSet(passwordSet *datatypes.Container_User_Customer_PasswordSet) (resp datatypes.Container_User_Customer_PasswordSet, err error) { + params := []interface{}{ + passwordSet, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getRequirementsForPasswordSet", params, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer_OpenIdConnect) GetRoles() (resp []datatypes.User_Permission_Role, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getRoles", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer_OpenIdConnect) GetSalesforceUserLink() (resp datatypes.User_Customer_Link, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSalesforceUserLink", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's security question answers. Some portal users may not have security answers or may not be configured to require answering a security question on login. +func (r User_Customer_OpenIdConnect) GetSecurityAnswers() (resp []datatypes.User_Customer_Security_Answer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSecurityAnswers", nil, &r.Options, &resp) + return +} + +// Retrieve A user's notification subscription records. +func (r User_Customer_OpenIdConnect) GetSubscribers() (resp []datatypes.Notification_User_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSubscribers", nil, &r.Options, &resp) + return +} + +// Retrieve A user's successful attempts to log into the SoftLayer customer portal. +func (r User_Customer_OpenIdConnect) GetSuccessfulLogins() (resp []datatypes.User_Customer_Access_Authentication, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSuccessfulLogins", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a user is required to acknowledge the support policy for portal access. +func (r User_Customer_OpenIdConnect) GetSupportPolicyAcknowledgementRequiredFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSupportPolicyAcknowledgementRequiredFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetSupportPolicyDocument() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSupportPolicyDocument", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetSupportPolicyName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSupportPolicyName", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetSupportedLocales() (resp []datatypes.Locale, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSupportedLocales", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a user must take a brief survey the next time they log into the SoftLayer customer portal. +func (r User_Customer_OpenIdConnect) GetSurveyRequiredFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSurveyRequiredFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The surveys that a user has taken in the SoftLayer customer portal. 
+func (r User_Customer_OpenIdConnect) GetSurveys() (resp []datatypes.Survey, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSurveys", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A user's associated tickets.
+func (r User_Customer_OpenIdConnect) GetTickets() (resp []datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getTickets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's time zone.
+func (r User_Customer_OpenIdConnect) GetTimezone() (resp datatypes.Locale_Timezone, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getTimezone", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A user's unsuccessful attempts to log into the SoftLayer customer portal.
+func (r User_Customer_OpenIdConnect) GetUnsuccessfulLogins() (resp []datatypes.User_Customer_Access_Authentication, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getUnsuccessfulLogins", nil, &r.Options, &resp)
+	return
+}
+
+// Returns an IMS User Object from the provided OpenIdConnect User ID or IBMid Unique Identifier for the Account of the active user. Enforces the User Management permissions for the Active User. An exception will be thrown if no matching IMS User is found. NOTE that providing the IBMid Unique Identifier is optional, but it will be preferred over the OpenIdConnect User ID if provided.
+func (r User_Customer_OpenIdConnect) GetUserForUnifiedInvitation(openIdConnectUserId *string, ibmIdUniqueIdentifier *string) (resp datatypes.User_Customer_OpenIdConnect, err error) {
+	params := []interface{}{
+		openIdConnectUserId,
+		ibmIdUniqueIdentifier,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getUserForUnifiedInvitation", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve a user object using a password token. When a new user is created or when a user has requested a password change using initiatePortalPasswordChange, they will have received an email that contains a URL with a token. That token is used as the parameter for getUserIdForPasswordSet.
+func (r User_Customer_OpenIdConnect) GetUserIdForPasswordSet(key *string) (resp int, err error) {
+	params := []interface{}{
+		key,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getUserIdForPasswordSet", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r User_Customer_OpenIdConnect) GetUserLinks() (resp []datatypes.User_Customer_Link, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getUserLinks", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer_OpenIdConnect) GetUserPreferences(profileName *string, containerKeyname *string) (resp []datatypes.Layout_Profile, err error) {
+	params := []interface{}{
+		profileName,
+		containerKeyname,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getUserPreferences", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's status, which controls overall access to the SoftLayer customer portal and VPN access to the private network.
+func (r User_Customer_OpenIdConnect) GetUserStatus() (resp datatypes.User_Customer_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getUserStatus", nil, &r.Options, &resp)
+	return
+}
+
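+// A minimal usage sketch (not part of the generated bindings) for the
+// getUserForUnifiedInvitation lookup described above: the call takes an
+// OpenIdConnect user id and, optionally, an IBMid unique identifier, with the
+// IBMid identifier preferred when both are supplied. Both values below are
+// hypothetical placeholders.
+func exampleLookupUserForInvitation(sess *session.Session) (datatypes.User_Customer_OpenIdConnect, error) {
+	userService := GetUserCustomerOpenIdConnectService(sess)
+	openIdConnectUserId := "example-openidconnect-user-id" // hypothetical
+	ibmIdUniqueIdentifier := "example-ibmid-unique-id"     // hypothetical, preferred when provided
+	return userService.GetUserForUnifiedInvitation(&openIdConnectUserId, &ibmIdUniqueIdentifier)
+}
+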
+// Retrieve the number of CloudLayer Computing Instances that a portal user has access to. Portal users can have restrictions set to limit services for and to perform actions on CloudLayer Computing Instances. You can set these permissions in the portal by clicking the "administrative" then "user admin" links.
+func (r User_Customer_OpenIdConnect) GetVirtualGuestCount() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getVirtualGuestCount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's accessible CloudLayer Computing Instances. These permissions control which CloudLayer Computing Instances a user has access to in the SoftLayer customer portal.
+func (r User_Customer_OpenIdConnect) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer_OpenIdConnect) InTerminalStatus() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "inTerminalStatus", nil, &r.Options, &resp)
+	return
+}
+
+// The service initiates an external authentication with the given external authentication vendor. The authentication container and its content will be verified before an attempt is made to initiate an external authentication. A [[SoftLayer_Container_User_Customer_External_Binding_Phone|Phone external binding]] container can be used for this service.
+//
+// This service returns a unique authentication request token. You can use the [[SoftLayer_User_Customer::checkExternalAuthenticationStatus|checkExternalAuthenticationStatus]] service to check whether the authentication request is complete.
+func (r User_Customer_OpenIdConnect) InitiateExternalAuthentication(authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp string, err error) {
+	params := []interface{}{
+		authenticationContainer,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "initiateExternalAuthentication", params, &r.Options, &resp)
+	return
+}
+
+// Sends a password change email to the user containing a URL that allows the user to change their password. This is the first step when a user wishes to change their password. The URL that is generated contains a one-time use token that is valid for only 24 hours.
+//
+// If this is a new master user who has never logged into the portal, then a password reset will be initiated. Once a master user has logged into the portal, they must set up their security questions prior to logging out because master users are required to answer a security question during the password reset process. Should a master user not have security questions defined and not remember their password in order to define the security questions, then they will need to contact support at live chat or Revenue Services for assistance.
+//
+// For security reasons, the number of reset requests per username is limited within an undisclosed timeframe.
+func (r User_Customer_OpenIdConnect) InitiatePortalPasswordChange(username *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "initiatePortalPasswordChange", params, &r.Options, &resp)
+	return
+}
+
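+// A minimal usage sketch (not part of the generated bindings) for the
+// password-change flow described above: initiatePortalPasswordChange only
+// starts the flow; the emailed URL carries a one-time token that is later
+// consumed by getUserIdForPasswordSet and processPasswordSetRequest. The
+// username handling below is illustrative only.
+func exampleStartPasswordChange(sess *session.Session, username string) error {
+	userService := GetUserCustomerOpenIdConnectService(sess)
+	accepted, err := userService.InitiatePortalPasswordChange(&username)
+	if err != nil {
+		return err
+	}
+	if !accepted {
+		return fmt.Errorf("password change request for %q was not accepted", username)
+	}
+	return nil
+}
+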
+// A Brand Agent that has permissions to Add Customer Accounts will be able to request the password email be sent to the Master User of a Customer Account created by the same Brand as the agent making the request. For security reasons, the number of reset requests is limited within an undisclosed timeframe.
+func (r User_Customer_OpenIdConnect) InitiatePortalPasswordChangeByBrandAgent(username *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "initiatePortalPasswordChangeByBrandAgent", params, &r.Options, &resp)
+	return
+}
+
+// Send an email invitation to a user to join a SoftLayer account and authenticate with OpenIdConnect. Throws an exception on error.
+func (r User_Customer_OpenIdConnect) InviteUserToLinkOpenIdConnect(providerType *string) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		providerType,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "inviteUserToLinkOpenIdConnect", params, &r.Options, &resp)
+	return
+}
+
+// Portal users are considered master users if they don't have an associated parent user. The only users who don't have parent users are users whose username matches their SoftLayer account name. Master users have special permissions throughout the SoftLayer customer portal.
+func (r User_Customer_OpenIdConnect) IsMasterUser() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "isMasterUser", nil, &r.Options, &resp)
+	return
+}
+
+// This method is deprecated! SoftLayer Community Forums no longer exist; therefore, any password verified will return false. In the future, this method will be completely removed.
+//
+// Determine if a string is the given user's login password to the SoftLayer community forums.
+func (r User_Customer_OpenIdConnect) IsValidForumPassword(password *string) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		password,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "isValidForumPassword", params, &r.Options, &resp)
+	return
+}
+
+// Determine if a string is the given user's login password to the SoftLayer customer portal.
+func (r User_Customer_OpenIdConnect) IsValidPortalPassword(password *string) (resp bool, err error) {
+	params := []interface{}{
+		password,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "isValidPortalPassword", params, &r.Options, &resp)
+	return
+}
+
+// The perform external authentication method will authenticate the given external authentication container with an external vendor. The authentication container and its contents will be verified before an attempt is made to authenticate the contents of the container with an external vendor.
+func (r User_Customer_OpenIdConnect) PerformExternalAuthentication(authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp datatypes.Container_User_Customer_Portal_Token, err error) {
+	params := []interface{}{
+		authenticationContainer,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "performExternalAuthentication", params, &r.Options, &resp)
+	return
+}
+
+// Set the password for a user who has an outstanding password request. A user with an outstanding password request will have an unused and unexpired password key. The password key is part of the URL in the email sent to the user with information on how to set their password. The email was generated by the [[SoftLayer_User_Customer::initiatePortalPasswordRequest|initiatePortalPasswordRequest]] method. Password recovery keys are valid for 24 hours after they're generated.
+// +// User portal passwords must match the following restrictions. Portal passwords must... +// * ...be over eight characters long. +// * ...be under twenty characters long. +// * ...contain at least one uppercase letter +// * ...contain at least one lowercase letter +// * ...contain at least one number +// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ + = +// * ...not match your username +// * ...not match your forum password +func (r User_Customer_OpenIdConnect) ProcessPasswordSetRequest(passwordSet *datatypes.Container_User_Customer_PasswordSet, authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp bool, err error) { + params := []interface{}{ + passwordSet, + authenticationContainer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "processPasswordSetRequest", params, &r.Options, &resp) + return +} + +// Revoke access to all dedicated hosts on the account for this user. The user will only be allowed to see and access devices in both the portal and the API to which they have been granted access. If the user's account has devices to which the user has not been granted access or the access has been revoked, then "not found" exceptions are thrown if the user attempts to access any of these devices. If the current user does not have administrative privileges over this user, an inadequate permissions exception will get thrown. +// +// Users can call this function on child users, but not to themselves. An account's master has access to all users permissions on their account. +func (r User_Customer_OpenIdConnect) RemoveAllDedicatedHostAccessForThisUser() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeAllDedicatedHostAccessForThisUser", nil, &r.Options, &resp) + return +} + +// Remove all hardware from a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. If the current user does not have administrative privileges over this user, an inadequate permissions exception will get thrown. +// +// Users can call this function on child users, but not to themselves. An account's master has access to all users permissions on their account. +func (r User_Customer_OpenIdConnect) RemoveAllHardwareAccessForThisUser() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeAllHardwareAccessForThisUser", nil, &r.Options, &resp) + return +} + +// Remove all cloud computing instances from a portal user's instance access list. A user's instance access list controls which of an account's computing instance objects a user has access to in the SoftLayer customer portal and API. If the current user does not have administrative privileges over this user, an inadequate permissions exception will get thrown. +// +// Users can call this function on child users, but not to themselves. An account's master has access to all users permissions on their account. +func (r User_Customer_OpenIdConnect) RemoveAllVirtualAccessForThisUser() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeAllVirtualAccessForThisUser", nil, &r.Options, &resp) + return +} + +// Remove a user's API authentication key, removing that user's access to query the SoftLayer API. 
+func (r User_Customer_OpenIdConnect) RemoveApiAuthenticationKey(keyId *int) (resp bool, err error) {
+	params := []interface{}{
+		keyId,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeApiAuthenticationKey", params, &r.Options, &resp)
+	return
+}
+
+// Revokes access for the user to one or more dedicated host devices. The user will only be allowed to see and access devices in both the portal and the API to which they have been granted access. If the user's account has devices to which the user has not been granted access or the access has been revoked, then "not found" exceptions are thrown if the user attempts to access any of these devices.
+//
+// Users can assign device access to their child users, but not to themselves. An account's master has access to all devices on their customer account and can set dedicated host access for any of the other users on their account.
+//
+// If the user has full dedicatedHost access, then it will provide access to "ALL but passed in" dedicatedHost ids.
+func (r User_Customer_OpenIdConnect) RemoveBulkDedicatedHostAccess(dedicatedHostIds []int) (resp bool, err error) {
+	params := []interface{}{
+		dedicatedHostIds,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeBulkDedicatedHostAccess", params, &r.Options, &resp)
+	return
+}
+
+// Remove multiple hardware from a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware does not exist in the SoftLayer portal and returns "not found" exceptions in the API if the user doesn't have access to it. If a user does not have access to the hardware you're attempting to remove, then removeBulkHardwareAccess() returns true.
+//
+// Users can assign hardware access to their child users, but not to themselves. An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account.
+//
+// If the user has full hardware access, then it will provide access to "ALL but passed in" hardware ids.
+func (r User_Customer_OpenIdConnect) RemoveBulkHardwareAccess(hardwareIds []int) (resp bool, err error) {
+	params := []interface{}{
+		hardwareIds,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeBulkHardwareAccess", params, &r.Options, &resp)
+	return
+}
+
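+// A minimal usage sketch (not part of the generated bindings) for the bulk
+// hardware revocation described above: plain hardware ids are passed in, and
+// a user holding full hardware access is instead left with "ALL but passed
+// in" ids. The child user id and hardware ids below are hypothetical.
+func exampleRevokeHardwareAccess(sess *session.Session, childUserId int) (bool, error) {
+	userService := GetUserCustomerOpenIdConnectService(sess).Id(childUserId)
+	hardwareIds := []int{111111, 222222} // hypothetical hardware ids to revoke
+	return userService.RemoveBulkHardwareAccess(hardwareIds)
+}
+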
+// Remove multiple permissions from a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. Removing a user's permission will affect that user's portal and API access. removePortalPermission() does not attempt to remove permissions that are not assigned to the user.
+//
+// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account.
+//
+// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are removed based on the keyName property of the permission objects within the permissions parameter.
+func (r User_Customer_OpenIdConnect) RemoveBulkPortalPermission(permissions []datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) {
+	params := []interface{}{
+		permissions,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeBulkPortalPermission", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer_OpenIdConnect) RemoveBulkRoles(roles []datatypes.User_Permission_Role) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		roles,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeBulkRoles", params, &r.Options, &resp)
+	return
+}
+
+// Remove multiple CloudLayer Computing Instances from a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's CloudLayer Computing Instance objects a user has access to in the SoftLayer customer portal and API. CloudLayer Computing Instances do not exist in the SoftLayer portal and return "not found" exceptions in the API if the user doesn't have access to them. If a user does not have access to the CloudLayer Computing Instance you're attempting to remove, then removeBulkVirtualGuestAccess() returns true.
+//
+// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master has access to all CloudLayer Computing Instances on their customer account and can set instance access for any of the other users on their account.
+func (r User_Customer_OpenIdConnect) RemoveBulkVirtualGuestAccess(virtualGuestIds []int) (resp bool, err error) {
+	params := []interface{}{
+		virtualGuestIds,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeBulkVirtualGuestAccess", params, &r.Options, &resp)
+	return
+}
+
+// Revokes access for the user to a single dedicated host device. The user will only be allowed to see and access devices in both the portal and the API to which they have been granted access. If the user's account has devices to which the user has not been granted access or the access has been revoked, then "not found" exceptions are thrown if the user attempts to access any of these devices.
+//
+// Users can assign device access to their child users, but not to themselves. An account's master has access to all devices on their customer account and can set dedicated host access for any of the other users on their account.
+func (r User_Customer_OpenIdConnect) RemoveDedicatedHostAccess(dedicatedHostId *int) (resp bool, err error) {
+	params := []interface{}{
+		dedicatedHostId,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeDedicatedHostAccess", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer_OpenIdConnect) RemoveExternalBinding(externalBinding *datatypes.User_External_Binding) (resp bool, err error) {
+	params := []interface{}{
+		externalBinding,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeExternalBinding", params, &r.Options, &resp)
+	return
+}
+
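+// A minimal usage sketch (not part of the generated bindings) for the
+// removeBulkPortalPermission workflow described earlier: permissions are
+// matched solely on their keyName property, so bare permission objects
+// carrying the key names to revoke are sufficient. The key names below are
+// hypothetical placeholders, and sl.String is assumed to be the pointer
+// helper from the sl package.
+func exampleRevokePortalPermissions(sess *session.Session, childUserId int) (bool, error) {
+	userService := GetUserCustomerOpenIdConnectService(sess).Id(childUserId)
+	permissions := []datatypes.User_Customer_CustomerPermission_Permission{
+		{KeyName: sl.String("TICKET_VIEW")}, // hypothetical permission key name
+		{KeyName: sl.String("TICKET_EDIT")}, // hypothetical permission key name
+	}
+	return userService.RemoveBulkPortalPermission(permissions)
+}
+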
+// Remove hardware from a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware does not exist in the SoftLayer portal and returns "not found" exceptions in the API if the user doesn't have access to it. If a user does not have access to the hardware you're attempting to remove, then removeHardwareAccess() returns true.
+//
+// Users can assign hardware access to their child users, but not to themselves. An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account.
+func (r User_Customer_OpenIdConnect) RemoveHardwareAccess(hardwareId *int) (resp bool, err error) {
+	params := []interface{}{
+		hardwareId,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeHardwareAccess", params, &r.Options, &resp)
+	return
+}
+
+// Remove a permission from a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. Removing a user's permission will affect that user's portal and API access. If the user does not have the permission you're attempting to remove then removePortalPermission() returns true.
+//
+// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account.
+//
+// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are removed based on the keyName property of the permission parameter.
+func (r User_Customer_OpenIdConnect) RemovePortalPermission(permission *datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) {
+	params := []interface{}{
+		permission,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removePortalPermission", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer_OpenIdConnect) RemoveRole(role *datatypes.User_Permission_Role) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		role,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeRole", params, &r.Options, &resp)
+	return
+}
+
+// Remove a CloudLayer Computing Instance from a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's computing instances a user has access to in the SoftLayer customer portal and API. CloudLayer Computing Instances do not exist in the SoftLayer portal and return "not found" exceptions in the API if the user doesn't have access to them. If a user does not have access to the CloudLayer Computing Instance you're attempting to remove, then removeVirtualGuestAccess() returns true.
+//
+// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master has access to all CloudLayer Computing Instances on their customer account and can set instance access for any of the other users on their account.
+func (r User_Customer_OpenIdConnect) RemoveVirtualGuestAccess(virtualGuestId *int) (resp bool, err error) { + params := []interface{}{ + virtualGuestId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeVirtualGuestAccess", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) SamlAuthenticate(accountId *string, samlResponse *string) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + accountId, + samlResponse, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "samlAuthenticate", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) SamlBeginAuthentication(accountId *int) (resp string, err error) { + params := []interface{}{ + accountId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "samlBeginAuthentication", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) SamlBeginLogout() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "samlBeginLogout", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) SamlLogout(samlResponse *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + samlResponse, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "samlLogout", params, &r.Options, &resp) + return +} + +// An OpenIdConnect identity, for example an IBMid, can be linked or mapped to one or more individual SoftLayer users, but no more than one per account. If an OpenIdConnect identity is mapped to multiple accounts in this manner, one such account should be identified as the default account for that identity. Invoke this only on IBMid-authenticated users. +func (r User_Customer_OpenIdConnect) SetDefaultAccount(providerType *string, accountId *int) (resp datatypes.Account, err error) { + params := []interface{}{ + providerType, + accountId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "setDefaultAccount", params, &r.Options, &resp) + return +} + +// As master user, calling this api for the IBMid provider type when there is an existing IBMid for the email on the SL account will silently (without sending an invitation email) create a link for the IBMid. NOTE: If the SoftLayer user is already linked to IBMid, this call will fail. If the IBMid specified by the email of this user, is already used in a link to another user in this account, this call will fail. If there is already an open invitation from this SoftLayer user to this or any IBMid, this call will fail. If there is already an open invitation from some other SoftLayer user in this account to this IBMid, then this call will fail. +func (r User_Customer_OpenIdConnect) SilentlyMigrateUserOpenIdConnect(providerType *string) (resp bool, err error) { + params := []interface{}{ + providerType, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "silentlyMigrateUserOpenIdConnect", params, &r.Options, &resp) + return +} + +// This method is deprecated! SoftLayer Community Forums no longer exist, therefore, this method will return false. In the future, this method will be completely removed. +// +// Update a user's password on the SoftLayer community forums. As with portal passwords, user forum passwords must match the following restrictions. Forum passwords must... 
+// * ...be over eight characters long. +// * ...be under twenty characters long. +// * ...contain at least one uppercase letter +// * ...contain at least one lowercase letter +// * ...contain at least one number +// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ + = +// * ...not match your username +// * ...not match your portal password +// Finally, users can only update their own password. +func (r User_Customer_OpenIdConnect) UpdateForumPassword(password *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "updateForumPassword", params, &r.Options, &resp) + return +} + +// Update the active status for a notification that the user is subscribed to. A notification along with an active flag can be supplied to update the active status for a particular notification subscription. +func (r User_Customer_OpenIdConnect) UpdateNotificationSubscriber(notificationKeyName *string, active *int) (resp bool, err error) { + params := []interface{}{ + notificationKeyName, + active, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "updateNotificationSubscriber", params, &r.Options, &resp) + return +} + +// Update a user's login security questions and answers on the SoftLayer customer portal. These questions and answers are used to optionally log into the SoftLayer customer portal using two-factor authentication. Each user must have three distinct questions set with a unique answer for each question, and each answer may only contain alphanumeric or the . , - _ ( ) [ ] : ; > < characters. Existing user security questions and answers are deleted before new ones are set, and users may only update their own security questions and answers. +func (r User_Customer_OpenIdConnect) UpdateSecurityAnswers(questions []datatypes.User_Security_Question, answers []string) (resp bool, err error) { + params := []interface{}{ + questions, + answers, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "updateSecurityAnswers", params, &r.Options, &resp) + return +} + +// Update a delivery method for a notification that the user is subscribed to. A delivery method keyName along with an active flag can be supplied to update the active status of the delivery methods for the specified notification. Available delivery methods - 'EMAIL'. Available notifications - 'PLANNED_MAINTENANCE', 'UNPLANNED_INCIDENT'. +func (r User_Customer_OpenIdConnect) UpdateSubscriberDeliveryMethod(notificationKeyName *string, deliveryMethodKeyNames []string, active *int) (resp bool, err error) { + params := []interface{}{ + notificationKeyName, + deliveryMethodKeyNames, + active, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "updateSubscriberDeliveryMethod", params, &r.Options, &resp) + return +} + +// Update a user's VPN password on the SoftLayer customer portal. As with portal passwords, VPN passwords must match the following restrictions. VPN passwords must... +// * ...be over eight characters long. +// * ...be under twenty characters long. +// * ...contain at least one uppercase letter +// * ...contain at least one lowercase letter +// * ...contain at least one number +// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ = +// * ...not match your username +// * ...not match your forum password +// Finally, users can only update their own VPN password. 
An account's master user can update any of their account users' VPN passwords. +func (r User_Customer_OpenIdConnect) UpdateVpnPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "updateVpnPassword", params, &r.Options, &resp) + return +} + +// Always call this function to enable changes when manually configuring VPN subnet access. +func (r User_Customer_OpenIdConnect) UpdateVpnUser() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "updateVpnUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) ValidateAuthenticationToken(authenticationToken *datatypes.Container_User_Authentication_Token) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + authenticationToken, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "validateAuthenticationToken", params, &r.Options, &resp) + return +} + +// Contains user information for Service Provider Enrollment. +type User_Customer_Prospect_ServiceProvider_EnrollRequest struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerProspectServiceProviderEnrollRequestService returns an instance of the User_Customer_Prospect_ServiceProvider_EnrollRequest SoftLayer service +func GetUserCustomerProspectServiceProviderEnrollRequestService(sess *session.Session) User_Customer_Prospect_ServiceProvider_EnrollRequest { + return User_Customer_Prospect_ServiceProvider_EnrollRequest{Session: sess} +} + +func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) Id(id int) User_Customer_Prospect_ServiceProvider_EnrollRequest { + r.Options.Id = &id + return r +} + +func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) Mask(mask string) User_Customer_Prospect_ServiceProvider_EnrollRequest { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) Filter(filter string) User_Customer_Prospect_ServiceProvider_EnrollRequest { + r.Options.Filter = filter + return r +} + +func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) Limit(limit int) User_Customer_Prospect_ServiceProvider_EnrollRequest { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) Offset(offset int) User_Customer_Prospect_ServiceProvider_EnrollRequest { + r.Options.Offset = &offset + return r +} + +// Create a new Service Provider Enrollment +func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) Enroll(templateObject *datatypes.User_Customer_Prospect_ServiceProvider_EnrollRequest) (resp datatypes.User_Customer_Prospect_ServiceProvider_EnrollRequest, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Prospect_ServiceProvider_EnrollRequest", "enroll", params, &r.Options, &resp) + return +} + +// Retrieve Catalyst company types. 
+func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) GetCompanyType() (resp datatypes.Catalyst_Company_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Prospect_ServiceProvider_EnrollRequest", "getCompanyType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) GetObject() (resp datatypes.User_Customer_Prospect_ServiceProvider_EnrollRequest, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Prospect_ServiceProvider_EnrollRequest", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_Security_Answer type contains user's answers to security questions. +type User_Customer_Security_Answer struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerSecurityAnswerService returns an instance of the User_Customer_Security_Answer SoftLayer service +func GetUserCustomerSecurityAnswerService(sess *session.Session) User_Customer_Security_Answer { + return User_Customer_Security_Answer{Session: sess} +} + +func (r User_Customer_Security_Answer) Id(id int) User_Customer_Security_Answer { + r.Options.Id = &id + return r +} + +func (r User_Customer_Security_Answer) Mask(mask string) User_Customer_Security_Answer { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_Security_Answer) Filter(filter string) User_Customer_Security_Answer { + r.Options.Filter = filter + return r +} + +func (r User_Customer_Security_Answer) Limit(limit int) User_Customer_Security_Answer { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_Security_Answer) Offset(offset int) User_Customer_Security_Answer { + r.Options.Offset = &offset + return r +} + +// getObject retrieves the SoftLayer_User_Customer_Security_Answer object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_User_Customer_Security_Answer service. +func (r User_Customer_Security_Answer) GetObject() (resp datatypes.User_Customer_Security_Answer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Security_Answer", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The question the security answer is associated with. +func (r User_Customer_Security_Answer) GetQuestion() (resp datatypes.User_Security_Question, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Security_Answer", "getQuestion", nil, &r.Options, &resp) + return +} + +// Retrieve The user who the security answer belongs to. +func (r User_Customer_Security_Answer) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Security_Answer", "getUser", nil, &r.Options, &resp) + return +} + +// Each SoftLayer portal account is assigned a status code that determines how it's treated in the customer portal. This status is reflected in the SoftLayer_User_Customer_Status data type. Status differs from user permissions in that user status applies globally to the portal while user permissions are applied to specific portal functions. 
+type User_Customer_Status struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerStatusService returns an instance of the User_Customer_Status SoftLayer service +func GetUserCustomerStatusService(sess *session.Session) User_Customer_Status { + return User_Customer_Status{Session: sess} +} + +func (r User_Customer_Status) Id(id int) User_Customer_Status { + r.Options.Id = &id + return r +} + +func (r User_Customer_Status) Mask(mask string) User_Customer_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_Status) Filter(filter string) User_Customer_Status { + r.Options.Filter = filter + return r +} + +func (r User_Customer_Status) Limit(limit int) User_Customer_Status { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_Status) Offset(offset int) User_Customer_Status { + r.Options.Offset = &offset + return r +} + +// Retrieve all user status objects. +func (r User_Customer_Status) GetAllObjects() (resp []datatypes.User_Customer_Status, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Status", "getAllObjects", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_User_Customer_Status object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_User_Customer_Status service. +func (r User_Customer_Status) GetObject() (resp datatypes.User_Customer_Status, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Status", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_User_External_Binding data type contains general information for a single external binding. This includes the 3rd party vendor, type of binding, and a unique identifier and password that is used to authenticate against the 3rd party service. +type User_External_Binding struct { + Session *session.Session + Options sl.Options +} + +// GetUserExternalBindingService returns an instance of the User_External_Binding SoftLayer service +func GetUserExternalBindingService(sess *session.Session) User_External_Binding { + return User_External_Binding{Session: sess} +} + +func (r User_External_Binding) Id(id int) User_External_Binding { + r.Options.Id = &id + return r +} + +func (r User_External_Binding) Mask(mask string) User_External_Binding { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_External_Binding) Filter(filter string) User_External_Binding { + r.Options.Filter = filter + return r +} + +func (r User_External_Binding) Limit(limit int) User_External_Binding { + r.Options.Limit = &limit + return r +} + +func (r User_External_Binding) Offset(offset int) User_External_Binding { + r.Options.Offset = &offset + return r +} + +// Delete an external authentication binding. If the external binding currently has an active billing item associated you will be prevented from deleting the binding. The alternative method to remove an external authentication binding is to use the service cancellation form. +func (r User_External_Binding) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "deleteObject", nil, &r.Options, &resp) + return +} + +// Retrieve Attributes of an external authentication binding. 
+func (r User_External_Binding) GetAttributes() (resp []datatypes.User_External_Binding_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for external authentication. +func (r User_External_Binding) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve An optional note for identifying the external binding. +func (r User_External_Binding) GetNote() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "getNote", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_External_Binding) GetObject() (resp datatypes.User_External_Binding, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The type of external authentication binding. +func (r User_External_Binding) GetType() (resp datatypes.User_External_Binding_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The vendor of an external authentication binding. +func (r User_External_Binding) GetVendor() (resp datatypes.User_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "getVendor", nil, &r.Options, &resp) + return +} + +// Update the note of an external binding. The note is an optional property that is used to store information about a binding. +func (r User_External_Binding) UpdateNote(text *string) (resp bool, err error) { + params := []interface{}{ + text, + } + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "updateNote", params, &r.Options, &resp) + return +} + +// The SoftLayer_User_External_Binding_Vendor data type contains information for a single external binding vendor. This information includes a user friendly vendor name, a unique version of the vendor name, and a unique internal identifier that can be used when creating a new external binding. +type User_External_Binding_Vendor struct { + Session *session.Session + Options sl.Options +} + +// GetUserExternalBindingVendorService returns an instance of the User_External_Binding_Vendor SoftLayer service +func GetUserExternalBindingVendorService(sess *session.Session) User_External_Binding_Vendor { + return User_External_Binding_Vendor{Session: sess} +} + +func (r User_External_Binding_Vendor) Id(id int) User_External_Binding_Vendor { + r.Options.Id = &id + return r +} + +func (r User_External_Binding_Vendor) Mask(mask string) User_External_Binding_Vendor { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_External_Binding_Vendor) Filter(filter string) User_External_Binding_Vendor { + r.Options.Filter = filter + return r +} + +func (r User_External_Binding_Vendor) Limit(limit int) User_External_Binding_Vendor { + r.Options.Limit = &limit + return r +} + +func (r User_External_Binding_Vendor) Offset(offset int) User_External_Binding_Vendor { + r.Options.Offset = &offset + return r +} + +// getAllObjects() will return a list of the available external binding vendors that SoftLayer supports. 
Use this list to select the appropriate vendor when creating a new external binding. +func (r User_External_Binding_Vendor) GetAllObjects() (resp []datatypes.User_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding_Vendor", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_External_Binding_Vendor) GetObject() (resp datatypes.User_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding_Vendor", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type User_Permission_Action struct { + Session *session.Session + Options sl.Options +} + +// GetUserPermissionActionService returns an instance of the User_Permission_Action SoftLayer service +func GetUserPermissionActionService(sess *session.Session) User_Permission_Action { + return User_Permission_Action{Session: sess} +} + +func (r User_Permission_Action) Id(id int) User_Permission_Action { + r.Options.Id = &id + return r +} + +func (r User_Permission_Action) Mask(mask string) User_Permission_Action { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Permission_Action) Filter(filter string) User_Permission_Action { + r.Options.Filter = filter + return r +} + +func (r User_Permission_Action) Limit(limit int) User_Permission_Action { + r.Options.Limit = &limit + return r +} + +func (r User_Permission_Action) Offset(offset int) User_Permission_Action { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Permission_Action) GetAllObjects() (resp []datatypes.User_Permission_Action, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Action", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Action) GetObject() (resp datatypes.User_Permission_Action, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Action", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type User_Permission_Group struct { + Session *session.Session + Options sl.Options +} + +// GetUserPermissionGroupService returns an instance of the User_Permission_Group SoftLayer service +func GetUserPermissionGroupService(sess *session.Session) User_Permission_Group { + return User_Permission_Group{Session: sess} +} + +func (r User_Permission_Group) Id(id int) User_Permission_Group { + r.Options.Id = &id + return r +} + +func (r User_Permission_Group) Mask(mask string) User_Permission_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Permission_Group) Filter(filter string) User_Permission_Group { + r.Options.Filter = filter + return r +} + +func (r User_Permission_Group) Limit(limit int) User_Permission_Group { + r.Options.Limit = &limit + return r +} + +func (r User_Permission_Group) Offset(offset int) User_Permission_Group { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Permission_Group) AddAction(action *datatypes.User_Permission_Action) (err error) { + var resp datatypes.Void + params := []interface{}{ + action, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "addAction", params, &r.Options, &resp) + return +} + +// no 
documentation yet +func (r User_Permission_Group) AddBulkActions(actions []datatypes.User_Permission_Action) (err error) { + var resp datatypes.Void + params := []interface{}{ + actions, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "addBulkActions", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) AddBulkResourceObjects(resourceObjects []datatypes.Entity, resourceTypeKeyName *string) (resp bool, err error) { + params := []interface{}{ + resourceObjects, + resourceTypeKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "addBulkResourceObjects", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) AddResourceObject(resourceObject *datatypes.Entity, resourceTypeKeyName *string) (resp bool, err error) { + params := []interface{}{ + resourceObject, + resourceTypeKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "addResourceObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) CreateObject(templateObject *datatypes.User_Permission_Group) (resp datatypes.User_Permission_Group, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) EditObject(templateObject *datatypes.User_Permission_Group) (resp datatypes.User_Permission_Group, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Permission_Group) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Permission_Group) GetActions() (resp []datatypes.User_Permission_Action, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "getActions", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) GetObject() (resp datatypes.User_Permission_Group, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Permission_Group) GetRoles() (resp []datatypes.User_Permission_Role, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "getRoles", nil, &r.Options, &resp) + return +} + +// Retrieve The type of the permission group. 
+func (r User_Permission_Group) GetType() (resp datatypes.User_Permission_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) LinkRole(role *datatypes.User_Permission_Role) (err error) { + var resp datatypes.Void + params := []interface{}{ + role, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "linkRole", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) RemoveAction(action *datatypes.User_Permission_Action) (err error) { + var resp datatypes.Void + params := []interface{}{ + action, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "removeAction", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) RemoveBulkActions(actions []datatypes.User_Permission_Action) (err error) { + var resp datatypes.Void + params := []interface{}{ + actions, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "removeBulkActions", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) RemoveBulkResourceObjects(resourceObjects []datatypes.Entity, resourceTypeKeyName *string) (resp bool, err error) { + params := []interface{}{ + resourceObjects, + resourceTypeKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "removeBulkResourceObjects", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) RemoveResourceObject(resourceObject *datatypes.Entity, resourceTypeKeyName *string) (resp bool, err error) { + params := []interface{}{ + resourceObject, + resourceTypeKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "removeResourceObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) UnlinkRole(role *datatypes.User_Permission_Role) (err error) { + var resp datatypes.Void + params := []interface{}{ + role, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "unlinkRole", params, &r.Options, &resp) + return +} + +// no documentation yet +type User_Permission_Group_Type struct { + Session *session.Session + Options sl.Options +} + +// GetUserPermissionGroupTypeService returns an instance of the User_Permission_Group_Type SoftLayer service +func GetUserPermissionGroupTypeService(sess *session.Session) User_Permission_Group_Type { + return User_Permission_Group_Type{Session: sess} +} + +func (r User_Permission_Group_Type) Id(id int) User_Permission_Group_Type { + r.Options.Id = &id + return r +} + +func (r User_Permission_Group_Type) Mask(mask string) User_Permission_Group_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Permission_Group_Type) Filter(filter string) User_Permission_Group_Type { + r.Options.Filter = filter + return r +} + +func (r User_Permission_Group_Type) Limit(limit int) User_Permission_Group_Type { + r.Options.Limit = &limit + return r +} + +func (r User_Permission_Group_Type) Offset(offset int) User_Permission_Group_Type { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r User_Permission_Group_Type) GetGroups() (resp []datatypes.User_Permission_Group, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group_Type", "getGroups", nil, 
&r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group_Type) GetObject() (resp datatypes.User_Permission_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type User_Permission_Role struct { + Session *session.Session + Options sl.Options +} + +// GetUserPermissionRoleService returns an instance of the User_Permission_Role SoftLayer service +func GetUserPermissionRoleService(sess *session.Session) User_Permission_Role { + return User_Permission_Role{Session: sess} +} + +func (r User_Permission_Role) Id(id int) User_Permission_Role { + r.Options.Id = &id + return r +} + +func (r User_Permission_Role) Mask(mask string) User_Permission_Role { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Permission_Role) Filter(filter string) User_Permission_Role { + r.Options.Filter = filter + return r +} + +func (r User_Permission_Role) Limit(limit int) User_Permission_Role { + r.Options.Limit = &limit + return r +} + +func (r User_Permission_Role) Offset(offset int) User_Permission_Role { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Permission_Role) AddUser(user *datatypes.User_Customer) (err error) { + var resp datatypes.Void + params := []interface{}{ + user, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "addUser", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Role) CreateObject(templateObject *datatypes.User_Permission_Role) (resp datatypes.User_Permission_Role, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Role) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Role) EditObject(templateObject *datatypes.User_Permission_Role) (resp datatypes.User_Permission_Role, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Permission_Role) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Permission_Role) GetActions() (resp []datatypes.User_Permission_Action, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "getActions", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Permission_Role) GetGroups() (resp []datatypes.User_Permission_Group, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "getGroups", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Role) GetObject() (resp datatypes.User_Permission_Role, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Permission_Role) GetUsers() (resp []datatypes.User_Customer, err error) { + err = 
r.Session.DoRequest("SoftLayer_User_Permission_Role", "getUsers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Role) LinkGroup(group *datatypes.User_Permission_Group) (err error) { + var resp datatypes.Void + params := []interface{}{ + group, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "linkGroup", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Role) RemoveUser(user *datatypes.User_Customer) (err error) { + var resp datatypes.Void + params := []interface{}{ + user, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "removeUser", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Role) UnlinkGroup(group *datatypes.User_Permission_Group) (err error) { + var resp datatypes.Void + params := []interface{}{ + group, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "unlinkGroup", params, &r.Options, &resp) + return +} + +// The SoftLayer_User_Security_Question data type contains questions. +type User_Security_Question struct { + Session *session.Session + Options sl.Options +} + +// GetUserSecurityQuestionService returns an instance of the User_Security_Question SoftLayer service +func GetUserSecurityQuestionService(sess *session.Session) User_Security_Question { + return User_Security_Question{Session: sess} +} + +func (r User_Security_Question) Id(id int) User_Security_Question { + r.Options.Id = &id + return r +} + +func (r User_Security_Question) Mask(mask string) User_Security_Question { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Security_Question) Filter(filter string) User_Security_Question { + r.Options.Filter = filter + return r +} + +func (r User_Security_Question) Limit(limit int) User_Security_Question { + r.Options.Limit = &limit + return r +} + +func (r User_Security_Question) Offset(offset int) User_Security_Question { + r.Options.Offset = &offset + return r +} + +// Retrieve all viewable security questions. +func (r User_Security_Question) GetAllObjects() (resp []datatypes.User_Security_Question, err error) { + err = r.Session.DoRequest("SoftLayer_User_Security_Question", "getAllObjects", nil, &r.Options, &resp) + return +} + +// getAllObjects retrieves all the SoftLayer_User_Security_Question objects where it is set to be viewable. +func (r User_Security_Question) GetObject() (resp datatypes.User_Security_Question, err error) { + err = r.Session.DoRequest("SoftLayer_User_Security_Question", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/utility.go b/vendor/github.com/softlayer/softlayer-go/services/utility.go new file mode 100644 index 000000000000..9eed189f36b3 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/utility.go @@ -0,0 +1,88 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Utility_Network struct { + Session *session.Session + Options sl.Options +} + +// GetUtilityNetworkService returns an instance of the Utility_Network SoftLayer service +func GetUtilityNetworkService(sess *session.Session) Utility_Network { + return Utility_Network{Session: sess} +} + +func (r Utility_Network) Id(id int) Utility_Network { + r.Options.Id = &id + return r +} + +func (r Utility_Network) Mask(mask string) Utility_Network { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Utility_Network) Filter(filter string) Utility_Network { + r.Options.Filter = filter + return r +} + +func (r Utility_Network) Limit(limit int) Utility_Network { + r.Options.Limit = &limit + return r +} + +func (r Utility_Network) Offset(offset int) Utility_Network { + r.Options.Offset = &offset + return r +} + +// A method used to return the nameserver information for a given address +func (r Utility_Network) NsLookup(address *string, typ *string) (resp string, err error) { + params := []interface{}{ + address, + typ, + } + err = r.Session.DoRequest("SoftLayer_Utility_Network", "nsLookup", params, &r.Options, &resp) + return +} + +// Perform a WHOIS lookup from SoftLayer's application servers on the given IP address or hostname and return the raw results of that command. The returned result is similar to the result received from running the command `whois` from a UNIX command shell. A WHOIS lookup queries a host's registrar to retrieve domain registrant information including registration date, expiry date, and the administrative, technical, billing, and abuse contacts responsible for a domain. WHOIS lookups are useful for determining a physical contact responsible for a particular domain. WHOIS lookups are also useful for determining domain availability. Running a WHOIS lookup on an IP address queries ARIN for that IP block's ownership, and is helpful for determining a physical entity responsible for a certain IP address. +func (r Utility_Network) Whois(address *string) (resp string, err error) { + params := []interface{}{ + address, + } + err = r.Session.DoRequest("SoftLayer_Utility_Network", "whois", params, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/virtual.go b/vendor/github.com/softlayer/softlayer-go/services/virtual.go new file mode 100644 index 000000000000..7e7cf670850f --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/virtual.go @@ -0,0 +1,3167 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// This data type presents the structure for a dedicated host. The data type contains relational properties to distinguish a dedicated host and associate an account to it. +type Virtual_DedicatedHost struct { + Session *session.Session + Options sl.Options +} + +// GetVirtualDedicatedHostService returns an instance of the Virtual_DedicatedHost SoftLayer service +func GetVirtualDedicatedHostService(sess *session.Session) Virtual_DedicatedHost { + return Virtual_DedicatedHost{Session: sess} +} + +func (r Virtual_DedicatedHost) Id(id int) Virtual_DedicatedHost { + r.Options.Id = &id + return r +} + +func (r Virtual_DedicatedHost) Mask(mask string) Virtual_DedicatedHost { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Virtual_DedicatedHost) Filter(filter string) Virtual_DedicatedHost { + r.Options.Filter = filter + return r +} + +func (r Virtual_DedicatedHost) Limit(limit int) Virtual_DedicatedHost { + r.Options.Limit = &limit + return r +} + +func (r Virtual_DedicatedHost) Offset(offset int) Virtual_DedicatedHost { + r.Options.Offset = &offset + return r +} + +// This method will cancel a dedicated host immediately. +func (r Virtual_DedicatedHost) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "deleteObject", nil, &r.Options, &resp) + return +} + +// Edit a dedicated host's properties. +func (r Virtual_DedicatedHost) EditObject(templateObject *datatypes.Virtual_DedicatedHost) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account that the dedicated host belongs to. +func (r Virtual_DedicatedHost) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The container that represents allocations on the dedicated host. +func (r Virtual_DedicatedHost) GetAllocationStatus() (resp datatypes.Container_Virtual_DedicatedHost_AllocationStatus, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getAllocationStatus", nil, &r.Options, &resp) + return +} + +// This method will get the available backend routers to order a dedicated host. +func (r Virtual_DedicatedHost) GetAvailableRouters(dedicatedHost *datatypes.Virtual_DedicatedHost) (resp []datatypes.Hardware, err error) { + params := []interface{}{ + dedicatedHost, + } + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getAvailableRouters", params, &r.Options, &resp) + return +} + +// Retrieve The backend router behind dedicated host's pool of resources. +func (r Virtual_DedicatedHost) GetBackendRouter() (resp datatypes.Hardware_Router_Backend, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getBackendRouter", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for the dedicated host. 
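+//
+// A sketch of reading a dedicated host's relational properties by chaining Id once and
+// reusing the configured service value (the host id is illustrative and sess is an
+// existing *session.Session):
+//
+//	host := GetVirtualDedicatedHostService(sess).Id(12345)
+//	item, err := host.GetBillingItem() // the recurring billing item for the host
+//	guests, gErr := host.GetGuests()   // guests provisioned on the host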
+func (r Virtual_DedicatedHost) GetBillingItem() (resp datatypes.Billing_Item_Virtual_DedicatedHost, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The datacenter that the dedicated host resides in. +func (r Virtual_DedicatedHost) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve The guests associated with the dedicated host. +func (r Virtual_DedicatedHost) GetGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getGuests", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_DedicatedHost) GetInternalTagReferences() (resp []datatypes.Tag_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getInternalTagReferences", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_DedicatedHost) GetObject() (resp datatypes.Virtual_DedicatedHost, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The container that represents PCI device allocations on the dedicated host. +func (r Virtual_DedicatedHost) GetPciDeviceAllocationStatus() (resp datatypes.Container_Virtual_DedicatedHost_Pci_Device_AllocationStatus, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getPciDeviceAllocationStatus", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_DedicatedHost) GetTagReferences() (resp []datatypes.Tag_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getTagReferences", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_DedicatedHost) SetTags(tags *string) (resp bool, err error) { + params := []interface{}{ + tags, + } + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "setTags", params, &r.Options, &resp) + return +} + +// The virtual disk image data type presents the structure in which a virtual disk image will be presented. +// +// Virtual block devices are assigned to disk images. 
+type Virtual_Disk_Image struct { + Session *session.Session + Options sl.Options +} + +// GetVirtualDiskImageService returns an instance of the Virtual_Disk_Image SoftLayer service +func GetVirtualDiskImageService(sess *session.Session) Virtual_Disk_Image { + return Virtual_Disk_Image{Session: sess} +} + +func (r Virtual_Disk_Image) Id(id int) Virtual_Disk_Image { + r.Options.Id = &id + return r +} + +func (r Virtual_Disk_Image) Mask(mask string) Virtual_Disk_Image { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Virtual_Disk_Image) Filter(filter string) Virtual_Disk_Image { + r.Options.Filter = filter + return r +} + +func (r Virtual_Disk_Image) Limit(limit int) Virtual_Disk_Image { + r.Options.Limit = &limit + return r +} + +func (r Virtual_Disk_Image) Offset(offset int) Virtual_Disk_Image { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Virtual_Disk_Image) EditObject(templateObject *datatypes.Virtual_Disk_Image) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Disk_Image) GetAvailableBootModes() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getAvailableBootModes", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a virtual disk image. +func (r Virtual_Disk_Image) GetBillingItem() (resp datatypes.Billing_Item_Virtual_Disk_Image, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The block devices that a disk image is attached to. Block devices connect computing instances to disk images. +func (r Virtual_Disk_Image) GetBlockDevices() (resp []datatypes.Virtual_Guest_Block_Device, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getBlockDevices", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Disk_Image) GetBootableVolumeFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getBootableVolumeFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Disk_Image) GetCoalescedDiskImages() (resp []datatypes.Virtual_Disk_Image, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getCoalescedDiskImages", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Disk_Image) GetCopyOnWriteFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getCopyOnWriteFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Disk_Image) GetLocalDiskFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getLocalDiskFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether this disk image is meant for storage of custom user data supplied with a Cloud Computing Instance order. 
+func (r Virtual_Disk_Image) GetMetadataFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getMetadataFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Disk_Image) GetObject() (resp datatypes.Virtual_Disk_Image, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Disk_Image) GetPublicIsoImages() (resp []datatypes.Virtual_Disk_Image, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getPublicIsoImages", nil, &r.Options, &resp) + return +} + +// Retrieve References to the software that resides on a disk image. +func (r Virtual_Disk_Image) GetSoftwareReferences() (resp []datatypes.Virtual_Disk_Image_Software, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getSoftwareReferences", nil, &r.Options, &resp) + return +} + +// Retrieve The original disk image that the current disk image was cloned from. +func (r Virtual_Disk_Image) GetSourceDiskImage() (resp datatypes.Virtual_Disk_Image, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getSourceDiskImage", nil, &r.Options, &resp) + return +} + +// Retrieve The storage repository that a disk image resides in. +func (r Virtual_Disk_Image) GetStorageRepository() (resp datatypes.Virtual_Storage_Repository, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getStorageRepository", nil, &r.Options, &resp) + return +} + +// Retrieve The type of storage repository that a disk image resides in. +func (r Virtual_Disk_Image) GetStorageRepositoryType() (resp datatypes.Virtual_Storage_Repository_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getStorageRepositoryType", nil, &r.Options, &resp) + return +} + +// Retrieve The template that attaches a disk image to a [[SoftLayer_Virtual_Guest_Block_Device_Template_Group|archive]]. +func (r Virtual_Disk_Image) GetTemplateBlockDevice() (resp datatypes.Virtual_Guest_Block_Device_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getTemplateBlockDevice", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual disk image's type. +func (r Virtual_Disk_Image) GetType() (resp datatypes.Virtual_Disk_Image_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getType", nil, &r.Options, &resp) + return +} + +// The virtual guest data type presents the structure in which all virtual guests will be presented. Internally, the structure supports various virtualization platforms with no change to external interaction. +// +// A guest, also known as a virtual server, represents an allocation of resources on a virtual host. 
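+//
+// A typical read against this service selects a guest by id and requests an object mask
+// for the fields of interest. A sketch (the guest id and mask are illustrative; the
+// credentials are typically read from SL_USERNAME/SL_API_KEY or the SoftLayer config
+// file by session.New()):
+//
+//	sess := session.New()
+//	guestService := GetVirtualGuestService(sess)
+//	guest, err := guestService.Id(1301396).Mask("id,hostname,provisionDate").GetObject()
+//	if err == nil && guest.ProvisionDate != nil {
+//		// the guest has finished provisioning
+//	}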
+type Virtual_Guest struct { + Session *session.Session + Options sl.Options +} + +// GetVirtualGuestService returns an instance of the Virtual_Guest SoftLayer service +func GetVirtualGuestService(sess *session.Session) Virtual_Guest { + return Virtual_Guest{Session: sess} +} + +func (r Virtual_Guest) Id(id int) Virtual_Guest { + r.Options.Id = &id + return r +} + +func (r Virtual_Guest) Mask(mask string) Virtual_Guest { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Virtual_Guest) Filter(filter string) Virtual_Guest { + r.Options.Filter = filter + return r +} + +func (r Virtual_Guest) Limit(limit int) Virtual_Guest { + r.Options.Limit = &limit + return r +} + +func (r Virtual_Guest) Offset(offset int) Virtual_Guest { + r.Options.Offset = &offset + return r +} + +// Activate the private network port +func (r Virtual_Guest) ActivatePrivatePort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "activatePrivatePort", nil, &r.Options, &resp) + return +} + +// Activate the public network port +func (r Virtual_Guest) ActivatePublicPort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "activatePublicPort", nil, &r.Options, &resp) + return +} + +// This method is used to allow access to a SoftLayer_Network_Storage volume that supports host- or network-level access control. +func (r Virtual_Guest) AllowAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "allowAccessToNetworkStorage", params, &r.Options, &resp) + return +} + +// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control. +func (r Virtual_Guest) AllowAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "allowAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// Creates a transaction to attach a guest's disk image. If the disk image is already attached it will be ignored. +// +// WARNING: SoftLayer_Virtual_Guest::checkHostDiskAvailability should be called before this method. If the SoftLayer_Virtual_Guest::checkHostDiskAvailability method is not called before this method, the guest migration will happen automatically. +func (r Virtual_Guest) AttachDiskImage(imageId *int) (resp datatypes.Provisioning_Version1_Transaction, err error) { + params := []interface{}{ + imageId, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "attachDiskImage", params, &r.Options, &resp) + return +} + +// Reopens the public and/or private ports to reverse the changes made when the server was isolated for a destructive action. +func (r Virtual_Guest) CancelIsolationForDestructiveAction() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "cancelIsolationForDestructiveAction", nil, &r.Options, &resp) + return +} + +// Captures a Flex Image of the hard disk on the virtual machine, based on the capture template parameter. Returns the image template group containing the disk image. 
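+//
+// A sketch of the call shape (the guest id is illustrative; the capture template is left
+// for the caller to populate with the image name and notes their account requires):
+//
+//	var captureTemplate datatypes.Container_Disk_Image_Capture_Template
+//	// ... populate captureTemplate ...
+//	imageGroup, err := GetVirtualGuestService(sess).Id(1301396).CaptureImage(&captureTemplate)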
+func (r Virtual_Guest) CaptureImage(captureTemplate *datatypes.Container_Disk_Image_Capture_Template) (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + params := []interface{}{ + captureTemplate, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "captureImage", params, &r.Options, &resp) + return +} + +// Checks the associated host for available disk space to determine if guest migration is necessary. This method is only used with local disks. If this method returns false, calling attachDiskImage($imageId) will automatically migrate the destination guest to a new host before attaching the portable volume. +func (r Virtual_Guest) CheckHostDiskAvailability(diskCapacity *int) (resp bool, err error) { + params := []interface{}{ + diskCapacity, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "checkHostDiskAvailability", params, &r.Options, &resp) + return +} + +// Returns monitoring alarm detailed history +func (r Virtual_Guest) CloseAlarm(alarmId *string) (resp bool, err error) { + params := []interface{}{ + alarmId, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "closeAlarm", params, &r.Options, &resp) + return +} + +// Creates a transaction to configure the guest's metadata disk. If the guest has user data associated with it, the transaction will create a small virtual drive and write the metadata to a file on the drive; if the drive already exists, the metadata will be rewritten. If the guest has no user data associated with it, the transaction will remove the virtual drive if it exists. +// +// WARNING: The transaction created by this service will shut down the guest while the metadata disk is configured. The guest will be turned back on once this process is complete. +func (r Virtual_Guest) ConfigureMetadataDisk() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "configureMetadataDisk", nil, &r.Options, &resp) + return +} + +// Create a transaction to archive a computing instance's block devices +func (r Virtual_Guest) CreateArchiveTransaction(groupName *string, blockDevices []datatypes.Virtual_Guest_Block_Device, note *string) (resp datatypes.Provisioning_Version1_Transaction, err error) { + params := []interface{}{ + groupName, + blockDevices, + note, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "createArchiveTransaction", params, &r.Options, &resp) + return +} + +// +// +// createObject() enables the creation of computing instances on an account. This +// method is a simplified alternative to interacting with the ordering system directly. +// +// +// In order to create a computing instance, a template object must be sent in with a few required +// values. +// +// +// When this method returns an order will have been placed for a computing instance of the specified configuration. +// +// +// To determine when the instance is available you can poll the instance via [[SoftLayer_Virtual_Guest/getObject|getObject]], with an [[Extended-Object-Masks|object mask]] requesting the provisionDate relational property. When provisionDate is not null, the instance will be ready. +// +// +// Warning: Computing instances created via this method will incur charges on your account. For testing input parameters see [[SoftLayer_Virtual_Guest/generateOrderTemplate|generateOrderTemplate]]. +// +// +// Input - [[SoftLayer_Virtual_Guest (type)|SoftLayer_Virtual_Guest]] +//
+// hostname
+//     Hostname for the computing instance.
+//     - Required
+//     - Type - string
+//
+// domain
+//     Domain for the computing instance.
+//     - Required
+//     - Type - string
+//
+// startCpus
+//     The number of CPU cores to allocate.
+//     - Required
+//     - Type - int
+//     - See [[SoftLayer_Virtual_Guest/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// maxMemory
+//     The amount of memory to allocate in megabytes.
+//     - Required
+//     - Type - int
+//     - See [[SoftLayer_Virtual_Guest/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// datacenter.name
+//     Specifies which datacenter the instance is to be provisioned in.
+//     - Required
+//     - Type - string
+//     - The datacenter property is a [[SoftLayer_Location (type)|location]] structure with the name field set.
+//     - See [[SoftLayer_Virtual_Guest/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//         {
+//             "datacenter": {
+//                 "name": "dal05"
+//             }
+//         }
+//
+// hourlyBillingFlag
+//     Specifies the billing type for the instance.
+//     - Required
+//     - Type - boolean
+//     - When true the computing instance will be billed on hourly usage, otherwise it will be billed on a monthly basis.
+//
+// localDiskFlag
+//     Specifies the disk type for the instance.
+//     - Required
+//     - Type - boolean
+//     - When true the disks for the computing instance will be provisioned on the host which it runs, otherwise SAN disks will be provisioned.
+//
+// dedicatedAccountHostOnlyFlag
+//     Specifies whether or not the instance must only run on hosts with instances from the same account.
+//     - Optional
+//     - Type - boolean
+//     - Default - false
+//     - When true this flag specifies that a compute instance is to run on hosts that only have guests from the same account.
+//
+// operatingSystemReferenceCode
+//     An identifier for the operating system to provision the computing instance with.
+//     - Conditionally required - Disallowed when blockDeviceTemplateGroup.globalIdentifier is provided, as the template will specify the operating system.
+//     - Type - string
+//     - Notice - Some operating systems are charged based on the value specified in startCpus. The price which is used can be determined by calling [[SoftLayer_Virtual_Guest/generateOrderTemplate|generateOrderTemplate]] with your desired device specifications.
+//     - See [[SoftLayer_Virtual_Guest/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// blockDeviceTemplateGroup.globalIdentifier
+//     A global identifier for the template to be used to provision the computing instance.
+//     - Conditionally required - Disallowed when operatingSystemReferenceCode is provided, as the template will specify the operating system.
+//     - Type - string
+//     - Notice - Some operating systems are charged based on the value specified in startCpus. The price which is used can be determined by calling [[SoftLayer_Virtual_Guest/generateOrderTemplate|generateOrderTemplate]] with your desired device specifications.
+//     - Both public and non-public images may be specified.
+//     - A list of public images may be obtained via a request to [[SoftLayer_Virtual_Guest_Block_Device_Template_Group/getPublicImages|getPublicImages]].
+//     - A list of non-public images, images owned by an account or specifically shared with an account, may be obtained via a request to [[SoftLayer_Account/getBlockDeviceTemplateGroups|getBlockDeviceTemplateGroups]].
+//
+//         {
+//             "blockDeviceTemplateGroup": {
+//                 "globalIdentifier": "07beadaa-1e11-476e-a188-3f7795feb9fb"
+//             }
+//         }
+//
+// networkComponents.maxSpeed
+//     Specifies the connection speed for the instance's network components.
+//     - Optional
+//     - Type - int
+//     - Default - 10
+//     - Description - The networkComponents property is an array with a single [[SoftLayer_Virtual_Guest_Network_Component (type)|network component]] structure. The maxSpeed property must be set to specify the network uplink speed, in megabits per second, of the computing instance.
+//     - See [[SoftLayer_Virtual_Guest/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//         {
+//             "networkComponents": [
+//                 {
+//                     "maxSpeed": 1000
+//                 }
+//             ]
+//         }
+//
+// privateNetworkOnlyFlag
+//     Specifies whether or not the instance only has access to the private network.
+//     - Optional
+//     - Type - boolean
+//     - Default - false
+//     - When true this flag specifies that a compute instance is to only have access to the private network.
+//
+// primaryNetworkComponent.networkVlan.id
+//     Specifies the network vlan which is to be used for the frontend interface of the computing instance.
+//     - Optional
+//     - Type - int
+//     - Description - The primaryNetworkComponent property is a [[SoftLayer_Virtual_Guest_Network_Component (type)|network component]] structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure. The id property must be set to specify the frontend network vlan of the computing instance.
+//
+//         {
+//             "primaryNetworkComponent": {
+//                 "networkVlan": {
+//                     "id": 1
+//                 }
+//             }
+//         }
+//
+// primaryBackendNetworkComponent.networkVlan.id
+//     Specifies the network vlan which is to be used for the backend interface of the computing instance.
+//     - Optional
+//     - Type - int
+//     - Description - The primaryBackendNetworkComponent property is a [[SoftLayer_Virtual_Guest_Network_Component (type)|network component]] structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure. The id property must be set to specify the backend network vlan of the computing instance.
+//
+//         {
+//             "primaryBackendNetworkComponent": {
+//                 "networkVlan": {
+//                     "id": 2
+//                 }
+//             }
+//         }
+//
+// primaryNetworkComponent.securityGroupBindings
+//     Specifies the security groups to be attached to this VSI's Frontend Network Adapter.
+//     - Optional
+//     - Type - array of [[SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding (type)|SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding]] with the securityGroup property set.
+//     - Description - The primaryNetworkComponent property is a [[SoftLayer_Virtual_Guest_Network_Component (type)|network component]] structure with the securityGroupBindings property populated with an array of [[SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding (type)|Security Group Order Binding]] structures. The securityGroup property in each must be set to specify the security group to be attached to the primary frontend network component.
+//
+//         {
+//             "primaryNetworkComponent": {
+//                 "securityGroupBindings": [
+//                     {
+//                         "securityGroup": {
+//                             "id": 1
+//                         }
+//                     },
+//                     {
+//                         "securityGroup": {
+//                             "id": 2
+//                         }
+//                     }
+//                 ]
+//             }
+//         }
+//
+// primaryBackendNetworkComponent.securityGroupBindings
+//     Specifies the security groups to be attached to this VSI's Backend Network Adapter.
+//     - Optional
+//     - Type - array of [[SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding (type)|SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding]] with the securityGroup property set.
+//     - Description - The primaryBackendNetworkComponent property is a [[SoftLayer_Virtual_Guest_Network_Component (type)|network component]] structure with the securityGroupBindings property populated with an array of [[SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding (type)|Security Group Order Binding]] structures. The securityGroup property in each must be set to specify the security group to be attached to the primary backend network component.
+//
+//         {
+//             "primaryBackendNetworkComponent": {
+//                 "securityGroupBindings": [
+//                     {
+//                         "securityGroup": {
+//                             "id": 1
+//                         }
+//                     },
+//                     {
+//                         "securityGroup": {
+//                             "id": 2
+//                         }
+//                     }
+//                 ]
+//             }
+//         }
+//
+// blockDevices
+//     Block device and disk image settings for the computing instance.
+//     - Optional
+//     - Type - array of [[SoftLayer_Virtual_Guest_Block_Device (type)|SoftLayer_Virtual_Guest_Block_Device]]
+//     - Default - The smallest available capacity for the primary disk will be used. If an image template is specified the disk capacity will be provided by the template.
+//     - Description - The blockDevices property is an array of [[SoftLayer_Virtual_Guest_Block_Device (type)|block device]] structures. Each block device must specify the device property along with the diskImage property, which is a [[SoftLayer_Virtual_Disk_Image (type)|disk image]] structure with the capacity property set.
+//     - The device number '1' is reserved for the SWAP disk attached to the computing instance.
+//     - See [[SoftLayer_Virtual_Guest/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//         {
+//             "blockDevices": [
+//                 {
+//                     "device": "0",
+//                     "diskImage": {
+//                         "capacity": 100
+//                     }
+//                 }
+//             ],
+//             "localDiskFlag": true
+//         }
+//
+// userData.value
+//     Arbitrary data to be made available to the computing instance.
+//     - Optional
+//     - Type - string
+//     - Description - The userData property is an array with a single [[SoftLayer_Virtual_Guest_Attribute (type)|attribute]] structure with the value property set to an arbitrary value.
+//     - This value can be retrieved via the [[SoftLayer_Resource_Metadata/getUserMetadata|getUserMetadata]] method from a request originating from the computing instance. This is primarily useful for providing data to software that may be on the instance and configured to execute upon first boot.
+//
+//         {
+//             "userData": [
+//                 {
+//                     "value": "someValue"
+//                 }
+//             ]
+//         }
+//
+// sshKeys
+//     SSH keys to install on the computing instance upon provisioning.
+//     - Optional
+//     - Type - array of [[SoftLayer_Security_Ssh_Key (type)|SoftLayer_Security_Ssh_Key]]
+//     - Description - The sshKeys property is an array of [[SoftLayer_Security_Ssh_Key (type)|SSH Key]] structures with the id property set to the value of an existing SSH key.
+//     - To create a new SSH key, call [[SoftLayer_Security_Ssh_Key/createObject|createObject]] on the [[SoftLayer_Security_Ssh_Key]] service.
+//     - To obtain a list of existing SSH keys, call [[SoftLayer_Account/getSshKeys|getSshKeys]] on the [[SoftLayer_Account]] service.
+//
+//         {
+//             "sshKeys": [
+//                 {
+//                     "id": 123
+//                 }
+//             ]
+//         }
+//
+// postInstallScriptUri
+//     Specifies the uri location of the script to be downloaded and run after installation is complete.
+//     - Optional
+//     - Type - string
+//
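+// A Go sketch of a minimal createObject call built with this package's types and the sl
+// pointer helpers; it mirrors the REST example below (the field values, the datacenter,
+// and the credentials read by session.New() are illustrative):
+//
+//	guestTemplate := datatypes.Virtual_Guest{
+//		Hostname:                     sl.String("host1"),
+//		Domain:                       sl.String("example.com"),
+//		StartCpus:                    sl.Int(1),
+//		MaxMemory:                    sl.Int(1024),
+//		HourlyBillingFlag:            sl.Bool(true),
+//		LocalDiskFlag:                sl.Bool(true),
+//		OperatingSystemReferenceCode: sl.String("UBUNTU_LATEST"),
+//		Datacenter:                   &datatypes.Location{Name: sl.String("dal05")},
+//	}
+//	guest, err := GetVirtualGuestService(session.New()).CreateObject(&guestTemplate)
+//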

+//
+// REST Example
+//
    +// curl -X POST -d '{ +// "parameters":[ +// { +// "hostname": "host1", +// "domain": "example.com", +// "startCpus": 1, +// "maxMemory": 1024, +// "hourlyBillingFlag": true, +// "localDiskFlag": true, +// "operatingSystemReferenceCode": "UBUNTU_LATEST" +// } +// ] +// }' https://api.softlayer.com/rest/v3/SoftLayer_Virtual_Guest.json +// +// HTTP/1.1 201 Created +// Location: https://api.softlayer.com/rest/v3/SoftLayer_Virtual_Guest/1301396/getObject +// +// +// { +// "accountId": 232298, +// "createDate": "2012-11-30T16:28:17-06:00", +// "dedicatedAccountHostOnlyFlag": false, +// "domain": "example.com", +// "hostname": "host1", +// "id": 1301396, +// "lastPowerStateId": null, +// "lastVerifiedDate": null, +// "maxCpu": 1, +// "maxCpuUnits": "CORE", +// "maxMemory": 1024, +// "metricPollDate": null, +// "modifyDate": null, +// "privateNetworkOnlyFlag": false, +// "startCpus": 1, +// "statusId": 1001, +// "globalIdentifier": "2d203774-0ee1-49f5-9599-6ef67358dd31" +// } +// +func (r Virtual_Guest) CreateObject(templateObject *datatypes.Virtual_Guest) (resp datatypes.Virtual_Guest, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "createObject", params, &r.Options, &resp) + return +} + +// +// createObjects() enables the creation of multiple computing instances on an account in a single call. This +// method is a simplified alternative to interacting with the ordering system directly. +// +// +// In order to create a computing instance a set of template objects must be sent in with a few required +// values. +// +// +// Warning: Computing instances created via this method will incur charges on your account. +// +// +// See [[SoftLayer_Virtual_Guest/createObject|createObject]] for specifics on the requirements of each template object. +// +// +//

+//
+// Example
+//
    +// curl -X POST -d '{ +// "parameters":[ +// [ +// { +// "hostname": "host1", +// "domain": "example.com", +// "startCpus": 1, +// "maxMemory": 1024, +// "hourlyBillingFlag": true, +// "localDiskFlag": true, +// "operatingSystemReferenceCode": "UBUNTU_LATEST" +// }, +// { +// "hostname": "host2", +// "domain": "example.com", +// "startCpus": 1, +// "maxMemory": 1024, +// "hourlyBillingFlag": true, +// "localDiskFlag": true, +// "operatingSystemReferenceCode": "UBUNTU_LATEST" +// } +// ] +// ] +// }' https://api.softlayer.com/rest/v3/SoftLayer_Virtual_Guest/createObjects.json +// +// HTTP/1.1 200 OK +// +// +// [ +// { +// "accountId": 232298, +// "createDate": "2012-11-30T23:56:48-06:00", +// "dedicatedAccountHostOnlyFlag": false, +// "domain": "softlayer.com", +// "hostname": "ubuntu1", +// "id": 1301456, +// "lastPowerStateId": null, +// "lastVerifiedDate": null, +// "maxCpu": 1, +// "maxCpuUnits": "CORE", +// "maxMemory": 1024, +// "metricPollDate": null, +// "modifyDate": null, +// "privateNetworkOnlyFlag": false, +// "startCpus": 1, +// "statusId": 1001, +// "globalIdentifier": "fed4c822-48c0-45d0-85e2-90476aa0c542" +// }, +// { +// "accountId": 232298, +// "createDate": "2012-11-30T23:56:49-06:00", +// "dedicatedAccountHostOnlyFlag": false, +// "domain": "softlayer.com", +// "hostname": "ubuntu2", +// "id": 1301457, +// "lastPowerStateId": null, +// "lastVerifiedDate": null, +// "maxCpu": 1, +// "maxCpuUnits": "CORE", +// "maxMemory": 1024, +// "metricPollDate": null, +// "modifyDate": null, +// "privateNetworkOnlyFlag": false, +// "startCpus": 1, +// "statusId": 1001, +// "globalIdentifier": "bed4c686-9562-4ade-9049-dc4d5b6b200c" +// } +// ] +// +func (r Virtual_Guest) CreateObjects(templateObjects []datatypes.Virtual_Guest) (resp []datatypes.Virtual_Guest, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "createObjects", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest) CreatePostSoftwareInstallTransaction(data *string, returnBoolean *bool) (resp bool, err error) { + params := []interface{}{ + data, + returnBoolean, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "createPostSoftwareInstallTransaction", params, &r.Options, &resp) + return +} + +// +// This method will cancel a computing instance effective immediately. For instances billed hourly, the charges will stop immediately after the method returns. +func (r Virtual_Guest) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest) DeleteTransientWebhook() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "deleteTransientWebhook", nil, &r.Options, &resp) + return +} + +// Creates a transaction to detach a guest's disk image. If the disk image is already detached it will be ignored. +// +// WARNING: The transaction created by this service will shut down the guest while the disk image is attached. The guest will be turned back on once this process is complete. 
+func (r Virtual_Guest) DetachDiskImage(imageId *int) (resp datatypes.Provisioning_Version1_Transaction, err error) { + params := []interface{}{ + imageId, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "detachDiskImage", params, &r.Options, &resp) + return +} + +// Edit a computing instance's properties +func (r Virtual_Guest) EditObject(templateObject *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "editObject", params, &r.Options, &resp) + return +} + +// Reboot a guest into the Idera Bare Metal Restore image. +func (r Virtual_Guest) ExecuteIderaBareMetalRestore() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "executeIderaBareMetalRestore", nil, &r.Options, &resp) + return +} + +// Reboot a guest into the R1Soft Bare Metal Restore image. +func (r Virtual_Guest) ExecuteR1SoftBareMetalRestore() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "executeR1SoftBareMetalRestore", nil, &r.Options, &resp) + return +} + +// Download and run remote script from uri on virtual guests. +func (r Virtual_Guest) ExecuteRemoteScript(uri *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + uri, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "executeRemoteScript", params, &r.Options, &resp) + return +} + +// Reboot a Linux guest into the Xen rescue image. +func (r Virtual_Guest) ExecuteRescueLayer() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "executeRescueLayer", nil, &r.Options, &resp) + return +} + +// Find CCI by only its primary public or private IP address. IP addresses within secondary subnets tied to the CCI will not return the CCI. If no CCI is found, no errors are generated and no data is returned. +func (r Virtual_Guest) FindByIpAddress(ipAddress *string) (resp datatypes.Virtual_Guest, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "findByIpAddress", params, &r.Options, &resp) + return +} + +// +// Obtain an [[SoftLayer_Container_Product_Order_Virtual_Guest (type)|order container]] that can be sent to [[SoftLayer_Product_Order/verifyOrder|verifyOrder]] or [[SoftLayer_Product_Order/placeOrder|placeOrder]]. +// +// +// This is primarily useful when there is a necessity to confirm the price which will be charged for an order. +// +// +// See [[SoftLayer_Virtual_Guest/createObject|createObject]] for specifics on the requirements of the template object parameter. +func (r Virtual_Guest) GenerateOrderTemplate(templateObject *datatypes.Virtual_Guest) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "generateOrderTemplate", params, &r.Options, &resp) + return +} + +// Retrieve The account that a virtual guest belongs to. +func (r Virtual_Guest) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest) GetAccountOwnedPoolFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAccountOwnedPoolFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual guest's currently active network monitoring incidents. 
+func (r Virtual_Guest) GetActiveNetworkMonitorIncident() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getActiveNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest) GetActiveTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getActiveTickets", nil, &r.Options, &resp) + return +} + +// Retrieve A transaction that is still be performed on a cloud server. +func (r Virtual_Guest) GetActiveTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getActiveTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve Any active transaction(s) that are currently running for the server (example: os reload). +func (r Virtual_Guest) GetActiveTransactions() (resp []datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getActiveTransactions", nil, &r.Options, &resp) + return +} + +// Return a collection of SoftLayer_Item_Price objects for an OS reload +func (r Virtual_Guest) GetAdditionalRequiredPricesForOsReload(config *datatypes.Container_Hardware_Server_Configuration) (resp []datatypes.Product_Item_Price, err error) { + params := []interface{}{ + config, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAdditionalRequiredPricesForOsReload", params, &r.Options, &resp) + return +} + +// Returns monitoring alarm detailed history +func (r Virtual_Guest) GetAlarmHistory(startDate *datatypes.Time, endDate *datatypes.Time, alarmId *string) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + alarmId, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAlarmHistory", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Allowed_Host information to connect this Virtual Guest to Network Storage volumes that require access control lists. +func (r Virtual_Guest) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAllowedHost", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects that this SoftLayer_Virtual_Guest has access to. +func (r Virtual_Guest) GetAllowedNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAllowedNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects whose Replica that this SoftLayer_Virtual_Guest has access to. +func (r Virtual_Guest) GetAllowedNetworkStorageReplicas() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAllowedNetworkStorageReplicas", nil, &r.Options, &resp) + return +} + +// Retrieve A antivirus / spyware software component object. 
+func (r Virtual_Guest) GetAntivirusSpywareSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAntivirusSpywareSoftwareComponent", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest) GetApplicationDeliveryController() (resp datatypes.Network_Application_Delivery_Controller, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getApplicationDeliveryController", nil, &r.Options, &resp) + return +} + +// This method is retrieve a list of SoftLayer_Network_Storage volumes that are authorized access to this SoftLayer_Virtual_Guest. +func (r Virtual_Guest) GetAttachedNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAttachedNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest) GetAttributes() (resp []datatypes.Virtual_Guest_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAttributes", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest) GetAvailableBlockDevicePositions() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAvailableBlockDevicePositions", nil, &r.Options, &resp) + return +} + +// Retrieve An object that stores the maximum level for the monitoring query types and response types. +func (r Virtual_Guest) GetAvailableMonitoring() (resp []datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAvailableMonitoring", nil, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that can be authorized to this SoftLayer_Virtual_Guest. +func (r Virtual_Guest) GetAvailableNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAvailableNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve The average daily private bandwidth usage for the current billing cycle. +func (r Virtual_Guest) GetAverageDailyPrivateBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAverageDailyPrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The average daily public bandwidth usage for the current billing cycle. +func (r Virtual_Guest) GetAverageDailyPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAverageDailyPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve A guests's backend network components. +func (r Virtual_Guest) GetBackendNetworkComponents() (resp []datatypes.Virtual_Guest_Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBackendNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's backend or private router. +func (r Virtual_Guest) GetBackendRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBackendRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A computing instance's allotted bandwidth (measured in GB). 
+func (r Virtual_Guest) GetBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve A computing instance's allotted detail record. Allotment details link bandwidth allocation with allotments. +func (r Virtual_Guest) GetBandwidthAllotmentDetail() (resp datatypes.Network_Bandwidth_Version1_Allotment_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBandwidthAllotmentDetail", nil, &r.Options, &resp) + return +} + +// Use this method when needing the metric data for bandwidth for a single guest. It will gather the correct input parameters based on the date ranges +func (r Virtual_Guest) GetBandwidthDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time, networkType *string) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + networkType, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBandwidthDataByDate", params, &r.Options, &resp) + return +} + +// Retrieve a collection of bandwidth data from an individual public or private network tracking object. Data is ideal if you with to employ your own traffic storage and graphing systems. +func (r Virtual_Guest) GetBandwidthForDateRange(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBandwidthForDateRange", params, &r.Options, &resp) + return +} + +// Use this method when needing a bandwidth image for a single guest. It will gather the correct input parameters for the generic graphing utility automatically based on the snapshot specified. +func (r Virtual_Guest) GetBandwidthImage(networkType *string, snapshotRange *string, dateSpecified *datatypes.Time, dateSpecifiedEnd *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + networkType, + snapshotRange, + dateSpecified, + dateSpecifiedEnd, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBandwidthImage", params, &r.Options, &resp) + return +} + +// Use this method when needing a bandwidth image for a single guest. It will gather the correct input parameters for the generic graphing utility based on the date ranges +func (r Virtual_Guest) GetBandwidthImageByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time, networkType *string) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + networkType, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBandwidthImageByDate", params, &r.Options, &resp) + return +} + +// Returns the total amount of bandwidth used during the time specified for a computing instance. +func (r Virtual_Guest) GetBandwidthTotal(startDateTime *datatypes.Time, endDateTime *datatypes.Time, direction *string, side *string) (resp uint, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + direction, + side, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBandwidthTotal", params, &r.Options, &resp) + return +} + +// Retrieve The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to. 
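A sketch of the date-ranged bandwidth call above, showing how datatypes.Time values are built from time.Time. The Id option setter, the session/services constructors, and the sl helpers come from elsewhere in softlayer-go; the guest id is a placeholder, and the "public" networkType string is an assumption (the private interface is conventionally selected with "private"), so check the SoftLayer metric documentation before relying on it.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("your-username", "your-api-key")
	svc := services.GetVirtualGuestService(sess).Id(1301456) // placeholder guest id

	// Last 24 hours; datatypes.Time embeds time.Time.
	end := datatypes.Time{Time: time.Now()}
	start := datatypes.Time{Time: end.Add(-24 * time.Hour)}

	data, err := svc.GetBandwidthDataByDate(&start, &end, sl.String("public"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bandwidth samples returned:", len(data))
}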
+func (r Virtual_Guest) GetBillingCycleBandwidthUsage() (resp []datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBillingCycleBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The raw private bandwidth usage data for the current billing cycle. +func (r Virtual_Guest) GetBillingCyclePrivateBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBillingCyclePrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The raw public bandwidth usage data for the current billing cycle. +func (r Virtual_Guest) GetBillingCyclePublicBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBillingCyclePublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a CloudLayer Compute Instance. +func (r Virtual_Guest) GetBillingItem() (resp datatypes.Billing_Item_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether the instance is ineligible for cancellation because it is disconnected. +func (r Virtual_Guest) GetBlockCancelBecauseDisconnectedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBlockCancelBecauseDisconnectedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The global identifier for the image template that was used to provision or reload a guest. +func (r Virtual_Guest) GetBlockDeviceTemplateGroup() (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBlockDeviceTemplateGroup", nil, &r.Options, &resp) + return +} + +// Retrieve A computing instance's block devices. Block devices link [[SoftLayer_Virtual_Disk_Image|disk images]] to computing instances. +func (r Virtual_Guest) GetBlockDevices() (resp []datatypes.Virtual_Guest_Block_Device, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBlockDevices", nil, &r.Options, &resp) + return +} + +// Retrieves the boot mode of the VSI. +func (r Virtual_Guest) GetBootMode() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBootMode", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest) GetBootOrder() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBootOrder", nil, &r.Options, &resp) + return +} + +// Gets the console access logs for a computing instance +func (r Virtual_Guest) GetConsoleAccessLog() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getConsoleAccessLog", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating a computing instance's console IP address is assigned. +func (r Virtual_Guest) GetConsoleIpAddressFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getConsoleIpAddressFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A record containing information about a computing instance's console IP and port number. 
+func (r Virtual_Guest) GetConsoleIpAddressRecord() (resp datatypes.Virtual_Guest_Network_Component_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getConsoleIpAddressRecord", nil, &r.Options, &resp) + return +} + +// Retrieve A continuous data protection software component object. +func (r Virtual_Guest) GetContinuousDataProtectionSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getContinuousDataProtectionSoftwareComponent", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's control panel. +func (r Virtual_Guest) GetControlPanel() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getControlPanel", nil, &r.Options, &resp) + return +} + +// If the virtual server currently has an operating system that has a core capacity restriction, return the associated core-restricted operating system item price. Some operating systems (e.g., Red Hat Enterprise Linux) may be billed by the number of processor cores, so therefore require that a certain number of cores be present on the server. +func (r Virtual_Guest) GetCoreRestrictedOperatingSystemPrice() (resp datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCoreRestrictedOperatingSystemPrice", nil, &r.Options, &resp) + return +} + +// Use this method when needing the metric data for a single guest's CPUs. It will gather the correct input parameters based on the date ranges +func (r Virtual_Guest) GetCpuMetricDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time, cpuIndexes []int) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + cpuIndexes, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCpuMetricDataByDate", params, &r.Options, &resp) + return +} + +// Use this method when needing a cpu usage image for a single guest. It will gather the correct input parameters for the generic graphing utility automatically based on the snapshot specified. +func (r Virtual_Guest) GetCpuMetricImage(snapshotRange *string, dateSpecified *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + snapshotRange, + dateSpecified, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCpuMetricImage", params, &r.Options, &resp) + return +} + +// Use this method when needing a CPU usage image for a single guest. It will gather the correct input parameters for the generic graphing utility based on the date ranges +func (r Virtual_Guest) GetCpuMetricImageByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time, cpuIndexes []int) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + cpuIndexes, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCpuMetricImageByDate", params, &r.Options, &resp) + return +} + +// +// There are many options that may be provided while ordering a computing instance, this method can be used to determine what these options are. +// +// +// Detailed information on the return value can be found on the data type page for [[SoftLayer_Container_Virtual_Guest_Configuration (type)]]. 
+func (r Virtual_Guest) GetCreateObjectOptions() (resp datatypes.Container_Virtual_Guest_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCreateObjectOptions", nil, &r.Options, &resp) + return +} + +// Retrieve An object that provides commonly used bandwidth summary components for the current billing cycle. +func (r Virtual_Guest) GetCurrentBandwidthSummary() (resp datatypes.Metric_Tracking_Object_Bandwidth_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCurrentBandwidthSummary", nil, &r.Options, &resp) + return +} + +// getUpgradeItemPrices() retrieves a list of all upgrades available to a CloudLayer Computing Instance. Upgradeable items include, but are not limited to, number of cores, amount of RAM, storage configuration, and network port speed. +func (r Virtual_Guest) GetCurrentBillingDetail() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCurrentBillingDetail", nil, &r.Options, &resp) + return +} + +// Get the total billing price in US Dollars ($) for this instance. This includes all bandwidth used up to this point for this instance. +func (r Virtual_Guest) GetCurrentBillingTotal() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCurrentBillingTotal", nil, &r.Options, &resp) + return +} + +// Retrieve bandwidth graph by date. +func (r Virtual_Guest) GetCustomBandwidthDataByDate(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + graphData, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCustomBandwidthDataByDate", params, &r.Options, &resp) + return +} + +// Retrieve bandwidth graph by date. +func (r Virtual_Guest) GetCustomMetricDataByDate(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + graphData, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCustomMetricDataByDate", params, &r.Options, &resp) + return +} + +// Retrieve The datacenter that a virtual guest resides in. +func (r Virtual_Guest) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve The dedicated host associated with this guest. +func (r Virtual_Guest) GetDedicatedHost() (resp datatypes.Virtual_DedicatedHost, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getDedicatedHost", nil, &r.Options, &resp) + return +} + +// Return a drive retention SoftLayer_Item_Price object for a guest. +func (r Virtual_Guest) GetDriveRetentionItemPrice() (resp datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getDriveRetentionItemPrice", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's associated EVault network storage service account. +func (r Virtual_Guest) GetEvaultNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getEvaultNetworkStorage", nil, &r.Options, &resp) + return +} + +// Get the subnets associated with this CloudLayer computing instance that are protectable by a network component firewall. 
+func (r Virtual_Guest) GetFirewallProtectableSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getFirewallProtectableSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve A computing instance's hardware firewall services. +func (r Virtual_Guest) GetFirewallServiceComponent() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getFirewallServiceComponent", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest) GetFirstAvailableBlockDevicePosition() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getFirstAvailableBlockDevicePosition", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's frontend network components. +func (r Virtual_Guest) GetFrontendNetworkComponents() (resp []datatypes.Virtual_Guest_Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getFrontendNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's frontend or public router. +func (r Virtual_Guest) GetFrontendRouters() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getFrontendRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's universally unique identifier. +func (r Virtual_Guest) GetGlobalIdentifier() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getGlobalIdentifier", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest) GetGuestBootParameter() (resp datatypes.Virtual_Guest_Boot_Parameter, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getGuestBootParameter", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual host on which a virtual guest resides (available only on private clouds). +func (r Virtual_Guest) GetHost() (resp datatypes.Virtual_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getHost", nil, &r.Options, &resp) + return +} + +// Retrieve A host IPS software component object. +func (r Virtual_Guest) GetHostIpsSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getHostIpsSoftwareComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a computing instance is billed hourly instead of monthly. +func (r Virtual_Guest) GetHourlyBillingFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getHourlyBillingFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The total private inbound bandwidth for this computing instance for the current billing cycle. +func (r Virtual_Guest) GetInboundPrivateBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getInboundPrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public inbound bandwidth for this computing instance for the current billing cycle. 
+func (r Virtual_Guest) GetInboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getInboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest) GetInternalTagReferences() (resp []datatypes.Tag_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getInternalTagReferences", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest) GetIsoBootImage() (resp datatypes.Virtual_Disk_Image, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getIsoBootImage", nil, &r.Options, &resp) + return +} + +// Return a collection of SoftLayer_Item_Price objects from a collection of SoftLayer_Software_Description +func (r Virtual_Guest) GetItemPricesFromSoftwareDescriptions(softwareDescriptions []datatypes.Software_Description, includeTranslationsFlag *bool, returnAllPricesFlag *bool) (resp []datatypes.Product_Item, err error) { + params := []interface{}{ + softwareDescriptions, + includeTranslationsFlag, + returnAllPricesFlag, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getItemPricesFromSoftwareDescriptions", params, &r.Options, &resp) + return +} + +// Retrieve The last known power state of a virtual guest in the event the guest is turned off outside of IMS or has gone offline. +func (r Virtual_Guest) GetLastKnownPowerState() (resp datatypes.Virtual_Guest_Power_State, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getLastKnownPowerState", nil, &r.Options, &resp) + return +} + +// Retrieve The last transaction that a cloud server's operating system was loaded. +func (r Virtual_Guest) GetLastOperatingSystemReload() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getLastOperatingSystemReload", nil, &r.Options, &resp) + return +} + +// Retrieve The last transaction a cloud server had performed. +func (r Virtual_Guest) GetLastTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getLastTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual guest's latest network monitoring incident. +func (r Virtual_Guest) GetLatestNetworkMonitorIncident() (resp datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getLatestNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the virtual guest has at least one disk which is local to the host it runs on. This does not include a SWAP device. +func (r Virtual_Guest) GetLocalDiskFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getLocalDiskFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Where guest is located within SoftLayer's location hierarchy. +func (r Virtual_Guest) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the virtual guest is a managed resource. +func (r Virtual_Guest) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// Use this method when needing the metric data for memory for a single computing instance. 
+func (r Virtual_Guest) GetMemoryMetricDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMemoryMetricDataByDate", params, &r.Options, &resp) + return +} + +// Use this method when needing a memory usage image for a single guest. It will gather the correct input parameters for the generic graphing utility automatically based on the snapshot specified. +func (r Virtual_Guest) GetMemoryMetricImage(snapshotRange *string, dateSpecified *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + snapshotRange, + dateSpecified, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMemoryMetricImage", params, &r.Options, &resp) + return +} + +// Use this method when needing a image displaying the amount of memory used over time for a single computing instance. It will gather the correct input parameters for the generic graphing utility based on the date ranges +func (r Virtual_Guest) GetMemoryMetricImageByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMemoryMetricImageByDate", params, &r.Options, &resp) + return +} + +// Retrieve A guest's metric tracking object. +func (r Virtual_Guest) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Retrieve The metric tracking object id for this guest. +func (r Virtual_Guest) GetMetricTrackingObjectId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMetricTrackingObjectId", nil, &r.Options, &resp) + return +} + +// Returns open monitoring alarms for a given time period +func (r Virtual_Guest) GetMonitoringActiveAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringActiveAlarms", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest) GetMonitoringAgents() (resp []datatypes.Monitoring_Agent, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringAgents", nil, &r.Options, &resp) + return +} + +// Returns closed monitoring alarms for a given time period +func (r Virtual_Guest) GetMonitoringClosedAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringClosedAlarms", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest) GetMonitoringRobot() (resp datatypes.Monitoring_Robot, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringRobot", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual guest's network monitoring services. 
+func (r Virtual_Guest) GetMonitoringServiceComponent() (resp datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest) GetMonitoringServiceEligibilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringServiceEligibilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest) GetMonitoringServiceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringServiceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The monitoring notification objects for this guest. Each object links this guest instance to a user account that will be notified if monitoring on this guest object fails +func (r Virtual_Guest) GetMonitoringUserNotification() (resp []datatypes.User_Customer_Notification_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringUserNotification", nil, &r.Options, &resp) + return +} + +// Get the IP addresses associated with this CloudLayer computing instance that are protectable by a network component firewall. Note, this may not return all values for IPv6 subnets for this CloudLayer computing instance. Please use getFirewallProtectableSubnets to get all protectable subnets. +func (r Virtual_Guest) GetNetworkComponentFirewallProtectableIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getNetworkComponentFirewallProtectableIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve A guests's network components. +func (r Virtual_Guest) GetNetworkComponents() (resp []datatypes.Virtual_Guest_Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve All of a virtual guest's network monitoring incidents. +func (r Virtual_Guest) GetNetworkMonitorIncidents() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getNetworkMonitorIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve A guests's network monitors. +func (r Virtual_Guest) GetNetworkMonitors() (resp []datatypes.Network_Monitor_Version1_Query_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getNetworkMonitors", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's associated network storage accounts. +func (r Virtual_Guest) GetNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The network Vlans that a guest's network components are associated with. +func (r Virtual_Guest) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getNetworkVlans", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest) GetObject() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve An open ticket requesting cancellation of this server, if one exists. 
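The getObject call above is normally combined with the Id and Mask option setters, the same chainable pattern shown for the block device template group service at the end of this file. A minimal sketch with a placeholder guest id and a small object mask; the constructors are assumed from elsewhere in softlayer-go.

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("your-username", "your-api-key")

	guest, err := services.GetVirtualGuestService(sess).
		Id(1301456). // placeholder id
		Mask("id,hostname,domain,primaryIpAddress").
		GetObject()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*guest.Hostname + "." + *guest.Domain)
	if guest.PrimaryIpAddress != nil {
		fmt.Println("public IP:", *guest.PrimaryIpAddress)
	}
}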
+func (r Virtual_Guest) GetOpenCancellationTicket() (resp datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOpenCancellationTicket", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's operating system. +func (r Virtual_Guest) GetOperatingSystem() (resp datatypes.Software_Component_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOperatingSystem", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's operating system software description. +func (r Virtual_Guest) GetOperatingSystemReferenceCode() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOperatingSystemReferenceCode", nil, &r.Options, &resp) + return +} + +// Obtain an order container that is ready to be sent to the [[SoftLayer_Product_Order#placeOrder|SoftLayer_Product_Order::placeOrder]] method. This container will include all services that the selected computing instance has. If desired you may remove prices which were returned. +func (r Virtual_Guest) GetOrderTemplate(billingType *string, orderPrices []datatypes.Product_Item_Price) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + billingType, + orderPrices, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOrderTemplate", params, &r.Options, &resp) + return +} + +// Retrieve The original package id provided with the order for a Cloud Computing Instance. +func (r Virtual_Guest) GetOrderedPackageId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOrderedPackageId", nil, &r.Options, &resp) + return +} + +// Retrieve The total private outbound bandwidth for this computing instance for the current billing cycle. +func (r Virtual_Guest) GetOutboundPrivateBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOutboundPrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public outbound bandwidth for this computing instance for the current billing cycle. +func (r Virtual_Guest) GetOutboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOutboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the bandwidth usage for this computing instance for the current billing cycle exceeds the allocation. +func (r Virtual_Guest) GetOverBandwidthAllocationFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOverBandwidthAllocationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve When true this virtual guest must be migrated using SoftLayer_Virtual_Guest::migrate. +func (r Virtual_Guest) GetPendingMigrationFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getPendingMigrationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The current power state of a virtual guest. +func (r Virtual_Guest) GetPowerState() (resp datatypes.Virtual_Guest_Power_State, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getPowerState", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's primary private IP address. +func (r Virtual_Guest) GetPrimaryBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getPrimaryBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's primary backend network component. 
+func (r Virtual_Guest) GetPrimaryBackendNetworkComponent() (resp datatypes.Virtual_Guest_Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getPrimaryBackendNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The guest's primary public IP address. +func (r Virtual_Guest) GetPrimaryIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getPrimaryIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's primary public network component. +func (r Virtual_Guest) GetPrimaryNetworkComponent() (resp datatypes.Virtual_Guest_Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getPrimaryNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the computing instance only has access to the private network. +func (r Virtual_Guest) GetPrivateNetworkOnlyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getPrivateNetworkOnlyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the bandwidth usage for this computing instance for the current billing cycle is projected to exceed the allocation. +func (r Virtual_Guest) GetProjectedOverBandwidthAllocationFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getProjectedOverBandwidthAllocationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The projected public outbound bandwidth for this computing instance for the current billing cycle. +func (r Virtual_Guest) GetProjectedPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getProjectedPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest) GetProvisionDate() (resp datatypes.Time, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getProvisionDate", nil, &r.Options, &resp) + return +} + +// Retrieve Recent events that impact this computing instance. +func (r Virtual_Guest) GetRecentEvents() (resp []datatypes.Notification_Occurrence_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getRecentEvents", nil, &r.Options, &resp) + return +} + +// Recent metric data for a guest +func (r Virtual_Guest) GetRecentMetricData(time *uint) (resp []datatypes.Metric_Tracking_Object, err error) { + params := []interface{}{ + time, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getRecentMetricData", params, &r.Options, &resp) + return +} + +// Retrieve The regional group this guest is in. +func (r Virtual_Guest) GetRegionalGroup() (resp datatypes.Location_Group_Regional, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getRegionalGroup", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest) GetRegionalInternetRegistry() (resp datatypes.Network_Regional_Internet_Registry, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getRegionalInternetRegistry", nil, &r.Options, &resp) + return +} + +// Returns open monitoring alarms generated by monitoring agents that reside in the SoftLayer monitoring cluster. +// +// A monitoring agent with "remoteMonitoringAgentFlag" indicates that it work from SoftLayer monitoring cluster. If a monitoring agent does not have the flag, it resides in your cloud instance. 
+func (r Virtual_Guest) GetRemoteMonitoringActiveAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getRemoteMonitoringActiveAlarms", params, &r.Options, &resp) + return +} + +// Returns closed monitoring alarms generated by monitoring agents that reside in the SoftLayer monitoring cluster. +// +// A monitoring agent with "remoteMonitoringAgentFlag" indicates that it work from SoftLayer monitoring cluster. If a monitoring agent does not have the flag, it resides in your cloud instance. +func (r Virtual_Guest) GetRemoteMonitoringClosedAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getRemoteMonitoringClosedAlarms", params, &r.Options, &resp) + return +} + +// Retrieve the reverse domain records associated with this server. +func (r Virtual_Guest) GetReverseDomainRecords() (resp []datatypes.Dns_Domain, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getReverseDomainRecords", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of scale assets this guest corresponds to. +func (r Virtual_Guest) GetScaleAssets() (resp []datatypes.Scale_Asset, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getScaleAssets", nil, &r.Options, &resp) + return +} + +// Retrieve The scale member for this guest, if applicable. +func (r Virtual_Guest) GetScaleMember() (resp datatypes.Scale_Member_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getScaleMember", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not this guest is a member of a scale group and was automatically created as part of a scale group action. +func (r Virtual_Guest) GetScaledFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getScaledFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's vulnerability scan requests. +func (r Virtual_Guest) GetSecurityScanRequests() (resp []datatypes.Network_Security_Scanner_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getSecurityScanRequests", nil, &r.Options, &resp) + return +} + +// Retrieve The server room that a guest is located at. There may be more than one server room for every data center. +func (r Virtual_Guest) GetServerRoom() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getServerRoom", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's installed software. +func (r Virtual_Guest) GetSoftwareComponents() (resp []datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getSoftwareComponents", nil, &r.Options, &resp) + return +} + +// Retrieve SSH keys to be installed on the server during provisioning or an OS reload. +func (r Virtual_Guest) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getSshKeys", nil, &r.Options, &resp) + return +} + +// Retrieve A computing instance's status. 
+func (r Virtual_Guest) GetStatus() (resp datatypes.Virtual_Guest_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest) GetTagReferences() (resp []datatypes.Tag_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getTagReferences", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a computing instance is a Transient Instance. +func (r Virtual_Guest) GetTransientGuestFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getTransientGuestFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The endpoint used to notify customers their transient guest is terminating. +func (r Virtual_Guest) GetTransientWebhookURI() (resp datatypes.Virtual_Guest_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getTransientWebhookURI", nil, &r.Options, &resp) + return +} + +// Retrieve The type of this virtual guest. +func (r Virtual_Guest) GetType() (resp datatypes.Virtual_Guest_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getType", nil, &r.Options, &resp) + return +} + +// getUpgradeItemPrices() retrieves a list of all upgrades available to a CloudLayer Computing Instance. Upgradeable items include, but are not limited to, number of cores, amount of RAM, storage configuration, and network port speed. +// +// This method exclude downgrade item prices by default. You can set the "includeDowngradeItemPrices" parameter to true so that it can include downgrade item prices. +func (r Virtual_Guest) GetUpgradeItemPrices(includeDowngradeItemPrices *bool) (resp []datatypes.Product_Item_Price, err error) { + params := []interface{}{ + includeDowngradeItemPrices, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getUpgradeItemPrices", params, &r.Options, &resp) + return +} + +// Retrieve A computing instance's associated upgrade request object if any. +func (r Virtual_Guest) GetUpgradeRequest() (resp datatypes.Product_Upgrade_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getUpgradeRequest", nil, &r.Options, &resp) + return +} + +// Retrieve A base64 encoded string containing custom user data for a Cloud Computing Instance order. +func (r Virtual_Guest) GetUserData() (resp []datatypes.Virtual_Guest_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getUserData", nil, &r.Options, &resp) + return +} + +// Retrieve A list of users that have access to this computing instance. +func (r Virtual_Guest) GetUsers() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getUsers", nil, &r.Options, &resp) + return +} + +// This method will return the list of block device template groups that are valid to the host. For instance, it will validate that the template groups returned are compatible with the size and number of disks on the host. +func (r Virtual_Guest) GetValidBlockDeviceTemplateGroups(visibility *string) (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + params := []interface{}{ + visibility, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getValidBlockDeviceTemplateGroups", params, &r.Options, &resp) + return +} + +// Retrieve The name of the bandwidth allotment that a hardware belongs too. 
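A small sketch of the getUpgradeItemPrices call described above: includeDowngradeItemPrices is its only parameter, and the sl.Bool helper (from the softlayer-go sl package, not this file) builds the *bool it expects. The guest id is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("your-username", "your-api-key")
	svc := services.GetVirtualGuestService(sess).Id(1301456) // placeholder id

	// Pass sl.Bool(true) to also include downgrade item prices.
	prices, err := svc.GetUpgradeItemPrices(sl.Bool(false))
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range prices {
		if p.Item != nil && p.Item.Description != nil {
			fmt.Println(*p.Item.Description)
		}
	}
}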
+func (r Virtual_Guest) GetVirtualRack() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getVirtualRack", nil, &r.Options, &resp) + return +} + +// Retrieve The id of the bandwidth allotment that a computing instance belongs too. +func (r Virtual_Guest) GetVirtualRackId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getVirtualRackId", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the bandwidth allotment that a computing instance belongs too. +func (r Virtual_Guest) GetVirtualRackName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getVirtualRackName", nil, &r.Options, &resp) + return +} + +// Issues a ping command and returns the success (true) or failure (false) of the ping command. +func (r Virtual_Guest) IsBackendPingable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "isBackendPingable", nil, &r.Options, &resp) + return +} + +// Issues a ping command and returns the success (true) or failure (false) of the ping command. +func (r Virtual_Guest) IsPingable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "isPingable", nil, &r.Options, &resp) + return +} + +// Closes the public or private ports to isolate the instance before a destructive action. +func (r Virtual_Guest) IsolateInstanceForDestructiveAction() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "isolateInstanceForDestructiveAction", nil, &r.Options, &resp) + return +} + +// Creates a transaction to migrate a virtual guest to a new host. NOTE: Will only migrate if SoftLayer_Virtual_Guest property pendingMigrationFlag = true +func (r Virtual_Guest) Migrate() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "migrate", nil, &r.Options, &resp) + return +} + +// Create a transaction to migrate an instance from one dedicated host to another dedicated host +func (r Virtual_Guest) MigrateDedicatedHost(destinationHostId *int) (err error) { + var resp datatypes.Void + params := []interface{}{ + destinationHostId, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "migrateDedicatedHost", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest) MountIsoImage(diskImageId *int) (resp datatypes.Provisioning_Version1_Transaction, err error) { + params := []interface{}{ + diskImageId, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "mountIsoImage", params, &r.Options, &resp) + return +} + +// Pause a virtual guest +func (r Virtual_Guest) Pause() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "pause", nil, &r.Options, &resp) + return +} + +// Power cycle a virtual guest +func (r Virtual_Guest) PowerCycle() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "powerCycle", nil, &r.Options, &resp) + return +} + +// Power off a virtual guest +func (r Virtual_Guest) PowerOff() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "powerOff", nil, &r.Options, &resp) + return +} + +// Power off a virtual guest +func (r Virtual_Guest) PowerOffSoft() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "powerOffSoft", nil, &r.Options, &resp) + return +} + +// Power on a virtual guest +func (r Virtual_Guest) PowerOn() (resp bool, err error) { + err = 
r.Session.DoRequest("SoftLayer_Virtual_Guest", "powerOn", nil, &r.Options, &resp) + return +} + +// Power cycle a virtual guest +func (r Virtual_Guest) RebootDefault() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "rebootDefault", nil, &r.Options, &resp) + return +} + +// Power cycle a guest. +func (r Virtual_Guest) RebootHard() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "rebootHard", nil, &r.Options, &resp) + return +} + +// Attempt to complete a soft reboot of a guest by shutting down the operating system. +func (r Virtual_Guest) RebootSoft() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "rebootSoft", nil, &r.Options, &resp) + return +} + +// Create a transaction to perform an OS reload +func (r Virtual_Guest) ReloadCurrentOperatingSystemConfiguration() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "reloadCurrentOperatingSystemConfiguration", nil, &r.Options, &resp) + return +} + +// Reloads current operating system configuration. +// +// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server. +// +// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the computing instance to the current specifications on record. +// +// If reloading from an image template, we recommend first getting the list of valid private block device template groups, by calling the getOperatingSystemReloadImages method. +func (r Virtual_Guest) ReloadOperatingSystem(token *string, config *datatypes.Container_Hardware_Server_Configuration) (resp string, err error) { + params := []interface{}{ + token, + config, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "reloadOperatingSystem", params, &r.Options, &resp) + return +} + +// This method is used to remove access to a SoftLayer_Network_Storage volume that supports host- or network-level access control. +func (r Virtual_Guest) RemoveAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "removeAccessToNetworkStorage", params, &r.Options, &resp) + return +} + +// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control. +func (r Virtual_Guest) RemoveAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "removeAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// Resume a virtual guest +func (r Virtual_Guest) Resume() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "resume", nil, &r.Options, &resp) + return +} + +// Sets the private network interface speed to the new speed. Speed values can only be 0 (Disconnect), 10, 100, or 1000. 
The new speed must be equal to or less than the max speed of the interface. +// +// It will take less than a minute to update the port speed. +func (r Virtual_Guest) SetPrivateNetworkInterfaceSpeed(newSpeed *int) (resp bool, err error) { + params := []interface{}{ + newSpeed, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "setPrivateNetworkInterfaceSpeed", params, &r.Options, &resp) + return +} + +// Sets the public network interface speed to the new speed. Speed values can only be 0 (Disconnect), 10, 100, or 1000. The new speed must be equal to or less than the max speed of the interface. +// +// It will take less than a minute to update the port speed. +func (r Virtual_Guest) SetPublicNetworkInterfaceSpeed(newSpeed *int) (resp bool, err error) { + params := []interface{}{ + newSpeed, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "setPublicNetworkInterfaceSpeed", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest) SetTags(tags *string) (resp bool, err error) { + params := []interface{}{ + tags, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "setTags", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest) SetTransientWebhook(uri *string, secret *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + uri, + secret, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "setTransientWebhook", params, &r.Options, &resp) + return +} + +// Sets the data that will be written to the configuration drive. +func (r Virtual_Guest) SetUserMetadata(metadata []string) (resp bool, err error) { + params := []interface{}{ + metadata, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "setUserMetadata", params, &r.Options, &resp) + return +} + +// Shuts down the private network port +func (r Virtual_Guest) ShutdownPrivatePort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "shutdownPrivatePort", nil, &r.Options, &resp) + return +} + +// Shuts down the public network port +func (r Virtual_Guest) ShutdownPublicPort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "shutdownPublicPort", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest) UnmountIsoImage() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "unmountIsoImage", nil, &r.Options, &resp) + return +} + +// Validate an image template for OS Reload +func (r Virtual_Guest) ValidateImageTemplate(imageTemplateId *int) (resp bool, err error) { + params := []interface{}{ + imageTemplateId, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "validateImageTemplate", params, &r.Options, &resp) + return +} + +// Verify that a virtual server can go through the operating system reload process. It may be useful to call this method before attempting to actually reload the operating system just to verify that the reload will go smoothly. If the server configuration is not setup correctly or there is some other issue, an exception will be thrown indicating the error. If there were no issues, this will just return true. 
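Putting the two reload calls above together: verifyReloadOperatingSystem checks that a reload can proceed, and reloadOperatingSystem with the 'FORCE' token skips the confirmation round-trip described in its comment. This is a destructive-operation sketch with a placeholder guest id; passing an empty configuration container to request a reload of the current specifications is an assumption here, as is the usual session/services/sl plumbing from elsewhere in softlayer-go.

package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("your-username", "your-api-key")
	svc := services.GetVirtualGuestService(sess).Id(1301456) // placeholder id

	// Empty container: reload the current configuration on record.
	// Back up all data before running this against a real guest.
	cfg := datatypes.Container_Hardware_Server_Configuration{}

	if _, err := svc.VerifyReloadOperatingSystem(&cfg); err != nil {
		log.Fatal("reload would not succeed: ", err)
	}

	// 'FORCE' bypasses the token confirmation step.
	result, err := svc.ReloadOperatingSystem(sl.String("FORCE"), &cfg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("reload response:", result)
}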
+func (r Virtual_Guest) VerifyReloadOperatingSystem(config *datatypes.Container_Hardware_Server_Configuration) (resp bool, err error) { + params := []interface{}{ + config, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "verifyReloadOperatingSystem", params, &r.Options, &resp) + return +} + +// The virtual block device template group data type presents the structure in which a group of archived image templates will be presented. The structure consists of a parent template group which contain multiple child template group objects. Each child template group object represents the image template in a particular location. Unless editing/deleting a specific child template group object, it is best to use the parent object. +// +// A virtual block device template group, also known as an image template group, represents an image of a virtual guest instance. +type Virtual_Guest_Block_Device_Template_Group struct { + Session *session.Session + Options sl.Options +} + +// GetVirtualGuestBlockDeviceTemplateGroupService returns an instance of the Virtual_Guest_Block_Device_Template_Group SoftLayer service +func GetVirtualGuestBlockDeviceTemplateGroupService(sess *session.Session) Virtual_Guest_Block_Device_Template_Group { + return Virtual_Guest_Block_Device_Template_Group{Session: sess} +} + +func (r Virtual_Guest_Block_Device_Template_Group) Id(id int) Virtual_Guest_Block_Device_Template_Group { + r.Options.Id = &id + return r +} + +func (r Virtual_Guest_Block_Device_Template_Group) Mask(mask string) Virtual_Guest_Block_Device_Template_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Virtual_Guest_Block_Device_Template_Group) Filter(filter string) Virtual_Guest_Block_Device_Template_Group { + r.Options.Filter = filter + return r +} + +func (r Virtual_Guest_Block_Device_Template_Group) Limit(limit int) Virtual_Guest_Block_Device_Template_Group { + r.Options.Limit = &limit + return r +} + +func (r Virtual_Guest_Block_Device_Template_Group) Offset(offset int) Virtual_Guest_Block_Device_Template_Group { + r.Options.Offset = &offset + return r +} + +// This method allows you to mark this image template as customer managed software license (BYOL) +func (r Virtual_Guest_Block_Device_Template_Group) AddByolAttribute() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "addByolAttribute", nil, &r.Options, &resp) + return +} + +// This method allows you to mark this image template as cloud init +func (r Virtual_Guest_Block_Device_Template_Group) AddCloudInitAttribute() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "addCloudInitAttribute", nil, &r.Options, &resp) + return +} + +// This method will create transaction(s) to add available locations to an archive image template. +func (r Virtual_Guest_Block_Device_Template_Group) AddLocations(locations []datatypes.Location) (resp bool, err error) { + params := []interface{}{ + locations, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "addLocations", params, &r.Options, &resp) + return +} + +// This method allows you to mark this image's supported boot modes as 'HVM' or 'PV'. 
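+//
+// For example (illustrative; "imageGroup" is assumed to be this service scoped
+// with Id() to an image template group):
+//
+//	_, err := imageGroup.AddSupportedBootMode(sl.String("HVM"))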
+func (r Virtual_Guest_Block_Device_Template_Group) AddSupportedBootMode(bootMode *string) (resp bool, err error) { + params := []interface{}{ + bootMode, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "addSupportedBootMode", params, &r.Options, &resp) + return +} + +// Create a transaction to export/copy a template to an external source. +func (r Virtual_Guest_Block_Device_Template_Group) CopyToExternalSource(configuration *datatypes.Container_Virtual_Guest_Block_Device_Template_Configuration) (resp bool, err error) { + params := []interface{}{ + configuration, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "copyToExternalSource", params, &r.Options, &resp) + return +} + +// Create a transaction to import a disk image from an external source and create a standard image template. +func (r Virtual_Guest_Block_Device_Template_Group) CreateFromExternalSource(configuration *datatypes.Container_Virtual_Guest_Block_Device_Template_Configuration) (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + params := []interface{}{ + configuration, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "createFromExternalSource", params, &r.Options, &resp) + return +} + +// Create a transaction to copy archived block devices into public repository +func (r Virtual_Guest_Block_Device_Template_Group) CreatePublicArchiveTransaction(groupName *string, summary *string, note *string, locations []datatypes.Location) (resp int, err error) { + params := []interface{}{ + groupName, + summary, + note, + locations, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "createPublicArchiveTransaction", params, &r.Options, &resp) + return +} + +// This method allows you to remove BYOL attribute for a given image template. +func (r Virtual_Guest_Block_Device_Template_Group) DeleteByolAttribute() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "deleteByolAttribute", nil, &r.Options, &resp) + return +} + +// This method allows you to remove cloud init attribute for a given image template. +func (r Virtual_Guest_Block_Device_Template_Group) DeleteCloudInitAttribute() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "deleteCloudInitAttribute", nil, &r.Options, &resp) + return +} + +// Deleting a block device template group is different from the deletion of other objects. A block device template group can contain several gigabytes of data in its disk images. This may take some time to delete and requires a transaction to be created. This method creates a transaction that will delete all resources associated with the block device template group. +func (r Virtual_Guest_Block_Device_Template_Group) DeleteObject() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "deleteObject", nil, &r.Options, &resp) + return +} + +// This method will deny another SoftLayer customer account's previously given access to provision CloudLayer Computing Instances from an image template group. Template access should only be removed from the parent template group object, not the child. 
+func (r Virtual_Guest_Block_Device_Template_Group) DenySharingAccess(accountId *int) (resp bool, err error) { + params := []interface{}{ + accountId, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "denySharingAccess", params, &r.Options, &resp) + return +} + +// Edit an image template group's associated name and note. All other properties in the SoftLayer_Virtual_Guest_Block_Device_Template_Group data type are read-only. +func (r Virtual_Guest_Block_Device_Template_Group) EditObject(templateObject *datatypes.Virtual_Guest_Block_Device_Template_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "editObject", params, &r.Options, &resp) + return +} + +// Find block device template groups contain GC enabled image for the current active user. Caller can optionally specify data center names to retrieve GC image from those data centers only. +func (r Virtual_Guest_Block_Device_Template_Group) FindGcImagesByCurrentUser(dataCenters []string) (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + params := []interface{}{ + dataCenters, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "findGcImagesByCurrentUser", params, &r.Options, &resp) + return +} + +// Retrieve A block device template group's [[SoftLayer_Account|account]]. +func (r Virtual_Guest_Block_Device_Template_Group) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest_Block_Device_Template_Group) GetAccountContacts() (resp []datatypes.Account_Contact, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getAccountContacts", nil, &r.Options, &resp) + return +} + +// Retrieve The accounts which may have read-only access to an image template group. Will only be populated for parent template group objects. +func (r Virtual_Guest_Block_Device_Template_Group) GetAccountReferences() (resp []datatypes.Virtual_Guest_Block_Device_Template_Group_Accounts, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getAccountReferences", nil, &r.Options, &resp) + return +} + +// Retrieve The block devices that are part of an image template group +func (r Virtual_Guest_Block_Device_Template_Group) GetBlockDevices() (resp []datatypes.Virtual_Guest_Block_Device_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getBlockDevices", nil, &r.Options, &resp) + return +} + +// Retrieve The total disk space of all images in a image template group. +func (r Virtual_Guest_Block_Device_Template_Group) GetBlockDevicesDiskSpaceTotal() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getBlockDevicesDiskSpaceTotal", nil, &r.Options, &resp) + return +} + +// This method returns the boot mode, if any, set on a given image template. +func (r Virtual_Guest_Block_Device_Template_Group) GetBootMode() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getBootMode", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that customer is providing the software licenses. 
+func (r Virtual_Guest_Block_Device_Template_Group) GetByolFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getByolFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The image template groups that are clones of an image template group. +func (r Virtual_Guest_Block_Device_Template_Group) GetChildren() (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getChildren", nil, &r.Options, &resp) + return +} + +// Retrieve The location containing this image template group. Will only be populated for child template group objects. +func (r Virtual_Guest_Block_Device_Template_Group) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of locations containing a copy of this image template group. Will only be populated for parent template group objects. +func (r Virtual_Guest_Block_Device_Template_Group) GetDatacenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getDatacenters", nil, &r.Options, &resp) + return +} + +// This method returns the default boot mode set by the software description +func (r Virtual_Guest_Block_Device_Template_Group) GetDefaultBootMode() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getDefaultBootMode", nil, &r.Options, &resp) + return +} + +// This method returns an array of encryption values +func (r Virtual_Guest_Block_Device_Template_Group) GetEncryptionAttributes() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getEncryptionAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating if this is a flex image. +func (r Virtual_Guest_Block_Device_Template_Group) GetFlexImageFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getFlexImageFlag", nil, &r.Options, &resp) + return +} + +// Retrieve An image template's universally unique identifier. +func (r Virtual_Guest_Block_Device_Template_Group) GetGlobalIdentifier() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getGlobalIdentifier", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual disk image type of this template. Value will be populated on parent and child, but only supports object filtering on the parent. +func (r Virtual_Guest_Block_Device_Template_Group) GetImageType() (resp datatypes.Virtual_Disk_Image_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getImageType", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual disk image type keyname (e.g. SYSTEM, DISK_CAPTURE, ISO, etc) of this template. Value will be populated on parent and child, but only supports object filtering on the parent. 
+func (r Virtual_Guest_Block_Device_Template_Group) GetImageTypeKeyName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getImageTypeKeyName", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest_Block_Device_Template_Group) GetObject() (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The image template group that another image template group was cloned from. +func (r Virtual_Guest_Block_Device_Template_Group) GetParent() (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getParent", nil, &r.Options, &resp) + return +} + +// This method gets all public customer owned image templates that the user is allowed to see. +func (r Virtual_Guest_Block_Device_Template_Group) GetPublicCustomerOwnedImages() (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getPublicCustomerOwnedImages", nil, &r.Options, &resp) + return +} + +// This method gets all public image templates that the user is allowed to see. +func (r Virtual_Guest_Block_Device_Template_Group) GetPublicImages() (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getPublicImages", nil, &r.Options, &resp) + return +} + +// Retrieve The ssh keys to be implemented on the server when provisioned or reloaded from an image template group. +func (r Virtual_Guest_Block_Device_Template_Group) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getSshKeys", nil, &r.Options, &resp) + return +} + +// Retrieve A template group's status. +func (r Virtual_Guest_Block_Device_Template_Group) GetStatus() (resp datatypes.Virtual_Guest_Block_Device_Template_Group_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getStatus", nil, &r.Options, &resp) + return +} + +// Returns the image storage locations. +func (r Virtual_Guest_Block_Device_Template_Group) GetStorageLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getStorageLocations", nil, &r.Options, &resp) + return +} + +// Retrieve The storage repository that an image template group resides on. +func (r Virtual_Guest_Block_Device_Template_Group) GetStorageRepository() (resp datatypes.Virtual_Storage_Repository, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getStorageRepository", nil, &r.Options, &resp) + return +} + +// This method indicates which boot modes are supported by the image. +func (r Virtual_Guest_Block_Device_Template_Group) GetSupportedBootModes() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getSupportedBootModes", nil, &r.Options, &resp) + return +} + +// Retrieve The tags associated with this image template group. 
+func (r Virtual_Guest_Block_Device_Template_Group) GetTagReferences() (resp []datatypes.Tag_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getTagReferences", nil, &r.Options, &resp) + return +} + +// Retrieve A transaction that is being performed on a image template group. +func (r Virtual_Guest_Block_Device_Template_Group) GetTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getTransaction", nil, &r.Options, &resp) + return +} + +// Returns an array of SoftLayer_Software_Description that are supported for VHD imports. +func (r Virtual_Guest_Block_Device_Template_Group) GetVhdImportSoftwareDescriptions() (resp []datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "getVhdImportSoftwareDescriptions", nil, &r.Options, &resp) + return +} + +// This method indicates whether or not this image is a customer supplied license image. +func (r Virtual_Guest_Block_Device_Template_Group) IsByol() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "isByol", nil, &r.Options, &resp) + return +} + +// This method indicates whether or not this image uses an operating system capable of using a customer supplied license image. +func (r Virtual_Guest_Block_Device_Template_Group) IsByolCapableOperatingSystem() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "isByolCapableOperatingSystem", nil, &r.Options, &resp) + return +} + +// This method indicates whether or not this image uses an operating system that requires using a customer supplied license image +func (r Virtual_Guest_Block_Device_Template_Group) IsByolOnlyOperatingSystem() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "isByolOnlyOperatingSystem", nil, &r.Options, &resp) + return +} + +// This method indicates whether or not this image is a cloud-init image. +func (r Virtual_Guest_Block_Device_Template_Group) IsCloudInit() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "isCloudInit", nil, &r.Options, &resp) + return +} + +// This method indicates whether or not this image uses an operating system that requires cloud init +func (r Virtual_Guest_Block_Device_Template_Group) IsCloudInitOnlyOperatingSystem() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "isCloudInitOnlyOperatingSystem", nil, &r.Options, &resp) + return +} + +// This method indicates whether or not encrypted attributes are set on the primary disk. +func (r Virtual_Guest_Block_Device_Template_Group) IsEncrypted() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "isEncrypted", nil, &r.Options, &resp) + return +} + +// This method will permit another SoftLayer customer account access to provision CloudLayer Computing Instances from an image template group. Template access should only be given to the parent template group object, not the child. 
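+//
+// Illustrative sketch (assumes "parentGroup" is this service scoped with Id()
+// to the parent template group and "otherAccountId" is the account to share with):
+//
+//	granted, err := parentGroup.PermitSharingAccess(sl.Int(otherAccountId))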
+func (r Virtual_Guest_Block_Device_Template_Group) PermitSharingAccess(accountId *int) (resp bool, err error) { + params := []interface{}{ + accountId, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "permitSharingAccess", params, &r.Options, &resp) + return +} + +// This method will create transaction(s) to remove available locations from an archive image template. +func (r Virtual_Guest_Block_Device_Template_Group) RemoveLocations(locations []datatypes.Location) (resp bool, err error) { + params := []interface{}{ + locations, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "removeLocations", params, &r.Options, &resp) + return +} + +// This method allows you to remove a supported boot mode attribute for a given image template. +func (r Virtual_Guest_Block_Device_Template_Group) RemoveSupportedBootMode(bootMode *string) (resp bool, err error) { + params := []interface{}{ + bootMode, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "removeSupportedBootMode", params, &r.Options, &resp) + return +} + +// Create transaction(s) to set the archived block device available locations +func (r Virtual_Guest_Block_Device_Template_Group) SetAvailableLocations(locations []datatypes.Location) (resp bool, err error) { + params := []interface{}{ + locations, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "setAvailableLocations", params, &r.Options, &resp) + return +} + +// This method allows you to specify the boot mode for a given image template. +func (r Virtual_Guest_Block_Device_Template_Group) SetBootMode(newBootMode *string) (resp bool, err error) { + params := []interface{}{ + newBootMode, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "setBootMode", params, &r.Options, &resp) + return +} + +// Set the tags for this template group. 
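+//
+// For example (illustrative; "imageGroup" is assumed to be scoped with Id(), and
+// SoftLayer setTags methods generally expect a single comma-separated string of
+// tag names):
+//
+//	_, err := imageGroup.SetTags(sl.String("base-image,production"))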
+func (r Virtual_Guest_Block_Device_Template_Group) SetTags(tags *string) (resp bool, err error) { + params := []interface{}{ + tags, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Block_Device_Template_Group", "setTags", params, &r.Options, &resp) + return +} + +// no documentation yet +type Virtual_Guest_Boot_Parameter struct { + Session *session.Session + Options sl.Options +} + +// GetVirtualGuestBootParameterService returns an instance of the Virtual_Guest_Boot_Parameter SoftLayer service +func GetVirtualGuestBootParameterService(sess *session.Session) Virtual_Guest_Boot_Parameter { + return Virtual_Guest_Boot_Parameter{Session: sess} +} + +func (r Virtual_Guest_Boot_Parameter) Id(id int) Virtual_Guest_Boot_Parameter { + r.Options.Id = &id + return r +} + +func (r Virtual_Guest_Boot_Parameter) Mask(mask string) Virtual_Guest_Boot_Parameter { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Virtual_Guest_Boot_Parameter) Filter(filter string) Virtual_Guest_Boot_Parameter { + r.Options.Filter = filter + return r +} + +func (r Virtual_Guest_Boot_Parameter) Limit(limit int) Virtual_Guest_Boot_Parameter { + r.Options.Limit = &limit + return r +} + +func (r Virtual_Guest_Boot_Parameter) Offset(offset int) Virtual_Guest_Boot_Parameter { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Virtual_Guest_Boot_Parameter) CreateObject(templateObject *datatypes.Virtual_Guest_Boot_Parameter) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Boot_Parameter", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest_Boot_Parameter) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Boot_Parameter", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest_Boot_Parameter) EditObject(templateObject *datatypes.Virtual_Guest_Boot_Parameter) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Boot_Parameter", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest_Boot_Parameter) GetGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Boot_Parameter", "getGuest", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest_Boot_Parameter) GetGuestBootParameterType() (resp datatypes.Virtual_Guest_Boot_Parameter_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Boot_Parameter", "getGuestBootParameterType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest_Boot_Parameter) GetObject() (resp datatypes.Virtual_Guest_Boot_Parameter, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Boot_Parameter", "getObject", nil, &r.Options, &resp) + return +} + +// Describes a virtual guest boot parameter. In this the word class is used in the context of arguments sent to cloud computing instances such as single user mode and boot into bash. 
+type Virtual_Guest_Boot_Parameter_Type struct { + Session *session.Session + Options sl.Options +} + +// GetVirtualGuestBootParameterTypeService returns an instance of the Virtual_Guest_Boot_Parameter_Type SoftLayer service +func GetVirtualGuestBootParameterTypeService(sess *session.Session) Virtual_Guest_Boot_Parameter_Type { + return Virtual_Guest_Boot_Parameter_Type{Session: sess} +} + +func (r Virtual_Guest_Boot_Parameter_Type) Id(id int) Virtual_Guest_Boot_Parameter_Type { + r.Options.Id = &id + return r +} + +func (r Virtual_Guest_Boot_Parameter_Type) Mask(mask string) Virtual_Guest_Boot_Parameter_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Virtual_Guest_Boot_Parameter_Type) Filter(filter string) Virtual_Guest_Boot_Parameter_Type { + r.Options.Filter = filter + return r +} + +func (r Virtual_Guest_Boot_Parameter_Type) Limit(limit int) Virtual_Guest_Boot_Parameter_Type { + r.Options.Limit = &limit + return r +} + +func (r Virtual_Guest_Boot_Parameter_Type) Offset(offset int) Virtual_Guest_Boot_Parameter_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Virtual_Guest_Boot_Parameter_Type) GetAllObjects() (resp []datatypes.Virtual_Guest_Boot_Parameter_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Boot_Parameter_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest_Boot_Parameter_Type) GetObject() (resp datatypes.Virtual_Guest_Boot_Parameter_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Boot_Parameter_Type", "getObject", nil, &r.Options, &resp) + return +} + +// The virtual guest network component data type presents the structure in which all computing instance network components are presented. Internally, the structure supports various virtualization platforms with no change to external interaction. +// +// A guest, also known as a virtual server, represents an allocation of resources on a virtual host. 
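+//
+// A minimal usage sketch (illustrative; "componentId" is assumed to come from the
+// guest record, e.g. via an object mask on SoftLayer_Virtual_Guest):
+//
+//	nic := services.GetVirtualGuestNetworkComponentService(sess).Id(componentId)
+//	ip, err := nic.GetPrimaryIpAddress()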
+type Virtual_Guest_Network_Component struct { + Session *session.Session + Options sl.Options +} + +// GetVirtualGuestNetworkComponentService returns an instance of the Virtual_Guest_Network_Component SoftLayer service +func GetVirtualGuestNetworkComponentService(sess *session.Session) Virtual_Guest_Network_Component { + return Virtual_Guest_Network_Component{Session: sess} +} + +func (r Virtual_Guest_Network_Component) Id(id int) Virtual_Guest_Network_Component { + r.Options.Id = &id + return r +} + +func (r Virtual_Guest_Network_Component) Mask(mask string) Virtual_Guest_Network_Component { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Virtual_Guest_Network_Component) Filter(filter string) Virtual_Guest_Network_Component { + r.Options.Filter = filter + return r +} + +func (r Virtual_Guest_Network_Component) Limit(limit int) Virtual_Guest_Network_Component { + r.Options.Limit = &limit + return r +} + +func (r Virtual_Guest_Network_Component) Offset(offset int) Virtual_Guest_Network_Component { + r.Options.Offset = &offset + return r +} + +// Completely restrict all incoming and outgoing bandwidth traffic to a network component +func (r Virtual_Guest_Network_Component) Disable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "disable", nil, &r.Options, &resp) + return +} + +// Allow incoming and outgoing bandwidth traffic to a network component +func (r Virtual_Guest_Network_Component) Enable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "enable", nil, &r.Options, &resp) + return +} + +// Retrieve The computing instance that this network component exists on. +func (r Virtual_Guest_Network_Component) GetGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "getGuest", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest_Network_Component) GetHighAvailabilityFirewallFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "getHighAvailabilityFirewallFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest_Network_Component) GetIcpBinding() (resp datatypes.Virtual_Guest_Network_Component_IcpBinding, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "getIcpBinding", nil, &r.Options, &resp) + return +} + +// Retrieve The records of all IP addresses bound to a computing instance's network component. +func (r Virtual_Guest_Network_Component) GetIpAddressBindings() (resp []datatypes.Virtual_Guest_Network_Component_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "getIpAddressBindings", nil, &r.Options, &resp) + return +} + +// Retrieve The upstream network component firewall. +func (r Virtual_Guest_Network_Component) GetNetworkComponentFirewall() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "getNetworkComponentFirewall", nil, &r.Options, &resp) + return +} + +// Retrieve The VLAN that a computing instance network component's subnet is associated with. 
+func (r Virtual_Guest_Network_Component) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "getNetworkVlan", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest_Network_Component) GetObject() (resp datatypes.Virtual_Guest_Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve A computing instance network component's primary IP address. +func (r Virtual_Guest_Network_Component) GetPrimaryIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "getPrimaryIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest_Network_Component) GetPrimaryIpAddressRecord() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "getPrimaryIpAddressRecord", nil, &r.Options, &resp) + return +} + +// Retrieve A network component's subnet for its primary IP address +func (r Virtual_Guest_Network_Component) GetPrimarySubnet() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "getPrimarySubnet", nil, &r.Options, &resp) + return +} + +// Retrieve A network component's primary IPv6 IP address record. +func (r Virtual_Guest_Network_Component) GetPrimaryVersion6IpAddressRecord() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "getPrimaryVersion6IpAddressRecord", nil, &r.Options, &resp) + return +} + +// Retrieve A network component's routers. +func (r Virtual_Guest_Network_Component) GetRouter() (resp datatypes.Hardware_Router, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "getRouter", nil, &r.Options, &resp) + return +} + +// Retrieve The bindings associating security groups to this network component +func (r Virtual_Guest_Network_Component) GetSecurityGroupBindings() (resp []datatypes.Virtual_Network_SecurityGroup_NetworkComponentBinding, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "getSecurityGroupBindings", nil, &r.Options, &resp) + return +} + +// Retrieve A network component's subnets. A subnet is a group of IP addresses +func (r Virtual_Guest_Network_Component) GetSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "getSubnets", nil, &r.Options, &resp) + return +} + +// Issues a ping command and returns the success (true) or failure (false) of the ping command. +func (r Virtual_Guest_Network_Component) IsPingable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "isPingable", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest_Network_Component) SecurityGroupsReady() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest_Network_Component", "securityGroupsReady", nil, &r.Options, &resp) + return +} + +// The virtual host represents the platform on which virtual guests reside. At times a virtual host has no allocations on the physical server, however with many modern platforms it is a virtual machine with small CPU and Memory allocations that runs in the Control Domain. 
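+//
+// Illustrative sketch (assumes "hostId" identifies a virtual host on the account):
+//
+//	host := services.GetVirtualHostService(sess).Id(hostId)
+//	guests, err := host.GetLiveGuestList()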
+type Virtual_Host struct { + Session *session.Session + Options sl.Options +} + +// GetVirtualHostService returns an instance of the Virtual_Host SoftLayer service +func GetVirtualHostService(sess *session.Session) Virtual_Host { + return Virtual_Host{Session: sess} +} + +func (r Virtual_Host) Id(id int) Virtual_Host { + r.Options.Id = &id + return r +} + +func (r Virtual_Host) Mask(mask string) Virtual_Host { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Virtual_Host) Filter(filter string) Virtual_Host { + r.Options.Filter = filter + return r +} + +func (r Virtual_Host) Limit(limit int) Virtual_Host { + r.Options.Limit = &limit + return r +} + +func (r Virtual_Host) Offset(offset int) Virtual_Host { + r.Options.Offset = &offset + return r +} + +// Retrieve The account which a virtual host belongs to. +func (r Virtual_Host) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve Boolean flag indicating whether this virtualization platform gets billed per guest rather than at a fixed rate. +func (r Virtual_Host) GetBilledPerGuestFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "getBilledPerGuestFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Boolean flag indicating whether this virtualization platform gets billed per memory usage rather than at a fixed rate. +func (r Virtual_Host) GetBilledPerMemoryUsageFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "getBilledPerMemoryUsageFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The guests associated with a virtual host. +func (r Virtual_Host) GetGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "getGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware record which a virtual host resides on. +func (r Virtual_Host) GetHardware() (resp datatypes.Hardware_Server, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "getHardware", nil, &r.Options, &resp) + return +} + +// Query a virtualization platform directly to retrieve details regarding a guest. +func (r Virtual_Host) GetLiveGuestByUuid(uuid *string) (resp datatypes.Virtual_Guest, err error) { + params := []interface{}{ + uuid, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "getLiveGuestByUuid", params, &r.Options, &resp) + return +} + +// Query a virtualization platform directly to retrieve a list of known guests. +func (r Virtual_Host) GetLiveGuestList() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "getLiveGuestList", nil, &r.Options, &resp) + return +} + +// Query a virtualization platform directly to retrieve recent metric data for a guest. +func (r Virtual_Host) GetLiveGuestRecentMetricData(uuid *string, time *int, limit *int, interval *int) (resp []datatypes.Metric_Tracking_Object, err error) { + params := []interface{}{ + uuid, + time, + limit, + interval, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "getLiveGuestRecentMetricData", params, &r.Options, &resp) + return +} + +// Retrieve The metric tracking object for this virtual host. 
+func (r Virtual_Host) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Host) GetObject() (resp datatypes.Virtual_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "getObject", nil, &r.Options, &resp) + return +} + +// Pause a virtual guest +func (r Virtual_Host) PauseLiveGuest(uuid *string) (resp bool, err error) { + params := []interface{}{ + uuid, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "pauseLiveGuest", params, &r.Options, &resp) + return +} + +// Power cycle a virtual guest +func (r Virtual_Host) PowerCycleLiveGuest(uuid *string) (resp bool, err error) { + params := []interface{}{ + uuid, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "powerCycleLiveGuest", params, &r.Options, &resp) + return +} + +// Power off a virtual guest +func (r Virtual_Host) PowerOffLiveGuest(uuid *string) (resp bool, err error) { + params := []interface{}{ + uuid, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "powerOffLiveGuest", params, &r.Options, &resp) + return +} + +// Power on a virtual guest +func (r Virtual_Host) PowerOnLiveGuest(uuid *string) (resp bool, err error) { + params := []interface{}{ + uuid, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "powerOnLiveGuest", params, &r.Options, &resp) + return +} + +// Attempt to complete a soft reboot of a guest by shutting down the operating system. +func (r Virtual_Host) RebootSoftLiveGuest(uuid *string) (resp bool, err error) { + params := []interface{}{ + uuid, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "rebootSoftLiveGuest", params, &r.Options, &resp) + return +} + +// Resume a virtual guest +func (r Virtual_Host) ResumeLiveGuest(uuid *string) (resp bool, err error) { + params := []interface{}{ + uuid, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Host", "resumeLiveGuest", params, &r.Options, &resp) + return +} + +// The SoftLayer_Virtual_Storage_Repository represents a web based storage system that can be accessed through many types of devices, interfaces, and other resources. +type Virtual_Storage_Repository struct { + Session *session.Session + Options sl.Options +} + +// GetVirtualStorageRepositoryService returns an instance of the Virtual_Storage_Repository SoftLayer service +func GetVirtualStorageRepositoryService(sess *session.Session) Virtual_Storage_Repository { + return Virtual_Storage_Repository{Session: sess} +} + +func (r Virtual_Storage_Repository) Id(id int) Virtual_Storage_Repository { + r.Options.Id = &id + return r +} + +func (r Virtual_Storage_Repository) Mask(mask string) Virtual_Storage_Repository { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Virtual_Storage_Repository) Filter(filter string) Virtual_Storage_Repository { + r.Options.Filter = filter + return r +} + +func (r Virtual_Storage_Repository) Limit(limit int) Virtual_Storage_Repository { + r.Options.Limit = &limit + return r +} + +func (r Virtual_Storage_Repository) Offset(offset int) Virtual_Storage_Repository { + r.Options.Offset = &offset + return r +} + +// Retrieve The [[SoftLayer_Account|account]] that a storage repository belongs to. 
+func (r Virtual_Storage_Repository) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Storage_Repository", "getAccount", nil, &r.Options, &resp) + return +} + +// Returns the archive storage disk usage fee rate per gigabyte. +func (r Virtual_Storage_Repository) GetArchiveDiskUsageRatePerGb() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Storage_Repository", "getArchiveDiskUsageRatePerGb", nil, &r.Options, &resp) + return +} + +// Returns the average disk space usage for a storage repository. +func (r Virtual_Storage_Repository) GetAverageUsageMetricDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Storage_Repository", "getAverageUsageMetricDataByDate", params, &r.Options, &resp) + return +} + +// Retrieve The current billing item for a storage repository. +func (r Virtual_Storage_Repository) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Storage_Repository", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The datacenter that a virtual storage repository resides in. +func (r Virtual_Storage_Repository) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Storage_Repository", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve The [[SoftLayer_Virtual_Disk_Image|disk images]] that are in a storage repository. Disk images are the virtual hard drives for a virtual guest. +func (r Virtual_Storage_Repository) GetDiskImages() (resp []datatypes.Virtual_Disk_Image, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Storage_Repository", "getDiskImages", nil, &r.Options, &resp) + return +} + +// Retrieve The computing instances that have disk images in a storage repository. +func (r Virtual_Storage_Repository) GetGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Storage_Repository", "getGuests", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Storage_Repository) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object_Virtual_Storage_Repository, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Storage_Repository", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Storage_Repository) GetObject() (resp datatypes.Virtual_Storage_Repository, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Storage_Repository", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for a public storage repository. +func (r Virtual_Storage_Repository) GetPublicImageBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Storage_Repository", "getPublicImageBillingItem", nil, &r.Options, &resp) + return +} + +// Returns the public image storage disk usage fee rate per gigabyte. +func (r Virtual_Storage_Repository) GetPublicImageDiskUsageRatePerGb() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Storage_Repository", "getPublicImageDiskUsageRatePerGb", nil, &r.Options, &resp) + return +} + +// Returns the public image storage locations. 
+func (r Virtual_Storage_Repository) GetStorageLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Storage_Repository", "getStorageLocations", nil, &r.Options, &resp) + return +} + +// Retrieve A storage repository's [[SoftLayer_Virtual_Storage_Repository_Type|type]]. +func (r Virtual_Storage_Repository) GetType() (resp datatypes.Virtual_Storage_Repository_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Storage_Repository", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve disk usage data on a [[SoftLayer_Virtual_Guest|Cloud Computing Instance]] image for the time range you provide. Each data entry objects contain ''dateTime'' and ''counter'' properties. ''dateTime'' property indicates the time that the disk usage data was measured and ''counter'' property holds the disk usage in bytes. +func (r Virtual_Storage_Repository) GetUsageMetricDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Storage_Repository", "getUsageMetricDataByDate", params, &r.Options, &resp) + return +} + +// Returns a disk usage image based on disk usage specified by the input parameters. +func (r Virtual_Storage_Repository) GetUsageMetricImageByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Storage_Repository", "getUsageMetricImageByDate", params, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/session/rest.go b/vendor/github.com/softlayer/softlayer-go/session/rest.go new file mode 100644 index 000000000000..9bfaaae3f002 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/session/rest.go @@ -0,0 +1,314 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package session + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "math/rand" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "time" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/sl" +) + +type RestTransport struct{} + +// DoRequest - Implementation of the TransportHandler interface for handling +// calls to the REST endpoint. 
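+//
+// As a sketch of the mapping performed here (illustrative values only): a call such as
+//
+//	guest, err := services.GetVirtualGuestService(sess).Id(123).Mask("id,hostname").GetObject()
+//
+// is sent as
+//
+//	GET {endpoint}/SoftLayer_Virtual_Guest/123.json?objectMask=mask%5Bid%2Chostname%5D
+//
+// because getObject is one of the basic REST methods whose name is omitted from the
+// path (see buildPath), the mask is passed as the objectMask query parameter
+// (see encodeQuery), and a parameterless "get" call maps to GET (see httpMethod).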
+func (r *RestTransport) DoRequest(sess *Session, service string, method string, args []interface{}, options *sl.Options, pResult interface{}) error { + restMethod := httpMethod(method, args) + + // Parse any method parameters and determine the HTTP method + var parameters []byte + if len(args) > 0 { + // parse the parameters + parameters, _ = json.Marshal( + map[string]interface{}{ + "parameters": args, + }) + } + + path := buildPath(service, method, options) + + resp, code, err := sendHTTPRequest( + sess, + path, + restMethod, + bytes.NewBuffer(parameters), + options) + + if err != nil { + //Preserve the original sl error + if _, ok := err.(sl.Error); ok { + return err + } + return sl.Error{Wrapped: err, StatusCode: code} + } + + err = findResponseError(code, resp) + if err != nil { + return err + } + + // Some APIs that normally return a collection, omit the []'s when the API returns a single value + returnType := reflect.TypeOf(pResult).String() + if strings.Index(returnType, "[]") == 1 && strings.Index(string(resp), "[") != 0 { + resp = []byte("[" + string(resp) + "]") + } + + // At this point, all that's left to do is parse the return value to the appropriate type, and return + // any parse errors (or nil if successful) + + err = nil + switch pResult.(type) { + case *[]uint8: + // exclude quotes + *pResult.(*[]uint8) = resp[1 : len(resp)-1] + case *datatypes.Void: + case *uint: + var val uint64 + val, err = strconv.ParseUint(string(resp), 0, 64) + if err == nil { + *pResult.(*uint) = uint(val) + } + case *bool: + *pResult.(*bool), err = strconv.ParseBool(string(resp)) + case *string: + str := string(resp) + strIdx := len(str) - 1 + if str == "null" { + str = "" + } else if str[0] == '"' && str[strIdx] == '"' { + rawStr := rawString{str} + err = json.Unmarshal([]byte(`{"val":`+str+`}`), &rawStr) + if err == nil { + str = rawStr.Val + } + } + *pResult.(*string) = str + default: + // Must be a json representation of one of the many softlayer datatypes + err = json.Unmarshal(resp, pResult) + } + + if err != nil { + err = sl.Error{Message: err.Error(), Wrapped: err} + } + + return err +} + +type rawString struct { + Val string +} + +func buildPath(service string, method string, options *sl.Options) string { + path := service + + if options.Id != nil { + path = path + "/" + strconv.Itoa(*options.Id) + } + + // omit the API method name if the method represents one of the basic REST methods + if method != "getObject" && method != "deleteObject" && method != "createObject" && + method != "editObject" && method != "editObjects" { + path = path + "/" + method + } + + return path + ".json" +} + +func encodeQuery(opts *sl.Options) string { + query := new(url.URL).Query() + + if opts.Mask != "" { + query.Add("objectMask", opts.Mask) + } + + if opts.Filter != "" { + query.Add("objectFilter", opts.Filter) + } + + // resultLimit=, + // If offset unspecified, default to 0 + if opts.Limit != nil { + startOffset := 0 + if opts.Offset != nil { + startOffset = *opts.Offset + } + + query.Add("resultLimit", fmt.Sprintf("%d,%d", startOffset, *opts.Limit)) + } + + return query.Encode() +} + +func sendHTTPRequest( + sess *Session, path string, requestType string, + requestBody *bytes.Buffer, options *sl.Options) ([]byte, int, error) { + + retries := sess.Retries + if retries < 2 { + return makeHTTPRequest(sess, path, requestType, requestBody, options) + } + + wait := sess.RetryWait + if wait == 0 { + wait = DefaultRetryWait + } + + return tryHTTPRequest(retries, wait, sess, path, requestType, requestBody, 
options) +} + +func tryHTTPRequest( + retries int, wait time.Duration, sess *Session, + path string, requestType string, requestBody *bytes.Buffer, + options *sl.Options) ([]byte, int, error) { + + resp, code, err := makeHTTPRequest(sess, path, requestType, requestBody, options) + if err != nil { + if !isRetryable(err) { + return resp, code, err + } + + if retries--; retries > 0 { + jitter := time.Duration(rand.Int63n(int64(wait))) + wait = wait + jitter/2 + time.Sleep(wait) + return tryHTTPRequest( + retries, wait, sess, path, requestType, requestBody, options) + } + } + + return resp, code, err +} + +func makeHTTPRequest( + session *Session, path string, requestType string, + requestBody *bytes.Buffer, options *sl.Options) ([]byte, int, error) { + log := Logger + + client := session.HTTPClient + if client == nil { + client = &http.Client{} + } + + client.Timeout = DefaultTimeout + if session.Timeout != 0 { + client.Timeout = session.Timeout + } + + var url string + if session.Endpoint == "" { + url = url + DefaultEndpoint + } else { + url = url + session.Endpoint + } + url = fmt.Sprintf("%s/%s", strings.TrimRight(url, "/"), path) + req, err := http.NewRequest(requestType, url, requestBody) + if err != nil { + return nil, 0, err + } + + if session.APIKey != "" { + req.SetBasicAuth(session.UserName, session.APIKey) + } else if session.AuthToken != "" { + req.SetBasicAuth(fmt.Sprintf("%d", session.UserId), session.AuthToken) + } + + // For cases where session is built from the raw structure and not using New() , the UserAgent would be empty + if session.userAgent == "" { + session.userAgent = getDefaultUserAgent() + } + + req.Header.Set("User-Agent", session.userAgent) + + if session.Headers != nil { + for key, value := range session.Headers { + req.Header.Set(key, value) + } + } + + req.URL.RawQuery = encodeQuery(options) + + if session.Debug { + log.Println("[DEBUG] Request URL: ", requestType, req.URL) + log.Println("[DEBUG] Parameters: ", requestBody.String()) + } + + resp, err := client.Do(req) + if err != nil { + statusCode := 520 + if resp != nil && resp.StatusCode != 0 { + statusCode = resp.StatusCode + } + + if isTimeout(err) { + statusCode = 599 + } + + return nil, statusCode, err + } + + defer resp.Body.Close() + + responseBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, resp.StatusCode, err + } + + if session.Debug { + log.Println("[DEBUG] Status Code: ", resp.StatusCode) + log.Println("[DEBUG] Response: ", string(responseBody)) + } + err = findResponseError(resp.StatusCode, responseBody) + return responseBody, resp.StatusCode, err +} + +func httpMethod(name string, args []interface{}) string { + if name == "deleteObject" { + return "DELETE" + } else if name == "editObject" || name == "editObjects" { + return "PUT" + } else if name == "createObject" || name == "createObjects" || len(args) > 0 { + return "POST" + } + + return "GET" +} + +func findResponseError(code int, resp []byte) error { + if code < 200 || code > 299 { + e := sl.Error{StatusCode: code} + err := json.Unmarshal(resp, &e) + // If unparseable, wrap the json error + if err != nil { + e.Wrapped = err + e.Message = err.Error() + } + return e + } + return nil +} diff --git a/vendor/github.com/softlayer/softlayer-go/session/session.go b/vendor/github.com/softlayer/softlayer-go/session/session.go new file mode 100644 index 000000000000..f82635744b36 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/session/session.go @@ -0,0 +1,359 @@ +/** + * Copyright 2016 IBM Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package session + +import ( + "fmt" + "log" + "math/rand" + "net" + "net/http" + "os" + "os/user" + "runtime" + "strings" + "time" + + "github.com/softlayer/softlayer-go/config" + "github.com/softlayer/softlayer-go/sl" +) + +// Logger is the logger used by the SoftLayer session package. Can be overridden by the user. +var Logger *log.Logger + +func init() { + // initialize the logger used by the session package. + Logger = log.New(os.Stderr, "", log.LstdFlags) +} + +// DefaultEndpoint is the default endpoint for API calls, when no override +// is provided. +const DefaultEndpoint = "https://api.softlayer.com/rest/v3" + +var retryableErrorCodes = []string{"SoftLayer_Exception_WebService_RateLimitExceeded"} + +// TransportHandler interface for the protocol-specific handling of API requests. +type TransportHandler interface { + // DoRequest is the protocol-specific handler for making API requests. + // + // sess is a reference to the current session object, where authentication and + // endpoint information can be found. + // + // service and method are the SoftLayer service name and method name, exactly as they + // are documented at http://sldn.softlayer.com/reference/softlayerapi (i.e., with the + // 'SoftLayer_' prefix and properly cased. + // + // args is a slice of arguments required for the service method being invoked. The + // types of each argument varies. See the method definition in the services package + // for the expected type of each argument. + // + // options is an sl.Options struct, containing any mask, filter, or result limit values + // to be applied. + // + // pResult is a pointer to a variable to be populated with the result of the API call. + // DoRequest should ensure that the native API response (i.e., XML or JSON) is correctly + // unmarshaled into the result structure. + // + // A sl.Error is returned, and can be (with a type assertion) inspected for details of + // the error (http code, API error message, etc.), or simply handled as a generic error, + // (in which case no type assertion would be necessary) + DoRequest( + sess *Session, + service string, + method string, + args []interface{}, + options *sl.Options, + pResult interface{}) error +} + +const ( + DefaultTimeout = time.Second * 120 + DefaultRetryWait = time.Second * 3 +) + +// Session stores the information required for communication with the SoftLayer +// API +type Session struct { + // UserName is the name of the SoftLayer API user + UserName string + + // ApiKey is the secret for making API calls + APIKey string + + // Endpoint is the SoftLayer API endpoint to communicate with + Endpoint string + + // UserId is the user id for token-based authentication + UserId int + + // AuthToken is the token secret for token-based authentication + AuthToken string + + // Debug controls logging of request details (URI, parameters, etc.) + Debug bool + + // The handler whose DoRequest() function will be called for each API request. 
+ // Handles the request and any response parsing specific to the desired protocol + // (e.g., REST). Set automatically for a new Session, based on the + // provided Endpoint. + TransportHandler TransportHandler + + // HTTPClient This allows a custom user configured HTTP Client. + HTTPClient *http.Client + + // Custom Headers to be used on each request (Currently only for rest) + Headers map[string]string + + // Timeout specifies a time limit for http requests made by this + // session. Requests that take longer that the specified timeout + // will result in an error. + Timeout time.Duration + + // Retries is the number of times to retry a connection that failed due to a timeout. + Retries int + + // RetryWait minimum wait time to retry a request + RetryWait time.Duration + + // userAgent is the user agent to send with each API request + // User shouldn't be able to change or set the base user agent + userAgent string +} + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// New creates and returns a pointer to a new session object. It takes up to +// four parameters, all of which are optional. If specified, they will be +// interpreted in the following sequence: +// +// 1. UserName +// 2. Api Key +// 3. Endpoint +// 4. Timeout +// +// If one or more are omitted, New() will attempt to retrieve these values from +// the environment, and the ~/.softlayer config file, in that order. +func New(args ...interface{}) *Session { + keys := map[string]int{"username": 0, "api_key": 1, "endpoint_url": 2, "timeout": 3} + values := []string{"", "", "", ""} + + for i := 0; i < len(args); i++ { + values[i] = args[i].(string) + } + + // Default to the environment variables + + // Prioritize SL_USERNAME + envFallback("SL_USERNAME", &values[keys["username"]]) + envFallback("SOFTLAYER_USERNAME", &values[keys["username"]]) + + // Prioritize SL_API_KEY + envFallback("SL_API_KEY", &values[keys["api_key"]]) + envFallback("SOFTLAYER_API_KEY", &values[keys["api_key"]]) + + // Prioritize SL_ENDPOINT_URL + envFallback("SL_ENDPOINT_URL", &values[keys["endpoint_url"]]) + envFallback("SOFTLAYER_ENDPOINT_URL", &values[keys["endpoint_url"]]) + + envFallback("SL_TIMEOUT", &values[keys["timeout"]]) + envFallback("SOFTLAYER_TIMEOUT", &values[keys["timeout"]]) + + // Read ~/.softlayer for configuration + var homeDir string + u, err := user.Current() + if err != nil { + for _, name := range []string{"HOME", "USERPROFILE"} { // *nix, windows + if dir := os.Getenv(name); dir != "" { + homeDir = dir + break + } + } + } else { + homeDir = u.HomeDir + } + + if homeDir != "" { + configPath := fmt.Sprintf("%s/.softlayer", homeDir) + if _, err = os.Stat(configPath); !os.IsNotExist(err) { + // config file exists + file, err := config.LoadFile(configPath) + if err != nil { + log.Println(fmt.Sprintf("[WARN] session: Could not parse %s : %s", configPath, err)) + } else { + for k, v := range keys { + value, ok := file.Get("softlayer", k) + if ok && values[v] == "" { + values[v] = value + } + } + } + } + } else { + log.Println("[WARN] session: home dir could not be determined. 
Skipping read of ~/.softlayer.") + } + + endpointURL := values[keys["endpoint_url"]] + if endpointURL == "" { + endpointURL = DefaultEndpoint + } + + sess := &Session{ + UserName: values[keys["username"]], + APIKey: values[keys["api_key"]], + Endpoint: endpointURL, + userAgent: getDefaultUserAgent(), + } + + timeout := values[keys["timeout"]] + if timeout != "" { + timeoutDuration, err := time.ParseDuration(fmt.Sprintf("%ss", timeout)) + if err == nil { + sess.Timeout = timeoutDuration + } + } + + sess.RetryWait = DefaultRetryWait + + return sess +} + +// DoRequest hands off the processing to the assigned transport handler. It is +// normally called internally by the service objects, but is exported so that it can +// be invoked directly by client code in exceptional cases where direct control is +// needed over one of the parameters. +// +// For a description of parameters, see TransportHandler.DoRequest in this package +func (r *Session) DoRequest(service string, method string, args []interface{}, options *sl.Options, pResult interface{}) error { + if r.TransportHandler == nil { + r.TransportHandler = getDefaultTransport(r.Endpoint) + } + + return r.TransportHandler.DoRequest(r, service, method, args, options, pResult) +} + +// SetTimeout creates a copy of the session and sets the passed timeout into it +// before returning it. +func (r *Session) SetTimeout(timeout time.Duration) *Session { + var s Session + s = *r + s.Timeout = timeout + + return &s +} + +// SetRetries creates a copy of the session and sets the passed retries into it +// before returning it. +func (r *Session) SetRetries(retries int) *Session { + var s Session + s = *r + s.Retries = retries + + return &s +} + +// SetRetryWait creates a copy of the session and sets the passed retryWait into it +// before returning it. 
+func (r *Session) SetRetryWait(retryWait time.Duration) *Session { + var s Session + s = *r + s.RetryWait = retryWait + + return &s +} + +// AppendUserAgent allows higher level application to identify themselves by +// appending to the useragent string +func (r *Session) AppendUserAgent(agent string) { + if r.userAgent == "" { + r.userAgent = getDefaultUserAgent() + } + + if agent != "" { + r.userAgent += " " + agent + } +} + +// ResetUserAgent resets the current user agent to the default value +func (r *Session) ResetUserAgent() { + r.userAgent = getDefaultUserAgent() +} + +func envFallback(keyName string, value *string) { + if *value == "" { + *value = os.Getenv(keyName) + } +} + +func getDefaultTransport(endpointURL string) TransportHandler { + var transportHandler TransportHandler + + if strings.Contains(endpointURL, "/xmlrpc/") { + transportHandler = &XmlRpcTransport{} + } else { + transportHandler = &RestTransport{} + } + + return transportHandler +} + +func isTimeout(err error) bool { + if slErr, ok := err.(sl.Error); ok { + switch slErr.StatusCode { + case 408, 504, 599: + return true + } + } + + if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + return true + } + + if netErr, ok := err.(*net.OpError); ok && netErr.Timeout() { + return true + } + + if netErr, ok := err.(net.UnknownNetworkError); ok && netErr.Timeout() { + return true + } + + return false +} + +func hasRetryableCode(err error) bool { + for _, code := range retryableErrorCodes { + if slErr, ok := err.(sl.Error); ok { + if slErr.Exception == code { + return true + } + } + } + return false +} + +func isRetryable(err error) bool { + return isTimeout(err) || hasRetryableCode(err) +} + +func getDefaultUserAgent() string { + return fmt.Sprintf("softlayer-go/%s (%s;%s;%s)", sl.Version.String(), + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + ) +} diff --git a/vendor/github.com/softlayer/softlayer-go/session/xmlrpc.go b/vendor/github.com/softlayer/softlayer-go/session/xmlrpc.go new file mode 100644 index 000000000000..a373f3e33c63 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/session/xmlrpc.go @@ -0,0 +1,253 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package session + +import ( + "encoding/json" + "fmt" + "math/rand" + "net/http" + "net/http/httputil" + "strings" + "time" + + "github.com/renier/xmlrpc" + "github.com/softlayer/softlayer-go/sl" +) + +// Debugging RoundTripper +type debugRoundTripper struct{} + +func (mrt debugRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { + log := Logger + log.Println("->>>Request:") + dumpedReq, _ := httputil.DumpRequestOut(request, true) + log.Println(string(dumpedReq)) + + response, err := http.DefaultTransport.RoundTrip(request) + if err != nil { + log.Println("Error:", err) + return response, err + } + + log.Println("\n\n<<<-Response:") + dumpedResp, _ := httputil.DumpResponse(response, true) + log.Println(string(dumpedResp)) + + return response, err +} + +// XML-RPC Transport +type XmlRpcTransport struct{} + +func (x *XmlRpcTransport) DoRequest( + sess *Session, + service string, + method string, + args []interface{}, + options *sl.Options, + pResult interface{}, +) error { + + var err error + serviceUrl := fmt.Sprintf("%s/%s", strings.TrimRight(sess.Endpoint, "/"), service) + + timeout := DefaultTimeout + if sess.Timeout != 0 { + timeout = sess.Timeout + } + + // Declaring client outside of the if /else. So we can set the correct http transport based if it is TLS or not + var client *xmlrpc.Client + if sess.HTTPClient != nil && sess.HTTPClient.Transport != nil { + client, err = xmlrpc.NewClient(serviceUrl, sess.HTTPClient.Transport, timeout) + } else { + var roundTripper http.RoundTripper + if sess.Debug { + roundTripper = debugRoundTripper{} + } + + client, err = xmlrpc.NewClient(serviceUrl, roundTripper, timeout) + } + //Verify no errors happened in creating the xmlrpc client + if err != nil { + return fmt.Errorf("Could not create an xmlrpc client for %s: %s", service, err) + } + + authenticate := map[string]interface{}{} + if sess.UserName != "" { + authenticate["username"] = sess.UserName + } + + if sess.APIKey != "" { + authenticate["apiKey"] = sess.APIKey + } + + if sess.UserId != 0 { + authenticate["userId"] = sess.UserId + authenticate["complexType"] = "PortalLoginToken" + } + + if sess.AuthToken != "" { + authenticate["authToken"] = sess.AuthToken + authenticate["complexType"] = "PortalLoginToken" + } + + // For cases where session is built from the raw structure and not using New() , the UserAgent would be empty + if sess.userAgent == "" { + sess.userAgent = getDefaultUserAgent() + } + + headers := map[string]interface{}{} + headers["User-Agent"] = sess.userAgent + + if len(authenticate) > 0 { + headers["authenticate"] = authenticate + } + + if options.Id != nil { + headers[fmt.Sprintf("%sInitParameters", service)] = map[string]int{ + "id": *options.Id, + } + } + + mask := options.Mask + if mask != "" { + if !strings.HasPrefix(mask, "mask[") && !strings.Contains(mask, ";") && strings.Contains(mask, ",") { + mask = fmt.Sprintf("mask[%s]", mask) + headers["SoftLayer_ObjectMask"] = map[string]string{"mask": mask} + } else { + headers[fmt.Sprintf("%sObjectMask", service)] = + map[string]interface{}{"mask": genXMLMask(mask)} + } + } + + if options.Filter != "" { + // FIXME: This json unmarshaling presents a performance problem, + // since the filter builder marshals a data structure to json. + // This then undoes that step to pass it to the xmlrpc request. + // It would be better to get the umarshaled data structure + // from the filter builder, but that will require changes to the + // public API in Options. 
+ objFilter := map[string]interface{}{} + err := json.Unmarshal([]byte(options.Filter), &objFilter) + if err != nil { + return fmt.Errorf("Error encoding object filter: %s", err) + } + headers[fmt.Sprintf("%sObjectFilter", service)] = objFilter + } + + if options.Limit != nil { + offset := 0 + if options.Offset != nil { + offset = *options.Offset + } + + headers["resultLimit"] = map[string]int{ + "limit": *options.Limit, + "offset": offset, + } + } + + // Add incoming arguments to xmlrpc parameter array + params := []interface{}{} + + if len(headers) > 0 { + params = append(params, map[string]interface{}{"headers": headers}) + } + + for _, arg := range args { + params = append(params, arg) + } + + retries := sess.Retries + if retries < 2 { + err = client.Call(method, params, pResult) + } else { + wait := sess.RetryWait + if wait == 0 { + wait = DefaultRetryWait + } + + err = makeXmlRequest(retries, wait, client, method, params, pResult) + } + + if xmlRpcError, ok := err.(*xmlrpc.XmlRpcError); ok { + err = sl.Error{ + StatusCode: xmlRpcError.HttpStatusCode, + Exception: xmlRpcError.Code.(string), + Message: xmlRpcError.Err, + } + } + return err +} + +func makeXmlRequest( + retries int, wait time.Duration, client *xmlrpc.Client, + method string, params []interface{}, pResult interface{}) error { + + err := client.Call(method, params, pResult) + + if xmlRpcError, ok := err.(*xmlrpc.XmlRpcError); ok { + err = sl.Error{ + StatusCode: xmlRpcError.HttpStatusCode, + Exception: xmlRpcError.Code.(string), + Message: xmlRpcError.Err, + } + } + + if err != nil { + if !isRetryable(err) { + return err + } + + if retries--; retries > 0 { + jitter := time.Duration(rand.Int63n(int64(wait))) + wait = wait + jitter/2 + time.Sleep(wait) + return makeXmlRequest( + retries, wait, client, method, params, pResult) + } + } + + return err +} + +func genXMLMask(mask string) interface{} { + objectMask := map[string]interface{}{} + for _, item := range strings.Split(mask, ";") { + if !strings.Contains(item, ".") { + objectMask[item] = []string{} + continue + } + + level := objectMask + names := strings.Split(item, ".") + totalNames := len(names) + for i, name := range names { + if i == totalNames-1 { + level[name] = []string{} + continue + } + + level[name] = map[string]interface{}{} + level = level[name].(map[string]interface{}) + } + } + + return objectMask +} diff --git a/vendor/github.com/softlayer/softlayer-go/sl/errors.go b/vendor/github.com/softlayer/softlayer-go/sl/errors.go new file mode 100644 index 000000000000..fe638ab62f66 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/sl/errors.go @@ -0,0 +1,49 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sl + +import "fmt" + +// Error contains detailed information about an API error, which can be useful +// for debugging, or when finer error handling is required than just the mere +// presence or absence of an error. 
+// +// Error implements the error interface +type Error struct { + StatusCode int + Exception string `json:"code"` + Message string `json:"error"` + Wrapped error +} + +func (r Error) Error() string { + if r.Wrapped != nil { + return r.Wrapped.Error() + } + + var msg string + if r.Exception != "" { + msg = r.Exception + ": " + } + if r.Message != "" { + msg = msg + r.Message + " " + } + if r.StatusCode != 0 { + msg = fmt.Sprintf("%s(HTTP %d)", msg, r.StatusCode) + } + return msg +} diff --git a/vendor/github.com/softlayer/softlayer-go/sl/helpers.go b/vendor/github.com/softlayer/softlayer-go/sl/helpers.go new file mode 100644 index 000000000000..6a3090672cbd --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/sl/helpers.go @@ -0,0 +1,175 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package sl has convenience functions for returning pointers to values +package sl + +import ( + "reflect" + "strings" + "time" + + "github.com/softlayer/softlayer-go/datatypes" +) + +// Int returns a pointer to the int value provided +func Int(v int) *int { + return &v +} + +// Uint returns a pointer to the uint value provided +func Uint(v uint) *uint { + return &v +} + +// String returns a pointer to the string value provided +func String(v string) *string { + return &v +} + +// Bool returns a pointer to the bool value provided +func Bool(v bool) *bool { + return &v +} + +// Time converts the time.Time value provided to a datatypes.Time value, +// and returns a pointer to it +func Time(v time.Time) *datatypes.Time { + r := datatypes.Time{Time: v} + return &r +} + +// Float converts the float value provided to a datatypes.Float64 value, +// and returns a pointer to it +func Float(v float64) *datatypes.Float64 { + r := datatypes.Float64(v) + return &r +} + +// Convenience functions to simplify dereference of datatype properties + +// Get returns the value of p, either p itself, or, if p is a pointer, the +// value that p points to. d is an optional default value to be returned +// in the event that p is nil. If d is not specified, and p is nil, a +// type-appropriate zero-value is returned instead. +func Get(p interface{}, d ...interface{}) interface{} { + var ( + val interface{} + ok bool + ) + + if val, ok = GetOk(p); ok { + return val + } + + if len(d) > 0 { + return d[0] + } + + return val +} + +// GetOk returns the value of p, either p itself, or, if p is a pointer, the +// value that p points to. If p is nil, a type-appropriate zero-value is +// returned instead. If p is a value or non-nil pointer, the second return +// value will be true. Otherwise, it will be false. +func GetOk(p interface{}) (interface{}, bool) { + t := reflect.TypeOf(p) + + // if p is a non-pointer, just return it + if t.Kind() != reflect.Ptr { + return p, true + } + + // p is a pointer. If non-nil, return the value pointed to + v := reflect.Indirect(reflect.ValueOf(p)) + if v.IsValid() { + return v.Interface(), true + } + + // p is a nil pointer. 
Return the zero value for the pointed-to type + return reflect.Zero(t.Elem()).Interface(), false +} + +// Grab returns the value specified by the path given, +// starting from the struct s. +// If at any point in the path the lookup falls short +// (i.e. a field is not found), or if the last field in the path is nil +// itself, a type-appropriate zero-value is returned. +// This behavior can be overidden by providing a default value. +// +// This is useful for getting values our of deeply nested structures +// Example: val := sl.Grab(virtualGuest, "Datacenter.Name") +func Grab(s interface{}, path string, d ...interface{}) interface{} { + var ( + val interface{} + ok bool + ) + + if val, ok = GrabOk(s, path); ok { + return val + } + + if len(d) > 0 { + return d[0] + } + + return val +} + +// GrabOk returns the value specified by the path given, +// starting from the struct s. +// If at any point in the path the lookup falls short +// (i.e. a field is not found), or if the last field in the path is nil +// itself, a type-appropriate zero-value is returned. +// It returns a second value, a boolean, which will be false if it failed +// to lookup the value, including if the last field in the path was nil. +// +// This is useful for getting values our of deeply nested structures +// Example: val, ok := sl.GrabOk(virtualGuest, "Datacenter.Name") +func GrabOk(s interface{}, path string) (interface{}, bool) { + t := reflect.TypeOf(s) + if t.Kind() != reflect.Struct { + return nil, false + } + + dotIndex := strings.Index(path, ".") + if dotIndex == -1 { + dotIndex = len(path) + } + + fieldName := path[0:dotIndex] + val := reflect.ValueOf(s) + fieldVal := val.FieldByName(fieldName) + if fieldVal.Kind() == reflect.Ptr { + if fieldVal.IsNil() { + return reflect.Zero(fieldVal.Type().Elem()).Interface(), false + } + + fieldVal = reflect.Indirect(fieldVal) + } + + result, ok := GetOk(fieldVal.Interface()) + if !ok { + return result, ok + } + + if dotIndex == len(path) { + return result, ok + } + + return GrabOk(result, path[dotIndex+1:len(path)]) +} diff --git a/vendor/github.com/softlayer/softlayer-go/sl/options.go b/vendor/github.com/softlayer/softlayer-go/sl/options.go new file mode 100644 index 000000000000..5d1dd0aaf6c3 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/sl/options.go @@ -0,0 +1,27 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sl + +// Options contains the individual query parameters that can be applied to +// a request. +type Options struct { + Id *int + Mask string + Filter string + Limit *int + Offset *int +} diff --git a/vendor/github.com/softlayer/softlayer-go/sl/version.go b/vendor/github.com/softlayer/softlayer-go/sl/version.go new file mode 100644 index 000000000000..8f36a8d09741 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/sl/version.go @@ -0,0 +1,47 @@ +/** + * Copyright 2016 IBM Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package sl + +import "fmt" + +type VersionInfo struct { + Major int + Minor int + Patch int + Pre string +} + +var Version = VersionInfo{ + Major: 0, + Minor: 1, + Patch: 0, + Pre: "alpha", +} + +func (v VersionInfo) String() string { + result := fmt.Sprintf("v%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.Pre != "" { + result += fmt.Sprintf("-%s", v.Pre) + } + + return result +} diff --git a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/LICENSE b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/LICENSE new file mode 100644 index 000000000000..efc75a2253ea --- /dev/null +++ b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2017-2018 Tencent Ltd. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
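
A minimal usage sketch of the vendored softlayer-go session and sl packages added above: it exercises the chainable session setters (SetTimeout, SetRetries, SetRetryWait) and the pointer helpers (sl.String, sl.Get, sl.Grab) exactly as their doc comments describe. The credential strings and the guest/datacenter structs are hypothetical stand-ins for illustration; everything else uses only identifiers defined in the vendored files shown earlier.

package main

import (
	"fmt"
	"time"

	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

// Hypothetical stand-ins for a SoftLayer datatype, used only to demonstrate
// the sl.Get / sl.Grab pointer helpers from sl/helpers.go.
type datacenter struct {
	Name *string
}

type guest struct {
	Hostname   *string
	Datacenter *datacenter
}

func main() {
	// session.New falls back to SL_USERNAME / SL_API_KEY (or the SOFTLAYER_*
	// variants) and ~/.softlayer when arguments are omitted, per the doc
	// comment on New. The values here are placeholders.
	sess := session.New("exampleUser", "exampleApiKey")

	// SetTimeout / SetRetries / SetRetryWait each return a copy of the
	// session, so per-call tuning leaves the base session untouched.
	tuned := sess.SetTimeout(30 * time.Second).
		SetRetries(3).
		SetRetryWait(2 * time.Second)

	fmt.Println("endpoint:", tuned.Endpoint) // DefaultEndpoint unless overridden

	// sl.Get dereferences a possibly-nil pointer, with an optional default.
	g := guest{Hostname: sl.String("web01")}
	fmt.Println(sl.Get(g.Hostname))               // "web01"
	fmt.Println(sl.Get(g.Datacenter, "no value")) // nil pointer, so the default is returned

	// sl.Grab walks a dotted path through nested struct fields and returns
	// the supplied default (or a zero value) if any step is nil.
	fmt.Println(sl.Grab(g, "Datacenter.Name", "unknown")) // "unknown"
}
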
diff --git a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/client.go b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/client.go new file mode 100644 index 000000000000..fc167bb40896 --- /dev/null +++ b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/client.go @@ -0,0 +1,270 @@ +package common + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "log" + "net/http" + "net/http/httputil" + "strconv" + "strings" + "time" + + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors" + tchttp "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile" +) + +type Client struct { + region string + httpClient *http.Client + httpProfile *profile.HttpProfile + profile *profile.ClientProfile + credential *Credential + signMethod string + unsignedPayload bool + debug bool +} + +func (c *Client) Send(request tchttp.Request, response tchttp.Response) (err error) { + if request.GetDomain() == "" { + domain := c.httpProfile.Endpoint + if domain == "" { + domain = tchttp.GetServiceDomain(request.GetService()) + } + request.SetDomain(domain) + } + + if request.GetHttpMethod() == "" { + request.SetHttpMethod(c.httpProfile.ReqMethod) + } + + tchttp.CompleteCommonParams(request, c.GetRegion()) + + if c.signMethod == "HmacSHA1" || c.signMethod == "HmacSHA256" { + return c.sendWithSignatureV1(request, response) + } else { + return c.sendWithSignatureV3(request, response) + } +} + +func (c *Client) sendWithSignatureV1(request tchttp.Request, response tchttp.Response) (err error) { + // TODO: not an elegant way, it should be done in common params, but finally it need to refactor + request.GetParams()["Language"] = c.profile.Language + err = tchttp.ConstructParams(request) + if err != nil { + return err + } + err = signRequest(request, c.credential, c.signMethod) + if err != nil { + return err + } + httpRequest, err := http.NewRequest(request.GetHttpMethod(), request.GetUrl(), request.GetBodyReader()) + if err != nil { + return err + } + if request.GetHttpMethod() == "POST" { + httpRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded") + } + if c.debug { + outbytes, err := httputil.DumpRequest(httpRequest, true) + if err != nil { + log.Printf("[ERROR] dump request failed because %s", err) + return err + } + log.Printf("[DEBUG] http request = %s", outbytes) + } + httpResponse, err := c.httpClient.Do(httpRequest) + if err != nil { + msg := fmt.Sprintf("Fail to get response because %s", err) + return errors.NewTencentCloudSDKError("ClientError.NetworkError", msg, "") + } + err = tchttp.ParseFromHttpResponse(httpResponse, response) + return err +} + +func (c *Client) sendWithSignatureV3(request tchttp.Request, response tchttp.Response) (err error) { + headers := map[string]string{ + "Host": request.GetDomain(), + "X-TC-Action": request.GetAction(), + "X-TC-Version": request.GetVersion(), + "X-TC-Timestamp": request.GetParams()["Timestamp"], + "X-TC-RequestClient": request.GetParams()["RequestClient"], + "X-TC-Language": c.profile.Language, + } + if c.region != "" { + headers["X-TC-Region"] = c.region + } + if c.credential.Token != "" { + headers["X-TC-Token"] = c.credential.Token + } + if request.GetHttpMethod() == "GET" { + headers["Content-Type"] = "application/x-www-form-urlencoded" + } else { + headers["Content-Type"] = "application/json" + } + + // start signature v3 process + + // build canonical request string + 
httpRequestMethod := request.GetHttpMethod() + canonicalURI := "/" + canonicalQueryString := "" + if httpRequestMethod == "GET" { + err = tchttp.ConstructParams(request) + if err != nil { + return err + } + params := make(map[string]string) + for key, value := range request.GetParams() { + params[key] = value + } + delete(params, "Action") + delete(params, "Version") + delete(params, "Nonce") + delete(params, "Region") + delete(params, "RequestClient") + delete(params, "Timestamp") + canonicalQueryString = tchttp.GetUrlQueriesEncoded(params) + } + canonicalHeaders := fmt.Sprintf("content-type:%s\nhost:%s\n", headers["Content-Type"], headers["Host"]) + signedHeaders := "content-type;host" + requestPayload := "" + if httpRequestMethod == "POST" { + b, err := json.Marshal(request) + if err != nil { + return err + } + requestPayload = string(b) + } + hashedRequestPayload := "" + if c.unsignedPayload { + hashedRequestPayload = sha256hex("UNSIGNED-PAYLOAD") + headers["X-TC-Content-SHA256"] = "UNSIGNED-PAYLOAD" + } else { + hashedRequestPayload = sha256hex(requestPayload) + } + canonicalRequest := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", + httpRequestMethod, + canonicalURI, + canonicalQueryString, + canonicalHeaders, + signedHeaders, + hashedRequestPayload) + //log.Println("canonicalRequest:", canonicalRequest) + + // build string to sign + algorithm := "TC3-HMAC-SHA256" + requestTimestamp := headers["X-TC-Timestamp"] + timestamp, _ := strconv.ParseInt(requestTimestamp, 10, 64) + t := time.Unix(timestamp, 0).UTC() + // must be the format 2006-01-02, ref to package time for more info + date := t.Format("2006-01-02") + credentialScope := fmt.Sprintf("%s/%s/tc3_request", date, request.GetService()) + hashedCanonicalRequest := sha256hex(canonicalRequest) + string2sign := fmt.Sprintf("%s\n%s\n%s\n%s", + algorithm, + requestTimestamp, + credentialScope, + hashedCanonicalRequest) + //log.Println("string2sign", string2sign) + + // sign string + secretDate := hmacsha256(date, "TC3"+c.credential.SecretKey) + secretService := hmacsha256(request.GetService(), secretDate) + secretKey := hmacsha256("tc3_request", secretService) + signature := hex.EncodeToString([]byte(hmacsha256(string2sign, secretKey))) + //log.Println("signature", signature) + + // build authorization + authorization := fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", + algorithm, + c.credential.SecretId, + credentialScope, + signedHeaders, + signature) + //log.Println("authorization", authorization) + + headers["Authorization"] = authorization + url := "https://" + request.GetDomain() + request.GetPath() + if canonicalQueryString != "" { + url = url + "?" 
+ canonicalQueryString + } + httpRequest, err := http.NewRequest(httpRequestMethod, url, strings.NewReader(requestPayload)) + if err != nil { + return err + } + for k, v := range headers { + httpRequest.Header[k] = []string{v} + } + if c.debug { + outbytes, err := httputil.DumpRequest(httpRequest, true) + if err != nil { + log.Printf("[ERROR] dump request failed because %s", err) + return err + } + log.Printf("[DEBUG] http request = %s", outbytes) + } + httpResponse, err := c.httpClient.Do(httpRequest) + if err != nil { + msg := fmt.Sprintf("Fail to get response because %s", err) + return errors.NewTencentCloudSDKError("ClientError.NetworkError", msg, "") + } + err = tchttp.ParseFromHttpResponse(httpResponse, response) + return err +} + +func (c *Client) GetRegion() string { + return c.region +} + +func (c *Client) Init(region string) *Client { + c.httpClient = &http.Client{} + c.region = region + c.signMethod = "TC3-HMAC-SHA256" + c.debug = false + log.SetFlags(log.LstdFlags | log.Lshortfile) + return c +} + +func (c *Client) WithSecretId(secretId, secretKey string) *Client { + c.credential = NewCredential(secretId, secretKey) + return c +} + +func (c *Client) WithCredential(cred *Credential) *Client { + c.credential = cred + return c +} + +func (c *Client) WithProfile(clientProfile *profile.ClientProfile) *Client { + c.profile = clientProfile + c.signMethod = clientProfile.SignMethod + c.unsignedPayload = clientProfile.UnsignedPayload + c.httpProfile = clientProfile.HttpProfile + c.debug = clientProfile.Debug + c.httpClient.Timeout = time.Duration(c.httpProfile.ReqTimeout) * time.Second + return c +} + +func (c *Client) WithSignatureMethod(method string) *Client { + c.signMethod = method + return c +} + +func (c *Client) WithHttpTransport(transport http.RoundTripper) *Client { + c.httpClient.Transport = transport + return c +} + +func (c *Client) WithDebug(flag bool) *Client { + c.debug = flag + return c +} + +func NewClientWithSecretId(secretId, secretKey, region string) (client *Client, err error) { + client = &Client{} + client.Init(region).WithSecretId(secretId, secretKey) + return +} diff --git a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/credentials.go b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/credentials.go new file mode 100644 index 000000000000..b734c137346e --- /dev/null +++ b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/credentials.go @@ -0,0 +1,58 @@ +package common + +type Credential struct { + SecretId string + SecretKey string + Token string +} + +func NewCredential(secretId, secretKey string) *Credential { + return &Credential{ + SecretId: secretId, + SecretKey: secretKey, + } +} + +func NewTokenCredential(secretId, secretKey, token string) *Credential { + return &Credential{ + SecretId: secretId, + SecretKey: secretKey, + Token: token, + } +} + +func (c *Credential) GetCredentialParams() map[string]string { + p := map[string]string{ + "SecretId": c.SecretId, + } + if c.Token != "" { + p["Token"] = c.Token + } + return p +} + +// Nowhere use them and we haven't well designed these structures and +// underlying method, which leads to the situation that it is hard to +// refactor it to interfaces. +// Hence they are removed and merged into Credential. 
+ +//type TokenCredential struct { +// SecretId string +// SecretKey string +// Token string +//} + +//func NewTokenCredential(secretId, secretKey, token string) *TokenCredential { +// return &TokenCredential{ +// SecretId: secretId, +// SecretKey: secretKey, +// Token: token, +// } +//} + +//func (c *TokenCredential) GetCredentialParams() map[string]string { +// return map[string]string{ +// "SecretId": c.SecretId, +// "Token": c.Token, +// } +//} diff --git a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors/errors.go b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors/errors.go new file mode 100644 index 000000000000..27589e59a162 --- /dev/null +++ b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors/errors.go @@ -0,0 +1,35 @@ +package errors + +import ( + "fmt" +) + +type TencentCloudSDKError struct { + Code string + Message string + RequestId string +} + +func (e *TencentCloudSDKError) Error() string { + return fmt.Sprintf("[TencentCloudSDKError] Code=%s, Message=%s, RequestId=%s", e.Code, e.Message, e.RequestId) +} + +func NewTencentCloudSDKError(code, message, requestId string) error { + return &TencentCloudSDKError{ + Code: code, + Message: message, + RequestId: requestId, + } +} + +func (e *TencentCloudSDKError) GetCode() string { + return e.Code +} + +func (e *TencentCloudSDKError) GetMessage() string { + return e.Message +} + +func (e *TencentCloudSDKError) GetRequestId() string { + return e.RequestId +} diff --git a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http/request.go b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http/request.go new file mode 100644 index 000000000000..4e812b40f994 --- /dev/null +++ b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http/request.go @@ -0,0 +1,233 @@ +package common + +import ( + "io" + //"log" + "math/rand" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +const ( + POST = "POST" + GET = "GET" + + RootDomain = "tencentcloudapi.com" + Path = "/" +) + +type Request interface { + GetAction() string + GetBodyReader() io.Reader + GetDomain() string + GetHttpMethod() string + GetParams() map[string]string + GetPath() string + GetService() string + GetUrl() string + GetVersion() string + SetDomain(string) + SetHttpMethod(string) +} + +type BaseRequest struct { + httpMethod string + domain string + path string + params map[string]string + formParams map[string]string + + service string + version string + action string +} + +func (r *BaseRequest) GetAction() string { + return r.action +} + +func (r *BaseRequest) GetHttpMethod() string { + return r.httpMethod +} + +func (r *BaseRequest) GetParams() map[string]string { + return r.params +} + +func (r *BaseRequest) GetPath() string { + return r.path +} + +func (r *BaseRequest) GetDomain() string { + return r.domain +} + +func (r *BaseRequest) SetDomain(domain string) { + r.domain = domain +} + +func (r *BaseRequest) SetHttpMethod(method string) { + switch strings.ToUpper(method) { + case POST: + { + r.httpMethod = POST + } + case GET: + { + r.httpMethod = GET + } + default: + { + r.httpMethod = GET + } + } +} + +func (r *BaseRequest) GetService() string { + return r.service +} + +func (r *BaseRequest) GetUrl() string { + if r.httpMethod == GET { + return "https://" + r.domain + r.path + "?" 
+ GetUrlQueriesEncoded(r.params) + } else if r.httpMethod == POST { + return "https://" + r.domain + r.path + } else { + return "" + } +} + +func (r *BaseRequest) GetVersion() string { + return r.version +} + +func GetUrlQueriesEncoded(params map[string]string) string { + values := url.Values{} + for key, value := range params { + if value != "" { + values.Add(key, value) + } + } + return values.Encode() +} + +func (r *BaseRequest) GetBodyReader() io.Reader { + if r.httpMethod == POST { + s := GetUrlQueriesEncoded(r.params) + return strings.NewReader(s) + } else { + return strings.NewReader("") + } +} + +func (r *BaseRequest) Init() *BaseRequest { + r.domain = "" + r.path = Path + r.params = make(map[string]string) + r.formParams = make(map[string]string) + return r +} + +func (r *BaseRequest) WithApiInfo(service, version, action string) *BaseRequest { + r.service = service + r.version = version + r.action = action + return r +} + +func GetServiceDomain(service string) (domain string) { + domain = service + "." + RootDomain + return +} + +func CompleteCommonParams(request Request, region string) { + params := request.GetParams() + params["Region"] = region + if request.GetVersion() != "" { + params["Version"] = request.GetVersion() + } + params["Action"] = request.GetAction() + params["Timestamp"] = strconv.FormatInt(time.Now().Unix(), 10) + params["Nonce"] = strconv.Itoa(rand.Int()) + params["RequestClient"] = "SDK_GO_3.0.171" +} + +func ConstructParams(req Request) (err error) { + value := reflect.ValueOf(req).Elem() + err = flatStructure(value, req, "") + //log.Printf("[DEBUG] params=%s", req.GetParams()) + return +} + +func flatStructure(value reflect.Value, request Request, prefix string) (err error) { + //log.Printf("[DEBUG] reflect value: %v", value.Type()) + valueType := value.Type() + for i := 0; i < valueType.NumField(); i++ { + tag := valueType.Field(i).Tag + nameTag, hasNameTag := tag.Lookup("name") + if !hasNameTag { + continue + } + field := value.Field(i) + kind := field.Kind() + if kind == reflect.Ptr && field.IsNil() { + continue + } + if kind == reflect.Ptr { + field = field.Elem() + kind = field.Kind() + } + key := prefix + nameTag + if kind == reflect.String { + s := field.String() + if s != "" { + request.GetParams()[key] = s + } + } else if kind == reflect.Bool { + request.GetParams()[key] = strconv.FormatBool(field.Bool()) + } else if kind == reflect.Int || kind == reflect.Int64 { + request.GetParams()[key] = strconv.FormatInt(field.Int(), 10) + } else if kind == reflect.Uint || kind == reflect.Uint64 { + request.GetParams()[key] = strconv.FormatUint(field.Uint(), 10) + } else if kind == reflect.Float64 { + request.GetParams()[key] = strconv.FormatFloat(field.Float(), 'f', -1, 64) + } else if kind == reflect.Slice { + list := value.Field(i) + for j := 0; j < list.Len(); j++ { + vj := list.Index(j) + key := prefix + nameTag + "." 
+ strconv.Itoa(j) + kind = vj.Kind() + if kind == reflect.Ptr && vj.IsNil() { + continue + } + if kind == reflect.Ptr { + vj = vj.Elem() + kind = vj.Kind() + } + if kind == reflect.String { + request.GetParams()[key] = vj.String() + } else if kind == reflect.Bool { + request.GetParams()[key] = strconv.FormatBool(vj.Bool()) + } else if kind == reflect.Int || kind == reflect.Int64 { + request.GetParams()[key] = strconv.FormatInt(vj.Int(), 10) + } else if kind == reflect.Uint || kind == reflect.Uint64 { + request.GetParams()[key] = strconv.FormatUint(vj.Uint(), 10) + } else if kind == reflect.Float64 { + request.GetParams()[key] = strconv.FormatFloat(vj.Float(), 'f', -1, 64) + } else { + if err = flatStructure(vj, request, key+"."); err != nil { + return + } + } + } + } else { + if err = flatStructure(reflect.ValueOf(field.Interface()), request, prefix+nameTag+"."); err != nil { + return + } + } + } + return +} diff --git a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http/response.go b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http/response.go new file mode 100644 index 000000000000..fd18e2064f73 --- /dev/null +++ b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http/response.go @@ -0,0 +1,81 @@ +package common + +import ( + "encoding/json" + "fmt" + "io/ioutil" + //"log" + "net/http" + + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors" +) + +type Response interface { + ParseErrorFromHTTPResponse(body []byte) error +} + +type BaseResponse struct { +} + +type ErrorResponse struct { + Response struct { + Error struct { + Code string `json:"Code"` + Message string `json:"Message"` + } `json:"Error,omitempty"` + RequestId string `json:"RequestId"` + } `json:"Response"` +} + +type DeprecatedAPIErrorResponse struct { + Code int `json:"code"` + Message string `json:"message"` + CodeDesc string `json:"codeDesc"` +} + +func (r *BaseResponse) ParseErrorFromHTTPResponse(body []byte) (err error) { + resp := &ErrorResponse{} + err = json.Unmarshal(body, resp) + if err != nil { + msg := fmt.Sprintf("Fail to parse json content: %s, because: %s", body, err) + return errors.NewTencentCloudSDKError("ClientError.ParseJsonError", msg, "") + } + if resp.Response.Error.Code != "" { + return errors.NewTencentCloudSDKError(resp.Response.Error.Code, resp.Response.Error.Message, resp.Response.RequestId) + } + + deprecated := &DeprecatedAPIErrorResponse{} + err = json.Unmarshal(body, deprecated) + if err != nil { + msg := fmt.Sprintf("Fail to parse json content: %s, because: %s", body, err) + return errors.NewTencentCloudSDKError("ClientError.ParseJsonError", msg, "") + } + if deprecated.Code != 0 { + return errors.NewTencentCloudSDKError(deprecated.CodeDesc, deprecated.Message, "") + } + return nil +} + +func ParseFromHttpResponse(hr *http.Response, response Response) (err error) { + defer hr.Body.Close() + body, err := ioutil.ReadAll(hr.Body) + if err != nil { + msg := fmt.Sprintf("Fail to read response body because %s", err) + return errors.NewTencentCloudSDKError("ClientError.IOError", msg, "") + } + if hr.StatusCode != 200 { + msg := fmt.Sprintf("Request fail with http status code: %s, with body: %s", hr.Status, body) + return errors.NewTencentCloudSDKError("ClientError.HttpStatusCodeError", msg, "") + } + //log.Printf("[DEBUG] Response Body=%s", body) + err = response.ParseErrorFromHTTPResponse(body) + if err != nil { + return + } + err = json.Unmarshal(body, &response) + if err != nil { + msg := 
fmt.Sprintf("Fail to parse json content: %s, because: %s", body, err) + return errors.NewTencentCloudSDKError("ClientError.ParseJsonError", msg, "") + } + return +} diff --git a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile/client_profile.go b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile/client_profile.go new file mode 100644 index 000000000000..bc876801c9d0 --- /dev/null +++ b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile/client_profile.go @@ -0,0 +1,23 @@ +package profile + +type ClientProfile struct { + HttpProfile *HttpProfile + // Valid choices: HmacSHA1, HmacSHA256, TC3-HMAC-SHA256. + // Default value is TC3-HMAC-SHA256. + SignMethod string + UnsignedPayload bool + // Valid choices: zh-CN, en-US. + // Default value is zh-CN. + Language string + Debug bool +} + +func NewClientProfile() *ClientProfile { + return &ClientProfile{ + HttpProfile: NewHttpProfile(), + SignMethod: "TC3-HMAC-SHA256", + UnsignedPayload: false, + Language: "zh-CN", + Debug: false, + } +} diff --git a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile/http_profile.go b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile/http_profile.go new file mode 100644 index 000000000000..cf633a8a0454 --- /dev/null +++ b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile/http_profile.go @@ -0,0 +1,19 @@ +package profile + +type HttpProfile struct { + ReqMethod string + ReqTimeout int + Endpoint string + // Deprecated, use Scheme instead + Protocol string + Scheme string +} + +func NewHttpProfile() *HttpProfile { + return &HttpProfile{ + ReqMethod: "POST", + ReqTimeout: 60, + Endpoint: "", + Scheme: "HTTPS", + } +} diff --git a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/sign.go b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/sign.go new file mode 100644 index 000000000000..0aa7b7355719 --- /dev/null +++ b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/sign.go @@ -0,0 +1,94 @@ +package common + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "sort" + + tchttp "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http" +) + +const ( + SHA256 = "HmacSHA256" + SHA1 = "HmacSHA1" +) + +func Sign(s, secretKey, method string) string { + hashed := hmac.New(sha1.New, []byte(secretKey)) + if method == SHA256 { + hashed = hmac.New(sha256.New, []byte(secretKey)) + } + hashed.Write([]byte(s)) + + return base64.StdEncoding.EncodeToString(hashed.Sum(nil)) +} + +func sha256hex(s string) string { + b := sha256.Sum256([]byte(s)) + return hex.EncodeToString(b[:]) +} + +func hmacsha256(s, key string) string { + hashed := hmac.New(sha256.New, []byte(key)) + hashed.Write([]byte(s)) + return string(hashed.Sum(nil)) +} + +func signRequest(request tchttp.Request, credential *Credential, method string) (err error) { + if method != SHA256 { + method = SHA1 + } + checkAuthParams(request, credential, method) + s := getStringToSign(request) + signature := Sign(s, credential.SecretKey, method) + request.GetParams()["Signature"] = signature + return +} + +func checkAuthParams(request tchttp.Request, credential *Credential, method string) { + params := request.GetParams() + credentialParams := credential.GetCredentialParams() + for key, value := range credentialParams { + params[key] = value + } + params["SignatureMethod"] = method + 
delete(params, "Signature") +} + +func getStringToSign(request tchttp.Request) string { + method := request.GetHttpMethod() + domain := request.GetDomain() + path := request.GetPath() + + var buf bytes.Buffer + buf.WriteString(method) + buf.WriteString(domain) + buf.WriteString(path) + buf.WriteString("?") + + params := request.GetParams() + // sort params + keys := make([]string, 0, len(params)) + for k, _ := range params { + keys = append(keys, k) + } + sort.Strings(keys) + + for i := range keys { + k := keys[i] + // TODO: check if server side allows empty value in url. + if params[k] == "" { + continue + } + buf.WriteString(k) + buf.WriteString("=") + buf.WriteString(params[k]) + buf.WriteString("&") + } + buf.Truncate(buf.Len() - 1) + return buf.String() +} diff --git a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/types.go b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/types.go new file mode 100644 index 000000000000..ec2c786dbf3d --- /dev/null +++ b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/types.go @@ -0,0 +1,47 @@ +package common + +func IntPtr(v int) *int { + return &v +} + +func Int64Ptr(v int64) *int64 { + return &v +} + +func UintPtr(v uint) *uint { + return &v +} + +func Uint64Ptr(v uint64) *uint64 { + return &v +} + +func Float64Ptr(v float64) *float64 { + return &v +} + +func StringPtr(v string) *string { + return &v +} + +func StringValues(ptrs []*string) []string { + values := make([]string, len(ptrs)) + for i := 0; i < len(ptrs); i++ { + if ptrs[i] != nil { + values[i] = *ptrs[i] + } + } + return values +} + +func StringPtrs(vals []string) []*string { + ptrs := make([]*string, len(vals)) + for i := 0; i < len(vals); i++ { + ptrs[i] = &vals[i] + } + return ptrs +} + +func BoolPtr(v bool) *bool { + return &v +} diff --git a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312/client.go b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312/client.go new file mode 100644 index 000000000000..a5078a56763d --- /dev/null +++ b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312/client.go @@ -0,0 +1,1855 @@ +// Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v20170312 + +import ( + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common" + tchttp "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile" +) + +const APIVersion = "2017-03-12" + +type Client struct { + common.Client +} + +// Deprecated +func NewClientWithSecretId(secretId, secretKey, region string) (client *Client, err error) { + cpf := profile.NewClientProfile() + client = &Client{} + client.Init(region).WithSecretId(secretId, secretKey).WithProfile(cpf) + return +} + +func NewClient(credential *common.Credential, region string, clientProfile *profile.ClientProfile) (client *Client, err error) { + client = &Client{} + client.Init(region). + WithCredential(credential). + WithProfile(clientProfile) + return +} + + +func NewAllocateHostsRequest() (request *AllocateHostsRequest) { + request = &AllocateHostsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "AllocateHosts") + return +} + +func NewAllocateHostsResponse() (response *AllocateHostsResponse) { + response = &AllocateHostsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (AllocateHosts) 用于创建一个或多个指定配置的CDH实例。 +// * 当HostChargeType为PREPAID时,必须指定HostChargePrepaid参数。 +func (c *Client) AllocateHosts(request *AllocateHostsRequest) (response *AllocateHostsResponse, err error) { + if request == nil { + request = NewAllocateHostsRequest() + } + response = NewAllocateHostsResponse() + err = c.Send(request, response) + return +} + +func NewAssociateInstancesKeyPairsRequest() (request *AssociateInstancesKeyPairsRequest) { + request = &AssociateInstancesKeyPairsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "AssociateInstancesKeyPairs") + return +} + +func NewAssociateInstancesKeyPairsResponse() (response *AssociateInstancesKeyPairsResponse) { + response = &AssociateInstancesKeyPairsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (AssociateInstancesKeyPairs) 用于将密钥绑定到实例上。 +// +// * 将密钥的公钥写入到实例的`SSH`配置当中,用户就可以通过该密钥的私钥来登录实例。 +// * 如果实例原来绑定过密钥,那么原来的密钥将失效。 +// * 如果实例原来是通过密码登录,绑定密钥后无法使用密码登录。 +// * 支持批量操作。每次请求批量实例的上限为100。如果批量实例存在不允许操作的实例,操作会以特定错误码返回。 +func (c *Client) AssociateInstancesKeyPairs(request *AssociateInstancesKeyPairsRequest) (response *AssociateInstancesKeyPairsResponse, err error) { + if request == nil { + request = NewAssociateInstancesKeyPairsRequest() + } + response = NewAssociateInstancesKeyPairsResponse() + err = c.Send(request, response) + return +} + +func NewAssociateSecurityGroupsRequest() (request *AssociateSecurityGroupsRequest) { + request = &AssociateSecurityGroupsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "AssociateSecurityGroups") + return +} + +func NewAssociateSecurityGroupsResponse() (response *AssociateSecurityGroupsResponse) { + response = &AssociateSecurityGroupsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (AssociateSecurityGroups) 用于绑定安全组到指定实例。 +// * 实例操作结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表操作成功。 +func (c *Client) AssociateSecurityGroups(request *AssociateSecurityGroupsRequest) (response *AssociateSecurityGroupsResponse, err error) { + if request == nil { 
+ request = NewAssociateSecurityGroupsRequest() + } + response = NewAssociateSecurityGroupsResponse() + err = c.Send(request, response) + return +} + +func NewCreateDisasterRecoverGroupRequest() (request *CreateDisasterRecoverGroupRequest) { + request = &CreateDisasterRecoverGroupRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "CreateDisasterRecoverGroup") + return +} + +func NewCreateDisasterRecoverGroupResponse() (response *CreateDisasterRecoverGroupResponse) { + response = &CreateDisasterRecoverGroupResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (CreateDisasterRecoverGroup)用于创建[分散置放群组](https://cloud.tencent.com/document/product/213/15486)。创建好的置放群组,可在[创建实例](https://cloud.tencent.com/document/api/213/15730)时指定。 +func (c *Client) CreateDisasterRecoverGroup(request *CreateDisasterRecoverGroupRequest) (response *CreateDisasterRecoverGroupResponse, err error) { + if request == nil { + request = NewCreateDisasterRecoverGroupRequest() + } + response = NewCreateDisasterRecoverGroupResponse() + err = c.Send(request, response) + return +} + +func NewCreateImageRequest() (request *CreateImageRequest) { + request = &CreateImageRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "CreateImage") + return +} + +func NewCreateImageResponse() (response *CreateImageResponse) { + response = &CreateImageResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(CreateImage)用于将实例的系统盘制作为新镜像,创建后的镜像可以用于创建实例。 +func (c *Client) CreateImage(request *CreateImageRequest) (response *CreateImageResponse, err error) { + if request == nil { + request = NewCreateImageRequest() + } + response = NewCreateImageResponse() + err = c.Send(request, response) + return +} + +func NewCreateKeyPairRequest() (request *CreateKeyPairRequest) { + request = &CreateKeyPairRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "CreateKeyPair") + return +} + +func NewCreateKeyPairResponse() (response *CreateKeyPairResponse) { + response = &CreateKeyPairResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (CreateKeyPair) 用于创建一个 `OpenSSH RSA` 密钥对,可以用于登录 `Linux` 实例。 +// +// * 开发者只需指定密钥对名称,即可由系统自动创建密钥对,并返回所生成的密钥对的 `ID` 及其公钥、私钥的内容。 +// * 密钥对名称不能和已经存在的密钥对的名称重复。 +// * 私钥的内容可以保存到文件中作为 `SSH` 的一种认证方式。 +// * 腾讯云不会保存用户的私钥,请妥善保管。 +func (c *Client) CreateKeyPair(request *CreateKeyPairRequest) (response *CreateKeyPairResponse, err error) { + if request == nil { + request = NewCreateKeyPairRequest() + } + response = NewCreateKeyPairResponse() + err = c.Send(request, response) + return +} + +func NewDeleteDisasterRecoverGroupsRequest() (request *DeleteDisasterRecoverGroupsRequest) { + request = &DeleteDisasterRecoverGroupsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DeleteDisasterRecoverGroups") + return +} + +func NewDeleteDisasterRecoverGroupsResponse() (response *DeleteDisasterRecoverGroupsResponse) { + response = &DeleteDisasterRecoverGroupsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (DeleteDisasterRecoverGroups)用于删除[分散置放群组](https://cloud.tencent.com/document/product/213/15486)。只有空的置放群组才能被删除,非空的群组需要先销毁组内所有云服务器,才能执行删除操作,不然会产生删除置放群组失败的错误。 +func (c *Client) DeleteDisasterRecoverGroups(request *DeleteDisasterRecoverGroupsRequest) (response *DeleteDisasterRecoverGroupsResponse, err error) { + if request == nil { + request = NewDeleteDisasterRecoverGroupsRequest() + 
} + response = NewDeleteDisasterRecoverGroupsResponse() + err = c.Send(request, response) + return +} + +func NewDeleteImagesRequest() (request *DeleteImagesRequest) { + request = &DeleteImagesRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DeleteImages") + return +} + +func NewDeleteImagesResponse() (response *DeleteImagesResponse) { + response = &DeleteImagesResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(DeleteImages)用于删除一个或多个镜像。 +// +// * 当[镜像状态](https://cloud.tencent.com/document/product/213/15753#Image)为`创建中`和`使用中`时, 不允许删除。镜像状态可以通过[DescribeImages](https://cloud.tencent.com/document/api/213/9418)获取。 +// * 每个地域最多只支持创建10个自定义镜像,删除镜像可以释放账户的配额。 +// * 当镜像正在被其它账户分享时,不允许删除。 +func (c *Client) DeleteImages(request *DeleteImagesRequest) (response *DeleteImagesResponse, err error) { + if request == nil { + request = NewDeleteImagesRequest() + } + response = NewDeleteImagesResponse() + err = c.Send(request, response) + return +} + +func NewDeleteKeyPairsRequest() (request *DeleteKeyPairsRequest) { + request = &DeleteKeyPairsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DeleteKeyPairs") + return +} + +func NewDeleteKeyPairsResponse() (response *DeleteKeyPairsResponse) { + response = &DeleteKeyPairsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (DeleteKeyPairs) 用于删除已在腾讯云托管的密钥对。 +// +// * 可以同时删除多个密钥对。 +// * 不能删除已被实例或镜像引用的密钥对,所以需要独立判断是否所有密钥对都被成功删除。 +func (c *Client) DeleteKeyPairs(request *DeleteKeyPairsRequest) (response *DeleteKeyPairsResponse, err error) { + if request == nil { + request = NewDeleteKeyPairsRequest() + } + response = NewDeleteKeyPairsResponse() + err = c.Send(request, response) + return +} + +func NewDescribeDisasterRecoverGroupQuotaRequest() (request *DescribeDisasterRecoverGroupQuotaRequest) { + request = &DescribeDisasterRecoverGroupQuotaRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeDisasterRecoverGroupQuota") + return +} + +func NewDescribeDisasterRecoverGroupQuotaResponse() (response *DescribeDisasterRecoverGroupQuotaResponse) { + response = &DescribeDisasterRecoverGroupQuotaResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (DescribeDisasterRecoverGroupQuota)用于查询[分散置放群组](https://cloud.tencent.com/document/product/213/15486)配额。 +func (c *Client) DescribeDisasterRecoverGroupQuota(request *DescribeDisasterRecoverGroupQuotaRequest) (response *DescribeDisasterRecoverGroupQuotaResponse, err error) { + if request == nil { + request = NewDescribeDisasterRecoverGroupQuotaRequest() + } + response = NewDescribeDisasterRecoverGroupQuotaResponse() + err = c.Send(request, response) + return +} + +func NewDescribeDisasterRecoverGroupsRequest() (request *DescribeDisasterRecoverGroupsRequest) { + request = &DescribeDisasterRecoverGroupsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeDisasterRecoverGroups") + return +} + +func NewDescribeDisasterRecoverGroupsResponse() (response *DescribeDisasterRecoverGroupsResponse) { + response = &DescribeDisasterRecoverGroupsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (DescribeDisasterRecoverGroups)用于查询[分散置放群组](https://cloud.tencent.com/document/product/213/15486)信息。 +func (c *Client) DescribeDisasterRecoverGroups(request *DescribeDisasterRecoverGroupsRequest) (response *DescribeDisasterRecoverGroupsResponse, err error) { 
+ if request == nil { + request = NewDescribeDisasterRecoverGroupsRequest() + } + response = NewDescribeDisasterRecoverGroupsResponse() + err = c.Send(request, response) + return +} + +func NewDescribeHostsRequest() (request *DescribeHostsRequest) { + request = &DescribeHostsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeHosts") + return +} + +func NewDescribeHostsResponse() (response *DescribeHostsResponse) { + response = &DescribeHostsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (DescribeHosts) 用于获取一个或多个CDH实例的详细信息。 +func (c *Client) DescribeHosts(request *DescribeHostsRequest) (response *DescribeHostsResponse, err error) { + if request == nil { + request = NewDescribeHostsRequest() + } + response = NewDescribeHostsResponse() + err = c.Send(request, response) + return +} + +func NewDescribeImageQuotaRequest() (request *DescribeImageQuotaRequest) { + request = &DescribeImageQuotaRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeImageQuota") + return +} + +func NewDescribeImageQuotaResponse() (response *DescribeImageQuotaResponse) { + response = &DescribeImageQuotaResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(DescribeImageQuota)用于查询用户帐号的镜像配额。 +func (c *Client) DescribeImageQuota(request *DescribeImageQuotaRequest) (response *DescribeImageQuotaResponse, err error) { + if request == nil { + request = NewDescribeImageQuotaRequest() + } + response = NewDescribeImageQuotaResponse() + err = c.Send(request, response) + return +} + +func NewDescribeImageSharePermissionRequest() (request *DescribeImageSharePermissionRequest) { + request = &DescribeImageSharePermissionRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeImageSharePermission") + return +} + +func NewDescribeImageSharePermissionResponse() (response *DescribeImageSharePermissionResponse) { + response = &DescribeImageSharePermissionResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(DescribeImageSharePermission)用于查询镜像分享信息。 +func (c *Client) DescribeImageSharePermission(request *DescribeImageSharePermissionRequest) (response *DescribeImageSharePermissionResponse, err error) { + if request == nil { + request = NewDescribeImageSharePermissionRequest() + } + response = NewDescribeImageSharePermissionResponse() + err = c.Send(request, response) + return +} + +func NewDescribeImagesRequest() (request *DescribeImagesRequest) { + request = &DescribeImagesRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeImages") + return +} + +func NewDescribeImagesResponse() (response *DescribeImagesResponse) { + response = &DescribeImagesResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(DescribeImages) 用于查看镜像列表。 +// +// * 可以通过指定镜像ID来查询指定镜像的详细信息,或通过设定过滤器来查询满足过滤条件的镜像的详细信息。 +// * 指定偏移(Offset)和限制(Limit)来选择结果中的一部分,默认返回满足条件的前20个镜像信息。 +func (c *Client) DescribeImages(request *DescribeImagesRequest) (response *DescribeImagesResponse, err error) { + if request == nil { + request = NewDescribeImagesRequest() + } + response = NewDescribeImagesResponse() + err = c.Send(request, response) + return +} + +func NewDescribeImportImageOsRequest() (request *DescribeImportImageOsRequest) { + request = &DescribeImportImageOsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, 
"DescribeImportImageOs") + return +} + +func NewDescribeImportImageOsResponse() (response *DescribeImportImageOsResponse) { + response = &DescribeImportImageOsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 查看可以导入的镜像操作系统信息。 +func (c *Client) DescribeImportImageOs(request *DescribeImportImageOsRequest) (response *DescribeImportImageOsResponse, err error) { + if request == nil { + request = NewDescribeImportImageOsRequest() + } + response = NewDescribeImportImageOsResponse() + err = c.Send(request, response) + return +} + +func NewDescribeInstanceFamilyConfigsRequest() (request *DescribeInstanceFamilyConfigsRequest) { + request = &DescribeInstanceFamilyConfigsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeInstanceFamilyConfigs") + return +} + +func NewDescribeInstanceFamilyConfigsResponse() (response *DescribeInstanceFamilyConfigsResponse) { + response = &DescribeInstanceFamilyConfigsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(DescribeInstanceFamilyConfigs)查询当前用户和地域所支持的机型族列表信息。 +func (c *Client) DescribeInstanceFamilyConfigs(request *DescribeInstanceFamilyConfigsRequest) (response *DescribeInstanceFamilyConfigsResponse, err error) { + if request == nil { + request = NewDescribeInstanceFamilyConfigsRequest() + } + response = NewDescribeInstanceFamilyConfigsResponse() + err = c.Send(request, response) + return +} + +func NewDescribeInstanceInternetBandwidthConfigsRequest() (request *DescribeInstanceInternetBandwidthConfigsRequest) { + request = &DescribeInstanceInternetBandwidthConfigsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeInstanceInternetBandwidthConfigs") + return +} + +func NewDescribeInstanceInternetBandwidthConfigsResponse() (response *DescribeInstanceInternetBandwidthConfigsResponse) { + response = &DescribeInstanceInternetBandwidthConfigsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (DescribeInstanceInternetBandwidthConfigs) 用于查询实例带宽配置。 +// +// * 只支持查询`BANDWIDTH_PREPAID`( 预付费按带宽结算 )计费模式的带宽配置。 +// * 接口返回实例的所有带宽配置信息(包含历史的带宽配置信息)。 +func (c *Client) DescribeInstanceInternetBandwidthConfigs(request *DescribeInstanceInternetBandwidthConfigsRequest) (response *DescribeInstanceInternetBandwidthConfigsResponse, err error) { + if request == nil { + request = NewDescribeInstanceInternetBandwidthConfigsRequest() + } + response = NewDescribeInstanceInternetBandwidthConfigsResponse() + err = c.Send(request, response) + return +} + +func NewDescribeInstanceTypeConfigsRequest() (request *DescribeInstanceTypeConfigsRequest) { + request = &DescribeInstanceTypeConfigsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeInstanceTypeConfigs") + return +} + +func NewDescribeInstanceTypeConfigsResponse() (response *DescribeInstanceTypeConfigsResponse) { + response = &DescribeInstanceTypeConfigsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (DescribeInstanceTypeConfigs) 用于查询实例机型配置。 +// +// * 可以根据`zone`、`instance-family`来查询实例机型配置。过滤条件详见过滤器[`Filter`](https://cloud.tencent.com/document/api/213/15753#Filter)。 +// * 如果参数为空,返回指定地域的所有实例机型配置。 +func (c *Client) DescribeInstanceTypeConfigs(request *DescribeInstanceTypeConfigsRequest) (response *DescribeInstanceTypeConfigsResponse, err error) { + if request == nil { + request = NewDescribeInstanceTypeConfigsRequest() + } + response = NewDescribeInstanceTypeConfigsResponse() + 
err = c.Send(request, response) + return +} + +func NewDescribeInstanceVncUrlRequest() (request *DescribeInstanceVncUrlRequest) { + request = &DescribeInstanceVncUrlRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeInstanceVncUrl") + return +} + +func NewDescribeInstanceVncUrlResponse() (response *DescribeInstanceVncUrlResponse) { + response = &DescribeInstanceVncUrlResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 ( DescribeInstanceVncUrl ) 用于查询实例管理终端地址,获取的地址可用于实例的 VNC 登录。 +// +// * 处于 `STOPPED` 状态的机器无法使用此功能。 +// * 管理终端地址的有效期为 15 秒,调用接口成功后如果 15 秒内不使用该链接进行访问,管理终端地址自动失效,您需要重新查询。 +// * 管理终端地址一旦被访问,将自动失效,您需要重新查询。 +// * 如果连接断开,每分钟内重新连接的次数不能超过 30 次。 +// * 获取到 `InstanceVncUrl` 后,您需要在链接 末尾加上参数 `InstanceVncUrl=xxxx` 。 +// +// - 参数 `InstanceVncUrl` :调用接口成功后会返回的 `InstanceVncUrl` 的值。 +// +// 最后组成的 URL 格式如下: +// +// ``` +// https://img.qcloud.com/qcloud/app/active_vnc/index.html?InstanceVncUrl=wss%3A%2F%2Fbjvnc.qcloud.com%3A26789%2Fvnc%3Fs%3DaHpjWnRVMFNhYmxKdDM5MjRHNlVTSVQwajNUSW0wb2tBbmFtREFCTmFrcy8vUUNPMG0wSHZNOUUxRm5PMmUzWmFDcWlOdDJIbUJxSTZDL0RXcHZxYnZZMmRkWWZWcEZia2lyb09XMzdKNmM9 +// ``` +func (c *Client) DescribeInstanceVncUrl(request *DescribeInstanceVncUrlRequest) (response *DescribeInstanceVncUrlResponse, err error) { + if request == nil { + request = NewDescribeInstanceVncUrlRequest() + } + response = NewDescribeInstanceVncUrlResponse() + err = c.Send(request, response) + return +} + +func NewDescribeInstancesRequest() (request *DescribeInstancesRequest) { + request = &DescribeInstancesRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeInstances") + return +} + +func NewDescribeInstancesResponse() (response *DescribeInstancesResponse) { + response = &DescribeInstancesResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (DescribeInstances) 用于查询一个或多个实例的详细信息。 +// +// * 可以根据实例`ID`、实例名称或者实例计费模式等信息来查询实例的详细信息。过滤信息详细请见过滤器`Filter`。 +// * 如果参数为空,返回当前用户一定数量(`Limit`所指定的数量,默认为20)的实例。 +// * 支持查询实例的最新操作(LatestOperation)以及最新操作状态(LatestOperationState)。 +func (c *Client) DescribeInstances(request *DescribeInstancesRequest) (response *DescribeInstancesResponse, err error) { + if request == nil { + request = NewDescribeInstancesRequest() + } + response = NewDescribeInstancesResponse() + err = c.Send(request, response) + return +} + +func NewDescribeInstancesOperationLimitRequest() (request *DescribeInstancesOperationLimitRequest) { + request = &DescribeInstancesOperationLimitRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeInstancesOperationLimit") + return +} + +func NewDescribeInstancesOperationLimitResponse() (response *DescribeInstancesOperationLimitResponse) { + response = &DescribeInstancesOperationLimitResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(DescribeInstancesOperationLimit)用于查询实例操作限制。 +// +// * 目前支持调整配置操作限制次数查询。 +func (c *Client) DescribeInstancesOperationLimit(request *DescribeInstancesOperationLimitRequest) (response *DescribeInstancesOperationLimitResponse, err error) { + if request == nil { + request = NewDescribeInstancesOperationLimitRequest() + } + response = NewDescribeInstancesOperationLimitResponse() + err = c.Send(request, response) + return +} + +func NewDescribeInstancesStatusRequest() (request *DescribeInstancesStatusRequest) { + request = &DescribeInstancesStatusRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + 
request.Init().WithApiInfo("cvm", APIVersion, "DescribeInstancesStatus") + return +} + +func NewDescribeInstancesStatusResponse() (response *DescribeInstancesStatusResponse) { + response = &DescribeInstancesStatusResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (DescribeInstancesStatus) 用于查询一个或多个实例的状态。 +// +// * 可以根据实例`ID`来查询实例的状态。 +// * 如果参数为空,返回当前用户一定数量(Limit所指定的数量,默认为20)的实例状态。 +func (c *Client) DescribeInstancesStatus(request *DescribeInstancesStatusRequest) (response *DescribeInstancesStatusResponse, err error) { + if request == nil { + request = NewDescribeInstancesStatusRequest() + } + response = NewDescribeInstancesStatusResponse() + err = c.Send(request, response) + return +} + +func NewDescribeInternetChargeTypeConfigsRequest() (request *DescribeInternetChargeTypeConfigsRequest) { + request = &DescribeInternetChargeTypeConfigsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeInternetChargeTypeConfigs") + return +} + +func NewDescribeInternetChargeTypeConfigsResponse() (response *DescribeInternetChargeTypeConfigsResponse) { + response = &DescribeInternetChargeTypeConfigsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(DescribeInternetChargeTypeConfigs)用于查询网络的计费类型。 +func (c *Client) DescribeInternetChargeTypeConfigs(request *DescribeInternetChargeTypeConfigsRequest) (response *DescribeInternetChargeTypeConfigsResponse, err error) { + if request == nil { + request = NewDescribeInternetChargeTypeConfigsRequest() + } + response = NewDescribeInternetChargeTypeConfigsResponse() + err = c.Send(request, response) + return +} + +func NewDescribeKeyPairsRequest() (request *DescribeKeyPairsRequest) { + request = &DescribeKeyPairsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeKeyPairs") + return +} + +func NewDescribeKeyPairsResponse() (response *DescribeKeyPairsResponse) { + response = &DescribeKeyPairsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (DescribeKeyPairs) 用于查询密钥对信息。 +// +// * 密钥对是通过一种算法生成的一对密钥,在生成的密钥对中,一个向外界公开,称为公钥;另一个用户自己保留,称为私钥。密钥对的公钥内容可以通过这个接口查询,但私钥内容系统不保留。 +func (c *Client) DescribeKeyPairs(request *DescribeKeyPairsRequest) (response *DescribeKeyPairsResponse, err error) { + if request == nil { + request = NewDescribeKeyPairsRequest() + } + response = NewDescribeKeyPairsResponse() + err = c.Send(request, response) + return +} + +func NewDescribeRegionsRequest() (request *DescribeRegionsRequest) { + request = &DescribeRegionsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeRegions") + return +} + +func NewDescribeRegionsResponse() (response *DescribeRegionsResponse) { + response = &DescribeRegionsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(DescribeRegions)用于查询地域信息。 +func (c *Client) DescribeRegions(request *DescribeRegionsRequest) (response *DescribeRegionsResponse, err error) { + if request == nil { + request = NewDescribeRegionsRequest() + } + response = NewDescribeRegionsResponse() + err = c.Send(request, response) + return +} + +func NewDescribeReservedInstancesRequest() (request *DescribeReservedInstancesRequest) { + request = &DescribeReservedInstancesRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeReservedInstances") + return +} + +func NewDescribeReservedInstancesResponse() (response *DescribeReservedInstancesResponse) { + 
response = &DescribeReservedInstancesResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(DescribeReservedInstances)可提供列出用户已购买的预留实例 +func (c *Client) DescribeReservedInstances(request *DescribeReservedInstancesRequest) (response *DescribeReservedInstancesResponse, err error) { + if request == nil { + request = NewDescribeReservedInstancesRequest() + } + response = NewDescribeReservedInstancesResponse() + err = c.Send(request, response) + return +} + +func NewDescribeReservedInstancesOfferingsRequest() (request *DescribeReservedInstancesOfferingsRequest) { + request = &DescribeReservedInstancesOfferingsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeReservedInstancesOfferings") + return +} + +func NewDescribeReservedInstancesOfferingsResponse() (response *DescribeReservedInstancesOfferingsResponse) { + response = &DescribeReservedInstancesOfferingsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(DescribeReservedInstancesOfferings)供用户列出可购买的预留实例配置 +func (c *Client) DescribeReservedInstancesOfferings(request *DescribeReservedInstancesOfferingsRequest) (response *DescribeReservedInstancesOfferingsResponse, err error) { + if request == nil { + request = NewDescribeReservedInstancesOfferingsRequest() + } + response = NewDescribeReservedInstancesOfferingsResponse() + err = c.Send(request, response) + return +} + +func NewDescribeZoneInstanceConfigInfosRequest() (request *DescribeZoneInstanceConfigInfosRequest) { + request = &DescribeZoneInstanceConfigInfosRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeZoneInstanceConfigInfos") + return +} + +func NewDescribeZoneInstanceConfigInfosResponse() (response *DescribeZoneInstanceConfigInfosResponse) { + response = &DescribeZoneInstanceConfigInfosResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(DescribeZoneInstanceConfigInfos) 获取可用区的机型信息。 +func (c *Client) DescribeZoneInstanceConfigInfos(request *DescribeZoneInstanceConfigInfosRequest) (response *DescribeZoneInstanceConfigInfosResponse, err error) { + if request == nil { + request = NewDescribeZoneInstanceConfigInfosRequest() + } + response = NewDescribeZoneInstanceConfigInfosResponse() + err = c.Send(request, response) + return +} + +func NewDescribeZonesRequest() (request *DescribeZonesRequest) { + request = &DescribeZonesRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DescribeZones") + return +} + +func NewDescribeZonesResponse() (response *DescribeZonesResponse) { + response = &DescribeZonesResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(DescribeZones)用于查询可用区信息。 +func (c *Client) DescribeZones(request *DescribeZonesRequest) (response *DescribeZonesResponse, err error) { + if request == nil { + request = NewDescribeZonesRequest() + } + response = NewDescribeZonesResponse() + err = c.Send(request, response) + return +} + +func NewDisassociateInstancesKeyPairsRequest() (request *DisassociateInstancesKeyPairsRequest) { + request = &DisassociateInstancesKeyPairsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DisassociateInstancesKeyPairs") + return +} + +func NewDisassociateInstancesKeyPairsResponse() (response *DisassociateInstancesKeyPairsResponse) { + response = &DisassociateInstancesKeyPairsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 
(DisassociateInstancesKeyPairs) 用于解除实例的密钥绑定关系。 +// +// * 只支持[`STOPPED`](https://cloud.tencent.com/document/product/213/15753#InstanceStatus)状态的`Linux`操作系统的实例。 +// * 解绑密钥后,实例可以通过原来设置的密码登录。 +// * 如果原来没有设置密码,解绑后将无法使用 `SSH` 登录。可以调用 [ResetInstancesPassword](https://cloud.tencent.com/document/api/213/15736) 接口来设置登录密码。 +// * 支持批量操作。每次请求批量实例的上限为100。如果批量实例存在不允许操作的实例,操作会以特定错误码返回。 +func (c *Client) DisassociateInstancesKeyPairs(request *DisassociateInstancesKeyPairsRequest) (response *DisassociateInstancesKeyPairsResponse, err error) { + if request == nil { + request = NewDisassociateInstancesKeyPairsRequest() + } + response = NewDisassociateInstancesKeyPairsResponse() + err = c.Send(request, response) + return +} + +func NewDisassociateSecurityGroupsRequest() (request *DisassociateSecurityGroupsRequest) { + request = &DisassociateSecurityGroupsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "DisassociateSecurityGroups") + return +} + +func NewDisassociateSecurityGroupsResponse() (response *DisassociateSecurityGroupsResponse) { + response = &DisassociateSecurityGroupsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (DisassociateSecurityGroups) 用于解绑实例的指定安全组。 +// * 实例操作结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表操作成功。 +func (c *Client) DisassociateSecurityGroups(request *DisassociateSecurityGroupsRequest) (response *DisassociateSecurityGroupsResponse, err error) { + if request == nil { + request = NewDisassociateSecurityGroupsRequest() + } + response = NewDisassociateSecurityGroupsResponse() + err = c.Send(request, response) + return +} + +func NewImportImageRequest() (request *ImportImageRequest) { + request = &ImportImageRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ImportImage") + return +} + +func NewImportImageResponse() (response *ImportImageResponse) { + response = &ImportImageResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(ImportImage)用于导入镜像,导入后的镜像可用于创建实例。 +func (c *Client) ImportImage(request *ImportImageRequest) (response *ImportImageResponse, err error) { + if request == nil { + request = NewImportImageRequest() + } + response = NewImportImageResponse() + err = c.Send(request, response) + return +} + +func NewImportKeyPairRequest() (request *ImportKeyPairRequest) { + request = &ImportKeyPairRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ImportKeyPair") + return +} + +func NewImportKeyPairResponse() (response *ImportKeyPairResponse) { + response = &ImportKeyPairResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (ImportKeyPair) 用于导入密钥对。 +// +// * 本接口的功能是将密钥对导入到用户账户,并不会自动绑定到实例。如需绑定可以使用[AssociasteInstancesKeyPair](https://cloud.tencent.com/document/api/213/9404)接口。 +// * 需指定密钥对名称以及该密钥对的公钥文本。 +// * 如果用户只有私钥,可以通过 `SSL` 工具将私钥转换成公钥后再导入。 +func (c *Client) ImportKeyPair(request *ImportKeyPairRequest) (response *ImportKeyPairResponse, err error) { + if request == nil { + request = NewImportKeyPairRequest() + } + response = NewImportKeyPairResponse() + err = c.Send(request, response) + return +} + +func NewInquiryPriceModifyInstancesChargeTypeRequest() (request *InquiryPriceModifyInstancesChargeTypeRequest) { + request = &InquiryPriceModifyInstancesChargeTypeRequest{ 
+ BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "InquiryPriceModifyInstancesChargeType") + return +} + +func NewInquiryPriceModifyInstancesChargeTypeResponse() (response *InquiryPriceModifyInstancesChargeTypeResponse) { + response = &InquiryPriceModifyInstancesChargeTypeResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (InquiryPriceModifyInstancesChargeType) 用于切换实例的计费模式询价。 +// +// * 只支持从 `POSTPAID_BY_HOUR` 计费模式切换为`PREPAID`计费模式。 +// * 关机不收费的实例、`BC1`和`BS1`机型族的实例、设置定时销毁的实例、竞价实例不支持该操作。 +func (c *Client) InquiryPriceModifyInstancesChargeType(request *InquiryPriceModifyInstancesChargeTypeRequest) (response *InquiryPriceModifyInstancesChargeTypeResponse, err error) { + if request == nil { + request = NewInquiryPriceModifyInstancesChargeTypeRequest() + } + response = NewInquiryPriceModifyInstancesChargeTypeResponse() + err = c.Send(request, response) + return +} + +func NewInquiryPriceRenewInstancesRequest() (request *InquiryPriceRenewInstancesRequest) { + request = &InquiryPriceRenewInstancesRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "InquiryPriceRenewInstances") + return +} + +func NewInquiryPriceRenewInstancesResponse() (response *InquiryPriceRenewInstancesResponse) { + response = &InquiryPriceRenewInstancesResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (InquiryPriceRenewInstances) 用于续费包年包月实例询价。 +// +// * 只支持查询包年包月实例的续费价格。 +func (c *Client) InquiryPriceRenewInstances(request *InquiryPriceRenewInstancesRequest) (response *InquiryPriceRenewInstancesResponse, err error) { + if request == nil { + request = NewInquiryPriceRenewInstancesRequest() + } + response = NewInquiryPriceRenewInstancesResponse() + err = c.Send(request, response) + return +} + +func NewInquiryPriceResetInstanceRequest() (request *InquiryPriceResetInstanceRequest) { + request = &InquiryPriceResetInstanceRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "InquiryPriceResetInstance") + return +} + +func NewInquiryPriceResetInstanceResponse() (response *InquiryPriceResetInstanceResponse) { + response = &InquiryPriceResetInstanceResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (InquiryPriceResetInstance) 用于重装实例询价。 +// +// * 如果指定了`ImageId`参数,则使用指定的镜像进行重装询价;否则按照当前实例使用的镜像进行重装询价。 +// * 目前只支持[系统盘类型](https://cloud.tencent.com/document/api/213/15753#SystemDisk)是`CLOUD_BASIC`、`CLOUD_PREMIUM`、`CLOUD_SSD`类型的实例使用该接口实现`Linux`和`Windows`操作系统切换的重装询价。 +// * 目前不支持境外地域的实例使用该接口实现`Linux`和`Windows`操作系统切换的重装询价。 +func (c *Client) InquiryPriceResetInstance(request *InquiryPriceResetInstanceRequest) (response *InquiryPriceResetInstanceResponse, err error) { + if request == nil { + request = NewInquiryPriceResetInstanceRequest() + } + response = NewInquiryPriceResetInstanceResponse() + err = c.Send(request, response) + return +} + +func NewInquiryPriceResetInstancesInternetMaxBandwidthRequest() (request *InquiryPriceResetInstancesInternetMaxBandwidthRequest) { + request = &InquiryPriceResetInstancesInternetMaxBandwidthRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "InquiryPriceResetInstancesInternetMaxBandwidth") + return +} + +func NewInquiryPriceResetInstancesInternetMaxBandwidthResponse() (response *InquiryPriceResetInstancesInternetMaxBandwidthResponse) { + response = &InquiryPriceResetInstancesInternetMaxBandwidthResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} 
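Every wrapper in this client follows the same request/response pattern shown by the constructors above. The sketch below is an editorial illustration of that pattern end to end using DescribeInstances; it is not part of the vendored file or of this patch. The secret ID/key and region are placeholders, and real callers would normally prefer NewClient with an explicit credential and ClientProfile over the deprecated NewClientWithSecretId used here for brevity.

package main

import (
	"fmt"
	"log"

	cvm "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312"
)

func main() {
	// NewClientWithSecretId is the convenience constructor defined in this
	// file; it builds a default ClientProfile internally.
	client, err := cvm.NewClientWithSecretId("your-secret-id", "your-secret-key", "ap-guangzhou")
	if err != nil {
		log.Fatal(err)
	}

	// Build the request with its New*Request constructor, then call the
	// wrapper, which sends it and fills in the typed response. Per the doc
	// comment on DescribeInstances, an empty request returns up to the
	// default Limit of 20 instances.
	request := cvm.NewDescribeInstancesRequest()

	response, err := client.DescribeInstances(request)
	if err != nil {
		// API-level failures surface as SDK errors returned by c.Send.
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", response)
}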
+ +// 本接口 (InquiryPriceResetInstancesInternetMaxBandwidth) 用于调整实例公网带宽上限询价。 +// +// * 不同机型带宽上限范围不一致,具体限制详见[公网带宽上限](https://cloud.tencent.com/document/product/213/12523)。 +// * 对于`BANDWIDTH_PREPAID`计费方式的带宽,目前不支持调小带宽,且需要输入参数`StartTime`和`EndTime`,指定调整后的带宽的生效时间段。在这种场景下会涉及扣费,请确保账户余额充足。可通过[`DescribeAccountBalance`](https://cloud.tencent.com/document/product/555/20253)接口查询账户余额。 +// * 对于 `TRAFFIC_POSTPAID_BY_HOUR`、 `BANDWIDTH_POSTPAID_BY_HOUR` 和 `BANDWIDTH_PACKAGE` 计费方式的带宽,使用该接口调整带宽上限是实时生效的,可以在带宽允许的范围内调大或者调小带宽,不支持输入参数 `StartTime` 和 `EndTime` 。 +// * 接口不支持调整`BANDWIDTH_POSTPAID_BY_MONTH`计费方式的带宽。 +// * 接口不支持批量调整 `BANDWIDTH_PREPAID` 和 `BANDWIDTH_POSTPAID_BY_HOUR` 计费方式的带宽。 +// * 接口不支持批量调整混合计费方式的带宽。例如不支持同时调整`TRAFFIC_POSTPAID_BY_HOUR`和`BANDWIDTH_PACKAGE`计费方式的带宽。 +func (c *Client) InquiryPriceResetInstancesInternetMaxBandwidth(request *InquiryPriceResetInstancesInternetMaxBandwidthRequest) (response *InquiryPriceResetInstancesInternetMaxBandwidthResponse, err error) { + if request == nil { + request = NewInquiryPriceResetInstancesInternetMaxBandwidthRequest() + } + response = NewInquiryPriceResetInstancesInternetMaxBandwidthResponse() + err = c.Send(request, response) + return +} + +func NewInquiryPriceResetInstancesTypeRequest() (request *InquiryPriceResetInstancesTypeRequest) { + request = &InquiryPriceResetInstancesTypeRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "InquiryPriceResetInstancesType") + return +} + +func NewInquiryPriceResetInstancesTypeResponse() (response *InquiryPriceResetInstancesTypeResponse) { + response = &InquiryPriceResetInstancesTypeResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (InquiryPriceResetInstancesType) 用于调整实例的机型询价。 +// +// * 目前只支持[系统盘类型](https://cloud.tencent.com/document/product/213/15753#SystemDisk)是`CLOUD_BASIC`、`CLOUD_PREMIUM`、`CLOUD_SSD`类型的实例使用该接口进行调整机型询价。 +// * 目前不支持[CDH](https://cloud.tencent.com/document/product/416)实例使用该接口调整机型询价。 +// * 对于包年包月实例,使用该接口会涉及扣费,请确保账户余额充足。可通过[`DescribeAccountBalance`](https://cloud.tencent.com/document/product/555/20253)接口查询账户余额。 +func (c *Client) InquiryPriceResetInstancesType(request *InquiryPriceResetInstancesTypeRequest) (response *InquiryPriceResetInstancesTypeResponse, err error) { + if request == nil { + request = NewInquiryPriceResetInstancesTypeRequest() + } + response = NewInquiryPriceResetInstancesTypeResponse() + err = c.Send(request, response) + return +} + +func NewInquiryPriceResizeInstanceDisksRequest() (request *InquiryPriceResizeInstanceDisksRequest) { + request = &InquiryPriceResizeInstanceDisksRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "InquiryPriceResizeInstanceDisks") + return +} + +func NewInquiryPriceResizeInstanceDisksResponse() (response *InquiryPriceResizeInstanceDisksResponse) { + response = &InquiryPriceResizeInstanceDisksResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (InquiryPriceResizeInstanceDisks) 用于扩容实例的数据盘询价。 +// +// * 目前只支持扩容非弹性数据盘([`DescribeDisks`](https://cloud.tencent.com/document/api/362/16315)接口返回值中的`Portable`为`false`表示非弹性)询价,且[数据盘类型](https://cloud.tencent.com/document/product/213/15753#DataDisk)为:`CLOUD_BASIC`、`CLOUD_PREMIUM`、`CLOUD_SSD`。 +// * 目前不支持[CDH](https://cloud.tencent.com/document/product/416)实例使用该接口扩容数据盘询价。* 仅支持包年包月实例随机器购买的数据盘。* 目前只支持扩容一块数据盘询价。 +func (c *Client) InquiryPriceResizeInstanceDisks(request *InquiryPriceResizeInstanceDisksRequest) (response *InquiryPriceResizeInstanceDisksResponse, err error) { + if request == 
nil { + request = NewInquiryPriceResizeInstanceDisksRequest() + } + response = NewInquiryPriceResizeInstanceDisksResponse() + err = c.Send(request, response) + return +} + +func NewInquiryPriceRunInstancesRequest() (request *InquiryPriceRunInstancesRequest) { + request = &InquiryPriceRunInstancesRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "InquiryPriceRunInstances") + return +} + +func NewInquiryPriceRunInstancesResponse() (response *InquiryPriceRunInstancesResponse) { + response = &InquiryPriceRunInstancesResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(InquiryPriceRunInstances)用于创建实例询价。本接口仅允许针对购买限制范围内的实例配置进行询价, 详见:[创建实例](https://cloud.tencent.com/document/api/213/15730)。 +func (c *Client) InquiryPriceRunInstances(request *InquiryPriceRunInstancesRequest) (response *InquiryPriceRunInstancesResponse, err error) { + if request == nil { + request = NewInquiryPriceRunInstancesRequest() + } + response = NewInquiryPriceRunInstancesResponse() + err = c.Send(request, response) + return +} + +func NewModifyDisasterRecoverGroupAttributeRequest() (request *ModifyDisasterRecoverGroupAttributeRequest) { + request = &ModifyDisasterRecoverGroupAttributeRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ModifyDisasterRecoverGroupAttribute") + return +} + +func NewModifyDisasterRecoverGroupAttributeResponse() (response *ModifyDisasterRecoverGroupAttributeResponse) { + response = &ModifyDisasterRecoverGroupAttributeResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (ModifyDisasterRecoverGroupAttribute)用于修改[分散置放群组](https://cloud.tencent.com/document/product/213/15486)属性。 +func (c *Client) ModifyDisasterRecoverGroupAttribute(request *ModifyDisasterRecoverGroupAttributeRequest) (response *ModifyDisasterRecoverGroupAttributeResponse, err error) { + if request == nil { + request = NewModifyDisasterRecoverGroupAttributeRequest() + } + response = NewModifyDisasterRecoverGroupAttributeResponse() + err = c.Send(request, response) + return +} + +func NewModifyHostsAttributeRequest() (request *ModifyHostsAttributeRequest) { + request = &ModifyHostsAttributeRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ModifyHostsAttribute") + return +} + +func NewModifyHostsAttributeResponse() (response *ModifyHostsAttributeResponse) { + response = &ModifyHostsAttributeResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(ModifyHostsAttribute)用于修改CDH实例的属性,如实例名称和续费标记等。参数HostName和RenewFlag必须设置其中一个,但不能同时设置。 +func (c *Client) ModifyHostsAttribute(request *ModifyHostsAttributeRequest) (response *ModifyHostsAttributeResponse, err error) { + if request == nil { + request = NewModifyHostsAttributeRequest() + } + response = NewModifyHostsAttributeResponse() + err = c.Send(request, response) + return +} + +func NewModifyImageAttributeRequest() (request *ModifyImageAttributeRequest) { + request = &ModifyImageAttributeRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ModifyImageAttribute") + return +} + +func NewModifyImageAttributeResponse() (response *ModifyImageAttributeResponse) { + response = &ModifyImageAttributeResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(ModifyImageAttribute)用于修改镜像属性。 +// +// * 已分享的镜像无法修改属性。 +func (c *Client) ModifyImageAttribute(request *ModifyImageAttributeRequest) (response *ModifyImageAttributeResponse, err 
error) { + if request == nil { + request = NewModifyImageAttributeRequest() + } + response = NewModifyImageAttributeResponse() + err = c.Send(request, response) + return +} + +func NewModifyImageSharePermissionRequest() (request *ModifyImageSharePermissionRequest) { + request = &ModifyImageSharePermissionRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ModifyImageSharePermission") + return +} + +func NewModifyImageSharePermissionResponse() (response *ModifyImageSharePermissionResponse) { + response = &ModifyImageSharePermissionResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(ModifyImageSharePermission)用于修改镜像分享信息。 +// +// * 分享镜像后,被分享账户可以通过该镜像创建实例。 +// * 每个自定义镜像最多可共享给50个账户。 +// * 分享镜像无法更改名称,描述,仅可用于创建实例。 +// * 只支持分享到对方账户相同地域。 +func (c *Client) ModifyImageSharePermission(request *ModifyImageSharePermissionRequest) (response *ModifyImageSharePermissionResponse, err error) { + if request == nil { + request = NewModifyImageSharePermissionRequest() + } + response = NewModifyImageSharePermissionResponse() + err = c.Send(request, response) + return +} + +func NewModifyInstancesAttributeRequest() (request *ModifyInstancesAttributeRequest) { + request = &ModifyInstancesAttributeRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ModifyInstancesAttribute") + return +} + +func NewModifyInstancesAttributeResponse() (response *ModifyInstancesAttributeResponse) { + response = &ModifyInstancesAttributeResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (ModifyInstancesAttribute) 用于修改实例的属性(目前只支持修改实例的名称和关联的安全组)。 +// +// * “实例名称”仅为方便用户自己管理之用,腾讯云并不以此名称作为提交工单或是进行实例管理操作的依据。 +// * 支持批量操作。每次请求批量实例的上限为100。 +// * 修改关联安全组时,子机原来关联的安全组会被解绑。 +// * 实例操作结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表操作成功。 +func (c *Client) ModifyInstancesAttribute(request *ModifyInstancesAttributeRequest) (response *ModifyInstancesAttributeResponse, err error) { + if request == nil { + request = NewModifyInstancesAttributeRequest() + } + response = NewModifyInstancesAttributeResponse() + err = c.Send(request, response) + return +} + +func NewModifyInstancesChargeTypeRequest() (request *ModifyInstancesChargeTypeRequest) { + request = &ModifyInstancesChargeTypeRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ModifyInstancesChargeType") + return +} + +func NewModifyInstancesChargeTypeResponse() (response *ModifyInstancesChargeTypeResponse) { + response = &ModifyInstancesChargeTypeResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (ModifyInstancesChargeType) 用于切换实例的计费模式。 +// +// * 只支持从 `POSTPAID_BY_HOUR` 计费模式切换为`PREPAID`计费模式。 +// * 关机不收费的实例、`BC1`和`BS1`机型族的实例、设置定时销毁的实例不支持该操作。 +// * 实例操作结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表操作成功。 +func (c *Client) ModifyInstancesChargeType(request *ModifyInstancesChargeTypeRequest) (response *ModifyInstancesChargeTypeResponse, err error) { + if request == nil { + request = NewModifyInstancesChargeTypeRequest() + } + response = NewModifyInstancesChargeTypeResponse() + err = 
c.Send(request, response) + return +} + +func NewModifyInstancesProjectRequest() (request *ModifyInstancesProjectRequest) { + request = &ModifyInstancesProjectRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ModifyInstancesProject") + return +} + +func NewModifyInstancesProjectResponse() (response *ModifyInstancesProjectResponse) { + response = &ModifyInstancesProjectResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (ModifyInstancesProject) 用于修改实例所属项目。 +// +// * 项目为一个虚拟概念,用户可以在一个账户下面建立多个项目,每个项目中管理不同的资源;将多个不同实例分属到不同项目中,后续使用 [`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口查询实例,项目ID可用于过滤结果。 +// * 绑定负载均衡的实例不支持修改实例所属项目,请先使用[`DeregisterInstancesFromLoadBalancer`](https://cloud.tencent.com/document/api/214/1258)接口解绑负载均衡。 +// [^_^]: # ( 修改实例所属项目会自动解关联实例原来关联的安全组,修改完成后可使用[`ModifyInstancesAttribute`](https://cloud.tencent.com/document/api/213/15739)接口关联安全组。) +// * 支持批量操作。每次请求批量实例的上限为100。 +// * 实例操作结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表操作成功。 +func (c *Client) ModifyInstancesProject(request *ModifyInstancesProjectRequest) (response *ModifyInstancesProjectResponse, err error) { + if request == nil { + request = NewModifyInstancesProjectRequest() + } + response = NewModifyInstancesProjectResponse() + err = c.Send(request, response) + return +} + +func NewModifyInstancesRenewFlagRequest() (request *ModifyInstancesRenewFlagRequest) { + request = &ModifyInstancesRenewFlagRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ModifyInstancesRenewFlag") + return +} + +func NewModifyInstancesRenewFlagResponse() (response *ModifyInstancesRenewFlagResponse) { + response = &ModifyInstancesRenewFlagResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (ModifyInstancesRenewFlag) 用于修改包年包月实例续费标识。 +// +// * 实例被标识为自动续费后,每次在实例到期时,会自动续费一个月。 +// * 支持批量操作。每次请求批量实例的上限为100。 +// * 实例操作结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表操作成功。 +func (c *Client) ModifyInstancesRenewFlag(request *ModifyInstancesRenewFlagRequest) (response *ModifyInstancesRenewFlagResponse, err error) { + if request == nil { + request = NewModifyInstancesRenewFlagRequest() + } + response = NewModifyInstancesRenewFlagResponse() + err = c.Send(request, response) + return +} + +func NewModifyInstancesVpcAttributeRequest() (request *ModifyInstancesVpcAttributeRequest) { + request = &ModifyInstancesVpcAttributeRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ModifyInstancesVpcAttribute") + return +} + +func NewModifyInstancesVpcAttributeResponse() (response *ModifyInstancesVpcAttributeResponse) { + response = &ModifyInstancesVpcAttributeResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(ModifyInstancesVpcAttribute)用于修改实例vpc属性,如私有网络ip。 +// * 此操作默认会关闭实例,完成后再启动。 +// * 当指定私有网络ID和子网ID(子网必须在实例所在的可用区)与指定实例所在私有网络不一致时,会将实例迁移至指定的私有网络的子网下。执行此操作前请确保指定的实例上没有绑定[弹性网卡](https://cloud.tencent.com/document/product/576)和[负载均衡](https://cloud.tencent.com/document/product/214)。 +// * 实例操作结果可以通过调用 
[DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表操作成功。 +func (c *Client) ModifyInstancesVpcAttribute(request *ModifyInstancesVpcAttributeRequest) (response *ModifyInstancesVpcAttributeResponse, err error) { + if request == nil { + request = NewModifyInstancesVpcAttributeRequest() + } + response = NewModifyInstancesVpcAttributeResponse() + err = c.Send(request, response) + return +} + +func NewModifyKeyPairAttributeRequest() (request *ModifyKeyPairAttributeRequest) { + request = &ModifyKeyPairAttributeRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ModifyKeyPairAttribute") + return +} + +func NewModifyKeyPairAttributeResponse() (response *ModifyKeyPairAttributeResponse) { + response = &ModifyKeyPairAttributeResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (ModifyKeyPairAttribute) 用于修改密钥对属性。 +// +// * 修改密钥对ID所指定的密钥对的名称和描述信息。 +// * 密钥对名称不能和已经存在的密钥对的名称重复。 +// * 密钥对ID是密钥对的唯一标识,不可修改。 +func (c *Client) ModifyKeyPairAttribute(request *ModifyKeyPairAttributeRequest) (response *ModifyKeyPairAttributeResponse, err error) { + if request == nil { + request = NewModifyKeyPairAttributeRequest() + } + response = NewModifyKeyPairAttributeResponse() + err = c.Send(request, response) + return +} + +func NewPurchaseReservedInstancesOfferingRequest() (request *PurchaseReservedInstancesOfferingRequest) { + request = &PurchaseReservedInstancesOfferingRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "PurchaseReservedInstancesOffering") + return +} + +func NewPurchaseReservedInstancesOfferingResponse() (response *PurchaseReservedInstancesOfferingResponse) { + response = &PurchaseReservedInstancesOfferingResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(PurchaseReservedInstancesOffering)用于用户购买一个或者多个指定配置的预留实例 +func (c *Client) PurchaseReservedInstancesOffering(request *PurchaseReservedInstancesOfferingRequest) (response *PurchaseReservedInstancesOfferingResponse, err error) { + if request == nil { + request = NewPurchaseReservedInstancesOfferingRequest() + } + response = NewPurchaseReservedInstancesOfferingResponse() + err = c.Send(request, response) + return +} + +func NewRebootInstancesRequest() (request *RebootInstancesRequest) { + request = &RebootInstancesRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "RebootInstances") + return +} + +func NewRebootInstancesResponse() (response *RebootInstancesResponse) { + response = &RebootInstancesResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (RebootInstances) 用于重启实例。 +// +// * 只有状态为`RUNNING`的实例才可以进行此操作。 +// * 接口调用成功时,实例会进入`REBOOTING`状态;重启实例成功时,实例会进入`RUNNING`状态。 +// * 支持强制重启。强制重启的效果等同于关闭物理计算机的电源开关再重新启动。强制重启可能会导致数据丢失或文件系统损坏,请仅在服务器不能正常重启时使用。 +// * 支持批量操作,每次请求批量实例的上限为100。 +// * 实例操作结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表操作成功。 +func (c *Client) RebootInstances(request *RebootInstancesRequest) (response *RebootInstancesResponse, err error) { + if request == nil { + request = NewRebootInstancesRequest() + } + response = NewRebootInstancesResponse() + 
err = c.Send(request, response) + return +} + +func NewRenewHostsRequest() (request *RenewHostsRequest) { + request = &RenewHostsRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "RenewHosts") + return +} + +func NewRenewHostsResponse() (response *RenewHostsResponse) { + response = &RenewHostsResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (RenewHosts) 用于续费包年包月CDH实例。 +// +// * 只支持操作包年包月实例,否则操作会以特定[错误码](#6.-.E9.94.99.E8.AF.AF.E7.A0.81)返回。 +// * 续费时请确保账户余额充足。可通过[`DescribeAccountBalance`](https://cloud.tencent.com/document/product/555/20253)接口查询账户余额。 +func (c *Client) RenewHosts(request *RenewHostsRequest) (response *RenewHostsResponse, err error) { + if request == nil { + request = NewRenewHostsRequest() + } + response = NewRenewHostsResponse() + err = c.Send(request, response) + return +} + +func NewRenewInstancesRequest() (request *RenewInstancesRequest) { + request = &RenewInstancesRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "RenewInstances") + return +} + +func NewRenewInstancesResponse() (response *RenewInstancesResponse) { + response = &RenewInstancesResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (RenewInstances) 用于续费包年包月实例。 +// +// * 只支持操作包年包月实例。 +// * 续费时请确保账户余额充足。可通过[`DescribeAccountBalance`](https://cloud.tencent.com/document/product/555/20253)接口查询账户余额。 +// * 实例操作结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表操作成功。 +func (c *Client) RenewInstances(request *RenewInstancesRequest) (response *RenewInstancesResponse, err error) { + if request == nil { + request = NewRenewInstancesRequest() + } + response = NewRenewInstancesResponse() + err = c.Send(request, response) + return +} + +func NewResetInstanceRequest() (request *ResetInstanceRequest) { + request = &ResetInstanceRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ResetInstance") + return +} + +func NewResetInstanceResponse() (response *ResetInstanceResponse) { + response = &ResetInstanceResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (ResetInstance) 用于重装指定实例上的操作系统。 +// +// * 如果指定了`ImageId`参数,则使用指定的镜像重装;否则按照当前实例使用的镜像进行重装。 +// * 系统盘将会被格式化,并重置;请确保系统盘中无重要文件。 +// * `Linux`和`Windows`系统互相切换时,该实例系统盘`ID`将发生变化,系统盘关联快照将无法回滚、恢复数据。 +// * 密码不指定将会通过站内信下发随机密码。 +// * 目前只支持[系统盘类型](https://cloud.tencent.com/document/api/213/9452#SystemDisk)是`CLOUD_BASIC`、`CLOUD_PREMIUM`、`CLOUD_SSD`类型的实例使用该接口实现`Linux`和`Windows`操作系统切换。 +// * 目前不支持境外地域的实例使用该接口实现`Linux`和`Windows`操作系统切换。 +// * 实例操作结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表操作成功。 +func (c *Client) ResetInstance(request *ResetInstanceRequest) (response *ResetInstanceResponse, err error) { + if request == nil { + request = NewResetInstanceRequest() + } + response = NewResetInstanceResponse() + err = c.Send(request, response) + return +} + +func NewResetInstancesInternetMaxBandwidthRequest() (request *ResetInstancesInternetMaxBandwidthRequest) { + request = &ResetInstancesInternetMaxBandwidthRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + 
request.Init().WithApiInfo("cvm", APIVersion, "ResetInstancesInternetMaxBandwidth") + return +} + +func NewResetInstancesInternetMaxBandwidthResponse() (response *ResetInstancesInternetMaxBandwidthResponse) { + response = &ResetInstancesInternetMaxBandwidthResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (ResetInstancesInternetMaxBandwidth) 用于调整实例公网带宽上限。 +// +// * 不同机型带宽上限范围不一致,具体限制详见[公网带宽上限](https://cloud.tencent.com/document/product/213/12523)。 +// * 对于 `BANDWIDTH_PREPAID` 计费方式的带宽,需要输入参数 `StartTime` 和 `EndTime` ,指定调整后的带宽的生效时间段。在这种场景下目前不支持调小带宽,会涉及扣费,请确保账户余额充足。可通过 [`DescribeAccountBalance`](https://cloud.tencent.com/document/product/555/20253) 接口查询账户余额。 +// * 对于 `TRAFFIC_POSTPAID_BY_HOUR` 、 `BANDWIDTH_POSTPAID_BY_HOUR` 和 `BANDWIDTH_PACKAGE` 计费方式的带宽,使用该接口调整带宽上限是实时生效的,可以在带宽允许的范围内调大或者调小带宽,不支持输入参数 `StartTime` 和 `EndTime` 。 +// * 接口不支持调整 `BANDWIDTH_POSTPAID_BY_MONTH` 计费方式的带宽。 +// * 接口不支持批量调整 `BANDWIDTH_PREPAID` 和 `BANDWIDTH_POSTPAID_BY_HOUR` 计费方式的带宽。 +// * 接口不支持批量调整混合计费方式的带宽。例如不支持同时调整 `TRAFFIC_POSTPAID_BY_HOUR` 和 `BANDWIDTH_PACKAGE` 计费方式的带宽。 +// * 实例操作结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表操作成功。 +func (c *Client) ResetInstancesInternetMaxBandwidth(request *ResetInstancesInternetMaxBandwidthRequest) (response *ResetInstancesInternetMaxBandwidthResponse, err error) { + if request == nil { + request = NewResetInstancesInternetMaxBandwidthRequest() + } + response = NewResetInstancesInternetMaxBandwidthResponse() + err = c.Send(request, response) + return +} + +func NewResetInstancesPasswordRequest() (request *ResetInstancesPasswordRequest) { + request = &ResetInstancesPasswordRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ResetInstancesPassword") + return +} + +func NewResetInstancesPasswordResponse() (response *ResetInstancesPasswordResponse) { + response = &ResetInstancesPasswordResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (ResetInstancesPassword) 用于将实例操作系统的密码重置为用户指定的密码。 +// +// *如果是修改系统管理云密码:实例的操作系统不同,管理员帐号也会不一样(`Windows`为`Administrator`,`Ubuntu`为`ubuntu`,其它系统为`root`)。 +// * 重置处于运行中状态的实例密码,需要设置关机参数`ForceStop`为`TRUE`。如果没有显式指定强制关机参数,则只有处于关机状态的实例才允许执行重置密码操作。 +// * 支持批量操作。将多个实例操作系统的密码重置为相同的密码。每次请求批量实例的上限为100。 +// * 实例操作结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表操作成功。 +func (c *Client) ResetInstancesPassword(request *ResetInstancesPasswordRequest) (response *ResetInstancesPasswordResponse, err error) { + if request == nil { + request = NewResetInstancesPasswordRequest() + } + response = NewResetInstancesPasswordResponse() + err = c.Send(request, response) + return +} + +func NewResetInstancesTypeRequest() (request *ResetInstancesTypeRequest) { + request = &ResetInstancesTypeRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ResetInstancesType") + return +} + +func NewResetInstancesTypeResponse() (response *ResetInstancesTypeResponse) { + response = &ResetInstancesTypeResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (ResetInstancesType) 用于调整实例的机型。 +// * 
目前只支持[系统盘类型](/document/api/213/9452#block_device)是`CLOUD_BASIC`、`CLOUD_PREMIUM`、`CLOUD_SSD`类型的实例使用该接口进行机型调整。 +// * 目前不支持[CDH](https://cloud.tencent.com/document/product/416)实例使用该接口调整机型。对于包年包月实例,使用该接口会涉及扣费,请确保账户余额充足。可通过[`DescribeAccountBalance`](https://cloud.tencent.com/document/product/555/20253)接口查询账户余额。 +// * 本接口为异步接口,调整实例配置请求发送成功后会返回一个RequestId,此时操作并未立即完成。实例操作结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表调整实例配置操作成功。 +func (c *Client) ResetInstancesType(request *ResetInstancesTypeRequest) (response *ResetInstancesTypeResponse, err error) { + if request == nil { + request = NewResetInstancesTypeRequest() + } + response = NewResetInstancesTypeResponse() + err = c.Send(request, response) + return +} + +func NewResizeInstanceDisksRequest() (request *ResizeInstanceDisksRequest) { + request = &ResizeInstanceDisksRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "ResizeInstanceDisks") + return +} + +func NewResizeInstanceDisksResponse() (response *ResizeInstanceDisksResponse) { + response = &ResizeInstanceDisksResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (ResizeInstanceDisks) 用于扩容实例的数据盘。 +// +// * 目前只支持扩容非弹性数据盘([`DescribeDisks`](https://cloud.tencent.com/document/api/362/16315)接口返回值中的`Portable`为`false`表示非弹性),且[数据盘类型](https://cloud.tencent.com/document/api/213/15753#DataDisk)为:`CLOUD_BASIC`、`CLOUD_PREMIUM`、`CLOUD_SSD`和[CDH](https://cloud.tencent.com/document/product/416)实例的`LOCAL_BASIC`、`LOCAL_SSD`类型数据盘。 +// * 对于包年包月实例,使用该接口会涉及扣费,请确保账户余额充足。可通过[`DescribeAccountBalance`](https://cloud.tencent.com/document/product/555/20253)接口查询账户余额。 +// * 目前只支持扩容一块数据盘。 +// * 实例操作结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表操作成功。 +func (c *Client) ResizeInstanceDisks(request *ResizeInstanceDisksRequest) (response *ResizeInstanceDisksResponse, err error) { + if request == nil { + request = NewResizeInstanceDisksRequest() + } + response = NewResizeInstanceDisksResponse() + err = c.Send(request, response) + return +} + +func NewRunInstancesRequest() (request *RunInstancesRequest) { + request = &RunInstancesRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "RunInstances") + return +} + +func NewRunInstancesResponse() (response *RunInstancesResponse) { + response = &RunInstancesResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (RunInstances) 用于创建一个或多个指定配置的实例。 +// +// * 实例创建成功后将自动开机启动,[实例状态](https://cloud.tencent.com/document/product/213/15753#InstanceStatus)变为“运行中”。 +// * 预付费实例的购买会预先扣除本次实例购买所需金额,按小时后付费实例购买会预先冻结本次实例购买一小时内所需金额,在调用本接口前请确保账户余额充足。 +// * 本接口允许购买的实例数量遵循[CVM实例购买限制](https://cloud.tencent.com/document/product/213/2664),所创建的实例和官网入口创建的实例共用配额。 +// * 本接口为异步接口,当创建实例请求下发成功后会返回一个实例`ID`列表和一个`RequestId`,此时创建实例操作并未立即完成。在此期间实例的状态将会处于“PENDING”,实例创建结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728) 接口查询,如果实例状态(InstanceState)由“PENDING”变为“RUNNING”,则代表实例创建成功,“LAUNCH_FAILED”代表实例创建失败。 +func (c *Client) RunInstances(request *RunInstancesRequest) (response *RunInstancesResponse, err error) { + if request == nil { + request = NewRunInstancesRequest() 
+ } + response = NewRunInstancesResponse() + err = c.Send(request, response) + return +} + +func NewStartInstancesRequest() (request *StartInstancesRequest) { + request = &StartInstancesRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "StartInstances") + return +} + +func NewStartInstancesResponse() (response *StartInstancesResponse) { + response = &StartInstancesResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (StartInstances) 用于启动一个或多个实例。 +// +// * 只有状态为`STOPPED`的实例才可以进行此操作。 +// * 接口调用成功时,实例会进入`STARTING`状态;启动实例成功时,实例会进入`RUNNING`状态。 +// * 支持批量操作。每次请求批量实例的上限为100。 +// * 本接口为异步接口,启动实例请求发送成功后会返回一个RequestId,此时操作并未立即完成。实例操作结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表启动实例操作成功。 +func (c *Client) StartInstances(request *StartInstancesRequest) (response *StartInstancesResponse, err error) { + if request == nil { + request = NewStartInstancesRequest() + } + response = NewStartInstancesResponse() + err = c.Send(request, response) + return +} + +func NewStopInstancesRequest() (request *StopInstancesRequest) { + request = &StopInstancesRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "StopInstances") + return +} + +func NewStopInstancesResponse() (response *StopInstancesResponse) { + response = &StopInstancesResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口 (StopInstances) 用于关闭一个或多个实例。 +// +// * 只有状态为`RUNNING`的实例才可以进行此操作。 +// * 接口调用成功时,实例会进入`STOPPING`状态;关闭实例成功时,实例会进入`STOPPED`状态。 +// * 支持强制关闭。强制关机的效果等同于关闭物理计算机的电源开关。强制关机可能会导致数据丢失或文件系统损坏,请仅在服务器不能正常关机时使用。 +// * 支持批量操作。每次请求批量实例的上限为100。 +// * 本接口为异步接口,关闭实例请求发送成功后会返回一个RequestId,此时操作并未立即完成。实例操作结果可以通过调用 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728#.E7.A4.BA.E4.BE.8B3-.E6.9F.A5.E8.AF.A2.E5.AE.9E.E4.BE.8B.E7.9A.84.E6.9C.80.E6.96.B0.E6.93.8D.E4.BD.9C.E6.83.85.E5.86.B5) 接口查询,如果实例的最新操作状态(LatestOperationState)为“SUCCESS”,则代表关闭实例操作成功。 +func (c *Client) StopInstances(request *StopInstancesRequest) (response *StopInstancesResponse, err error) { + if request == nil { + request = NewStopInstancesRequest() + } + response = NewStopInstancesResponse() + err = c.Send(request, response) + return +} + +func NewSyncImagesRequest() (request *SyncImagesRequest) { + request = &SyncImagesRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "SyncImages") + return +} + +func NewSyncImagesResponse() (response *SyncImagesResponse) { + response = &SyncImagesResponse{ + BaseResponse: &tchttp.BaseResponse{}, + } + return +} + +// 本接口(SyncImages)用于将自定义镜像同步到其它地区。 +// +// * 该接口每次调用只支持同步一个镜像。 +// * 该接口支持多个同步地域。 +// * 单个帐号在每个地域最多支持存在10个自定义镜像。 +func (c *Client) SyncImages(request *SyncImagesRequest) (response *SyncImagesResponse, err error) { + if request == nil { + request = NewSyncImagesRequest() + } + response = NewSyncImagesResponse() + err = c.Send(request, response) + return +} + +func NewTerminateInstancesRequest() (request *TerminateInstancesRequest) { + request = &TerminateInstancesRequest{ + BaseRequest: &tchttp.BaseRequest{}, + } + request.Init().WithApiInfo("cvm", APIVersion, "TerminateInstances") + return +} + +func NewTerminateInstancesResponse() (response *TerminateInstancesResponse) { + response = &TerminateInstancesResponse{ + BaseResponse: 
&tchttp.BaseResponse{}, + } + return +} + +// 本接口 (TerminateInstances) 用于主动退还实例。 +// +// * 不再使用的实例,可通过本接口主动退还。 +// * 按量计费的实例通过本接口可直接退还;包年包月实例如符合[退还规则](https://cloud.tencent.com/document/product/213/9711),也可通过本接口主动退还。 +// * 包年包月实例首次调用本接口,实例将被移至回收站,再次调用本接口,实例将被销毁,且不可恢复。按量计费实例调用本接口将被直接销毁 +// * 支持批量操作,每次请求批量实例的上限为100。 +func (c *Client) TerminateInstances(request *TerminateInstancesRequest) (response *TerminateInstancesResponse, err error) { + if request == nil { + request = NewTerminateInstancesRequest() + } + response = NewTerminateInstancesResponse() + err = c.Send(request, response) + return +} diff --git a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312/models.go b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312/models.go new file mode 100644 index 000000000000..b814c27b9cae --- /dev/null +++ b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312/models.go @@ -0,0 +1,3853 @@ +// Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v20170312 + +import ( + "encoding/json" + + tchttp "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http" +) + +type ActionTimer struct { + + // 扩展数据 + Externals *Externals `json:"Externals,omitempty" name:"Externals"` + + // 定时器名称,目前仅支持销毁一个值:TerminateInstances。 + TimerAction *string `json:"TimerAction,omitempty" name:"TimerAction"` + + // 执行时间,格式形如:2018-5-29 11:26:40,执行时间必须大于当前时间5分钟。 + ActionTime *string `json:"ActionTime,omitempty" name:"ActionTime"` +} + +type AllocateHostsRequest struct { + *tchttp.BaseRequest + + // 实例所在的位置。通过该参数可以指定实例所属可用区,所属项目等属性。 + Placement *Placement `json:"Placement,omitempty" name:"Placement"` + + // 用于保证请求幂等性的字符串。 + ClientToken *string `json:"ClientToken,omitempty" name:"ClientToken"` + + // 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月实例的购买时长、是否设置自动续费等属性。若指定实例的付费模式为预付费则该参数必传。 + HostChargePrepaid *ChargePrepaid `json:"HostChargePrepaid,omitempty" name:"HostChargePrepaid"` + + // 实例计费类型。目前仅支持:PREPAID(预付费,即包年包月模式),默认为:'PREPAID'。 + HostChargeType *string `json:"HostChargeType,omitempty" name:"HostChargeType"` + + // CDH实例机型,默认为:'HS1'。 + HostType *string `json:"HostType,omitempty" name:"HostType"` + + // 购买CDH实例数量,默认为:1。 + HostCount *uint64 `json:"HostCount,omitempty" name:"HostCount"` + + // 标签描述列表。通过指定该参数可以同时绑定标签到相应的资源实例。 + TagSpecification []*TagSpecification `json:"TagSpecification,omitempty" name:"TagSpecification" list` +} + +func (r *AllocateHostsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *AllocateHostsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type AllocateHostsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 新创建云子机的实例id列表。 + HostIdSet []*string `json:"HostIdSet,omitempty" name:"HostIdSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + 
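+ // exampleAllocateHosts is an illustrative sketch only, not part of the upstream
+ // SDK: it shows the request/response calling pattern that every client method in
+ // this package follows. It assumes a *Client constructed elsewhere, and that the
+ // NewAllocateHostsRequest constructor and the AllocateHosts client method
+ // generated earlier in this package are available; the field values below are
+ // placeholders.
+ func exampleAllocateHosts(c *Client) (string, error) {
+ 	req := NewAllocateHostsRequest()
+ 
+ 	// Request fields are pointers, so optional parameters can simply be left nil.
+ 	chargeType := "PREPAID" // 目前仅支持预付费(包年包月)
+ 	hostCount := uint64(1)
+ 	req.HostChargeType = &chargeType
+ 	req.HostCount = &hostCount
+ 
+ 	resp, err := c.AllocateHosts(req)
+ 	if err != nil {
+ 		return "", err
+ 	}
+ 	// Responses marshal back to JSON the same way requests do.
+ 	return resp.ToJsonString(), nil
+ }
+ 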
+func (r *AllocateHostsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *AllocateHostsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type AssociateInstancesKeyPairsRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID,每次请求批量实例的上限为100。
    可以通过以下方式获取可用的实例ID:
  • 通过登录[控制台](https://console.cloud.tencent.com/cvm/index)查询实例ID。
  • 通过调用接口 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728) ,取返回信息中的`InstanceId`获取实例ID。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 一个或多个待操作的密钥对ID,每次请求批量密钥对的上限为100。密钥对ID形如:`skey-3glfot13`。
    可以通过以下方式获取可用的密钥ID:
  • 通过登录[控制台](https://console.cloud.tencent.com/cvm/sshkey)查询密钥ID。
  • 通过调用接口 [DescribeKeyPairs](https://cloud.tencent.com/document/api/213/15699) ,取返回信息中的`KeyId`获取密钥对ID。 + KeyIds []*string `json:"KeyIds,omitempty" name:"KeyIds" list` + + // 是否对运行中的实例选择强制关机。建议对运行中的实例先手动关机,然后再绑定密钥。取值范围:
  • TRUE:表示在正常关机失败后进行强制关机。
  • FALSE:表示在正常关机失败后不进行强制关机。
    默认取值:FALSE。 + ForceStop *bool `json:"ForceStop,omitempty" name:"ForceStop"` +} + +func (r *AssociateInstancesKeyPairsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *AssociateInstancesKeyPairsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type AssociateInstancesKeyPairsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *AssociateInstancesKeyPairsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *AssociateInstancesKeyPairsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type AssociateSecurityGroupsRequest struct { + *tchttp.BaseRequest + + // 要绑定的`安全组ID`,类似sg-efil73jd,只支持绑定单个安全组。 + SecurityGroupIds []*string `json:"SecurityGroupIds,omitempty" name:"SecurityGroupIds" list` + + // 被绑定的`实例ID`,类似ins-lesecurk,支持指定多个实例,每次请求批量实例的上限为100。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` +} + +func (r *AssociateSecurityGroupsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *AssociateSecurityGroupsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type AssociateSecurityGroupsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *AssociateSecurityGroupsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *AssociateSecurityGroupsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ChargePrepaid struct { + + // 购买实例的时长,单位:月。取值范围:1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 24, 36。 + Period *uint64 `json:"Period,omitempty" name:"Period"` + + // 自动续费标识。取值范围:
  • NOTIFY_AND_AUTO_RENEW:通知过期且自动续费
  • NOTIFY_AND_MANUAL_RENEW:通知过期不自动续费
  • DISABLE_NOTIFY_AND_MANUAL_RENEW:不通知过期不自动续费

    默认取值:NOTIFY_AND_AUTO_RENEW。若该参数指定为NOTIFY_AND_AUTO_RENEW,在账户余额充足的情况下,实例到期后将按月自动续费。 + RenewFlag *string `json:"RenewFlag,omitempty" name:"RenewFlag"` +} + +type CreateDisasterRecoverGroupRequest struct { + *tchttp.BaseRequest + + // 分散置放群组名称,长度1-60个字符,支持中、英文。 + Name *string `json:"Name,omitempty" name:"Name"` + + // 分散置放群组类型,取值范围:
  • HOST:物理机
  • SW:交换机
  • RACK:机架 + Type *string `json:"Type,omitempty" name:"Type"` + + // 用于保证请求幂等性的字符串。该字符串由客户生成,需保证不同请求之间唯一,最大值不超过64个ASCII字符。若不指定该参数,则无法保证请求的幂等性。
    更多详细信息请参阅:如何保证幂等性。 + ClientToken *string `json:"ClientToken,omitempty" name:"ClientToken"` +} + +func (r *CreateDisasterRecoverGroupRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *CreateDisasterRecoverGroupRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type CreateDisasterRecoverGroupResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 分散置放群组ID列表。 + DisasterRecoverGroupId *string `json:"DisasterRecoverGroupId,omitempty" name:"DisasterRecoverGroupId"` + + // 分散置放群组类型,取值范围:
  • HOST:物理机
  • SW:交换机
  • RACK:机架 + Type *string `json:"Type,omitempty" name:"Type"` + + // 分散置放群组名称,长度1-60个字符,支持中、英文。 + Name *string `json:"Name,omitempty" name:"Name"` + + // 置放群组内可容纳的云服务器数量。 + CvmQuotaTotal *int64 `json:"CvmQuotaTotal,omitempty" name:"CvmQuotaTotal"` + + // 置放群组内已有的云服务器数量。 + CurrentNum *int64 `json:"CurrentNum,omitempty" name:"CurrentNum"` + + // 置放群组创建时间。 + CreateTime *string `json:"CreateTime,omitempty" name:"CreateTime"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *CreateDisasterRecoverGroupResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *CreateDisasterRecoverGroupResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type CreateImageRequest struct { + *tchttp.BaseRequest + + // 镜像名称 + ImageName *string `json:"ImageName,omitempty" name:"ImageName"` + + // 需要制作镜像的实例ID。 + InstanceId *string `json:"InstanceId,omitempty" name:"InstanceId"` + + // 镜像描述 + ImageDescription *string `json:"ImageDescription,omitempty" name:"ImageDescription"` + + // 是否执行强制关机以制作镜像。 + // 取值范围:
• TRUE:表示关机之后制作镜像
+ // • FALSE:表示开机状态制作镜像
+ // 默认取值:FALSE。
+ //
    开机状态制作镜像,可能导致部分数据未备份,影响数据安全。 + ForcePoweroff *string `json:"ForcePoweroff,omitempty" name:"ForcePoweroff"` + + // 创建Windows镜像时是否启用Sysprep + Sysprep *string `json:"Sysprep,omitempty" name:"Sysprep"` + + // 基于实例创建整机镜像时,指定包含在镜像里的数据盘Id + DataDiskIds []*string `json:"DataDiskIds,omitempty" name:"DataDiskIds" list` + + // 基于快照创建镜像,指定快照ID,必须包含一个系统盘快照。不可与InstanceId同时传入。 + SnapshotIds []*string `json:"SnapshotIds,omitempty" name:"SnapshotIds" list` + + // 检测本次请求的是否成功,但不会对操作的资源产生任何影响 + DryRun *bool `json:"DryRun,omitempty" name:"DryRun"` +} + +func (r *CreateImageRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *CreateImageRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type CreateImageResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 镜像ID + // 注意:此字段可能返回 null,表示取不到有效值。 + ImageId *string `json:"ImageId,omitempty" name:"ImageId"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *CreateImageResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *CreateImageResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type CreateKeyPairRequest struct { + *tchttp.BaseRequest + + // 密钥对名称,可由数字,字母和下划线组成,长度不超过25个字符。 + KeyName *string `json:"KeyName,omitempty" name:"KeyName"` + + // 密钥对创建后所属的项目ID。 + // 可以通过以下方式获取项目ID: + //
  • 通过项目列表查询项目ID。 + //
  • 通过调用接口DescribeProject,取返回信息中的`projectId `获取项目ID。 + ProjectId *int64 `json:"ProjectId,omitempty" name:"ProjectId"` +} + +func (r *CreateKeyPairRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *CreateKeyPairRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type CreateKeyPairResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 密钥对信息。 + KeyPair *KeyPair `json:"KeyPair,omitempty" name:"KeyPair"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *CreateKeyPairResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *CreateKeyPairResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DataDisk struct { + + // 数据盘大小,单位:GB。最小调整步长为10G,不同数据盘类型取值范围不同,具体限制详见:[存储概述](https://cloud.tencent.com/document/product/213/4952)。默认值为0,表示不购买数据盘。更多限制详见产品文档。 + DiskSize *int64 `json:"DiskSize,omitempty" name:"DiskSize"` + + // 数据盘类型。数据盘类型限制详见[存储概述](https://cloud.tencent.com/document/product/213/4952)。取值范围:
• LOCAL_BASIC:本地硬盘
+ // • LOCAL_SSD:本地SSD硬盘
+ // • CLOUD_BASIC:普通云硬盘
+ // • CLOUD_PREMIUM:高性能云硬盘
+ // • CLOUD_SSD:SSD云硬盘
+ // 默认取值:LOCAL_BASIC。
+ //
    该参数对`ResizeInstanceDisk`接口无效。 + DiskType *string `json:"DiskType,omitempty" name:"DiskType"` + + // 数据盘ID。LOCAL_BASIC 和 LOCAL_SSD 类型没有ID,暂时不支持该参数。 + DiskId *string `json:"DiskId,omitempty" name:"DiskId"` + + // 数据盘是否随子机销毁。取值范围: + //
• TRUE:子机销毁时,销毁数据盘,只支持按小时后付费云盘
+ // • FALSE:子机销毁时,保留数据盘
+ // 默认取值:TRUE
    + // 该参数目前仅用于 `RunInstances` 接口。 + // 注意:此字段可能返回 null,表示取不到有效值。 + DeleteWithInstance *bool `json:"DeleteWithInstance,omitempty" name:"DeleteWithInstance"` + + // 数据盘快照ID。选择的数据盘快照大小需小于数据盘大小。 + // 注意:此字段可能返回 null,表示取不到有效值。 + SnapshotId *string `json:"SnapshotId,omitempty" name:"SnapshotId"` + + // 数据盘是加密。取值范围: + //
• TRUE:加密
+ // • FALSE:不加密
+ // 默认取值:FALSE
    + // 该参数目前仅用于 `RunInstances` 接口。 + // 注意:此字段可能返回 null,表示取不到有效值。 + Encrypt *bool `json:"Encrypt,omitempty" name:"Encrypt"` +} + +type DeleteDisasterRecoverGroupsRequest struct { + *tchttp.BaseRequest + + // 分散置放群组ID列表,可通过[DescribeDisasterRecoverGroups](https://cloud.tencent.com/document/api/213/17810)接口获取。每次请求允许操作的分散置放群组数量上限是100。 + DisasterRecoverGroupIds []*string `json:"DisasterRecoverGroupIds,omitempty" name:"DisasterRecoverGroupIds" list` +} + +func (r *DeleteDisasterRecoverGroupsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DeleteDisasterRecoverGroupsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DeleteDisasterRecoverGroupsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DeleteDisasterRecoverGroupsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DeleteDisasterRecoverGroupsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DeleteImagesRequest struct { + *tchttp.BaseRequest + + // 准备删除的镜像Id列表 + ImageIds []*string `json:"ImageIds,omitempty" name:"ImageIds" list` +} + +func (r *DeleteImagesRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DeleteImagesRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DeleteImagesResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DeleteImagesResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DeleteImagesResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DeleteKeyPairsRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的密钥对ID。每次请求批量密钥对的上限为100。
    可以通过以下方式获取可用的密钥ID:
  • 通过登录[控制台](https://console.cloud.tencent.com/cvm/sshkey)查询密钥ID。
  • 通过调用接口 [DescribeKeyPairs](https://cloud.tencent.com/document/api/213/15699) ,取返回信息中的 `KeyId` 获取密钥对ID。 + KeyIds []*string `json:"KeyIds,omitempty" name:"KeyIds" list` +} + +func (r *DeleteKeyPairsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DeleteKeyPairsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DeleteKeyPairsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DeleteKeyPairsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DeleteKeyPairsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeDisasterRecoverGroupQuotaRequest struct { + *tchttp.BaseRequest +} + +func (r *DescribeDisasterRecoverGroupQuotaRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeDisasterRecoverGroupQuotaRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeDisasterRecoverGroupQuotaResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 可创建置放群组数量的上限。 + GroupQuota *int64 `json:"GroupQuota,omitempty" name:"GroupQuota"` + + // 当前用户已经创建的置放群组数量。 + CurrentNum *int64 `json:"CurrentNum,omitempty" name:"CurrentNum"` + + // 物理机类型容灾组内实例的配额数。 + CvmInHostGroupQuota *int64 `json:"CvmInHostGroupQuota,omitempty" name:"CvmInHostGroupQuota"` + + // 交换机类型容灾组内实例的配额数。 + CvmInSwGroupQuota *int64 `json:"CvmInSwGroupQuota,omitempty" name:"CvmInSwGroupQuota"` + + // 机架类型容灾组内实例的配额数。 + CvmInRackGroupQuota *int64 `json:"CvmInRackGroupQuota,omitempty" name:"CvmInRackGroupQuota"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeDisasterRecoverGroupQuotaResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeDisasterRecoverGroupQuotaResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeDisasterRecoverGroupsRequest struct { + *tchttp.BaseRequest + + // 分散置放群组ID列表。每次请求允许操作的分散置放群组数量上限是100。 + DisasterRecoverGroupIds []*string `json:"DisasterRecoverGroupIds,omitempty" name:"DisasterRecoverGroupIds" list` + + // 分散置放群组名称,支持模糊匹配。 + Name *string `json:"Name,omitempty" name:"Name"` + + // 偏移量,默认为0。关于`Offset`的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。 + Offset *int64 `json:"Offset,omitempty" name:"Offset"` + + // 返回数量,默认为20,最大值为100。关于`Limit`的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。 + Limit *int64 `json:"Limit,omitempty" name:"Limit"` +} + +func (r *DescribeDisasterRecoverGroupsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeDisasterRecoverGroupsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeDisasterRecoverGroupsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 分散置放群组信息列表。 + DisasterRecoverGroupSet []*DisasterRecoverGroup `json:"DisasterRecoverGroupSet,omitempty" name:"DisasterRecoverGroupSet" list` + + // 用户置放群组总量。 + TotalCount *int64 `json:"TotalCount,omitempty" name:"TotalCount"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } 
`json:"Response"` +} + +func (r *DescribeDisasterRecoverGroupsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeDisasterRecoverGroupsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeHostsRequest struct { + *tchttp.BaseRequest + + //
• zone - String - 必选:否 - 按照【可用区】进行过滤。可用区形如:ap-guangzhou-1。可选项:可用区列表。
+ // • project-id - Integer - 必选:否 - 按照【项目ID】进行过滤,可通过调用[DescribeProject](https://cloud.tencent.com/document/api/378/4400)查询已创建的项目列表或登录[控制台](https://console.cloud.tencent.com/cvm/index)进行查看;也可以调用[AddProject](https://cloud.tencent.com/document/api/378/4398)创建新的项目。项目ID形如:1002189。
+ // • host-id - String - 必选:否 - 按照【[CDH](https://cloud.tencent.com/document/product/416) ID】进行过滤。[CDH](https://cloud.tencent.com/document/product/416) ID形如:host-xxxxxxxx。
+ // • host-name - String - 必选:否 - 按照【CDH实例名称】进行过滤。
+ // • host-state - String - 必选:否 - 按照【CDH实例状态】进行过滤。(PENDING:创建中 | LAUNCH_FAILURE:创建失败 | RUNNING:运行中 | EXPIRED:已过期)
    + // 每次请求的`Filters`的上限为10,`Filter.Values`的上限为5。 + Filters []*Filter `json:"Filters,omitempty" name:"Filters" list` + + // 偏移量,默认为0。 + Offset *uint64 `json:"Offset,omitempty" name:"Offset"` + + // 返回数量,默认为20,最大值为100。 + Limit *uint64 `json:"Limit,omitempty" name:"Limit"` +} + +func (r *DescribeHostsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeHostsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeHostsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 符合查询条件的cdh实例总数 + TotalCount *uint64 `json:"TotalCount,omitempty" name:"TotalCount"` + + // cdh实例详细信息列表 + HostSet []*HostItem `json:"HostSet,omitempty" name:"HostSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeHostsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeHostsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeImageQuotaRequest struct { + *tchttp.BaseRequest +} + +func (r *DescribeImageQuotaRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeImageQuotaRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeImageQuotaResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 账户的镜像配额 + ImageNumQuota *int64 `json:"ImageNumQuota,omitempty" name:"ImageNumQuota"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeImageQuotaResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeImageQuotaResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeImageSharePermissionRequest struct { + *tchttp.BaseRequest + + // 需要共享的镜像Id + ImageId *string `json:"ImageId,omitempty" name:"ImageId"` +} + +func (r *DescribeImageSharePermissionRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeImageSharePermissionRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeImageSharePermissionResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 镜像共享信息 + SharePermissionSet []*SharePermission `json:"SharePermissionSet,omitempty" name:"SharePermissionSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeImageSharePermissionResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeImageSharePermissionResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeImagesRequest struct { + *tchttp.BaseRequest + + // 镜像ID列表 。镜像ID如:`img-gvbnzy6f`。array型参数的格式可以参考[API简介](https://cloud.tencent.com/document/api/213/15688)。镜像ID可以通过如下方式获取:
  • 通过[DescribeImages](https://cloud.tencent.com/document/api/213/15715)接口返回的`ImageId`获取。
• 通过[镜像控制台](https://console.cloud.tencent.com/cvm/image)获取。 + ImageIds []*string `json:"ImageIds,omitempty" name:"ImageIds" list` + + // 过滤条件,每次请求的`Filters`的上限为10,`Filters.Values`的上限为5。参数不可以同时指定`ImageIds`和`Filters`。详细的过滤条件如下:
+ // • image-id - String - 是否必填: 否 - (过滤条件)按照镜像ID进行过滤
+ // • image-type - String - 是否必填: 否 - (过滤条件)按照镜像类型进行过滤。取值范围: PRIVATE_IMAGE: 私有镜像 (本账户创建的镜像) | PUBLIC_IMAGE: 公共镜像 (腾讯云官方镜像) | SHARED_IMAGE: 共享镜像(其他账户共享给本账户的镜像)。
  • + Filters []*Filter `json:"Filters,omitempty" name:"Filters" list` + + // 偏移量,默认为0。关于Offset详见[API简介](/document/api/213/568#.E8.BE.93.E5.85.A5.E5.8F.82.E6.95.B0.E4.B8.8E.E8.BF.94.E5.9B.9E.E5.8F.82.E6.95.B0.E9.87.8A.E4.B9.89)。 + Offset *uint64 `json:"Offset,omitempty" name:"Offset"` + + // 数量限制,默认为20,最大值为100。关于Limit详见[API简介](/document/api/213/568#.E8.BE.93.E5.85.A5.E5.8F.82.E6.95.B0.E4.B8.8E.E8.BF.94.E5.9B.9E.E5.8F.82.E6.95.B0.E9.87.8A.E4.B9.89)。 + Limit *uint64 `json:"Limit,omitempty" name:"Limit"` + + // 实例类型,如 `S1.SMALL1` + InstanceType *string `json:"InstanceType,omitempty" name:"InstanceType"` +} + +func (r *DescribeImagesRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeImagesRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeImagesResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 一个关于镜像详细信息的结构体,主要包括镜像的主要状态与属性。 + ImageSet []*Image `json:"ImageSet,omitempty" name:"ImageSet" list` + + // 符合要求的镜像数量。 + TotalCount *int64 `json:"TotalCount,omitempty" name:"TotalCount"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeImagesResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeImagesResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeImportImageOsRequest struct { + *tchttp.BaseRequest +} + +func (r *DescribeImportImageOsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeImportImageOsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeImportImageOsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 支持的导入镜像的操作系统类型。 + ImportImageOsListSupported *ImageOsList `json:"ImportImageOsListSupported,omitempty" name:"ImportImageOsListSupported"` + + // 支持的导入镜像的操作系统版本。 + ImportImageOsVersionSet []*OsVersion `json:"ImportImageOsVersionSet,omitempty" name:"ImportImageOsVersionSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeImportImageOsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeImportImageOsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInstanceFamilyConfigsRequest struct { + *tchttp.BaseRequest +} + +func (r *DescribeInstanceFamilyConfigsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInstanceFamilyConfigsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInstanceFamilyConfigsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 实例机型组配置的列表信息 + InstanceFamilyConfigSet []*InstanceFamilyConfig `json:"InstanceFamilyConfigSet,omitempty" name:"InstanceFamilyConfigSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeInstanceFamilyConfigsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInstanceFamilyConfigsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInstanceInternetBandwidthConfigsRequest struct { + 
*tchttp.BaseRequest + + // 待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`InstanceId`获取。 + InstanceId *string `json:"InstanceId,omitempty" name:"InstanceId"` +} + +func (r *DescribeInstanceInternetBandwidthConfigsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInstanceInternetBandwidthConfigsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInstanceInternetBandwidthConfigsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 带宽配置信息列表。 + InternetBandwidthConfigSet []*InternetBandwidthConfig `json:"InternetBandwidthConfigSet,omitempty" name:"InternetBandwidthConfigSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeInstanceInternetBandwidthConfigsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInstanceInternetBandwidthConfigsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInstanceTypeConfigsRequest struct { + *tchttp.BaseRequest + + //
• zone - String - 必选:否 - 按照【可用区】进行过滤。可用区形如:ap-guangzhou-1。可选项:可用区列表。
+ // • instance-family - String - 必选:否 - 按照【实例机型系列】进行过滤。实例机型系列形如:S1、I1、M1等。
    + // 每次请求的`Filters`的上限为10,`Filter.Values`的上限为1。 + Filters []*Filter `json:"Filters,omitempty" name:"Filters" list` +} + +func (r *DescribeInstanceTypeConfigsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInstanceTypeConfigsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInstanceTypeConfigsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 实例机型配置列表。 + InstanceTypeConfigSet []*InstanceTypeConfig `json:"InstanceTypeConfigSet,omitempty" name:"InstanceTypeConfigSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeInstanceTypeConfigsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInstanceTypeConfigsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInstanceVncUrlRequest struct { + *tchttp.BaseRequest + + // 一个操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728) API返回值中的`InstanceId`获取。 + InstanceId *string `json:"InstanceId,omitempty" name:"InstanceId"` +} + +func (r *DescribeInstanceVncUrlRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInstanceVncUrlRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInstanceVncUrlResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 实例的管理终端地址。 + InstanceVncUrl *string `json:"InstanceVncUrl,omitempty" name:"InstanceVncUrl"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeInstanceVncUrlResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInstanceVncUrlResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInstancesOperationLimitRequest struct { + *tchttp.BaseRequest + + // 按照一个或者多个实例ID查询,可通过[DescribeInstances](https://cloud.tencent.com/document/api/213/15728)API返回值中的InstanceId获取。实例ID形如:ins-xxxxxxxx。(此参数的具体格式可参考API[简介](https://cloud.tencent.com/document/api/213/15688)的ids.N一节)。每次请求的实例的上限为100。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 实例操作。 + //
  • INSTANCE_DEGRADE:实例降配操作
  • + Operation *string `json:"Operation,omitempty" name:"Operation"` +} + +func (r *DescribeInstancesOperationLimitRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInstancesOperationLimitRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInstancesOperationLimitResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 该参数表示调整配置操作(降配)限制次数查询。 + InstanceOperationLimitSet []*OperationCountLimit `json:"InstanceOperationLimitSet,omitempty" name:"InstanceOperationLimitSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeInstancesOperationLimitResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInstancesOperationLimitResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInstancesRequest struct { + *tchttp.BaseRequest + + // 按照一个或者多个实例ID查询。实例ID形如:`ins-xxxxxxxx`。(此参数的具体格式可参考API[简介](https://cloud.tencent.com/document/api/213/15688)的`ids.N`一节)。每次请求的实例的上限为100。参数不支持同时指定`InstanceIds`和`Filters`。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + //
• zone - String - 必选:否 - 按照【可用区】进行过滤。可用区形如:ap-guangzhou-1。可选项:可用区列表。
+ // • project-id - Integer - 必选:否 - 按照【项目ID】进行过滤,可通过调用[DescribeProject](https://cloud.tencent.com/document/api/378/4400)查询已创建的项目列表或登录[控制台](https://console.cloud.tencent.com/cvm/index)进行查看;也可以调用[AddProject](https://cloud.tencent.com/document/api/378/4398)创建新的项目。项目ID形如:1002189。
+ // • host-id - String - 必选:否 - 按照【[CDH](https://cloud.tencent.com/document/product/416) ID】进行过滤。[CDH](https://cloud.tencent.com/document/product/416) ID形如:host-xxxxxxxx。
+ // • vpc-id - String - 必选:否 - 按照【VPC ID】进行过滤。VPC ID形如:vpc-xxxxxxxx。
+ // • subnet-id - String - 必选:否 - 按照【子网ID】进行过滤。子网ID形如:subnet-xxxxxxxx。
+ // • instance-id - String - 必选:否 - 按照【实例ID】进行过滤。实例ID形如:ins-xxxxxxxx。
+ // • security-group-id - String - 必选:否 - 按照【安全组ID】进行过滤。安全组ID形如: sg-8jlk3f3r。
+ // • instance-name - String - 必选:否 - 按照【实例名称】进行过滤。
+ // • instance-charge-type - String - 必选:否 - 按照【实例计费模式】进行过滤。(PREPAID:表示预付费,即包年包月 | POSTPAID_BY_HOUR:表示后付费,即按量计费 | CDHPAID:表示[CDH](https://cloud.tencent.com/document/product/416)付费,即只对[CDH](https://cloud.tencent.com/document/product/416)计费,不对[CDH](https://cloud.tencent.com/document/product/416)上的实例计费。)
+ // • private-ip-address - String - 必选:否 - 按照【实例主网卡的内网IP】进行过滤。
+ // • public-ip-address - String - 必选:否 - 按照【实例主网卡的公网IP】进行过滤,包含实例创建时自动分配的IP和实例创建后手动绑定的弹性IP。
+ // • tag-key - String - 必选:否 - 按照【标签键】进行过滤。
+ // • tag-value - String - 必选:否 - 按照【标签值】进行过滤。
+ // • tag:tag-key - String - 必选:否 - 按照【标签键值对】进行过滤。tag-key使用具体的标签键进行替换。使用请参考示例2。
    + // 每次请求的`Filters`的上限为10,`Filter.Values`的上限为5。参数不支持同时指定`InstanceIds`和`Filters`。 + Filters []*Filter `json:"Filters,omitempty" name:"Filters" list` + + // 偏移量,默认为0。关于`Offset`的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。 + Offset *int64 `json:"Offset,omitempty" name:"Offset"` + + // 返回数量,默认为20,最大值为100。关于`Limit`的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。 + Limit *int64 `json:"Limit,omitempty" name:"Limit"` +} + +func (r *DescribeInstancesRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInstancesRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInstancesResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 符合条件的实例数量。 + TotalCount *int64 `json:"TotalCount,omitempty" name:"TotalCount"` + + // 实例详细信息列表。 + InstanceSet []*Instance `json:"InstanceSet,omitempty" name:"InstanceSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeInstancesResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInstancesResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInstancesStatusRequest struct { + *tchttp.BaseRequest + + // 按照一个或者多个实例ID查询。实例ID形如:`ins-11112222`。此参数的具体格式可参考API[简介](https://cloud.tencent.com/document/api/213/15688)的`ids.N`一节)。每次请求的实例的上限为100。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 偏移量,默认为0。关于`Offset`的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。 + Offset *int64 `json:"Offset,omitempty" name:"Offset"` + + // 返回数量,默认为20,最大值为100。关于`Limit`的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。 + Limit *int64 `json:"Limit,omitempty" name:"Limit"` +} + +func (r *DescribeInstancesStatusRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInstancesStatusRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInstancesStatusResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 符合条件的实例状态数量。 + TotalCount *int64 `json:"TotalCount,omitempty" name:"TotalCount"` + + // [实例状态](https://cloud.tencent.com/document/api/213/15753#InstanceStatus) 列表。 + InstanceStatusSet []*InstanceStatus `json:"InstanceStatusSet,omitempty" name:"InstanceStatusSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeInstancesStatusResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInstancesStatusResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInternetChargeTypeConfigsRequest struct { + *tchttp.BaseRequest +} + +func (r *DescribeInternetChargeTypeConfigsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInternetChargeTypeConfigsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeInternetChargeTypeConfigsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 网络计费类型配置。 + InternetChargeTypeConfigSet []*InternetChargeTypeConfig `json:"InternetChargeTypeConfigSet,omitempty" name:"InternetChargeTypeConfigSet" list` + + // 唯一请求 
ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeInternetChargeTypeConfigsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeInternetChargeTypeConfigsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeKeyPairsRequest struct { + *tchttp.BaseRequest + + // 密钥对ID,密钥对ID形如:`skey-11112222`(此接口支持同时传入多个ID进行过滤。此参数的具体格式可参考 API [简介](https://cloud.tencent.com/document/api/213/15688)的 `id.N` 一节)。参数不支持同时指定 `KeyIds` 和 `Filters`。密钥对ID可以通过登录[控制台](https://console.cloud.tencent.com/cvm/index)查询。 + KeyIds []*string `json:"KeyIds,omitempty" name:"KeyIds" list` + + // 过滤条件。 + //
  • project-id - Integer - 是否必填:否 -(过滤条件)按照项目ID过滤。可以通过[项目列表](https://console.cloud.tencent.com/project)查询项目ID,或者调用接口 [DescribeProject](https://cloud.tencent.com/document/api/378/4400),取返回信息中的projectId获取项目ID。
  • + //
  • key-name - String - 是否必填:否 -(过滤条件)按照密钥对名称过滤。
  • 参数不支持同时指定 `KeyIds` 和 `Filters`。 + Filters []*Filter `json:"Filters,omitempty" name:"Filters" list` + + // 偏移量,默认为0。关于 `Offset` 的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。返回数量,默认为20,最大值为100。关于 `Limit` 的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。 + Offset *int64 `json:"Offset,omitempty" name:"Offset"` + + // 返回数量,默认为20,最大值为100。关于 `Limit` 的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。 + Limit *int64 `json:"Limit,omitempty" name:"Limit"` +} + +func (r *DescribeKeyPairsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeKeyPairsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeKeyPairsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 符合条件的密钥对数量。 + TotalCount *int64 `json:"TotalCount,omitempty" name:"TotalCount"` + + // 密钥对详细信息列表。 + KeyPairSet []*KeyPair `json:"KeyPairSet,omitempty" name:"KeyPairSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeKeyPairsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeKeyPairsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeRegionsRequest struct { + *tchttp.BaseRequest +} + +func (r *DescribeRegionsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeRegionsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeRegionsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 地域数量 + TotalCount *uint64 `json:"TotalCount,omitempty" name:"TotalCount"` + + // 地域列表信息 + RegionSet []*RegionInfo `json:"RegionSet,omitempty" name:"RegionSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeRegionsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeRegionsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeReservedInstancesOfferingsRequest struct { + *tchttp.BaseRequest + + // 试运行, 默认为 false。 + DryRun *bool `json:"DryRun,omitempty" name:"DryRun"` + + // 偏移量,默认为0。关于`Offset`的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。 + Offset *int64 `json:"Offset,omitempty" name:"Offset"` + + // 返回数量,默认为20,最大值为100。关于`Limit`的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。 + Limit *int64 `json:"Limit,omitempty" name:"Limit"` + + // 以最大有效期作为过滤参数。 + // 计量单位: 秒 + // 默认为 94608000。 + MaxDuration *int64 `json:"MaxDuration,omitempty" name:"MaxDuration"` + + // 以最小有效期作为过滤参数。 + // 计量单位: 秒 + // 默认为 2592000。 + MinDuration *int64 `json:"MinDuration,omitempty" name:"MinDuration"` + + //
• zone - String - 必选:否 - 按照预留实例计费可购买的【可用区】进行过滤。形如:ap-guangzhou-1。可选项:可用区列表。
+ // • duration - Integer - 必选:否 - 按照预留实例计费【有效期】即预留实例计费购买时长进行过滤。形如:31536000。计量单位:秒。可选项:31536000 (1年) | 94608000(3年)。
+ // • instance-type - String - 必选:否 - 按照【预留实例计费类型】进行过滤。形如:S3.MEDIUM4。可选项:预留实例计费类型列表。
+ // • offering-type - String - 必选:否 - 按照【付款类型】进行过滤。形如:All Upfront (预付全部费用)。可选项:All Upfront (预付全部费用)。
+ // • product-description - String - 必选:否 - 按照预留实例计费的【平台描述】(即操作系统)进行过滤。形如:linux。可选项:linux。
+ // • reserved-instances-offering-id - String - 必选:否 - 按照【预留实例计费配置ID】进行过滤。形如:650c138f-ae7e-4750-952a-96841d6e9fc1。
    + // 每次请求的`Filters`的上限为10,`Filter.Values`的上限为5。 + Filters []*Filter `json:"Filters,omitempty" name:"Filters" list` +} + +func (r *DescribeReservedInstancesOfferingsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeReservedInstancesOfferingsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeReservedInstancesOfferingsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 符合条件的预留实例计费数量。 + TotalCount *int64 `json:"TotalCount,omitempty" name:"TotalCount"` + + // 符合条件的预留实例计费列表。 + ReservedInstancesOfferingsSet []*ReservedInstancesOffering `json:"ReservedInstancesOfferingsSet,omitempty" name:"ReservedInstancesOfferingsSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeReservedInstancesOfferingsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeReservedInstancesOfferingsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeReservedInstancesRequest struct { + *tchttp.BaseRequest + + // 试运行。默认为 false。 + DryRun *bool `json:"DryRun,omitempty" name:"DryRun"` + + // 偏移量,默认为0。关于`Offset`的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。 + Offset *int64 `json:"Offset,omitempty" name:"Offset"` + + // 返回数量,默认为20,最大值为100。关于`Limit`的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。 + Limit *int64 `json:"Limit,omitempty" name:"Limit"` + + //
• zone - String - 必选:否 - 按照预留实例计费可购买的【可用区】进行过滤。形如:ap-guangzhou-1。可选项:可用区列表。
+ // • duration - Integer - 必选:否 - 按照预留实例计费【有效期】即预留实例计费购买时长进行过滤。形如:31536000。计量单位:秒。可选项:31536000 (1年) | 94608000(3年)。
+ // • instance-type - String - 必选:否 - 按照【预留实例计费类型】进行过滤。形如:S3.MEDIUM4。可选项:预留实例计费类型列表。
+ // • offering-type - String - 必选:否 - 按照【付款类型】进行过滤。形如:All Upfront (预付全部费用)。可选项:All Upfront (预付全部费用)。
+ // • product-description - String - 必选:否 - 按照预留实例计费的【平台描述】(即操作系统)进行过滤。形如:linux。可选项:linux。
+ // • reserved-instances-id - String - 必选:否 - 按照已购买【预留实例计费ID】进行过滤。形如:650c138f-ae7e-4750-952a-96841d6e9fc1。
+ // • state - String - 必选:否 - 按照已购买【预留实例计费状态】进行过滤。形如:active。可选项:active (已创建) | pending (等待被创建) | retired (过期)。
    + // 每次请求的`Filters`的上限为10,`Filter.Values`的上限为5。 + Filters []*Filter `json:"Filters,omitempty" name:"Filters" list` +} + +func (r *DescribeReservedInstancesRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeReservedInstancesRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeReservedInstancesResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 符合条件的预留实例计费数量。 + TotalCount *int64 `json:"TotalCount,omitempty" name:"TotalCount"` + + // 符合条件的预留实例计费列表。 + ReservedInstancesSet []*ReservedInstances `json:"ReservedInstancesSet,omitempty" name:"ReservedInstancesSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeReservedInstancesResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeReservedInstancesResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeZoneInstanceConfigInfosRequest struct { + *tchttp.BaseRequest + + //
• zone - String - 必选:否 - 按照【可用区】进行过滤。可用区形如:ap-guangzhou-1。可选项:可用区列表。
+ // • instance-family - String - 必选:否 - 按照【实例机型系列】进行过滤。实例机型系列形如:S1、I1、M1等。
+ // • instance-type - String - 必选:否 - 按照【实例机型】进行过滤。不同实例机型指定了不同的资源规格,具体取值可通过调用接口 [DescribeInstanceTypeConfigs](https://cloud.tencent.com/document/product/213/15749) 来获得最新的规格表或参见[实例类型](https://cloud.tencent.com/document/product/213/11518)描述。若不指定该参数,则默认机型为S1.SMALL1。
+ // • instance-charge-type - String - 必选:否 - 按照【实例计费模式】进行过滤。(PREPAID:表示预付费,即包年包月 | POSTPAID_BY_HOUR:表示后付费,即按量计费 | CDHPAID:表示[CDH](https://cloud.tencent.com/document/product/416)付费,即只对[CDH](https://cloud.tencent.com/document/product/416)计费,不对[CDH](https://cloud.tencent.com/document/product/416)上的实例计费。)
    + // 每次请求的`Filters`的上限为10,`Filter.Values`的上限为5。 + Filters []*Filter `json:"Filters,omitempty" name:"Filters" list` +} + +func (r *DescribeZoneInstanceConfigInfosRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeZoneInstanceConfigInfosRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeZoneInstanceConfigInfosResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 可用区机型配置列表。 + InstanceTypeQuotaSet []*InstanceTypeQuotaItem `json:"InstanceTypeQuotaSet,omitempty" name:"InstanceTypeQuotaSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeZoneInstanceConfigInfosResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeZoneInstanceConfigInfosResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeZonesRequest struct { + *tchttp.BaseRequest +} + +func (r *DescribeZonesRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeZonesRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DescribeZonesResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 可用区数量。 + TotalCount *uint64 `json:"TotalCount,omitempty" name:"TotalCount"` + + // 可用区列表信息。 + ZoneSet []*ZoneInfo `json:"ZoneSet,omitempty" name:"ZoneSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DescribeZonesResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DescribeZonesResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DisassociateInstancesKeyPairsRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID,每次请求批量实例的上限为100。

    可以通过以下方式获取可用的实例ID:
  • 通过登录[控制台](https://console.cloud.tencent.com/cvm/index)查询实例ID。
  • 通过调用接口 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728) ,取返回信息中的 `InstanceId` 获取实例ID。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 密钥对ID列表,每次请求批量密钥对的上限为100。密钥对ID形如:`skey-11112222`。

    可以通过以下方式获取可用的密钥ID:
  • 通过登录[控制台](https://console.cloud.tencent.com/cvm/sshkey)查询密钥ID。
  • 通过调用接口 [DescribeKeyPairs](https://cloud.tencent.com/document/api/213/15699) ,取返回信息中的 `KeyId` 获取密钥对ID。 + KeyIds []*string `json:"KeyIds,omitempty" name:"KeyIds" list` + + // 是否对运行中的实例选择强制关机。建议对运行中的实例先手动关机,然后再解绑密钥。取值范围:
  • TRUE:表示在正常关机失败后进行强制关机。
  • FALSE:表示在正常关机失败后不进行强制关机。

    默认取值:FALSE。 + ForceStop *bool `json:"ForceStop,omitempty" name:"ForceStop"` +} + +func (r *DisassociateInstancesKeyPairsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DisassociateInstancesKeyPairsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DisassociateInstancesKeyPairsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DisassociateInstancesKeyPairsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DisassociateInstancesKeyPairsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DisassociateSecurityGroupsRequest struct { + *tchttp.BaseRequest + + // 要解绑的`安全组ID`,类似sg-efil73jd,只支持解绑单个安全组。 + SecurityGroupIds []*string `json:"SecurityGroupIds,omitempty" name:"SecurityGroupIds" list` + + // 被解绑的`实例ID`,类似ins-lesecurk,支持指定多个实例 。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` +} + +func (r *DisassociateSecurityGroupsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DisassociateSecurityGroupsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DisassociateSecurityGroupsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *DisassociateSecurityGroupsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *DisassociateSecurityGroupsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type DisasterRecoverGroup struct { + + // 分散置放群组id。 + DisasterRecoverGroupId *string `json:"DisasterRecoverGroupId,omitempty" name:"DisasterRecoverGroupId"` + + // 分散置放群组名称,长度1-60个字符。 + Name *string `json:"Name,omitempty" name:"Name"` + + // 分散置放群组类型,取值范围:
+ // • HOST: physical machine
+ // • SW: switch
  • RACK:机架 + Type *string `json:"Type,omitempty" name:"Type"` + + // 分散置放群组内最大容纳云服务器数量。 + CvmQuotaTotal *int64 `json:"CvmQuotaTotal,omitempty" name:"CvmQuotaTotal"` + + // 分散置放群组内云服务器当前数量。 + CurrentNum *int64 `json:"CurrentNum,omitempty" name:"CurrentNum"` + + // 分散置放群组内,云服务器id列表。 + // 注意:此字段可能返回 null,表示取不到有效值。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 分散置放群组创建时间。 + // 注意:此字段可能返回 null,表示取不到有效值。 + CreateTime *string `json:"CreateTime,omitempty" name:"CreateTime"` +} + +type EnhancedService struct { + + // 开启云安全服务。若不指定该参数,则默认开启云安全服务。 + SecurityService *RunSecurityServiceEnabled `json:"SecurityService,omitempty" name:"SecurityService"` + + // 开启云监控服务。若不指定该参数,则默认开启云监控服务。 + MonitorService *RunMonitorServiceEnabled `json:"MonitorService,omitempty" name:"MonitorService"` +} + +type Externals struct { + + // 释放地址 + // 注意:此字段可能返回 null,表示取不到有效值。 + ReleaseAddress *bool `json:"ReleaseAddress,omitempty" name:"ReleaseAddress"` + + // 不支持的网络类型,取值范围:
+ // • BASIC: classic network
  • VPC1.0:私有网络VPC1.0 + // 注意:此字段可能返回 null,表示取不到有效值。 + UnsupportNetworks []*string `json:"UnsupportNetworks,omitempty" name:"UnsupportNetworks" list` + + // HDD本地存储属性 + // 注意:此字段可能返回 null,表示取不到有效值。 + StorageBlockAttr *StorageBlock `json:"StorageBlockAttr,omitempty" name:"StorageBlockAttr"` +} + +type Filter struct { + + // 需要过滤的字段。 + Name *string `json:"Name,omitempty" name:"Name"` + + // 字段的过滤值。 + Values []*string `json:"Values,omitempty" name:"Values" list` +} + +type HostItem struct { + + // cdh实例所在的位置。通过该参数可以指定实例所属可用区,所属项目等属性。 + Placement *Placement `json:"Placement,omitempty" name:"Placement"` + + // cdh实例id + HostId *string `json:"HostId,omitempty" name:"HostId"` + + // cdh实例类型 + HostType *string `json:"HostType,omitempty" name:"HostType"` + + // cdh实例名称 + HostName *string `json:"HostName,omitempty" name:"HostName"` + + // cdh实例付费模式 + HostChargeType *string `json:"HostChargeType,omitempty" name:"HostChargeType"` + + // cdh实例自动续费标记 + RenewFlag *string `json:"RenewFlag,omitempty" name:"RenewFlag"` + + // cdh实例创建时间 + CreatedTime *string `json:"CreatedTime,omitempty" name:"CreatedTime"` + + // cdh实例过期时间 + ExpiredTime *string `json:"ExpiredTime,omitempty" name:"ExpiredTime"` + + // cdh实例上已创建云子机的实例id列表 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // cdh实例状态 + HostState *string `json:"HostState,omitempty" name:"HostState"` + + // cdh实例ip + HostIp *string `json:"HostIp,omitempty" name:"HostIp"` + + // cdh实例资源信息 + HostResource *HostResource `json:"HostResource,omitempty" name:"HostResource"` + + // 专用宿主机所属的围笼ID。该字段仅对金融专区围笼内的专用宿主机有效。 + // 注意:此字段可能返回 null,表示取不到有效值。 + CageId *string `json:"CageId,omitempty" name:"CageId"` +} + +type HostResource struct { + + // cdh实例总cpu核数 + CpuTotal *uint64 `json:"CpuTotal,omitempty" name:"CpuTotal"` + + // cdh实例可用cpu核数 + CpuAvailable *uint64 `json:"CpuAvailable,omitempty" name:"CpuAvailable"` + + // cdh实例总内存大小(单位为:GiB) + MemTotal *float64 `json:"MemTotal,omitempty" name:"MemTotal"` + + // cdh实例可用内存大小(单位为:GiB) + MemAvailable *float64 `json:"MemAvailable,omitempty" name:"MemAvailable"` + + // cdh实例总磁盘大小(单位为:GiB) + DiskTotal *uint64 `json:"DiskTotal,omitempty" name:"DiskTotal"` + + // cdh实例可用磁盘大小(单位为:GiB) + DiskAvailable *uint64 `json:"DiskAvailable,omitempty" name:"DiskAvailable"` + + // cdh实例磁盘类型 + DiskType *string `json:"DiskType,omitempty" name:"DiskType"` +} + +type Image struct { + + // 镜像ID + ImageId *string `json:"ImageId,omitempty" name:"ImageId"` + + // 镜像操作系统 + OsName *string `json:"OsName,omitempty" name:"OsName"` + + // 镜像类型 + ImageType *string `json:"ImageType,omitempty" name:"ImageType"` + + // 镜像创建时间 + CreatedTime *string `json:"CreatedTime,omitempty" name:"CreatedTime"` + + // 镜像名称 + ImageName *string `json:"ImageName,omitempty" name:"ImageName"` + + // 镜像描述 + ImageDescription *string `json:"ImageDescription,omitempty" name:"ImageDescription"` + + // 镜像大小 + ImageSize *int64 `json:"ImageSize,omitempty" name:"ImageSize"` + + // 镜像架构 + Architecture *string `json:"Architecture,omitempty" name:"Architecture"` + + // 镜像状态: + // CREATING-创建中 + // NORMAL-正常 + // CREATEFAILED-创建失败 + // USING-使用中 + // SYNCING-同步中 + // IMPORTING-导入中 + // IMPORTFAILED-导入失败 + ImageState *string `json:"ImageState,omitempty" name:"ImageState"` + + // 镜像来源平台 + Platform *string `json:"Platform,omitempty" name:"Platform"` + + // 镜像创建者 + ImageCreator *string `json:"ImageCreator,omitempty" name:"ImageCreator"` + + // 镜像来源 + ImageSource *string `json:"ImageSource,omitempty" name:"ImageSource"` + + // 同步百分比 + // 注意:此字段可能返回 null,表示取不到有效值。 + 
SyncPercent *int64 `json:"SyncPercent,omitempty" name:"SyncPercent"` + + // 镜像是否支持cloud-init + // 注意:此字段可能返回 null,表示取不到有效值。 + IsSupportCloudinit *bool `json:"IsSupportCloudinit,omitempty" name:"IsSupportCloudinit"` + + // 镜像关联的快照信息 + // 注意:此字段可能返回 null,表示取不到有效值。 + SnapshotSet []*Snapshot `json:"SnapshotSet,omitempty" name:"SnapshotSet" list` +} + +type ImageOsList struct { + + // 支持的windows操作系统。 + // 注意:此字段可能返回 null,表示取不到有效值。 + Windows []*string `json:"Windows,omitempty" name:"Windows" list` + + // 支持的linux操作系统 + // 注意:此字段可能返回 null,表示取不到有效值。 + Linux []*string `json:"Linux,omitempty" name:"Linux" list` +} + +type ImportImageRequest struct { + *tchttp.BaseRequest + + // 导入镜像的操作系统架构,`x86_64` 或 `i386` + Architecture *string `json:"Architecture,omitempty" name:"Architecture"` + + // 导入镜像的操作系统类型,通过`DescribeImportImageOs`获取 + OsType *string `json:"OsType,omitempty" name:"OsType"` + + // 导入镜像的操作系统版本,通过`DescribeImportImageOs`获取 + OsVersion *string `json:"OsVersion,omitempty" name:"OsVersion"` + + // 导入镜像存放的cos地址 + ImageUrl *string `json:"ImageUrl,omitempty" name:"ImageUrl"` + + // 镜像名称 + ImageName *string `json:"ImageName,omitempty" name:"ImageName"` + + // 镜像描述 + ImageDescription *string `json:"ImageDescription,omitempty" name:"ImageDescription"` + + // 只检查参数,不执行任务 + DryRun *bool `json:"DryRun,omitempty" name:"DryRun"` + + // 是否强制导入,参考[强制导入镜像](https://cloud.tencent.com/document/product/213/12849) + Force *bool `json:"Force,omitempty" name:"Force"` +} + +func (r *ImportImageRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ImportImageRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ImportImageResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ImportImageResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ImportImageResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ImportKeyPairRequest struct { + *tchttp.BaseRequest + + // 密钥对名称,可由数字,字母和下划线组成,长度不超过25个字符。 + KeyName *string `json:"KeyName,omitempty" name:"KeyName"` + + // 密钥对创建后所属的[项目](https://cloud.tencent.com/document/product/378/10861)ID。

+ // Project IDs can be obtained in the following ways:
+ // • Query project IDs in the [project list](https://console.cloud.tencent.com/project).
  • 通过调用接口 [DescribeProject](https://cloud.tencent.com/document/api/378/4400),取返回信息中的 `projectId ` 获取项目ID。 + // + // 如果是默认项目,直接填0就可以。 + ProjectId *int64 `json:"ProjectId,omitempty" name:"ProjectId"` + + // 密钥对的公钥内容,`OpenSSH RSA` 格式。 + PublicKey *string `json:"PublicKey,omitempty" name:"PublicKey"` +} + +func (r *ImportKeyPairRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ImportKeyPairRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ImportKeyPairResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 密钥对ID。 + KeyId *string `json:"KeyId,omitempty" name:"KeyId"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ImportKeyPairResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ImportKeyPairResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type InquiryPriceModifyInstancesChargeTypeRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`InstanceId`获取。每次请求批量实例的上限为100。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 实例[计费类型](https://cloud.tencent.com/document/product/213/2180)。
  • PREPAID:预付费,即包年包月。 + InstanceChargeType *string `json:"InstanceChargeType,omitempty" name:"InstanceChargeType"` + + // 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月实例的续费时长、是否设置自动续费等属性。 + InstanceChargePrepaid *InstanceChargePrepaid `json:"InstanceChargePrepaid,omitempty" name:"InstanceChargePrepaid"` +} + +func (r *InquiryPriceModifyInstancesChargeTypeRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *InquiryPriceModifyInstancesChargeTypeRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type InquiryPriceModifyInstancesChargeTypeResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 该参数表示对应配置实例转换计费模式的价格。 + Price *Price `json:"Price,omitempty" name:"Price"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *InquiryPriceModifyInstancesChargeTypeResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *InquiryPriceModifyInstancesChargeTypeResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type InquiryPriceRenewInstancesRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`InstanceId`获取。每次请求批量实例的上限为100。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月实例的续费时长、是否设置自动续费等属性。 + InstanceChargePrepaid *InstanceChargePrepaid `json:"InstanceChargePrepaid,omitempty" name:"InstanceChargePrepaid"` + + // 试运行,测试使用,不执行具体逻辑。取值范围:
+ // • TRUE: skip the execution logic
+ // • FALSE: execute the logic
+ //
    默认取值:FALSE。 + DryRun *bool `json:"DryRun,omitempty" name:"DryRun"` + + // 是否续费弹性数据盘。取值范围:
+ // • TRUE: renew the attached elastic data disks along with the subscription instance
+ // • FALSE: do not renew the attached elastic data disks when renewing the subscription instance
+ //
    默认取值:TRUE。 + RenewPortableDataDisk *bool `json:"RenewPortableDataDisk,omitempty" name:"RenewPortableDataDisk"` +} + +func (r *InquiryPriceRenewInstancesRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *InquiryPriceRenewInstancesRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type InquiryPriceRenewInstancesResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 该参数表示对应配置实例的价格。 + Price *Price `json:"Price,omitempty" name:"Price"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *InquiryPriceRenewInstancesResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *InquiryPriceRenewInstancesResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type InquiryPriceResetInstanceRequest struct { + *tchttp.BaseRequest + + // 实例ID。可通过 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728) API返回值中的`InstanceId`获取。 + InstanceId *string `json:"InstanceId,omitempty" name:"InstanceId"` + + // 指定有效的[镜像](/document/product/213/4940)ID,格式形如`img-xxx`。镜像类型分为四种:
+ // • public images
+ // • custom images
+ // • shared images
+ // • marketplace images
+ //
+ // Available image IDs can be obtained in the following ways:
+ // • IDs of `public images`, `custom images` and `shared images` can be queried by logging in to the [console](https://console.cloud.tencent.com/cvm/image?rid=1&imageType=PUBLIC_IMAGE); IDs of `marketplace images` can be queried in the [cloud marketplace](https://market.cloud.tencent.com/list).
+ // • Call the [DescribeImages](https://cloud.tencent.com/document/api/213/15715) API and take the `ImageId` field from the response.
  • + ImageId *string `json:"ImageId,omitempty" name:"ImageId"` + + // 实例系统盘配置信息。系统盘为云盘的实例可以通过该参数指定重装后的系统盘大小来实现对系统盘的扩容操作,若不指定则默认系统盘大小保持不变。系统盘大小只支持扩容不支持缩容;重装只支持修改系统盘的大小,不能修改系统盘的类型。 + SystemDisk *SystemDisk `json:"SystemDisk,omitempty" name:"SystemDisk"` + + // 实例登录设置。通过该参数可以设置实例的登录方式密码、密钥或保持镜像的原始登录设置。默认情况下会随机生成密码,并以站内信方式知会到用户。 + LoginSettings *LoginSettings `json:"LoginSettings,omitempty" name:"LoginSettings"` + + // 增强服务。通过该参数可以指定是否开启云安全、云监控等服务。若不指定该参数,则默认开启云监控、云安全服务。 + EnhancedService *EnhancedService `json:"EnhancedService,omitempty" name:"EnhancedService"` +} + +func (r *InquiryPriceResetInstanceRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *InquiryPriceResetInstanceRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type InquiryPriceResetInstanceResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 该参数表示重装成对应配置实例的价格。 + Price *Price `json:"Price,omitempty" name:"Price"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *InquiryPriceResetInstanceResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *InquiryPriceResetInstanceResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type InquiryPriceResetInstancesInternetMaxBandwidthRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`InstanceId`获取。每次请求批量实例的上限为100。当调整 `BANDWIDTH_PREPAID` 和 `BANDWIDTH_POSTPAID_BY_HOUR` 计费方式的带宽时,只支持一个实例。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 公网出带宽配置。不同机型带宽上限范围不一致,具体限制详见带宽限制对账表。暂时只支持`InternetMaxBandwidthOut`参数。 + InternetAccessible *InternetAccessible `json:"InternetAccessible,omitempty" name:"InternetAccessible"` + + // 带宽生效的起始时间。格式:`YYYY-MM-DD`,例如:`2016-10-30`。起始时间不能早于当前时间。如果起始时间是今天则新设置的带宽立即生效。该参数只对包年包月带宽有效,其他模式带宽不支持该参数,否则接口会以相应错误码返回。 + StartTime *string `json:"StartTime,omitempty" name:"StartTime"` + + // 带宽生效的终止时间。格式:`YYYY-MM-DD`,例如:`2016-10-30`。新设置的带宽的有效期包含终止时间此日期。终止时间不能晚于包年包月实例的到期时间。实例的到期时间可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`ExpiredTime`获取。该参数只对包年包月带宽有效,其他模式带宽不支持该参数,否则接口会以相应错误码返回。 + EndTime *string `json:"EndTime,omitempty" name:"EndTime"` +} + +func (r *InquiryPriceResetInstancesInternetMaxBandwidthRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *InquiryPriceResetInstancesInternetMaxBandwidthRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type InquiryPriceResetInstancesInternetMaxBandwidthResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 该参数表示带宽调整为对应大小之后的价格。 + Price *Price `json:"Price,omitempty" name:"Price"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *InquiryPriceResetInstancesInternetMaxBandwidthResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *InquiryPriceResetInstancesInternetMaxBandwidthResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type InquiryPriceResetInstancesTypeRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`InstanceId`获取。本接口每次请求批量实例的上限为1。 + InstanceIds []*string 
`json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 实例机型。不同实例机型指定了不同的资源规格,具体取值可参见附表[实例资源规格](https://cloud.tencent.com/document/product/213/11518)对照表,也可以调用查询[实例资源规格列表](https://cloud.tencent.com/document/product/213/15749)接口获得最新的规格表。 + InstanceType *string `json:"InstanceType,omitempty" name:"InstanceType"` +} + +func (r *InquiryPriceResetInstancesTypeRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *InquiryPriceResetInstancesTypeRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type InquiryPriceResetInstancesTypeResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 该参数表示调整成对应机型实例的价格。 + Price *Price `json:"Price,omitempty" name:"Price"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *InquiryPriceResetInstancesTypeResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *InquiryPriceResetInstancesTypeResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type InquiryPriceResizeInstanceDisksRequest struct { + *tchttp.BaseRequest + + // 待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`InstanceId`获取。 + InstanceId *string `json:"InstanceId,omitempty" name:"InstanceId"` + + // 待扩容的数据盘配置信息。只支持扩容非弹性数据盘([`DescribeDisks`](https://cloud.tencent.com/document/api/362/16315)接口返回值中的`Portable`为`false`表示非弹性),且[数据盘类型](https://cloud.tencent.com/document/product/213/15753#DataDisk)为:`CLOUD_BASIC`、`CLOUD_PREMIUM`、`CLOUD_SSD`。数据盘容量单位:GB。最小扩容步长:10G。关于数据盘类型的选择请参考硬盘产品简介。可选数据盘类型受到实例类型`InstanceType`限制。另外允许扩容的最大容量也因数据盘类型的不同而有所差异。 + DataDisks []*DataDisk `json:"DataDisks,omitempty" name:"DataDisks" list` + + // 是否对运行中的实例选择强制关机。建议对运行中的实例先手动关机,然后再重置用户密码。取值范围:
+ // • TRUE: force shutdown after a normal shutdown fails
+ // • FALSE: do not force shutdown after a normal shutdown fails
+ //
+ // Default value: FALSE.
+ //
    强制关机的效果等同于关闭物理计算机的电源开关。强制关机可能会导致数据丢失或文件系统损坏,请仅在服务器不能正常关机时使用。 + ForceStop *bool `json:"ForceStop,omitempty" name:"ForceStop"` +} + +func (r *InquiryPriceResizeInstanceDisksRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *InquiryPriceResizeInstanceDisksRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type InquiryPriceResizeInstanceDisksResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 该参数表示磁盘扩容成对应配置的价格。 + Price *Price `json:"Price,omitempty" name:"Price"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *InquiryPriceResizeInstanceDisksResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *InquiryPriceResizeInstanceDisksResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type InquiryPriceRunInstancesRequest struct { + *tchttp.BaseRequest + + // 实例所在的位置。通过该参数可以指定实例所属可用区,所属项目等属性。 + Placement *Placement `json:"Placement,omitempty" name:"Placement"` + + // 指定有效的[镜像](https://cloud.tencent.com/document/product/213/4940)ID,格式形如`img-xxx`。镜像类型分为四种:
+ // • public images
+ // • custom images
+ // • shared images
+ // • marketplace images
+ //
+ // Available image IDs can be obtained in the following ways:
+ // • IDs of `public images`, `custom images` and `shared images` can be queried by logging in to the [console](https://console.cloud.tencent.com/cvm/image?rid=1&imageType=PUBLIC_IMAGE); IDs of `marketplace images` can be queried in the [cloud marketplace](https://market.cloud.tencent.com/list).
+ // • Call the [DescribeImages](https://cloud.tencent.com/document/api/213/15715) API and take the `ImageId` field from the response.
  • + ImageId *string `json:"ImageId,omitempty" name:"ImageId"` + + // 实例[计费类型](https://cloud.tencent.com/document/product/213/2180)。
+ // • PREPAID: prepaid, i.e. monthly/annual subscription
+ // • POSTPAID_BY_HOUR: postpaid, billed by hour
+ // • SPOTPAID: spot instance billing
    默认值:POSTPAID_BY_HOUR。 + InstanceChargeType *string `json:"InstanceChargeType,omitempty" name:"InstanceChargeType"` + + // 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月实例的购买时长、是否设置自动续费等属性。若指定实例的付费模式为预付费则该参数必传。 + InstanceChargePrepaid *InstanceChargePrepaid `json:"InstanceChargePrepaid,omitempty" name:"InstanceChargePrepaid"` + + // 实例机型。不同实例机型指定了不同的资源规格,具体取值可通过调用接口[DescribeInstanceTypeConfigs](https://cloud.tencent.com/document/api/213/15749)来获得最新的规格表或参见[实例规格](https://cloud.tencent.com/document/product/213/11518)描述。若不指定该参数,则默认机型为S1.SMALL1。 + InstanceType *string `json:"InstanceType,omitempty" name:"InstanceType"` + + // 实例系统盘配置信息。若不指定该参数,则按照系统默认值进行分配。 + SystemDisk *SystemDisk `json:"SystemDisk,omitempty" name:"SystemDisk"` + + // 实例数据盘配置信息。若不指定该参数,则默认不购买数据盘。支持购买的时候指定21块数据盘,其中最多包含1块LOCAL_BASIC数据盘或者LOCAL_SSD数据盘,最多包含20块CLOUD_BASIC数据盘、CLOUD_PREMIUM数据盘或者CLOUD_SSD数据盘。 + DataDisks []*DataDisk `json:"DataDisks,omitempty" name:"DataDisks" list` + + // 私有网络相关信息配置。通过该参数可以指定私有网络的ID,子网ID等信息。若不指定该参数,则默认使用基础网络。若在此参数中指定了私有网络IP,那么InstanceCount参数只能为1。 + VirtualPrivateCloud *VirtualPrivateCloud `json:"VirtualPrivateCloud,omitempty" name:"VirtualPrivateCloud"` + + // 公网带宽相关信息设置。若不指定该参数,则默认公网带宽为0Mbps。 + InternetAccessible *InternetAccessible `json:"InternetAccessible,omitempty" name:"InternetAccessible"` + + // 购买实例数量。取值范围:[1,100]。默认取值:1。指定购买实例的数量不能超过用户所能购买的剩余配额数量,具体配额相关限制详见[CVM实例购买限制](https://cloud.tencent.com/document/product/213/2664)。 + InstanceCount *int64 `json:"InstanceCount,omitempty" name:"InstanceCount"` + + // 实例显示名称。
+ // • If the display name is not specified, 'Unnamed' is shown by default.
+ // • When purchasing multiple instances, a pattern string `{R:x}` generates the numbers `[x, x+n-1]`, where `n` is the number of instances purchased. For example, with `server_{R:3}`, buying 1 instance yields the name `server_3`; buying 2 yields `server_3` and `server_4`. Multiple `{R:x}` pattern strings are supported.
+ // • When purchasing multiple instances without a pattern string, the suffixes `1, 2...n` are appended to the display name, where `n` is the number of instances. For example, with `server_`, buying 2 instances yields `server_1` and `server_2`.
  • 最多支持60个字符(包含模式串)。 + InstanceName *string `json:"InstanceName,omitempty" name:"InstanceName"` + + // 实例登录设置。通过该参数可以设置实例的登录方式密码、密钥或保持镜像的原始登录设置。默认情况下会随机生成密码,并以站内信方式知会到用户。 + LoginSettings *LoginSettings `json:"LoginSettings,omitempty" name:"LoginSettings"` + + // 实例所属安全组。该参数可以通过调用 [DescribeSecurityGroups](https://cloud.tencent.com/document/api/215/15808) 的返回值中的sgId字段来获取。若不指定该参数,则默认不绑定安全组。 + SecurityGroupIds []*string `json:"SecurityGroupIds,omitempty" name:"SecurityGroupIds" list` + + // 增强服务。通过该参数可以指定是否开启云安全、云监控等服务。若不指定该参数,则默认开启云监控、云安全服务。 + EnhancedService *EnhancedService `json:"EnhancedService,omitempty" name:"EnhancedService"` + + // 用于保证请求幂等性的字符串。该字符串由客户生成,需保证不同请求之间唯一,最大值不超过64个ASCII字符。若不指定该参数,则无法保证请求的幂等性。
    更多详细信息请参阅:如何保证幂等性。 + ClientToken *string `json:"ClientToken,omitempty" name:"ClientToken"` + + // 云服务器的主机名。
+ // • A period (.) or hyphen (-) cannot be the first or last character of HostName, nor can they appear consecutively.
+ // • Windows instances: the name must be 2 to 15 characters long and may contain letters (case-insensitive), digits and hyphens (-); periods (.) are not allowed and the name cannot consist of digits only.
  • 其他类型(Linux 等)实例:字符长度为[2, 30],允许支持多个点号,点之间为一段,每段允许字母(不限制大小写)、数字和短横线(-)组成。 + HostName *string `json:"HostName,omitempty" name:"HostName"` + + // 标签描述列表。通过指定该参数可以同时绑定标签到相应的资源实例,当前仅支持绑定标签到云服务器实例。 + TagSpecification []*TagSpecification `json:"TagSpecification,omitempty" name:"TagSpecification" list` + + // 实例的市场相关选项,如竞价实例相关参数 + InstanceMarketOptions *InstanceMarketOptionsRequest `json:"InstanceMarketOptions,omitempty" name:"InstanceMarketOptions"` +} + +func (r *InquiryPriceRunInstancesRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *InquiryPriceRunInstancesRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type InquiryPriceRunInstancesResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 该参数表示对应配置实例的价格。 + Price *Price `json:"Price,omitempty" name:"Price"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *InquiryPriceRunInstancesResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *InquiryPriceRunInstancesResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type Instance struct { + + // 实例所在的位置。 + Placement *Placement `json:"Placement,omitempty" name:"Placement"` + + // 实例`ID`。 + InstanceId *string `json:"InstanceId,omitempty" name:"InstanceId"` + + // 实例机型。 + InstanceType *string `json:"InstanceType,omitempty" name:"InstanceType"` + + // 实例的CPU核数,单位:核。 + CPU *int64 `json:"CPU,omitempty" name:"CPU"` + + // 实例内存容量,单位:`GB`。 + Memory *int64 `json:"Memory,omitempty" name:"Memory"` + + // 实例业务状态。取值范围:
+ // • NORMAL: normal instance
+ // • EXPIRED: expired instance
  • PROTECTIVELY_ISOLATED:表示被安全隔离的实例。 + RestrictState *string `json:"RestrictState,omitempty" name:"RestrictState"` + + // 实例名称。 + InstanceName *string `json:"InstanceName,omitempty" name:"InstanceName"` + + // 实例计费模式。取值范围:
+ // • `PREPAID`: prepaid, i.e. monthly/annual subscription
+ // • `POSTPAID_BY_HOUR`: postpaid, i.e. pay-as-you-go
+ // • `CDHPAID`: `CDH` billing, i.e. only the `CDH` is billed, not the instances running on it.
  • `SPOTPAID`:表示竞价实例付费。 + InstanceChargeType *string `json:"InstanceChargeType,omitempty" name:"InstanceChargeType"` + + // 实例系统盘信息。 + SystemDisk *SystemDisk `json:"SystemDisk,omitempty" name:"SystemDisk"` + + // 实例数据盘信息。只包含随实例购买的数据盘。 + DataDisks []*DataDisk `json:"DataDisks,omitempty" name:"DataDisks" list` + + // 实例主网卡的内网`IP`列表。 + PrivateIpAddresses []*string `json:"PrivateIpAddresses,omitempty" name:"PrivateIpAddresses" list` + + // 实例主网卡的公网`IP`列表。 + // 注意:此字段可能返回 null,表示取不到有效值。 + PublicIpAddresses []*string `json:"PublicIpAddresses,omitempty" name:"PublicIpAddresses" list` + + // 实例带宽信息。 + InternetAccessible *InternetAccessible `json:"InternetAccessible,omitempty" name:"InternetAccessible"` + + // 实例所属虚拟私有网络信息。 + VirtualPrivateCloud *VirtualPrivateCloud `json:"VirtualPrivateCloud,omitempty" name:"VirtualPrivateCloud"` + + // 生产实例所使用的镜像`ID`。 + ImageId *string `json:"ImageId,omitempty" name:"ImageId"` + + // 自动续费标识。取值范围:
+ // • `NOTIFY_AND_MANUAL_RENEW`: notify upon expiration but do not renew automatically
+ // • `NOTIFY_AND_AUTO_RENEW`: notify upon expiration and renew automatically
+ // • `DISABLE_NOTIFY_AND_MANUAL_RENEW`: neither notify upon expiration nor renew automatically.
+ //
  • 注意:后付费模式本项为null + RenewFlag *string `json:"RenewFlag,omitempty" name:"RenewFlag"` + + // 创建时间。按照`ISO8601`标准表示,并且使用`UTC`时间。格式为:`YYYY-MM-DDThh:mm:ssZ`。 + CreatedTime *string `json:"CreatedTime,omitempty" name:"CreatedTime"` + + // 到期时间。按照`ISO8601`标准表示,并且使用`UTC`时间。格式为:`YYYY-MM-DDThh:mm:ssZ`。注意:后付费模式本项为null + ExpiredTime *string `json:"ExpiredTime,omitempty" name:"ExpiredTime"` + + // 操作系统名称。 + OsName *string `json:"OsName,omitempty" name:"OsName"` + + // 实例所属安全组。该参数可以通过调用 [DescribeSecurityGroups](https://cloud.tencent.com/document/api/215/15808) 的返回值中的sgId字段来获取。 + SecurityGroupIds []*string `json:"SecurityGroupIds,omitempty" name:"SecurityGroupIds" list` + + // 实例登录设置。目前只返回实例所关联的密钥。 + LoginSettings *LoginSettings `json:"LoginSettings,omitempty" name:"LoginSettings"` + + // 实例状态。取值范围:
+ // • PENDING: creating
+ // • LAUNCH_FAILED: creation failed
+ // • RUNNING: running
+ // • STOPPED: shut down
+ // • STARTING: starting
+ // • STOPPING: shutting down
+ // • REBOOTING: rebooting
+ // • SHUTDOWN: stopped and to be terminated
+ // • TERMINATING: terminating.
  • + InstanceState *string `json:"InstanceState,omitempty" name:"InstanceState"` + + // 实例关联的标签列表。 + Tags []*Tag `json:"Tags,omitempty" name:"Tags" list` + + // 实例的关机计费模式。 + // 取值范围:
+ // • KEEP_CHARGING: billing continues after shutdown
+ // • STOP_CHARGING: billing stops after shutdown
+ // • NOT_APPLICABLE: the instance is not shut down or does not meet the conditions for billing to stop after shutdown
    + StopChargingMode *string `json:"StopChargingMode,omitempty" name:"StopChargingMode"` + + // 实例全局唯一ID + Uuid *string `json:"Uuid,omitempty" name:"Uuid"` + + // 实例的最新操作。例:StopInstances、ResetInstance。 + // 注意:此字段可能返回 null,表示取不到有效值。 + LatestOperation *string `json:"LatestOperation,omitempty" name:"LatestOperation"` + + // 实例的最新操作状态。取值范围:
+ // • SUCCESS: the operation succeeded
+ // • OPERATING: the operation is in progress
  • FAILED:表示操作失败 + // 注意:此字段可能返回 null,表示取不到有效值。 + LatestOperationState *string `json:"LatestOperationState,omitempty" name:"LatestOperationState"` + + // 实例最新操作的唯一请求 ID。 + // 注意:此字段可能返回 null,表示取不到有效值。 + LatestOperationRequestId *string `json:"LatestOperationRequestId,omitempty" name:"LatestOperationRequestId"` + + // 分散置放群组ID。 + // 注意:此字段可能返回 null,表示取不到有效值。 + DisasterRecoverGroupId *string `json:"DisasterRecoverGroupId,omitempty" name:"DisasterRecoverGroupId"` + + // 实例的IPv6地址。 + // 注意:此字段可能返回 null,表示取不到有效值。 + IPv6Addresses []*string `json:"IPv6Addresses,omitempty" name:"IPv6Addresses" list` + + // CAM角色名。 + // 注意:此字段可能返回 null,表示取不到有效值。 + CamRoleName *string `json:"CamRoleName,omitempty" name:"CamRoleName"` +} + +type InstanceChargePrepaid struct { + + // 购买实例的时长,单位:月。取值范围:1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 24, 36, 48, 60。 + Period *int64 `json:"Period,omitempty" name:"Period"` + + // 自动续费标识。取值范围:
+ // • NOTIFY_AND_AUTO_RENEW: notify upon expiration and renew automatically
+ // • NOTIFY_AND_MANUAL_RENEW: notify upon expiration but do not renew automatically
+ // • DISABLE_NOTIFY_AND_MANUAL_RENEW: neither notify upon expiration nor renew automatically
+ //
    默认取值:NOTIFY_AND_MANUAL_RENEW。若该参数指定为NOTIFY_AND_AUTO_RENEW,在账户余额充足的情况下,实例到期后将按月自动续费。 + RenewFlag *string `json:"RenewFlag,omitempty" name:"RenewFlag"` +} + +type InstanceFamilyConfig struct { + + // 机型族名称的中文全称。 + InstanceFamilyName *string `json:"InstanceFamilyName,omitempty" name:"InstanceFamilyName"` + + // 机型族名称的英文简称。 + InstanceFamily *string `json:"InstanceFamily,omitempty" name:"InstanceFamily"` +} + +type InstanceMarketOptionsRequest struct { + + // 竞价相关选项 + SpotOptions *SpotMarketOptions `json:"SpotOptions,omitempty" name:"SpotOptions"` + + // 市场选项类型,当前只支持取值:spot + MarketType *string `json:"MarketType,omitempty" name:"MarketType"` +} + +type InstanceStatus struct { + + // 实例`ID`。 + InstanceId *string `json:"InstanceId,omitempty" name:"InstanceId"` + + // 实例状态。取值范围:
+ // • PENDING: creating
+ // • LAUNCH_FAILED: creation failed
+ // • RUNNING: running
+ // • STOPPED: shut down
+ // • STARTING: starting
+ // • STOPPING: shutting down
+ // • REBOOTING: rebooting
+ // • SHUTDOWN: stopped and to be terminated
+ // • TERMINATING: terminating.
  • + InstanceState *string `json:"InstanceState,omitempty" name:"InstanceState"` +} + +type InstanceTypeConfig struct { + + // 可用区。 + Zone *string `json:"Zone,omitempty" name:"Zone"` + + // 实例机型。 + InstanceType *string `json:"InstanceType,omitempty" name:"InstanceType"` + + // 实例机型系列。 + InstanceFamily *string `json:"InstanceFamily,omitempty" name:"InstanceFamily"` + + // GPU核数,单位:核。 + GPU *int64 `json:"GPU,omitempty" name:"GPU"` + + // CPU核数,单位:核。 + CPU *int64 `json:"CPU,omitempty" name:"CPU"` + + // 内存容量,单位:`GB`。 + Memory *int64 `json:"Memory,omitempty" name:"Memory"` + + // FPGA核数,单位:核。 + FPGA *int64 `json:"FPGA,omitempty" name:"FPGA"` +} + +type InstanceTypeQuotaItem struct { + + // 可用区。 + Zone *string `json:"Zone,omitempty" name:"Zone"` + + // 实例机型。 + InstanceType *string `json:"InstanceType,omitempty" name:"InstanceType"` + + // 实例计费模式。取值范围:
+ // • PREPAID: prepaid, i.e. monthly/annual subscription
+ // • POSTPAID_BY_HOUR: postpaid, i.e. pay-as-you-go
+ // • CDHPAID: [CDH](https://cloud.tencent.com/document/product/416) billing, i.e. only the CDH is billed, not the instances running on it.
  • `SPOTPAID`:表示竞价实例付费。 + InstanceChargeType *string `json:"InstanceChargeType,omitempty" name:"InstanceChargeType"` + + // 网卡类型,例如:25代表25G网卡 + NetworkCard *int64 `json:"NetworkCard,omitempty" name:"NetworkCard"` + + // 扩展属性。 + // 注意:此字段可能返回 null,表示取不到有效值。 + Externals *Externals `json:"Externals,omitempty" name:"Externals"` + + // 实例的CPU核数,单位:核。 + Cpu *int64 `json:"Cpu,omitempty" name:"Cpu"` + + // 实例内存容量,单位:`GB`。 + Memory *int64 `json:"Memory,omitempty" name:"Memory"` + + // 实例机型系列。 + InstanceFamily *string `json:"InstanceFamily,omitempty" name:"InstanceFamily"` + + // 机型名称。 + TypeName *string `json:"TypeName,omitempty" name:"TypeName"` + + // 本地磁盘规格列表。当该参数返回为空值时,表示当前情况下无法创建本地盘。 + LocalDiskTypeList []*LocalDiskType `json:"LocalDiskTypeList,omitempty" name:"LocalDiskTypeList" list` + + // 实例是否售卖。取值范围:
+ // • SELL: the instance type is available for purchase
  • SOLD_OUT:表示实例已售罄。 + Status *string `json:"Status,omitempty" name:"Status"` + + // 实例的售卖价格。 + Price *ItemPrice `json:"Price,omitempty" name:"Price"` + + // 售罄原因。 + // 注意:此字段可能返回 null,表示取不到有效值。 + SoldOutReason *string `json:"SoldOutReason,omitempty" name:"SoldOutReason"` +} + +type InternetAccessible struct { + + // 网络计费类型。取值范围:
+ // • BANDWIDTH_PREPAID: prepaid, billed by bandwidth
+ // • TRAFFIC_POSTPAID_BY_HOUR: postpaid, traffic billed by hour
+ // • BANDWIDTH_POSTPAID_BY_HOUR: postpaid, bandwidth billed by hour
+ // • BANDWIDTH_PACKAGE: bandwidth package user
    默认取值:非带宽包用户默认与子机付费类型保持一致。 + InternetChargeType *string `json:"InternetChargeType,omitempty" name:"InternetChargeType"` + + // 公网出带宽上限,单位:Mbps。默认值:0Mbps。不同机型带宽上限范围不一致,具体限制详见[购买网络带宽](https://cloud.tencent.com/document/product/213/12523)。 + InternetMaxBandwidthOut *int64 `json:"InternetMaxBandwidthOut,omitempty" name:"InternetMaxBandwidthOut"` + + // 是否分配公网IP。取值范围:
+ // • TRUE: assign a public IP
+ // • FALSE: do not assign a public IP
+ //
    当公网带宽大于0Mbps时,可自由选择开通与否,默认开通公网IP;当公网带宽为0,则不允许分配公网IP。该参数仅在RunInstances接口中作为入参使用。 + PublicIpAssigned *bool `json:"PublicIpAssigned,omitempty" name:"PublicIpAssigned"` + + // 带宽包ID。可通过[`DescribeBandwidthPackages`](https://cloud.tencent.com/document/api/215/19209)接口返回值中的`BandwidthPackageId`获取。该参数仅在RunInstances接口中作为入参使用。 + BandwidthPackageId *string `json:"BandwidthPackageId,omitempty" name:"BandwidthPackageId"` +} + +type InternetBandwidthConfig struct { + + // 开始时间。按照`ISO8601`标准表示,并且使用`UTC`时间。格式为:`YYYY-MM-DDThh:mm:ssZ`。 + StartTime *string `json:"StartTime,omitempty" name:"StartTime"` + + // 结束时间。按照`ISO8601`标准表示,并且使用`UTC`时间。格式为:`YYYY-MM-DDThh:mm:ssZ`。 + EndTime *string `json:"EndTime,omitempty" name:"EndTime"` + + // 实例带宽信息。 + InternetAccessible *InternetAccessible `json:"InternetAccessible,omitempty" name:"InternetAccessible"` +} + +type InternetChargeTypeConfig struct { + + // 网络计费模式。 + InternetChargeType *string `json:"InternetChargeType,omitempty" name:"InternetChargeType"` + + // 网络计费模式描述信息。 + Description *string `json:"Description,omitempty" name:"Description"` +} + +type ItemPrice struct { + + // 后续合计费用的原价,后付费模式使用,单位:元。
  • 如返回了其他时间区间项,如UnitPriceSecondStep,则本项代表时间区间在(0, 96)小时;若未返回其他时间区间项,则本项代表全时段,即(0, ∞)小时 + // 注意:此字段可能返回 null,表示取不到有效值。 + UnitPrice *float64 `json:"UnitPrice,omitempty" name:"UnitPrice"` + + // 后续计价单元,后付费模式使用,可取值范围:
+ // • HOUR: the pricing unit is per hour. Scenarios currently using this unit: instances billed hourly (POSTPAID_BY_HOUR) and bandwidth billed hourly (BANDWIDTH_POSTPAID_BY_HOUR).
  • GB:表示计价单元是按每GB来计算。当前涉及该计价单元的场景有:流量按小时后付费(TRAFFIC_POSTPAID_BY_HOUR)。 + // 注意:此字段可能返回 null,表示取不到有效值。 + ChargeUnit *string `json:"ChargeUnit,omitempty" name:"ChargeUnit"` + + // 预支合计费用的原价,预付费模式使用,单位:元。 + // 注意:此字段可能返回 null,表示取不到有效值。 + OriginalPrice *float64 `json:"OriginalPrice,omitempty" name:"OriginalPrice"` + + // 预支合计费用的折扣价,预付费模式使用,单位:元。 + // 注意:此字段可能返回 null,表示取不到有效值。 + DiscountPrice *float64 `json:"DiscountPrice,omitempty" name:"DiscountPrice"` + + // 折扣,如20.0代表2折 + // 注意:此字段可能返回 null,表示取不到有效值。 + Discount *float64 `json:"Discount,omitempty" name:"Discount"` + + // 后续合计费用的折扣价,后付费模式使用,单位:元
  • 如返回了其他时间区间项,如UnitPriceDiscountSecondStep,则本项代表时间区间在(0, 96)小时;若未返回其他时间区间项,则本项代表全时段,即(0, ∞)小时 + // 注意:此字段可能返回 null,表示取不到有效值。 + UnitPriceDiscount *float64 `json:"UnitPriceDiscount,omitempty" name:"UnitPriceDiscount"` + + // 使用时间区间在(96, 360)小时的后续合计费用的原价,后付费模式使用,单位:元。 + // 注意:此字段可能返回 null,表示取不到有效值。 + UnitPriceSecondStep *float64 `json:"UnitPriceSecondStep,omitempty" name:"UnitPriceSecondStep"` + + // 使用时间区间在(96, 360)小时的后续合计费用的折扣价,后付费模式使用,单位:元 + // 注意:此字段可能返回 null,表示取不到有效值。 + UnitPriceDiscountSecondStep *float64 `json:"UnitPriceDiscountSecondStep,omitempty" name:"UnitPriceDiscountSecondStep"` + + // 使用时间区间在(360, ∞)小时的后续合计费用的原价,后付费模式使用,单位:元。 + // 注意:此字段可能返回 null,表示取不到有效值。 + UnitPriceThirdStep *float64 `json:"UnitPriceThirdStep,omitempty" name:"UnitPriceThirdStep"` + + // 使用时间区间在(360, ∞)小时的后续合计费用的折扣价,后付费模式使用,单位:元 + // 注意:此字段可能返回 null,表示取不到有效值。 + UnitPriceDiscountThirdStep *float64 `json:"UnitPriceDiscountThirdStep,omitempty" name:"UnitPriceDiscountThirdStep"` +} + +type KeyPair struct { + + // 密钥对的`ID`,是密钥对的唯一标识。 + KeyId *string `json:"KeyId,omitempty" name:"KeyId"` + + // 密钥对名称。 + KeyName *string `json:"KeyName,omitempty" name:"KeyName"` + + // 密钥对所属的项目`ID`。 + ProjectId *int64 `json:"ProjectId,omitempty" name:"ProjectId"` + + // 密钥对描述信息。 + Description *string `json:"Description,omitempty" name:"Description"` + + // 密钥对的纯文本公钥。 + PublicKey *string `json:"PublicKey,omitempty" name:"PublicKey"` + + // 密钥对的纯文本私钥。腾讯云不会保管私钥,请用户自行妥善保存。 + PrivateKey *string `json:"PrivateKey,omitempty" name:"PrivateKey"` + + // 密钥关联的实例`ID`列表。 + AssociatedInstanceIds []*string `json:"AssociatedInstanceIds,omitempty" name:"AssociatedInstanceIds" list` + + // 创建时间。按照`ISO8601`标准表示,并且使用`UTC`时间。格式为:`YYYY-MM-DDThh:mm:ssZ`。 + CreatedTime *string `json:"CreatedTime,omitempty" name:"CreatedTime"` +} + +type LocalDiskType struct { + + // 本地磁盘类型。 + Type *string `json:"Type,omitempty" name:"Type"` + + // 本地磁盘属性。 + PartitionType *string `json:"PartitionType,omitempty" name:"PartitionType"` + + // 本地磁盘最小值。 + MinSize *int64 `json:"MinSize,omitempty" name:"MinSize"` + + // 本地磁盘最大值。 + MaxSize *int64 `json:"MaxSize,omitempty" name:"MaxSize"` + + // 购买时本地盘是否为必选。取值范围:
+ // • REQUIRED: required
  • OPTIONAL:表示可选。 + Required *string `json:"Required,omitempty" name:"Required"` +} + +type LoginSettings struct { + + // 实例登录密码。不同操作系统类型密码复杂度限制不一样,具体如下:
+ // • Linux instance passwords must be 8 to 30 characters long and include at least two of the following: [a-z], [A-Z], [0-9] and special characters from [( ) \` ~ ! @ # $ % ^ & * - + = | { } [ ] : ; ' , . ? / ].
+ // • Windows instance passwords must be 12 to 30 characters long and include at least three of the following: [a-z], [A-Z], [0-9] and special characters from [( ) \` ~ ! @ # $ % ^ & * - + = | { } [ ] : ; ' , . ? /].
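For illustration only, a minimal sketch of a LoginSettings value that logs in with an existing key pair instead of a password (per the comments above, Password and KeyIds must not be set together). common.StringPtrs is assumed from the vendored SDK's common package, and the key ID is a placeholder.

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
    	cvm "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312"
    )

    func main() {
    	// Key-pair login: leave Password nil so it is omitted from the JSON payload.
    	login := cvm.LoginSettings{
    		KeyIds: common.StringPtrs([]string{"skey-11112222"}), // placeholder key pair ID
    	}
    	b, _ := json.Marshal(login)
    	fmt.Println(string(b)) // {"KeyIds":["skey-11112222"]}
    }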

    若不指定该参数,则由系统随机生成密码,并通过站内信方式通知到用户。 + // 注意:此字段可能返回 null,表示取不到有效值。 + Password *string `json:"Password,omitempty" name:"Password"` + + // 密钥ID列表。关联密钥后,就可以通过对应的私钥来访问实例;KeyId可通过接口[DescribeKeyPairs](https://cloud.tencent.com/document/api/213/15699)获取,密钥与密码不能同时指定,同时Windows操作系统不支持指定密钥。当前仅支持购买的时候指定一个密钥。 + // 注意:此字段可能返回 null,表示取不到有效值。 + KeyIds []*string `json:"KeyIds,omitempty" name:"KeyIds" list` + + // 保持镜像的原始设置。该参数与Password或KeyIds.N不能同时指定。只有使用自定义镜像、共享镜像或外部导入镜像创建实例时才能指定该参数为TRUE。取值范围:
+ // • TRUE: keep the image's original login settings
+ // • FALSE: do not keep the image's original login settings
+ //
    默认取值:FALSE。 + // 注意:此字段可能返回 null,表示取不到有效值。 + KeepImageLogin *string `json:"KeepImageLogin,omitempty" name:"KeepImageLogin"` +} + +type ModifyDisasterRecoverGroupAttributeRequest struct { + *tchttp.BaseRequest + + // 分散置放群组ID,可使用[DescribeDisasterRecoverGroups](https://cloud.tencent.com/document/api/213/17810)接口获取。 + DisasterRecoverGroupId *string `json:"DisasterRecoverGroupId,omitempty" name:"DisasterRecoverGroupId"` + + // 分散置放群组名称,长度1-60个字符,支持中、英文。 + Name *string `json:"Name,omitempty" name:"Name"` +} + +func (r *ModifyDisasterRecoverGroupAttributeRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyDisasterRecoverGroupAttributeRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyDisasterRecoverGroupAttributeResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ModifyDisasterRecoverGroupAttributeResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyDisasterRecoverGroupAttributeResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyHostsAttributeRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的CDH实例ID。 + HostIds []*string `json:"HostIds,omitempty" name:"HostIds" list` + + // CDH实例显示名称。可任意命名,但不得超过60个字符。 + HostName *string `json:"HostName,omitempty" name:"HostName"` + + // 自动续费标识。取值范围:
+ // • NOTIFY_AND_AUTO_RENEW: notify upon expiration and renew automatically
+ // • NOTIFY_AND_MANUAL_RENEW: notify upon expiration but do not renew automatically
+ // • DISABLE_NOTIFY_AND_MANUAL_RENEW: neither notify upon expiration nor renew automatically
+ //
    若该参数指定为NOTIFY_AND_AUTO_RENEW,在账户余额充足的情况下,实例到期后将按月自动续费。 + RenewFlag *string `json:"RenewFlag,omitempty" name:"RenewFlag"` + + // 项目ID。项目可以使用[AddProject](https://cloud.tencent.com/doc/api/403/4398)接口创建。可通过[`DescribeProject`](https://cloud.tencent.com/document/product/378/4400) API返回值中的`projectId`获取。后续使用[DescribeHosts](https://cloud.tencent.com/document/api/213/16474)接口查询实例时,项目ID可用于过滤结果。 + ProjectId *uint64 `json:"ProjectId,omitempty" name:"ProjectId"` +} + +func (r *ModifyHostsAttributeRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyHostsAttributeRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyHostsAttributeResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ModifyHostsAttributeResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyHostsAttributeResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyImageAttributeRequest struct { + *tchttp.BaseRequest + + // 镜像ID,形如`img-gvbnzy6f`。镜像ID可以通过如下方式获取:
+ // • From the `ImageId` returned by the [DescribeImages](https://cloud.tencent.com/document/api/213/15715) API.
  • 通过[镜像控制台](https://console.cloud.tencent.com/cvm/image)获取。 + ImageId *string `json:"ImageId,omitempty" name:"ImageId"` + + // 设置新的镜像名称;必须满足下列限制:
+ // • Must not exceed 20 characters.
  • 镜像名称不能与已有镜像重复。 + ImageName *string `json:"ImageName,omitempty" name:"ImageName"` + + // 设置新的镜像描述;必须满足下列限制:
  • 不得超过60个字符。 + ImageDescription *string `json:"ImageDescription,omitempty" name:"ImageDescription"` +} + +func (r *ModifyImageAttributeRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyImageAttributeRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyImageAttributeResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ModifyImageAttributeResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyImageAttributeResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyImageSharePermissionRequest struct { + *tchttp.BaseRequest + + // 镜像ID,形如`img-gvbnzy6f`。镜像Id可以通过如下方式获取:
+ // • From the `ImageId` returned by the [DescribeImages](https://cloud.tencent.com/document/api/213/15715) API.
+ // • From the [image console](https://console.cloud.tencent.com/cvm/image).
    镜像ID必须指定为状态为`NORMAL`的镜像。镜像状态请参考[镜像数据表](https://cloud.tencent.com/document/product/213/15753#Image)。 + ImageId *string `json:"ImageId,omitempty" name:"ImageId"` + + // 接收分享镜像的账号Id列表,array型参数的格式可以参考[API简介](/document/api/213/568)。帐号ID不同于QQ号,查询用户帐号ID请查看[帐号信息](https://console.cloud.tencent.com/developer)中的帐号ID栏。 + AccountIds []*string `json:"AccountIds,omitempty" name:"AccountIds" list` + + // 操作,包括 `SHARE`,`CANCEL`。其中`SHARE`代表分享操作,`CANCEL`代表取消分享操作。 + Permission *string `json:"Permission,omitempty" name:"Permission"` +} + +func (r *ModifyImageSharePermissionRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyImageSharePermissionRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyImageSharePermissionResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ModifyImageSharePermissionResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyImageSharePermissionResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyInstancesAttributeRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728) API返回值中的`InstanceId`获取。每次请求允许操作的实例数量上限是100。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 实例名称。可任意命名,但不得超过60个字符。 + InstanceName *string `json:"InstanceName,omitempty" name:"InstanceName"` + + // 指定实例的安全组Id列表,子机将重新关联指定列表的安全组,原本关联的安全组会被解绑。 + SecurityGroups []*string `json:"SecurityGroups,omitempty" name:"SecurityGroups" list` +} + +func (r *ModifyInstancesAttributeRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyInstancesAttributeRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyInstancesAttributeResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ModifyInstancesAttributeResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyInstancesAttributeResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyInstancesChargeTypeRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`InstanceId`获取。每次请求批量实例的上限为100。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 实例[计费类型](https://cloud.tencent.com/document/product/213/2180)。
  • PREPAID:预付费,即包年包月。 + InstanceChargeType *string `json:"InstanceChargeType,omitempty" name:"InstanceChargeType"` + + // 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月实例的购买时长、是否设置自动续费等属性。若指定实例的付费模式为预付费则该参数必传。 + InstanceChargePrepaid *InstanceChargePrepaid `json:"InstanceChargePrepaid,omitempty" name:"InstanceChargePrepaid"` +} + +func (r *ModifyInstancesChargeTypeRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyInstancesChargeTypeRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyInstancesChargeTypeResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ModifyInstancesChargeTypeResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyInstancesChargeTypeResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyInstancesProjectRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728) API返回值中的`InstanceId`获取。每次请求允许操作的实例数量上限是100。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 项目ID。项目可以使用[AddProject](https://cloud.tencent.com/doc/api/403/4398)接口创建。可通过[`DescribeProject`](https://cloud.tencent.com/document/product/378/4400) API返回值中的`projectId`获取。后续使用[DescribeInstances](https://cloud.tencent.com/document/api/213/15728)接口查询实例时,项目ID可用于过滤结果。 + ProjectId *int64 `json:"ProjectId,omitempty" name:"ProjectId"` +} + +func (r *ModifyInstancesProjectRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyInstancesProjectRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyInstancesProjectResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ModifyInstancesProjectResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyInstancesProjectResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyInstancesRenewFlagRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728) API返回值中的`InstanceId`获取。每次请求允许操作的实例数量上限是100。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 自动续费标识。取值范围:
+ // • NOTIFY_AND_AUTO_RENEW: notify upon expiration and renew automatically
+ // • NOTIFY_AND_MANUAL_RENEW: notify upon expiration but do not renew automatically
+ // • DISABLE_NOTIFY_AND_MANUAL_RENEW: neither notify upon expiration nor renew automatically
+ //
    若该参数指定为NOTIFY_AND_AUTO_RENEW,在账户余额充足的情况下,实例到期后将按月自动续费。 + RenewFlag *string `json:"RenewFlag,omitempty" name:"RenewFlag"` +} + +func (r *ModifyInstancesRenewFlagRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyInstancesRenewFlagRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyInstancesRenewFlagResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ModifyInstancesRenewFlagResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyInstancesRenewFlagResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyInstancesVpcAttributeRequest struct { + *tchttp.BaseRequest + + // 待操作的实例ID数组。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`InstanceId`获取。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 私有网络相关信息配置,通过该参数指定私有网络的ID,子网ID,私有网络ip等信息。
+ // • If the specified VPC ID and subnet ID (the subnet must be in the instance's availability zone) differ from the VPC the instance currently belongs to, the instance is migrated to the subnet of the specified VPC.
+ // • A VPC subnet IP can be specified via `PrivateIpAddresses`; if specified, a subnet IP must be provided for every instance in the request, and `InstanceIds` then maps one-to-one to `PrivateIpAddresses`.
  • 不指定`PrivateIpAddresses`时随机分配私有网络子网IP。 + VirtualPrivateCloud *VirtualPrivateCloud `json:"VirtualPrivateCloud,omitempty" name:"VirtualPrivateCloud"` + + // 是否对运行中的实例选择强制关机。默认为TRUE。 + ForceStop *bool `json:"ForceStop,omitempty" name:"ForceStop"` + + // 是否保留主机名。默认为FALSE。 + ReserveHostName *bool `json:"ReserveHostName,omitempty" name:"ReserveHostName"` +} + +func (r *ModifyInstancesVpcAttributeRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyInstancesVpcAttributeRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyInstancesVpcAttributeResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ModifyInstancesVpcAttributeResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyInstancesVpcAttributeResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyKeyPairAttributeRequest struct { + *tchttp.BaseRequest + + // 密钥对ID,密钥对ID形如:`skey-xxxxxxxx`。

+ // Available key IDs can be obtained in the following ways:
+ // • Query key IDs by logging in to the [console](https://console.cloud.tencent.com/cvm/sshkey).
  • 通过调用接口 [DescribeKeyPairs](https://cloud.tencent.com/document/api/213/9403) ,取返回信息中的 `KeyId` 获取密钥对 ID。 + KeyId *string `json:"KeyId,omitempty" name:"KeyId"` + + // 修改后的密钥对名称,可由数字,字母和下划线组成,长度不超过25个字符。 + KeyName *string `json:"KeyName,omitempty" name:"KeyName"` + + // 修改后的密钥对描述信息。可任意命名,但不得超过60个字符。 + Description *string `json:"Description,omitempty" name:"Description"` +} + +func (r *ModifyKeyPairAttributeRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyKeyPairAttributeRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ModifyKeyPairAttributeResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ModifyKeyPairAttributeResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ModifyKeyPairAttributeResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type OperationCountLimit struct { + + // 实例操作。取值范围:
+ // • `INSTANCE_DEGRADE`: instance configuration downgrade
  • `INTERNET_CHARGE_TYPE_CHANGE`:修改网络带宽计费模式 + Operation *string `json:"Operation,omitempty" name:"Operation"` + + // 实例ID。 + InstanceId *string `json:"InstanceId,omitempty" name:"InstanceId"` + + // 当前已使用次数,如果返回值为-1表示该操作无次数限制。 + CurrentCount *int64 `json:"CurrentCount,omitempty" name:"CurrentCount"` + + // 操作次数最高额度,如果返回值为-1表示该操作无次数限制,如果返回值为0表示不支持调整配置。 + LimitCount *int64 `json:"LimitCount,omitempty" name:"LimitCount"` +} + +type OsVersion struct { + + // 操作系统类型 + OsName *string `json:"OsName,omitempty" name:"OsName"` + + // 支持的操作系统版本 + OsVersions []*string `json:"OsVersions,omitempty" name:"OsVersions" list` + + // 支持的操作系统架构 + Architecture []*string `json:"Architecture,omitempty" name:"Architecture" list` +} + +type Placement struct { + + // 实例所属的[可用区](https://cloud.tencent.com/document/product/213/15753#ZoneInfo)ID。该参数也可以通过调用 [DescribeZones](https://cloud.tencent.com/document/product/213/15707) 的返回值中的Zone字段来获取。 + Zone *string `json:"Zone,omitempty" name:"Zone"` + + // 实例所属项目ID。该参数可以通过调用 [DescribeProject](/document/api/378/4400) 的返回值中的 projectId 字段来获取。不填为默认项目。 + ProjectId *int64 `json:"ProjectId,omitempty" name:"ProjectId"` + + // 实例所属的专用宿主机ID列表,仅用于入参。如果您有购买专用宿主机并且指定了该参数,则您购买的实例就会随机的部署在这些专用宿主机上。 + HostIds []*string `json:"HostIds,omitempty" name:"HostIds" list` + + // 指定母机ip生产子机 + HostIps []*string `json:"HostIps,omitempty" name:"HostIps" list` + + // 实例所属的专用宿主机ID,仅用于出参。 + HostId *string `json:"HostId,omitempty" name:"HostId"` +} + +type Price struct { + + // 描述了实例价格。 + InstancePrice *ItemPrice `json:"InstancePrice,omitempty" name:"InstancePrice"` + + // 描述了网络价格。 + BandwidthPrice *ItemPrice `json:"BandwidthPrice,omitempty" name:"BandwidthPrice"` +} + +type PurchaseReservedInstancesOfferingRequest struct { + *tchttp.BaseRequest + + // 购买预留实例计费数量 + InstanceCount *int64 `json:"InstanceCount,omitempty" name:"InstanceCount"` + + // 预留实例计费配置ID + ReservedInstancesOfferingId *string `json:"ReservedInstancesOfferingId,omitempty" name:"ReservedInstancesOfferingId"` + + // 试运行 + DryRun *bool `json:"DryRun,omitempty" name:"DryRun"` + + // 用于保证请求幂等性的字符串。该字符串由客户生成,需保证不同请求之间唯一,最大值不超过64个ASCII字符。若不指定该参数,则无法保证请求的幂等性。
    更多详细信息请参阅:如何保证幂等性 + ClientToken *string `json:"ClientToken,omitempty" name:"ClientToken"` +} + +func (r *PurchaseReservedInstancesOfferingRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *PurchaseReservedInstancesOfferingRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type PurchaseReservedInstancesOfferingResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 已购买预留实例计费ID + ReservedInstanceId *string `json:"ReservedInstanceId,omitempty" name:"ReservedInstanceId"` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *PurchaseReservedInstancesOfferingResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *PurchaseReservedInstancesOfferingResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type RebootInstancesRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`InstanceId`获取。每次请求批量实例的上限为100。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 是否在正常重启失败后选择强制重启实例。取值范围:
+ // • TRUE: force restart after a normal restart fails
+ // • FALSE: do not force restart after a normal restart fails
+ //
    默认取值:FALSE。 + ForceReboot *bool `json:"ForceReboot,omitempty" name:"ForceReboot"` + + // 关机类型。取值范围:
+ // • SOFT: soft shutdown
+ // • HARD: hard shutdown
+ // • SOFT_FIRST: try a soft shutdown first, then force a hard shutdown if it fails
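A minimal sketch of a reboot request using the fields documented here; NewRebootInstancesRequest and the common pointer helpers are assumed from the vendored SDK (api.go and the common package) and are not part of this excerpt, and the instance ID is a placeholder.

    package main

    import (
    	"fmt"

    	"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
    	cvm "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312"
    )

    func main() {
    	req := cvm.NewRebootInstancesRequest()
    	req.InstanceIds = common.StringPtrs([]string{"ins-lesecurk"}) // placeholder instance ID
    	req.ForceReboot = common.BoolPtr(false)                       // do not force a restart if the normal restart fails
    	req.StopType = common.StringPtr("SOFT_FIRST")                 // soft shutdown first, hard shutdown on failure
    	fmt.Println(req.ToJsonString())
    }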

    默认取值:SOFT。 + StopType *string `json:"StopType,omitempty" name:"StopType"` +} + +func (r *RebootInstancesRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *RebootInstancesRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type RebootInstancesResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *RebootInstancesResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *RebootInstancesResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type RegionInfo struct { + + // 地域名称,例如,ap-guangzhou + Region *string `json:"Region,omitempty" name:"Region"` + + // 地域描述,例如,华南地区(广州) + RegionName *string `json:"RegionName,omitempty" name:"RegionName"` + + // 地域是否可用状态 + RegionState *string `json:"RegionState,omitempty" name:"RegionState"` +} + +type RenewHostsRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的CDH实例ID。每次请求的CDH实例的上限为100。 + HostIds []*string `json:"HostIds,omitempty" name:"HostIds" list` + + // 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月实例的购买时长、是否设置自动续费等属性。若指定实例的付费模式为预付费则该参数必传。 + HostChargePrepaid *ChargePrepaid `json:"HostChargePrepaid,omitempty" name:"HostChargePrepaid"` +} + +func (r *RenewHostsRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *RenewHostsRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type RenewHostsResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *RenewHostsResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *RenewHostsResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type RenewInstancesRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`InstanceId`获取。每次请求批量实例的上限为100。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月实例的续费时长、是否设置自动续费等属性。包年包月实例该参数为必传参数。 + InstanceChargePrepaid *InstanceChargePrepaid `json:"InstanceChargePrepaid,omitempty" name:"InstanceChargePrepaid"` + + // 是否续费弹性数据盘。取值范围:
+ // • TRUE: renew the attached elastic data disks along with the subscription instance
+ // • FALSE: do not renew the attached elastic data disks when renewing the subscription instance
+ //
    默认取值:TRUE。 + RenewPortableDataDisk *bool `json:"RenewPortableDataDisk,omitempty" name:"RenewPortableDataDisk"` +} + +func (r *RenewInstancesRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *RenewInstancesRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type RenewInstancesResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *RenewInstancesResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *RenewInstancesResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ReservedInstances struct { + + // 已购买的预留实例计费ID。形如:650c138f-ae7e-4750-952a-96841d6e9fc1。 + ReservedInstancesId *string `json:"ReservedInstancesId,omitempty" name:"ReservedInstancesId"` + + // 预留实例计费的类型。形如:S3.MEDIUM4。 + // 返回项:预留实例计费类型列表 + InstanceType *string `json:"InstanceType,omitempty" name:"InstanceType"` + + // 预留实例计费可购买的可用区。形如:ap-guangzhou-1。 + // 返回项:可用区列表 + Zone *string `json:"Zone,omitempty" name:"Zone"` + + // 预留实例计费开始时间。形如:1949-10-01 00:00:00 + StartTime *string `json:"StartTime,omitempty" name:"StartTime"` + + // 预留实例计费到期时间。形如:1949-10-01 00:00:00 + EndTime *string `json:"EndTime,omitempty" name:"EndTime"` + + // 预留实例计费【有效期】即预留实例计费购买时长。形如:31536000。 + // 计量单位:秒。 + Duration *int64 `json:"Duration,omitempty" name:"Duration"` + + // 已购买的预留实例计费个数。形如:10。 + InstanceCount *int64 `json:"InstanceCount,omitempty" name:"InstanceCount"` + + // 描述预留实例计费的平台描述(即操作系统)。形如:linux。 + // 返回项: linux 。 + ProductDescription *string `json:"ProductDescription,omitempty" name:"ProductDescription"` + + // 预留实例计费购买的状态。形如:active + // 返回项: active (以创建) | pending (等待被创建) | retired (过期)。 + State *string `json:"State,omitempty" name:"State"` + + // 可购买的预留实例计费类型的结算货币,使用ISO 4217标准货币代码。形如:USD。 + // 返回项:USD(美元)。 + CurrencyCode *string `json:"CurrencyCode,omitempty" name:"CurrencyCode"` + + // 预留实例计费的付款类型。形如:All Upfront。 + // 返回项: All Upfront (预付全部费用)。 + OfferingType *string `json:"OfferingType,omitempty" name:"OfferingType"` +} + +type ReservedInstancesOffering struct { + + // 预留实例计费可购买的可用区。形如:ap-guangzhou-1。 + // 返回项:可用区列表 + Zone *string `json:"Zone,omitempty" name:"Zone"` + + // 可购买的预留实例计费类型的结算货币,使用ISO 4217标准货币代码。 + // 返回项:USD(美元)。 + CurrencyCode *string `json:"CurrencyCode,omitempty" name:"CurrencyCode"` + + // 预留实例计费【有效期】即预留实例计费购买时长。形如:31536000。 + // 计量单位:秒 + Duration *int64 `json:"Duration,omitempty" name:"Duration"` + + // 预留实例计费的购买价格。形如:4000.0。 + // 计量单位:与 currencyCode 一致,目前支持 USD(美元) + FixedPrice *float64 `json:"FixedPrice,omitempty" name:"FixedPrice"` + + // 预留实例计费的实例类型。形如:S3.MEDIUM4。 + // 返回项:预留实例计费类型列表 + InstanceType *string `json:"InstanceType,omitempty" name:"InstanceType"` + + // 预留实例计费的付款类型。形如:All Upfront。 + // 返回项: All Upfront (预付全部费用)。 + OfferingType *string `json:"OfferingType,omitempty" name:"OfferingType"` + + // 可购买的预留实例计费配置ID。形如:650c138f-ae7e-4750-952a-96841d6e9fc1。 + ReservedInstancesOfferingId *string `json:"ReservedInstancesOfferingId,omitempty" name:"ReservedInstancesOfferingId"` + + // 预留实例计费的平台描述(即操作系统)。形如:linux。 + // 返回项: linux 。 + ProductDescription *string `json:"ProductDescription,omitempty" name:"ProductDescription"` + + // 扣除预付费之后的使用价格 (按小时计费)。形如:0.0。 + // 目前,因为只支持 All Upfront 付款类型,所以默认为 0元/小时。 + // 计量单位:元/小时,货币单位与 currencyCode 一致,目前支持 USD(美元) + UsagePrice *float64 `json:"UsagePrice,omitempty" name:"UsagePrice"` 
+} + +type ResetInstanceRequest struct { + *tchttp.BaseRequest + + // 实例ID。可通过 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728) API返回值中的`InstanceId`获取。 + InstanceId *string `json:"InstanceId,omitempty" name:"InstanceId"` + + // 指定有效的[镜像](https://cloud.tencent.com/document/product/213/4940)ID,格式形如`img-xxx`。镜像类型分为四种:
  • 公共镜像
  • 自定义镜像
  • 共享镜像
  • 服务市场镜像

  • 可通过以下方式获取可用的镜像ID:
  • `公共镜像`、`自定义镜像`、`共享镜像`的镜像ID可通过登录[控制台](https://console.cloud.tencent.com/cvm/image?rid=1&imageType=PUBLIC_IMAGE)查询;`服务镜像市场`的镜像ID可通过[云市场](https://market.cloud.tencent.com/list)查询。
  • 通过调用接口 [DescribeImages](https://cloud.tencent.com/document/api/213/15715) ,取返回信息中的`ImageId`字段。
    默认取值:默认使用当前镜像。 + ImageId *string `json:"ImageId,omitempty" name:"ImageId"` + + // 实例系统盘配置信息。系统盘为云盘的实例可以通过该参数指定重装后的系统盘大小来实现对系统盘的扩容操作,若不指定大小且原系统盘大小小于镜像大小,则会自动扩容,产生多余的磁盘费用。系统盘大小只支持扩容不支持缩容;重装只支持修改系统盘的大小,不能修改系统盘的类型。 + SystemDisk *SystemDisk `json:"SystemDisk,omitempty" name:"SystemDisk"` + + // 实例登录设置。通过该参数可以设置实例的登录方式密码、密钥或保持镜像的原始登录设置。默认情况下会随机生成密码,并以站内信方式知会到用户。 + LoginSettings *LoginSettings `json:"LoginSettings,omitempty" name:"LoginSettings"` + + // 增强服务。通过该参数可以指定是否开启云安全、云监控等服务。若不指定该参数,则默认开启云监控、云安全服务。 + EnhancedService *EnhancedService `json:"EnhancedService,omitempty" name:"EnhancedService"` + + // 重装系统时,可以指定修改实例的主机名。
  • 点号(.)和短横线(-)不能作为 HostName 的首尾字符,不能连续使用。
  • Windows 实例:名称字符长度为[2, 15],允许由字母(不限制大小写)、数字和短横线(-)组成,不支持点号(.),不能全是数字。
  • 其他类型(Linux 等)实例:字符长度为[2, 60],允许支持多个点号,点之间为一段,每段允许字母(不限制大小写)、数字和短横线(-)组成。 + HostName *string `json:"HostName,omitempty" name:"HostName"` +} + +func (r *ResetInstanceRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ResetInstanceRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ResetInstanceResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ResetInstanceResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ResetInstanceResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ResetInstancesInternetMaxBandwidthRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/9388)接口返回值中的 `InstanceId` 获取。 每次请求批量实例的上限为100。当调整 `BANDWIDTH_PREPAID` 和 `BANDWIDTH_POSTPAID_BY_HOUR` 计费方式的带宽时,只支持一个实例。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 公网出带宽配置。不同机型带宽上限范围不一致,具体限制详见带宽限制对账表。暂时只支持 `InternetMaxBandwidthOut` 参数。 + InternetAccessible *InternetAccessible `json:"InternetAccessible,omitempty" name:"InternetAccessible"` + + // 带宽生效的起始时间。格式:`YYYY-MM-DD`,例如:`2016-10-30`。起始时间不能早于当前时间。如果起始时间是今天则新设置的带宽立即生效。该参数只对包年包月带宽有效,其他模式带宽不支持该参数,否则接口会以相应错误码返回。 + StartTime *string `json:"StartTime,omitempty" name:"StartTime"` + + // 带宽生效的终止时间。格式: `YYYY-MM-DD` ,例如:`2016-10-30` 。新设置的带宽的有效期包含终止时间此日期。终止时间不能晚于包年包月实例的到期时间。实例的到期时间可通过 [`DescribeInstances`](https://cloud.tencent.com/document/api/213/9388)接口返回值中的`ExpiredTime`获取。该参数只对包年包月带宽有效,其他模式带宽不支持该参数,否则接口会以相应错误码返回。 + EndTime *string `json:"EndTime,omitempty" name:"EndTime"` +} + +func (r *ResetInstancesInternetMaxBandwidthRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ResetInstancesInternetMaxBandwidthRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ResetInstancesInternetMaxBandwidthResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ResetInstancesInternetMaxBandwidthResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ResetInstancesInternetMaxBandwidthResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ResetInstancesPasswordRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728) API返回值中的`InstanceId`获取。每次请求允许操作的实例数量上限是100。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 实例登录密码。不同操作系统类型密码复杂度限制不一样,具体如下: + // Linux实例密码必须8-30位,推荐使用12位以上密码,不能以“/”开头,至少包含以下字符中的三种不同字符,字符种类:
  • 小写字母:[a-z]
  • 大写字母:[A-Z]
  • 数字:0-9
  • 特殊字符: ()\`~!@#$%^&\*-+=\_|{}[]:;'<>,.?/ + // Windows实例密码必须12~30位,不能以“/”开头且不包括用户名,至少包含以下字符中的三种不同字符
  • 小写字母:[a-z]
  • 大写字母:[A-Z]
  • 数字: 0-9
  • 特殊字符:()\`~!@#$%^&\*-+=\_|{}[]:;' <>,.?/
  • 如果实例即包含`Linux`实例又包含`Windows`实例,则密码复杂度限制按照`Windows`实例的限制。 + Password *string `json:"Password,omitempty" name:"Password"` + + // 待重置密码的实例操作系统的用户名。不得超过64个字符。 + UserName *string `json:"UserName,omitempty" name:"UserName"` + + // 是否对运行中的实例选择强制关机。建议对运行中的实例先手动关机,然后再重置用户密码。取值范围:
  • TRUE:表示在正常关机失败后进行强制关机
  • FALSE:表示在正常关机失败后不进行强制关机

    默认取值:FALSE。

    强制关机的效果等同于关闭物理计算机的电源开关。强制关机可能会导致数据丢失或文件系统损坏,请仅在服务器不能正常关机时使用。 + ForceStop *bool `json:"ForceStop,omitempty" name:"ForceStop"` +} + +func (r *ResetInstancesPasswordRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ResetInstancesPasswordRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ResetInstancesPasswordResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ResetInstancesPasswordResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ResetInstancesPasswordResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ResetInstancesTypeRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`InstanceId`获取。本接口目前仅支持每次操作1个实例。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 实例机型。不同实例机型指定了不同的资源规格,具体取值可通过调用接口[`DescribeInstanceTypeConfigs`](https://cloud.tencent.com/document/api/213/15749)来获得最新的规格表或参见[实例类型](https://cloud.tencent.com/document/product/213/11518)描述。 + InstanceType *string `json:"InstanceType,omitempty" name:"InstanceType"` + + // 是否对运行中的实例选择强制关机。建议对运行中的实例先手动关机。取值范围:
  • TRUE:表示在正常关机失败后进行强制关机
  • FALSE:表示在正常关机失败后不进行强制关机

    默认取值:FALSE。

    强制关机的效果等同于关闭物理计算机的电源开关。强制关机可能会导致数据丢失或文件系统损坏,请仅在服务器不能正常关机时使用。 + ForceStop *bool `json:"ForceStop,omitempty" name:"ForceStop"` +} + +func (r *ResetInstancesTypeRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ResetInstancesTypeRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ResetInstancesTypeResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ResetInstancesTypeResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ResetInstancesTypeResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ResizeInstanceDisksRequest struct { + *tchttp.BaseRequest + + // 待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`InstanceId`获取。 + InstanceId *string `json:"InstanceId,omitempty" name:"InstanceId"` + + // 待扩容的数据盘配置信息。只支持扩容非弹性数据盘([`DescribeDisks`](https://cloud.tencent.com/document/api/362/16315)接口返回值中的`Portable`为`false`表示非弹性),且[数据盘类型](/document/api/213/9452#block_device)为:`CLOUD_BASIC`、`CLOUD_PREMIUM`、`CLOUD_SSD`。数据盘容量单位:GB。最小扩容步长:10G。关于数据盘类型的选择请参考[硬盘产品简介](https://cloud.tencent.com/document/product/362/2353)。可选数据盘类型受到实例类型`InstanceType`限制。另外允许扩容的最大容量也因数据盘类型的不同而有所差异。 + DataDisks []*DataDisk `json:"DataDisks,omitempty" name:"DataDisks" list` + + // 是否对运行中的实例选择强制关机。建议对运行中的实例先手动关机,然后再重置用户密码。取值范围:
  • TRUE:表示在正常关机失败后进行强制关机
  • FALSE:表示在正常关机失败后不进行强制关机

    默认取值:FALSE。

    强制关机的效果等同于关闭物理计算机的电源开关。强制关机可能会导致数据丢失或文件系统损坏,请仅在服务器不能正常关机时使用。 + ForceStop *bool `json:"ForceStop,omitempty" name:"ForceStop"` +} + +func (r *ResizeInstanceDisksRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ResizeInstanceDisksRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type ResizeInstanceDisksResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *ResizeInstanceDisksResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *ResizeInstanceDisksResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type RunInstancesRequest struct { + *tchttp.BaseRequest + + // 实例所在的位置。通过该参数可以指定实例所属可用区,所属项目,所属宿主机(在专用宿主机上创建子机时指定)等属性。 + Placement *Placement `json:"Placement,omitempty" name:"Placement"` + + // 指定有效的[镜像](https://cloud.tencent.com/document/product/213/4940)ID,格式形如`img-xxx`。镜像类型分为四种:
  • 公共镜像
  • 自定义镜像
  • 共享镜像
  • 服务市场镜像

  • 可通过以下方式获取可用的镜像ID:
  • `公共镜像`、`自定义镜像`、`共享镜像`的镜像ID可通过登录[控制台](https://console.cloud.tencent.com/cvm/image?rid=1&imageType=PUBLIC_IMAGE)查询;`服务镜像市场`的镜像ID可通过[云市场](https://market.cloud.tencent.com/list)查询。
  • 通过调用接口 [DescribeImages](https://cloud.tencent.com/document/api/213/15715) ,传入InstanceType获取当前机型支持的镜像列表,取返回信息中的`ImageId`字段。
  • + ImageId *string `json:"ImageId,omitempty" name:"ImageId"` + + // 实例[计费类型](https://cloud.tencent.com/document/product/213/2180)。
  • PREPAID:预付费,即包年包月
  • POSTPAID_BY_HOUR:按小时后付费
  • CDHPAID:独享子机(基于专用宿主机创建,宿主机部分的资源不收费)
  • SPOTPAID:竞价付费
    默认值:POSTPAID_BY_HOUR。 + InstanceChargeType *string `json:"InstanceChargeType,omitempty" name:"InstanceChargeType"` + + // 预付费模式,即包年包月相关参数设置。通过该参数可以指定包年包月实例的购买时长、是否设置自动续费等属性。若指定实例的付费模式为预付费则该参数必传。 + InstanceChargePrepaid *InstanceChargePrepaid `json:"InstanceChargePrepaid,omitempty" name:"InstanceChargePrepaid"` + + // 实例机型。不同实例机型指定了不同的资源规格。 + //
  • 对于付费模式为PREPAID或POSTPAID\_BY\_HOUR的实例创建,具体取值可通过调用接口[DescribeInstanceTypeConfigs](https://cloud.tencent.com/document/api/213/15749)来获得最新的规格表或参见[实例规格](https://cloud.tencent.com/document/product/213/11518)描述。若不指定该参数,则默认机型为S1.SMALL1。
  • 对于付费模式为CDHPAID的实例创建,该参数以"CDH_"为前缀,根据CPU和内存配置生成,具体形式为:CDH_XCXG,例如对于创建CPU为1核,内存为1G大小的专用宿主机的实例,该参数应该为CDH_1C1G。 + InstanceType *string `json:"InstanceType,omitempty" name:"InstanceType"` + + // 实例系统盘配置信息。若不指定该参数,则按照系统默认值进行分配。 + SystemDisk *SystemDisk `json:"SystemDisk,omitempty" name:"SystemDisk"` + + // 实例数据盘配置信息。若不指定该参数,则默认不购买数据盘。支持购买的时候指定21块数据盘,其中最多包含1块LOCAL_BASIC数据盘或者LOCAL_SSD数据盘,最多包含20块CLOUD_BASIC数据盘、CLOUD_PREMIUM数据盘或者CLOUD_SSD数据盘。 + DataDisks []*DataDisk `json:"DataDisks,omitempty" name:"DataDisks" list` + + // 私有网络相关信息配置。通过该参数可以指定私有网络的ID,子网ID等信息。若不指定该参数,则默认使用基础网络。若在此参数中指定了私有网络IP,表示每个实例的主网卡IP,而且InstanceCount参数必须与私有网络IP的个数一致。 + VirtualPrivateCloud *VirtualPrivateCloud `json:"VirtualPrivateCloud,omitempty" name:"VirtualPrivateCloud"` + + // 公网带宽相关信息设置。若不指定该参数,则默认公网带宽为0Mbps。 + InternetAccessible *InternetAccessible `json:"InternetAccessible,omitempty" name:"InternetAccessible"` + + // 购买实例数量。包年包月实例取值范围:[1,300],按量计费实例取值范围:[1,100]。默认取值:1。指定购买实例的数量不能超过用户所能购买的剩余配额数量,具体配额相关限制详见[CVM实例购买限制](https://cloud.tencent.com/document/product/213/2664)。 + InstanceCount *int64 `json:"InstanceCount,omitempty" name:"InstanceCount"` + + // 实例显示名称。
  • 不指定实例显示名称则默认显示‘未命名’。
  • 购买多台实例,如果指定模式串`{R:x}`,表示生成数字`[x, x+n-1]`,其中`n`表示购买实例的数量,例如`server_{R:3}`,购买1台时,实例显示名称为`server_3`;购买2台时,实例显示名称分别为`server_3`,`server_4`。支持指定多个模式串`{R:x}`。
  • 购买多台实例,如果不指定模式串,则在实例显示名称添加后缀`1、2...n`,其中`n`表示购买实例的数量,例如`server_`,购买2台时,实例显示名称分别为`server_1`,`server_2`。
  • 最多支持60个字符(包含模式串)。 + InstanceName *string `json:"InstanceName,omitempty" name:"InstanceName"` + + // 实例登录设置。通过该参数可以设置实例的登录方式密码、密钥或保持镜像的原始登录设置。默认情况下会随机生成密码,并以站内信方式知会到用户。 + LoginSettings *LoginSettings `json:"LoginSettings,omitempty" name:"LoginSettings"` + + // 实例所属安全组。该参数可以通过调用 [DescribeSecurityGroups](https://cloud.tencent.com/document/api/215/15808) 的返回值中的sgId字段来获取。若不指定该参数,则绑定默认安全组。 + SecurityGroupIds []*string `json:"SecurityGroupIds,omitempty" name:"SecurityGroupIds" list` + + // 增强服务。通过该参数可以指定是否开启云安全、云监控等服务。若不指定该参数,则默认公共镜像开启云监控、云安全服务;自定义镜像与镜像市场镜像默认不开启云监控,云安全服务,而使用镜像里保留的服务。 + EnhancedService *EnhancedService `json:"EnhancedService,omitempty" name:"EnhancedService"` + + // 用于保证请求幂等性的字符串。该字符串由客户生成,需保证不同请求之间唯一,最大值不超过64个ASCII字符。若不指定该参数,则无法保证请求的幂等性。 + ClientToken *string `json:"ClientToken,omitempty" name:"ClientToken"` + + // 云服务器的主机名。
  • 点号(.)和短横线(-)不能作为 HostName 的首尾字符,不能连续使用。
  • Windows 实例:名称字符长度为[2, 15],允许由字母(不限制大小写)、数字和短横线(-)组成,不支持点号(.),不能全是数字。
  • 其他类型(Linux 等)实例:字符长度为[2, 60],允许支持多个点号,点之间为一段,每段允许字母(不限制大小写)、数字和短横线(-)组成。 + HostName *string `json:"HostName,omitempty" name:"HostName"` + + // 定时任务。通过该参数可以为实例指定定时任务,目前仅支持定时销毁。 + ActionTimer *ActionTimer `json:"ActionTimer,omitempty" name:"ActionTimer"` + + // 置放群组id,仅支持指定一个。 + DisasterRecoverGroupIds []*string `json:"DisasterRecoverGroupIds,omitempty" name:"DisasterRecoverGroupIds" list` + + // 标签描述列表。通过指定该参数可以同时绑定标签到相应的资源实例,当前仅支持绑定标签到云服务器实例。 + TagSpecification []*TagSpecification `json:"TagSpecification,omitempty" name:"TagSpecification" list` + + // 实例的市场相关选项,如竞价实例相关参数,若指定实例的付费模式为竞价付费则该参数必传。 + InstanceMarketOptions *InstanceMarketOptionsRequest `json:"InstanceMarketOptions,omitempty" name:"InstanceMarketOptions"` + + // 提供给实例使用的用户数据,需要以 base64 方式编码,支持的最大数据大小为 16KB。关于获取此参数的详细介绍,请参阅[Windows](https://cloud.tencent.com/document/product/213/17526)和[Linux](https://cloud.tencent.com/document/product/213/17525)启动时运行命令。 + UserData *string `json:"UserData,omitempty" name:"UserData"` + + // 是否只预检此次请求。 + // true:发送检查请求,不会创建实例。检查项包括是否填写了必需参数,请求格式,业务限制和云服务器库存。 + // 如果检查不通过,则返回对应错误码; + // 如果检查通过,则返回RequestId. + // false(默认):发送正常请求,通过检查后直接创建实例 + DryRun *bool `json:"DryRun,omitempty" name:"DryRun"` +} + +func (r *RunInstancesRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *RunInstancesRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type RunInstancesResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 当通过本接口来创建实例时会返回该参数,表示一个或多个实例`ID`。返回实例`ID`列表并不代表实例创建成功,可根据 [DescribeInstances](https://cloud.tencent.com/document/api/213/15728) 接口查询返回的InstancesSet中对应实例的`ID`的状态来判断创建是否完成;如果实例状态由“准备中”变为“正在运行”,则为创建成功。 + InstanceIdSet []*string `json:"InstanceIdSet,omitempty" name:"InstanceIdSet" list` + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *RunInstancesResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *RunInstancesResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type RunMonitorServiceEnabled struct { + + // 是否开启[云监控](/document/product/248)服务。取值范围:
  • TRUE:表示开启云监控服务
  • FALSE:表示不开启云监控服务

    默认取值:TRUE。 + Enabled *bool `json:"Enabled,omitempty" name:"Enabled"` +} + +type RunSecurityServiceEnabled struct { + + // 是否开启[云安全](/document/product/296)服务。取值范围:
  • TRUE:表示开启云安全服务
  • FALSE:表示不开启云安全服务

    默认取值:TRUE。 + Enabled *bool `json:"Enabled,omitempty" name:"Enabled"` +} + +type SharePermission struct { + + // 镜像分享时间 + CreatedTime *string `json:"CreatedTime,omitempty" name:"CreatedTime"` + + // 镜像分享的账户ID + AccountId *string `json:"AccountId,omitempty" name:"AccountId"` +} + +type Snapshot struct { + + // 快照Id。 + SnapshotId *string `json:"SnapshotId,omitempty" name:"SnapshotId"` + + // 创建此快照的云硬盘类型。取值范围: + // SYSTEM_DISK:系统盘 + // DATA_DISK:数据盘。 + DiskUsage *string `json:"DiskUsage,omitempty" name:"DiskUsage"` + + // 创建此快照的云硬盘大小,单位GB。 + DiskSize *int64 `json:"DiskSize,omitempty" name:"DiskSize"` +} + +type SpotMarketOptions struct { + + // 竞价出价 + MaxPrice *string `json:"MaxPrice,omitempty" name:"MaxPrice"` + + // 竞价请求类型,当前仅支持类型:one-time + SpotInstanceType *string `json:"SpotInstanceType,omitempty" name:"SpotInstanceType"` +} + +type StartInstancesRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`InstanceId`获取。每次请求批量实例的上限为100。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` +} + +func (r *StartInstancesRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *StartInstancesRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type StartInstancesResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *StartInstancesResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *StartInstancesResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type StopInstancesRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`InstanceId`获取。每次请求批量实例的上限为100。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` + + // 是否在正常关闭失败后选择强制关闭实例。取值范围:
  • TRUE:表示在正常关闭失败后进行强制关闭
  • FALSE:表示在正常关闭失败后不进行强制关闭

    默认取值:FALSE。 + ForceStop *bool `json:"ForceStop,omitempty" name:"ForceStop"` + + // 实例的关闭模式。取值范围:
  • SOFT_FIRST:表示在正常关闭失败后进行强制关闭
  • HARD:直接强制关闭
  • SOFT:仅软关机
    默认取值:SOFT。 + StopType *string `json:"StopType,omitempty" name:"StopType"` + + // 按量计费实例关机收费模式。 + // 取值范围:
  • KEEP_CHARGING:关机继续收费
  • STOP_CHARGING:关机停止收费
    默认取值:KEEP_CHARGING。 + // 该参数只针对部分按量计费云硬盘实例生效,详情参考[按量计费实例关机不收费说明](https://cloud.tencent.com/document/product/213/19918) + StoppedMode *string `json:"StoppedMode,omitempty" name:"StoppedMode"` +} + +func (r *StopInstancesRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *StopInstancesRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type StopInstancesResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *StopInstancesResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *StopInstancesResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type StorageBlock struct { + + // HDD本地存储类型,值为:LOCAL_PRO. + // 注意:此字段可能返回 null,表示取不到有效值。 + Type *string `json:"Type,omitempty" name:"Type"` + + // HDD本地存储的最小容量 + // 注意:此字段可能返回 null,表示取不到有效值。 + MinSize *int64 `json:"MinSize,omitempty" name:"MinSize"` + + // HDD本地存储的最大容量 + // 注意:此字段可能返回 null,表示取不到有效值。 + MaxSize *int64 `json:"MaxSize,omitempty" name:"MaxSize"` +} + +type SyncImagesRequest struct { + *tchttp.BaseRequest + + // 镜像ID列表 ,镜像ID可以通过如下方式获取:
  • 通过[DescribeImages](https://cloud.tencent.com/document/api/213/15715)接口返回的`ImageId`获取。
  • 通过[镜像控制台](https://console.cloud.tencent.com/cvm/image)获取。
    镜像ID必须满足限制:
  • 镜像ID对应的镜像状态必须为`NORMAL`。
  • 镜像大小小于50GB。
    镜像状态请参考[镜像数据表](https://cloud.tencent.com/document/product/213/15753#Image)。 + ImageIds []*string `json:"ImageIds,omitempty" name:"ImageIds" list` + + // 目的同步地域列表;必须满足限制:
  • 不能为源地域,
  • 必须是一个合法的Region。
  • 暂不支持部分地域同步。
    具体地域参数请参考[Region](https://cloud.tencent.com/document/product/213/6091)。 + DestinationRegions []*string `json:"DestinationRegions,omitempty" name:"DestinationRegions" list` +} + +func (r *SyncImagesRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *SyncImagesRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type SyncImagesResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *SyncImagesResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *SyncImagesResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type SystemDisk struct { + + // 系统盘类型。系统盘类型限制详见[存储概述](https://cloud.tencent.com/document/product/213/4952)。取值范围:
  • LOCAL_BASIC:本地硬盘
  • LOCAL_SSD:本地SSD硬盘
  • CLOUD_BASIC:普通云硬盘
  • CLOUD_SSD:SSD云硬盘
  • CLOUD_PREMIUM:高性能云硬盘

    默认取值:CLOUD_BASIC。 + DiskType *string `json:"DiskType,omitempty" name:"DiskType"` + + // 系统盘ID。LOCAL_BASIC 和 LOCAL_SSD 类型没有ID。暂时不支持该参数。 + DiskId *string `json:"DiskId,omitempty" name:"DiskId"` + + // 系统盘大小,单位:GB。默认值为 50 + DiskSize *int64 `json:"DiskSize,omitempty" name:"DiskSize"` +} + +type Tag struct { + + // 标签键 + Key *string `json:"Key,omitempty" name:"Key"` + + // 标签值 + Value *string `json:"Value,omitempty" name:"Value"` +} + +type TagSpecification struct { + + // 标签绑定的资源类型,当前支持类型:"instance"和"host" + ResourceType *string `json:"ResourceType,omitempty" name:"ResourceType"` + + // 标签对列表 + Tags []*Tag `json:"Tags,omitempty" name:"Tags" list` +} + +type TerminateInstancesRequest struct { + *tchttp.BaseRequest + + // 一个或多个待操作的实例ID。可通过[`DescribeInstances`](https://cloud.tencent.com/document/api/213/15728)接口返回值中的`InstanceId`获取。每次请求批量实例的上限为100。 + InstanceIds []*string `json:"InstanceIds,omitempty" name:"InstanceIds" list` +} + +func (r *TerminateInstancesRequest) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *TerminateInstancesRequest) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type TerminateInstancesResponse struct { + *tchttp.BaseResponse + Response *struct { + + // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 + RequestId *string `json:"RequestId,omitempty" name:"RequestId"` + } `json:"Response"` +} + +func (r *TerminateInstancesResponse) ToJsonString() string { + b, _ := json.Marshal(r) + return string(b) +} + +func (r *TerminateInstancesResponse) FromJsonString(s string) error { + return json.Unmarshal([]byte(s), &r) +} + +type VirtualPrivateCloud struct { + + // 私有网络ID,形如`vpc-xxx`。有效的VpcId可通过登录[控制台](https://console.cloud.tencent.com/vpc/vpc?rid=1)查询;也可以调用接口 [DescribeVpcEx](/document/api/215/1372) ,从接口返回中的`unVpcId`字段获取。若在创建子机时VpcId与SubnetId同时传入`DEFAULT`,则强制使用默认vpc网络。 + VpcId *string `json:"VpcId,omitempty" name:"VpcId"` + + // 私有网络子网ID,形如`subnet-xxx`。有效的私有网络子网ID可通过登录[控制台](https://console.cloud.tencent.com/vpc/subnet?rid=1)查询;也可以调用接口 [DescribeSubnets](/document/api/215/15784) ,从接口返回中的`unSubnetId`字段获取。若在创建子机时SubnetId与VpcId同时传入`DEFAULT`,则强制使用默认vpc网络。 + SubnetId *string `json:"SubnetId,omitempty" name:"SubnetId"` + + // 是否用作公网网关。公网网关只有在实例拥有公网IP以及处于私有网络下时才能正常使用。取值范围:
  • TRUE:表示用作公网网关
  • FALSE:表示不用作公网网关

    默认取值:FALSE。 + AsVpcGateway *bool `json:"AsVpcGateway,omitempty" name:"AsVpcGateway"` + + // 私有网络子网 IP 数组,在创建实例、修改实例vpc属性操作中可使用此参数。当前仅批量创建多台实例时支持传入相同子网的多个 IP。 + PrivateIpAddresses []*string `json:"PrivateIpAddresses,omitempty" name:"PrivateIpAddresses" list` + + // 为弹性网卡指定随机生成的 IPv6 地址数量。 + Ipv6AddressCount *uint64 `json:"Ipv6AddressCount,omitempty" name:"Ipv6AddressCount"` +} + +type ZoneInfo struct { + + // 可用区名称,例如,ap-guangzhou-3 + // 全网可用区名称如下: + //
  • ap-chongqing-1
  • ap-seoul-1
  • ap-chengdu-1
  • ap-chengdu-2
  • ap-hongkong-1
  • ap-hongkong-2
  • ap-shenzhen-fsi-1
  • ap-shenzhen-fsi-2
  • ap-shenzhen-fsi-3
  • ap-guangzhou-1(售罄)
  • ap-guangzhou-2(售罄)
  • ap-guangzhou-3
  • ap-guangzhou-4
  • ap-tokyo-1
  • ap-singapore-1
  • ap-shanghai-fsi-1
  • ap-shanghai-fsi-2
  • ap-shanghai-fsi-3
  • ap-bangkok-1
  • ap-shanghai-1(售罄)
  • ap-shanghai-2
  • ap-shanghai-3
  • ap-shanghai-4
  • ap-mumbai-1
  • ap-mumbai-2
  • eu-moscow-1
  • ap-beijing-1
  • ap-beijing-2
  • ap-beijing-3
  • ap-beijing-4
  • na-siliconvalley-1
  • na-siliconvalley-2
  • eu-frankfurt-1
  • na-toronto-1
  • na-ashburn-1
  • na-ashburn-2
  • ap-nanjing-1
  • ap-nanjing-2
  • + Zone *string `json:"Zone,omitempty" name:"Zone"` + + // 可用区描述,例如,广州三区 + ZoneName *string `json:"ZoneName,omitempty" name:"ZoneName"` + + // 可用区ID + ZoneId *string `json:"ZoneId,omitempty" name:"ZoneId"` + + // 可用区状态,包含AVAILABLE和UNAVAILABLE。AVAILABLE代表可用,UNAVAILABLE代表不可用。 + ZoneState *string `json:"ZoneState,omitempty" name:"ZoneState"` +} diff --git a/vendor/github.com/vmware/govmomi/.gitignore b/vendor/github.com/vmware/govmomi/.gitignore new file mode 100644 index 000000000000..7f4c3119f7d1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +dist/ diff --git a/vendor/github.com/vmware/govmomi/.goreleaser.yml b/vendor/github.com/vmware/govmomi/.goreleaser.yml new file mode 100644 index 000000000000..a15c5d7d7c1f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/.goreleaser.yml @@ -0,0 +1,57 @@ +--- +project_name: govc +builds: +- goos: + - linux + - darwin + - windows + - freebsd + goarch: + - amd64 + - 386 + env: + - CGO_ENABLED=0 + main: ./govc/main.go + binary: govc + flags: -compiler gc + ldflags: -X github.com/vmware/govmomi/govc/flags.GitVersion={{.Version}} +archive: + name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}' + format: tar.gz + format_overrides: + - goos: windows + format: zip + files: + - none* +checksum: + name_template: '{{ .ProjectName }}_{{ .Version }}_checksums.txt' +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' + - Merge pull request + - Merge branch +brew: + github: + owner: govmomi + name: homebrew-tap + commit_author: + name: Alfred the Narwhal + email: cna-alfred@vmware.com + folder: Formula + homepage: "https://github.com/vmware/govmomi/blob/master/govc/README.md" + description: "govc is a vSphere CLI built on top of govmomi." + test: | + system "#{bin}/govc version" +dockers: + - image: vmware/govc + goos: linux + goarch: amd64 + binary: govc + tag_templates: + - "{{ .Tag }}" + - "v{{ .Major }}" + - "v{{ .Major }}.{{ .Minor }}" + - latest diff --git a/vendor/github.com/vmware/govmomi/.mailmap b/vendor/github.com/vmware/govmomi/.mailmap new file mode 100644 index 000000000000..a012bc9ff414 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/.mailmap @@ -0,0 +1,20 @@ +Amit Bathla +Bruce Downs +Bruce Downs +Clint Greenwood +Cédric Blomart +Cédric Blomart cedric +David Stark +Eric Gray +Eric Yutao eric +Fabio Rapposelli +Henrik Hodne +Jeremy Canady +Pieter Noordhuis +Takaaki Furukawa takaaki.furukawa +Takaaki Furukawa tkak +Vadim Egorov +Anfernee Yongkun Gui +Anfernee Yongkun Gui Yongkun Anfernee Gui +Zach Tucker +Zee Yang diff --git a/vendor/github.com/vmware/govmomi/.travis.yml b/vendor/github.com/vmware/govmomi/.travis.yml new file mode 100644 index 000000000000..0a3834fffa65 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/.travis.yml @@ -0,0 +1,30 @@ +sudo: false + +language: go + +go: + - 1.8.x + - 1.9.x + - '1.10' +go_import_path: github.com/vmware/govmomi + +before_install: + - sudo apt-get -qq update + - sudo apt-get install -y xmlstarlet + - make vendor + +script: + - make check test + - GOOS=windows make install + +after_success: + - test -n "$TRAVIS_TAG" && docker login -u="$DOCKER_USERNAME" -p="$DOCKER_PASSWORD" + +deploy: +- provider: script + skip_cleanup: true + script: curl -sL http://git.io/goreleaser | bash + on: + tags: true + condition: $TRAVIS_OS_NAME = linux + go: '1.10' \ No newline at end of file diff --git a/vendor/github.com/vmware/govmomi/CHANGELOG.md b/vendor/github.com/vmware/govmomi/CHANGELOG.md new file mode 100644 index 
000000000000..1e0c1618b8d1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/CHANGELOG.md @@ -0,0 +1,313 @@ +# changelog + +### 0.18.0 (2018-05-24) + +* Add VirtualDiskManager wrapper to set UUID + +* Add vmxnet2, pcnet32 and sriov to VirtualDeviceList.EthernetCardTypes + +* Add new vSphere 6.7 APIs + +* Decrease LoginExtensionByCertificate tunnel usage + +* SAML token authentication support via SessionManager.LoginByToken + +* New SSO admin client for managing users + +* New STS client for issuing and renewing SAML tokens + +* New Lookup Service client for discovering endpoints such as STS and ssoadmin + +* Switch from gvt to go dep for managing dependencies + +### 0.17.1 (2018-03-19) + +* vcsim: add Destroy method for Folder and Datacenter types + +* In progress.Reader emit final report on EOF. + +* vcsim: add EventManager.QueryEvents + +### 0.17.0 (2018-02-28) + +* Add HostStorageSystem.AttachScsiLun method + +* Avoid possible panic in Datastore.Stat (#969) + +* Destroy event history collectors (#962) + +* Add VirtualDiskManager.CreateChildDisk method + +### 0.16.0 (2017-11-08) + +* Add support for SOAP request operation ID header + +* Moved ovf helpers from govc import.ovf command to ovf and nfc packages + +* Added guest/toolbox (client) package + +* Added toolbox package and toolbox command + +* Added simulator package and vcsim command + +### 0.15.0 (2017-06-19) + +* WaitOptions.MaxWaitSeconds is now optional + +* Support removal of ExtraConfig entries + +* GuestPosixFileAttributes OwnerId and GroupId fields are now pointers, + rather than omitempty ints to allow chown with root uid:gid + +* Updated examples/ using view package + +* Add DatastoreFile.TailFunc method + +* Export VirtualMachine.FindSnapshot method + +* Add AuthorizationManager {Enable,Disable}Methods + +* Add PBM client + +### 0.14.0 (2017-04-08) + +* Add view.ContainerView type and methods + +* Add Collector.RetrieveWithFilter method + +* Add property.Filter type + +* Implement EthernetCardBackingInfo for OpaqueNetwork + +* Finder: support changing object root in find mode + +* Add VirtualDiskManager.QueryVirtualDiskInfo + +* Add performance.Manager APIs + +### 0.13.0 (2017-03-02) + +* Add DatastoreFileManager API wrapper + +* Add HostVsanInternalSystem API wrappers + +* Add Container support to view package + +* Finder supports Folder recursion without specifying a path + +* Add VirtualMachine.QueryConfigTarget method + +* Add device option to VirtualMachine.WaitForNetIP + +* Remove _Task suffix from vapp methods + +### 0.12.1 (2016-12-19) + +* Add DiagnosticLog helper + +* Add DatastorePath helper + +### 0.12.0 (2016-12-01) + +* Disable use of service ticket for datastore HTTP access by default + +* Attach context to HTTP requests for cancellations + +* Update to vim25/6.5 API + +### 0.11.4 (2016-11-15) + +* Add object.AuthorizationManager methods: RetrieveRolePermissions, RetrieveAllPermissions, AddRole, RemoveRole, UpdateRole + +### 0.11.3 (2016-11-08) + +* Allow DatastoreFile.Follow reader to drain current body after stopping + +### 0.11.2 (2016-11-01) + +* Avoid possible NPE in VirtualMachine.Device method + +* Add support for OpaqueNetwork type to Finder + +* Add HostConfigManager.AccountManager support for ESX 5.5 + +### 0.11.1 (2016-10-27) + +* Add Finder.ResourcePoolListAll method + +### 0.11.0 (2016-10-25) + +* Add object.DistributedVirtualPortgroup.Reconfigure method + +### 0.10.0 (2016-10-20) + +* Add option to set soap.Client.UserAgent + +* Add service ticket thumbprint validation + +* Update use of 
http.DefaultTransport fields to 1.7 + +* Set default locale to en_US (override with GOVMOMI_LOCALE env var) + +* Add object.HostCertificateInfo (types.HostCertificateManagerCertificateInfo helpers) + +* Add object.HostCertificateManager type and HostConfigManager.CertificateManager method + +* Add soap.Client SetRootCAs and SetDialTLS methods + +### 0.9.0 (2016-09-09) + +* Add object.DatastoreFile helpers for streaming and tailing datastore files + +* Add object VirtualMachine.Unregister method + +* Add object.ListView methods: Add, Remove, Reset + +* Update to Go 1.7 - using stdlib's context package + +### 0.8.0 (2016-06-30) + +* Add session.Manager.AcquireLocalTicket + +* Include StoragePod in Finder.FolderList + +* Add Finder methods for finding by ManagedObjectReference: Element, ObjectReference + +* Add mo.ManagedObjectReference methods: Reference, String, FromString + +* Add support using SessionManagerGenericServiceTicket.HostName for Datastore HTTP access + +### 0.7.1 (2016-06-03) + +* Fix object.ObjectName method + +### 0.7.0 (2016-06-02) + +* Move InventoryPath field to object.Common + +* Add HostDatastoreSystem.CreateLocalDatastore method + +* Add DatastoreNamespaceManager methods: CreateDirectory, DeleteDirectory + +* Add HostServiceSystem + +* Add HostStorageSystem methods: MarkAsSdd, MarkAsNonSdd, MarkAsLocal, MarkAsNonLocal + +* Add HostStorageSystem.RescanAllHba method + +### 0.6.2 (2016-05-11) + +* Get complete file details in Datastore.Stat + +* SOAP decoding fixes + +* Add VirtualMachine.RemoveAllSnapshot + +### 0.6.1 (2016-04-30) + +* Fix mo.Entity interface + +### 0.6.0 (2016-04-29) + +* Add Common.Rename method + +* Add mo.Entity interface + +* Add OptionManager + +* Add Finder.FolderList method + +* Add VirtualMachine.WaitForNetIP method + +* Add VirtualMachine.RevertToSnapshot method + +* Add Datastore.Download method + +### 0.5.0 (2016-03-30) + +Generated fields using xsd type 'int' change to Go type 'int32' + +VirtualDevice.UnitNumber field changed to pointer type + +### 0.4.0 (2016-02-26) + +* Add method to convert virtual device list to array with virtual device + changes that can be used in the VirtualMachineConfigSpec. + +* Make datastore cluster traversable in lister + +* Add finder.DatastoreCluster methods (also known as storage pods) + +* Add Drone CI check + +* Add object.Datastore Type and AttachedClusterHosts methods + +* Add finder.*OrDefault methods + +### 0.3.0 (2016-01-16) + +* Add object.VirtualNicManager wrapper + +* Add object.HostVsanSystem wrapper + +* Add object.HostSystem methods: EnterMaintenanceMode, ExitMaintenanceMode, Disconnect, Reconnect + +* Add finder.Folder method + +* Add object.Common.Destroy method + +* Add object.ComputeResource.Reconfigure method + +* Add license.AssignmentManager wrapper + +* Add object.HostFirewallSystem wrapper + +* Add object.DiagnosticManager wrapper + +* Add LoginExtensionByCertificate support + +* Add object.ExtensionManager + +... + +### 0.2.0 (2015-09-15) + +* Update to vim25/6.0 API + +* Stop returning children from `ManagedObjectList` + + Change the `ManagedObjectList` function in the `find` package to only + return the managed objects specified by the path argument and not their + children. The original behavior was used by govc's `ls` command and is + now available in the newly added function `ManagedObjectListChildren`. 
+ +* Add retry functionality to vim25 package + +* Change finder functions to no longer take varargs + + The `find` package had functions to return a list of objects, given a + variable number of patterns. This makes it impossible to distinguish which + patterns produced results and which ones didn't. + + In particular for govc, where multiple arguments can be passed from the + command line, it is useful to let the user know which ones produce results + and which ones don't. + + To evaluate multiple patterns, the user should call the find functions + multiple times (either serially or in parallel). + +* Make optional boolean fields pointers (`vim25/types`). + + False is the zero value of a boolean field, which means they are not serialized + if the field is marked "omitempty". If the field is a pointer instead, the zero + value will be the nil pointer, and both true and false values are serialized. + +### 0.1.0 (2015-03-17) + +Prior to this version the API of this library was in flux. + +Notable changes w.r.t. the state of this library before March 2015 are: + +* All functions that may execute a request take a `context.Context` parameter. +* The `vim25` package contains a minimal client implementation. +* The property collector and its convenience functions live in the `property` package. diff --git a/vendor/github.com/vmware/govmomi/CONTRIBUTING.md b/vendor/github.com/vmware/govmomi/CONTRIBUTING.md new file mode 100644 index 000000000000..f6645cbf4f10 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/CONTRIBUTING.md @@ -0,0 +1,101 @@ +# Contributing to govmomi + +## Getting started + +First, fork the repository on GitHub to your personal account. + +Note that _GOPATH_ can be any directory, the example below uses _$HOME/govmomi_. +Change _$USER_ below to your github username if they are not the same. + +``` shell +export GOPATH=$HOME/govmomi +go get github.com/vmware/govmomi +cd $GOPATH/src/github.com/vmware/govmomi +git config push.default nothing # anything to avoid pushing to vmware/govmomi by default +git remote rename origin vmware +git remote add $USER git@github.com:$USER/govmomi.git +git fetch $USER +``` + +## Installing from source + +Compile the govmomi libraries and install govc using: + +``` shell +go install -v github.com/vmware/govmomi/govc +``` + +Note that **govc/build.sh** is only used for building release binaries. + +## Contribution flow + +This is a rough outline of what a contributor's workflow looks like: + +- Create a topic branch from where you want to base your work. +- Make commits of logical units. +- Make sure your commit messages are in the proper format (see below). +- Update CHANGELOG.md and/or govc/CHANGELOG.md when appropriate. +- Push your changes to a topic branch in your fork of the repository. +- Submit a pull request to vmware/govmomi. + +Example: + +``` shell +git checkout -b my-new-feature vmware/master +git commit -a +git push $USER my-new-feature +``` + +### Stay in sync with upstream + +When your branch gets out of sync with the vmware/master branch, use the following to update: + +``` shell +git checkout my-new-feature +git fetch -a +git rebase vmware/master +git push --force-with-lease $USER my-new-feature +``` + +### Updating pull requests + +If your PR fails to pass CI or needs changes based on code review, you'll most likely want to squash these changes into +existing commits. + +If your pull request contains a single commit or your changes are related to the most recent commit, you can simply +amend the commit. + +``` shell +git add . 
+git commit --amend +git push --force-with-lease $USER my-new-feature +``` + +If you need to squash changes into an earlier commit, you can use: + +``` shell +git add . +git commit --fixup +git rebase -i --autosquash vmware/master +git push --force-with-lease $USER my-new-feature +``` + +Be sure to add a comment to the PR indicating your new changes are ready to review, as github does not generate a +notification when you git push. + +### Code style + +The coding style suggested by the Golang community is used in govmomi. See the +[style doc](https://github.com/golang/go/wiki/CodeReviewComments) for details. + +Try to limit column width to 120 characters for both code and markdown documents such as this one. + +### Format of the Commit Message + +We follow the conventions on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/). + +Be sure to include any related GitHub issue references in the commit message. + +## Reporting Bugs and Creating Issues + +When opening a new issue, try to roughly follow the commit message format conventions above. diff --git a/vendor/github.com/vmware/govmomi/CONTRIBUTORS b/vendor/github.com/vmware/govmomi/CONTRIBUTORS new file mode 100644 index 000000000000..1fd37b609d16 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/CONTRIBUTORS @@ -0,0 +1,83 @@ +# People who can (and typically have) contributed to this repository. +# +# This script is generated by contributors.sh +# + +Abhijeet Kasurde +abrarshivani +Adam Shannon +akutz +Alessandro Cortiana +Alex Bozhenko +Alvaro Miranda +amandahla +Amanda H. L. de Andrade +Amit Bathla +amit bezalel +Andrew Chin +Anfernee Yongkun Gui +aniketGslab +Arran Walker +Aryeh Weinreb +Austin Parker +Balu Dontu +bastienbc +Bob Killen +Brad Fitzpatrick +Bruce Downs +Cédric Blomart +Chris Marchesi +Christian Höltje +Clint Greenwood +Danny Lockard +Dave Tucker +Davide Agnello +David Stark +Deric Crago +Doug MacEachern +Eloy Coto +Eric Gray +Eric Yutao +Erik Hollensbe +Fabio Rapposelli +Faiyaz Ahmed +forkbomber +Gavin Gray +Gavrie Philipson +George Hicken +Gerrit Renker +gthombare +Hasan Mahmood +Henrik Hodne +Isaac Rodman +Ivan Porto Carrero +Jason Kincl +Jeremy Canady +jeremy-clerc +João Pereira +Jorge Sevilla +leslie-qiwa +Louie Jiang +Marc Carmier +Matthew Cosgrove +Mevan Samaratunga +Nicolas Lamirault +Omar Kohl +Parham Alvani +Pieter Noordhuis +runner.mei +S.Çağlar Onur +Sergey Ignatov +Steve Purcell +Takaaki Furukawa +tanishi +Ted Zlatanov +Thibaut Ackermann +Trevor Dawe +Vadim Egorov +Volodymyr Bobyr +Witold Krecicki +Yang Yang +Yuya Kusakabe +Zach Tucker +Zee Yang diff --git a/vendor/github.com/vmware/govmomi/Dockerfile b/vendor/github.com/vmware/govmomi/Dockerfile new file mode 100644 index 000000000000..4f84fadaafbb --- /dev/null +++ b/vendor/github.com/vmware/govmomi/Dockerfile @@ -0,0 +1,4 @@ +FROM scratch +LABEL maintainer="fabio@vmware.com" +COPY govc / +ENTRYPOINT [ "/govc" ] \ No newline at end of file diff --git a/vendor/github.com/vmware/govmomi/Gopkg.lock b/vendor/github.com/vmware/govmomi/Gopkg.lock new file mode 100644 index 000000000000..0f9a8f8c2658 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/Gopkg.lock @@ -0,0 +1,44 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + branch = "improvements" + name = "github.com/davecgh/go-xdr" + packages = ["xdr2"] + revision = "4930550ba2e22f87187498acfd78348b15f4e7a8" + source = "https://github.com/rasky/go-xdr" + +[[projects]] + name = "github.com/google/uuid" + packages = ["."] + revision = "6a5e28554805e78ea6141142aba763936c4761c0" + +[[projects]] + branch = "govmomi" + name = "github.com/kr/pretty" + packages = ["."] + revision = "2ee9d7453c02ef7fa518a83ae23644eb8872186a" + source = "https://github.com/dougm/pretty" + +[[projects]] + branch = "master" + name = "github.com/kr/text" + packages = ["."] + revision = "7cafcd837844e784b526369c9bce262804aebc60" + +[[projects]] + branch = "master" + name = "github.com/vmware/vmw-guestinfo" + packages = [ + "bdoor", + "message", + "vmcheck" + ] + revision = "25eff159a728be87e103a0b8045e08273f4dbec4" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "376638fa6c0621cbd980caf8fc53494d880886f100663da8de47ecb6e596e439" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/vmware/govmomi/Gopkg.toml b/vendor/github.com/vmware/govmomi/Gopkg.toml new file mode 100644 index 000000000000..4c4d6765e6fa --- /dev/null +++ b/vendor/github.com/vmware/govmomi/Gopkg.toml @@ -0,0 +1,19 @@ +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# Refer to https://github.com/toml-lang/toml for detailed TOML docs. + +[prune] + non-go = true + go-tests = true + unused-packages = true + +[[constraint]] + branch = "improvements" + name = "github.com/davecgh/go-xdr" + source = "https://github.com/rasky/go-xdr" + +[[constraint]] + branch = "govmomi" + name = "github.com/kr/pretty" + source = "https://github.com/dougm/pretty" diff --git a/vendor/github.com/vmware/govmomi/LICENSE.txt b/vendor/github.com/vmware/govmomi/LICENSE.txt new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/vmware/govmomi/Makefile b/vendor/github.com/vmware/govmomi/Makefile new file mode 100644 index 000000000000..e0e03ecd3f5f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/Makefile @@ -0,0 +1,29 @@ +.PHONY: test + +all: check test + +check: goimports govet + +goimports: + @echo checking go imports... + @go get golang.org/x/tools/cmd/goimports + @! goimports -d . 2>&1 | egrep -v '^$$' + +govet: + @echo checking go vet... + @go tool vet -structtags=false -methods=false $$(find . -mindepth 1 -maxdepth 1 -type d -not -name vendor) + +install: + go install -v github.com/vmware/govmomi/govc + go install -v github.com/vmware/govmomi/vcsim + +go-test: + go test -race -v $(TEST_OPTS) ./... + +govc-test: install + (cd govc/test && ./vendor/github.com/sstephenson/bats/libexec/bats -t .) 
+ +test: go-test govc-test + +doc: install + ./govc/usage.sh > ./govc/USAGE.md diff --git a/vendor/github.com/vmware/govmomi/README.md b/vendor/github.com/vmware/govmomi/README.md new file mode 100644 index 000000000000..08bc8df80897 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/README.md @@ -0,0 +1,86 @@ +[![Build Status](https://travis-ci.org/vmware/govmomi.png?branch=master)](https://travis-ci.org/vmware/govmomi) +[![Go Report Card](https://goreportcard.com/badge/github.com/vmware/govmomi)](https://goreportcard.com/report/github.com/vmware/govmomi) + +# govmomi + +A Go library for interacting with VMware vSphere APIs (ESXi and/or vCenter). + +In addition to the vSphere API client, this repository includes: + +* [govc](./govc) - vSphere CLI + +* [vcsim](./vcsim) - vSphere API mock framework + +* [toolbox](./toolbox) - VM guest tools framework + +## Compatibility + +This library is built for and tested against ESXi and vCenter 6.0, 6.5 and 6.7. + +It may work with versions 5.5 and 5.1, but neither are officially supported. + +## Documentation + +The APIs exposed by this library very closely follow the API described in the [VMware vSphere API Reference Documentation][apiref]. +Refer to this document to become familiar with the upstream API. + +The code in the `govmomi` package is a wrapper for the code that is generated from the vSphere API description. +It primarily provides convenience functions for working with the vSphere API. +See [godoc.org][godoc] for documentation. + +[apiref]:http://pubs.vmware.com/vsphere-6-5/index.jsp#com.vmware.wssdk.apiref.doc/right-pane.html +[godoc]:http://godoc.org/github.com/vmware/govmomi + +## Installation + +```sh +go get -u github.com/vmware/govmomi +``` + +## Discussion + +Contributors and users are encouraged to collaborate using GitHub issues and/or +[Slack](https://vmwarecode.slack.com/messages/govmomi). +Access to Slack requires a [VMware {code} membership](https://code.vmware.com/join/). + +## Status + +Changes to the API are subject to [semantic versioning](http://semver.org). + +Refer to the [CHANGELOG](CHANGELOG.md) for version to version changes. + +## Projects using govmomi + +* [Docker Machine](https://github.com/docker/machine/tree/master/drivers/vmwarevsphere) + +* [Docker InfraKit](https://github.com/docker/infrakit/tree/master/pkg/provider/vsphere) + +* [Docker LinuxKit](https://github.com/linuxkit/linuxkit/tree/master/src/cmd/linuxkit) + +* [Kubernetes](https://github.com/kubernetes/kubernetes/tree/master/pkg/cloudprovider/providers/vsphere) + +* [Kubernetes kops](https://github.com/kubernetes/kops/tree/master/upup/pkg/fi/cloudup/vsphere) + +* [Terraform](https://github.com/terraform-providers/terraform-provider-vsphere) + +* [Packer](https://github.com/jetbrains-infra/packer-builder-vsphere) + +* [VMware VIC Engine](https://github.com/vmware/vic) + +* [Travis CI](https://github.com/travis-ci/jupiter-brain) + +* [collectd-vsphere](https://github.com/travis-ci/collectd-vsphere) + +* [Gru](https://github.com/dnaeon/gru) + +* [Libretto](https://github.com/apcera/libretto/tree/master/virtualmachine/vsphere) + +## Related projects + +* [rbvmomi](https://github.com/vmware/rbvmomi) + +* [pyvmomi](https://github.com/vmware/pyvmomi) + +## License + +govmomi is available under the [Apache 2 license](LICENSE). 
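As a quick orientation for the vendored govmomi library added above, the sketch below shows how a consumer typically connects to an endpoint and inspects it. It is not part of this patch: the URL, credentials, and the insecure TLS flag are placeholder assumptions, and it uses only calls that appear in the vendored client code (NewClient, Logout, IsVC) plus the standard library.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/vmware/govmomi"
)

func main() {
	ctx := context.Background()

	// Placeholder endpoint; because the URL carries user info, NewClient logs in automatically.
	u, err := url.Parse("https://user:pass@vcenter.example.com/sdk")
	if err != nil {
		log.Fatal(err)
	}

	// The final argument skips TLS verification (only sensible for self-signed lab certificates).
	c, err := govmomi.NewClient(ctx, u, true)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Logout(ctx)

	fmt.Println("connected to vCenter:", c.IsVC())
	fmt.Println("endpoint:", c.Client.ServiceContent.About.FullName)
}
```

Outside of test environments, pass `false` for the insecure flag or configure trusted roots via the soap.Client SetRootCAs method noted in the changelog above.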
diff --git a/vendor/github.com/vmware/govmomi/client.go b/vendor/github.com/vmware/govmomi/client.go new file mode 100644 index 000000000000..ad49fe6bf7de --- /dev/null +++ b/vendor/github.com/vmware/govmomi/client.go @@ -0,0 +1,136 @@ +/* +Copyright (c) 2014-2016 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +This package is the root package of the govmomi library. + +The library is structured as follows: + +Package vim25 + +The minimal usable functionality is available through the vim25 package. +It contains subpackages that contain generated types, managed objects, and all +available methods. The vim25 package is entirely independent of the other +packages in the govmomi tree -- it has no dependencies on its peers. + +The vim25 package itself contains a client structure that is +passed around throughout the entire library. It abstracts a session and its +immutable state. See the vim25 package for more information. + +Package session + +The session package contains an abstraction for the session manager that allows +a user to login and logout. It also provides access to the current session +(i.e. to determine if the user is in fact logged in) + +Package object + +The object package contains wrappers for a selection of managed objects. The +constructors of these objects all take a *vim25.Client, which they pass along +to derived objects, if applicable. + +Package govc + +The govc package contains the govc CLI. The code in this tree is not intended +to be used as a library. Any functionality that govc contains that _could_ be +used as a library function but isn't, _should_ live in a root level package. + +Other packages + +Other packages, such as "event", "guest", or "license", provide wrappers for +the respective subsystems. They are typically not needed in normal workflows so +are kept outside the object package. +*/ +package govmomi + +import ( + "context" + "net/url" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/session" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type Client struct { + *vim25.Client + + SessionManager *session.Manager +} + +// NewClient creates a new client from a URL. The client authenticates with the +// server with username/password before returning if the URL contains user information. +func NewClient(ctx context.Context, u *url.URL, insecure bool) (*Client, error) { + soapClient := soap.NewClient(u, insecure) + vimClient, err := vim25.NewClient(ctx, soapClient) + if err != nil { + return nil, err + } + + c := &Client{ + Client: vimClient, + SessionManager: session.NewManager(vimClient), + } + + // Only login if the URL contains user information. + if u.User != nil { + err = c.Login(ctx, u.User) + if err != nil { + return nil, err + } + } + + return c, nil +} + +// Login dispatches to the SessionManager. 
+func (c *Client) Login(ctx context.Context, u *url.Userinfo) error { + return c.SessionManager.Login(ctx, u) +} + +// Logout dispatches to the SessionManager. +func (c *Client) Logout(ctx context.Context) error { + // Close any idle connections after logging out. + defer c.Client.CloseIdleConnections() + return c.SessionManager.Logout(ctx) +} + +// PropertyCollector returns the session's default property collector. +func (c *Client) PropertyCollector() *property.Collector { + return property.DefaultCollector(c.Client) +} + +// RetrieveOne dispatches to the Retrieve function on the default property collector. +func (c *Client) RetrieveOne(ctx context.Context, obj types.ManagedObjectReference, p []string, dst interface{}) error { + return c.PropertyCollector().RetrieveOne(ctx, obj, p, dst) +} + +// Retrieve dispatches to the Retrieve function on the default property collector. +func (c *Client) Retrieve(ctx context.Context, objs []types.ManagedObjectReference, p []string, dst interface{}) error { + return c.PropertyCollector().Retrieve(ctx, objs, p, dst) +} + +// Wait dispatches to property.Wait. +func (c *Client) Wait(ctx context.Context, obj types.ManagedObjectReference, ps []string, f func([]types.PropertyChange) bool) error { + return property.Wait(ctx, c.PropertyCollector(), obj, ps, f) +} + +// IsVC returns true if we are connected to a vCenter +func (c *Client) IsVC() bool { + return c.Client.IsVC() +} diff --git a/vendor/github.com/vmware/govmomi/find/doc.go b/vendor/github.com/vmware/govmomi/find/doc.go new file mode 100644 index 000000000000..0c8acee01638 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/find/doc.go @@ -0,0 +1,37 @@ +/* +Copyright (c) 2014-2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package find implements inventory listing and searching. + +The Finder is an alternative to the object.SearchIndex FindByInventoryPath() and FindChild() methods. +SearchIndex.FindByInventoryPath requires an absolute path, whereas the Finder also supports relative paths +and patterns via path.Match. +SearchIndex.FindChild requires a parent to find the child, whereas the Finder also supports an ancestor via +recursive object traversal. + +The various Finder methods accept a "path" argument, which can absolute or relative to the Folder for the object type. +The Finder supports two modes, "list" and "find". The "list" mode behaves like the "ls" command, only searching within +the immediate path. The "find" mode behaves like the "find" command, with the search starting at the immediate path but +also recursing into sub Folders relative to the Datacenter. The default mode is "list" if the given path contains a "/", +otherwise "find" mode is used. + +The exception is to use a "..." wildcard with a path to find all objects recursively underneath any root object. 
+For example: VirtualMachineList("/DC1/...") + +See also: https://github.com/vmware/govmomi/blob/master/govc/README.md#usage +*/ +package find diff --git a/vendor/github.com/vmware/govmomi/find/error.go b/vendor/github.com/vmware/govmomi/find/error.go new file mode 100644 index 000000000000..684526dab768 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/find/error.go @@ -0,0 +1,64 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package find + +import "fmt" + +type NotFoundError struct { + kind string + path string +} + +func (e *NotFoundError) Error() string { + return fmt.Sprintf("%s '%s' not found", e.kind, e.path) +} + +type MultipleFoundError struct { + kind string + path string +} + +func (e *MultipleFoundError) Error() string { + return fmt.Sprintf("path '%s' resolves to multiple %ss", e.path, e.kind) +} + +type DefaultNotFoundError struct { + kind string +} + +func (e *DefaultNotFoundError) Error() string { + return fmt.Sprintf("no default %s found", e.kind) +} + +type DefaultMultipleFoundError struct { + kind string +} + +func (e DefaultMultipleFoundError) Error() string { + return fmt.Sprintf("default %s resolves to multiple instances, please specify", e.kind) +} + +func toDefaultError(err error) error { + switch e := err.(type) { + case *NotFoundError: + return &DefaultNotFoundError{e.kind} + case *MultipleFoundError: + return &DefaultMultipleFoundError{e.kind} + default: + return err + } +} diff --git a/vendor/github.com/vmware/govmomi/find/finder.go b/vendor/github.com/vmware/govmomi/find/finder.go new file mode 100644 index 000000000000..70a2b535945f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/find/finder.go @@ -0,0 +1,1042 @@ +/* +Copyright (c) 2014-2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package find + +import ( + "context" + "errors" + "path" + "strings" + + "github.com/vmware/govmomi/list" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type Finder struct { + client *vim25.Client + r recurser + dc *object.Datacenter + si *object.SearchIndex + folders *object.DatacenterFolders +} + +func NewFinder(client *vim25.Client, all bool) *Finder { + f := &Finder{ + client: client, + si: object.NewSearchIndex(client), + r: recurser{ + Collector: property.DefaultCollector(client), + All: all, + }, + } + + return f +} + +func (f *Finder) SetDatacenter(dc *object.Datacenter) *Finder { + f.dc = dc + f.folders = nil + return f +} + +// findRoot makes it possible to use "find" mode with a different root path. +// Example: ResourcePoolList("/dc1/host/cluster1/...") +func (f *Finder) findRoot(ctx context.Context, root *list.Element, parts []string) bool { + if len(parts) == 0 { + return false + } + + ix := len(parts) - 1 + + if parts[ix] != "..." { + return false + } + + if ix == 0 { + return true // We already have the Object for root.Path + } + + // Lookup the Object for the new root.Path + rootPath := path.Join(root.Path, path.Join(parts[:ix]...)) + + ref, err := f.si.FindByInventoryPath(ctx, rootPath) + if err != nil || ref == nil { + // If we get an error or fail to match, fall through to find() with the original root and path + return false + } + + root.Path = rootPath + root.Object = ref + + return true +} + +func (f *Finder) find(ctx context.Context, arg string, s *spec) ([]list.Element, error) { + isPath := strings.Contains(arg, "/") + + root := list.Element{ + Path: "/", + Object: object.NewRootFolder(f.client), + } + + parts := list.ToParts(arg) + + if len(parts) > 0 { + switch parts[0] { + case "..": // Not supported; many edge case, little value + return nil, errors.New("cannot traverse up a tree") + case ".": // Relative to whatever + pivot, err := s.Relative(ctx) + if err != nil { + return nil, err + } + + mes, err := mo.Ancestors(ctx, f.client, f.client.ServiceContent.PropertyCollector, pivot.Reference()) + if err != nil { + return nil, err + } + + for _, me := range mes { + // Skip root entity in building inventory path. + if me.Parent == nil { + continue + } + root.Path = path.Join(root.Path, me.Name) + } + + root.Object = pivot + parts = parts[1:] + } + } + + if s.listMode(isPath) { + if f.findRoot(ctx, &root, parts) { + parts = []string{"*"} + } else { + return f.r.List(ctx, s, root, parts) + } + } + + s.Parents = append(s.Parents, s.Nested...) + + return f.r.Find(ctx, s, root, parts) +} + +func (f *Finder) datacenter() (*object.Datacenter, error) { + if f.dc == nil { + return nil, errors.New("please specify a datacenter") + } + + return f.dc, nil +} + +// datacenterPath returns the absolute path to the Datacenter containing the given ref +func (f *Finder) datacenterPath(ctx context.Context, ref types.ManagedObjectReference) (string, error) { + mes, err := mo.Ancestors(ctx, f.client, f.client.ServiceContent.PropertyCollector, ref) + if err != nil { + return "", err + } + + // Chop leaves under the Datacenter + for i := len(mes) - 1; i > 0; i-- { + if mes[i].Self.Type == "Datacenter" { + break + } + mes = mes[:i] + } + + var p string + + for _, me := range mes { + // Skip root entity in building inventory path. 
+ if me.Parent == nil { + continue + } + + p = p + "/" + me.Name + } + + return p, nil +} + +func (f *Finder) dcFolders(ctx context.Context) (*object.DatacenterFolders, error) { + if f.folders != nil { + return f.folders, nil + } + + dc, err := f.datacenter() + if err != nil { + return nil, err + } + + folders, err := dc.Folders(ctx) + if err != nil { + return nil, err + } + + f.folders = folders + + return f.folders, nil +} + +func (f *Finder) dcReference(_ context.Context) (object.Reference, error) { + dc, err := f.datacenter() + if err != nil { + return nil, err + } + + return dc, nil +} + +func (f *Finder) vmFolder(ctx context.Context) (object.Reference, error) { + folders, err := f.dcFolders(ctx) + if err != nil { + return nil, err + } + + return folders.VmFolder, nil +} + +func (f *Finder) hostFolder(ctx context.Context) (object.Reference, error) { + folders, err := f.dcFolders(ctx) + if err != nil { + return nil, err + } + + return folders.HostFolder, nil +} + +func (f *Finder) datastoreFolder(ctx context.Context) (object.Reference, error) { + folders, err := f.dcFolders(ctx) + if err != nil { + return nil, err + } + + return folders.DatastoreFolder, nil +} + +func (f *Finder) networkFolder(ctx context.Context) (object.Reference, error) { + folders, err := f.dcFolders(ctx) + if err != nil { + return nil, err + } + + return folders.NetworkFolder, nil +} + +func (f *Finder) rootFolder(_ context.Context) (object.Reference, error) { + return object.NewRootFolder(f.client), nil +} + +func (f *Finder) managedObjectList(ctx context.Context, path string, tl bool, include []string) ([]list.Element, error) { + fn := f.rootFolder + + if f.dc != nil { + fn = f.dcReference + } + + if len(path) == 0 { + path = "." + } + + s := &spec{ + Relative: fn, + Parents: []string{"ComputeResource", "ClusterComputeResource", "HostSystem", "VirtualApp", "StoragePod"}, + Include: include, + } + + if tl { + s.Contents = true + s.ListMode = types.NewBool(true) + } + + return f.find(ctx, path, s) +} + +// Element returns an Element for the given ManagedObjectReference +// This method is only useful for looking up the InventoryPath of a ManagedObjectReference. +func (f *Finder) Element(ctx context.Context, ref types.ManagedObjectReference) (*list.Element, error) { + rl := func(_ context.Context) (object.Reference, error) { + return ref, nil + } + + s := &spec{ + Relative: rl, + } + + e, err := f.find(ctx, "./", s) + if err != nil { + return nil, err + } + + if len(e) == 0 { + return nil, &NotFoundError{ref.Type, ref.Value} + } + + if len(e) > 1 { + panic("ManagedObjectReference must be unique") + } + + return &e[0], nil +} + +// ObjectReference converts the given ManagedObjectReference to a type from the object package via object.NewReference +// with the object.Common.InventoryPath field set. 
+func (f *Finder) ObjectReference(ctx context.Context, ref types.ManagedObjectReference) (object.Reference, error) { + e, err := f.Element(ctx, ref) + if err != nil { + return nil, err + } + + r := object.NewReference(f.client, ref) + + type common interface { + SetInventoryPath(string) + } + + r.(common).SetInventoryPath(e.Path) + + if f.dc != nil { + if ds, ok := r.(*object.Datastore); ok { + ds.DatacenterPath = f.dc.InventoryPath + } + } + + return r, nil +} + +func (f *Finder) ManagedObjectList(ctx context.Context, path string, include ...string) ([]list.Element, error) { + return f.managedObjectList(ctx, path, false, include) +} + +func (f *Finder) ManagedObjectListChildren(ctx context.Context, path string, include ...string) ([]list.Element, error) { + return f.managedObjectList(ctx, path, true, include) +} + +func (f *Finder) DatacenterList(ctx context.Context, path string) ([]*object.Datacenter, error) { + s := &spec{ + Relative: f.rootFolder, + Include: []string{"Datacenter"}, + } + + es, err := f.find(ctx, path, s) + if err != nil { + return nil, err + } + + var dcs []*object.Datacenter + for _, e := range es { + ref := e.Object.Reference() + if ref.Type == "Datacenter" { + dc := object.NewDatacenter(f.client, ref) + dc.InventoryPath = e.Path + dcs = append(dcs, dc) + } + } + + if len(dcs) == 0 { + return nil, &NotFoundError{"datacenter", path} + } + + return dcs, nil +} + +func (f *Finder) Datacenter(ctx context.Context, path string) (*object.Datacenter, error) { + dcs, err := f.DatacenterList(ctx, path) + if err != nil { + return nil, err + } + + if len(dcs) > 1 { + return nil, &MultipleFoundError{"datacenter", path} + } + + return dcs[0], nil +} + +func (f *Finder) DefaultDatacenter(ctx context.Context) (*object.Datacenter, error) { + dc, err := f.Datacenter(ctx, "*") + if err != nil { + return nil, toDefaultError(err) + } + + return dc, nil +} + +func (f *Finder) DatacenterOrDefault(ctx context.Context, path string) (*object.Datacenter, error) { + if path != "" { + dc, err := f.Datacenter(ctx, path) + if err != nil { + return nil, err + } + return dc, nil + } + + return f.DefaultDatacenter(ctx) +} + +func (f *Finder) DatastoreList(ctx context.Context, path string) ([]*object.Datastore, error) { + s := &spec{ + Relative: f.datastoreFolder, + Parents: []string{"StoragePod"}, + } + + es, err := f.find(ctx, path, s) + if err != nil { + return nil, err + } + + var dss []*object.Datastore + for _, e := range es { + ref := e.Object.Reference() + if ref.Type == "Datastore" { + ds := object.NewDatastore(f.client, ref) + ds.InventoryPath = e.Path + + if f.dc == nil { + // In this case SetDatacenter was not called and path is absolute + ds.DatacenterPath, err = f.datacenterPath(ctx, ref) + if err != nil { + return nil, err + } + } else { + ds.DatacenterPath = f.dc.InventoryPath + } + + dss = append(dss, ds) + } + } + + if len(dss) == 0 { + return nil, &NotFoundError{"datastore", path} + } + + return dss, nil +} + +func (f *Finder) Datastore(ctx context.Context, path string) (*object.Datastore, error) { + dss, err := f.DatastoreList(ctx, path) + if err != nil { + return nil, err + } + + if len(dss) > 1 { + return nil, &MultipleFoundError{"datastore", path} + } + + return dss[0], nil +} + +func (f *Finder) DefaultDatastore(ctx context.Context) (*object.Datastore, error) { + ds, err := f.Datastore(ctx, "*") + if err != nil { + return nil, toDefaultError(err) + } + + return ds, nil +} + +func (f *Finder) DatastoreOrDefault(ctx context.Context, path string) (*object.Datastore, error) { + if 
path != "" { + ds, err := f.Datastore(ctx, path) + if err != nil { + return nil, err + } + return ds, nil + } + + return f.DefaultDatastore(ctx) +} + +func (f *Finder) DatastoreClusterList(ctx context.Context, path string) ([]*object.StoragePod, error) { + s := &spec{ + Relative: f.datastoreFolder, + } + + es, err := f.find(ctx, path, s) + if err != nil { + return nil, err + } + + var sps []*object.StoragePod + for _, e := range es { + ref := e.Object.Reference() + if ref.Type == "StoragePod" { + sp := object.NewStoragePod(f.client, ref) + sp.InventoryPath = e.Path + sps = append(sps, sp) + } + } + + if len(sps) == 0 { + return nil, &NotFoundError{"datastore cluster", path} + } + + return sps, nil +} + +func (f *Finder) DatastoreCluster(ctx context.Context, path string) (*object.StoragePod, error) { + sps, err := f.DatastoreClusterList(ctx, path) + if err != nil { + return nil, err + } + + if len(sps) > 1 { + return nil, &MultipleFoundError{"datastore cluster", path} + } + + return sps[0], nil +} + +func (f *Finder) DefaultDatastoreCluster(ctx context.Context) (*object.StoragePod, error) { + sp, err := f.DatastoreCluster(ctx, "*") + if err != nil { + return nil, toDefaultError(err) + } + + return sp, nil +} + +func (f *Finder) DatastoreClusterOrDefault(ctx context.Context, path string) (*object.StoragePod, error) { + if path != "" { + sp, err := f.DatastoreCluster(ctx, path) + if err != nil { + return nil, err + } + return sp, nil + } + + return f.DefaultDatastoreCluster(ctx) +} + +func (f *Finder) ComputeResourceList(ctx context.Context, path string) ([]*object.ComputeResource, error) { + s := &spec{ + Relative: f.hostFolder, + } + + es, err := f.find(ctx, path, s) + if err != nil { + return nil, err + } + + var crs []*object.ComputeResource + for _, e := range es { + var cr *object.ComputeResource + + switch o := e.Object.(type) { + case mo.ComputeResource, mo.ClusterComputeResource: + cr = object.NewComputeResource(f.client, o.Reference()) + default: + continue + } + + cr.InventoryPath = e.Path + crs = append(crs, cr) + } + + if len(crs) == 0 { + return nil, &NotFoundError{"compute resource", path} + } + + return crs, nil +} + +func (f *Finder) ComputeResource(ctx context.Context, path string) (*object.ComputeResource, error) { + crs, err := f.ComputeResourceList(ctx, path) + if err != nil { + return nil, err + } + + if len(crs) > 1 { + return nil, &MultipleFoundError{"compute resource", path} + } + + return crs[0], nil +} + +func (f *Finder) DefaultComputeResource(ctx context.Context) (*object.ComputeResource, error) { + cr, err := f.ComputeResource(ctx, "*") + if err != nil { + return nil, toDefaultError(err) + } + + return cr, nil +} + +func (f *Finder) ComputeResourceOrDefault(ctx context.Context, path string) (*object.ComputeResource, error) { + if path != "" { + cr, err := f.ComputeResource(ctx, path) + if err != nil { + return nil, err + } + return cr, nil + } + + return f.DefaultComputeResource(ctx) +} + +func (f *Finder) ClusterComputeResourceList(ctx context.Context, path string) ([]*object.ClusterComputeResource, error) { + s := &spec{ + Relative: f.hostFolder, + } + + es, err := f.find(ctx, path, s) + if err != nil { + return nil, err + } + + var ccrs []*object.ClusterComputeResource + for _, e := range es { + var ccr *object.ClusterComputeResource + + switch o := e.Object.(type) { + case mo.ClusterComputeResource: + ccr = object.NewClusterComputeResource(f.client, o.Reference()) + default: + continue + } + + ccr.InventoryPath = e.Path + ccrs = append(ccrs, ccr) + } + + if 
len(ccrs) == 0 { + return nil, &NotFoundError{"cluster", path} + } + + return ccrs, nil +} + +func (f *Finder) DefaultClusterComputeResource(ctx context.Context) (*object.ClusterComputeResource, error) { + cr, err := f.ClusterComputeResource(ctx, "*") + if err != nil { + return nil, toDefaultError(err) + } + + return cr, nil +} + +func (f *Finder) ClusterComputeResource(ctx context.Context, path string) (*object.ClusterComputeResource, error) { + ccrs, err := f.ClusterComputeResourceList(ctx, path) + if err != nil { + return nil, err + } + + if len(ccrs) > 1 { + return nil, &MultipleFoundError{"cluster", path} + } + + return ccrs[0], nil +} + +func (f *Finder) ClusterComputeResourceOrDefault(ctx context.Context, path string) (*object.ClusterComputeResource, error) { + if path != "" { + cr, err := f.ClusterComputeResource(ctx, path) + if err != nil { + return nil, err + } + return cr, nil + } + + return f.DefaultClusterComputeResource(ctx) +} + +func (f *Finder) HostSystemList(ctx context.Context, path string) ([]*object.HostSystem, error) { + s := &spec{ + Relative: f.hostFolder, + Parents: []string{"ComputeResource", "ClusterComputeResource"}, + Include: []string{"HostSystem"}, + } + + es, err := f.find(ctx, path, s) + if err != nil { + return nil, err + } + + var hss []*object.HostSystem + for _, e := range es { + var hs *object.HostSystem + + switch o := e.Object.(type) { + case mo.HostSystem: + hs = object.NewHostSystem(f.client, o.Reference()) + + hs.InventoryPath = e.Path + hss = append(hss, hs) + case mo.ComputeResource, mo.ClusterComputeResource: + cr := object.NewComputeResource(f.client, o.Reference()) + + cr.InventoryPath = e.Path + + hosts, err := cr.Hosts(ctx) + if err != nil { + return nil, err + } + + hss = append(hss, hosts...) + } + } + + if len(hss) == 0 { + return nil, &NotFoundError{"host", path} + } + + return hss, nil +} + +func (f *Finder) HostSystem(ctx context.Context, path string) (*object.HostSystem, error) { + hss, err := f.HostSystemList(ctx, path) + if err != nil { + return nil, err + } + + if len(hss) > 1 { + return nil, &MultipleFoundError{"host", path} + } + + return hss[0], nil +} + +func (f *Finder) DefaultHostSystem(ctx context.Context) (*object.HostSystem, error) { + hs, err := f.HostSystem(ctx, "*") + if err != nil { + return nil, toDefaultError(err) + } + + return hs, nil +} + +func (f *Finder) HostSystemOrDefault(ctx context.Context, path string) (*object.HostSystem, error) { + if path != "" { + hs, err := f.HostSystem(ctx, path) + if err != nil { + return nil, err + } + return hs, nil + } + + return f.DefaultHostSystem(ctx) +} + +func (f *Finder) NetworkList(ctx context.Context, path string) ([]object.NetworkReference, error) { + s := &spec{ + Relative: f.networkFolder, + } + + es, err := f.find(ctx, path, s) + if err != nil { + return nil, err + } + + var ns []object.NetworkReference + for _, e := range es { + ref := e.Object.Reference() + switch ref.Type { + case "Network": + r := object.NewNetwork(f.client, ref) + r.InventoryPath = e.Path + ns = append(ns, r) + case "OpaqueNetwork": + r := object.NewOpaqueNetwork(f.client, ref) + r.InventoryPath = e.Path + ns = append(ns, r) + case "DistributedVirtualPortgroup": + r := object.NewDistributedVirtualPortgroup(f.client, ref) + r.InventoryPath = e.Path + ns = append(ns, r) + case "DistributedVirtualSwitch", "VmwareDistributedVirtualSwitch": + r := object.NewDistributedVirtualSwitch(f.client, ref) + r.InventoryPath = e.Path + ns = append(ns, r) + } + } + + if len(ns) == 0 { + return nil, 
&NotFoundError{"network", path} + } + + return ns, nil +} + +func (f *Finder) Network(ctx context.Context, path string) (object.NetworkReference, error) { + networks, err := f.NetworkList(ctx, path) + if err != nil { + return nil, err + } + + if len(networks) > 1 { + return nil, &MultipleFoundError{"network", path} + } + + return networks[0], nil +} + +func (f *Finder) DefaultNetwork(ctx context.Context) (object.NetworkReference, error) { + network, err := f.Network(ctx, "*") + if err != nil { + return nil, toDefaultError(err) + } + + return network, nil +} + +func (f *Finder) NetworkOrDefault(ctx context.Context, path string) (object.NetworkReference, error) { + if path != "" { + network, err := f.Network(ctx, path) + if err != nil { + return nil, err + } + return network, nil + } + + return f.DefaultNetwork(ctx) +} + +func (f *Finder) ResourcePoolList(ctx context.Context, path string) ([]*object.ResourcePool, error) { + s := &spec{ + Relative: f.hostFolder, + Parents: []string{"ComputeResource", "ClusterComputeResource", "VirtualApp"}, + Nested: []string{"ResourcePool"}, + Contents: true, + } + + es, err := f.find(ctx, path, s) + if err != nil { + return nil, err + } + + var rps []*object.ResourcePool + for _, e := range es { + var rp *object.ResourcePool + + switch o := e.Object.(type) { + case mo.ResourcePool: + rp = object.NewResourcePool(f.client, o.Reference()) + rp.InventoryPath = e.Path + rps = append(rps, rp) + } + } + + if len(rps) == 0 { + return nil, &NotFoundError{"resource pool", path} + } + + return rps, nil +} + +func (f *Finder) ResourcePool(ctx context.Context, path string) (*object.ResourcePool, error) { + rps, err := f.ResourcePoolList(ctx, path) + if err != nil { + return nil, err + } + + if len(rps) > 1 { + return nil, &MultipleFoundError{"resource pool", path} + } + + return rps[0], nil +} + +func (f *Finder) DefaultResourcePool(ctx context.Context) (*object.ResourcePool, error) { + rp, err := f.ResourcePool(ctx, "*/Resources") + if err != nil { + return nil, toDefaultError(err) + } + + return rp, nil +} + +func (f *Finder) ResourcePoolOrDefault(ctx context.Context, path string) (*object.ResourcePool, error) { + if path != "" { + rp, err := f.ResourcePool(ctx, path) + if err != nil { + return nil, err + } + return rp, nil + } + + return f.DefaultResourcePool(ctx) +} + +// ResourcePoolListAll combines ResourcePoolList and VirtualAppList +// VirtualAppList is only called if ResourcePoolList does not find any pools with the given path. 
+func (f *Finder) ResourcePoolListAll(ctx context.Context, path string) ([]*object.ResourcePool, error) { + pools, err := f.ResourcePoolList(ctx, path) + if err != nil { + if _, ok := err.(*NotFoundError); !ok { + return nil, err + } + + vapps, _ := f.VirtualAppList(ctx, path) + + if len(vapps) == 0 { + return nil, err + } + + for _, vapp := range vapps { + pools = append(pools, vapp.ResourcePool) + } + } + + return pools, nil +} + +func (f *Finder) DefaultFolder(ctx context.Context) (*object.Folder, error) { + ref, err := f.vmFolder(ctx) + if err != nil { + return nil, toDefaultError(err) + } + folder := object.NewFolder(f.client, ref.Reference()) + + return folder, nil +} + +func (f *Finder) FolderOrDefault(ctx context.Context, path string) (*object.Folder, error) { + if path != "" { + folder, err := f.Folder(ctx, path) + if err != nil { + return nil, err + } + return folder, nil + } + return f.DefaultFolder(ctx) +} + +func (f *Finder) VirtualMachineList(ctx context.Context, path string) ([]*object.VirtualMachine, error) { + s := &spec{ + Relative: f.vmFolder, + Parents: []string{"VirtualApp"}, + } + + es, err := f.find(ctx, path, s) + if err != nil { + return nil, err + } + + var vms []*object.VirtualMachine + for _, e := range es { + switch o := e.Object.(type) { + case mo.VirtualMachine: + vm := object.NewVirtualMachine(f.client, o.Reference()) + vm.InventoryPath = e.Path + vms = append(vms, vm) + } + } + + if len(vms) == 0 { + return nil, &NotFoundError{"vm", path} + } + + return vms, nil +} + +func (f *Finder) VirtualMachine(ctx context.Context, path string) (*object.VirtualMachine, error) { + vms, err := f.VirtualMachineList(ctx, path) + if err != nil { + return nil, err + } + + if len(vms) > 1 { + return nil, &MultipleFoundError{"vm", path} + } + + return vms[0], nil +} + +func (f *Finder) VirtualAppList(ctx context.Context, path string) ([]*object.VirtualApp, error) { + s := &spec{ + Relative: f.vmFolder, + } + + es, err := f.find(ctx, path, s) + if err != nil { + return nil, err + } + + var apps []*object.VirtualApp + for _, e := range es { + switch o := e.Object.(type) { + case mo.VirtualApp: + app := object.NewVirtualApp(f.client, o.Reference()) + app.InventoryPath = e.Path + apps = append(apps, app) + } + } + + if len(apps) == 0 { + return nil, &NotFoundError{"app", path} + } + + return apps, nil +} + +func (f *Finder) VirtualApp(ctx context.Context, path string) (*object.VirtualApp, error) { + apps, err := f.VirtualAppList(ctx, path) + if err != nil { + return nil, err + } + + if len(apps) > 1 { + return nil, &MultipleFoundError{"app", path} + } + + return apps[0], nil +} + +func (f *Finder) FolderList(ctx context.Context, path string) ([]*object.Folder, error) { + es, err := f.ManagedObjectList(ctx, path) + if err != nil { + return nil, err + } + + var folders []*object.Folder + + for _, e := range es { + switch o := e.Object.(type) { + case mo.Folder, mo.StoragePod: + folder := object.NewFolder(f.client, o.Reference()) + folder.InventoryPath = e.Path + folders = append(folders, folder) + case *object.Folder: + // RootFolder + folders = append(folders, o) + } + } + + if len(folders) == 0 { + return nil, &NotFoundError{"folder", path} + } + + return folders, nil +} + +func (f *Finder) Folder(ctx context.Context, path string) (*object.Folder, error) { + folders, err := f.FolderList(ctx, path) + if err != nil { + return nil, err + } + + if len(folders) > 1 { + return nil, &MultipleFoundError{"folder", path} + } + + return folders[0], nil +} diff --git 
a/vendor/github.com/vmware/govmomi/find/recurser.go b/vendor/github.com/vmware/govmomi/find/recurser.go new file mode 100644 index 000000000000..80d958a264f4 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/find/recurser.go @@ -0,0 +1,253 @@ +/* +Copyright (c) 2014-2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package find + +import ( + "context" + "os" + "path" + "strings" + + "github.com/vmware/govmomi/list" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25/mo" +) + +// spec is used to specify per-search configuration, independent of the Finder instance. +type spec struct { + // Relative returns the root object to resolve Relative paths (starts with ".") + Relative func(ctx context.Context) (object.Reference, error) + + // ListMode can be used to optionally force "ls" behavior, rather than "find" behavior + ListMode *bool + + // Contents configures the Recurser to list the Contents of traversable leaf nodes. + // This is typically set to true when used from the ls command, where listing + // a folder means listing its Contents. This is typically set to false for + // commands that take managed entities that are not folders as input. + Contents bool + + // Parents specifies the types which can contain the child types being searched for. + // for example, when searching for a HostSystem, parent types can be + // "ComputeResource" or "ClusterComputeResource". + Parents []string + + // Include specifies which types to be included in the results, used only in "find" mode. + Include []string + + // Nested should be set to types that can be Nested, used only in "find" mode. + Nested []string + + // ChildType avoids traversing into folders that can't contain the Include types, used only in "find" mode. + ChildType []string +} + +func (s *spec) traversable(o mo.Reference) bool { + ref := o.Reference() + + switch ref.Type { + case "Datacenter": + if len(s.Include) == 1 && s.Include[0] == "Datacenter" { + // No point in traversing deeper as Datacenters cannot be nested + return false + } + return true + case "Folder": + if f, ok := o.(mo.Folder); ok { + // TODO: Not making use of this yet, but here we can optimize when searching the entire + // inventory across Datacenters for specific types, for example: 'govc ls -t VirtualMachine /**' + // should not traverse into a Datacenter's host, network or datatore folders. 
+ if !s.traversableChildType(f.ChildType) { + return false + } + } + + return true + } + + for _, kind := range s.Parents { + if kind == ref.Type { + return true + } + } + + return false +} + +func (s *spec) traversableChildType(ctypes []string) bool { + if len(s.ChildType) == 0 { + return true + } + + for _, t := range ctypes { + for _, c := range s.ChildType { + if t == c { + return true + } + } + } + + return false +} + +func (s *spec) wanted(e list.Element) bool { + if len(s.Include) == 0 { + return true + } + + w := e.Object.Reference().Type + + for _, kind := range s.Include { + if w == kind { + return true + } + } + + return false +} + +// listMode is a global option to revert to the original Finder behavior, +// disabling the newer "find" mode. +var listMode = os.Getenv("GOVMOMI_FINDER_LIST_MODE") == "true" + +func (s *spec) listMode(isPath bool) bool { + if listMode { + return true + } + + if s.ListMode != nil { + return *s.ListMode + } + + return isPath +} + +type recurser struct { + Collector *property.Collector + + // All configures the recurses to fetch complete objects for leaf nodes. + All bool +} + +func (r recurser) List(ctx context.Context, s *spec, root list.Element, parts []string) ([]list.Element, error) { + if len(parts) == 0 { + // Include non-traversable leaf elements in result. For example, consider + // the pattern "./vm/my-vm-*", where the pattern should match the VMs and + // not try to traverse them. + // + // Include traversable leaf elements in result, if the contents + // field is set to false. + // + if !s.Contents || !s.traversable(root.Object.Reference()) { + return []list.Element{root}, nil + } + } + + k := list.Lister{ + Collector: r.Collector, + Reference: root.Object.Reference(), + Prefix: root.Path, + } + + if r.All && len(parts) < 2 { + k.All = true + } + + in, err := k.List(ctx) + if err != nil { + return nil, err + } + + // This folder is a leaf as far as the glob goes. + if len(parts) == 0 { + return in, nil + } + + all := parts + pattern := parts[0] + parts = parts[1:] + + var out []list.Element + for _, e := range in { + matched, err := path.Match(pattern, path.Base(e.Path)) + if err != nil { + return nil, err + } + + if !matched { + matched = strings.HasSuffix(e.Path, "/"+path.Join(all...)) + if matched { + // name contains a '/' + out = append(out, e) + } + + continue + } + + nres, err := r.List(ctx, s, e, parts) + if err != nil { + return nil, err + } + + out = append(out, nres...) + } + + return out, nil +} + +func (r recurser) Find(ctx context.Context, s *spec, root list.Element, parts []string) ([]list.Element, error) { + var out []list.Element + + if len(parts) > 0 { + pattern := parts[0] + matched, err := path.Match(pattern, path.Base(root.Path)) + if err != nil { + return nil, err + } + + if matched && s.wanted(root) { + out = append(out, root) + } + } + + if !s.traversable(root.Object) { + return out, nil + } + + k := list.Lister{ + Collector: r.Collector, + Reference: root.Object.Reference(), + Prefix: root.Path, + } + + in, err := k.List(ctx) + if err != nil { + return nil, err + } + + for _, e := range in { + nres, err := r.Find(ctx, s, e, parts) + if err != nil { + return nil, err + } + + out = append(out, nres...) + } + + return out, nil +} diff --git a/vendor/github.com/vmware/govmomi/list/lister.go b/vendor/github.com/vmware/govmomi/list/lister.go new file mode 100644 index 000000000000..2ee32e6bc174 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/list/lister.go @@ -0,0 +1,563 @@ +/* +Copyright (c) 2014-2016 VMware, Inc. 
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package list + +import ( + "context" + "fmt" + "path" + "reflect" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type Element struct { + Path string + Object mo.Reference +} + +func (e Element) String() string { + return fmt.Sprintf("%s @ %s", e.Object.Reference(), e.Path) +} + +func ToElement(r mo.Reference, prefix string) Element { + var name string + + // Comments about types to be expected in folders copied from the + // documentation of the Folder managed object: + // http://pubs.vmware.com/vsphere-55/topic/com.vmware.wssdk.apiref.doc/vim.Folder.html + switch m := r.(type) { + case mo.Folder: + name = m.Name + case mo.StoragePod: + name = m.Name + + // { "vim.Datacenter" } - Identifies the root folder and its descendant + // folders. Data center folders can contain child data center folders and + // Datacenter managed objects. Datacenter objects contain virtual machine, + // compute resource, network entity, and datastore folders. + case mo.Datacenter: + name = m.Name + + // { "vim.Virtualmachine", "vim.VirtualApp" } - Identifies a virtual machine + // folder. A virtual machine folder may contain child virtual machine + // folders. It also can contain VirtualMachine managed objects, templates, + // and VirtualApp managed objects. + case mo.VirtualMachine: + name = m.Name + case mo.VirtualApp: + name = m.Name + + // { "vim.ComputeResource" } - Identifies a compute resource + // folder, which contains child compute resource folders and ComputeResource + // hierarchies. + case mo.ComputeResource: + name = m.Name + case mo.ClusterComputeResource: + name = m.Name + case mo.HostSystem: + name = m.Name + case mo.ResourcePool: + name = m.Name + + // { "vim.Network" } - Identifies a network entity folder. + // Network entity folders on a vCenter Server can contain Network, + // DistributedVirtualSwitch, and DistributedVirtualPortgroup managed objects. + // Network entity folders on an ESXi host can contain only Network objects. + case mo.Network: + name = m.Name + case mo.OpaqueNetwork: + name = m.Name + case mo.DistributedVirtualSwitch: + name = m.Name + case mo.DistributedVirtualPortgroup: + name = m.Name + case mo.VmwareDistributedVirtualSwitch: + name = m.Name + + // { "vim.Datastore" } - Identifies a datastore folder. Datastore folders can + // contain child datastore folders and Datastore managed objects. 
+ case mo.Datastore: + name = m.Name + + default: + panic("not implemented for type " + reflect.TypeOf(r).String()) + } + + e := Element{ + Path: path.Join(prefix, name), + Object: r, + } + + return e +} + +type Lister struct { + Collector *property.Collector + Reference types.ManagedObjectReference + Prefix string + All bool +} + +func (l Lister) retrieveProperties(ctx context.Context, req types.RetrieveProperties, dst *[]interface{}) error { + res, err := l.Collector.RetrieveProperties(ctx, req) + if err != nil { + return err + } + + // Instead of using mo.LoadRetrievePropertiesResponse, use a custom loop to + // iterate over the results and ignore entries that have properties that + // could not be retrieved (a non-empty `missingSet` property). Since the + // returned objects are enumerated by vSphere in the first place, any object + // that has a non-empty `missingSet` property is indicative of a race + // condition in vSphere where the object was enumerated initially, but was + // removed before its properties could be collected. + for _, p := range res.Returnval { + v, err := mo.ObjectContentToType(p) + if err != nil { + // Ignore fault if it is ManagedObjectNotFound + if soap.IsVimFault(err) { + switch soap.ToVimFault(err).(type) { + case *types.ManagedObjectNotFound: + continue + } + } + + return err + } + + *dst = append(*dst, v) + } + + return nil +} + +func (l Lister) List(ctx context.Context) ([]Element, error) { + switch l.Reference.Type { + case "Folder", "StoragePod": + return l.ListFolder(ctx) + case "Datacenter": + return l.ListDatacenter(ctx) + case "ComputeResource", "ClusterComputeResource": + // Treat ComputeResource and ClusterComputeResource as one and the same. + // It doesn't matter from the perspective of the lister. + return l.ListComputeResource(ctx) + case "ResourcePool": + return l.ListResourcePool(ctx) + case "HostSystem": + return l.ListHostSystem(ctx) + case "VirtualApp": + return l.ListVirtualApp(ctx) + default: + return nil, fmt.Errorf("cannot traverse type " + l.Reference.Type) + } +} + +func (l Lister) ListFolder(ctx context.Context) ([]Element, error) { + spec := types.PropertyFilterSpec{ + ObjectSet: []types.ObjectSpec{ + { + Obj: l.Reference, + SelectSet: []types.BaseSelectionSpec{ + &types.TraversalSpec{ + Path: "childEntity", + Skip: types.NewBool(false), + Type: "Folder", + }, + }, + Skip: types.NewBool(true), + }, + }, + } + + // Retrieve all objects that we can deal with + childTypes := []string{ + "Folder", + "Datacenter", + "VirtualApp", + "VirtualMachine", + "Network", + "ComputeResource", + "ClusterComputeResource", + "Datastore", + "DistributedVirtualSwitch", + } + + for _, t := range childTypes { + pspec := types.PropertySpec{ + Type: t, + } + + if l.All { + pspec.All = types.NewBool(true) + } else { + pspec.PathSet = []string{"name"} + + // Additional basic properties. + switch t { + case "Folder": + pspec.PathSet = append(pspec.PathSet, "childType") + case "ComputeResource", "ClusterComputeResource": + // The ComputeResource and ClusterComputeResource are dereferenced in + // the ResourcePoolFlag. Make sure they always have their resourcePool + // field populated. 
+ pspec.PathSet = append(pspec.PathSet, "resourcePool") + } + } + + spec.PropSet = append(spec.PropSet, pspec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{spec}, + } + + var dst []interface{} + + err := l.retrieveProperties(ctx, req, &dst) + if err != nil { + return nil, err + } + + es := []Element{} + for _, v := range dst { + es = append(es, ToElement(v.(mo.Reference), l.Prefix)) + } + + return es, nil +} + +func (l Lister) ListDatacenter(ctx context.Context) ([]Element, error) { + ospec := types.ObjectSpec{ + Obj: l.Reference, + Skip: types.NewBool(true), + } + + // Include every datastore folder in the select set + fields := []string{ + "vmFolder", + "hostFolder", + "datastoreFolder", + "networkFolder", + } + + for _, f := range fields { + tspec := types.TraversalSpec{ + Path: f, + Skip: types.NewBool(false), + Type: "Datacenter", + } + + ospec.SelectSet = append(ospec.SelectSet, &tspec) + } + + pspec := types.PropertySpec{ + Type: "Folder", + } + + if l.All { + pspec.All = types.NewBool(true) + } else { + pspec.PathSet = []string{"name", "childType"} + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: []types.PropertySpec{pspec}, + }, + }, + } + + var dst []interface{} + + err := l.retrieveProperties(ctx, req, &dst) + if err != nil { + return nil, err + } + + es := []Element{} + for _, v := range dst { + es = append(es, ToElement(v.(mo.Reference), l.Prefix)) + } + + return es, nil +} + +func (l Lister) ListComputeResource(ctx context.Context) ([]Element, error) { + ospec := types.ObjectSpec{ + Obj: l.Reference, + Skip: types.NewBool(true), + } + + fields := []string{ + "host", + "resourcePool", + } + + for _, f := range fields { + tspec := types.TraversalSpec{ + Path: f, + Skip: types.NewBool(false), + Type: "ComputeResource", + } + + ospec.SelectSet = append(ospec.SelectSet, &tspec) + } + + childTypes := []string{ + "HostSystem", + "ResourcePool", + } + + var pspecs []types.PropertySpec + for _, t := range childTypes { + pspec := types.PropertySpec{ + Type: t, + } + + if l.All { + pspec.All = types.NewBool(true) + } else { + pspec.PathSet = []string{"name"} + } + + pspecs = append(pspecs, pspec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: pspecs, + }, + }, + } + + var dst []interface{} + + err := l.retrieveProperties(ctx, req, &dst) + if err != nil { + return nil, err + } + + es := []Element{} + for _, v := range dst { + es = append(es, ToElement(v.(mo.Reference), l.Prefix)) + } + + return es, nil +} + +func (l Lister) ListResourcePool(ctx context.Context) ([]Element, error) { + ospec := types.ObjectSpec{ + Obj: l.Reference, + Skip: types.NewBool(true), + } + + fields := []string{ + "resourcePool", + } + + for _, f := range fields { + tspec := types.TraversalSpec{ + Path: f, + Skip: types.NewBool(false), + Type: "ResourcePool", + } + + ospec.SelectSet = append(ospec.SelectSet, &tspec) + } + + childTypes := []string{ + "ResourcePool", + } + + var pspecs []types.PropertySpec + for _, t := range childTypes { + pspec := types.PropertySpec{ + Type: t, + } + + if l.All { + pspec.All = types.NewBool(true) + } else { + pspec.PathSet = []string{"name"} + } + + pspecs = append(pspecs, pspec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: pspecs, + }, + }, + } + + var dst []interface{} + + err := 
l.retrieveProperties(ctx, req, &dst) + if err != nil { + return nil, err + } + + es := []Element{} + for _, v := range dst { + es = append(es, ToElement(v.(mo.Reference), l.Prefix)) + } + + return es, nil +} + +func (l Lister) ListHostSystem(ctx context.Context) ([]Element, error) { + ospec := types.ObjectSpec{ + Obj: l.Reference, + Skip: types.NewBool(true), + } + + fields := []string{ + "datastore", + "network", + "vm", + } + + for _, f := range fields { + tspec := types.TraversalSpec{ + Path: f, + Skip: types.NewBool(false), + Type: "HostSystem", + } + + ospec.SelectSet = append(ospec.SelectSet, &tspec) + } + + childTypes := []string{ + "Datastore", + "Network", + "VirtualMachine", + } + + var pspecs []types.PropertySpec + for _, t := range childTypes { + pspec := types.PropertySpec{ + Type: t, + } + + if l.All { + pspec.All = types.NewBool(true) + } else { + pspec.PathSet = []string{"name"} + } + + pspecs = append(pspecs, pspec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: pspecs, + }, + }, + } + + var dst []interface{} + + err := l.retrieveProperties(ctx, req, &dst) + if err != nil { + return nil, err + } + + es := []Element{} + for _, v := range dst { + es = append(es, ToElement(v.(mo.Reference), l.Prefix)) + } + + return es, nil +} + +func (l Lister) ListVirtualApp(ctx context.Context) ([]Element, error) { + ospec := types.ObjectSpec{ + Obj: l.Reference, + Skip: types.NewBool(true), + } + + fields := []string{ + "resourcePool", + "vm", + } + + for _, f := range fields { + tspec := types.TraversalSpec{ + Path: f, + Skip: types.NewBool(false), + Type: "VirtualApp", + } + + ospec.SelectSet = append(ospec.SelectSet, &tspec) + } + + childTypes := []string{ + "ResourcePool", + "VirtualMachine", + } + + var pspecs []types.PropertySpec + for _, t := range childTypes { + pspec := types.PropertySpec{ + Type: t, + } + + if l.All { + pspec.All = types.NewBool(true) + } else { + pspec.PathSet = []string{"name"} + } + + pspecs = append(pspecs, pspec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: pspecs, + }, + }, + } + + var dst []interface{} + + err := l.retrieveProperties(ctx, req, &dst) + if err != nil { + return nil, err + } + + es := []Element{} + for _, v := range dst { + es = append(es, ToElement(v.(mo.Reference), l.Prefix)) + } + + return es, nil +} diff --git a/vendor/github.com/vmware/govmomi/list/path.go b/vendor/github.com/vmware/govmomi/list/path.go new file mode 100644 index 000000000000..f3a106520155 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/list/path.go @@ -0,0 +1,44 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package list + +import ( + "path" + "strings" +) + +func ToParts(p string) []string { + p = path.Clean(p) + if p == "/" { + return []string{} + } + + if len(p) > 0 { + // Prefix ./ if relative + if p[0] != '/' && p[0] != '.' 
{ + p = "./" + p + } + } + + ps := strings.Split(p, "/") + if ps[0] == "" { + // Start at root + ps = ps[1:] + } + + return ps +} diff --git a/vendor/github.com/vmware/govmomi/nfc/lease.go b/vendor/github.com/vmware/govmomi/nfc/lease.go new file mode 100644 index 000000000000..3fb85ee6f6e1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/nfc/lease.go @@ -0,0 +1,238 @@ +/* +Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nfc + +import ( + "context" + "errors" + "fmt" + "io" + "path" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/progress" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type Lease struct { + types.ManagedObjectReference + + c *vim25.Client +} + +func NewLease(c *vim25.Client, ref types.ManagedObjectReference) *Lease { + return &Lease{ref, c} +} + +// Abort wraps methods.Abort +func (l *Lease) Abort(ctx context.Context, fault *types.LocalizedMethodFault) error { + req := types.HttpNfcLeaseAbort{ + This: l.Reference(), + Fault: fault, + } + + _, err := methods.HttpNfcLeaseAbort(ctx, l.c, &req) + if err != nil { + return err + } + + return nil +} + +// Complete wraps methods.Complete +func (l *Lease) Complete(ctx context.Context) error { + req := types.HttpNfcLeaseComplete{ + This: l.Reference(), + } + + _, err := methods.HttpNfcLeaseComplete(ctx, l.c, &req) + if err != nil { + return err + } + + return nil +} + +// GetManifest wraps methods.GetManifest +func (l *Lease) GetManifest(ctx context.Context) error { + req := types.HttpNfcLeaseGetManifest{ + This: l.Reference(), + } + + _, err := methods.HttpNfcLeaseGetManifest(ctx, l.c, &req) + if err != nil { + return err + } + + return nil +} + +// Progress wraps methods.Progress +func (l *Lease) Progress(ctx context.Context, percent int32) error { + req := types.HttpNfcLeaseProgress{ + This: l.Reference(), + Percent: percent, + } + + _, err := methods.HttpNfcLeaseProgress(ctx, l.c, &req) + if err != nil { + return err + } + + return nil +} + +type LeaseInfo struct { + types.HttpNfcLeaseInfo + + Items []FileItem +} + +func (l *Lease) newLeaseInfo(li *types.HttpNfcLeaseInfo, items []types.OvfFileItem) (*LeaseInfo, error) { + info := &LeaseInfo{ + HttpNfcLeaseInfo: *li, + } + + for _, device := range li.DeviceUrl { + u, err := l.c.ParseURL(device.Url) + if err != nil { + return nil, err + } + + if device.SslThumbprint != "" { + // TODO: prefer host management IP + l.c.SetThumbprint(u.Host, device.SslThumbprint) + } + + if len(items) == 0 { + // this is an export + item := types.OvfFileItem{ + DeviceId: device.Key, + Path: device.TargetId, + Size: device.FileSize, + } + + if item.Size == 0 { + item.Size = li.TotalDiskCapacityInKB * 1024 + } + + if item.Path == "" { + item.Path = path.Base(device.Url) + } + + info.Items = append(info.Items, NewFileItem(u, item)) + + continue + } + + // this is an 
import + for _, item := range items { + if device.ImportKey == item.DeviceId { + info.Items = append(info.Items, NewFileItem(u, item)) + break + } + } + } + + return info, nil +} + +func (l *Lease) Wait(ctx context.Context, items []types.OvfFileItem) (*LeaseInfo, error) { + var lease mo.HttpNfcLease + + pc := property.DefaultCollector(l.c) + err := property.Wait(ctx, pc, l.Reference(), []string{"state", "info", "error"}, func(pc []types.PropertyChange) bool { + done := false + + for _, c := range pc { + if c.Val == nil { + continue + } + + switch c.Name { + case "error": + val := c.Val.(types.LocalizedMethodFault) + lease.Error = &val + done = true + case "info": + val := c.Val.(types.HttpNfcLeaseInfo) + lease.Info = &val + case "state": + lease.State = c.Val.(types.HttpNfcLeaseState) + if lease.State != types.HttpNfcLeaseStateInitializing { + done = true + } + } + } + + return done + }) + + if err != nil { + return nil, err + } + + if lease.State == types.HttpNfcLeaseStateReady { + return l.newLeaseInfo(lease.Info, items) + } + + if lease.Error != nil { + return nil, errors.New(lease.Error.LocalizedMessage) + } + + return nil, fmt.Errorf("unexpected nfc lease state: %s", lease.State) +} + +func (l *Lease) StartUpdater(ctx context.Context, info *LeaseInfo) *LeaseUpdater { + return newLeaseUpdater(ctx, l, info) +} + +func (l *Lease) Upload(ctx context.Context, item FileItem, f io.Reader, opts soap.Upload) error { + if opts.Progress == nil { + opts.Progress = item + } else { + opts.Progress = progress.Tee(item, opts.Progress) + } + + // Non-disk files (such as .iso) use the PUT method. + // Overwrite: t header is also required in this case (ovftool does the same) + if item.Create { + opts.Method = "PUT" + opts.Headers = map[string]string{ + "Overwrite": "t", + } + } else { + opts.Method = "POST" + opts.Type = "application/x-vnd.vmware-streamVmdk" + } + + return l.c.Upload(ctx, f, item.URL, &opts) +} + +func (l *Lease) DownloadFile(ctx context.Context, file string, item FileItem, opts soap.Download) error { + if opts.Progress == nil { + opts.Progress = item + } else { + opts.Progress = progress.Tee(item, opts.Progress) + } + + return l.c.DownloadFile(ctx, file, item.URL, &opts) +} diff --git a/vendor/github.com/vmware/govmomi/nfc/lease_updater.go b/vendor/github.com/vmware/govmomi/nfc/lease_updater.go new file mode 100644 index 000000000000..d3face81a4c4 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/nfc/lease_updater.go @@ -0,0 +1,146 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nfc + +import ( + "context" + "log" + "net/url" + "sync" + "sync/atomic" + "time" + + "github.com/vmware/govmomi/vim25/progress" + "github.com/vmware/govmomi/vim25/types" +) + +type FileItem struct { + types.OvfFileItem + URL *url.URL + + ch chan progress.Report +} + +func NewFileItem(u *url.URL, item types.OvfFileItem) FileItem { + return FileItem{ + OvfFileItem: item, + URL: u, + ch: make(chan progress.Report), + } +} + +func (o FileItem) Sink() chan<- progress.Report { + return o.ch +} + +// File converts the FileItem.OvfFileItem to an OvfFile +func (o FileItem) File() types.OvfFile { + return types.OvfFile{ + DeviceId: o.DeviceId, + Path: o.Path, + Size: o.Size, + } +} + +type LeaseUpdater struct { + lease *Lease + + pos int64 // Number of bytes + total int64 // Total number of bytes + + done chan struct{} // When lease updater should stop + + wg sync.WaitGroup // Track when update loop is done +} + +func newLeaseUpdater(ctx context.Context, lease *Lease, info *LeaseInfo) *LeaseUpdater { + l := LeaseUpdater{ + lease: lease, + + done: make(chan struct{}), + } + + for _, item := range info.Items { + l.total += item.Size + go l.waitForProgress(item) + } + + // Kickstart update loop + l.wg.Add(1) + go l.run() + + return &l +} + +func (l *LeaseUpdater) waitForProgress(item FileItem) { + var pos, total int64 + + total = item.Size + + for { + select { + case <-l.done: + return + case p, ok := <-item.ch: + // Return in case of error + if ok && p.Error() != nil { + return + } + + if !ok { + // Last element on the channel, add to total + atomic.AddInt64(&l.pos, total-pos) + return + } + + // Approximate progress in number of bytes + x := int64(float32(total) * (p.Percentage() / 100.0)) + atomic.AddInt64(&l.pos, x-pos) + pos = x + } + } +} + +func (l *LeaseUpdater) run() { + defer l.wg.Done() + + tick := time.NewTicker(2 * time.Second) + defer tick.Stop() + + for { + select { + case <-l.done: + return + case <-tick.C: + // From the vim api HttpNfcLeaseProgress(percent) doc, percent == + // "Completion status represented as an integer in the 0-100 range." + // Always report the current value of percent, as it will renew the + // lease even if the value hasn't changed or is 0. + percent := int32(float32(100*atomic.LoadInt64(&l.pos)) / float32(l.total)) + err := l.lease.Progress(context.TODO(), percent) + if err != nil { + log.Printf("NFC lease progress: %s", err) + return + } + } + } +} + +func (l *LeaseUpdater) Done() { + close(l.done) + l.wg.Wait() +} diff --git a/vendor/github.com/vmware/govmomi/object/authorization_manager.go b/vendor/github.com/vmware/govmomi/object/authorization_manager.go new file mode 100644 index 000000000000..b703258fe7a3 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/authorization_manager.go @@ -0,0 +1,174 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type AuthorizationManager struct { + Common +} + +func NewAuthorizationManager(c *vim25.Client) *AuthorizationManager { + m := AuthorizationManager{ + Common: NewCommon(c, *c.ServiceContent.AuthorizationManager), + } + + return &m +} + +type AuthorizationRoleList []types.AuthorizationRole + +func (l AuthorizationRoleList) ById(id int32) *types.AuthorizationRole { + for _, role := range l { + if role.RoleId == id { + return &role + } + } + + return nil +} + +func (l AuthorizationRoleList) ByName(name string) *types.AuthorizationRole { + for _, role := range l { + if role.Name == name { + return &role + } + } + + return nil +} + +func (m AuthorizationManager) RoleList(ctx context.Context) (AuthorizationRoleList, error) { + var am mo.AuthorizationManager + + err := m.Properties(ctx, m.Reference(), []string{"roleList"}, &am) + if err != nil { + return nil, err + } + + return AuthorizationRoleList(am.RoleList), nil +} + +func (m AuthorizationManager) RetrieveEntityPermissions(ctx context.Context, entity types.ManagedObjectReference, inherited bool) ([]types.Permission, error) { + req := types.RetrieveEntityPermissions{ + This: m.Reference(), + Entity: entity, + Inherited: inherited, + } + + res, err := methods.RetrieveEntityPermissions(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (m AuthorizationManager) RemoveEntityPermission(ctx context.Context, entity types.ManagedObjectReference, user string, isGroup bool) error { + req := types.RemoveEntityPermission{ + This: m.Reference(), + Entity: entity, + User: user, + IsGroup: isGroup, + } + + _, err := methods.RemoveEntityPermission(ctx, m.Client(), &req) + return err +} + +func (m AuthorizationManager) SetEntityPermissions(ctx context.Context, entity types.ManagedObjectReference, permission []types.Permission) error { + req := types.SetEntityPermissions{ + This: m.Reference(), + Entity: entity, + Permission: permission, + } + + _, err := methods.SetEntityPermissions(ctx, m.Client(), &req) + return err +} + +func (m AuthorizationManager) RetrieveRolePermissions(ctx context.Context, id int32) ([]types.Permission, error) { + req := types.RetrieveRolePermissions{ + This: m.Reference(), + RoleId: id, + } + + res, err := methods.RetrieveRolePermissions(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (m AuthorizationManager) RetrieveAllPermissions(ctx context.Context) ([]types.Permission, error) { + req := types.RetrieveAllPermissions{ + This: m.Reference(), + } + + res, err := methods.RetrieveAllPermissions(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (m AuthorizationManager) AddRole(ctx context.Context, name string, ids []string) (int32, error) { + req := types.AddAuthorizationRole{ + This: m.Reference(), + Name: name, + PrivIds: ids, + } + + res, err := methods.AddAuthorizationRole(ctx, m.Client(), &req) + if err != nil { + return -1, err + } + + return res.Returnval, nil +} + +func (m AuthorizationManager) RemoveRole(ctx context.Context, id int32, failIfUsed bool) error { + req := types.RemoveAuthorizationRole{ + This: m.Reference(), + RoleId: id, + FailIfUsed: failIfUsed, + } + + _, err := methods.RemoveAuthorizationRole(ctx, m.Client(), &req) + return err +} + +func 
(m AuthorizationManager) UpdateRole(ctx context.Context, id int32, name string, ids []string) error { + req := types.UpdateAuthorizationRole{ + This: m.Reference(), + RoleId: id, + NewName: name, + PrivIds: ids, + } + + _, err := methods.UpdateAuthorizationRole(ctx, m.Client(), &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/authorization_manager_internal.go b/vendor/github.com/vmware/govmomi/object/authorization_manager_internal.go new file mode 100644 index 000000000000..4fa520f5add2 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/authorization_manager_internal.go @@ -0,0 +1,86 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type DisabledMethodRequest struct { + Method string `xml:"method"` + Reason string `xml:"reasonId"` +} + +type disableMethodsRequest struct { + This types.ManagedObjectReference `xml:"_this"` + Entity []types.ManagedObjectReference `xml:"entity"` + Method []DisabledMethodRequest `xml:"method"` + Source string `xml:"sourceId"` + Scope bool `xml:"sessionScope,omitempty"` +} + +type disableMethodsBody struct { + Req *disableMethodsRequest `xml:"urn:internalvim25 DisableMethods,omitempty"` + Res interface{} `xml:"urn:vim25 DisableMethodsResponse,omitempty"` + Err *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *disableMethodsBody) Fault() *soap.Fault { return b.Err } + +func (m AuthorizationManager) DisableMethods(ctx context.Context, entity []types.ManagedObjectReference, method []DisabledMethodRequest, source string) error { + var reqBody, resBody disableMethodsBody + + reqBody.Req = &disableMethodsRequest{ + This: m.Reference(), + Entity: entity, + Method: method, + Source: source, + } + + return m.Client().RoundTrip(ctx, &reqBody, &resBody) +} + +type enableMethodsRequest struct { + This types.ManagedObjectReference `xml:"_this"` + Entity []types.ManagedObjectReference `xml:"entity"` + Method []string `xml:"method"` + Source string `xml:"sourceId"` +} + +type enableMethodsBody struct { + Req *enableMethodsRequest `xml:"urn:internalvim25 EnableMethods,omitempty"` + Res interface{} `xml:"urn:vim25 EnableMethodsResponse,omitempty"` + Err *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *enableMethodsBody) Fault() *soap.Fault { return b.Err } + +func (m AuthorizationManager) EnableMethods(ctx context.Context, entity []types.ManagedObjectReference, method []string, source string) error { + var reqBody, resBody enableMethodsBody + + reqBody.Req = &enableMethodsRequest{ + This: m.Reference(), + Entity: entity, + Method: method, + Source: source, + } + + return m.Client().RoundTrip(ctx, &reqBody, &resBody) +} diff --git a/vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go b/vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go new file mode 100644 
index 000000000000..c9fe3aa035b1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go @@ -0,0 +1,70 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type ClusterComputeResource struct { + ComputeResource +} + +func NewClusterComputeResource(c *vim25.Client, ref types.ManagedObjectReference) *ClusterComputeResource { + return &ClusterComputeResource{ + ComputeResource: *NewComputeResource(c, ref), + } +} + +func (c ClusterComputeResource) Configuration(ctx context.Context) (*types.ClusterConfigInfoEx, error) { + var obj mo.ClusterComputeResource + + err := c.Properties(ctx, c.Reference(), []string{"configurationEx"}, &obj) + if err != nil { + return nil, err + } + + return obj.ConfigurationEx.(*types.ClusterConfigInfoEx), nil +} + +func (c ClusterComputeResource) AddHost(ctx context.Context, spec types.HostConnectSpec, asConnected bool, license *string, resourcePool *types.ManagedObjectReference) (*Task, error) { + req := types.AddHost_Task{ + This: c.Reference(), + Spec: spec, + AsConnected: asConnected, + } + + if license != nil { + req.License = *license + } + + if resourcePool != nil { + req.ResourcePool = resourcePool + } + + res, err := methods.AddHost_Task(ctx, c.c, &req) + if err != nil { + return nil, err + } + + return NewTask(c.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/common.go b/vendor/github.com/vmware/govmomi/object/common.go new file mode 100644 index 000000000000..dfeee4a365ed --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/common.go @@ -0,0 +1,132 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + "errors" + "fmt" + "path" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +var ( + ErrNotSupported = errors.New("product/version specific feature not supported by target") +) + +// Common contains the fields and functions common to all objects. 
+type Common struct { + InventoryPath string + + c *vim25.Client + r types.ManagedObjectReference +} + +func (c Common) String() string { + ref := fmt.Sprintf("%v", c.Reference()) + + if c.InventoryPath == "" { + return ref + } + + return fmt.Sprintf("%s @ %s", ref, c.InventoryPath) +} + +func NewCommon(c *vim25.Client, r types.ManagedObjectReference) Common { + return Common{c: c, r: r} +} + +func (c Common) Reference() types.ManagedObjectReference { + return c.r +} + +func (c Common) Client() *vim25.Client { + return c.c +} + +// Name returns the base name of the InventoryPath field +func (c Common) Name() string { + if c.InventoryPath == "" { + return "" + } + return path.Base(c.InventoryPath) +} + +func (c *Common) SetInventoryPath(p string) { + c.InventoryPath = p +} + +// ObjectName returns the base name of the InventoryPath field if set, +// otherwise fetches the mo.ManagedEntity.Name field via the property collector. +func (c Common) ObjectName(ctx context.Context) (string, error) { + var o mo.ManagedEntity + + err := c.Properties(ctx, c.Reference(), []string{"name"}, &o) + if err != nil { + return "", err + } + + if o.Name != "" { + return o.Name, nil + } + + // Network has its own "name" field... + var n mo.Network + + err = c.Properties(ctx, c.Reference(), []string{"name"}, &n) + if err != nil { + return "", err + } + + return n.Name, nil +} + +func (c Common) Properties(ctx context.Context, r types.ManagedObjectReference, ps []string, dst interface{}) error { + return property.DefaultCollector(c.c).RetrieveOne(ctx, r, ps, dst) +} + +func (c Common) Destroy(ctx context.Context) (*Task, error) { + req := types.Destroy_Task{ + This: c.Reference(), + } + + res, err := methods.Destroy_Task(ctx, c.c, &req) + if err != nil { + return nil, err + } + + return NewTask(c.c, res.Returnval), nil +} + +func (c Common) Rename(ctx context.Context, name string) (*Task, error) { + req := types.Rename_Task{ + This: c.Reference(), + NewName: name, + } + + res, err := methods.Rename_Task(ctx, c.c, &req) + if err != nil { + return nil, err + } + + return NewTask(c.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/compute_resource.go b/vendor/github.com/vmware/govmomi/object/compute_resource.go new file mode 100644 index 000000000000..7645fddaf313 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/compute_resource.go @@ -0,0 +1,111 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "context" + "path" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type ComputeResource struct { + Common +} + +func NewComputeResource(c *vim25.Client, ref types.ManagedObjectReference) *ComputeResource { + return &ComputeResource{ + Common: NewCommon(c, ref), + } +} + +func (c ComputeResource) Hosts(ctx context.Context) ([]*HostSystem, error) { + var cr mo.ComputeResource + + err := c.Properties(ctx, c.Reference(), []string{"host"}, &cr) + if err != nil { + return nil, err + } + + if len(cr.Host) == 0 { + return nil, nil + } + + var hs []mo.HostSystem + pc := property.DefaultCollector(c.Client()) + err = pc.Retrieve(ctx, cr.Host, []string{"name"}, &hs) + if err != nil { + return nil, err + } + + var hosts []*HostSystem + + for _, h := range hs { + host := NewHostSystem(c.Client(), h.Reference()) + host.InventoryPath = path.Join(c.InventoryPath, h.Name) + hosts = append(hosts, host) + } + + return hosts, nil +} + +func (c ComputeResource) Datastores(ctx context.Context) ([]*Datastore, error) { + var cr mo.ComputeResource + + err := c.Properties(ctx, c.Reference(), []string{"datastore"}, &cr) + if err != nil { + return nil, err + } + + var dss []*Datastore + for _, ref := range cr.Datastore { + ds := NewDatastore(c.c, ref) + dss = append(dss, ds) + } + + return dss, nil +} + +func (c ComputeResource) ResourcePool(ctx context.Context) (*ResourcePool, error) { + var cr mo.ComputeResource + + err := c.Properties(ctx, c.Reference(), []string{"resourcePool"}, &cr) + if err != nil { + return nil, err + } + + return NewResourcePool(c.c, *cr.ResourcePool), nil +} + +func (c ComputeResource) Reconfigure(ctx context.Context, spec types.BaseComputeResourceConfigSpec, modify bool) (*Task, error) { + req := types.ReconfigureComputeResource_Task{ + This: c.Reference(), + Spec: spec, + Modify: modify, + } + + res, err := methods.ReconfigureComputeResource_Task(ctx, c.c, &req) + if err != nil { + return nil, err + } + + return NewTask(c.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/custom_fields_manager.go b/vendor/github.com/vmware/govmomi/object/custom_fields_manager.go new file mode 100644 index 000000000000..ef748ef2c131 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/custom_fields_manager.go @@ -0,0 +1,146 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "context" + "errors" + "strconv" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +var ( + ErrKeyNameNotFound = errors.New("key name not found") +) + +type CustomFieldsManager struct { + Common +} + +// GetCustomFieldsManager wraps NewCustomFieldsManager, returning ErrNotSupported +// when the client is not connected to a vCenter instance. +func GetCustomFieldsManager(c *vim25.Client) (*CustomFieldsManager, error) { + if c.ServiceContent.CustomFieldsManager == nil { + return nil, ErrNotSupported + } + return NewCustomFieldsManager(c), nil +} + +func NewCustomFieldsManager(c *vim25.Client) *CustomFieldsManager { + m := CustomFieldsManager{ + Common: NewCommon(c, *c.ServiceContent.CustomFieldsManager), + } + + return &m +} + +func (m CustomFieldsManager) Add(ctx context.Context, name string, moType string, fieldDefPolicy *types.PrivilegePolicyDef, fieldPolicy *types.PrivilegePolicyDef) (*types.CustomFieldDef, error) { + req := types.AddCustomFieldDef{ + This: m.Reference(), + Name: name, + MoType: moType, + FieldDefPolicy: fieldDefPolicy, + FieldPolicy: fieldPolicy, + } + + res, err := methods.AddCustomFieldDef(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (m CustomFieldsManager) Remove(ctx context.Context, key int32) error { + req := types.RemoveCustomFieldDef{ + This: m.Reference(), + Key: key, + } + + _, err := methods.RemoveCustomFieldDef(ctx, m.c, &req) + return err +} + +func (m CustomFieldsManager) Rename(ctx context.Context, key int32, name string) error { + req := types.RenameCustomFieldDef{ + This: m.Reference(), + Key: key, + Name: name, + } + + _, err := methods.RenameCustomFieldDef(ctx, m.c, &req) + return err +} + +func (m CustomFieldsManager) Set(ctx context.Context, entity types.ManagedObjectReference, key int32, value string) error { + req := types.SetField{ + This: m.Reference(), + Entity: entity, + Key: key, + Value: value, + } + + _, err := methods.SetField(ctx, m.c, &req) + return err +} + +type CustomFieldDefList []types.CustomFieldDef + +func (m CustomFieldsManager) Field(ctx context.Context) (CustomFieldDefList, error) { + var fm mo.CustomFieldsManager + + err := m.Properties(ctx, m.Reference(), []string{"field"}, &fm) + if err != nil { + return nil, err + } + + return fm.Field, nil +} + +func (m CustomFieldsManager) FindKey(ctx context.Context, name string) (int32, error) { + field, err := m.Field(ctx) + if err != nil { + return -1, err + } + + for _, def := range field { + if def.Name == name { + return def.Key, nil + } + } + + k, err := strconv.Atoi(name) + if err == nil { + // assume literal int key + return int32(k), nil + } + + return -1, ErrKeyNameNotFound +} + +func (l CustomFieldDefList) ByKey(key int32) *types.CustomFieldDef { + for _, def := range l { + if def.Key == key { + return &def + } + } + return nil +} diff --git a/vendor/github.com/vmware/govmomi/object/customization_spec_manager.go b/vendor/github.com/vmware/govmomi/object/customization_spec_manager.go new file mode 100644 index 000000000000..cb8b965dc6e4 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/customization_spec_manager.go @@ -0,0 +1,166 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type CustomizationSpecManager struct { + Common +} + +func NewCustomizationSpecManager(c *vim25.Client) *CustomizationSpecManager { + cs := CustomizationSpecManager{ + Common: NewCommon(c, *c.ServiceContent.CustomizationSpecManager), + } + + return &cs +} + +func (cs CustomizationSpecManager) DoesCustomizationSpecExist(ctx context.Context, name string) (bool, error) { + req := types.DoesCustomizationSpecExist{ + This: cs.Reference(), + Name: name, + } + + res, err := methods.DoesCustomizationSpecExist(ctx, cs.c, &req) + + if err != nil { + return false, err + } + + return res.Returnval, nil +} + +func (cs CustomizationSpecManager) GetCustomizationSpec(ctx context.Context, name string) (*types.CustomizationSpecItem, error) { + req := types.GetCustomizationSpec{ + This: cs.Reference(), + Name: name, + } + + res, err := methods.GetCustomizationSpec(ctx, cs.c, &req) + + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (cs CustomizationSpecManager) CreateCustomizationSpec(ctx context.Context, item types.CustomizationSpecItem) error { + req := types.CreateCustomizationSpec{ + This: cs.Reference(), + Item: item, + } + + _, err := methods.CreateCustomizationSpec(ctx, cs.c, &req) + if err != nil { + return err + } + + return nil +} + +func (cs CustomizationSpecManager) OverwriteCustomizationSpec(ctx context.Context, item types.CustomizationSpecItem) error { + req := types.OverwriteCustomizationSpec{ + This: cs.Reference(), + Item: item, + } + + _, err := methods.OverwriteCustomizationSpec(ctx, cs.c, &req) + if err != nil { + return err + } + + return nil +} + +func (cs CustomizationSpecManager) DeleteCustomizationSpec(ctx context.Context, name string) error { + req := types.DeleteCustomizationSpec{ + This: cs.Reference(), + Name: name, + } + + _, err := methods.DeleteCustomizationSpec(ctx, cs.c, &req) + if err != nil { + return err + } + + return nil +} + +func (cs CustomizationSpecManager) DuplicateCustomizationSpec(ctx context.Context, name string, newName string) error { + req := types.DuplicateCustomizationSpec{ + This: cs.Reference(), + Name: name, + NewName: newName, + } + + _, err := methods.DuplicateCustomizationSpec(ctx, cs.c, &req) + if err != nil { + return err + } + + return nil +} + +func (cs CustomizationSpecManager) RenameCustomizationSpec(ctx context.Context, name string, newName string) error { + req := types.RenameCustomizationSpec{ + This: cs.Reference(), + Name: name, + NewName: newName, + } + + _, err := methods.RenameCustomizationSpec(ctx, cs.c, &req) + if err != nil { + return err + } + + return nil +} + +func (cs CustomizationSpecManager) CustomizationSpecItemToXml(ctx context.Context, item types.CustomizationSpecItem) (string, error) { + req := types.CustomizationSpecItemToXml{ + This: cs.Reference(), + Item: item, + } + + res, err := methods.CustomizationSpecItemToXml(ctx, cs.c, &req) + if err != nil { + return "", err + } + + return res.Returnval, nil +} + +func (cs 
CustomizationSpecManager) XmlToCustomizationSpecItem(ctx context.Context, xml string) (*types.CustomizationSpecItem, error) { + req := types.XmlToCustomizationSpecItem{ + This: cs.Reference(), + SpecItemXml: xml, + } + + res, err := methods.XmlToCustomizationSpecItem(ctx, cs.c, &req) + if err != nil { + return nil, err + } + return &res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/datacenter.go b/vendor/github.com/vmware/govmomi/object/datacenter.go new file mode 100644 index 000000000000..41fa3526571b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/datacenter.go @@ -0,0 +1,129 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + "fmt" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type DatacenterFolders struct { + VmFolder *Folder + HostFolder *Folder + DatastoreFolder *Folder + NetworkFolder *Folder +} + +type Datacenter struct { + Common +} + +func NewDatacenter(c *vim25.Client, ref types.ManagedObjectReference) *Datacenter { + return &Datacenter{ + Common: NewCommon(c, ref), + } +} + +func (d *Datacenter) Folders(ctx context.Context) (*DatacenterFolders, error) { + var md mo.Datacenter + + ps := []string{"name", "vmFolder", "hostFolder", "datastoreFolder", "networkFolder"} + err := d.Properties(ctx, d.Reference(), ps, &md) + if err != nil { + return nil, err + } + + df := &DatacenterFolders{ + VmFolder: NewFolder(d.c, md.VmFolder), + HostFolder: NewFolder(d.c, md.HostFolder), + DatastoreFolder: NewFolder(d.c, md.DatastoreFolder), + NetworkFolder: NewFolder(d.c, md.NetworkFolder), + } + + paths := []struct { + name string + path *string + }{ + {"vm", &df.VmFolder.InventoryPath}, + {"host", &df.HostFolder.InventoryPath}, + {"datastore", &df.DatastoreFolder.InventoryPath}, + {"network", &df.NetworkFolder.InventoryPath}, + } + + for _, p := range paths { + *p.path = fmt.Sprintf("/%s/%s", md.Name, p.name) + } + + return df, nil +} + +func (d Datacenter) Destroy(ctx context.Context) (*Task, error) { + req := types.Destroy_Task{ + This: d.Reference(), + } + + res, err := methods.Destroy_Task(ctx, d.c, &req) + if err != nil { + return nil, err + } + + return NewTask(d.c, res.Returnval), nil +} + +// PowerOnVM powers on multiple virtual machines with a single vCenter call. +// If called against ESX, serially powers on the list of VMs and the returned *Task will always be nil. 
+func (d Datacenter) PowerOnVM(ctx context.Context, vm []types.ManagedObjectReference, option ...types.BaseOptionValue) (*Task, error) { + if d.Client().IsVC() { + req := types.PowerOnMultiVM_Task{ + This: d.Reference(), + Vm: vm, + Option: option, + } + + res, err := methods.PowerOnMultiVM_Task(ctx, d.c, &req) + if err != nil { + return nil, err + } + + return NewTask(d.c, res.Returnval), nil + } + + for _, ref := range vm { + obj := NewVirtualMachine(d.Client(), ref) + task, err := obj.PowerOn(ctx) + if err != nil { + return nil, err + } + + err = task.Wait(ctx) + if err != nil { + // Ignore any InvalidPowerState fault, as it indicates the VM is already powered on + if f, ok := err.(types.HasFault); ok { + if _, ok = f.Fault().(*types.InvalidPowerState); !ok { + return nil, err + } + } + } + } + + return nil, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/datastore.go b/vendor/github.com/vmware/govmomi/object/datastore.go new file mode 100644 index 000000000000..dfe6603aa5c8 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/datastore.go @@ -0,0 +1,435 @@ +/* +Copyright (c) 2015-2016 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "fmt" + "io" + "math/rand" + "os" + "path" + "strings" + + "context" + "net/http" + "net/url" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/session" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +// DatastoreNoSuchDirectoryError is returned when a directory could not be found. +type DatastoreNoSuchDirectoryError struct { + verb string + subject string +} + +func (e DatastoreNoSuchDirectoryError) Error() string { + return fmt.Sprintf("cannot %s '%s': No such directory", e.verb, e.subject) +} + +// DatastoreNoSuchFileError is returned when a file could not be found. +type DatastoreNoSuchFileError struct { + verb string + subject string +} + +func (e DatastoreNoSuchFileError) Error() string { + return fmt.Sprintf("cannot %s '%s': No such file", e.verb, e.subject) +} + +type Datastore struct { + Common + + DatacenterPath string +} + +func NewDatastore(c *vim25.Client, ref types.ManagedObjectReference) *Datastore { + return &Datastore{ + Common: NewCommon(c, ref), + } +} + +func (d Datastore) Path(path string) string { + return (&DatastorePath{ + Datastore: d.Name(), + Path: path, + }).String() +} + +// NewURL constructs a url.URL with the given file path for datastore access over HTTP. +func (d Datastore) NewURL(path string) *url.URL { + u := d.c.URL() + + return &url.URL{ + Scheme: u.Scheme, + Host: u.Host, + Path: fmt.Sprintf("/folder/%s", path), + RawQuery: url.Values{ + "dcPath": []string{d.DatacenterPath}, + "dsName": []string{d.Name()}, + }.Encode(), + } +} + +// URL is deprecated, use NewURL instead. 
+func (d Datastore) URL(ctx context.Context, dc *Datacenter, path string) (*url.URL, error) { + return d.NewURL(path), nil +} + +func (d Datastore) Browser(ctx context.Context) (*HostDatastoreBrowser, error) { + var do mo.Datastore + + err := d.Properties(ctx, d.Reference(), []string{"browser"}, &do) + if err != nil { + return nil, err + } + + return NewHostDatastoreBrowser(d.c, do.Browser), nil +} + +func (d Datastore) useServiceTicket() bool { + // If connected to workstation, service ticketing not supported + // If connected to ESX, service ticketing not needed + if !d.c.IsVC() { + return false + } + + key := "GOVMOMI_USE_SERVICE_TICKET" + + val := d.c.URL().Query().Get(key) + if val == "" { + val = os.Getenv(key) + } + + if val == "1" || val == "true" { + return true + } + + return false +} + +func (d Datastore) useServiceTicketHostName(name string) bool { + // No need if talking directly to ESX. + if !d.c.IsVC() { + return false + } + + // If version happens to be < 5.1 + if name == "" { + return false + } + + // If the HostSystem is using DHCP on a network without dynamic DNS, + // HostSystem.Config.Network.DnsConfig.HostName is set to "localhost" by default. + // This resolves to "localhost.localdomain" by default via /etc/hosts on ESX. + // In that case, we will stick with the HostSystem.Name which is the IP address that + // was used to connect the host to VC. + if name == "localhost.localdomain" { + return false + } + + // Still possible to have HostName that don't resolve via DNS, + // so we default to false. + key := "GOVMOMI_USE_SERVICE_TICKET_HOSTNAME" + + val := d.c.URL().Query().Get(key) + if val == "" { + val = os.Getenv(key) + } + + if val == "1" || val == "true" { + return true + } + + return false +} + +type datastoreServiceTicketHostKey struct{} + +// HostContext returns a Context where the given host will be used for datastore HTTP access +// via the ServiceTicket method. +func (d Datastore) HostContext(ctx context.Context, host *HostSystem) context.Context { + return context.WithValue(ctx, datastoreServiceTicketHostKey{}, host) +} + +// ServiceTicket obtains a ticket via AcquireGenericServiceTicket and returns it an http.Cookie with the url.URL +// that can be used along with the ticket cookie to access the given path. An host is chosen at random unless the +// the given Context was created with a specific host via the HostContext method. 
+func (d Datastore) ServiceTicket(ctx context.Context, path string, method string) (*url.URL, *http.Cookie, error) { + u := d.NewURL(path) + + host, ok := ctx.Value(datastoreServiceTicketHostKey{}).(*HostSystem) + + if !ok { + if !d.useServiceTicket() { + return u, nil, nil + } + + hosts, err := d.AttachedHosts(ctx) + if err != nil { + return nil, nil, err + } + + if len(hosts) == 0 { + // Fallback to letting vCenter choose a host + return u, nil, nil + } + + // Pick a random attached host + host = hosts[rand.Intn(len(hosts))] + } + + ips, err := host.ManagementIPs(ctx) + if err != nil { + return nil, nil, err + } + + if len(ips) > 0 { + // prefer a ManagementIP + u.Host = ips[0].String() + } else { + // fallback to inventory name + u.Host, err = host.ObjectName(ctx) + if err != nil { + return nil, nil, err + } + } + + // VC datacenter path will not be valid against ESX + q := u.Query() + delete(q, "dcPath") + u.RawQuery = q.Encode() + + spec := types.SessionManagerHttpServiceRequestSpec{ + Url: u.String(), + // See SessionManagerHttpServiceRequestSpecMethod enum + Method: fmt.Sprintf("http%s%s", method[0:1], strings.ToLower(method[1:])), + } + + sm := session.NewManager(d.Client()) + + ticket, err := sm.AcquireGenericServiceTicket(ctx, &spec) + if err != nil { + return nil, nil, err + } + + cookie := &http.Cookie{ + Name: "vmware_cgi_ticket", + Value: ticket.Id, + } + + if d.useServiceTicketHostName(ticket.HostName) { + u.Host = ticket.HostName + } + + d.Client().SetThumbprint(u.Host, ticket.SslThumbprint) + + return u, cookie, nil +} + +func (d Datastore) uploadTicket(ctx context.Context, path string, param *soap.Upload) (*url.URL, *soap.Upload, error) { + p := soap.DefaultUpload + if param != nil { + p = *param // copy + } + + u, ticket, err := d.ServiceTicket(ctx, path, p.Method) + if err != nil { + return nil, nil, err + } + + p.Ticket = ticket + + return u, &p, nil +} + +func (d Datastore) downloadTicket(ctx context.Context, path string, param *soap.Download) (*url.URL, *soap.Download, error) { + p := soap.DefaultDownload + if param != nil { + p = *param // copy + } + + u, ticket, err := d.ServiceTicket(ctx, path, p.Method) + if err != nil { + return nil, nil, err + } + + p.Ticket = ticket + + return u, &p, nil +} + +// Upload via soap.Upload with an http service ticket +func (d Datastore) Upload(ctx context.Context, f io.Reader, path string, param *soap.Upload) error { + u, p, err := d.uploadTicket(ctx, path, param) + if err != nil { + return err + } + return d.Client().Upload(ctx, f, u, p) +} + +// UploadFile via soap.Upload with an http service ticket +func (d Datastore) UploadFile(ctx context.Context, file string, path string, param *soap.Upload) error { + u, p, err := d.uploadTicket(ctx, path, param) + if err != nil { + return err + } + return d.Client().UploadFile(ctx, file, u, p) +} + +// Download via soap.Download with an http service ticket +func (d Datastore) Download(ctx context.Context, path string, param *soap.Download) (io.ReadCloser, int64, error) { + u, p, err := d.downloadTicket(ctx, path, param) + if err != nil { + return nil, 0, err + } + return d.Client().Download(ctx, u, p) +} + +// DownloadFile via soap.Download with an http service ticket +func (d Datastore) DownloadFile(ctx context.Context, path string, file string, param *soap.Download) error { + u, p, err := d.downloadTicket(ctx, path, param) + if err != nil { + return err + } + return d.Client().DownloadFile(ctx, file, u, p) +} + +// AttachedHosts returns hosts that have this Datastore attached, accessible 
and writable. +func (d Datastore) AttachedHosts(ctx context.Context) ([]*HostSystem, error) { + var ds mo.Datastore + var hosts []*HostSystem + + pc := property.DefaultCollector(d.Client()) + err := pc.RetrieveOne(ctx, d.Reference(), []string{"host"}, &ds) + if err != nil { + return nil, err + } + + mounts := make(map[types.ManagedObjectReference]types.DatastoreHostMount) + var refs []types.ManagedObjectReference + for _, host := range ds.Host { + refs = append(refs, host.Key) + mounts[host.Key] = host + } + + var hs []mo.HostSystem + err = pc.Retrieve(ctx, refs, []string{"runtime.connectionState", "runtime.powerState"}, &hs) + if err != nil { + return nil, err + } + + for _, host := range hs { + if host.Runtime.ConnectionState == types.HostSystemConnectionStateConnected && + host.Runtime.PowerState == types.HostSystemPowerStatePoweredOn { + + mount := mounts[host.Reference()] + info := mount.MountInfo + + if *info.Mounted && *info.Accessible && info.AccessMode == string(types.HostMountModeReadWrite) { + hosts = append(hosts, NewHostSystem(d.Client(), mount.Key)) + } + } + } + + return hosts, nil +} + +// AttachedClusterHosts returns hosts that have this Datastore attached, accessible and writable and are members of the given cluster. +func (d Datastore) AttachedClusterHosts(ctx context.Context, cluster *ComputeResource) ([]*HostSystem, error) { + var hosts []*HostSystem + + clusterHosts, err := cluster.Hosts(ctx) + if err != nil { + return nil, err + } + + attachedHosts, err := d.AttachedHosts(ctx) + if err != nil { + return nil, err + } + + refs := make(map[types.ManagedObjectReference]bool) + for _, host := range attachedHosts { + refs[host.Reference()] = true + } + + for _, host := range clusterHosts { + if refs[host.Reference()] { + hosts = append(hosts, host) + } + } + + return hosts, nil +} + +func (d Datastore) Stat(ctx context.Context, file string) (types.BaseFileInfo, error) { + b, err := d.Browser(ctx) + if err != nil { + return nil, err + } + + spec := types.HostDatastoreBrowserSearchSpec{ + Details: &types.FileQueryFlags{ + FileType: true, + FileSize: true, + Modification: true, + FileOwner: types.NewBool(true), + }, + MatchPattern: []string{path.Base(file)}, + } + + dsPath := d.Path(path.Dir(file)) + task, err := b.SearchDatastore(ctx, dsPath, &spec) + if err != nil { + return nil, err + } + + info, err := task.WaitForResult(ctx, nil) + if err != nil { + if types.IsFileNotFound(err) { + // FileNotFound means the base path doesn't exist. + return nil, DatastoreNoSuchDirectoryError{"stat", dsPath} + } + + return nil, err + } + + res := info.Result.(types.HostDatastoreBrowserSearchResults) + if len(res.File) == 0 { + // File doesn't exist + return nil, DatastoreNoSuchFileError{"stat", d.Path(file)} + } + + return res.File[0], nil + +} + +// Type returns the type of file system volume. +func (d Datastore) Type(ctx context.Context) (types.HostFileSystemVolumeFileSystemType, error) { + var mds mo.Datastore + + if err := d.Properties(ctx, d.Reference(), []string{"summary.type"}, &mds); err != nil { + return types.HostFileSystemVolumeFileSystemType(""), err + } + return types.HostFileSystemVolumeFileSystemType(mds.Summary.Type), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/datastore_file.go b/vendor/github.com/vmware/govmomi/object/datastore_file.go new file mode 100644 index 000000000000..bc010fc36e81 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/datastore_file.go @@ -0,0 +1,414 @@ +/* +Copyright (c) 2016-2017 VMware, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "os" + "path" + "sync" + "time" + + "github.com/vmware/govmomi/vim25/soap" +) + +// DatastoreFile implements io.Reader, io.Seeker and io.Closer interfaces for datastore file access. +type DatastoreFile struct { + d Datastore + ctx context.Context + name string + + buf io.Reader + body io.ReadCloser + length int64 + offset struct { + read, seek int64 + } +} + +// Open opens the named file relative to the Datastore. +func (d Datastore) Open(ctx context.Context, name string) (*DatastoreFile, error) { + return &DatastoreFile{ + d: d, + name: name, + length: -1, + ctx: ctx, + }, nil +} + +// Read reads up to len(b) bytes from the DatastoreFile. +func (f *DatastoreFile) Read(b []byte) (int, error) { + if f.offset.read != f.offset.seek { + // A Seek() call changed the offset, we need to issue a new GET + _ = f.Close() + + f.offset.read = f.offset.seek + } else if f.buf != nil { + // f.buf + f behaves like an io.MultiReader + n, err := f.buf.Read(b) + if err == io.EOF { + f.buf = nil // buffer has been drained + } + if n > 0 { + return n, nil + } + } + + body, err := f.get() + if err != nil { + return 0, err + } + + n, err := body.Read(b) + + f.offset.read += int64(n) + f.offset.seek += int64(n) + + return n, err +} + +// Close closes the DatastoreFile. +func (f *DatastoreFile) Close() error { + var err error + + if f.body != nil { + err = f.body.Close() + f.body = nil + } + + f.buf = nil + + return err +} + +// Seek sets the offset for the next Read on the DatastoreFile. +func (f *DatastoreFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + case io.SeekCurrent: + offset += f.offset.seek + case io.SeekEnd: + if f.length < 0 { + _, err := f.Stat() + if err != nil { + return 0, err + } + } + offset += f.length + default: + return 0, errors.New("Seek: invalid whence") + } + + // allow negative SeekStart for initial Range request + if offset < 0 { + return 0, errors.New("Seek: invalid offset") + } + + f.offset.seek = offset + + return offset, nil +} + +type fileStat struct { + file *DatastoreFile + header http.Header +} + +func (s *fileStat) Name() string { + return path.Base(s.file.name) +} + +func (s *fileStat) Size() int64 { + return s.file.length +} + +func (s *fileStat) Mode() os.FileMode { + return 0 +} + +func (s *fileStat) ModTime() time.Time { + return time.Now() // no Last-Modified +} + +func (s *fileStat) IsDir() bool { + return false +} + +func (s *fileStat) Sys() interface{} { + return s.header +} + +func statusError(res *http.Response) error { + if res.StatusCode == http.StatusNotFound { + return os.ErrNotExist + } + return errors.New(res.Status) +} + +// Stat returns the os.FileInfo interface describing file. 
+func (f *DatastoreFile) Stat() (os.FileInfo, error) { + // TODO: consider using Datastore.Stat() instead + u, p, err := f.d.downloadTicket(f.ctx, f.name, &soap.Download{Method: "HEAD"}) + if err != nil { + return nil, err + } + + res, err := f.d.Client().DownloadRequest(f.ctx, u, p) + if err != nil { + return nil, err + } + + if res.StatusCode != http.StatusOK { + return nil, statusError(res) + } + + f.length = res.ContentLength + + return &fileStat{f, res.Header}, nil +} + +func (f *DatastoreFile) get() (io.Reader, error) { + if f.body != nil { + return f.body, nil + } + + u, p, err := f.d.downloadTicket(f.ctx, f.name, nil) + if err != nil { + return nil, err + } + + if f.offset.read != 0 { + p.Headers = map[string]string{ + "Range": fmt.Sprintf("bytes=%d-", f.offset.read), + } + } + + res, err := f.d.Client().DownloadRequest(f.ctx, u, p) + if err != nil { + return nil, err + } + + switch res.StatusCode { + case http.StatusOK: + f.length = res.ContentLength + case http.StatusPartialContent: + var start, end int + cr := res.Header.Get("Content-Range") + _, err = fmt.Sscanf(cr, "bytes %d-%d/%d", &start, &end, &f.length) + if err != nil { + f.length = -1 + } + case http.StatusRequestedRangeNotSatisfiable: + // ok: Read() will return io.EOF + default: + return nil, statusError(res) + } + + if f.length < 0 { + _ = res.Body.Close() + return nil, errors.New("unable to determine file size") + } + + f.body = res.Body + + return f.body, nil +} + +func lastIndexLines(s []byte, line *int, include func(l int, m string) bool) (int64, bool) { + i := len(s) - 1 + done := false + + for i > 0 { + o := bytes.LastIndexByte(s[:i], '\n') + if o < 0 { + break + } + + msg := string(s[o+1 : i+1]) + if !include(*line, msg) { + done = true + break + } else { + i = o + *line++ + } + } + + return int64(i), done +} + +// Tail seeks to the position of the last N lines of the file. +func (f *DatastoreFile) Tail(n int) error { + return f.TailFunc(n, func(line int, _ string) bool { return n > line }) +} + +// TailFunc will seek backwards in the datastore file until it hits a line that does +// not satisfy the supplied `include` function. +func (f *DatastoreFile) TailFunc(lines int, include func(line int, message string) bool) error { + // Read the file in reverse using bsize chunks + const bsize = int64(1024 * 16) + + fsize, err := f.Seek(0, io.SeekEnd) + if err != nil { + return err + } + + if lines == 0 { + return nil + } + + chunk := int64(-1) + + buf := bytes.NewBuffer(make([]byte, 0, bsize)) + line := 0 + + for { + var eof bool + var pos int64 + + nread := bsize + + offset := chunk * bsize + remain := fsize + offset + + if remain < 0 { + if pos, err = f.Seek(0, io.SeekStart); err != nil { + return err + } + + nread = bsize + remain + eof = true + } else { + if pos, err = f.Seek(offset, io.SeekEnd); err != nil { + return err + } + } + + if _, err = io.CopyN(buf, f, nread); err != nil { + if err != io.EOF { + return err + } + } + + b := buf.Bytes() + idx, done := lastIndexLines(b, &line, include) + + if done { + if chunk == -1 { + // We found all N lines in the last chunk of the file. + // The seek offset is also now at the current end of file. + // Save this buffer to avoid another GET request when Read() is called. + buf.Next(int(idx + 1)) + f.buf = buf + return nil + } + + if _, err = f.Seek(pos+idx+1, io.SeekStart); err != nil { + return err + } + + break + } + + if eof { + if remain < 0 { + // We found < N lines in the entire file, so seek to the start. 
+ _, _ = f.Seek(0, io.SeekStart) + } + break + } + + chunk-- + buf.Reset() + } + + return nil +} + +type followDatastoreFile struct { + r *DatastoreFile + c chan struct{} + i time.Duration + o sync.Once +} + +// Read reads up to len(b) bytes from the DatastoreFile being followed. +// This method will block until data is read, an error other than io.EOF is returned or Close() is called. +func (f *followDatastoreFile) Read(p []byte) (int, error) { + offset := f.r.offset.seek + stop := false + + for { + n, err := f.r.Read(p) + if err != nil && err == io.EOF { + _ = f.r.Close() // GET request body has been drained. + if stop { + return n, err + } + err = nil + } + + if n > 0 { + return n, err + } + + select { + case <-f.c: + // Wake up and stop polling once the body has been drained + stop = true + case <-time.After(f.i): + } + + info, serr := f.r.Stat() + if serr != nil { + // Return EOF rather than 404 if the file goes away + if serr == os.ErrNotExist { + _ = f.r.Close() + return 0, io.EOF + } + return 0, serr + } + + if info.Size() < offset { + // assume file has be truncated + offset, err = f.r.Seek(0, io.SeekStart) + if err != nil { + return 0, err + } + } + } +} + +// Close will stop Follow polling and close the underlying DatastoreFile. +func (f *followDatastoreFile) Close() error { + f.o.Do(func() { close(f.c) }) + return nil +} + +// Follow returns an io.ReadCloser to stream the file contents as data is appended. +func (f *DatastoreFile) Follow(interval time.Duration) io.ReadCloser { + return &followDatastoreFile{ + r: f, + c: make(chan struct{}), + i: interval, + } +} diff --git a/vendor/github.com/vmware/govmomi/object/datastore_file_manager.go b/vendor/github.com/vmware/govmomi/object/datastore_file_manager.go new file mode 100644 index 000000000000..a6e29c2c5634 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/datastore_file_manager.go @@ -0,0 +1,228 @@ +/* +Copyright (c) 2017-2018 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "log" + "path" + "strings" + + "github.com/vmware/govmomi/vim25/progress" + "github.com/vmware/govmomi/vim25/soap" +) + +// DatastoreFileManager combines FileManager and VirtualDiskManager to manage files on a Datastore +type DatastoreFileManager struct { + Datacenter *Datacenter + Datastore *Datastore + FileManager *FileManager + VirtualDiskManager *VirtualDiskManager + + Force bool + DatacenterTarget *Datacenter +} + +// NewFileManager creates a new instance of DatastoreFileManager +func (d Datastore) NewFileManager(dc *Datacenter, force bool) *DatastoreFileManager { + c := d.Client() + + m := &DatastoreFileManager{ + Datacenter: dc, + Datastore: &d, + FileManager: NewFileManager(c), + VirtualDiskManager: NewVirtualDiskManager(c), + Force: force, + DatacenterTarget: dc, + } + + return m +} + +func (m *DatastoreFileManager) WithProgress(ctx context.Context, s progress.Sinker) context.Context { + return context.WithValue(ctx, m, s) +} + +func (m *DatastoreFileManager) wait(ctx context.Context, task *Task) error { + var logger progress.Sinker + if s, ok := ctx.Value(m).(progress.Sinker); ok { + logger = s + } + _, err := task.WaitForResult(ctx, logger) + return err +} + +// Delete dispatches to the appropriate Delete method based on file name extension +func (m *DatastoreFileManager) Delete(ctx context.Context, name string) error { + switch path.Ext(name) { + case ".vmdk": + return m.DeleteVirtualDisk(ctx, name) + default: + return m.DeleteFile(ctx, name) + } +} + +// DeleteFile calls FileManager.DeleteDatastoreFile +func (m *DatastoreFileManager) DeleteFile(ctx context.Context, name string) error { + p := m.Path(name) + + task, err := m.FileManager.DeleteDatastoreFile(ctx, p.String(), m.Datacenter) + if err != nil { + return err + } + + return m.wait(ctx, task) +} + +// DeleteVirtualDisk calls VirtualDiskManager.DeleteVirtualDisk +// Regardless of the Datastore type, DeleteVirtualDisk will fail if 'ddb.deletable=false', +// so if Force=true this method attempts to set 'ddb.deletable=true' before starting the delete task. 
+func (m *DatastoreFileManager) DeleteVirtualDisk(ctx context.Context, name string) error { + p := m.Path(name) + + var merr error + + if m.Force { + merr = m.markDiskAsDeletable(ctx, p) + } + + task, err := m.VirtualDiskManager.DeleteVirtualDisk(ctx, p.String(), m.Datacenter) + if err != nil { + log.Printf("markDiskAsDeletable(%s): %s", p, merr) + return err + } + + return m.wait(ctx, task) +} + +// CopyFile calls FileManager.CopyDatastoreFile +func (m *DatastoreFileManager) CopyFile(ctx context.Context, src string, dst string) error { + srcp := m.Path(src) + dstp := m.Path(dst) + + task, err := m.FileManager.CopyDatastoreFile(ctx, srcp.String(), m.Datacenter, dstp.String(), m.DatacenterTarget, m.Force) + if err != nil { + return err + } + + return m.wait(ctx, task) +} + +// Copy dispatches to the appropriate FileManager or VirtualDiskManager Copy method based on file name extension +func (m *DatastoreFileManager) Copy(ctx context.Context, src string, dst string) error { + srcp := m.Path(src) + dstp := m.Path(dst) + + f := m.FileManager.CopyDatastoreFile + + if srcp.IsVMDK() { + // types.VirtualDiskSpec=nil as it is not implemented by vCenter + f = func(ctx context.Context, src string, srcDC *Datacenter, dst string, dstDC *Datacenter, force bool) (*Task, error) { + return m.VirtualDiskManager.CopyVirtualDisk(ctx, src, srcDC, dst, dstDC, nil, force) + } + } + + task, err := f(ctx, srcp.String(), m.Datacenter, dstp.String(), m.DatacenterTarget, m.Force) + if err != nil { + return err + } + + return m.wait(ctx, task) +} + +// MoveFile calls FileManager.MoveDatastoreFile +func (m *DatastoreFileManager) MoveFile(ctx context.Context, src string, dst string) error { + srcp := m.Path(src) + dstp := m.Path(dst) + + task, err := m.FileManager.MoveDatastoreFile(ctx, srcp.String(), m.Datacenter, dstp.String(), m.DatacenterTarget, m.Force) + if err != nil { + return err + } + + return m.wait(ctx, task) +} + +// Move dispatches to the appropriate FileManager or VirtualDiskManager Move method based on file name extension +func (m *DatastoreFileManager) Move(ctx context.Context, src string, dst string) error { + srcp := m.Path(src) + dstp := m.Path(dst) + + f := m.FileManager.MoveDatastoreFile + + if srcp.IsVMDK() { + f = m.VirtualDiskManager.MoveVirtualDisk + } + + task, err := f(ctx, srcp.String(), m.Datacenter, dstp.String(), m.DatacenterTarget, m.Force) + if err != nil { + return err + } + + return m.wait(ctx, task) +} + +// Path converts path name to a DatastorePath +func (m *DatastoreFileManager) Path(name string) *DatastorePath { + var p DatastorePath + + if !p.FromString(name) { + p.Path = name + p.Datastore = m.Datastore.Name() + } + + return &p +} + +func (m *DatastoreFileManager) markDiskAsDeletable(ctx context.Context, path *DatastorePath) error { + r, _, err := m.Datastore.Download(ctx, path.Path, &soap.DefaultDownload) + if err != nil { + return err + } + + defer r.Close() + + hasFlag := false + buf := new(bytes.Buffer) + + s := bufio.NewScanner(&io.LimitedReader{R: r, N: 2048}) // should be only a few hundred bytes, limit to be sure + + for s.Scan() { + line := s.Text() + if strings.HasPrefix(line, "ddb.deletable") { + hasFlag = true + continue + } + + fmt.Fprintln(buf, line) + } + + if err := s.Err(); err != nil { + return err // any error other than EOF + } + + if !hasFlag { + return nil // already deletable, so leave as-is + } + + // rewrite the .vmdk with ddb.deletable flag removed (the default is true) + return m.Datastore.Upload(ctx, buf, path.Path, &soap.DefaultUpload) +} diff --git 
a/vendor/github.com/vmware/govmomi/object/datastore_path.go b/vendor/github.com/vmware/govmomi/object/datastore_path.go new file mode 100644 index 000000000000..1563ee1e11db --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/datastore_path.go @@ -0,0 +1,71 @@ +/* +Copyright (c) 2016 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "fmt" + "path" + "strings" +) + +// DatastorePath contains the components of a datastore path. +type DatastorePath struct { + Datastore string + Path string +} + +// FromString parses a datastore path. +// Returns true if the path could be parsed, false otherwise. +func (p *DatastorePath) FromString(s string) bool { + if len(s) == 0 { + return false + } + + s = strings.TrimSpace(s) + + if !strings.HasPrefix(s, "[") { + return false + } + + s = s[1:] + + ix := strings.Index(s, "]") + if ix < 0 { + return false + } + + p.Datastore = s[:ix] + p.Path = strings.TrimSpace(s[ix+1:]) + + return true +} + +// String formats a datastore path. +func (p *DatastorePath) String() string { + s := fmt.Sprintf("[%s]", p.Datastore) + + if p.Path == "" { + return s + } + + return strings.Join([]string{s, p.Path}, " ") +} + +// IsVMDK returns true if Path has a ".vmdk" extension +func (p *DatastorePath) IsVMDK() bool { + return path.Ext(p.Path) == ".vmdk" +} diff --git a/vendor/github.com/vmware/govmomi/object/diagnostic_log.go b/vendor/github.com/vmware/govmomi/object/diagnostic_log.go new file mode 100644 index 000000000000..466d0ee915be --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/diagnostic_log.go @@ -0,0 +1,76 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + "fmt" + "io" + "math" +) + +// DiagnosticLog wraps DiagnosticManager.BrowseLog +type DiagnosticLog struct { + m DiagnosticManager + + Key string + Host *HostSystem + + Start int32 +} + +// Seek to log position starting at the last nlines of the log +func (l *DiagnosticLog) Seek(ctx context.Context, nlines int32) error { + h, err := l.m.BrowseLog(ctx, l.Host, l.Key, math.MaxInt32, 0) + if err != nil { + return err + } + + l.Start = h.LineEnd - nlines + + return nil +} + +// Copy log starting from l.Start to the given io.Writer +// Returns on error or when end of log is reached. 
+func (l *DiagnosticLog) Copy(ctx context.Context, w io.Writer) (int, error) { + const max = 500 // VC max == 500, ESX max == 1000 + written := 0 + + for { + h, err := l.m.BrowseLog(ctx, l.Host, l.Key, l.Start, max) + if err != nil { + return 0, err + } + + for _, line := range h.LineText { + n, err := fmt.Fprintln(w, line) + written += n + if err != nil { + return written, err + } + } + + l.Start += int32(len(h.LineText)) + + if l.Start >= h.LineEnd { + break + } + } + + return written, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/diagnostic_manager.go b/vendor/github.com/vmware/govmomi/object/diagnostic_manager.go new file mode 100644 index 000000000000..5baf1ad90383 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/diagnostic_manager.go @@ -0,0 +1,104 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type DiagnosticManager struct { + Common +} + +func NewDiagnosticManager(c *vim25.Client) *DiagnosticManager { + m := DiagnosticManager{ + Common: NewCommon(c, *c.ServiceContent.DiagnosticManager), + } + + return &m +} + +func (m DiagnosticManager) Log(ctx context.Context, host *HostSystem, key string) *DiagnosticLog { + return &DiagnosticLog{ + m: m, + Key: key, + Host: host, + } +} + +func (m DiagnosticManager) BrowseLog(ctx context.Context, host *HostSystem, key string, start, lines int32) (*types.DiagnosticManagerLogHeader, error) { + req := types.BrowseDiagnosticLog{ + This: m.Reference(), + Key: key, + Start: start, + Lines: lines, + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + res, err := methods.BrowseDiagnosticLog(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (m DiagnosticManager) GenerateLogBundles(ctx context.Context, includeDefault bool, host []*HostSystem) (*Task, error) { + req := types.GenerateLogBundles_Task{ + This: m.Reference(), + IncludeDefault: includeDefault, + } + + if host != nil { + for _, h := range host { + req.Host = append(req.Host, h.Reference()) + } + } + + res, err := methods.GenerateLogBundles_Task(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return NewTask(m.c, res.Returnval), nil +} + +func (m DiagnosticManager) QueryDescriptions(ctx context.Context, host *HostSystem) ([]types.DiagnosticManagerLogDescriptor, error) { + req := types.QueryDescriptions{ + This: m.Reference(), + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + res, err := methods.QueryDescriptions(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go b/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go new file mode 100644 index 000000000000..f8ac5512c1f0 --- /dev/null +++ 
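A short sketch of tailing a host log with the DiagnosticManager and DiagnosticLog helpers above; the *vim25.Client, the *object.HostSystem and the "hostd" log key are assumptions supplied by the caller.

package example

import (
	"context"
	"os"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25"
)

// tailHostLog prints the last 25 lines of the given host's "hostd" log.
func tailHostLog(ctx context.Context, c *vim25.Client, host *object.HostSystem) error {
	l := object.NewDiagnosticManager(c).Log(ctx, host, "hostd")

	// Seek positions Start relative to the end of the log.
	if err := l.Seek(ctx, 25); err != nil {
		return err
	}

	// Copy pages through BrowseLog until the end of the log is reached.
	_, err := l.Copy(ctx, os.Stdout)
	return err
}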
b/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go @@ -0,0 +1,80 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + "fmt" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type DistributedVirtualPortgroup struct { + Common +} + +func NewDistributedVirtualPortgroup(c *vim25.Client, ref types.ManagedObjectReference) *DistributedVirtualPortgroup { + return &DistributedVirtualPortgroup{ + Common: NewCommon(c, ref), + } +} + +// EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this DistributedVirtualPortgroup +func (p DistributedVirtualPortgroup) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { + var dvp mo.DistributedVirtualPortgroup + var dvs mo.DistributedVirtualSwitch + prop := "config.distributedVirtualSwitch" + + if err := p.Properties(ctx, p.Reference(), []string{"key", prop}, &dvp); err != nil { + return nil, err + } + + // "This property should always be set unless the user's setting does not have System.Read privilege on the object referred to by this property." + if dvp.Config.DistributedVirtualSwitch == nil { + return nil, fmt.Errorf("no System.Read privilege on: %s.%s", p.Reference(), prop) + } + + if err := p.Properties(ctx, *dvp.Config.DistributedVirtualSwitch, []string{"uuid"}, &dvs); err != nil { + return nil, err + } + + backing := &types.VirtualEthernetCardDistributedVirtualPortBackingInfo{ + Port: types.DistributedVirtualSwitchPortConnection{ + PortgroupKey: dvp.Key, + SwitchUuid: dvs.Uuid, + }, + } + + return backing, nil +} + +func (p DistributedVirtualPortgroup) Reconfigure(ctx context.Context, spec types.DVPortgroupConfigSpec) (*Task, error) { + req := types.ReconfigureDVPortgroup_Task{ + This: p.Reference(), + Spec: spec, + } + + res, err := methods.ReconfigureDVPortgroup_Task(ctx, p.Client(), &req) + if err != nil { + return nil, err + } + + return NewTask(p.Client(), res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go b/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go new file mode 100644 index 000000000000..526ce4bf7871 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go @@ -0,0 +1,80 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type DistributedVirtualSwitch struct { + Common +} + +func NewDistributedVirtualSwitch(c *vim25.Client, ref types.ManagedObjectReference) *DistributedVirtualSwitch { + return &DistributedVirtualSwitch{ + Common: NewCommon(c, ref), + } +} + +func (s DistributedVirtualSwitch) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { + return nil, ErrNotSupported // TODO: just to satisfy NetworkReference interface for the finder +} + +func (s DistributedVirtualSwitch) Reconfigure(ctx context.Context, spec types.BaseDVSConfigSpec) (*Task, error) { + req := types.ReconfigureDvs_Task{ + This: s.Reference(), + Spec: spec, + } + + res, err := methods.ReconfigureDvs_Task(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return NewTask(s.Client(), res.Returnval), nil +} + +func (s DistributedVirtualSwitch) AddPortgroup(ctx context.Context, spec []types.DVPortgroupConfigSpec) (*Task, error) { + req := types.AddDVPortgroup_Task{ + This: s.Reference(), + Spec: spec, + } + + res, err := methods.AddDVPortgroup_Task(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return NewTask(s.Client(), res.Returnval), nil +} + +func (s DistributedVirtualSwitch) FetchDVPorts(ctx context.Context, criteria *types.DistributedVirtualSwitchPortCriteria) ([]types.DistributedVirtualPort, error) { + req := &types.FetchDVPorts{ + This: s.Reference(), + Criteria: criteria, + } + + res, err := methods.FetchDVPorts(ctx, s.Client(), req) + if err != nil { + return nil, err + } + return res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/extension_manager.go b/vendor/github.com/vmware/govmomi/object/extension_manager.go new file mode 100644 index 000000000000..94ade017c2ad --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/extension_manager.go @@ -0,0 +1,113 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type ExtensionManager struct { + Common +} + +// GetExtensionManager wraps NewExtensionManager, returning ErrNotSupported +// when the client is not connected to a vCenter instance. 
+func GetExtensionManager(c *vim25.Client) (*ExtensionManager, error) { + if c.ServiceContent.ExtensionManager == nil { + return nil, ErrNotSupported + } + return NewExtensionManager(c), nil +} + +func NewExtensionManager(c *vim25.Client) *ExtensionManager { + o := ExtensionManager{ + Common: NewCommon(c, *c.ServiceContent.ExtensionManager), + } + + return &o +} + +func (m ExtensionManager) List(ctx context.Context) ([]types.Extension, error) { + var em mo.ExtensionManager + + err := m.Properties(ctx, m.Reference(), []string{"extensionList"}, &em) + if err != nil { + return nil, err + } + + return em.ExtensionList, nil +} + +func (m ExtensionManager) Find(ctx context.Context, key string) (*types.Extension, error) { + req := types.FindExtension{ + This: m.Reference(), + ExtensionKey: key, + } + + res, err := methods.FindExtension(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (m ExtensionManager) Register(ctx context.Context, extension types.Extension) error { + req := types.RegisterExtension{ + This: m.Reference(), + Extension: extension, + } + + _, err := methods.RegisterExtension(ctx, m.c, &req) + return err +} + +func (m ExtensionManager) SetCertificate(ctx context.Context, key string, certificatePem string) error { + req := types.SetExtensionCertificate{ + This: m.Reference(), + ExtensionKey: key, + CertificatePem: certificatePem, + } + + _, err := methods.SetExtensionCertificate(ctx, m.c, &req) + return err +} + +func (m ExtensionManager) Unregister(ctx context.Context, key string) error { + req := types.UnregisterExtension{ + This: m.Reference(), + ExtensionKey: key, + } + + _, err := methods.UnregisterExtension(ctx, m.c, &req) + return err +} + +func (m ExtensionManager) Update(ctx context.Context, extension types.Extension) error { + req := types.UpdateExtension{ + This: m.Reference(), + Extension: extension, + } + + _, err := methods.UpdateExtension(ctx, m.c, &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/file_manager.go b/vendor/github.com/vmware/govmomi/object/file_manager.go new file mode 100644 index 000000000000..ba947be20d92 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/file_manager.go @@ -0,0 +1,126 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
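A hedged sketch of looking up a vCenter extension with the ExtensionManager methods above; the client and extension key are assumptions. GetExtensionManager returns ErrNotSupported unless connected to vCenter, and Find returns a nil Extension without error when the key is unknown.

package example

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25"
)

// findExtension prints the version of a registered extension, if any.
func findExtension(ctx context.Context, c *vim25.Client, key string) error {
	m, err := object.GetExtensionManager(c)
	if err != nil {
		return err // ErrNotSupported unless connected to a vCenter instance
	}

	ext, err := m.Find(ctx, key)
	if err != nil {
		return err
	}

	if ext == nil {
		fmt.Printf("extension %q is not registered\n", key)
		return nil
	}

	fmt.Printf("%s version %s\n", ext.Key, ext.Version)
	return nil
}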
+*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type FileManager struct { + Common +} + +func NewFileManager(c *vim25.Client) *FileManager { + f := FileManager{ + Common: NewCommon(c, *c.ServiceContent.FileManager), + } + + return &f +} + +func (f FileManager) CopyDatastoreFile(ctx context.Context, sourceName string, sourceDatacenter *Datacenter, destinationName string, destinationDatacenter *Datacenter, force bool) (*Task, error) { + req := types.CopyDatastoreFile_Task{ + This: f.Reference(), + SourceName: sourceName, + DestinationName: destinationName, + Force: types.NewBool(force), + } + + if sourceDatacenter != nil { + ref := sourceDatacenter.Reference() + req.SourceDatacenter = &ref + } + + if destinationDatacenter != nil { + ref := destinationDatacenter.Reference() + req.DestinationDatacenter = &ref + } + + res, err := methods.CopyDatastoreFile_Task(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewTask(f.c, res.Returnval), nil +} + +// DeleteDatastoreFile deletes the specified file or folder from the datastore. +func (f FileManager) DeleteDatastoreFile(ctx context.Context, name string, dc *Datacenter) (*Task, error) { + req := types.DeleteDatastoreFile_Task{ + This: f.Reference(), + Name: name, + } + + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.DeleteDatastoreFile_Task(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewTask(f.c, res.Returnval), nil +} + +// MakeDirectory creates a folder using the specified name. +func (f FileManager) MakeDirectory(ctx context.Context, name string, dc *Datacenter, createParentDirectories bool) error { + req := types.MakeDirectory{ + This: f.Reference(), + Name: name, + CreateParentDirectories: types.NewBool(createParentDirectories), + } + + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + _, err := methods.MakeDirectory(ctx, f.c, &req) + return err +} + +func (f FileManager) MoveDatastoreFile(ctx context.Context, sourceName string, sourceDatacenter *Datacenter, destinationName string, destinationDatacenter *Datacenter, force bool) (*Task, error) { + req := types.MoveDatastoreFile_Task{ + This: f.Reference(), + SourceName: sourceName, + DestinationName: destinationName, + Force: types.NewBool(force), + } + + if sourceDatacenter != nil { + ref := sourceDatacenter.Reference() + req.SourceDatacenter = &ref + } + + if destinationDatacenter != nil { + ref := destinationDatacenter.Reference() + req.DestinationDatacenter = &ref + } + + res, err := methods.MoveDatastoreFile_Task(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewTask(f.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/folder.go b/vendor/github.com/vmware/govmomi/object/folder.go new file mode 100644 index 000000000000..7a69949f95af --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/folder.go @@ -0,0 +1,227 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type Folder struct { + Common +} + +func NewFolder(c *vim25.Client, ref types.ManagedObjectReference) *Folder { + return &Folder{ + Common: NewCommon(c, ref), + } +} + +func NewRootFolder(c *vim25.Client) *Folder { + f := NewFolder(c, c.ServiceContent.RootFolder) + f.InventoryPath = "/" + return f +} + +func (f Folder) Children(ctx context.Context) ([]Reference, error) { + var mf mo.Folder + + err := f.Properties(ctx, f.Reference(), []string{"childEntity"}, &mf) + if err != nil { + return nil, err + } + + var rs []Reference + for _, e := range mf.ChildEntity { + if r := NewReference(f.c, e); r != nil { + rs = append(rs, r) + } + } + + return rs, nil +} + +func (f Folder) CreateDatacenter(ctx context.Context, datacenter string) (*Datacenter, error) { + req := types.CreateDatacenter{ + This: f.Reference(), + Name: datacenter, + } + + res, err := methods.CreateDatacenter(ctx, f.c, &req) + if err != nil { + return nil, err + } + + // Response will be nil if this is an ESX host that does not belong to a vCenter + if res == nil { + return nil, nil + } + + return NewDatacenter(f.c, res.Returnval), nil +} + +func (f Folder) CreateCluster(ctx context.Context, cluster string, spec types.ClusterConfigSpecEx) (*ClusterComputeResource, error) { + req := types.CreateClusterEx{ + This: f.Reference(), + Name: cluster, + Spec: spec, + } + + res, err := methods.CreateClusterEx(ctx, f.c, &req) + if err != nil { + return nil, err + } + + // Response will be nil if this is an ESX host that does not belong to a vCenter + if res == nil { + return nil, nil + } + + return NewClusterComputeResource(f.c, res.Returnval), nil +} + +func (f Folder) CreateFolder(ctx context.Context, name string) (*Folder, error) { + req := types.CreateFolder{ + This: f.Reference(), + Name: name, + } + + res, err := methods.CreateFolder(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewFolder(f.c, res.Returnval), err +} + +func (f Folder) CreateStoragePod(ctx context.Context, name string) (*StoragePod, error) { + req := types.CreateStoragePod{ + This: f.Reference(), + Name: name, + } + + res, err := methods.CreateStoragePod(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewStoragePod(f.c, res.Returnval), err +} + +func (f Folder) AddStandaloneHost(ctx context.Context, spec types.HostConnectSpec, addConnected bool, license *string, compResSpec *types.BaseComputeResourceConfigSpec) (*Task, error) { + req := types.AddStandaloneHost_Task{ + This: f.Reference(), + Spec: spec, + AddConnected: addConnected, + } + + if license != nil { + req.License = *license + } + + if compResSpec != nil { + req.CompResSpec = *compResSpec + } + + res, err := methods.AddStandaloneHost_Task(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewTask(f.c, res.Returnval), nil +} + +func (f Folder) CreateVM(ctx context.Context, config types.VirtualMachineConfigSpec, pool *ResourcePool, host *HostSystem) (*Task, error) { + req := types.CreateVM_Task{ + This: f.Reference(), + Config: config, + Pool: pool.Reference(), + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + res, err := methods.CreateVM_Task(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return 
NewTask(f.c, res.Returnval), nil +} + +func (f Folder) RegisterVM(ctx context.Context, path string, name string, asTemplate bool, pool *ResourcePool, host *HostSystem) (*Task, error) { + req := types.RegisterVM_Task{ + This: f.Reference(), + Path: path, + AsTemplate: asTemplate, + } + + if name != "" { + req.Name = name + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + if pool != nil { + ref := pool.Reference() + req.Pool = &ref + } + + res, err := methods.RegisterVM_Task(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewTask(f.c, res.Returnval), nil +} + +func (f Folder) CreateDVS(ctx context.Context, spec types.DVSCreateSpec) (*Task, error) { + req := types.CreateDVS_Task{ + This: f.Reference(), + Spec: spec, + } + + res, err := methods.CreateDVS_Task(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewTask(f.c, res.Returnval), nil +} + +func (f Folder) MoveInto(ctx context.Context, list []types.ManagedObjectReference) (*Task, error) { + req := types.MoveIntoFolder_Task{ + This: f.Reference(), + List: list, + } + + res, err := methods.MoveIntoFolder_Task(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return NewTask(f.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/history_collector.go b/vendor/github.com/vmware/govmomi/object/history_collector.go new file mode 100644 index 000000000000..afdcab78b607 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/history_collector.go @@ -0,0 +1,72 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
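A sketch of registering an existing .vmx with the Folder.RegisterVM helper above and waiting on the returned task; the folder, resource pool and datastore path are placeholders, and Task.Wait is assumed to come from this package's Task helper, which is defined outside this hunk.

package example

import (
	"context"

	"github.com/vmware/govmomi/object"
)

// registerVM registers a pre-existing VM by its .vmx datastore path.
func registerVM(ctx context.Context, vmFolder *object.Folder, pool *object.ResourcePool) error {
	task, err := vmFolder.RegisterVM(ctx, "[ds1] demo/demo.vmx", "demo", false, pool, nil)
	if err != nil {
		return err
	}

	// Block until the registration task completes or fails.
	return task.Wait(ctx)
}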
+*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type HistoryCollector struct { + Common +} + +func NewHistoryCollector(c *vim25.Client, ref types.ManagedObjectReference) *HistoryCollector { + return &HistoryCollector{ + Common: NewCommon(c, ref), + } +} + +func (h HistoryCollector) Destroy(ctx context.Context) error { + req := types.DestroyCollector{ + This: h.Reference(), + } + + _, err := methods.DestroyCollector(ctx, h.c, &req) + return err +} + +func (h HistoryCollector) Reset(ctx context.Context) error { + req := types.ResetCollector{ + This: h.Reference(), + } + + _, err := methods.ResetCollector(ctx, h.c, &req) + return err +} + +func (h HistoryCollector) Rewind(ctx context.Context) error { + req := types.RewindCollector{ + This: h.Reference(), + } + + _, err := methods.RewindCollector(ctx, h.c, &req) + return err +} + +func (h HistoryCollector) SetPageSize(ctx context.Context, maxCount int32) error { + req := types.SetCollectorPageSize{ + This: h.Reference(), + MaxCount: maxCount, + } + + _, err := methods.SetCollectorPageSize(ctx, h.c, &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/host_account_manager.go b/vendor/github.com/vmware/govmomi/object/host_account_manager.go new file mode 100644 index 000000000000..640aff860319 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_account_manager.go @@ -0,0 +1,65 @@ +/* +Copyright (c) 2016 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type HostAccountManager struct { + Common +} + +func NewHostAccountManager(c *vim25.Client, ref types.ManagedObjectReference) *HostAccountManager { + return &HostAccountManager{ + Common: NewCommon(c, ref), + } +} + +func (m HostAccountManager) Create(ctx context.Context, user *types.HostAccountSpec) error { + req := types.CreateUser{ + This: m.Reference(), + User: user, + } + + _, err := methods.CreateUser(ctx, m.Client(), &req) + return err +} + +func (m HostAccountManager) Update(ctx context.Context, user *types.HostAccountSpec) error { + req := types.UpdateUser{ + This: m.Reference(), + User: user, + } + + _, err := methods.UpdateUser(ctx, m.Client(), &req) + return err +} + +func (m HostAccountManager) Remove(ctx context.Context, userName string) error { + req := types.RemoveUser{ + This: m.Reference(), + UserName: userName, + } + + _, err := methods.RemoveUser(ctx, m.Client(), &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/host_certificate_info.go b/vendor/github.com/vmware/govmomi/object/host_certificate_info.go new file mode 100644 index 000000000000..52c26a9dd687 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_certificate_info.go @@ -0,0 +1,250 @@ +/* +Copyright (c) 2016 VMware, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "fmt" + "io" + "net/url" + "strings" + "text/tabwriter" + + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +// HostCertificateInfo provides helpers for types.HostCertificateManagerCertificateInfo +type HostCertificateInfo struct { + types.HostCertificateManagerCertificateInfo + + ThumbprintSHA1 string + ThumbprintSHA256 string + + Err error + Certificate *x509.Certificate `json:"-"` + + subjectName *pkix.Name + issuerName *pkix.Name +} + +// FromCertificate converts x509.Certificate to HostCertificateInfo +func (info *HostCertificateInfo) FromCertificate(cert *x509.Certificate) *HostCertificateInfo { + info.Certificate = cert + info.subjectName = &cert.Subject + info.issuerName = &cert.Issuer + + info.Issuer = info.fromName(info.issuerName) + info.NotBefore = &cert.NotBefore + info.NotAfter = &cert.NotAfter + info.Subject = info.fromName(info.subjectName) + + info.ThumbprintSHA1 = soap.ThumbprintSHA1(cert) + + // SHA-256 for info purposes only, API fields all use SHA-1 + sum := sha256.Sum256(cert.Raw) + hex := make([]string, len(sum)) + for i, b := range sum { + hex[i] = fmt.Sprintf("%02X", b) + } + info.ThumbprintSHA256 = strings.Join(hex, ":") + + if info.Status == "" { + info.Status = string(types.HostCertificateManagerCertificateInfoCertificateStatusUnknown) + } + + return info +} + +// FromURL connects to the given URL.Host via tls.Dial with the given tls.Config and populates the HostCertificateInfo +// via tls.ConnectionState. If the certificate was verified with the given tls.Config, the Err field will be nil. +// Otherwise, Err will be set to the x509.UnknownAuthorityError or x509.HostnameError. +// If tls.Dial returns an error of any other type, that error is returned. 
+func (info *HostCertificateInfo) FromURL(u *url.URL, config *tls.Config) error { + addr := u.Host + if !(strings.LastIndex(addr, ":") > strings.LastIndex(addr, "]")) { + addr += ":443" + } + + conn, err := tls.Dial("tcp", addr, config) + if err != nil { + switch err.(type) { + case x509.UnknownAuthorityError: + case x509.HostnameError: + default: + return err + } + + info.Err = err + + conn, err = tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true}) + if err != nil { + return err + } + } else { + info.Status = string(types.HostCertificateManagerCertificateInfoCertificateStatusGood) + } + + state := conn.ConnectionState() + _ = conn.Close() + info.FromCertificate(state.PeerCertificates[0]) + + return nil +} + +var emailAddressOID = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 1} + +func (info *HostCertificateInfo) fromName(name *pkix.Name) string { + var attrs []string + + oids := map[string]string{ + emailAddressOID.String(): "emailAddress", + } + + for _, attr := range name.Names { + if key, ok := oids[attr.Type.String()]; ok { + attrs = append(attrs, fmt.Sprintf("%s=%s", key, attr.Value)) + } + } + + attrs = append(attrs, fmt.Sprintf("CN=%s", name.CommonName)) + + add := func(key string, vals []string) { + for _, val := range vals { + attrs = append(attrs, fmt.Sprintf("%s=%s", key, val)) + } + } + + elts := []struct { + key string + val []string + }{ + {"OU", name.OrganizationalUnit}, + {"O", name.Organization}, + {"L", name.Locality}, + {"ST", name.Province}, + {"C", name.Country}, + } + + for _, elt := range elts { + add(elt.key, elt.val) + } + + return strings.Join(attrs, ",") +} + +func (info *HostCertificateInfo) toName(s string) *pkix.Name { + var name pkix.Name + + for _, pair := range strings.Split(s, ",") { + attr := strings.SplitN(pair, "=", 2) + if len(attr) != 2 { + continue + } + + v := attr[1] + + switch strings.ToLower(attr[0]) { + case "cn": + name.CommonName = v + case "ou": + name.OrganizationalUnit = append(name.OrganizationalUnit, v) + case "o": + name.Organization = append(name.Organization, v) + case "l": + name.Locality = append(name.Locality, v) + case "st": + name.Province = append(name.Province, v) + case "c": + name.Country = append(name.Country, v) + case "emailaddress": + name.Names = append(name.Names, pkix.AttributeTypeAndValue{Type: emailAddressOID, Value: v}) + } + } + + return &name +} + +// SubjectName parses Subject into a pkix.Name +func (info *HostCertificateInfo) SubjectName() *pkix.Name { + if info.subjectName != nil { + return info.subjectName + } + + return info.toName(info.Subject) +} + +// IssuerName parses Issuer into a pkix.Name +func (info *HostCertificateInfo) IssuerName() *pkix.Name { + if info.issuerName != nil { + return info.issuerName + } + + return info.toName(info.Issuer) +} + +// Write outputs info similar to the Chrome Certificate Viewer. 
+func (info *HostCertificateInfo) Write(w io.Writer) error { + tw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0) + + s := func(val string) string { + if val != "" { + return val + } + return "<Not Part Of Certificate>" + } + + ss := func(val []string) string { + return s(strings.Join(val, ",")) + } + + name := func(n *pkix.Name) { + fmt.Fprintf(tw, " Common Name (CN):\t%s\n", s(n.CommonName)) + fmt.Fprintf(tw, " Organization (O):\t%s\n", ss(n.Organization)) + fmt.Fprintf(tw, " Organizational Unit (OU):\t%s\n", ss(n.OrganizationalUnit)) + } + + status := info.Status + if info.Err != nil { + status = fmt.Sprintf("ERROR %s", info.Err) + } + fmt.Fprintf(tw, "Certificate Status:\t%s\n", status) + + fmt.Fprintln(tw, "Issued To:\t") + name(info.SubjectName()) + + fmt.Fprintln(tw, "Issued By:\t") + name(info.IssuerName()) + + fmt.Fprintln(tw, "Validity Period:\t") + fmt.Fprintf(tw, " Issued On:\t%s\n", info.NotBefore) + fmt.Fprintf(tw, " Expires On:\t%s\n", info.NotAfter) + + if info.ThumbprintSHA1 != "" { + fmt.Fprintln(tw, "Thumbprints:\t") + if info.ThumbprintSHA256 != "" { + fmt.Fprintf(tw, " SHA-256 Thumbprint:\t%s\n", info.ThumbprintSHA256) + } + fmt.Fprintf(tw, " SHA-1 Thumbprint:\t%s\n", info.ThumbprintSHA1) + } + + return tw.Flush() +} diff --git a/vendor/github.com/vmware/govmomi/object/host_certificate_manager.go b/vendor/github.com/vmware/govmomi/object/host_certificate_manager.go new file mode 100644 index 000000000000..2875a9fc1add --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_certificate_manager.go @@ -0,0 +1,162 @@ +/* +Copyright (c) 2016 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// HostCertificateManager provides helper methods around the HostSystem.ConfigManager.CertificateManager +type HostCertificateManager struct { + Common + Host *HostSystem +} + +// NewHostCertificateManager creates a new HostCertificateManager helper +func NewHostCertificateManager(c *vim25.Client, ref types.ManagedObjectReference, host types.ManagedObjectReference) *HostCertificateManager { + return &HostCertificateManager{ + Common: NewCommon(c, ref), + Host: NewHostSystem(c, host), + } +} + +// CertificateInfo wraps the host CertificateManager certificateInfo property with the HostCertificateInfo helper. +// The ThumbprintSHA1 field is set to HostSystem.Summary.Config.SslThumbprint if the host system is managed by a vCenter.
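A minimal sketch of the HostCertificateInfo helpers above: FromURL fetches the certificate presented by an endpoint (recording a verification failure in Err instead of returning it), and Write renders the result in the tabular form shown. The URL is a placeholder.

package example

import (
	"crypto/tls"
	"net/url"
	"os"

	"github.com/vmware/govmomi/object"
)

// showHostCert prints certificate details for an ESX or vCenter endpoint.
func showHostCert(rawURL string) error {
	u, err := url.Parse(rawURL) // e.g. "https://esx1.example.org"
	if err != nil {
		return err
	}

	var info object.HostCertificateInfo
	if err := info.FromURL(u, &tls.Config{}); err != nil {
		return err
	}

	return info.Write(os.Stdout)
}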
+func (m HostCertificateManager) CertificateInfo(ctx context.Context) (*HostCertificateInfo, error) { + var hs mo.HostSystem + var cm mo.HostCertificateManager + + pc := property.DefaultCollector(m.Client()) + + err := pc.RetrieveOne(ctx, m.Reference(), []string{"certificateInfo"}, &cm) + if err != nil { + return nil, err + } + + _ = pc.RetrieveOne(ctx, m.Host.Reference(), []string{"summary.config.sslThumbprint"}, &hs) + + return &HostCertificateInfo{ + HostCertificateManagerCertificateInfo: cm.CertificateInfo, + ThumbprintSHA1: hs.Summary.Config.SslThumbprint, + }, nil +} + +// GenerateCertificateSigningRequest requests the host system to generate a certificate-signing request (CSR) for itself. +// The CSR is then typically provided to a Certificate Authority to sign and issue the SSL certificate for the host system. +// Use InstallServerCertificate to import this certificate. +func (m HostCertificateManager) GenerateCertificateSigningRequest(ctx context.Context, useIPAddressAsCommonName bool) (string, error) { + req := types.GenerateCertificateSigningRequest{ + This: m.Reference(), + UseIpAddressAsCommonName: useIPAddressAsCommonName, + } + + res, err := methods.GenerateCertificateSigningRequest(ctx, m.Client(), &req) + if err != nil { + return "", err + } + + return res.Returnval, nil +} + +// GenerateCertificateSigningRequestByDn requests the host system to generate a certificate-signing request (CSR) for itself. +// Alternative version similar to GenerateCertificateSigningRequest but takes a Distinguished Name (DN) as a parameter. +func (m HostCertificateManager) GenerateCertificateSigningRequestByDn(ctx context.Context, distinguishedName string) (string, error) { + req := types.GenerateCertificateSigningRequestByDn{ + This: m.Reference(), + DistinguishedName: distinguishedName, + } + + res, err := methods.GenerateCertificateSigningRequestByDn(ctx, m.Client(), &req) + if err != nil { + return "", err + } + + return res.Returnval, nil +} + +// InstallServerCertificate imports the given SSL certificate to the host system. +func (m HostCertificateManager) InstallServerCertificate(ctx context.Context, cert string) error { + req := types.InstallServerCertificate{ + This: m.Reference(), + Cert: cert, + } + + _, err := methods.InstallServerCertificate(ctx, m.Client(), &req) + if err != nil { + return err + } + + // NotifyAffectedService is internal, not exposing as we don't have a use case other than with InstallServerCertificate + // Without this call, hostd needs to be restarted to use the updated certificate + // Note: using Refresh as it has the same struct/signature, we just need to use different xml name tags + body := struct { + Req *types.Refresh `xml:"urn:vim25 NotifyAffectedServices,omitempty"` + Res *types.RefreshResponse `xml:"urn:vim25 NotifyAffectedServicesResponse,omitempty"` + methods.RefreshBody + }{ + Req: &types.Refresh{This: m.Reference()}, + } + + return m.Client().RoundTrip(ctx, &body, &body) +} + +// ListCACertificateRevocationLists returns the SSL CRLs of Certificate Authorities that are trusted by the host system. +func (m HostCertificateManager) ListCACertificateRevocationLists(ctx context.Context) ([]string, error) { + req := types.ListCACertificateRevocationLists{ + This: m.Reference(), + } + + res, err := methods.ListCACertificateRevocationLists(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +// ListCACertificates returns the SSL certificates of Certificate Authorities that are trusted by the host system. 
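A sketch of the CSR workflow described above: generate a signing request for the host, have an external CA sign it (the sign callback stands in for that step and is not part of govmomi), then install the issued certificate.

package example

import (
	"context"

	"github.com/vmware/govmomi/object"
)

// rotateHostCertificate renews a host certificate via an external signer.
func rotateHostCertificate(ctx context.Context, m *object.HostCertificateManager, sign func(csr string) (pem string, err error)) error {
	csr, err := m.GenerateCertificateSigningRequest(ctx, false)
	if err != nil {
		return err
	}

	cert, err := sign(csr)
	if err != nil {
		return err
	}

	// InstallServerCertificate also notifies affected services so hostd
	// picks up the new certificate without a restart.
	return m.InstallServerCertificate(ctx, cert)
}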
+func (m HostCertificateManager) ListCACertificates(ctx context.Context) ([]string, error) { + req := types.ListCACertificates{ + This: m.Reference(), + } + + res, err := methods.ListCACertificates(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +// ReplaceCACertificatesAndCRLs replaces the trusted CA certificates and CRL used by the host system. +// These determine whether the server can verify the identity of an external entity. +func (m HostCertificateManager) ReplaceCACertificatesAndCRLs(ctx context.Context, caCert []string, caCrl []string) error { + req := types.ReplaceCACertificatesAndCRLs{ + This: m.Reference(), + CaCert: caCert, + CaCrl: caCrl, + } + + _, err := methods.ReplaceCACertificatesAndCRLs(ctx, m.Client(), &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/host_config_manager.go b/vendor/github.com/vmware/govmomi/object/host_config_manager.go new file mode 100644 index 000000000000..123227ecc706 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_config_manager.go @@ -0,0 +1,196 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type HostConfigManager struct { + Common +} + +func NewHostConfigManager(c *vim25.Client, ref types.ManagedObjectReference) *HostConfigManager { + return &HostConfigManager{ + Common: NewCommon(c, ref), + } +} + +func (m HostConfigManager) DatastoreSystem(ctx context.Context) (*HostDatastoreSystem, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.datastoreSystem"}, &h) + if err != nil { + return nil, err + } + + return NewHostDatastoreSystem(m.c, *h.ConfigManager.DatastoreSystem), nil +} + +func (m HostConfigManager) NetworkSystem(ctx context.Context) (*HostNetworkSystem, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.networkSystem"}, &h) + if err != nil { + return nil, err + } + + return NewHostNetworkSystem(m.c, *h.ConfigManager.NetworkSystem), nil +} + +func (m HostConfigManager) FirewallSystem(ctx context.Context) (*HostFirewallSystem, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.firewallSystem"}, &h) + if err != nil { + return nil, err + } + + return NewHostFirewallSystem(m.c, *h.ConfigManager.FirewallSystem), nil +} + +func (m HostConfigManager) StorageSystem(ctx context.Context) (*HostStorageSystem, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.storageSystem"}, &h) + if err != nil { + return nil, err + } + + return NewHostStorageSystem(m.c, *h.ConfigManager.StorageSystem), nil +} + +func (m HostConfigManager) VirtualNicManager(ctx context.Context) (*HostVirtualNicManager, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), 
[]string{"configManager.virtualNicManager"}, &h) + if err != nil { + return nil, err + } + + return NewHostVirtualNicManager(m.c, *h.ConfigManager.VirtualNicManager, m.Reference()), nil +} + +func (m HostConfigManager) VsanSystem(ctx context.Context) (*HostVsanSystem, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.vsanSystem"}, &h) + if err != nil { + return nil, err + } + + // Added in 5.5 + if h.ConfigManager.VsanSystem == nil { + return nil, ErrNotSupported + } + + return NewHostVsanSystem(m.c, *h.ConfigManager.VsanSystem), nil +} + +func (m HostConfigManager) VsanInternalSystem(ctx context.Context) (*HostVsanInternalSystem, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.vsanInternalSystem"}, &h) + if err != nil { + return nil, err + } + + // Added in 5.5 + if h.ConfigManager.VsanInternalSystem == nil { + return nil, ErrNotSupported + } + + return NewHostVsanInternalSystem(m.c, *h.ConfigManager.VsanInternalSystem), nil +} + +func (m HostConfigManager) AccountManager(ctx context.Context) (*HostAccountManager, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.accountManager"}, &h) + if err != nil { + return nil, err + } + + ref := h.ConfigManager.AccountManager // Added in 6.0 + if ref == nil { + // Versions < 5.5 can use the ServiceContent ref, + // but we can only use it when connected directly to ESX. + c := m.Client() + if !c.IsVC() { + ref = c.ServiceContent.AccountManager + } + + if ref == nil { + return nil, ErrNotSupported + } + } + + return NewHostAccountManager(m.c, *ref), nil +} + +func (m HostConfigManager) OptionManager(ctx context.Context) (*OptionManager, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.advancedOption"}, &h) + if err != nil { + return nil, err + } + + return NewOptionManager(m.c, *h.ConfigManager.AdvancedOption), nil +} + +func (m HostConfigManager) ServiceSystem(ctx context.Context) (*HostServiceSystem, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.serviceSystem"}, &h) + if err != nil { + return nil, err + } + + return NewHostServiceSystem(m.c, *h.ConfigManager.ServiceSystem), nil +} + +func (m HostConfigManager) CertificateManager(ctx context.Context) (*HostCertificateManager, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.certificateManager"}, &h) + if err != nil { + return nil, err + } + + // Added in 6.0 + if h.ConfigManager.CertificateManager == nil { + return nil, ErrNotSupported + } + + return NewHostCertificateManager(m.c, *h.ConfigManager.CertificateManager, m.Reference()), nil +} + +func (m HostConfigManager) DateTimeSystem(ctx context.Context) (*HostDateTimeSystem, error) { + var h mo.HostSystem + + err := m.Properties(ctx, m.Reference(), []string{"configManager.dateTimeSystem"}, &h) + if err != nil { + return nil, err + } + + return NewHostDateTimeSystem(m.c, *h.ConfigManager.DateTimeSystem), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_datastore_browser.go b/vendor/github.com/vmware/govmomi/object/host_datastore_browser.go new file mode 100644 index 000000000000..b0c9e08a12ef --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_datastore_browser.go @@ -0,0 +1,65 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type HostDatastoreBrowser struct { + Common +} + +func NewHostDatastoreBrowser(c *vim25.Client, ref types.ManagedObjectReference) *HostDatastoreBrowser { + return &HostDatastoreBrowser{ + Common: NewCommon(c, ref), + } +} + +func (b HostDatastoreBrowser) SearchDatastore(ctx context.Context, datastorePath string, searchSpec *types.HostDatastoreBrowserSearchSpec) (*Task, error) { + req := types.SearchDatastore_Task{ + This: b.Reference(), + DatastorePath: datastorePath, + SearchSpec: searchSpec, + } + + res, err := methods.SearchDatastore_Task(ctx, b.c, &req) + if err != nil { + return nil, err + } + + return NewTask(b.c, res.Returnval), nil +} + +func (b HostDatastoreBrowser) SearchDatastoreSubFolders(ctx context.Context, datastorePath string, searchSpec *types.HostDatastoreBrowserSearchSpec) (*Task, error) { + req := types.SearchDatastoreSubFolders_Task{ + This: b.Reference(), + DatastorePath: datastorePath, + SearchSpec: searchSpec, + } + + res, err := methods.SearchDatastoreSubFolders_Task(ctx, b.c, &req) + if err != nil { + return nil, err + } + + return NewTask(b.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_datastore_system.go b/vendor/github.com/vmware/govmomi/object/host_datastore_system.go new file mode 100644 index 000000000000..7b738e611ed8 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_datastore_system.go @@ -0,0 +1,119 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type HostDatastoreSystem struct { + Common +} + +func NewHostDatastoreSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostDatastoreSystem { + return &HostDatastoreSystem{ + Common: NewCommon(c, ref), + } +} + +func (s HostDatastoreSystem) CreateLocalDatastore(ctx context.Context, name string, path string) (*Datastore, error) { + req := types.CreateLocalDatastore{ + This: s.Reference(), + Name: name, + Path: path, + } + + res, err := methods.CreateLocalDatastore(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return NewDatastore(s.Client(), res.Returnval), nil +} + +func (s HostDatastoreSystem) CreateNasDatastore(ctx context.Context, spec types.HostNasVolumeSpec) (*Datastore, error) { + req := types.CreateNasDatastore{ + This: s.Reference(), + Spec: spec, + } + + res, err := methods.CreateNasDatastore(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return NewDatastore(s.Client(), res.Returnval), nil +} + +func (s HostDatastoreSystem) CreateVmfsDatastore(ctx context.Context, spec types.VmfsDatastoreCreateSpec) (*Datastore, error) { + req := types.CreateVmfsDatastore{ + This: s.Reference(), + Spec: spec, + } + + res, err := methods.CreateVmfsDatastore(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return NewDatastore(s.Client(), res.Returnval), nil +} + +func (s HostDatastoreSystem) Remove(ctx context.Context, ds *Datastore) error { + req := types.RemoveDatastore{ + This: s.Reference(), + Datastore: ds.Reference(), + } + + _, err := methods.RemoveDatastore(ctx, s.Client(), &req) + if err != nil { + return err + } + + return nil +} + +func (s HostDatastoreSystem) QueryAvailableDisksForVmfs(ctx context.Context) ([]types.HostScsiDisk, error) { + req := types.QueryAvailableDisksForVmfs{ + This: s.Reference(), + } + + res, err := methods.QueryAvailableDisksForVmfs(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (s HostDatastoreSystem) QueryVmfsDatastoreCreateOptions(ctx context.Context, devicePath string) ([]types.VmfsDatastoreOption, error) { + req := types.QueryVmfsDatastoreCreateOptions{ + This: s.Reference(), + DevicePath: devicePath, + } + + res, err := methods.QueryVmfsDatastoreCreateOptions(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_date_time_system.go b/vendor/github.com/vmware/govmomi/object/host_date_time_system.go new file mode 100644 index 000000000000..7c9203d7b67e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_date_time_system.go @@ -0,0 +1,69 @@ +/* +Copyright (c) 2016 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
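A sketch of mounting an NFS export with HostDatastoreSystem.CreateNasDatastore above; the remote host, export path and local name are placeholders, and the spec fields used here are assumed from types.HostNasVolumeSpec.

package example

import (
	"context"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/types"
)

// mountNFS attaches an NFS export to the host as a new datastore.
func mountNFS(ctx context.Context, s *object.HostDatastoreSystem) (*object.Datastore, error) {
	spec := types.HostNasVolumeSpec{
		RemoteHost: "nfs.example.org",
		RemotePath: "/export/datastore1",
		LocalPath:  "nfs-datastore1",
		AccessMode: string(types.HostMountModeReadWrite),
	}

	return s.CreateNasDatastore(ctx, spec)
}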
+*/ + +package object + +import ( + "context" + "time" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type HostDateTimeSystem struct { + Common +} + +func NewHostDateTimeSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostDateTimeSystem { + return &HostDateTimeSystem{ + Common: NewCommon(c, ref), + } +} + +func (s HostDateTimeSystem) UpdateConfig(ctx context.Context, config types.HostDateTimeConfig) error { + req := types.UpdateDateTimeConfig{ + This: s.Reference(), + Config: config, + } + + _, err := methods.UpdateDateTimeConfig(ctx, s.c, &req) + return err +} + +func (s HostDateTimeSystem) Update(ctx context.Context, date time.Time) error { + req := types.UpdateDateTime{ + This: s.Reference(), + DateTime: date, + } + + _, err := methods.UpdateDateTime(ctx, s.c, &req) + return err +} + +func (s HostDateTimeSystem) Query(ctx context.Context) (*time.Time, error) { + req := types.QueryDateTime{ + This: s.Reference(), + } + + res, err := methods.QueryDateTime(ctx, s.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_firewall_system.go b/vendor/github.com/vmware/govmomi/object/host_firewall_system.go new file mode 100644 index 000000000000..0b14402531cb --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_firewall_system.go @@ -0,0 +1,181 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type HostFirewallSystem struct { + Common +} + +func NewHostFirewallSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostFirewallSystem { + return &HostFirewallSystem{ + Common: NewCommon(c, ref), + } +} + +func (s HostFirewallSystem) DisableRuleset(ctx context.Context, id string) error { + req := types.DisableRuleset{ + This: s.Reference(), + Id: id, + } + + _, err := methods.DisableRuleset(ctx, s.c, &req) + return err +} + +func (s HostFirewallSystem) EnableRuleset(ctx context.Context, id string) error { + req := types.EnableRuleset{ + This: s.Reference(), + Id: id, + } + + _, err := methods.EnableRuleset(ctx, s.c, &req) + return err +} + +func (s HostFirewallSystem) Refresh(ctx context.Context) error { + req := types.RefreshFirewall{ + This: s.Reference(), + } + + _, err := methods.RefreshFirewall(ctx, s.c, &req) + return err +} + +func (s HostFirewallSystem) Info(ctx context.Context) (*types.HostFirewallInfo, error) { + var fs mo.HostFirewallSystem + + err := s.Properties(ctx, s.Reference(), []string{"firewallInfo"}, &fs) + if err != nil { + return nil, err + } + + return fs.FirewallInfo, nil +} + +// HostFirewallRulesetList provides helpers for a slice of types.HostFirewallRuleset +type HostFirewallRulesetList []types.HostFirewallRuleset + +// ByRule returns a HostFirewallRulesetList where Direction, PortType and Protocol are equal and Port is within range +func (l HostFirewallRulesetList) ByRule(rule types.HostFirewallRule) HostFirewallRulesetList { + var matches HostFirewallRulesetList + + for _, rs := range l { + for _, r := range rs.Rule { + if r.PortType != rule.PortType || + r.Protocol != rule.Protocol || + r.Direction != rule.Direction { + continue + } + + if r.EndPort == 0 && rule.Port == r.Port || + rule.Port >= r.Port && rule.Port <= r.EndPort { + matches = append(matches, rs) + break + } + } + } + + return matches +} + +// EnabledByRule returns a HostFirewallRulesetList with Match(rule) applied and filtered via Enabled() +// if enabled param is true, otherwise filtered via Disabled(). +// An error is returned if the resulting list is empty. 
+func (l HostFirewallRulesetList) EnabledByRule(rule types.HostFirewallRule, enabled bool) (HostFirewallRulesetList, error) { + var matched, skipped HostFirewallRulesetList + var matchedKind, skippedKind string + + l = l.ByRule(rule) + + if enabled { + matched = l.Enabled() + matchedKind = "enabled" + + skipped = l.Disabled() + skippedKind = "disabled" + } else { + matched = l.Disabled() + matchedKind = "disabled" + + skipped = l.Enabled() + skippedKind = "enabled" + } + + if len(matched) == 0 { + msg := fmt.Sprintf("%d %s firewall rulesets match %s %s %s %d, %d %s rulesets match", + len(matched), matchedKind, + rule.Direction, rule.Protocol, rule.PortType, rule.Port, + len(skipped), skippedKind) + + if len(skipped) != 0 { + msg += fmt.Sprintf(": %s", strings.Join(skipped.Keys(), ", ")) + } + + return nil, errors.New(msg) + } + + return matched, nil +} + +// Enabled returns a HostFirewallRulesetList with enabled rules +func (l HostFirewallRulesetList) Enabled() HostFirewallRulesetList { + var matches HostFirewallRulesetList + + for _, rs := range l { + if rs.Enabled { + matches = append(matches, rs) + } + } + + return matches +} + +// Disabled returns a HostFirewallRulesetList with disabled rules +func (l HostFirewallRulesetList) Disabled() HostFirewallRulesetList { + var matches HostFirewallRulesetList + + for _, rs := range l { + if !rs.Enabled { + matches = append(matches, rs) + } + } + + return matches +} + +// Keys returns the HostFirewallRuleset.Key for each ruleset in the list +func (l HostFirewallRulesetList) Keys() []string { + var keys []string + + for _, rs := range l { + keys = append(keys, rs.Key) + } + + return keys +} diff --git a/vendor/github.com/vmware/govmomi/object/host_network_system.go b/vendor/github.com/vmware/govmomi/object/host_network_system.go new file mode 100644 index 000000000000..c21e1ec35d57 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_network_system.go @@ -0,0 +1,358 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type HostNetworkSystem struct { + Common +} + +func NewHostNetworkSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostNetworkSystem { + return &HostNetworkSystem{ + Common: NewCommon(c, ref), + } +} + +// AddPortGroup wraps methods.AddPortGroup +func (o HostNetworkSystem) AddPortGroup(ctx context.Context, portgrp types.HostPortGroupSpec) error { + req := types.AddPortGroup{ + This: o.Reference(), + Portgrp: portgrp, + } + + _, err := methods.AddPortGroup(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// AddServiceConsoleVirtualNic wraps methods.AddServiceConsoleVirtualNic +func (o HostNetworkSystem) AddServiceConsoleVirtualNic(ctx context.Context, portgroup string, nic types.HostVirtualNicSpec) (string, error) { + req := types.AddServiceConsoleVirtualNic{ + This: o.Reference(), + Portgroup: portgroup, + Nic: nic, + } + + res, err := methods.AddServiceConsoleVirtualNic(ctx, o.c, &req) + if err != nil { + return "", err + } + + return res.Returnval, nil +} + +// AddVirtualNic wraps methods.AddVirtualNic +func (o HostNetworkSystem) AddVirtualNic(ctx context.Context, portgroup string, nic types.HostVirtualNicSpec) (string, error) { + req := types.AddVirtualNic{ + This: o.Reference(), + Portgroup: portgroup, + Nic: nic, + } + + res, err := methods.AddVirtualNic(ctx, o.c, &req) + if err != nil { + return "", err + } + + return res.Returnval, nil +} + +// AddVirtualSwitch wraps methods.AddVirtualSwitch +func (o HostNetworkSystem) AddVirtualSwitch(ctx context.Context, vswitchName string, spec *types.HostVirtualSwitchSpec) error { + req := types.AddVirtualSwitch{ + This: o.Reference(), + VswitchName: vswitchName, + Spec: spec, + } + + _, err := methods.AddVirtualSwitch(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// QueryNetworkHint wraps methods.QueryNetworkHint +func (o HostNetworkSystem) QueryNetworkHint(ctx context.Context, device []string) error { + req := types.QueryNetworkHint{ + This: o.Reference(), + Device: device, + } + + _, err := methods.QueryNetworkHint(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// RefreshNetworkSystem wraps methods.RefreshNetworkSystem +func (o HostNetworkSystem) RefreshNetworkSystem(ctx context.Context) error { + req := types.RefreshNetworkSystem{ + This: o.Reference(), + } + + _, err := methods.RefreshNetworkSystem(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// RemovePortGroup wraps methods.RemovePortGroup +func (o HostNetworkSystem) RemovePortGroup(ctx context.Context, pgName string) error { + req := types.RemovePortGroup{ + This: o.Reference(), + PgName: pgName, + } + + _, err := methods.RemovePortGroup(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// RemoveServiceConsoleVirtualNic wraps methods.RemoveServiceConsoleVirtualNic +func (o HostNetworkSystem) RemoveServiceConsoleVirtualNic(ctx context.Context, device string) error { + req := types.RemoveServiceConsoleVirtualNic{ + This: o.Reference(), + Device: device, + } + + _, err := methods.RemoveServiceConsoleVirtualNic(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// RemoveVirtualNic wraps methods.RemoveVirtualNic +func (o HostNetworkSystem) RemoveVirtualNic(ctx context.Context, device string) error { + req := types.RemoveVirtualNic{ + This: o.Reference(), + Device: 
device, + } + + _, err := methods.RemoveVirtualNic(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// RemoveVirtualSwitch wraps methods.RemoveVirtualSwitch +func (o HostNetworkSystem) RemoveVirtualSwitch(ctx context.Context, vswitchName string) error { + req := types.RemoveVirtualSwitch{ + This: o.Reference(), + VswitchName: vswitchName, + } + + _, err := methods.RemoveVirtualSwitch(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// RestartServiceConsoleVirtualNic wraps methods.RestartServiceConsoleVirtualNic +func (o HostNetworkSystem) RestartServiceConsoleVirtualNic(ctx context.Context, device string) error { + req := types.RestartServiceConsoleVirtualNic{ + This: o.Reference(), + Device: device, + } + + _, err := methods.RestartServiceConsoleVirtualNic(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateConsoleIpRouteConfig wraps methods.UpdateConsoleIpRouteConfig +func (o HostNetworkSystem) UpdateConsoleIpRouteConfig(ctx context.Context, config types.BaseHostIpRouteConfig) error { + req := types.UpdateConsoleIpRouteConfig{ + This: o.Reference(), + Config: config, + } + + _, err := methods.UpdateConsoleIpRouteConfig(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateDnsConfig wraps methods.UpdateDnsConfig +func (o HostNetworkSystem) UpdateDnsConfig(ctx context.Context, config types.BaseHostDnsConfig) error { + req := types.UpdateDnsConfig{ + This: o.Reference(), + Config: config, + } + + _, err := methods.UpdateDnsConfig(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateIpRouteConfig wraps methods.UpdateIpRouteConfig +func (o HostNetworkSystem) UpdateIpRouteConfig(ctx context.Context, config types.BaseHostIpRouteConfig) error { + req := types.UpdateIpRouteConfig{ + This: o.Reference(), + Config: config, + } + + _, err := methods.UpdateIpRouteConfig(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateIpRouteTableConfig wraps methods.UpdateIpRouteTableConfig +func (o HostNetworkSystem) UpdateIpRouteTableConfig(ctx context.Context, config types.HostIpRouteTableConfig) error { + req := types.UpdateIpRouteTableConfig{ + This: o.Reference(), + Config: config, + } + + _, err := methods.UpdateIpRouteTableConfig(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateNetworkConfig wraps methods.UpdateNetworkConfig +func (o HostNetworkSystem) UpdateNetworkConfig(ctx context.Context, config types.HostNetworkConfig, changeMode string) (*types.HostNetworkConfigResult, error) { + req := types.UpdateNetworkConfig{ + This: o.Reference(), + Config: config, + ChangeMode: changeMode, + } + + res, err := methods.UpdateNetworkConfig(ctx, o.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +// UpdatePhysicalNicLinkSpeed wraps methods.UpdatePhysicalNicLinkSpeed +func (o HostNetworkSystem) UpdatePhysicalNicLinkSpeed(ctx context.Context, device string, linkSpeed *types.PhysicalNicLinkInfo) error { + req := types.UpdatePhysicalNicLinkSpeed{ + This: o.Reference(), + Device: device, + LinkSpeed: linkSpeed, + } + + _, err := methods.UpdatePhysicalNicLinkSpeed(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdatePortGroup wraps methods.UpdatePortGroup +func (o HostNetworkSystem) UpdatePortGroup(ctx context.Context, pgName string, portgrp types.HostPortGroupSpec) error { + req := types.UpdatePortGroup{ + This: o.Reference(), + PgName: pgName, + Portgrp: portgrp, + } 
+ + _, err := methods.UpdatePortGroup(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateServiceConsoleVirtualNic wraps methods.UpdateServiceConsoleVirtualNic +func (o HostNetworkSystem) UpdateServiceConsoleVirtualNic(ctx context.Context, device string, nic types.HostVirtualNicSpec) error { + req := types.UpdateServiceConsoleVirtualNic{ + This: o.Reference(), + Device: device, + Nic: nic, + } + + _, err := methods.UpdateServiceConsoleVirtualNic(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateVirtualNic wraps methods.UpdateVirtualNic +func (o HostNetworkSystem) UpdateVirtualNic(ctx context.Context, device string, nic types.HostVirtualNicSpec) error { + req := types.UpdateVirtualNic{ + This: o.Reference(), + Device: device, + Nic: nic, + } + + _, err := methods.UpdateVirtualNic(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} + +// UpdateVirtualSwitch wraps methods.UpdateVirtualSwitch +func (o HostNetworkSystem) UpdateVirtualSwitch(ctx context.Context, vswitchName string, spec types.HostVirtualSwitchSpec) error { + req := types.UpdateVirtualSwitch{ + This: o.Reference(), + VswitchName: vswitchName, + Spec: spec, + } + + _, err := methods.UpdateVirtualSwitch(ctx, o.c, &req) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_service_system.go b/vendor/github.com/vmware/govmomi/object/host_service_system.go new file mode 100644 index 000000000000..a66b32c17c1d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_service_system.go @@ -0,0 +1,88 @@ +/* +Copyright (c) 2016 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type HostServiceSystem struct { + Common +} + +func NewHostServiceSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostServiceSystem { + return &HostServiceSystem{ + Common: NewCommon(c, ref), + } +} + +func (s HostServiceSystem) Service(ctx context.Context) ([]types.HostService, error) { + var ss mo.HostServiceSystem + + err := s.Properties(ctx, s.Reference(), []string{"serviceInfo.service"}, &ss) + if err != nil { + return nil, err + } + + return ss.ServiceInfo.Service, nil +} + +func (s HostServiceSystem) Start(ctx context.Context, id string) error { + req := types.StartService{ + This: s.Reference(), + Id: id, + } + + _, err := methods.StartService(ctx, s.Client(), &req) + return err +} + +func (s HostServiceSystem) Stop(ctx context.Context, id string) error { + req := types.StopService{ + This: s.Reference(), + Id: id, + } + + _, err := methods.StopService(ctx, s.Client(), &req) + return err +} + +func (s HostServiceSystem) Restart(ctx context.Context, id string) error { + req := types.RestartService{ + This: s.Reference(), + Id: id, + } + + _, err := methods.RestartService(ctx, s.Client(), &req) + return err +} + +func (s HostServiceSystem) UpdatePolicy(ctx context.Context, id string, policy string) error { + req := types.UpdateServicePolicy{ + This: s.Reference(), + Id: id, + Policy: policy, + } + + _, err := methods.UpdateServicePolicy(ctx, s.Client(), &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/host_storage_system.go b/vendor/github.com/vmware/govmomi/object/host_storage_system.go new file mode 100644 index 000000000000..5990a1d4e6bb --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_storage_system.go @@ -0,0 +1,174 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "context" + "errors" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type HostStorageSystem struct { + Common +} + +func NewHostStorageSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostStorageSystem { + return &HostStorageSystem{ + Common: NewCommon(c, ref), + } +} + +func (s HostStorageSystem) RetrieveDiskPartitionInfo(ctx context.Context, devicePath string) (*types.HostDiskPartitionInfo, error) { + req := types.RetrieveDiskPartitionInfo{ + This: s.Reference(), + DevicePath: []string{devicePath}, + } + + res, err := methods.RetrieveDiskPartitionInfo(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if res.Returnval == nil || len(res.Returnval) == 0 { + return nil, errors.New("no partition info") + } + + return &res.Returnval[0], nil +} + +func (s HostStorageSystem) ComputeDiskPartitionInfo(ctx context.Context, devicePath string, layout types.HostDiskPartitionLayout) (*types.HostDiskPartitionInfo, error) { + req := types.ComputeDiskPartitionInfo{ + This: s.Reference(), + DevicePath: devicePath, + Layout: layout, + } + + res, err := methods.ComputeDiskPartitionInfo(ctx, s.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (s HostStorageSystem) UpdateDiskPartitionInfo(ctx context.Context, devicePath string, spec types.HostDiskPartitionSpec) error { + req := types.UpdateDiskPartitions{ + This: s.Reference(), + DevicePath: devicePath, + Spec: spec, + } + + _, err := methods.UpdateDiskPartitions(ctx, s.c, &req) + return err +} + +func (s HostStorageSystem) RescanAllHba(ctx context.Context) error { + req := types.RescanAllHba{ + This: s.Reference(), + } + + _, err := methods.RescanAllHba(ctx, s.c, &req) + return err +} + +func (s HostStorageSystem) Refresh(ctx context.Context) error { + req := types.RefreshStorageSystem{ + This: s.Reference(), + } + + _, err := methods.RefreshStorageSystem(ctx, s.c, &req) + return err +} + +func (s HostStorageSystem) RescanVmfs(ctx context.Context) error { + req := types.RescanVmfs{ + This: s.Reference(), + } + + _, err := methods.RescanVmfs(ctx, s.c, &req) + return err +} + +func (s HostStorageSystem) MarkAsSsd(ctx context.Context, uuid string) (*Task, error) { + req := types.MarkAsSsd_Task{ + This: s.Reference(), + ScsiDiskUuid: uuid, + } + + res, err := methods.MarkAsSsd_Task(ctx, s.c, &req) + if err != nil { + return nil, err + } + + return NewTask(s.c, res.Returnval), nil +} + +func (s HostStorageSystem) MarkAsNonSsd(ctx context.Context, uuid string) (*Task, error) { + req := types.MarkAsNonSsd_Task{ + This: s.Reference(), + ScsiDiskUuid: uuid, + } + + res, err := methods.MarkAsNonSsd_Task(ctx, s.c, &req) + if err != nil { + return nil, err + } + + return NewTask(s.c, res.Returnval), nil +} + +func (s HostStorageSystem) MarkAsLocal(ctx context.Context, uuid string) (*Task, error) { + req := types.MarkAsLocal_Task{ + This: s.Reference(), + ScsiDiskUuid: uuid, + } + + res, err := methods.MarkAsLocal_Task(ctx, s.c, &req) + if err != nil { + return nil, err + } + + return NewTask(s.c, res.Returnval), nil +} + +func (s HostStorageSystem) MarkAsNonLocal(ctx context.Context, uuid string) (*Task, error) { + req := types.MarkAsNonLocal_Task{ + This: s.Reference(), + ScsiDiskUuid: uuid, + } + + res, err := methods.MarkAsNonLocal_Task(ctx, s.c, &req) + if err != nil { + return nil, err + } + + return NewTask(s.c, res.Returnval), nil +} + +func (s HostStorageSystem) AttachScsiLun(ctx 
context.Context, uuid string) error { + req := types.AttachScsiLun{ + This: s.Reference(), + LunUuid: uuid, + } + + _, err := methods.AttachScsiLun(ctx, s.c, &req) + + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/host_system.go b/vendor/github.com/vmware/govmomi/object/host_system.go new file mode 100644 index 000000000000..a350edfdebd6 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_system.go @@ -0,0 +1,153 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + "fmt" + "net" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type HostSystem struct { + Common +} + +func NewHostSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostSystem { + return &HostSystem{ + Common: NewCommon(c, ref), + } +} + +func (h HostSystem) ConfigManager() *HostConfigManager { + return NewHostConfigManager(h.c, h.Reference()) +} + +func (h HostSystem) ResourcePool(ctx context.Context) (*ResourcePool, error) { + var mh mo.HostSystem + + err := h.Properties(ctx, h.Reference(), []string{"parent"}, &mh) + if err != nil { + return nil, err + } + + var mcr *mo.ComputeResource + var parent interface{} + + switch mh.Parent.Type { + case "ComputeResource": + mcr = new(mo.ComputeResource) + parent = mcr + case "ClusterComputeResource": + mcc := new(mo.ClusterComputeResource) + mcr = &mcc.ComputeResource + parent = mcc + default: + return nil, fmt.Errorf("unknown host parent type: %s", mh.Parent.Type) + } + + err = h.Properties(ctx, *mh.Parent, []string{"resourcePool"}, parent) + if err != nil { + return nil, err + } + + pool := NewResourcePool(h.c, *mcr.ResourcePool) + return pool, nil +} + +func (h HostSystem) ManagementIPs(ctx context.Context) ([]net.IP, error) { + var mh mo.HostSystem + + err := h.Properties(ctx, h.Reference(), []string{"config.virtualNicManagerInfo.netConfig"}, &mh) + if err != nil { + return nil, err + } + + var ips []net.IP + for _, nc := range mh.Config.VirtualNicManagerInfo.NetConfig { + if nc.NicType == "management" && len(nc.CandidateVnic) > 0 { + ip := net.ParseIP(nc.CandidateVnic[0].Spec.Ip.IpAddress) + if ip != nil { + ips = append(ips, ip) + } + } + } + + return ips, nil +} + +func (h HostSystem) Disconnect(ctx context.Context) (*Task, error) { + req := types.DisconnectHost_Task{ + This: h.Reference(), + } + + res, err := methods.DisconnectHost_Task(ctx, h.c, &req) + if err != nil { + return nil, err + } + + return NewTask(h.c, res.Returnval), nil +} + +func (h HostSystem) Reconnect(ctx context.Context, cnxSpec *types.HostConnectSpec, reconnectSpec *types.HostSystemReconnectSpec) (*Task, error) { + req := types.ReconnectHost_Task{ + This: h.Reference(), + CnxSpec: cnxSpec, + ReconnectSpec: reconnectSpec, + } + + res, err := methods.ReconnectHost_Task(ctx, h.c, &req) + if err != nil { + return nil, err + } + + return NewTask(h.c, res.Returnval), nil +} 
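+
+// Usage sketch, assuming an existing *HostSystem named host and a
+// context.Context named ctx (both names are illustrative): the *Task
+// returned by helpers such as Disconnect and Reconnect is normally passed
+// to Task.Wait to block until vCenter reports the operation complete:
+//
+//	task, err := host.Disconnect(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	if err := task.Wait(ctx); err != nil {
+//		return err
+//	}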
+ +func (h HostSystem) EnterMaintenanceMode(ctx context.Context, timeout int32, evacuate bool, spec *types.HostMaintenanceSpec) (*Task, error) { + req := types.EnterMaintenanceMode_Task{ + This: h.Reference(), + Timeout: timeout, + EvacuatePoweredOffVms: types.NewBool(evacuate), + MaintenanceSpec: spec, + } + + res, err := methods.EnterMaintenanceMode_Task(ctx, h.c, &req) + if err != nil { + return nil, err + } + + return NewTask(h.c, res.Returnval), nil +} + +func (h HostSystem) ExitMaintenanceMode(ctx context.Context, timeout int32) (*Task, error) { + req := types.ExitMaintenanceMode_Task{ + This: h.Reference(), + Timeout: timeout, + } + + res, err := methods.ExitMaintenanceMode_Task(ctx, h.c, &req) + if err != nil { + return nil, err + } + + return NewTask(h.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_virtual_nic_manager.go b/vendor/github.com/vmware/govmomi/object/host_virtual_nic_manager.go new file mode 100644 index 000000000000..01e7e9cd4b0c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_virtual_nic_manager.go @@ -0,0 +1,93 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type HostVirtualNicManager struct { + Common + Host *HostSystem +} + +func NewHostVirtualNicManager(c *vim25.Client, ref types.ManagedObjectReference, host types.ManagedObjectReference) *HostVirtualNicManager { + return &HostVirtualNicManager{ + Common: NewCommon(c, ref), + Host: NewHostSystem(c, host), + } +} + +func (m HostVirtualNicManager) Info(ctx context.Context) (*types.HostVirtualNicManagerInfo, error) { + var vnm mo.HostVirtualNicManager + + err := m.Properties(ctx, m.Reference(), []string{"info"}, &vnm) + if err != nil { + return nil, err + } + + return &vnm.Info, nil +} + +func (m HostVirtualNicManager) DeselectVnic(ctx context.Context, nicType string, device string) error { + if nicType == string(types.HostVirtualNicManagerNicTypeVsan) { + // Avoid fault.NotSupported: + // "Error deselecting device '$device': VSAN interfaces must be deselected using vim.host.VsanSystem" + s, err := m.Host.ConfigManager().VsanSystem(ctx) + if err != nil { + return err + } + + return s.updateVnic(ctx, device, false) + } + + req := types.DeselectVnicForNicType{ + This: m.Reference(), + NicType: nicType, + Device: device, + } + + _, err := methods.DeselectVnicForNicType(ctx, m.Client(), &req) + return err +} + +func (m HostVirtualNicManager) SelectVnic(ctx context.Context, nicType string, device string) error { + if nicType == string(types.HostVirtualNicManagerNicTypeVsan) { + // Avoid fault.NotSupported: + // "Error selecting device '$device': VSAN interfaces must be selected using vim.host.VsanSystem" + s, err := m.Host.ConfigManager().VsanSystem(ctx) + if err != nil { + return err + } + + return s.updateVnic(ctx, 
device, true) + } + + req := types.SelectVnicForNicType{ + This: m.Reference(), + NicType: nicType, + Device: device, + } + + _, err := methods.SelectVnicForNicType(ctx, m.Client(), &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/host_vsan_internal_system.go b/vendor/github.com/vmware/govmomi/object/host_vsan_internal_system.go new file mode 100644 index 000000000000..1430e8a88221 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_vsan_internal_system.go @@ -0,0 +1,117 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + "encoding/json" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type HostVsanInternalSystem struct { + Common +} + +func NewHostVsanInternalSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostVsanInternalSystem { + m := HostVsanInternalSystem{ + Common: NewCommon(c, ref), + } + + return &m +} + +// QueryVsanObjectUuidsByFilter returns vSAN DOM object uuids by filter. +func (m HostVsanInternalSystem) QueryVsanObjectUuidsByFilter(ctx context.Context, uuids []string, limit int32, version int32) ([]string, error) { + req := types.QueryVsanObjectUuidsByFilter{ + This: m.Reference(), + Uuids: uuids, + Limit: &limit, + Version: version, + } + + res, err := methods.QueryVsanObjectUuidsByFilter(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +type VsanObjExtAttrs struct { + Type string `json:"Object type"` + Class string `json:"Object class"` + Size string `json:"Object size"` + Path string `json:"Object path"` + Name string `json:"User friendly name"` +} + +func (a *VsanObjExtAttrs) DatastorePath(dir string) string { + l := len(dir) + path := a.Path + + if len(path) >= l { + path = a.Path[l:] + } + + if path != "" { + return path + } + + return a.Name // vmnamespace +} + +// GetVsanObjExtAttrs is internal and intended for troubleshooting/debugging situations in the field. +// WARNING: This API can be slow because we do IOs (reads) to all the objects. +func (m HostVsanInternalSystem) GetVsanObjExtAttrs(ctx context.Context, uuids []string) (map[string]VsanObjExtAttrs, error) { + req := types.GetVsanObjExtAttrs{ + This: m.Reference(), + Uuids: uuids, + } + + res, err := methods.GetVsanObjExtAttrs(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + var attrs map[string]VsanObjExtAttrs + + err = json.Unmarshal([]byte(res.Returnval), &attrs) + + return attrs, err +} + +// DeleteVsanObjects is internal and intended for troubleshooting/debugging only. +// WARNING: This API can be slow because we do IOs to all the objects. +// DOM won't allow access to objects which have lost quorum. Such objects can be deleted with the optional "force" flag. +// These objects may however re-appear with quorum if the absent components come back (network partition gets resolved, etc.) 
+func (m HostVsanInternalSystem) DeleteVsanObjects(ctx context.Context, uuids []string, force *bool) ([]types.HostVsanInternalSystemDeleteVsanObjectsResult, error) { + req := types.DeleteVsanObjects{ + This: m.Reference(), + Uuids: uuids, + Force: force, + } + + res, err := methods.DeleteVsanObjects(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_vsan_system.go b/vendor/github.com/vmware/govmomi/object/host_vsan_system.go new file mode 100644 index 000000000000..5ab234d5e53a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/host_vsan_system.go @@ -0,0 +1,88 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type HostVsanSystem struct { + Common +} + +func NewHostVsanSystem(c *vim25.Client, ref types.ManagedObjectReference) *HostVsanSystem { + return &HostVsanSystem{ + Common: NewCommon(c, ref), + } +} + +func (s HostVsanSystem) Update(ctx context.Context, config types.VsanHostConfigInfo) (*Task, error) { + req := types.UpdateVsan_Task{ + This: s.Reference(), + Config: config, + } + + res, err := methods.UpdateVsan_Task(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return NewTask(s.Client(), res.Returnval), nil +} + +// updateVnic in support of the HostVirtualNicManager.{SelectVnic,DeselectVnic} methods +func (s HostVsanSystem) updateVnic(ctx context.Context, device string, enable bool) error { + var vsan mo.HostVsanSystem + + err := s.Properties(ctx, s.Reference(), []string{"config.networkInfo.port"}, &vsan) + if err != nil { + return err + } + + info := vsan.Config + + var port []types.VsanHostConfigInfoNetworkInfoPortConfig + + for _, p := range info.NetworkInfo.Port { + if p.Device == device { + continue + } + + port = append(port, p) + } + + if enable { + port = append(port, types.VsanHostConfigInfoNetworkInfoPortConfig{ + Device: device, + }) + } + + info.NetworkInfo.Port = port + + task, err := s.Update(ctx, info) + if err != nil { + return err + } + + _, err = task.WaitForResult(ctx, nil) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/namespace_manager.go b/vendor/github.com/vmware/govmomi/object/namespace_manager.go new file mode 100644 index 000000000000..f463b368cdea --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/namespace_manager.go @@ -0,0 +1,76 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type DatastoreNamespaceManager struct { + Common +} + +func NewDatastoreNamespaceManager(c *vim25.Client) *DatastoreNamespaceManager { + n := DatastoreNamespaceManager{ + Common: NewCommon(c, *c.ServiceContent.DatastoreNamespaceManager), + } + + return &n +} + +// CreateDirectory creates a top-level directory on the given vsan datastore, using +// the given user display name hint and opaque storage policy. +func (nm DatastoreNamespaceManager) CreateDirectory(ctx context.Context, ds *Datastore, displayName string, policy string) (string, error) { + + req := &types.CreateDirectory{ + This: nm.Reference(), + Datastore: ds.Reference(), + DisplayName: displayName, + Policy: policy, + } + + resp, err := methods.CreateDirectory(ctx, nm.c, req) + if err != nil { + return "", err + } + + return resp.Returnval, nil +} + +// DeleteDirectory deletes the given top-level directory from a vsan datastore. +func (nm DatastoreNamespaceManager) DeleteDirectory(ctx context.Context, dc *Datacenter, datastorePath string) error { + + req := &types.DeleteDirectory{ + This: nm.Reference(), + DatastorePath: datastorePath, + } + + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + if _, err := methods.DeleteDirectory(ctx, nm.c, req); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/object/network.go b/vendor/github.com/vmware/govmomi/object/network.go new file mode 100644 index 000000000000..d1dc7ce01f61 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/network.go @@ -0,0 +1,56 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type Network struct { + Common +} + +func NewNetwork(c *vim25.Client, ref types.ManagedObjectReference) *Network { + return &Network{ + Common: NewCommon(c, ref), + } +} + +// EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this Network +func (n Network) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { + var e mo.Network + + // Use Network.Name rather than Common.Name as the latter does not return the complete name if it contains a '/' + // We can't use Common.ObjectName here either as we need the ManagedEntity.Name field is not set since mo.Network + // has its own Name field. 
+ err := n.Properties(ctx, n.Reference(), []string{"name"}, &e) + if err != nil { + return nil, err + } + + backing := &types.VirtualEthernetCardNetworkBackingInfo{ + VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{ + DeviceName: e.Name, + }, + } + + return backing, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/network_reference.go b/vendor/github.com/vmware/govmomi/object/network_reference.go new file mode 100644 index 000000000000..7716bdb38fae --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/network_reference.go @@ -0,0 +1,31 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25/types" +) + +// The NetworkReference interface is implemented by managed objects +// which can be used as the backing for a VirtualEthernetCard. +type NetworkReference interface { + Reference + + EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) +} diff --git a/vendor/github.com/vmware/govmomi/object/opaque_network.go b/vendor/github.com/vmware/govmomi/object/opaque_network.go new file mode 100644 index 000000000000..47ce4cefe622 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/opaque_network.go @@ -0,0 +1,57 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "context" + "fmt" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type OpaqueNetwork struct { + Common +} + +func NewOpaqueNetwork(c *vim25.Client, ref types.ManagedObjectReference) *OpaqueNetwork { + return &OpaqueNetwork{ + Common: NewCommon(c, ref), + } +} + +// EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this Network +func (n OpaqueNetwork) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { + var net mo.OpaqueNetwork + + if err := n.Properties(ctx, n.Reference(), []string{"summary"}, &net); err != nil { + return nil, err + } + + summary, ok := net.Summary.(*types.OpaqueNetworkSummary) + if !ok { + return nil, fmt.Errorf("%s unsupported network type: %T", n, net.Summary) + } + + backing := &types.VirtualEthernetCardOpaqueNetworkBackingInfo{ + OpaqueNetworkId: summary.OpaqueNetworkId, + OpaqueNetworkType: summary.OpaqueNetworkType, + } + + return backing, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/option_manager.go b/vendor/github.com/vmware/govmomi/object/option_manager.go new file mode 100644 index 000000000000..7f93273aac37 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/option_manager.go @@ -0,0 +1,59 @@ +/* +Copyright (c) 2016 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type OptionManager struct { + Common +} + +func NewOptionManager(c *vim25.Client, ref types.ManagedObjectReference) *OptionManager { + return &OptionManager{ + Common: NewCommon(c, ref), + } +} + +func (m OptionManager) Query(ctx context.Context, name string) ([]types.BaseOptionValue, error) { + req := types.QueryOptions{ + This: m.Reference(), + Name: name, + } + + res, err := methods.QueryOptions(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (m OptionManager) Update(ctx context.Context, value []types.BaseOptionValue) error { + req := types.UpdateOptions{ + This: m.Reference(), + ChangedValue: value, + } + + _, err := methods.UpdateOptions(ctx, m.Client(), &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/resource_pool.go b/vendor/github.com/vmware/govmomi/object/resource_pool.go new file mode 100644 index 000000000000..55c2e2b2fdea --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/resource_pool.go @@ -0,0 +1,138 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/nfc" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type ResourcePool struct { + Common +} + +func NewResourcePool(c *vim25.Client, ref types.ManagedObjectReference) *ResourcePool { + return &ResourcePool{ + Common: NewCommon(c, ref), + } +} + +func (p ResourcePool) ImportVApp(ctx context.Context, spec types.BaseImportSpec, folder *Folder, host *HostSystem) (*nfc.Lease, error) { + req := types.ImportVApp{ + This: p.Reference(), + Spec: spec, + } + + if folder != nil { + ref := folder.Reference() + req.Folder = &ref + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + res, err := methods.ImportVApp(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return nfc.NewLease(p.c, res.Returnval), nil +} + +func (p ResourcePool) Create(ctx context.Context, name string, spec types.ResourceConfigSpec) (*ResourcePool, error) { + req := types.CreateResourcePool{ + This: p.Reference(), + Name: name, + Spec: spec, + } + + res, err := methods.CreateResourcePool(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewResourcePool(p.c, res.Returnval), nil +} + +func (p ResourcePool) CreateVApp(ctx context.Context, name string, resSpec types.ResourceConfigSpec, configSpec types.VAppConfigSpec, folder *Folder) (*VirtualApp, error) { + req := types.CreateVApp{ + This: p.Reference(), + Name: name, + ResSpec: resSpec, + ConfigSpec: configSpec, + } + + if folder != nil { + ref := folder.Reference() + req.VmFolder = &ref + } + + res, err := methods.CreateVApp(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewVirtualApp(p.c, res.Returnval), nil +} + +func (p ResourcePool) UpdateConfig(ctx context.Context, name string, config *types.ResourceConfigSpec) error { + req := types.UpdateConfig{ + This: p.Reference(), + Name: name, + Config: config, + } + + if config != nil && config.Entity == nil { + ref := p.Reference() + + // Create copy of config so changes won't leak back to the caller + newConfig := *config + newConfig.Entity = &ref + req.Config = &newConfig + } + + _, err := methods.UpdateConfig(ctx, p.c, &req) + return err +} + +func (p ResourcePool) DestroyChildren(ctx context.Context) error { + req := types.DestroyChildren{ + This: p.Reference(), + } + + _, err := methods.DestroyChildren(ctx, p.c, &req) + return err +} + +func (p ResourcePool) Destroy(ctx context.Context) (*Task, error) { + req := types.Destroy_Task{ + This: p.Reference(), + } + + res, err := methods.Destroy_Task(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewTask(p.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/search_index.go b/vendor/github.com/vmware/govmomi/object/search_index.go new file mode 100644 index 000000000000..4b0a525d5288 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/search_index.go @@ -0,0 +1,163 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type SearchIndex struct { + Common +} + +func NewSearchIndex(c *vim25.Client) *SearchIndex { + s := SearchIndex{ + Common: NewCommon(c, *c.ServiceContent.SearchIndex), + } + + return &s +} + +// FindByDatastorePath finds a virtual machine by its location on a datastore. +func (s SearchIndex) FindByDatastorePath(ctx context.Context, dc *Datacenter, path string) (Reference, error) { + req := types.FindByDatastorePath{ + This: s.Reference(), + Datacenter: dc.Reference(), + Path: path, + } + + res, err := methods.FindByDatastorePath(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if res.Returnval == nil { + return nil, nil + } + return NewReference(s.c, *res.Returnval), nil +} + +// FindByDnsName finds a virtual machine or host by DNS name. +func (s SearchIndex) FindByDnsName(ctx context.Context, dc *Datacenter, dnsName string, vmSearch bool) (Reference, error) { + req := types.FindByDnsName{ + This: s.Reference(), + DnsName: dnsName, + VmSearch: vmSearch, + } + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.FindByDnsName(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if res.Returnval == nil { + return nil, nil + } + return NewReference(s.c, *res.Returnval), nil +} + +// FindByInventoryPath finds a managed entity based on its location in the inventory. +func (s SearchIndex) FindByInventoryPath(ctx context.Context, path string) (Reference, error) { + req := types.FindByInventoryPath{ + This: s.Reference(), + InventoryPath: path, + } + + res, err := methods.FindByInventoryPath(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if res.Returnval == nil { + return nil, nil + } + return NewReference(s.c, *res.Returnval), nil +} + +// FindByIp finds a virtual machine or host by IP address. +func (s SearchIndex) FindByIp(ctx context.Context, dc *Datacenter, ip string, vmSearch bool) (Reference, error) { + req := types.FindByIp{ + This: s.Reference(), + Ip: ip, + VmSearch: vmSearch, + } + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.FindByIp(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if res.Returnval == nil { + return nil, nil + } + return NewReference(s.c, *res.Returnval), nil +} + +// FindByUuid finds a virtual machine or host by UUID. 
+func (s SearchIndex) FindByUuid(ctx context.Context, dc *Datacenter, uuid string, vmSearch bool, instanceUuid *bool) (Reference, error) { + req := types.FindByUuid{ + This: s.Reference(), + Uuid: uuid, + VmSearch: vmSearch, + InstanceUuid: instanceUuid, + } + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.FindByUuid(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if res.Returnval == nil { + return nil, nil + } + return NewReference(s.c, *res.Returnval), nil +} + +// FindChild finds a particular child based on a managed entity name. +func (s SearchIndex) FindChild(ctx context.Context, entity Reference, name string) (Reference, error) { + req := types.FindChild{ + This: s.Reference(), + Entity: entity.Reference(), + Name: name, + } + + res, err := methods.FindChild(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if res.Returnval == nil { + return nil, nil + } + return NewReference(s.c, *res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/storage_pod.go b/vendor/github.com/vmware/govmomi/object/storage_pod.go new file mode 100644 index 000000000000..188b91a002e9 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/storage_pod.go @@ -0,0 +1,34 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/types" +) + +type StoragePod struct { + *Folder +} + +func NewStoragePod(c *vim25.Client, ref types.ManagedObjectReference) *StoragePod { + return &StoragePod{ + Folder: &Folder{ + Common: NewCommon(c, ref), + }, + } +} diff --git a/vendor/github.com/vmware/govmomi/object/storage_resource_manager.go b/vendor/github.com/vmware/govmomi/object/storage_resource_manager.go new file mode 100644 index 000000000000..579bcd4d7ee0 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/storage_resource_manager.go @@ -0,0 +1,179 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type StorageResourceManager struct { + Common +} + +func NewStorageResourceManager(c *vim25.Client) *StorageResourceManager { + sr := StorageResourceManager{ + Common: NewCommon(c, *c.ServiceContent.StorageResourceManager), + } + + return &sr +} + +func (sr StorageResourceManager) ApplyStorageDrsRecommendation(ctx context.Context, key []string) (*Task, error) { + req := types.ApplyStorageDrsRecommendation_Task{ + This: sr.Reference(), + Key: key, + } + + res, err := methods.ApplyStorageDrsRecommendation_Task(ctx, sr.c, &req) + if err != nil { + return nil, err + } + + return NewTask(sr.c, res.Returnval), nil +} + +func (sr StorageResourceManager) ApplyStorageDrsRecommendationToPod(ctx context.Context, pod *StoragePod, key string) (*Task, error) { + req := types.ApplyStorageDrsRecommendationToPod_Task{ + This: sr.Reference(), + Key: key, + } + + if pod != nil { + req.Pod = pod.Reference() + } + + res, err := methods.ApplyStorageDrsRecommendationToPod_Task(ctx, sr.c, &req) + if err != nil { + return nil, err + } + + return NewTask(sr.c, res.Returnval), nil +} + +func (sr StorageResourceManager) CancelStorageDrsRecommendation(ctx context.Context, key []string) error { + req := types.CancelStorageDrsRecommendation{ + This: sr.Reference(), + Key: key, + } + + _, err := methods.CancelStorageDrsRecommendation(ctx, sr.c, &req) + + return err +} + +func (sr StorageResourceManager) ConfigureDatastoreIORM(ctx context.Context, datastore *Datastore, spec types.StorageIORMConfigSpec, key string) (*Task, error) { + req := types.ConfigureDatastoreIORM_Task{ + This: sr.Reference(), + Spec: spec, + } + + if datastore != nil { + req.Datastore = datastore.Reference() + } + + res, err := methods.ConfigureDatastoreIORM_Task(ctx, sr.c, &req) + if err != nil { + return nil, err + } + + return NewTask(sr.c, res.Returnval), nil +} + +func (sr StorageResourceManager) ConfigureStorageDrsForPod(ctx context.Context, pod *StoragePod, spec types.StorageDrsConfigSpec, modify bool) (*Task, error) { + req := types.ConfigureStorageDrsForPod_Task{ + This: sr.Reference(), + Spec: spec, + Modify: modify, + } + + if pod != nil { + req.Pod = pod.Reference() + } + + res, err := methods.ConfigureStorageDrsForPod_Task(ctx, sr.c, &req) + if err != nil { + return nil, err + } + + return NewTask(sr.c, res.Returnval), nil +} + +func (sr StorageResourceManager) QueryDatastorePerformanceSummary(ctx context.Context, datastore *Datastore) ([]types.StoragePerformanceSummary, error) { + req := types.QueryDatastorePerformanceSummary{ + This: sr.Reference(), + } + + if datastore != nil { + req.Datastore = datastore.Reference() + } + + res, err := methods.QueryDatastorePerformanceSummary(ctx, sr.c, &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (sr StorageResourceManager) QueryIORMConfigOption(ctx context.Context, host *HostSystem) (*types.StorageIORMConfigOption, error) { + req := types.QueryIORMConfigOption{ + This: sr.Reference(), + } + + if host != nil { + req.Host = host.Reference() + } + + res, err := methods.QueryIORMConfigOption(ctx, sr.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (sr StorageResourceManager) RecommendDatastores(ctx context.Context, storageSpec types.StoragePlacementSpec) (*types.StoragePlacementResult, error) { + req := types.RecommendDatastores{ + This: 
sr.Reference(), + StorageSpec: storageSpec, + } + + res, err := methods.RecommendDatastores(ctx, sr.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (sr StorageResourceManager) RefreshStorageDrsRecommendation(ctx context.Context, pod *StoragePod) error { + req := types.RefreshStorageDrsRecommendation{ + This: sr.Reference(), + } + + if pod != nil { + req.Pod = pod.Reference() + } + + _, err := methods.RefreshStorageDrsRecommendation(ctx, sr.c, &req) + + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/task.go b/vendor/github.com/vmware/govmomi/object/task.go new file mode 100644 index 000000000000..2b66aa93b7f7 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/task.go @@ -0,0 +1,62 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/task" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/progress" + "github.com/vmware/govmomi/vim25/types" +) + +// Task is a convenience wrapper around task.Task that keeps a reference to +// the client that was used to create it. This allows users to call the Wait() +// function with only a context parameter, instead of a context parameter, a +// soap.RoundTripper, and reference to the root property collector. +type Task struct { + Common +} + +func NewTask(c *vim25.Client, ref types.ManagedObjectReference) *Task { + t := Task{ + Common: NewCommon(c, ref), + } + + return &t +} + +func (t *Task) Wait(ctx context.Context) error { + _, err := t.WaitForResult(ctx, nil) + return err +} + +func (t *Task) WaitForResult(ctx context.Context, s progress.Sinker) (*types.TaskInfo, error) { + p := property.DefaultCollector(t.c) + return task.Wait(ctx, t.Reference(), p, s) +} + +func (t *Task) Cancel(ctx context.Context) error { + _, err := methods.CancelTask(ctx, t.Client(), &types.CancelTask{ + This: t.Reference(), + }) + + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/types.go b/vendor/github.com/vmware/govmomi/object/types.go new file mode 100644 index 000000000000..4eb8d1b8bbf7 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/types.go @@ -0,0 +1,67 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/types" +) + +type Reference interface { + Reference() types.ManagedObjectReference +} + +func NewReference(c *vim25.Client, e types.ManagedObjectReference) Reference { + switch e.Type { + case "Folder": + return NewFolder(c, e) + case "StoragePod": + return &StoragePod{ + NewFolder(c, e), + } + case "Datacenter": + return NewDatacenter(c, e) + case "VirtualMachine": + return NewVirtualMachine(c, e) + case "VirtualApp": + return &VirtualApp{ + NewResourcePool(c, e), + } + case "ComputeResource": + return NewComputeResource(c, e) + case "ClusterComputeResource": + return NewClusterComputeResource(c, e) + case "HostSystem": + return NewHostSystem(c, e) + case "Network": + return NewNetwork(c, e) + case "OpaqueNetwork": + return NewOpaqueNetwork(c, e) + case "ResourcePool": + return NewResourcePool(c, e) + case "DistributedVirtualSwitch": + return NewDistributedVirtualSwitch(c, e) + case "VmwareDistributedVirtualSwitch": + return &VmwareDistributedVirtualSwitch{*NewDistributedVirtualSwitch(c, e)} + case "DistributedVirtualPortgroup": + return NewDistributedVirtualPortgroup(c, e) + case "Datastore": + return NewDatastore(c, e) + default: + panic("Unknown managed entity: " + e.Type) + } +} diff --git a/vendor/github.com/vmware/govmomi/object/virtual_app.go b/vendor/github.com/vmware/govmomi/object/virtual_app.go new file mode 100644 index 000000000000..4811178f167c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/virtual_app.go @@ -0,0 +1,105 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type VirtualApp struct { + *ResourcePool +} + +func NewVirtualApp(c *vim25.Client, ref types.ManagedObjectReference) *VirtualApp { + return &VirtualApp{ + ResourcePool: NewResourcePool(c, ref), + } +} + +func (p VirtualApp) CreateChildVM(ctx context.Context, config types.VirtualMachineConfigSpec, host *HostSystem) (*Task, error) { + req := types.CreateChildVM_Task{ + This: p.Reference(), + Config: config, + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + res, err := methods.CreateChildVM_Task(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewTask(p.c, res.Returnval), nil +} + +func (p VirtualApp) UpdateConfig(ctx context.Context, spec types.VAppConfigSpec) error { + req := types.UpdateVAppConfig{ + This: p.Reference(), + Spec: spec, + } + + _, err := methods.UpdateVAppConfig(ctx, p.c, &req) + return err +} + +func (p VirtualApp) PowerOn(ctx context.Context) (*Task, error) { + req := types.PowerOnVApp_Task{ + This: p.Reference(), + } + + res, err := methods.PowerOnVApp_Task(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewTask(p.c, res.Returnval), nil +} + +func (p VirtualApp) PowerOff(ctx context.Context, force bool) (*Task, error) { + req := types.PowerOffVApp_Task{ + This: p.Reference(), + Force: force, + } + + res, err := methods.PowerOffVApp_Task(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewTask(p.c, res.Returnval), nil + +} + +func (p VirtualApp) Suspend(ctx context.Context) (*Task, error) { + req := types.SuspendVApp_Task{ + This: p.Reference(), + } + + res, err := methods.SuspendVApp_Task(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewTask(p.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/virtual_device_list.go b/vendor/github.com/vmware/govmomi/object/virtual_device_list.go new file mode 100644 index 000000000000..e1c35eff88e3 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/virtual_device_list.go @@ -0,0 +1,935 @@ +/* +Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "errors" + "fmt" + "path/filepath" + "reflect" + "regexp" + "sort" + "strings" + + "github.com/vmware/govmomi/vim25/types" +) + +// Type values for use in BootOrder +const ( + DeviceTypeNone = "-" + DeviceTypeCdrom = "cdrom" + DeviceTypeDisk = "disk" + DeviceTypeEthernet = "ethernet" + DeviceTypeFloppy = "floppy" +) + +// VirtualDeviceList provides helper methods for working with a list of virtual devices. +type VirtualDeviceList []types.BaseVirtualDevice + +// SCSIControllerTypes are used for adding a new SCSI controller to a VM. +func SCSIControllerTypes() VirtualDeviceList { + // Return a mutable list of SCSI controller types, initialized with defaults. 
+ return VirtualDeviceList([]types.BaseVirtualDevice{ + &types.VirtualLsiLogicController{}, + &types.VirtualBusLogicController{}, + &types.ParaVirtualSCSIController{}, + &types.VirtualLsiLogicSASController{}, + }).Select(func(device types.BaseVirtualDevice) bool { + c := device.(types.BaseVirtualSCSIController).GetVirtualSCSIController() + c.SharedBus = types.VirtualSCSISharingNoSharing + c.BusNumber = -1 + return true + }) +} + +// EthernetCardTypes are used for adding a new ethernet card to a VM. +func EthernetCardTypes() VirtualDeviceList { + return VirtualDeviceList([]types.BaseVirtualDevice{ + &types.VirtualE1000{}, + &types.VirtualE1000e{}, + &types.VirtualVmxnet2{}, + &types.VirtualVmxnet3{}, + &types.VirtualPCNet32{}, + &types.VirtualSriovEthernetCard{}, + }).Select(func(device types.BaseVirtualDevice) bool { + c := device.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard() + c.GetVirtualDevice().Key = -1 + return true + }) +} + +// Select returns a new list containing all elements of the list for which the given func returns true. +func (l VirtualDeviceList) Select(f func(device types.BaseVirtualDevice) bool) VirtualDeviceList { + var found VirtualDeviceList + + for _, device := range l { + if f(device) { + found = append(found, device) + } + } + + return found +} + +// SelectByType returns a new list with devices that are equal to or extend the given type. +func (l VirtualDeviceList) SelectByType(deviceType types.BaseVirtualDevice) VirtualDeviceList { + dtype := reflect.TypeOf(deviceType) + if dtype == nil { + return nil + } + dname := dtype.Elem().Name() + + return l.Select(func(device types.BaseVirtualDevice) bool { + t := reflect.TypeOf(device) + + if t == dtype { + return true + } + + _, ok := t.Elem().FieldByName(dname) + + return ok + }) +} + +// SelectByBackingInfo returns a new list with devices matching the given backing info. +// If the value of backing is nil, any device with a backing of the same type will be returned. +func (l VirtualDeviceList) SelectByBackingInfo(backing types.BaseVirtualDeviceBackingInfo) VirtualDeviceList { + t := reflect.TypeOf(backing) + + return l.Select(func(device types.BaseVirtualDevice) bool { + db := device.GetVirtualDevice().Backing + if db == nil { + return false + } + + if reflect.TypeOf(db) != t { + return false + } + + if reflect.ValueOf(backing).IsNil() { + // selecting by backing type + return true + } + + switch a := db.(type) { + case *types.VirtualEthernetCardNetworkBackingInfo: + b := backing.(*types.VirtualEthernetCardNetworkBackingInfo) + return a.DeviceName == b.DeviceName + case *types.VirtualEthernetCardDistributedVirtualPortBackingInfo: + b := backing.(*types.VirtualEthernetCardDistributedVirtualPortBackingInfo) + return a.Port.SwitchUuid == b.Port.SwitchUuid && + a.Port.PortgroupKey == b.Port.PortgroupKey + case *types.VirtualDiskFlatVer2BackingInfo: + b := backing.(*types.VirtualDiskFlatVer2BackingInfo) + if a.Parent != nil && b.Parent != nil { + return a.Parent.FileName == b.Parent.FileName + } + return a.FileName == b.FileName + case *types.VirtualSerialPortURIBackingInfo: + b := backing.(*types.VirtualSerialPortURIBackingInfo) + return a.ServiceURI == b.ServiceURI + case types.BaseVirtualDeviceFileBackingInfo: + b := backing.(types.BaseVirtualDeviceFileBackingInfo) + return a.GetVirtualDeviceFileBackingInfo().FileName == b.GetVirtualDeviceFileBackingInfo().FileName + default: + return false + } + }) +} + +// Find returns the device matching the given name. 
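+//
+// Device names follow the convention used by VirtualDeviceList.Name, e.g.
+// "ethernet-0", "cdrom-3002" or "disk-1000-0". A minimal, illustrative
+// sketch (assumes vm is a *VirtualMachine and ctx a context.Context):
+//
+//	devices, _ := vm.Device(ctx)
+//	if d := devices.Find("ethernet-0"); d != nil {
+//		fmt.Println(devices.TypeName(d))
+//	}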
+func (l VirtualDeviceList) Find(name string) types.BaseVirtualDevice { + for _, device := range l { + if l.Name(device) == name { + return device + } + } + return nil +} + +// FindByKey returns the device matching the given key. +func (l VirtualDeviceList) FindByKey(key int32) types.BaseVirtualDevice { + for _, device := range l { + if device.GetVirtualDevice().Key == key { + return device + } + } + return nil +} + +// FindIDEController will find the named IDE controller if given, otherwise will pick an available controller. +// An error is returned if the named controller is not found or not an IDE controller. Or, if name is not +// given and no available controller can be found. +func (l VirtualDeviceList) FindIDEController(name string) (*types.VirtualIDEController, error) { + if name != "" { + d := l.Find(name) + if d == nil { + return nil, fmt.Errorf("device '%s' not found", name) + } + if c, ok := d.(*types.VirtualIDEController); ok { + return c, nil + } + return nil, fmt.Errorf("%s is not an IDE controller", name) + } + + c := l.PickController((*types.VirtualIDEController)(nil)) + if c == nil { + return nil, errors.New("no available IDE controller") + } + + return c.(*types.VirtualIDEController), nil +} + +// CreateIDEController creates a new IDE controller. +func (l VirtualDeviceList) CreateIDEController() (types.BaseVirtualDevice, error) { + ide := &types.VirtualIDEController{} + ide.Key = l.NewKey() + return ide, nil +} + +// FindSCSIController will find the named SCSI controller if given, otherwise will pick an available controller. +// An error is returned if the named controller is not found or not an SCSI controller. Or, if name is not +// given and no available controller can be found. +func (l VirtualDeviceList) FindSCSIController(name string) (*types.VirtualSCSIController, error) { + if name != "" { + d := l.Find(name) + if d == nil { + return nil, fmt.Errorf("device '%s' not found", name) + } + if c, ok := d.(types.BaseVirtualSCSIController); ok { + return c.GetVirtualSCSIController(), nil + } + return nil, fmt.Errorf("%s is not an SCSI controller", name) + } + + c := l.PickController((*types.VirtualSCSIController)(nil)) + if c == nil { + return nil, errors.New("no available SCSI controller") + } + + return c.(types.BaseVirtualSCSIController).GetVirtualSCSIController(), nil +} + +// CreateSCSIController creates a new SCSI controller of type name if given, otherwise defaults to lsilogic. +func (l VirtualDeviceList) CreateSCSIController(name string) (types.BaseVirtualDevice, error) { + ctypes := SCSIControllerTypes() + + if name == "scsi" || name == "" { + name = ctypes.Type(ctypes[0]) + } + + found := ctypes.Select(func(device types.BaseVirtualDevice) bool { + return l.Type(device) == name + }) + + if len(found) == 0 { + return nil, fmt.Errorf("unknown SCSI controller type '%s'", name) + } + + c, ok := found[0].(types.BaseVirtualSCSIController) + if !ok { + return nil, fmt.Errorf("invalid SCSI controller type '%s'", name) + } + + scsi := c.GetVirtualSCSIController() + scsi.BusNumber = l.newSCSIBusNumber() + scsi.Key = l.NewKey() + scsi.ScsiCtlrUnitNumber = 7 + return c.(types.BaseVirtualDevice), nil +} + +var scsiBusNumbers = []int{0, 1, 2, 3} + +// newSCSIBusNumber returns the bus number to use for adding a new SCSI bus device. +// -1 is returned if there are no bus numbers available. 
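+//
+// For example (illustrative): with existing controllers on buses 0 and 2 the
+// next call returns 1; once buses 0 through 3 are all in use it returns -1.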
+func (l VirtualDeviceList) newSCSIBusNumber() int32 {
+	var used []int
+
+	for _, d := range l.SelectByType((*types.VirtualSCSIController)(nil)) {
+		num := d.(types.BaseVirtualSCSIController).GetVirtualSCSIController().BusNumber
+		if num >= 0 {
+			used = append(used, int(num))
+		} // else caller is creating a new vm using SCSIControllerTypes
+	}
+
+	sort.Ints(used)
+
+	for i, n := range scsiBusNumbers {
+		if i == len(used) || n != used[i] {
+			return int32(n)
+		}
+	}
+
+	return -1
+}
+
+// FindNVMEController will find the named NVME controller if given, otherwise will pick an available controller.
+// An error is returned if the named controller is not found or not an NVME controller. Or, if name is not
+// given and no available controller can be found.
+func (l VirtualDeviceList) FindNVMEController(name string) (*types.VirtualNVMEController, error) {
+	if name != "" {
+		d := l.Find(name)
+		if d == nil {
+			return nil, fmt.Errorf("device '%s' not found", name)
+		}
+		if c, ok := d.(*types.VirtualNVMEController); ok {
+			return c, nil
+		}
+		return nil, fmt.Errorf("%s is not an NVME controller", name)
+	}
+
+	c := l.PickController((*types.VirtualNVMEController)(nil))
+	if c == nil {
+		return nil, errors.New("no available NVME controller")
+	}
+
+	return c.(*types.VirtualNVMEController), nil
+}
+
+// CreateNVMEController creates a new NVME controller.
+func (l VirtualDeviceList) CreateNVMEController() (types.BaseVirtualDevice, error) {
+	nvme := &types.VirtualNVMEController{}
+	nvme.BusNumber = l.newNVMEBusNumber()
+	nvme.Key = l.NewKey()
+
+	return nvme, nil
+}
+
+var nvmeBusNumbers = []int{0, 1, 2, 3}
+
+// newNVMEBusNumber returns the bus number to use for adding a new NVME bus device.
+// -1 is returned if there are no bus numbers available.
+func (l VirtualDeviceList) newNVMEBusNumber() int32 {
+	var used []int
+
+	for _, d := range l.SelectByType((*types.VirtualNVMEController)(nil)) {
+		num := d.(types.BaseVirtualController).GetVirtualController().BusNumber
+		if num >= 0 {
+			used = append(used, int(num))
+		} // else caller is creating a new vm using NVMEControllerTypes
+	}
+
+	sort.Ints(used)
+
+	for i, n := range nvmeBusNumbers {
+		if i == len(used) || n != used[i] {
+			return int32(n)
+		}
+	}
+
+	return -1
+}
+
+// FindDiskController will find an existing ide, scsi, or nvme disk controller.
+func (l VirtualDeviceList) FindDiskController(name string) (types.BaseVirtualController, error) {
+	switch {
+	case name == "ide":
+		return l.FindIDEController("")
+	case name == "scsi" || name == "":
+		return l.FindSCSIController("")
+	case name == "nvme":
+		return l.FindNVMEController("")
+	default:
+		if c, ok := l.Find(name).(types.BaseVirtualController); ok {
+			return c, nil
+		}
+		return nil, fmt.Errorf("%s is not a valid controller", name)
+	}
+}
+
+// PickController returns a controller of the given type(s).
+// If no controllers are found or have no available slots, then nil is returned.
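+//
+// A minimal, illustrative sketch (assumes vm, ctx and a datastore reference
+// dsRef of type types.ManagedObjectReference):
+//
+//	devices, _ := vm.Device(ctx)
+//	if c := devices.PickController((*types.VirtualSCSIController)(nil)); c != nil {
+//		disk := devices.CreateDisk(c, dsRef, "")
+//		_ = vm.AddDevice(ctx, disk)
+//	}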
+func (l VirtualDeviceList) PickController(kind types.BaseVirtualController) types.BaseVirtualController {
+	l = l.SelectByType(kind.(types.BaseVirtualDevice)).Select(func(device types.BaseVirtualDevice) bool {
+		num := len(device.(types.BaseVirtualController).GetVirtualController().Device)
+
+		switch device.(type) {
+		case types.BaseVirtualSCSIController:
+			return num < 15
+		case *types.VirtualIDEController:
+			return num < 2
+		case *types.VirtualNVMEController:
+			return num < 8
+		default:
+			return true
+		}
+	})
+
+	if len(l) == 0 {
+		return nil
+	}
+
+	return l[0].(types.BaseVirtualController)
+}
+
+// newUnitNumber returns the unit number to use for attaching a new device to the given controller.
+func (l VirtualDeviceList) newUnitNumber(c types.BaseVirtualController) int32 {
+	units := make([]bool, 30)
+
+	switch sc := c.(type) {
+	case types.BaseVirtualSCSIController:
+		// The SCSI controller sits on its own bus
+		units[sc.GetVirtualSCSIController().ScsiCtlrUnitNumber] = true
+	}
+
+	key := c.GetVirtualController().Key
+
+	for _, device := range l {
+		d := device.GetVirtualDevice()
+
+		if d.ControllerKey == key && d.UnitNumber != nil {
+			units[int(*d.UnitNumber)] = true
+		}
+	}
+
+	for unit, used := range units {
+		if !used {
+			return int32(unit)
+		}
+	}
+
+	return -1
+}
+
+// NewKey returns the key to use for adding a new device to the device list.
+// The device list we're working with here may not be complete (e.g. when
+// we're only adding new devices), so any positive keys could conflict with device keys
+// that are already in use. To avoid this type of conflict, we can use negative keys
+// here, which will be resolved to positive keys by vSphere as the reconfiguration is done.
+func (l VirtualDeviceList) NewKey() int32 {
+	var key int32 = -200
+
+	for _, device := range l {
+		d := device.GetVirtualDevice()
+		if d.Key < key {
+			key = d.Key
+		}
+	}
+
+	return key - 1
+}
+
+// AssignController assigns a device to a controller.
+func (l VirtualDeviceList) AssignController(device types.BaseVirtualDevice, c types.BaseVirtualController) {
+	d := device.GetVirtualDevice()
+	d.ControllerKey = c.GetVirtualController().Key
+	d.UnitNumber = new(int32)
+	*d.UnitNumber = l.newUnitNumber(c)
+	if d.Key == 0 {
+		d.Key = -1
+	}
+}
+
+// CreateDisk creates a new VirtualDisk device which can be added to a VM.
+func (l VirtualDeviceList) CreateDisk(c types.BaseVirtualController, ds types.ManagedObjectReference, name string) *types.VirtualDisk {
+	// If name is not specified, one will be chosen for you.
+	// But when given, make sure it ends in .vmdk, otherwise it will be treated as a directory.
+	if len(name) > 0 && filepath.Ext(name) != ".vmdk" {
+		name += ".vmdk"
+	}
+
+	device := &types.VirtualDisk{
+		VirtualDevice: types.VirtualDevice{
+			Backing: &types.VirtualDiskFlatVer2BackingInfo{
+				DiskMode:        string(types.VirtualDiskModePersistent),
+				ThinProvisioned: types.NewBool(true),
+				VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
+					FileName:  name,
+					Datastore: &ds,
+				},
+			},
+		},
+	}
+
+	l.AssignController(device, c)
+	return device
+}
+
+// ChildDisk creates a new VirtualDisk device, linked to the given parent disk, which can be added to a VM.
+func (l VirtualDeviceList) ChildDisk(parent *types.VirtualDisk) *types.VirtualDisk {
+	disk := *parent
+	backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
+	p := new(DatastorePath)
+	p.FromString(backing.FileName)
+	p.Path = ""
+
+	// Use specified disk as parent backing to a new disk.
+ disk.Backing = &types.VirtualDiskFlatVer2BackingInfo{ + VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ + FileName: p.String(), + Datastore: backing.Datastore, + }, + Parent: backing, + DiskMode: backing.DiskMode, + ThinProvisioned: backing.ThinProvisioned, + } + + return &disk +} + +func (l VirtualDeviceList) connectivity(device types.BaseVirtualDevice, v bool) error { + c := device.GetVirtualDevice().Connectable + if c == nil { + return fmt.Errorf("%s is not connectable", l.Name(device)) + } + + c.Connected = v + c.StartConnected = v + + return nil +} + +// Connect changes the device to connected, returns an error if the device is not connectable. +func (l VirtualDeviceList) Connect(device types.BaseVirtualDevice) error { + return l.connectivity(device, true) +} + +// Disconnect changes the device to disconnected, returns an error if the device is not connectable. +func (l VirtualDeviceList) Disconnect(device types.BaseVirtualDevice) error { + return l.connectivity(device, false) +} + +// FindCdrom finds a cdrom device with the given name, defaulting to the first cdrom device if any. +func (l VirtualDeviceList) FindCdrom(name string) (*types.VirtualCdrom, error) { + if name != "" { + d := l.Find(name) + if d == nil { + return nil, fmt.Errorf("device '%s' not found", name) + } + if c, ok := d.(*types.VirtualCdrom); ok { + return c, nil + } + return nil, fmt.Errorf("%s is not a cdrom device", name) + } + + c := l.SelectByType((*types.VirtualCdrom)(nil)) + if len(c) == 0 { + return nil, errors.New("no cdrom device found") + } + + return c[0].(*types.VirtualCdrom), nil +} + +// CreateCdrom creates a new VirtualCdrom device which can be added to a VM. +func (l VirtualDeviceList) CreateCdrom(c *types.VirtualIDEController) (*types.VirtualCdrom, error) { + device := &types.VirtualCdrom{} + + l.AssignController(device, c) + + l.setDefaultCdromBacking(device) + + device.Connectable = &types.VirtualDeviceConnectInfo{ + AllowGuestControl: true, + Connected: true, + StartConnected: true, + } + + return device, nil +} + +// InsertIso changes the cdrom device backing to use the given iso file. +func (l VirtualDeviceList) InsertIso(device *types.VirtualCdrom, iso string) *types.VirtualCdrom { + device.Backing = &types.VirtualCdromIsoBackingInfo{ + VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ + FileName: iso, + }, + } + + return device +} + +// EjectIso removes the iso file based backing and replaces with the default cdrom backing. +func (l VirtualDeviceList) EjectIso(device *types.VirtualCdrom) *types.VirtualCdrom { + l.setDefaultCdromBacking(device) + return device +} + +func (l VirtualDeviceList) setDefaultCdromBacking(device *types.VirtualCdrom) { + device.Backing = &types.VirtualCdromAtapiBackingInfo{ + VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{ + DeviceName: fmt.Sprintf("%s-%d-%d", DeviceTypeCdrom, device.ControllerKey, device.UnitNumber), + UseAutoDetect: types.NewBool(false), + }, + } +} + +// FindFloppy finds a floppy device with the given name, defaulting to the first floppy device if any. 
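+//
+// Illustrative sketch (the datastore path is a placeholder; devices comes
+// from VirtualMachine.Device):
+//
+//	if floppy, err := devices.FindFloppy(""); err == nil {
+//		devices.InsertImg(floppy, "[datastore1] images/boot.img")
+//		_ = vm.EditDevice(ctx, floppy)
+//	}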
+func (l VirtualDeviceList) FindFloppy(name string) (*types.VirtualFloppy, error) { + if name != "" { + d := l.Find(name) + if d == nil { + return nil, fmt.Errorf("device '%s' not found", name) + } + if c, ok := d.(*types.VirtualFloppy); ok { + return c, nil + } + return nil, fmt.Errorf("%s is not a floppy device", name) + } + + c := l.SelectByType((*types.VirtualFloppy)(nil)) + if len(c) == 0 { + return nil, errors.New("no floppy device found") + } + + return c[0].(*types.VirtualFloppy), nil +} + +// CreateFloppy creates a new VirtualFloppy device which can be added to a VM. +func (l VirtualDeviceList) CreateFloppy() (*types.VirtualFloppy, error) { + device := &types.VirtualFloppy{} + + c := l.PickController((*types.VirtualSIOController)(nil)) + if c == nil { + return nil, errors.New("no available SIO controller") + } + + l.AssignController(device, c) + + l.setDefaultFloppyBacking(device) + + device.Connectable = &types.VirtualDeviceConnectInfo{ + AllowGuestControl: true, + Connected: true, + StartConnected: true, + } + + return device, nil +} + +// InsertImg changes the floppy device backing to use the given img file. +func (l VirtualDeviceList) InsertImg(device *types.VirtualFloppy, img string) *types.VirtualFloppy { + device.Backing = &types.VirtualFloppyImageBackingInfo{ + VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ + FileName: img, + }, + } + + return device +} + +// EjectImg removes the img file based backing and replaces with the default floppy backing. +func (l VirtualDeviceList) EjectImg(device *types.VirtualFloppy) *types.VirtualFloppy { + l.setDefaultFloppyBacking(device) + return device +} + +func (l VirtualDeviceList) setDefaultFloppyBacking(device *types.VirtualFloppy) { + device.Backing = &types.VirtualFloppyDeviceBackingInfo{ + VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{ + DeviceName: fmt.Sprintf("%s-%d", DeviceTypeFloppy, device.UnitNumber), + UseAutoDetect: types.NewBool(false), + }, + } +} + +// FindSerialPort finds a serial port device with the given name, defaulting to the first serial port device if any. +func (l VirtualDeviceList) FindSerialPort(name string) (*types.VirtualSerialPort, error) { + if name != "" { + d := l.Find(name) + if d == nil { + return nil, fmt.Errorf("device '%s' not found", name) + } + if c, ok := d.(*types.VirtualSerialPort); ok { + return c, nil + } + return nil, fmt.Errorf("%s is not a serial port device", name) + } + + c := l.SelectByType((*types.VirtualSerialPort)(nil)) + if len(c) == 0 { + return nil, errors.New("no serial port device found") + } + + return c[0].(*types.VirtualSerialPort), nil +} + +// CreateSerialPort creates a new VirtualSerialPort device which can be added to a VM. +func (l VirtualDeviceList) CreateSerialPort() (*types.VirtualSerialPort, error) { + device := &types.VirtualSerialPort{ + YieldOnPoll: true, + } + + c := l.PickController((*types.VirtualSIOController)(nil)) + if c == nil { + return nil, errors.New("no available SIO controller") + } + + l.AssignController(device, c) + + l.setDefaultSerialPortBacking(device) + + return device, nil +} + +// ConnectSerialPort connects a serial port to a server or client uri. 
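+//
+// A minimal, illustrative sketch: expose the port over the network as a
+// server (the telnet URI below is only an example of a service URI the host
+// may support):
+//
+//	port, err := devices.CreateSerialPort()
+//	if err == nil {
+//		devices.ConnectSerialPort(port, "telnet://:8888", false, "")
+//		_ = vm.AddDevice(ctx, port)
+//	}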
+func (l VirtualDeviceList) ConnectSerialPort(device *types.VirtualSerialPort, uri string, client bool, proxyuri string) *types.VirtualSerialPort {
+	if strings.HasPrefix(uri, "[") {
+		device.Backing = &types.VirtualSerialPortFileBackingInfo{
+			VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
+				FileName: uri,
+			},
+		}
+
+		return device
+	}
+
+	direction := types.VirtualDeviceURIBackingOptionDirectionServer
+	if client {
+		direction = types.VirtualDeviceURIBackingOptionDirectionClient
+	}
+
+	device.Backing = &types.VirtualSerialPortURIBackingInfo{
+		VirtualDeviceURIBackingInfo: types.VirtualDeviceURIBackingInfo{
+			Direction:  string(direction),
+			ServiceURI: uri,
+			ProxyURI:   proxyuri,
+		},
+	}
+
+	return device
+}
+
+// DisconnectSerialPort disconnects the serial port backing.
+func (l VirtualDeviceList) DisconnectSerialPort(device *types.VirtualSerialPort) *types.VirtualSerialPort {
+	l.setDefaultSerialPortBacking(device)
+	return device
+}
+
+func (l VirtualDeviceList) setDefaultSerialPortBacking(device *types.VirtualSerialPort) {
+	device.Backing = &types.VirtualSerialPortURIBackingInfo{
+		VirtualDeviceURIBackingInfo: types.VirtualDeviceURIBackingInfo{
+			Direction:  "client",
+			ServiceURI: "localhost:0",
+		},
+	}
+}
+
+// CreateEthernetCard creates a new VirtualEthernetCard of the given name and initialized with the given backing.
+func (l VirtualDeviceList) CreateEthernetCard(name string, backing types.BaseVirtualDeviceBackingInfo) (types.BaseVirtualDevice, error) {
+	ctypes := EthernetCardTypes()
+
+	if name == "" {
+		name = ctypes.deviceName(ctypes[0])
+	}
+
+	found := ctypes.Select(func(device types.BaseVirtualDevice) bool {
+		return l.deviceName(device) == name
+	})
+
+	if len(found) == 0 {
+		return nil, fmt.Errorf("unknown ethernet card type '%s'", name)
+	}
+
+	c, ok := found[0].(types.BaseVirtualEthernetCard)
+	if !ok {
+		return nil, fmt.Errorf("invalid ethernet card type '%s'", name)
+	}
+
+	c.GetVirtualEthernetCard().Backing = backing
+
+	return c.(types.BaseVirtualDevice), nil
+}
+
+// PrimaryMacAddress returns the MacAddress field of the primary VirtualEthernetCard.
+func (l VirtualDeviceList) PrimaryMacAddress() string {
+	eth0 := l.Find("ethernet-0")
+
+	if eth0 == nil {
+		return ""
+	}
+
+	return eth0.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard().MacAddress
+}
+
+// convert a BaseVirtualDevice to a BaseVirtualMachineBootOptionsBootableDevice
+var bootableDevices = map[string]func(device types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice{
+	DeviceTypeNone: func(types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice {
+		return &types.VirtualMachineBootOptionsBootableDevice{}
+	},
+	DeviceTypeCdrom: func(types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice {
+		return &types.VirtualMachineBootOptionsBootableCdromDevice{}
+	},
+	DeviceTypeDisk: func(d types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice {
+		return &types.VirtualMachineBootOptionsBootableDiskDevice{
+			DeviceKey: d.GetVirtualDevice().Key,
+		}
+	},
+	DeviceTypeEthernet: func(d types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice {
+		return &types.VirtualMachineBootOptionsBootableEthernetDevice{
+			DeviceKey: d.GetVirtualDevice().Key,
+		}
+	},
+	DeviceTypeFloppy: func(types.BaseVirtualDevice) types.BaseVirtualMachineBootOptionsBootableDevice {
+		return &types.VirtualMachineBootOptionsBootableFloppyDevice{}
+	},
+}
+
+// BootOrder returns a list of devices which can be used to
set boot order via VirtualMachine.SetBootOptions. +// The order can be any of "ethernet", "cdrom", "floppy" or "disk" or by specific device name. +// A value of "-" will clear the existing boot order on the VC/ESX side. +func (l VirtualDeviceList) BootOrder(order []string) []types.BaseVirtualMachineBootOptionsBootableDevice { + var devices []types.BaseVirtualMachineBootOptionsBootableDevice + + for _, name := range order { + if kind, ok := bootableDevices[name]; ok { + if name == DeviceTypeNone { + // Not covered in the API docs, nor obvious, but this clears the boot order on the VC/ESX side. + devices = append(devices, new(types.VirtualMachineBootOptionsBootableDevice)) + continue + } + + for _, device := range l { + if l.Type(device) == name { + devices = append(devices, kind(device)) + } + } + continue + } + + if d := l.Find(name); d != nil { + if kind, ok := bootableDevices[l.Type(d)]; ok { + devices = append(devices, kind(d)) + } + } + } + + return devices +} + +// SelectBootOrder returns an ordered list of devices matching the given bootable device order +func (l VirtualDeviceList) SelectBootOrder(order []types.BaseVirtualMachineBootOptionsBootableDevice) VirtualDeviceList { + var devices VirtualDeviceList + + for _, bd := range order { + for _, device := range l { + if kind, ok := bootableDevices[l.Type(device)]; ok { + if reflect.DeepEqual(kind(device), bd) { + devices = append(devices, device) + } + } + } + } + + return devices +} + +// TypeName returns the vmodl type name of the device +func (l VirtualDeviceList) TypeName(device types.BaseVirtualDevice) string { + dtype := reflect.TypeOf(device) + if dtype == nil { + return "" + } + return dtype.Elem().Name() +} + +var deviceNameRegexp = regexp.MustCompile(`(?:Virtual)?(?:Machine)?(\w+?)(?:Card|EthernetCard|Device|Controller)?$`) + +func (l VirtualDeviceList) deviceName(device types.BaseVirtualDevice) string { + name := "device" + typeName := l.TypeName(device) + + m := deviceNameRegexp.FindStringSubmatch(typeName) + if len(m) == 2 { + name = strings.ToLower(m[1]) + } + + return name +} + +// Type returns a human-readable name for the given device +func (l VirtualDeviceList) Type(device types.BaseVirtualDevice) string { + switch device.(type) { + case types.BaseVirtualEthernetCard: + return DeviceTypeEthernet + case *types.ParaVirtualSCSIController: + return "pvscsi" + case *types.VirtualLsiLogicSASController: + return "lsilogic-sas" + case *types.VirtualNVMEController: + return "nvme" + default: + return l.deviceName(device) + } +} + +// Name returns a stable, human-readable name for the given device +func (l VirtualDeviceList) Name(device types.BaseVirtualDevice) string { + var key string + var UnitNumber int32 + d := device.GetVirtualDevice() + if d.UnitNumber != nil { + UnitNumber = *d.UnitNumber + } + + dtype := l.Type(device) + switch dtype { + case DeviceTypeEthernet: + key = fmt.Sprintf("%d", UnitNumber-7) + case DeviceTypeDisk: + key = fmt.Sprintf("%d-%d", d.ControllerKey, UnitNumber) + default: + key = fmt.Sprintf("%d", d.Key) + } + + return fmt.Sprintf("%s-%s", dtype, key) +} + +// ConfigSpec creates a virtual machine configuration spec for +// the specified operation, for the list of devices in the device list. 
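+//
+// Illustrative sketch: turn a list of newly built devices into a
+// reconfiguration request (assumes vm, ctx, and already-built cdrom and nic
+// devices):
+//
+//	add := VirtualDeviceList{cdrom, nic}
+//	changes, err := add.ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd)
+//	if err == nil {
+//		spec := types.VirtualMachineConfigSpec{DeviceChange: changes}
+//		if task, err := vm.Reconfigure(ctx, spec); err == nil {
+//			_ = task.Wait(ctx)
+//		}
+//	}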
+func (l VirtualDeviceList) ConfigSpec(op types.VirtualDeviceConfigSpecOperation) ([]types.BaseVirtualDeviceConfigSpec, error) { + var fop types.VirtualDeviceConfigSpecFileOperation + switch op { + case types.VirtualDeviceConfigSpecOperationAdd: + fop = types.VirtualDeviceConfigSpecFileOperationCreate + case types.VirtualDeviceConfigSpecOperationEdit: + fop = types.VirtualDeviceConfigSpecFileOperationReplace + case types.VirtualDeviceConfigSpecOperationRemove: + fop = types.VirtualDeviceConfigSpecFileOperationDestroy + default: + panic("unknown op") + } + + var res []types.BaseVirtualDeviceConfigSpec + for _, device := range l { + config := &types.VirtualDeviceConfigSpec{ + Device: device, + Operation: op, + } + + if disk, ok := device.(*types.VirtualDisk); ok { + config.FileOperation = fop + + // Special case to attach an existing disk + if op == types.VirtualDeviceConfigSpecOperationAdd && disk.CapacityInKB == 0 { + childDisk := false + if b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { + childDisk = b.Parent != nil + } + + if !childDisk { + // Existing disk, clear file operation + config.FileOperation = "" + } + } + } + + res = append(res, config) + } + + return res, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/virtual_disk_manager.go b/vendor/github.com/vmware/govmomi/object/virtual_disk_manager.go new file mode 100644 index 000000000000..72439caf9c0c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/virtual_disk_manager.go @@ -0,0 +1,227 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type VirtualDiskManager struct { + Common +} + +func NewVirtualDiskManager(c *vim25.Client) *VirtualDiskManager { + m := VirtualDiskManager{ + Common: NewCommon(c, *c.ServiceContent.VirtualDiskManager), + } + + return &m +} + +// CopyVirtualDisk copies a virtual disk, performing conversions as specified in the spec. +func (m VirtualDiskManager) CopyVirtualDisk( + ctx context.Context, + sourceName string, sourceDatacenter *Datacenter, + destName string, destDatacenter *Datacenter, + destSpec *types.VirtualDiskSpec, force bool) (*Task, error) { + + req := types.CopyVirtualDisk_Task{ + This: m.Reference(), + SourceName: sourceName, + DestName: destName, + DestSpec: destSpec, + Force: types.NewBool(force), + } + + if sourceDatacenter != nil { + ref := sourceDatacenter.Reference() + req.SourceDatacenter = &ref + } + + if destDatacenter != nil { + ref := destDatacenter.Reference() + req.DestDatacenter = &ref + } + + res, err := methods.CopyVirtualDisk_Task(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return NewTask(m.c, res.Returnval), nil +} + +// CreateVirtualDisk creates a new virtual disk. 
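+//
+// A minimal, illustrative sketch (assumes an authenticated *vim25.Client c,
+// a *Datacenter dc and ctx; the spec type and constants are the usual
+// vim25/types ones, and the datastore path and size are placeholders):
+//
+//	m := NewVirtualDiskManager(c)
+//	spec := &types.FileBackedVirtualDiskSpec{
+//		VirtualDiskSpec: types.VirtualDiskSpec{
+//			AdapterType: string(types.VirtualDiskAdapterTypeLsiLogic),
+//			DiskType:    string(types.VirtualDiskTypeThin),
+//		},
+//		CapacityKb: 1024 * 1024, // 1 GiB
+//	}
+//	if task, err := m.CreateVirtualDisk(ctx, "[datastore1] vm/data.vmdk", dc, spec); err == nil {
+//		_ = task.Wait(ctx)
+//	}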
+func (m VirtualDiskManager) CreateVirtualDisk( + ctx context.Context, + name string, datacenter *Datacenter, + spec types.BaseVirtualDiskSpec) (*Task, error) { + + req := types.CreateVirtualDisk_Task{ + This: m.Reference(), + Name: name, + Spec: spec, + } + + if datacenter != nil { + ref := datacenter.Reference() + req.Datacenter = &ref + } + + res, err := methods.CreateVirtualDisk_Task(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return NewTask(m.c, res.Returnval), nil +} + +// MoveVirtualDisk moves a virtual disk. +func (m VirtualDiskManager) MoveVirtualDisk( + ctx context.Context, + sourceName string, sourceDatacenter *Datacenter, + destName string, destDatacenter *Datacenter, + force bool) (*Task, error) { + req := types.MoveVirtualDisk_Task{ + This: m.Reference(), + SourceName: sourceName, + DestName: destName, + Force: types.NewBool(force), + } + + if sourceDatacenter != nil { + ref := sourceDatacenter.Reference() + req.SourceDatacenter = &ref + } + + if destDatacenter != nil { + ref := destDatacenter.Reference() + req.DestDatacenter = &ref + } + + res, err := methods.MoveVirtualDisk_Task(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return NewTask(m.c, res.Returnval), nil +} + +// DeleteVirtualDisk deletes a virtual disk. +func (m VirtualDiskManager) DeleteVirtualDisk(ctx context.Context, name string, dc *Datacenter) (*Task, error) { + req := types.DeleteVirtualDisk_Task{ + This: m.Reference(), + Name: name, + } + + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.DeleteVirtualDisk_Task(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return NewTask(m.c, res.Returnval), nil +} + +// InflateVirtualDisk inflates a virtual disk. +func (m VirtualDiskManager) InflateVirtualDisk(ctx context.Context, name string, dc *Datacenter) (*Task, error) { + req := types.InflateVirtualDisk_Task{ + This: m.Reference(), + Name: name, + } + + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.InflateVirtualDisk_Task(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return NewTask(m.c, res.Returnval), nil +} + +// ShrinkVirtualDisk shrinks a virtual disk. 
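+//
+// Illustrative sketch (the path and dc value are placeholders; m is a
+// *VirtualDiskManager):
+//
+//	if task, err := m.ShrinkVirtualDisk(ctx, "[datastore1] vm/data.vmdk", dc, types.NewBool(false)); err == nil {
+//		_ = task.Wait(ctx)
+//	}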
+func (m VirtualDiskManager) ShrinkVirtualDisk(ctx context.Context, name string, dc *Datacenter, copy *bool) (*Task, error) { + req := types.ShrinkVirtualDisk_Task{ + This: m.Reference(), + Name: name, + Copy: copy, + } + + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.ShrinkVirtualDisk_Task(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return NewTask(m.c, res.Returnval), nil +} + +// Queries virtual disk uuid +func (m VirtualDiskManager) QueryVirtualDiskUuid(ctx context.Context, name string, dc *Datacenter) (string, error) { + req := types.QueryVirtualDiskUuid{ + This: m.Reference(), + Name: name, + } + + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.QueryVirtualDiskUuid(ctx, m.c, &req) + if err != nil { + return "", err + } + + if res == nil { + return "", nil + } + + return res.Returnval, nil +} + +func (m VirtualDiskManager) SetVirtualDiskUuid(ctx context.Context, name string, dc *Datacenter, uuid string) error { + req := types.SetVirtualDiskUuid{ + This: m.Reference(), + Name: name, + Uuid: uuid, + } + + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + _, err := methods.SetVirtualDiskUuid(ctx, m.c, &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/virtual_disk_manager_internal.go b/vendor/github.com/vmware/govmomi/object/virtual_disk_manager_internal.go new file mode 100644 index 000000000000..faa9ecad5c3c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/virtual_disk_manager_internal.go @@ -0,0 +1,166 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "context" + "reflect" + + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +func init() { + types.Add("ArrayOfVirtualDiskInfo", reflect.TypeOf((*arrayOfVirtualDiskInfo)(nil)).Elem()) + + types.Add("VirtualDiskInfo", reflect.TypeOf((*VirtualDiskInfo)(nil)).Elem()) +} + +type arrayOfVirtualDiskInfo struct { + VirtualDiskInfo []VirtualDiskInfo `xml:"VirtualDiskInfo,omitempty"` +} + +type queryVirtualDiskInfoTaskRequest struct { + This types.ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *types.ManagedObjectReference `xml:"datacenter,omitempty"` + IncludeParents bool `xml:"includeParents"` +} + +type queryVirtualDiskInfoTaskResponse struct { + Returnval types.ManagedObjectReference `xml:"returnval"` +} + +type queryVirtualDiskInfoTaskBody struct { + Req *queryVirtualDiskInfoTaskRequest `xml:"urn:internalvim25 QueryVirtualDiskInfo_Task,omitempty"` + Res *queryVirtualDiskInfoTaskResponse `xml:"urn:vim25 QueryVirtualDiskInfo_TaskResponse,omitempty"` + InternalRes *queryVirtualDiskInfoTaskResponse `xml:"urn:internalvim25 QueryVirtualDiskInfo_TaskResponse,omitempty"` + Err *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *queryVirtualDiskInfoTaskBody) Fault() *soap.Fault { return b.Err } + +func queryVirtualDiskInfoTask(ctx context.Context, r soap.RoundTripper, req *queryVirtualDiskInfoTaskRequest) (*queryVirtualDiskInfoTaskResponse, error) { + var reqBody, resBody queryVirtualDiskInfoTaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + if resBody.Res != nil { + return resBody.Res, nil + } + + return resBody.InternalRes, nil +} + +type VirtualDiskInfo struct { + Name string `xml:"unit>name"` + DiskType string `xml:"diskType"` + Parent string `xml:"parent,omitempty"` +} + +func (m VirtualDiskManager) QueryVirtualDiskInfo(ctx context.Context, name string, dc *Datacenter, includeParents bool) ([]VirtualDiskInfo, error) { + req := queryVirtualDiskInfoTaskRequest{ + This: m.Reference(), + Name: name, + IncludeParents: includeParents, + } + + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := queryVirtualDiskInfoTask(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + info, err := NewTask(m.Client(), res.Returnval).WaitForResult(ctx, nil) + if err != nil { + return nil, err + } + + return info.Result.(arrayOfVirtualDiskInfo).VirtualDiskInfo, nil +} + +type createChildDiskTaskRequest struct { + This types.ManagedObjectReference `xml:"_this"` + ChildName string `xml:"childName"` + ChildDatacenter *types.ManagedObjectReference `xml:"childDatacenter,omitempty"` + ParentName string `xml:"parentName"` + ParentDatacenter *types.ManagedObjectReference `xml:"parentDatacenter,omitempty"` + IsLinkedClone bool `xml:"isLinkedClone"` +} + +type createChildDiskTaskResponse struct { + Returnval types.ManagedObjectReference `xml:"returnval"` +} + +type createChildDiskTaskBody struct { + Req *createChildDiskTaskRequest `xml:"urn:internalvim25 CreateChildDisk_Task,omitempty"` + Res *createChildDiskTaskResponse `xml:"urn:vim25 CreateChildDisk_TaskResponse,omitempty"` + InternalRes *createChildDiskTaskResponse `xml:"urn:internalvim25 CreateChildDisk_TaskResponse,omitempty"` + Err *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *createChildDiskTaskBody) Fault() *soap.Fault { return b.Err } + +func createChildDiskTask(ctx 
context.Context, r soap.RoundTripper, req *createChildDiskTaskRequest) (*createChildDiskTaskResponse, error) { + var reqBody, resBody createChildDiskTaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + if resBody.Res != nil { + return resBody.Res, nil // vim-version <= 6.5 + } + + return resBody.InternalRes, nil // vim-version >= 6.7 +} + +func (m VirtualDiskManager) CreateChildDisk(ctx context.Context, parent string, pdc *Datacenter, name string, dc *Datacenter, linked bool) (*Task, error) { + req := createChildDiskTaskRequest{ + This: m.Reference(), + ChildName: name, + ParentName: parent, + IsLinkedClone: linked, + } + + if dc != nil { + ref := dc.Reference() + req.ChildDatacenter = &ref + } + + if pdc != nil { + ref := pdc.Reference() + req.ParentDatacenter = &ref + } + + res, err := createChildDiskTask(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return NewTask(m.Client(), res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/virtual_machine.go b/vendor/github.com/vmware/govmomi/object/virtual_machine.go new file mode 100644 index 000000000000..511f55723571 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/virtual_machine.go @@ -0,0 +1,801 @@ +/* +Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package object + +import ( + "context" + "errors" + "fmt" + "net" + "path" + + "github.com/vmware/govmomi/nfc" + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +const ( + PropRuntimePowerState = "summary.runtime.powerState" +) + +type VirtualMachine struct { + Common +} + +func NewVirtualMachine(c *vim25.Client, ref types.ManagedObjectReference) *VirtualMachine { + return &VirtualMachine{ + Common: NewCommon(c, ref), + } +} + +func (v VirtualMachine) PowerState(ctx context.Context) (types.VirtualMachinePowerState, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{PropRuntimePowerState}, &o) + if err != nil { + return "", err + } + + return o.Summary.Runtime.PowerState, nil +} + +func (v VirtualMachine) PowerOn(ctx context.Context) (*Task, error) { + req := types.PowerOnVM_Task{ + This: v.Reference(), + } + + res, err := methods.PowerOnVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) PowerOff(ctx context.Context) (*Task, error) { + req := types.PowerOffVM_Task{ + This: v.Reference(), + } + + res, err := methods.PowerOffVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) Reset(ctx context.Context) (*Task, error) { + req := types.ResetVM_Task{ + This: v.Reference(), + } + + res, err := methods.ResetVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) Suspend(ctx context.Context) (*Task, error) { + req := types.SuspendVM_Task{ + This: v.Reference(), + } + + res, err := methods.SuspendVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) ShutdownGuest(ctx context.Context) error { + req := types.ShutdownGuest{ + This: v.Reference(), + } + + _, err := methods.ShutdownGuest(ctx, v.c, &req) + return err +} + +func (v VirtualMachine) RebootGuest(ctx context.Context) error { + req := types.RebootGuest{ + This: v.Reference(), + } + + _, err := methods.RebootGuest(ctx, v.c, &req) + return err +} + +func (v VirtualMachine) Destroy(ctx context.Context) (*Task, error) { + req := types.Destroy_Task{ + This: v.Reference(), + } + + res, err := methods.Destroy_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) Clone(ctx context.Context, folder *Folder, name string, config types.VirtualMachineCloneSpec) (*Task, error) { + req := types.CloneVM_Task{ + This: v.Reference(), + Folder: folder.Reference(), + Name: name, + Spec: config, + } + + res, err := methods.CloneVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) Customize(ctx context.Context, spec types.CustomizationSpec) (*Task, error) { + req := types.CustomizeVM_Task{ + This: v.Reference(), + Spec: spec, + } + + res, err := methods.CustomizeVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) Relocate(ctx context.Context, config types.VirtualMachineRelocateSpec, priority types.VirtualMachineMovePriority) (*Task, error) { + req := types.RelocateVM_Task{ + This: 
v.Reference(), + Spec: config, + Priority: priority, + } + + res, err := methods.RelocateVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) Reconfigure(ctx context.Context, config types.VirtualMachineConfigSpec) (*Task, error) { + req := types.ReconfigVM_Task{ + This: v.Reference(), + Spec: config, + } + + res, err := methods.ReconfigVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) WaitForIP(ctx context.Context) (string, error) { + var ip string + + p := property.DefaultCollector(v.c) + err := property.Wait(ctx, p, v.Reference(), []string{"guest.ipAddress"}, func(pc []types.PropertyChange) bool { + for _, c := range pc { + if c.Name != "guest.ipAddress" { + continue + } + if c.Op != types.PropertyChangeOpAssign { + continue + } + if c.Val == nil { + continue + } + + ip = c.Val.(string) + return true + } + + return false + }) + + if err != nil { + return "", err + } + + return ip, nil +} + +// WaitForNetIP waits for the VM guest.net property to report an IP address for all VM NICs. +// Only consider IPv4 addresses if the v4 param is true. +// By default, wait for all NICs to get an IP address, unless 1 or more device is given. +// A device can be specified by the MAC address or the device name, e.g. "ethernet-0". +// Returns a map with MAC address as the key and IP address list as the value. +func (v VirtualMachine) WaitForNetIP(ctx context.Context, v4 bool, device ...string) (map[string][]string, error) { + macs := make(map[string][]string) + eths := make(map[string]string) + + p := property.DefaultCollector(v.c) + + // Wait for all NICs to have a MacAddress, which may not be generated yet. + err := property.Wait(ctx, p, v.Reference(), []string{"config.hardware.device"}, func(pc []types.PropertyChange) bool { + for _, c := range pc { + if c.Op != types.PropertyChangeOpAssign { + continue + } + + devices := VirtualDeviceList(c.Val.(types.ArrayOfVirtualDevice).VirtualDevice) + for _, d := range devices { + if nic, ok := d.(types.BaseVirtualEthernetCard); ok { + mac := nic.GetVirtualEthernetCard().MacAddress + if mac == "" { + return false + } + macs[mac] = nil + eths[devices.Name(d)] = mac + } + } + } + + return true + }) + + if len(device) != 0 { + // Only wait for specific NIC(s) + macs = make(map[string][]string) + for _, mac := range device { + if eth, ok := eths[mac]; ok { + mac = eth // device name, e.g. "ethernet-0" + } + macs[mac] = nil + } + } + + err = property.Wait(ctx, p, v.Reference(), []string{"guest.net"}, func(pc []types.PropertyChange) bool { + for _, c := range pc { + if c.Op != types.PropertyChangeOpAssign { + continue + } + + nics := c.Val.(types.ArrayOfGuestNicInfo).GuestNicInfo + for _, nic := range nics { + mac := nic.MacAddress + if mac == "" || nic.IpConfig == nil { + continue + } + + for _, ip := range nic.IpConfig.IpAddress { + if _, ok := macs[mac]; !ok { + continue // Ignore any that don't correspond to a VM device + } + if v4 && net.ParseIP(ip.IpAddress).To4() == nil { + continue // Ignore non IPv4 address + } + macs[mac] = append(macs[mac], ip.IpAddress) + } + } + } + + for _, ips := range macs { + if len(ips) == 0 { + return false + } + } + + return true + }) + + if err != nil { + return nil, err + } + + return macs, nil +} + +// Device returns the VirtualMachine's config.hardware.device property. 
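+//
+// Illustrative sketch: list the VM's devices and print the primary NIC's MAC
+// address (assumes vm and ctx):
+//
+//	if devices, err := vm.Device(ctx); err == nil {
+//		fmt.Println(devices.PrimaryMacAddress())
+//	}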
+func (v VirtualMachine) Device(ctx context.Context) (VirtualDeviceList, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{"config.hardware.device", "summary.runtime.connectionState"}, &o) + if err != nil { + return nil, err + } + + // Quoting the SDK doc: + // The virtual machine configuration is not guaranteed to be available. + // For example, the configuration information would be unavailable if the server + // is unable to access the virtual machine files on disk, and is often also unavailable + // during the initial phases of virtual machine creation. + if o.Config == nil { + return nil, fmt.Errorf("%s Config is not available, connectionState=%s", + v.Reference(), o.Summary.Runtime.ConnectionState) + } + + return VirtualDeviceList(o.Config.Hardware.Device), nil +} + +func (v VirtualMachine) HostSystem(ctx context.Context) (*HostSystem, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{"summary.runtime.host"}, &o) + if err != nil { + return nil, err + } + + host := o.Summary.Runtime.Host + if host == nil { + return nil, errors.New("VM doesn't have a HostSystem") + } + + return NewHostSystem(v.c, *host), nil +} + +func (v VirtualMachine) ResourcePool(ctx context.Context) (*ResourcePool, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{"resourcePool"}, &o) + if err != nil { + return nil, err + } + + rp := o.ResourcePool + if rp == nil { + return nil, errors.New("VM doesn't have a resourcePool") + } + + return NewResourcePool(v.c, *rp), nil +} + +func (v VirtualMachine) configureDevice(ctx context.Context, op types.VirtualDeviceConfigSpecOperation, fop types.VirtualDeviceConfigSpecFileOperation, devices ...types.BaseVirtualDevice) error { + spec := types.VirtualMachineConfigSpec{} + + for _, device := range devices { + config := &types.VirtualDeviceConfigSpec{ + Device: device, + Operation: op, + } + + if disk, ok := device.(*types.VirtualDisk); ok { + config.FileOperation = fop + + // Special case to attach an existing disk + if op == types.VirtualDeviceConfigSpecOperationAdd && disk.CapacityInKB == 0 { + childDisk := false + if b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { + childDisk = b.Parent != nil + } + + if !childDisk { + config.FileOperation = "" // existing disk + } + } + } + + spec.DeviceChange = append(spec.DeviceChange, config) + } + + task, err := v.Reconfigure(ctx, spec) + if err != nil { + return err + } + + return task.Wait(ctx) +} + +// AddDevice adds the given devices to the VirtualMachine +func (v VirtualMachine) AddDevice(ctx context.Context, device ...types.BaseVirtualDevice) error { + return v.configureDevice(ctx, types.VirtualDeviceConfigSpecOperationAdd, types.VirtualDeviceConfigSpecFileOperationCreate, device...) +} + +// EditDevice edits the given (existing) devices on the VirtualMachine +func (v VirtualMachine) EditDevice(ctx context.Context, device ...types.BaseVirtualDevice) error { + return v.configureDevice(ctx, types.VirtualDeviceConfigSpecOperationEdit, types.VirtualDeviceConfigSpecFileOperationReplace, device...) +} + +// RemoveDevice removes the given devices on the VirtualMachine +func (v VirtualMachine) RemoveDevice(ctx context.Context, keepFiles bool, device ...types.BaseVirtualDevice) error { + fop := types.VirtualDeviceConfigSpecFileOperationDestroy + if keepFiles { + fop = "" + } + return v.configureDevice(ctx, types.VirtualDeviceConfigSpecOperationRemove, fop, device...) 
+} + +// BootOptions returns the VirtualMachine's config.bootOptions property. +func (v VirtualMachine) BootOptions(ctx context.Context) (*types.VirtualMachineBootOptions, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{"config.bootOptions"}, &o) + if err != nil { + return nil, err + } + + return o.Config.BootOptions, nil +} + +// SetBootOptions reconfigures the VirtualMachine with the given options. +func (v VirtualMachine) SetBootOptions(ctx context.Context, options *types.VirtualMachineBootOptions) error { + spec := types.VirtualMachineConfigSpec{} + + spec.BootOptions = options + + task, err := v.Reconfigure(ctx, spec) + if err != nil { + return err + } + + return task.Wait(ctx) +} + +// Answer answers a pending question. +func (v VirtualMachine) Answer(ctx context.Context, id, answer string) error { + req := types.AnswerVM{ + This: v.Reference(), + QuestionId: id, + AnswerChoice: answer, + } + + _, err := methods.AnswerVM(ctx, v.c, &req) + if err != nil { + return err + } + + return nil +} + +func (v VirtualMachine) AcquireTicket(ctx context.Context, kind string) (*types.VirtualMachineTicket, error) { + req := types.AcquireTicket{ + This: v.Reference(), + TicketType: kind, + } + + res, err := methods.AcquireTicket(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +// CreateSnapshot creates a new snapshot of a virtual machine. +func (v VirtualMachine) CreateSnapshot(ctx context.Context, name string, description string, memory bool, quiesce bool) (*Task, error) { + req := types.CreateSnapshot_Task{ + This: v.Reference(), + Name: name, + Description: description, + Memory: memory, + Quiesce: quiesce, + } + + res, err := methods.CreateSnapshot_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +// RemoveAllSnapshot removes all snapshots of a virtual machine +func (v VirtualMachine) RemoveAllSnapshot(ctx context.Context, consolidate *bool) (*Task, error) { + req := types.RemoveAllSnapshots_Task{ + This: v.Reference(), + Consolidate: consolidate, + } + + res, err := methods.RemoveAllSnapshots_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +type snapshotMap map[string][]types.ManagedObjectReference + +func (m snapshotMap) add(parent string, tree []types.VirtualMachineSnapshotTree) { + for i, st := range tree { + sname := st.Name + names := []string{sname, st.Snapshot.Value} + + if parent != "" { + sname = path.Join(parent, sname) + // Add full path as an option to resolve duplicate names + names = append(names, sname) + } + + for _, name := range names { + m[name] = append(m[name], tree[i].Snapshot) + } + + m.add(sname, st.ChildSnapshotList) + } +} + +// FindSnapshot supports snapshot lookup by name, where name can be: +// 1) snapshot ManagedObjectReference.Value (unique) +// 2) snapshot name (may not be unique) +// 3) snapshot tree path (may not be unique) +func (v VirtualMachine) FindSnapshot(ctx context.Context, name string) (*types.ManagedObjectReference, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{"snapshot"}, &o) + if err != nil { + return nil, err + } + + if o.Snapshot == nil || len(o.Snapshot.RootSnapshotList) == 0 { + return nil, errors.New("No snapshots for this VM") + } + + m := make(snapshotMap) + m.add("", o.Snapshot.RootSnapshotList) + + s := m[name] + switch len(s) { + case 0: + return nil, fmt.Errorf("snapshot %q not found", name) + 
case 1: + return &s[0], nil + default: + return nil, fmt.Errorf("%q resolves to %d snapshots", name, len(s)) + } +} + +// RemoveSnapshot removes a named snapshot +func (v VirtualMachine) RemoveSnapshot(ctx context.Context, name string, removeChildren bool, consolidate *bool) (*Task, error) { + snapshot, err := v.FindSnapshot(ctx, name) + if err != nil { + return nil, err + } + + req := types.RemoveSnapshot_Task{ + This: snapshot.Reference(), + RemoveChildren: removeChildren, + Consolidate: consolidate, + } + + res, err := methods.RemoveSnapshot_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +// RevertToCurrentSnapshot reverts to the current snapshot +func (v VirtualMachine) RevertToCurrentSnapshot(ctx context.Context, suppressPowerOn bool) (*Task, error) { + req := types.RevertToCurrentSnapshot_Task{ + This: v.Reference(), + SuppressPowerOn: types.NewBool(suppressPowerOn), + } + + res, err := methods.RevertToCurrentSnapshot_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +// RevertToSnapshot reverts to a named snapshot +func (v VirtualMachine) RevertToSnapshot(ctx context.Context, name string, suppressPowerOn bool) (*Task, error) { + snapshot, err := v.FindSnapshot(ctx, name) + if err != nil { + return nil, err + } + + req := types.RevertToSnapshot_Task{ + This: snapshot.Reference(), + SuppressPowerOn: types.NewBool(suppressPowerOn), + } + + res, err := methods.RevertToSnapshot_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +// IsToolsRunning returns true if VMware Tools is currently running in the guest OS, and false otherwise. +func (v VirtualMachine) IsToolsRunning(ctx context.Context) (bool, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{"guest.toolsRunningStatus"}, &o) + if err != nil { + return false, err + } + + return o.Guest.ToolsRunningStatus == string(types.VirtualMachineToolsRunningStatusGuestToolsRunning), nil +} + +// Wait for the VirtualMachine to change to the desired power state. 
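+//
+// Illustrative usage (editor's sketch, not part of upstream govmomi),
+// assuming ctx is a context.Context and vm is an object.VirtualMachine:
+//
+//     err := vm.WaitForPowerState(ctx, types.VirtualMachinePowerStatePoweredOn)
+//     if err != nil {
+//         // the context was cancelled or the property wait failed
+//     }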
+func (v VirtualMachine) WaitForPowerState(ctx context.Context, state types.VirtualMachinePowerState) error { + p := property.DefaultCollector(v.c) + err := property.Wait(ctx, p, v.Reference(), []string{PropRuntimePowerState}, func(pc []types.PropertyChange) bool { + for _, c := range pc { + if c.Name != PropRuntimePowerState { + continue + } + if c.Val == nil { + continue + } + + ps := c.Val.(types.VirtualMachinePowerState) + if ps == state { + return true + } + } + return false + }) + + return err +} + +func (v VirtualMachine) MarkAsTemplate(ctx context.Context) error { + req := types.MarkAsTemplate{ + This: v.Reference(), + } + + _, err := methods.MarkAsTemplate(ctx, v.c, &req) + if err != nil { + return err + } + + return nil +} + +func (v VirtualMachine) MarkAsVirtualMachine(ctx context.Context, pool ResourcePool, host *HostSystem) error { + req := types.MarkAsVirtualMachine{ + This: v.Reference(), + Pool: pool.Reference(), + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + _, err := methods.MarkAsVirtualMachine(ctx, v.c, &req) + if err != nil { + return err + } + + return nil +} + +func (v VirtualMachine) Migrate(ctx context.Context, pool *ResourcePool, host *HostSystem, priority types.VirtualMachineMovePriority, state types.VirtualMachinePowerState) (*Task, error) { + req := types.MigrateVM_Task{ + This: v.Reference(), + Priority: priority, + State: state, + } + + if pool != nil { + ref := pool.Reference() + req.Pool = &ref + } + + if host != nil { + ref := host.Reference() + req.Host = &ref + } + + res, err := methods.MigrateVM_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) Unregister(ctx context.Context) error { + req := types.UnregisterVM{ + This: v.Reference(), + } + + _, err := methods.UnregisterVM(ctx, v.Client(), &req) + return err +} + +// QueryEnvironmentBrowser is a helper to get the environmentBrowser property. 
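+// QueryConfigTarget (below) then retrieves the ConfigTarget through that browser.
+//
+// Illustrative usage (editor's sketch, not part of upstream govmomi), e.g.
+// listing the datastores available to the virtual machine:
+//
+//     ct, err := vm.QueryConfigTarget(ctx)
+//     if err == nil {
+//         for _, ds := range ct.Datastore {
+//             fmt.Println(ds.Name)
+//         }
+//     }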
+func (v VirtualMachine) QueryConfigTarget(ctx context.Context) (*types.ConfigTarget, error) { + var vm mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{"environmentBrowser"}, &vm) + if err != nil { + return nil, err + } + + req := types.QueryConfigTarget{ + This: vm.EnvironmentBrowser, + } + + res, err := methods.QueryConfigTarget(ctx, v.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (v VirtualMachine) MountToolsInstaller(ctx context.Context) error { + req := types.MountToolsInstaller{ + This: v.Reference(), + } + + _, err := methods.MountToolsInstaller(ctx, v.Client(), &req) + return err +} + +func (v VirtualMachine) UnmountToolsInstaller(ctx context.Context) error { + req := types.UnmountToolsInstaller{ + This: v.Reference(), + } + + _, err := methods.UnmountToolsInstaller(ctx, v.Client(), &req) + return err +} + +func (v VirtualMachine) UpgradeTools(ctx context.Context, options string) (*Task, error) { + req := types.UpgradeTools_Task{ + This: v.Reference(), + InstallerOptions: options, + } + + res, err := methods.UpgradeTools_Task(ctx, v.Client(), &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + +func (v VirtualMachine) Export(ctx context.Context) (*nfc.Lease, error) { + req := types.ExportVm{ + This: v.Reference(), + } + + res, err := methods.ExportVm(ctx, v.Client(), &req) + if err != nil { + return nil, err + } + + return nfc.NewLease(v.c, res.Returnval), nil +} + +func (v VirtualMachine) UpgradeVM(ctx context.Context, version string) (*Task, error) { + req := types.UpgradeVM_Task{ + This: v.Reference(), + Version: version, + } + + res, err := methods.UpgradeVM_Task(ctx, v.Client(), &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go b/vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go new file mode 100644 index 000000000000..f6caf9870837 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go @@ -0,0 +1,21 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +type VmwareDistributedVirtualSwitch struct { + DistributedVirtualSwitch +} diff --git a/vendor/github.com/vmware/govmomi/property/collector.go b/vendor/github.com/vmware/govmomi/property/collector.go new file mode 100644 index 000000000000..ccf712cf9d3b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/property/collector.go @@ -0,0 +1,205 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package property + +import ( + "context" + "errors" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +// Collector models the PropertyCollector managed object. +// +// For more information, see: +// http://pubs.vmware.com/vsphere-60/index.jsp?topic=%2Fcom.vmware.wssdk.apiref.doc%2Fvmodl.query.PropertyCollector.html +// +type Collector struct { + roundTripper soap.RoundTripper + reference types.ManagedObjectReference +} + +// DefaultCollector returns the session's default property collector. +func DefaultCollector(c *vim25.Client) *Collector { + p := Collector{ + roundTripper: c, + reference: c.ServiceContent.PropertyCollector, + } + + return &p +} + +func (p Collector) Reference() types.ManagedObjectReference { + return p.reference +} + +// Create creates a new session-specific Collector that can be used to +// retrieve property updates independent of any other Collector. +func (p *Collector) Create(ctx context.Context) (*Collector, error) { + req := types.CreatePropertyCollector{ + This: p.Reference(), + } + + res, err := methods.CreatePropertyCollector(ctx, p.roundTripper, &req) + if err != nil { + return nil, err + } + + newp := Collector{ + roundTripper: p.roundTripper, + reference: res.Returnval, + } + + return &newp, nil +} + +// Destroy destroys this Collector. +func (p *Collector) Destroy(ctx context.Context) error { + req := types.DestroyPropertyCollector{ + This: p.Reference(), + } + + _, err := methods.DestroyPropertyCollector(ctx, p.roundTripper, &req) + if err != nil { + return err + } + + p.reference = types.ManagedObjectReference{} + return nil +} + +func (p *Collector) CreateFilter(ctx context.Context, req types.CreateFilter) error { + req.This = p.Reference() + + _, err := methods.CreateFilter(ctx, p.roundTripper, &req) + if err != nil { + return err + } + + return nil +} + +func (p *Collector) WaitForUpdates(ctx context.Context, v string) (*types.UpdateSet, error) { + req := types.WaitForUpdatesEx{ + This: p.Reference(), + Version: v, + } + + res, err := methods.WaitForUpdatesEx(ctx, p.roundTripper, &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (p *Collector) RetrieveProperties(ctx context.Context, req types.RetrieveProperties) (*types.RetrievePropertiesResponse, error) { + req.This = p.Reference() + return methods.RetrieveProperties(ctx, p.roundTripper, &req) +} + +// Retrieve loads properties for a slice of managed objects. The dst argument +// must be a pointer to a []interface{}, which is populated with the instances +// of the specified managed objects, with the relevant properties filled in. If +// the properties slice is nil, all properties are loaded. 
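+//
+// Illustrative usage (editor's sketch, not part of upstream govmomi): in
+// practice dst is usually a pointer to a slice of the matching mo type, which
+// mo.LoadRetrievePropertiesResponse also accepts, e.g.:
+//
+//     var vms []mo.VirtualMachine
+//     pc := DefaultCollector(c) // c is a *vim25.Client
+//     err := pc.Retrieve(ctx, refs, []string{"name", "runtime.powerState"}, &vms)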
+func (p *Collector) Retrieve(ctx context.Context, objs []types.ManagedObjectReference, ps []string, dst interface{}) error { + if len(objs) == 0 { + return errors.New("object references is empty") + } + + var propSpec *types.PropertySpec + var objectSet []types.ObjectSpec + + for _, obj := range objs { + // Ensure that all object reference types are the same + if propSpec == nil { + propSpec = &types.PropertySpec{ + Type: obj.Type, + } + + if ps == nil { + propSpec.All = types.NewBool(true) + } else { + propSpec.PathSet = ps + } + } else { + if obj.Type != propSpec.Type { + return errors.New("object references must have the same type") + } + } + + objectSpec := types.ObjectSpec{ + Obj: obj, + Skip: types.NewBool(false), + } + + objectSet = append(objectSet, objectSpec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: objectSet, + PropSet: []types.PropertySpec{*propSpec}, + }, + }, + } + + res, err := p.RetrieveProperties(ctx, req) + if err != nil { + return err + } + + if d, ok := dst.(*[]types.ObjectContent); ok { + *d = res.Returnval + return nil + } + + return mo.LoadRetrievePropertiesResponse(res, dst) +} + +// RetrieveWithFilter populates dst as Retrieve does, but only for entities matching the given filter. +func (p *Collector) RetrieveWithFilter(ctx context.Context, objs []types.ManagedObjectReference, ps []string, dst interface{}, filter Filter) error { + if len(filter) == 0 { + return p.Retrieve(ctx, objs, ps, dst) + } + + var content []types.ObjectContent + + err := p.Retrieve(ctx, objs, filter.Keys(), &content) + if err != nil { + return err + } + + objs = filter.MatchObjectContent(content) + + if len(objs) == 0 { + return nil + } + + return p.Retrieve(ctx, objs, ps, dst) +} + +// RetrieveOne calls Retrieve with a single managed object reference. +func (p *Collector) RetrieveOne(ctx context.Context, obj types.ManagedObjectReference, ps []string, dst interface{}) error { + var objs = []types.ManagedObjectReference{obj} + return p.Retrieve(ctx, objs, ps, dst) +} diff --git a/vendor/github.com/vmware/govmomi/property/filter.go b/vendor/github.com/vmware/govmomi/property/filter.go new file mode 100644 index 000000000000..a4bf16d0555d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/property/filter.go @@ -0,0 +1,139 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package property + +import ( + "fmt" + "path" + "reflect" + "strconv" + "strings" + + "github.com/vmware/govmomi/vim25/types" +) + +// Filter provides methods for matching against types.DynamicProperty +type Filter map[string]types.AnyType + +// Keys returns the Filter map keys as a []string +func (f Filter) Keys() []string { + keys := make([]string, 0, len(f)) + + for key := range f { + keys = append(keys, key) + } + + return keys +} + +// MatchProperty returns true if a Filter entry matches the given prop. 
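+//
+// Illustrative example (editor's sketch, not part of upstream govmomi): a
+// string filter value is treated as a path.Match pattern, so a glob matches:
+//
+//     f := Filter{"name": "web-*"}
+//     ok := f.MatchProperty(types.DynamicProperty{Name: "name", Val: "web-01"})
+//     // ok == true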
+func (f Filter) MatchProperty(prop types.DynamicProperty) bool { + match, ok := f[prop.Name] + if !ok { + return false + } + + if match == prop.Val { + return true + } + + ptype := reflect.TypeOf(prop.Val) + + if strings.HasPrefix(ptype.Name(), "ArrayOf") { + pval := reflect.ValueOf(prop.Val).Field(0) + + for i := 0; i < pval.Len(); i++ { + prop.Val = pval.Index(i).Interface() + + if f.MatchProperty(prop) { + return true + } + } + + return false + } + + if reflect.TypeOf(match) != ptype { + s, ok := match.(string) + if !ok { + return false + } + + // convert if we can + switch prop.Val.(type) { + case bool: + match, _ = strconv.ParseBool(s) + case int16: + x, _ := strconv.ParseInt(s, 10, 16) + match = int16(x) + case int32: + x, _ := strconv.ParseInt(s, 10, 32) + match = int32(x) + case int64: + match, _ = strconv.ParseInt(s, 10, 64) + case float32: + x, _ := strconv.ParseFloat(s, 32) + match = float32(x) + case float64: + match, _ = strconv.ParseFloat(s, 64) + case fmt.Stringer: + prop.Val = prop.Val.(fmt.Stringer).String() + default: + if ptype.Kind() != reflect.String { + return false + } + // An enum type we can convert to a string type + prop.Val = reflect.ValueOf(prop.Val).String() + } + } + + switch pval := prop.Val.(type) { + case string: + s := match.(string) + if s == "*" { + return true // TODO: path.Match fails if s contains a '/' + } + m, _ := path.Match(s, pval) + return m + default: + return reflect.DeepEqual(match, pval) + } +} + +// MatchPropertyList returns true if all given props match the Filter. +func (f Filter) MatchPropertyList(props []types.DynamicProperty) bool { + for _, p := range props { + if !f.MatchProperty(p) { + return false + } + } + + return len(f) == len(props) // false if a property such as VM "guest" is unset +} + +// MatchObjectContent returns a list of ObjectContent.Obj where the ObjectContent.PropSet matches the Filter. +func (f Filter) MatchObjectContent(objects []types.ObjectContent) []types.ManagedObjectReference { + var refs []types.ManagedObjectReference + + for _, o := range objects { + if f.MatchPropertyList(o.PropSet) { + refs = append(refs, o.Obj) + } + } + + return refs +} diff --git a/vendor/github.com/vmware/govmomi/property/wait.go b/vendor/github.com/vmware/govmomi/property/wait.go new file mode 100644 index 000000000000..fe847926ce9c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/property/wait.go @@ -0,0 +1,115 @@ +/* +Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package property + +import ( + "context" + + "github.com/vmware/govmomi/vim25/types" +) + +// WaitFilter provides helpers to construct a types.CreateFilter for use with property.Wait +type WaitFilter struct { + types.CreateFilter +} + +// Add a new ObjectSpec and PropertySpec to the WaitFilter +func (f *WaitFilter) Add(obj types.ManagedObjectReference, kind string, ps []string, set ...types.BaseSelectionSpec) *WaitFilter { + spec := types.ObjectSpec{ + Obj: obj, + SelectSet: set, + } + + pset := types.PropertySpec{ + Type: kind, + PathSet: ps, + } + + if len(ps) == 0 { + pset.All = types.NewBool(true) + } + + f.Spec.ObjectSet = append(f.Spec.ObjectSet, spec) + + f.Spec.PropSet = append(f.Spec.PropSet, pset) + + return f +} + +// Wait creates a new WaitFilter and calls the specified function for each ObjectUpdate via WaitForUpdates +func Wait(ctx context.Context, c *Collector, obj types.ManagedObjectReference, ps []string, f func([]types.PropertyChange) bool) error { + filter := new(WaitFilter).Add(obj, obj.Type, ps) + + return WaitForUpdates(ctx, c, filter, func(updates []types.ObjectUpdate) bool { + for _, update := range updates { + if f(update.ChangeSet) { + return true + } + } + + return false + }) +} + +// WaitForUpdates waits for any of the specified properties of the specified managed +// object to change. It calls the specified function for every update it +// receives. If this function returns false, it continues waiting for +// subsequent updates. If this function returns true, it stops waiting and +// returns. +// +// To only receive updates for the specified managed object, the function +// creates a new property collector and calls CreateFilter. A new property +// collector is required because filters can only be added, not removed. +// +// The newly created collector is destroyed before this function returns (both +// in case of success or error). +// +func WaitForUpdates(ctx context.Context, c *Collector, filter *WaitFilter, f func([]types.ObjectUpdate) bool) error { + p, err := c.Create(ctx) + if err != nil { + return err + } + + // Attempt to destroy the collector using the background context, as the + // specified context may have timed out or have been cancelled. + defer p.Destroy(context.Background()) + + err = p.CreateFilter(ctx, filter.CreateFilter) + if err != nil { + return err + } + + for version := ""; ; { + res, err := p.WaitForUpdates(ctx, version) + if err != nil { + return err + } + + // Retry if the result came back empty + if res == nil { + continue + } + + version = res.Version + + for _, fs := range res.FilterSet { + if f(fs.ObjectSet) { + return nil + } + } + } +} diff --git a/vendor/github.com/vmware/govmomi/session/keep_alive.go b/vendor/github.com/vmware/govmomi/session/keep_alive.go new file mode 100644 index 000000000000..a9d4c141c92c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/session/keep_alive.go @@ -0,0 +1,127 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package session + +import ( + "context" + "sync" + "time" + + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/soap" +) + +type keepAlive struct { + sync.Mutex + + roundTripper soap.RoundTripper + idleTime time.Duration + notifyRequest chan struct{} + notifyStop chan struct{} + notifyWaitGroup sync.WaitGroup + + // keepAlive executes a request in the background with the purpose of + // keeping the session active. The response for this request is discarded. + keepAlive func(soap.RoundTripper) error +} + +func defaultKeepAlive(roundTripper soap.RoundTripper) error { + _, _ = methods.GetCurrentTime(context.Background(), roundTripper) + return nil +} + +// KeepAlive wraps the specified soap.RoundTripper and executes a meaningless +// API request in the background after the RoundTripper has been idle for the +// specified amount of idle time. The keep alive process only starts once a +// user logs in and runs until the user logs out again. +func KeepAlive(roundTripper soap.RoundTripper, idleTime time.Duration) soap.RoundTripper { + return KeepAliveHandler(roundTripper, idleTime, defaultKeepAlive) +} + +// KeepAliveHandler works as KeepAlive() does, but the handler param can decide how to handle errors. +// For example, if connectivity to ESX/VC is down long enough for a session to expire, a handler can choose to +// Login() on a types.NotAuthenticated error. If handler returns non-nil, the keep alive go routine will be stopped. +func KeepAliveHandler(roundTripper soap.RoundTripper, idleTime time.Duration, handler func(soap.RoundTripper) error) soap.RoundTripper { + k := &keepAlive{ + roundTripper: roundTripper, + idleTime: idleTime, + notifyRequest: make(chan struct{}), + } + + k.keepAlive = handler + + return k +} + +func (k *keepAlive) start() { + k.Lock() + defer k.Unlock() + + if k.notifyStop != nil { + return + } + + // This channel must be closed to terminate idle timer. + k.notifyStop = make(chan struct{}) + k.notifyWaitGroup.Add(1) + + go func() { + defer k.notifyWaitGroup.Done() + + for t := time.NewTimer(k.idleTime); ; { + select { + case <-k.notifyStop: + return + case <-k.notifyRequest: + t.Reset(k.idleTime) + case <-t.C: + if err := k.keepAlive(k.roundTripper); err != nil { + k.stop() + } + t = time.NewTimer(k.idleTime) + } + } + }() +} + +func (k *keepAlive) stop() { + k.Lock() + defer k.Unlock() + + if k.notifyStop != nil { + close(k.notifyStop) + k.notifyWaitGroup.Wait() + k.notifyStop = nil + } +} + +func (k *keepAlive) RoundTrip(ctx context.Context, req, res soap.HasFault) error { + err := k.roundTripper.RoundTrip(ctx, req, res) + if err != nil { + return err + } + + // Start ticker on login, stop ticker on logout. + switch req.(type) { + case *methods.LoginBody, *methods.LoginExtensionByCertificateBody: + k.start() + case *methods.LogoutBody: + k.stop() + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/session/manager.go b/vendor/github.com/vmware/govmomi/session/manager.go new file mode 100644 index 000000000000..ad3219716a48 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/session/manager.go @@ -0,0 +1,267 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package session + +import ( + "context" + "net/http" + "net/url" + "os" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// Locale defaults to "en_US" and can be overridden via this var or the GOVMOMI_LOCALE env var. +// A value of "_" uses the server locale setting. +var Locale = os.Getenv("GOVMOMI_LOCALE") + +func init() { + if Locale == "_" { + Locale = "" + } else if Locale == "" { + Locale = "en_US" + } +} + +type Manager struct { + client *vim25.Client + userSession *types.UserSession +} + +func NewManager(client *vim25.Client) *Manager { + m := Manager{ + client: client, + } + + return &m +} + +func (sm Manager) Reference() types.ManagedObjectReference { + return *sm.client.ServiceContent.SessionManager +} + +func (sm *Manager) SetLocale(ctx context.Context, locale string) error { + req := types.SetLocale{ + This: sm.Reference(), + Locale: locale, + } + + _, err := methods.SetLocale(ctx, sm.client, &req) + return err +} + +func (sm *Manager) Login(ctx context.Context, u *url.Userinfo) error { + req := types.Login{ + This: sm.Reference(), + Locale: Locale, + } + + if u != nil { + req.UserName = u.Username() + if pw, ok := u.Password(); ok { + req.Password = pw + } + } + + login, err := methods.Login(ctx, sm.client, &req) + if err != nil { + return err + } + + sm.userSession = &login.Returnval + return nil +} + +// LoginExtensionByCertificate uses the vCenter SDK tunnel to login using a client certificate. +// The client certificate can be set using the soap.Client.SetCertificate method. +// See: https://kb.vmware.com/s/article/2004305 +func (sm *Manager) LoginExtensionByCertificate(ctx context.Context, key string) error { + c := sm.client + u := c.URL() + if u.Hostname() != "sdkTunnel" { + sc := c.Tunnel() + c = &vim25.Client{ + Client: sc, + RoundTripper: sc, + ServiceContent: c.ServiceContent, + } + // When http.Transport.Proxy is used, our thumbprint checker is bypassed, resulting in: + // "Post https://sdkTunnel:8089/sdk: x509: certificate is valid for $vcenter_hostname, not sdkTunnel" + // The only easy way around this is to disable verification for the call to LoginExtensionByCertificate(). + // TODO: find a way to avoid disabling InsecureSkipVerify. 
+ c.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify = true + } + + req := types.LoginExtensionByCertificate{ + This: sm.Reference(), + ExtensionKey: key, + Locale: Locale, + } + + login, err := methods.LoginExtensionByCertificate(ctx, c, &req) + if err != nil { + return err + } + + // Copy the session cookie + sm.client.Jar.SetCookies(u, c.Jar.Cookies(c.URL())) + + sm.userSession = &login.Returnval + return nil +} + +func (sm *Manager) LoginByToken(ctx context.Context) error { + req := types.LoginByToken{ + This: sm.Reference(), + Locale: Locale, + } + + login, err := methods.LoginByToken(ctx, sm.client, &req) + if err != nil { + return err + } + + sm.userSession = &login.Returnval + return nil +} + +func (sm *Manager) Logout(ctx context.Context) error { + req := types.Logout{ + This: sm.Reference(), + } + + _, err := methods.Logout(ctx, sm.client, &req) + if err != nil { + return err + } + + sm.userSession = nil + return nil +} + +// UserSession retrieves and returns the SessionManager's CurrentSession field. +// Nil is returned if the session is not authenticated. +func (sm *Manager) UserSession(ctx context.Context) (*types.UserSession, error) { + var mgr mo.SessionManager + + pc := property.DefaultCollector(sm.client) + err := pc.RetrieveOne(ctx, sm.Reference(), []string{"currentSession"}, &mgr) + if err != nil { + // It's OK if we can't retrieve properties because we're not authenticated + if f, ok := err.(types.HasFault); ok { + switch f.Fault().(type) { + case *types.NotAuthenticated: + return nil, nil + } + } + + return nil, err + } + + return mgr.CurrentSession, nil +} + +func (sm *Manager) TerminateSession(ctx context.Context, sessionId []string) error { + req := types.TerminateSession{ + This: sm.Reference(), + SessionId: sessionId, + } + + _, err := methods.TerminateSession(ctx, sm.client, &req) + return err +} + +// SessionIsActive checks whether the session that was created at login is +// still valid. This function only works against vCenter. 
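+//
+// Illustrative usage (editor's sketch, not part of upstream govmomi), where
+// m is a *Manager that previously logged in and "user"/"pass" are placeholder
+// credentials:
+//
+//     active, err := m.SessionIsActive(ctx)
+//     if err == nil && !active {
+//         err = m.Login(ctx, url.UserPassword("user", "pass"))
+//     }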
+func (sm *Manager) SessionIsActive(ctx context.Context) (bool, error) { + if sm.userSession == nil { + return false, nil + } + + req := types.SessionIsActive{ + This: sm.Reference(), + SessionID: sm.userSession.Key, + UserName: sm.userSession.UserName, + } + + active, err := methods.SessionIsActive(ctx, sm.client, &req) + if err != nil { + return false, err + } + + return active.Returnval, err +} + +func (sm *Manager) AcquireGenericServiceTicket(ctx context.Context, spec types.BaseSessionManagerServiceRequestSpec) (*types.SessionManagerGenericServiceTicket, error) { + req := types.AcquireGenericServiceTicket{ + This: sm.Reference(), + Spec: spec, + } + + res, err := methods.AcquireGenericServiceTicket(ctx, sm.client, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (sm *Manager) AcquireLocalTicket(ctx context.Context, userName string) (*types.SessionManagerLocalTicket, error) { + req := types.AcquireLocalTicket{ + This: sm.Reference(), + UserName: userName, + } + + res, err := methods.AcquireLocalTicket(ctx, sm.client, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} + +func (sm *Manager) AcquireCloneTicket(ctx context.Context) (string, error) { + req := types.AcquireCloneTicket{ + This: sm.Reference(), + } + + res, err := methods.AcquireCloneTicket(ctx, sm.client, &req) + if err != nil { + return "", err + } + + return res.Returnval, nil +} + +func (sm *Manager) CloneSession(ctx context.Context, ticket string) error { + req := types.CloneSession{ + This: sm.Reference(), + CloneTicket: ticket, + } + + res, err := methods.CloneSession(ctx, sm.client, &req) + if err != nil { + return err + } + + sm.userSession = &res.Returnval + return nil +} diff --git a/vendor/github.com/vmware/govmomi/task/error.go b/vendor/github.com/vmware/govmomi/task/error.go new file mode 100644 index 000000000000..5f6b8503f5bc --- /dev/null +++ b/vendor/github.com/vmware/govmomi/task/error.go @@ -0,0 +1,32 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package task + +import "github.com/vmware/govmomi/vim25/types" + +type Error struct { + *types.LocalizedMethodFault +} + +// Error returns the task's localized fault message. +func (e Error) Error() string { + return e.LocalizedMethodFault.LocalizedMessage +} + +func (e Error) Fault() types.BaseMethodFault { + return e.LocalizedMethodFault.Fault +} diff --git a/vendor/github.com/vmware/govmomi/task/wait.go b/vendor/github.com/vmware/govmomi/task/wait.go new file mode 100644 index 000000000000..19fee5384636 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/task/wait.go @@ -0,0 +1,132 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package task + +import ( + "context" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25/progress" + "github.com/vmware/govmomi/vim25/types" +) + +type taskProgress struct { + info *types.TaskInfo +} + +func (t taskProgress) Percentage() float32 { + return float32(t.info.Progress) +} + +func (t taskProgress) Detail() string { + return "" +} + +func (t taskProgress) Error() error { + if t.info.Error != nil { + return Error{t.info.Error} + } + + return nil +} + +type taskCallback struct { + ch chan<- progress.Report + info *types.TaskInfo + err error +} + +func (t *taskCallback) fn(pc []types.PropertyChange) bool { + for _, c := range pc { + if c.Name != "info" { + continue + } + + if c.Op != types.PropertyChangeOpAssign { + continue + } + + if c.Val == nil { + continue + } + + ti := c.Val.(types.TaskInfo) + t.info = &ti + } + + // t.info could be nil if pc can't satify the rules above + if t.info == nil { + return false + } + + pr := taskProgress{t.info} + + // Store copy of error, so Wait() can return it as well. + t.err = pr.Error() + + switch t.info.State { + case types.TaskInfoStateQueued, types.TaskInfoStateRunning: + if t.ch != nil { + // Don't care if this is dropped + select { + case t.ch <- pr: + default: + } + } + return false + case types.TaskInfoStateSuccess, types.TaskInfoStateError: + if t.ch != nil { + // Last one must always be delivered + t.ch <- pr + } + return true + default: + panic("unknown state: " + t.info.State) + } +} + +// Wait waits for a task to finish with either success or failure. It does so +// by waiting for the "info" property of task managed object to change. The +// function returns when it finds the task in the "success" or "error" state. +// In the former case, the return value is nil. In the latter case the return +// value is an instance of this package's Error struct. +// +// Any error returned while waiting for property changes causes the function to +// return immediately and propagate the error. +// +// If the progress.Sinker argument is specified, any progress updates for the +// task are sent here. The completion percentage is passed through directly. +// The detail for the progress update is set to an empty string. If the task +// finishes in the error state, the error instance is passed through as well. +// Note that this error is the same error that is returned by this function. +// +func Wait(ctx context.Context, ref types.ManagedObjectReference, pc *property.Collector, s progress.Sinker) (*types.TaskInfo, error) { + cb := &taskCallback{} + + // Include progress sink if specified + if s != nil { + cb.ch = s.Sink() + defer close(cb.ch) + } + + err := property.Wait(ctx, pc, ref, []string{"info"}, cb.fn) + if err != nil { + return nil, err + } + + return cb.info, cb.err +} diff --git a/vendor/github.com/vmware/govmomi/vim25/client.go b/vendor/github.com/vmware/govmomi/vim25/client.go new file mode 100644 index 000000000000..1d26fb8b6a7f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/client.go @@ -0,0 +1,146 @@ +/* +Copyright (c) 2015-2016 VMware, Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vim25 + +import ( + "context" + "encoding/json" + "strings" + + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +const ( + Namespace = "vim25" + Version = "6.7" + Path = "/sdk" +) + +var ( + ServiceInstance = types.ManagedObjectReference{ + Type: "ServiceInstance", + Value: "ServiceInstance", + } +) + +// Client is a tiny wrapper around the vim25/soap Client that stores session +// specific state (i.e. state that only needs to be retrieved once after the +// client has been created). This means the client can be reused after +// serialization without performing additional requests for initialization. +type Client struct { + *soap.Client + + ServiceContent types.ServiceContent + + // RoundTripper is a separate field such that the client's implementation of + // the RoundTripper interface can be wrapped by separate implementations for + // extra functionality (for example, reauthentication on session timeout). + RoundTripper soap.RoundTripper +} + +// NewClient creates and returns a new client wirh the ServiceContent field +// filled in. +func NewClient(ctx context.Context, rt soap.RoundTripper) (*Client, error) { + c := Client{ + RoundTripper: rt, + } + + // Set client if it happens to be a soap.Client + if sc, ok := rt.(*soap.Client); ok { + c.Client = sc + + if c.Namespace == "" { + c.Namespace = "urn:" + Namespace + } else if strings.Index(c.Namespace, ":") < 0 { + c.Namespace = "urn:" + c.Namespace // ensure valid URI format + } + if c.Version == "" { + c.Version = Version + } + } + + var err error + c.ServiceContent, err = methods.GetServiceContent(ctx, rt) + if err != nil { + return nil, err + } + + return &c, nil +} + +// RoundTrip dispatches to the RoundTripper field. +func (c *Client) RoundTrip(ctx context.Context, req, res soap.HasFault) error { + return c.RoundTripper.RoundTrip(ctx, req, res) +} + +type marshaledClient struct { + SoapClient *soap.Client + ServiceContent types.ServiceContent +} + +func (c *Client) MarshalJSON() ([]byte, error) { + m := marshaledClient{ + SoapClient: c.Client, + ServiceContent: c.ServiceContent, + } + + return json.Marshal(m) +} + +func (c *Client) UnmarshalJSON(b []byte) error { + var m marshaledClient + + err := json.Unmarshal(b, &m) + if err != nil { + return err + } + + *c = Client{ + Client: m.SoapClient, + ServiceContent: m.ServiceContent, + RoundTripper: m.SoapClient, + } + + return nil +} + +// Valid returns whether or not the client is valid and ready for use. +// This should be called after unmarshalling the client. +func (c *Client) Valid() bool { + if c == nil { + return false + } + + if c.Client == nil { + return false + } + + // Use arbitrary pointer field in the service content. + // Doesn't matter which one, as long as it is populated by default. 
+ if c.ServiceContent.SessionManager == nil { + return false + } + + return true +} + +// IsVC returns true if we are connected to a vCenter +func (c *Client) IsVC() bool { + return c.ServiceContent.About.ApiType == "VirtualCenter" +} diff --git a/vendor/github.com/vmware/govmomi/vim25/debug/debug.go b/vendor/github.com/vmware/govmomi/vim25/debug/debug.go new file mode 100644 index 000000000000..22d547178466 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/debug/debug.go @@ -0,0 +1,81 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package debug + +import ( + "io" + "os" + "path" +) + +// Provider specified the interface types must implement to be used as a +// debugging sink. Having multiple such sink implementations allows it to be +// changed externally (for example when running tests). +type Provider interface { + NewFile(s string) io.WriteCloser + Flush() +} + +var currentProvider Provider = nil + +func SetProvider(p Provider) { + if currentProvider != nil { + currentProvider.Flush() + } + currentProvider = p +} + +// Enabled returns whether debugging is enabled or not. +func Enabled() bool { + return currentProvider != nil +} + +// NewFile dispatches to the current provider's NewFile function. +func NewFile(s string) io.WriteCloser { + return currentProvider.NewFile(s) +} + +// Flush dispatches to the current provider's Flush function. +func Flush() { + currentProvider.Flush() +} + +// FileProvider implements a debugging provider that creates a real file for +// every call to NewFile. It maintains a list of all files that it creates, +// such that it can close them when its Flush function is called. +type FileProvider struct { + Path string + + files []*os.File +} + +func (fp *FileProvider) NewFile(p string) io.WriteCloser { + f, err := os.Create(path.Join(fp.Path, p)) + if err != nil { + panic(err) + } + + fp.files = append(fp.files, f) + + return f +} + +func (fp *FileProvider) Flush() { + for _, f := range fp.files { + f.Close() + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/doc.go b/vendor/github.com/vmware/govmomi/vim25/doc.go new file mode 100644 index 000000000000..acb2c9f64dde --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/doc.go @@ -0,0 +1,29 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package vim25 provides a minimal client implementation to use with other +packages in the vim25 tree. 
The code in this package intentionally does not +take any dependendies outside the vim25 tree. + +The client implementation in this package embeds the soap.Client structure. +Additionally, it stores the value of the session's ServiceContent object. This +object stores references to a variety of subsystems, such as the root property +collector, the session manager, and the search index. The client is fully +functional after serialization and deserialization, without the need for +additional requests for initialization. +*/ +package vim25 diff --git a/vendor/github.com/vmware/govmomi/vim25/methods/methods.go b/vendor/github.com/vmware/govmomi/vim25/methods/methods.go new file mode 100644 index 000000000000..c3ad23eefb46 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/methods/methods.go @@ -0,0 +1,17284 @@ +/* +Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package methods + +import ( + "context" + + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type AbdicateDomOwnershipBody struct { + Req *types.AbdicateDomOwnership `xml:"urn:vim25 AbdicateDomOwnership,omitempty"` + Res *types.AbdicateDomOwnershipResponse `xml:"urn:vim25 AbdicateDomOwnershipResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AbdicateDomOwnershipBody) Fault() *soap.Fault { return b.Fault_ } + +func AbdicateDomOwnership(ctx context.Context, r soap.RoundTripper, req *types.AbdicateDomOwnership) (*types.AbdicateDomOwnershipResponse, error) { + var reqBody, resBody AbdicateDomOwnershipBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcknowledgeAlarmBody struct { + Req *types.AcknowledgeAlarm `xml:"urn:vim25 AcknowledgeAlarm,omitempty"` + Res *types.AcknowledgeAlarmResponse `xml:"urn:vim25 AcknowledgeAlarmResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcknowledgeAlarmBody) Fault() *soap.Fault { return b.Fault_ } + +func AcknowledgeAlarm(ctx context.Context, r soap.RoundTripper, req *types.AcknowledgeAlarm) (*types.AcknowledgeAlarmResponse, error) { + var reqBody, resBody AcknowledgeAlarmBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcquireCimServicesTicketBody struct { + Req *types.AcquireCimServicesTicket `xml:"urn:vim25 AcquireCimServicesTicket,omitempty"` + Res *types.AcquireCimServicesTicketResponse `xml:"urn:vim25 AcquireCimServicesTicketResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcquireCimServicesTicketBody) Fault() *soap.Fault { return b.Fault_ } + +func AcquireCimServicesTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireCimServicesTicket) (*types.AcquireCimServicesTicketResponse, 
error) { + var reqBody, resBody AcquireCimServicesTicketBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcquireCloneTicketBody struct { + Req *types.AcquireCloneTicket `xml:"urn:vim25 AcquireCloneTicket,omitempty"` + Res *types.AcquireCloneTicketResponse `xml:"urn:vim25 AcquireCloneTicketResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcquireCloneTicketBody) Fault() *soap.Fault { return b.Fault_ } + +func AcquireCloneTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireCloneTicket) (*types.AcquireCloneTicketResponse, error) { + var reqBody, resBody AcquireCloneTicketBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcquireCredentialsInGuestBody struct { + Req *types.AcquireCredentialsInGuest `xml:"urn:vim25 AcquireCredentialsInGuest,omitempty"` + Res *types.AcquireCredentialsInGuestResponse `xml:"urn:vim25 AcquireCredentialsInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcquireCredentialsInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func AcquireCredentialsInGuest(ctx context.Context, r soap.RoundTripper, req *types.AcquireCredentialsInGuest) (*types.AcquireCredentialsInGuestResponse, error) { + var reqBody, resBody AcquireCredentialsInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcquireGenericServiceTicketBody struct { + Req *types.AcquireGenericServiceTicket `xml:"urn:vim25 AcquireGenericServiceTicket,omitempty"` + Res *types.AcquireGenericServiceTicketResponse `xml:"urn:vim25 AcquireGenericServiceTicketResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcquireGenericServiceTicketBody) Fault() *soap.Fault { return b.Fault_ } + +func AcquireGenericServiceTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireGenericServiceTicket) (*types.AcquireGenericServiceTicketResponse, error) { + var reqBody, resBody AcquireGenericServiceTicketBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcquireLocalTicketBody struct { + Req *types.AcquireLocalTicket `xml:"urn:vim25 AcquireLocalTicket,omitempty"` + Res *types.AcquireLocalTicketResponse `xml:"urn:vim25 AcquireLocalTicketResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcquireLocalTicketBody) Fault() *soap.Fault { return b.Fault_ } + +func AcquireLocalTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireLocalTicket) (*types.AcquireLocalTicketResponse, error) { + var reqBody, resBody AcquireLocalTicketBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcquireMksTicketBody struct { + Req *types.AcquireMksTicket `xml:"urn:vim25 AcquireMksTicket,omitempty"` + Res *types.AcquireMksTicketResponse `xml:"urn:vim25 AcquireMksTicketResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcquireMksTicketBody) Fault() *soap.Fault { 
return b.Fault_ } + +func AcquireMksTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireMksTicket) (*types.AcquireMksTicketResponse, error) { + var reqBody, resBody AcquireMksTicketBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AcquireTicketBody struct { + Req *types.AcquireTicket `xml:"urn:vim25 AcquireTicket,omitempty"` + Res *types.AcquireTicketResponse `xml:"urn:vim25 AcquireTicketResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AcquireTicketBody) Fault() *soap.Fault { return b.Fault_ } + +func AcquireTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireTicket) (*types.AcquireTicketResponse, error) { + var reqBody, resBody AcquireTicketBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddAuthorizationRoleBody struct { + Req *types.AddAuthorizationRole `xml:"urn:vim25 AddAuthorizationRole,omitempty"` + Res *types.AddAuthorizationRoleResponse `xml:"urn:vim25 AddAuthorizationRoleResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddAuthorizationRoleBody) Fault() *soap.Fault { return b.Fault_ } + +func AddAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *types.AddAuthorizationRole) (*types.AddAuthorizationRoleResponse, error) { + var reqBody, resBody AddAuthorizationRoleBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddCustomFieldDefBody struct { + Req *types.AddCustomFieldDef `xml:"urn:vim25 AddCustomFieldDef,omitempty"` + Res *types.AddCustomFieldDefResponse `xml:"urn:vim25 AddCustomFieldDefResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddCustomFieldDefBody) Fault() *soap.Fault { return b.Fault_ } + +func AddCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.AddCustomFieldDef) (*types.AddCustomFieldDefResponse, error) { + var reqBody, resBody AddCustomFieldDefBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddDVPortgroup_TaskBody struct { + Req *types.AddDVPortgroup_Task `xml:"urn:vim25 AddDVPortgroup_Task,omitempty"` + Res *types.AddDVPortgroup_TaskResponse `xml:"urn:vim25 AddDVPortgroup_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddDVPortgroup_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func AddDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req *types.AddDVPortgroup_Task) (*types.AddDVPortgroup_TaskResponse, error) { + var reqBody, resBody AddDVPortgroup_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddDisks_TaskBody struct { + Req *types.AddDisks_Task `xml:"urn:vim25 AddDisks_Task,omitempty"` + Res *types.AddDisks_TaskResponse `xml:"urn:vim25 AddDisks_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddDisks_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func AddDisks_Task(ctx context.Context, r 
soap.RoundTripper, req *types.AddDisks_Task) (*types.AddDisks_TaskResponse, error) { + var reqBody, resBody AddDisks_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddFilterBody struct { + Req *types.AddFilter `xml:"urn:vim25 AddFilter,omitempty"` + Res *types.AddFilterResponse `xml:"urn:vim25 AddFilterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddFilterBody) Fault() *soap.Fault { return b.Fault_ } + +func AddFilter(ctx context.Context, r soap.RoundTripper, req *types.AddFilter) (*types.AddFilterResponse, error) { + var reqBody, resBody AddFilterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddFilterEntitiesBody struct { + Req *types.AddFilterEntities `xml:"urn:vim25 AddFilterEntities,omitempty"` + Res *types.AddFilterEntitiesResponse `xml:"urn:vim25 AddFilterEntitiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddFilterEntitiesBody) Fault() *soap.Fault { return b.Fault_ } + +func AddFilterEntities(ctx context.Context, r soap.RoundTripper, req *types.AddFilterEntities) (*types.AddFilterEntitiesResponse, error) { + var reqBody, resBody AddFilterEntitiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddGuestAliasBody struct { + Req *types.AddGuestAlias `xml:"urn:vim25 AddGuestAlias,omitempty"` + Res *types.AddGuestAliasResponse `xml:"urn:vim25 AddGuestAliasResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddGuestAliasBody) Fault() *soap.Fault { return b.Fault_ } + +func AddGuestAlias(ctx context.Context, r soap.RoundTripper, req *types.AddGuestAlias) (*types.AddGuestAliasResponse, error) { + var reqBody, resBody AddGuestAliasBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddHost_TaskBody struct { + Req *types.AddHost_Task `xml:"urn:vim25 AddHost_Task,omitempty"` + Res *types.AddHost_TaskResponse `xml:"urn:vim25 AddHost_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func AddHost_Task(ctx context.Context, r soap.RoundTripper, req *types.AddHost_Task) (*types.AddHost_TaskResponse, error) { + var reqBody, resBody AddHost_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddInternetScsiSendTargetsBody struct { + Req *types.AddInternetScsiSendTargets `xml:"urn:vim25 AddInternetScsiSendTargets,omitempty"` + Res *types.AddInternetScsiSendTargetsResponse `xml:"urn:vim25 AddInternetScsiSendTargetsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddInternetScsiSendTargetsBody) Fault() *soap.Fault { return b.Fault_ } + +func AddInternetScsiSendTargets(ctx context.Context, r soap.RoundTripper, req *types.AddInternetScsiSendTargets) (*types.AddInternetScsiSendTargetsResponse, error) { + var reqBody, resBody AddInternetScsiSendTargetsBody + + 
reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddInternetScsiStaticTargetsBody struct { + Req *types.AddInternetScsiStaticTargets `xml:"urn:vim25 AddInternetScsiStaticTargets,omitempty"` + Res *types.AddInternetScsiStaticTargetsResponse `xml:"urn:vim25 AddInternetScsiStaticTargetsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddInternetScsiStaticTargetsBody) Fault() *soap.Fault { return b.Fault_ } + +func AddInternetScsiStaticTargets(ctx context.Context, r soap.RoundTripper, req *types.AddInternetScsiStaticTargets) (*types.AddInternetScsiStaticTargetsResponse, error) { + var reqBody, resBody AddInternetScsiStaticTargetsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddKeyBody struct { + Req *types.AddKey `xml:"urn:vim25 AddKey,omitempty"` + Res *types.AddKeyResponse `xml:"urn:vim25 AddKeyResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddKeyBody) Fault() *soap.Fault { return b.Fault_ } + +func AddKey(ctx context.Context, r soap.RoundTripper, req *types.AddKey) (*types.AddKeyResponse, error) { + var reqBody, resBody AddKeyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddKeysBody struct { + Req *types.AddKeys `xml:"urn:vim25 AddKeys,omitempty"` + Res *types.AddKeysResponse `xml:"urn:vim25 AddKeysResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddKeysBody) Fault() *soap.Fault { return b.Fault_ } + +func AddKeys(ctx context.Context, r soap.RoundTripper, req *types.AddKeys) (*types.AddKeysResponse, error) { + var reqBody, resBody AddKeysBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddLicenseBody struct { + Req *types.AddLicense `xml:"urn:vim25 AddLicense,omitempty"` + Res *types.AddLicenseResponse `xml:"urn:vim25 AddLicenseResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddLicenseBody) Fault() *soap.Fault { return b.Fault_ } + +func AddLicense(ctx context.Context, r soap.RoundTripper, req *types.AddLicense) (*types.AddLicenseResponse, error) { + var reqBody, resBody AddLicenseBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddMonitoredEntitiesBody struct { + Req *types.AddMonitoredEntities `xml:"urn:vim25 AddMonitoredEntities,omitempty"` + Res *types.AddMonitoredEntitiesResponse `xml:"urn:vim25 AddMonitoredEntitiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddMonitoredEntitiesBody) Fault() *soap.Fault { return b.Fault_ } + +func AddMonitoredEntities(ctx context.Context, r soap.RoundTripper, req *types.AddMonitoredEntities) (*types.AddMonitoredEntitiesResponse, error) { + var reqBody, resBody AddMonitoredEntitiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddNetworkResourcePoolBody struct { + Req 
*types.AddNetworkResourcePool `xml:"urn:vim25 AddNetworkResourcePool,omitempty"` + Res *types.AddNetworkResourcePoolResponse `xml:"urn:vim25 AddNetworkResourcePoolResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddNetworkResourcePoolBody) Fault() *soap.Fault { return b.Fault_ } + +func AddNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *types.AddNetworkResourcePool) (*types.AddNetworkResourcePoolResponse, error) { + var reqBody, resBody AddNetworkResourcePoolBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddPortGroupBody struct { + Req *types.AddPortGroup `xml:"urn:vim25 AddPortGroup,omitempty"` + Res *types.AddPortGroupResponse `xml:"urn:vim25 AddPortGroupResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddPortGroupBody) Fault() *soap.Fault { return b.Fault_ } + +func AddPortGroup(ctx context.Context, r soap.RoundTripper, req *types.AddPortGroup) (*types.AddPortGroupResponse, error) { + var reqBody, resBody AddPortGroupBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddServiceConsoleVirtualNicBody struct { + Req *types.AddServiceConsoleVirtualNic `xml:"urn:vim25 AddServiceConsoleVirtualNic,omitempty"` + Res *types.AddServiceConsoleVirtualNicResponse `xml:"urn:vim25 AddServiceConsoleVirtualNicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddServiceConsoleVirtualNicBody) Fault() *soap.Fault { return b.Fault_ } + +func AddServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.AddServiceConsoleVirtualNic) (*types.AddServiceConsoleVirtualNicResponse, error) { + var reqBody, resBody AddServiceConsoleVirtualNicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddStandaloneHost_TaskBody struct { + Req *types.AddStandaloneHost_Task `xml:"urn:vim25 AddStandaloneHost_Task,omitempty"` + Res *types.AddStandaloneHost_TaskResponse `xml:"urn:vim25 AddStandaloneHost_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddStandaloneHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func AddStandaloneHost_Task(ctx context.Context, r soap.RoundTripper, req *types.AddStandaloneHost_Task) (*types.AddStandaloneHost_TaskResponse, error) { + var reqBody, resBody AddStandaloneHost_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AddVirtualNicBody struct { + Req *types.AddVirtualNic `xml:"urn:vim25 AddVirtualNic,omitempty"` + Res *types.AddVirtualNicResponse `xml:"urn:vim25 AddVirtualNicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddVirtualNicBody) Fault() *soap.Fault { return b.Fault_ } + +func AddVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.AddVirtualNic) (*types.AddVirtualNicResponse, error) { + var reqBody, resBody AddVirtualNicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return 
resBody.Res, nil +} + +type AddVirtualSwitchBody struct { + Req *types.AddVirtualSwitch `xml:"urn:vim25 AddVirtualSwitch,omitempty"` + Res *types.AddVirtualSwitchResponse `xml:"urn:vim25 AddVirtualSwitchResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AddVirtualSwitchBody) Fault() *soap.Fault { return b.Fault_ } + +func AddVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.AddVirtualSwitch) (*types.AddVirtualSwitchResponse, error) { + var reqBody, resBody AddVirtualSwitchBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AllocateIpv4AddressBody struct { + Req *types.AllocateIpv4Address `xml:"urn:vim25 AllocateIpv4Address,omitempty"` + Res *types.AllocateIpv4AddressResponse `xml:"urn:vim25 AllocateIpv4AddressResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AllocateIpv4AddressBody) Fault() *soap.Fault { return b.Fault_ } + +func AllocateIpv4Address(ctx context.Context, r soap.RoundTripper, req *types.AllocateIpv4Address) (*types.AllocateIpv4AddressResponse, error) { + var reqBody, resBody AllocateIpv4AddressBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AllocateIpv6AddressBody struct { + Req *types.AllocateIpv6Address `xml:"urn:vim25 AllocateIpv6Address,omitempty"` + Res *types.AllocateIpv6AddressResponse `xml:"urn:vim25 AllocateIpv6AddressResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AllocateIpv6AddressBody) Fault() *soap.Fault { return b.Fault_ } + +func AllocateIpv6Address(ctx context.Context, r soap.RoundTripper, req *types.AllocateIpv6Address) (*types.AllocateIpv6AddressResponse, error) { + var reqBody, resBody AllocateIpv6AddressBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AnswerVMBody struct { + Req *types.AnswerVM `xml:"urn:vim25 AnswerVM,omitempty"` + Res *types.AnswerVMResponse `xml:"urn:vim25 AnswerVMResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AnswerVMBody) Fault() *soap.Fault { return b.Fault_ } + +func AnswerVM(ctx context.Context, r soap.RoundTripper, req *types.AnswerVM) (*types.AnswerVMResponse, error) { + var reqBody, resBody AnswerVMBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ApplyEntitiesConfig_TaskBody struct { + Req *types.ApplyEntitiesConfig_Task `xml:"urn:vim25 ApplyEntitiesConfig_Task,omitempty"` + Res *types.ApplyEntitiesConfig_TaskResponse `xml:"urn:vim25 ApplyEntitiesConfig_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ApplyEntitiesConfig_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ApplyEntitiesConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyEntitiesConfig_Task) (*types.ApplyEntitiesConfig_TaskResponse, error) { + var reqBody, resBody ApplyEntitiesConfig_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type 
ApplyEvcModeVM_TaskBody struct { + Req *types.ApplyEvcModeVM_Task `xml:"urn:vim25 ApplyEvcModeVM_Task,omitempty"` + Res *types.ApplyEvcModeVM_TaskResponse `xml:"urn:vim25 ApplyEvcModeVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ApplyEvcModeVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ApplyEvcModeVM_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyEvcModeVM_Task) (*types.ApplyEvcModeVM_TaskResponse, error) { + var reqBody, resBody ApplyEvcModeVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ApplyHostConfig_TaskBody struct { + Req *types.ApplyHostConfig_Task `xml:"urn:vim25 ApplyHostConfig_Task,omitempty"` + Res *types.ApplyHostConfig_TaskResponse `xml:"urn:vim25 ApplyHostConfig_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ApplyHostConfig_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ApplyHostConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyHostConfig_Task) (*types.ApplyHostConfig_TaskResponse, error) { + var reqBody, resBody ApplyHostConfig_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ApplyRecommendationBody struct { + Req *types.ApplyRecommendation `xml:"urn:vim25 ApplyRecommendation,omitempty"` + Res *types.ApplyRecommendationResponse `xml:"urn:vim25 ApplyRecommendationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ApplyRecommendationBody) Fault() *soap.Fault { return b.Fault_ } + +func ApplyRecommendation(ctx context.Context, r soap.RoundTripper, req *types.ApplyRecommendation) (*types.ApplyRecommendationResponse, error) { + var reqBody, resBody ApplyRecommendationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ApplyStorageDrsRecommendationToPod_TaskBody struct { + Req *types.ApplyStorageDrsRecommendationToPod_Task `xml:"urn:vim25 ApplyStorageDrsRecommendationToPod_Task,omitempty"` + Res *types.ApplyStorageDrsRecommendationToPod_TaskResponse `xml:"urn:vim25 ApplyStorageDrsRecommendationToPod_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ApplyStorageDrsRecommendationToPod_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ApplyStorageDrsRecommendationToPod_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyStorageDrsRecommendationToPod_Task) (*types.ApplyStorageDrsRecommendationToPod_TaskResponse, error) { + var reqBody, resBody ApplyStorageDrsRecommendationToPod_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ApplyStorageDrsRecommendation_TaskBody struct { + Req *types.ApplyStorageDrsRecommendation_Task `xml:"urn:vim25 ApplyStorageDrsRecommendation_Task,omitempty"` + Res *types.ApplyStorageDrsRecommendation_TaskResponse `xml:"urn:vim25 ApplyStorageDrsRecommendation_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ApplyStorageDrsRecommendation_TaskBody) Fault() *soap.Fault { return 
b.Fault_ } + +func ApplyStorageDrsRecommendation_Task(ctx context.Context, r soap.RoundTripper, req *types.ApplyStorageDrsRecommendation_Task) (*types.ApplyStorageDrsRecommendation_TaskResponse, error) { + var reqBody, resBody ApplyStorageDrsRecommendation_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AreAlarmActionsEnabledBody struct { + Req *types.AreAlarmActionsEnabled `xml:"urn:vim25 AreAlarmActionsEnabled,omitempty"` + Res *types.AreAlarmActionsEnabledResponse `xml:"urn:vim25 AreAlarmActionsEnabledResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AreAlarmActionsEnabledBody) Fault() *soap.Fault { return b.Fault_ } + +func AreAlarmActionsEnabled(ctx context.Context, r soap.RoundTripper, req *types.AreAlarmActionsEnabled) (*types.AreAlarmActionsEnabledResponse, error) { + var reqBody, resBody AreAlarmActionsEnabledBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AssignUserToGroupBody struct { + Req *types.AssignUserToGroup `xml:"urn:vim25 AssignUserToGroup,omitempty"` + Res *types.AssignUserToGroupResponse `xml:"urn:vim25 AssignUserToGroupResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AssignUserToGroupBody) Fault() *soap.Fault { return b.Fault_ } + +func AssignUserToGroup(ctx context.Context, r soap.RoundTripper, req *types.AssignUserToGroup) (*types.AssignUserToGroupResponse, error) { + var reqBody, resBody AssignUserToGroupBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AssociateProfileBody struct { + Req *types.AssociateProfile `xml:"urn:vim25 AssociateProfile,omitempty"` + Res *types.AssociateProfileResponse `xml:"urn:vim25 AssociateProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AssociateProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func AssociateProfile(ctx context.Context, r soap.RoundTripper, req *types.AssociateProfile) (*types.AssociateProfileResponse, error) { + var reqBody, resBody AssociateProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AttachDisk_TaskBody struct { + Req *types.AttachDisk_Task `xml:"urn:vim25 AttachDisk_Task,omitempty"` + Res *types.AttachDisk_TaskResponse `xml:"urn:vim25 AttachDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AttachDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func AttachDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.AttachDisk_Task) (*types.AttachDisk_TaskResponse, error) { + var reqBody, resBody AttachDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AttachScsiLunBody struct { + Req *types.AttachScsiLun `xml:"urn:vim25 AttachScsiLun,omitempty"` + Res *types.AttachScsiLunResponse `xml:"urn:vim25 AttachScsiLunResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AttachScsiLunBody) Fault() 
*soap.Fault { return b.Fault_ } + +func AttachScsiLun(ctx context.Context, r soap.RoundTripper, req *types.AttachScsiLun) (*types.AttachScsiLunResponse, error) { + var reqBody, resBody AttachScsiLunBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AttachScsiLunEx_TaskBody struct { + Req *types.AttachScsiLunEx_Task `xml:"urn:vim25 AttachScsiLunEx_Task,omitempty"` + Res *types.AttachScsiLunEx_TaskResponse `xml:"urn:vim25 AttachScsiLunEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AttachScsiLunEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func AttachScsiLunEx_Task(ctx context.Context, r soap.RoundTripper, req *types.AttachScsiLunEx_Task) (*types.AttachScsiLunEx_TaskResponse, error) { + var reqBody, resBody AttachScsiLunEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AttachTagToVStorageObjectBody struct { + Req *types.AttachTagToVStorageObject `xml:"urn:vim25 AttachTagToVStorageObject,omitempty"` + Res *types.AttachTagToVStorageObjectResponse `xml:"urn:vim25 AttachTagToVStorageObjectResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AttachTagToVStorageObjectBody) Fault() *soap.Fault { return b.Fault_ } + +func AttachTagToVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.AttachTagToVStorageObject) (*types.AttachTagToVStorageObjectResponse, error) { + var reqBody, resBody AttachTagToVStorageObjectBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AttachVmfsExtentBody struct { + Req *types.AttachVmfsExtent `xml:"urn:vim25 AttachVmfsExtent,omitempty"` + Res *types.AttachVmfsExtentResponse `xml:"urn:vim25 AttachVmfsExtentResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AttachVmfsExtentBody) Fault() *soap.Fault { return b.Fault_ } + +func AttachVmfsExtent(ctx context.Context, r soap.RoundTripper, req *types.AttachVmfsExtent) (*types.AttachVmfsExtentResponse, error) { + var reqBody, resBody AttachVmfsExtentBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AutoStartPowerOffBody struct { + Req *types.AutoStartPowerOff `xml:"urn:vim25 AutoStartPowerOff,omitempty"` + Res *types.AutoStartPowerOffResponse `xml:"urn:vim25 AutoStartPowerOffResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AutoStartPowerOffBody) Fault() *soap.Fault { return b.Fault_ } + +func AutoStartPowerOff(ctx context.Context, r soap.RoundTripper, req *types.AutoStartPowerOff) (*types.AutoStartPowerOffResponse, error) { + var reqBody, resBody AutoStartPowerOffBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type AutoStartPowerOnBody struct { + Req *types.AutoStartPowerOn `xml:"urn:vim25 AutoStartPowerOn,omitempty"` + Res *types.AutoStartPowerOnResponse `xml:"urn:vim25 AutoStartPowerOnResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b 
*AutoStartPowerOnBody) Fault() *soap.Fault { return b.Fault_ } + +func AutoStartPowerOn(ctx context.Context, r soap.RoundTripper, req *types.AutoStartPowerOn) (*types.AutoStartPowerOnResponse, error) { + var reqBody, resBody AutoStartPowerOnBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type BackupFirmwareConfigurationBody struct { + Req *types.BackupFirmwareConfiguration `xml:"urn:vim25 BackupFirmwareConfiguration,omitempty"` + Res *types.BackupFirmwareConfigurationResponse `xml:"urn:vim25 BackupFirmwareConfigurationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *BackupFirmwareConfigurationBody) Fault() *soap.Fault { return b.Fault_ } + +func BackupFirmwareConfiguration(ctx context.Context, r soap.RoundTripper, req *types.BackupFirmwareConfiguration) (*types.BackupFirmwareConfigurationResponse, error) { + var reqBody, resBody BackupFirmwareConfigurationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type BindVnicBody struct { + Req *types.BindVnic `xml:"urn:vim25 BindVnic,omitempty"` + Res *types.BindVnicResponse `xml:"urn:vim25 BindVnicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *BindVnicBody) Fault() *soap.Fault { return b.Fault_ } + +func BindVnic(ctx context.Context, r soap.RoundTripper, req *types.BindVnic) (*types.BindVnicResponse, error) { + var reqBody, resBody BindVnicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type BrowseDiagnosticLogBody struct { + Req *types.BrowseDiagnosticLog `xml:"urn:vim25 BrowseDiagnosticLog,omitempty"` + Res *types.BrowseDiagnosticLogResponse `xml:"urn:vim25 BrowseDiagnosticLogResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *BrowseDiagnosticLogBody) Fault() *soap.Fault { return b.Fault_ } + +func BrowseDiagnosticLog(ctx context.Context, r soap.RoundTripper, req *types.BrowseDiagnosticLog) (*types.BrowseDiagnosticLogResponse, error) { + var reqBody, resBody BrowseDiagnosticLogBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CanProvisionObjectsBody struct { + Req *types.CanProvisionObjects `xml:"urn:vim25 CanProvisionObjects,omitempty"` + Res *types.CanProvisionObjectsResponse `xml:"urn:vim25 CanProvisionObjectsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CanProvisionObjectsBody) Fault() *soap.Fault { return b.Fault_ } + +func CanProvisionObjects(ctx context.Context, r soap.RoundTripper, req *types.CanProvisionObjects) (*types.CanProvisionObjectsResponse, error) { + var reqBody, resBody CanProvisionObjectsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CancelRecommendationBody struct { + Req *types.CancelRecommendation `xml:"urn:vim25 CancelRecommendation,omitempty"` + Res *types.CancelRecommendationResponse `xml:"urn:vim25 CancelRecommendationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} 
+ +func (b *CancelRecommendationBody) Fault() *soap.Fault { return b.Fault_ } + +func CancelRecommendation(ctx context.Context, r soap.RoundTripper, req *types.CancelRecommendation) (*types.CancelRecommendationResponse, error) { + var reqBody, resBody CancelRecommendationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CancelRetrievePropertiesExBody struct { + Req *types.CancelRetrievePropertiesEx `xml:"urn:vim25 CancelRetrievePropertiesEx,omitempty"` + Res *types.CancelRetrievePropertiesExResponse `xml:"urn:vim25 CancelRetrievePropertiesExResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CancelRetrievePropertiesExBody) Fault() *soap.Fault { return b.Fault_ } + +func CancelRetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req *types.CancelRetrievePropertiesEx) (*types.CancelRetrievePropertiesExResponse, error) { + var reqBody, resBody CancelRetrievePropertiesExBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CancelStorageDrsRecommendationBody struct { + Req *types.CancelStorageDrsRecommendation `xml:"urn:vim25 CancelStorageDrsRecommendation,omitempty"` + Res *types.CancelStorageDrsRecommendationResponse `xml:"urn:vim25 CancelStorageDrsRecommendationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CancelStorageDrsRecommendationBody) Fault() *soap.Fault { return b.Fault_ } + +func CancelStorageDrsRecommendation(ctx context.Context, r soap.RoundTripper, req *types.CancelStorageDrsRecommendation) (*types.CancelStorageDrsRecommendationResponse, error) { + var reqBody, resBody CancelStorageDrsRecommendationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CancelTaskBody struct { + Req *types.CancelTask `xml:"urn:vim25 CancelTask,omitempty"` + Res *types.CancelTaskResponse `xml:"urn:vim25 CancelTaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CancelTaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CancelTask(ctx context.Context, r soap.RoundTripper, req *types.CancelTask) (*types.CancelTaskResponse, error) { + var reqBody, resBody CancelTaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CancelWaitForUpdatesBody struct { + Req *types.CancelWaitForUpdates `xml:"urn:vim25 CancelWaitForUpdates,omitempty"` + Res *types.CancelWaitForUpdatesResponse `xml:"urn:vim25 CancelWaitForUpdatesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CancelWaitForUpdatesBody) Fault() *soap.Fault { return b.Fault_ } + +func CancelWaitForUpdates(ctx context.Context, r soap.RoundTripper, req *types.CancelWaitForUpdates) (*types.CancelWaitForUpdatesResponse, error) { + var reqBody, resBody CancelWaitForUpdatesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CertMgrRefreshCACertificatesAndCRLs_TaskBody struct { + Req *types.CertMgrRefreshCACertificatesAndCRLs_Task `xml:"urn:vim25 
CertMgrRefreshCACertificatesAndCRLs_Task,omitempty"` + Res *types.CertMgrRefreshCACertificatesAndCRLs_TaskResponse `xml:"urn:vim25 CertMgrRefreshCACertificatesAndCRLs_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CertMgrRefreshCACertificatesAndCRLs_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CertMgrRefreshCACertificatesAndCRLs_Task(ctx context.Context, r soap.RoundTripper, req *types.CertMgrRefreshCACertificatesAndCRLs_Task) (*types.CertMgrRefreshCACertificatesAndCRLs_TaskResponse, error) { + var reqBody, resBody CertMgrRefreshCACertificatesAndCRLs_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CertMgrRefreshCertificates_TaskBody struct { + Req *types.CertMgrRefreshCertificates_Task `xml:"urn:vim25 CertMgrRefreshCertificates_Task,omitempty"` + Res *types.CertMgrRefreshCertificates_TaskResponse `xml:"urn:vim25 CertMgrRefreshCertificates_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CertMgrRefreshCertificates_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CertMgrRefreshCertificates_Task(ctx context.Context, r soap.RoundTripper, req *types.CertMgrRefreshCertificates_Task) (*types.CertMgrRefreshCertificates_TaskResponse, error) { + var reqBody, resBody CertMgrRefreshCertificates_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CertMgrRevokeCertificates_TaskBody struct { + Req *types.CertMgrRevokeCertificates_Task `xml:"urn:vim25 CertMgrRevokeCertificates_Task,omitempty"` + Res *types.CertMgrRevokeCertificates_TaskResponse `xml:"urn:vim25 CertMgrRevokeCertificates_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CertMgrRevokeCertificates_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CertMgrRevokeCertificates_Task(ctx context.Context, r soap.RoundTripper, req *types.CertMgrRevokeCertificates_Task) (*types.CertMgrRevokeCertificates_TaskResponse, error) { + var reqBody, resBody CertMgrRevokeCertificates_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ChangeAccessModeBody struct { + Req *types.ChangeAccessMode `xml:"urn:vim25 ChangeAccessMode,omitempty"` + Res *types.ChangeAccessModeResponse `xml:"urn:vim25 ChangeAccessModeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ChangeAccessModeBody) Fault() *soap.Fault { return b.Fault_ } + +func ChangeAccessMode(ctx context.Context, r soap.RoundTripper, req *types.ChangeAccessMode) (*types.ChangeAccessModeResponse, error) { + var reqBody, resBody ChangeAccessModeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ChangeFileAttributesInGuestBody struct { + Req *types.ChangeFileAttributesInGuest `xml:"urn:vim25 ChangeFileAttributesInGuest,omitempty"` + Res *types.ChangeFileAttributesInGuestResponse `xml:"urn:vim25 ChangeFileAttributesInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b 
*ChangeFileAttributesInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func ChangeFileAttributesInGuest(ctx context.Context, r soap.RoundTripper, req *types.ChangeFileAttributesInGuest) (*types.ChangeFileAttributesInGuestResponse, error) { + var reqBody, resBody ChangeFileAttributesInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ChangeKey_TaskBody struct { + Req *types.ChangeKey_Task `xml:"urn:vim25 ChangeKey_Task,omitempty"` + Res *types.ChangeKey_TaskResponse `xml:"urn:vim25 ChangeKey_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ChangeKey_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ChangeKey_Task(ctx context.Context, r soap.RoundTripper, req *types.ChangeKey_Task) (*types.ChangeKey_TaskResponse, error) { + var reqBody, resBody ChangeKey_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ChangeLockdownModeBody struct { + Req *types.ChangeLockdownMode `xml:"urn:vim25 ChangeLockdownMode,omitempty"` + Res *types.ChangeLockdownModeResponse `xml:"urn:vim25 ChangeLockdownModeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ChangeLockdownModeBody) Fault() *soap.Fault { return b.Fault_ } + +func ChangeLockdownMode(ctx context.Context, r soap.RoundTripper, req *types.ChangeLockdownMode) (*types.ChangeLockdownModeResponse, error) { + var reqBody, resBody ChangeLockdownModeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ChangeNFSUserPasswordBody struct { + Req *types.ChangeNFSUserPassword `xml:"urn:vim25 ChangeNFSUserPassword,omitempty"` + Res *types.ChangeNFSUserPasswordResponse `xml:"urn:vim25 ChangeNFSUserPasswordResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ChangeNFSUserPasswordBody) Fault() *soap.Fault { return b.Fault_ } + +func ChangeNFSUserPassword(ctx context.Context, r soap.RoundTripper, req *types.ChangeNFSUserPassword) (*types.ChangeNFSUserPasswordResponse, error) { + var reqBody, resBody ChangeNFSUserPasswordBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ChangeOwnerBody struct { + Req *types.ChangeOwner `xml:"urn:vim25 ChangeOwner,omitempty"` + Res *types.ChangeOwnerResponse `xml:"urn:vim25 ChangeOwnerResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ChangeOwnerBody) Fault() *soap.Fault { return b.Fault_ } + +func ChangeOwner(ctx context.Context, r soap.RoundTripper, req *types.ChangeOwner) (*types.ChangeOwnerResponse, error) { + var reqBody, resBody ChangeOwnerBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckAddHostEvc_TaskBody struct { + Req *types.CheckAddHostEvc_Task `xml:"urn:vim25 CheckAddHostEvc_Task,omitempty"` + Res *types.CheckAddHostEvc_TaskResponse `xml:"urn:vim25 CheckAddHostEvc_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b 
*CheckAddHostEvc_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckAddHostEvc_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckAddHostEvc_Task) (*types.CheckAddHostEvc_TaskResponse, error) { + var reqBody, resBody CheckAddHostEvc_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckAnswerFileStatus_TaskBody struct { + Req *types.CheckAnswerFileStatus_Task `xml:"urn:vim25 CheckAnswerFileStatus_Task,omitempty"` + Res *types.CheckAnswerFileStatus_TaskResponse `xml:"urn:vim25 CheckAnswerFileStatus_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckAnswerFileStatus_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckAnswerFileStatus_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckAnswerFileStatus_Task) (*types.CheckAnswerFileStatus_TaskResponse, error) { + var reqBody, resBody CheckAnswerFileStatus_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckClone_TaskBody struct { + Req *types.CheckClone_Task `xml:"urn:vim25 CheckClone_Task,omitempty"` + Res *types.CheckClone_TaskResponse `xml:"urn:vim25 CheckClone_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckClone_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckClone_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckClone_Task) (*types.CheckClone_TaskResponse, error) { + var reqBody, resBody CheckClone_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckCompatibility_TaskBody struct { + Req *types.CheckCompatibility_Task `xml:"urn:vim25 CheckCompatibility_Task,omitempty"` + Res *types.CheckCompatibility_TaskResponse `xml:"urn:vim25 CheckCompatibility_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckCompatibility_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckCompatibility_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckCompatibility_Task) (*types.CheckCompatibility_TaskResponse, error) { + var reqBody, resBody CheckCompatibility_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckCompliance_TaskBody struct { + Req *types.CheckCompliance_Task `xml:"urn:vim25 CheckCompliance_Task,omitempty"` + Res *types.CheckCompliance_TaskResponse `xml:"urn:vim25 CheckCompliance_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckCompliance_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckCompliance_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckCompliance_Task) (*types.CheckCompliance_TaskResponse, error) { + var reqBody, resBody CheckCompliance_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckConfigureEvcMode_TaskBody struct { + Req *types.CheckConfigureEvcMode_Task `xml:"urn:vim25 CheckConfigureEvcMode_Task,omitempty"` + Res 
*types.CheckConfigureEvcMode_TaskResponse `xml:"urn:vim25 CheckConfigureEvcMode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckConfigureEvcMode_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckConfigureEvcMode_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckConfigureEvcMode_Task) (*types.CheckConfigureEvcMode_TaskResponse, error) { + var reqBody, resBody CheckConfigureEvcMode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckCustomizationResourcesBody struct { + Req *types.CheckCustomizationResources `xml:"urn:vim25 CheckCustomizationResources,omitempty"` + Res *types.CheckCustomizationResourcesResponse `xml:"urn:vim25 CheckCustomizationResourcesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckCustomizationResourcesBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckCustomizationResources(ctx context.Context, r soap.RoundTripper, req *types.CheckCustomizationResources) (*types.CheckCustomizationResourcesResponse, error) { + var reqBody, resBody CheckCustomizationResourcesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckCustomizationSpecBody struct { + Req *types.CheckCustomizationSpec `xml:"urn:vim25 CheckCustomizationSpec,omitempty"` + Res *types.CheckCustomizationSpecResponse `xml:"urn:vim25 CheckCustomizationSpecResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.CheckCustomizationSpec) (*types.CheckCustomizationSpecResponse, error) { + var reqBody, resBody CheckCustomizationSpecBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckForUpdatesBody struct { + Req *types.CheckForUpdates `xml:"urn:vim25 CheckForUpdates,omitempty"` + Res *types.CheckForUpdatesResponse `xml:"urn:vim25 CheckForUpdatesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckForUpdatesBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckForUpdates(ctx context.Context, r soap.RoundTripper, req *types.CheckForUpdates) (*types.CheckForUpdatesResponse, error) { + var reqBody, resBody CheckForUpdatesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckHostPatch_TaskBody struct { + Req *types.CheckHostPatch_Task `xml:"urn:vim25 CheckHostPatch_Task,omitempty"` + Res *types.CheckHostPatch_TaskResponse `xml:"urn:vim25 CheckHostPatch_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckHostPatch_Task) (*types.CheckHostPatch_TaskResponse, error) { + var reqBody, resBody CheckHostPatch_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return 
nil, err + } + + return resBody.Res, nil +} + +type CheckInstantClone_TaskBody struct { + Req *types.CheckInstantClone_Task `xml:"urn:vim25 CheckInstantClone_Task,omitempty"` + Res *types.CheckInstantClone_TaskResponse `xml:"urn:vim25 CheckInstantClone_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckInstantClone_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckInstantClone_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckInstantClone_Task) (*types.CheckInstantClone_TaskResponse, error) { + var reqBody, resBody CheckInstantClone_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckLicenseFeatureBody struct { + Req *types.CheckLicenseFeature `xml:"urn:vim25 CheckLicenseFeature,omitempty"` + Res *types.CheckLicenseFeatureResponse `xml:"urn:vim25 CheckLicenseFeatureResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckLicenseFeatureBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckLicenseFeature(ctx context.Context, r soap.RoundTripper, req *types.CheckLicenseFeature) (*types.CheckLicenseFeatureResponse, error) { + var reqBody, resBody CheckLicenseFeatureBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckMigrate_TaskBody struct { + Req *types.CheckMigrate_Task `xml:"urn:vim25 CheckMigrate_Task,omitempty"` + Res *types.CheckMigrate_TaskResponse `xml:"urn:vim25 CheckMigrate_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckMigrate_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckMigrate_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckMigrate_Task) (*types.CheckMigrate_TaskResponse, error) { + var reqBody, resBody CheckMigrate_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckPowerOn_TaskBody struct { + Req *types.CheckPowerOn_Task `xml:"urn:vim25 CheckPowerOn_Task,omitempty"` + Res *types.CheckPowerOn_TaskResponse `xml:"urn:vim25 CheckPowerOn_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckPowerOn_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckPowerOn_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckPowerOn_Task) (*types.CheckPowerOn_TaskResponse, error) { + var reqBody, resBody CheckPowerOn_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckProfileCompliance_TaskBody struct { + Req *types.CheckProfileCompliance_Task `xml:"urn:vim25 CheckProfileCompliance_Task,omitempty"` + Res *types.CheckProfileCompliance_TaskResponse `xml:"urn:vim25 CheckProfileCompliance_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckProfileCompliance_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckProfileCompliance_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckProfileCompliance_Task) (*types.CheckProfileCompliance_TaskResponse, error) { + var reqBody, resBody 
CheckProfileCompliance_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckRelocate_TaskBody struct { + Req *types.CheckRelocate_Task `xml:"urn:vim25 CheckRelocate_Task,omitempty"` + Res *types.CheckRelocate_TaskResponse `xml:"urn:vim25 CheckRelocate_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckRelocate_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckRelocate_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckRelocate_Task) (*types.CheckRelocate_TaskResponse, error) { + var reqBody, resBody CheckRelocate_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CheckVmConfig_TaskBody struct { + Req *types.CheckVmConfig_Task `xml:"urn:vim25 CheckVmConfig_Task,omitempty"` + Res *types.CheckVmConfig_TaskResponse `xml:"urn:vim25 CheckVmConfig_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CheckVmConfig_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CheckVmConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckVmConfig_Task) (*types.CheckVmConfig_TaskResponse, error) { + var reqBody, resBody CheckVmConfig_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ClearComplianceStatusBody struct { + Req *types.ClearComplianceStatus `xml:"urn:vim25 ClearComplianceStatus,omitempty"` + Res *types.ClearComplianceStatusResponse `xml:"urn:vim25 ClearComplianceStatusResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ClearComplianceStatusBody) Fault() *soap.Fault { return b.Fault_ } + +func ClearComplianceStatus(ctx context.Context, r soap.RoundTripper, req *types.ClearComplianceStatus) (*types.ClearComplianceStatusResponse, error) { + var reqBody, resBody ClearComplianceStatusBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ClearNFSUserBody struct { + Req *types.ClearNFSUser `xml:"urn:vim25 ClearNFSUser,omitempty"` + Res *types.ClearNFSUserResponse `xml:"urn:vim25 ClearNFSUserResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ClearNFSUserBody) Fault() *soap.Fault { return b.Fault_ } + +func ClearNFSUser(ctx context.Context, r soap.RoundTripper, req *types.ClearNFSUser) (*types.ClearNFSUserResponse, error) { + var reqBody, resBody ClearNFSUserBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ClearSystemEventLogBody struct { + Req *types.ClearSystemEventLog `xml:"urn:vim25 ClearSystemEventLog,omitempty"` + Res *types.ClearSystemEventLogResponse `xml:"urn:vim25 ClearSystemEventLogResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ClearSystemEventLogBody) Fault() *soap.Fault { return b.Fault_ } + +func ClearSystemEventLog(ctx context.Context, r soap.RoundTripper, req *types.ClearSystemEventLog) (*types.ClearSystemEventLogResponse, error) { + var reqBody, resBody 
ClearSystemEventLogBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ClearTriggeredAlarmsBody struct { + Req *types.ClearTriggeredAlarms `xml:"urn:vim25 ClearTriggeredAlarms,omitempty"` + Res *types.ClearTriggeredAlarmsResponse `xml:"urn:vim25 ClearTriggeredAlarmsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ClearTriggeredAlarmsBody) Fault() *soap.Fault { return b.Fault_ } + +func ClearTriggeredAlarms(ctx context.Context, r soap.RoundTripper, req *types.ClearTriggeredAlarms) (*types.ClearTriggeredAlarmsResponse, error) { + var reqBody, resBody ClearTriggeredAlarmsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ClearVStorageObjectControlFlagsBody struct { + Req *types.ClearVStorageObjectControlFlags `xml:"urn:vim25 ClearVStorageObjectControlFlags,omitempty"` + Res *types.ClearVStorageObjectControlFlagsResponse `xml:"urn:vim25 ClearVStorageObjectControlFlagsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ClearVStorageObjectControlFlagsBody) Fault() *soap.Fault { return b.Fault_ } + +func ClearVStorageObjectControlFlags(ctx context.Context, r soap.RoundTripper, req *types.ClearVStorageObjectControlFlags) (*types.ClearVStorageObjectControlFlagsResponse, error) { + var reqBody, resBody ClearVStorageObjectControlFlagsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CloneSessionBody struct { + Req *types.CloneSession `xml:"urn:vim25 CloneSession,omitempty"` + Res *types.CloneSessionResponse `xml:"urn:vim25 CloneSessionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CloneSessionBody) Fault() *soap.Fault { return b.Fault_ } + +func CloneSession(ctx context.Context, r soap.RoundTripper, req *types.CloneSession) (*types.CloneSessionResponse, error) { + var reqBody, resBody CloneSessionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CloneVApp_TaskBody struct { + Req *types.CloneVApp_Task `xml:"urn:vim25 CloneVApp_Task,omitempty"` + Res *types.CloneVApp_TaskResponse `xml:"urn:vim25 CloneVApp_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CloneVApp_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CloneVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.CloneVApp_Task) (*types.CloneVApp_TaskResponse, error) { + var reqBody, resBody CloneVApp_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CloneVM_TaskBody struct { + Req *types.CloneVM_Task `xml:"urn:vim25 CloneVM_Task,omitempty"` + Res *types.CloneVM_TaskResponse `xml:"urn:vim25 CloneVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CloneVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CloneVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CloneVM_Task) (*types.CloneVM_TaskResponse, error) { + var reqBody, resBody 
CloneVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CloneVStorageObject_TaskBody struct { + Req *types.CloneVStorageObject_Task `xml:"urn:vim25 CloneVStorageObject_Task,omitempty"` + Res *types.CloneVStorageObject_TaskResponse `xml:"urn:vim25 CloneVStorageObject_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CloneVStorageObject_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CloneVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *types.CloneVStorageObject_Task) (*types.CloneVStorageObject_TaskResponse, error) { + var reqBody, resBody CloneVStorageObject_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CloseInventoryViewFolderBody struct { + Req *types.CloseInventoryViewFolder `xml:"urn:vim25 CloseInventoryViewFolder,omitempty"` + Res *types.CloseInventoryViewFolderResponse `xml:"urn:vim25 CloseInventoryViewFolderResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CloseInventoryViewFolderBody) Fault() *soap.Fault { return b.Fault_ } + +func CloseInventoryViewFolder(ctx context.Context, r soap.RoundTripper, req *types.CloseInventoryViewFolder) (*types.CloseInventoryViewFolderResponse, error) { + var reqBody, resBody CloseInventoryViewFolderBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ClusterEnterMaintenanceModeBody struct { + Req *types.ClusterEnterMaintenanceMode `xml:"urn:vim25 ClusterEnterMaintenanceMode,omitempty"` + Res *types.ClusterEnterMaintenanceModeResponse `xml:"urn:vim25 ClusterEnterMaintenanceModeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ClusterEnterMaintenanceModeBody) Fault() *soap.Fault { return b.Fault_ } + +func ClusterEnterMaintenanceMode(ctx context.Context, r soap.RoundTripper, req *types.ClusterEnterMaintenanceMode) (*types.ClusterEnterMaintenanceModeResponse, error) { + var reqBody, resBody ClusterEnterMaintenanceModeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CompositeHostProfile_TaskBody struct { + Req *types.CompositeHostProfile_Task `xml:"urn:vim25 CompositeHostProfile_Task,omitempty"` + Res *types.CompositeHostProfile_TaskResponse `xml:"urn:vim25 CompositeHostProfile_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CompositeHostProfile_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CompositeHostProfile_Task(ctx context.Context, r soap.RoundTripper, req *types.CompositeHostProfile_Task) (*types.CompositeHostProfile_TaskResponse, error) { + var reqBody, resBody CompositeHostProfile_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ComputeDiskPartitionInfoBody struct { + Req *types.ComputeDiskPartitionInfo `xml:"urn:vim25 ComputeDiskPartitionInfo,omitempty"` + Res *types.ComputeDiskPartitionInfoResponse `xml:"urn:vim25 ComputeDiskPartitionInfoResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ComputeDiskPartitionInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func ComputeDiskPartitionInfo(ctx context.Context, r soap.RoundTripper, req *types.ComputeDiskPartitionInfo) (*types.ComputeDiskPartitionInfoResponse, error) { + var reqBody, resBody ComputeDiskPartitionInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ComputeDiskPartitionInfoForResizeBody struct { + Req *types.ComputeDiskPartitionInfoForResize `xml:"urn:vim25 ComputeDiskPartitionInfoForResize,omitempty"` + Res *types.ComputeDiskPartitionInfoForResizeResponse `xml:"urn:vim25 ComputeDiskPartitionInfoForResizeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ComputeDiskPartitionInfoForResizeBody) Fault() *soap.Fault { return b.Fault_ } + +func ComputeDiskPartitionInfoForResize(ctx context.Context, r soap.RoundTripper, req *types.ComputeDiskPartitionInfoForResize) (*types.ComputeDiskPartitionInfoForResizeResponse, error) { + var reqBody, resBody ComputeDiskPartitionInfoForResizeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ConfigureCryptoKeyBody struct { + Req *types.ConfigureCryptoKey `xml:"urn:vim25 ConfigureCryptoKey,omitempty"` + Res *types.ConfigureCryptoKeyResponse `xml:"urn:vim25 ConfigureCryptoKeyResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConfigureCryptoKeyBody) Fault() *soap.Fault { return b.Fault_ } + +func ConfigureCryptoKey(ctx context.Context, r soap.RoundTripper, req *types.ConfigureCryptoKey) (*types.ConfigureCryptoKeyResponse, error) { + var reqBody, resBody ConfigureCryptoKeyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ConfigureDatastoreIORM_TaskBody struct { + Req *types.ConfigureDatastoreIORM_Task `xml:"urn:vim25 ConfigureDatastoreIORM_Task,omitempty"` + Res *types.ConfigureDatastoreIORM_TaskResponse `xml:"urn:vim25 ConfigureDatastoreIORM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConfigureDatastoreIORM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ConfigureDatastoreIORM_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureDatastoreIORM_Task) (*types.ConfigureDatastoreIORM_TaskResponse, error) { + var reqBody, resBody ConfigureDatastoreIORM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ConfigureDatastorePrincipalBody struct { + Req *types.ConfigureDatastorePrincipal `xml:"urn:vim25 ConfigureDatastorePrincipal,omitempty"` + Res *types.ConfigureDatastorePrincipalResponse `xml:"urn:vim25 ConfigureDatastorePrincipalResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConfigureDatastorePrincipalBody) Fault() *soap.Fault { return b.Fault_ } + +func ConfigureDatastorePrincipal(ctx context.Context, r soap.RoundTripper, req *types.ConfigureDatastorePrincipal) (*types.ConfigureDatastorePrincipalResponse, error) { + var reqBody, resBody ConfigureDatastorePrincipalBody + + 
reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ConfigureEvcMode_TaskBody struct { + Req *types.ConfigureEvcMode_Task `xml:"urn:vim25 ConfigureEvcMode_Task,omitempty"` + Res *types.ConfigureEvcMode_TaskResponse `xml:"urn:vim25 ConfigureEvcMode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConfigureEvcMode_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ConfigureEvcMode_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureEvcMode_Task) (*types.ConfigureEvcMode_TaskResponse, error) { + var reqBody, resBody ConfigureEvcMode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ConfigureHostCache_TaskBody struct { + Req *types.ConfigureHostCache_Task `xml:"urn:vim25 ConfigureHostCache_Task,omitempty"` + Res *types.ConfigureHostCache_TaskResponse `xml:"urn:vim25 ConfigureHostCache_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConfigureHostCache_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ConfigureHostCache_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureHostCache_Task) (*types.ConfigureHostCache_TaskResponse, error) { + var reqBody, resBody ConfigureHostCache_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ConfigureLicenseSourceBody struct { + Req *types.ConfigureLicenseSource `xml:"urn:vim25 ConfigureLicenseSource,omitempty"` + Res *types.ConfigureLicenseSourceResponse `xml:"urn:vim25 ConfigureLicenseSourceResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConfigureLicenseSourceBody) Fault() *soap.Fault { return b.Fault_ } + +func ConfigureLicenseSource(ctx context.Context, r soap.RoundTripper, req *types.ConfigureLicenseSource) (*types.ConfigureLicenseSourceResponse, error) { + var reqBody, resBody ConfigureLicenseSourceBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ConfigurePowerPolicyBody struct { + Req *types.ConfigurePowerPolicy `xml:"urn:vim25 ConfigurePowerPolicy,omitempty"` + Res *types.ConfigurePowerPolicyResponse `xml:"urn:vim25 ConfigurePowerPolicyResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConfigurePowerPolicyBody) Fault() *soap.Fault { return b.Fault_ } + +func ConfigurePowerPolicy(ctx context.Context, r soap.RoundTripper, req *types.ConfigurePowerPolicy) (*types.ConfigurePowerPolicyResponse, error) { + var reqBody, resBody ConfigurePowerPolicyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ConfigureStorageDrsForPod_TaskBody struct { + Req *types.ConfigureStorageDrsForPod_Task `xml:"urn:vim25 ConfigureStorageDrsForPod_Task,omitempty"` + Res *types.ConfigureStorageDrsForPod_TaskResponse `xml:"urn:vim25 ConfigureStorageDrsForPod_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConfigureStorageDrsForPod_TaskBody) Fault() *soap.Fault { 
return b.Fault_ } + +func ConfigureStorageDrsForPod_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureStorageDrsForPod_Task) (*types.ConfigureStorageDrsForPod_TaskResponse, error) { + var reqBody, resBody ConfigureStorageDrsForPod_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ConfigureVFlashResourceEx_TaskBody struct { + Req *types.ConfigureVFlashResourceEx_Task `xml:"urn:vim25 ConfigureVFlashResourceEx_Task,omitempty"` + Res *types.ConfigureVFlashResourceEx_TaskResponse `xml:"urn:vim25 ConfigureVFlashResourceEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConfigureVFlashResourceEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ConfigureVFlashResourceEx_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureVFlashResourceEx_Task) (*types.ConfigureVFlashResourceEx_TaskResponse, error) { + var reqBody, resBody ConfigureVFlashResourceEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ConsolidateVMDisks_TaskBody struct { + Req *types.ConsolidateVMDisks_Task `xml:"urn:vim25 ConsolidateVMDisks_Task,omitempty"` + Res *types.ConsolidateVMDisks_TaskResponse `xml:"urn:vim25 ConsolidateVMDisks_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConsolidateVMDisks_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ConsolidateVMDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.ConsolidateVMDisks_Task) (*types.ConsolidateVMDisks_TaskResponse, error) { + var reqBody, resBody ConsolidateVMDisks_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ContinueRetrievePropertiesExBody struct { + Req *types.ContinueRetrievePropertiesEx `xml:"urn:vim25 ContinueRetrievePropertiesEx,omitempty"` + Res *types.ContinueRetrievePropertiesExResponse `xml:"urn:vim25 ContinueRetrievePropertiesExResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ContinueRetrievePropertiesExBody) Fault() *soap.Fault { return b.Fault_ } + +func ContinueRetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req *types.ContinueRetrievePropertiesEx) (*types.ContinueRetrievePropertiesExResponse, error) { + var reqBody, resBody ContinueRetrievePropertiesExBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ConvertNamespacePathToUuidPathBody struct { + Req *types.ConvertNamespacePathToUuidPath `xml:"urn:vim25 ConvertNamespacePathToUuidPath,omitempty"` + Res *types.ConvertNamespacePathToUuidPathResponse `xml:"urn:vim25 ConvertNamespacePathToUuidPathResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConvertNamespacePathToUuidPathBody) Fault() *soap.Fault { return b.Fault_ } + +func ConvertNamespacePathToUuidPath(ctx context.Context, r soap.RoundTripper, req *types.ConvertNamespacePathToUuidPath) (*types.ConvertNamespacePathToUuidPathResponse, error) { + var reqBody, resBody ConvertNamespacePathToUuidPathBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, 
&resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CopyDatastoreFile_TaskBody struct { + Req *types.CopyDatastoreFile_Task `xml:"urn:vim25 CopyDatastoreFile_Task,omitempty"` + Res *types.CopyDatastoreFile_TaskResponse `xml:"urn:vim25 CopyDatastoreFile_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CopyDatastoreFile_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CopyDatastoreFile_Task(ctx context.Context, r soap.RoundTripper, req *types.CopyDatastoreFile_Task) (*types.CopyDatastoreFile_TaskResponse, error) { + var reqBody, resBody CopyDatastoreFile_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CopyVirtualDisk_TaskBody struct { + Req *types.CopyVirtualDisk_Task `xml:"urn:vim25 CopyVirtualDisk_Task,omitempty"` + Res *types.CopyVirtualDisk_TaskResponse `xml:"urn:vim25 CopyVirtualDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CopyVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CopyVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.CopyVirtualDisk_Task) (*types.CopyVirtualDisk_TaskResponse, error) { + var reqBody, resBody CopyVirtualDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateAlarmBody struct { + Req *types.CreateAlarm `xml:"urn:vim25 CreateAlarm,omitempty"` + Res *types.CreateAlarmResponse `xml:"urn:vim25 CreateAlarmResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateAlarmBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateAlarm(ctx context.Context, r soap.RoundTripper, req *types.CreateAlarm) (*types.CreateAlarmResponse, error) { + var reqBody, resBody CreateAlarmBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateChildVM_TaskBody struct { + Req *types.CreateChildVM_Task `xml:"urn:vim25 CreateChildVM_Task,omitempty"` + Res *types.CreateChildVM_TaskResponse `xml:"urn:vim25 CreateChildVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateChildVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateChildVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateChildVM_Task) (*types.CreateChildVM_TaskResponse, error) { + var reqBody, resBody CreateChildVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateClusterBody struct { + Req *types.CreateCluster `xml:"urn:vim25 CreateCluster,omitempty"` + Res *types.CreateClusterResponse `xml:"urn:vim25 CreateClusterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateClusterBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateCluster(ctx context.Context, r soap.RoundTripper, req *types.CreateCluster) (*types.CreateClusterResponse, error) { + var reqBody, resBody CreateClusterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return 
resBody.Res, nil +} + +type CreateClusterExBody struct { + Req *types.CreateClusterEx `xml:"urn:vim25 CreateClusterEx,omitempty"` + Res *types.CreateClusterExResponse `xml:"urn:vim25 CreateClusterExResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateClusterExBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateClusterEx(ctx context.Context, r soap.RoundTripper, req *types.CreateClusterEx) (*types.CreateClusterExResponse, error) { + var reqBody, resBody CreateClusterExBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateCollectorForEventsBody struct { + Req *types.CreateCollectorForEvents `xml:"urn:vim25 CreateCollectorForEvents,omitempty"` + Res *types.CreateCollectorForEventsResponse `xml:"urn:vim25 CreateCollectorForEventsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateCollectorForEventsBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateCollectorForEvents(ctx context.Context, r soap.RoundTripper, req *types.CreateCollectorForEvents) (*types.CreateCollectorForEventsResponse, error) { + var reqBody, resBody CreateCollectorForEventsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateCollectorForTasksBody struct { + Req *types.CreateCollectorForTasks `xml:"urn:vim25 CreateCollectorForTasks,omitempty"` + Res *types.CreateCollectorForTasksResponse `xml:"urn:vim25 CreateCollectorForTasksResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateCollectorForTasksBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateCollectorForTasks(ctx context.Context, r soap.RoundTripper, req *types.CreateCollectorForTasks) (*types.CreateCollectorForTasksResponse, error) { + var reqBody, resBody CreateCollectorForTasksBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateContainerViewBody struct { + Req *types.CreateContainerView `xml:"urn:vim25 CreateContainerView,omitempty"` + Res *types.CreateContainerViewResponse `xml:"urn:vim25 CreateContainerViewResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateContainerViewBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateContainerView(ctx context.Context, r soap.RoundTripper, req *types.CreateContainerView) (*types.CreateContainerViewResponse, error) { + var reqBody, resBody CreateContainerViewBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateCustomizationSpecBody struct { + Req *types.CreateCustomizationSpec `xml:"urn:vim25 CreateCustomizationSpec,omitempty"` + Res *types.CreateCustomizationSpecResponse `xml:"urn:vim25 CreateCustomizationSpecResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.CreateCustomizationSpec) (*types.CreateCustomizationSpecResponse, error) { + var reqBody, resBody 
CreateCustomizationSpecBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateDVPortgroup_TaskBody struct { + Req *types.CreateDVPortgroup_Task `xml:"urn:vim25 CreateDVPortgroup_Task,omitempty"` + Res *types.CreateDVPortgroup_TaskResponse `xml:"urn:vim25 CreateDVPortgroup_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateDVPortgroup_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateDVPortgroup_Task) (*types.CreateDVPortgroup_TaskResponse, error) { + var reqBody, resBody CreateDVPortgroup_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateDVS_TaskBody struct { + Req *types.CreateDVS_Task `xml:"urn:vim25 CreateDVS_Task,omitempty"` + Res *types.CreateDVS_TaskResponse `xml:"urn:vim25 CreateDVS_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateDVS_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateDVS_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateDVS_Task) (*types.CreateDVS_TaskResponse, error) { + var reqBody, resBody CreateDVS_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateDatacenterBody struct { + Req *types.CreateDatacenter `xml:"urn:vim25 CreateDatacenter,omitempty"` + Res *types.CreateDatacenterResponse `xml:"urn:vim25 CreateDatacenterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateDatacenterBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateDatacenter(ctx context.Context, r soap.RoundTripper, req *types.CreateDatacenter) (*types.CreateDatacenterResponse, error) { + var reqBody, resBody CreateDatacenterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateDefaultProfileBody struct { + Req *types.CreateDefaultProfile `xml:"urn:vim25 CreateDefaultProfile,omitempty"` + Res *types.CreateDefaultProfileResponse `xml:"urn:vim25 CreateDefaultProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateDefaultProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateDefaultProfile(ctx context.Context, r soap.RoundTripper, req *types.CreateDefaultProfile) (*types.CreateDefaultProfileResponse, error) { + var reqBody, resBody CreateDefaultProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateDescriptorBody struct { + Req *types.CreateDescriptor `xml:"urn:vim25 CreateDescriptor,omitempty"` + Res *types.CreateDescriptorResponse `xml:"urn:vim25 CreateDescriptorResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateDescriptorBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateDescriptor(ctx context.Context, r soap.RoundTripper, req *types.CreateDescriptor) (*types.CreateDescriptorResponse, error) { + var reqBody, resBody 
CreateDescriptorBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateDiagnosticPartitionBody struct { + Req *types.CreateDiagnosticPartition `xml:"urn:vim25 CreateDiagnosticPartition,omitempty"` + Res *types.CreateDiagnosticPartitionResponse `xml:"urn:vim25 CreateDiagnosticPartitionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateDiagnosticPartitionBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateDiagnosticPartition(ctx context.Context, r soap.RoundTripper, req *types.CreateDiagnosticPartition) (*types.CreateDiagnosticPartitionResponse, error) { + var reqBody, resBody CreateDiagnosticPartitionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateDirectoryBody struct { + Req *types.CreateDirectory `xml:"urn:vim25 CreateDirectory,omitempty"` + Res *types.CreateDirectoryResponse `xml:"urn:vim25 CreateDirectoryResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateDirectoryBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateDirectory(ctx context.Context, r soap.RoundTripper, req *types.CreateDirectory) (*types.CreateDirectoryResponse, error) { + var reqBody, resBody CreateDirectoryBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateDiskFromSnapshot_TaskBody struct { + Req *types.CreateDiskFromSnapshot_Task `xml:"urn:vim25 CreateDiskFromSnapshot_Task,omitempty"` + Res *types.CreateDiskFromSnapshot_TaskResponse `xml:"urn:vim25 CreateDiskFromSnapshot_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateDiskFromSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateDiskFromSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateDiskFromSnapshot_Task) (*types.CreateDiskFromSnapshot_TaskResponse, error) { + var reqBody, resBody CreateDiskFromSnapshot_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateDisk_TaskBody struct { + Req *types.CreateDisk_Task `xml:"urn:vim25 CreateDisk_Task,omitempty"` + Res *types.CreateDisk_TaskResponse `xml:"urn:vim25 CreateDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateDisk_Task) (*types.CreateDisk_TaskResponse, error) { + var reqBody, resBody CreateDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateFilterBody struct { + Req *types.CreateFilter `xml:"urn:vim25 CreateFilter,omitempty"` + Res *types.CreateFilterResponse `xml:"urn:vim25 CreateFilterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateFilterBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateFilter(ctx context.Context, r soap.RoundTripper, req *types.CreateFilter) 
(*types.CreateFilterResponse, error) { + var reqBody, resBody CreateFilterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateFolderBody struct { + Req *types.CreateFolder `xml:"urn:vim25 CreateFolder,omitempty"` + Res *types.CreateFolderResponse `xml:"urn:vim25 CreateFolderResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateFolderBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateFolder(ctx context.Context, r soap.RoundTripper, req *types.CreateFolder) (*types.CreateFolderResponse, error) { + var reqBody, resBody CreateFolderBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateGroupBody struct { + Req *types.CreateGroup `xml:"urn:vim25 CreateGroup,omitempty"` + Res *types.CreateGroupResponse `xml:"urn:vim25 CreateGroupResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateGroupBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateGroup(ctx context.Context, r soap.RoundTripper, req *types.CreateGroup) (*types.CreateGroupResponse, error) { + var reqBody, resBody CreateGroupBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateImportSpecBody struct { + Req *types.CreateImportSpec `xml:"urn:vim25 CreateImportSpec,omitempty"` + Res *types.CreateImportSpecResponse `xml:"urn:vim25 CreateImportSpecResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateImportSpecBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateImportSpec(ctx context.Context, r soap.RoundTripper, req *types.CreateImportSpec) (*types.CreateImportSpecResponse, error) { + var reqBody, resBody CreateImportSpecBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateInventoryViewBody struct { + Req *types.CreateInventoryView `xml:"urn:vim25 CreateInventoryView,omitempty"` + Res *types.CreateInventoryViewResponse `xml:"urn:vim25 CreateInventoryViewResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateInventoryViewBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateInventoryView(ctx context.Context, r soap.RoundTripper, req *types.CreateInventoryView) (*types.CreateInventoryViewResponse, error) { + var reqBody, resBody CreateInventoryViewBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateIpPoolBody struct { + Req *types.CreateIpPool `xml:"urn:vim25 CreateIpPool,omitempty"` + Res *types.CreateIpPoolResponse `xml:"urn:vim25 CreateIpPoolResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateIpPoolBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateIpPool(ctx context.Context, r soap.RoundTripper, req *types.CreateIpPool) (*types.CreateIpPoolResponse, error) { + var reqBody, resBody CreateIpPoolBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return 
resBody.Res, nil +} + +type CreateListViewBody struct { + Req *types.CreateListView `xml:"urn:vim25 CreateListView,omitempty"` + Res *types.CreateListViewResponse `xml:"urn:vim25 CreateListViewResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateListViewBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateListView(ctx context.Context, r soap.RoundTripper, req *types.CreateListView) (*types.CreateListViewResponse, error) { + var reqBody, resBody CreateListViewBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateListViewFromViewBody struct { + Req *types.CreateListViewFromView `xml:"urn:vim25 CreateListViewFromView,omitempty"` + Res *types.CreateListViewFromViewResponse `xml:"urn:vim25 CreateListViewFromViewResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateListViewFromViewBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateListViewFromView(ctx context.Context, r soap.RoundTripper, req *types.CreateListViewFromView) (*types.CreateListViewFromViewResponse, error) { + var reqBody, resBody CreateListViewFromViewBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateLocalDatastoreBody struct { + Req *types.CreateLocalDatastore `xml:"urn:vim25 CreateLocalDatastore,omitempty"` + Res *types.CreateLocalDatastoreResponse `xml:"urn:vim25 CreateLocalDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateLocalDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateLocalDatastore(ctx context.Context, r soap.RoundTripper, req *types.CreateLocalDatastore) (*types.CreateLocalDatastoreResponse, error) { + var reqBody, resBody CreateLocalDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateNasDatastoreBody struct { + Req *types.CreateNasDatastore `xml:"urn:vim25 CreateNasDatastore,omitempty"` + Res *types.CreateNasDatastoreResponse `xml:"urn:vim25 CreateNasDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateNasDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateNasDatastore(ctx context.Context, r soap.RoundTripper, req *types.CreateNasDatastore) (*types.CreateNasDatastoreResponse, error) { + var reqBody, resBody CreateNasDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateNvdimmNamespace_TaskBody struct { + Req *types.CreateNvdimmNamespace_Task `xml:"urn:vim25 CreateNvdimmNamespace_Task,omitempty"` + Res *types.CreateNvdimmNamespace_TaskResponse `xml:"urn:vim25 CreateNvdimmNamespace_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateNvdimmNamespace_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateNvdimmNamespace_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateNvdimmNamespace_Task) (*types.CreateNvdimmNamespace_TaskResponse, error) { + var reqBody, resBody CreateNvdimmNamespace_TaskBody + + reqBody.Req = 
req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateObjectScheduledTaskBody struct { + Req *types.CreateObjectScheduledTask `xml:"urn:vim25 CreateObjectScheduledTask,omitempty"` + Res *types.CreateObjectScheduledTaskResponse `xml:"urn:vim25 CreateObjectScheduledTaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateObjectScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateObjectScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.CreateObjectScheduledTask) (*types.CreateObjectScheduledTaskResponse, error) { + var reqBody, resBody CreateObjectScheduledTaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreatePerfIntervalBody struct { + Req *types.CreatePerfInterval `xml:"urn:vim25 CreatePerfInterval,omitempty"` + Res *types.CreatePerfIntervalResponse `xml:"urn:vim25 CreatePerfIntervalResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreatePerfIntervalBody) Fault() *soap.Fault { return b.Fault_ } + +func CreatePerfInterval(ctx context.Context, r soap.RoundTripper, req *types.CreatePerfInterval) (*types.CreatePerfIntervalResponse, error) { + var reqBody, resBody CreatePerfIntervalBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateProfileBody struct { + Req *types.CreateProfile `xml:"urn:vim25 CreateProfile,omitempty"` + Res *types.CreateProfileResponse `xml:"urn:vim25 CreateProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateProfile(ctx context.Context, r soap.RoundTripper, req *types.CreateProfile) (*types.CreateProfileResponse, error) { + var reqBody, resBody CreateProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreatePropertyCollectorBody struct { + Req *types.CreatePropertyCollector `xml:"urn:vim25 CreatePropertyCollector,omitempty"` + Res *types.CreatePropertyCollectorResponse `xml:"urn:vim25 CreatePropertyCollectorResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreatePropertyCollectorBody) Fault() *soap.Fault { return b.Fault_ } + +func CreatePropertyCollector(ctx context.Context, r soap.RoundTripper, req *types.CreatePropertyCollector) (*types.CreatePropertyCollectorResponse, error) { + var reqBody, resBody CreatePropertyCollectorBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateRegistryKeyInGuestBody struct { + Req *types.CreateRegistryKeyInGuest `xml:"urn:vim25 CreateRegistryKeyInGuest,omitempty"` + Res *types.CreateRegistryKeyInGuestResponse `xml:"urn:vim25 CreateRegistryKeyInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateRegistryKeyInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateRegistryKeyInGuest(ctx context.Context, r soap.RoundTripper, req 
*types.CreateRegistryKeyInGuest) (*types.CreateRegistryKeyInGuestResponse, error) { + var reqBody, resBody CreateRegistryKeyInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateResourcePoolBody struct { + Req *types.CreateResourcePool `xml:"urn:vim25 CreateResourcePool,omitempty"` + Res *types.CreateResourcePoolResponse `xml:"urn:vim25 CreateResourcePoolResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateResourcePoolBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateResourcePool(ctx context.Context, r soap.RoundTripper, req *types.CreateResourcePool) (*types.CreateResourcePoolResponse, error) { + var reqBody, resBody CreateResourcePoolBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateScheduledTaskBody struct { + Req *types.CreateScheduledTask `xml:"urn:vim25 CreateScheduledTask,omitempty"` + Res *types.CreateScheduledTaskResponse `xml:"urn:vim25 CreateScheduledTaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.CreateScheduledTask) (*types.CreateScheduledTaskResponse, error) { + var reqBody, resBody CreateScheduledTaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateScreenshot_TaskBody struct { + Req *types.CreateScreenshot_Task `xml:"urn:vim25 CreateScreenshot_Task,omitempty"` + Res *types.CreateScreenshot_TaskResponse `xml:"urn:vim25 CreateScreenshot_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateScreenshot_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateScreenshot_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateScreenshot_Task) (*types.CreateScreenshot_TaskResponse, error) { + var reqBody, resBody CreateScreenshot_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateSecondaryVMEx_TaskBody struct { + Req *types.CreateSecondaryVMEx_Task `xml:"urn:vim25 CreateSecondaryVMEx_Task,omitempty"` + Res *types.CreateSecondaryVMEx_TaskResponse `xml:"urn:vim25 CreateSecondaryVMEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateSecondaryVMEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateSecondaryVMEx_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateSecondaryVMEx_Task) (*types.CreateSecondaryVMEx_TaskResponse, error) { + var reqBody, resBody CreateSecondaryVMEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateSecondaryVM_TaskBody struct { + Req *types.CreateSecondaryVM_Task `xml:"urn:vim25 CreateSecondaryVM_Task,omitempty"` + Res *types.CreateSecondaryVM_TaskResponse `xml:"urn:vim25 CreateSecondaryVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b 
*CreateSecondaryVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateSecondaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateSecondaryVM_Task) (*types.CreateSecondaryVM_TaskResponse, error) { + var reqBody, resBody CreateSecondaryVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateSnapshotEx_TaskBody struct { + Req *types.CreateSnapshotEx_Task `xml:"urn:vim25 CreateSnapshotEx_Task,omitempty"` + Res *types.CreateSnapshotEx_TaskResponse `xml:"urn:vim25 CreateSnapshotEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateSnapshotEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateSnapshotEx_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateSnapshotEx_Task) (*types.CreateSnapshotEx_TaskResponse, error) { + var reqBody, resBody CreateSnapshotEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateSnapshot_TaskBody struct { + Req *types.CreateSnapshot_Task `xml:"urn:vim25 CreateSnapshot_Task,omitempty"` + Res *types.CreateSnapshot_TaskResponse `xml:"urn:vim25 CreateSnapshot_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateSnapshot_Task) (*types.CreateSnapshot_TaskResponse, error) { + var reqBody, resBody CreateSnapshot_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateStoragePodBody struct { + Req *types.CreateStoragePod `xml:"urn:vim25 CreateStoragePod,omitempty"` + Res *types.CreateStoragePodResponse `xml:"urn:vim25 CreateStoragePodResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateStoragePodBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateStoragePod(ctx context.Context, r soap.RoundTripper, req *types.CreateStoragePod) (*types.CreateStoragePodResponse, error) { + var reqBody, resBody CreateStoragePodBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateTaskBody struct { + Req *types.CreateTask `xml:"urn:vim25 CreateTask,omitempty"` + Res *types.CreateTaskResponse `xml:"urn:vim25 CreateTaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateTaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateTask(ctx context.Context, r soap.RoundTripper, req *types.CreateTask) (*types.CreateTaskResponse, error) { + var reqBody, resBody CreateTaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateTemporaryDirectoryInGuestBody struct { + Req *types.CreateTemporaryDirectoryInGuest `xml:"urn:vim25 CreateTemporaryDirectoryInGuest,omitempty"` + Res *types.CreateTemporaryDirectoryInGuestResponse `xml:"urn:vim25 CreateTemporaryDirectoryInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty"` +} + +func (b *CreateTemporaryDirectoryInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateTemporaryDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.CreateTemporaryDirectoryInGuest) (*types.CreateTemporaryDirectoryInGuestResponse, error) { + var reqBody, resBody CreateTemporaryDirectoryInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateTemporaryFileInGuestBody struct { + Req *types.CreateTemporaryFileInGuest `xml:"urn:vim25 CreateTemporaryFileInGuest,omitempty"` + Res *types.CreateTemporaryFileInGuestResponse `xml:"urn:vim25 CreateTemporaryFileInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateTemporaryFileInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateTemporaryFileInGuest(ctx context.Context, r soap.RoundTripper, req *types.CreateTemporaryFileInGuest) (*types.CreateTemporaryFileInGuestResponse, error) { + var reqBody, resBody CreateTemporaryFileInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateUserBody struct { + Req *types.CreateUser `xml:"urn:vim25 CreateUser,omitempty"` + Res *types.CreateUserResponse `xml:"urn:vim25 CreateUserResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateUserBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateUser(ctx context.Context, r soap.RoundTripper, req *types.CreateUser) (*types.CreateUserResponse, error) { + var reqBody, resBody CreateUserBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateVAppBody struct { + Req *types.CreateVApp `xml:"urn:vim25 CreateVApp,omitempty"` + Res *types.CreateVAppResponse `xml:"urn:vim25 CreateVAppResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateVAppBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateVApp(ctx context.Context, r soap.RoundTripper, req *types.CreateVApp) (*types.CreateVAppResponse, error) { + var reqBody, resBody CreateVAppBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateVM_TaskBody struct { + Req *types.CreateVM_Task `xml:"urn:vim25 CreateVM_Task,omitempty"` + Res *types.CreateVM_TaskResponse `xml:"urn:vim25 CreateVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateVM_Task) (*types.CreateVM_TaskResponse, error) { + var reqBody, resBody CreateVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateVirtualDisk_TaskBody struct { + Req *types.CreateVirtualDisk_Task `xml:"urn:vim25 CreateVirtualDisk_Task,omitempty"` + Res *types.CreateVirtualDisk_TaskResponse `xml:"urn:vim25 CreateVirtualDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b 
*CreateVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateVirtualDisk_Task) (*types.CreateVirtualDisk_TaskResponse, error) { + var reqBody, resBody CreateVirtualDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateVmfsDatastoreBody struct { + Req *types.CreateVmfsDatastore `xml:"urn:vim25 CreateVmfsDatastore,omitempty"` + Res *types.CreateVmfsDatastoreResponse `xml:"urn:vim25 CreateVmfsDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateVmfsDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateVmfsDatastore(ctx context.Context, r soap.RoundTripper, req *types.CreateVmfsDatastore) (*types.CreateVmfsDatastoreResponse, error) { + var reqBody, resBody CreateVmfsDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateVvolDatastoreBody struct { + Req *types.CreateVvolDatastore `xml:"urn:vim25 CreateVvolDatastore,omitempty"` + Res *types.CreateVvolDatastoreResponse `xml:"urn:vim25 CreateVvolDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateVvolDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateVvolDatastore(ctx context.Context, r soap.RoundTripper, req *types.CreateVvolDatastore) (*types.CreateVvolDatastoreResponse, error) { + var reqBody, resBody CreateVvolDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CryptoManagerHostEnableBody struct { + Req *types.CryptoManagerHostEnable `xml:"urn:vim25 CryptoManagerHostEnable,omitempty"` + Res *types.CryptoManagerHostEnableResponse `xml:"urn:vim25 CryptoManagerHostEnableResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CryptoManagerHostEnableBody) Fault() *soap.Fault { return b.Fault_ } + +func CryptoManagerHostEnable(ctx context.Context, r soap.RoundTripper, req *types.CryptoManagerHostEnable) (*types.CryptoManagerHostEnableResponse, error) { + var reqBody, resBody CryptoManagerHostEnableBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CryptoManagerHostPrepareBody struct { + Req *types.CryptoManagerHostPrepare `xml:"urn:vim25 CryptoManagerHostPrepare,omitempty"` + Res *types.CryptoManagerHostPrepareResponse `xml:"urn:vim25 CryptoManagerHostPrepareResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CryptoManagerHostPrepareBody) Fault() *soap.Fault { return b.Fault_ } + +func CryptoManagerHostPrepare(ctx context.Context, r soap.RoundTripper, req *types.CryptoManagerHostPrepare) (*types.CryptoManagerHostPrepareResponse, error) { + var reqBody, resBody CryptoManagerHostPrepareBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CryptoUnlock_TaskBody struct { + Req *types.CryptoUnlock_Task `xml:"urn:vim25 CryptoUnlock_Task,omitempty"` + Res *types.CryptoUnlock_TaskResponse 
`xml:"urn:vim25 CryptoUnlock_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CryptoUnlock_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CryptoUnlock_Task(ctx context.Context, r soap.RoundTripper, req *types.CryptoUnlock_Task) (*types.CryptoUnlock_TaskResponse, error) { + var reqBody, resBody CryptoUnlock_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CurrentTimeBody struct { + Req *types.CurrentTime `xml:"urn:vim25 CurrentTime,omitempty"` + Res *types.CurrentTimeResponse `xml:"urn:vim25 CurrentTimeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CurrentTimeBody) Fault() *soap.Fault { return b.Fault_ } + +func CurrentTime(ctx context.Context, r soap.RoundTripper, req *types.CurrentTime) (*types.CurrentTimeResponse, error) { + var reqBody, resBody CurrentTimeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CustomizationSpecItemToXmlBody struct { + Req *types.CustomizationSpecItemToXml `xml:"urn:vim25 CustomizationSpecItemToXml,omitempty"` + Res *types.CustomizationSpecItemToXmlResponse `xml:"urn:vim25 CustomizationSpecItemToXmlResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CustomizationSpecItemToXmlBody) Fault() *soap.Fault { return b.Fault_ } + +func CustomizationSpecItemToXml(ctx context.Context, r soap.RoundTripper, req *types.CustomizationSpecItemToXml) (*types.CustomizationSpecItemToXmlResponse, error) { + var reqBody, resBody CustomizationSpecItemToXmlBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CustomizeVM_TaskBody struct { + Req *types.CustomizeVM_Task `xml:"urn:vim25 CustomizeVM_Task,omitempty"` + Res *types.CustomizeVM_TaskResponse `xml:"urn:vim25 CustomizeVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CustomizeVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CustomizeVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CustomizeVM_Task) (*types.CustomizeVM_TaskResponse, error) { + var reqBody, resBody CustomizeVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DVPortgroupRollback_TaskBody struct { + Req *types.DVPortgroupRollback_Task `xml:"urn:vim25 DVPortgroupRollback_Task,omitempty"` + Res *types.DVPortgroupRollback_TaskResponse `xml:"urn:vim25 DVPortgroupRollback_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DVPortgroupRollback_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DVPortgroupRollback_Task(ctx context.Context, r soap.RoundTripper, req *types.DVPortgroupRollback_Task) (*types.DVPortgroupRollback_TaskResponse, error) { + var reqBody, resBody DVPortgroupRollback_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DVSManagerExportEntity_TaskBody struct { + Req *types.DVSManagerExportEntity_Task `xml:"urn:vim25 
DVSManagerExportEntity_Task,omitempty"` + Res *types.DVSManagerExportEntity_TaskResponse `xml:"urn:vim25 DVSManagerExportEntity_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DVSManagerExportEntity_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DVSManagerExportEntity_Task(ctx context.Context, r soap.RoundTripper, req *types.DVSManagerExportEntity_Task) (*types.DVSManagerExportEntity_TaskResponse, error) { + var reqBody, resBody DVSManagerExportEntity_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DVSManagerImportEntity_TaskBody struct { + Req *types.DVSManagerImportEntity_Task `xml:"urn:vim25 DVSManagerImportEntity_Task,omitempty"` + Res *types.DVSManagerImportEntity_TaskResponse `xml:"urn:vim25 DVSManagerImportEntity_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DVSManagerImportEntity_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DVSManagerImportEntity_Task(ctx context.Context, r soap.RoundTripper, req *types.DVSManagerImportEntity_Task) (*types.DVSManagerImportEntity_TaskResponse, error) { + var reqBody, resBody DVSManagerImportEntity_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DVSManagerLookupDvPortGroupBody struct { + Req *types.DVSManagerLookupDvPortGroup `xml:"urn:vim25 DVSManagerLookupDvPortGroup,omitempty"` + Res *types.DVSManagerLookupDvPortGroupResponse `xml:"urn:vim25 DVSManagerLookupDvPortGroupResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DVSManagerLookupDvPortGroupBody) Fault() *soap.Fault { return b.Fault_ } + +func DVSManagerLookupDvPortGroup(ctx context.Context, r soap.RoundTripper, req *types.DVSManagerLookupDvPortGroup) (*types.DVSManagerLookupDvPortGroupResponse, error) { + var reqBody, resBody DVSManagerLookupDvPortGroupBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DVSRollback_TaskBody struct { + Req *types.DVSRollback_Task `xml:"urn:vim25 DVSRollback_Task,omitempty"` + Res *types.DVSRollback_TaskResponse `xml:"urn:vim25 DVSRollback_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DVSRollback_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DVSRollback_Task(ctx context.Context, r soap.RoundTripper, req *types.DVSRollback_Task) (*types.DVSRollback_TaskResponse, error) { + var reqBody, resBody DVSRollback_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DatastoreEnterMaintenanceModeBody struct { + Req *types.DatastoreEnterMaintenanceMode `xml:"urn:vim25 DatastoreEnterMaintenanceMode,omitempty"` + Res *types.DatastoreEnterMaintenanceModeResponse `xml:"urn:vim25 DatastoreEnterMaintenanceModeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DatastoreEnterMaintenanceModeBody) Fault() *soap.Fault { return b.Fault_ } + +func DatastoreEnterMaintenanceMode(ctx context.Context, r soap.RoundTripper, req 
*types.DatastoreEnterMaintenanceMode) (*types.DatastoreEnterMaintenanceModeResponse, error) { + var reqBody, resBody DatastoreEnterMaintenanceModeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DatastoreExitMaintenanceMode_TaskBody struct { + Req *types.DatastoreExitMaintenanceMode_Task `xml:"urn:vim25 DatastoreExitMaintenanceMode_Task,omitempty"` + Res *types.DatastoreExitMaintenanceMode_TaskResponse `xml:"urn:vim25 DatastoreExitMaintenanceMode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DatastoreExitMaintenanceMode_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DatastoreExitMaintenanceMode_Task(ctx context.Context, r soap.RoundTripper, req *types.DatastoreExitMaintenanceMode_Task) (*types.DatastoreExitMaintenanceMode_TaskResponse, error) { + var reqBody, resBody DatastoreExitMaintenanceMode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DecodeLicenseBody struct { + Req *types.DecodeLicense `xml:"urn:vim25 DecodeLicense,omitempty"` + Res *types.DecodeLicenseResponse `xml:"urn:vim25 DecodeLicenseResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DecodeLicenseBody) Fault() *soap.Fault { return b.Fault_ } + +func DecodeLicense(ctx context.Context, r soap.RoundTripper, req *types.DecodeLicense) (*types.DecodeLicenseResponse, error) { + var reqBody, resBody DecodeLicenseBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DefragmentAllDisksBody struct { + Req *types.DefragmentAllDisks `xml:"urn:vim25 DefragmentAllDisks,omitempty"` + Res *types.DefragmentAllDisksResponse `xml:"urn:vim25 DefragmentAllDisksResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DefragmentAllDisksBody) Fault() *soap.Fault { return b.Fault_ } + +func DefragmentAllDisks(ctx context.Context, r soap.RoundTripper, req *types.DefragmentAllDisks) (*types.DefragmentAllDisksResponse, error) { + var reqBody, resBody DefragmentAllDisksBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DefragmentVirtualDisk_TaskBody struct { + Req *types.DefragmentVirtualDisk_Task `xml:"urn:vim25 DefragmentVirtualDisk_Task,omitempty"` + Res *types.DefragmentVirtualDisk_TaskResponse `xml:"urn:vim25 DefragmentVirtualDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DefragmentVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DefragmentVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.DefragmentVirtualDisk_Task) (*types.DefragmentVirtualDisk_TaskResponse, error) { + var reqBody, resBody DefragmentVirtualDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteCustomizationSpecBody struct { + Req *types.DeleteCustomizationSpec `xml:"urn:vim25 DeleteCustomizationSpec,omitempty"` + Res *types.DeleteCustomizationSpecResponse `xml:"urn:vim25 DeleteCustomizationSpecResponse,omitempty"` 
+ Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.DeleteCustomizationSpec) (*types.DeleteCustomizationSpecResponse, error) { + var reqBody, resBody DeleteCustomizationSpecBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteDatastoreFile_TaskBody struct { + Req *types.DeleteDatastoreFile_Task `xml:"urn:vim25 DeleteDatastoreFile_Task,omitempty"` + Res *types.DeleteDatastoreFile_TaskResponse `xml:"urn:vim25 DeleteDatastoreFile_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteDatastoreFile_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteDatastoreFile_Task(ctx context.Context, r soap.RoundTripper, req *types.DeleteDatastoreFile_Task) (*types.DeleteDatastoreFile_TaskResponse, error) { + var reqBody, resBody DeleteDatastoreFile_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteDirectoryBody struct { + Req *types.DeleteDirectory `xml:"urn:vim25 DeleteDirectory,omitempty"` + Res *types.DeleteDirectoryResponse `xml:"urn:vim25 DeleteDirectoryResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteDirectoryBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteDirectory(ctx context.Context, r soap.RoundTripper, req *types.DeleteDirectory) (*types.DeleteDirectoryResponse, error) { + var reqBody, resBody DeleteDirectoryBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteDirectoryInGuestBody struct { + Req *types.DeleteDirectoryInGuest `xml:"urn:vim25 DeleteDirectoryInGuest,omitempty"` + Res *types.DeleteDirectoryInGuestResponse `xml:"urn:vim25 DeleteDirectoryInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteDirectoryInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.DeleteDirectoryInGuest) (*types.DeleteDirectoryInGuestResponse, error) { + var reqBody, resBody DeleteDirectoryInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteFileBody struct { + Req *types.DeleteFile `xml:"urn:vim25 DeleteFile,omitempty"` + Res *types.DeleteFileResponse `xml:"urn:vim25 DeleteFileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteFileBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteFile(ctx context.Context, r soap.RoundTripper, req *types.DeleteFile) (*types.DeleteFileResponse, error) { + var reqBody, resBody DeleteFileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteFileInGuestBody struct { + Req *types.DeleteFileInGuest `xml:"urn:vim25 DeleteFileInGuest,omitempty"` + Res *types.DeleteFileInGuestResponse `xml:"urn:vim25 
DeleteFileInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteFileInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteFileInGuest(ctx context.Context, r soap.RoundTripper, req *types.DeleteFileInGuest) (*types.DeleteFileInGuestResponse, error) { + var reqBody, resBody DeleteFileInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteHostSpecificationBody struct { + Req *types.DeleteHostSpecification `xml:"urn:vim25 DeleteHostSpecification,omitempty"` + Res *types.DeleteHostSpecificationResponse `xml:"urn:vim25 DeleteHostSpecificationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteHostSpecificationBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteHostSpecification(ctx context.Context, r soap.RoundTripper, req *types.DeleteHostSpecification) (*types.DeleteHostSpecificationResponse, error) { + var reqBody, resBody DeleteHostSpecificationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteHostSubSpecificationBody struct { + Req *types.DeleteHostSubSpecification `xml:"urn:vim25 DeleteHostSubSpecification,omitempty"` + Res *types.DeleteHostSubSpecificationResponse `xml:"urn:vim25 DeleteHostSubSpecificationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteHostSubSpecificationBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteHostSubSpecification(ctx context.Context, r soap.RoundTripper, req *types.DeleteHostSubSpecification) (*types.DeleteHostSubSpecificationResponse, error) { + var reqBody, resBody DeleteHostSubSpecificationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteNvdimmBlockNamespaces_TaskBody struct { + Req *types.DeleteNvdimmBlockNamespaces_Task `xml:"urn:vim25 DeleteNvdimmBlockNamespaces_Task,omitempty"` + Res *types.DeleteNvdimmBlockNamespaces_TaskResponse `xml:"urn:vim25 DeleteNvdimmBlockNamespaces_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteNvdimmBlockNamespaces_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteNvdimmBlockNamespaces_Task(ctx context.Context, r soap.RoundTripper, req *types.DeleteNvdimmBlockNamespaces_Task) (*types.DeleteNvdimmBlockNamespaces_TaskResponse, error) { + var reqBody, resBody DeleteNvdimmBlockNamespaces_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteNvdimmNamespace_TaskBody struct { + Req *types.DeleteNvdimmNamespace_Task `xml:"urn:vim25 DeleteNvdimmNamespace_Task,omitempty"` + Res *types.DeleteNvdimmNamespace_TaskResponse `xml:"urn:vim25 DeleteNvdimmNamespace_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteNvdimmNamespace_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteNvdimmNamespace_Task(ctx context.Context, r soap.RoundTripper, req *types.DeleteNvdimmNamespace_Task) (*types.DeleteNvdimmNamespace_TaskResponse, error) { + var reqBody, resBody 
DeleteNvdimmNamespace_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteRegistryKeyInGuestBody struct { + Req *types.DeleteRegistryKeyInGuest `xml:"urn:vim25 DeleteRegistryKeyInGuest,omitempty"` + Res *types.DeleteRegistryKeyInGuestResponse `xml:"urn:vim25 DeleteRegistryKeyInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteRegistryKeyInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteRegistryKeyInGuest(ctx context.Context, r soap.RoundTripper, req *types.DeleteRegistryKeyInGuest) (*types.DeleteRegistryKeyInGuestResponse, error) { + var reqBody, resBody DeleteRegistryKeyInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteRegistryValueInGuestBody struct { + Req *types.DeleteRegistryValueInGuest `xml:"urn:vim25 DeleteRegistryValueInGuest,omitempty"` + Res *types.DeleteRegistryValueInGuestResponse `xml:"urn:vim25 DeleteRegistryValueInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteRegistryValueInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteRegistryValueInGuest(ctx context.Context, r soap.RoundTripper, req *types.DeleteRegistryValueInGuest) (*types.DeleteRegistryValueInGuestResponse, error) { + var reqBody, resBody DeleteRegistryValueInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteScsiLunStateBody struct { + Req *types.DeleteScsiLunState `xml:"urn:vim25 DeleteScsiLunState,omitempty"` + Res *types.DeleteScsiLunStateResponse `xml:"urn:vim25 DeleteScsiLunStateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteScsiLunStateBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteScsiLunState(ctx context.Context, r soap.RoundTripper, req *types.DeleteScsiLunState) (*types.DeleteScsiLunStateResponse, error) { + var reqBody, resBody DeleteScsiLunStateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteSnapshot_TaskBody struct { + Req *types.DeleteSnapshot_Task `xml:"urn:vim25 DeleteSnapshot_Task,omitempty"` + Res *types.DeleteSnapshot_TaskResponse `xml:"urn:vim25 DeleteSnapshot_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.DeleteSnapshot_Task) (*types.DeleteSnapshot_TaskResponse, error) { + var reqBody, resBody DeleteSnapshot_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteVStorageObject_TaskBody struct { + Req *types.DeleteVStorageObject_Task `xml:"urn:vim25 DeleteVStorageObject_Task,omitempty"` + Res *types.DeleteVStorageObject_TaskResponse `xml:"urn:vim25 DeleteVStorageObject_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteVStorageObject_TaskBody) Fault() 
*soap.Fault { return b.Fault_ } + +func DeleteVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *types.DeleteVStorageObject_Task) (*types.DeleteVStorageObject_TaskResponse, error) { + var reqBody, resBody DeleteVStorageObject_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteVffsVolumeStateBody struct { + Req *types.DeleteVffsVolumeState `xml:"urn:vim25 DeleteVffsVolumeState,omitempty"` + Res *types.DeleteVffsVolumeStateResponse `xml:"urn:vim25 DeleteVffsVolumeStateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteVffsVolumeStateBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteVffsVolumeState(ctx context.Context, r soap.RoundTripper, req *types.DeleteVffsVolumeState) (*types.DeleteVffsVolumeStateResponse, error) { + var reqBody, resBody DeleteVffsVolumeStateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteVirtualDisk_TaskBody struct { + Req *types.DeleteVirtualDisk_Task `xml:"urn:vim25 DeleteVirtualDisk_Task,omitempty"` + Res *types.DeleteVirtualDisk_TaskResponse `xml:"urn:vim25 DeleteVirtualDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.DeleteVirtualDisk_Task) (*types.DeleteVirtualDisk_TaskResponse, error) { + var reqBody, resBody DeleteVirtualDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteVmfsVolumeStateBody struct { + Req *types.DeleteVmfsVolumeState `xml:"urn:vim25 DeleteVmfsVolumeState,omitempty"` + Res *types.DeleteVmfsVolumeStateResponse `xml:"urn:vim25 DeleteVmfsVolumeStateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteVmfsVolumeStateBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteVmfsVolumeState(ctx context.Context, r soap.RoundTripper, req *types.DeleteVmfsVolumeState) (*types.DeleteVmfsVolumeStateResponse, error) { + var reqBody, resBody DeleteVmfsVolumeStateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeleteVsanObjectsBody struct { + Req *types.DeleteVsanObjects `xml:"urn:vim25 DeleteVsanObjects,omitempty"` + Res *types.DeleteVsanObjectsResponse `xml:"urn:vim25 DeleteVsanObjectsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteVsanObjectsBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.DeleteVsanObjects) (*types.DeleteVsanObjectsResponse, error) { + var reqBody, resBody DeleteVsanObjectsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeselectVnicBody struct { + Req *types.DeselectVnic `xml:"urn:vim25 DeselectVnic,omitempty"` + Res *types.DeselectVnicResponse `xml:"urn:vim25 DeselectVnicResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeselectVnicBody) Fault() *soap.Fault { return b.Fault_ } + +func DeselectVnic(ctx context.Context, r soap.RoundTripper, req *types.DeselectVnic) (*types.DeselectVnicResponse, error) { + var reqBody, resBody DeselectVnicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeselectVnicForNicTypeBody struct { + Req *types.DeselectVnicForNicType `xml:"urn:vim25 DeselectVnicForNicType,omitempty"` + Res *types.DeselectVnicForNicTypeResponse `xml:"urn:vim25 DeselectVnicForNicTypeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeselectVnicForNicTypeBody) Fault() *soap.Fault { return b.Fault_ } + +func DeselectVnicForNicType(ctx context.Context, r soap.RoundTripper, req *types.DeselectVnicForNicType) (*types.DeselectVnicForNicTypeResponse, error) { + var reqBody, resBody DeselectVnicForNicTypeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyChildrenBody struct { + Req *types.DestroyChildren `xml:"urn:vim25 DestroyChildren,omitempty"` + Res *types.DestroyChildrenResponse `xml:"urn:vim25 DestroyChildrenResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyChildrenBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyChildren(ctx context.Context, r soap.RoundTripper, req *types.DestroyChildren) (*types.DestroyChildrenResponse, error) { + var reqBody, resBody DestroyChildrenBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyCollectorBody struct { + Req *types.DestroyCollector `xml:"urn:vim25 DestroyCollector,omitempty"` + Res *types.DestroyCollectorResponse `xml:"urn:vim25 DestroyCollectorResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyCollectorBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyCollector(ctx context.Context, r soap.RoundTripper, req *types.DestroyCollector) (*types.DestroyCollectorResponse, error) { + var reqBody, resBody DestroyCollectorBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyDatastoreBody struct { + Req *types.DestroyDatastore `xml:"urn:vim25 DestroyDatastore,omitempty"` + Res *types.DestroyDatastoreResponse `xml:"urn:vim25 DestroyDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyDatastore(ctx context.Context, r soap.RoundTripper, req *types.DestroyDatastore) (*types.DestroyDatastoreResponse, error) { + var reqBody, resBody DestroyDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyIpPoolBody struct { + Req *types.DestroyIpPool `xml:"urn:vim25 DestroyIpPool,omitempty"` + Res *types.DestroyIpPoolResponse `xml:"urn:vim25 DestroyIpPoolResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b 
*DestroyIpPoolBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyIpPool(ctx context.Context, r soap.RoundTripper, req *types.DestroyIpPool) (*types.DestroyIpPoolResponse, error) { + var reqBody, resBody DestroyIpPoolBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyNetworkBody struct { + Req *types.DestroyNetwork `xml:"urn:vim25 DestroyNetwork,omitempty"` + Res *types.DestroyNetworkResponse `xml:"urn:vim25 DestroyNetworkResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyNetworkBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyNetwork(ctx context.Context, r soap.RoundTripper, req *types.DestroyNetwork) (*types.DestroyNetworkResponse, error) { + var reqBody, resBody DestroyNetworkBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyProfileBody struct { + Req *types.DestroyProfile `xml:"urn:vim25 DestroyProfile,omitempty"` + Res *types.DestroyProfileResponse `xml:"urn:vim25 DestroyProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyProfile(ctx context.Context, r soap.RoundTripper, req *types.DestroyProfile) (*types.DestroyProfileResponse, error) { + var reqBody, resBody DestroyProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyPropertyCollectorBody struct { + Req *types.DestroyPropertyCollector `xml:"urn:vim25 DestroyPropertyCollector,omitempty"` + Res *types.DestroyPropertyCollectorResponse `xml:"urn:vim25 DestroyPropertyCollectorResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyPropertyCollectorBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyPropertyCollector(ctx context.Context, r soap.RoundTripper, req *types.DestroyPropertyCollector) (*types.DestroyPropertyCollectorResponse, error) { + var reqBody, resBody DestroyPropertyCollectorBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyPropertyFilterBody struct { + Req *types.DestroyPropertyFilter `xml:"urn:vim25 DestroyPropertyFilter,omitempty"` + Res *types.DestroyPropertyFilterResponse `xml:"urn:vim25 DestroyPropertyFilterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyPropertyFilterBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyPropertyFilter(ctx context.Context, r soap.RoundTripper, req *types.DestroyPropertyFilter) (*types.DestroyPropertyFilterResponse, error) { + var reqBody, resBody DestroyPropertyFilterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyVffsBody struct { + Req *types.DestroyVffs `xml:"urn:vim25 DestroyVffs,omitempty"` + Res *types.DestroyVffsResponse `xml:"urn:vim25 DestroyVffsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyVffsBody) Fault() *soap.Fault { return 
b.Fault_ } + +func DestroyVffs(ctx context.Context, r soap.RoundTripper, req *types.DestroyVffs) (*types.DestroyVffsResponse, error) { + var reqBody, resBody DestroyVffsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyViewBody struct { + Req *types.DestroyView `xml:"urn:vim25 DestroyView,omitempty"` + Res *types.DestroyViewResponse `xml:"urn:vim25 DestroyViewResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyViewBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyView(ctx context.Context, r soap.RoundTripper, req *types.DestroyView) (*types.DestroyViewResponse, error) { + var reqBody, resBody DestroyViewBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type Destroy_TaskBody struct { + Req *types.Destroy_Task `xml:"urn:vim25 Destroy_Task,omitempty"` + Res *types.Destroy_TaskResponse `xml:"urn:vim25 Destroy_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *Destroy_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func Destroy_Task(ctx context.Context, r soap.RoundTripper, req *types.Destroy_Task) (*types.Destroy_TaskResponse, error) { + var reqBody, resBody Destroy_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DetachDisk_TaskBody struct { + Req *types.DetachDisk_Task `xml:"urn:vim25 DetachDisk_Task,omitempty"` + Res *types.DetachDisk_TaskResponse `xml:"urn:vim25 DetachDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DetachDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DetachDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.DetachDisk_Task) (*types.DetachDisk_TaskResponse, error) { + var reqBody, resBody DetachDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DetachScsiLunBody struct { + Req *types.DetachScsiLun `xml:"urn:vim25 DetachScsiLun,omitempty"` + Res *types.DetachScsiLunResponse `xml:"urn:vim25 DetachScsiLunResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DetachScsiLunBody) Fault() *soap.Fault { return b.Fault_ } + +func DetachScsiLun(ctx context.Context, r soap.RoundTripper, req *types.DetachScsiLun) (*types.DetachScsiLunResponse, error) { + var reqBody, resBody DetachScsiLunBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DetachScsiLunEx_TaskBody struct { + Req *types.DetachScsiLunEx_Task `xml:"urn:vim25 DetachScsiLunEx_Task,omitempty"` + Res *types.DetachScsiLunEx_TaskResponse `xml:"urn:vim25 DetachScsiLunEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DetachScsiLunEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DetachScsiLunEx_Task(ctx context.Context, r soap.RoundTripper, req *types.DetachScsiLunEx_Task) (*types.DetachScsiLunEx_TaskResponse, error) { + var reqBody, resBody DetachScsiLunEx_TaskBody + + reqBody.Req 
= req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DetachTagFromVStorageObjectBody struct { + Req *types.DetachTagFromVStorageObject `xml:"urn:vim25 DetachTagFromVStorageObject,omitempty"` + Res *types.DetachTagFromVStorageObjectResponse `xml:"urn:vim25 DetachTagFromVStorageObjectResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DetachTagFromVStorageObjectBody) Fault() *soap.Fault { return b.Fault_ } + +func DetachTagFromVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.DetachTagFromVStorageObject) (*types.DetachTagFromVStorageObjectResponse, error) { + var reqBody, resBody DetachTagFromVStorageObjectBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableEvcMode_TaskBody struct { + Req *types.DisableEvcMode_Task `xml:"urn:vim25 DisableEvcMode_Task,omitempty"` + Res *types.DisableEvcMode_TaskResponse `xml:"urn:vim25 DisableEvcMode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableEvcMode_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableEvcMode_Task(ctx context.Context, r soap.RoundTripper, req *types.DisableEvcMode_Task) (*types.DisableEvcMode_TaskResponse, error) { + var reqBody, resBody DisableEvcMode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableFeatureBody struct { + Req *types.DisableFeature `xml:"urn:vim25 DisableFeature,omitempty"` + Res *types.DisableFeatureResponse `xml:"urn:vim25 DisableFeatureResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableFeatureBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableFeature(ctx context.Context, r soap.RoundTripper, req *types.DisableFeature) (*types.DisableFeatureResponse, error) { + var reqBody, resBody DisableFeatureBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableHyperThreadingBody struct { + Req *types.DisableHyperThreading `xml:"urn:vim25 DisableHyperThreading,omitempty"` + Res *types.DisableHyperThreadingResponse `xml:"urn:vim25 DisableHyperThreadingResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableHyperThreadingBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableHyperThreading(ctx context.Context, r soap.RoundTripper, req *types.DisableHyperThreading) (*types.DisableHyperThreadingResponse, error) { + var reqBody, resBody DisableHyperThreadingBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableMultipathPathBody struct { + Req *types.DisableMultipathPath `xml:"urn:vim25 DisableMultipathPath,omitempty"` + Res *types.DisableMultipathPathResponse `xml:"urn:vim25 DisableMultipathPathResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableMultipathPathBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableMultipathPath(ctx context.Context, r soap.RoundTripper, req *types.DisableMultipathPath) 
(*types.DisableMultipathPathResponse, error) { + var reqBody, resBody DisableMultipathPathBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableRulesetBody struct { + Req *types.DisableRuleset `xml:"urn:vim25 DisableRuleset,omitempty"` + Res *types.DisableRulesetResponse `xml:"urn:vim25 DisableRulesetResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableRulesetBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableRuleset(ctx context.Context, r soap.RoundTripper, req *types.DisableRuleset) (*types.DisableRulesetResponse, error) { + var reqBody, resBody DisableRulesetBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableSecondaryVM_TaskBody struct { + Req *types.DisableSecondaryVM_Task `xml:"urn:vim25 DisableSecondaryVM_Task,omitempty"` + Res *types.DisableSecondaryVM_TaskResponse `xml:"urn:vim25 DisableSecondaryVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableSecondaryVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableSecondaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types.DisableSecondaryVM_Task) (*types.DisableSecondaryVM_TaskResponse, error) { + var reqBody, resBody DisableSecondaryVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableSmartCardAuthenticationBody struct { + Req *types.DisableSmartCardAuthentication `xml:"urn:vim25 DisableSmartCardAuthentication,omitempty"` + Res *types.DisableSmartCardAuthenticationResponse `xml:"urn:vim25 DisableSmartCardAuthenticationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableSmartCardAuthenticationBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableSmartCardAuthentication(ctx context.Context, r soap.RoundTripper, req *types.DisableSmartCardAuthentication) (*types.DisableSmartCardAuthenticationResponse, error) { + var reqBody, resBody DisableSmartCardAuthenticationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisconnectHost_TaskBody struct { + Req *types.DisconnectHost_Task `xml:"urn:vim25 DisconnectHost_Task,omitempty"` + Res *types.DisconnectHost_TaskResponse `xml:"urn:vim25 DisconnectHost_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisconnectHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DisconnectHost_Task(ctx context.Context, r soap.RoundTripper, req *types.DisconnectHost_Task) (*types.DisconnectHost_TaskResponse, error) { + var reqBody, resBody DisconnectHost_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DiscoverFcoeHbasBody struct { + Req *types.DiscoverFcoeHbas `xml:"urn:vim25 DiscoverFcoeHbas,omitempty"` + Res *types.DiscoverFcoeHbasResponse `xml:"urn:vim25 DiscoverFcoeHbasResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DiscoverFcoeHbasBody) Fault() 
*soap.Fault { return b.Fault_ } + +func DiscoverFcoeHbas(ctx context.Context, r soap.RoundTripper, req *types.DiscoverFcoeHbas) (*types.DiscoverFcoeHbasResponse, error) { + var reqBody, resBody DiscoverFcoeHbasBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DissociateProfileBody struct { + Req *types.DissociateProfile `xml:"urn:vim25 DissociateProfile,omitempty"` + Res *types.DissociateProfileResponse `xml:"urn:vim25 DissociateProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DissociateProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func DissociateProfile(ctx context.Context, r soap.RoundTripper, req *types.DissociateProfile) (*types.DissociateProfileResponse, error) { + var reqBody, resBody DissociateProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DoesCustomizationSpecExistBody struct { + Req *types.DoesCustomizationSpecExist `xml:"urn:vim25 DoesCustomizationSpecExist,omitempty"` + Res *types.DoesCustomizationSpecExistResponse `xml:"urn:vim25 DoesCustomizationSpecExistResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DoesCustomizationSpecExistBody) Fault() *soap.Fault { return b.Fault_ } + +func DoesCustomizationSpecExist(ctx context.Context, r soap.RoundTripper, req *types.DoesCustomizationSpecExist) (*types.DoesCustomizationSpecExistResponse, error) { + var reqBody, resBody DoesCustomizationSpecExistBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DuplicateCustomizationSpecBody struct { + Req *types.DuplicateCustomizationSpec `xml:"urn:vim25 DuplicateCustomizationSpec,omitempty"` + Res *types.DuplicateCustomizationSpecResponse `xml:"urn:vim25 DuplicateCustomizationSpecResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DuplicateCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ } + +func DuplicateCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.DuplicateCustomizationSpec) (*types.DuplicateCustomizationSpecResponse, error) { + var reqBody, resBody DuplicateCustomizationSpecBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DvsReconfigureVmVnicNetworkResourcePool_TaskBody struct { + Req *types.DvsReconfigureVmVnicNetworkResourcePool_Task `xml:"urn:vim25 DvsReconfigureVmVnicNetworkResourcePool_Task,omitempty"` + Res *types.DvsReconfigureVmVnicNetworkResourcePool_TaskResponse `xml:"urn:vim25 DvsReconfigureVmVnicNetworkResourcePool_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DvsReconfigureVmVnicNetworkResourcePool_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DvsReconfigureVmVnicNetworkResourcePool_Task(ctx context.Context, r soap.RoundTripper, req *types.DvsReconfigureVmVnicNetworkResourcePool_Task) (*types.DvsReconfigureVmVnicNetworkResourcePool_TaskResponse, error) { + var reqBody, resBody DvsReconfigureVmVnicNetworkResourcePool_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil 
{ + return nil, err + } + + return resBody.Res, nil +} + +type EagerZeroVirtualDisk_TaskBody struct { + Req *types.EagerZeroVirtualDisk_Task `xml:"urn:vim25 EagerZeroVirtualDisk_Task,omitempty"` + Res *types.EagerZeroVirtualDisk_TaskResponse `xml:"urn:vim25 EagerZeroVirtualDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EagerZeroVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func EagerZeroVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.EagerZeroVirtualDisk_Task) (*types.EagerZeroVirtualDisk_TaskResponse, error) { + var reqBody, resBody EagerZeroVirtualDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableAlarmActionsBody struct { + Req *types.EnableAlarmActions `xml:"urn:vim25 EnableAlarmActions,omitempty"` + Res *types.EnableAlarmActionsResponse `xml:"urn:vim25 EnableAlarmActionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableAlarmActionsBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableAlarmActions(ctx context.Context, r soap.RoundTripper, req *types.EnableAlarmActions) (*types.EnableAlarmActionsResponse, error) { + var reqBody, resBody EnableAlarmActionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableCryptoBody struct { + Req *types.EnableCrypto `xml:"urn:vim25 EnableCrypto,omitempty"` + Res *types.EnableCryptoResponse `xml:"urn:vim25 EnableCryptoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableCryptoBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableCrypto(ctx context.Context, r soap.RoundTripper, req *types.EnableCrypto) (*types.EnableCryptoResponse, error) { + var reqBody, resBody EnableCryptoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableFeatureBody struct { + Req *types.EnableFeature `xml:"urn:vim25 EnableFeature,omitempty"` + Res *types.EnableFeatureResponse `xml:"urn:vim25 EnableFeatureResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableFeatureBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableFeature(ctx context.Context, r soap.RoundTripper, req *types.EnableFeature) (*types.EnableFeatureResponse, error) { + var reqBody, resBody EnableFeatureBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableHyperThreadingBody struct { + Req *types.EnableHyperThreading `xml:"urn:vim25 EnableHyperThreading,omitempty"` + Res *types.EnableHyperThreadingResponse `xml:"urn:vim25 EnableHyperThreadingResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableHyperThreadingBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableHyperThreading(ctx context.Context, r soap.RoundTripper, req *types.EnableHyperThreading) (*types.EnableHyperThreadingResponse, error) { + var reqBody, resBody EnableHyperThreadingBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err 
+ } + + return resBody.Res, nil +} + +type EnableMultipathPathBody struct { + Req *types.EnableMultipathPath `xml:"urn:vim25 EnableMultipathPath,omitempty"` + Res *types.EnableMultipathPathResponse `xml:"urn:vim25 EnableMultipathPathResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableMultipathPathBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableMultipathPath(ctx context.Context, r soap.RoundTripper, req *types.EnableMultipathPath) (*types.EnableMultipathPathResponse, error) { + var reqBody, resBody EnableMultipathPathBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableNetworkResourceManagementBody struct { + Req *types.EnableNetworkResourceManagement `xml:"urn:vim25 EnableNetworkResourceManagement,omitempty"` + Res *types.EnableNetworkResourceManagementResponse `xml:"urn:vim25 EnableNetworkResourceManagementResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableNetworkResourceManagementBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableNetworkResourceManagement(ctx context.Context, r soap.RoundTripper, req *types.EnableNetworkResourceManagement) (*types.EnableNetworkResourceManagementResponse, error) { + var reqBody, resBody EnableNetworkResourceManagementBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableRulesetBody struct { + Req *types.EnableRuleset `xml:"urn:vim25 EnableRuleset,omitempty"` + Res *types.EnableRulesetResponse `xml:"urn:vim25 EnableRulesetResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableRulesetBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableRuleset(ctx context.Context, r soap.RoundTripper, req *types.EnableRuleset) (*types.EnableRulesetResponse, error) { + var reqBody, resBody EnableRulesetBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableSecondaryVM_TaskBody struct { + Req *types.EnableSecondaryVM_Task `xml:"urn:vim25 EnableSecondaryVM_Task,omitempty"` + Res *types.EnableSecondaryVM_TaskResponse `xml:"urn:vim25 EnableSecondaryVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableSecondaryVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableSecondaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types.EnableSecondaryVM_Task) (*types.EnableSecondaryVM_TaskResponse, error) { + var reqBody, resBody EnableSecondaryVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnableSmartCardAuthenticationBody struct { + Req *types.EnableSmartCardAuthentication `xml:"urn:vim25 EnableSmartCardAuthentication,omitempty"` + Res *types.EnableSmartCardAuthenticationResponse `xml:"urn:vim25 EnableSmartCardAuthenticationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableSmartCardAuthenticationBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableSmartCardAuthentication(ctx context.Context, r soap.RoundTripper, req 
*types.EnableSmartCardAuthentication) (*types.EnableSmartCardAuthenticationResponse, error) { + var reqBody, resBody EnableSmartCardAuthenticationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnterLockdownModeBody struct { + Req *types.EnterLockdownMode `xml:"urn:vim25 EnterLockdownMode,omitempty"` + Res *types.EnterLockdownModeResponse `xml:"urn:vim25 EnterLockdownModeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnterLockdownModeBody) Fault() *soap.Fault { return b.Fault_ } + +func EnterLockdownMode(ctx context.Context, r soap.RoundTripper, req *types.EnterLockdownMode) (*types.EnterLockdownModeResponse, error) { + var reqBody, resBody EnterLockdownModeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EnterMaintenanceMode_TaskBody struct { + Req *types.EnterMaintenanceMode_Task `xml:"urn:vim25 EnterMaintenanceMode_Task,omitempty"` + Res *types.EnterMaintenanceMode_TaskResponse `xml:"urn:vim25 EnterMaintenanceMode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnterMaintenanceMode_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func EnterMaintenanceMode_Task(ctx context.Context, r soap.RoundTripper, req *types.EnterMaintenanceMode_Task) (*types.EnterMaintenanceMode_TaskResponse, error) { + var reqBody, resBody EnterMaintenanceMode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EstimateDatabaseSizeBody struct { + Req *types.EstimateDatabaseSize `xml:"urn:vim25 EstimateDatabaseSize,omitempty"` + Res *types.EstimateDatabaseSizeResponse `xml:"urn:vim25 EstimateDatabaseSizeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EstimateDatabaseSizeBody) Fault() *soap.Fault { return b.Fault_ } + +func EstimateDatabaseSize(ctx context.Context, r soap.RoundTripper, req *types.EstimateDatabaseSize) (*types.EstimateDatabaseSizeResponse, error) { + var reqBody, resBody EstimateDatabaseSizeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EstimateStorageForConsolidateSnapshots_TaskBody struct { + Req *types.EstimateStorageForConsolidateSnapshots_Task `xml:"urn:vim25 EstimateStorageForConsolidateSnapshots_Task,omitempty"` + Res *types.EstimateStorageForConsolidateSnapshots_TaskResponse `xml:"urn:vim25 EstimateStorageForConsolidateSnapshots_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EstimateStorageForConsolidateSnapshots_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func EstimateStorageForConsolidateSnapshots_Task(ctx context.Context, r soap.RoundTripper, req *types.EstimateStorageForConsolidateSnapshots_Task) (*types.EstimateStorageForConsolidateSnapshots_TaskResponse, error) { + var reqBody, resBody EstimateStorageForConsolidateSnapshots_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EsxAgentHostManagerUpdateConfigBody struct { + Req 
*types.EsxAgentHostManagerUpdateConfig `xml:"urn:vim25 EsxAgentHostManagerUpdateConfig,omitempty"` + Res *types.EsxAgentHostManagerUpdateConfigResponse `xml:"urn:vim25 EsxAgentHostManagerUpdateConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EsxAgentHostManagerUpdateConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func EsxAgentHostManagerUpdateConfig(ctx context.Context, r soap.RoundTripper, req *types.EsxAgentHostManagerUpdateConfig) (*types.EsxAgentHostManagerUpdateConfigResponse, error) { + var reqBody, resBody EsxAgentHostManagerUpdateConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EvacuateVsanNode_TaskBody struct { + Req *types.EvacuateVsanNode_Task `xml:"urn:vim25 EvacuateVsanNode_Task,omitempty"` + Res *types.EvacuateVsanNode_TaskResponse `xml:"urn:vim25 EvacuateVsanNode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EvacuateVsanNode_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func EvacuateVsanNode_Task(ctx context.Context, r soap.RoundTripper, req *types.EvacuateVsanNode_Task) (*types.EvacuateVsanNode_TaskResponse, error) { + var reqBody, resBody EvacuateVsanNode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type EvcManagerBody struct { + Req *types.EvcManager `xml:"urn:vim25 EvcManager,omitempty"` + Res *types.EvcManagerResponse `xml:"urn:vim25 EvcManagerResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EvcManagerBody) Fault() *soap.Fault { return b.Fault_ } + +func EvcManager(ctx context.Context, r soap.RoundTripper, req *types.EvcManager) (*types.EvcManagerResponse, error) { + var reqBody, resBody EvcManagerBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExecuteHostProfileBody struct { + Req *types.ExecuteHostProfile `xml:"urn:vim25 ExecuteHostProfile,omitempty"` + Res *types.ExecuteHostProfileResponse `xml:"urn:vim25 ExecuteHostProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExecuteHostProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func ExecuteHostProfile(ctx context.Context, r soap.RoundTripper, req *types.ExecuteHostProfile) (*types.ExecuteHostProfileResponse, error) { + var reqBody, resBody ExecuteHostProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExecuteSimpleCommandBody struct { + Req *types.ExecuteSimpleCommand `xml:"urn:vim25 ExecuteSimpleCommand,omitempty"` + Res *types.ExecuteSimpleCommandResponse `xml:"urn:vim25 ExecuteSimpleCommandResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExecuteSimpleCommandBody) Fault() *soap.Fault { return b.Fault_ } + +func ExecuteSimpleCommand(ctx context.Context, r soap.RoundTripper, req *types.ExecuteSimpleCommand) (*types.ExecuteSimpleCommandResponse, error) { + var reqBody, resBody ExecuteSimpleCommandBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return 
nil, err + } + + return resBody.Res, nil +} + +type ExitLockdownModeBody struct { + Req *types.ExitLockdownMode `xml:"urn:vim25 ExitLockdownMode,omitempty"` + Res *types.ExitLockdownModeResponse `xml:"urn:vim25 ExitLockdownModeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExitLockdownModeBody) Fault() *soap.Fault { return b.Fault_ } + +func ExitLockdownMode(ctx context.Context, r soap.RoundTripper, req *types.ExitLockdownMode) (*types.ExitLockdownModeResponse, error) { + var reqBody, resBody ExitLockdownModeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExitMaintenanceMode_TaskBody struct { + Req *types.ExitMaintenanceMode_Task `xml:"urn:vim25 ExitMaintenanceMode_Task,omitempty"` + Res *types.ExitMaintenanceMode_TaskResponse `xml:"urn:vim25 ExitMaintenanceMode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExitMaintenanceMode_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ExitMaintenanceMode_Task(ctx context.Context, r soap.RoundTripper, req *types.ExitMaintenanceMode_Task) (*types.ExitMaintenanceMode_TaskResponse, error) { + var reqBody, resBody ExitMaintenanceMode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExpandVmfsDatastoreBody struct { + Req *types.ExpandVmfsDatastore `xml:"urn:vim25 ExpandVmfsDatastore,omitempty"` + Res *types.ExpandVmfsDatastoreResponse `xml:"urn:vim25 ExpandVmfsDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExpandVmfsDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func ExpandVmfsDatastore(ctx context.Context, r soap.RoundTripper, req *types.ExpandVmfsDatastore) (*types.ExpandVmfsDatastoreResponse, error) { + var reqBody, resBody ExpandVmfsDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExpandVmfsExtentBody struct { + Req *types.ExpandVmfsExtent `xml:"urn:vim25 ExpandVmfsExtent,omitempty"` + Res *types.ExpandVmfsExtentResponse `xml:"urn:vim25 ExpandVmfsExtentResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExpandVmfsExtentBody) Fault() *soap.Fault { return b.Fault_ } + +func ExpandVmfsExtent(ctx context.Context, r soap.RoundTripper, req *types.ExpandVmfsExtent) (*types.ExpandVmfsExtentResponse, error) { + var reqBody, resBody ExpandVmfsExtentBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExportAnswerFile_TaskBody struct { + Req *types.ExportAnswerFile_Task `xml:"urn:vim25 ExportAnswerFile_Task,omitempty"` + Res *types.ExportAnswerFile_TaskResponse `xml:"urn:vim25 ExportAnswerFile_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExportAnswerFile_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ExportAnswerFile_Task(ctx context.Context, r soap.RoundTripper, req *types.ExportAnswerFile_Task) (*types.ExportAnswerFile_TaskResponse, error) { + var reqBody, resBody ExportAnswerFile_TaskBody + + reqBody.Req = req + + if err := 
r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExportProfileBody struct { + Req *types.ExportProfile `xml:"urn:vim25 ExportProfile,omitempty"` + Res *types.ExportProfileResponse `xml:"urn:vim25 ExportProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExportProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func ExportProfile(ctx context.Context, r soap.RoundTripper, req *types.ExportProfile) (*types.ExportProfileResponse, error) { + var reqBody, resBody ExportProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExportSnapshotBody struct { + Req *types.ExportSnapshot `xml:"urn:vim25 ExportSnapshot,omitempty"` + Res *types.ExportSnapshotResponse `xml:"urn:vim25 ExportSnapshotResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExportSnapshotBody) Fault() *soap.Fault { return b.Fault_ } + +func ExportSnapshot(ctx context.Context, r soap.RoundTripper, req *types.ExportSnapshot) (*types.ExportSnapshotResponse, error) { + var reqBody, resBody ExportSnapshotBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExportVAppBody struct { + Req *types.ExportVApp `xml:"urn:vim25 ExportVApp,omitempty"` + Res *types.ExportVAppResponse `xml:"urn:vim25 ExportVAppResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExportVAppBody) Fault() *soap.Fault { return b.Fault_ } + +func ExportVApp(ctx context.Context, r soap.RoundTripper, req *types.ExportVApp) (*types.ExportVAppResponse, error) { + var reqBody, resBody ExportVAppBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExportVmBody struct { + Req *types.ExportVm `xml:"urn:vim25 ExportVm,omitempty"` + Res *types.ExportVmResponse `xml:"urn:vim25 ExportVmResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExportVmBody) Fault() *soap.Fault { return b.Fault_ } + +func ExportVm(ctx context.Context, r soap.RoundTripper, req *types.ExportVm) (*types.ExportVmResponse, error) { + var reqBody, resBody ExportVmBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExtendDisk_TaskBody struct { + Req *types.ExtendDisk_Task `xml:"urn:vim25 ExtendDisk_Task,omitempty"` + Res *types.ExtendDisk_TaskResponse `xml:"urn:vim25 ExtendDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExtendDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ExtendDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.ExtendDisk_Task) (*types.ExtendDisk_TaskResponse, error) { + var reqBody, resBody ExtendDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExtendVffsBody struct { + Req *types.ExtendVffs `xml:"urn:vim25 ExtendVffs,omitempty"` + Res *types.ExtendVffsResponse `xml:"urn:vim25 ExtendVffsResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExtendVffsBody) Fault() *soap.Fault { return b.Fault_ } + +func ExtendVffs(ctx context.Context, r soap.RoundTripper, req *types.ExtendVffs) (*types.ExtendVffsResponse, error) { + var reqBody, resBody ExtendVffsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExtendVirtualDisk_TaskBody struct { + Req *types.ExtendVirtualDisk_Task `xml:"urn:vim25 ExtendVirtualDisk_Task,omitempty"` + Res *types.ExtendVirtualDisk_TaskResponse `xml:"urn:vim25 ExtendVirtualDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExtendVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ExtendVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.ExtendVirtualDisk_Task) (*types.ExtendVirtualDisk_TaskResponse, error) { + var reqBody, resBody ExtendVirtualDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExtendVmfsDatastoreBody struct { + Req *types.ExtendVmfsDatastore `xml:"urn:vim25 ExtendVmfsDatastore,omitempty"` + Res *types.ExtendVmfsDatastoreResponse `xml:"urn:vim25 ExtendVmfsDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExtendVmfsDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func ExtendVmfsDatastore(ctx context.Context, r soap.RoundTripper, req *types.ExtendVmfsDatastore) (*types.ExtendVmfsDatastoreResponse, error) { + var reqBody, resBody ExtendVmfsDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExtractOvfEnvironmentBody struct { + Req *types.ExtractOvfEnvironment `xml:"urn:vim25 ExtractOvfEnvironment,omitempty"` + Res *types.ExtractOvfEnvironmentResponse `xml:"urn:vim25 ExtractOvfEnvironmentResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExtractOvfEnvironmentBody) Fault() *soap.Fault { return b.Fault_ } + +func ExtractOvfEnvironment(ctx context.Context, r soap.RoundTripper, req *types.ExtractOvfEnvironment) (*types.ExtractOvfEnvironmentResponse, error) { + var reqBody, resBody ExtractOvfEnvironmentBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FetchDVPortKeysBody struct { + Req *types.FetchDVPortKeys `xml:"urn:vim25 FetchDVPortKeys,omitempty"` + Res *types.FetchDVPortKeysResponse `xml:"urn:vim25 FetchDVPortKeysResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FetchDVPortKeysBody) Fault() *soap.Fault { return b.Fault_ } + +func FetchDVPortKeys(ctx context.Context, r soap.RoundTripper, req *types.FetchDVPortKeys) (*types.FetchDVPortKeysResponse, error) { + var reqBody, resBody FetchDVPortKeysBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FetchDVPortsBody struct { + Req *types.FetchDVPorts `xml:"urn:vim25 FetchDVPorts,omitempty"` + Res *types.FetchDVPortsResponse `xml:"urn:vim25 FetchDVPortsResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FetchDVPortsBody) Fault() *soap.Fault { return b.Fault_ } + +func FetchDVPorts(ctx context.Context, r soap.RoundTripper, req *types.FetchDVPorts) (*types.FetchDVPortsResponse, error) { + var reqBody, resBody FetchDVPortsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FetchSystemEventLogBody struct { + Req *types.FetchSystemEventLog `xml:"urn:vim25 FetchSystemEventLog,omitempty"` + Res *types.FetchSystemEventLogResponse `xml:"urn:vim25 FetchSystemEventLogResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FetchSystemEventLogBody) Fault() *soap.Fault { return b.Fault_ } + +func FetchSystemEventLog(ctx context.Context, r soap.RoundTripper, req *types.FetchSystemEventLog) (*types.FetchSystemEventLogResponse, error) { + var reqBody, resBody FetchSystemEventLogBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FetchUserPrivilegeOnEntitiesBody struct { + Req *types.FetchUserPrivilegeOnEntities `xml:"urn:vim25 FetchUserPrivilegeOnEntities,omitempty"` + Res *types.FetchUserPrivilegeOnEntitiesResponse `xml:"urn:vim25 FetchUserPrivilegeOnEntitiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FetchUserPrivilegeOnEntitiesBody) Fault() *soap.Fault { return b.Fault_ } + +func FetchUserPrivilegeOnEntities(ctx context.Context, r soap.RoundTripper, req *types.FetchUserPrivilegeOnEntities) (*types.FetchUserPrivilegeOnEntitiesResponse, error) { + var reqBody, resBody FetchUserPrivilegeOnEntitiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FindAllByDnsNameBody struct { + Req *types.FindAllByDnsName `xml:"urn:vim25 FindAllByDnsName,omitempty"` + Res *types.FindAllByDnsNameResponse `xml:"urn:vim25 FindAllByDnsNameResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FindAllByDnsNameBody) Fault() *soap.Fault { return b.Fault_ } + +func FindAllByDnsName(ctx context.Context, r soap.RoundTripper, req *types.FindAllByDnsName) (*types.FindAllByDnsNameResponse, error) { + var reqBody, resBody FindAllByDnsNameBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FindAllByIpBody struct { + Req *types.FindAllByIp `xml:"urn:vim25 FindAllByIp,omitempty"` + Res *types.FindAllByIpResponse `xml:"urn:vim25 FindAllByIpResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FindAllByIpBody) Fault() *soap.Fault { return b.Fault_ } + +func FindAllByIp(ctx context.Context, r soap.RoundTripper, req *types.FindAllByIp) (*types.FindAllByIpResponse, error) { + var reqBody, resBody FindAllByIpBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FindAllByUuidBody struct { + Req *types.FindAllByUuid `xml:"urn:vim25 FindAllByUuid,omitempty"` + Res *types.FindAllByUuidResponse `xml:"urn:vim25 FindAllByUuidResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FindAllByUuidBody) Fault() *soap.Fault { return b.Fault_ } + +func FindAllByUuid(ctx context.Context, r soap.RoundTripper, req *types.FindAllByUuid) (*types.FindAllByUuidResponse, error) { + var reqBody, resBody FindAllByUuidBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FindAssociatedProfileBody struct { + Req *types.FindAssociatedProfile `xml:"urn:vim25 FindAssociatedProfile,omitempty"` + Res *types.FindAssociatedProfileResponse `xml:"urn:vim25 FindAssociatedProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FindAssociatedProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func FindAssociatedProfile(ctx context.Context, r soap.RoundTripper, req *types.FindAssociatedProfile) (*types.FindAssociatedProfileResponse, error) { + var reqBody, resBody FindAssociatedProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FindByDatastorePathBody struct { + Req *types.FindByDatastorePath `xml:"urn:vim25 FindByDatastorePath,omitempty"` + Res *types.FindByDatastorePathResponse `xml:"urn:vim25 FindByDatastorePathResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FindByDatastorePathBody) Fault() *soap.Fault { return b.Fault_ } + +func FindByDatastorePath(ctx context.Context, r soap.RoundTripper, req *types.FindByDatastorePath) (*types.FindByDatastorePathResponse, error) { + var reqBody, resBody FindByDatastorePathBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FindByDnsNameBody struct { + Req *types.FindByDnsName `xml:"urn:vim25 FindByDnsName,omitempty"` + Res *types.FindByDnsNameResponse `xml:"urn:vim25 FindByDnsNameResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FindByDnsNameBody) Fault() *soap.Fault { return b.Fault_ } + +func FindByDnsName(ctx context.Context, r soap.RoundTripper, req *types.FindByDnsName) (*types.FindByDnsNameResponse, error) { + var reqBody, resBody FindByDnsNameBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FindByInventoryPathBody struct { + Req *types.FindByInventoryPath `xml:"urn:vim25 FindByInventoryPath,omitempty"` + Res *types.FindByInventoryPathResponse `xml:"urn:vim25 FindByInventoryPathResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FindByInventoryPathBody) Fault() *soap.Fault { return b.Fault_ } + +func FindByInventoryPath(ctx context.Context, r soap.RoundTripper, req *types.FindByInventoryPath) (*types.FindByInventoryPathResponse, error) { + var reqBody, resBody FindByInventoryPathBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FindByIpBody struct { + Req *types.FindByIp `xml:"urn:vim25 FindByIp,omitempty"` + Res *types.FindByIpResponse `xml:"urn:vim25 FindByIpResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + 
+func (b *FindByIpBody) Fault() *soap.Fault { return b.Fault_ } + +func FindByIp(ctx context.Context, r soap.RoundTripper, req *types.FindByIp) (*types.FindByIpResponse, error) { + var reqBody, resBody FindByIpBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FindByUuidBody struct { + Req *types.FindByUuid `xml:"urn:vim25 FindByUuid,omitempty"` + Res *types.FindByUuidResponse `xml:"urn:vim25 FindByUuidResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FindByUuidBody) Fault() *soap.Fault { return b.Fault_ } + +func FindByUuid(ctx context.Context, r soap.RoundTripper, req *types.FindByUuid) (*types.FindByUuidResponse, error) { + var reqBody, resBody FindByUuidBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FindChildBody struct { + Req *types.FindChild `xml:"urn:vim25 FindChild,omitempty"` + Res *types.FindChildResponse `xml:"urn:vim25 FindChildResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FindChildBody) Fault() *soap.Fault { return b.Fault_ } + +func FindChild(ctx context.Context, r soap.RoundTripper, req *types.FindChild) (*types.FindChildResponse, error) { + var reqBody, resBody FindChildBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FindExtensionBody struct { + Req *types.FindExtension `xml:"urn:vim25 FindExtension,omitempty"` + Res *types.FindExtensionResponse `xml:"urn:vim25 FindExtensionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FindExtensionBody) Fault() *soap.Fault { return b.Fault_ } + +func FindExtension(ctx context.Context, r soap.RoundTripper, req *types.FindExtension) (*types.FindExtensionResponse, error) { + var reqBody, resBody FindExtensionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FindRulesForVmBody struct { + Req *types.FindRulesForVm `xml:"urn:vim25 FindRulesForVm,omitempty"` + Res *types.FindRulesForVmResponse `xml:"urn:vim25 FindRulesForVmResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FindRulesForVmBody) Fault() *soap.Fault { return b.Fault_ } + +func FindRulesForVm(ctx context.Context, r soap.RoundTripper, req *types.FindRulesForVm) (*types.FindRulesForVmResponse, error) { + var reqBody, resBody FindRulesForVmBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FormatVffsBody struct { + Req *types.FormatVffs `xml:"urn:vim25 FormatVffs,omitempty"` + Res *types.FormatVffsResponse `xml:"urn:vim25 FormatVffsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FormatVffsBody) Fault() *soap.Fault { return b.Fault_ } + +func FormatVffs(ctx context.Context, r soap.RoundTripper, req *types.FormatVffs) (*types.FormatVffsResponse, error) { + var reqBody, resBody FormatVffsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return 
resBody.Res, nil +} + +type FormatVmfsBody struct { + Req *types.FormatVmfs `xml:"urn:vim25 FormatVmfs,omitempty"` + Res *types.FormatVmfsResponse `xml:"urn:vim25 FormatVmfsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FormatVmfsBody) Fault() *soap.Fault { return b.Fault_ } + +func FormatVmfs(ctx context.Context, r soap.RoundTripper, req *types.FormatVmfs) (*types.FormatVmfsResponse, error) { + var reqBody, resBody FormatVmfsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GenerateCertificateSigningRequestBody struct { + Req *types.GenerateCertificateSigningRequest `xml:"urn:vim25 GenerateCertificateSigningRequest,omitempty"` + Res *types.GenerateCertificateSigningRequestResponse `xml:"urn:vim25 GenerateCertificateSigningRequestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GenerateCertificateSigningRequestBody) Fault() *soap.Fault { return b.Fault_ } + +func GenerateCertificateSigningRequest(ctx context.Context, r soap.RoundTripper, req *types.GenerateCertificateSigningRequest) (*types.GenerateCertificateSigningRequestResponse, error) { + var reqBody, resBody GenerateCertificateSigningRequestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GenerateCertificateSigningRequestByDnBody struct { + Req *types.GenerateCertificateSigningRequestByDn `xml:"urn:vim25 GenerateCertificateSigningRequestByDn,omitempty"` + Res *types.GenerateCertificateSigningRequestByDnResponse `xml:"urn:vim25 GenerateCertificateSigningRequestByDnResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GenerateCertificateSigningRequestByDnBody) Fault() *soap.Fault { return b.Fault_ } + +func GenerateCertificateSigningRequestByDn(ctx context.Context, r soap.RoundTripper, req *types.GenerateCertificateSigningRequestByDn) (*types.GenerateCertificateSigningRequestByDnResponse, error) { + var reqBody, resBody GenerateCertificateSigningRequestByDnBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GenerateClientCsrBody struct { + Req *types.GenerateClientCsr `xml:"urn:vim25 GenerateClientCsr,omitempty"` + Res *types.GenerateClientCsrResponse `xml:"urn:vim25 GenerateClientCsrResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GenerateClientCsrBody) Fault() *soap.Fault { return b.Fault_ } + +func GenerateClientCsr(ctx context.Context, r soap.RoundTripper, req *types.GenerateClientCsr) (*types.GenerateClientCsrResponse, error) { + var reqBody, resBody GenerateClientCsrBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GenerateConfigTaskListBody struct { + Req *types.GenerateConfigTaskList `xml:"urn:vim25 GenerateConfigTaskList,omitempty"` + Res *types.GenerateConfigTaskListResponse `xml:"urn:vim25 GenerateConfigTaskListResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GenerateConfigTaskListBody) Fault() *soap.Fault { return b.Fault_ } + +func GenerateConfigTaskList(ctx 
context.Context, r soap.RoundTripper, req *types.GenerateConfigTaskList) (*types.GenerateConfigTaskListResponse, error) { + var reqBody, resBody GenerateConfigTaskListBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GenerateHostConfigTaskSpec_TaskBody struct { + Req *types.GenerateHostConfigTaskSpec_Task `xml:"urn:vim25 GenerateHostConfigTaskSpec_Task,omitempty"` + Res *types.GenerateHostConfigTaskSpec_TaskResponse `xml:"urn:vim25 GenerateHostConfigTaskSpec_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GenerateHostConfigTaskSpec_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func GenerateHostConfigTaskSpec_Task(ctx context.Context, r soap.RoundTripper, req *types.GenerateHostConfigTaskSpec_Task) (*types.GenerateHostConfigTaskSpec_TaskResponse, error) { + var reqBody, resBody GenerateHostConfigTaskSpec_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GenerateHostProfileTaskList_TaskBody struct { + Req *types.GenerateHostProfileTaskList_Task `xml:"urn:vim25 GenerateHostProfileTaskList_Task,omitempty"` + Res *types.GenerateHostProfileTaskList_TaskResponse `xml:"urn:vim25 GenerateHostProfileTaskList_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GenerateHostProfileTaskList_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func GenerateHostProfileTaskList_Task(ctx context.Context, r soap.RoundTripper, req *types.GenerateHostProfileTaskList_Task) (*types.GenerateHostProfileTaskList_TaskResponse, error) { + var reqBody, resBody GenerateHostProfileTaskList_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GenerateKeyBody struct { + Req *types.GenerateKey `xml:"urn:vim25 GenerateKey,omitempty"` + Res *types.GenerateKeyResponse `xml:"urn:vim25 GenerateKeyResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GenerateKeyBody) Fault() *soap.Fault { return b.Fault_ } + +func GenerateKey(ctx context.Context, r soap.RoundTripper, req *types.GenerateKey) (*types.GenerateKeyResponse, error) { + var reqBody, resBody GenerateKeyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GenerateLogBundles_TaskBody struct { + Req *types.GenerateLogBundles_Task `xml:"urn:vim25 GenerateLogBundles_Task,omitempty"` + Res *types.GenerateLogBundles_TaskResponse `xml:"urn:vim25 GenerateLogBundles_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GenerateLogBundles_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func GenerateLogBundles_Task(ctx context.Context, r soap.RoundTripper, req *types.GenerateLogBundles_Task) (*types.GenerateLogBundles_TaskResponse, error) { + var reqBody, resBody GenerateLogBundles_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GenerateSelfSignedClientCertBody struct { + Req *types.GenerateSelfSignedClientCert `xml:"urn:vim25 GenerateSelfSignedClientCert,omitempty"` 
+ Res *types.GenerateSelfSignedClientCertResponse `xml:"urn:vim25 GenerateSelfSignedClientCertResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GenerateSelfSignedClientCertBody) Fault() *soap.Fault { return b.Fault_ } + +func GenerateSelfSignedClientCert(ctx context.Context, r soap.RoundTripper, req *types.GenerateSelfSignedClientCert) (*types.GenerateSelfSignedClientCertResponse, error) { + var reqBody, resBody GenerateSelfSignedClientCertBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GetAlarmBody struct { + Req *types.GetAlarm `xml:"urn:vim25 GetAlarm,omitempty"` + Res *types.GetAlarmResponse `xml:"urn:vim25 GetAlarmResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GetAlarmBody) Fault() *soap.Fault { return b.Fault_ } + +func GetAlarm(ctx context.Context, r soap.RoundTripper, req *types.GetAlarm) (*types.GetAlarmResponse, error) { + var reqBody, resBody GetAlarmBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GetAlarmStateBody struct { + Req *types.GetAlarmState `xml:"urn:vim25 GetAlarmState,omitempty"` + Res *types.GetAlarmStateResponse `xml:"urn:vim25 GetAlarmStateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GetAlarmStateBody) Fault() *soap.Fault { return b.Fault_ } + +func GetAlarmState(ctx context.Context, r soap.RoundTripper, req *types.GetAlarmState) (*types.GetAlarmStateResponse, error) { + var reqBody, resBody GetAlarmStateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GetCustomizationSpecBody struct { + Req *types.GetCustomizationSpec `xml:"urn:vim25 GetCustomizationSpec,omitempty"` + Res *types.GetCustomizationSpecResponse `xml:"urn:vim25 GetCustomizationSpecResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GetCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ } + +func GetCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.GetCustomizationSpec) (*types.GetCustomizationSpecResponse, error) { + var reqBody, resBody GetCustomizationSpecBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GetPublicKeyBody struct { + Req *types.GetPublicKey `xml:"urn:vim25 GetPublicKey,omitempty"` + Res *types.GetPublicKeyResponse `xml:"urn:vim25 GetPublicKeyResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GetPublicKeyBody) Fault() *soap.Fault { return b.Fault_ } + +func GetPublicKey(ctx context.Context, r soap.RoundTripper, req *types.GetPublicKey) (*types.GetPublicKeyResponse, error) { + var reqBody, resBody GetPublicKeyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GetResourceUsageBody struct { + Req *types.GetResourceUsage `xml:"urn:vim25 GetResourceUsage,omitempty"` + Res *types.GetResourceUsageResponse `xml:"urn:vim25 GetResourceUsageResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GetResourceUsageBody) Fault() *soap.Fault { return b.Fault_ } + +func GetResourceUsage(ctx context.Context, r soap.RoundTripper, req *types.GetResourceUsage) (*types.GetResourceUsageResponse, error) { + var reqBody, resBody GetResourceUsageBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GetVchaClusterHealthBody struct { + Req *types.GetVchaClusterHealth `xml:"urn:vim25 GetVchaClusterHealth,omitempty"` + Res *types.GetVchaClusterHealthResponse `xml:"urn:vim25 GetVchaClusterHealthResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GetVchaClusterHealthBody) Fault() *soap.Fault { return b.Fault_ } + +func GetVchaClusterHealth(ctx context.Context, r soap.RoundTripper, req *types.GetVchaClusterHealth) (*types.GetVchaClusterHealthResponse, error) { + var reqBody, resBody GetVchaClusterHealthBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GetVsanObjExtAttrsBody struct { + Req *types.GetVsanObjExtAttrs `xml:"urn:vim25 GetVsanObjExtAttrs,omitempty"` + Res *types.GetVsanObjExtAttrsResponse `xml:"urn:vim25 GetVsanObjExtAttrsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GetVsanObjExtAttrsBody) Fault() *soap.Fault { return b.Fault_ } + +func GetVsanObjExtAttrs(ctx context.Context, r soap.RoundTripper, req *types.GetVsanObjExtAttrs) (*types.GetVsanObjExtAttrsResponse, error) { + var reqBody, resBody GetVsanObjExtAttrsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HasMonitoredEntityBody struct { + Req *types.HasMonitoredEntity `xml:"urn:vim25 HasMonitoredEntity,omitempty"` + Res *types.HasMonitoredEntityResponse `xml:"urn:vim25 HasMonitoredEntityResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HasMonitoredEntityBody) Fault() *soap.Fault { return b.Fault_ } + +func HasMonitoredEntity(ctx context.Context, r soap.RoundTripper, req *types.HasMonitoredEntity) (*types.HasMonitoredEntityResponse, error) { + var reqBody, resBody HasMonitoredEntityBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HasPrivilegeOnEntitiesBody struct { + Req *types.HasPrivilegeOnEntities `xml:"urn:vim25 HasPrivilegeOnEntities,omitempty"` + Res *types.HasPrivilegeOnEntitiesResponse `xml:"urn:vim25 HasPrivilegeOnEntitiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HasPrivilegeOnEntitiesBody) Fault() *soap.Fault { return b.Fault_ } + +func HasPrivilegeOnEntities(ctx context.Context, r soap.RoundTripper, req *types.HasPrivilegeOnEntities) (*types.HasPrivilegeOnEntitiesResponse, error) { + var reqBody, resBody HasPrivilegeOnEntitiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HasPrivilegeOnEntityBody struct { + Req *types.HasPrivilegeOnEntity `xml:"urn:vim25 HasPrivilegeOnEntity,omitempty"` + Res *types.HasPrivilegeOnEntityResponse `xml:"urn:vim25 
HasPrivilegeOnEntityResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HasPrivilegeOnEntityBody) Fault() *soap.Fault { return b.Fault_ } + +func HasPrivilegeOnEntity(ctx context.Context, r soap.RoundTripper, req *types.HasPrivilegeOnEntity) (*types.HasPrivilegeOnEntityResponse, error) { + var reqBody, resBody HasPrivilegeOnEntityBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HasProviderBody struct { + Req *types.HasProvider `xml:"urn:vim25 HasProvider,omitempty"` + Res *types.HasProviderResponse `xml:"urn:vim25 HasProviderResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HasProviderBody) Fault() *soap.Fault { return b.Fault_ } + +func HasProvider(ctx context.Context, r soap.RoundTripper, req *types.HasProvider) (*types.HasProviderResponse, error) { + var reqBody, resBody HasProviderBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HasUserPrivilegeOnEntitiesBody struct { + Req *types.HasUserPrivilegeOnEntities `xml:"urn:vim25 HasUserPrivilegeOnEntities,omitempty"` + Res *types.HasUserPrivilegeOnEntitiesResponse `xml:"urn:vim25 HasUserPrivilegeOnEntitiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HasUserPrivilegeOnEntitiesBody) Fault() *soap.Fault { return b.Fault_ } + +func HasUserPrivilegeOnEntities(ctx context.Context, r soap.RoundTripper, req *types.HasUserPrivilegeOnEntities) (*types.HasUserPrivilegeOnEntitiesResponse, error) { + var reqBody, resBody HasUserPrivilegeOnEntitiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostClearVStorageObjectControlFlagsBody struct { + Req *types.HostClearVStorageObjectControlFlags `xml:"urn:vim25 HostClearVStorageObjectControlFlags,omitempty"` + Res *types.HostClearVStorageObjectControlFlagsResponse `xml:"urn:vim25 HostClearVStorageObjectControlFlagsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostClearVStorageObjectControlFlagsBody) Fault() *soap.Fault { return b.Fault_ } + +func HostClearVStorageObjectControlFlags(ctx context.Context, r soap.RoundTripper, req *types.HostClearVStorageObjectControlFlags) (*types.HostClearVStorageObjectControlFlagsResponse, error) { + var reqBody, resBody HostClearVStorageObjectControlFlagsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostCloneVStorageObject_TaskBody struct { + Req *types.HostCloneVStorageObject_Task `xml:"urn:vim25 HostCloneVStorageObject_Task,omitempty"` + Res *types.HostCloneVStorageObject_TaskResponse `xml:"urn:vim25 HostCloneVStorageObject_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostCloneVStorageObject_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostCloneVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *types.HostCloneVStorageObject_Task) (*types.HostCloneVStorageObject_TaskResponse, error) { + var reqBody, resBody HostCloneVStorageObject_TaskBody + + reqBody.Req = req 
+ + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostConfigVFlashCacheBody struct { + Req *types.HostConfigVFlashCache `xml:"urn:vim25 HostConfigVFlashCache,omitempty"` + Res *types.HostConfigVFlashCacheResponse `xml:"urn:vim25 HostConfigVFlashCacheResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostConfigVFlashCacheBody) Fault() *soap.Fault { return b.Fault_ } + +func HostConfigVFlashCache(ctx context.Context, r soap.RoundTripper, req *types.HostConfigVFlashCache) (*types.HostConfigVFlashCacheResponse, error) { + var reqBody, resBody HostConfigVFlashCacheBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostConfigureVFlashResourceBody struct { + Req *types.HostConfigureVFlashResource `xml:"urn:vim25 HostConfigureVFlashResource,omitempty"` + Res *types.HostConfigureVFlashResourceResponse `xml:"urn:vim25 HostConfigureVFlashResourceResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostConfigureVFlashResourceBody) Fault() *soap.Fault { return b.Fault_ } + +func HostConfigureVFlashResource(ctx context.Context, r soap.RoundTripper, req *types.HostConfigureVFlashResource) (*types.HostConfigureVFlashResourceResponse, error) { + var reqBody, resBody HostConfigureVFlashResourceBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostCreateDisk_TaskBody struct { + Req *types.HostCreateDisk_Task `xml:"urn:vim25 HostCreateDisk_Task,omitempty"` + Res *types.HostCreateDisk_TaskResponse `xml:"urn:vim25 HostCreateDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostCreateDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostCreateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.HostCreateDisk_Task) (*types.HostCreateDisk_TaskResponse, error) { + var reqBody, resBody HostCreateDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostDeleteVStorageObject_TaskBody struct { + Req *types.HostDeleteVStorageObject_Task `xml:"urn:vim25 HostDeleteVStorageObject_Task,omitempty"` + Res *types.HostDeleteVStorageObject_TaskResponse `xml:"urn:vim25 HostDeleteVStorageObject_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostDeleteVStorageObject_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostDeleteVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *types.HostDeleteVStorageObject_Task) (*types.HostDeleteVStorageObject_TaskResponse, error) { + var reqBody, resBody HostDeleteVStorageObject_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostExtendDisk_TaskBody struct { + Req *types.HostExtendDisk_Task `xml:"urn:vim25 HostExtendDisk_Task,omitempty"` + Res *types.HostExtendDisk_TaskResponse `xml:"urn:vim25 HostExtendDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostExtendDisk_TaskBody) 
Fault() *soap.Fault { return b.Fault_ } + +func HostExtendDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.HostExtendDisk_Task) (*types.HostExtendDisk_TaskResponse, error) { + var reqBody, resBody HostExtendDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostGetVFlashModuleDefaultConfigBody struct { + Req *types.HostGetVFlashModuleDefaultConfig `xml:"urn:vim25 HostGetVFlashModuleDefaultConfig,omitempty"` + Res *types.HostGetVFlashModuleDefaultConfigResponse `xml:"urn:vim25 HostGetVFlashModuleDefaultConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostGetVFlashModuleDefaultConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func HostGetVFlashModuleDefaultConfig(ctx context.Context, r soap.RoundTripper, req *types.HostGetVFlashModuleDefaultConfig) (*types.HostGetVFlashModuleDefaultConfigResponse, error) { + var reqBody, resBody HostGetVFlashModuleDefaultConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostImageConfigGetAcceptanceBody struct { + Req *types.HostImageConfigGetAcceptance `xml:"urn:vim25 HostImageConfigGetAcceptance,omitempty"` + Res *types.HostImageConfigGetAcceptanceResponse `xml:"urn:vim25 HostImageConfigGetAcceptanceResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostImageConfigGetAcceptanceBody) Fault() *soap.Fault { return b.Fault_ } + +func HostImageConfigGetAcceptance(ctx context.Context, r soap.RoundTripper, req *types.HostImageConfigGetAcceptance) (*types.HostImageConfigGetAcceptanceResponse, error) { + var reqBody, resBody HostImageConfigGetAcceptanceBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostImageConfigGetProfileBody struct { + Req *types.HostImageConfigGetProfile `xml:"urn:vim25 HostImageConfigGetProfile,omitempty"` + Res *types.HostImageConfigGetProfileResponse `xml:"urn:vim25 HostImageConfigGetProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostImageConfigGetProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func HostImageConfigGetProfile(ctx context.Context, r soap.RoundTripper, req *types.HostImageConfigGetProfile) (*types.HostImageConfigGetProfileResponse, error) { + var reqBody, resBody HostImageConfigGetProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostInflateDisk_TaskBody struct { + Req *types.HostInflateDisk_Task `xml:"urn:vim25 HostInflateDisk_Task,omitempty"` + Res *types.HostInflateDisk_TaskResponse `xml:"urn:vim25 HostInflateDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostInflateDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostInflateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.HostInflateDisk_Task) (*types.HostInflateDisk_TaskResponse, error) { + var reqBody, resBody HostInflateDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + 
+type HostListVStorageObjectBody struct { + Req *types.HostListVStorageObject `xml:"urn:vim25 HostListVStorageObject,omitempty"` + Res *types.HostListVStorageObjectResponse `xml:"urn:vim25 HostListVStorageObjectResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostListVStorageObjectBody) Fault() *soap.Fault { return b.Fault_ } + +func HostListVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.HostListVStorageObject) (*types.HostListVStorageObjectResponse, error) { + var reqBody, resBody HostListVStorageObjectBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostProfileResetValidationStateBody struct { + Req *types.HostProfileResetValidationState `xml:"urn:vim25 HostProfileResetValidationState,omitempty"` + Res *types.HostProfileResetValidationStateResponse `xml:"urn:vim25 HostProfileResetValidationStateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostProfileResetValidationStateBody) Fault() *soap.Fault { return b.Fault_ } + +func HostProfileResetValidationState(ctx context.Context, r soap.RoundTripper, req *types.HostProfileResetValidationState) (*types.HostProfileResetValidationStateResponse, error) { + var reqBody, resBody HostProfileResetValidationStateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostReconcileDatastoreInventory_TaskBody struct { + Req *types.HostReconcileDatastoreInventory_Task `xml:"urn:vim25 HostReconcileDatastoreInventory_Task,omitempty"` + Res *types.HostReconcileDatastoreInventory_TaskResponse `xml:"urn:vim25 HostReconcileDatastoreInventory_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostReconcileDatastoreInventory_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostReconcileDatastoreInventory_Task(ctx context.Context, r soap.RoundTripper, req *types.HostReconcileDatastoreInventory_Task) (*types.HostReconcileDatastoreInventory_TaskResponse, error) { + var reqBody, resBody HostReconcileDatastoreInventory_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostRegisterDiskBody struct { + Req *types.HostRegisterDisk `xml:"urn:vim25 HostRegisterDisk,omitempty"` + Res *types.HostRegisterDiskResponse `xml:"urn:vim25 HostRegisterDiskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostRegisterDiskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostRegisterDisk(ctx context.Context, r soap.RoundTripper, req *types.HostRegisterDisk) (*types.HostRegisterDiskResponse, error) { + var reqBody, resBody HostRegisterDiskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostRelocateVStorageObject_TaskBody struct { + Req *types.HostRelocateVStorageObject_Task `xml:"urn:vim25 HostRelocateVStorageObject_Task,omitempty"` + Res *types.HostRelocateVStorageObject_TaskResponse `xml:"urn:vim25 HostRelocateVStorageObject_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b 
*HostRelocateVStorageObject_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostRelocateVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *types.HostRelocateVStorageObject_Task) (*types.HostRelocateVStorageObject_TaskResponse, error) { + var reqBody, resBody HostRelocateVStorageObject_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostRemoveVFlashResourceBody struct { + Req *types.HostRemoveVFlashResource `xml:"urn:vim25 HostRemoveVFlashResource,omitempty"` + Res *types.HostRemoveVFlashResourceResponse `xml:"urn:vim25 HostRemoveVFlashResourceResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostRemoveVFlashResourceBody) Fault() *soap.Fault { return b.Fault_ } + +func HostRemoveVFlashResource(ctx context.Context, r soap.RoundTripper, req *types.HostRemoveVFlashResource) (*types.HostRemoveVFlashResourceResponse, error) { + var reqBody, resBody HostRemoveVFlashResourceBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostRenameVStorageObjectBody struct { + Req *types.HostRenameVStorageObject `xml:"urn:vim25 HostRenameVStorageObject,omitempty"` + Res *types.HostRenameVStorageObjectResponse `xml:"urn:vim25 HostRenameVStorageObjectResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostRenameVStorageObjectBody) Fault() *soap.Fault { return b.Fault_ } + +func HostRenameVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.HostRenameVStorageObject) (*types.HostRenameVStorageObjectResponse, error) { + var reqBody, resBody HostRenameVStorageObjectBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostRetrieveVStorageInfrastructureObjectPolicyBody struct { + Req *types.HostRetrieveVStorageInfrastructureObjectPolicy `xml:"urn:vim25 HostRetrieveVStorageInfrastructureObjectPolicy,omitempty"` + Res *types.HostRetrieveVStorageInfrastructureObjectPolicyResponse `xml:"urn:vim25 HostRetrieveVStorageInfrastructureObjectPolicyResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostRetrieveVStorageInfrastructureObjectPolicyBody) Fault() *soap.Fault { return b.Fault_ } + +func HostRetrieveVStorageInfrastructureObjectPolicy(ctx context.Context, r soap.RoundTripper, req *types.HostRetrieveVStorageInfrastructureObjectPolicy) (*types.HostRetrieveVStorageInfrastructureObjectPolicyResponse, error) { + var reqBody, resBody HostRetrieveVStorageInfrastructureObjectPolicyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostRetrieveVStorageObjectBody struct { + Req *types.HostRetrieveVStorageObject `xml:"urn:vim25 HostRetrieveVStorageObject,omitempty"` + Res *types.HostRetrieveVStorageObjectResponse `xml:"urn:vim25 HostRetrieveVStorageObjectResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostRetrieveVStorageObjectBody) Fault() *soap.Fault { return b.Fault_ } + +func HostRetrieveVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.HostRetrieveVStorageObject) 
(*types.HostRetrieveVStorageObjectResponse, error) { + var reqBody, resBody HostRetrieveVStorageObjectBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostRetrieveVStorageObjectStateBody struct { + Req *types.HostRetrieveVStorageObjectState `xml:"urn:vim25 HostRetrieveVStorageObjectState,omitempty"` + Res *types.HostRetrieveVStorageObjectStateResponse `xml:"urn:vim25 HostRetrieveVStorageObjectStateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostRetrieveVStorageObjectStateBody) Fault() *soap.Fault { return b.Fault_ } + +func HostRetrieveVStorageObjectState(ctx context.Context, r soap.RoundTripper, req *types.HostRetrieveVStorageObjectState) (*types.HostRetrieveVStorageObjectStateResponse, error) { + var reqBody, resBody HostRetrieveVStorageObjectStateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostScheduleReconcileDatastoreInventoryBody struct { + Req *types.HostScheduleReconcileDatastoreInventory `xml:"urn:vim25 HostScheduleReconcileDatastoreInventory,omitempty"` + Res *types.HostScheduleReconcileDatastoreInventoryResponse `xml:"urn:vim25 HostScheduleReconcileDatastoreInventoryResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostScheduleReconcileDatastoreInventoryBody) Fault() *soap.Fault { return b.Fault_ } + +func HostScheduleReconcileDatastoreInventory(ctx context.Context, r soap.RoundTripper, req *types.HostScheduleReconcileDatastoreInventory) (*types.HostScheduleReconcileDatastoreInventoryResponse, error) { + var reqBody, resBody HostScheduleReconcileDatastoreInventoryBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostSetVStorageObjectControlFlagsBody struct { + Req *types.HostSetVStorageObjectControlFlags `xml:"urn:vim25 HostSetVStorageObjectControlFlags,omitempty"` + Res *types.HostSetVStorageObjectControlFlagsResponse `xml:"urn:vim25 HostSetVStorageObjectControlFlagsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostSetVStorageObjectControlFlagsBody) Fault() *soap.Fault { return b.Fault_ } + +func HostSetVStorageObjectControlFlags(ctx context.Context, r soap.RoundTripper, req *types.HostSetVStorageObjectControlFlags) (*types.HostSetVStorageObjectControlFlagsResponse, error) { + var reqBody, resBody HostSetVStorageObjectControlFlagsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostSpecGetUpdatedHostsBody struct { + Req *types.HostSpecGetUpdatedHosts `xml:"urn:vim25 HostSpecGetUpdatedHosts,omitempty"` + Res *types.HostSpecGetUpdatedHostsResponse `xml:"urn:vim25 HostSpecGetUpdatedHostsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostSpecGetUpdatedHostsBody) Fault() *soap.Fault { return b.Fault_ } + +func HostSpecGetUpdatedHosts(ctx context.Context, r soap.RoundTripper, req *types.HostSpecGetUpdatedHosts) (*types.HostSpecGetUpdatedHostsResponse, error) { + var reqBody, resBody HostSpecGetUpdatedHostsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, 
&resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostVStorageObjectCreateDiskFromSnapshot_TaskBody struct { + Req *types.HostVStorageObjectCreateDiskFromSnapshot_Task `xml:"urn:vim25 HostVStorageObjectCreateDiskFromSnapshot_Task,omitempty"` + Res *types.HostVStorageObjectCreateDiskFromSnapshot_TaskResponse `xml:"urn:vim25 HostVStorageObjectCreateDiskFromSnapshot_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostVStorageObjectCreateDiskFromSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostVStorageObjectCreateDiskFromSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.HostVStorageObjectCreateDiskFromSnapshot_Task) (*types.HostVStorageObjectCreateDiskFromSnapshot_TaskResponse, error) { + var reqBody, resBody HostVStorageObjectCreateDiskFromSnapshot_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostVStorageObjectCreateSnapshot_TaskBody struct { + Req *types.HostVStorageObjectCreateSnapshot_Task `xml:"urn:vim25 HostVStorageObjectCreateSnapshot_Task,omitempty"` + Res *types.HostVStorageObjectCreateSnapshot_TaskResponse `xml:"urn:vim25 HostVStorageObjectCreateSnapshot_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostVStorageObjectCreateSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostVStorageObjectCreateSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.HostVStorageObjectCreateSnapshot_Task) (*types.HostVStorageObjectCreateSnapshot_TaskResponse, error) { + var reqBody, resBody HostVStorageObjectCreateSnapshot_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostVStorageObjectDeleteSnapshot_TaskBody struct { + Req *types.HostVStorageObjectDeleteSnapshot_Task `xml:"urn:vim25 HostVStorageObjectDeleteSnapshot_Task,omitempty"` + Res *types.HostVStorageObjectDeleteSnapshot_TaskResponse `xml:"urn:vim25 HostVStorageObjectDeleteSnapshot_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostVStorageObjectDeleteSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostVStorageObjectDeleteSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.HostVStorageObjectDeleteSnapshot_Task) (*types.HostVStorageObjectDeleteSnapshot_TaskResponse, error) { + var reqBody, resBody HostVStorageObjectDeleteSnapshot_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostVStorageObjectRetrieveSnapshotInfoBody struct { + Req *types.HostVStorageObjectRetrieveSnapshotInfo `xml:"urn:vim25 HostVStorageObjectRetrieveSnapshotInfo,omitempty"` + Res *types.HostVStorageObjectRetrieveSnapshotInfoResponse `xml:"urn:vim25 HostVStorageObjectRetrieveSnapshotInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostVStorageObjectRetrieveSnapshotInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func HostVStorageObjectRetrieveSnapshotInfo(ctx context.Context, r soap.RoundTripper, req *types.HostVStorageObjectRetrieveSnapshotInfo) 
(*types.HostVStorageObjectRetrieveSnapshotInfoResponse, error) { + var reqBody, resBody HostVStorageObjectRetrieveSnapshotInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostVStorageObjectRevert_TaskBody struct { + Req *types.HostVStorageObjectRevert_Task `xml:"urn:vim25 HostVStorageObjectRevert_Task,omitempty"` + Res *types.HostVStorageObjectRevert_TaskResponse `xml:"urn:vim25 HostVStorageObjectRevert_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostVStorageObjectRevert_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostVStorageObjectRevert_Task(ctx context.Context, r soap.RoundTripper, req *types.HostVStorageObjectRevert_Task) (*types.HostVStorageObjectRevert_TaskResponse, error) { + var reqBody, resBody HostVStorageObjectRevert_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HttpNfcLeaseAbortBody struct { + Req *types.HttpNfcLeaseAbort `xml:"urn:vim25 HttpNfcLeaseAbort,omitempty"` + Res *types.HttpNfcLeaseAbortResponse `xml:"urn:vim25 HttpNfcLeaseAbortResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HttpNfcLeaseAbortBody) Fault() *soap.Fault { return b.Fault_ } + +func HttpNfcLeaseAbort(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeaseAbort) (*types.HttpNfcLeaseAbortResponse, error) { + var reqBody, resBody HttpNfcLeaseAbortBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HttpNfcLeaseCompleteBody struct { + Req *types.HttpNfcLeaseComplete `xml:"urn:vim25 HttpNfcLeaseComplete,omitempty"` + Res *types.HttpNfcLeaseCompleteResponse `xml:"urn:vim25 HttpNfcLeaseCompleteResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HttpNfcLeaseCompleteBody) Fault() *soap.Fault { return b.Fault_ } + +func HttpNfcLeaseComplete(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeaseComplete) (*types.HttpNfcLeaseCompleteResponse, error) { + var reqBody, resBody HttpNfcLeaseCompleteBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HttpNfcLeaseGetManifestBody struct { + Req *types.HttpNfcLeaseGetManifest `xml:"urn:vim25 HttpNfcLeaseGetManifest,omitempty"` + Res *types.HttpNfcLeaseGetManifestResponse `xml:"urn:vim25 HttpNfcLeaseGetManifestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HttpNfcLeaseGetManifestBody) Fault() *soap.Fault { return b.Fault_ } + +func HttpNfcLeaseGetManifest(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeaseGetManifest) (*types.HttpNfcLeaseGetManifestResponse, error) { + var reqBody, resBody HttpNfcLeaseGetManifestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HttpNfcLeaseProgressBody struct { + Req *types.HttpNfcLeaseProgress `xml:"urn:vim25 HttpNfcLeaseProgress,omitempty"` + Res *types.HttpNfcLeaseProgressResponse `xml:"urn:vim25 HttpNfcLeaseProgressResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HttpNfcLeaseProgressBody) Fault() *soap.Fault { return b.Fault_ } + +func HttpNfcLeaseProgress(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeaseProgress) (*types.HttpNfcLeaseProgressResponse, error) { + var reqBody, resBody HttpNfcLeaseProgressBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HttpNfcLeasePullFromUrls_TaskBody struct { + Req *types.HttpNfcLeasePullFromUrls_Task `xml:"urn:vim25 HttpNfcLeasePullFromUrls_Task,omitempty"` + Res *types.HttpNfcLeasePullFromUrls_TaskResponse `xml:"urn:vim25 HttpNfcLeasePullFromUrls_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HttpNfcLeasePullFromUrls_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HttpNfcLeasePullFromUrls_Task(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeasePullFromUrls_Task) (*types.HttpNfcLeasePullFromUrls_TaskResponse, error) { + var reqBody, resBody HttpNfcLeasePullFromUrls_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HttpNfcLeaseSetManifestChecksumTypeBody struct { + Req *types.HttpNfcLeaseSetManifestChecksumType `xml:"urn:vim25 HttpNfcLeaseSetManifestChecksumType,omitempty"` + Res *types.HttpNfcLeaseSetManifestChecksumTypeResponse `xml:"urn:vim25 HttpNfcLeaseSetManifestChecksumTypeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HttpNfcLeaseSetManifestChecksumTypeBody) Fault() *soap.Fault { return b.Fault_ } + +func HttpNfcLeaseSetManifestChecksumType(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeaseSetManifestChecksumType) (*types.HttpNfcLeaseSetManifestChecksumTypeResponse, error) { + var reqBody, resBody HttpNfcLeaseSetManifestChecksumTypeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ImpersonateUserBody struct { + Req *types.ImpersonateUser `xml:"urn:vim25 ImpersonateUser,omitempty"` + Res *types.ImpersonateUserResponse `xml:"urn:vim25 ImpersonateUserResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ImpersonateUserBody) Fault() *soap.Fault { return b.Fault_ } + +func ImpersonateUser(ctx context.Context, r soap.RoundTripper, req *types.ImpersonateUser) (*types.ImpersonateUserResponse, error) { + var reqBody, resBody ImpersonateUserBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ImportCertificateForCAM_TaskBody struct { + Req *types.ImportCertificateForCAM_Task `xml:"urn:vim25 ImportCertificateForCAM_Task,omitempty"` + Res *types.ImportCertificateForCAM_TaskResponse `xml:"urn:vim25 ImportCertificateForCAM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ImportCertificateForCAM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ImportCertificateForCAM_Task(ctx context.Context, r soap.RoundTripper, req *types.ImportCertificateForCAM_Task) (*types.ImportCertificateForCAM_TaskResponse, error) { + var reqBody, resBody ImportCertificateForCAM_TaskBody + + 
reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ImportUnmanagedSnapshotBody struct { + Req *types.ImportUnmanagedSnapshot `xml:"urn:vim25 ImportUnmanagedSnapshot,omitempty"` + Res *types.ImportUnmanagedSnapshotResponse `xml:"urn:vim25 ImportUnmanagedSnapshotResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ImportUnmanagedSnapshotBody) Fault() *soap.Fault { return b.Fault_ } + +func ImportUnmanagedSnapshot(ctx context.Context, r soap.RoundTripper, req *types.ImportUnmanagedSnapshot) (*types.ImportUnmanagedSnapshotResponse, error) { + var reqBody, resBody ImportUnmanagedSnapshotBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ImportVAppBody struct { + Req *types.ImportVApp `xml:"urn:vim25 ImportVApp,omitempty"` + Res *types.ImportVAppResponse `xml:"urn:vim25 ImportVAppResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ImportVAppBody) Fault() *soap.Fault { return b.Fault_ } + +func ImportVApp(ctx context.Context, r soap.RoundTripper, req *types.ImportVApp) (*types.ImportVAppResponse, error) { + var reqBody, resBody ImportVAppBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type InflateDisk_TaskBody struct { + Req *types.InflateDisk_Task `xml:"urn:vim25 InflateDisk_Task,omitempty"` + Res *types.InflateDisk_TaskResponse `xml:"urn:vim25 InflateDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *InflateDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func InflateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.InflateDisk_Task) (*types.InflateDisk_TaskResponse, error) { + var reqBody, resBody InflateDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type InflateVirtualDisk_TaskBody struct { + Req *types.InflateVirtualDisk_Task `xml:"urn:vim25 InflateVirtualDisk_Task,omitempty"` + Res *types.InflateVirtualDisk_TaskResponse `xml:"urn:vim25 InflateVirtualDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *InflateVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func InflateVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.InflateVirtualDisk_Task) (*types.InflateVirtualDisk_TaskResponse, error) { + var reqBody, resBody InflateVirtualDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type InitializeDisks_TaskBody struct { + Req *types.InitializeDisks_Task `xml:"urn:vim25 InitializeDisks_Task,omitempty"` + Res *types.InitializeDisks_TaskResponse `xml:"urn:vim25 InitializeDisks_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *InitializeDisks_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func InitializeDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.InitializeDisks_Task) (*types.InitializeDisks_TaskResponse, error) { + var reqBody, resBody 
InitializeDisks_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type InitiateFileTransferFromGuestBody struct { + Req *types.InitiateFileTransferFromGuest `xml:"urn:vim25 InitiateFileTransferFromGuest,omitempty"` + Res *types.InitiateFileTransferFromGuestResponse `xml:"urn:vim25 InitiateFileTransferFromGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *InitiateFileTransferFromGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func InitiateFileTransferFromGuest(ctx context.Context, r soap.RoundTripper, req *types.InitiateFileTransferFromGuest) (*types.InitiateFileTransferFromGuestResponse, error) { + var reqBody, resBody InitiateFileTransferFromGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type InitiateFileTransferToGuestBody struct { + Req *types.InitiateFileTransferToGuest `xml:"urn:vim25 InitiateFileTransferToGuest,omitempty"` + Res *types.InitiateFileTransferToGuestResponse `xml:"urn:vim25 InitiateFileTransferToGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *InitiateFileTransferToGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func InitiateFileTransferToGuest(ctx context.Context, r soap.RoundTripper, req *types.InitiateFileTransferToGuest) (*types.InitiateFileTransferToGuestResponse, error) { + var reqBody, resBody InitiateFileTransferToGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type InstallHostPatchV2_TaskBody struct { + Req *types.InstallHostPatchV2_Task `xml:"urn:vim25 InstallHostPatchV2_Task,omitempty"` + Res *types.InstallHostPatchV2_TaskResponse `xml:"urn:vim25 InstallHostPatchV2_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *InstallHostPatchV2_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func InstallHostPatchV2_Task(ctx context.Context, r soap.RoundTripper, req *types.InstallHostPatchV2_Task) (*types.InstallHostPatchV2_TaskResponse, error) { + var reqBody, resBody InstallHostPatchV2_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type InstallHostPatch_TaskBody struct { + Req *types.InstallHostPatch_Task `xml:"urn:vim25 InstallHostPatch_Task,omitempty"` + Res *types.InstallHostPatch_TaskResponse `xml:"urn:vim25 InstallHostPatch_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *InstallHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func InstallHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.InstallHostPatch_Task) (*types.InstallHostPatch_TaskResponse, error) { + var reqBody, resBody InstallHostPatch_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type InstallIoFilter_TaskBody struct { + Req *types.InstallIoFilter_Task `xml:"urn:vim25 InstallIoFilter_Task,omitempty"` + Res *types.InstallIoFilter_TaskResponse `xml:"urn:vim25 InstallIoFilter_TaskResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *InstallIoFilter_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func InstallIoFilter_Task(ctx context.Context, r soap.RoundTripper, req *types.InstallIoFilter_Task) (*types.InstallIoFilter_TaskResponse, error) { + var reqBody, resBody InstallIoFilter_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type InstallServerCertificateBody struct { + Req *types.InstallServerCertificate `xml:"urn:vim25 InstallServerCertificate,omitempty"` + Res *types.InstallServerCertificateResponse `xml:"urn:vim25 InstallServerCertificateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *InstallServerCertificateBody) Fault() *soap.Fault { return b.Fault_ } + +func InstallServerCertificate(ctx context.Context, r soap.RoundTripper, req *types.InstallServerCertificate) (*types.InstallServerCertificateResponse, error) { + var reqBody, resBody InstallServerCertificateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type InstallSmartCardTrustAnchorBody struct { + Req *types.InstallSmartCardTrustAnchor `xml:"urn:vim25 InstallSmartCardTrustAnchor,omitempty"` + Res *types.InstallSmartCardTrustAnchorResponse `xml:"urn:vim25 InstallSmartCardTrustAnchorResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *InstallSmartCardTrustAnchorBody) Fault() *soap.Fault { return b.Fault_ } + +func InstallSmartCardTrustAnchor(ctx context.Context, r soap.RoundTripper, req *types.InstallSmartCardTrustAnchor) (*types.InstallSmartCardTrustAnchorResponse, error) { + var reqBody, resBody InstallSmartCardTrustAnchorBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type InstantClone_TaskBody struct { + Req *types.InstantClone_Task `xml:"urn:vim25 InstantClone_Task,omitempty"` + Res *types.InstantClone_TaskResponse `xml:"urn:vim25 InstantClone_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *InstantClone_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func InstantClone_Task(ctx context.Context, r soap.RoundTripper, req *types.InstantClone_Task) (*types.InstantClone_TaskResponse, error) { + var reqBody, resBody InstantClone_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type IsSharedGraphicsActiveBody struct { + Req *types.IsSharedGraphicsActive `xml:"urn:vim25 IsSharedGraphicsActive,omitempty"` + Res *types.IsSharedGraphicsActiveResponse `xml:"urn:vim25 IsSharedGraphicsActiveResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *IsSharedGraphicsActiveBody) Fault() *soap.Fault { return b.Fault_ } + +func IsSharedGraphicsActive(ctx context.Context, r soap.RoundTripper, req *types.IsSharedGraphicsActive) (*types.IsSharedGraphicsActiveResponse, error) { + var reqBody, resBody IsSharedGraphicsActiveBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type JoinDomainWithCAM_TaskBody struct { 
+ Req *types.JoinDomainWithCAM_Task `xml:"urn:vim25 JoinDomainWithCAM_Task,omitempty"` + Res *types.JoinDomainWithCAM_TaskResponse `xml:"urn:vim25 JoinDomainWithCAM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *JoinDomainWithCAM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func JoinDomainWithCAM_Task(ctx context.Context, r soap.RoundTripper, req *types.JoinDomainWithCAM_Task) (*types.JoinDomainWithCAM_TaskResponse, error) { + var reqBody, resBody JoinDomainWithCAM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type JoinDomain_TaskBody struct { + Req *types.JoinDomain_Task `xml:"urn:vim25 JoinDomain_Task,omitempty"` + Res *types.JoinDomain_TaskResponse `xml:"urn:vim25 JoinDomain_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *JoinDomain_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func JoinDomain_Task(ctx context.Context, r soap.RoundTripper, req *types.JoinDomain_Task) (*types.JoinDomain_TaskResponse, error) { + var reqBody, resBody JoinDomain_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type LeaveCurrentDomain_TaskBody struct { + Req *types.LeaveCurrentDomain_Task `xml:"urn:vim25 LeaveCurrentDomain_Task,omitempty"` + Res *types.LeaveCurrentDomain_TaskResponse `xml:"urn:vim25 LeaveCurrentDomain_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *LeaveCurrentDomain_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func LeaveCurrentDomain_Task(ctx context.Context, r soap.RoundTripper, req *types.LeaveCurrentDomain_Task) (*types.LeaveCurrentDomain_TaskResponse, error) { + var reqBody, resBody LeaveCurrentDomain_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ListCACertificateRevocationListsBody struct { + Req *types.ListCACertificateRevocationLists `xml:"urn:vim25 ListCACertificateRevocationLists,omitempty"` + Res *types.ListCACertificateRevocationListsResponse `xml:"urn:vim25 ListCACertificateRevocationListsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListCACertificateRevocationListsBody) Fault() *soap.Fault { return b.Fault_ } + +func ListCACertificateRevocationLists(ctx context.Context, r soap.RoundTripper, req *types.ListCACertificateRevocationLists) (*types.ListCACertificateRevocationListsResponse, error) { + var reqBody, resBody ListCACertificateRevocationListsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ListCACertificatesBody struct { + Req *types.ListCACertificates `xml:"urn:vim25 ListCACertificates,omitempty"` + Res *types.ListCACertificatesResponse `xml:"urn:vim25 ListCACertificatesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListCACertificatesBody) Fault() *soap.Fault { return b.Fault_ } + +func ListCACertificates(ctx context.Context, r soap.RoundTripper, req *types.ListCACertificates) (*types.ListCACertificatesResponse, error) { + var reqBody, resBody 
ListCACertificatesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ListFilesInGuestBody struct { + Req *types.ListFilesInGuest `xml:"urn:vim25 ListFilesInGuest,omitempty"` + Res *types.ListFilesInGuestResponse `xml:"urn:vim25 ListFilesInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListFilesInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func ListFilesInGuest(ctx context.Context, r soap.RoundTripper, req *types.ListFilesInGuest) (*types.ListFilesInGuestResponse, error) { + var reqBody, resBody ListFilesInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ListGuestAliasesBody struct { + Req *types.ListGuestAliases `xml:"urn:vim25 ListGuestAliases,omitempty"` + Res *types.ListGuestAliasesResponse `xml:"urn:vim25 ListGuestAliasesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListGuestAliasesBody) Fault() *soap.Fault { return b.Fault_ } + +func ListGuestAliases(ctx context.Context, r soap.RoundTripper, req *types.ListGuestAliases) (*types.ListGuestAliasesResponse, error) { + var reqBody, resBody ListGuestAliasesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ListGuestMappedAliasesBody struct { + Req *types.ListGuestMappedAliases `xml:"urn:vim25 ListGuestMappedAliases,omitempty"` + Res *types.ListGuestMappedAliasesResponse `xml:"urn:vim25 ListGuestMappedAliasesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListGuestMappedAliasesBody) Fault() *soap.Fault { return b.Fault_ } + +func ListGuestMappedAliases(ctx context.Context, r soap.RoundTripper, req *types.ListGuestMappedAliases) (*types.ListGuestMappedAliasesResponse, error) { + var reqBody, resBody ListGuestMappedAliasesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ListKeysBody struct { + Req *types.ListKeys `xml:"urn:vim25 ListKeys,omitempty"` + Res *types.ListKeysResponse `xml:"urn:vim25 ListKeysResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListKeysBody) Fault() *soap.Fault { return b.Fault_ } + +func ListKeys(ctx context.Context, r soap.RoundTripper, req *types.ListKeys) (*types.ListKeysResponse, error) { + var reqBody, resBody ListKeysBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ListKmipServersBody struct { + Req *types.ListKmipServers `xml:"urn:vim25 ListKmipServers,omitempty"` + Res *types.ListKmipServersResponse `xml:"urn:vim25 ListKmipServersResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListKmipServersBody) Fault() *soap.Fault { return b.Fault_ } + +func ListKmipServers(ctx context.Context, r soap.RoundTripper, req *types.ListKmipServers) (*types.ListKmipServersResponse, error) { + var reqBody, resBody ListKmipServersBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + 
} + + return resBody.Res, nil +} + +type ListProcessesInGuestBody struct { + Req *types.ListProcessesInGuest `xml:"urn:vim25 ListProcessesInGuest,omitempty"` + Res *types.ListProcessesInGuestResponse `xml:"urn:vim25 ListProcessesInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListProcessesInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func ListProcessesInGuest(ctx context.Context, r soap.RoundTripper, req *types.ListProcessesInGuest) (*types.ListProcessesInGuestResponse, error) { + var reqBody, resBody ListProcessesInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ListRegistryKeysInGuestBody struct { + Req *types.ListRegistryKeysInGuest `xml:"urn:vim25 ListRegistryKeysInGuest,omitempty"` + Res *types.ListRegistryKeysInGuestResponse `xml:"urn:vim25 ListRegistryKeysInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListRegistryKeysInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func ListRegistryKeysInGuest(ctx context.Context, r soap.RoundTripper, req *types.ListRegistryKeysInGuest) (*types.ListRegistryKeysInGuestResponse, error) { + var reqBody, resBody ListRegistryKeysInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ListRegistryValuesInGuestBody struct { + Req *types.ListRegistryValuesInGuest `xml:"urn:vim25 ListRegistryValuesInGuest,omitempty"` + Res *types.ListRegistryValuesInGuestResponse `xml:"urn:vim25 ListRegistryValuesInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListRegistryValuesInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func ListRegistryValuesInGuest(ctx context.Context, r soap.RoundTripper, req *types.ListRegistryValuesInGuest) (*types.ListRegistryValuesInGuestResponse, error) { + var reqBody, resBody ListRegistryValuesInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ListSmartCardTrustAnchorsBody struct { + Req *types.ListSmartCardTrustAnchors `xml:"urn:vim25 ListSmartCardTrustAnchors,omitempty"` + Res *types.ListSmartCardTrustAnchorsResponse `xml:"urn:vim25 ListSmartCardTrustAnchorsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListSmartCardTrustAnchorsBody) Fault() *soap.Fault { return b.Fault_ } + +func ListSmartCardTrustAnchors(ctx context.Context, r soap.RoundTripper, req *types.ListSmartCardTrustAnchors) (*types.ListSmartCardTrustAnchorsResponse, error) { + var reqBody, resBody ListSmartCardTrustAnchorsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ListTagsAttachedToVStorageObjectBody struct { + Req *types.ListTagsAttachedToVStorageObject `xml:"urn:vim25 ListTagsAttachedToVStorageObject,omitempty"` + Res *types.ListTagsAttachedToVStorageObjectResponse `xml:"urn:vim25 ListTagsAttachedToVStorageObjectResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListTagsAttachedToVStorageObjectBody) Fault() *soap.Fault { return b.Fault_ } + 
+func ListTagsAttachedToVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.ListTagsAttachedToVStorageObject) (*types.ListTagsAttachedToVStorageObjectResponse, error) { + var reqBody, resBody ListTagsAttachedToVStorageObjectBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ListVStorageObjectBody struct { + Req *types.ListVStorageObject `xml:"urn:vim25 ListVStorageObject,omitempty"` + Res *types.ListVStorageObjectResponse `xml:"urn:vim25 ListVStorageObjectResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListVStorageObjectBody) Fault() *soap.Fault { return b.Fault_ } + +func ListVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.ListVStorageObject) (*types.ListVStorageObjectResponse, error) { + var reqBody, resBody ListVStorageObjectBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ListVStorageObjectsAttachedToTagBody struct { + Req *types.ListVStorageObjectsAttachedToTag `xml:"urn:vim25 ListVStorageObjectsAttachedToTag,omitempty"` + Res *types.ListVStorageObjectsAttachedToTagResponse `xml:"urn:vim25 ListVStorageObjectsAttachedToTagResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListVStorageObjectsAttachedToTagBody) Fault() *soap.Fault { return b.Fault_ } + +func ListVStorageObjectsAttachedToTag(ctx context.Context, r soap.RoundTripper, req *types.ListVStorageObjectsAttachedToTag) (*types.ListVStorageObjectsAttachedToTagResponse, error) { + var reqBody, resBody ListVStorageObjectsAttachedToTagBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type LogUserEventBody struct { + Req *types.LogUserEvent `xml:"urn:vim25 LogUserEvent,omitempty"` + Res *types.LogUserEventResponse `xml:"urn:vim25 LogUserEventResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *LogUserEventBody) Fault() *soap.Fault { return b.Fault_ } + +func LogUserEvent(ctx context.Context, r soap.RoundTripper, req *types.LogUserEvent) (*types.LogUserEventResponse, error) { + var reqBody, resBody LogUserEventBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type LoginBody struct { + Req *types.Login `xml:"urn:vim25 Login,omitempty"` + Res *types.LoginResponse `xml:"urn:vim25 LoginResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *LoginBody) Fault() *soap.Fault { return b.Fault_ } + +func Login(ctx context.Context, r soap.RoundTripper, req *types.Login) (*types.LoginResponse, error) { + var reqBody, resBody LoginBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type LoginBySSPIBody struct { + Req *types.LoginBySSPI `xml:"urn:vim25 LoginBySSPI,omitempty"` + Res *types.LoginBySSPIResponse `xml:"urn:vim25 LoginBySSPIResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *LoginBySSPIBody) Fault() *soap.Fault { return b.Fault_ } + +func LoginBySSPI(ctx 
context.Context, r soap.RoundTripper, req *types.LoginBySSPI) (*types.LoginBySSPIResponse, error) { + var reqBody, resBody LoginBySSPIBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type LoginByTokenBody struct { + Req *types.LoginByToken `xml:"urn:vim25 LoginByToken,omitempty"` + Res *types.LoginByTokenResponse `xml:"urn:vim25 LoginByTokenResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *LoginByTokenBody) Fault() *soap.Fault { return b.Fault_ } + +func LoginByToken(ctx context.Context, r soap.RoundTripper, req *types.LoginByToken) (*types.LoginByTokenResponse, error) { + var reqBody, resBody LoginByTokenBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type LoginExtensionByCertificateBody struct { + Req *types.LoginExtensionByCertificate `xml:"urn:vim25 LoginExtensionByCertificate,omitempty"` + Res *types.LoginExtensionByCertificateResponse `xml:"urn:vim25 LoginExtensionByCertificateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *LoginExtensionByCertificateBody) Fault() *soap.Fault { return b.Fault_ } + +func LoginExtensionByCertificate(ctx context.Context, r soap.RoundTripper, req *types.LoginExtensionByCertificate) (*types.LoginExtensionByCertificateResponse, error) { + var reqBody, resBody LoginExtensionByCertificateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type LoginExtensionBySubjectNameBody struct { + Req *types.LoginExtensionBySubjectName `xml:"urn:vim25 LoginExtensionBySubjectName,omitempty"` + Res *types.LoginExtensionBySubjectNameResponse `xml:"urn:vim25 LoginExtensionBySubjectNameResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *LoginExtensionBySubjectNameBody) Fault() *soap.Fault { return b.Fault_ } + +func LoginExtensionBySubjectName(ctx context.Context, r soap.RoundTripper, req *types.LoginExtensionBySubjectName) (*types.LoginExtensionBySubjectNameResponse, error) { + var reqBody, resBody LoginExtensionBySubjectNameBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type LogoutBody struct { + Req *types.Logout `xml:"urn:vim25 Logout,omitempty"` + Res *types.LogoutResponse `xml:"urn:vim25 LogoutResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *LogoutBody) Fault() *soap.Fault { return b.Fault_ } + +func Logout(ctx context.Context, r soap.RoundTripper, req *types.Logout) (*types.LogoutResponse, error) { + var reqBody, resBody LogoutBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type LookupDvPortGroupBody struct { + Req *types.LookupDvPortGroup `xml:"urn:vim25 LookupDvPortGroup,omitempty"` + Res *types.LookupDvPortGroupResponse `xml:"urn:vim25 LookupDvPortGroupResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *LookupDvPortGroupBody) Fault() *soap.Fault { return b.Fault_ } + +func LookupDvPortGroup(ctx context.Context, r 
soap.RoundTripper, req *types.LookupDvPortGroup) (*types.LookupDvPortGroupResponse, error) { + var reqBody, resBody LookupDvPortGroupBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type LookupVmOverheadMemoryBody struct { + Req *types.LookupVmOverheadMemory `xml:"urn:vim25 LookupVmOverheadMemory,omitempty"` + Res *types.LookupVmOverheadMemoryResponse `xml:"urn:vim25 LookupVmOverheadMemoryResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *LookupVmOverheadMemoryBody) Fault() *soap.Fault { return b.Fault_ } + +func LookupVmOverheadMemory(ctx context.Context, r soap.RoundTripper, req *types.LookupVmOverheadMemory) (*types.LookupVmOverheadMemoryResponse, error) { + var reqBody, resBody LookupVmOverheadMemoryBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MakeDirectoryBody struct { + Req *types.MakeDirectory `xml:"urn:vim25 MakeDirectory,omitempty"` + Res *types.MakeDirectoryResponse `xml:"urn:vim25 MakeDirectoryResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MakeDirectoryBody) Fault() *soap.Fault { return b.Fault_ } + +func MakeDirectory(ctx context.Context, r soap.RoundTripper, req *types.MakeDirectory) (*types.MakeDirectoryResponse, error) { + var reqBody, resBody MakeDirectoryBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MakeDirectoryInGuestBody struct { + Req *types.MakeDirectoryInGuest `xml:"urn:vim25 MakeDirectoryInGuest,omitempty"` + Res *types.MakeDirectoryInGuestResponse `xml:"urn:vim25 MakeDirectoryInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MakeDirectoryInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func MakeDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.MakeDirectoryInGuest) (*types.MakeDirectoryInGuestResponse, error) { + var reqBody, resBody MakeDirectoryInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MakePrimaryVM_TaskBody struct { + Req *types.MakePrimaryVM_Task `xml:"urn:vim25 MakePrimaryVM_Task,omitempty"` + Res *types.MakePrimaryVM_TaskResponse `xml:"urn:vim25 MakePrimaryVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MakePrimaryVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func MakePrimaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types.MakePrimaryVM_Task) (*types.MakePrimaryVM_TaskResponse, error) { + var reqBody, resBody MakePrimaryVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MarkAsLocal_TaskBody struct { + Req *types.MarkAsLocal_Task `xml:"urn:vim25 MarkAsLocal_Task,omitempty"` + Res *types.MarkAsLocal_TaskResponse `xml:"urn:vim25 MarkAsLocal_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MarkAsLocal_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func MarkAsLocal_Task(ctx context.Context, r 
soap.RoundTripper, req *types.MarkAsLocal_Task) (*types.MarkAsLocal_TaskResponse, error) { + var reqBody, resBody MarkAsLocal_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MarkAsNonLocal_TaskBody struct { + Req *types.MarkAsNonLocal_Task `xml:"urn:vim25 MarkAsNonLocal_Task,omitempty"` + Res *types.MarkAsNonLocal_TaskResponse `xml:"urn:vim25 MarkAsNonLocal_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MarkAsNonLocal_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func MarkAsNonLocal_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkAsNonLocal_Task) (*types.MarkAsNonLocal_TaskResponse, error) { + var reqBody, resBody MarkAsNonLocal_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MarkAsNonSsd_TaskBody struct { + Req *types.MarkAsNonSsd_Task `xml:"urn:vim25 MarkAsNonSsd_Task,omitempty"` + Res *types.MarkAsNonSsd_TaskResponse `xml:"urn:vim25 MarkAsNonSsd_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MarkAsNonSsd_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func MarkAsNonSsd_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkAsNonSsd_Task) (*types.MarkAsNonSsd_TaskResponse, error) { + var reqBody, resBody MarkAsNonSsd_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MarkAsSsd_TaskBody struct { + Req *types.MarkAsSsd_Task `xml:"urn:vim25 MarkAsSsd_Task,omitempty"` + Res *types.MarkAsSsd_TaskResponse `xml:"urn:vim25 MarkAsSsd_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MarkAsSsd_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func MarkAsSsd_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkAsSsd_Task) (*types.MarkAsSsd_TaskResponse, error) { + var reqBody, resBody MarkAsSsd_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MarkAsTemplateBody struct { + Req *types.MarkAsTemplate `xml:"urn:vim25 MarkAsTemplate,omitempty"` + Res *types.MarkAsTemplateResponse `xml:"urn:vim25 MarkAsTemplateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MarkAsTemplateBody) Fault() *soap.Fault { return b.Fault_ } + +func MarkAsTemplate(ctx context.Context, r soap.RoundTripper, req *types.MarkAsTemplate) (*types.MarkAsTemplateResponse, error) { + var reqBody, resBody MarkAsTemplateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MarkAsVirtualMachineBody struct { + Req *types.MarkAsVirtualMachine `xml:"urn:vim25 MarkAsVirtualMachine,omitempty"` + Res *types.MarkAsVirtualMachineResponse `xml:"urn:vim25 MarkAsVirtualMachineResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MarkAsVirtualMachineBody) Fault() *soap.Fault { return b.Fault_ } + +func MarkAsVirtualMachine(ctx context.Context, r soap.RoundTripper, req *types.MarkAsVirtualMachine) 
(*types.MarkAsVirtualMachineResponse, error) { + var reqBody, resBody MarkAsVirtualMachineBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MarkDefaultBody struct { + Req *types.MarkDefault `xml:"urn:vim25 MarkDefault,omitempty"` + Res *types.MarkDefaultResponse `xml:"urn:vim25 MarkDefaultResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MarkDefaultBody) Fault() *soap.Fault { return b.Fault_ } + +func MarkDefault(ctx context.Context, r soap.RoundTripper, req *types.MarkDefault) (*types.MarkDefaultResponse, error) { + var reqBody, resBody MarkDefaultBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MarkForRemovalBody struct { + Req *types.MarkForRemoval `xml:"urn:vim25 MarkForRemoval,omitempty"` + Res *types.MarkForRemovalResponse `xml:"urn:vim25 MarkForRemovalResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MarkForRemovalBody) Fault() *soap.Fault { return b.Fault_ } + +func MarkForRemoval(ctx context.Context, r soap.RoundTripper, req *types.MarkForRemoval) (*types.MarkForRemovalResponse, error) { + var reqBody, resBody MarkForRemovalBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MergeDvs_TaskBody struct { + Req *types.MergeDvs_Task `xml:"urn:vim25 MergeDvs_Task,omitempty"` + Res *types.MergeDvs_TaskResponse `xml:"urn:vim25 MergeDvs_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MergeDvs_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func MergeDvs_Task(ctx context.Context, r soap.RoundTripper, req *types.MergeDvs_Task) (*types.MergeDvs_TaskResponse, error) { + var reqBody, resBody MergeDvs_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MergePermissionsBody struct { + Req *types.MergePermissions `xml:"urn:vim25 MergePermissions,omitempty"` + Res *types.MergePermissionsResponse `xml:"urn:vim25 MergePermissionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MergePermissionsBody) Fault() *soap.Fault { return b.Fault_ } + +func MergePermissions(ctx context.Context, r soap.RoundTripper, req *types.MergePermissions) (*types.MergePermissionsResponse, error) { + var reqBody, resBody MergePermissionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MigrateVM_TaskBody struct { + Req *types.MigrateVM_Task `xml:"urn:vim25 MigrateVM_Task,omitempty"` + Res *types.MigrateVM_TaskResponse `xml:"urn:vim25 MigrateVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MigrateVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func MigrateVM_Task(ctx context.Context, r soap.RoundTripper, req *types.MigrateVM_Task) (*types.MigrateVM_TaskResponse, error) { + var reqBody, resBody MigrateVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return 
resBody.Res, nil +} + +type ModifyListViewBody struct { + Req *types.ModifyListView `xml:"urn:vim25 ModifyListView,omitempty"` + Res *types.ModifyListViewResponse `xml:"urn:vim25 ModifyListViewResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ModifyListViewBody) Fault() *soap.Fault { return b.Fault_ } + +func ModifyListView(ctx context.Context, r soap.RoundTripper, req *types.ModifyListView) (*types.ModifyListViewResponse, error) { + var reqBody, resBody ModifyListViewBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MountToolsInstallerBody struct { + Req *types.MountToolsInstaller `xml:"urn:vim25 MountToolsInstaller,omitempty"` + Res *types.MountToolsInstallerResponse `xml:"urn:vim25 MountToolsInstallerResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MountToolsInstallerBody) Fault() *soap.Fault { return b.Fault_ } + +func MountToolsInstaller(ctx context.Context, r soap.RoundTripper, req *types.MountToolsInstaller) (*types.MountToolsInstallerResponse, error) { + var reqBody, resBody MountToolsInstallerBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MountVffsVolumeBody struct { + Req *types.MountVffsVolume `xml:"urn:vim25 MountVffsVolume,omitempty"` + Res *types.MountVffsVolumeResponse `xml:"urn:vim25 MountVffsVolumeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MountVffsVolumeBody) Fault() *soap.Fault { return b.Fault_ } + +func MountVffsVolume(ctx context.Context, r soap.RoundTripper, req *types.MountVffsVolume) (*types.MountVffsVolumeResponse, error) { + var reqBody, resBody MountVffsVolumeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MountVmfsVolumeBody struct { + Req *types.MountVmfsVolume `xml:"urn:vim25 MountVmfsVolume,omitempty"` + Res *types.MountVmfsVolumeResponse `xml:"urn:vim25 MountVmfsVolumeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MountVmfsVolumeBody) Fault() *soap.Fault { return b.Fault_ } + +func MountVmfsVolume(ctx context.Context, r soap.RoundTripper, req *types.MountVmfsVolume) (*types.MountVmfsVolumeResponse, error) { + var reqBody, resBody MountVmfsVolumeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MountVmfsVolumeEx_TaskBody struct { + Req *types.MountVmfsVolumeEx_Task `xml:"urn:vim25 MountVmfsVolumeEx_Task,omitempty"` + Res *types.MountVmfsVolumeEx_TaskResponse `xml:"urn:vim25 MountVmfsVolumeEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MountVmfsVolumeEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func MountVmfsVolumeEx_Task(ctx context.Context, r soap.RoundTripper, req *types.MountVmfsVolumeEx_Task) (*types.MountVmfsVolumeEx_TaskResponse, error) { + var reqBody, resBody MountVmfsVolumeEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MoveDVPort_TaskBody 
struct { + Req *types.MoveDVPort_Task `xml:"urn:vim25 MoveDVPort_Task,omitempty"` + Res *types.MoveDVPort_TaskResponse `xml:"urn:vim25 MoveDVPort_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MoveDVPort_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func MoveDVPort_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveDVPort_Task) (*types.MoveDVPort_TaskResponse, error) { + var reqBody, resBody MoveDVPort_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MoveDatastoreFile_TaskBody struct { + Req *types.MoveDatastoreFile_Task `xml:"urn:vim25 MoveDatastoreFile_Task,omitempty"` + Res *types.MoveDatastoreFile_TaskResponse `xml:"urn:vim25 MoveDatastoreFile_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MoveDatastoreFile_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func MoveDatastoreFile_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveDatastoreFile_Task) (*types.MoveDatastoreFile_TaskResponse, error) { + var reqBody, resBody MoveDatastoreFile_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MoveDirectoryInGuestBody struct { + Req *types.MoveDirectoryInGuest `xml:"urn:vim25 MoveDirectoryInGuest,omitempty"` + Res *types.MoveDirectoryInGuestResponse `xml:"urn:vim25 MoveDirectoryInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MoveDirectoryInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func MoveDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.MoveDirectoryInGuest) (*types.MoveDirectoryInGuestResponse, error) { + var reqBody, resBody MoveDirectoryInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MoveFileInGuestBody struct { + Req *types.MoveFileInGuest `xml:"urn:vim25 MoveFileInGuest,omitempty"` + Res *types.MoveFileInGuestResponse `xml:"urn:vim25 MoveFileInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MoveFileInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func MoveFileInGuest(ctx context.Context, r soap.RoundTripper, req *types.MoveFileInGuest) (*types.MoveFileInGuestResponse, error) { + var reqBody, resBody MoveFileInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MoveHostInto_TaskBody struct { + Req *types.MoveHostInto_Task `xml:"urn:vim25 MoveHostInto_Task,omitempty"` + Res *types.MoveHostInto_TaskResponse `xml:"urn:vim25 MoveHostInto_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MoveHostInto_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func MoveHostInto_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveHostInto_Task) (*types.MoveHostInto_TaskResponse, error) { + var reqBody, resBody MoveHostInto_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MoveIntoFolder_TaskBody 
struct { + Req *types.MoveIntoFolder_Task `xml:"urn:vim25 MoveIntoFolder_Task,omitempty"` + Res *types.MoveIntoFolder_TaskResponse `xml:"urn:vim25 MoveIntoFolder_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MoveIntoFolder_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func MoveIntoFolder_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveIntoFolder_Task) (*types.MoveIntoFolder_TaskResponse, error) { + var reqBody, resBody MoveIntoFolder_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MoveIntoResourcePoolBody struct { + Req *types.MoveIntoResourcePool `xml:"urn:vim25 MoveIntoResourcePool,omitempty"` + Res *types.MoveIntoResourcePoolResponse `xml:"urn:vim25 MoveIntoResourcePoolResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MoveIntoResourcePoolBody) Fault() *soap.Fault { return b.Fault_ } + +func MoveIntoResourcePool(ctx context.Context, r soap.RoundTripper, req *types.MoveIntoResourcePool) (*types.MoveIntoResourcePoolResponse, error) { + var reqBody, resBody MoveIntoResourcePoolBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MoveInto_TaskBody struct { + Req *types.MoveInto_Task `xml:"urn:vim25 MoveInto_Task,omitempty"` + Res *types.MoveInto_TaskResponse `xml:"urn:vim25 MoveInto_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MoveInto_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func MoveInto_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveInto_Task) (*types.MoveInto_TaskResponse, error) { + var reqBody, resBody MoveInto_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MoveVirtualDisk_TaskBody struct { + Req *types.MoveVirtualDisk_Task `xml:"urn:vim25 MoveVirtualDisk_Task,omitempty"` + Res *types.MoveVirtualDisk_TaskResponse `xml:"urn:vim25 MoveVirtualDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MoveVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func MoveVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveVirtualDisk_Task) (*types.MoveVirtualDisk_TaskResponse, error) { + var reqBody, resBody MoveVirtualDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type OpenInventoryViewFolderBody struct { + Req *types.OpenInventoryViewFolder `xml:"urn:vim25 OpenInventoryViewFolder,omitempty"` + Res *types.OpenInventoryViewFolderResponse `xml:"urn:vim25 OpenInventoryViewFolderResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *OpenInventoryViewFolderBody) Fault() *soap.Fault { return b.Fault_ } + +func OpenInventoryViewFolder(ctx context.Context, r soap.RoundTripper, req *types.OpenInventoryViewFolder) (*types.OpenInventoryViewFolderResponse, error) { + var reqBody, resBody OpenInventoryViewFolderBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return 
resBody.Res, nil +} + +type OverwriteCustomizationSpecBody struct { + Req *types.OverwriteCustomizationSpec `xml:"urn:vim25 OverwriteCustomizationSpec,omitempty"` + Res *types.OverwriteCustomizationSpecResponse `xml:"urn:vim25 OverwriteCustomizationSpecResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *OverwriteCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ } + +func OverwriteCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.OverwriteCustomizationSpec) (*types.OverwriteCustomizationSpecResponse, error) { + var reqBody, resBody OverwriteCustomizationSpecBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ParseDescriptorBody struct { + Req *types.ParseDescriptor `xml:"urn:vim25 ParseDescriptor,omitempty"` + Res *types.ParseDescriptorResponse `xml:"urn:vim25 ParseDescriptorResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ParseDescriptorBody) Fault() *soap.Fault { return b.Fault_ } + +func ParseDescriptor(ctx context.Context, r soap.RoundTripper, req *types.ParseDescriptor) (*types.ParseDescriptorResponse, error) { + var reqBody, resBody ParseDescriptorBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PerformDvsProductSpecOperation_TaskBody struct { + Req *types.PerformDvsProductSpecOperation_Task `xml:"urn:vim25 PerformDvsProductSpecOperation_Task,omitempty"` + Res *types.PerformDvsProductSpecOperation_TaskResponse `xml:"urn:vim25 PerformDvsProductSpecOperation_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PerformDvsProductSpecOperation_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func PerformDvsProductSpecOperation_Task(ctx context.Context, r soap.RoundTripper, req *types.PerformDvsProductSpecOperation_Task) (*types.PerformDvsProductSpecOperation_TaskResponse, error) { + var reqBody, resBody PerformDvsProductSpecOperation_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PerformVsanUpgradePreflightCheckBody struct { + Req *types.PerformVsanUpgradePreflightCheck `xml:"urn:vim25 PerformVsanUpgradePreflightCheck,omitempty"` + Res *types.PerformVsanUpgradePreflightCheckResponse `xml:"urn:vim25 PerformVsanUpgradePreflightCheckResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PerformVsanUpgradePreflightCheckBody) Fault() *soap.Fault { return b.Fault_ } + +func PerformVsanUpgradePreflightCheck(ctx context.Context, r soap.RoundTripper, req *types.PerformVsanUpgradePreflightCheck) (*types.PerformVsanUpgradePreflightCheckResponse, error) { + var reqBody, resBody PerformVsanUpgradePreflightCheckBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PerformVsanUpgrade_TaskBody struct { + Req *types.PerformVsanUpgrade_Task `xml:"urn:vim25 PerformVsanUpgrade_Task,omitempty"` + Res *types.PerformVsanUpgrade_TaskResponse `xml:"urn:vim25 PerformVsanUpgrade_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + 
+func (b *PerformVsanUpgrade_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func PerformVsanUpgrade_Task(ctx context.Context, r soap.RoundTripper, req *types.PerformVsanUpgrade_Task) (*types.PerformVsanUpgrade_TaskResponse, error) { + var reqBody, resBody PerformVsanUpgrade_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PlaceVmBody struct { + Req *types.PlaceVm `xml:"urn:vim25 PlaceVm,omitempty"` + Res *types.PlaceVmResponse `xml:"urn:vim25 PlaceVmResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PlaceVmBody) Fault() *soap.Fault { return b.Fault_ } + +func PlaceVm(ctx context.Context, r soap.RoundTripper, req *types.PlaceVm) (*types.PlaceVmResponse, error) { + var reqBody, resBody PlaceVmBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PostEventBody struct { + Req *types.PostEvent `xml:"urn:vim25 PostEvent,omitempty"` + Res *types.PostEventResponse `xml:"urn:vim25 PostEventResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PostEventBody) Fault() *soap.Fault { return b.Fault_ } + +func PostEvent(ctx context.Context, r soap.RoundTripper, req *types.PostEvent) (*types.PostEventResponse, error) { + var reqBody, resBody PostEventBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PostHealthUpdatesBody struct { + Req *types.PostHealthUpdates `xml:"urn:vim25 PostHealthUpdates,omitempty"` + Res *types.PostHealthUpdatesResponse `xml:"urn:vim25 PostHealthUpdatesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PostHealthUpdatesBody) Fault() *soap.Fault { return b.Fault_ } + +func PostHealthUpdates(ctx context.Context, r soap.RoundTripper, req *types.PostHealthUpdates) (*types.PostHealthUpdatesResponse, error) { + var reqBody, resBody PostHealthUpdatesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PowerDownHostToStandBy_TaskBody struct { + Req *types.PowerDownHostToStandBy_Task `xml:"urn:vim25 PowerDownHostToStandBy_Task,omitempty"` + Res *types.PowerDownHostToStandBy_TaskResponse `xml:"urn:vim25 PowerDownHostToStandBy_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PowerDownHostToStandBy_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func PowerDownHostToStandBy_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerDownHostToStandBy_Task) (*types.PowerDownHostToStandBy_TaskResponse, error) { + var reqBody, resBody PowerDownHostToStandBy_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PowerOffVApp_TaskBody struct { + Req *types.PowerOffVApp_Task `xml:"urn:vim25 PowerOffVApp_Task,omitempty"` + Res *types.PowerOffVApp_TaskResponse `xml:"urn:vim25 PowerOffVApp_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PowerOffVApp_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func 
PowerOffVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOffVApp_Task) (*types.PowerOffVApp_TaskResponse, error) { + var reqBody, resBody PowerOffVApp_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PowerOffVM_TaskBody struct { + Req *types.PowerOffVM_Task `xml:"urn:vim25 PowerOffVM_Task,omitempty"` + Res *types.PowerOffVM_TaskResponse `xml:"urn:vim25 PowerOffVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PowerOffVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func PowerOffVM_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOffVM_Task) (*types.PowerOffVM_TaskResponse, error) { + var reqBody, resBody PowerOffVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PowerOnMultiVM_TaskBody struct { + Req *types.PowerOnMultiVM_Task `xml:"urn:vim25 PowerOnMultiVM_Task,omitempty"` + Res *types.PowerOnMultiVM_TaskResponse `xml:"urn:vim25 PowerOnMultiVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PowerOnMultiVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func PowerOnMultiVM_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOnMultiVM_Task) (*types.PowerOnMultiVM_TaskResponse, error) { + var reqBody, resBody PowerOnMultiVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PowerOnVApp_TaskBody struct { + Req *types.PowerOnVApp_Task `xml:"urn:vim25 PowerOnVApp_Task,omitempty"` + Res *types.PowerOnVApp_TaskResponse `xml:"urn:vim25 PowerOnVApp_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PowerOnVApp_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func PowerOnVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOnVApp_Task) (*types.PowerOnVApp_TaskResponse, error) { + var reqBody, resBody PowerOnVApp_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PowerOnVM_TaskBody struct { + Req *types.PowerOnVM_Task `xml:"urn:vim25 PowerOnVM_Task,omitempty"` + Res *types.PowerOnVM_TaskResponse `xml:"urn:vim25 PowerOnVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PowerOnVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func PowerOnVM_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOnVM_Task) (*types.PowerOnVM_TaskResponse, error) { + var reqBody, resBody PowerOnVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PowerUpHostFromStandBy_TaskBody struct { + Req *types.PowerUpHostFromStandBy_Task `xml:"urn:vim25 PowerUpHostFromStandBy_Task,omitempty"` + Res *types.PowerUpHostFromStandBy_TaskResponse `xml:"urn:vim25 PowerUpHostFromStandBy_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PowerUpHostFromStandBy_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func 
PowerUpHostFromStandBy_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerUpHostFromStandBy_Task) (*types.PowerUpHostFromStandBy_TaskResponse, error) { + var reqBody, resBody PowerUpHostFromStandBy_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PrepareCryptoBody struct { + Req *types.PrepareCrypto `xml:"urn:vim25 PrepareCrypto,omitempty"` + Res *types.PrepareCryptoResponse `xml:"urn:vim25 PrepareCryptoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PrepareCryptoBody) Fault() *soap.Fault { return b.Fault_ } + +func PrepareCrypto(ctx context.Context, r soap.RoundTripper, req *types.PrepareCrypto) (*types.PrepareCryptoResponse, error) { + var reqBody, resBody PrepareCryptoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PromoteDisks_TaskBody struct { + Req *types.PromoteDisks_Task `xml:"urn:vim25 PromoteDisks_Task,omitempty"` + Res *types.PromoteDisks_TaskResponse `xml:"urn:vim25 PromoteDisks_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PromoteDisks_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func PromoteDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.PromoteDisks_Task) (*types.PromoteDisks_TaskResponse, error) { + var reqBody, resBody PromoteDisks_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PutUsbScanCodesBody struct { + Req *types.PutUsbScanCodes `xml:"urn:vim25 PutUsbScanCodes,omitempty"` + Res *types.PutUsbScanCodesResponse `xml:"urn:vim25 PutUsbScanCodesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PutUsbScanCodesBody) Fault() *soap.Fault { return b.Fault_ } + +func PutUsbScanCodes(ctx context.Context, r soap.RoundTripper, req *types.PutUsbScanCodes) (*types.PutUsbScanCodesResponse, error) { + var reqBody, resBody PutUsbScanCodesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryAnswerFileStatusBody struct { + Req *types.QueryAnswerFileStatus `xml:"urn:vim25 QueryAnswerFileStatus,omitempty"` + Res *types.QueryAnswerFileStatusResponse `xml:"urn:vim25 QueryAnswerFileStatusResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryAnswerFileStatusBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryAnswerFileStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryAnswerFileStatus) (*types.QueryAnswerFileStatusResponse, error) { + var reqBody, resBody QueryAnswerFileStatusBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryAssignedLicensesBody struct { + Req *types.QueryAssignedLicenses `xml:"urn:vim25 QueryAssignedLicenses,omitempty"` + Res *types.QueryAssignedLicensesResponse `xml:"urn:vim25 QueryAssignedLicensesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryAssignedLicensesBody) Fault() *soap.Fault { return b.Fault_ } + +func 
QueryAssignedLicenses(ctx context.Context, r soap.RoundTripper, req *types.QueryAssignedLicenses) (*types.QueryAssignedLicensesResponse, error) { + var reqBody, resBody QueryAssignedLicensesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryAvailableDisksForVmfsBody struct { + Req *types.QueryAvailableDisksForVmfs `xml:"urn:vim25 QueryAvailableDisksForVmfs,omitempty"` + Res *types.QueryAvailableDisksForVmfsResponse `xml:"urn:vim25 QueryAvailableDisksForVmfsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryAvailableDisksForVmfsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryAvailableDisksForVmfs(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailableDisksForVmfs) (*types.QueryAvailableDisksForVmfsResponse, error) { + var reqBody, resBody QueryAvailableDisksForVmfsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryAvailableDvsSpecBody struct { + Req *types.QueryAvailableDvsSpec `xml:"urn:vim25 QueryAvailableDvsSpec,omitempty"` + Res *types.QueryAvailableDvsSpecResponse `xml:"urn:vim25 QueryAvailableDvsSpecResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryAvailableDvsSpecBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryAvailableDvsSpec(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailableDvsSpec) (*types.QueryAvailableDvsSpecResponse, error) { + var reqBody, resBody QueryAvailableDvsSpecBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryAvailablePartitionBody struct { + Req *types.QueryAvailablePartition `xml:"urn:vim25 QueryAvailablePartition,omitempty"` + Res *types.QueryAvailablePartitionResponse `xml:"urn:vim25 QueryAvailablePartitionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryAvailablePartitionBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryAvailablePartition(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailablePartition) (*types.QueryAvailablePartitionResponse, error) { + var reqBody, resBody QueryAvailablePartitionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryAvailablePerfMetricBody struct { + Req *types.QueryAvailablePerfMetric `xml:"urn:vim25 QueryAvailablePerfMetric,omitempty"` + Res *types.QueryAvailablePerfMetricResponse `xml:"urn:vim25 QueryAvailablePerfMetricResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryAvailablePerfMetricBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryAvailablePerfMetric(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailablePerfMetric) (*types.QueryAvailablePerfMetricResponse, error) { + var reqBody, resBody QueryAvailablePerfMetricBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryAvailableSsdsBody struct { + Req *types.QueryAvailableSsds `xml:"urn:vim25 QueryAvailableSsds,omitempty"` + Res 
*types.QueryAvailableSsdsResponse `xml:"urn:vim25 QueryAvailableSsdsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryAvailableSsdsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryAvailableSsds(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailableSsds) (*types.QueryAvailableSsdsResponse, error) { + var reqBody, resBody QueryAvailableSsdsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryAvailableTimeZonesBody struct { + Req *types.QueryAvailableTimeZones `xml:"urn:vim25 QueryAvailableTimeZones,omitempty"` + Res *types.QueryAvailableTimeZonesResponse `xml:"urn:vim25 QueryAvailableTimeZonesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryAvailableTimeZonesBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryAvailableTimeZones(ctx context.Context, r soap.RoundTripper, req *types.QueryAvailableTimeZones) (*types.QueryAvailableTimeZonesResponse, error) { + var reqBody, resBody QueryAvailableTimeZonesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryBootDevicesBody struct { + Req *types.QueryBootDevices `xml:"urn:vim25 QueryBootDevices,omitempty"` + Res *types.QueryBootDevicesResponse `xml:"urn:vim25 QueryBootDevicesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryBootDevicesBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryBootDevices(ctx context.Context, r soap.RoundTripper, req *types.QueryBootDevices) (*types.QueryBootDevicesResponse, error) { + var reqBody, resBody QueryBootDevicesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryBoundVnicsBody struct { + Req *types.QueryBoundVnics `xml:"urn:vim25 QueryBoundVnics,omitempty"` + Res *types.QueryBoundVnicsResponse `xml:"urn:vim25 QueryBoundVnicsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryBoundVnicsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryBoundVnics(ctx context.Context, r soap.RoundTripper, req *types.QueryBoundVnics) (*types.QueryBoundVnicsResponse, error) { + var reqBody, resBody QueryBoundVnicsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryCandidateNicsBody struct { + Req *types.QueryCandidateNics `xml:"urn:vim25 QueryCandidateNics,omitempty"` + Res *types.QueryCandidateNicsResponse `xml:"urn:vim25 QueryCandidateNicsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryCandidateNicsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryCandidateNics(ctx context.Context, r soap.RoundTripper, req *types.QueryCandidateNics) (*types.QueryCandidateNicsResponse, error) { + var reqBody, resBody QueryCandidateNicsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryChangedDiskAreasBody struct { + Req *types.QueryChangedDiskAreas `xml:"urn:vim25 
QueryChangedDiskAreas,omitempty"` + Res *types.QueryChangedDiskAreasResponse `xml:"urn:vim25 QueryChangedDiskAreasResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryChangedDiskAreasBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryChangedDiskAreas(ctx context.Context, r soap.RoundTripper, req *types.QueryChangedDiskAreas) (*types.QueryChangedDiskAreasResponse, error) { + var reqBody, resBody QueryChangedDiskAreasBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryCmmdsBody struct { + Req *types.QueryCmmds `xml:"urn:vim25 QueryCmmds,omitempty"` + Res *types.QueryCmmdsResponse `xml:"urn:vim25 QueryCmmdsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryCmmdsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryCmmds(ctx context.Context, r soap.RoundTripper, req *types.QueryCmmds) (*types.QueryCmmdsResponse, error) { + var reqBody, resBody QueryCmmdsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryCompatibleHostForExistingDvsBody struct { + Req *types.QueryCompatibleHostForExistingDvs `xml:"urn:vim25 QueryCompatibleHostForExistingDvs,omitempty"` + Res *types.QueryCompatibleHostForExistingDvsResponse `xml:"urn:vim25 QueryCompatibleHostForExistingDvsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryCompatibleHostForExistingDvsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryCompatibleHostForExistingDvs(ctx context.Context, r soap.RoundTripper, req *types.QueryCompatibleHostForExistingDvs) (*types.QueryCompatibleHostForExistingDvsResponse, error) { + var reqBody, resBody QueryCompatibleHostForExistingDvsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryCompatibleHostForNewDvsBody struct { + Req *types.QueryCompatibleHostForNewDvs `xml:"urn:vim25 QueryCompatibleHostForNewDvs,omitempty"` + Res *types.QueryCompatibleHostForNewDvsResponse `xml:"urn:vim25 QueryCompatibleHostForNewDvsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryCompatibleHostForNewDvsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryCompatibleHostForNewDvs(ctx context.Context, r soap.RoundTripper, req *types.QueryCompatibleHostForNewDvs) (*types.QueryCompatibleHostForNewDvsResponse, error) { + var reqBody, resBody QueryCompatibleHostForNewDvsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryComplianceStatusBody struct { + Req *types.QueryComplianceStatus `xml:"urn:vim25 QueryComplianceStatus,omitempty"` + Res *types.QueryComplianceStatusResponse `xml:"urn:vim25 QueryComplianceStatusResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryComplianceStatusBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryComplianceStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryComplianceStatus) (*types.QueryComplianceStatusResponse, error) { + var reqBody, resBody QueryComplianceStatusBody + + 
reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryConfigOptionBody struct { + Req *types.QueryConfigOption `xml:"urn:vim25 QueryConfigOption,omitempty"` + Res *types.QueryConfigOptionResponse `xml:"urn:vim25 QueryConfigOptionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryConfigOptionBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryConfigOption(ctx context.Context, r soap.RoundTripper, req *types.QueryConfigOption) (*types.QueryConfigOptionResponse, error) { + var reqBody, resBody QueryConfigOptionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryConfigOptionDescriptorBody struct { + Req *types.QueryConfigOptionDescriptor `xml:"urn:vim25 QueryConfigOptionDescriptor,omitempty"` + Res *types.QueryConfigOptionDescriptorResponse `xml:"urn:vim25 QueryConfigOptionDescriptorResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryConfigOptionDescriptorBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryConfigOptionDescriptor(ctx context.Context, r soap.RoundTripper, req *types.QueryConfigOptionDescriptor) (*types.QueryConfigOptionDescriptorResponse, error) { + var reqBody, resBody QueryConfigOptionDescriptorBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryConfigOptionExBody struct { + Req *types.QueryConfigOptionEx `xml:"urn:vim25 QueryConfigOptionEx,omitempty"` + Res *types.QueryConfigOptionExResponse `xml:"urn:vim25 QueryConfigOptionExResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryConfigOptionExBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryConfigOptionEx(ctx context.Context, r soap.RoundTripper, req *types.QueryConfigOptionEx) (*types.QueryConfigOptionExResponse, error) { + var reqBody, resBody QueryConfigOptionExBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryConfigTargetBody struct { + Req *types.QueryConfigTarget `xml:"urn:vim25 QueryConfigTarget,omitempty"` + Res *types.QueryConfigTargetResponse `xml:"urn:vim25 QueryConfigTargetResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryConfigTargetBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryConfigTarget(ctx context.Context, r soap.RoundTripper, req *types.QueryConfigTarget) (*types.QueryConfigTargetResponse, error) { + var reqBody, resBody QueryConfigTargetBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryConfiguredModuleOptionStringBody struct { + Req *types.QueryConfiguredModuleOptionString `xml:"urn:vim25 QueryConfiguredModuleOptionString,omitempty"` + Res *types.QueryConfiguredModuleOptionStringResponse `xml:"urn:vim25 QueryConfiguredModuleOptionStringResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryConfiguredModuleOptionStringBody) Fault() *soap.Fault { return b.Fault_ } + +func 
QueryConfiguredModuleOptionString(ctx context.Context, r soap.RoundTripper, req *types.QueryConfiguredModuleOptionString) (*types.QueryConfiguredModuleOptionStringResponse, error) { + var reqBody, resBody QueryConfiguredModuleOptionStringBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryConnectionInfoBody struct { + Req *types.QueryConnectionInfo `xml:"urn:vim25 QueryConnectionInfo,omitempty"` + Res *types.QueryConnectionInfoResponse `xml:"urn:vim25 QueryConnectionInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryConnectionInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryConnectionInfo(ctx context.Context, r soap.RoundTripper, req *types.QueryConnectionInfo) (*types.QueryConnectionInfoResponse, error) { + var reqBody, resBody QueryConnectionInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryConnectionInfoViaSpecBody struct { + Req *types.QueryConnectionInfoViaSpec `xml:"urn:vim25 QueryConnectionInfoViaSpec,omitempty"` + Res *types.QueryConnectionInfoViaSpecResponse `xml:"urn:vim25 QueryConnectionInfoViaSpecResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryConnectionInfoViaSpecBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryConnectionInfoViaSpec(ctx context.Context, r soap.RoundTripper, req *types.QueryConnectionInfoViaSpec) (*types.QueryConnectionInfoViaSpecResponse, error) { + var reqBody, resBody QueryConnectionInfoViaSpecBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryDatastorePerformanceSummaryBody struct { + Req *types.QueryDatastorePerformanceSummary `xml:"urn:vim25 QueryDatastorePerformanceSummary,omitempty"` + Res *types.QueryDatastorePerformanceSummaryResponse `xml:"urn:vim25 QueryDatastorePerformanceSummaryResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryDatastorePerformanceSummaryBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryDatastorePerformanceSummary(ctx context.Context, r soap.RoundTripper, req *types.QueryDatastorePerformanceSummary) (*types.QueryDatastorePerformanceSummaryResponse, error) { + var reqBody, resBody QueryDatastorePerformanceSummaryBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryDateTimeBody struct { + Req *types.QueryDateTime `xml:"urn:vim25 QueryDateTime,omitempty"` + Res *types.QueryDateTimeResponse `xml:"urn:vim25 QueryDateTimeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryDateTimeBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryDateTime(ctx context.Context, r soap.RoundTripper, req *types.QueryDateTime) (*types.QueryDateTimeResponse, error) { + var reqBody, resBody QueryDateTimeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryDescriptionsBody struct { + Req *types.QueryDescriptions `xml:"urn:vim25 QueryDescriptions,omitempty"` + Res 
*types.QueryDescriptionsResponse `xml:"urn:vim25 QueryDescriptionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryDescriptionsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryDescriptions(ctx context.Context, r soap.RoundTripper, req *types.QueryDescriptions) (*types.QueryDescriptionsResponse, error) { + var reqBody, resBody QueryDescriptionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryDisksForVsanBody struct { + Req *types.QueryDisksForVsan `xml:"urn:vim25 QueryDisksForVsan,omitempty"` + Res *types.QueryDisksForVsanResponse `xml:"urn:vim25 QueryDisksForVsanResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryDisksForVsanBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryDisksForVsan(ctx context.Context, r soap.RoundTripper, req *types.QueryDisksForVsan) (*types.QueryDisksForVsanResponse, error) { + var reqBody, resBody QueryDisksForVsanBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryDisksUsingFilterBody struct { + Req *types.QueryDisksUsingFilter `xml:"urn:vim25 QueryDisksUsingFilter,omitempty"` + Res *types.QueryDisksUsingFilterResponse `xml:"urn:vim25 QueryDisksUsingFilterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryDisksUsingFilterBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryDisksUsingFilter(ctx context.Context, r soap.RoundTripper, req *types.QueryDisksUsingFilter) (*types.QueryDisksUsingFilterResponse, error) { + var reqBody, resBody QueryDisksUsingFilterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryDvsByUuidBody struct { + Req *types.QueryDvsByUuid `xml:"urn:vim25 QueryDvsByUuid,omitempty"` + Res *types.QueryDvsByUuidResponse `xml:"urn:vim25 QueryDvsByUuidResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryDvsByUuidBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryDvsByUuid(ctx context.Context, r soap.RoundTripper, req *types.QueryDvsByUuid) (*types.QueryDvsByUuidResponse, error) { + var reqBody, resBody QueryDvsByUuidBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryDvsCheckCompatibilityBody struct { + Req *types.QueryDvsCheckCompatibility `xml:"urn:vim25 QueryDvsCheckCompatibility,omitempty"` + Res *types.QueryDvsCheckCompatibilityResponse `xml:"urn:vim25 QueryDvsCheckCompatibilityResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryDvsCheckCompatibilityBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryDvsCheckCompatibility(ctx context.Context, r soap.RoundTripper, req *types.QueryDvsCheckCompatibility) (*types.QueryDvsCheckCompatibilityResponse, error) { + var reqBody, resBody QueryDvsCheckCompatibilityBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryDvsCompatibleHostSpecBody struct { + Req 
*types.QueryDvsCompatibleHostSpec `xml:"urn:vim25 QueryDvsCompatibleHostSpec,omitempty"` + Res *types.QueryDvsCompatibleHostSpecResponse `xml:"urn:vim25 QueryDvsCompatibleHostSpecResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryDvsCompatibleHostSpecBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryDvsCompatibleHostSpec(ctx context.Context, r soap.RoundTripper, req *types.QueryDvsCompatibleHostSpec) (*types.QueryDvsCompatibleHostSpecResponse, error) { + var reqBody, resBody QueryDvsCompatibleHostSpecBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryDvsConfigTargetBody struct { + Req *types.QueryDvsConfigTarget `xml:"urn:vim25 QueryDvsConfigTarget,omitempty"` + Res *types.QueryDvsConfigTargetResponse `xml:"urn:vim25 QueryDvsConfigTargetResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryDvsConfigTargetBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryDvsConfigTarget(ctx context.Context, r soap.RoundTripper, req *types.QueryDvsConfigTarget) (*types.QueryDvsConfigTargetResponse, error) { + var reqBody, resBody QueryDvsConfigTargetBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryDvsFeatureCapabilityBody struct { + Req *types.QueryDvsFeatureCapability `xml:"urn:vim25 QueryDvsFeatureCapability,omitempty"` + Res *types.QueryDvsFeatureCapabilityResponse `xml:"urn:vim25 QueryDvsFeatureCapabilityResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryDvsFeatureCapabilityBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryDvsFeatureCapability(ctx context.Context, r soap.RoundTripper, req *types.QueryDvsFeatureCapability) (*types.QueryDvsFeatureCapabilityResponse, error) { + var reqBody, resBody QueryDvsFeatureCapabilityBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryEventsBody struct { + Req *types.QueryEvents `xml:"urn:vim25 QueryEvents,omitempty"` + Res *types.QueryEventsResponse `xml:"urn:vim25 QueryEventsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryEventsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryEvents(ctx context.Context, r soap.RoundTripper, req *types.QueryEvents) (*types.QueryEventsResponse, error) { + var reqBody, resBody QueryEventsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryExpressionMetadataBody struct { + Req *types.QueryExpressionMetadata `xml:"urn:vim25 QueryExpressionMetadata,omitempty"` + Res *types.QueryExpressionMetadataResponse `xml:"urn:vim25 QueryExpressionMetadataResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryExpressionMetadataBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryExpressionMetadata(ctx context.Context, r soap.RoundTripper, req *types.QueryExpressionMetadata) (*types.QueryExpressionMetadataResponse, error) { + var reqBody, resBody QueryExpressionMetadataBody + + reqBody.Req = req + + if err := 
r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryExtensionIpAllocationUsageBody struct { + Req *types.QueryExtensionIpAllocationUsage `xml:"urn:vim25 QueryExtensionIpAllocationUsage,omitempty"` + Res *types.QueryExtensionIpAllocationUsageResponse `xml:"urn:vim25 QueryExtensionIpAllocationUsageResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryExtensionIpAllocationUsageBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryExtensionIpAllocationUsage(ctx context.Context, r soap.RoundTripper, req *types.QueryExtensionIpAllocationUsage) (*types.QueryExtensionIpAllocationUsageResponse, error) { + var reqBody, resBody QueryExtensionIpAllocationUsageBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryFaultToleranceCompatibilityBody struct { + Req *types.QueryFaultToleranceCompatibility `xml:"urn:vim25 QueryFaultToleranceCompatibility,omitempty"` + Res *types.QueryFaultToleranceCompatibilityResponse `xml:"urn:vim25 QueryFaultToleranceCompatibilityResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryFaultToleranceCompatibilityBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryFaultToleranceCompatibility(ctx context.Context, r soap.RoundTripper, req *types.QueryFaultToleranceCompatibility) (*types.QueryFaultToleranceCompatibilityResponse, error) { + var reqBody, resBody QueryFaultToleranceCompatibilityBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryFaultToleranceCompatibilityExBody struct { + Req *types.QueryFaultToleranceCompatibilityEx `xml:"urn:vim25 QueryFaultToleranceCompatibilityEx,omitempty"` + Res *types.QueryFaultToleranceCompatibilityExResponse `xml:"urn:vim25 QueryFaultToleranceCompatibilityExResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryFaultToleranceCompatibilityExBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryFaultToleranceCompatibilityEx(ctx context.Context, r soap.RoundTripper, req *types.QueryFaultToleranceCompatibilityEx) (*types.QueryFaultToleranceCompatibilityExResponse, error) { + var reqBody, resBody QueryFaultToleranceCompatibilityExBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryFilterEntitiesBody struct { + Req *types.QueryFilterEntities `xml:"urn:vim25 QueryFilterEntities,omitempty"` + Res *types.QueryFilterEntitiesResponse `xml:"urn:vim25 QueryFilterEntitiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryFilterEntitiesBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryFilterEntities(ctx context.Context, r soap.RoundTripper, req *types.QueryFilterEntities) (*types.QueryFilterEntitiesResponse, error) { + var reqBody, resBody QueryFilterEntitiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryFilterInfoIdsBody struct { + Req *types.QueryFilterInfoIds `xml:"urn:vim25 QueryFilterInfoIds,omitempty"` + Res *types.QueryFilterInfoIdsResponse 
`xml:"urn:vim25 QueryFilterInfoIdsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryFilterInfoIdsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryFilterInfoIds(ctx context.Context, r soap.RoundTripper, req *types.QueryFilterInfoIds) (*types.QueryFilterInfoIdsResponse, error) { + var reqBody, resBody QueryFilterInfoIdsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryFilterListBody struct { + Req *types.QueryFilterList `xml:"urn:vim25 QueryFilterList,omitempty"` + Res *types.QueryFilterListResponse `xml:"urn:vim25 QueryFilterListResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryFilterListBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryFilterList(ctx context.Context, r soap.RoundTripper, req *types.QueryFilterList) (*types.QueryFilterListResponse, error) { + var reqBody, resBody QueryFilterListBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryFilterNameBody struct { + Req *types.QueryFilterName `xml:"urn:vim25 QueryFilterName,omitempty"` + Res *types.QueryFilterNameResponse `xml:"urn:vim25 QueryFilterNameResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryFilterNameBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryFilterName(ctx context.Context, r soap.RoundTripper, req *types.QueryFilterName) (*types.QueryFilterNameResponse, error) { + var reqBody, resBody QueryFilterNameBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryFirmwareConfigUploadURLBody struct { + Req *types.QueryFirmwareConfigUploadURL `xml:"urn:vim25 QueryFirmwareConfigUploadURL,omitempty"` + Res *types.QueryFirmwareConfigUploadURLResponse `xml:"urn:vim25 QueryFirmwareConfigUploadURLResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryFirmwareConfigUploadURLBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryFirmwareConfigUploadURL(ctx context.Context, r soap.RoundTripper, req *types.QueryFirmwareConfigUploadURL) (*types.QueryFirmwareConfigUploadURLResponse, error) { + var reqBody, resBody QueryFirmwareConfigUploadURLBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryHealthUpdateInfosBody struct { + Req *types.QueryHealthUpdateInfos `xml:"urn:vim25 QueryHealthUpdateInfos,omitempty"` + Res *types.QueryHealthUpdateInfosResponse `xml:"urn:vim25 QueryHealthUpdateInfosResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryHealthUpdateInfosBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryHealthUpdateInfos(ctx context.Context, r soap.RoundTripper, req *types.QueryHealthUpdateInfos) (*types.QueryHealthUpdateInfosResponse, error) { + var reqBody, resBody QueryHealthUpdateInfosBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryHealthUpdatesBody struct { + Req *types.QueryHealthUpdates 
`xml:"urn:vim25 QueryHealthUpdates,omitempty"` + Res *types.QueryHealthUpdatesResponse `xml:"urn:vim25 QueryHealthUpdatesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryHealthUpdatesBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryHealthUpdates(ctx context.Context, r soap.RoundTripper, req *types.QueryHealthUpdates) (*types.QueryHealthUpdatesResponse, error) { + var reqBody, resBody QueryHealthUpdatesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryHostConnectionInfoBody struct { + Req *types.QueryHostConnectionInfo `xml:"urn:vim25 QueryHostConnectionInfo,omitempty"` + Res *types.QueryHostConnectionInfoResponse `xml:"urn:vim25 QueryHostConnectionInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryHostConnectionInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryHostConnectionInfo(ctx context.Context, r soap.RoundTripper, req *types.QueryHostConnectionInfo) (*types.QueryHostConnectionInfoResponse, error) { + var reqBody, resBody QueryHostConnectionInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryHostPatch_TaskBody struct { + Req *types.QueryHostPatch_Task `xml:"urn:vim25 QueryHostPatch_Task,omitempty"` + Res *types.QueryHostPatch_TaskResponse `xml:"urn:vim25 QueryHostPatch_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.QueryHostPatch_Task) (*types.QueryHostPatch_TaskResponse, error) { + var reqBody, resBody QueryHostPatch_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryHostProfileMetadataBody struct { + Req *types.QueryHostProfileMetadata `xml:"urn:vim25 QueryHostProfileMetadata,omitempty"` + Res *types.QueryHostProfileMetadataResponse `xml:"urn:vim25 QueryHostProfileMetadataResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryHostProfileMetadataBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryHostProfileMetadata(ctx context.Context, r soap.RoundTripper, req *types.QueryHostProfileMetadata) (*types.QueryHostProfileMetadataResponse, error) { + var reqBody, resBody QueryHostProfileMetadataBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryHostStatusBody struct { + Req *types.QueryHostStatus `xml:"urn:vim25 QueryHostStatus,omitempty"` + Res *types.QueryHostStatusResponse `xml:"urn:vim25 QueryHostStatusResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryHostStatusBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryHostStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryHostStatus) (*types.QueryHostStatusResponse, error) { + var reqBody, resBody QueryHostStatusBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return 
resBody.Res, nil +} + +type QueryIORMConfigOptionBody struct { + Req *types.QueryIORMConfigOption `xml:"urn:vim25 QueryIORMConfigOption,omitempty"` + Res *types.QueryIORMConfigOptionResponse `xml:"urn:vim25 QueryIORMConfigOptionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryIORMConfigOptionBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryIORMConfigOption(ctx context.Context, r soap.RoundTripper, req *types.QueryIORMConfigOption) (*types.QueryIORMConfigOptionResponse, error) { + var reqBody, resBody QueryIORMConfigOptionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryIPAllocationsBody struct { + Req *types.QueryIPAllocations `xml:"urn:vim25 QueryIPAllocations,omitempty"` + Res *types.QueryIPAllocationsResponse `xml:"urn:vim25 QueryIPAllocationsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryIPAllocationsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryIPAllocations(ctx context.Context, r soap.RoundTripper, req *types.QueryIPAllocations) (*types.QueryIPAllocationsResponse, error) { + var reqBody, resBody QueryIPAllocationsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryIoFilterInfoBody struct { + Req *types.QueryIoFilterInfo `xml:"urn:vim25 QueryIoFilterInfo,omitempty"` + Res *types.QueryIoFilterInfoResponse `xml:"urn:vim25 QueryIoFilterInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryIoFilterInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryIoFilterInfo(ctx context.Context, r soap.RoundTripper, req *types.QueryIoFilterInfo) (*types.QueryIoFilterInfoResponse, error) { + var reqBody, resBody QueryIoFilterInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryIoFilterIssuesBody struct { + Req *types.QueryIoFilterIssues `xml:"urn:vim25 QueryIoFilterIssues,omitempty"` + Res *types.QueryIoFilterIssuesResponse `xml:"urn:vim25 QueryIoFilterIssuesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryIoFilterIssuesBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryIoFilterIssues(ctx context.Context, r soap.RoundTripper, req *types.QueryIoFilterIssues) (*types.QueryIoFilterIssuesResponse, error) { + var reqBody, resBody QueryIoFilterIssuesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryIpPoolsBody struct { + Req *types.QueryIpPools `xml:"urn:vim25 QueryIpPools,omitempty"` + Res *types.QueryIpPoolsResponse `xml:"urn:vim25 QueryIpPoolsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryIpPoolsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryIpPools(ctx context.Context, r soap.RoundTripper, req *types.QueryIpPools) (*types.QueryIpPoolsResponse, error) { + var reqBody, resBody QueryIpPoolsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type 
QueryLicenseSourceAvailabilityBody struct { + Req *types.QueryLicenseSourceAvailability `xml:"urn:vim25 QueryLicenseSourceAvailability,omitempty"` + Res *types.QueryLicenseSourceAvailabilityResponse `xml:"urn:vim25 QueryLicenseSourceAvailabilityResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryLicenseSourceAvailabilityBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryLicenseSourceAvailability(ctx context.Context, r soap.RoundTripper, req *types.QueryLicenseSourceAvailability) (*types.QueryLicenseSourceAvailabilityResponse, error) { + var reqBody, resBody QueryLicenseSourceAvailabilityBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryLicenseUsageBody struct { + Req *types.QueryLicenseUsage `xml:"urn:vim25 QueryLicenseUsage,omitempty"` + Res *types.QueryLicenseUsageResponse `xml:"urn:vim25 QueryLicenseUsageResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryLicenseUsageBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryLicenseUsage(ctx context.Context, r soap.RoundTripper, req *types.QueryLicenseUsage) (*types.QueryLicenseUsageResponse, error) { + var reqBody, resBody QueryLicenseUsageBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryLockdownExceptionsBody struct { + Req *types.QueryLockdownExceptions `xml:"urn:vim25 QueryLockdownExceptions,omitempty"` + Res *types.QueryLockdownExceptionsResponse `xml:"urn:vim25 QueryLockdownExceptionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryLockdownExceptionsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryLockdownExceptions(ctx context.Context, r soap.RoundTripper, req *types.QueryLockdownExceptions) (*types.QueryLockdownExceptionsResponse, error) { + var reqBody, resBody QueryLockdownExceptionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryManagedByBody struct { + Req *types.QueryManagedBy `xml:"urn:vim25 QueryManagedBy,omitempty"` + Res *types.QueryManagedByResponse `xml:"urn:vim25 QueryManagedByResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryManagedByBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryManagedBy(ctx context.Context, r soap.RoundTripper, req *types.QueryManagedBy) (*types.QueryManagedByResponse, error) { + var reqBody, resBody QueryManagedByBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryMemoryOverheadBody struct { + Req *types.QueryMemoryOverhead `xml:"urn:vim25 QueryMemoryOverhead,omitempty"` + Res *types.QueryMemoryOverheadResponse `xml:"urn:vim25 QueryMemoryOverheadResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryMemoryOverheadBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryMemoryOverhead(ctx context.Context, r soap.RoundTripper, req *types.QueryMemoryOverhead) (*types.QueryMemoryOverheadResponse, error) { + var reqBody, resBody QueryMemoryOverheadBody + + reqBody.Req = 
req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryMemoryOverheadExBody struct { + Req *types.QueryMemoryOverheadEx `xml:"urn:vim25 QueryMemoryOverheadEx,omitempty"` + Res *types.QueryMemoryOverheadExResponse `xml:"urn:vim25 QueryMemoryOverheadExResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryMemoryOverheadExBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryMemoryOverheadEx(ctx context.Context, r soap.RoundTripper, req *types.QueryMemoryOverheadEx) (*types.QueryMemoryOverheadExResponse, error) { + var reqBody, resBody QueryMemoryOverheadExBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryMigrationDependenciesBody struct { + Req *types.QueryMigrationDependencies `xml:"urn:vim25 QueryMigrationDependencies,omitempty"` + Res *types.QueryMigrationDependenciesResponse `xml:"urn:vim25 QueryMigrationDependenciesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryMigrationDependenciesBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryMigrationDependencies(ctx context.Context, r soap.RoundTripper, req *types.QueryMigrationDependencies) (*types.QueryMigrationDependenciesResponse, error) { + var reqBody, resBody QueryMigrationDependenciesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryModulesBody struct { + Req *types.QueryModules `xml:"urn:vim25 QueryModules,omitempty"` + Res *types.QueryModulesResponse `xml:"urn:vim25 QueryModulesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryModulesBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryModules(ctx context.Context, r soap.RoundTripper, req *types.QueryModules) (*types.QueryModulesResponse, error) { + var reqBody, resBody QueryModulesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryMonitoredEntitiesBody struct { + Req *types.QueryMonitoredEntities `xml:"urn:vim25 QueryMonitoredEntities,omitempty"` + Res *types.QueryMonitoredEntitiesResponse `xml:"urn:vim25 QueryMonitoredEntitiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryMonitoredEntitiesBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryMonitoredEntities(ctx context.Context, r soap.RoundTripper, req *types.QueryMonitoredEntities) (*types.QueryMonitoredEntitiesResponse, error) { + var reqBody, resBody QueryMonitoredEntitiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryNFSUserBody struct { + Req *types.QueryNFSUser `xml:"urn:vim25 QueryNFSUser,omitempty"` + Res *types.QueryNFSUserResponse `xml:"urn:vim25 QueryNFSUserResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryNFSUserBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryNFSUser(ctx context.Context, r soap.RoundTripper, req *types.QueryNFSUser) (*types.QueryNFSUserResponse, error) { + var reqBody, resBody 
QueryNFSUserBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryNetConfigBody struct { + Req *types.QueryNetConfig `xml:"urn:vim25 QueryNetConfig,omitempty"` + Res *types.QueryNetConfigResponse `xml:"urn:vim25 QueryNetConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryNetConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryNetConfig(ctx context.Context, r soap.RoundTripper, req *types.QueryNetConfig) (*types.QueryNetConfigResponse, error) { + var reqBody, resBody QueryNetConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryNetworkHintBody struct { + Req *types.QueryNetworkHint `xml:"urn:vim25 QueryNetworkHint,omitempty"` + Res *types.QueryNetworkHintResponse `xml:"urn:vim25 QueryNetworkHintResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryNetworkHintBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryNetworkHint(ctx context.Context, r soap.RoundTripper, req *types.QueryNetworkHint) (*types.QueryNetworkHintResponse, error) { + var reqBody, resBody QueryNetworkHintBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryObjectsOnPhysicalVsanDiskBody struct { + Req *types.QueryObjectsOnPhysicalVsanDisk `xml:"urn:vim25 QueryObjectsOnPhysicalVsanDisk,omitempty"` + Res *types.QueryObjectsOnPhysicalVsanDiskResponse `xml:"urn:vim25 QueryObjectsOnPhysicalVsanDiskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryObjectsOnPhysicalVsanDiskBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryObjectsOnPhysicalVsanDisk(ctx context.Context, r soap.RoundTripper, req *types.QueryObjectsOnPhysicalVsanDisk) (*types.QueryObjectsOnPhysicalVsanDiskResponse, error) { + var reqBody, resBody QueryObjectsOnPhysicalVsanDiskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryOptionsBody struct { + Req *types.QueryOptions `xml:"urn:vim25 QueryOptions,omitempty"` + Res *types.QueryOptionsResponse `xml:"urn:vim25 QueryOptionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryOptionsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryOptions) (*types.QueryOptionsResponse, error) { + var reqBody, resBody QueryOptionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryPartitionCreateDescBody struct { + Req *types.QueryPartitionCreateDesc `xml:"urn:vim25 QueryPartitionCreateDesc,omitempty"` + Res *types.QueryPartitionCreateDescResponse `xml:"urn:vim25 QueryPartitionCreateDescResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryPartitionCreateDescBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryPartitionCreateDesc(ctx context.Context, r soap.RoundTripper, req *types.QueryPartitionCreateDesc) 
(*types.QueryPartitionCreateDescResponse, error) { + var reqBody, resBody QueryPartitionCreateDescBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryPartitionCreateOptionsBody struct { + Req *types.QueryPartitionCreateOptions `xml:"urn:vim25 QueryPartitionCreateOptions,omitempty"` + Res *types.QueryPartitionCreateOptionsResponse `xml:"urn:vim25 QueryPartitionCreateOptionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryPartitionCreateOptionsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryPartitionCreateOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryPartitionCreateOptions) (*types.QueryPartitionCreateOptionsResponse, error) { + var reqBody, resBody QueryPartitionCreateOptionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryPathSelectionPolicyOptionsBody struct { + Req *types.QueryPathSelectionPolicyOptions `xml:"urn:vim25 QueryPathSelectionPolicyOptions,omitempty"` + Res *types.QueryPathSelectionPolicyOptionsResponse `xml:"urn:vim25 QueryPathSelectionPolicyOptionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryPathSelectionPolicyOptionsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryPathSelectionPolicyOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryPathSelectionPolicyOptions) (*types.QueryPathSelectionPolicyOptionsResponse, error) { + var reqBody, resBody QueryPathSelectionPolicyOptionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryPerfBody struct { + Req *types.QueryPerf `xml:"urn:vim25 QueryPerf,omitempty"` + Res *types.QueryPerfResponse `xml:"urn:vim25 QueryPerfResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryPerfBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryPerf(ctx context.Context, r soap.RoundTripper, req *types.QueryPerf) (*types.QueryPerfResponse, error) { + var reqBody, resBody QueryPerfBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryPerfCompositeBody struct { + Req *types.QueryPerfComposite `xml:"urn:vim25 QueryPerfComposite,omitempty"` + Res *types.QueryPerfCompositeResponse `xml:"urn:vim25 QueryPerfCompositeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryPerfCompositeBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryPerfComposite(ctx context.Context, r soap.RoundTripper, req *types.QueryPerfComposite) (*types.QueryPerfCompositeResponse, error) { + var reqBody, resBody QueryPerfCompositeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryPerfCounterBody struct { + Req *types.QueryPerfCounter `xml:"urn:vim25 QueryPerfCounter,omitempty"` + Res *types.QueryPerfCounterResponse `xml:"urn:vim25 QueryPerfCounterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryPerfCounterBody) Fault() 
*soap.Fault { return b.Fault_ } + +func QueryPerfCounter(ctx context.Context, r soap.RoundTripper, req *types.QueryPerfCounter) (*types.QueryPerfCounterResponse, error) { + var reqBody, resBody QueryPerfCounterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryPerfCounterByLevelBody struct { + Req *types.QueryPerfCounterByLevel `xml:"urn:vim25 QueryPerfCounterByLevel,omitempty"` + Res *types.QueryPerfCounterByLevelResponse `xml:"urn:vim25 QueryPerfCounterByLevelResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryPerfCounterByLevelBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryPerfCounterByLevel(ctx context.Context, r soap.RoundTripper, req *types.QueryPerfCounterByLevel) (*types.QueryPerfCounterByLevelResponse, error) { + var reqBody, resBody QueryPerfCounterByLevelBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryPerfProviderSummaryBody struct { + Req *types.QueryPerfProviderSummary `xml:"urn:vim25 QueryPerfProviderSummary,omitempty"` + Res *types.QueryPerfProviderSummaryResponse `xml:"urn:vim25 QueryPerfProviderSummaryResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryPerfProviderSummaryBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryPerfProviderSummary(ctx context.Context, r soap.RoundTripper, req *types.QueryPerfProviderSummary) (*types.QueryPerfProviderSummaryResponse, error) { + var reqBody, resBody QueryPerfProviderSummaryBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryPhysicalVsanDisksBody struct { + Req *types.QueryPhysicalVsanDisks `xml:"urn:vim25 QueryPhysicalVsanDisks,omitempty"` + Res *types.QueryPhysicalVsanDisksResponse `xml:"urn:vim25 QueryPhysicalVsanDisksResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryPhysicalVsanDisksBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryPhysicalVsanDisks(ctx context.Context, r soap.RoundTripper, req *types.QueryPhysicalVsanDisks) (*types.QueryPhysicalVsanDisksResponse, error) { + var reqBody, resBody QueryPhysicalVsanDisksBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryPnicStatusBody struct { + Req *types.QueryPnicStatus `xml:"urn:vim25 QueryPnicStatus,omitempty"` + Res *types.QueryPnicStatusResponse `xml:"urn:vim25 QueryPnicStatusResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryPnicStatusBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryPnicStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryPnicStatus) (*types.QueryPnicStatusResponse, error) { + var reqBody, resBody QueryPnicStatusBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryPolicyMetadataBody struct { + Req *types.QueryPolicyMetadata `xml:"urn:vim25 QueryPolicyMetadata,omitempty"` + Res *types.QueryPolicyMetadataResponse `xml:"urn:vim25 QueryPolicyMetadataResponse,omitempty"` + Fault_ 
*soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryPolicyMetadataBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryPolicyMetadata(ctx context.Context, r soap.RoundTripper, req *types.QueryPolicyMetadata) (*types.QueryPolicyMetadataResponse, error) { + var reqBody, resBody QueryPolicyMetadataBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryProfileStructureBody struct { + Req *types.QueryProfileStructure `xml:"urn:vim25 QueryProfileStructure,omitempty"` + Res *types.QueryProfileStructureResponse `xml:"urn:vim25 QueryProfileStructureResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryProfileStructureBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryProfileStructure(ctx context.Context, r soap.RoundTripper, req *types.QueryProfileStructure) (*types.QueryProfileStructureResponse, error) { + var reqBody, resBody QueryProfileStructureBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryProviderListBody struct { + Req *types.QueryProviderList `xml:"urn:vim25 QueryProviderList,omitempty"` + Res *types.QueryProviderListResponse `xml:"urn:vim25 QueryProviderListResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryProviderListBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryProviderList(ctx context.Context, r soap.RoundTripper, req *types.QueryProviderList) (*types.QueryProviderListResponse, error) { + var reqBody, resBody QueryProviderListBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryProviderNameBody struct { + Req *types.QueryProviderName `xml:"urn:vim25 QueryProviderName,omitempty"` + Res *types.QueryProviderNameResponse `xml:"urn:vim25 QueryProviderNameResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryProviderNameBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryProviderName(ctx context.Context, r soap.RoundTripper, req *types.QueryProviderName) (*types.QueryProviderNameResponse, error) { + var reqBody, resBody QueryProviderNameBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryResourceConfigOptionBody struct { + Req *types.QueryResourceConfigOption `xml:"urn:vim25 QueryResourceConfigOption,omitempty"` + Res *types.QueryResourceConfigOptionResponse `xml:"urn:vim25 QueryResourceConfigOptionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryResourceConfigOptionBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryResourceConfigOption(ctx context.Context, r soap.RoundTripper, req *types.QueryResourceConfigOption) (*types.QueryResourceConfigOptionResponse, error) { + var reqBody, resBody QueryResourceConfigOptionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryServiceListBody struct { + Req *types.QueryServiceList `xml:"urn:vim25 QueryServiceList,omitempty"` + Res 
*types.QueryServiceListResponse `xml:"urn:vim25 QueryServiceListResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryServiceListBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryServiceList(ctx context.Context, r soap.RoundTripper, req *types.QueryServiceList) (*types.QueryServiceListResponse, error) { + var reqBody, resBody QueryServiceListBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryStorageArrayTypePolicyOptionsBody struct { + Req *types.QueryStorageArrayTypePolicyOptions `xml:"urn:vim25 QueryStorageArrayTypePolicyOptions,omitempty"` + Res *types.QueryStorageArrayTypePolicyOptionsResponse `xml:"urn:vim25 QueryStorageArrayTypePolicyOptionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryStorageArrayTypePolicyOptionsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryStorageArrayTypePolicyOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryStorageArrayTypePolicyOptions) (*types.QueryStorageArrayTypePolicyOptionsResponse, error) { + var reqBody, resBody QueryStorageArrayTypePolicyOptionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QuerySupportedFeaturesBody struct { + Req *types.QuerySupportedFeatures `xml:"urn:vim25 QuerySupportedFeatures,omitempty"` + Res *types.QuerySupportedFeaturesResponse `xml:"urn:vim25 QuerySupportedFeaturesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QuerySupportedFeaturesBody) Fault() *soap.Fault { return b.Fault_ } + +func QuerySupportedFeatures(ctx context.Context, r soap.RoundTripper, req *types.QuerySupportedFeatures) (*types.QuerySupportedFeaturesResponse, error) { + var reqBody, resBody QuerySupportedFeaturesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QuerySyncingVsanObjectsBody struct { + Req *types.QuerySyncingVsanObjects `xml:"urn:vim25 QuerySyncingVsanObjects,omitempty"` + Res *types.QuerySyncingVsanObjectsResponse `xml:"urn:vim25 QuerySyncingVsanObjectsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QuerySyncingVsanObjectsBody) Fault() *soap.Fault { return b.Fault_ } + +func QuerySyncingVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.QuerySyncingVsanObjects) (*types.QuerySyncingVsanObjectsResponse, error) { + var reqBody, resBody QuerySyncingVsanObjectsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QuerySystemUsersBody struct { + Req *types.QuerySystemUsers `xml:"urn:vim25 QuerySystemUsers,omitempty"` + Res *types.QuerySystemUsersResponse `xml:"urn:vim25 QuerySystemUsersResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QuerySystemUsersBody) Fault() *soap.Fault { return b.Fault_ } + +func QuerySystemUsers(ctx context.Context, r soap.RoundTripper, req *types.QuerySystemUsers) (*types.QuerySystemUsersResponse, error) { + var reqBody, resBody QuerySystemUsersBody + + reqBody.Req = req + + if err := 
r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryTargetCapabilitiesBody struct { + Req *types.QueryTargetCapabilities `xml:"urn:vim25 QueryTargetCapabilities,omitempty"` + Res *types.QueryTargetCapabilitiesResponse `xml:"urn:vim25 QueryTargetCapabilitiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryTargetCapabilitiesBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryTargetCapabilities(ctx context.Context, r soap.RoundTripper, req *types.QueryTargetCapabilities) (*types.QueryTargetCapabilitiesResponse, error) { + var reqBody, resBody QueryTargetCapabilitiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryTpmAttestationReportBody struct { + Req *types.QueryTpmAttestationReport `xml:"urn:vim25 QueryTpmAttestationReport,omitempty"` + Res *types.QueryTpmAttestationReportResponse `xml:"urn:vim25 QueryTpmAttestationReportResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryTpmAttestationReportBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryTpmAttestationReport(ctx context.Context, r soap.RoundTripper, req *types.QueryTpmAttestationReport) (*types.QueryTpmAttestationReportResponse, error) { + var reqBody, resBody QueryTpmAttestationReportBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryUnmonitoredHostsBody struct { + Req *types.QueryUnmonitoredHosts `xml:"urn:vim25 QueryUnmonitoredHosts,omitempty"` + Res *types.QueryUnmonitoredHostsResponse `xml:"urn:vim25 QueryUnmonitoredHostsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryUnmonitoredHostsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryUnmonitoredHosts(ctx context.Context, r soap.RoundTripper, req *types.QueryUnmonitoredHosts) (*types.QueryUnmonitoredHostsResponse, error) { + var reqBody, resBody QueryUnmonitoredHostsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryUnownedFilesBody struct { + Req *types.QueryUnownedFiles `xml:"urn:vim25 QueryUnownedFiles,omitempty"` + Res *types.QueryUnownedFilesResponse `xml:"urn:vim25 QueryUnownedFilesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryUnownedFilesBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryUnownedFiles(ctx context.Context, r soap.RoundTripper, req *types.QueryUnownedFiles) (*types.QueryUnownedFilesResponse, error) { + var reqBody, resBody QueryUnownedFilesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryUnresolvedVmfsVolumeBody struct { + Req *types.QueryUnresolvedVmfsVolume `xml:"urn:vim25 QueryUnresolvedVmfsVolume,omitempty"` + Res *types.QueryUnresolvedVmfsVolumeResponse `xml:"urn:vim25 QueryUnresolvedVmfsVolumeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryUnresolvedVmfsVolumeBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryUnresolvedVmfsVolume(ctx 
context.Context, r soap.RoundTripper, req *types.QueryUnresolvedVmfsVolume) (*types.QueryUnresolvedVmfsVolumeResponse, error) { + var reqBody, resBody QueryUnresolvedVmfsVolumeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryUnresolvedVmfsVolumesBody struct { + Req *types.QueryUnresolvedVmfsVolumes `xml:"urn:vim25 QueryUnresolvedVmfsVolumes,omitempty"` + Res *types.QueryUnresolvedVmfsVolumesResponse `xml:"urn:vim25 QueryUnresolvedVmfsVolumesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryUnresolvedVmfsVolumesBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryUnresolvedVmfsVolumes(ctx context.Context, r soap.RoundTripper, req *types.QueryUnresolvedVmfsVolumes) (*types.QueryUnresolvedVmfsVolumesResponse, error) { + var reqBody, resBody QueryUnresolvedVmfsVolumesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryUsedVlanIdInDvsBody struct { + Req *types.QueryUsedVlanIdInDvs `xml:"urn:vim25 QueryUsedVlanIdInDvs,omitempty"` + Res *types.QueryUsedVlanIdInDvsResponse `xml:"urn:vim25 QueryUsedVlanIdInDvsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryUsedVlanIdInDvsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryUsedVlanIdInDvs(ctx context.Context, r soap.RoundTripper, req *types.QueryUsedVlanIdInDvs) (*types.QueryUsedVlanIdInDvsResponse, error) { + var reqBody, resBody QueryUsedVlanIdInDvsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryVMotionCompatibilityBody struct { + Req *types.QueryVMotionCompatibility `xml:"urn:vim25 QueryVMotionCompatibility,omitempty"` + Res *types.QueryVMotionCompatibilityResponse `xml:"urn:vim25 QueryVMotionCompatibilityResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryVMotionCompatibilityBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryVMotionCompatibility(ctx context.Context, r soap.RoundTripper, req *types.QueryVMotionCompatibility) (*types.QueryVMotionCompatibilityResponse, error) { + var reqBody, resBody QueryVMotionCompatibilityBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryVMotionCompatibilityEx_TaskBody struct { + Req *types.QueryVMotionCompatibilityEx_Task `xml:"urn:vim25 QueryVMotionCompatibilityEx_Task,omitempty"` + Res *types.QueryVMotionCompatibilityEx_TaskResponse `xml:"urn:vim25 QueryVMotionCompatibilityEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryVMotionCompatibilityEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryVMotionCompatibilityEx_Task(ctx context.Context, r soap.RoundTripper, req *types.QueryVMotionCompatibilityEx_Task) (*types.QueryVMotionCompatibilityEx_TaskResponse, error) { + var reqBody, resBody QueryVMotionCompatibilityEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryVirtualDiskFragmentationBody struct { + Req 
*types.QueryVirtualDiskFragmentation `xml:"urn:vim25 QueryVirtualDiskFragmentation,omitempty"` + Res *types.QueryVirtualDiskFragmentationResponse `xml:"urn:vim25 QueryVirtualDiskFragmentationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryVirtualDiskFragmentationBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryVirtualDiskFragmentation(ctx context.Context, r soap.RoundTripper, req *types.QueryVirtualDiskFragmentation) (*types.QueryVirtualDiskFragmentationResponse, error) { + var reqBody, resBody QueryVirtualDiskFragmentationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryVirtualDiskGeometryBody struct { + Req *types.QueryVirtualDiskGeometry `xml:"urn:vim25 QueryVirtualDiskGeometry,omitempty"` + Res *types.QueryVirtualDiskGeometryResponse `xml:"urn:vim25 QueryVirtualDiskGeometryResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryVirtualDiskGeometryBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryVirtualDiskGeometry(ctx context.Context, r soap.RoundTripper, req *types.QueryVirtualDiskGeometry) (*types.QueryVirtualDiskGeometryResponse, error) { + var reqBody, resBody QueryVirtualDiskGeometryBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryVirtualDiskUuidBody struct { + Req *types.QueryVirtualDiskUuid `xml:"urn:vim25 QueryVirtualDiskUuid,omitempty"` + Res *types.QueryVirtualDiskUuidResponse `xml:"urn:vim25 QueryVirtualDiskUuidResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryVirtualDiskUuidBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryVirtualDiskUuid(ctx context.Context, r soap.RoundTripper, req *types.QueryVirtualDiskUuid) (*types.QueryVirtualDiskUuidResponse, error) { + var reqBody, resBody QueryVirtualDiskUuidBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryVmfsConfigOptionBody struct { + Req *types.QueryVmfsConfigOption `xml:"urn:vim25 QueryVmfsConfigOption,omitempty"` + Res *types.QueryVmfsConfigOptionResponse `xml:"urn:vim25 QueryVmfsConfigOptionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryVmfsConfigOptionBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryVmfsConfigOption(ctx context.Context, r soap.RoundTripper, req *types.QueryVmfsConfigOption) (*types.QueryVmfsConfigOptionResponse, error) { + var reqBody, resBody QueryVmfsConfigOptionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryVmfsDatastoreCreateOptionsBody struct { + Req *types.QueryVmfsDatastoreCreateOptions `xml:"urn:vim25 QueryVmfsDatastoreCreateOptions,omitempty"` + Res *types.QueryVmfsDatastoreCreateOptionsResponse `xml:"urn:vim25 QueryVmfsDatastoreCreateOptionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryVmfsDatastoreCreateOptionsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryVmfsDatastoreCreateOptions(ctx context.Context, r soap.RoundTripper, req 
*types.QueryVmfsDatastoreCreateOptions) (*types.QueryVmfsDatastoreCreateOptionsResponse, error) { + var reqBody, resBody QueryVmfsDatastoreCreateOptionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryVmfsDatastoreExpandOptionsBody struct { + Req *types.QueryVmfsDatastoreExpandOptions `xml:"urn:vim25 QueryVmfsDatastoreExpandOptions,omitempty"` + Res *types.QueryVmfsDatastoreExpandOptionsResponse `xml:"urn:vim25 QueryVmfsDatastoreExpandOptionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryVmfsDatastoreExpandOptionsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryVmfsDatastoreExpandOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryVmfsDatastoreExpandOptions) (*types.QueryVmfsDatastoreExpandOptionsResponse, error) { + var reqBody, resBody QueryVmfsDatastoreExpandOptionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryVmfsDatastoreExtendOptionsBody struct { + Req *types.QueryVmfsDatastoreExtendOptions `xml:"urn:vim25 QueryVmfsDatastoreExtendOptions,omitempty"` + Res *types.QueryVmfsDatastoreExtendOptionsResponse `xml:"urn:vim25 QueryVmfsDatastoreExtendOptionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryVmfsDatastoreExtendOptionsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryVmfsDatastoreExtendOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryVmfsDatastoreExtendOptions) (*types.QueryVmfsDatastoreExtendOptionsResponse, error) { + var reqBody, resBody QueryVmfsDatastoreExtendOptionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryVnicStatusBody struct { + Req *types.QueryVnicStatus `xml:"urn:vim25 QueryVnicStatus,omitempty"` + Res *types.QueryVnicStatusResponse `xml:"urn:vim25 QueryVnicStatusResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryVnicStatusBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryVnicStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryVnicStatus) (*types.QueryVnicStatusResponse, error) { + var reqBody, resBody QueryVnicStatusBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryVsanObjectUuidsByFilterBody struct { + Req *types.QueryVsanObjectUuidsByFilter `xml:"urn:vim25 QueryVsanObjectUuidsByFilter,omitempty"` + Res *types.QueryVsanObjectUuidsByFilterResponse `xml:"urn:vim25 QueryVsanObjectUuidsByFilterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryVsanObjectUuidsByFilterBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryVsanObjectUuidsByFilter(ctx context.Context, r soap.RoundTripper, req *types.QueryVsanObjectUuidsByFilter) (*types.QueryVsanObjectUuidsByFilterResponse, error) { + var reqBody, resBody QueryVsanObjectUuidsByFilterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryVsanObjectsBody struct { + Req *types.QueryVsanObjects `xml:"urn:vim25 
QueryVsanObjects,omitempty"` + Res *types.QueryVsanObjectsResponse `xml:"urn:vim25 QueryVsanObjectsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryVsanObjectsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.QueryVsanObjects) (*types.QueryVsanObjectsResponse, error) { + var reqBody, resBody QueryVsanObjectsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryVsanStatisticsBody struct { + Req *types.QueryVsanStatistics `xml:"urn:vim25 QueryVsanStatistics,omitempty"` + Res *types.QueryVsanStatisticsResponse `xml:"urn:vim25 QueryVsanStatisticsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryVsanStatisticsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryVsanStatistics(ctx context.Context, r soap.RoundTripper, req *types.QueryVsanStatistics) (*types.QueryVsanStatisticsResponse, error) { + var reqBody, resBody QueryVsanStatisticsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryVsanUpgradeStatusBody struct { + Req *types.QueryVsanUpgradeStatus `xml:"urn:vim25 QueryVsanUpgradeStatus,omitempty"` + Res *types.QueryVsanUpgradeStatusResponse `xml:"urn:vim25 QueryVsanUpgradeStatusResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryVsanUpgradeStatusBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryVsanUpgradeStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryVsanUpgradeStatus) (*types.QueryVsanUpgradeStatusResponse, error) { + var reqBody, resBody QueryVsanUpgradeStatusBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReadEnvironmentVariableInGuestBody struct { + Req *types.ReadEnvironmentVariableInGuest `xml:"urn:vim25 ReadEnvironmentVariableInGuest,omitempty"` + Res *types.ReadEnvironmentVariableInGuestResponse `xml:"urn:vim25 ReadEnvironmentVariableInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReadEnvironmentVariableInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func ReadEnvironmentVariableInGuest(ctx context.Context, r soap.RoundTripper, req *types.ReadEnvironmentVariableInGuest) (*types.ReadEnvironmentVariableInGuestResponse, error) { + var reqBody, resBody ReadEnvironmentVariableInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReadNextEventsBody struct { + Req *types.ReadNextEvents `xml:"urn:vim25 ReadNextEvents,omitempty"` + Res *types.ReadNextEventsResponse `xml:"urn:vim25 ReadNextEventsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReadNextEventsBody) Fault() *soap.Fault { return b.Fault_ } + +func ReadNextEvents(ctx context.Context, r soap.RoundTripper, req *types.ReadNextEvents) (*types.ReadNextEventsResponse, error) { + var reqBody, resBody ReadNextEventsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + 
return resBody.Res, nil +} + +type ReadNextTasksBody struct { + Req *types.ReadNextTasks `xml:"urn:vim25 ReadNextTasks,omitempty"` + Res *types.ReadNextTasksResponse `xml:"urn:vim25 ReadNextTasksResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReadNextTasksBody) Fault() *soap.Fault { return b.Fault_ } + +func ReadNextTasks(ctx context.Context, r soap.RoundTripper, req *types.ReadNextTasks) (*types.ReadNextTasksResponse, error) { + var reqBody, resBody ReadNextTasksBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReadPreviousEventsBody struct { + Req *types.ReadPreviousEvents `xml:"urn:vim25 ReadPreviousEvents,omitempty"` + Res *types.ReadPreviousEventsResponse `xml:"urn:vim25 ReadPreviousEventsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReadPreviousEventsBody) Fault() *soap.Fault { return b.Fault_ } + +func ReadPreviousEvents(ctx context.Context, r soap.RoundTripper, req *types.ReadPreviousEvents) (*types.ReadPreviousEventsResponse, error) { + var reqBody, resBody ReadPreviousEventsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReadPreviousTasksBody struct { + Req *types.ReadPreviousTasks `xml:"urn:vim25 ReadPreviousTasks,omitempty"` + Res *types.ReadPreviousTasksResponse `xml:"urn:vim25 ReadPreviousTasksResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReadPreviousTasksBody) Fault() *soap.Fault { return b.Fault_ } + +func ReadPreviousTasks(ctx context.Context, r soap.RoundTripper, req *types.ReadPreviousTasks) (*types.ReadPreviousTasksResponse, error) { + var reqBody, resBody ReadPreviousTasksBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RebootGuestBody struct { + Req *types.RebootGuest `xml:"urn:vim25 RebootGuest,omitempty"` + Res *types.RebootGuestResponse `xml:"urn:vim25 RebootGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RebootGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func RebootGuest(ctx context.Context, r soap.RoundTripper, req *types.RebootGuest) (*types.RebootGuestResponse, error) { + var reqBody, resBody RebootGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RebootHost_TaskBody struct { + Req *types.RebootHost_Task `xml:"urn:vim25 RebootHost_Task,omitempty"` + Res *types.RebootHost_TaskResponse `xml:"urn:vim25 RebootHost_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RebootHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RebootHost_Task(ctx context.Context, r soap.RoundTripper, req *types.RebootHost_Task) (*types.RebootHost_TaskResponse, error) { + var reqBody, resBody RebootHost_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RecommendDatastoresBody struct { + Req *types.RecommendDatastores `xml:"urn:vim25 RecommendDatastores,omitempty"` + Res 
*types.RecommendDatastoresResponse `xml:"urn:vim25 RecommendDatastoresResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RecommendDatastoresBody) Fault() *soap.Fault { return b.Fault_ } + +func RecommendDatastores(ctx context.Context, r soap.RoundTripper, req *types.RecommendDatastores) (*types.RecommendDatastoresResponse, error) { + var reqBody, resBody RecommendDatastoresBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RecommendHostsForVmBody struct { + Req *types.RecommendHostsForVm `xml:"urn:vim25 RecommendHostsForVm,omitempty"` + Res *types.RecommendHostsForVmResponse `xml:"urn:vim25 RecommendHostsForVmResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RecommendHostsForVmBody) Fault() *soap.Fault { return b.Fault_ } + +func RecommendHostsForVm(ctx context.Context, r soap.RoundTripper, req *types.RecommendHostsForVm) (*types.RecommendHostsForVmResponse, error) { + var reqBody, resBody RecommendHostsForVmBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RecommissionVsanNode_TaskBody struct { + Req *types.RecommissionVsanNode_Task `xml:"urn:vim25 RecommissionVsanNode_Task,omitempty"` + Res *types.RecommissionVsanNode_TaskResponse `xml:"urn:vim25 RecommissionVsanNode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RecommissionVsanNode_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RecommissionVsanNode_Task(ctx context.Context, r soap.RoundTripper, req *types.RecommissionVsanNode_Task) (*types.RecommissionVsanNode_TaskResponse, error) { + var reqBody, resBody RecommissionVsanNode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconcileDatastoreInventory_TaskBody struct { + Req *types.ReconcileDatastoreInventory_Task `xml:"urn:vim25 ReconcileDatastoreInventory_Task,omitempty"` + Res *types.ReconcileDatastoreInventory_TaskResponse `xml:"urn:vim25 ReconcileDatastoreInventory_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconcileDatastoreInventory_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconcileDatastoreInventory_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconcileDatastoreInventory_Task) (*types.ReconcileDatastoreInventory_TaskResponse, error) { + var reqBody, resBody ReconcileDatastoreInventory_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigVM_TaskBody struct { + Req *types.ReconfigVM_Task `xml:"urn:vim25 ReconfigVM_Task,omitempty"` + Res *types.ReconfigVM_TaskResponse `xml:"urn:vim25 ReconfigVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigVM_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigVM_Task) (*types.ReconfigVM_TaskResponse, error) { + var reqBody, resBody ReconfigVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, 
&resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigurationSatisfiableBody struct { + Req *types.ReconfigurationSatisfiable `xml:"urn:vim25 ReconfigurationSatisfiable,omitempty"` + Res *types.ReconfigurationSatisfiableResponse `xml:"urn:vim25 ReconfigurationSatisfiableResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigurationSatisfiableBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigurationSatisfiable(ctx context.Context, r soap.RoundTripper, req *types.ReconfigurationSatisfiable) (*types.ReconfigurationSatisfiableResponse, error) { + var reqBody, resBody ReconfigurationSatisfiableBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureAlarmBody struct { + Req *types.ReconfigureAlarm `xml:"urn:vim25 ReconfigureAlarm,omitempty"` + Res *types.ReconfigureAlarmResponse `xml:"urn:vim25 ReconfigureAlarmResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureAlarmBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureAlarm(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureAlarm) (*types.ReconfigureAlarmResponse, error) { + var reqBody, resBody ReconfigureAlarmBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureAutostartBody struct { + Req *types.ReconfigureAutostart `xml:"urn:vim25 ReconfigureAutostart,omitempty"` + Res *types.ReconfigureAutostartResponse `xml:"urn:vim25 ReconfigureAutostartResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureAutostartBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureAutostart(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureAutostart) (*types.ReconfigureAutostartResponse, error) { + var reqBody, resBody ReconfigureAutostartBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureCluster_TaskBody struct { + Req *types.ReconfigureCluster_Task `xml:"urn:vim25 ReconfigureCluster_Task,omitempty"` + Res *types.ReconfigureCluster_TaskResponse `xml:"urn:vim25 ReconfigureCluster_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureCluster_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureCluster_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureCluster_Task) (*types.ReconfigureCluster_TaskResponse, error) { + var reqBody, resBody ReconfigureCluster_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureComputeResource_TaskBody struct { + Req *types.ReconfigureComputeResource_Task `xml:"urn:vim25 ReconfigureComputeResource_Task,omitempty"` + Res *types.ReconfigureComputeResource_TaskResponse `xml:"urn:vim25 ReconfigureComputeResource_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureComputeResource_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureComputeResource_Task(ctx 
context.Context, r soap.RoundTripper, req *types.ReconfigureComputeResource_Task) (*types.ReconfigureComputeResource_TaskResponse, error) { + var reqBody, resBody ReconfigureComputeResource_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureDVPort_TaskBody struct { + Req *types.ReconfigureDVPort_Task `xml:"urn:vim25 ReconfigureDVPort_Task,omitempty"` + Res *types.ReconfigureDVPort_TaskResponse `xml:"urn:vim25 ReconfigureDVPort_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureDVPort_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureDVPort_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureDVPort_Task) (*types.ReconfigureDVPort_TaskResponse, error) { + var reqBody, resBody ReconfigureDVPort_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureDVPortgroup_TaskBody struct { + Req *types.ReconfigureDVPortgroup_Task `xml:"urn:vim25 ReconfigureDVPortgroup_Task,omitempty"` + Res *types.ReconfigureDVPortgroup_TaskResponse `xml:"urn:vim25 ReconfigureDVPortgroup_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureDVPortgroup_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureDVPortgroup_Task) (*types.ReconfigureDVPortgroup_TaskResponse, error) { + var reqBody, resBody ReconfigureDVPortgroup_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureDatacenter_TaskBody struct { + Req *types.ReconfigureDatacenter_Task `xml:"urn:vim25 ReconfigureDatacenter_Task,omitempty"` + Res *types.ReconfigureDatacenter_TaskResponse `xml:"urn:vim25 ReconfigureDatacenter_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureDatacenter_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureDatacenter_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureDatacenter_Task) (*types.ReconfigureDatacenter_TaskResponse, error) { + var reqBody, resBody ReconfigureDatacenter_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureDomObjectBody struct { + Req *types.ReconfigureDomObject `xml:"urn:vim25 ReconfigureDomObject,omitempty"` + Res *types.ReconfigureDomObjectResponse `xml:"urn:vim25 ReconfigureDomObjectResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureDomObjectBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureDomObject(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureDomObject) (*types.ReconfigureDomObjectResponse, error) { + var reqBody, resBody ReconfigureDomObjectBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureDvs_TaskBody struct { + Req *types.ReconfigureDvs_Task `xml:"urn:vim25 ReconfigureDvs_Task,omitempty"` + Res 
*types.ReconfigureDvs_TaskResponse `xml:"urn:vim25 ReconfigureDvs_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureDvs_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureDvs_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureDvs_Task) (*types.ReconfigureDvs_TaskResponse, error) { + var reqBody, resBody ReconfigureDvs_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureHostForDAS_TaskBody struct { + Req *types.ReconfigureHostForDAS_Task `xml:"urn:vim25 ReconfigureHostForDAS_Task,omitempty"` + Res *types.ReconfigureHostForDAS_TaskResponse `xml:"urn:vim25 ReconfigureHostForDAS_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureHostForDAS_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureHostForDAS_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureHostForDAS_Task) (*types.ReconfigureHostForDAS_TaskResponse, error) { + var reqBody, resBody ReconfigureHostForDAS_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureScheduledTaskBody struct { + Req *types.ReconfigureScheduledTask `xml:"urn:vim25 ReconfigureScheduledTask,omitempty"` + Res *types.ReconfigureScheduledTaskResponse `xml:"urn:vim25 ReconfigureScheduledTaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureScheduledTask) (*types.ReconfigureScheduledTaskResponse, error) { + var reqBody, resBody ReconfigureScheduledTaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureServiceConsoleReservationBody struct { + Req *types.ReconfigureServiceConsoleReservation `xml:"urn:vim25 ReconfigureServiceConsoleReservation,omitempty"` + Res *types.ReconfigureServiceConsoleReservationResponse `xml:"urn:vim25 ReconfigureServiceConsoleReservationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureServiceConsoleReservationBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureServiceConsoleReservation(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureServiceConsoleReservation) (*types.ReconfigureServiceConsoleReservationResponse, error) { + var reqBody, resBody ReconfigureServiceConsoleReservationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureSnmpAgentBody struct { + Req *types.ReconfigureSnmpAgent `xml:"urn:vim25 ReconfigureSnmpAgent,omitempty"` + Res *types.ReconfigureSnmpAgentResponse `xml:"urn:vim25 ReconfigureSnmpAgentResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureSnmpAgentBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureSnmpAgent(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureSnmpAgent) 
(*types.ReconfigureSnmpAgentResponse, error) { + var reqBody, resBody ReconfigureSnmpAgentBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconfigureVirtualMachineReservationBody struct { + Req *types.ReconfigureVirtualMachineReservation `xml:"urn:vim25 ReconfigureVirtualMachineReservation,omitempty"` + Res *types.ReconfigureVirtualMachineReservationResponse `xml:"urn:vim25 ReconfigureVirtualMachineReservationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconfigureVirtualMachineReservationBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconfigureVirtualMachineReservation(ctx context.Context, r soap.RoundTripper, req *types.ReconfigureVirtualMachineReservation) (*types.ReconfigureVirtualMachineReservationResponse, error) { + var reqBody, resBody ReconfigureVirtualMachineReservationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReconnectHost_TaskBody struct { + Req *types.ReconnectHost_Task `xml:"urn:vim25 ReconnectHost_Task,omitempty"` + Res *types.ReconnectHost_TaskResponse `xml:"urn:vim25 ReconnectHost_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReconnectHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ReconnectHost_Task(ctx context.Context, r soap.RoundTripper, req *types.ReconnectHost_Task) (*types.ReconnectHost_TaskResponse, error) { + var reqBody, resBody ReconnectHost_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RectifyDvsHost_TaskBody struct { + Req *types.RectifyDvsHost_Task `xml:"urn:vim25 RectifyDvsHost_Task,omitempty"` + Res *types.RectifyDvsHost_TaskResponse `xml:"urn:vim25 RectifyDvsHost_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RectifyDvsHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RectifyDvsHost_Task(ctx context.Context, r soap.RoundTripper, req *types.RectifyDvsHost_Task) (*types.RectifyDvsHost_TaskResponse, error) { + var reqBody, resBody RectifyDvsHost_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RectifyDvsOnHost_TaskBody struct { + Req *types.RectifyDvsOnHost_Task `xml:"urn:vim25 RectifyDvsOnHost_Task,omitempty"` + Res *types.RectifyDvsOnHost_TaskResponse `xml:"urn:vim25 RectifyDvsOnHost_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RectifyDvsOnHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RectifyDvsOnHost_Task(ctx context.Context, r soap.RoundTripper, req *types.RectifyDvsOnHost_Task) (*types.RectifyDvsOnHost_TaskResponse, error) { + var reqBody, resBody RectifyDvsOnHost_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshBody struct { + Req *types.Refresh `xml:"urn:vim25 Refresh,omitempty"` + Res *types.RefreshResponse `xml:"urn:vim25 RefreshResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + 
+func (b *RefreshBody) Fault() *soap.Fault { return b.Fault_ } + +func Refresh(ctx context.Context, r soap.RoundTripper, req *types.Refresh) (*types.RefreshResponse, error) { + var reqBody, resBody RefreshBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshDVPortStateBody struct { + Req *types.RefreshDVPortState `xml:"urn:vim25 RefreshDVPortState,omitempty"` + Res *types.RefreshDVPortStateResponse `xml:"urn:vim25 RefreshDVPortStateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshDVPortStateBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshDVPortState(ctx context.Context, r soap.RoundTripper, req *types.RefreshDVPortState) (*types.RefreshDVPortStateResponse, error) { + var reqBody, resBody RefreshDVPortStateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshDatastoreBody struct { + Req *types.RefreshDatastore `xml:"urn:vim25 RefreshDatastore,omitempty"` + Res *types.RefreshDatastoreResponse `xml:"urn:vim25 RefreshDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshDatastore(ctx context.Context, r soap.RoundTripper, req *types.RefreshDatastore) (*types.RefreshDatastoreResponse, error) { + var reqBody, resBody RefreshDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshDatastoreStorageInfoBody struct { + Req *types.RefreshDatastoreStorageInfo `xml:"urn:vim25 RefreshDatastoreStorageInfo,omitempty"` + Res *types.RefreshDatastoreStorageInfoResponse `xml:"urn:vim25 RefreshDatastoreStorageInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshDatastoreStorageInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshDatastoreStorageInfo(ctx context.Context, r soap.RoundTripper, req *types.RefreshDatastoreStorageInfo) (*types.RefreshDatastoreStorageInfoResponse, error) { + var reqBody, resBody RefreshDatastoreStorageInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshDateTimeSystemBody struct { + Req *types.RefreshDateTimeSystem `xml:"urn:vim25 RefreshDateTimeSystem,omitempty"` + Res *types.RefreshDateTimeSystemResponse `xml:"urn:vim25 RefreshDateTimeSystemResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshDateTimeSystemBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshDateTimeSystem(ctx context.Context, r soap.RoundTripper, req *types.RefreshDateTimeSystem) (*types.RefreshDateTimeSystemResponse, error) { + var reqBody, resBody RefreshDateTimeSystemBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshFirewallBody struct { + Req *types.RefreshFirewall `xml:"urn:vim25 RefreshFirewall,omitempty"` + Res *types.RefreshFirewallResponse `xml:"urn:vim25 RefreshFirewallResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshFirewallBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshFirewall(ctx context.Context, r soap.RoundTripper, req *types.RefreshFirewall) (*types.RefreshFirewallResponse, error) { + var reqBody, resBody RefreshFirewallBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshGraphicsManagerBody struct { + Req *types.RefreshGraphicsManager `xml:"urn:vim25 RefreshGraphicsManager,omitempty"` + Res *types.RefreshGraphicsManagerResponse `xml:"urn:vim25 RefreshGraphicsManagerResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshGraphicsManagerBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshGraphicsManager(ctx context.Context, r soap.RoundTripper, req *types.RefreshGraphicsManager) (*types.RefreshGraphicsManagerResponse, error) { + var reqBody, resBody RefreshGraphicsManagerBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshHealthStatusSystemBody struct { + Req *types.RefreshHealthStatusSystem `xml:"urn:vim25 RefreshHealthStatusSystem,omitempty"` + Res *types.RefreshHealthStatusSystemResponse `xml:"urn:vim25 RefreshHealthStatusSystemResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshHealthStatusSystemBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshHealthStatusSystem(ctx context.Context, r soap.RoundTripper, req *types.RefreshHealthStatusSystem) (*types.RefreshHealthStatusSystemResponse, error) { + var reqBody, resBody RefreshHealthStatusSystemBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshNetworkSystemBody struct { + Req *types.RefreshNetworkSystem `xml:"urn:vim25 RefreshNetworkSystem,omitempty"` + Res *types.RefreshNetworkSystemResponse `xml:"urn:vim25 RefreshNetworkSystemResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshNetworkSystemBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshNetworkSystem(ctx context.Context, r soap.RoundTripper, req *types.RefreshNetworkSystem) (*types.RefreshNetworkSystemResponse, error) { + var reqBody, resBody RefreshNetworkSystemBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshRecommendationBody struct { + Req *types.RefreshRecommendation `xml:"urn:vim25 RefreshRecommendation,omitempty"` + Res *types.RefreshRecommendationResponse `xml:"urn:vim25 RefreshRecommendationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshRecommendationBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshRecommendation(ctx context.Context, r soap.RoundTripper, req *types.RefreshRecommendation) (*types.RefreshRecommendationResponse, error) { + var reqBody, resBody RefreshRecommendationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshRuntimeBody struct { + Req *types.RefreshRuntime `xml:"urn:vim25 
RefreshRuntime,omitempty"` + Res *types.RefreshRuntimeResponse `xml:"urn:vim25 RefreshRuntimeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshRuntimeBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshRuntime(ctx context.Context, r soap.RoundTripper, req *types.RefreshRuntime) (*types.RefreshRuntimeResponse, error) { + var reqBody, resBody RefreshRuntimeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshServicesBody struct { + Req *types.RefreshServices `xml:"urn:vim25 RefreshServices,omitempty"` + Res *types.RefreshServicesResponse `xml:"urn:vim25 RefreshServicesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshServicesBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshServices(ctx context.Context, r soap.RoundTripper, req *types.RefreshServices) (*types.RefreshServicesResponse, error) { + var reqBody, resBody RefreshServicesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshStorageDrsRecommendationBody struct { + Req *types.RefreshStorageDrsRecommendation `xml:"urn:vim25 RefreshStorageDrsRecommendation,omitempty"` + Res *types.RefreshStorageDrsRecommendationResponse `xml:"urn:vim25 RefreshStorageDrsRecommendationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshStorageDrsRecommendationBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshStorageDrsRecommendation(ctx context.Context, r soap.RoundTripper, req *types.RefreshStorageDrsRecommendation) (*types.RefreshStorageDrsRecommendationResponse, error) { + var reqBody, resBody RefreshStorageDrsRecommendationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshStorageDrsRecommendationsForPod_TaskBody struct { + Req *types.RefreshStorageDrsRecommendationsForPod_Task `xml:"urn:vim25 RefreshStorageDrsRecommendationsForPod_Task,omitempty"` + Res *types.RefreshStorageDrsRecommendationsForPod_TaskResponse `xml:"urn:vim25 RefreshStorageDrsRecommendationsForPod_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshStorageDrsRecommendationsForPod_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshStorageDrsRecommendationsForPod_Task(ctx context.Context, r soap.RoundTripper, req *types.RefreshStorageDrsRecommendationsForPod_Task) (*types.RefreshStorageDrsRecommendationsForPod_TaskResponse, error) { + var reqBody, resBody RefreshStorageDrsRecommendationsForPod_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshStorageInfoBody struct { + Req *types.RefreshStorageInfo `xml:"urn:vim25 RefreshStorageInfo,omitempty"` + Res *types.RefreshStorageInfoResponse `xml:"urn:vim25 RefreshStorageInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshStorageInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshStorageInfo(ctx context.Context, r soap.RoundTripper, req *types.RefreshStorageInfo) 
(*types.RefreshStorageInfoResponse, error) { + var reqBody, resBody RefreshStorageInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RefreshStorageSystemBody struct { + Req *types.RefreshStorageSystem `xml:"urn:vim25 RefreshStorageSystem,omitempty"` + Res *types.RefreshStorageSystemResponse `xml:"urn:vim25 RefreshStorageSystemResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RefreshStorageSystemBody) Fault() *soap.Fault { return b.Fault_ } + +func RefreshStorageSystem(ctx context.Context, r soap.RoundTripper, req *types.RefreshStorageSystem) (*types.RefreshStorageSystemResponse, error) { + var reqBody, resBody RefreshStorageSystemBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RegisterChildVM_TaskBody struct { + Req *types.RegisterChildVM_Task `xml:"urn:vim25 RegisterChildVM_Task,omitempty"` + Res *types.RegisterChildVM_TaskResponse `xml:"urn:vim25 RegisterChildVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RegisterChildVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RegisterChildVM_Task(ctx context.Context, r soap.RoundTripper, req *types.RegisterChildVM_Task) (*types.RegisterChildVM_TaskResponse, error) { + var reqBody, resBody RegisterChildVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RegisterDiskBody struct { + Req *types.RegisterDisk `xml:"urn:vim25 RegisterDisk,omitempty"` + Res *types.RegisterDiskResponse `xml:"urn:vim25 RegisterDiskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RegisterDiskBody) Fault() *soap.Fault { return b.Fault_ } + +func RegisterDisk(ctx context.Context, r soap.RoundTripper, req *types.RegisterDisk) (*types.RegisterDiskResponse, error) { + var reqBody, resBody RegisterDiskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RegisterExtensionBody struct { + Req *types.RegisterExtension `xml:"urn:vim25 RegisterExtension,omitempty"` + Res *types.RegisterExtensionResponse `xml:"urn:vim25 RegisterExtensionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RegisterExtensionBody) Fault() *soap.Fault { return b.Fault_ } + +func RegisterExtension(ctx context.Context, r soap.RoundTripper, req *types.RegisterExtension) (*types.RegisterExtensionResponse, error) { + var reqBody, resBody RegisterExtensionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RegisterHealthUpdateProviderBody struct { + Req *types.RegisterHealthUpdateProvider `xml:"urn:vim25 RegisterHealthUpdateProvider,omitempty"` + Res *types.RegisterHealthUpdateProviderResponse `xml:"urn:vim25 RegisterHealthUpdateProviderResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RegisterHealthUpdateProviderBody) Fault() *soap.Fault { return b.Fault_ } + +func RegisterHealthUpdateProvider(ctx context.Context, r 
soap.RoundTripper, req *types.RegisterHealthUpdateProvider) (*types.RegisterHealthUpdateProviderResponse, error) { + var reqBody, resBody RegisterHealthUpdateProviderBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RegisterKmipServerBody struct { + Req *types.RegisterKmipServer `xml:"urn:vim25 RegisterKmipServer,omitempty"` + Res *types.RegisterKmipServerResponse `xml:"urn:vim25 RegisterKmipServerResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RegisterKmipServerBody) Fault() *soap.Fault { return b.Fault_ } + +func RegisterKmipServer(ctx context.Context, r soap.RoundTripper, req *types.RegisterKmipServer) (*types.RegisterKmipServerResponse, error) { + var reqBody, resBody RegisterKmipServerBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RegisterVM_TaskBody struct { + Req *types.RegisterVM_Task `xml:"urn:vim25 RegisterVM_Task,omitempty"` + Res *types.RegisterVM_TaskResponse `xml:"urn:vim25 RegisterVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RegisterVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RegisterVM_Task(ctx context.Context, r soap.RoundTripper, req *types.RegisterVM_Task) (*types.RegisterVM_TaskResponse, error) { + var reqBody, resBody RegisterVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReleaseCredentialsInGuestBody struct { + Req *types.ReleaseCredentialsInGuest `xml:"urn:vim25 ReleaseCredentialsInGuest,omitempty"` + Res *types.ReleaseCredentialsInGuestResponse `xml:"urn:vim25 ReleaseCredentialsInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReleaseCredentialsInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func ReleaseCredentialsInGuest(ctx context.Context, r soap.RoundTripper, req *types.ReleaseCredentialsInGuest) (*types.ReleaseCredentialsInGuestResponse, error) { + var reqBody, resBody ReleaseCredentialsInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReleaseIpAllocationBody struct { + Req *types.ReleaseIpAllocation `xml:"urn:vim25 ReleaseIpAllocation,omitempty"` + Res *types.ReleaseIpAllocationResponse `xml:"urn:vim25 ReleaseIpAllocationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReleaseIpAllocationBody) Fault() *soap.Fault { return b.Fault_ } + +func ReleaseIpAllocation(ctx context.Context, r soap.RoundTripper, req *types.ReleaseIpAllocation) (*types.ReleaseIpAllocationResponse, error) { + var reqBody, resBody ReleaseIpAllocationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReleaseManagedSnapshotBody struct { + Req *types.ReleaseManagedSnapshot `xml:"urn:vim25 ReleaseManagedSnapshot,omitempty"` + Res *types.ReleaseManagedSnapshotResponse `xml:"urn:vim25 ReleaseManagedSnapshotResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b 
*ReleaseManagedSnapshotBody) Fault() *soap.Fault { return b.Fault_ } + +func ReleaseManagedSnapshot(ctx context.Context, r soap.RoundTripper, req *types.ReleaseManagedSnapshot) (*types.ReleaseManagedSnapshotResponse, error) { + var reqBody, resBody ReleaseManagedSnapshotBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReloadBody struct { + Req *types.Reload `xml:"urn:vim25 Reload,omitempty"` + Res *types.ReloadResponse `xml:"urn:vim25 ReloadResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReloadBody) Fault() *soap.Fault { return b.Fault_ } + +func Reload(ctx context.Context, r soap.RoundTripper, req *types.Reload) (*types.ReloadResponse, error) { + var reqBody, resBody ReloadBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RelocateVM_TaskBody struct { + Req *types.RelocateVM_Task `xml:"urn:vim25 RelocateVM_Task,omitempty"` + Res *types.RelocateVM_TaskResponse `xml:"urn:vim25 RelocateVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RelocateVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RelocateVM_Task(ctx context.Context, r soap.RoundTripper, req *types.RelocateVM_Task) (*types.RelocateVM_TaskResponse, error) { + var reqBody, resBody RelocateVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RelocateVStorageObject_TaskBody struct { + Req *types.RelocateVStorageObject_Task `xml:"urn:vim25 RelocateVStorageObject_Task,omitempty"` + Res *types.RelocateVStorageObject_TaskResponse `xml:"urn:vim25 RelocateVStorageObject_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RelocateVStorageObject_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RelocateVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *types.RelocateVStorageObject_Task) (*types.RelocateVStorageObject_TaskResponse, error) { + var reqBody, resBody RelocateVStorageObject_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveAlarmBody struct { + Req *types.RemoveAlarm `xml:"urn:vim25 RemoveAlarm,omitempty"` + Res *types.RemoveAlarmResponse `xml:"urn:vim25 RemoveAlarmResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveAlarmBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveAlarm(ctx context.Context, r soap.RoundTripper, req *types.RemoveAlarm) (*types.RemoveAlarmResponse, error) { + var reqBody, resBody RemoveAlarmBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveAllSnapshots_TaskBody struct { + Req *types.RemoveAllSnapshots_Task `xml:"urn:vim25 RemoveAllSnapshots_Task,omitempty"` + Res *types.RemoveAllSnapshots_TaskResponse `xml:"urn:vim25 RemoveAllSnapshots_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveAllSnapshots_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func 
RemoveAllSnapshots_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveAllSnapshots_Task) (*types.RemoveAllSnapshots_TaskResponse, error) { + var reqBody, resBody RemoveAllSnapshots_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveAssignedLicenseBody struct { + Req *types.RemoveAssignedLicense `xml:"urn:vim25 RemoveAssignedLicense,omitempty"` + Res *types.RemoveAssignedLicenseResponse `xml:"urn:vim25 RemoveAssignedLicenseResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveAssignedLicenseBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveAssignedLicense(ctx context.Context, r soap.RoundTripper, req *types.RemoveAssignedLicense) (*types.RemoveAssignedLicenseResponse, error) { + var reqBody, resBody RemoveAssignedLicenseBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveAuthorizationRoleBody struct { + Req *types.RemoveAuthorizationRole `xml:"urn:vim25 RemoveAuthorizationRole,omitempty"` + Res *types.RemoveAuthorizationRoleResponse `xml:"urn:vim25 RemoveAuthorizationRoleResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveAuthorizationRoleBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *types.RemoveAuthorizationRole) (*types.RemoveAuthorizationRoleResponse, error) { + var reqBody, resBody RemoveAuthorizationRoleBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveCustomFieldDefBody struct { + Req *types.RemoveCustomFieldDef `xml:"urn:vim25 RemoveCustomFieldDef,omitempty"` + Res *types.RemoveCustomFieldDefResponse `xml:"urn:vim25 RemoveCustomFieldDefResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveCustomFieldDefBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.RemoveCustomFieldDef) (*types.RemoveCustomFieldDefResponse, error) { + var reqBody, resBody RemoveCustomFieldDefBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveDatastoreBody struct { + Req *types.RemoveDatastore `xml:"urn:vim25 RemoveDatastore,omitempty"` + Res *types.RemoveDatastoreResponse `xml:"urn:vim25 RemoveDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveDatastore(ctx context.Context, r soap.RoundTripper, req *types.RemoveDatastore) (*types.RemoveDatastoreResponse, error) { + var reqBody, resBody RemoveDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveDatastoreEx_TaskBody struct { + Req *types.RemoveDatastoreEx_Task `xml:"urn:vim25 RemoveDatastoreEx_Task,omitempty"` + Res *types.RemoveDatastoreEx_TaskResponse `xml:"urn:vim25 RemoveDatastoreEx_TaskResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveDatastoreEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveDatastoreEx_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveDatastoreEx_Task) (*types.RemoveDatastoreEx_TaskResponse, error) { + var reqBody, resBody RemoveDatastoreEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveDiskMapping_TaskBody struct { + Req *types.RemoveDiskMapping_Task `xml:"urn:vim25 RemoveDiskMapping_Task,omitempty"` + Res *types.RemoveDiskMapping_TaskResponse `xml:"urn:vim25 RemoveDiskMapping_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveDiskMapping_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveDiskMapping_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveDiskMapping_Task) (*types.RemoveDiskMapping_TaskResponse, error) { + var reqBody, resBody RemoveDiskMapping_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveDisk_TaskBody struct { + Req *types.RemoveDisk_Task `xml:"urn:vim25 RemoveDisk_Task,omitempty"` + Res *types.RemoveDisk_TaskResponse `xml:"urn:vim25 RemoveDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveDisk_Task) (*types.RemoveDisk_TaskResponse, error) { + var reqBody, resBody RemoveDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveEntityPermissionBody struct { + Req *types.RemoveEntityPermission `xml:"urn:vim25 RemoveEntityPermission,omitempty"` + Res *types.RemoveEntityPermissionResponse `xml:"urn:vim25 RemoveEntityPermissionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveEntityPermissionBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveEntityPermission(ctx context.Context, r soap.RoundTripper, req *types.RemoveEntityPermission) (*types.RemoveEntityPermissionResponse, error) { + var reqBody, resBody RemoveEntityPermissionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveFilterBody struct { + Req *types.RemoveFilter `xml:"urn:vim25 RemoveFilter,omitempty"` + Res *types.RemoveFilterResponse `xml:"urn:vim25 RemoveFilterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveFilterBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveFilter(ctx context.Context, r soap.RoundTripper, req *types.RemoveFilter) (*types.RemoveFilterResponse, error) { + var reqBody, resBody RemoveFilterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveFilterEntitiesBody struct { + Req *types.RemoveFilterEntities `xml:"urn:vim25 RemoveFilterEntities,omitempty"` + Res *types.RemoveFilterEntitiesResponse `xml:"urn:vim25 RemoveFilterEntitiesResponse,omitempty"` 
+ Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveFilterEntitiesBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveFilterEntities(ctx context.Context, r soap.RoundTripper, req *types.RemoveFilterEntities) (*types.RemoveFilterEntitiesResponse, error) { + var reqBody, resBody RemoveFilterEntitiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveGroupBody struct { + Req *types.RemoveGroup `xml:"urn:vim25 RemoveGroup,omitempty"` + Res *types.RemoveGroupResponse `xml:"urn:vim25 RemoveGroupResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveGroupBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveGroup(ctx context.Context, r soap.RoundTripper, req *types.RemoveGroup) (*types.RemoveGroupResponse, error) { + var reqBody, resBody RemoveGroupBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveGuestAliasBody struct { + Req *types.RemoveGuestAlias `xml:"urn:vim25 RemoveGuestAlias,omitempty"` + Res *types.RemoveGuestAliasResponse `xml:"urn:vim25 RemoveGuestAliasResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveGuestAliasBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveGuestAlias(ctx context.Context, r soap.RoundTripper, req *types.RemoveGuestAlias) (*types.RemoveGuestAliasResponse, error) { + var reqBody, resBody RemoveGuestAliasBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveGuestAliasByCertBody struct { + Req *types.RemoveGuestAliasByCert `xml:"urn:vim25 RemoveGuestAliasByCert,omitempty"` + Res *types.RemoveGuestAliasByCertResponse `xml:"urn:vim25 RemoveGuestAliasByCertResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveGuestAliasByCertBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveGuestAliasByCert(ctx context.Context, r soap.RoundTripper, req *types.RemoveGuestAliasByCert) (*types.RemoveGuestAliasByCertResponse, error) { + var reqBody, resBody RemoveGuestAliasByCertBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveInternetScsiSendTargetsBody struct { + Req *types.RemoveInternetScsiSendTargets `xml:"urn:vim25 RemoveInternetScsiSendTargets,omitempty"` + Res *types.RemoveInternetScsiSendTargetsResponse `xml:"urn:vim25 RemoveInternetScsiSendTargetsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveInternetScsiSendTargetsBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveInternetScsiSendTargets(ctx context.Context, r soap.RoundTripper, req *types.RemoveInternetScsiSendTargets) (*types.RemoveInternetScsiSendTargetsResponse, error) { + var reqBody, resBody RemoveInternetScsiSendTargetsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveInternetScsiStaticTargetsBody struct { + Req *types.RemoveInternetScsiStaticTargets `xml:"urn:vim25 
RemoveInternetScsiStaticTargets,omitempty"` + Res *types.RemoveInternetScsiStaticTargetsResponse `xml:"urn:vim25 RemoveInternetScsiStaticTargetsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveInternetScsiStaticTargetsBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveInternetScsiStaticTargets(ctx context.Context, r soap.RoundTripper, req *types.RemoveInternetScsiStaticTargets) (*types.RemoveInternetScsiStaticTargetsResponse, error) { + var reqBody, resBody RemoveInternetScsiStaticTargetsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveKeyBody struct { + Req *types.RemoveKey `xml:"urn:vim25 RemoveKey,omitempty"` + Res *types.RemoveKeyResponse `xml:"urn:vim25 RemoveKeyResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveKeyBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveKey(ctx context.Context, r soap.RoundTripper, req *types.RemoveKey) (*types.RemoveKeyResponse, error) { + var reqBody, resBody RemoveKeyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveKeysBody struct { + Req *types.RemoveKeys `xml:"urn:vim25 RemoveKeys,omitempty"` + Res *types.RemoveKeysResponse `xml:"urn:vim25 RemoveKeysResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveKeysBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveKeys(ctx context.Context, r soap.RoundTripper, req *types.RemoveKeys) (*types.RemoveKeysResponse, error) { + var reqBody, resBody RemoveKeysBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveKmipServerBody struct { + Req *types.RemoveKmipServer `xml:"urn:vim25 RemoveKmipServer,omitempty"` + Res *types.RemoveKmipServerResponse `xml:"urn:vim25 RemoveKmipServerResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveKmipServerBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveKmipServer(ctx context.Context, r soap.RoundTripper, req *types.RemoveKmipServer) (*types.RemoveKmipServerResponse, error) { + var reqBody, resBody RemoveKmipServerBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveLicenseBody struct { + Req *types.RemoveLicense `xml:"urn:vim25 RemoveLicense,omitempty"` + Res *types.RemoveLicenseResponse `xml:"urn:vim25 RemoveLicenseResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveLicenseBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveLicense(ctx context.Context, r soap.RoundTripper, req *types.RemoveLicense) (*types.RemoveLicenseResponse, error) { + var reqBody, resBody RemoveLicenseBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveLicenseLabelBody struct { + Req *types.RemoveLicenseLabel `xml:"urn:vim25 RemoveLicenseLabel,omitempty"` + Res *types.RemoveLicenseLabelResponse `xml:"urn:vim25 RemoveLicenseLabelResponse,omitempty"` + Fault_ 
*soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveLicenseLabelBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveLicenseLabel(ctx context.Context, r soap.RoundTripper, req *types.RemoveLicenseLabel) (*types.RemoveLicenseLabelResponse, error) { + var reqBody, resBody RemoveLicenseLabelBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveMonitoredEntitiesBody struct { + Req *types.RemoveMonitoredEntities `xml:"urn:vim25 RemoveMonitoredEntities,omitempty"` + Res *types.RemoveMonitoredEntitiesResponse `xml:"urn:vim25 RemoveMonitoredEntitiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveMonitoredEntitiesBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveMonitoredEntities(ctx context.Context, r soap.RoundTripper, req *types.RemoveMonitoredEntities) (*types.RemoveMonitoredEntitiesResponse, error) { + var reqBody, resBody RemoveMonitoredEntitiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveNetworkResourcePoolBody struct { + Req *types.RemoveNetworkResourcePool `xml:"urn:vim25 RemoveNetworkResourcePool,omitempty"` + Res *types.RemoveNetworkResourcePoolResponse `xml:"urn:vim25 RemoveNetworkResourcePoolResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveNetworkResourcePoolBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *types.RemoveNetworkResourcePool) (*types.RemoveNetworkResourcePoolResponse, error) { + var reqBody, resBody RemoveNetworkResourcePoolBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemovePerfIntervalBody struct { + Req *types.RemovePerfInterval `xml:"urn:vim25 RemovePerfInterval,omitempty"` + Res *types.RemovePerfIntervalResponse `xml:"urn:vim25 RemovePerfIntervalResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemovePerfIntervalBody) Fault() *soap.Fault { return b.Fault_ } + +func RemovePerfInterval(ctx context.Context, r soap.RoundTripper, req *types.RemovePerfInterval) (*types.RemovePerfIntervalResponse, error) { + var reqBody, resBody RemovePerfIntervalBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemovePortGroupBody struct { + Req *types.RemovePortGroup `xml:"urn:vim25 RemovePortGroup,omitempty"` + Res *types.RemovePortGroupResponse `xml:"urn:vim25 RemovePortGroupResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemovePortGroupBody) Fault() *soap.Fault { return b.Fault_ } + +func RemovePortGroup(ctx context.Context, r soap.RoundTripper, req *types.RemovePortGroup) (*types.RemovePortGroupResponse, error) { + var reqBody, resBody RemovePortGroupBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveScheduledTaskBody struct { + Req *types.RemoveScheduledTask `xml:"urn:vim25 RemoveScheduledTask,omitempty"` + Res 
*types.RemoveScheduledTaskResponse `xml:"urn:vim25 RemoveScheduledTaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.RemoveScheduledTask) (*types.RemoveScheduledTaskResponse, error) { + var reqBody, resBody RemoveScheduledTaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveServiceConsoleVirtualNicBody struct { + Req *types.RemoveServiceConsoleVirtualNic `xml:"urn:vim25 RemoveServiceConsoleVirtualNic,omitempty"` + Res *types.RemoveServiceConsoleVirtualNicResponse `xml:"urn:vim25 RemoveServiceConsoleVirtualNicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveServiceConsoleVirtualNicBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.RemoveServiceConsoleVirtualNic) (*types.RemoveServiceConsoleVirtualNicResponse, error) { + var reqBody, resBody RemoveServiceConsoleVirtualNicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveSmartCardTrustAnchorBody struct { + Req *types.RemoveSmartCardTrustAnchor `xml:"urn:vim25 RemoveSmartCardTrustAnchor,omitempty"` + Res *types.RemoveSmartCardTrustAnchorResponse `xml:"urn:vim25 RemoveSmartCardTrustAnchorResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveSmartCardTrustAnchorBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveSmartCardTrustAnchor(ctx context.Context, r soap.RoundTripper, req *types.RemoveSmartCardTrustAnchor) (*types.RemoveSmartCardTrustAnchorResponse, error) { + var reqBody, resBody RemoveSmartCardTrustAnchorBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveSmartCardTrustAnchorByFingerprintBody struct { + Req *types.RemoveSmartCardTrustAnchorByFingerprint `xml:"urn:vim25 RemoveSmartCardTrustAnchorByFingerprint,omitempty"` + Res *types.RemoveSmartCardTrustAnchorByFingerprintResponse `xml:"urn:vim25 RemoveSmartCardTrustAnchorByFingerprintResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveSmartCardTrustAnchorByFingerprintBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveSmartCardTrustAnchorByFingerprint(ctx context.Context, r soap.RoundTripper, req *types.RemoveSmartCardTrustAnchorByFingerprint) (*types.RemoveSmartCardTrustAnchorByFingerprintResponse, error) { + var reqBody, resBody RemoveSmartCardTrustAnchorByFingerprintBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveSnapshot_TaskBody struct { + Req *types.RemoveSnapshot_Task `xml:"urn:vim25 RemoveSnapshot_Task,omitempty"` + Res *types.RemoveSnapshot_TaskResponse `xml:"urn:vim25 RemoveSnapshot_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveSnapshot_Task(ctx 
context.Context, r soap.RoundTripper, req *types.RemoveSnapshot_Task) (*types.RemoveSnapshot_TaskResponse, error) { + var reqBody, resBody RemoveSnapshot_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveUserBody struct { + Req *types.RemoveUser `xml:"urn:vim25 RemoveUser,omitempty"` + Res *types.RemoveUserResponse `xml:"urn:vim25 RemoveUserResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveUserBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveUser(ctx context.Context, r soap.RoundTripper, req *types.RemoveUser) (*types.RemoveUserResponse, error) { + var reqBody, resBody RemoveUserBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveVirtualNicBody struct { + Req *types.RemoveVirtualNic `xml:"urn:vim25 RemoveVirtualNic,omitempty"` + Res *types.RemoveVirtualNicResponse `xml:"urn:vim25 RemoveVirtualNicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveVirtualNicBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.RemoveVirtualNic) (*types.RemoveVirtualNicResponse, error) { + var reqBody, resBody RemoveVirtualNicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveVirtualSwitchBody struct { + Req *types.RemoveVirtualSwitch `xml:"urn:vim25 RemoveVirtualSwitch,omitempty"` + Res *types.RemoveVirtualSwitchResponse `xml:"urn:vim25 RemoveVirtualSwitchResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveVirtualSwitchBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.RemoveVirtualSwitch) (*types.RemoveVirtualSwitchResponse, error) { + var reqBody, resBody RemoveVirtualSwitchBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RenameCustomFieldDefBody struct { + Req *types.RenameCustomFieldDef `xml:"urn:vim25 RenameCustomFieldDef,omitempty"` + Res *types.RenameCustomFieldDefResponse `xml:"urn:vim25 RenameCustomFieldDefResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RenameCustomFieldDefBody) Fault() *soap.Fault { return b.Fault_ } + +func RenameCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.RenameCustomFieldDef) (*types.RenameCustomFieldDefResponse, error) { + var reqBody, resBody RenameCustomFieldDefBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RenameCustomizationSpecBody struct { + Req *types.RenameCustomizationSpec `xml:"urn:vim25 RenameCustomizationSpec,omitempty"` + Res *types.RenameCustomizationSpecResponse `xml:"urn:vim25 RenameCustomizationSpecResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RenameCustomizationSpecBody) Fault() *soap.Fault { return b.Fault_ } + +func RenameCustomizationSpec(ctx context.Context, r 
soap.RoundTripper, req *types.RenameCustomizationSpec) (*types.RenameCustomizationSpecResponse, error) { + var reqBody, resBody RenameCustomizationSpecBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RenameDatastoreBody struct { + Req *types.RenameDatastore `xml:"urn:vim25 RenameDatastore,omitempty"` + Res *types.RenameDatastoreResponse `xml:"urn:vim25 RenameDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RenameDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func RenameDatastore(ctx context.Context, r soap.RoundTripper, req *types.RenameDatastore) (*types.RenameDatastoreResponse, error) { + var reqBody, resBody RenameDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RenameSnapshotBody struct { + Req *types.RenameSnapshot `xml:"urn:vim25 RenameSnapshot,omitempty"` + Res *types.RenameSnapshotResponse `xml:"urn:vim25 RenameSnapshotResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RenameSnapshotBody) Fault() *soap.Fault { return b.Fault_ } + +func RenameSnapshot(ctx context.Context, r soap.RoundTripper, req *types.RenameSnapshot) (*types.RenameSnapshotResponse, error) { + var reqBody, resBody RenameSnapshotBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RenameVStorageObjectBody struct { + Req *types.RenameVStorageObject `xml:"urn:vim25 RenameVStorageObject,omitempty"` + Res *types.RenameVStorageObjectResponse `xml:"urn:vim25 RenameVStorageObjectResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RenameVStorageObjectBody) Fault() *soap.Fault { return b.Fault_ } + +func RenameVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.RenameVStorageObject) (*types.RenameVStorageObjectResponse, error) { + var reqBody, resBody RenameVStorageObjectBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type Rename_TaskBody struct { + Req *types.Rename_Task `xml:"urn:vim25 Rename_Task,omitempty"` + Res *types.Rename_TaskResponse `xml:"urn:vim25 Rename_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *Rename_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func Rename_Task(ctx context.Context, r soap.RoundTripper, req *types.Rename_Task) (*types.Rename_TaskResponse, error) { + var reqBody, resBody Rename_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReplaceCACertificatesAndCRLsBody struct { + Req *types.ReplaceCACertificatesAndCRLs `xml:"urn:vim25 ReplaceCACertificatesAndCRLs,omitempty"` + Res *types.ReplaceCACertificatesAndCRLsResponse `xml:"urn:vim25 ReplaceCACertificatesAndCRLsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReplaceCACertificatesAndCRLsBody) Fault() *soap.Fault { return b.Fault_ } + +func ReplaceCACertificatesAndCRLs(ctx context.Context, r soap.RoundTripper, req 
*types.ReplaceCACertificatesAndCRLs) (*types.ReplaceCACertificatesAndCRLsResponse, error) { + var reqBody, resBody ReplaceCACertificatesAndCRLsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReplaceSmartCardTrustAnchorsBody struct { + Req *types.ReplaceSmartCardTrustAnchors `xml:"urn:vim25 ReplaceSmartCardTrustAnchors,omitempty"` + Res *types.ReplaceSmartCardTrustAnchorsResponse `xml:"urn:vim25 ReplaceSmartCardTrustAnchorsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReplaceSmartCardTrustAnchorsBody) Fault() *soap.Fault { return b.Fault_ } + +func ReplaceSmartCardTrustAnchors(ctx context.Context, r soap.RoundTripper, req *types.ReplaceSmartCardTrustAnchors) (*types.ReplaceSmartCardTrustAnchorsResponse, error) { + var reqBody, resBody ReplaceSmartCardTrustAnchorsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RescanAllHbaBody struct { + Req *types.RescanAllHba `xml:"urn:vim25 RescanAllHba,omitempty"` + Res *types.RescanAllHbaResponse `xml:"urn:vim25 RescanAllHbaResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RescanAllHbaBody) Fault() *soap.Fault { return b.Fault_ } + +func RescanAllHba(ctx context.Context, r soap.RoundTripper, req *types.RescanAllHba) (*types.RescanAllHbaResponse, error) { + var reqBody, resBody RescanAllHbaBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RescanHbaBody struct { + Req *types.RescanHba `xml:"urn:vim25 RescanHba,omitempty"` + Res *types.RescanHbaResponse `xml:"urn:vim25 RescanHbaResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RescanHbaBody) Fault() *soap.Fault { return b.Fault_ } + +func RescanHba(ctx context.Context, r soap.RoundTripper, req *types.RescanHba) (*types.RescanHbaResponse, error) { + var reqBody, resBody RescanHbaBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RescanVffsBody struct { + Req *types.RescanVffs `xml:"urn:vim25 RescanVffs,omitempty"` + Res *types.RescanVffsResponse `xml:"urn:vim25 RescanVffsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RescanVffsBody) Fault() *soap.Fault { return b.Fault_ } + +func RescanVffs(ctx context.Context, r soap.RoundTripper, req *types.RescanVffs) (*types.RescanVffsResponse, error) { + var reqBody, resBody RescanVffsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RescanVmfsBody struct { + Req *types.RescanVmfs `xml:"urn:vim25 RescanVmfs,omitempty"` + Res *types.RescanVmfsResponse `xml:"urn:vim25 RescanVmfsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RescanVmfsBody) Fault() *soap.Fault { return b.Fault_ } + +func RescanVmfs(ctx context.Context, r soap.RoundTripper, req *types.RescanVmfs) (*types.RescanVmfsResponse, error) { + var reqBody, resBody RescanVmfsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, 
&reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResetCollectorBody struct { + Req *types.ResetCollector `xml:"urn:vim25 ResetCollector,omitempty"` + Res *types.ResetCollectorResponse `xml:"urn:vim25 ResetCollectorResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResetCollectorBody) Fault() *soap.Fault { return b.Fault_ } + +func ResetCollector(ctx context.Context, r soap.RoundTripper, req *types.ResetCollector) (*types.ResetCollectorResponse, error) { + var reqBody, resBody ResetCollectorBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResetCounterLevelMappingBody struct { + Req *types.ResetCounterLevelMapping `xml:"urn:vim25 ResetCounterLevelMapping,omitempty"` + Res *types.ResetCounterLevelMappingResponse `xml:"urn:vim25 ResetCounterLevelMappingResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResetCounterLevelMappingBody) Fault() *soap.Fault { return b.Fault_ } + +func ResetCounterLevelMapping(ctx context.Context, r soap.RoundTripper, req *types.ResetCounterLevelMapping) (*types.ResetCounterLevelMappingResponse, error) { + var reqBody, resBody ResetCounterLevelMappingBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResetEntityPermissionsBody struct { + Req *types.ResetEntityPermissions `xml:"urn:vim25 ResetEntityPermissions,omitempty"` + Res *types.ResetEntityPermissionsResponse `xml:"urn:vim25 ResetEntityPermissionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResetEntityPermissionsBody) Fault() *soap.Fault { return b.Fault_ } + +func ResetEntityPermissions(ctx context.Context, r soap.RoundTripper, req *types.ResetEntityPermissions) (*types.ResetEntityPermissionsResponse, error) { + var reqBody, resBody ResetEntityPermissionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResetFirmwareToFactoryDefaultsBody struct { + Req *types.ResetFirmwareToFactoryDefaults `xml:"urn:vim25 ResetFirmwareToFactoryDefaults,omitempty"` + Res *types.ResetFirmwareToFactoryDefaultsResponse `xml:"urn:vim25 ResetFirmwareToFactoryDefaultsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResetFirmwareToFactoryDefaultsBody) Fault() *soap.Fault { return b.Fault_ } + +func ResetFirmwareToFactoryDefaults(ctx context.Context, r soap.RoundTripper, req *types.ResetFirmwareToFactoryDefaults) (*types.ResetFirmwareToFactoryDefaultsResponse, error) { + var reqBody, resBody ResetFirmwareToFactoryDefaultsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResetGuestInformationBody struct { + Req *types.ResetGuestInformation `xml:"urn:vim25 ResetGuestInformation,omitempty"` + Res *types.ResetGuestInformationResponse `xml:"urn:vim25 ResetGuestInformationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResetGuestInformationBody) Fault() *soap.Fault { return b.Fault_ } + +func ResetGuestInformation(ctx 
context.Context, r soap.RoundTripper, req *types.ResetGuestInformation) (*types.ResetGuestInformationResponse, error) { + var reqBody, resBody ResetGuestInformationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResetListViewBody struct { + Req *types.ResetListView `xml:"urn:vim25 ResetListView,omitempty"` + Res *types.ResetListViewResponse `xml:"urn:vim25 ResetListViewResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResetListViewBody) Fault() *soap.Fault { return b.Fault_ } + +func ResetListView(ctx context.Context, r soap.RoundTripper, req *types.ResetListView) (*types.ResetListViewResponse, error) { + var reqBody, resBody ResetListViewBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResetListViewFromViewBody struct { + Req *types.ResetListViewFromView `xml:"urn:vim25 ResetListViewFromView,omitempty"` + Res *types.ResetListViewFromViewResponse `xml:"urn:vim25 ResetListViewFromViewResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResetListViewFromViewBody) Fault() *soap.Fault { return b.Fault_ } + +func ResetListViewFromView(ctx context.Context, r soap.RoundTripper, req *types.ResetListViewFromView) (*types.ResetListViewFromViewResponse, error) { + var reqBody, resBody ResetListViewFromViewBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResetSystemHealthInfoBody struct { + Req *types.ResetSystemHealthInfo `xml:"urn:vim25 ResetSystemHealthInfo,omitempty"` + Res *types.ResetSystemHealthInfoResponse `xml:"urn:vim25 ResetSystemHealthInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResetSystemHealthInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func ResetSystemHealthInfo(ctx context.Context, r soap.RoundTripper, req *types.ResetSystemHealthInfo) (*types.ResetSystemHealthInfoResponse, error) { + var reqBody, resBody ResetSystemHealthInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResetVM_TaskBody struct { + Req *types.ResetVM_Task `xml:"urn:vim25 ResetVM_Task,omitempty"` + Res *types.ResetVM_TaskResponse `xml:"urn:vim25 ResetVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResetVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ResetVM_Task(ctx context.Context, r soap.RoundTripper, req *types.ResetVM_Task) (*types.ResetVM_TaskResponse, error) { + var reqBody, resBody ResetVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResignatureUnresolvedVmfsVolume_TaskBody struct { + Req *types.ResignatureUnresolvedVmfsVolume_Task `xml:"urn:vim25 ResignatureUnresolvedVmfsVolume_Task,omitempty"` + Res *types.ResignatureUnresolvedVmfsVolume_TaskResponse `xml:"urn:vim25 ResignatureUnresolvedVmfsVolume_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResignatureUnresolvedVmfsVolume_TaskBody) 
Fault() *soap.Fault { return b.Fault_ } + +func ResignatureUnresolvedVmfsVolume_Task(ctx context.Context, r soap.RoundTripper, req *types.ResignatureUnresolvedVmfsVolume_Task) (*types.ResignatureUnresolvedVmfsVolume_TaskResponse, error) { + var reqBody, resBody ResignatureUnresolvedVmfsVolume_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResolveInstallationErrorsOnCluster_TaskBody struct { + Req *types.ResolveInstallationErrorsOnCluster_Task `xml:"urn:vim25 ResolveInstallationErrorsOnCluster_Task,omitempty"` + Res *types.ResolveInstallationErrorsOnCluster_TaskResponse `xml:"urn:vim25 ResolveInstallationErrorsOnCluster_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResolveInstallationErrorsOnCluster_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ResolveInstallationErrorsOnCluster_Task(ctx context.Context, r soap.RoundTripper, req *types.ResolveInstallationErrorsOnCluster_Task) (*types.ResolveInstallationErrorsOnCluster_TaskResponse, error) { + var reqBody, resBody ResolveInstallationErrorsOnCluster_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResolveInstallationErrorsOnHost_TaskBody struct { + Req *types.ResolveInstallationErrorsOnHost_Task `xml:"urn:vim25 ResolveInstallationErrorsOnHost_Task,omitempty"` + Res *types.ResolveInstallationErrorsOnHost_TaskResponse `xml:"urn:vim25 ResolveInstallationErrorsOnHost_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResolveInstallationErrorsOnHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ResolveInstallationErrorsOnHost_Task(ctx context.Context, r soap.RoundTripper, req *types.ResolveInstallationErrorsOnHost_Task) (*types.ResolveInstallationErrorsOnHost_TaskResponse, error) { + var reqBody, resBody ResolveInstallationErrorsOnHost_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResolveMultipleUnresolvedVmfsVolumesBody struct { + Req *types.ResolveMultipleUnresolvedVmfsVolumes `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumes,omitempty"` + Res *types.ResolveMultipleUnresolvedVmfsVolumesResponse `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ResolveMultipleUnresolvedVmfsVolumesBody) Fault() *soap.Fault { return b.Fault_ } + +func ResolveMultipleUnresolvedVmfsVolumes(ctx context.Context, r soap.RoundTripper, req *types.ResolveMultipleUnresolvedVmfsVolumes) (*types.ResolveMultipleUnresolvedVmfsVolumesResponse, error) { + var reqBody, resBody ResolveMultipleUnresolvedVmfsVolumesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ResolveMultipleUnresolvedVmfsVolumesEx_TaskBody struct { + Req *types.ResolveMultipleUnresolvedVmfsVolumesEx_Task `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumesEx_Task,omitempty"` + Res *types.ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty"` +} + +func (b *ResolveMultipleUnresolvedVmfsVolumesEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ResolveMultipleUnresolvedVmfsVolumesEx_Task(ctx context.Context, r soap.RoundTripper, req *types.ResolveMultipleUnresolvedVmfsVolumesEx_Task) (*types.ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse, error) { + var reqBody, resBody ResolveMultipleUnresolvedVmfsVolumesEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RestartServiceBody struct { + Req *types.RestartService `xml:"urn:vim25 RestartService,omitempty"` + Res *types.RestartServiceResponse `xml:"urn:vim25 RestartServiceResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RestartServiceBody) Fault() *soap.Fault { return b.Fault_ } + +func RestartService(ctx context.Context, r soap.RoundTripper, req *types.RestartService) (*types.RestartServiceResponse, error) { + var reqBody, resBody RestartServiceBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RestartServiceConsoleVirtualNicBody struct { + Req *types.RestartServiceConsoleVirtualNic `xml:"urn:vim25 RestartServiceConsoleVirtualNic,omitempty"` + Res *types.RestartServiceConsoleVirtualNicResponse `xml:"urn:vim25 RestartServiceConsoleVirtualNicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RestartServiceConsoleVirtualNicBody) Fault() *soap.Fault { return b.Fault_ } + +func RestartServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.RestartServiceConsoleVirtualNic) (*types.RestartServiceConsoleVirtualNicResponse, error) { + var reqBody, resBody RestartServiceConsoleVirtualNicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RestoreFirmwareConfigurationBody struct { + Req *types.RestoreFirmwareConfiguration `xml:"urn:vim25 RestoreFirmwareConfiguration,omitempty"` + Res *types.RestoreFirmwareConfigurationResponse `xml:"urn:vim25 RestoreFirmwareConfigurationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RestoreFirmwareConfigurationBody) Fault() *soap.Fault { return b.Fault_ } + +func RestoreFirmwareConfiguration(ctx context.Context, r soap.RoundTripper, req *types.RestoreFirmwareConfiguration) (*types.RestoreFirmwareConfigurationResponse, error) { + var reqBody, resBody RestoreFirmwareConfigurationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveAllPermissionsBody struct { + Req *types.RetrieveAllPermissions `xml:"urn:vim25 RetrieveAllPermissions,omitempty"` + Res *types.RetrieveAllPermissionsResponse `xml:"urn:vim25 RetrieveAllPermissionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveAllPermissionsBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveAllPermissions(ctx context.Context, r soap.RoundTripper, req *types.RetrieveAllPermissions) (*types.RetrieveAllPermissionsResponse, error) { + var reqBody, resBody RetrieveAllPermissionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, 
&resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveAnswerFileBody struct { + Req *types.RetrieveAnswerFile `xml:"urn:vim25 RetrieveAnswerFile,omitempty"` + Res *types.RetrieveAnswerFileResponse `xml:"urn:vim25 RetrieveAnswerFileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveAnswerFileBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveAnswerFile(ctx context.Context, r soap.RoundTripper, req *types.RetrieveAnswerFile) (*types.RetrieveAnswerFileResponse, error) { + var reqBody, resBody RetrieveAnswerFileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveAnswerFileForProfileBody struct { + Req *types.RetrieveAnswerFileForProfile `xml:"urn:vim25 RetrieveAnswerFileForProfile,omitempty"` + Res *types.RetrieveAnswerFileForProfileResponse `xml:"urn:vim25 RetrieveAnswerFileForProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveAnswerFileForProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveAnswerFileForProfile(ctx context.Context, r soap.RoundTripper, req *types.RetrieveAnswerFileForProfile) (*types.RetrieveAnswerFileForProfileResponse, error) { + var reqBody, resBody RetrieveAnswerFileForProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveArgumentDescriptionBody struct { + Req *types.RetrieveArgumentDescription `xml:"urn:vim25 RetrieveArgumentDescription,omitempty"` + Res *types.RetrieveArgumentDescriptionResponse `xml:"urn:vim25 RetrieveArgumentDescriptionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveArgumentDescriptionBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveArgumentDescription(ctx context.Context, r soap.RoundTripper, req *types.RetrieveArgumentDescription) (*types.RetrieveArgumentDescriptionResponse, error) { + var reqBody, resBody RetrieveArgumentDescriptionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveClientCertBody struct { + Req *types.RetrieveClientCert `xml:"urn:vim25 RetrieveClientCert,omitempty"` + Res *types.RetrieveClientCertResponse `xml:"urn:vim25 RetrieveClientCertResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveClientCertBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveClientCert(ctx context.Context, r soap.RoundTripper, req *types.RetrieveClientCert) (*types.RetrieveClientCertResponse, error) { + var reqBody, resBody RetrieveClientCertBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveClientCsrBody struct { + Req *types.RetrieveClientCsr `xml:"urn:vim25 RetrieveClientCsr,omitempty"` + Res *types.RetrieveClientCsrResponse `xml:"urn:vim25 RetrieveClientCsrResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveClientCsrBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveClientCsr(ctx context.Context, r soap.RoundTripper, 
req *types.RetrieveClientCsr) (*types.RetrieveClientCsrResponse, error) { + var reqBody, resBody RetrieveClientCsrBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveDasAdvancedRuntimeInfoBody struct { + Req *types.RetrieveDasAdvancedRuntimeInfo `xml:"urn:vim25 RetrieveDasAdvancedRuntimeInfo,omitempty"` + Res *types.RetrieveDasAdvancedRuntimeInfoResponse `xml:"urn:vim25 RetrieveDasAdvancedRuntimeInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveDasAdvancedRuntimeInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveDasAdvancedRuntimeInfo(ctx context.Context, r soap.RoundTripper, req *types.RetrieveDasAdvancedRuntimeInfo) (*types.RetrieveDasAdvancedRuntimeInfoResponse, error) { + var reqBody, resBody RetrieveDasAdvancedRuntimeInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveDescriptionBody struct { + Req *types.RetrieveDescription `xml:"urn:vim25 RetrieveDescription,omitempty"` + Res *types.RetrieveDescriptionResponse `xml:"urn:vim25 RetrieveDescriptionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveDescriptionBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveDescription(ctx context.Context, r soap.RoundTripper, req *types.RetrieveDescription) (*types.RetrieveDescriptionResponse, error) { + var reqBody, resBody RetrieveDescriptionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveDiskPartitionInfoBody struct { + Req *types.RetrieveDiskPartitionInfo `xml:"urn:vim25 RetrieveDiskPartitionInfo,omitempty"` + Res *types.RetrieveDiskPartitionInfoResponse `xml:"urn:vim25 RetrieveDiskPartitionInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveDiskPartitionInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveDiskPartitionInfo(ctx context.Context, r soap.RoundTripper, req *types.RetrieveDiskPartitionInfo) (*types.RetrieveDiskPartitionInfoResponse, error) { + var reqBody, resBody RetrieveDiskPartitionInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveEntityPermissionsBody struct { + Req *types.RetrieveEntityPermissions `xml:"urn:vim25 RetrieveEntityPermissions,omitempty"` + Res *types.RetrieveEntityPermissionsResponse `xml:"urn:vim25 RetrieveEntityPermissionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveEntityPermissionsBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveEntityPermissions(ctx context.Context, r soap.RoundTripper, req *types.RetrieveEntityPermissions) (*types.RetrieveEntityPermissionsResponse, error) { + var reqBody, resBody RetrieveEntityPermissionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveEntityScheduledTaskBody struct { + Req *types.RetrieveEntityScheduledTask `xml:"urn:vim25 RetrieveEntityScheduledTask,omitempty"` + Res 
*types.RetrieveEntityScheduledTaskResponse `xml:"urn:vim25 RetrieveEntityScheduledTaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveEntityScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveEntityScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.RetrieveEntityScheduledTask) (*types.RetrieveEntityScheduledTaskResponse, error) { + var reqBody, resBody RetrieveEntityScheduledTaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveHardwareUptimeBody struct { + Req *types.RetrieveHardwareUptime `xml:"urn:vim25 RetrieveHardwareUptime,omitempty"` + Res *types.RetrieveHardwareUptimeResponse `xml:"urn:vim25 RetrieveHardwareUptimeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveHardwareUptimeBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveHardwareUptime(ctx context.Context, r soap.RoundTripper, req *types.RetrieveHardwareUptime) (*types.RetrieveHardwareUptimeResponse, error) { + var reqBody, resBody RetrieveHardwareUptimeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveHostAccessControlEntriesBody struct { + Req *types.RetrieveHostAccessControlEntries `xml:"urn:vim25 RetrieveHostAccessControlEntries,omitempty"` + Res *types.RetrieveHostAccessControlEntriesResponse `xml:"urn:vim25 RetrieveHostAccessControlEntriesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveHostAccessControlEntriesBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveHostAccessControlEntries(ctx context.Context, r soap.RoundTripper, req *types.RetrieveHostAccessControlEntries) (*types.RetrieveHostAccessControlEntriesResponse, error) { + var reqBody, resBody RetrieveHostAccessControlEntriesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveHostCustomizationsBody struct { + Req *types.RetrieveHostCustomizations `xml:"urn:vim25 RetrieveHostCustomizations,omitempty"` + Res *types.RetrieveHostCustomizationsResponse `xml:"urn:vim25 RetrieveHostCustomizationsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveHostCustomizationsBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveHostCustomizations(ctx context.Context, r soap.RoundTripper, req *types.RetrieveHostCustomizations) (*types.RetrieveHostCustomizationsResponse, error) { + var reqBody, resBody RetrieveHostCustomizationsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveHostCustomizationsForProfileBody struct { + Req *types.RetrieveHostCustomizationsForProfile `xml:"urn:vim25 RetrieveHostCustomizationsForProfile,omitempty"` + Res *types.RetrieveHostCustomizationsForProfileResponse `xml:"urn:vim25 RetrieveHostCustomizationsForProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveHostCustomizationsForProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func 
RetrieveHostCustomizationsForProfile(ctx context.Context, r soap.RoundTripper, req *types.RetrieveHostCustomizationsForProfile) (*types.RetrieveHostCustomizationsForProfileResponse, error) { + var reqBody, resBody RetrieveHostCustomizationsForProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveHostSpecificationBody struct { + Req *types.RetrieveHostSpecification `xml:"urn:vim25 RetrieveHostSpecification,omitempty"` + Res *types.RetrieveHostSpecificationResponse `xml:"urn:vim25 RetrieveHostSpecificationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveHostSpecificationBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveHostSpecification(ctx context.Context, r soap.RoundTripper, req *types.RetrieveHostSpecification) (*types.RetrieveHostSpecificationResponse, error) { + var reqBody, resBody RetrieveHostSpecificationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveKmipServerCertBody struct { + Req *types.RetrieveKmipServerCert `xml:"urn:vim25 RetrieveKmipServerCert,omitempty"` + Res *types.RetrieveKmipServerCertResponse `xml:"urn:vim25 RetrieveKmipServerCertResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveKmipServerCertBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveKmipServerCert(ctx context.Context, r soap.RoundTripper, req *types.RetrieveKmipServerCert) (*types.RetrieveKmipServerCertResponse, error) { + var reqBody, resBody RetrieveKmipServerCertBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveKmipServersStatus_TaskBody struct { + Req *types.RetrieveKmipServersStatus_Task `xml:"urn:vim25 RetrieveKmipServersStatus_Task,omitempty"` + Res *types.RetrieveKmipServersStatus_TaskResponse `xml:"urn:vim25 RetrieveKmipServersStatus_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveKmipServersStatus_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveKmipServersStatus_Task(ctx context.Context, r soap.RoundTripper, req *types.RetrieveKmipServersStatus_Task) (*types.RetrieveKmipServersStatus_TaskResponse, error) { + var reqBody, resBody RetrieveKmipServersStatus_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveObjectScheduledTaskBody struct { + Req *types.RetrieveObjectScheduledTask `xml:"urn:vim25 RetrieveObjectScheduledTask,omitempty"` + Res *types.RetrieveObjectScheduledTaskResponse `xml:"urn:vim25 RetrieveObjectScheduledTaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveObjectScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveObjectScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.RetrieveObjectScheduledTask) (*types.RetrieveObjectScheduledTaskResponse, error) { + var reqBody, resBody RetrieveObjectScheduledTaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, 
nil +} + +type RetrieveProductComponentsBody struct { + Req *types.RetrieveProductComponents `xml:"urn:vim25 RetrieveProductComponents,omitempty"` + Res *types.RetrieveProductComponentsResponse `xml:"urn:vim25 RetrieveProductComponentsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveProductComponentsBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveProductComponents(ctx context.Context, r soap.RoundTripper, req *types.RetrieveProductComponents) (*types.RetrieveProductComponentsResponse, error) { + var reqBody, resBody RetrieveProductComponentsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrievePropertiesBody struct { + Req *types.RetrieveProperties `xml:"urn:vim25 RetrieveProperties,omitempty"` + Res *types.RetrievePropertiesResponse `xml:"urn:vim25 RetrievePropertiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrievePropertiesBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveProperties(ctx context.Context, r soap.RoundTripper, req *types.RetrieveProperties) (*types.RetrievePropertiesResponse, error) { + var reqBody, resBody RetrievePropertiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrievePropertiesExBody struct { + Req *types.RetrievePropertiesEx `xml:"urn:vim25 RetrievePropertiesEx,omitempty"` + Res *types.RetrievePropertiesExResponse `xml:"urn:vim25 RetrievePropertiesExResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrievePropertiesExBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req *types.RetrievePropertiesEx) (*types.RetrievePropertiesExResponse, error) { + var reqBody, resBody RetrievePropertiesExBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveRolePermissionsBody struct { + Req *types.RetrieveRolePermissions `xml:"urn:vim25 RetrieveRolePermissions,omitempty"` + Res *types.RetrieveRolePermissionsResponse `xml:"urn:vim25 RetrieveRolePermissionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveRolePermissionsBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveRolePermissions(ctx context.Context, r soap.RoundTripper, req *types.RetrieveRolePermissions) (*types.RetrieveRolePermissionsResponse, error) { + var reqBody, resBody RetrieveRolePermissionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveSelfSignedClientCertBody struct { + Req *types.RetrieveSelfSignedClientCert `xml:"urn:vim25 RetrieveSelfSignedClientCert,omitempty"` + Res *types.RetrieveSelfSignedClientCertResponse `xml:"urn:vim25 RetrieveSelfSignedClientCertResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveSelfSignedClientCertBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveSelfSignedClientCert(ctx context.Context, r soap.RoundTripper, req *types.RetrieveSelfSignedClientCert) 
(*types.RetrieveSelfSignedClientCertResponse, error) { + var reqBody, resBody RetrieveSelfSignedClientCertBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveServiceContentBody struct { + Req *types.RetrieveServiceContent `xml:"urn:vim25 RetrieveServiceContent,omitempty"` + Res *types.RetrieveServiceContentResponse `xml:"urn:vim25 RetrieveServiceContentResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveServiceContentBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveServiceContent(ctx context.Context, r soap.RoundTripper, req *types.RetrieveServiceContent) (*types.RetrieveServiceContentResponse, error) { + var reqBody, resBody RetrieveServiceContentBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveSnapshotInfoBody struct { + Req *types.RetrieveSnapshotInfo `xml:"urn:vim25 RetrieveSnapshotInfo,omitempty"` + Res *types.RetrieveSnapshotInfoResponse `xml:"urn:vim25 RetrieveSnapshotInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveSnapshotInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveSnapshotInfo(ctx context.Context, r soap.RoundTripper, req *types.RetrieveSnapshotInfo) (*types.RetrieveSnapshotInfoResponse, error) { + var reqBody, resBody RetrieveSnapshotInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveUserGroupsBody struct { + Req *types.RetrieveUserGroups `xml:"urn:vim25 RetrieveUserGroups,omitempty"` + Res *types.RetrieveUserGroupsResponse `xml:"urn:vim25 RetrieveUserGroupsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveUserGroupsBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveUserGroups(ctx context.Context, r soap.RoundTripper, req *types.RetrieveUserGroups) (*types.RetrieveUserGroupsResponse, error) { + var reqBody, resBody RetrieveUserGroupsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveVStorageInfrastructureObjectPolicyBody struct { + Req *types.RetrieveVStorageInfrastructureObjectPolicy `xml:"urn:vim25 RetrieveVStorageInfrastructureObjectPolicy,omitempty"` + Res *types.RetrieveVStorageInfrastructureObjectPolicyResponse `xml:"urn:vim25 RetrieveVStorageInfrastructureObjectPolicyResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveVStorageInfrastructureObjectPolicyBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveVStorageInfrastructureObjectPolicy(ctx context.Context, r soap.RoundTripper, req *types.RetrieveVStorageInfrastructureObjectPolicy) (*types.RetrieveVStorageInfrastructureObjectPolicyResponse, error) { + var reqBody, resBody RetrieveVStorageInfrastructureObjectPolicyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveVStorageObjectBody struct { + Req *types.RetrieveVStorageObject `xml:"urn:vim25 RetrieveVStorageObject,omitempty"` + Res 
*types.RetrieveVStorageObjectResponse `xml:"urn:vim25 RetrieveVStorageObjectResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveVStorageObjectBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.RetrieveVStorageObject) (*types.RetrieveVStorageObjectResponse, error) { + var reqBody, resBody RetrieveVStorageObjectBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveVStorageObjectAssociationsBody struct { + Req *types.RetrieveVStorageObjectAssociations `xml:"urn:vim25 RetrieveVStorageObjectAssociations,omitempty"` + Res *types.RetrieveVStorageObjectAssociationsResponse `xml:"urn:vim25 RetrieveVStorageObjectAssociationsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveVStorageObjectAssociationsBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveVStorageObjectAssociations(ctx context.Context, r soap.RoundTripper, req *types.RetrieveVStorageObjectAssociations) (*types.RetrieveVStorageObjectAssociationsResponse, error) { + var reqBody, resBody RetrieveVStorageObjectAssociationsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveVStorageObjectStateBody struct { + Req *types.RetrieveVStorageObjectState `xml:"urn:vim25 RetrieveVStorageObjectState,omitempty"` + Res *types.RetrieveVStorageObjectStateResponse `xml:"urn:vim25 RetrieveVStorageObjectStateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveVStorageObjectStateBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveVStorageObjectState(ctx context.Context, r soap.RoundTripper, req *types.RetrieveVStorageObjectState) (*types.RetrieveVStorageObjectStateResponse, error) { + var reqBody, resBody RetrieveVStorageObjectStateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RevertToCurrentSnapshot_TaskBody struct { + Req *types.RevertToCurrentSnapshot_Task `xml:"urn:vim25 RevertToCurrentSnapshot_Task,omitempty"` + Res *types.RevertToCurrentSnapshot_TaskResponse `xml:"urn:vim25 RevertToCurrentSnapshot_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RevertToCurrentSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RevertToCurrentSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.RevertToCurrentSnapshot_Task) (*types.RevertToCurrentSnapshot_TaskResponse, error) { + var reqBody, resBody RevertToCurrentSnapshot_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RevertToSnapshot_TaskBody struct { + Req *types.RevertToSnapshot_Task `xml:"urn:vim25 RevertToSnapshot_Task,omitempty"` + Res *types.RevertToSnapshot_TaskResponse `xml:"urn:vim25 RevertToSnapshot_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RevertToSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RevertToSnapshot_Task(ctx context.Context, r 
soap.RoundTripper, req *types.RevertToSnapshot_Task) (*types.RevertToSnapshot_TaskResponse, error) { + var reqBody, resBody RevertToSnapshot_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RevertVStorageObject_TaskBody struct { + Req *types.RevertVStorageObject_Task `xml:"urn:vim25 RevertVStorageObject_Task,omitempty"` + Res *types.RevertVStorageObject_TaskResponse `xml:"urn:vim25 RevertVStorageObject_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RevertVStorageObject_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RevertVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *types.RevertVStorageObject_Task) (*types.RevertVStorageObject_TaskResponse, error) { + var reqBody, resBody RevertVStorageObject_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RewindCollectorBody struct { + Req *types.RewindCollector `xml:"urn:vim25 RewindCollector,omitempty"` + Res *types.RewindCollectorResponse `xml:"urn:vim25 RewindCollectorResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RewindCollectorBody) Fault() *soap.Fault { return b.Fault_ } + +func RewindCollector(ctx context.Context, r soap.RoundTripper, req *types.RewindCollector) (*types.RewindCollectorResponse, error) { + var reqBody, resBody RewindCollectorBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RunScheduledTaskBody struct { + Req *types.RunScheduledTask `xml:"urn:vim25 RunScheduledTask,omitempty"` + Res *types.RunScheduledTaskResponse `xml:"urn:vim25 RunScheduledTaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RunScheduledTaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RunScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.RunScheduledTask) (*types.RunScheduledTaskResponse, error) { + var reqBody, resBody RunScheduledTaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RunVsanPhysicalDiskDiagnosticsBody struct { + Req *types.RunVsanPhysicalDiskDiagnostics `xml:"urn:vim25 RunVsanPhysicalDiskDiagnostics,omitempty"` + Res *types.RunVsanPhysicalDiskDiagnosticsResponse `xml:"urn:vim25 RunVsanPhysicalDiskDiagnosticsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RunVsanPhysicalDiskDiagnosticsBody) Fault() *soap.Fault { return b.Fault_ } + +func RunVsanPhysicalDiskDiagnostics(ctx context.Context, r soap.RoundTripper, req *types.RunVsanPhysicalDiskDiagnostics) (*types.RunVsanPhysicalDiskDiagnosticsResponse, error) { + var reqBody, resBody RunVsanPhysicalDiskDiagnosticsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ScanHostPatchV2_TaskBody struct { + Req *types.ScanHostPatchV2_Task `xml:"urn:vim25 ScanHostPatchV2_Task,omitempty"` + Res *types.ScanHostPatchV2_TaskResponse `xml:"urn:vim25 ScanHostPatchV2_TaskResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ScanHostPatchV2_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ScanHostPatchV2_Task(ctx context.Context, r soap.RoundTripper, req *types.ScanHostPatchV2_Task) (*types.ScanHostPatchV2_TaskResponse, error) { + var reqBody, resBody ScanHostPatchV2_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ScanHostPatch_TaskBody struct { + Req *types.ScanHostPatch_Task `xml:"urn:vim25 ScanHostPatch_Task,omitempty"` + Res *types.ScanHostPatch_TaskResponse `xml:"urn:vim25 ScanHostPatch_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ScanHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ScanHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.ScanHostPatch_Task) (*types.ScanHostPatch_TaskResponse, error) { + var reqBody, resBody ScanHostPatch_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ScheduleReconcileDatastoreInventoryBody struct { + Req *types.ScheduleReconcileDatastoreInventory `xml:"urn:vim25 ScheduleReconcileDatastoreInventory,omitempty"` + Res *types.ScheduleReconcileDatastoreInventoryResponse `xml:"urn:vim25 ScheduleReconcileDatastoreInventoryResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ScheduleReconcileDatastoreInventoryBody) Fault() *soap.Fault { return b.Fault_ } + +func ScheduleReconcileDatastoreInventory(ctx context.Context, r soap.RoundTripper, req *types.ScheduleReconcileDatastoreInventory) (*types.ScheduleReconcileDatastoreInventoryResponse, error) { + var reqBody, resBody ScheduleReconcileDatastoreInventoryBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SearchDatastoreSubFolders_TaskBody struct { + Req *types.SearchDatastoreSubFolders_Task `xml:"urn:vim25 SearchDatastoreSubFolders_Task,omitempty"` + Res *types.SearchDatastoreSubFolders_TaskResponse `xml:"urn:vim25 SearchDatastoreSubFolders_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SearchDatastoreSubFolders_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func SearchDatastoreSubFolders_Task(ctx context.Context, r soap.RoundTripper, req *types.SearchDatastoreSubFolders_Task) (*types.SearchDatastoreSubFolders_TaskResponse, error) { + var reqBody, resBody SearchDatastoreSubFolders_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SearchDatastore_TaskBody struct { + Req *types.SearchDatastore_Task `xml:"urn:vim25 SearchDatastore_Task,omitempty"` + Res *types.SearchDatastore_TaskResponse `xml:"urn:vim25 SearchDatastore_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SearchDatastore_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func SearchDatastore_Task(ctx context.Context, r soap.RoundTripper, req *types.SearchDatastore_Task) (*types.SearchDatastore_TaskResponse, error) { + var reqBody, resBody SearchDatastore_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, 
&reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SelectActivePartitionBody struct { + Req *types.SelectActivePartition `xml:"urn:vim25 SelectActivePartition,omitempty"` + Res *types.SelectActivePartitionResponse `xml:"urn:vim25 SelectActivePartitionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SelectActivePartitionBody) Fault() *soap.Fault { return b.Fault_ } + +func SelectActivePartition(ctx context.Context, r soap.RoundTripper, req *types.SelectActivePartition) (*types.SelectActivePartitionResponse, error) { + var reqBody, resBody SelectActivePartitionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SelectVnicBody struct { + Req *types.SelectVnic `xml:"urn:vim25 SelectVnic,omitempty"` + Res *types.SelectVnicResponse `xml:"urn:vim25 SelectVnicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SelectVnicBody) Fault() *soap.Fault { return b.Fault_ } + +func SelectVnic(ctx context.Context, r soap.RoundTripper, req *types.SelectVnic) (*types.SelectVnicResponse, error) { + var reqBody, resBody SelectVnicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SelectVnicForNicTypeBody struct { + Req *types.SelectVnicForNicType `xml:"urn:vim25 SelectVnicForNicType,omitempty"` + Res *types.SelectVnicForNicTypeResponse `xml:"urn:vim25 SelectVnicForNicTypeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SelectVnicForNicTypeBody) Fault() *soap.Fault { return b.Fault_ } + +func SelectVnicForNicType(ctx context.Context, r soap.RoundTripper, req *types.SelectVnicForNicType) (*types.SelectVnicForNicTypeResponse, error) { + var reqBody, resBody SelectVnicForNicTypeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SendNMIBody struct { + Req *types.SendNMI `xml:"urn:vim25 SendNMI,omitempty"` + Res *types.SendNMIResponse `xml:"urn:vim25 SendNMIResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SendNMIBody) Fault() *soap.Fault { return b.Fault_ } + +func SendNMI(ctx context.Context, r soap.RoundTripper, req *types.SendNMI) (*types.SendNMIResponse, error) { + var reqBody, resBody SendNMIBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SendTestNotificationBody struct { + Req *types.SendTestNotification `xml:"urn:vim25 SendTestNotification,omitempty"` + Res *types.SendTestNotificationResponse `xml:"urn:vim25 SendTestNotificationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SendTestNotificationBody) Fault() *soap.Fault { return b.Fault_ } + +func SendTestNotification(ctx context.Context, r soap.RoundTripper, req *types.SendTestNotification) (*types.SendTestNotificationResponse, error) { + var reqBody, resBody SendTestNotificationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SessionIsActiveBody 
struct { + Req *types.SessionIsActive `xml:"urn:vim25 SessionIsActive,omitempty"` + Res *types.SessionIsActiveResponse `xml:"urn:vim25 SessionIsActiveResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SessionIsActiveBody) Fault() *soap.Fault { return b.Fault_ } + +func SessionIsActive(ctx context.Context, r soap.RoundTripper, req *types.SessionIsActive) (*types.SessionIsActiveResponse, error) { + var reqBody, resBody SessionIsActiveBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetCollectorPageSizeBody struct { + Req *types.SetCollectorPageSize `xml:"urn:vim25 SetCollectorPageSize,omitempty"` + Res *types.SetCollectorPageSizeResponse `xml:"urn:vim25 SetCollectorPageSizeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetCollectorPageSizeBody) Fault() *soap.Fault { return b.Fault_ } + +func SetCollectorPageSize(ctx context.Context, r soap.RoundTripper, req *types.SetCollectorPageSize) (*types.SetCollectorPageSizeResponse, error) { + var reqBody, resBody SetCollectorPageSizeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetDisplayTopologyBody struct { + Req *types.SetDisplayTopology `xml:"urn:vim25 SetDisplayTopology,omitempty"` + Res *types.SetDisplayTopologyResponse `xml:"urn:vim25 SetDisplayTopologyResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetDisplayTopologyBody) Fault() *soap.Fault { return b.Fault_ } + +func SetDisplayTopology(ctx context.Context, r soap.RoundTripper, req *types.SetDisplayTopology) (*types.SetDisplayTopologyResponse, error) { + var reqBody, resBody SetDisplayTopologyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetEntityPermissionsBody struct { + Req *types.SetEntityPermissions `xml:"urn:vim25 SetEntityPermissions,omitempty"` + Res *types.SetEntityPermissionsResponse `xml:"urn:vim25 SetEntityPermissionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetEntityPermissionsBody) Fault() *soap.Fault { return b.Fault_ } + +func SetEntityPermissions(ctx context.Context, r soap.RoundTripper, req *types.SetEntityPermissions) (*types.SetEntityPermissionsResponse, error) { + var reqBody, resBody SetEntityPermissionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetExtensionCertificateBody struct { + Req *types.SetExtensionCertificate `xml:"urn:vim25 SetExtensionCertificate,omitempty"` + Res *types.SetExtensionCertificateResponse `xml:"urn:vim25 SetExtensionCertificateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetExtensionCertificateBody) Fault() *soap.Fault { return b.Fault_ } + +func SetExtensionCertificate(ctx context.Context, r soap.RoundTripper, req *types.SetExtensionCertificate) (*types.SetExtensionCertificateResponse, error) { + var reqBody, resBody SetExtensionCertificateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + 
} + + return resBody.Res, nil +} + +type SetFieldBody struct { + Req *types.SetField `xml:"urn:vim25 SetField,omitempty"` + Res *types.SetFieldResponse `xml:"urn:vim25 SetFieldResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetFieldBody) Fault() *soap.Fault { return b.Fault_ } + +func SetField(ctx context.Context, r soap.RoundTripper, req *types.SetField) (*types.SetFieldResponse, error) { + var reqBody, resBody SetFieldBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetLicenseEditionBody struct { + Req *types.SetLicenseEdition `xml:"urn:vim25 SetLicenseEdition,omitempty"` + Res *types.SetLicenseEditionResponse `xml:"urn:vim25 SetLicenseEditionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetLicenseEditionBody) Fault() *soap.Fault { return b.Fault_ } + +func SetLicenseEdition(ctx context.Context, r soap.RoundTripper, req *types.SetLicenseEdition) (*types.SetLicenseEditionResponse, error) { + var reqBody, resBody SetLicenseEditionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetLocaleBody struct { + Req *types.SetLocale `xml:"urn:vim25 SetLocale,omitempty"` + Res *types.SetLocaleResponse `xml:"urn:vim25 SetLocaleResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetLocaleBody) Fault() *soap.Fault { return b.Fault_ } + +func SetLocale(ctx context.Context, r soap.RoundTripper, req *types.SetLocale) (*types.SetLocaleResponse, error) { + var reqBody, resBody SetLocaleBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetMultipathLunPolicyBody struct { + Req *types.SetMultipathLunPolicy `xml:"urn:vim25 SetMultipathLunPolicy,omitempty"` + Res *types.SetMultipathLunPolicyResponse `xml:"urn:vim25 SetMultipathLunPolicyResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetMultipathLunPolicyBody) Fault() *soap.Fault { return b.Fault_ } + +func SetMultipathLunPolicy(ctx context.Context, r soap.RoundTripper, req *types.SetMultipathLunPolicy) (*types.SetMultipathLunPolicyResponse, error) { + var reqBody, resBody SetMultipathLunPolicyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetNFSUserBody struct { + Req *types.SetNFSUser `xml:"urn:vim25 SetNFSUser,omitempty"` + Res *types.SetNFSUserResponse `xml:"urn:vim25 SetNFSUserResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetNFSUserBody) Fault() *soap.Fault { return b.Fault_ } + +func SetNFSUser(ctx context.Context, r soap.RoundTripper, req *types.SetNFSUser) (*types.SetNFSUserResponse, error) { + var reqBody, resBody SetNFSUserBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetPublicKeyBody struct { + Req *types.SetPublicKey `xml:"urn:vim25 SetPublicKey,omitempty"` + Res *types.SetPublicKeyResponse `xml:"urn:vim25 SetPublicKeyResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetPublicKeyBody) Fault() *soap.Fault { return b.Fault_ } + +func SetPublicKey(ctx context.Context, r soap.RoundTripper, req *types.SetPublicKey) (*types.SetPublicKeyResponse, error) { + var reqBody, resBody SetPublicKeyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetRegistryValueInGuestBody struct { + Req *types.SetRegistryValueInGuest `xml:"urn:vim25 SetRegistryValueInGuest,omitempty"` + Res *types.SetRegistryValueInGuestResponse `xml:"urn:vim25 SetRegistryValueInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetRegistryValueInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func SetRegistryValueInGuest(ctx context.Context, r soap.RoundTripper, req *types.SetRegistryValueInGuest) (*types.SetRegistryValueInGuestResponse, error) { + var reqBody, resBody SetRegistryValueInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetScreenResolutionBody struct { + Req *types.SetScreenResolution `xml:"urn:vim25 SetScreenResolution,omitempty"` + Res *types.SetScreenResolutionResponse `xml:"urn:vim25 SetScreenResolutionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetScreenResolutionBody) Fault() *soap.Fault { return b.Fault_ } + +func SetScreenResolution(ctx context.Context, r soap.RoundTripper, req *types.SetScreenResolution) (*types.SetScreenResolutionResponse, error) { + var reqBody, resBody SetScreenResolutionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetTaskDescriptionBody struct { + Req *types.SetTaskDescription `xml:"urn:vim25 SetTaskDescription,omitempty"` + Res *types.SetTaskDescriptionResponse `xml:"urn:vim25 SetTaskDescriptionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetTaskDescriptionBody) Fault() *soap.Fault { return b.Fault_ } + +func SetTaskDescription(ctx context.Context, r soap.RoundTripper, req *types.SetTaskDescription) (*types.SetTaskDescriptionResponse, error) { + var reqBody, resBody SetTaskDescriptionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetTaskStateBody struct { + Req *types.SetTaskState `xml:"urn:vim25 SetTaskState,omitempty"` + Res *types.SetTaskStateResponse `xml:"urn:vim25 SetTaskStateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetTaskStateBody) Fault() *soap.Fault { return b.Fault_ } + +func SetTaskState(ctx context.Context, r soap.RoundTripper, req *types.SetTaskState) (*types.SetTaskStateResponse, error) { + var reqBody, resBody SetTaskStateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetVStorageObjectControlFlagsBody struct { + Req *types.SetVStorageObjectControlFlags `xml:"urn:vim25 SetVStorageObjectControlFlags,omitempty"` + Res *types.SetVStorageObjectControlFlagsResponse `xml:"urn:vim25 
SetVStorageObjectControlFlagsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetVStorageObjectControlFlagsBody) Fault() *soap.Fault { return b.Fault_ } + +func SetVStorageObjectControlFlags(ctx context.Context, r soap.RoundTripper, req *types.SetVStorageObjectControlFlags) (*types.SetVStorageObjectControlFlagsResponse, error) { + var reqBody, resBody SetVStorageObjectControlFlagsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetVirtualDiskUuidBody struct { + Req *types.SetVirtualDiskUuid `xml:"urn:vim25 SetVirtualDiskUuid,omitempty"` + Res *types.SetVirtualDiskUuidResponse `xml:"urn:vim25 SetVirtualDiskUuidResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetVirtualDiskUuidBody) Fault() *soap.Fault { return b.Fault_ } + +func SetVirtualDiskUuid(ctx context.Context, r soap.RoundTripper, req *types.SetVirtualDiskUuid) (*types.SetVirtualDiskUuidResponse, error) { + var reqBody, resBody SetVirtualDiskUuidBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ShrinkVirtualDisk_TaskBody struct { + Req *types.ShrinkVirtualDisk_Task `xml:"urn:vim25 ShrinkVirtualDisk_Task,omitempty"` + Res *types.ShrinkVirtualDisk_TaskResponse `xml:"urn:vim25 ShrinkVirtualDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ShrinkVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ShrinkVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.ShrinkVirtualDisk_Task) (*types.ShrinkVirtualDisk_TaskResponse, error) { + var reqBody, resBody ShrinkVirtualDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ShutdownGuestBody struct { + Req *types.ShutdownGuest `xml:"urn:vim25 ShutdownGuest,omitempty"` + Res *types.ShutdownGuestResponse `xml:"urn:vim25 ShutdownGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ShutdownGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func ShutdownGuest(ctx context.Context, r soap.RoundTripper, req *types.ShutdownGuest) (*types.ShutdownGuestResponse, error) { + var reqBody, resBody ShutdownGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ShutdownHost_TaskBody struct { + Req *types.ShutdownHost_Task `xml:"urn:vim25 ShutdownHost_Task,omitempty"` + Res *types.ShutdownHost_TaskResponse `xml:"urn:vim25 ShutdownHost_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ShutdownHost_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ShutdownHost_Task(ctx context.Context, r soap.RoundTripper, req *types.ShutdownHost_Task) (*types.ShutdownHost_TaskResponse, error) { + var reqBody, resBody ShutdownHost_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type StageHostPatch_TaskBody struct { + Req *types.StageHostPatch_Task `xml:"urn:vim25 StageHostPatch_Task,omitempty"` + Res 
*types.StageHostPatch_TaskResponse `xml:"urn:vim25 StageHostPatch_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *StageHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func StageHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.StageHostPatch_Task) (*types.StageHostPatch_TaskResponse, error) { + var reqBody, resBody StageHostPatch_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type StampAllRulesWithUuid_TaskBody struct { + Req *types.StampAllRulesWithUuid_Task `xml:"urn:vim25 StampAllRulesWithUuid_Task,omitempty"` + Res *types.StampAllRulesWithUuid_TaskResponse `xml:"urn:vim25 StampAllRulesWithUuid_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *StampAllRulesWithUuid_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func StampAllRulesWithUuid_Task(ctx context.Context, r soap.RoundTripper, req *types.StampAllRulesWithUuid_Task) (*types.StampAllRulesWithUuid_TaskResponse, error) { + var reqBody, resBody StampAllRulesWithUuid_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type StandbyGuestBody struct { + Req *types.StandbyGuest `xml:"urn:vim25 StandbyGuest,omitempty"` + Res *types.StandbyGuestResponse `xml:"urn:vim25 StandbyGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *StandbyGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func StandbyGuest(ctx context.Context, r soap.RoundTripper, req *types.StandbyGuest) (*types.StandbyGuestResponse, error) { + var reqBody, resBody StandbyGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type StartProgramInGuestBody struct { + Req *types.StartProgramInGuest `xml:"urn:vim25 StartProgramInGuest,omitempty"` + Res *types.StartProgramInGuestResponse `xml:"urn:vim25 StartProgramInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *StartProgramInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func StartProgramInGuest(ctx context.Context, r soap.RoundTripper, req *types.StartProgramInGuest) (*types.StartProgramInGuestResponse, error) { + var reqBody, resBody StartProgramInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type StartRecording_TaskBody struct { + Req *types.StartRecording_Task `xml:"urn:vim25 StartRecording_Task,omitempty"` + Res *types.StartRecording_TaskResponse `xml:"urn:vim25 StartRecording_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *StartRecording_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func StartRecording_Task(ctx context.Context, r soap.RoundTripper, req *types.StartRecording_Task) (*types.StartRecording_TaskResponse, error) { + var reqBody, resBody StartRecording_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type StartReplaying_TaskBody struct { + Req *types.StartReplaying_Task 
`xml:"urn:vim25 StartReplaying_Task,omitempty"` + Res *types.StartReplaying_TaskResponse `xml:"urn:vim25 StartReplaying_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *StartReplaying_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func StartReplaying_Task(ctx context.Context, r soap.RoundTripper, req *types.StartReplaying_Task) (*types.StartReplaying_TaskResponse, error) { + var reqBody, resBody StartReplaying_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type StartServiceBody struct { + Req *types.StartService `xml:"urn:vim25 StartService,omitempty"` + Res *types.StartServiceResponse `xml:"urn:vim25 StartServiceResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *StartServiceBody) Fault() *soap.Fault { return b.Fault_ } + +func StartService(ctx context.Context, r soap.RoundTripper, req *types.StartService) (*types.StartServiceResponse, error) { + var reqBody, resBody StartServiceBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type StopRecording_TaskBody struct { + Req *types.StopRecording_Task `xml:"urn:vim25 StopRecording_Task,omitempty"` + Res *types.StopRecording_TaskResponse `xml:"urn:vim25 StopRecording_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *StopRecording_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func StopRecording_Task(ctx context.Context, r soap.RoundTripper, req *types.StopRecording_Task) (*types.StopRecording_TaskResponse, error) { + var reqBody, resBody StopRecording_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type StopReplaying_TaskBody struct { + Req *types.StopReplaying_Task `xml:"urn:vim25 StopReplaying_Task,omitempty"` + Res *types.StopReplaying_TaskResponse `xml:"urn:vim25 StopReplaying_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *StopReplaying_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func StopReplaying_Task(ctx context.Context, r soap.RoundTripper, req *types.StopReplaying_Task) (*types.StopReplaying_TaskResponse, error) { + var reqBody, resBody StopReplaying_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type StopServiceBody struct { + Req *types.StopService `xml:"urn:vim25 StopService,omitempty"` + Res *types.StopServiceResponse `xml:"urn:vim25 StopServiceResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *StopServiceBody) Fault() *soap.Fault { return b.Fault_ } + +func StopService(ctx context.Context, r soap.RoundTripper, req *types.StopService) (*types.StopServiceResponse, error) { + var reqBody, resBody StopServiceBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SuspendVApp_TaskBody struct { + Req *types.SuspendVApp_Task `xml:"urn:vim25 SuspendVApp_Task,omitempty"` + Res *types.SuspendVApp_TaskResponse `xml:"urn:vim25 
SuspendVApp_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SuspendVApp_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func SuspendVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.SuspendVApp_Task) (*types.SuspendVApp_TaskResponse, error) { + var reqBody, resBody SuspendVApp_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SuspendVM_TaskBody struct { + Req *types.SuspendVM_Task `xml:"urn:vim25 SuspendVM_Task,omitempty"` + Res *types.SuspendVM_TaskResponse `xml:"urn:vim25 SuspendVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SuspendVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func SuspendVM_Task(ctx context.Context, r soap.RoundTripper, req *types.SuspendVM_Task) (*types.SuspendVM_TaskResponse, error) { + var reqBody, resBody SuspendVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type TerminateFaultTolerantVM_TaskBody struct { + Req *types.TerminateFaultTolerantVM_Task `xml:"urn:vim25 TerminateFaultTolerantVM_Task,omitempty"` + Res *types.TerminateFaultTolerantVM_TaskResponse `xml:"urn:vim25 TerminateFaultTolerantVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *TerminateFaultTolerantVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func TerminateFaultTolerantVM_Task(ctx context.Context, r soap.RoundTripper, req *types.TerminateFaultTolerantVM_Task) (*types.TerminateFaultTolerantVM_TaskResponse, error) { + var reqBody, resBody TerminateFaultTolerantVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type TerminateProcessInGuestBody struct { + Req *types.TerminateProcessInGuest `xml:"urn:vim25 TerminateProcessInGuest,omitempty"` + Res *types.TerminateProcessInGuestResponse `xml:"urn:vim25 TerminateProcessInGuestResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *TerminateProcessInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func TerminateProcessInGuest(ctx context.Context, r soap.RoundTripper, req *types.TerminateProcessInGuest) (*types.TerminateProcessInGuestResponse, error) { + var reqBody, resBody TerminateProcessInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type TerminateSessionBody struct { + Req *types.TerminateSession `xml:"urn:vim25 TerminateSession,omitempty"` + Res *types.TerminateSessionResponse `xml:"urn:vim25 TerminateSessionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *TerminateSessionBody) Fault() *soap.Fault { return b.Fault_ } + +func TerminateSession(ctx context.Context, r soap.RoundTripper, req *types.TerminateSession) (*types.TerminateSessionResponse, error) { + var reqBody, resBody TerminateSessionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type TerminateVMBody struct { + Req *types.TerminateVM `xml:"urn:vim25 
TerminateVM,omitempty"` + Res *types.TerminateVMResponse `xml:"urn:vim25 TerminateVMResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *TerminateVMBody) Fault() *soap.Fault { return b.Fault_ } + +func TerminateVM(ctx context.Context, r soap.RoundTripper, req *types.TerminateVM) (*types.TerminateVMResponse, error) { + var reqBody, resBody TerminateVMBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type TurnDiskLocatorLedOff_TaskBody struct { + Req *types.TurnDiskLocatorLedOff_Task `xml:"urn:vim25 TurnDiskLocatorLedOff_Task,omitempty"` + Res *types.TurnDiskLocatorLedOff_TaskResponse `xml:"urn:vim25 TurnDiskLocatorLedOff_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *TurnDiskLocatorLedOff_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func TurnDiskLocatorLedOff_Task(ctx context.Context, r soap.RoundTripper, req *types.TurnDiskLocatorLedOff_Task) (*types.TurnDiskLocatorLedOff_TaskResponse, error) { + var reqBody, resBody TurnDiskLocatorLedOff_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type TurnDiskLocatorLedOn_TaskBody struct { + Req *types.TurnDiskLocatorLedOn_Task `xml:"urn:vim25 TurnDiskLocatorLedOn_Task,omitempty"` + Res *types.TurnDiskLocatorLedOn_TaskResponse `xml:"urn:vim25 TurnDiskLocatorLedOn_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *TurnDiskLocatorLedOn_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func TurnDiskLocatorLedOn_Task(ctx context.Context, r soap.RoundTripper, req *types.TurnDiskLocatorLedOn_Task) (*types.TurnDiskLocatorLedOn_TaskResponse, error) { + var reqBody, resBody TurnDiskLocatorLedOn_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type TurnOffFaultToleranceForVM_TaskBody struct { + Req *types.TurnOffFaultToleranceForVM_Task `xml:"urn:vim25 TurnOffFaultToleranceForVM_Task,omitempty"` + Res *types.TurnOffFaultToleranceForVM_TaskResponse `xml:"urn:vim25 TurnOffFaultToleranceForVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *TurnOffFaultToleranceForVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func TurnOffFaultToleranceForVM_Task(ctx context.Context, r soap.RoundTripper, req *types.TurnOffFaultToleranceForVM_Task) (*types.TurnOffFaultToleranceForVM_TaskResponse, error) { + var reqBody, resBody TurnOffFaultToleranceForVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UnassignUserFromGroupBody struct { + Req *types.UnassignUserFromGroup `xml:"urn:vim25 UnassignUserFromGroup,omitempty"` + Res *types.UnassignUserFromGroupResponse `xml:"urn:vim25 UnassignUserFromGroupResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnassignUserFromGroupBody) Fault() *soap.Fault { return b.Fault_ } + +func UnassignUserFromGroup(ctx context.Context, r soap.RoundTripper, req *types.UnassignUserFromGroup) (*types.UnassignUserFromGroupResponse, error) { + var reqBody, resBody 
UnassignUserFromGroupBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UnbindVnicBody struct { + Req *types.UnbindVnic `xml:"urn:vim25 UnbindVnic,omitempty"` + Res *types.UnbindVnicResponse `xml:"urn:vim25 UnbindVnicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnbindVnicBody) Fault() *soap.Fault { return b.Fault_ } + +func UnbindVnic(ctx context.Context, r soap.RoundTripper, req *types.UnbindVnic) (*types.UnbindVnicResponse, error) { + var reqBody, resBody UnbindVnicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UninstallHostPatch_TaskBody struct { + Req *types.UninstallHostPatch_Task `xml:"urn:vim25 UninstallHostPatch_Task,omitempty"` + Res *types.UninstallHostPatch_TaskResponse `xml:"urn:vim25 UninstallHostPatch_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UninstallHostPatch_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UninstallHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.UninstallHostPatch_Task) (*types.UninstallHostPatch_TaskResponse, error) { + var reqBody, resBody UninstallHostPatch_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UninstallIoFilter_TaskBody struct { + Req *types.UninstallIoFilter_Task `xml:"urn:vim25 UninstallIoFilter_Task,omitempty"` + Res *types.UninstallIoFilter_TaskResponse `xml:"urn:vim25 UninstallIoFilter_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UninstallIoFilter_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UninstallIoFilter_Task(ctx context.Context, r soap.RoundTripper, req *types.UninstallIoFilter_Task) (*types.UninstallIoFilter_TaskResponse, error) { + var reqBody, resBody UninstallIoFilter_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UninstallServiceBody struct { + Req *types.UninstallService `xml:"urn:vim25 UninstallService,omitempty"` + Res *types.UninstallServiceResponse `xml:"urn:vim25 UninstallServiceResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UninstallServiceBody) Fault() *soap.Fault { return b.Fault_ } + +func UninstallService(ctx context.Context, r soap.RoundTripper, req *types.UninstallService) (*types.UninstallServiceResponse, error) { + var reqBody, resBody UninstallServiceBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UnmapVmfsVolumeEx_TaskBody struct { + Req *types.UnmapVmfsVolumeEx_Task `xml:"urn:vim25 UnmapVmfsVolumeEx_Task,omitempty"` + Res *types.UnmapVmfsVolumeEx_TaskResponse `xml:"urn:vim25 UnmapVmfsVolumeEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnmapVmfsVolumeEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UnmapVmfsVolumeEx_Task(ctx context.Context, r soap.RoundTripper, req *types.UnmapVmfsVolumeEx_Task) (*types.UnmapVmfsVolumeEx_TaskResponse, 
error) { + var reqBody, resBody UnmapVmfsVolumeEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UnmountDiskMapping_TaskBody struct { + Req *types.UnmountDiskMapping_Task `xml:"urn:vim25 UnmountDiskMapping_Task,omitempty"` + Res *types.UnmountDiskMapping_TaskResponse `xml:"urn:vim25 UnmountDiskMapping_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnmountDiskMapping_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UnmountDiskMapping_Task(ctx context.Context, r soap.RoundTripper, req *types.UnmountDiskMapping_Task) (*types.UnmountDiskMapping_TaskResponse, error) { + var reqBody, resBody UnmountDiskMapping_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UnmountForceMountedVmfsVolumeBody struct { + Req *types.UnmountForceMountedVmfsVolume `xml:"urn:vim25 UnmountForceMountedVmfsVolume,omitempty"` + Res *types.UnmountForceMountedVmfsVolumeResponse `xml:"urn:vim25 UnmountForceMountedVmfsVolumeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnmountForceMountedVmfsVolumeBody) Fault() *soap.Fault { return b.Fault_ } + +func UnmountForceMountedVmfsVolume(ctx context.Context, r soap.RoundTripper, req *types.UnmountForceMountedVmfsVolume) (*types.UnmountForceMountedVmfsVolumeResponse, error) { + var reqBody, resBody UnmountForceMountedVmfsVolumeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UnmountToolsInstallerBody struct { + Req *types.UnmountToolsInstaller `xml:"urn:vim25 UnmountToolsInstaller,omitempty"` + Res *types.UnmountToolsInstallerResponse `xml:"urn:vim25 UnmountToolsInstallerResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnmountToolsInstallerBody) Fault() *soap.Fault { return b.Fault_ } + +func UnmountToolsInstaller(ctx context.Context, r soap.RoundTripper, req *types.UnmountToolsInstaller) (*types.UnmountToolsInstallerResponse, error) { + var reqBody, resBody UnmountToolsInstallerBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UnmountVffsVolumeBody struct { + Req *types.UnmountVffsVolume `xml:"urn:vim25 UnmountVffsVolume,omitempty"` + Res *types.UnmountVffsVolumeResponse `xml:"urn:vim25 UnmountVffsVolumeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnmountVffsVolumeBody) Fault() *soap.Fault { return b.Fault_ } + +func UnmountVffsVolume(ctx context.Context, r soap.RoundTripper, req *types.UnmountVffsVolume) (*types.UnmountVffsVolumeResponse, error) { + var reqBody, resBody UnmountVffsVolumeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UnmountVmfsVolumeBody struct { + Req *types.UnmountVmfsVolume `xml:"urn:vim25 UnmountVmfsVolume,omitempty"` + Res *types.UnmountVmfsVolumeResponse `xml:"urn:vim25 UnmountVmfsVolumeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b 
*UnmountVmfsVolumeBody) Fault() *soap.Fault { return b.Fault_ } + +func UnmountVmfsVolume(ctx context.Context, r soap.RoundTripper, req *types.UnmountVmfsVolume) (*types.UnmountVmfsVolumeResponse, error) { + var reqBody, resBody UnmountVmfsVolumeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UnmountVmfsVolumeEx_TaskBody struct { + Req *types.UnmountVmfsVolumeEx_Task `xml:"urn:vim25 UnmountVmfsVolumeEx_Task,omitempty"` + Res *types.UnmountVmfsVolumeEx_TaskResponse `xml:"urn:vim25 UnmountVmfsVolumeEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnmountVmfsVolumeEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UnmountVmfsVolumeEx_Task(ctx context.Context, r soap.RoundTripper, req *types.UnmountVmfsVolumeEx_Task) (*types.UnmountVmfsVolumeEx_TaskResponse, error) { + var reqBody, resBody UnmountVmfsVolumeEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UnregisterAndDestroy_TaskBody struct { + Req *types.UnregisterAndDestroy_Task `xml:"urn:vim25 UnregisterAndDestroy_Task,omitempty"` + Res *types.UnregisterAndDestroy_TaskResponse `xml:"urn:vim25 UnregisterAndDestroy_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnregisterAndDestroy_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UnregisterAndDestroy_Task(ctx context.Context, r soap.RoundTripper, req *types.UnregisterAndDestroy_Task) (*types.UnregisterAndDestroy_TaskResponse, error) { + var reqBody, resBody UnregisterAndDestroy_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UnregisterExtensionBody struct { + Req *types.UnregisterExtension `xml:"urn:vim25 UnregisterExtension,omitempty"` + Res *types.UnregisterExtensionResponse `xml:"urn:vim25 UnregisterExtensionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnregisterExtensionBody) Fault() *soap.Fault { return b.Fault_ } + +func UnregisterExtension(ctx context.Context, r soap.RoundTripper, req *types.UnregisterExtension) (*types.UnregisterExtensionResponse, error) { + var reqBody, resBody UnregisterExtensionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UnregisterHealthUpdateProviderBody struct { + Req *types.UnregisterHealthUpdateProvider `xml:"urn:vim25 UnregisterHealthUpdateProvider,omitempty"` + Res *types.UnregisterHealthUpdateProviderResponse `xml:"urn:vim25 UnregisterHealthUpdateProviderResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnregisterHealthUpdateProviderBody) Fault() *soap.Fault { return b.Fault_ } + +func UnregisterHealthUpdateProvider(ctx context.Context, r soap.RoundTripper, req *types.UnregisterHealthUpdateProvider) (*types.UnregisterHealthUpdateProviderResponse, error) { + var reqBody, resBody UnregisterHealthUpdateProviderBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UnregisterVMBody struct { + Req *types.UnregisterVM 
`xml:"urn:vim25 UnregisterVM,omitempty"` + Res *types.UnregisterVMResponse `xml:"urn:vim25 UnregisterVMResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnregisterVMBody) Fault() *soap.Fault { return b.Fault_ } + +func UnregisterVM(ctx context.Context, r soap.RoundTripper, req *types.UnregisterVM) (*types.UnregisterVMResponse, error) { + var reqBody, resBody UnregisterVMBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateAnswerFile_TaskBody struct { + Req *types.UpdateAnswerFile_Task `xml:"urn:vim25 UpdateAnswerFile_Task,omitempty"` + Res *types.UpdateAnswerFile_TaskResponse `xml:"urn:vim25 UpdateAnswerFile_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateAnswerFile_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateAnswerFile_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateAnswerFile_Task) (*types.UpdateAnswerFile_TaskResponse, error) { + var reqBody, resBody UpdateAnswerFile_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateAssignedLicenseBody struct { + Req *types.UpdateAssignedLicense `xml:"urn:vim25 UpdateAssignedLicense,omitempty"` + Res *types.UpdateAssignedLicenseResponse `xml:"urn:vim25 UpdateAssignedLicenseResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateAssignedLicenseBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateAssignedLicense(ctx context.Context, r soap.RoundTripper, req *types.UpdateAssignedLicense) (*types.UpdateAssignedLicenseResponse, error) { + var reqBody, resBody UpdateAssignedLicenseBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateAuthorizationRoleBody struct { + Req *types.UpdateAuthorizationRole `xml:"urn:vim25 UpdateAuthorizationRole,omitempty"` + Res *types.UpdateAuthorizationRoleResponse `xml:"urn:vim25 UpdateAuthorizationRoleResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateAuthorizationRoleBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *types.UpdateAuthorizationRole) (*types.UpdateAuthorizationRoleResponse, error) { + var reqBody, resBody UpdateAuthorizationRoleBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateBootDeviceBody struct { + Req *types.UpdateBootDevice `xml:"urn:vim25 UpdateBootDevice,omitempty"` + Res *types.UpdateBootDeviceResponse `xml:"urn:vim25 UpdateBootDeviceResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateBootDeviceBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateBootDevice(ctx context.Context, r soap.RoundTripper, req *types.UpdateBootDevice) (*types.UpdateBootDeviceResponse, error) { + var reqBody, resBody UpdateBootDeviceBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type 
UpdateChildResourceConfigurationBody struct { + Req *types.UpdateChildResourceConfiguration `xml:"urn:vim25 UpdateChildResourceConfiguration,omitempty"` + Res *types.UpdateChildResourceConfigurationResponse `xml:"urn:vim25 UpdateChildResourceConfigurationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateChildResourceConfigurationBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateChildResourceConfiguration(ctx context.Context, r soap.RoundTripper, req *types.UpdateChildResourceConfiguration) (*types.UpdateChildResourceConfigurationResponse, error) { + var reqBody, resBody UpdateChildResourceConfigurationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateClusterProfileBody struct { + Req *types.UpdateClusterProfile `xml:"urn:vim25 UpdateClusterProfile,omitempty"` + Res *types.UpdateClusterProfileResponse `xml:"urn:vim25 UpdateClusterProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateClusterProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateClusterProfile(ctx context.Context, r soap.RoundTripper, req *types.UpdateClusterProfile) (*types.UpdateClusterProfileResponse, error) { + var reqBody, resBody UpdateClusterProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateConfigBody struct { + Req *types.UpdateConfig `xml:"urn:vim25 UpdateConfig,omitempty"` + Res *types.UpdateConfigResponse `xml:"urn:vim25 UpdateConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateConfig) (*types.UpdateConfigResponse, error) { + var reqBody, resBody UpdateConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateConsoleIpRouteConfigBody struct { + Req *types.UpdateConsoleIpRouteConfig `xml:"urn:vim25 UpdateConsoleIpRouteConfig,omitempty"` + Res *types.UpdateConsoleIpRouteConfigResponse `xml:"urn:vim25 UpdateConsoleIpRouteConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateConsoleIpRouteConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateConsoleIpRouteConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateConsoleIpRouteConfig) (*types.UpdateConsoleIpRouteConfigResponse, error) { + var reqBody, resBody UpdateConsoleIpRouteConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateCounterLevelMappingBody struct { + Req *types.UpdateCounterLevelMapping `xml:"urn:vim25 UpdateCounterLevelMapping,omitempty"` + Res *types.UpdateCounterLevelMappingResponse `xml:"urn:vim25 UpdateCounterLevelMappingResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateCounterLevelMappingBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateCounterLevelMapping(ctx context.Context, r soap.RoundTripper, req *types.UpdateCounterLevelMapping) 
(*types.UpdateCounterLevelMappingResponse, error) { + var reqBody, resBody UpdateCounterLevelMappingBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateDVSHealthCheckConfig_TaskBody struct { + Req *types.UpdateDVSHealthCheckConfig_Task `xml:"urn:vim25 UpdateDVSHealthCheckConfig_Task,omitempty"` + Res *types.UpdateDVSHealthCheckConfig_TaskResponse `xml:"urn:vim25 UpdateDVSHealthCheckConfig_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateDVSHealthCheckConfig_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateDVSHealthCheckConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateDVSHealthCheckConfig_Task) (*types.UpdateDVSHealthCheckConfig_TaskResponse, error) { + var reqBody, resBody UpdateDVSHealthCheckConfig_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateDVSLacpGroupConfig_TaskBody struct { + Req *types.UpdateDVSLacpGroupConfig_Task `xml:"urn:vim25 UpdateDVSLacpGroupConfig_Task,omitempty"` + Res *types.UpdateDVSLacpGroupConfig_TaskResponse `xml:"urn:vim25 UpdateDVSLacpGroupConfig_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateDVSLacpGroupConfig_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateDVSLacpGroupConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateDVSLacpGroupConfig_Task) (*types.UpdateDVSLacpGroupConfig_TaskResponse, error) { + var reqBody, resBody UpdateDVSLacpGroupConfig_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateDateTimeBody struct { + Req *types.UpdateDateTime `xml:"urn:vim25 UpdateDateTime,omitempty"` + Res *types.UpdateDateTimeResponse `xml:"urn:vim25 UpdateDateTimeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateDateTimeBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateDateTime(ctx context.Context, r soap.RoundTripper, req *types.UpdateDateTime) (*types.UpdateDateTimeResponse, error) { + var reqBody, resBody UpdateDateTimeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateDateTimeConfigBody struct { + Req *types.UpdateDateTimeConfig `xml:"urn:vim25 UpdateDateTimeConfig,omitempty"` + Res *types.UpdateDateTimeConfigResponse `xml:"urn:vim25 UpdateDateTimeConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateDateTimeConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateDateTimeConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateDateTimeConfig) (*types.UpdateDateTimeConfigResponse, error) { + var reqBody, resBody UpdateDateTimeConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateDefaultPolicyBody struct { + Req *types.UpdateDefaultPolicy `xml:"urn:vim25 UpdateDefaultPolicy,omitempty"` + Res *types.UpdateDefaultPolicyResponse `xml:"urn:vim25 UpdateDefaultPolicyResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateDefaultPolicyBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateDefaultPolicy(ctx context.Context, r soap.RoundTripper, req *types.UpdateDefaultPolicy) (*types.UpdateDefaultPolicyResponse, error) { + var reqBody, resBody UpdateDefaultPolicyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateDiskPartitionsBody struct { + Req *types.UpdateDiskPartitions `xml:"urn:vim25 UpdateDiskPartitions,omitempty"` + Res *types.UpdateDiskPartitionsResponse `xml:"urn:vim25 UpdateDiskPartitionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateDiskPartitionsBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateDiskPartitions(ctx context.Context, r soap.RoundTripper, req *types.UpdateDiskPartitions) (*types.UpdateDiskPartitionsResponse, error) { + var reqBody, resBody UpdateDiskPartitionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateDnsConfigBody struct { + Req *types.UpdateDnsConfig `xml:"urn:vim25 UpdateDnsConfig,omitempty"` + Res *types.UpdateDnsConfigResponse `xml:"urn:vim25 UpdateDnsConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateDnsConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateDnsConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateDnsConfig) (*types.UpdateDnsConfigResponse, error) { + var reqBody, resBody UpdateDnsConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateDvsCapabilityBody struct { + Req *types.UpdateDvsCapability `xml:"urn:vim25 UpdateDvsCapability,omitempty"` + Res *types.UpdateDvsCapabilityResponse `xml:"urn:vim25 UpdateDvsCapabilityResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateDvsCapabilityBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateDvsCapability(ctx context.Context, r soap.RoundTripper, req *types.UpdateDvsCapability) (*types.UpdateDvsCapabilityResponse, error) { + var reqBody, resBody UpdateDvsCapabilityBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateExtensionBody struct { + Req *types.UpdateExtension `xml:"urn:vim25 UpdateExtension,omitempty"` + Res *types.UpdateExtensionResponse `xml:"urn:vim25 UpdateExtensionResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateExtensionBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateExtension(ctx context.Context, r soap.RoundTripper, req *types.UpdateExtension) (*types.UpdateExtensionResponse, error) { + var reqBody, resBody UpdateExtensionBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateFlagsBody struct { + Req *types.UpdateFlags `xml:"urn:vim25 UpdateFlags,omitempty"` + Res *types.UpdateFlagsResponse `xml:"urn:vim25 UpdateFlagsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty"` +} + +func (b *UpdateFlagsBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateFlags(ctx context.Context, r soap.RoundTripper, req *types.UpdateFlags) (*types.UpdateFlagsResponse, error) { + var reqBody, resBody UpdateFlagsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateGraphicsConfigBody struct { + Req *types.UpdateGraphicsConfig `xml:"urn:vim25 UpdateGraphicsConfig,omitempty"` + Res *types.UpdateGraphicsConfigResponse `xml:"urn:vim25 UpdateGraphicsConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateGraphicsConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateGraphicsConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateGraphicsConfig) (*types.UpdateGraphicsConfigResponse, error) { + var reqBody, resBody UpdateGraphicsConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateHostCustomizations_TaskBody struct { + Req *types.UpdateHostCustomizations_Task `xml:"urn:vim25 UpdateHostCustomizations_Task,omitempty"` + Res *types.UpdateHostCustomizations_TaskResponse `xml:"urn:vim25 UpdateHostCustomizations_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateHostCustomizations_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateHostCustomizations_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateHostCustomizations_Task) (*types.UpdateHostCustomizations_TaskResponse, error) { + var reqBody, resBody UpdateHostCustomizations_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateHostImageAcceptanceLevelBody struct { + Req *types.UpdateHostImageAcceptanceLevel `xml:"urn:vim25 UpdateHostImageAcceptanceLevel,omitempty"` + Res *types.UpdateHostImageAcceptanceLevelResponse `xml:"urn:vim25 UpdateHostImageAcceptanceLevelResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateHostImageAcceptanceLevelBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateHostImageAcceptanceLevel(ctx context.Context, r soap.RoundTripper, req *types.UpdateHostImageAcceptanceLevel) (*types.UpdateHostImageAcceptanceLevelResponse, error) { + var reqBody, resBody UpdateHostImageAcceptanceLevelBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateHostProfileBody struct { + Req *types.UpdateHostProfile `xml:"urn:vim25 UpdateHostProfile,omitempty"` + Res *types.UpdateHostProfileResponse `xml:"urn:vim25 UpdateHostProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateHostProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateHostProfile(ctx context.Context, r soap.RoundTripper, req *types.UpdateHostProfile) (*types.UpdateHostProfileResponse, error) { + var reqBody, resBody UpdateHostProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateHostSpecificationBody struct { + Req 
*types.UpdateHostSpecification `xml:"urn:vim25 UpdateHostSpecification,omitempty"` + Res *types.UpdateHostSpecificationResponse `xml:"urn:vim25 UpdateHostSpecificationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateHostSpecificationBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateHostSpecification(ctx context.Context, r soap.RoundTripper, req *types.UpdateHostSpecification) (*types.UpdateHostSpecificationResponse, error) { + var reqBody, resBody UpdateHostSpecificationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateHostSubSpecificationBody struct { + Req *types.UpdateHostSubSpecification `xml:"urn:vim25 UpdateHostSubSpecification,omitempty"` + Res *types.UpdateHostSubSpecificationResponse `xml:"urn:vim25 UpdateHostSubSpecificationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateHostSubSpecificationBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateHostSubSpecification(ctx context.Context, r soap.RoundTripper, req *types.UpdateHostSubSpecification) (*types.UpdateHostSubSpecificationResponse, error) { + var reqBody, resBody UpdateHostSubSpecificationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateInternetScsiAdvancedOptionsBody struct { + Req *types.UpdateInternetScsiAdvancedOptions `xml:"urn:vim25 UpdateInternetScsiAdvancedOptions,omitempty"` + Res *types.UpdateInternetScsiAdvancedOptionsResponse `xml:"urn:vim25 UpdateInternetScsiAdvancedOptionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateInternetScsiAdvancedOptionsBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateInternetScsiAdvancedOptions(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiAdvancedOptions) (*types.UpdateInternetScsiAdvancedOptionsResponse, error) { + var reqBody, resBody UpdateInternetScsiAdvancedOptionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateInternetScsiAliasBody struct { + Req *types.UpdateInternetScsiAlias `xml:"urn:vim25 UpdateInternetScsiAlias,omitempty"` + Res *types.UpdateInternetScsiAliasResponse `xml:"urn:vim25 UpdateInternetScsiAliasResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateInternetScsiAliasBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateInternetScsiAlias(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiAlias) (*types.UpdateInternetScsiAliasResponse, error) { + var reqBody, resBody UpdateInternetScsiAliasBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateInternetScsiAuthenticationPropertiesBody struct { + Req *types.UpdateInternetScsiAuthenticationProperties `xml:"urn:vim25 UpdateInternetScsiAuthenticationProperties,omitempty"` + Res *types.UpdateInternetScsiAuthenticationPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiAuthenticationPropertiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func 
(b *UpdateInternetScsiAuthenticationPropertiesBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateInternetScsiAuthenticationProperties(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiAuthenticationProperties) (*types.UpdateInternetScsiAuthenticationPropertiesResponse, error) { + var reqBody, resBody UpdateInternetScsiAuthenticationPropertiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateInternetScsiDigestPropertiesBody struct { + Req *types.UpdateInternetScsiDigestProperties `xml:"urn:vim25 UpdateInternetScsiDigestProperties,omitempty"` + Res *types.UpdateInternetScsiDigestPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiDigestPropertiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateInternetScsiDigestPropertiesBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateInternetScsiDigestProperties(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiDigestProperties) (*types.UpdateInternetScsiDigestPropertiesResponse, error) { + var reqBody, resBody UpdateInternetScsiDigestPropertiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateInternetScsiDiscoveryPropertiesBody struct { + Req *types.UpdateInternetScsiDiscoveryProperties `xml:"urn:vim25 UpdateInternetScsiDiscoveryProperties,omitempty"` + Res *types.UpdateInternetScsiDiscoveryPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiDiscoveryPropertiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateInternetScsiDiscoveryPropertiesBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateInternetScsiDiscoveryProperties(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiDiscoveryProperties) (*types.UpdateInternetScsiDiscoveryPropertiesResponse, error) { + var reqBody, resBody UpdateInternetScsiDiscoveryPropertiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateInternetScsiIPPropertiesBody struct { + Req *types.UpdateInternetScsiIPProperties `xml:"urn:vim25 UpdateInternetScsiIPProperties,omitempty"` + Res *types.UpdateInternetScsiIPPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiIPPropertiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateInternetScsiIPPropertiesBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateInternetScsiIPProperties(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiIPProperties) (*types.UpdateInternetScsiIPPropertiesResponse, error) { + var reqBody, resBody UpdateInternetScsiIPPropertiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateInternetScsiNameBody struct { + Req *types.UpdateInternetScsiName `xml:"urn:vim25 UpdateInternetScsiName,omitempty"` + Res *types.UpdateInternetScsiNameResponse `xml:"urn:vim25 UpdateInternetScsiNameResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateInternetScsiNameBody) Fault() *soap.Fault { return b.Fault_ } + +func 
UpdateInternetScsiName(ctx context.Context, r soap.RoundTripper, req *types.UpdateInternetScsiName) (*types.UpdateInternetScsiNameResponse, error) { + var reqBody, resBody UpdateInternetScsiNameBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateIpConfigBody struct { + Req *types.UpdateIpConfig `xml:"urn:vim25 UpdateIpConfig,omitempty"` + Res *types.UpdateIpConfigResponse `xml:"urn:vim25 UpdateIpConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateIpConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateIpConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpConfig) (*types.UpdateIpConfigResponse, error) { + var reqBody, resBody UpdateIpConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateIpPoolBody struct { + Req *types.UpdateIpPool `xml:"urn:vim25 UpdateIpPool,omitempty"` + Res *types.UpdateIpPoolResponse `xml:"urn:vim25 UpdateIpPoolResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateIpPoolBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateIpPool(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpPool) (*types.UpdateIpPoolResponse, error) { + var reqBody, resBody UpdateIpPoolBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateIpRouteConfigBody struct { + Req *types.UpdateIpRouteConfig `xml:"urn:vim25 UpdateIpRouteConfig,omitempty"` + Res *types.UpdateIpRouteConfigResponse `xml:"urn:vim25 UpdateIpRouteConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateIpRouteConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateIpRouteConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpRouteConfig) (*types.UpdateIpRouteConfigResponse, error) { + var reqBody, resBody UpdateIpRouteConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateIpRouteTableConfigBody struct { + Req *types.UpdateIpRouteTableConfig `xml:"urn:vim25 UpdateIpRouteTableConfig,omitempty"` + Res *types.UpdateIpRouteTableConfigResponse `xml:"urn:vim25 UpdateIpRouteTableConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateIpRouteTableConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateIpRouteTableConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpRouteTableConfig) (*types.UpdateIpRouteTableConfigResponse, error) { + var reqBody, resBody UpdateIpRouteTableConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateIpmiBody struct { + Req *types.UpdateIpmi `xml:"urn:vim25 UpdateIpmi,omitempty"` + Res *types.UpdateIpmiResponse `xml:"urn:vim25 UpdateIpmiResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateIpmiBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateIpmi(ctx context.Context, r soap.RoundTripper, req 
*types.UpdateIpmi) (*types.UpdateIpmiResponse, error) { + var reqBody, resBody UpdateIpmiBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateKmipServerBody struct { + Req *types.UpdateKmipServer `xml:"urn:vim25 UpdateKmipServer,omitempty"` + Res *types.UpdateKmipServerResponse `xml:"urn:vim25 UpdateKmipServerResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateKmipServerBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateKmipServer(ctx context.Context, r soap.RoundTripper, req *types.UpdateKmipServer) (*types.UpdateKmipServerResponse, error) { + var reqBody, resBody UpdateKmipServerBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateKmsSignedCsrClientCertBody struct { + Req *types.UpdateKmsSignedCsrClientCert `xml:"urn:vim25 UpdateKmsSignedCsrClientCert,omitempty"` + Res *types.UpdateKmsSignedCsrClientCertResponse `xml:"urn:vim25 UpdateKmsSignedCsrClientCertResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateKmsSignedCsrClientCertBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateKmsSignedCsrClientCert(ctx context.Context, r soap.RoundTripper, req *types.UpdateKmsSignedCsrClientCert) (*types.UpdateKmsSignedCsrClientCertResponse, error) { + var reqBody, resBody UpdateKmsSignedCsrClientCertBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateLicenseBody struct { + Req *types.UpdateLicense `xml:"urn:vim25 UpdateLicense,omitempty"` + Res *types.UpdateLicenseResponse `xml:"urn:vim25 UpdateLicenseResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateLicenseBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateLicense(ctx context.Context, r soap.RoundTripper, req *types.UpdateLicense) (*types.UpdateLicenseResponse, error) { + var reqBody, resBody UpdateLicenseBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateLicenseLabelBody struct { + Req *types.UpdateLicenseLabel `xml:"urn:vim25 UpdateLicenseLabel,omitempty"` + Res *types.UpdateLicenseLabelResponse `xml:"urn:vim25 UpdateLicenseLabelResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateLicenseLabelBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateLicenseLabel(ctx context.Context, r soap.RoundTripper, req *types.UpdateLicenseLabel) (*types.UpdateLicenseLabelResponse, error) { + var reqBody, resBody UpdateLicenseLabelBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateLinkedChildrenBody struct { + Req *types.UpdateLinkedChildren `xml:"urn:vim25 UpdateLinkedChildren,omitempty"` + Res *types.UpdateLinkedChildrenResponse `xml:"urn:vim25 UpdateLinkedChildrenResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateLinkedChildrenBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateLinkedChildren(ctx context.Context, r 
soap.RoundTripper, req *types.UpdateLinkedChildren) (*types.UpdateLinkedChildrenResponse, error) { + var reqBody, resBody UpdateLinkedChildrenBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateLocalSwapDatastoreBody struct { + Req *types.UpdateLocalSwapDatastore `xml:"urn:vim25 UpdateLocalSwapDatastore,omitempty"` + Res *types.UpdateLocalSwapDatastoreResponse `xml:"urn:vim25 UpdateLocalSwapDatastoreResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateLocalSwapDatastoreBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateLocalSwapDatastore(ctx context.Context, r soap.RoundTripper, req *types.UpdateLocalSwapDatastore) (*types.UpdateLocalSwapDatastoreResponse, error) { + var reqBody, resBody UpdateLocalSwapDatastoreBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateLockdownExceptionsBody struct { + Req *types.UpdateLockdownExceptions `xml:"urn:vim25 UpdateLockdownExceptions,omitempty"` + Res *types.UpdateLockdownExceptionsResponse `xml:"urn:vim25 UpdateLockdownExceptionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateLockdownExceptionsBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateLockdownExceptions(ctx context.Context, r soap.RoundTripper, req *types.UpdateLockdownExceptions) (*types.UpdateLockdownExceptionsResponse, error) { + var reqBody, resBody UpdateLockdownExceptionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateModuleOptionStringBody struct { + Req *types.UpdateModuleOptionString `xml:"urn:vim25 UpdateModuleOptionString,omitempty"` + Res *types.UpdateModuleOptionStringResponse `xml:"urn:vim25 UpdateModuleOptionStringResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateModuleOptionStringBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateModuleOptionString(ctx context.Context, r soap.RoundTripper, req *types.UpdateModuleOptionString) (*types.UpdateModuleOptionStringResponse, error) { + var reqBody, resBody UpdateModuleOptionStringBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateNetworkConfigBody struct { + Req *types.UpdateNetworkConfig `xml:"urn:vim25 UpdateNetworkConfig,omitempty"` + Res *types.UpdateNetworkConfigResponse `xml:"urn:vim25 UpdateNetworkConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateNetworkConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateNetworkConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateNetworkConfig) (*types.UpdateNetworkConfigResponse, error) { + var reqBody, resBody UpdateNetworkConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateNetworkResourcePoolBody struct { + Req *types.UpdateNetworkResourcePool `xml:"urn:vim25 UpdateNetworkResourcePool,omitempty"` + Res *types.UpdateNetworkResourcePoolResponse `xml:"urn:vim25 
UpdateNetworkResourcePoolResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateNetworkResourcePoolBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *types.UpdateNetworkResourcePool) (*types.UpdateNetworkResourcePoolResponse, error) { + var reqBody, resBody UpdateNetworkResourcePoolBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateOptionsBody struct { + Req *types.UpdateOptions `xml:"urn:vim25 UpdateOptions,omitempty"` + Res *types.UpdateOptionsResponse `xml:"urn:vim25 UpdateOptionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateOptionsBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateOptions(ctx context.Context, r soap.RoundTripper, req *types.UpdateOptions) (*types.UpdateOptionsResponse, error) { + var reqBody, resBody UpdateOptionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdatePassthruConfigBody struct { + Req *types.UpdatePassthruConfig `xml:"urn:vim25 UpdatePassthruConfig,omitempty"` + Res *types.UpdatePassthruConfigResponse `xml:"urn:vim25 UpdatePassthruConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdatePassthruConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdatePassthruConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdatePassthruConfig) (*types.UpdatePassthruConfigResponse, error) { + var reqBody, resBody UpdatePassthruConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdatePerfIntervalBody struct { + Req *types.UpdatePerfInterval `xml:"urn:vim25 UpdatePerfInterval,omitempty"` + Res *types.UpdatePerfIntervalResponse `xml:"urn:vim25 UpdatePerfIntervalResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdatePerfIntervalBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdatePerfInterval(ctx context.Context, r soap.RoundTripper, req *types.UpdatePerfInterval) (*types.UpdatePerfIntervalResponse, error) { + var reqBody, resBody UpdatePerfIntervalBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdatePhysicalNicLinkSpeedBody struct { + Req *types.UpdatePhysicalNicLinkSpeed `xml:"urn:vim25 UpdatePhysicalNicLinkSpeed,omitempty"` + Res *types.UpdatePhysicalNicLinkSpeedResponse `xml:"urn:vim25 UpdatePhysicalNicLinkSpeedResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdatePhysicalNicLinkSpeedBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdatePhysicalNicLinkSpeed(ctx context.Context, r soap.RoundTripper, req *types.UpdatePhysicalNicLinkSpeed) (*types.UpdatePhysicalNicLinkSpeedResponse, error) { + var reqBody, resBody UpdatePhysicalNicLinkSpeedBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdatePortGroupBody struct { + Req *types.UpdatePortGroup `xml:"urn:vim25 
UpdatePortGroup,omitempty"` + Res *types.UpdatePortGroupResponse `xml:"urn:vim25 UpdatePortGroupResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdatePortGroupBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdatePortGroup(ctx context.Context, r soap.RoundTripper, req *types.UpdatePortGroup) (*types.UpdatePortGroupResponse, error) { + var reqBody, resBody UpdatePortGroupBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateProgressBody struct { + Req *types.UpdateProgress `xml:"urn:vim25 UpdateProgress,omitempty"` + Res *types.UpdateProgressResponse `xml:"urn:vim25 UpdateProgressResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateProgressBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateProgress(ctx context.Context, r soap.RoundTripper, req *types.UpdateProgress) (*types.UpdateProgressResponse, error) { + var reqBody, resBody UpdateProgressBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateReferenceHostBody struct { + Req *types.UpdateReferenceHost `xml:"urn:vim25 UpdateReferenceHost,omitempty"` + Res *types.UpdateReferenceHostResponse `xml:"urn:vim25 UpdateReferenceHostResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateReferenceHostBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateReferenceHost(ctx context.Context, r soap.RoundTripper, req *types.UpdateReferenceHost) (*types.UpdateReferenceHostResponse, error) { + var reqBody, resBody UpdateReferenceHostBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateRulesetBody struct { + Req *types.UpdateRuleset `xml:"urn:vim25 UpdateRuleset,omitempty"` + Res *types.UpdateRulesetResponse `xml:"urn:vim25 UpdateRulesetResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateRulesetBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateRuleset(ctx context.Context, r soap.RoundTripper, req *types.UpdateRuleset) (*types.UpdateRulesetResponse, error) { + var reqBody, resBody UpdateRulesetBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateScsiLunDisplayNameBody struct { + Req *types.UpdateScsiLunDisplayName `xml:"urn:vim25 UpdateScsiLunDisplayName,omitempty"` + Res *types.UpdateScsiLunDisplayNameResponse `xml:"urn:vim25 UpdateScsiLunDisplayNameResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateScsiLunDisplayNameBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateScsiLunDisplayName(ctx context.Context, r soap.RoundTripper, req *types.UpdateScsiLunDisplayName) (*types.UpdateScsiLunDisplayNameResponse, error) { + var reqBody, resBody UpdateScsiLunDisplayNameBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateSelfSignedClientCertBody struct { + Req *types.UpdateSelfSignedClientCert `xml:"urn:vim25 
UpdateSelfSignedClientCert,omitempty"` + Res *types.UpdateSelfSignedClientCertResponse `xml:"urn:vim25 UpdateSelfSignedClientCertResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateSelfSignedClientCertBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateSelfSignedClientCert(ctx context.Context, r soap.RoundTripper, req *types.UpdateSelfSignedClientCert) (*types.UpdateSelfSignedClientCertResponse, error) { + var reqBody, resBody UpdateSelfSignedClientCertBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateServiceConsoleVirtualNicBody struct { + Req *types.UpdateServiceConsoleVirtualNic `xml:"urn:vim25 UpdateServiceConsoleVirtualNic,omitempty"` + Res *types.UpdateServiceConsoleVirtualNicResponse `xml:"urn:vim25 UpdateServiceConsoleVirtualNicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateServiceConsoleVirtualNicBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.UpdateServiceConsoleVirtualNic) (*types.UpdateServiceConsoleVirtualNicResponse, error) { + var reqBody, resBody UpdateServiceConsoleVirtualNicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateServiceMessageBody struct { + Req *types.UpdateServiceMessage `xml:"urn:vim25 UpdateServiceMessage,omitempty"` + Res *types.UpdateServiceMessageResponse `xml:"urn:vim25 UpdateServiceMessageResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateServiceMessageBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateServiceMessage(ctx context.Context, r soap.RoundTripper, req *types.UpdateServiceMessage) (*types.UpdateServiceMessageResponse, error) { + var reqBody, resBody UpdateServiceMessageBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateServicePolicyBody struct { + Req *types.UpdateServicePolicy `xml:"urn:vim25 UpdateServicePolicy,omitempty"` + Res *types.UpdateServicePolicyResponse `xml:"urn:vim25 UpdateServicePolicyResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateServicePolicyBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateServicePolicy(ctx context.Context, r soap.RoundTripper, req *types.UpdateServicePolicy) (*types.UpdateServicePolicyResponse, error) { + var reqBody, resBody UpdateServicePolicyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateSoftwareInternetScsiEnabledBody struct { + Req *types.UpdateSoftwareInternetScsiEnabled `xml:"urn:vim25 UpdateSoftwareInternetScsiEnabled,omitempty"` + Res *types.UpdateSoftwareInternetScsiEnabledResponse `xml:"urn:vim25 UpdateSoftwareInternetScsiEnabledResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateSoftwareInternetScsiEnabledBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateSoftwareInternetScsiEnabled(ctx context.Context, r soap.RoundTripper, req 
*types.UpdateSoftwareInternetScsiEnabled) (*types.UpdateSoftwareInternetScsiEnabledResponse, error) { + var reqBody, resBody UpdateSoftwareInternetScsiEnabledBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateSystemResourcesBody struct { + Req *types.UpdateSystemResources `xml:"urn:vim25 UpdateSystemResources,omitempty"` + Res *types.UpdateSystemResourcesResponse `xml:"urn:vim25 UpdateSystemResourcesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateSystemResourcesBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateSystemResources(ctx context.Context, r soap.RoundTripper, req *types.UpdateSystemResources) (*types.UpdateSystemResourcesResponse, error) { + var reqBody, resBody UpdateSystemResourcesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateSystemSwapConfigurationBody struct { + Req *types.UpdateSystemSwapConfiguration `xml:"urn:vim25 UpdateSystemSwapConfiguration,omitempty"` + Res *types.UpdateSystemSwapConfigurationResponse `xml:"urn:vim25 UpdateSystemSwapConfigurationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateSystemSwapConfigurationBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateSystemSwapConfiguration(ctx context.Context, r soap.RoundTripper, req *types.UpdateSystemSwapConfiguration) (*types.UpdateSystemSwapConfigurationResponse, error) { + var reqBody, resBody UpdateSystemSwapConfigurationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateSystemUsersBody struct { + Req *types.UpdateSystemUsers `xml:"urn:vim25 UpdateSystemUsers,omitempty"` + Res *types.UpdateSystemUsersResponse `xml:"urn:vim25 UpdateSystemUsersResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateSystemUsersBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateSystemUsers(ctx context.Context, r soap.RoundTripper, req *types.UpdateSystemUsers) (*types.UpdateSystemUsersResponse, error) { + var reqBody, resBody UpdateSystemUsersBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateUserBody struct { + Req *types.UpdateUser `xml:"urn:vim25 UpdateUser,omitempty"` + Res *types.UpdateUserResponse `xml:"urn:vim25 UpdateUserResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateUserBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateUser(ctx context.Context, r soap.RoundTripper, req *types.UpdateUser) (*types.UpdateUserResponse, error) { + var reqBody, resBody UpdateUserBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateVAppConfigBody struct { + Req *types.UpdateVAppConfig `xml:"urn:vim25 UpdateVAppConfig,omitempty"` + Res *types.UpdateVAppConfigResponse `xml:"urn:vim25 UpdateVAppConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateVAppConfigBody) Fault() *soap.Fault { return 
b.Fault_ } + +func UpdateVAppConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateVAppConfig) (*types.UpdateVAppConfigResponse, error) { + var reqBody, resBody UpdateVAppConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateVStorageInfrastructureObjectPolicy_TaskBody struct { + Req *types.UpdateVStorageInfrastructureObjectPolicy_Task `xml:"urn:vim25 UpdateVStorageInfrastructureObjectPolicy_Task,omitempty"` + Res *types.UpdateVStorageInfrastructureObjectPolicy_TaskResponse `xml:"urn:vim25 UpdateVStorageInfrastructureObjectPolicy_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateVStorageInfrastructureObjectPolicy_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateVStorageInfrastructureObjectPolicy_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateVStorageInfrastructureObjectPolicy_Task) (*types.UpdateVStorageInfrastructureObjectPolicy_TaskResponse, error) { + var reqBody, resBody UpdateVStorageInfrastructureObjectPolicy_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateVStorageObjectPolicy_TaskBody struct { + Req *types.UpdateVStorageObjectPolicy_Task `xml:"urn:vim25 UpdateVStorageObjectPolicy_Task,omitempty"` + Res *types.UpdateVStorageObjectPolicy_TaskResponse `xml:"urn:vim25 UpdateVStorageObjectPolicy_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateVStorageObjectPolicy_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateVStorageObjectPolicy_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateVStorageObjectPolicy_Task) (*types.UpdateVStorageObjectPolicy_TaskResponse, error) { + var reqBody, resBody UpdateVStorageObjectPolicy_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateVVolVirtualMachineFiles_TaskBody struct { + Req *types.UpdateVVolVirtualMachineFiles_Task `xml:"urn:vim25 UpdateVVolVirtualMachineFiles_Task,omitempty"` + Res *types.UpdateVVolVirtualMachineFiles_TaskResponse `xml:"urn:vim25 UpdateVVolVirtualMachineFiles_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateVVolVirtualMachineFiles_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateVVolVirtualMachineFiles_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateVVolVirtualMachineFiles_Task) (*types.UpdateVVolVirtualMachineFiles_TaskResponse, error) { + var reqBody, resBody UpdateVVolVirtualMachineFiles_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateVirtualMachineFiles_TaskBody struct { + Req *types.UpdateVirtualMachineFiles_Task `xml:"urn:vim25 UpdateVirtualMachineFiles_Task,omitempty"` + Res *types.UpdateVirtualMachineFiles_TaskResponse `xml:"urn:vim25 UpdateVirtualMachineFiles_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateVirtualMachineFiles_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateVirtualMachineFiles_Task(ctx context.Context, r 
soap.RoundTripper, req *types.UpdateVirtualMachineFiles_Task) (*types.UpdateVirtualMachineFiles_TaskResponse, error) { + var reqBody, resBody UpdateVirtualMachineFiles_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateVirtualNicBody struct { + Req *types.UpdateVirtualNic `xml:"urn:vim25 UpdateVirtualNic,omitempty"` + Res *types.UpdateVirtualNicResponse `xml:"urn:vim25 UpdateVirtualNicResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateVirtualNicBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.UpdateVirtualNic) (*types.UpdateVirtualNicResponse, error) { + var reqBody, resBody UpdateVirtualNicBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateVirtualSwitchBody struct { + Req *types.UpdateVirtualSwitch `xml:"urn:vim25 UpdateVirtualSwitch,omitempty"` + Res *types.UpdateVirtualSwitchResponse `xml:"urn:vim25 UpdateVirtualSwitchResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateVirtualSwitchBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.UpdateVirtualSwitch) (*types.UpdateVirtualSwitchResponse, error) { + var reqBody, resBody UpdateVirtualSwitchBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateVmfsUnmapBandwidthBody struct { + Req *types.UpdateVmfsUnmapBandwidth `xml:"urn:vim25 UpdateVmfsUnmapBandwidth,omitempty"` + Res *types.UpdateVmfsUnmapBandwidthResponse `xml:"urn:vim25 UpdateVmfsUnmapBandwidthResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateVmfsUnmapBandwidthBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateVmfsUnmapBandwidth(ctx context.Context, r soap.RoundTripper, req *types.UpdateVmfsUnmapBandwidth) (*types.UpdateVmfsUnmapBandwidthResponse, error) { + var reqBody, resBody UpdateVmfsUnmapBandwidthBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateVmfsUnmapPriorityBody struct { + Req *types.UpdateVmfsUnmapPriority `xml:"urn:vim25 UpdateVmfsUnmapPriority,omitempty"` + Res *types.UpdateVmfsUnmapPriorityResponse `xml:"urn:vim25 UpdateVmfsUnmapPriorityResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateVmfsUnmapPriorityBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateVmfsUnmapPriority(ctx context.Context, r soap.RoundTripper, req *types.UpdateVmfsUnmapPriority) (*types.UpdateVmfsUnmapPriorityResponse, error) { + var reqBody, resBody UpdateVmfsUnmapPriorityBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpdateVsan_TaskBody struct { + Req *types.UpdateVsan_Task `xml:"urn:vim25 UpdateVsan_Task,omitempty"` + Res *types.UpdateVsan_TaskResponse `xml:"urn:vim25 UpdateVsan_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + 
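[Editor's note, not part of the vendored patch] The wrappers in this vendored file are machine-generated govmomi vim25 SOAP bindings and all follow one pattern: a *Body struct pairs the request and response types plus a SOAP fault slot, and the exported function round-trips the request over any soap.RoundTripper. The sketch below shows how one wrapper from this file, UpdateServiceMessage, is typically driven through a govmomi client (which satisfies soap.RoundTripper). It is an illustrative assumption, not code from the patch; the vCenter URL, credentials, and message text are placeholders.

// Illustrative sketch only -- not part of the vendored diff.
// Assumes a reachable vCenter endpoint; the URL, credentials, and
// message text are placeholders.
package main

import (
	"context"
	"fmt"
	"net/url"

	"github.com/vmware/govmomi"
	"github.com/vmware/govmomi/vim25/methods"
	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	ctx := context.Background()

	// Placeholder endpoint and credentials.
	u, err := url.Parse("https://user:pass@vcenter.example.com/sdk")
	if err != nil {
		panic(err)
	}

	// govmomi.Client embeds *vim25.Client, which implements soap.RoundTripper,
	// so it can be passed directly to any generated wrapper in vim25/methods.
	c, err := govmomi.NewClient(ctx, u, true /* insecure: skip TLS verification */)
	if err != nil {
		panic(err)
	}

	// Build the typed request; the generated wrapper places it in its *Body
	// struct, round-trips it as SOAP, and returns the typed response.
	req := &types.UpdateServiceMessage{
		This:    *c.ServiceContent.SessionManager,
		Message: "maintenance window at 02:00 UTC", // placeholder text
	}
	if _, err := methods.UpdateServiceMessage(ctx, c.Client, req); err != nil {
		panic(err)
	}

	fmt.Println("service message updated")
}

[End of editor's note; the vendored diff continues below.]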
+func (b *UpdateVsan_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateVsan_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateVsan_Task) (*types.UpdateVsan_TaskResponse, error) { + var reqBody, resBody UpdateVsan_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpgradeIoFilter_TaskBody struct { + Req *types.UpgradeIoFilter_Task `xml:"urn:vim25 UpgradeIoFilter_Task,omitempty"` + Res *types.UpgradeIoFilter_TaskResponse `xml:"urn:vim25 UpgradeIoFilter_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpgradeIoFilter_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpgradeIoFilter_Task(ctx context.Context, r soap.RoundTripper, req *types.UpgradeIoFilter_Task) (*types.UpgradeIoFilter_TaskResponse, error) { + var reqBody, resBody UpgradeIoFilter_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpgradeTools_TaskBody struct { + Req *types.UpgradeTools_Task `xml:"urn:vim25 UpgradeTools_Task,omitempty"` + Res *types.UpgradeTools_TaskResponse `xml:"urn:vim25 UpgradeTools_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpgradeTools_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpgradeTools_Task(ctx context.Context, r soap.RoundTripper, req *types.UpgradeTools_Task) (*types.UpgradeTools_TaskResponse, error) { + var reqBody, resBody UpgradeTools_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpgradeVM_TaskBody struct { + Req *types.UpgradeVM_Task `xml:"urn:vim25 UpgradeVM_Task,omitempty"` + Res *types.UpgradeVM_TaskResponse `xml:"urn:vim25 UpgradeVM_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpgradeVM_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpgradeVM_Task(ctx context.Context, r soap.RoundTripper, req *types.UpgradeVM_Task) (*types.UpgradeVM_TaskResponse, error) { + var reqBody, resBody UpgradeVM_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpgradeVmLayoutBody struct { + Req *types.UpgradeVmLayout `xml:"urn:vim25 UpgradeVmLayout,omitempty"` + Res *types.UpgradeVmLayoutResponse `xml:"urn:vim25 UpgradeVmLayoutResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpgradeVmLayoutBody) Fault() *soap.Fault { return b.Fault_ } + +func UpgradeVmLayout(ctx context.Context, r soap.RoundTripper, req *types.UpgradeVmLayout) (*types.UpgradeVmLayoutResponse, error) { + var reqBody, resBody UpgradeVmLayoutBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpgradeVmfsBody struct { + Req *types.UpgradeVmfs `xml:"urn:vim25 UpgradeVmfs,omitempty"` + Res *types.UpgradeVmfsResponse `xml:"urn:vim25 UpgradeVmfsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpgradeVmfsBody) Fault() *soap.Fault { return b.Fault_ } + +func UpgradeVmfs(ctx context.Context, r 
soap.RoundTripper, req *types.UpgradeVmfs) (*types.UpgradeVmfsResponse, error) { + var reqBody, resBody UpgradeVmfsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UpgradeVsanObjectsBody struct { + Req *types.UpgradeVsanObjects `xml:"urn:vim25 UpgradeVsanObjects,omitempty"` + Res *types.UpgradeVsanObjectsResponse `xml:"urn:vim25 UpgradeVsanObjectsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpgradeVsanObjectsBody) Fault() *soap.Fault { return b.Fault_ } + +func UpgradeVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.UpgradeVsanObjects) (*types.UpgradeVsanObjectsResponse, error) { + var reqBody, resBody UpgradeVsanObjectsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UploadClientCertBody struct { + Req *types.UploadClientCert `xml:"urn:vim25 UploadClientCert,omitempty"` + Res *types.UploadClientCertResponse `xml:"urn:vim25 UploadClientCertResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UploadClientCertBody) Fault() *soap.Fault { return b.Fault_ } + +func UploadClientCert(ctx context.Context, r soap.RoundTripper, req *types.UploadClientCert) (*types.UploadClientCertResponse, error) { + var reqBody, resBody UploadClientCertBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UploadKmipServerCertBody struct { + Req *types.UploadKmipServerCert `xml:"urn:vim25 UploadKmipServerCert,omitempty"` + Res *types.UploadKmipServerCertResponse `xml:"urn:vim25 UploadKmipServerCertResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UploadKmipServerCertBody) Fault() *soap.Fault { return b.Fault_ } + +func UploadKmipServerCert(ctx context.Context, r soap.RoundTripper, req *types.UploadKmipServerCert) (*types.UploadKmipServerCertResponse, error) { + var reqBody, resBody UploadKmipServerCertBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type VStorageObjectCreateSnapshot_TaskBody struct { + Req *types.VStorageObjectCreateSnapshot_Task `xml:"urn:vim25 VStorageObjectCreateSnapshot_Task,omitempty"` + Res *types.VStorageObjectCreateSnapshot_TaskResponse `xml:"urn:vim25 VStorageObjectCreateSnapshot_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *VStorageObjectCreateSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func VStorageObjectCreateSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.VStorageObjectCreateSnapshot_Task) (*types.VStorageObjectCreateSnapshot_TaskResponse, error) { + var reqBody, resBody VStorageObjectCreateSnapshot_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ValidateCredentialsInGuestBody struct { + Req *types.ValidateCredentialsInGuest `xml:"urn:vim25 ValidateCredentialsInGuest,omitempty"` + Res *types.ValidateCredentialsInGuestResponse `xml:"urn:vim25 ValidateCredentialsInGuestResponse,omitempty"` + Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ValidateCredentialsInGuestBody) Fault() *soap.Fault { return b.Fault_ } + +func ValidateCredentialsInGuest(ctx context.Context, r soap.RoundTripper, req *types.ValidateCredentialsInGuest) (*types.ValidateCredentialsInGuestResponse, error) { + var reqBody, resBody ValidateCredentialsInGuestBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ValidateHostBody struct { + Req *types.ValidateHost `xml:"urn:vim25 ValidateHost,omitempty"` + Res *types.ValidateHostResponse `xml:"urn:vim25 ValidateHostResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ValidateHostBody) Fault() *soap.Fault { return b.Fault_ } + +func ValidateHost(ctx context.Context, r soap.RoundTripper, req *types.ValidateHost) (*types.ValidateHostResponse, error) { + var reqBody, resBody ValidateHostBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ValidateHostProfileComposition_TaskBody struct { + Req *types.ValidateHostProfileComposition_Task `xml:"urn:vim25 ValidateHostProfileComposition_Task,omitempty"` + Res *types.ValidateHostProfileComposition_TaskResponse `xml:"urn:vim25 ValidateHostProfileComposition_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ValidateHostProfileComposition_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ValidateHostProfileComposition_Task(ctx context.Context, r soap.RoundTripper, req *types.ValidateHostProfileComposition_Task) (*types.ValidateHostProfileComposition_TaskResponse, error) { + var reqBody, resBody ValidateHostProfileComposition_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ValidateMigrationBody struct { + Req *types.ValidateMigration `xml:"urn:vim25 ValidateMigration,omitempty"` + Res *types.ValidateMigrationResponse `xml:"urn:vim25 ValidateMigrationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ValidateMigrationBody) Fault() *soap.Fault { return b.Fault_ } + +func ValidateMigration(ctx context.Context, r soap.RoundTripper, req *types.ValidateMigration) (*types.ValidateMigrationResponse, error) { + var reqBody, resBody ValidateMigrationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ValidateStoragePodConfigBody struct { + Req *types.ValidateStoragePodConfig `xml:"urn:vim25 ValidateStoragePodConfig,omitempty"` + Res *types.ValidateStoragePodConfigResponse `xml:"urn:vim25 ValidateStoragePodConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ValidateStoragePodConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func ValidateStoragePodConfig(ctx context.Context, r soap.RoundTripper, req *types.ValidateStoragePodConfig) (*types.ValidateStoragePodConfigResponse, error) { + var reqBody, resBody ValidateStoragePodConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type WaitForUpdatesBody struct 
{ + Req *types.WaitForUpdates `xml:"urn:vim25 WaitForUpdates,omitempty"` + Res *types.WaitForUpdatesResponse `xml:"urn:vim25 WaitForUpdatesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *WaitForUpdatesBody) Fault() *soap.Fault { return b.Fault_ } + +func WaitForUpdates(ctx context.Context, r soap.RoundTripper, req *types.WaitForUpdates) (*types.WaitForUpdatesResponse, error) { + var reqBody, resBody WaitForUpdatesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type WaitForUpdatesExBody struct { + Req *types.WaitForUpdatesEx `xml:"urn:vim25 WaitForUpdatesEx,omitempty"` + Res *types.WaitForUpdatesExResponse `xml:"urn:vim25 WaitForUpdatesExResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *WaitForUpdatesExBody) Fault() *soap.Fault { return b.Fault_ } + +func WaitForUpdatesEx(ctx context.Context, r soap.RoundTripper, req *types.WaitForUpdatesEx) (*types.WaitForUpdatesExResponse, error) { + var reqBody, resBody WaitForUpdatesExBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type XmlToCustomizationSpecItemBody struct { + Req *types.XmlToCustomizationSpecItem `xml:"urn:vim25 XmlToCustomizationSpecItem,omitempty"` + Res *types.XmlToCustomizationSpecItemResponse `xml:"urn:vim25 XmlToCustomizationSpecItemResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *XmlToCustomizationSpecItemBody) Fault() *soap.Fault { return b.Fault_ } + +func XmlToCustomizationSpecItem(ctx context.Context, r soap.RoundTripper, req *types.XmlToCustomizationSpecItem) (*types.XmlToCustomizationSpecItemResponse, error) { + var reqBody, resBody XmlToCustomizationSpecItemBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ZeroFillVirtualDisk_TaskBody struct { + Req *types.ZeroFillVirtualDisk_Task `xml:"urn:vim25 ZeroFillVirtualDisk_Task,omitempty"` + Res *types.ZeroFillVirtualDisk_TaskResponse `xml:"urn:vim25 ZeroFillVirtualDisk_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ZeroFillVirtualDisk_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ZeroFillVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.ZeroFillVirtualDisk_Task) (*types.ZeroFillVirtualDisk_TaskResponse, error) { + var reqBody, resBody ZeroFillVirtualDisk_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ConfigureVcha_TaskBody struct { + Req *types.ConfigureVcha_Task `xml:"urn:vim25 configureVcha_Task,omitempty"` + Res *types.ConfigureVcha_TaskResponse `xml:"urn:vim25 configureVcha_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConfigureVcha_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ConfigureVcha_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureVcha_Task) (*types.ConfigureVcha_TaskResponse, error) { + var reqBody, resBody ConfigureVcha_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + 
return nil, err + } + + return resBody.Res, nil +} + +type CreatePassiveNode_TaskBody struct { + Req *types.CreatePassiveNode_Task `xml:"urn:vim25 createPassiveNode_Task,omitempty"` + Res *types.CreatePassiveNode_TaskResponse `xml:"urn:vim25 createPassiveNode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreatePassiveNode_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreatePassiveNode_Task(ctx context.Context, r soap.RoundTripper, req *types.CreatePassiveNode_Task) (*types.CreatePassiveNode_TaskResponse, error) { + var reqBody, resBody CreatePassiveNode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateWitnessNode_TaskBody struct { + Req *types.CreateWitnessNode_Task `xml:"urn:vim25 createWitnessNode_Task,omitempty"` + Res *types.CreateWitnessNode_TaskResponse `xml:"urn:vim25 createWitnessNode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateWitnessNode_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateWitnessNode_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateWitnessNode_Task) (*types.CreateWitnessNode_TaskResponse, error) { + var reqBody, resBody CreateWitnessNode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DeployVcha_TaskBody struct { + Req *types.DeployVcha_Task `xml:"urn:vim25 deployVcha_Task,omitempty"` + Res *types.DeployVcha_TaskResponse `xml:"urn:vim25 deployVcha_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeployVcha_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DeployVcha_Task(ctx context.Context, r soap.RoundTripper, req *types.DeployVcha_Task) (*types.DeployVcha_TaskResponse, error) { + var reqBody, resBody DeployVcha_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DestroyVcha_TaskBody struct { + Req *types.DestroyVcha_Task `xml:"urn:vim25 destroyVcha_Task,omitempty"` + Res *types.DestroyVcha_TaskResponse `xml:"urn:vim25 destroyVcha_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DestroyVcha_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DestroyVcha_Task(ctx context.Context, r soap.RoundTripper, req *types.DestroyVcha_Task) (*types.DestroyVcha_TaskResponse, error) { + var reqBody, resBody DestroyVcha_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type FetchSoftwarePackagesBody struct { + Req *types.FetchSoftwarePackages `xml:"urn:vim25 fetchSoftwarePackages,omitempty"` + Res *types.FetchSoftwarePackagesResponse `xml:"urn:vim25 fetchSoftwarePackagesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FetchSoftwarePackagesBody) Fault() *soap.Fault { return b.Fault_ } + +func FetchSoftwarePackages(ctx context.Context, r soap.RoundTripper, req *types.FetchSoftwarePackages) (*types.FetchSoftwarePackagesResponse, error) { + var reqBody, resBody FetchSoftwarePackagesBody + + reqBody.Req = req + + if 
err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GetClusterModeBody struct { + Req *types.GetClusterMode `xml:"urn:vim25 getClusterMode,omitempty"` + Res *types.GetClusterModeResponse `xml:"urn:vim25 getClusterModeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GetClusterModeBody) Fault() *soap.Fault { return b.Fault_ } + +func GetClusterMode(ctx context.Context, r soap.RoundTripper, req *types.GetClusterMode) (*types.GetClusterModeResponse, error) { + var reqBody, resBody GetClusterModeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GetVchaConfigBody struct { + Req *types.GetVchaConfig `xml:"urn:vim25 getVchaConfig,omitempty"` + Res *types.GetVchaConfigResponse `xml:"urn:vim25 getVchaConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GetVchaConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func GetVchaConfig(ctx context.Context, r soap.RoundTripper, req *types.GetVchaConfig) (*types.GetVchaConfigResponse, error) { + var reqBody, resBody GetVchaConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type InitiateFailover_TaskBody struct { + Req *types.InitiateFailover_Task `xml:"urn:vim25 initiateFailover_Task,omitempty"` + Res *types.InitiateFailover_TaskResponse `xml:"urn:vim25 initiateFailover_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *InitiateFailover_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func InitiateFailover_Task(ctx context.Context, r soap.RoundTripper, req *types.InitiateFailover_Task) (*types.InitiateFailover_TaskResponse, error) { + var reqBody, resBody InitiateFailover_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type InstallDateBody struct { + Req *types.InstallDate `xml:"urn:vim25 installDate,omitempty"` + Res *types.InstallDateResponse `xml:"urn:vim25 installDateResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *InstallDateBody) Fault() *soap.Fault { return b.Fault_ } + +func InstallDate(ctx context.Context, r soap.RoundTripper, req *types.InstallDate) (*types.InstallDateResponse, error) { + var reqBody, resBody InstallDateBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type PrepareVcha_TaskBody struct { + Req *types.PrepareVcha_Task `xml:"urn:vim25 prepareVcha_Task,omitempty"` + Res *types.PrepareVcha_TaskResponse `xml:"urn:vim25 prepareVcha_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PrepareVcha_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func PrepareVcha_Task(ctx context.Context, r soap.RoundTripper, req *types.PrepareVcha_Task) (*types.PrepareVcha_TaskResponse, error) { + var reqBody, resBody PrepareVcha_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type 
QueryDatacenterConfigOptionDescriptorBody struct { + Req *types.QueryDatacenterConfigOptionDescriptor `xml:"urn:vim25 queryDatacenterConfigOptionDescriptor,omitempty"` + Res *types.QueryDatacenterConfigOptionDescriptorResponse `xml:"urn:vim25 queryDatacenterConfigOptionDescriptorResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryDatacenterConfigOptionDescriptorBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryDatacenterConfigOptionDescriptor(ctx context.Context, r soap.RoundTripper, req *types.QueryDatacenterConfigOptionDescriptor) (*types.QueryDatacenterConfigOptionDescriptorResponse, error) { + var reqBody, resBody QueryDatacenterConfigOptionDescriptorBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ReloadVirtualMachineFromPath_TaskBody struct { + Req *types.ReloadVirtualMachineFromPath_Task `xml:"urn:vim25 reloadVirtualMachineFromPath_Task,omitempty"` + Res *types.ReloadVirtualMachineFromPath_TaskResponse `xml:"urn:vim25 reloadVirtualMachineFromPath_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ReloadVirtualMachineFromPath_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ReloadVirtualMachineFromPath_Task(ctx context.Context, r soap.RoundTripper, req *types.ReloadVirtualMachineFromPath_Task) (*types.ReloadVirtualMachineFromPath_TaskResponse, error) { + var reqBody, resBody ReloadVirtualMachineFromPath_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetClusterMode_TaskBody struct { + Req *types.SetClusterMode_Task `xml:"urn:vim25 setClusterMode_Task,omitempty"` + Res *types.SetClusterMode_TaskResponse `xml:"urn:vim25 setClusterMode_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetClusterMode_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func SetClusterMode_Task(ctx context.Context, r soap.RoundTripper, req *types.SetClusterMode_Task) (*types.SetClusterMode_TaskResponse, error) { + var reqBody, resBody SetClusterMode_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetCustomValueBody struct { + Req *types.SetCustomValue `xml:"urn:vim25 setCustomValue,omitempty"` + Res *types.SetCustomValueResponse `xml:"urn:vim25 setCustomValueResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetCustomValueBody) Fault() *soap.Fault { return b.Fault_ } + +func SetCustomValue(ctx context.Context, r soap.RoundTripper, req *types.SetCustomValue) (*types.SetCustomValueResponse, error) { + var reqBody, resBody SetCustomValueBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type UnregisterVApp_TaskBody struct { + Req *types.UnregisterVApp_Task `xml:"urn:vim25 unregisterVApp_Task,omitempty"` + Res *types.UnregisterVApp_TaskResponse `xml:"urn:vim25 unregisterVApp_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnregisterVApp_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func 
UnregisterVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.UnregisterVApp_Task) (*types.UnregisterVApp_TaskResponse, error) { + var reqBody, resBody UnregisterVApp_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} diff --git a/vendor/github.com/vmware/govmomi/vim25/methods/service_content.go b/vendor/github.com/vmware/govmomi/vim25/methods/service_content.go new file mode 100644 index 000000000000..401646598d44 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/methods/service_content.go @@ -0,0 +1,57 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package methods + +import ( + "context" + "time" + + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +// copy of vim25.ServiceInstance to avoid import cycle +var serviceInstance = types.ManagedObjectReference{ + Type: "ServiceInstance", + Value: "ServiceInstance", +} + +func GetServiceContent(ctx context.Context, r soap.RoundTripper) (types.ServiceContent, error) { + req := types.RetrieveServiceContent{ + This: serviceInstance, + } + + res, err := RetrieveServiceContent(ctx, r, &req) + if err != nil { + return types.ServiceContent{}, err + } + + return res.Returnval, nil +} + +func GetCurrentTime(ctx context.Context, r soap.RoundTripper) (*time.Time, error) { + req := types.CurrentTime{ + This: serviceInstance, + } + + res, err := CurrentTime(ctx, r, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/ancestors.go b/vendor/github.com/vmware/govmomi/vim25/mo/ancestors.go new file mode 100644 index 000000000000..d3da5b1847c9 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/ancestors.go @@ -0,0 +1,137 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mo + +import ( + "context" + "fmt" + + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +// Ancestors returns the entire ancestry tree of a specified managed object. +// The return value includes the root node and the specified object itself. 
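+//
+// As a minimal usage sketch (illustrative only; it assumes an authenticated
+// *vim25.Client named c, which satisfies soap.RoundTripper, and some managed
+// object reference ref; neither name comes from this file), a caller outside
+// this package might walk the ancestry roughly like so:
+//
+//	ancestry, err := mo.Ancestors(ctx, c, c.ServiceContent.PropertyCollector, ref)
+//	if err != nil {
+//		return err
+//	}
+//	for _, entity := range ancestry {
+//		fmt.Printf("%s/", entity.Name) // printed root-first, ending with ref itself
+//	}
+//
+// Internally, Ancestors issues a single RetrieveProperties request whose traversal
+// spec follows the "parent" (and, for virtual machines, "parentVApp") properties,
+// then orders the results from the root down to the requested object.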
+func Ancestors(ctx context.Context, rt soap.RoundTripper, pc, obj types.ManagedObjectReference) ([]ManagedEntity, error) { + ospec := types.ObjectSpec{ + Obj: obj, + SelectSet: []types.BaseSelectionSpec{ + &types.TraversalSpec{ + SelectionSpec: types.SelectionSpec{Name: "traverseParent"}, + Type: "ManagedEntity", + Path: "parent", + Skip: types.NewBool(false), + SelectSet: []types.BaseSelectionSpec{ + &types.SelectionSpec{Name: "traverseParent"}, + }, + }, + &types.TraversalSpec{ + SelectionSpec: types.SelectionSpec{}, + Type: "VirtualMachine", + Path: "parentVApp", + Skip: types.NewBool(false), + SelectSet: []types.BaseSelectionSpec{ + &types.SelectionSpec{Name: "traverseParent"}, + }, + }, + }, + Skip: types.NewBool(false), + } + + pspec := []types.PropertySpec{ + { + Type: "ManagedEntity", + PathSet: []string{"name", "parent"}, + }, + { + Type: "VirtualMachine", + PathSet: []string{"parentVApp"}, + }, + } + + req := types.RetrieveProperties{ + This: pc, + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: pspec, + }, + }, + } + + var ifaces []interface{} + err := RetrievePropertiesForRequest(ctx, rt, req, &ifaces) + if err != nil { + return nil, err + } + + var out []ManagedEntity + + // Build ancestry tree by iteratively finding a new child. + for len(out) < len(ifaces) { + var find types.ManagedObjectReference + + if len(out) > 0 { + find = out[len(out)-1].Self + } + + // Find entity we're looking for given the last entity in the current tree. + for _, iface := range ifaces { + me := iface.(IsManagedEntity).GetManagedEntity() + + if me.Name == "" { + // The types below have their own 'Name' field, so ManagedEntity.Name (me.Name) is empty. + // We only hit this case when the 'obj' param is one of these types. + // In most cases, 'obj' is a Folder so Name isn't collected in this call. + switch x := iface.(type) { + case Network: + me.Name = x.Name + case DistributedVirtualSwitch: + me.Name = x.Name + case DistributedVirtualPortgroup: + me.Name = x.Name + case OpaqueNetwork: + me.Name = x.Name + default: + // ManagedEntity always has a Name, if we hit this point we missed a case above. + panic(fmt.Sprintf("%#v Name is empty", me.Reference())) + } + } + + if me.Parent == nil { + // Special case for VirtualMachine within VirtualApp, + // unlikely to hit this other than via Finder.Element() + switch x := iface.(type) { + case VirtualMachine: + me.Parent = x.ParentVApp + } + } + + if me.Parent == nil { + out = append(out, me) + break + } + + if *me.Parent == find { + out = append(out, me) + break + } + } + } + + return out, nil +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/entity.go b/vendor/github.com/vmware/govmomi/vim25/mo/entity.go new file mode 100644 index 000000000000..193e6f71ea10 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/entity.go @@ -0,0 +1,24 @@ +/* +Copyright (c) 2016 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mo + +// Entity is the interface that is implemented by all managed objects +// that extend ManagedEntity. +type Entity interface { + Reference + Entity() *ManagedEntity +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/extra.go b/vendor/github.com/vmware/govmomi/vim25/mo/extra.go new file mode 100644 index 000000000000..254ef35949bd --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/extra.go @@ -0,0 +1,61 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mo + +type IsManagedEntity interface { + GetManagedEntity() ManagedEntity +} + +func (m ComputeResource) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m Datacenter) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m Datastore) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m DistributedVirtualSwitch) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m DistributedVirtualPortgroup) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m Folder) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m HostSystem) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m Network) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m ResourcePool) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} + +func (m VirtualMachine) GetManagedEntity() ManagedEntity { + return m.ManagedEntity +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/mo.go b/vendor/github.com/vmware/govmomi/vim25/mo/mo.go new file mode 100644 index 000000000000..4f19988e36df --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/mo.go @@ -0,0 +1,1801 @@ +/* +Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mo + +import ( + "reflect" + "time" + + "github.com/vmware/govmomi/vim25/types" +) + +type Alarm struct { + ExtensibleManagedObject + + Info types.AlarmInfo `mo:"info"` +} + +func init() { + t["Alarm"] = reflect.TypeOf((*Alarm)(nil)).Elem() +} + +type AlarmManager struct { + Self types.ManagedObjectReference + + DefaultExpression []types.BaseAlarmExpression `mo:"defaultExpression"` + Description types.AlarmDescription `mo:"description"` +} + +func (m AlarmManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["AlarmManager"] = reflect.TypeOf((*AlarmManager)(nil)).Elem() +} + +type AuthorizationManager struct { + Self types.ManagedObjectReference + + PrivilegeList []types.AuthorizationPrivilege `mo:"privilegeList"` + RoleList []types.AuthorizationRole `mo:"roleList"` + Description types.AuthorizationDescription `mo:"description"` +} + +func (m AuthorizationManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["AuthorizationManager"] = reflect.TypeOf((*AuthorizationManager)(nil)).Elem() +} + +type CertificateManager struct { + Self types.ManagedObjectReference +} + +func (m CertificateManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["CertificateManager"] = reflect.TypeOf((*CertificateManager)(nil)).Elem() +} + +type ClusterComputeResource struct { + ComputeResource + + Configuration types.ClusterConfigInfo `mo:"configuration"` + Recommendation []types.ClusterRecommendation `mo:"recommendation"` + DrsRecommendation []types.ClusterDrsRecommendation `mo:"drsRecommendation"` + MigrationHistory []types.ClusterDrsMigration `mo:"migrationHistory"` + ActionHistory []types.ClusterActionHistory `mo:"actionHistory"` + DrsFault []types.ClusterDrsFaults `mo:"drsFault"` +} + +func init() { + t["ClusterComputeResource"] = reflect.TypeOf((*ClusterComputeResource)(nil)).Elem() +} + +type ClusterEVCManager struct { + ExtensibleManagedObject + + ManagedCluster types.ManagedObjectReference `mo:"managedCluster"` + EvcState types.ClusterEVCManagerEVCState `mo:"evcState"` +} + +func init() { + t["ClusterEVCManager"] = reflect.TypeOf((*ClusterEVCManager)(nil)).Elem() +} + +type ClusterProfile struct { + Profile +} + +func init() { + t["ClusterProfile"] = reflect.TypeOf((*ClusterProfile)(nil)).Elem() +} + +type ClusterProfileManager struct { + ProfileManager +} + +func init() { + t["ClusterProfileManager"] = reflect.TypeOf((*ClusterProfileManager)(nil)).Elem() +} + +type ComputeResource struct { + ManagedEntity + + ResourcePool *types.ManagedObjectReference `mo:"resourcePool"` + Host []types.ManagedObjectReference `mo:"host"` + Datastore []types.ManagedObjectReference `mo:"datastore"` + Network []types.ManagedObjectReference `mo:"network"` + Summary types.BaseComputeResourceSummary `mo:"summary"` + EnvironmentBrowser *types.ManagedObjectReference `mo:"environmentBrowser"` + ConfigurationEx types.BaseComputeResourceConfigInfo `mo:"configurationEx"` +} + +func (m *ComputeResource) Entity() *ManagedEntity { + return &m.ManagedEntity +} + +func init() { + t["ComputeResource"] = reflect.TypeOf((*ComputeResource)(nil)).Elem() +} + +type ContainerView struct { + ManagedObjectView + + Container types.ManagedObjectReference `mo:"container"` + Type []string `mo:"type"` + Recursive bool `mo:"recursive"` +} + +func init() { + t["ContainerView"] = reflect.TypeOf((*ContainerView)(nil)).Elem() +} + +type CryptoManager struct { + Self types.ManagedObjectReference + + Enabled bool `mo:"enabled"` +} + 
+func (m CryptoManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["CryptoManager"] = reflect.TypeOf((*CryptoManager)(nil)).Elem() +} + +type CryptoManagerHost struct { + CryptoManager +} + +func init() { + t["CryptoManagerHost"] = reflect.TypeOf((*CryptoManagerHost)(nil)).Elem() +} + +type CryptoManagerHostKMS struct { + CryptoManagerHost +} + +func init() { + t["CryptoManagerHostKMS"] = reflect.TypeOf((*CryptoManagerHostKMS)(nil)).Elem() +} + +type CryptoManagerKmip struct { + CryptoManager + + KmipServers []types.KmipClusterInfo `mo:"kmipServers"` +} + +func init() { + t["CryptoManagerKmip"] = reflect.TypeOf((*CryptoManagerKmip)(nil)).Elem() +} + +type CustomFieldsManager struct { + Self types.ManagedObjectReference + + Field []types.CustomFieldDef `mo:"field"` +} + +func (m CustomFieldsManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["CustomFieldsManager"] = reflect.TypeOf((*CustomFieldsManager)(nil)).Elem() +} + +type CustomizationSpecManager struct { + Self types.ManagedObjectReference + + Info []types.CustomizationSpecInfo `mo:"info"` + EncryptionKey []byte `mo:"encryptionKey"` +} + +func (m CustomizationSpecManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["CustomizationSpecManager"] = reflect.TypeOf((*CustomizationSpecManager)(nil)).Elem() +} + +type Datacenter struct { + ManagedEntity + + VmFolder types.ManagedObjectReference `mo:"vmFolder"` + HostFolder types.ManagedObjectReference `mo:"hostFolder"` + DatastoreFolder types.ManagedObjectReference `mo:"datastoreFolder"` + NetworkFolder types.ManagedObjectReference `mo:"networkFolder"` + Datastore []types.ManagedObjectReference `mo:"datastore"` + Network []types.ManagedObjectReference `mo:"network"` + Configuration types.DatacenterConfigInfo `mo:"configuration"` +} + +func (m *Datacenter) Entity() *ManagedEntity { + return &m.ManagedEntity +} + +func init() { + t["Datacenter"] = reflect.TypeOf((*Datacenter)(nil)).Elem() +} + +type Datastore struct { + ManagedEntity + + Info types.BaseDatastoreInfo `mo:"info"` + Summary types.DatastoreSummary `mo:"summary"` + Host []types.DatastoreHostMount `mo:"host"` + Vm []types.ManagedObjectReference `mo:"vm"` + Browser types.ManagedObjectReference `mo:"browser"` + Capability types.DatastoreCapability `mo:"capability"` + IormConfiguration *types.StorageIORMInfo `mo:"iormConfiguration"` +} + +func (m *Datastore) Entity() *ManagedEntity { + return &m.ManagedEntity +} + +func init() { + t["Datastore"] = reflect.TypeOf((*Datastore)(nil)).Elem() +} + +type DatastoreNamespaceManager struct { + Self types.ManagedObjectReference +} + +func (m DatastoreNamespaceManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["DatastoreNamespaceManager"] = reflect.TypeOf((*DatastoreNamespaceManager)(nil)).Elem() +} + +type DiagnosticManager struct { + Self types.ManagedObjectReference +} + +func (m DiagnosticManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["DiagnosticManager"] = reflect.TypeOf((*DiagnosticManager)(nil)).Elem() +} + +type DistributedVirtualPortgroup struct { + Network + + Key string `mo:"key"` + Config types.DVPortgroupConfigInfo `mo:"config"` + PortKeys []string `mo:"portKeys"` +} + +func init() { + t["DistributedVirtualPortgroup"] = reflect.TypeOf((*DistributedVirtualPortgroup)(nil)).Elem() +} + +type DistributedVirtualSwitch struct { + ManagedEntity + + Uuid string `mo:"uuid"` + Capability 
types.DVSCapability `mo:"capability"` + Summary types.DVSSummary `mo:"summary"` + Config types.BaseDVSConfigInfo `mo:"config"` + NetworkResourcePool []types.DVSNetworkResourcePool `mo:"networkResourcePool"` + Portgroup []types.ManagedObjectReference `mo:"portgroup"` + Runtime *types.DVSRuntimeInfo `mo:"runtime"` +} + +func (m *DistributedVirtualSwitch) Entity() *ManagedEntity { + return &m.ManagedEntity +} + +func init() { + t["DistributedVirtualSwitch"] = reflect.TypeOf((*DistributedVirtualSwitch)(nil)).Elem() +} + +type DistributedVirtualSwitchManager struct { + Self types.ManagedObjectReference +} + +func (m DistributedVirtualSwitchManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["DistributedVirtualSwitchManager"] = reflect.TypeOf((*DistributedVirtualSwitchManager)(nil)).Elem() +} + +type EnvironmentBrowser struct { + Self types.ManagedObjectReference + + DatastoreBrowser *types.ManagedObjectReference `mo:"datastoreBrowser"` +} + +func (m EnvironmentBrowser) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["EnvironmentBrowser"] = reflect.TypeOf((*EnvironmentBrowser)(nil)).Elem() +} + +type EventHistoryCollector struct { + HistoryCollector + + LatestPage []types.BaseEvent `mo:"latestPage"` +} + +func init() { + t["EventHistoryCollector"] = reflect.TypeOf((*EventHistoryCollector)(nil)).Elem() +} + +type EventManager struct { + Self types.ManagedObjectReference + + Description types.EventDescription `mo:"description"` + LatestEvent types.BaseEvent `mo:"latestEvent"` + MaxCollector int32 `mo:"maxCollector"` +} + +func (m EventManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["EventManager"] = reflect.TypeOf((*EventManager)(nil)).Elem() +} + +type ExtensibleManagedObject struct { + Self types.ManagedObjectReference + + Value []types.BaseCustomFieldValue `mo:"value"` + AvailableField []types.CustomFieldDef `mo:"availableField"` +} + +func (m ExtensibleManagedObject) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ExtensibleManagedObject"] = reflect.TypeOf((*ExtensibleManagedObject)(nil)).Elem() +} + +type ExtensionManager struct { + Self types.ManagedObjectReference + + ExtensionList []types.Extension `mo:"extensionList"` +} + +func (m ExtensionManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ExtensionManager"] = reflect.TypeOf((*ExtensionManager)(nil)).Elem() +} + +type FailoverClusterConfigurator struct { + Self types.ManagedObjectReference + + DisabledConfigureMethod []string `mo:"disabledConfigureMethod"` +} + +func (m FailoverClusterConfigurator) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["FailoverClusterConfigurator"] = reflect.TypeOf((*FailoverClusterConfigurator)(nil)).Elem() +} + +type FailoverClusterManager struct { + Self types.ManagedObjectReference + + DisabledClusterMethod []string `mo:"disabledClusterMethod"` +} + +func (m FailoverClusterManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["FailoverClusterManager"] = reflect.TypeOf((*FailoverClusterManager)(nil)).Elem() +} + +type FileManager struct { + Self types.ManagedObjectReference +} + +func (m FileManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["FileManager"] = reflect.TypeOf((*FileManager)(nil)).Elem() +} + +type Folder struct { + ManagedEntity + + ChildType []string `mo:"childType"` + ChildEntity 
[]types.ManagedObjectReference `mo:"childEntity"` +} + +func (m *Folder) Entity() *ManagedEntity { + return &m.ManagedEntity +} + +func init() { + t["Folder"] = reflect.TypeOf((*Folder)(nil)).Elem() +} + +type GuestAliasManager struct { + Self types.ManagedObjectReference +} + +func (m GuestAliasManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["GuestAliasManager"] = reflect.TypeOf((*GuestAliasManager)(nil)).Elem() +} + +type GuestAuthManager struct { + Self types.ManagedObjectReference +} + +func (m GuestAuthManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["GuestAuthManager"] = reflect.TypeOf((*GuestAuthManager)(nil)).Elem() +} + +type GuestFileManager struct { + Self types.ManagedObjectReference +} + +func (m GuestFileManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["GuestFileManager"] = reflect.TypeOf((*GuestFileManager)(nil)).Elem() +} + +type GuestOperationsManager struct { + Self types.ManagedObjectReference + + AuthManager *types.ManagedObjectReference `mo:"authManager"` + FileManager *types.ManagedObjectReference `mo:"fileManager"` + ProcessManager *types.ManagedObjectReference `mo:"processManager"` + GuestWindowsRegistryManager *types.ManagedObjectReference `mo:"guestWindowsRegistryManager"` + AliasManager *types.ManagedObjectReference `mo:"aliasManager"` +} + +func (m GuestOperationsManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["GuestOperationsManager"] = reflect.TypeOf((*GuestOperationsManager)(nil)).Elem() +} + +type GuestProcessManager struct { + Self types.ManagedObjectReference +} + +func (m GuestProcessManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["GuestProcessManager"] = reflect.TypeOf((*GuestProcessManager)(nil)).Elem() +} + +type GuestWindowsRegistryManager struct { + Self types.ManagedObjectReference +} + +func (m GuestWindowsRegistryManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["GuestWindowsRegistryManager"] = reflect.TypeOf((*GuestWindowsRegistryManager)(nil)).Elem() +} + +type HealthUpdateManager struct { + Self types.ManagedObjectReference +} + +func (m HealthUpdateManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HealthUpdateManager"] = reflect.TypeOf((*HealthUpdateManager)(nil)).Elem() +} + +type HistoryCollector struct { + Self types.ManagedObjectReference + + Filter types.AnyType `mo:"filter"` +} + +func (m HistoryCollector) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HistoryCollector"] = reflect.TypeOf((*HistoryCollector)(nil)).Elem() +} + +type HostAccessManager struct { + Self types.ManagedObjectReference + + LockdownMode types.HostLockdownMode `mo:"lockdownMode"` +} + +func (m HostAccessManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostAccessManager"] = reflect.TypeOf((*HostAccessManager)(nil)).Elem() +} + +type HostActiveDirectoryAuthentication struct { + HostDirectoryStore +} + +func init() { + t["HostActiveDirectoryAuthentication"] = reflect.TypeOf((*HostActiveDirectoryAuthentication)(nil)).Elem() +} + +type HostAuthenticationManager struct { + Self types.ManagedObjectReference + + Info types.HostAuthenticationManagerInfo `mo:"info"` + SupportedStore []types.ManagedObjectReference `mo:"supportedStore"` +} + +func (m HostAuthenticationManager) Reference() 
types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostAuthenticationManager"] = reflect.TypeOf((*HostAuthenticationManager)(nil)).Elem() +} + +type HostAuthenticationStore struct { + Self types.ManagedObjectReference + + Info types.BaseHostAuthenticationStoreInfo `mo:"info"` +} + +func (m HostAuthenticationStore) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostAuthenticationStore"] = reflect.TypeOf((*HostAuthenticationStore)(nil)).Elem() +} + +type HostAutoStartManager struct { + Self types.ManagedObjectReference + + Config types.HostAutoStartManagerConfig `mo:"config"` +} + +func (m HostAutoStartManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostAutoStartManager"] = reflect.TypeOf((*HostAutoStartManager)(nil)).Elem() +} + +type HostBootDeviceSystem struct { + Self types.ManagedObjectReference +} + +func (m HostBootDeviceSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostBootDeviceSystem"] = reflect.TypeOf((*HostBootDeviceSystem)(nil)).Elem() +} + +type HostCacheConfigurationManager struct { + Self types.ManagedObjectReference + + CacheConfigurationInfo []types.HostCacheConfigurationInfo `mo:"cacheConfigurationInfo"` +} + +func (m HostCacheConfigurationManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostCacheConfigurationManager"] = reflect.TypeOf((*HostCacheConfigurationManager)(nil)).Elem() +} + +type HostCertificateManager struct { + Self types.ManagedObjectReference + + CertificateInfo types.HostCertificateManagerCertificateInfo `mo:"certificateInfo"` +} + +func (m HostCertificateManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostCertificateManager"] = reflect.TypeOf((*HostCertificateManager)(nil)).Elem() +} + +type HostCpuSchedulerSystem struct { + ExtensibleManagedObject + + HyperthreadInfo *types.HostHyperThreadScheduleInfo `mo:"hyperthreadInfo"` +} + +func init() { + t["HostCpuSchedulerSystem"] = reflect.TypeOf((*HostCpuSchedulerSystem)(nil)).Elem() +} + +type HostDatastoreBrowser struct { + Self types.ManagedObjectReference + + Datastore []types.ManagedObjectReference `mo:"datastore"` + SupportedType []types.BaseFileQuery `mo:"supportedType"` +} + +func (m HostDatastoreBrowser) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostDatastoreBrowser"] = reflect.TypeOf((*HostDatastoreBrowser)(nil)).Elem() +} + +type HostDatastoreSystem struct { + Self types.ManagedObjectReference + + Datastore []types.ManagedObjectReference `mo:"datastore"` + Capabilities types.HostDatastoreSystemCapabilities `mo:"capabilities"` +} + +func (m HostDatastoreSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostDatastoreSystem"] = reflect.TypeOf((*HostDatastoreSystem)(nil)).Elem() +} + +type HostDateTimeSystem struct { + Self types.ManagedObjectReference + + DateTimeInfo types.HostDateTimeInfo `mo:"dateTimeInfo"` +} + +func (m HostDateTimeSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostDateTimeSystem"] = reflect.TypeOf((*HostDateTimeSystem)(nil)).Elem() +} + +type HostDiagnosticSystem struct { + Self types.ManagedObjectReference + + ActivePartition *types.HostDiagnosticPartition `mo:"activePartition"` +} + +func (m HostDiagnosticSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostDiagnosticSystem"] 
= reflect.TypeOf((*HostDiagnosticSystem)(nil)).Elem() +} + +type HostDirectoryStore struct { + HostAuthenticationStore +} + +func init() { + t["HostDirectoryStore"] = reflect.TypeOf((*HostDirectoryStore)(nil)).Elem() +} + +type HostEsxAgentHostManager struct { + Self types.ManagedObjectReference + + ConfigInfo types.HostEsxAgentHostManagerConfigInfo `mo:"configInfo"` +} + +func (m HostEsxAgentHostManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostEsxAgentHostManager"] = reflect.TypeOf((*HostEsxAgentHostManager)(nil)).Elem() +} + +type HostFirewallSystem struct { + ExtensibleManagedObject + + FirewallInfo *types.HostFirewallInfo `mo:"firewallInfo"` +} + +func init() { + t["HostFirewallSystem"] = reflect.TypeOf((*HostFirewallSystem)(nil)).Elem() +} + +type HostFirmwareSystem struct { + Self types.ManagedObjectReference +} + +func (m HostFirmwareSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostFirmwareSystem"] = reflect.TypeOf((*HostFirmwareSystem)(nil)).Elem() +} + +type HostGraphicsManager struct { + ExtensibleManagedObject + + GraphicsInfo []types.HostGraphicsInfo `mo:"graphicsInfo"` + GraphicsConfig *types.HostGraphicsConfig `mo:"graphicsConfig"` + SharedPassthruGpuTypes []string `mo:"sharedPassthruGpuTypes"` + SharedGpuCapabilities []types.HostSharedGpuCapabilities `mo:"sharedGpuCapabilities"` +} + +func init() { + t["HostGraphicsManager"] = reflect.TypeOf((*HostGraphicsManager)(nil)).Elem() +} + +type HostHealthStatusSystem struct { + Self types.ManagedObjectReference + + Runtime types.HealthSystemRuntime `mo:"runtime"` +} + +func (m HostHealthStatusSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostHealthStatusSystem"] = reflect.TypeOf((*HostHealthStatusSystem)(nil)).Elem() +} + +type HostImageConfigManager struct { + Self types.ManagedObjectReference +} + +func (m HostImageConfigManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostImageConfigManager"] = reflect.TypeOf((*HostImageConfigManager)(nil)).Elem() +} + +type HostKernelModuleSystem struct { + Self types.ManagedObjectReference +} + +func (m HostKernelModuleSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostKernelModuleSystem"] = reflect.TypeOf((*HostKernelModuleSystem)(nil)).Elem() +} + +type HostLocalAccountManager struct { + Self types.ManagedObjectReference +} + +func (m HostLocalAccountManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostLocalAccountManager"] = reflect.TypeOf((*HostLocalAccountManager)(nil)).Elem() +} + +type HostLocalAuthentication struct { + HostAuthenticationStore +} + +func init() { + t["HostLocalAuthentication"] = reflect.TypeOf((*HostLocalAuthentication)(nil)).Elem() +} + +type HostMemorySystem struct { + ExtensibleManagedObject + + ConsoleReservationInfo *types.ServiceConsoleReservationInfo `mo:"consoleReservationInfo"` + VirtualMachineReservationInfo *types.VirtualMachineMemoryReservationInfo `mo:"virtualMachineReservationInfo"` +} + +func init() { + t["HostMemorySystem"] = reflect.TypeOf((*HostMemorySystem)(nil)).Elem() +} + +type HostNetworkSystem struct { + ExtensibleManagedObject + + Capabilities *types.HostNetCapabilities `mo:"capabilities"` + NetworkInfo *types.HostNetworkInfo `mo:"networkInfo"` + OffloadCapabilities *types.HostNetOffloadCapabilities `mo:"offloadCapabilities"` + NetworkConfig *types.HostNetworkConfig 
`mo:"networkConfig"` + DnsConfig types.BaseHostDnsConfig `mo:"dnsConfig"` + IpRouteConfig types.BaseHostIpRouteConfig `mo:"ipRouteConfig"` + ConsoleIpRouteConfig types.BaseHostIpRouteConfig `mo:"consoleIpRouteConfig"` +} + +func init() { + t["HostNetworkSystem"] = reflect.TypeOf((*HostNetworkSystem)(nil)).Elem() +} + +type HostNvdimmSystem struct { + Self types.ManagedObjectReference + + NvdimmSystemInfo types.NvdimmSystemInfo `mo:"nvdimmSystemInfo"` +} + +func (m HostNvdimmSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostNvdimmSystem"] = reflect.TypeOf((*HostNvdimmSystem)(nil)).Elem() +} + +type HostPatchManager struct { + Self types.ManagedObjectReference +} + +func (m HostPatchManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostPatchManager"] = reflect.TypeOf((*HostPatchManager)(nil)).Elem() +} + +type HostPciPassthruSystem struct { + ExtensibleManagedObject + + PciPassthruInfo []types.BaseHostPciPassthruInfo `mo:"pciPassthruInfo"` + SriovDevicePoolInfo []types.BaseHostSriovDevicePoolInfo `mo:"sriovDevicePoolInfo"` +} + +func init() { + t["HostPciPassthruSystem"] = reflect.TypeOf((*HostPciPassthruSystem)(nil)).Elem() +} + +type HostPowerSystem struct { + Self types.ManagedObjectReference + + Capability types.PowerSystemCapability `mo:"capability"` + Info types.PowerSystemInfo `mo:"info"` +} + +func (m HostPowerSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostPowerSystem"] = reflect.TypeOf((*HostPowerSystem)(nil)).Elem() +} + +type HostProfile struct { + Profile + + ValidationState *string `mo:"validationState"` + ValidationStateUpdateTime *time.Time `mo:"validationStateUpdateTime"` + ValidationFailureInfo *types.HostProfileValidationFailureInfo `mo:"validationFailureInfo"` + ReferenceHost *types.ManagedObjectReference `mo:"referenceHost"` +} + +func init() { + t["HostProfile"] = reflect.TypeOf((*HostProfile)(nil)).Elem() +} + +type HostProfileManager struct { + ProfileManager +} + +func init() { + t["HostProfileManager"] = reflect.TypeOf((*HostProfileManager)(nil)).Elem() +} + +type HostServiceSystem struct { + ExtensibleManagedObject + + ServiceInfo types.HostServiceInfo `mo:"serviceInfo"` +} + +func init() { + t["HostServiceSystem"] = reflect.TypeOf((*HostServiceSystem)(nil)).Elem() +} + +type HostSnmpSystem struct { + Self types.ManagedObjectReference + + Configuration types.HostSnmpConfigSpec `mo:"configuration"` + Limits types.HostSnmpSystemAgentLimits `mo:"limits"` +} + +func (m HostSnmpSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostSnmpSystem"] = reflect.TypeOf((*HostSnmpSystem)(nil)).Elem() +} + +type HostSpecificationManager struct { + Self types.ManagedObjectReference +} + +func (m HostSpecificationManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostSpecificationManager"] = reflect.TypeOf((*HostSpecificationManager)(nil)).Elem() +} + +type HostStorageSystem struct { + ExtensibleManagedObject + + StorageDeviceInfo *types.HostStorageDeviceInfo `mo:"storageDeviceInfo"` + FileSystemVolumeInfo types.HostFileSystemVolumeInfo `mo:"fileSystemVolumeInfo"` + SystemFile []string `mo:"systemFile"` + MultipathStateInfo *types.HostMultipathStateInfo `mo:"multipathStateInfo"` +} + +func init() { + t["HostStorageSystem"] = reflect.TypeOf((*HostStorageSystem)(nil)).Elem() +} + +type HostSystem struct { + ManagedEntity + + Runtime types.HostRuntimeInfo 
`mo:"runtime"` + Summary types.HostListSummary `mo:"summary"` + Hardware *types.HostHardwareInfo `mo:"hardware"` + Capability *types.HostCapability `mo:"capability"` + LicensableResource types.HostLicensableResourceInfo `mo:"licensableResource"` + RemediationState *types.HostSystemRemediationState `mo:"remediationState"` + PrecheckRemediationResult *types.ApplyHostProfileConfigurationSpec `mo:"precheckRemediationResult"` + RemediationResult *types.ApplyHostProfileConfigurationResult `mo:"remediationResult"` + ComplianceCheckState *types.HostSystemComplianceCheckState `mo:"complianceCheckState"` + ComplianceCheckResult *types.ComplianceResult `mo:"complianceCheckResult"` + ConfigManager types.HostConfigManager `mo:"configManager"` + Config *types.HostConfigInfo `mo:"config"` + Vm []types.ManagedObjectReference `mo:"vm"` + Datastore []types.ManagedObjectReference `mo:"datastore"` + Network []types.ManagedObjectReference `mo:"network"` + DatastoreBrowser types.ManagedObjectReference `mo:"datastoreBrowser"` + SystemResources *types.HostSystemResourceInfo `mo:"systemResources"` + AnswerFileValidationState *types.AnswerFileStatusResult `mo:"answerFileValidationState"` + AnswerFileValidationResult *types.AnswerFileStatusResult `mo:"answerFileValidationResult"` +} + +func (m *HostSystem) Entity() *ManagedEntity { + return &m.ManagedEntity +} + +func init() { + t["HostSystem"] = reflect.TypeOf((*HostSystem)(nil)).Elem() +} + +type HostVFlashManager struct { + Self types.ManagedObjectReference + + VFlashConfigInfo *types.HostVFlashManagerVFlashConfigInfo `mo:"vFlashConfigInfo"` +} + +func (m HostVFlashManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostVFlashManager"] = reflect.TypeOf((*HostVFlashManager)(nil)).Elem() +} + +type HostVMotionSystem struct { + ExtensibleManagedObject + + NetConfig *types.HostVMotionNetConfig `mo:"netConfig"` + IpConfig *types.HostIpConfig `mo:"ipConfig"` +} + +func init() { + t["HostVMotionSystem"] = reflect.TypeOf((*HostVMotionSystem)(nil)).Elem() +} + +type HostVStorageObjectManager struct { + VStorageObjectManagerBase +} + +func init() { + t["HostVStorageObjectManager"] = reflect.TypeOf((*HostVStorageObjectManager)(nil)).Elem() +} + +type HostVirtualNicManager struct { + ExtensibleManagedObject + + Info types.HostVirtualNicManagerInfo `mo:"info"` +} + +func init() { + t["HostVirtualNicManager"] = reflect.TypeOf((*HostVirtualNicManager)(nil)).Elem() +} + +type HostVsanInternalSystem struct { + Self types.ManagedObjectReference +} + +func (m HostVsanInternalSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostVsanInternalSystem"] = reflect.TypeOf((*HostVsanInternalSystem)(nil)).Elem() +} + +type HostVsanSystem struct { + Self types.ManagedObjectReference + + Config types.VsanHostConfigInfo `mo:"config"` +} + +func (m HostVsanSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostVsanSystem"] = reflect.TypeOf((*HostVsanSystem)(nil)).Elem() +} + +type HttpNfcLease struct { + Self types.ManagedObjectReference + + InitializeProgress int32 `mo:"initializeProgress"` + TransferProgress int32 `mo:"transferProgress"` + Mode string `mo:"mode"` + Capabilities types.HttpNfcLeaseCapabilities `mo:"capabilities"` + Info *types.HttpNfcLeaseInfo `mo:"info"` + State types.HttpNfcLeaseState `mo:"state"` + Error *types.LocalizedMethodFault `mo:"error"` +} + +func (m HttpNfcLease) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + 
t["HttpNfcLease"] = reflect.TypeOf((*HttpNfcLease)(nil)).Elem() +} + +type InventoryView struct { + ManagedObjectView +} + +func init() { + t["InventoryView"] = reflect.TypeOf((*InventoryView)(nil)).Elem() +} + +type IoFilterManager struct { + Self types.ManagedObjectReference +} + +func (m IoFilterManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["IoFilterManager"] = reflect.TypeOf((*IoFilterManager)(nil)).Elem() +} + +type IpPoolManager struct { + Self types.ManagedObjectReference +} + +func (m IpPoolManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["IpPoolManager"] = reflect.TypeOf((*IpPoolManager)(nil)).Elem() +} + +type IscsiManager struct { + Self types.ManagedObjectReference +} + +func (m IscsiManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["IscsiManager"] = reflect.TypeOf((*IscsiManager)(nil)).Elem() +} + +type LicenseAssignmentManager struct { + Self types.ManagedObjectReference +} + +func (m LicenseAssignmentManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["LicenseAssignmentManager"] = reflect.TypeOf((*LicenseAssignmentManager)(nil)).Elem() +} + +type LicenseManager struct { + Self types.ManagedObjectReference + + Source types.BaseLicenseSource `mo:"source"` + SourceAvailable bool `mo:"sourceAvailable"` + Diagnostics *types.LicenseDiagnostics `mo:"diagnostics"` + FeatureInfo []types.LicenseFeatureInfo `mo:"featureInfo"` + LicensedEdition string `mo:"licensedEdition"` + Licenses []types.LicenseManagerLicenseInfo `mo:"licenses"` + LicenseAssignmentManager *types.ManagedObjectReference `mo:"licenseAssignmentManager"` + Evaluation types.LicenseManagerEvaluationInfo `mo:"evaluation"` +} + +func (m LicenseManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["LicenseManager"] = reflect.TypeOf((*LicenseManager)(nil)).Elem() +} + +type ListView struct { + ManagedObjectView +} + +func init() { + t["ListView"] = reflect.TypeOf((*ListView)(nil)).Elem() +} + +type LocalizationManager struct { + Self types.ManagedObjectReference + + Catalog []types.LocalizationManagerMessageCatalog `mo:"catalog"` +} + +func (m LocalizationManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["LocalizationManager"] = reflect.TypeOf((*LocalizationManager)(nil)).Elem() +} + +type ManagedEntity struct { + ExtensibleManagedObject + + Parent *types.ManagedObjectReference `mo:"parent"` + CustomValue []types.BaseCustomFieldValue `mo:"customValue"` + OverallStatus types.ManagedEntityStatus `mo:"overallStatus"` + ConfigStatus types.ManagedEntityStatus `mo:"configStatus"` + ConfigIssue []types.BaseEvent `mo:"configIssue"` + EffectiveRole []int32 `mo:"effectiveRole"` + Permission []types.Permission `mo:"permission"` + Name string `mo:"name"` + DisabledMethod []string `mo:"disabledMethod"` + RecentTask []types.ManagedObjectReference `mo:"recentTask"` + DeclaredAlarmState []types.AlarmState `mo:"declaredAlarmState"` + TriggeredAlarmState []types.AlarmState `mo:"triggeredAlarmState"` + AlarmActionsEnabled *bool `mo:"alarmActionsEnabled"` + Tag []types.Tag `mo:"tag"` +} + +func init() { + t["ManagedEntity"] = reflect.TypeOf((*ManagedEntity)(nil)).Elem() +} + +type ManagedObjectView struct { + Self types.ManagedObjectReference + + View []types.ManagedObjectReference `mo:"view"` +} + +func (m ManagedObjectView) Reference() types.ManagedObjectReference { + return m.Self +} + +func 
init() { + t["ManagedObjectView"] = reflect.TypeOf((*ManagedObjectView)(nil)).Elem() +} + +type MessageBusProxy struct { + Self types.ManagedObjectReference +} + +func (m MessageBusProxy) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["MessageBusProxy"] = reflect.TypeOf((*MessageBusProxy)(nil)).Elem() +} + +type Network struct { + ManagedEntity + + Name string `mo:"name"` + Summary types.BaseNetworkSummary `mo:"summary"` + Host []types.ManagedObjectReference `mo:"host"` + Vm []types.ManagedObjectReference `mo:"vm"` +} + +func (m *Network) Entity() *ManagedEntity { + return &m.ManagedEntity +} + +func init() { + t["Network"] = reflect.TypeOf((*Network)(nil)).Elem() +} + +type OpaqueNetwork struct { + Network + + Capability *types.OpaqueNetworkCapability `mo:"capability"` + ExtraConfig []types.BaseOptionValue `mo:"extraConfig"` +} + +func init() { + t["OpaqueNetwork"] = reflect.TypeOf((*OpaqueNetwork)(nil)).Elem() +} + +type OptionManager struct { + Self types.ManagedObjectReference + + SupportedOption []types.OptionDef `mo:"supportedOption"` + Setting []types.BaseOptionValue `mo:"setting"` +} + +func (m OptionManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["OptionManager"] = reflect.TypeOf((*OptionManager)(nil)).Elem() +} + +type OverheadMemoryManager struct { + Self types.ManagedObjectReference +} + +func (m OverheadMemoryManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["OverheadMemoryManager"] = reflect.TypeOf((*OverheadMemoryManager)(nil)).Elem() +} + +type OvfManager struct { + Self types.ManagedObjectReference + + OvfImportOption []types.OvfOptionInfo `mo:"ovfImportOption"` + OvfExportOption []types.OvfOptionInfo `mo:"ovfExportOption"` +} + +func (m OvfManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["OvfManager"] = reflect.TypeOf((*OvfManager)(nil)).Elem() +} + +type PerformanceManager struct { + Self types.ManagedObjectReference + + Description types.PerformanceDescription `mo:"description"` + HistoricalInterval []types.PerfInterval `mo:"historicalInterval"` + PerfCounter []types.PerfCounterInfo `mo:"perfCounter"` +} + +func (m PerformanceManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["PerformanceManager"] = reflect.TypeOf((*PerformanceManager)(nil)).Elem() +} + +type Profile struct { + Self types.ManagedObjectReference + + Config types.BaseProfileConfigInfo `mo:"config"` + Description *types.ProfileDescription `mo:"description"` + Name string `mo:"name"` + CreatedTime time.Time `mo:"createdTime"` + ModifiedTime time.Time `mo:"modifiedTime"` + Entity []types.ManagedObjectReference `mo:"entity"` + ComplianceStatus string `mo:"complianceStatus"` +} + +func (m Profile) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["Profile"] = reflect.TypeOf((*Profile)(nil)).Elem() +} + +type ProfileComplianceManager struct { + Self types.ManagedObjectReference +} + +func (m ProfileComplianceManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ProfileComplianceManager"] = reflect.TypeOf((*ProfileComplianceManager)(nil)).Elem() +} + +type ProfileManager struct { + Self types.ManagedObjectReference + + Profile []types.ManagedObjectReference `mo:"profile"` +} + +func (m ProfileManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ProfileManager"] = 
reflect.TypeOf((*ProfileManager)(nil)).Elem() +} + +type PropertyCollector struct { + Self types.ManagedObjectReference + + Filter []types.ManagedObjectReference `mo:"filter"` +} + +func (m PropertyCollector) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["PropertyCollector"] = reflect.TypeOf((*PropertyCollector)(nil)).Elem() +} + +type PropertyFilter struct { + Self types.ManagedObjectReference + + Spec types.PropertyFilterSpec `mo:"spec"` + PartialUpdates bool `mo:"partialUpdates"` +} + +func (m PropertyFilter) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["PropertyFilter"] = reflect.TypeOf((*PropertyFilter)(nil)).Elem() +} + +type ResourcePlanningManager struct { + Self types.ManagedObjectReference +} + +func (m ResourcePlanningManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ResourcePlanningManager"] = reflect.TypeOf((*ResourcePlanningManager)(nil)).Elem() +} + +type ResourcePool struct { + ManagedEntity + + Summary types.BaseResourcePoolSummary `mo:"summary"` + Runtime types.ResourcePoolRuntimeInfo `mo:"runtime"` + Owner types.ManagedObjectReference `mo:"owner"` + ResourcePool []types.ManagedObjectReference `mo:"resourcePool"` + Vm []types.ManagedObjectReference `mo:"vm"` + Config types.ResourceConfigSpec `mo:"config"` + ChildConfiguration []types.ResourceConfigSpec `mo:"childConfiguration"` +} + +func (m *ResourcePool) Entity() *ManagedEntity { + return &m.ManagedEntity +} + +func init() { + t["ResourcePool"] = reflect.TypeOf((*ResourcePool)(nil)).Elem() +} + +type ScheduledTask struct { + ExtensibleManagedObject + + Info types.ScheduledTaskInfo `mo:"info"` +} + +func init() { + t["ScheduledTask"] = reflect.TypeOf((*ScheduledTask)(nil)).Elem() +} + +type ScheduledTaskManager struct { + Self types.ManagedObjectReference + + ScheduledTask []types.ManagedObjectReference `mo:"scheduledTask"` + Description types.ScheduledTaskDescription `mo:"description"` +} + +func (m ScheduledTaskManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ScheduledTaskManager"] = reflect.TypeOf((*ScheduledTaskManager)(nil)).Elem() +} + +type SearchIndex struct { + Self types.ManagedObjectReference +} + +func (m SearchIndex) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["SearchIndex"] = reflect.TypeOf((*SearchIndex)(nil)).Elem() +} + +type ServiceInstance struct { + Self types.ManagedObjectReference + + ServerClock time.Time `mo:"serverClock"` + Capability types.Capability `mo:"capability"` + Content types.ServiceContent `mo:"content"` +} + +func (m ServiceInstance) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ServiceInstance"] = reflect.TypeOf((*ServiceInstance)(nil)).Elem() +} + +type ServiceManager struct { + Self types.ManagedObjectReference + + Service []types.ServiceManagerServiceInfo `mo:"service"` +} + +func (m ServiceManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ServiceManager"] = reflect.TypeOf((*ServiceManager)(nil)).Elem() +} + +type SessionManager struct { + Self types.ManagedObjectReference + + SessionList []types.UserSession `mo:"sessionList"` + CurrentSession *types.UserSession `mo:"currentSession"` + Message *string `mo:"message"` + MessageLocaleList []string `mo:"messageLocaleList"` + SupportedLocaleList []string `mo:"supportedLocaleList"` + DefaultLocale string `mo:"defaultLocale"` +} + +func (m SessionManager) 
Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["SessionManager"] = reflect.TypeOf((*SessionManager)(nil)).Elem() +} + +type SimpleCommand struct { + Self types.ManagedObjectReference + + EncodingType types.SimpleCommandEncoding `mo:"encodingType"` + Entity types.ServiceManagerServiceInfo `mo:"entity"` +} + +func (m SimpleCommand) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["SimpleCommand"] = reflect.TypeOf((*SimpleCommand)(nil)).Elem() +} + +type StoragePod struct { + Folder + + Summary *types.StoragePodSummary `mo:"summary"` + PodStorageDrsEntry *types.PodStorageDrsEntry `mo:"podStorageDrsEntry"` +} + +func init() { + t["StoragePod"] = reflect.TypeOf((*StoragePod)(nil)).Elem() +} + +type StorageResourceManager struct { + Self types.ManagedObjectReference +} + +func (m StorageResourceManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["StorageResourceManager"] = reflect.TypeOf((*StorageResourceManager)(nil)).Elem() +} + +type Task struct { + ExtensibleManagedObject + + Info types.TaskInfo `mo:"info"` +} + +func init() { + t["Task"] = reflect.TypeOf((*Task)(nil)).Elem() +} + +type TaskHistoryCollector struct { + HistoryCollector + + LatestPage []types.TaskInfo `mo:"latestPage"` +} + +func init() { + t["TaskHistoryCollector"] = reflect.TypeOf((*TaskHistoryCollector)(nil)).Elem() +} + +type TaskManager struct { + Self types.ManagedObjectReference + + RecentTask []types.ManagedObjectReference `mo:"recentTask"` + Description types.TaskDescription `mo:"description"` + MaxCollector int32 `mo:"maxCollector"` +} + +func (m TaskManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["TaskManager"] = reflect.TypeOf((*TaskManager)(nil)).Elem() +} + +type UserDirectory struct { + Self types.ManagedObjectReference + + DomainList []string `mo:"domainList"` +} + +func (m UserDirectory) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["UserDirectory"] = reflect.TypeOf((*UserDirectory)(nil)).Elem() +} + +type VStorageObjectManagerBase struct { + Self types.ManagedObjectReference +} + +func (m VStorageObjectManagerBase) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["VStorageObjectManagerBase"] = reflect.TypeOf((*VStorageObjectManagerBase)(nil)).Elem() +} + +type VcenterVStorageObjectManager struct { + VStorageObjectManagerBase +} + +func init() { + t["VcenterVStorageObjectManager"] = reflect.TypeOf((*VcenterVStorageObjectManager)(nil)).Elem() +} + +type View struct { + Self types.ManagedObjectReference +} + +func (m View) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["View"] = reflect.TypeOf((*View)(nil)).Elem() +} + +type ViewManager struct { + Self types.ManagedObjectReference + + ViewList []types.ManagedObjectReference `mo:"viewList"` +} + +func (m ViewManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["ViewManager"] = reflect.TypeOf((*ViewManager)(nil)).Elem() +} + +type VirtualApp struct { + ResourcePool + + ParentFolder *types.ManagedObjectReference `mo:"parentFolder"` + Datastore []types.ManagedObjectReference `mo:"datastore"` + Network []types.ManagedObjectReference `mo:"network"` + VAppConfig *types.VAppConfigInfo `mo:"vAppConfig"` + ParentVApp *types.ManagedObjectReference `mo:"parentVApp"` + ChildLink []types.VirtualAppLinkInfo `mo:"childLink"` +} + +func init() { + t["VirtualApp"] = 
reflect.TypeOf((*VirtualApp)(nil)).Elem() +} + +type VirtualDiskManager struct { + Self types.ManagedObjectReference +} + +func (m VirtualDiskManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["VirtualDiskManager"] = reflect.TypeOf((*VirtualDiskManager)(nil)).Elem() +} + +type VirtualMachine struct { + ManagedEntity + + Capability types.VirtualMachineCapability `mo:"capability"` + Config *types.VirtualMachineConfigInfo `mo:"config"` + Layout *types.VirtualMachineFileLayout `mo:"layout"` + LayoutEx *types.VirtualMachineFileLayoutEx `mo:"layoutEx"` + Storage *types.VirtualMachineStorageInfo `mo:"storage"` + EnvironmentBrowser types.ManagedObjectReference `mo:"environmentBrowser"` + ResourcePool *types.ManagedObjectReference `mo:"resourcePool"` + ParentVApp *types.ManagedObjectReference `mo:"parentVApp"` + ResourceConfig *types.ResourceConfigSpec `mo:"resourceConfig"` + Runtime types.VirtualMachineRuntimeInfo `mo:"runtime"` + Guest *types.GuestInfo `mo:"guest"` + Summary types.VirtualMachineSummary `mo:"summary"` + Datastore []types.ManagedObjectReference `mo:"datastore"` + Network []types.ManagedObjectReference `mo:"network"` + Snapshot *types.VirtualMachineSnapshotInfo `mo:"snapshot"` + RootSnapshot []types.ManagedObjectReference `mo:"rootSnapshot"` + GuestHeartbeatStatus types.ManagedEntityStatus `mo:"guestHeartbeatStatus"` +} + +func (m *VirtualMachine) Entity() *ManagedEntity { + return &m.ManagedEntity +} + +func init() { + t["VirtualMachine"] = reflect.TypeOf((*VirtualMachine)(nil)).Elem() +} + +type VirtualMachineCompatibilityChecker struct { + Self types.ManagedObjectReference +} + +func (m VirtualMachineCompatibilityChecker) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["VirtualMachineCompatibilityChecker"] = reflect.TypeOf((*VirtualMachineCompatibilityChecker)(nil)).Elem() +} + +type VirtualMachineProvisioningChecker struct { + Self types.ManagedObjectReference +} + +func (m VirtualMachineProvisioningChecker) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["VirtualMachineProvisioningChecker"] = reflect.TypeOf((*VirtualMachineProvisioningChecker)(nil)).Elem() +} + +type VirtualMachineSnapshot struct { + ExtensibleManagedObject + + Config types.VirtualMachineConfigInfo `mo:"config"` + ChildSnapshot []types.ManagedObjectReference `mo:"childSnapshot"` + Vm types.ManagedObjectReference `mo:"vm"` +} + +func init() { + t["VirtualMachineSnapshot"] = reflect.TypeOf((*VirtualMachineSnapshot)(nil)).Elem() +} + +type VirtualizationManager struct { + Self types.ManagedObjectReference +} + +func (m VirtualizationManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["VirtualizationManager"] = reflect.TypeOf((*VirtualizationManager)(nil)).Elem() +} + +type VmwareDistributedVirtualSwitch struct { + DistributedVirtualSwitch +} + +func init() { + t["VmwareDistributedVirtualSwitch"] = reflect.TypeOf((*VmwareDistributedVirtualSwitch)(nil)).Elem() +} + +type VsanUpgradeSystem struct { + Self types.ManagedObjectReference +} + +func (m VsanUpgradeSystem) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["VsanUpgradeSystem"] = reflect.TypeOf((*VsanUpgradeSystem)(nil)).Elem() +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/reference.go b/vendor/github.com/vmware/govmomi/vim25/mo/reference.go new file mode 100644 index 000000000000..465edbe8072d --- /dev/null +++ 
b/vendor/github.com/vmware/govmomi/vim25/mo/reference.go @@ -0,0 +1,26 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mo + +import "github.com/vmware/govmomi/vim25/types" + +// Reference is the interface that is implemented by all the managed objects +// defined in this package. It specifies that these managed objects have a +// function that returns the managed object reference to themselves. +type Reference interface { + Reference() types.ManagedObjectReference +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/registry.go b/vendor/github.com/vmware/govmomi/vim25/mo/registry.go new file mode 100644 index 000000000000..deacf508bba7 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/registry.go @@ -0,0 +1,21 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mo + +import "reflect" + +var t = map[string]reflect.Type{} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go b/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go new file mode 100644 index 000000000000..e7ffc32cec19 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go @@ -0,0 +1,174 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mo + +import ( + "context" + "reflect" + + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +func ignoreMissingProperty(ref types.ManagedObjectReference, p types.MissingProperty) bool { + switch ref.Type { + case "VirtualMachine": + switch p.Path { + case "environmentBrowser": + // See https://github.com/vmware/govmomi/pull/242 + return true + case "alarmActionsEnabled": + // Seen with vApp child VM + return true + } + } + + return false +} + +// ObjectContentToType loads an ObjectContent value into the value it +// represents. 
If the ObjectContent value has a non-empty 'MissingSet' field, +// it returns the first fault it finds there as error. If the 'MissingSet' +// field is empty, it returns a pointer to a reflect.Value. It handles contain +// nested properties, such as 'guest.ipAddress' or 'config.hardware'. +func ObjectContentToType(o types.ObjectContent) (interface{}, error) { + // Expect no properties in the missing set + for _, p := range o.MissingSet { + if ignoreMissingProperty(o.Obj, p) { + continue + } + + return nil, soap.WrapVimFault(p.Fault.Fault) + } + + ti := typeInfoForType(o.Obj.Type) + v, err := ti.LoadFromObjectContent(o) + if err != nil { + return nil, err + } + + return v.Elem().Interface(), nil +} + +// LoadRetrievePropertiesResponse converts the response of a call to +// RetrieveProperties to one or more managed objects. +func LoadRetrievePropertiesResponse(res *types.RetrievePropertiesResponse, dst interface{}) error { + rt := reflect.TypeOf(dst) + if rt == nil || rt.Kind() != reflect.Ptr { + panic("need pointer") + } + + rv := reflect.ValueOf(dst).Elem() + if !rv.CanSet() { + panic("cannot set dst") + } + + isSlice := false + switch rt.Elem().Kind() { + case reflect.Struct: + case reflect.Slice: + isSlice = true + default: + panic("unexpected type") + } + + if isSlice { + for _, p := range res.Returnval { + v, err := ObjectContentToType(p) + if err != nil { + return err + } + + vt := reflect.TypeOf(v) + + if !rv.Type().AssignableTo(vt) { + // For example: dst is []ManagedEntity, res is []HostSystem + if field, ok := vt.FieldByName(rt.Elem().Elem().Name()); ok && field.Anonymous { + rv.Set(reflect.Append(rv, reflect.ValueOf(v).FieldByIndex(field.Index))) + continue + } + } + + rv.Set(reflect.Append(rv, reflect.ValueOf(v))) + } + } else { + switch len(res.Returnval) { + case 0: + case 1: + v, err := ObjectContentToType(res.Returnval[0]) + if err != nil { + return err + } + + vt := reflect.TypeOf(v) + + if !rv.Type().AssignableTo(vt) { + // For example: dst is ComputeResource, res is ClusterComputeResource + if field, ok := vt.FieldByName(rt.Elem().Name()); ok && field.Anonymous { + rv.Set(reflect.ValueOf(v).FieldByIndex(field.Index)) + return nil + } + } + + rv.Set(reflect.ValueOf(v)) + default: + // If dst is not a slice, expect to receive 0 or 1 results + panic("more than 1 result") + } + } + + return nil +} + +// RetrievePropertiesForRequest calls the RetrieveProperties method with the +// specified request and decodes the response struct into the value pointed to +// by dst. +func RetrievePropertiesForRequest(ctx context.Context, r soap.RoundTripper, req types.RetrieveProperties, dst interface{}) error { + res, err := methods.RetrieveProperties(ctx, r, &req) + if err != nil { + return err + } + + return LoadRetrievePropertiesResponse(res, dst) +} + +// RetrieveProperties retrieves the properties of the managed object specified +// as obj and decodes the response struct into the value pointed to by dst. 
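// An illustrative usage sketch (not part of the vendored file; the round
// tripper, property-collector reference and VM reference are assumptions):
// retrieving every property of a single VirtualMachine and reading its name.
//
//	var vm VirtualMachine
//	if err := RetrieveProperties(ctx, rt, pcRef, vmRef, &vm); err != nil {
//		return err
//	}
//	name := vm.Name // promoted from the embedded ManagedEntity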
+func RetrieveProperties(ctx context.Context, r soap.RoundTripper, pc, obj types.ManagedObjectReference, dst interface{}) error { + req := types.RetrieveProperties{ + This: pc, + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ + { + Obj: obj, + Skip: types.NewBool(false), + }, + }, + PropSet: []types.PropertySpec{ + { + All: types.NewBool(true), + Type: obj.Type, + }, + }, + }, + }, + } + + return RetrievePropertiesForRequest(ctx, r, req, dst) +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go b/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go new file mode 100644 index 000000000000..0c9e5b034889 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go @@ -0,0 +1,247 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mo + +import ( + "fmt" + "reflect" + "regexp" + "strings" + "sync" + + "github.com/vmware/govmomi/vim25/types" +) + +type typeInfo struct { + typ reflect.Type + + // Field indices of "Self" field. + self []int + + // Map property names to field indices. + props map[string][]int +} + +var typeInfoLock sync.RWMutex +var typeInfoMap = make(map[string]*typeInfo) + +func typeInfoForType(tname string) *typeInfo { + typeInfoLock.RLock() + ti, ok := typeInfoMap[tname] + typeInfoLock.RUnlock() + + if ok { + return ti + } + + // Create new typeInfo for type. + if typ, ok := t[tname]; !ok { + panic("unknown type: " + tname) + } else { + // Multiple routines may race to set it, but the result is the same. + typeInfoLock.Lock() + ti = newTypeInfo(typ) + typeInfoMap[tname] = ti + typeInfoLock.Unlock() + } + + return ti +} + +func newTypeInfo(typ reflect.Type) *typeInfo { + t := typeInfo{ + typ: typ, + props: make(map[string][]int), + } + + t.build(typ, "", []int{}) + + return &t +} + +var managedObjectRefType = reflect.TypeOf((*types.ManagedObjectReference)(nil)).Elem() + +func buildName(fn string, f reflect.StructField) string { + if fn != "" { + fn += "." + } + + motag := f.Tag.Get("mo") + if motag != "" { + return fn + motag + } + + xmltag := f.Tag.Get("xml") + if xmltag != "" { + tokens := strings.Split(xmltag, ",") + if tokens[0] != "" { + return fn + tokens[0] + } + } + + return "" +} + +func (t *typeInfo) build(typ reflect.Type, fn string, fi []int) { + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + if typ.Kind() != reflect.Struct { + panic("need struct") + } + + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + ftyp := f.Type + + // Copy field indices so they can be passed along. + fic := make([]int, len(fi)+1) + copy(fic, fi) + fic[len(fi)] = i + + // Recurse into embedded field. + if f.Anonymous { + t.build(ftyp, fn, fic) + continue + } + + // Top level type has a "Self" field. + if f.Name == "Self" && ftyp == managedObjectRefType { + t.self = fic + continue + } + + fnc := buildName(fn, f) + if fnc == "" { + continue + } + + t.props[fnc] = fic + + // Dereference pointer. 
+ if ftyp.Kind() == reflect.Ptr { + ftyp = ftyp.Elem() + } + + // Slices are not addressable by `foo.bar.qux`. + if ftyp.Kind() == reflect.Slice { + continue + } + + // Skip the managed reference type. + if ftyp == managedObjectRefType { + continue + } + + // Recurse into structs. + if ftyp.Kind() == reflect.Struct { + t.build(ftyp, fnc, fic) + } + } +} + +// assignValue assignes a value 'pv' to the struct pointed to by 'val', given a +// slice of field indices. It recurses into the struct until it finds the field +// specified by the indices. It creates new values for pointer types where +// needed. +func assignValue(val reflect.Value, fi []int, pv reflect.Value) { + // Create new value if necessary. + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + + val = val.Elem() + } + + rv := val.Field(fi[0]) + fi = fi[1:] + if len(fi) == 0 { + rt := rv.Type() + pt := pv.Type() + + // If type is a pointer, create new instance of type. + if rt.Kind() == reflect.Ptr { + rv.Set(reflect.New(rt.Elem())) + rv = rv.Elem() + rt = rv.Type() + } + + // If type is an interface, check if pv implements it. + if rt.Kind() == reflect.Interface && !pt.Implements(rt) { + // Check if pointer to pv implements it. + if reflect.PtrTo(pt).Implements(rt) { + npv := reflect.New(pt) + npv.Elem().Set(pv) + pv = npv + pt = pv.Type() + } else { + panic(fmt.Sprintf("type %s doesn't implement %s", pt.Name(), rt.Name())) + } + } + + if pt.AssignableTo(rt) { + rv.Set(pv) + } else if rt.ConvertibleTo(pt) { + rv.Set(pv.Convert(rt)) + } else { + panic(fmt.Sprintf("cannot assign %s (%s) to %s (%s)", rt.Name(), rt.Kind(), pt.Name(), pt.Kind())) + } + + return + } + + assignValue(rv, fi, pv) +} + +var arrayOfRegexp = regexp.MustCompile("ArrayOf(.*)$") + +func anyTypeToValue(t interface{}) reflect.Value { + rt := reflect.TypeOf(t) + rv := reflect.ValueOf(t) + + // Dereference if ArrayOfXYZ type + m := arrayOfRegexp.FindStringSubmatch(rt.Name()) + if len(m) > 0 { + // ArrayOfXYZ type has single field named XYZ + rv = rv.FieldByName(m[1]) + if !rv.IsValid() { + panic(fmt.Sprintf("expected %s type to have field %s", m[0], m[1])) + } + } + + return rv +} + +// LoadObjectFromContent loads properties from the 'PropSet' field in the +// specified ObjectContent value into the value it represents, which is +// returned as a reflect.Value. +func (t *typeInfo) LoadFromObjectContent(o types.ObjectContent) (reflect.Value, error) { + v := reflect.New(t.typ) + assignValue(v, t.self, reflect.ValueOf(o.Obj)) + + for _, p := range o.PropSet { + rv, ok := t.props[p.Name] + if !ok { + continue + } + assignValue(v, rv, anyTypeToValue(p.Val)) + } + + return v, nil +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/aggregator.go b/vendor/github.com/vmware/govmomi/vim25/progress/aggregator.go new file mode 100644 index 000000000000..24cb3d59a97e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/aggregator.go @@ -0,0 +1,73 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package progress + +import "sync" + +type Aggregator struct { + downstream Sinker + upstream chan (<-chan Report) + + done chan struct{} + w sync.WaitGroup +} + +func NewAggregator(s Sinker) *Aggregator { + a := &Aggregator{ + downstream: s, + upstream: make(chan (<-chan Report)), + + done: make(chan struct{}), + } + + a.w.Add(1) + go a.loop() + + return a +} + +func (a *Aggregator) loop() { + defer a.w.Done() + + dch := a.downstream.Sink() + defer close(dch) + + for { + select { + case uch := <-a.upstream: + // Drain upstream channel + for e := range uch { + dch <- e + } + case <-a.done: + return + } + } +} + +func (a *Aggregator) Sink() chan<- Report { + ch := make(chan Report) + a.upstream <- ch + return ch +} + +// Done marks the aggregator as done. No more calls to Sink() may be made and +// the downstream progress report channel will be closed when Done() returns. +func (a *Aggregator) Done() { + close(a.done) + a.w.Wait() +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/doc.go b/vendor/github.com/vmware/govmomi/vim25/progress/doc.go new file mode 100644 index 000000000000..a0458dd5cc95 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/doc.go @@ -0,0 +1,32 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package progress + +/* +The progress package contains functionality to deal with progress reporting. +The functionality is built to serve progress reporting for infrastructure +operations when talking the vSphere API, but is generic enough to be used +elsewhere. + +At the core of this progress reporting API lies the Sinker interface. This +interface is implemented by any object that can act as a sink for progress +reports. Callers of the Sink() function receives a send-only channel for +progress reports. They are responsible for closing the channel when done. +This semantic makes it easy to keep track of multiple progress report channels; +they are only created when Sink() is called and assumed closed when any +function that receives a Sinker parameter returns. +*/ diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/prefix.go b/vendor/github.com/vmware/govmomi/vim25/progress/prefix.go new file mode 100644 index 000000000000..4f842ad951f8 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/prefix.go @@ -0,0 +1,54 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package progress + +import "fmt" + +type prefixedReport struct { + Report + prefix string +} + +func (r prefixedReport) Detail() string { + if d := r.Report.Detail(); d != "" { + return fmt.Sprintf("%s: %s", r.prefix, d) + } + + return r.prefix +} + +func prefixLoop(upstream <-chan Report, downstream chan<- Report, prefix string) { + defer close(downstream) + + for r := range upstream { + downstream <- prefixedReport{ + Report: r, + prefix: prefix, + } + } +} + +func Prefix(s Sinker, prefix string) Sinker { + fn := func() chan<- Report { + upstream := make(chan Report) + downstream := s.Sink() + go prefixLoop(upstream, downstream, prefix) + return upstream + } + + return SinkFunc(fn) +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/reader.go b/vendor/github.com/vmware/govmomi/vim25/progress/reader.go new file mode 100644 index 000000000000..9d67bc652581 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/reader.go @@ -0,0 +1,185 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package progress + +import ( + "container/list" + "context" + "fmt" + "io" + "sync/atomic" + "time" +) + +type readerReport struct { + t time.Time + + pos int64 + size int64 + bps *uint64 + + err error +} + +func (r readerReport) Percentage() float32 { + return 100.0 * float32(r.pos) / float32(r.size) +} + +func (r readerReport) Detail() string { + const ( + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + ) + + // Use the reader's bps field, so this report returns an up-to-date number. + // + // For example: if there hasn't been progress for the last 5 seconds, the + // most recent report should return "0B/s". + // + bps := atomic.LoadUint64(r.bps) + + switch { + case bps >= GiB: + return fmt.Sprintf("%.1fGiB/s", float32(bps)/float32(GiB)) + case bps >= MiB: + return fmt.Sprintf("%.1fMiB/s", float32(bps)/float32(MiB)) + case bps >= KiB: + return fmt.Sprintf("%.1fKiB/s", float32(bps)/float32(KiB)) + default: + return fmt.Sprintf("%dB/s", bps) + } +} + +func (p readerReport) Error() error { + return p.err +} + +// reader wraps an io.Reader and sends a progress report over a channel for +// every read it handles. +type reader struct { + r io.Reader + + pos int64 + size int64 + bps uint64 + + ch chan<- Report + ctx context.Context +} + +func NewReader(ctx context.Context, s Sinker, r io.Reader, size int64) *reader { + pr := reader{ + r: r, + ctx: ctx, + size: size, + } + + // Reports must be sent downstream and to the bps computation loop. + pr.ch = Tee(s, newBpsLoop(&pr.bps)).Sink() + + return &pr +} + +// Read calls the Read function on the underlying io.Reader. Additionally, +// every read causes a progress report to be sent to the progress reader's +// underlying channel. 
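// An illustrative sketch (not part of the vendored file; the file name and
// sink are assumptions): wrapping an io.Reader in a progress reader so a
// Sinker receives a report for every read while the data is copied.
//
//	f, err := os.Open("disk.vmdk")
//	if err != nil {
//		return err
//	}
//	info, _ := f.Stat()
//	pr := NewReader(ctx, sink, f, info.Size())
//	_, err = io.Copy(ioutil.Discard, pr)
//	pr.Done(err) // closes the underlying report channel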
+func (r *reader) Read(b []byte) (int, error) { + n, err := r.r.Read(b) + r.pos += int64(n) + + if err != nil && err != io.EOF { + return n, err + } + + q := readerReport{ + t: time.Now(), + pos: r.pos, + size: r.size, + bps: &r.bps, + } + + select { + case r.ch <- q: + case <-r.ctx.Done(): + } + + return n, err +} + +// Done marks the progress reader as done, optionally including an error in the +// progress report. After sending it, the underlying channel is closed. +func (r *reader) Done(err error) { + q := readerReport{ + t: time.Now(), + pos: r.pos, + size: r.size, + bps: &r.bps, + err: err, + } + + select { + case r.ch <- q: + close(r.ch) + case <-r.ctx.Done(): + } +} + +// newBpsLoop returns a sink that monitors and stores throughput. +func newBpsLoop(dst *uint64) SinkFunc { + fn := func() chan<- Report { + sink := make(chan Report) + go bpsLoop(sink, dst) + return sink + } + + return fn +} + +func bpsLoop(ch <-chan Report, dst *uint64) { + l := list.New() + + for { + var tch <-chan time.Time + + // Setup timer for front of list to become stale. + if e := l.Front(); e != nil { + dt := time.Second - time.Now().Sub(e.Value.(readerReport).t) + tch = time.After(dt) + } + + select { + case q, ok := <-ch: + if !ok { + return + } + + l.PushBack(q) + case <-tch: + l.Remove(l.Front()) + } + + // Compute new bps + if l.Len() == 0 { + atomic.StoreUint64(dst, 0) + } else { + f := l.Front().Value.(readerReport) + b := l.Back().Value.(readerReport) + atomic.StoreUint64(dst, uint64(b.pos-f.pos)) + } + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/report.go b/vendor/github.com/vmware/govmomi/vim25/progress/report.go new file mode 100644 index 000000000000..bf80263ff5cb --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/report.go @@ -0,0 +1,26 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package progress + +// Report defines the interface for types that can deliver progress reports. +// Examples include uploads/downloads in the http client and the task info +// field in the task managed object. +type Report interface { + Percentage() float32 + Detail() string + Error() error +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/scale.go b/vendor/github.com/vmware/govmomi/vim25/progress/scale.go new file mode 100644 index 000000000000..98808392068e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/scale.go @@ -0,0 +1,76 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package progress + +type scaledReport struct { + Report + n int + i int +} + +func (r scaledReport) Percentage() float32 { + b := 100 * float32(r.i) / float32(r.n) + return b + (r.Report.Percentage() / float32(r.n)) +} + +type scaleOne struct { + s Sinker + n int + i int +} + +func (s scaleOne) Sink() chan<- Report { + upstream := make(chan Report) + downstream := s.s.Sink() + go s.loop(upstream, downstream) + return upstream +} + +func (s scaleOne) loop(upstream <-chan Report, downstream chan<- Report) { + defer close(downstream) + + for r := range upstream { + downstream <- scaledReport{ + Report: r, + n: s.n, + i: s.i, + } + } +} + +type scaleMany struct { + s Sinker + n int + i int +} + +func Scale(s Sinker, n int) Sinker { + return &scaleMany{ + s: s, + n: n, + } +} + +func (s *scaleMany) Sink() chan<- Report { + if s.i == s.n { + s.n++ + } + + ch := scaleOne{s: s.s, n: s.n, i: s.i}.Sink() + s.i++ + return ch +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/sinker.go b/vendor/github.com/vmware/govmomi/vim25/progress/sinker.go new file mode 100644 index 000000000000..0bd35a47f752 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/sinker.go @@ -0,0 +1,33 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package progress + +// Sinker defines what is expected of a type that can act as a sink for +// progress reports. The semantics are as follows. If you call Sink(), you are +// responsible for closing the returned channel. Closing this channel means +// that the related task is done, or resulted in error. +type Sinker interface { + Sink() chan<- Report +} + +// SinkFunc defines a function that returns a progress report channel. +type SinkFunc func() chan<- Report + +// Sink makes the SinkFunc implement the Sinker interface. +func (fn SinkFunc) Sink() chan<- Report { + return fn() +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/tee.go b/vendor/github.com/vmware/govmomi/vim25/progress/tee.go new file mode 100644 index 000000000000..ab4607842b0a --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/progress/tee.go @@ -0,0 +1,41 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package progress + +// Tee works like Unix tee; it forwards all progress reports it receives to the +// specified sinks +func Tee(s1, s2 Sinker) Sinker { + fn := func() chan<- Report { + d1 := s1.Sink() + d2 := s2.Sink() + u := make(chan Report) + go tee(u, d1, d2) + return u + } + + return SinkFunc(fn) +} + +func tee(u <-chan Report, d1, d2 chan<- Report) { + defer close(d1) + defer close(d2) + + for r := range u { + d1 <- r + d2 <- r + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/retry.go b/vendor/github.com/vmware/govmomi/vim25/retry.go new file mode 100644 index 000000000000..b8807eef3192 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/retry.go @@ -0,0 +1,105 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vim25 + +import ( + "context" + "net" + "net/url" + "time" + + "github.com/vmware/govmomi/vim25/soap" +) + +type RetryFunc func(err error) (retry bool, delay time.Duration) + +// TemporaryNetworkError returns a RetryFunc that retries up to a maximum of n +// times, only if the error returned by the RoundTrip function is a temporary +// network error (for example: a connect timeout). +func TemporaryNetworkError(n int) RetryFunc { + return func(err error) (retry bool, delay time.Duration) { + var nerr net.Error + var ok bool + + // Never retry if this is not a network error. + switch rerr := err.(type) { + case *url.Error: + if nerr, ok = rerr.Err.(net.Error); !ok { + return false, 0 + } + case net.Error: + nerr = rerr + default: + return false, 0 + } + + if !nerr.Temporary() { + return false, 0 + } + + // Don't retry if we're out of tries. + if n--; n <= 0 { + return false, 0 + } + + return true, 0 + } +} + +type retry struct { + roundTripper soap.RoundTripper + + // fn is a custom function that is called when an error occurs. + // It returns whether or not to retry, and if so, how long to + // delay before retrying. + fn RetryFunc +} + +// Retry wraps the specified soap.RoundTripper and invokes the +// specified RetryFunc. The RetryFunc returns whether or not to +// retry the call, and if so, how long to wait before retrying. If +// the result of this function is to not retry, the original error +// is returned from the RoundTrip function. +func Retry(roundTripper soap.RoundTripper, fn RetryFunc) soap.RoundTripper { + r := &retry{ + roundTripper: roundTripper, + fn: fn, + } + + return r +} + +func (r *retry) RoundTrip(ctx context.Context, req, res soap.HasFault) error { + var err error + + for { + err = r.roundTripper.RoundTrip(ctx, req, res) + if err == nil { + break + } + + // Invoke retry function to see if another attempt should be made. 
+ if retry, delay := r.fn(err); retry { + time.Sleep(delay) + continue + } + + break + } + + return err +} diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/client.go b/vendor/github.com/vmware/govmomi/vim25/soap/client.go new file mode 100644 index 000000000000..d94b6a05658e --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/soap/client.go @@ -0,0 +1,775 @@ +/* +Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package soap + +import ( + "bufio" + "bytes" + "context" + "crypto/sha1" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/cookiejar" + "net/url" + "os" + "path/filepath" + "regexp" + "strings" + "sync" + "time" + + "github.com/vmware/govmomi/vim25/progress" + "github.com/vmware/govmomi/vim25/types" + "github.com/vmware/govmomi/vim25/xml" +) + +type HasFault interface { + Fault() *Fault +} + +type RoundTripper interface { + RoundTrip(ctx context.Context, req, res HasFault) error +} + +const ( + SessionCookieName = "vmware_soap_session" +) + +type Client struct { + http.Client + + u *url.URL + k bool // Named after curl's -k flag + d *debugContainer + t *http.Transport + + hostsMu sync.Mutex + hosts map[string]string + + Namespace string // Vim namespace + Version string // Vim version + UserAgent string + + cookie string +} + +var schemeMatch = regexp.MustCompile(`^\w+://`) + +// ParseURL is wrapper around url.Parse, where Scheme defaults to "https" and Path defaults to "/sdk" +func ParseURL(s string) (*url.URL, error) { + var err error + var u *url.URL + + if s != "" { + // Default the scheme to https + if !schemeMatch.MatchString(s) { + s = "https://" + s + } + + u, err = url.Parse(s) + if err != nil { + return nil, err + } + + // Default the path to /sdk + if u.Path == "" { + u.Path = "/sdk" + } + + if u.User == nil { + u.User = url.UserPassword("", "") + } + } + + return u, nil +} + +func NewClient(u *url.URL, insecure bool) *Client { + c := Client{ + u: u, + k: insecure, + d: newDebug(), + } + + // Initialize http.RoundTripper on client, so we can customize it below + if t, ok := http.DefaultTransport.(*http.Transport); ok { + c.t = &http.Transport{ + Proxy: t.Proxy, + DialContext: t.DialContext, + MaxIdleConns: t.MaxIdleConns, + IdleConnTimeout: t.IdleConnTimeout, + TLSHandshakeTimeout: t.TLSHandshakeTimeout, + ExpectContinueTimeout: t.ExpectContinueTimeout, + } + } else { + c.t = new(http.Transport) + } + + c.hosts = make(map[string]string) + c.t.TLSClientConfig = &tls.Config{InsecureSkipVerify: c.k} + // Don't bother setting DialTLS if InsecureSkipVerify=true + if !c.k { + c.t.DialTLS = c.dialTLS + } + + c.Client.Transport = c.t + c.Client.Jar, _ = cookiejar.New(nil) + + // Remove user information from a copy of the URL + c.u = c.URL() + c.u.User = nil + + return &c +} + +// NewServiceClient creates a NewClient with the given URL.Path and namespace. 
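// An illustrative sketch (not part of the vendored file; the host name and
// derived service path are assumptions): ParseURL supplies the default
// "https" scheme and "/sdk" path, NewClient builds the base SOAP client, and
// NewServiceClient derives a client for another endpoint on the same host.
//
//	u, err := ParseURL("vcenter.example.com") // -> https://vcenter.example.com/sdk
//	if err != nil {
//		return err
//	}
//	c := NewClient(u, true) // insecure, as with curl -k
//	pbm := c.NewServiceClient("/pbm/sdk", "pbm")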
+func (c *Client) NewServiceClient(path string, namespace string) *Client { + vc := c.URL() + u, err := url.Parse(path) + if err != nil { + log.Panicf("url.Parse(%q): %s", path, err) + } + if u.Host == "" { + u.Scheme = vc.Scheme + u.Host = vc.Host + } + + client := NewClient(u, c.k) + client.Namespace = "urn:" + namespace + if cert := c.Certificate(); cert != nil { + client.SetCertificate(*cert) + } + + // Copy the trusted thumbprints + c.hostsMu.Lock() + for k, v := range c.hosts { + client.hosts[k] = v + } + c.hostsMu.Unlock() + + // Copy the cookies + client.Client.Jar.SetCookies(u, c.Client.Jar.Cookies(u)) + + // Set SOAP Header cookie + for _, cookie := range client.Jar.Cookies(u) { + if cookie.Name == SessionCookieName { + client.cookie = cookie.Value + break + } + } + + // Copy any query params (e.g. GOVMOMI_TUNNEL_PROXY_PORT used in testing) + client.u.RawQuery = vc.RawQuery + + return client +} + +// SetRootCAs defines the set of root certificate authorities +// that clients use when verifying server certificates. +// By default TLS uses the host's root CA set. +// +// See: http.Client.Transport.TLSClientConfig.RootCAs +func (c *Client) SetRootCAs(file string) error { + pool := x509.NewCertPool() + + for _, name := range filepath.SplitList(file) { + pem, err := ioutil.ReadFile(name) + if err != nil { + return err + } + + pool.AppendCertsFromPEM(pem) + } + + c.t.TLSClientConfig.RootCAs = pool + + return nil +} + +// Add default https port if missing +func hostAddr(addr string) string { + _, port := splitHostPort(addr) + if port == "" { + return addr + ":443" + } + return addr +} + +// SetThumbprint sets the known certificate thumbprint for the given host. +// A custom DialTLS function is used to support thumbprint based verification. +// We first try tls.Dial with the default tls.Config, only falling back to thumbprint verification +// if it fails with an x509.UnknownAuthorityError or x509.HostnameError +// +// See: http.Client.Transport.DialTLS +func (c *Client) SetThumbprint(host string, thumbprint string) { + host = hostAddr(host) + + c.hostsMu.Lock() + if thumbprint == "" { + delete(c.hosts, host) + } else { + c.hosts[host] = thumbprint + } + c.hostsMu.Unlock() +} + +// Thumbprint returns the certificate thumbprint for the given host if known to this client. +func (c *Client) Thumbprint(host string) string { + host = hostAddr(host) + c.hostsMu.Lock() + defer c.hostsMu.Unlock() + return c.hosts[host] +} + +// LoadThumbprints from file with the give name. +// If name is empty or name does not exist this function will return nil. +func (c *Client) LoadThumbprints(file string) error { + if file == "" { + return nil + } + + for _, name := range filepath.SplitList(file) { + err := c.loadThumbprints(name) + if err != nil { + return err + } + } + + return nil +} + +func (c *Client) loadThumbprints(name string) error { + f, err := os.Open(name) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + scanner := bufio.NewScanner(f) + + for scanner.Scan() { + e := strings.SplitN(scanner.Text(), " ", 2) + if len(e) != 2 { + continue + } + + c.SetThumbprint(e[0], e[1]) + } + + _ = f.Close() + + return scanner.Err() +} + +// ThumbprintSHA1 returns the thumbprint of the given cert in the same format used by the SDK and Client.SetThumbprint. 
+// +// See: SSLVerifyFault.Thumbprint, SessionManagerGenericServiceTicket.Thumbprint, HostConnectSpec.SslThumbprint +func ThumbprintSHA1(cert *x509.Certificate) string { + sum := sha1.Sum(cert.Raw) + hex := make([]string, len(sum)) + for i, b := range sum { + hex[i] = fmt.Sprintf("%02X", b) + } + return strings.Join(hex, ":") +} + +func (c *Client) dialTLS(network string, addr string) (net.Conn, error) { + // Would be nice if there was a tls.Config.Verify func, + // see tls.clientHandshakeState.doFullHandshake + + conn, err := tls.Dial(network, addr, c.t.TLSClientConfig) + + if err == nil { + return conn, nil + } + + switch err.(type) { + case x509.UnknownAuthorityError: + case x509.HostnameError: + default: + return nil, err + } + + thumbprint := c.Thumbprint(addr) + if thumbprint == "" { + return nil, err + } + + config := &tls.Config{InsecureSkipVerify: true} + conn, err = tls.Dial(network, addr, config) + if err != nil { + return nil, err + } + + cert := conn.ConnectionState().PeerCertificates[0] + peer := ThumbprintSHA1(cert) + if thumbprint != peer { + _ = conn.Close() + + return nil, fmt.Errorf("Host %q thumbprint does not match %q", addr, thumbprint) + } + + return conn, nil +} + +// splitHostPort is similar to net.SplitHostPort, +// but rather than return error if there isn't a ':port', +// return an empty string for the port. +func splitHostPort(host string) (string, string) { + ix := strings.LastIndex(host, ":") + + if ix <= strings.LastIndex(host, "]") { + return host, "" + } + + name := host[:ix] + port := host[ix+1:] + + return name, port +} + +const sdkTunnel = "sdkTunnel:8089" + +func (c *Client) Certificate() *tls.Certificate { + certs := c.t.TLSClientConfig.Certificates + if len(certs) == 0 { + return nil + } + return &certs[0] +} + +func (c *Client) SetCertificate(cert tls.Certificate) { + t := c.Client.Transport.(*http.Transport) + + // Extension or HoK certificate + t.TLSClientConfig.Certificates = []tls.Certificate{cert} +} + +// Tunnel returns a Client configured to proxy requests through vCenter's http port 80, +// to the SDK tunnel virtual host. Use of the SDK tunnel is required by LoginExtensionByCertificate() +// and optional for other methods. +func (c *Client) Tunnel() *Client { + tunnel := c.NewServiceClient(c.u.Path, c.Namespace) + t := tunnel.Client.Transport.(*http.Transport) + // Proxy to vCenter host on port 80 + host := tunnel.u.Hostname() + // Should be no reason to change the default port other than testing + key := "GOVMOMI_TUNNEL_PROXY_PORT" + + port := tunnel.URL().Query().Get(key) + if port == "" { + port = os.Getenv(key) + } + + if port != "" { + host += ":" + port + } + + t.Proxy = http.ProxyURL(&url.URL{ + Scheme: "http", + Host: host, + }) + + // Rewrite url Host to use the sdk tunnel, required for a certificate request. 
+ tunnel.u.Host = sdkTunnel + return tunnel +} + +func (c *Client) URL() *url.URL { + urlCopy := *c.u + return &urlCopy +} + +type marshaledClient struct { + Cookies []*http.Cookie + URL *url.URL + Insecure bool +} + +func (c *Client) MarshalJSON() ([]byte, error) { + m := marshaledClient{ + Cookies: c.Jar.Cookies(c.u), + URL: c.u, + Insecure: c.k, + } + + return json.Marshal(m) +} + +func (c *Client) UnmarshalJSON(b []byte) error { + var m marshaledClient + + err := json.Unmarshal(b, &m) + if err != nil { + return err + } + + *c = *NewClient(m.URL, m.Insecure) + c.Jar.SetCookies(m.URL, m.Cookies) + + return nil +} + +func (c *Client) do(ctx context.Context, req *http.Request) (*http.Response, error) { + if nil == ctx || nil == ctx.Done() { // ctx.Done() is for ctx + return c.Client.Do(req) + } + + return c.Client.Do(req.WithContext(ctx)) +} + +// Signer can be implemented by soap.Header.Security to sign requests. +// If the soap.Header.Security field is set to an implementation of Signer via WithHeader(), +// then Client.RoundTrip will call Sign() to marshal the SOAP request. +type Signer interface { + Sign(Envelope) ([]byte, error) +} + +type headerContext struct{} + +// WithHeader can be used to modify the outgoing request soap.Header fields. +func (c *Client) WithHeader(ctx context.Context, header Header) context.Context { + return context.WithValue(ctx, headerContext{}, header) +} + +func (c *Client) RoundTrip(ctx context.Context, reqBody, resBody HasFault) error { + var err error + var b []byte + + reqEnv := Envelope{Body: reqBody} + resEnv := Envelope{Body: resBody} + + h, ok := ctx.Value(headerContext{}).(Header) + if !ok { + h = Header{} + } + + // We added support for OperationID before soap.Header was exported. + if id, ok := ctx.Value(types.ID{}).(string); ok { + h.ID = id + } + + h.Cookie = c.cookie + if h.Cookie != "" || h.ID != "" || h.Security != nil { + reqEnv.Header = &h // XML marshal header only if a field is set + } + + // Create debugging context for this round trip + d := c.d.newRoundTrip() + if d.enabled() { + defer d.done() + } + + if signer, ok := h.Security.(Signer); ok { + b, err = signer.Sign(reqEnv) + if err != nil { + return err + } + } else { + b, err = xml.Marshal(reqEnv) + if err != nil { + panic(err) + } + } + + rawReqBody := io.MultiReader(strings.NewReader(xml.Header), bytes.NewReader(b)) + req, err := http.NewRequest("POST", c.u.String(), rawReqBody) + if err != nil { + panic(err) + } + + req = req.WithContext(ctx) + + req.Header.Set(`Content-Type`, `text/xml; charset="utf-8"`) + + action := h.Action + if action == "" { + action = fmt.Sprintf("%s/%s", c.Namespace, c.Version) + } + req.Header.Set(`SOAPAction`, action) + + if c.UserAgent != "" { + req.Header.Set(`User-Agent`, c.UserAgent) + } + + if d.enabled() { + d.debugRequest(req) + } + + tstart := time.Now() + res, err := c.do(ctx, req) + tstop := time.Now() + + if d.enabled() { + d.logf("%6dms (%T)", tstop.Sub(tstart)/time.Millisecond, resBody) + } + + if err != nil { + return err + } + + if d.enabled() { + d.debugResponse(res) + } + + // Close response regardless of what happens next + defer res.Body.Close() + + switch res.StatusCode { + case http.StatusOK: + // OK + case http.StatusInternalServerError: + // Error, but typically includes a body explaining the error + default: + return errors.New(res.Status) + } + + dec := xml.NewDecoder(res.Body) + dec.TypeFunc = types.TypeFunc() + err = dec.Decode(&resEnv) + if err != nil { + return err + } + + if f := resBody.Fault(); f != nil { + return 
WrapSoapFault(f) + } + + return err +} + +func (c *Client) CloseIdleConnections() { + c.t.CloseIdleConnections() +} + +// ParseURL wraps url.Parse to rewrite the URL.Host field +// In the case of VM guest uploads or NFC lease URLs, a Host +// field with a value of "*" is rewritten to the Client's URL.Host. +func (c *Client) ParseURL(urlStr string) (*url.URL, error) { + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + + host, _ := splitHostPort(u.Host) + if host == "*" { + // Also use Client's port, to support port forwarding + u.Host = c.URL().Host + } + + return u, nil +} + +type Upload struct { + Type string + Method string + ContentLength int64 + Headers map[string]string + Ticket *http.Cookie + Progress progress.Sinker +} + +var DefaultUpload = Upload{ + Type: "application/octet-stream", + Method: "PUT", +} + +// Upload PUTs the local file to the given URL +func (c *Client) Upload(ctx context.Context, f io.Reader, u *url.URL, param *Upload) error { + var err error + + if param.Progress != nil { + pr := progress.NewReader(ctx, param.Progress, f, param.ContentLength) + f = pr + + // Mark progress reader as done when returning from this function. + defer func() { + pr.Done(err) + }() + } + + req, err := http.NewRequest(param.Method, u.String(), f) + if err != nil { + return err + } + + req = req.WithContext(ctx) + + req.ContentLength = param.ContentLength + req.Header.Set("Content-Type", param.Type) + + for k, v := range param.Headers { + req.Header.Add(k, v) + } + + if param.Ticket != nil { + req.AddCookie(param.Ticket) + } + + res, err := c.Client.Do(req) + if err != nil { + return err + } + + switch res.StatusCode { + case http.StatusOK: + case http.StatusCreated: + default: + err = errors.New(res.Status) + } + + return err +} + +// UploadFile PUTs the local file to the given URL +func (c *Client) UploadFile(ctx context.Context, file string, u *url.URL, param *Upload) error { + if param == nil { + p := DefaultUpload // Copy since we set ContentLength + param = &p + } + + s, err := os.Stat(file) + if err != nil { + return err + } + + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + + param.ContentLength = s.Size() + + return c.Upload(ctx, f, u, param) +} + +type Download struct { + Method string + Headers map[string]string + Ticket *http.Cookie + Progress progress.Sinker + Writer io.Writer +} + +var DefaultDownload = Download{ + Method: "GET", +} + +// DownloadRequest wraps http.Client.Do, returning the http.Response without checking its StatusCode +func (c *Client) DownloadRequest(ctx context.Context, u *url.URL, param *Download) (*http.Response, error) { + req, err := http.NewRequest(param.Method, u.String(), nil) + if err != nil { + return nil, err + } + + req = req.WithContext(ctx) + + for k, v := range param.Headers { + req.Header.Add(k, v) + } + + if param.Ticket != nil { + req.AddCookie(param.Ticket) + } + + return c.Client.Do(req) +} + +// Download GETs the remote file from the given URL +func (c *Client) Download(ctx context.Context, u *url.URL, param *Download) (io.ReadCloser, int64, error) { + res, err := c.DownloadRequest(ctx, u, param) + if err != nil { + return nil, 0, err + } + + switch res.StatusCode { + case http.StatusOK: + default: + err = errors.New(res.Status) + } + + if err != nil { + return nil, 0, err + } + + r := res.Body + + return r, res.ContentLength, nil +} + +func (c *Client) WriteFile(ctx context.Context, file string, src io.Reader, size int64, s progress.Sinker, w io.Writer) error { + var err error + + r 
:= src + + fh, err := os.Create(file) + if err != nil { + return err + } + + if s != nil { + pr := progress.NewReader(ctx, s, src, size) + src = pr + + // Mark progress reader as done when returning from this function. + defer func() { + pr.Done(err) + }() + } + + if w == nil { + w = fh + } else { + w = io.MultiWriter(w, fh) + } + + _, err = io.Copy(w, r) + + cerr := fh.Close() + + if err == nil { + err = cerr + } + + return err +} + +// DownloadFile GETs the given URL to a local file +func (c *Client) DownloadFile(ctx context.Context, file string, u *url.URL, param *Download) error { + var err error + if param == nil { + param = &DefaultDownload + } + + rc, contentLength, err := c.Download(ctx, u, param) + if err != nil { + return err + } + + return c.WriteFile(ctx, file, rc, contentLength, param.Progress, param.Writer) +} diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/debug.go b/vendor/github.com/vmware/govmomi/vim25/soap/debug.go new file mode 100644 index 000000000000..63518abca3ae --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/soap/debug.go @@ -0,0 +1,149 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package soap + +import ( + "fmt" + "io" + "net/http" + "net/http/httputil" + "sync/atomic" + "time" + + "github.com/vmware/govmomi/vim25/debug" +) + +// teeReader wraps io.TeeReader and patches through the Close() function. +type teeReader struct { + io.Reader + io.Closer +} + +func newTeeReader(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return teeReader{ + Reader: io.TeeReader(rc, w), + Closer: rc, + } +} + +// debugRoundTrip contains state and logic needed to debug a single round trip. +type debugRoundTrip struct { + cn uint64 // Client number + rn uint64 // Request number + log io.WriteCloser // Request log + cs []io.Closer // Files that need closing when done +} + +func (d *debugRoundTrip) logf(format string, a ...interface{}) { + now := time.Now().Format("2006-01-02T15-04-05.000000000") + fmt.Fprintf(d.log, "%s - %04d: ", now, d.rn) + fmt.Fprintf(d.log, format, a...) 
+ fmt.Fprintf(d.log, "\n") +} + +func (d *debugRoundTrip) enabled() bool { + return d != nil +} + +func (d *debugRoundTrip) done() { + for _, c := range d.cs { + c.Close() + } +} + +func (d *debugRoundTrip) newFile(suffix string) io.WriteCloser { + return debug.NewFile(fmt.Sprintf("%d-%04d.%s", d.cn, d.rn, suffix)) +} + +func (d *debugRoundTrip) debugRequest(req *http.Request) { + if d == nil { + return + } + + var wc io.WriteCloser + + // Capture headers + wc = d.newFile("req.headers") + b, _ := httputil.DumpRequest(req, false) + wc.Write(b) + wc.Close() + + // Capture body + wc = d.newFile("req.xml") + req.Body = newTeeReader(req.Body, wc) + + // Delay closing until marked done + d.cs = append(d.cs, wc) +} + +func (d *debugRoundTrip) debugResponse(res *http.Response) { + if d == nil { + return + } + + var wc io.WriteCloser + + // Capture headers + wc = d.newFile("res.headers") + b, _ := httputil.DumpResponse(res, false) + wc.Write(b) + wc.Close() + + // Capture body + wc = d.newFile("res.xml") + res.Body = newTeeReader(res.Body, wc) + + // Delay closing until marked done + d.cs = append(d.cs, wc) +} + +var cn uint64 // Client counter + +// debugContainer wraps the debugging state for a single client. +type debugContainer struct { + cn uint64 // Client number + rn uint64 // Request counter + log io.WriteCloser // Request log +} + +func newDebug() *debugContainer { + d := debugContainer{ + cn: atomic.AddUint64(&cn, 1), + rn: 0, + } + + if !debug.Enabled() { + return nil + } + + d.log = debug.NewFile(fmt.Sprintf("%d-client.log", d.cn)) + return &d +} + +func (d *debugContainer) newRoundTrip() *debugRoundTrip { + if d == nil { + return nil + } + + drt := debugRoundTrip{ + cn: d.cn, + rn: atomic.AddUint64(&d.rn, 1), + log: d.log, + } + + return &drt +} diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/error.go b/vendor/github.com/vmware/govmomi/vim25/soap/error.go new file mode 100644 index 000000000000..d89208522a9b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/soap/error.go @@ -0,0 +1,115 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package soap + +import ( + "fmt" + "reflect" + + "github.com/vmware/govmomi/vim25/types" +) + +type regularError struct { + err error +} + +func (r regularError) Error() string { + return r.err.Error() +} + +type soapFaultError struct { + fault *Fault +} + +func (s soapFaultError) Error() string { + msg := s.fault.String + + if msg == "" { + msg = reflect.TypeOf(s.fault.Detail.Fault).Name() + } + + return fmt.Sprintf("%s: %s", s.fault.Code, msg) +} + +type vimFaultError struct { + fault types.BaseMethodFault +} + +func (v vimFaultError) Error() string { + typ := reflect.TypeOf(v.fault) + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + return typ.Name() +} + +func (v vimFaultError) Fault() types.BaseMethodFault { + return v.fault +} + +func Wrap(err error) error { + switch err.(type) { + case regularError: + return err + case soapFaultError: + return err + case vimFaultError: + return err + } + + return WrapRegularError(err) +} + +func WrapRegularError(err error) error { + return regularError{err} +} + +func IsRegularError(err error) bool { + _, ok := err.(regularError) + return ok +} + +func ToRegularError(err error) error { + return err.(regularError).err +} + +func WrapSoapFault(f *Fault) error { + return soapFaultError{f} +} + +func IsSoapFault(err error) bool { + _, ok := err.(soapFaultError) + return ok +} + +func ToSoapFault(err error) *Fault { + return err.(soapFaultError).fault +} + +func WrapVimFault(v types.BaseMethodFault) error { + return vimFaultError{v} +} + +func IsVimFault(err error) bool { + _, ok := err.(vimFaultError) + return ok +} + +func ToVimFault(err error) types.BaseMethodFault { + return err.(vimFaultError).fault +} diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/soap.go b/vendor/github.com/vmware/govmomi/vim25/soap/soap.go new file mode 100644 index 000000000000..a8dc121baade --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/soap/soap.go @@ -0,0 +1,49 @@ +/* +Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package soap + +import ( + "github.com/vmware/govmomi/vim25/types" + "github.com/vmware/govmomi/vim25/xml" +) + +// Header includes optional soap Header fields. +type Header struct { + Action string `xml:"-"` // Action is the 'SOAPAction' HTTP header value. Defaults to "Client.Namespace/Client.Version". + Cookie string `xml:"vcSessionCookie,omitempty"` // Cookie is a vCenter session cookie that can be used with other SDK endpoints (e.g. pbm). + ID string `xml:"operationID,omitempty"` // ID is the operationID used by ESX/vCenter logging for correlation. + Security interface{} `xml:",omitempty"` // Security is used for SAML token authentication and request signing. 
+} + +type Envelope struct { + XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"` + Header *Header `xml:"http://schemas.xmlsoap.org/soap/envelope/ Header,omitempty"` + Body interface{} +} + +type Fault struct { + XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault"` + Code string `xml:"faultcode"` + String string `xml:"faultstring"` + Detail struct { + Fault types.AnyType `xml:",any,typeattr"` + } `xml:"detail"` +} + +func (f *Fault) VimFault() types.AnyType { + return f.Detail.Fault +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/base.go b/vendor/github.com/vmware/govmomi/vim25/types/base.go new file mode 100644 index 000000000000..3bb12b7412e1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/base.go @@ -0,0 +1,19 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +type AnyType interface{} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/enum.go b/vendor/github.com/vmware/govmomi/vim25/types/enum.go new file mode 100644 index 000000000000..c8c5977526e9 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/enum.go @@ -0,0 +1,4824 @@ +/* +Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package types + +import "reflect" + +type ActionParameter string + +const ( + ActionParameterTargetName = ActionParameter("targetName") + ActionParameterAlarmName = ActionParameter("alarmName") + ActionParameterOldStatus = ActionParameter("oldStatus") + ActionParameterNewStatus = ActionParameter("newStatus") + ActionParameterTriggeringSummary = ActionParameter("triggeringSummary") + ActionParameterDeclaringSummary = ActionParameter("declaringSummary") + ActionParameterEventDescription = ActionParameter("eventDescription") + ActionParameterTarget = ActionParameter("target") + ActionParameterAlarm = ActionParameter("alarm") +) + +func init() { + t["ActionParameter"] = reflect.TypeOf((*ActionParameter)(nil)).Elem() +} + +type ActionType string + +const ( + ActionTypeMigrationV1 = ActionType("MigrationV1") + ActionTypeVmPowerV1 = ActionType("VmPowerV1") + ActionTypeHostPowerV1 = ActionType("HostPowerV1") + ActionTypeHostMaintenanceV1 = ActionType("HostMaintenanceV1") + ActionTypeStorageMigrationV1 = ActionType("StorageMigrationV1") + ActionTypeStoragePlacementV1 = ActionType("StoragePlacementV1") + ActionTypePlacementV1 = ActionType("PlacementV1") + ActionTypeHostInfraUpdateHaV1 = ActionType("HostInfraUpdateHaV1") +) + +func init() { + t["ActionType"] = reflect.TypeOf((*ActionType)(nil)).Elem() +} + +type AffinityType string + +const ( + AffinityTypeMemory = AffinityType("memory") + AffinityTypeCpu = AffinityType("cpu") +) + +func init() { + t["AffinityType"] = reflect.TypeOf((*AffinityType)(nil)).Elem() +} + +type AgentInstallFailedReason string + +const ( + AgentInstallFailedReasonNotEnoughSpaceOnDevice = AgentInstallFailedReason("NotEnoughSpaceOnDevice") + AgentInstallFailedReasonPrepareToUpgradeFailed = AgentInstallFailedReason("PrepareToUpgradeFailed") + AgentInstallFailedReasonAgentNotRunning = AgentInstallFailedReason("AgentNotRunning") + AgentInstallFailedReasonAgentNotReachable = AgentInstallFailedReason("AgentNotReachable") + AgentInstallFailedReasonInstallTimedout = AgentInstallFailedReason("InstallTimedout") + AgentInstallFailedReasonSignatureVerificationFailed = AgentInstallFailedReason("SignatureVerificationFailed") + AgentInstallFailedReasonAgentUploadFailed = AgentInstallFailedReason("AgentUploadFailed") + AgentInstallFailedReasonAgentUploadTimedout = AgentInstallFailedReason("AgentUploadTimedout") + AgentInstallFailedReasonUnknownInstallerError = AgentInstallFailedReason("UnknownInstallerError") +) + +func init() { + t["AgentInstallFailedReason"] = reflect.TypeOf((*AgentInstallFailedReason)(nil)).Elem() +} + +type AlarmFilterSpecAlarmTypeByEntity string + +const ( + AlarmFilterSpecAlarmTypeByEntityEntityTypeAll = AlarmFilterSpecAlarmTypeByEntity("entityTypeAll") + AlarmFilterSpecAlarmTypeByEntityEntityTypeHost = AlarmFilterSpecAlarmTypeByEntity("entityTypeHost") + AlarmFilterSpecAlarmTypeByEntityEntityTypeVm = AlarmFilterSpecAlarmTypeByEntity("entityTypeVm") +) + +func init() { + t["AlarmFilterSpecAlarmTypeByEntity"] = reflect.TypeOf((*AlarmFilterSpecAlarmTypeByEntity)(nil)).Elem() +} + +type AlarmFilterSpecAlarmTypeByTrigger string + +const ( + AlarmFilterSpecAlarmTypeByTriggerTriggerTypeAll = AlarmFilterSpecAlarmTypeByTrigger("triggerTypeAll") + AlarmFilterSpecAlarmTypeByTriggerTriggerTypeEvent = AlarmFilterSpecAlarmTypeByTrigger("triggerTypeEvent") + AlarmFilterSpecAlarmTypeByTriggerTriggerTypeMetric = AlarmFilterSpecAlarmTypeByTrigger("triggerTypeMetric") +) + +func init() { + t["AlarmFilterSpecAlarmTypeByTrigger"] = 
reflect.TypeOf((*AlarmFilterSpecAlarmTypeByTrigger)(nil)).Elem() +} + +type AnswerFileValidationInfoStatus string + +const ( + AnswerFileValidationInfoStatusSuccess = AnswerFileValidationInfoStatus("success") + AnswerFileValidationInfoStatusFailed = AnswerFileValidationInfoStatus("failed") + AnswerFileValidationInfoStatusFailed_defaults = AnswerFileValidationInfoStatus("failed_defaults") +) + +func init() { + t["AnswerFileValidationInfoStatus"] = reflect.TypeOf((*AnswerFileValidationInfoStatus)(nil)).Elem() +} + +type ApplyHostProfileConfigurationResultStatus string + +const ( + ApplyHostProfileConfigurationResultStatusSuccess = ApplyHostProfileConfigurationResultStatus("success") + ApplyHostProfileConfigurationResultStatusFailed = ApplyHostProfileConfigurationResultStatus("failed") + ApplyHostProfileConfigurationResultStatusReboot_failed = ApplyHostProfileConfigurationResultStatus("reboot_failed") + ApplyHostProfileConfigurationResultStatusStateless_reboot_failed = ApplyHostProfileConfigurationResultStatus("stateless_reboot_failed") + ApplyHostProfileConfigurationResultStatusCheck_compliance_failed = ApplyHostProfileConfigurationResultStatus("check_compliance_failed") + ApplyHostProfileConfigurationResultStatusState_not_satisfied = ApplyHostProfileConfigurationResultStatus("state_not_satisfied") + ApplyHostProfileConfigurationResultStatusExit_maintenancemode_failed = ApplyHostProfileConfigurationResultStatus("exit_maintenancemode_failed") + ApplyHostProfileConfigurationResultStatusCanceled = ApplyHostProfileConfigurationResultStatus("canceled") +) + +func init() { + t["ApplyHostProfileConfigurationResultStatus"] = reflect.TypeOf((*ApplyHostProfileConfigurationResultStatus)(nil)).Elem() +} + +type ArrayUpdateOperation string + +const ( + ArrayUpdateOperationAdd = ArrayUpdateOperation("add") + ArrayUpdateOperationRemove = ArrayUpdateOperation("remove") + ArrayUpdateOperationEdit = ArrayUpdateOperation("edit") +) + +func init() { + t["ArrayUpdateOperation"] = reflect.TypeOf((*ArrayUpdateOperation)(nil)).Elem() +} + +type AutoStartAction string + +const ( + AutoStartActionNone = AutoStartAction("none") + AutoStartActionSystemDefault = AutoStartAction("systemDefault") + AutoStartActionPowerOn = AutoStartAction("powerOn") + AutoStartActionPowerOff = AutoStartAction("powerOff") + AutoStartActionGuestShutdown = AutoStartAction("guestShutdown") + AutoStartActionSuspend = AutoStartAction("suspend") +) + +func init() { + t["AutoStartAction"] = reflect.TypeOf((*AutoStartAction)(nil)).Elem() +} + +type AutoStartWaitHeartbeatSetting string + +const ( + AutoStartWaitHeartbeatSettingYes = AutoStartWaitHeartbeatSetting("yes") + AutoStartWaitHeartbeatSettingNo = AutoStartWaitHeartbeatSetting("no") + AutoStartWaitHeartbeatSettingSystemDefault = AutoStartWaitHeartbeatSetting("systemDefault") +) + +func init() { + t["AutoStartWaitHeartbeatSetting"] = reflect.TypeOf((*AutoStartWaitHeartbeatSetting)(nil)).Elem() +} + +type BaseConfigInfoDiskFileBackingInfoProvisioningType string + +const ( + BaseConfigInfoDiskFileBackingInfoProvisioningTypeThin = BaseConfigInfoDiskFileBackingInfoProvisioningType("thin") + BaseConfigInfoDiskFileBackingInfoProvisioningTypeEagerZeroedThick = BaseConfigInfoDiskFileBackingInfoProvisioningType("eagerZeroedThick") + BaseConfigInfoDiskFileBackingInfoProvisioningTypeLazyZeroedThick = BaseConfigInfoDiskFileBackingInfoProvisioningType("lazyZeroedThick") +) + +func init() { + t["BaseConfigInfoDiskFileBackingInfoProvisioningType"] = 
reflect.TypeOf((*BaseConfigInfoDiskFileBackingInfoProvisioningType)(nil)).Elem() +} + +type BatchResultResult string + +const ( + BatchResultResultSuccess = BatchResultResult("success") + BatchResultResultFail = BatchResultResult("fail") +) + +func init() { + t["BatchResultResult"] = reflect.TypeOf((*BatchResultResult)(nil)).Elem() +} + +type CannotEnableVmcpForClusterReason string + +const ( + CannotEnableVmcpForClusterReasonAPDTimeoutDisabled = CannotEnableVmcpForClusterReason("APDTimeoutDisabled") + CannotEnableVmcpForClusterReasonIncompatibleHostVersion = CannotEnableVmcpForClusterReason("IncompatibleHostVersion") +) + +func init() { + t["CannotEnableVmcpForClusterReason"] = reflect.TypeOf((*CannotEnableVmcpForClusterReason)(nil)).Elem() +} + +type CannotMoveFaultToleranceVmMoveType string + +const ( + CannotMoveFaultToleranceVmMoveTypeResourcePool = CannotMoveFaultToleranceVmMoveType("resourcePool") + CannotMoveFaultToleranceVmMoveTypeCluster = CannotMoveFaultToleranceVmMoveType("cluster") +) + +func init() { + t["CannotMoveFaultToleranceVmMoveType"] = reflect.TypeOf((*CannotMoveFaultToleranceVmMoveType)(nil)).Elem() +} + +type CannotPowerOffVmInClusterOperation string + +const ( + CannotPowerOffVmInClusterOperationSuspend = CannotPowerOffVmInClusterOperation("suspend") + CannotPowerOffVmInClusterOperationPowerOff = CannotPowerOffVmInClusterOperation("powerOff") + CannotPowerOffVmInClusterOperationGuestShutdown = CannotPowerOffVmInClusterOperation("guestShutdown") + CannotPowerOffVmInClusterOperationGuestSuspend = CannotPowerOffVmInClusterOperation("guestSuspend") +) + +func init() { + t["CannotPowerOffVmInClusterOperation"] = reflect.TypeOf((*CannotPowerOffVmInClusterOperation)(nil)).Elem() +} + +type CannotUseNetworkReason string + +const ( + CannotUseNetworkReasonNetworkReservationNotSupported = CannotUseNetworkReason("NetworkReservationNotSupported") + CannotUseNetworkReasonMismatchedNetworkPolicies = CannotUseNetworkReason("MismatchedNetworkPolicies") + CannotUseNetworkReasonMismatchedDvsVersionOrVendor = CannotUseNetworkReason("MismatchedDvsVersionOrVendor") + CannotUseNetworkReasonVMotionToUnsupportedNetworkType = CannotUseNetworkReason("VMotionToUnsupportedNetworkType") +) + +func init() { + t["CannotUseNetworkReason"] = reflect.TypeOf((*CannotUseNetworkReason)(nil)).Elem() +} + +type CheckTestType string + +const ( + CheckTestTypeSourceTests = CheckTestType("sourceTests") + CheckTestTypeHostTests = CheckTestType("hostTests") + CheckTestTypeResourcePoolTests = CheckTestType("resourcePoolTests") + CheckTestTypeDatastoreTests = CheckTestType("datastoreTests") + CheckTestTypeNetworkTests = CheckTestType("networkTests") +) + +func init() { + t["CheckTestType"] = reflect.TypeOf((*CheckTestType)(nil)).Elem() +} + +type ClusterDasAamNodeStateDasState string + +const ( + ClusterDasAamNodeStateDasStateUninitialized = ClusterDasAamNodeStateDasState("uninitialized") + ClusterDasAamNodeStateDasStateInitialized = ClusterDasAamNodeStateDasState("initialized") + ClusterDasAamNodeStateDasStateConfiguring = ClusterDasAamNodeStateDasState("configuring") + ClusterDasAamNodeStateDasStateUnconfiguring = ClusterDasAamNodeStateDasState("unconfiguring") + ClusterDasAamNodeStateDasStateRunning = ClusterDasAamNodeStateDasState("running") + ClusterDasAamNodeStateDasStateError = ClusterDasAamNodeStateDasState("error") + ClusterDasAamNodeStateDasStateAgentShutdown = ClusterDasAamNodeStateDasState("agentShutdown") + ClusterDasAamNodeStateDasStateNodeFailed = ClusterDasAamNodeStateDasState("nodeFailed") +) 
+ +func init() { + t["ClusterDasAamNodeStateDasState"] = reflect.TypeOf((*ClusterDasAamNodeStateDasState)(nil)).Elem() +} + +type ClusterDasConfigInfoHBDatastoreCandidate string + +const ( + ClusterDasConfigInfoHBDatastoreCandidateUserSelectedDs = ClusterDasConfigInfoHBDatastoreCandidate("userSelectedDs") + ClusterDasConfigInfoHBDatastoreCandidateAllFeasibleDs = ClusterDasConfigInfoHBDatastoreCandidate("allFeasibleDs") + ClusterDasConfigInfoHBDatastoreCandidateAllFeasibleDsWithUserPreference = ClusterDasConfigInfoHBDatastoreCandidate("allFeasibleDsWithUserPreference") +) + +func init() { + t["ClusterDasConfigInfoHBDatastoreCandidate"] = reflect.TypeOf((*ClusterDasConfigInfoHBDatastoreCandidate)(nil)).Elem() +} + +type ClusterDasConfigInfoServiceState string + +const ( + ClusterDasConfigInfoServiceStateDisabled = ClusterDasConfigInfoServiceState("disabled") + ClusterDasConfigInfoServiceStateEnabled = ClusterDasConfigInfoServiceState("enabled") +) + +func init() { + t["ClusterDasConfigInfoServiceState"] = reflect.TypeOf((*ClusterDasConfigInfoServiceState)(nil)).Elem() +} + +type ClusterDasConfigInfoVmMonitoringState string + +const ( + ClusterDasConfigInfoVmMonitoringStateVmMonitoringDisabled = ClusterDasConfigInfoVmMonitoringState("vmMonitoringDisabled") + ClusterDasConfigInfoVmMonitoringStateVmMonitoringOnly = ClusterDasConfigInfoVmMonitoringState("vmMonitoringOnly") + ClusterDasConfigInfoVmMonitoringStateVmAndAppMonitoring = ClusterDasConfigInfoVmMonitoringState("vmAndAppMonitoring") +) + +func init() { + t["ClusterDasConfigInfoVmMonitoringState"] = reflect.TypeOf((*ClusterDasConfigInfoVmMonitoringState)(nil)).Elem() +} + +type ClusterDasFdmAvailabilityState string + +const ( + ClusterDasFdmAvailabilityStateUninitialized = ClusterDasFdmAvailabilityState("uninitialized") + ClusterDasFdmAvailabilityStateElection = ClusterDasFdmAvailabilityState("election") + ClusterDasFdmAvailabilityStateMaster = ClusterDasFdmAvailabilityState("master") + ClusterDasFdmAvailabilityStateConnectedToMaster = ClusterDasFdmAvailabilityState("connectedToMaster") + ClusterDasFdmAvailabilityStateNetworkPartitionedFromMaster = ClusterDasFdmAvailabilityState("networkPartitionedFromMaster") + ClusterDasFdmAvailabilityStateNetworkIsolated = ClusterDasFdmAvailabilityState("networkIsolated") + ClusterDasFdmAvailabilityStateHostDown = ClusterDasFdmAvailabilityState("hostDown") + ClusterDasFdmAvailabilityStateInitializationError = ClusterDasFdmAvailabilityState("initializationError") + ClusterDasFdmAvailabilityStateUninitializationError = ClusterDasFdmAvailabilityState("uninitializationError") + ClusterDasFdmAvailabilityStateFdmUnreachable = ClusterDasFdmAvailabilityState("fdmUnreachable") +) + +func init() { + t["ClusterDasFdmAvailabilityState"] = reflect.TypeOf((*ClusterDasFdmAvailabilityState)(nil)).Elem() +} + +type ClusterDasVmSettingsIsolationResponse string + +const ( + ClusterDasVmSettingsIsolationResponseNone = ClusterDasVmSettingsIsolationResponse("none") + ClusterDasVmSettingsIsolationResponsePowerOff = ClusterDasVmSettingsIsolationResponse("powerOff") + ClusterDasVmSettingsIsolationResponseShutdown = ClusterDasVmSettingsIsolationResponse("shutdown") + ClusterDasVmSettingsIsolationResponseClusterIsolationResponse = ClusterDasVmSettingsIsolationResponse("clusterIsolationResponse") +) + +func init() { + t["ClusterDasVmSettingsIsolationResponse"] = reflect.TypeOf((*ClusterDasVmSettingsIsolationResponse)(nil)).Elem() +} + +type ClusterDasVmSettingsRestartPriority string + +const ( + 
ClusterDasVmSettingsRestartPriorityDisabled = ClusterDasVmSettingsRestartPriority("disabled") + ClusterDasVmSettingsRestartPriorityLowest = ClusterDasVmSettingsRestartPriority("lowest") + ClusterDasVmSettingsRestartPriorityLow = ClusterDasVmSettingsRestartPriority("low") + ClusterDasVmSettingsRestartPriorityMedium = ClusterDasVmSettingsRestartPriority("medium") + ClusterDasVmSettingsRestartPriorityHigh = ClusterDasVmSettingsRestartPriority("high") + ClusterDasVmSettingsRestartPriorityHighest = ClusterDasVmSettingsRestartPriority("highest") + ClusterDasVmSettingsRestartPriorityClusterRestartPriority = ClusterDasVmSettingsRestartPriority("clusterRestartPriority") +) + +func init() { + t["ClusterDasVmSettingsRestartPriority"] = reflect.TypeOf((*ClusterDasVmSettingsRestartPriority)(nil)).Elem() +} + +type ClusterHostInfraUpdateHaModeActionOperationType string + +const ( + ClusterHostInfraUpdateHaModeActionOperationTypeEnterQuarantine = ClusterHostInfraUpdateHaModeActionOperationType("enterQuarantine") + ClusterHostInfraUpdateHaModeActionOperationTypeExitQuarantine = ClusterHostInfraUpdateHaModeActionOperationType("exitQuarantine") + ClusterHostInfraUpdateHaModeActionOperationTypeEnterMaintenance = ClusterHostInfraUpdateHaModeActionOperationType("enterMaintenance") +) + +func init() { + t["ClusterHostInfraUpdateHaModeActionOperationType"] = reflect.TypeOf((*ClusterHostInfraUpdateHaModeActionOperationType)(nil)).Elem() +} + +type ClusterInfraUpdateHaConfigInfoBehaviorType string + +const ( + ClusterInfraUpdateHaConfigInfoBehaviorTypeManual = ClusterInfraUpdateHaConfigInfoBehaviorType("Manual") + ClusterInfraUpdateHaConfigInfoBehaviorTypeAutomated = ClusterInfraUpdateHaConfigInfoBehaviorType("Automated") +) + +func init() { + t["ClusterInfraUpdateHaConfigInfoBehaviorType"] = reflect.TypeOf((*ClusterInfraUpdateHaConfigInfoBehaviorType)(nil)).Elem() +} + +type ClusterInfraUpdateHaConfigInfoRemediationType string + +const ( + ClusterInfraUpdateHaConfigInfoRemediationTypeQuarantineMode = ClusterInfraUpdateHaConfigInfoRemediationType("QuarantineMode") + ClusterInfraUpdateHaConfigInfoRemediationTypeMaintenanceMode = ClusterInfraUpdateHaConfigInfoRemediationType("MaintenanceMode") +) + +func init() { + t["ClusterInfraUpdateHaConfigInfoRemediationType"] = reflect.TypeOf((*ClusterInfraUpdateHaConfigInfoRemediationType)(nil)).Elem() +} + +type ClusterPowerOnVmOption string + +const ( + ClusterPowerOnVmOptionOverrideAutomationLevel = ClusterPowerOnVmOption("OverrideAutomationLevel") + ClusterPowerOnVmOptionReserveResources = ClusterPowerOnVmOption("ReserveResources") +) + +func init() { + t["ClusterPowerOnVmOption"] = reflect.TypeOf((*ClusterPowerOnVmOption)(nil)).Elem() +} + +type ClusterProfileServiceType string + +const ( + ClusterProfileServiceTypeDRS = ClusterProfileServiceType("DRS") + ClusterProfileServiceTypeHA = ClusterProfileServiceType("HA") + ClusterProfileServiceTypeDPM = ClusterProfileServiceType("DPM") + ClusterProfileServiceTypeFT = ClusterProfileServiceType("FT") +) + +func init() { + t["ClusterProfileServiceType"] = reflect.TypeOf((*ClusterProfileServiceType)(nil)).Elem() +} + +type ClusterVmComponentProtectionSettingsStorageVmReaction string + +const ( + ClusterVmComponentProtectionSettingsStorageVmReactionDisabled = ClusterVmComponentProtectionSettingsStorageVmReaction("disabled") + ClusterVmComponentProtectionSettingsStorageVmReactionWarning = ClusterVmComponentProtectionSettingsStorageVmReaction("warning") + ClusterVmComponentProtectionSettingsStorageVmReactionRestartConservative = 
ClusterVmComponentProtectionSettingsStorageVmReaction("restartConservative") + ClusterVmComponentProtectionSettingsStorageVmReactionRestartAggressive = ClusterVmComponentProtectionSettingsStorageVmReaction("restartAggressive") + ClusterVmComponentProtectionSettingsStorageVmReactionClusterDefault = ClusterVmComponentProtectionSettingsStorageVmReaction("clusterDefault") +) + +func init() { + t["ClusterVmComponentProtectionSettingsStorageVmReaction"] = reflect.TypeOf((*ClusterVmComponentProtectionSettingsStorageVmReaction)(nil)).Elem() +} + +type ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared string + +const ( + ClusterVmComponentProtectionSettingsVmReactionOnAPDClearedNone = ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared("none") + ClusterVmComponentProtectionSettingsVmReactionOnAPDClearedReset = ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared("reset") + ClusterVmComponentProtectionSettingsVmReactionOnAPDClearedUseClusterDefault = ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared("useClusterDefault") +) + +func init() { + t["ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared"] = reflect.TypeOf((*ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared)(nil)).Elem() +} + +type ClusterVmReadinessReadyCondition string + +const ( + ClusterVmReadinessReadyConditionNone = ClusterVmReadinessReadyCondition("none") + ClusterVmReadinessReadyConditionPoweredOn = ClusterVmReadinessReadyCondition("poweredOn") + ClusterVmReadinessReadyConditionGuestHbStatusGreen = ClusterVmReadinessReadyCondition("guestHbStatusGreen") + ClusterVmReadinessReadyConditionAppHbStatusGreen = ClusterVmReadinessReadyCondition("appHbStatusGreen") + ClusterVmReadinessReadyConditionUseClusterDefault = ClusterVmReadinessReadyCondition("useClusterDefault") +) + +func init() { + t["ClusterVmReadinessReadyCondition"] = reflect.TypeOf((*ClusterVmReadinessReadyCondition)(nil)).Elem() +} + +type ComplianceResultStatus string + +const ( + ComplianceResultStatusCompliant = ComplianceResultStatus("compliant") + ComplianceResultStatusNonCompliant = ComplianceResultStatus("nonCompliant") + ComplianceResultStatusUnknown = ComplianceResultStatus("unknown") + ComplianceResultStatusRunning = ComplianceResultStatus("running") +) + +func init() { + t["ComplianceResultStatus"] = reflect.TypeOf((*ComplianceResultStatus)(nil)).Elem() +} + +type ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState string + +const ( + ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseStateLicensed = ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState("licensed") + ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseStateUnlicensed = ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState("unlicensed") + ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseStateUnknown = ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState("unknown") +) + +func init() { + t["ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState"] = reflect.TypeOf((*ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState)(nil)).Elem() +} + +type ConfigSpecOperation string + +const ( + ConfigSpecOperationAdd = ConfigSpecOperation("add") + ConfigSpecOperationEdit = ConfigSpecOperation("edit") + ConfigSpecOperationRemove = ConfigSpecOperation("remove") +) + +func init() { + t["ConfigSpecOperation"] = reflect.TypeOf((*ConfigSpecOperation)(nil)).Elem() +} + +type CustomizationLicenseDataMode string + +const ( + CustomizationLicenseDataModePerServer = CustomizationLicenseDataMode("perServer") + CustomizationLicenseDataModePerSeat = 
CustomizationLicenseDataMode("perSeat") +) + +func init() { + t["CustomizationLicenseDataMode"] = reflect.TypeOf((*CustomizationLicenseDataMode)(nil)).Elem() +} + +type CustomizationNetBIOSMode string + +const ( + CustomizationNetBIOSModeEnableNetBIOSViaDhcp = CustomizationNetBIOSMode("enableNetBIOSViaDhcp") + CustomizationNetBIOSModeEnableNetBIOS = CustomizationNetBIOSMode("enableNetBIOS") + CustomizationNetBIOSModeDisableNetBIOS = CustomizationNetBIOSMode("disableNetBIOS") +) + +func init() { + t["CustomizationNetBIOSMode"] = reflect.TypeOf((*CustomizationNetBIOSMode)(nil)).Elem() +} + +type CustomizationSysprepRebootOption string + +const ( + CustomizationSysprepRebootOptionReboot = CustomizationSysprepRebootOption("reboot") + CustomizationSysprepRebootOptionNoreboot = CustomizationSysprepRebootOption("noreboot") + CustomizationSysprepRebootOptionShutdown = CustomizationSysprepRebootOption("shutdown") +) + +func init() { + t["CustomizationSysprepRebootOption"] = reflect.TypeOf((*CustomizationSysprepRebootOption)(nil)).Elem() +} + +type DVPortStatusVmDirectPathGen2InactiveReasonNetwork string + +const ( + DVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptIncompatibleDvs = DVPortStatusVmDirectPathGen2InactiveReasonNetwork("portNptIncompatibleDvs") + DVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptNoCompatibleNics = DVPortStatusVmDirectPathGen2InactiveReasonNetwork("portNptNoCompatibleNics") + DVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptNoVirtualFunctionsAvailable = DVPortStatusVmDirectPathGen2InactiveReasonNetwork("portNptNoVirtualFunctionsAvailable") + DVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptDisabledForPort = DVPortStatusVmDirectPathGen2InactiveReasonNetwork("portNptDisabledForPort") +) + +func init() { + t["DVPortStatusVmDirectPathGen2InactiveReasonNetwork"] = reflect.TypeOf((*DVPortStatusVmDirectPathGen2InactiveReasonNetwork)(nil)).Elem() +} + +type DVPortStatusVmDirectPathGen2InactiveReasonOther string + +const ( + DVPortStatusVmDirectPathGen2InactiveReasonOtherPortNptIncompatibleHost = DVPortStatusVmDirectPathGen2InactiveReasonOther("portNptIncompatibleHost") + DVPortStatusVmDirectPathGen2InactiveReasonOtherPortNptIncompatibleConnectee = DVPortStatusVmDirectPathGen2InactiveReasonOther("portNptIncompatibleConnectee") +) + +func init() { + t["DVPortStatusVmDirectPathGen2InactiveReasonOther"] = reflect.TypeOf((*DVPortStatusVmDirectPathGen2InactiveReasonOther)(nil)).Elem() +} + +type DVSMacLimitPolicyType string + +const ( + DVSMacLimitPolicyTypeAllow = DVSMacLimitPolicyType("allow") + DVSMacLimitPolicyTypeDrop = DVSMacLimitPolicyType("drop") +) + +func init() { + t["DVSMacLimitPolicyType"] = reflect.TypeOf((*DVSMacLimitPolicyType)(nil)).Elem() +} + +type DasConfigFaultDasConfigFaultReason string + +const ( + DasConfigFaultDasConfigFaultReasonHostNetworkMisconfiguration = DasConfigFaultDasConfigFaultReason("HostNetworkMisconfiguration") + DasConfigFaultDasConfigFaultReasonHostMisconfiguration = DasConfigFaultDasConfigFaultReason("HostMisconfiguration") + DasConfigFaultDasConfigFaultReasonInsufficientPrivileges = DasConfigFaultDasConfigFaultReason("InsufficientPrivileges") + DasConfigFaultDasConfigFaultReasonNoPrimaryAgentAvailable = DasConfigFaultDasConfigFaultReason("NoPrimaryAgentAvailable") + DasConfigFaultDasConfigFaultReasonOther = DasConfigFaultDasConfigFaultReason("Other") + DasConfigFaultDasConfigFaultReasonNoDatastoresConfigured = DasConfigFaultDasConfigFaultReason("NoDatastoresConfigured") + 
DasConfigFaultDasConfigFaultReasonCreateConfigVvolFailed = DasConfigFaultDasConfigFaultReason("CreateConfigVvolFailed") + DasConfigFaultDasConfigFaultReasonVSanNotSupportedOnHost = DasConfigFaultDasConfigFaultReason("VSanNotSupportedOnHost") + DasConfigFaultDasConfigFaultReasonDasNetworkMisconfiguration = DasConfigFaultDasConfigFaultReason("DasNetworkMisconfiguration") +) + +func init() { + t["DasConfigFaultDasConfigFaultReason"] = reflect.TypeOf((*DasConfigFaultDasConfigFaultReason)(nil)).Elem() +} + +type DasVmPriority string + +const ( + DasVmPriorityDisabled = DasVmPriority("disabled") + DasVmPriorityLow = DasVmPriority("low") + DasVmPriorityMedium = DasVmPriority("medium") + DasVmPriorityHigh = DasVmPriority("high") +) + +func init() { + t["DasVmPriority"] = reflect.TypeOf((*DasVmPriority)(nil)).Elem() +} + +type DatastoreAccessible string + +const ( + DatastoreAccessibleTrue = DatastoreAccessible("True") + DatastoreAccessibleFalse = DatastoreAccessible("False") +) + +func init() { + t["DatastoreAccessible"] = reflect.TypeOf((*DatastoreAccessible)(nil)).Elem() +} + +type DatastoreSummaryMaintenanceModeState string + +const ( + DatastoreSummaryMaintenanceModeStateNormal = DatastoreSummaryMaintenanceModeState("normal") + DatastoreSummaryMaintenanceModeStateEnteringMaintenance = DatastoreSummaryMaintenanceModeState("enteringMaintenance") + DatastoreSummaryMaintenanceModeStateInMaintenance = DatastoreSummaryMaintenanceModeState("inMaintenance") +) + +func init() { + t["DatastoreSummaryMaintenanceModeState"] = reflect.TypeOf((*DatastoreSummaryMaintenanceModeState)(nil)).Elem() +} + +type DayOfWeek string + +const ( + DayOfWeekSunday = DayOfWeek("sunday") + DayOfWeekMonday = DayOfWeek("monday") + DayOfWeekTuesday = DayOfWeek("tuesday") + DayOfWeekWednesday = DayOfWeek("wednesday") + DayOfWeekThursday = DayOfWeek("thursday") + DayOfWeekFriday = DayOfWeek("friday") + DayOfWeekSaturday = DayOfWeek("saturday") +) + +func init() { + t["DayOfWeek"] = reflect.TypeOf((*DayOfWeek)(nil)).Elem() +} + +type DeviceNotSupportedReason string + +const ( + DeviceNotSupportedReasonHost = DeviceNotSupportedReason("host") + DeviceNotSupportedReasonGuest = DeviceNotSupportedReason("guest") +) + +func init() { + t["DeviceNotSupportedReason"] = reflect.TypeOf((*DeviceNotSupportedReason)(nil)).Elem() +} + +type DiagnosticManagerLogCreator string + +const ( + DiagnosticManagerLogCreatorVpxd = DiagnosticManagerLogCreator("vpxd") + DiagnosticManagerLogCreatorVpxa = DiagnosticManagerLogCreator("vpxa") + DiagnosticManagerLogCreatorHostd = DiagnosticManagerLogCreator("hostd") + DiagnosticManagerLogCreatorServerd = DiagnosticManagerLogCreator("serverd") + DiagnosticManagerLogCreatorInstall = DiagnosticManagerLogCreator("install") + DiagnosticManagerLogCreatorVpxClient = DiagnosticManagerLogCreator("vpxClient") + DiagnosticManagerLogCreatorRecordLog = DiagnosticManagerLogCreator("recordLog") +) + +func init() { + t["DiagnosticManagerLogCreator"] = reflect.TypeOf((*DiagnosticManagerLogCreator)(nil)).Elem() +} + +type DiagnosticManagerLogFormat string + +const ( + DiagnosticManagerLogFormatPlain = DiagnosticManagerLogFormat("plain") +) + +func init() { + t["DiagnosticManagerLogFormat"] = reflect.TypeOf((*DiagnosticManagerLogFormat)(nil)).Elem() +} + +type DiagnosticPartitionStorageType string + +const ( + DiagnosticPartitionStorageTypeDirectAttached = DiagnosticPartitionStorageType("directAttached") + DiagnosticPartitionStorageTypeNetworkAttached = DiagnosticPartitionStorageType("networkAttached") +) + +func init() { + 
t["DiagnosticPartitionStorageType"] = reflect.TypeOf((*DiagnosticPartitionStorageType)(nil)).Elem() +} + +type DiagnosticPartitionType string + +const ( + DiagnosticPartitionTypeSingleHost = DiagnosticPartitionType("singleHost") + DiagnosticPartitionTypeMultiHost = DiagnosticPartitionType("multiHost") +) + +func init() { + t["DiagnosticPartitionType"] = reflect.TypeOf((*DiagnosticPartitionType)(nil)).Elem() +} + +type DisallowedChangeByServiceDisallowedChange string + +const ( + DisallowedChangeByServiceDisallowedChangeHotExtendDisk = DisallowedChangeByServiceDisallowedChange("hotExtendDisk") +) + +func init() { + t["DisallowedChangeByServiceDisallowedChange"] = reflect.TypeOf((*DisallowedChangeByServiceDisallowedChange)(nil)).Elem() +} + +type DistributedVirtualPortgroupMetaTagName string + +const ( + DistributedVirtualPortgroupMetaTagNameDvsName = DistributedVirtualPortgroupMetaTagName("dvsName") + DistributedVirtualPortgroupMetaTagNamePortgroupName = DistributedVirtualPortgroupMetaTagName("portgroupName") + DistributedVirtualPortgroupMetaTagNamePortIndex = DistributedVirtualPortgroupMetaTagName("portIndex") +) + +func init() { + t["DistributedVirtualPortgroupMetaTagName"] = reflect.TypeOf((*DistributedVirtualPortgroupMetaTagName)(nil)).Elem() +} + +type DistributedVirtualPortgroupPortgroupType string + +const ( + DistributedVirtualPortgroupPortgroupTypeEarlyBinding = DistributedVirtualPortgroupPortgroupType("earlyBinding") + DistributedVirtualPortgroupPortgroupTypeLateBinding = DistributedVirtualPortgroupPortgroupType("lateBinding") + DistributedVirtualPortgroupPortgroupTypeEphemeral = DistributedVirtualPortgroupPortgroupType("ephemeral") +) + +func init() { + t["DistributedVirtualPortgroupPortgroupType"] = reflect.TypeOf((*DistributedVirtualPortgroupPortgroupType)(nil)).Elem() +} + +type DistributedVirtualSwitchHostInfrastructureTrafficClass string + +const ( + DistributedVirtualSwitchHostInfrastructureTrafficClassManagement = DistributedVirtualSwitchHostInfrastructureTrafficClass("management") + DistributedVirtualSwitchHostInfrastructureTrafficClassFaultTolerance = DistributedVirtualSwitchHostInfrastructureTrafficClass("faultTolerance") + DistributedVirtualSwitchHostInfrastructureTrafficClassVmotion = DistributedVirtualSwitchHostInfrastructureTrafficClass("vmotion") + DistributedVirtualSwitchHostInfrastructureTrafficClassVirtualMachine = DistributedVirtualSwitchHostInfrastructureTrafficClass("virtualMachine") + DistributedVirtualSwitchHostInfrastructureTrafficClassISCSI = DistributedVirtualSwitchHostInfrastructureTrafficClass("iSCSI") + DistributedVirtualSwitchHostInfrastructureTrafficClassNfs = DistributedVirtualSwitchHostInfrastructureTrafficClass("nfs") + DistributedVirtualSwitchHostInfrastructureTrafficClassHbr = DistributedVirtualSwitchHostInfrastructureTrafficClass("hbr") + DistributedVirtualSwitchHostInfrastructureTrafficClassVsan = DistributedVirtualSwitchHostInfrastructureTrafficClass("vsan") + DistributedVirtualSwitchHostInfrastructureTrafficClassVdp = DistributedVirtualSwitchHostInfrastructureTrafficClass("vdp") +) + +func init() { + t["DistributedVirtualSwitchHostInfrastructureTrafficClass"] = reflect.TypeOf((*DistributedVirtualSwitchHostInfrastructureTrafficClass)(nil)).Elem() +} + +type DistributedVirtualSwitchHostMemberHostComponentState string + +const ( + DistributedVirtualSwitchHostMemberHostComponentStateUp = DistributedVirtualSwitchHostMemberHostComponentState("up") + DistributedVirtualSwitchHostMemberHostComponentStatePending = 
DistributedVirtualSwitchHostMemberHostComponentState("pending") + DistributedVirtualSwitchHostMemberHostComponentStateOutOfSync = DistributedVirtualSwitchHostMemberHostComponentState("outOfSync") + DistributedVirtualSwitchHostMemberHostComponentStateWarning = DistributedVirtualSwitchHostMemberHostComponentState("warning") + DistributedVirtualSwitchHostMemberHostComponentStateDisconnected = DistributedVirtualSwitchHostMemberHostComponentState("disconnected") + DistributedVirtualSwitchHostMemberHostComponentStateDown = DistributedVirtualSwitchHostMemberHostComponentState("down") +) + +func init() { + t["DistributedVirtualSwitchHostMemberHostComponentState"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberHostComponentState)(nil)).Elem() +} + +type DistributedVirtualSwitchNetworkResourceControlVersion string + +const ( + DistributedVirtualSwitchNetworkResourceControlVersionVersion2 = DistributedVirtualSwitchNetworkResourceControlVersion("version2") + DistributedVirtualSwitchNetworkResourceControlVersionVersion3 = DistributedVirtualSwitchNetworkResourceControlVersion("version3") +) + +func init() { + t["DistributedVirtualSwitchNetworkResourceControlVersion"] = reflect.TypeOf((*DistributedVirtualSwitchNetworkResourceControlVersion)(nil)).Elem() +} + +type DistributedVirtualSwitchNicTeamingPolicyMode string + +const ( + DistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_ip = DistributedVirtualSwitchNicTeamingPolicyMode("loadbalance_ip") + DistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_srcmac = DistributedVirtualSwitchNicTeamingPolicyMode("loadbalance_srcmac") + DistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_srcid = DistributedVirtualSwitchNicTeamingPolicyMode("loadbalance_srcid") + DistributedVirtualSwitchNicTeamingPolicyModeFailover_explicit = DistributedVirtualSwitchNicTeamingPolicyMode("failover_explicit") + DistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_loadbased = DistributedVirtualSwitchNicTeamingPolicyMode("loadbalance_loadbased") +) + +func init() { + t["DistributedVirtualSwitchNicTeamingPolicyMode"] = reflect.TypeOf((*DistributedVirtualSwitchNicTeamingPolicyMode)(nil)).Elem() +} + +type DistributedVirtualSwitchPortConnecteeConnecteeType string + +const ( + DistributedVirtualSwitchPortConnecteeConnecteeTypePnic = DistributedVirtualSwitchPortConnecteeConnecteeType("pnic") + DistributedVirtualSwitchPortConnecteeConnecteeTypeVmVnic = DistributedVirtualSwitchPortConnecteeConnecteeType("vmVnic") + DistributedVirtualSwitchPortConnecteeConnecteeTypeHostConsoleVnic = DistributedVirtualSwitchPortConnecteeConnecteeType("hostConsoleVnic") + DistributedVirtualSwitchPortConnecteeConnecteeTypeHostVmkVnic = DistributedVirtualSwitchPortConnecteeConnecteeType("hostVmkVnic") +) + +func init() { + t["DistributedVirtualSwitchPortConnecteeConnecteeType"] = reflect.TypeOf((*DistributedVirtualSwitchPortConnecteeConnecteeType)(nil)).Elem() +} + +type DistributedVirtualSwitchProductSpecOperationType string + +const ( + DistributedVirtualSwitchProductSpecOperationTypePreInstall = DistributedVirtualSwitchProductSpecOperationType("preInstall") + DistributedVirtualSwitchProductSpecOperationTypeUpgrade = DistributedVirtualSwitchProductSpecOperationType("upgrade") + DistributedVirtualSwitchProductSpecOperationTypeNotifyAvailableUpgrade = DistributedVirtualSwitchProductSpecOperationType("notifyAvailableUpgrade") + DistributedVirtualSwitchProductSpecOperationTypeProceedWithUpgrade = DistributedVirtualSwitchProductSpecOperationType("proceedWithUpgrade") + 
DistributedVirtualSwitchProductSpecOperationTypeUpdateBundleInfo = DistributedVirtualSwitchProductSpecOperationType("updateBundleInfo") +) + +func init() { + t["DistributedVirtualSwitchProductSpecOperationType"] = reflect.TypeOf((*DistributedVirtualSwitchProductSpecOperationType)(nil)).Elem() +} + +type DpmBehavior string + +const ( + DpmBehaviorManual = DpmBehavior("manual") + DpmBehaviorAutomated = DpmBehavior("automated") +) + +func init() { + t["DpmBehavior"] = reflect.TypeOf((*DpmBehavior)(nil)).Elem() +} + +type DrsBehavior string + +const ( + DrsBehaviorManual = DrsBehavior("manual") + DrsBehaviorPartiallyAutomated = DrsBehavior("partiallyAutomated") + DrsBehaviorFullyAutomated = DrsBehavior("fullyAutomated") +) + +func init() { + t["DrsBehavior"] = reflect.TypeOf((*DrsBehavior)(nil)).Elem() +} + +type DrsInjectorWorkloadCorrelationState string + +const ( + DrsInjectorWorkloadCorrelationStateCorrelated = DrsInjectorWorkloadCorrelationState("Correlated") + DrsInjectorWorkloadCorrelationStateUncorrelated = DrsInjectorWorkloadCorrelationState("Uncorrelated") +) + +func init() { + t["DrsInjectorWorkloadCorrelationState"] = reflect.TypeOf((*DrsInjectorWorkloadCorrelationState)(nil)).Elem() +} + +type DrsRecommendationReasonCode string + +const ( + DrsRecommendationReasonCodeFairnessCpuAvg = DrsRecommendationReasonCode("fairnessCpuAvg") + DrsRecommendationReasonCodeFairnessMemAvg = DrsRecommendationReasonCode("fairnessMemAvg") + DrsRecommendationReasonCodeJointAffin = DrsRecommendationReasonCode("jointAffin") + DrsRecommendationReasonCodeAntiAffin = DrsRecommendationReasonCode("antiAffin") + DrsRecommendationReasonCodeHostMaint = DrsRecommendationReasonCode("hostMaint") +) + +func init() { + t["DrsRecommendationReasonCode"] = reflect.TypeOf((*DrsRecommendationReasonCode)(nil)).Elem() +} + +type DvsEventPortBlockState string + +const ( + DvsEventPortBlockStateUnset = DvsEventPortBlockState("unset") + DvsEventPortBlockStateBlocked = DvsEventPortBlockState("blocked") + DvsEventPortBlockStateUnblocked = DvsEventPortBlockState("unblocked") + DvsEventPortBlockStateUnknown = DvsEventPortBlockState("unknown") +) + +func init() { + t["DvsEventPortBlockState"] = reflect.TypeOf((*DvsEventPortBlockState)(nil)).Elem() +} + +type DvsFilterOnFailure string + +const ( + DvsFilterOnFailureFailOpen = DvsFilterOnFailure("failOpen") + DvsFilterOnFailureFailClosed = DvsFilterOnFailure("failClosed") +) + +func init() { + t["DvsFilterOnFailure"] = reflect.TypeOf((*DvsFilterOnFailure)(nil)).Elem() +} + +type DvsNetworkRuleDirectionType string + +const ( + DvsNetworkRuleDirectionTypeIncomingPackets = DvsNetworkRuleDirectionType("incomingPackets") + DvsNetworkRuleDirectionTypeOutgoingPackets = DvsNetworkRuleDirectionType("outgoingPackets") + DvsNetworkRuleDirectionTypeBoth = DvsNetworkRuleDirectionType("both") +) + +func init() { + t["DvsNetworkRuleDirectionType"] = reflect.TypeOf((*DvsNetworkRuleDirectionType)(nil)).Elem() +} + +type EntityImportType string + +const ( + EntityImportTypeCreateEntityWithNewIdentifier = EntityImportType("createEntityWithNewIdentifier") + EntityImportTypeCreateEntityWithOriginalIdentifier = EntityImportType("createEntityWithOriginalIdentifier") + EntityImportTypeApplyToEntitySpecified = EntityImportType("applyToEntitySpecified") +) + +func init() { + t["EntityImportType"] = reflect.TypeOf((*EntityImportType)(nil)).Elem() +} + +type EntityType string + +const ( + EntityTypeDistributedVirtualSwitch = EntityType("distributedVirtualSwitch") + EntityTypeDistributedVirtualPortgroup = 
EntityType("distributedVirtualPortgroup") +) + +func init() { + t["EntityType"] = reflect.TypeOf((*EntityType)(nil)).Elem() +} + +type EventAlarmExpressionComparisonOperator string + +const ( + EventAlarmExpressionComparisonOperatorEquals = EventAlarmExpressionComparisonOperator("equals") + EventAlarmExpressionComparisonOperatorNotEqualTo = EventAlarmExpressionComparisonOperator("notEqualTo") + EventAlarmExpressionComparisonOperatorStartsWith = EventAlarmExpressionComparisonOperator("startsWith") + EventAlarmExpressionComparisonOperatorDoesNotStartWith = EventAlarmExpressionComparisonOperator("doesNotStartWith") + EventAlarmExpressionComparisonOperatorEndsWith = EventAlarmExpressionComparisonOperator("endsWith") + EventAlarmExpressionComparisonOperatorDoesNotEndWith = EventAlarmExpressionComparisonOperator("doesNotEndWith") +) + +func init() { + t["EventAlarmExpressionComparisonOperator"] = reflect.TypeOf((*EventAlarmExpressionComparisonOperator)(nil)).Elem() +} + +type EventCategory string + +const ( + EventCategoryInfo = EventCategory("info") + EventCategoryWarning = EventCategory("warning") + EventCategoryError = EventCategory("error") + EventCategoryUser = EventCategory("user") +) + +func init() { + t["EventCategory"] = reflect.TypeOf((*EventCategory)(nil)).Elem() +} + +type EventEventSeverity string + +const ( + EventEventSeverityError = EventEventSeverity("error") + EventEventSeverityWarning = EventEventSeverity("warning") + EventEventSeverityInfo = EventEventSeverity("info") + EventEventSeverityUser = EventEventSeverity("user") +) + +func init() { + t["EventEventSeverity"] = reflect.TypeOf((*EventEventSeverity)(nil)).Elem() +} + +type EventFilterSpecRecursionOption string + +const ( + EventFilterSpecRecursionOptionSelf = EventFilterSpecRecursionOption("self") + EventFilterSpecRecursionOptionChildren = EventFilterSpecRecursionOption("children") + EventFilterSpecRecursionOptionAll = EventFilterSpecRecursionOption("all") +) + +func init() { + t["EventFilterSpecRecursionOption"] = reflect.TypeOf((*EventFilterSpecRecursionOption)(nil)).Elem() +} + +type FibreChannelPortType string + +const ( + FibreChannelPortTypeFabric = FibreChannelPortType("fabric") + FibreChannelPortTypeLoop = FibreChannelPortType("loop") + FibreChannelPortTypePointToPoint = FibreChannelPortType("pointToPoint") + FibreChannelPortTypeUnknown = FibreChannelPortType("unknown") +) + +func init() { + t["FibreChannelPortType"] = reflect.TypeOf((*FibreChannelPortType)(nil)).Elem() +} + +type FileSystemMountInfoVStorageSupportStatus string + +const ( + FileSystemMountInfoVStorageSupportStatusVStorageSupported = FileSystemMountInfoVStorageSupportStatus("vStorageSupported") + FileSystemMountInfoVStorageSupportStatusVStorageUnsupported = FileSystemMountInfoVStorageSupportStatus("vStorageUnsupported") + FileSystemMountInfoVStorageSupportStatusVStorageUnknown = FileSystemMountInfoVStorageSupportStatus("vStorageUnknown") +) + +func init() { + t["FileSystemMountInfoVStorageSupportStatus"] = reflect.TypeOf((*FileSystemMountInfoVStorageSupportStatus)(nil)).Elem() +} + +type FtIssuesOnHostHostSelectionType string + +const ( + FtIssuesOnHostHostSelectionTypeUser = FtIssuesOnHostHostSelectionType("user") + FtIssuesOnHostHostSelectionTypeVc = FtIssuesOnHostHostSelectionType("vc") + FtIssuesOnHostHostSelectionTypeDrs = FtIssuesOnHostHostSelectionType("drs") +) + +func init() { + t["FtIssuesOnHostHostSelectionType"] = reflect.TypeOf((*FtIssuesOnHostHostSelectionType)(nil)).Elem() +} + +type GuestFileType string + +const ( + 
GuestFileTypeFile = GuestFileType("file") + GuestFileTypeDirectory = GuestFileType("directory") + GuestFileTypeSymlink = GuestFileType("symlink") +) + +func init() { + t["GuestFileType"] = reflect.TypeOf((*GuestFileType)(nil)).Elem() +} + +type GuestInfoAppStateType string + +const ( + GuestInfoAppStateTypeNone = GuestInfoAppStateType("none") + GuestInfoAppStateTypeAppStateOk = GuestInfoAppStateType("appStateOk") + GuestInfoAppStateTypeAppStateNeedReset = GuestInfoAppStateType("appStateNeedReset") +) + +func init() { + t["GuestInfoAppStateType"] = reflect.TypeOf((*GuestInfoAppStateType)(nil)).Elem() +} + +type GuestOsDescriptorFirmwareType string + +const ( + GuestOsDescriptorFirmwareTypeBios = GuestOsDescriptorFirmwareType("bios") + GuestOsDescriptorFirmwareTypeEfi = GuestOsDescriptorFirmwareType("efi") +) + +func init() { + t["GuestOsDescriptorFirmwareType"] = reflect.TypeOf((*GuestOsDescriptorFirmwareType)(nil)).Elem() +} + +type GuestOsDescriptorSupportLevel string + +const ( + GuestOsDescriptorSupportLevelExperimental = GuestOsDescriptorSupportLevel("experimental") + GuestOsDescriptorSupportLevelLegacy = GuestOsDescriptorSupportLevel("legacy") + GuestOsDescriptorSupportLevelTerminated = GuestOsDescriptorSupportLevel("terminated") + GuestOsDescriptorSupportLevelSupported = GuestOsDescriptorSupportLevel("supported") + GuestOsDescriptorSupportLevelUnsupported = GuestOsDescriptorSupportLevel("unsupported") + GuestOsDescriptorSupportLevelDeprecated = GuestOsDescriptorSupportLevel("deprecated") + GuestOsDescriptorSupportLevelTechPreview = GuestOsDescriptorSupportLevel("techPreview") +) + +func init() { + t["GuestOsDescriptorSupportLevel"] = reflect.TypeOf((*GuestOsDescriptorSupportLevel)(nil)).Elem() +} + +type GuestRegKeyWowSpec string + +const ( + GuestRegKeyWowSpecWOWNative = GuestRegKeyWowSpec("WOWNative") + GuestRegKeyWowSpecWOW32 = GuestRegKeyWowSpec("WOW32") + GuestRegKeyWowSpecWOW64 = GuestRegKeyWowSpec("WOW64") +) + +func init() { + t["GuestRegKeyWowSpec"] = reflect.TypeOf((*GuestRegKeyWowSpec)(nil)).Elem() +} + +type HealthUpdateInfoComponentType string + +const ( + HealthUpdateInfoComponentTypeMemory = HealthUpdateInfoComponentType("Memory") + HealthUpdateInfoComponentTypePower = HealthUpdateInfoComponentType("Power") + HealthUpdateInfoComponentTypeFan = HealthUpdateInfoComponentType("Fan") + HealthUpdateInfoComponentTypeNetwork = HealthUpdateInfoComponentType("Network") + HealthUpdateInfoComponentTypeStorage = HealthUpdateInfoComponentType("Storage") +) + +func init() { + t["HealthUpdateInfoComponentType"] = reflect.TypeOf((*HealthUpdateInfoComponentType)(nil)).Elem() +} + +type HostAccessMode string + +const ( + HostAccessModeAccessNone = HostAccessMode("accessNone") + HostAccessModeAccessAdmin = HostAccessMode("accessAdmin") + HostAccessModeAccessNoAccess = HostAccessMode("accessNoAccess") + HostAccessModeAccessReadOnly = HostAccessMode("accessReadOnly") + HostAccessModeAccessOther = HostAccessMode("accessOther") +) + +func init() { + t["HostAccessMode"] = reflect.TypeOf((*HostAccessMode)(nil)).Elem() +} + +type HostActiveDirectoryAuthenticationCertificateDigest string + +const ( + HostActiveDirectoryAuthenticationCertificateDigestSHA1 = HostActiveDirectoryAuthenticationCertificateDigest("SHA1") +) + +func init() { + t["HostActiveDirectoryAuthenticationCertificateDigest"] = reflect.TypeOf((*HostActiveDirectoryAuthenticationCertificateDigest)(nil)).Elem() +} + +type HostActiveDirectoryInfoDomainMembershipStatus string + +const ( + 
HostActiveDirectoryInfoDomainMembershipStatusUnknown = HostActiveDirectoryInfoDomainMembershipStatus("unknown") + HostActiveDirectoryInfoDomainMembershipStatusOk = HostActiveDirectoryInfoDomainMembershipStatus("ok") + HostActiveDirectoryInfoDomainMembershipStatusNoServers = HostActiveDirectoryInfoDomainMembershipStatus("noServers") + HostActiveDirectoryInfoDomainMembershipStatusClientTrustBroken = HostActiveDirectoryInfoDomainMembershipStatus("clientTrustBroken") + HostActiveDirectoryInfoDomainMembershipStatusServerTrustBroken = HostActiveDirectoryInfoDomainMembershipStatus("serverTrustBroken") + HostActiveDirectoryInfoDomainMembershipStatusInconsistentTrust = HostActiveDirectoryInfoDomainMembershipStatus("inconsistentTrust") + HostActiveDirectoryInfoDomainMembershipStatusOtherProblem = HostActiveDirectoryInfoDomainMembershipStatus("otherProblem") +) + +func init() { + t["HostActiveDirectoryInfoDomainMembershipStatus"] = reflect.TypeOf((*HostActiveDirectoryInfoDomainMembershipStatus)(nil)).Elem() +} + +type HostCapabilityFtUnsupportedReason string + +const ( + HostCapabilityFtUnsupportedReasonVMotionNotLicensed = HostCapabilityFtUnsupportedReason("vMotionNotLicensed") + HostCapabilityFtUnsupportedReasonMissingVMotionNic = HostCapabilityFtUnsupportedReason("missingVMotionNic") + HostCapabilityFtUnsupportedReasonMissingFTLoggingNic = HostCapabilityFtUnsupportedReason("missingFTLoggingNic") + HostCapabilityFtUnsupportedReasonFtNotLicensed = HostCapabilityFtUnsupportedReason("ftNotLicensed") + HostCapabilityFtUnsupportedReasonHaAgentIssue = HostCapabilityFtUnsupportedReason("haAgentIssue") + HostCapabilityFtUnsupportedReasonUnsupportedProduct = HostCapabilityFtUnsupportedReason("unsupportedProduct") + HostCapabilityFtUnsupportedReasonCpuHvUnsupported = HostCapabilityFtUnsupportedReason("cpuHvUnsupported") + HostCapabilityFtUnsupportedReasonCpuHwmmuUnsupported = HostCapabilityFtUnsupportedReason("cpuHwmmuUnsupported") + HostCapabilityFtUnsupportedReasonCpuHvDisabled = HostCapabilityFtUnsupportedReason("cpuHvDisabled") +) + +func init() { + t["HostCapabilityFtUnsupportedReason"] = reflect.TypeOf((*HostCapabilityFtUnsupportedReason)(nil)).Elem() +} + +type HostCapabilityUnmapMethodSupported string + +const ( + HostCapabilityUnmapMethodSupportedPriority = HostCapabilityUnmapMethodSupported("priority") + HostCapabilityUnmapMethodSupportedFixed = HostCapabilityUnmapMethodSupported("fixed") + HostCapabilityUnmapMethodSupportedDynamic = HostCapabilityUnmapMethodSupported("dynamic") +) + +func init() { + t["HostCapabilityUnmapMethodSupported"] = reflect.TypeOf((*HostCapabilityUnmapMethodSupported)(nil)).Elem() +} + +type HostCapabilityVmDirectPathGen2UnsupportedReason string + +const ( + HostCapabilityVmDirectPathGen2UnsupportedReasonHostNptIncompatibleProduct = HostCapabilityVmDirectPathGen2UnsupportedReason("hostNptIncompatibleProduct") + HostCapabilityVmDirectPathGen2UnsupportedReasonHostNptIncompatibleHardware = HostCapabilityVmDirectPathGen2UnsupportedReason("hostNptIncompatibleHardware") + HostCapabilityVmDirectPathGen2UnsupportedReasonHostNptDisabled = HostCapabilityVmDirectPathGen2UnsupportedReason("hostNptDisabled") +) + +func init() { + t["HostCapabilityVmDirectPathGen2UnsupportedReason"] = reflect.TypeOf((*HostCapabilityVmDirectPathGen2UnsupportedReason)(nil)).Elem() +} + +type HostCertificateManagerCertificateInfoCertificateStatus string + +const ( + HostCertificateManagerCertificateInfoCertificateStatusUnknown = HostCertificateManagerCertificateInfoCertificateStatus("unknown") + 
HostCertificateManagerCertificateInfoCertificateStatusExpired = HostCertificateManagerCertificateInfoCertificateStatus("expired") + HostCertificateManagerCertificateInfoCertificateStatusExpiring = HostCertificateManagerCertificateInfoCertificateStatus("expiring") + HostCertificateManagerCertificateInfoCertificateStatusExpiringShortly = HostCertificateManagerCertificateInfoCertificateStatus("expiringShortly") + HostCertificateManagerCertificateInfoCertificateStatusExpirationImminent = HostCertificateManagerCertificateInfoCertificateStatus("expirationImminent") + HostCertificateManagerCertificateInfoCertificateStatusGood = HostCertificateManagerCertificateInfoCertificateStatus("good") +) + +func init() { + t["HostCertificateManagerCertificateInfoCertificateStatus"] = reflect.TypeOf((*HostCertificateManagerCertificateInfoCertificateStatus)(nil)).Elem() +} + +type HostConfigChangeMode string + +const ( + HostConfigChangeModeModify = HostConfigChangeMode("modify") + HostConfigChangeModeReplace = HostConfigChangeMode("replace") +) + +func init() { + t["HostConfigChangeMode"] = reflect.TypeOf((*HostConfigChangeMode)(nil)).Elem() +} + +type HostConfigChangeOperation string + +const ( + HostConfigChangeOperationAdd = HostConfigChangeOperation("add") + HostConfigChangeOperationRemove = HostConfigChangeOperation("remove") + HostConfigChangeOperationEdit = HostConfigChangeOperation("edit") + HostConfigChangeOperationIgnore = HostConfigChangeOperation("ignore") +) + +func init() { + t["HostConfigChangeOperation"] = reflect.TypeOf((*HostConfigChangeOperation)(nil)).Elem() +} + +type HostCpuPackageVendor string + +const ( + HostCpuPackageVendorUnknown = HostCpuPackageVendor("unknown") + HostCpuPackageVendorIntel = HostCpuPackageVendor("intel") + HostCpuPackageVendorAmd = HostCpuPackageVendor("amd") +) + +func init() { + t["HostCpuPackageVendor"] = reflect.TypeOf((*HostCpuPackageVendor)(nil)).Elem() +} + +type HostCpuPowerManagementInfoPolicyType string + +const ( + HostCpuPowerManagementInfoPolicyTypeOff = HostCpuPowerManagementInfoPolicyType("off") + HostCpuPowerManagementInfoPolicyTypeStaticPolicy = HostCpuPowerManagementInfoPolicyType("staticPolicy") + HostCpuPowerManagementInfoPolicyTypeDynamicPolicy = HostCpuPowerManagementInfoPolicyType("dynamicPolicy") +) + +func init() { + t["HostCpuPowerManagementInfoPolicyType"] = reflect.TypeOf((*HostCpuPowerManagementInfoPolicyType)(nil)).Elem() +} + +type HostCryptoState string + +const ( + HostCryptoStateIncapable = HostCryptoState("incapable") + HostCryptoStatePrepared = HostCryptoState("prepared") + HostCryptoStateSafe = HostCryptoState("safe") +) + +func init() { + t["HostCryptoState"] = reflect.TypeOf((*HostCryptoState)(nil)).Elem() +} + +type HostDasErrorEventHostDasErrorReason string + +const ( + HostDasErrorEventHostDasErrorReasonConfigFailed = HostDasErrorEventHostDasErrorReason("configFailed") + HostDasErrorEventHostDasErrorReasonTimeout = HostDasErrorEventHostDasErrorReason("timeout") + HostDasErrorEventHostDasErrorReasonCommunicationInitFailed = HostDasErrorEventHostDasErrorReason("communicationInitFailed") + HostDasErrorEventHostDasErrorReasonHealthCheckScriptFailed = HostDasErrorEventHostDasErrorReason("healthCheckScriptFailed") + HostDasErrorEventHostDasErrorReasonAgentFailed = HostDasErrorEventHostDasErrorReason("agentFailed") + HostDasErrorEventHostDasErrorReasonAgentShutdown = HostDasErrorEventHostDasErrorReason("agentShutdown") + HostDasErrorEventHostDasErrorReasonIsolationAddressUnpingable = 
HostDasErrorEventHostDasErrorReason("isolationAddressUnpingable") + HostDasErrorEventHostDasErrorReasonOther = HostDasErrorEventHostDasErrorReason("other") +) + +func init() { + t["HostDasErrorEventHostDasErrorReason"] = reflect.TypeOf((*HostDasErrorEventHostDasErrorReason)(nil)).Elem() +} + +type HostDigestInfoDigestMethodType string + +const ( + HostDigestInfoDigestMethodTypeSHA1 = HostDigestInfoDigestMethodType("SHA1") + HostDigestInfoDigestMethodTypeMD5 = HostDigestInfoDigestMethodType("MD5") + HostDigestInfoDigestMethodTypeSHA256 = HostDigestInfoDigestMethodType("SHA256") + HostDigestInfoDigestMethodTypeSHA384 = HostDigestInfoDigestMethodType("SHA384") + HostDigestInfoDigestMethodTypeSHA512 = HostDigestInfoDigestMethodType("SHA512") + HostDigestInfoDigestMethodTypeSM3_256 = HostDigestInfoDigestMethodType("SM3_256") +) + +func init() { + t["HostDigestInfoDigestMethodType"] = reflect.TypeOf((*HostDigestInfoDigestMethodType)(nil)).Elem() +} + +type HostDisconnectedEventReasonCode string + +const ( + HostDisconnectedEventReasonCodeSslThumbprintVerifyFailed = HostDisconnectedEventReasonCode("sslThumbprintVerifyFailed") + HostDisconnectedEventReasonCodeLicenseExpired = HostDisconnectedEventReasonCode("licenseExpired") + HostDisconnectedEventReasonCodeAgentUpgrade = HostDisconnectedEventReasonCode("agentUpgrade") + HostDisconnectedEventReasonCodeUserRequest = HostDisconnectedEventReasonCode("userRequest") + HostDisconnectedEventReasonCodeInsufficientLicenses = HostDisconnectedEventReasonCode("insufficientLicenses") + HostDisconnectedEventReasonCodeAgentOutOfDate = HostDisconnectedEventReasonCode("agentOutOfDate") + HostDisconnectedEventReasonCodePasswordDecryptFailure = HostDisconnectedEventReasonCode("passwordDecryptFailure") + HostDisconnectedEventReasonCodeUnknown = HostDisconnectedEventReasonCode("unknown") + HostDisconnectedEventReasonCodeVcVRAMCapacityExceeded = HostDisconnectedEventReasonCode("vcVRAMCapacityExceeded") +) + +func init() { + t["HostDisconnectedEventReasonCode"] = reflect.TypeOf((*HostDisconnectedEventReasonCode)(nil)).Elem() +} + +type HostDiskPartitionInfoPartitionFormat string + +const ( + HostDiskPartitionInfoPartitionFormatGpt = HostDiskPartitionInfoPartitionFormat("gpt") + HostDiskPartitionInfoPartitionFormatMbr = HostDiskPartitionInfoPartitionFormat("mbr") + HostDiskPartitionInfoPartitionFormatUnknown = HostDiskPartitionInfoPartitionFormat("unknown") +) + +func init() { + t["HostDiskPartitionInfoPartitionFormat"] = reflect.TypeOf((*HostDiskPartitionInfoPartitionFormat)(nil)).Elem() +} + +type HostDiskPartitionInfoType string + +const ( + HostDiskPartitionInfoTypeNone = HostDiskPartitionInfoType("none") + HostDiskPartitionInfoTypeVmfs = HostDiskPartitionInfoType("vmfs") + HostDiskPartitionInfoTypeLinuxNative = HostDiskPartitionInfoType("linuxNative") + HostDiskPartitionInfoTypeLinuxSwap = HostDiskPartitionInfoType("linuxSwap") + HostDiskPartitionInfoTypeExtended = HostDiskPartitionInfoType("extended") + HostDiskPartitionInfoTypeNtfs = HostDiskPartitionInfoType("ntfs") + HostDiskPartitionInfoTypeVmkDiagnostic = HostDiskPartitionInfoType("vmkDiagnostic") + HostDiskPartitionInfoTypeVffs = HostDiskPartitionInfoType("vffs") +) + +func init() { + t["HostDiskPartitionInfoType"] = reflect.TypeOf((*HostDiskPartitionInfoType)(nil)).Elem() +} + +type HostFeatureVersionKey string + +const ( + HostFeatureVersionKeyFaultTolerance = HostFeatureVersionKey("faultTolerance") +) + +func init() { + t["HostFeatureVersionKey"] = reflect.TypeOf((*HostFeatureVersionKey)(nil)).Elem() +} + 
+type HostFileSystemVolumeFileSystemType string + +const ( + HostFileSystemVolumeFileSystemTypeVMFS = HostFileSystemVolumeFileSystemType("VMFS") + HostFileSystemVolumeFileSystemTypeNFS = HostFileSystemVolumeFileSystemType("NFS") + HostFileSystemVolumeFileSystemTypeNFS41 = HostFileSystemVolumeFileSystemType("NFS41") + HostFileSystemVolumeFileSystemTypeCIFS = HostFileSystemVolumeFileSystemType("CIFS") + HostFileSystemVolumeFileSystemTypeVsan = HostFileSystemVolumeFileSystemType("vsan") + HostFileSystemVolumeFileSystemTypeVFFS = HostFileSystemVolumeFileSystemType("VFFS") + HostFileSystemVolumeFileSystemTypeVVOL = HostFileSystemVolumeFileSystemType("VVOL") + HostFileSystemVolumeFileSystemTypePMEM = HostFileSystemVolumeFileSystemType("PMEM") + HostFileSystemVolumeFileSystemTypeOTHER = HostFileSystemVolumeFileSystemType("OTHER") +) + +func init() { + t["HostFileSystemVolumeFileSystemType"] = reflect.TypeOf((*HostFileSystemVolumeFileSystemType)(nil)).Elem() +} + +type HostFirewallRuleDirection string + +const ( + HostFirewallRuleDirectionInbound = HostFirewallRuleDirection("inbound") + HostFirewallRuleDirectionOutbound = HostFirewallRuleDirection("outbound") +) + +func init() { + t["HostFirewallRuleDirection"] = reflect.TypeOf((*HostFirewallRuleDirection)(nil)).Elem() +} + +type HostFirewallRulePortType string + +const ( + HostFirewallRulePortTypeSrc = HostFirewallRulePortType("src") + HostFirewallRulePortTypeDst = HostFirewallRulePortType("dst") +) + +func init() { + t["HostFirewallRulePortType"] = reflect.TypeOf((*HostFirewallRulePortType)(nil)).Elem() +} + +type HostFirewallRuleProtocol string + +const ( + HostFirewallRuleProtocolTcp = HostFirewallRuleProtocol("tcp") + HostFirewallRuleProtocolUdp = HostFirewallRuleProtocol("udp") +) + +func init() { + t["HostFirewallRuleProtocol"] = reflect.TypeOf((*HostFirewallRuleProtocol)(nil)).Elem() +} + +type HostGraphicsConfigGraphicsType string + +const ( + HostGraphicsConfigGraphicsTypeShared = HostGraphicsConfigGraphicsType("shared") + HostGraphicsConfigGraphicsTypeSharedDirect = HostGraphicsConfigGraphicsType("sharedDirect") +) + +func init() { + t["HostGraphicsConfigGraphicsType"] = reflect.TypeOf((*HostGraphicsConfigGraphicsType)(nil)).Elem() +} + +type HostGraphicsConfigSharedPassthruAssignmentPolicy string + +const ( + HostGraphicsConfigSharedPassthruAssignmentPolicyPerformance = HostGraphicsConfigSharedPassthruAssignmentPolicy("performance") + HostGraphicsConfigSharedPassthruAssignmentPolicyConsolidation = HostGraphicsConfigSharedPassthruAssignmentPolicy("consolidation") +) + +func init() { + t["HostGraphicsConfigSharedPassthruAssignmentPolicy"] = reflect.TypeOf((*HostGraphicsConfigSharedPassthruAssignmentPolicy)(nil)).Elem() +} + +type HostGraphicsInfoGraphicsType string + +const ( + HostGraphicsInfoGraphicsTypeBasic = HostGraphicsInfoGraphicsType("basic") + HostGraphicsInfoGraphicsTypeShared = HostGraphicsInfoGraphicsType("shared") + HostGraphicsInfoGraphicsTypeDirect = HostGraphicsInfoGraphicsType("direct") + HostGraphicsInfoGraphicsTypeSharedDirect = HostGraphicsInfoGraphicsType("sharedDirect") +) + +func init() { + t["HostGraphicsInfoGraphicsType"] = reflect.TypeOf((*HostGraphicsInfoGraphicsType)(nil)).Elem() +} + +type HostHardwareElementStatus string + +const ( + HostHardwareElementStatusUnknown = HostHardwareElementStatus("Unknown") + HostHardwareElementStatusGreen = HostHardwareElementStatus("Green") + HostHardwareElementStatusYellow = HostHardwareElementStatus("Yellow") + HostHardwareElementStatusRed = HostHardwareElementStatus("Red") 
+) + +func init() { + t["HostHardwareElementStatus"] = reflect.TypeOf((*HostHardwareElementStatus)(nil)).Elem() +} + +type HostHasComponentFailureHostComponentType string + +const ( + HostHasComponentFailureHostComponentTypeDatastore = HostHasComponentFailureHostComponentType("Datastore") +) + +func init() { + t["HostHasComponentFailureHostComponentType"] = reflect.TypeOf((*HostHasComponentFailureHostComponentType)(nil)).Elem() +} + +type HostImageAcceptanceLevel string + +const ( + HostImageAcceptanceLevelVmware_certified = HostImageAcceptanceLevel("vmware_certified") + HostImageAcceptanceLevelVmware_accepted = HostImageAcceptanceLevel("vmware_accepted") + HostImageAcceptanceLevelPartner = HostImageAcceptanceLevel("partner") + HostImageAcceptanceLevelCommunity = HostImageAcceptanceLevel("community") +) + +func init() { + t["HostImageAcceptanceLevel"] = reflect.TypeOf((*HostImageAcceptanceLevel)(nil)).Elem() +} + +type HostIncompatibleForFaultToleranceReason string + +const ( + HostIncompatibleForFaultToleranceReasonProduct = HostIncompatibleForFaultToleranceReason("product") + HostIncompatibleForFaultToleranceReasonProcessor = HostIncompatibleForFaultToleranceReason("processor") +) + +func init() { + t["HostIncompatibleForFaultToleranceReason"] = reflect.TypeOf((*HostIncompatibleForFaultToleranceReason)(nil)).Elem() +} + +type HostIncompatibleForRecordReplayReason string + +const ( + HostIncompatibleForRecordReplayReasonProduct = HostIncompatibleForRecordReplayReason("product") + HostIncompatibleForRecordReplayReasonProcessor = HostIncompatibleForRecordReplayReason("processor") +) + +func init() { + t["HostIncompatibleForRecordReplayReason"] = reflect.TypeOf((*HostIncompatibleForRecordReplayReason)(nil)).Elem() +} + +type HostInternetScsiHbaChapAuthenticationType string + +const ( + HostInternetScsiHbaChapAuthenticationTypeChapProhibited = HostInternetScsiHbaChapAuthenticationType("chapProhibited") + HostInternetScsiHbaChapAuthenticationTypeChapDiscouraged = HostInternetScsiHbaChapAuthenticationType("chapDiscouraged") + HostInternetScsiHbaChapAuthenticationTypeChapPreferred = HostInternetScsiHbaChapAuthenticationType("chapPreferred") + HostInternetScsiHbaChapAuthenticationTypeChapRequired = HostInternetScsiHbaChapAuthenticationType("chapRequired") +) + +func init() { + t["HostInternetScsiHbaChapAuthenticationType"] = reflect.TypeOf((*HostInternetScsiHbaChapAuthenticationType)(nil)).Elem() +} + +type HostInternetScsiHbaDigestType string + +const ( + HostInternetScsiHbaDigestTypeDigestProhibited = HostInternetScsiHbaDigestType("digestProhibited") + HostInternetScsiHbaDigestTypeDigestDiscouraged = HostInternetScsiHbaDigestType("digestDiscouraged") + HostInternetScsiHbaDigestTypeDigestPreferred = HostInternetScsiHbaDigestType("digestPreferred") + HostInternetScsiHbaDigestTypeDigestRequired = HostInternetScsiHbaDigestType("digestRequired") +) + +func init() { + t["HostInternetScsiHbaDigestType"] = reflect.TypeOf((*HostInternetScsiHbaDigestType)(nil)).Elem() +} + +type HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType string + +const ( + HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeDHCP = HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType("DHCP") + HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeAutoConfigured = HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType("AutoConfigured") + HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeStatic = HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType("Static") + 
HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeOther = HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType("Other") +) + +func init() { + t["HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType"] = reflect.TypeOf((*HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType)(nil)).Elem() +} + +type HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation string + +const ( + HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperationAdd = HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation("add") + HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperationRemove = HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation("remove") +) + +func init() { + t["HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation"] = reflect.TypeOf((*HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation)(nil)).Elem() +} + +type HostInternetScsiHbaNetworkBindingSupportType string + +const ( + HostInternetScsiHbaNetworkBindingSupportTypeNotsupported = HostInternetScsiHbaNetworkBindingSupportType("notsupported") + HostInternetScsiHbaNetworkBindingSupportTypeOptional = HostInternetScsiHbaNetworkBindingSupportType("optional") + HostInternetScsiHbaNetworkBindingSupportTypeRequired = HostInternetScsiHbaNetworkBindingSupportType("required") +) + +func init() { + t["HostInternetScsiHbaNetworkBindingSupportType"] = reflect.TypeOf((*HostInternetScsiHbaNetworkBindingSupportType)(nil)).Elem() +} + +type HostInternetScsiHbaStaticTargetTargetDiscoveryMethod string + +const ( + HostInternetScsiHbaStaticTargetTargetDiscoveryMethodStaticMethod = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod("staticMethod") + HostInternetScsiHbaStaticTargetTargetDiscoveryMethodSendTargetMethod = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod("sendTargetMethod") + HostInternetScsiHbaStaticTargetTargetDiscoveryMethodSlpMethod = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod("slpMethod") + HostInternetScsiHbaStaticTargetTargetDiscoveryMethodIsnsMethod = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod("isnsMethod") + HostInternetScsiHbaStaticTargetTargetDiscoveryMethodUnknownMethod = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod("unknownMethod") +) + +func init() { + t["HostInternetScsiHbaStaticTargetTargetDiscoveryMethod"] = reflect.TypeOf((*HostInternetScsiHbaStaticTargetTargetDiscoveryMethod)(nil)).Elem() +} + +type HostIpConfigIpV6AddressConfigType string + +const ( + HostIpConfigIpV6AddressConfigTypeOther = HostIpConfigIpV6AddressConfigType("other") + HostIpConfigIpV6AddressConfigTypeManual = HostIpConfigIpV6AddressConfigType("manual") + HostIpConfigIpV6AddressConfigTypeDhcp = HostIpConfigIpV6AddressConfigType("dhcp") + HostIpConfigIpV6AddressConfigTypeLinklayer = HostIpConfigIpV6AddressConfigType("linklayer") + HostIpConfigIpV6AddressConfigTypeRandom = HostIpConfigIpV6AddressConfigType("random") +) + +func init() { + t["HostIpConfigIpV6AddressConfigType"] = reflect.TypeOf((*HostIpConfigIpV6AddressConfigType)(nil)).Elem() +} + +type HostIpConfigIpV6AddressStatus string + +const ( + HostIpConfigIpV6AddressStatusPreferred = HostIpConfigIpV6AddressStatus("preferred") + HostIpConfigIpV6AddressStatusDeprecated = HostIpConfigIpV6AddressStatus("deprecated") + HostIpConfigIpV6AddressStatusInvalid = HostIpConfigIpV6AddressStatus("invalid") + HostIpConfigIpV6AddressStatusInaccessible = HostIpConfigIpV6AddressStatus("inaccessible") + HostIpConfigIpV6AddressStatusUnknown = HostIpConfigIpV6AddressStatus("unknown") + HostIpConfigIpV6AddressStatusTentative = HostIpConfigIpV6AddressStatus("tentative") + 
HostIpConfigIpV6AddressStatusDuplicate = HostIpConfigIpV6AddressStatus("duplicate") +) + +func init() { + t["HostIpConfigIpV6AddressStatus"] = reflect.TypeOf((*HostIpConfigIpV6AddressStatus)(nil)).Elem() +} + +type HostLicensableResourceKey string + +const ( + HostLicensableResourceKeyNumCpuPackages = HostLicensableResourceKey("numCpuPackages") + HostLicensableResourceKeyNumCpuCores = HostLicensableResourceKey("numCpuCores") + HostLicensableResourceKeyMemorySize = HostLicensableResourceKey("memorySize") + HostLicensableResourceKeyMemoryForVms = HostLicensableResourceKey("memoryForVms") + HostLicensableResourceKeyNumVmsStarted = HostLicensableResourceKey("numVmsStarted") + HostLicensableResourceKeyNumVmsStarting = HostLicensableResourceKey("numVmsStarting") +) + +func init() { + t["HostLicensableResourceKey"] = reflect.TypeOf((*HostLicensableResourceKey)(nil)).Elem() +} + +type HostLockdownMode string + +const ( + HostLockdownModeLockdownDisabled = HostLockdownMode("lockdownDisabled") + HostLockdownModeLockdownNormal = HostLockdownMode("lockdownNormal") + HostLockdownModeLockdownStrict = HostLockdownMode("lockdownStrict") +) + +func init() { + t["HostLockdownMode"] = reflect.TypeOf((*HostLockdownMode)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerFileType string + +const ( + HostLowLevelProvisioningManagerFileTypeFile = HostLowLevelProvisioningManagerFileType("File") + HostLowLevelProvisioningManagerFileTypeVirtualDisk = HostLowLevelProvisioningManagerFileType("VirtualDisk") + HostLowLevelProvisioningManagerFileTypeDirectory = HostLowLevelProvisioningManagerFileType("Directory") +) + +func init() { + t["HostLowLevelProvisioningManagerFileType"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileType)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerReloadTarget string + +const ( + HostLowLevelProvisioningManagerReloadTargetCurrentConfig = HostLowLevelProvisioningManagerReloadTarget("currentConfig") + HostLowLevelProvisioningManagerReloadTargetSnapshotConfig = HostLowLevelProvisioningManagerReloadTarget("snapshotConfig") +) + +func init() { + t["HostLowLevelProvisioningManagerReloadTarget"] = reflect.TypeOf((*HostLowLevelProvisioningManagerReloadTarget)(nil)).Elem() +} + +type HostMountInfoInaccessibleReason string + +const ( + HostMountInfoInaccessibleReasonAllPathsDown_Start = HostMountInfoInaccessibleReason("AllPathsDown_Start") + HostMountInfoInaccessibleReasonAllPathsDown_Timeout = HostMountInfoInaccessibleReason("AllPathsDown_Timeout") + HostMountInfoInaccessibleReasonPermanentDeviceLoss = HostMountInfoInaccessibleReason("PermanentDeviceLoss") +) + +func init() { + t["HostMountInfoInaccessibleReason"] = reflect.TypeOf((*HostMountInfoInaccessibleReason)(nil)).Elem() +} + +type HostMountMode string + +const ( + HostMountModeReadWrite = HostMountMode("readWrite") + HostMountModeReadOnly = HostMountMode("readOnly") +) + +func init() { + t["HostMountMode"] = reflect.TypeOf((*HostMountMode)(nil)).Elem() +} + +type HostNasVolumeSecurityType string + +const ( + HostNasVolumeSecurityTypeAUTH_SYS = HostNasVolumeSecurityType("AUTH_SYS") + HostNasVolumeSecurityTypeSEC_KRB5 = HostNasVolumeSecurityType("SEC_KRB5") + HostNasVolumeSecurityTypeSEC_KRB5I = HostNasVolumeSecurityType("SEC_KRB5I") +) + +func init() { + t["HostNasVolumeSecurityType"] = reflect.TypeOf((*HostNasVolumeSecurityType)(nil)).Elem() +} + +type HostNetStackInstanceCongestionControlAlgorithmType string + +const ( + HostNetStackInstanceCongestionControlAlgorithmTypeNewreno = 
HostNetStackInstanceCongestionControlAlgorithmType("newreno") + HostNetStackInstanceCongestionControlAlgorithmTypeCubic = HostNetStackInstanceCongestionControlAlgorithmType("cubic") +) + +func init() { + t["HostNetStackInstanceCongestionControlAlgorithmType"] = reflect.TypeOf((*HostNetStackInstanceCongestionControlAlgorithmType)(nil)).Elem() +} + +type HostNetStackInstanceSystemStackKey string + +const ( + HostNetStackInstanceSystemStackKeyDefaultTcpipStack = HostNetStackInstanceSystemStackKey("defaultTcpipStack") + HostNetStackInstanceSystemStackKeyVmotion = HostNetStackInstanceSystemStackKey("vmotion") + HostNetStackInstanceSystemStackKeyVSphereProvisioning = HostNetStackInstanceSystemStackKey("vSphereProvisioning") +) + +func init() { + t["HostNetStackInstanceSystemStackKey"] = reflect.TypeOf((*HostNetStackInstanceSystemStackKey)(nil)).Elem() +} + +type HostNumericSensorHealthState string + +const ( + HostNumericSensorHealthStateUnknown = HostNumericSensorHealthState("unknown") + HostNumericSensorHealthStateGreen = HostNumericSensorHealthState("green") + HostNumericSensorHealthStateYellow = HostNumericSensorHealthState("yellow") + HostNumericSensorHealthStateRed = HostNumericSensorHealthState("red") +) + +func init() { + t["HostNumericSensorHealthState"] = reflect.TypeOf((*HostNumericSensorHealthState)(nil)).Elem() +} + +type HostNumericSensorType string + +const ( + HostNumericSensorTypeFan = HostNumericSensorType("fan") + HostNumericSensorTypePower = HostNumericSensorType("power") + HostNumericSensorTypeTemperature = HostNumericSensorType("temperature") + HostNumericSensorTypeVoltage = HostNumericSensorType("voltage") + HostNumericSensorTypeOther = HostNumericSensorType("other") + HostNumericSensorTypeProcessor = HostNumericSensorType("processor") + HostNumericSensorTypeMemory = HostNumericSensorType("memory") + HostNumericSensorTypeStorage = HostNumericSensorType("storage") + HostNumericSensorTypeSystemBoard = HostNumericSensorType("systemBoard") + HostNumericSensorTypeBattery = HostNumericSensorType("battery") + HostNumericSensorTypeBios = HostNumericSensorType("bios") + HostNumericSensorTypeCable = HostNumericSensorType("cable") + HostNumericSensorTypeWatchdog = HostNumericSensorType("watchdog") +) + +func init() { + t["HostNumericSensorType"] = reflect.TypeOf((*HostNumericSensorType)(nil)).Elem() +} + +type HostOpaqueSwitchOpaqueSwitchState string + +const ( + HostOpaqueSwitchOpaqueSwitchStateUp = HostOpaqueSwitchOpaqueSwitchState("up") + HostOpaqueSwitchOpaqueSwitchStateWarning = HostOpaqueSwitchOpaqueSwitchState("warning") + HostOpaqueSwitchOpaqueSwitchStateDown = HostOpaqueSwitchOpaqueSwitchState("down") +) + +func init() { + t["HostOpaqueSwitchOpaqueSwitchState"] = reflect.TypeOf((*HostOpaqueSwitchOpaqueSwitchState)(nil)).Elem() +} + +type HostPatchManagerInstallState string + +const ( + HostPatchManagerInstallStateHostRestarted = HostPatchManagerInstallState("hostRestarted") + HostPatchManagerInstallStateImageActive = HostPatchManagerInstallState("imageActive") +) + +func init() { + t["HostPatchManagerInstallState"] = reflect.TypeOf((*HostPatchManagerInstallState)(nil)).Elem() +} + +type HostPatchManagerIntegrityStatus string + +const ( + HostPatchManagerIntegrityStatusValidated = HostPatchManagerIntegrityStatus("validated") + HostPatchManagerIntegrityStatusKeyNotFound = HostPatchManagerIntegrityStatus("keyNotFound") + HostPatchManagerIntegrityStatusKeyRevoked = HostPatchManagerIntegrityStatus("keyRevoked") + HostPatchManagerIntegrityStatusKeyExpired = 
HostPatchManagerIntegrityStatus("keyExpired") + HostPatchManagerIntegrityStatusDigestMismatch = HostPatchManagerIntegrityStatus("digestMismatch") + HostPatchManagerIntegrityStatusNotEnoughSignatures = HostPatchManagerIntegrityStatus("notEnoughSignatures") + HostPatchManagerIntegrityStatusValidationError = HostPatchManagerIntegrityStatus("validationError") +) + +func init() { + t["HostPatchManagerIntegrityStatus"] = reflect.TypeOf((*HostPatchManagerIntegrityStatus)(nil)).Elem() +} + +type HostPatchManagerReason string + +const ( + HostPatchManagerReasonObsoleted = HostPatchManagerReason("obsoleted") + HostPatchManagerReasonMissingPatch = HostPatchManagerReason("missingPatch") + HostPatchManagerReasonMissingLib = HostPatchManagerReason("missingLib") + HostPatchManagerReasonHasDependentPatch = HostPatchManagerReason("hasDependentPatch") + HostPatchManagerReasonConflictPatch = HostPatchManagerReason("conflictPatch") + HostPatchManagerReasonConflictLib = HostPatchManagerReason("conflictLib") +) + +func init() { + t["HostPatchManagerReason"] = reflect.TypeOf((*HostPatchManagerReason)(nil)).Elem() +} + +type HostPowerOperationType string + +const ( + HostPowerOperationTypePowerOn = HostPowerOperationType("powerOn") + HostPowerOperationTypePowerOff = HostPowerOperationType("powerOff") +) + +func init() { + t["HostPowerOperationType"] = reflect.TypeOf((*HostPowerOperationType)(nil)).Elem() +} + +type HostProfileManagerAnswerFileStatus string + +const ( + HostProfileManagerAnswerFileStatusValid = HostProfileManagerAnswerFileStatus("valid") + HostProfileManagerAnswerFileStatusInvalid = HostProfileManagerAnswerFileStatus("invalid") + HostProfileManagerAnswerFileStatusUnknown = HostProfileManagerAnswerFileStatus("unknown") +) + +func init() { + t["HostProfileManagerAnswerFileStatus"] = reflect.TypeOf((*HostProfileManagerAnswerFileStatus)(nil)).Elem() +} + +type HostProfileManagerCompositionResultResultElementStatus string + +const ( + HostProfileManagerCompositionResultResultElementStatusSuccess = HostProfileManagerCompositionResultResultElementStatus("success") + HostProfileManagerCompositionResultResultElementStatusError = HostProfileManagerCompositionResultResultElementStatus("error") +) + +func init() { + t["HostProfileManagerCompositionResultResultElementStatus"] = reflect.TypeOf((*HostProfileManagerCompositionResultResultElementStatus)(nil)).Elem() +} + +type HostProfileManagerCompositionValidationResultResultElementStatus string + +const ( + HostProfileManagerCompositionValidationResultResultElementStatusSuccess = HostProfileManagerCompositionValidationResultResultElementStatus("success") + HostProfileManagerCompositionValidationResultResultElementStatusError = HostProfileManagerCompositionValidationResultResultElementStatus("error") +) + +func init() { + t["HostProfileManagerCompositionValidationResultResultElementStatus"] = reflect.TypeOf((*HostProfileManagerCompositionValidationResultResultElementStatus)(nil)).Elem() +} + +type HostProfileManagerTaskListRequirement string + +const ( + HostProfileManagerTaskListRequirementMaintenanceModeRequired = HostProfileManagerTaskListRequirement("maintenanceModeRequired") + HostProfileManagerTaskListRequirementRebootRequired = HostProfileManagerTaskListRequirement("rebootRequired") +) + +func init() { + t["HostProfileManagerTaskListRequirement"] = reflect.TypeOf((*HostProfileManagerTaskListRequirement)(nil)).Elem() +} + +type HostProfileValidationFailureInfoUpdateType string + +const ( + HostProfileValidationFailureInfoUpdateTypeHostBased = 
HostProfileValidationFailureInfoUpdateType("HostBased") + HostProfileValidationFailureInfoUpdateTypeImport = HostProfileValidationFailureInfoUpdateType("Import") + HostProfileValidationFailureInfoUpdateTypeEdit = HostProfileValidationFailureInfoUpdateType("Edit") + HostProfileValidationFailureInfoUpdateTypeCompose = HostProfileValidationFailureInfoUpdateType("Compose") +) + +func init() { + t["HostProfileValidationFailureInfoUpdateType"] = reflect.TypeOf((*HostProfileValidationFailureInfoUpdateType)(nil)).Elem() +} + +type HostProfileValidationState string + +const ( + HostProfileValidationStateReady = HostProfileValidationState("Ready") + HostProfileValidationStateRunning = HostProfileValidationState("Running") + HostProfileValidationStateFailed = HostProfileValidationState("Failed") +) + +func init() { + t["HostProfileValidationState"] = reflect.TypeOf((*HostProfileValidationState)(nil)).Elem() +} + +type HostProtocolEndpointPEType string + +const ( + HostProtocolEndpointPETypeBlock = HostProtocolEndpointPEType("block") + HostProtocolEndpointPETypeNas = HostProtocolEndpointPEType("nas") +) + +func init() { + t["HostProtocolEndpointPEType"] = reflect.TypeOf((*HostProtocolEndpointPEType)(nil)).Elem() +} + +type HostProtocolEndpointProtocolEndpointType string + +const ( + HostProtocolEndpointProtocolEndpointTypeScsi = HostProtocolEndpointProtocolEndpointType("scsi") + HostProtocolEndpointProtocolEndpointTypeNfs = HostProtocolEndpointProtocolEndpointType("nfs") + HostProtocolEndpointProtocolEndpointTypeNfs4x = HostProtocolEndpointProtocolEndpointType("nfs4x") +) + +func init() { + t["HostProtocolEndpointProtocolEndpointType"] = reflect.TypeOf((*HostProtocolEndpointProtocolEndpointType)(nil)).Elem() +} + +type HostReplayUnsupportedReason string + +const ( + HostReplayUnsupportedReasonIncompatibleProduct = HostReplayUnsupportedReason("incompatibleProduct") + HostReplayUnsupportedReasonIncompatibleCpu = HostReplayUnsupportedReason("incompatibleCpu") + HostReplayUnsupportedReasonHvDisabled = HostReplayUnsupportedReason("hvDisabled") + HostReplayUnsupportedReasonCpuidLimitSet = HostReplayUnsupportedReason("cpuidLimitSet") + HostReplayUnsupportedReasonOldBIOS = HostReplayUnsupportedReason("oldBIOS") + HostReplayUnsupportedReasonUnknown = HostReplayUnsupportedReason("unknown") +) + +func init() { + t["HostReplayUnsupportedReason"] = reflect.TypeOf((*HostReplayUnsupportedReason)(nil)).Elem() +} + +type HostRuntimeInfoNetStackInstanceRuntimeInfoState string + +const ( + HostRuntimeInfoNetStackInstanceRuntimeInfoStateInactive = HostRuntimeInfoNetStackInstanceRuntimeInfoState("inactive") + HostRuntimeInfoNetStackInstanceRuntimeInfoStateActive = HostRuntimeInfoNetStackInstanceRuntimeInfoState("active") + HostRuntimeInfoNetStackInstanceRuntimeInfoStateDeactivating = HostRuntimeInfoNetStackInstanceRuntimeInfoState("deactivating") + HostRuntimeInfoNetStackInstanceRuntimeInfoStateActivating = HostRuntimeInfoNetStackInstanceRuntimeInfoState("activating") +) + +func init() { + t["HostRuntimeInfoNetStackInstanceRuntimeInfoState"] = reflect.TypeOf((*HostRuntimeInfoNetStackInstanceRuntimeInfoState)(nil)).Elem() +} + +type HostServicePolicy string + +const ( + HostServicePolicyOn = HostServicePolicy("on") + HostServicePolicyAutomatic = HostServicePolicy("automatic") + HostServicePolicyOff = HostServicePolicy("off") +) + +func init() { + t["HostServicePolicy"] = reflect.TypeOf((*HostServicePolicy)(nil)).Elem() +} + +type HostSnmpAgentCapability string + +const ( + HostSnmpAgentCapabilityCOMPLETE = 
HostSnmpAgentCapability("COMPLETE") + HostSnmpAgentCapabilityDIAGNOSTICS = HostSnmpAgentCapability("DIAGNOSTICS") + HostSnmpAgentCapabilityCONFIGURATION = HostSnmpAgentCapability("CONFIGURATION") +) + +func init() { + t["HostSnmpAgentCapability"] = reflect.TypeOf((*HostSnmpAgentCapability)(nil)).Elem() +} + +type HostStandbyMode string + +const ( + HostStandbyModeEntering = HostStandbyMode("entering") + HostStandbyModeExiting = HostStandbyMode("exiting") + HostStandbyModeIn = HostStandbyMode("in") + HostStandbyModeNone = HostStandbyMode("none") +) + +func init() { + t["HostStandbyMode"] = reflect.TypeOf((*HostStandbyMode)(nil)).Elem() +} + +type HostSystemConnectionState string + +const ( + HostSystemConnectionStateConnected = HostSystemConnectionState("connected") + HostSystemConnectionStateNotResponding = HostSystemConnectionState("notResponding") + HostSystemConnectionStateDisconnected = HostSystemConnectionState("disconnected") +) + +func init() { + t["HostSystemConnectionState"] = reflect.TypeOf((*HostSystemConnectionState)(nil)).Elem() +} + +type HostSystemIdentificationInfoIdentifier string + +const ( + HostSystemIdentificationInfoIdentifierAssetTag = HostSystemIdentificationInfoIdentifier("AssetTag") + HostSystemIdentificationInfoIdentifierServiceTag = HostSystemIdentificationInfoIdentifier("ServiceTag") + HostSystemIdentificationInfoIdentifierOemSpecificString = HostSystemIdentificationInfoIdentifier("OemSpecificString") +) + +func init() { + t["HostSystemIdentificationInfoIdentifier"] = reflect.TypeOf((*HostSystemIdentificationInfoIdentifier)(nil)).Elem() +} + +type HostSystemPowerState string + +const ( + HostSystemPowerStatePoweredOn = HostSystemPowerState("poweredOn") + HostSystemPowerStatePoweredOff = HostSystemPowerState("poweredOff") + HostSystemPowerStateStandBy = HostSystemPowerState("standBy") + HostSystemPowerStateUnknown = HostSystemPowerState("unknown") +) + +func init() { + t["HostSystemPowerState"] = reflect.TypeOf((*HostSystemPowerState)(nil)).Elem() +} + +type HostSystemRemediationStateState string + +const ( + HostSystemRemediationStateStateRemediationReady = HostSystemRemediationStateState("remediationReady") + HostSystemRemediationStateStatePrecheckRemediationRunning = HostSystemRemediationStateState("precheckRemediationRunning") + HostSystemRemediationStateStatePrecheckRemediationComplete = HostSystemRemediationStateState("precheckRemediationComplete") + HostSystemRemediationStateStatePrecheckRemediationFailed = HostSystemRemediationStateState("precheckRemediationFailed") + HostSystemRemediationStateStateRemediationRunning = HostSystemRemediationStateState("remediationRunning") + HostSystemRemediationStateStateRemediationFailed = HostSystemRemediationStateState("remediationFailed") +) + +func init() { + t["HostSystemRemediationStateState"] = reflect.TypeOf((*HostSystemRemediationStateState)(nil)).Elem() +} + +type HostTpmAttestationInfoAcceptanceStatus string + +const ( + HostTpmAttestationInfoAcceptanceStatusNotAccepted = HostTpmAttestationInfoAcceptanceStatus("notAccepted") + HostTpmAttestationInfoAcceptanceStatusAccepted = HostTpmAttestationInfoAcceptanceStatus("accepted") +) + +func init() { + t["HostTpmAttestationInfoAcceptanceStatus"] = reflect.TypeOf((*HostTpmAttestationInfoAcceptanceStatus)(nil)).Elem() +} + +type HostUnresolvedVmfsExtentUnresolvedReason string + +const ( + HostUnresolvedVmfsExtentUnresolvedReasonDiskIdMismatch = HostUnresolvedVmfsExtentUnresolvedReason("diskIdMismatch") + HostUnresolvedVmfsExtentUnresolvedReasonUuidConflict = 
HostUnresolvedVmfsExtentUnresolvedReason("uuidConflict") +) + +func init() { + t["HostUnresolvedVmfsExtentUnresolvedReason"] = reflect.TypeOf((*HostUnresolvedVmfsExtentUnresolvedReason)(nil)).Elem() +} + +type HostUnresolvedVmfsResolutionSpecVmfsUuidResolution string + +const ( + HostUnresolvedVmfsResolutionSpecVmfsUuidResolutionResignature = HostUnresolvedVmfsResolutionSpecVmfsUuidResolution("resignature") + HostUnresolvedVmfsResolutionSpecVmfsUuidResolutionForceMount = HostUnresolvedVmfsResolutionSpecVmfsUuidResolution("forceMount") +) + +func init() { + t["HostUnresolvedVmfsResolutionSpecVmfsUuidResolution"] = reflect.TypeOf((*HostUnresolvedVmfsResolutionSpecVmfsUuidResolution)(nil)).Elem() +} + +type HostVirtualNicManagerNicType string + +const ( + HostVirtualNicManagerNicTypeVmotion = HostVirtualNicManagerNicType("vmotion") + HostVirtualNicManagerNicTypeFaultToleranceLogging = HostVirtualNicManagerNicType("faultToleranceLogging") + HostVirtualNicManagerNicTypeVSphereReplication = HostVirtualNicManagerNicType("vSphereReplication") + HostVirtualNicManagerNicTypeVSphereReplicationNFC = HostVirtualNicManagerNicType("vSphereReplicationNFC") + HostVirtualNicManagerNicTypeManagement = HostVirtualNicManagerNicType("management") + HostVirtualNicManagerNicTypeVsan = HostVirtualNicManagerNicType("vsan") + HostVirtualNicManagerNicTypeVSphereProvisioning = HostVirtualNicManagerNicType("vSphereProvisioning") + HostVirtualNicManagerNicTypeVsanWitness = HostVirtualNicManagerNicType("vsanWitness") +) + +func init() { + t["HostVirtualNicManagerNicType"] = reflect.TypeOf((*HostVirtualNicManagerNicType)(nil)).Elem() +} + +type HostVmciAccessManagerMode string + +const ( + HostVmciAccessManagerModeGrant = HostVmciAccessManagerMode("grant") + HostVmciAccessManagerModeReplace = HostVmciAccessManagerMode("replace") + HostVmciAccessManagerModeRevoke = HostVmciAccessManagerMode("revoke") +) + +func init() { + t["HostVmciAccessManagerMode"] = reflect.TypeOf((*HostVmciAccessManagerMode)(nil)).Elem() +} + +type HostVmfsVolumeUnmapBandwidthPolicy string + +const ( + HostVmfsVolumeUnmapBandwidthPolicyFixed = HostVmfsVolumeUnmapBandwidthPolicy("fixed") + HostVmfsVolumeUnmapBandwidthPolicyDynamic = HostVmfsVolumeUnmapBandwidthPolicy("dynamic") +) + +func init() { + t["HostVmfsVolumeUnmapBandwidthPolicy"] = reflect.TypeOf((*HostVmfsVolumeUnmapBandwidthPolicy)(nil)).Elem() +} + +type HostVmfsVolumeUnmapPriority string + +const ( + HostVmfsVolumeUnmapPriorityNone = HostVmfsVolumeUnmapPriority("none") + HostVmfsVolumeUnmapPriorityLow = HostVmfsVolumeUnmapPriority("low") +) + +func init() { + t["HostVmfsVolumeUnmapPriority"] = reflect.TypeOf((*HostVmfsVolumeUnmapPriority)(nil)).Elem() +} + +type HttpNfcLeaseManifestEntryChecksumType string + +const ( + HttpNfcLeaseManifestEntryChecksumTypeSha1 = HttpNfcLeaseManifestEntryChecksumType("sha1") + HttpNfcLeaseManifestEntryChecksumTypeSha256 = HttpNfcLeaseManifestEntryChecksumType("sha256") +) + +func init() { + t["HttpNfcLeaseManifestEntryChecksumType"] = reflect.TypeOf((*HttpNfcLeaseManifestEntryChecksumType)(nil)).Elem() +} + +type HttpNfcLeaseMode string + +const ( + HttpNfcLeaseModePushOrGet = HttpNfcLeaseMode("pushOrGet") + HttpNfcLeaseModePull = HttpNfcLeaseMode("pull") +) + +func init() { + t["HttpNfcLeaseMode"] = reflect.TypeOf((*HttpNfcLeaseMode)(nil)).Elem() +} + +type HttpNfcLeaseState string + +const ( + HttpNfcLeaseStateInitializing = HttpNfcLeaseState("initializing") + HttpNfcLeaseStateReady = HttpNfcLeaseState("ready") + HttpNfcLeaseStateDone = 
HttpNfcLeaseState("done") + HttpNfcLeaseStateError = HttpNfcLeaseState("error") +) + +func init() { + t["HttpNfcLeaseState"] = reflect.TypeOf((*HttpNfcLeaseState)(nil)).Elem() +} + +type IncompatibleHostForVmReplicationIncompatibleReason string + +const ( + IncompatibleHostForVmReplicationIncompatibleReasonRpo = IncompatibleHostForVmReplicationIncompatibleReason("rpo") + IncompatibleHostForVmReplicationIncompatibleReasonNetCompression = IncompatibleHostForVmReplicationIncompatibleReason("netCompression") +) + +func init() { + t["IncompatibleHostForVmReplicationIncompatibleReason"] = reflect.TypeOf((*IncompatibleHostForVmReplicationIncompatibleReason)(nil)).Elem() +} + +type InternetScsiSnsDiscoveryMethod string + +const ( + InternetScsiSnsDiscoveryMethodIsnsStatic = InternetScsiSnsDiscoveryMethod("isnsStatic") + InternetScsiSnsDiscoveryMethodIsnsDhcp = InternetScsiSnsDiscoveryMethod("isnsDhcp") + InternetScsiSnsDiscoveryMethodIsnsSlp = InternetScsiSnsDiscoveryMethod("isnsSlp") +) + +func init() { + t["InternetScsiSnsDiscoveryMethod"] = reflect.TypeOf((*InternetScsiSnsDiscoveryMethod)(nil)).Elem() +} + +type InvalidDasConfigArgumentEntryForInvalidArgument string + +const ( + InvalidDasConfigArgumentEntryForInvalidArgumentAdmissionControl = InvalidDasConfigArgumentEntryForInvalidArgument("admissionControl") + InvalidDasConfigArgumentEntryForInvalidArgumentUserHeartbeatDs = InvalidDasConfigArgumentEntryForInvalidArgument("userHeartbeatDs") + InvalidDasConfigArgumentEntryForInvalidArgumentVmConfig = InvalidDasConfigArgumentEntryForInvalidArgument("vmConfig") +) + +func init() { + t["InvalidDasConfigArgumentEntryForInvalidArgument"] = reflect.TypeOf((*InvalidDasConfigArgumentEntryForInvalidArgument)(nil)).Elem() +} + +type InvalidProfileReferenceHostReason string + +const ( + InvalidProfileReferenceHostReasonIncompatibleVersion = InvalidProfileReferenceHostReason("incompatibleVersion") + InvalidProfileReferenceHostReasonMissingReferenceHost = InvalidProfileReferenceHostReason("missingReferenceHost") +) + +func init() { + t["InvalidProfileReferenceHostReason"] = reflect.TypeOf((*InvalidProfileReferenceHostReason)(nil)).Elem() +} + +type IoFilterOperation string + +const ( + IoFilterOperationInstall = IoFilterOperation("install") + IoFilterOperationUninstall = IoFilterOperation("uninstall") + IoFilterOperationUpgrade = IoFilterOperation("upgrade") +) + +func init() { + t["IoFilterOperation"] = reflect.TypeOf((*IoFilterOperation)(nil)).Elem() +} + +type IoFilterType string + +const ( + IoFilterTypeCache = IoFilterType("cache") + IoFilterTypeReplication = IoFilterType("replication") + IoFilterTypeEncryption = IoFilterType("encryption") + IoFilterTypeCompression = IoFilterType("compression") + IoFilterTypeInspection = IoFilterType("inspection") + IoFilterTypeDatastoreIoControl = IoFilterType("datastoreIoControl") + IoFilterTypeDataProvider = IoFilterType("dataProvider") +) + +func init() { + t["IoFilterType"] = reflect.TypeOf((*IoFilterType)(nil)).Elem() +} + +type IscsiPortInfoPathStatus string + +const ( + IscsiPortInfoPathStatusNotUsed = IscsiPortInfoPathStatus("notUsed") + IscsiPortInfoPathStatusActive = IscsiPortInfoPathStatus("active") + IscsiPortInfoPathStatusStandBy = IscsiPortInfoPathStatus("standBy") + IscsiPortInfoPathStatusLastActive = IscsiPortInfoPathStatus("lastActive") +) + +func init() { + t["IscsiPortInfoPathStatus"] = reflect.TypeOf((*IscsiPortInfoPathStatus)(nil)).Elem() +} + +type LatencySensitivitySensitivityLevel string + +const ( + LatencySensitivitySensitivityLevelLow = 
LatencySensitivitySensitivityLevel("low") + LatencySensitivitySensitivityLevelNormal = LatencySensitivitySensitivityLevel("normal") + LatencySensitivitySensitivityLevelMedium = LatencySensitivitySensitivityLevel("medium") + LatencySensitivitySensitivityLevelHigh = LatencySensitivitySensitivityLevel("high") + LatencySensitivitySensitivityLevelCustom = LatencySensitivitySensitivityLevel("custom") +) + +func init() { + t["LatencySensitivitySensitivityLevel"] = reflect.TypeOf((*LatencySensitivitySensitivityLevel)(nil)).Elem() +} + +type LicenseAssignmentFailedReason string + +const ( + LicenseAssignmentFailedReasonKeyEntityMismatch = LicenseAssignmentFailedReason("keyEntityMismatch") + LicenseAssignmentFailedReasonDowngradeDisallowed = LicenseAssignmentFailedReason("downgradeDisallowed") + LicenseAssignmentFailedReasonInventoryNotManageableByVirtualCenter = LicenseAssignmentFailedReason("inventoryNotManageableByVirtualCenter") + LicenseAssignmentFailedReasonHostsUnmanageableByVirtualCenterWithoutLicenseServer = LicenseAssignmentFailedReason("hostsUnmanageableByVirtualCenterWithoutLicenseServer") +) + +func init() { + t["LicenseAssignmentFailedReason"] = reflect.TypeOf((*LicenseAssignmentFailedReason)(nil)).Elem() +} + +type LicenseFeatureInfoSourceRestriction string + +const ( + LicenseFeatureInfoSourceRestrictionUnrestricted = LicenseFeatureInfoSourceRestriction("unrestricted") + LicenseFeatureInfoSourceRestrictionServed = LicenseFeatureInfoSourceRestriction("served") + LicenseFeatureInfoSourceRestrictionFile = LicenseFeatureInfoSourceRestriction("file") +) + +func init() { + t["LicenseFeatureInfoSourceRestriction"] = reflect.TypeOf((*LicenseFeatureInfoSourceRestriction)(nil)).Elem() +} + +type LicenseFeatureInfoState string + +const ( + LicenseFeatureInfoStateEnabled = LicenseFeatureInfoState("enabled") + LicenseFeatureInfoStateDisabled = LicenseFeatureInfoState("disabled") + LicenseFeatureInfoStateOptional = LicenseFeatureInfoState("optional") +) + +func init() { + t["LicenseFeatureInfoState"] = reflect.TypeOf((*LicenseFeatureInfoState)(nil)).Elem() +} + +type LicenseFeatureInfoUnit string + +const ( + LicenseFeatureInfoUnitHost = LicenseFeatureInfoUnit("host") + LicenseFeatureInfoUnitCpuCore = LicenseFeatureInfoUnit("cpuCore") + LicenseFeatureInfoUnitCpuPackage = LicenseFeatureInfoUnit("cpuPackage") + LicenseFeatureInfoUnitServer = LicenseFeatureInfoUnit("server") + LicenseFeatureInfoUnitVm = LicenseFeatureInfoUnit("vm") +) + +func init() { + t["LicenseFeatureInfoUnit"] = reflect.TypeOf((*LicenseFeatureInfoUnit)(nil)).Elem() +} + +type LicenseManagerLicenseKey string + +const ( + LicenseManagerLicenseKeyEsxFull = LicenseManagerLicenseKey("esxFull") + LicenseManagerLicenseKeyEsxVmtn = LicenseManagerLicenseKey("esxVmtn") + LicenseManagerLicenseKeyEsxExpress = LicenseManagerLicenseKey("esxExpress") + LicenseManagerLicenseKeySan = LicenseManagerLicenseKey("san") + LicenseManagerLicenseKeyIscsi = LicenseManagerLicenseKey("iscsi") + LicenseManagerLicenseKeyNas = LicenseManagerLicenseKey("nas") + LicenseManagerLicenseKeyVsmp = LicenseManagerLicenseKey("vsmp") + LicenseManagerLicenseKeyBackup = LicenseManagerLicenseKey("backup") + LicenseManagerLicenseKeyVc = LicenseManagerLicenseKey("vc") + LicenseManagerLicenseKeyVcExpress = LicenseManagerLicenseKey("vcExpress") + LicenseManagerLicenseKeyEsxHost = LicenseManagerLicenseKey("esxHost") + LicenseManagerLicenseKeyGsxHost = LicenseManagerLicenseKey("gsxHost") + LicenseManagerLicenseKeyServerHost = LicenseManagerLicenseKey("serverHost") + 
LicenseManagerLicenseKeyDrsPower = LicenseManagerLicenseKey("drsPower") + LicenseManagerLicenseKeyVmotion = LicenseManagerLicenseKey("vmotion") + LicenseManagerLicenseKeyDrs = LicenseManagerLicenseKey("drs") + LicenseManagerLicenseKeyDas = LicenseManagerLicenseKey("das") +) + +func init() { + t["LicenseManagerLicenseKey"] = reflect.TypeOf((*LicenseManagerLicenseKey)(nil)).Elem() +} + +type LicenseManagerState string + +const ( + LicenseManagerStateInitializing = LicenseManagerState("initializing") + LicenseManagerStateNormal = LicenseManagerState("normal") + LicenseManagerStateMarginal = LicenseManagerState("marginal") + LicenseManagerStateFault = LicenseManagerState("fault") +) + +func init() { + t["LicenseManagerState"] = reflect.TypeOf((*LicenseManagerState)(nil)).Elem() +} + +type LicenseReservationInfoState string + +const ( + LicenseReservationInfoStateNotUsed = LicenseReservationInfoState("notUsed") + LicenseReservationInfoStateNoLicense = LicenseReservationInfoState("noLicense") + LicenseReservationInfoStateUnlicensedUse = LicenseReservationInfoState("unlicensedUse") + LicenseReservationInfoStateLicensed = LicenseReservationInfoState("licensed") +) + +func init() { + t["LicenseReservationInfoState"] = reflect.TypeOf((*LicenseReservationInfoState)(nil)).Elem() +} + +type LinkDiscoveryProtocolConfigOperationType string + +const ( + LinkDiscoveryProtocolConfigOperationTypeNone = LinkDiscoveryProtocolConfigOperationType("none") + LinkDiscoveryProtocolConfigOperationTypeListen = LinkDiscoveryProtocolConfigOperationType("listen") + LinkDiscoveryProtocolConfigOperationTypeAdvertise = LinkDiscoveryProtocolConfigOperationType("advertise") + LinkDiscoveryProtocolConfigOperationTypeBoth = LinkDiscoveryProtocolConfigOperationType("both") +) + +func init() { + t["LinkDiscoveryProtocolConfigOperationType"] = reflect.TypeOf((*LinkDiscoveryProtocolConfigOperationType)(nil)).Elem() +} + +type LinkDiscoveryProtocolConfigProtocolType string + +const ( + LinkDiscoveryProtocolConfigProtocolTypeCdp = LinkDiscoveryProtocolConfigProtocolType("cdp") + LinkDiscoveryProtocolConfigProtocolTypeLldp = LinkDiscoveryProtocolConfigProtocolType("lldp") +) + +func init() { + t["LinkDiscoveryProtocolConfigProtocolType"] = reflect.TypeOf((*LinkDiscoveryProtocolConfigProtocolType)(nil)).Elem() +} + +type ManagedEntityStatus string + +const ( + ManagedEntityStatusGray = ManagedEntityStatus("gray") + ManagedEntityStatusGreen = ManagedEntityStatus("green") + ManagedEntityStatusYellow = ManagedEntityStatus("yellow") + ManagedEntityStatusRed = ManagedEntityStatus("red") +) + +func init() { + t["ManagedEntityStatus"] = reflect.TypeOf((*ManagedEntityStatus)(nil)).Elem() +} + +type MetricAlarmOperator string + +const ( + MetricAlarmOperatorIsAbove = MetricAlarmOperator("isAbove") + MetricAlarmOperatorIsBelow = MetricAlarmOperator("isBelow") +) + +func init() { + t["MetricAlarmOperator"] = reflect.TypeOf((*MetricAlarmOperator)(nil)).Elem() +} + +type MultipathState string + +const ( + MultipathStateStandby = MultipathState("standby") + MultipathStateActive = MultipathState("active") + MultipathStateDisabled = MultipathState("disabled") + MultipathStateDead = MultipathState("dead") + MultipathStateUnknown = MultipathState("unknown") +) + +func init() { + t["MultipathState"] = reflect.TypeOf((*MultipathState)(nil)).Elem() +} + +type NetBIOSConfigInfoMode string + +const ( + NetBIOSConfigInfoModeUnknown = NetBIOSConfigInfoMode("unknown") + NetBIOSConfigInfoModeEnabled = NetBIOSConfigInfoMode("enabled") + 
NetBIOSConfigInfoModeDisabled = NetBIOSConfigInfoMode("disabled") + NetBIOSConfigInfoModeEnabledViaDHCP = NetBIOSConfigInfoMode("enabledViaDHCP") +) + +func init() { + t["NetBIOSConfigInfoMode"] = reflect.TypeOf((*NetBIOSConfigInfoMode)(nil)).Elem() +} + +type NetIpConfigInfoIpAddressOrigin string + +const ( + NetIpConfigInfoIpAddressOriginOther = NetIpConfigInfoIpAddressOrigin("other") + NetIpConfigInfoIpAddressOriginManual = NetIpConfigInfoIpAddressOrigin("manual") + NetIpConfigInfoIpAddressOriginDhcp = NetIpConfigInfoIpAddressOrigin("dhcp") + NetIpConfigInfoIpAddressOriginLinklayer = NetIpConfigInfoIpAddressOrigin("linklayer") + NetIpConfigInfoIpAddressOriginRandom = NetIpConfigInfoIpAddressOrigin("random") +) + +func init() { + t["NetIpConfigInfoIpAddressOrigin"] = reflect.TypeOf((*NetIpConfigInfoIpAddressOrigin)(nil)).Elem() +} + +type NetIpConfigInfoIpAddressStatus string + +const ( + NetIpConfigInfoIpAddressStatusPreferred = NetIpConfigInfoIpAddressStatus("preferred") + NetIpConfigInfoIpAddressStatusDeprecated = NetIpConfigInfoIpAddressStatus("deprecated") + NetIpConfigInfoIpAddressStatusInvalid = NetIpConfigInfoIpAddressStatus("invalid") + NetIpConfigInfoIpAddressStatusInaccessible = NetIpConfigInfoIpAddressStatus("inaccessible") + NetIpConfigInfoIpAddressStatusUnknown = NetIpConfigInfoIpAddressStatus("unknown") + NetIpConfigInfoIpAddressStatusTentative = NetIpConfigInfoIpAddressStatus("tentative") + NetIpConfigInfoIpAddressStatusDuplicate = NetIpConfigInfoIpAddressStatus("duplicate") +) + +func init() { + t["NetIpConfigInfoIpAddressStatus"] = reflect.TypeOf((*NetIpConfigInfoIpAddressStatus)(nil)).Elem() +} + +type NetIpStackInfoEntryType string + +const ( + NetIpStackInfoEntryTypeOther = NetIpStackInfoEntryType("other") + NetIpStackInfoEntryTypeInvalid = NetIpStackInfoEntryType("invalid") + NetIpStackInfoEntryTypeDynamic = NetIpStackInfoEntryType("dynamic") + NetIpStackInfoEntryTypeManual = NetIpStackInfoEntryType("manual") +) + +func init() { + t["NetIpStackInfoEntryType"] = reflect.TypeOf((*NetIpStackInfoEntryType)(nil)).Elem() +} + +type NetIpStackInfoPreference string + +const ( + NetIpStackInfoPreferenceReserved = NetIpStackInfoPreference("reserved") + NetIpStackInfoPreferenceLow = NetIpStackInfoPreference("low") + NetIpStackInfoPreferenceMedium = NetIpStackInfoPreference("medium") + NetIpStackInfoPreferenceHigh = NetIpStackInfoPreference("high") +) + +func init() { + t["NetIpStackInfoPreference"] = reflect.TypeOf((*NetIpStackInfoPreference)(nil)).Elem() +} + +type NotSupportedDeviceForFTDeviceType string + +const ( + NotSupportedDeviceForFTDeviceTypeVirtualVmxnet3 = NotSupportedDeviceForFTDeviceType("virtualVmxnet3") + NotSupportedDeviceForFTDeviceTypeParaVirtualSCSIController = NotSupportedDeviceForFTDeviceType("paraVirtualSCSIController") +) + +func init() { + t["NotSupportedDeviceForFTDeviceType"] = reflect.TypeOf((*NotSupportedDeviceForFTDeviceType)(nil)).Elem() +} + +type NumVirtualCpusIncompatibleReason string + +const ( + NumVirtualCpusIncompatibleReasonRecordReplay = NumVirtualCpusIncompatibleReason("recordReplay") + NumVirtualCpusIncompatibleReasonFaultTolerance = NumVirtualCpusIncompatibleReason("faultTolerance") +) + +func init() { + t["NumVirtualCpusIncompatibleReason"] = reflect.TypeOf((*NumVirtualCpusIncompatibleReason)(nil)).Elem() +} + +type NvdimmInterleaveSetState string + +const ( + NvdimmInterleaveSetStateInvalid = NvdimmInterleaveSetState("invalid") + NvdimmInterleaveSetStateActive = NvdimmInterleaveSetState("active") +) + +func init() { + 
t["NvdimmInterleaveSetState"] = reflect.TypeOf((*NvdimmInterleaveSetState)(nil)).Elem() +} + +type NvdimmNamespaceHealthStatus string + +const ( + NvdimmNamespaceHealthStatusNormal = NvdimmNamespaceHealthStatus("normal") + NvdimmNamespaceHealthStatusMissing = NvdimmNamespaceHealthStatus("missing") + NvdimmNamespaceHealthStatusLabelMissing = NvdimmNamespaceHealthStatus("labelMissing") + NvdimmNamespaceHealthStatusInterleaveBroken = NvdimmNamespaceHealthStatus("interleaveBroken") + NvdimmNamespaceHealthStatusLabelInconsistent = NvdimmNamespaceHealthStatus("labelInconsistent") + NvdimmNamespaceHealthStatusBttCorrupt = NvdimmNamespaceHealthStatus("bttCorrupt") + NvdimmNamespaceHealthStatusBadBlockSize = NvdimmNamespaceHealthStatus("badBlockSize") +) + +func init() { + t["NvdimmNamespaceHealthStatus"] = reflect.TypeOf((*NvdimmNamespaceHealthStatus)(nil)).Elem() +} + +type NvdimmNamespaceState string + +const ( + NvdimmNamespaceStateInvalid = NvdimmNamespaceState("invalid") + NvdimmNamespaceStateNotInUse = NvdimmNamespaceState("notInUse") + NvdimmNamespaceStateInUse = NvdimmNamespaceState("inUse") +) + +func init() { + t["NvdimmNamespaceState"] = reflect.TypeOf((*NvdimmNamespaceState)(nil)).Elem() +} + +type NvdimmNamespaceType string + +const ( + NvdimmNamespaceTypeBlockNamespace = NvdimmNamespaceType("blockNamespace") + NvdimmNamespaceTypePersistentNamespace = NvdimmNamespaceType("persistentNamespace") +) + +func init() { + t["NvdimmNamespaceType"] = reflect.TypeOf((*NvdimmNamespaceType)(nil)).Elem() +} + +type NvdimmNvdimmHealthInfoState string + +const ( + NvdimmNvdimmHealthInfoStateNormal = NvdimmNvdimmHealthInfoState("normal") + NvdimmNvdimmHealthInfoStateError = NvdimmNvdimmHealthInfoState("error") +) + +func init() { + t["NvdimmNvdimmHealthInfoState"] = reflect.TypeOf((*NvdimmNvdimmHealthInfoState)(nil)).Elem() +} + +type NvdimmRangeType string + +const ( + NvdimmRangeTypeVolatileRange = NvdimmRangeType("volatileRange") + NvdimmRangeTypePersistentRange = NvdimmRangeType("persistentRange") + NvdimmRangeTypeControlRange = NvdimmRangeType("controlRange") + NvdimmRangeTypeBlockRange = NvdimmRangeType("blockRange") + NvdimmRangeTypeVolatileVirtualDiskRange = NvdimmRangeType("volatileVirtualDiskRange") + NvdimmRangeTypeVolatileVirtualCDRange = NvdimmRangeType("volatileVirtualCDRange") + NvdimmRangeTypePersistentVirtualDiskRange = NvdimmRangeType("persistentVirtualDiskRange") + NvdimmRangeTypePersistentVirtualCDRange = NvdimmRangeType("persistentVirtualCDRange") +) + +func init() { + t["NvdimmRangeType"] = reflect.TypeOf((*NvdimmRangeType)(nil)).Elem() +} + +type ObjectUpdateKind string + +const ( + ObjectUpdateKindModify = ObjectUpdateKind("modify") + ObjectUpdateKindEnter = ObjectUpdateKind("enter") + ObjectUpdateKindLeave = ObjectUpdateKind("leave") +) + +func init() { + t["ObjectUpdateKind"] = reflect.TypeOf((*ObjectUpdateKind)(nil)).Elem() +} + +type OvfConsumerOstNodeType string + +const ( + OvfConsumerOstNodeTypeEnvelope = OvfConsumerOstNodeType("envelope") + OvfConsumerOstNodeTypeVirtualSystem = OvfConsumerOstNodeType("virtualSystem") + OvfConsumerOstNodeTypeVirtualSystemCollection = OvfConsumerOstNodeType("virtualSystemCollection") +) + +func init() { + t["OvfConsumerOstNodeType"] = reflect.TypeOf((*OvfConsumerOstNodeType)(nil)).Elem() +} + +type OvfCreateImportSpecParamsDiskProvisioningType string + +const ( + OvfCreateImportSpecParamsDiskProvisioningTypeMonolithicSparse = OvfCreateImportSpecParamsDiskProvisioningType("monolithicSparse") + 
OvfCreateImportSpecParamsDiskProvisioningTypeMonolithicFlat = OvfCreateImportSpecParamsDiskProvisioningType("monolithicFlat") + OvfCreateImportSpecParamsDiskProvisioningTypeTwoGbMaxExtentSparse = OvfCreateImportSpecParamsDiskProvisioningType("twoGbMaxExtentSparse") + OvfCreateImportSpecParamsDiskProvisioningTypeTwoGbMaxExtentFlat = OvfCreateImportSpecParamsDiskProvisioningType("twoGbMaxExtentFlat") + OvfCreateImportSpecParamsDiskProvisioningTypeThin = OvfCreateImportSpecParamsDiskProvisioningType("thin") + OvfCreateImportSpecParamsDiskProvisioningTypeThick = OvfCreateImportSpecParamsDiskProvisioningType("thick") + OvfCreateImportSpecParamsDiskProvisioningTypeSeSparse = OvfCreateImportSpecParamsDiskProvisioningType("seSparse") + OvfCreateImportSpecParamsDiskProvisioningTypeEagerZeroedThick = OvfCreateImportSpecParamsDiskProvisioningType("eagerZeroedThick") + OvfCreateImportSpecParamsDiskProvisioningTypeSparse = OvfCreateImportSpecParamsDiskProvisioningType("sparse") + OvfCreateImportSpecParamsDiskProvisioningTypeFlat = OvfCreateImportSpecParamsDiskProvisioningType("flat") +) + +func init() { + t["OvfCreateImportSpecParamsDiskProvisioningType"] = reflect.TypeOf((*OvfCreateImportSpecParamsDiskProvisioningType)(nil)).Elem() +} + +type PerfFormat string + +const ( + PerfFormatNormal = PerfFormat("normal") + PerfFormatCsv = PerfFormat("csv") +) + +func init() { + t["PerfFormat"] = reflect.TypeOf((*PerfFormat)(nil)).Elem() +} + +type PerfStatsType string + +const ( + PerfStatsTypeAbsolute = PerfStatsType("absolute") + PerfStatsTypeDelta = PerfStatsType("delta") + PerfStatsTypeRate = PerfStatsType("rate") +) + +func init() { + t["PerfStatsType"] = reflect.TypeOf((*PerfStatsType)(nil)).Elem() +} + +type PerfSummaryType string + +const ( + PerfSummaryTypeAverage = PerfSummaryType("average") + PerfSummaryTypeMaximum = PerfSummaryType("maximum") + PerfSummaryTypeMinimum = PerfSummaryType("minimum") + PerfSummaryTypeLatest = PerfSummaryType("latest") + PerfSummaryTypeSummation = PerfSummaryType("summation") + PerfSummaryTypeNone = PerfSummaryType("none") +) + +func init() { + t["PerfSummaryType"] = reflect.TypeOf((*PerfSummaryType)(nil)).Elem() +} + +type PerformanceManagerUnit string + +const ( + PerformanceManagerUnitPercent = PerformanceManagerUnit("percent") + PerformanceManagerUnitKiloBytes = PerformanceManagerUnit("kiloBytes") + PerformanceManagerUnitMegaBytes = PerformanceManagerUnit("megaBytes") + PerformanceManagerUnitMegaHertz = PerformanceManagerUnit("megaHertz") + PerformanceManagerUnitNumber = PerformanceManagerUnit("number") + PerformanceManagerUnitMicrosecond = PerformanceManagerUnit("microsecond") + PerformanceManagerUnitMillisecond = PerformanceManagerUnit("millisecond") + PerformanceManagerUnitSecond = PerformanceManagerUnit("second") + PerformanceManagerUnitKiloBytesPerSecond = PerformanceManagerUnit("kiloBytesPerSecond") + PerformanceManagerUnitMegaBytesPerSecond = PerformanceManagerUnit("megaBytesPerSecond") + PerformanceManagerUnitWatt = PerformanceManagerUnit("watt") + PerformanceManagerUnitJoule = PerformanceManagerUnit("joule") + PerformanceManagerUnitTeraBytes = PerformanceManagerUnit("teraBytes") +) + +func init() { + t["PerformanceManagerUnit"] = reflect.TypeOf((*PerformanceManagerUnit)(nil)).Elem() +} + +type PhysicalNicResourcePoolSchedulerDisallowedReason string + +const ( + PhysicalNicResourcePoolSchedulerDisallowedReasonUserOptOut = PhysicalNicResourcePoolSchedulerDisallowedReason("userOptOut") + PhysicalNicResourcePoolSchedulerDisallowedReasonHardwareUnsupported = 
PhysicalNicResourcePoolSchedulerDisallowedReason("hardwareUnsupported") +) + +func init() { + t["PhysicalNicResourcePoolSchedulerDisallowedReason"] = reflect.TypeOf((*PhysicalNicResourcePoolSchedulerDisallowedReason)(nil)).Elem() +} + +type PhysicalNicVmDirectPathGen2SupportedMode string + +const ( + PhysicalNicVmDirectPathGen2SupportedModeUpt = PhysicalNicVmDirectPathGen2SupportedMode("upt") +) + +func init() { + t["PhysicalNicVmDirectPathGen2SupportedMode"] = reflect.TypeOf((*PhysicalNicVmDirectPathGen2SupportedMode)(nil)).Elem() +} + +type PlacementAffinityRuleRuleScope string + +const ( + PlacementAffinityRuleRuleScopeCluster = PlacementAffinityRuleRuleScope("cluster") + PlacementAffinityRuleRuleScopeHost = PlacementAffinityRuleRuleScope("host") + PlacementAffinityRuleRuleScopeStoragePod = PlacementAffinityRuleRuleScope("storagePod") + PlacementAffinityRuleRuleScopeDatastore = PlacementAffinityRuleRuleScope("datastore") +) + +func init() { + t["PlacementAffinityRuleRuleScope"] = reflect.TypeOf((*PlacementAffinityRuleRuleScope)(nil)).Elem() +} + +type PlacementAffinityRuleRuleType string + +const ( + PlacementAffinityRuleRuleTypeAffinity = PlacementAffinityRuleRuleType("affinity") + PlacementAffinityRuleRuleTypeAntiAffinity = PlacementAffinityRuleRuleType("antiAffinity") + PlacementAffinityRuleRuleTypeSoftAffinity = PlacementAffinityRuleRuleType("softAffinity") + PlacementAffinityRuleRuleTypeSoftAntiAffinity = PlacementAffinityRuleRuleType("softAntiAffinity") +) + +func init() { + t["PlacementAffinityRuleRuleType"] = reflect.TypeOf((*PlacementAffinityRuleRuleType)(nil)).Elem() +} + +type PlacementSpecPlacementType string + +const ( + PlacementSpecPlacementTypeCreate = PlacementSpecPlacementType("create") + PlacementSpecPlacementTypeReconfigure = PlacementSpecPlacementType("reconfigure") + PlacementSpecPlacementTypeRelocate = PlacementSpecPlacementType("relocate") + PlacementSpecPlacementTypeClone = PlacementSpecPlacementType("clone") +) + +func init() { + t["PlacementSpecPlacementType"] = reflect.TypeOf((*PlacementSpecPlacementType)(nil)).Elem() +} + +type PortGroupConnecteeType string + +const ( + PortGroupConnecteeTypeVirtualMachine = PortGroupConnecteeType("virtualMachine") + PortGroupConnecteeTypeSystemManagement = PortGroupConnecteeType("systemManagement") + PortGroupConnecteeTypeHost = PortGroupConnecteeType("host") + PortGroupConnecteeTypeUnknown = PortGroupConnecteeType("unknown") +) + +func init() { + t["PortGroupConnecteeType"] = reflect.TypeOf((*PortGroupConnecteeType)(nil)).Elem() +} + +type ProfileExecuteResultStatus string + +const ( + ProfileExecuteResultStatusSuccess = ProfileExecuteResultStatus("success") + ProfileExecuteResultStatusNeedInput = ProfileExecuteResultStatus("needInput") + ProfileExecuteResultStatusError = ProfileExecuteResultStatus("error") +) + +func init() { + t["ProfileExecuteResultStatus"] = reflect.TypeOf((*ProfileExecuteResultStatus)(nil)).Elem() +} + +type ProfileNumericComparator string + +const ( + ProfileNumericComparatorLessThan = ProfileNumericComparator("lessThan") + ProfileNumericComparatorLessThanEqual = ProfileNumericComparator("lessThanEqual") + ProfileNumericComparatorEqual = ProfileNumericComparator("equal") + ProfileNumericComparatorNotEqual = ProfileNumericComparator("notEqual") + ProfileNumericComparatorGreaterThanEqual = ProfileNumericComparator("greaterThanEqual") + ProfileNumericComparatorGreaterThan = ProfileNumericComparator("greaterThan") +) + +func init() { + t["ProfileNumericComparator"] = 
reflect.TypeOf((*ProfileNumericComparator)(nil)).Elem() +} + +type ProfileParameterMetadataRelationType string + +const ( + ProfileParameterMetadataRelationTypeDynamic_relation = ProfileParameterMetadataRelationType("dynamic_relation") + ProfileParameterMetadataRelationTypeExtensible_relation = ProfileParameterMetadataRelationType("extensible_relation") + ProfileParameterMetadataRelationTypeLocalizable_relation = ProfileParameterMetadataRelationType("localizable_relation") + ProfileParameterMetadataRelationTypeStatic_relation = ProfileParameterMetadataRelationType("static_relation") + ProfileParameterMetadataRelationTypeValidation_relation = ProfileParameterMetadataRelationType("validation_relation") +) + +func init() { + t["ProfileParameterMetadataRelationType"] = reflect.TypeOf((*ProfileParameterMetadataRelationType)(nil)).Elem() +} + +type PropertyChangeOp string + +const ( + PropertyChangeOpAdd = PropertyChangeOp("add") + PropertyChangeOpRemove = PropertyChangeOp("remove") + PropertyChangeOpAssign = PropertyChangeOp("assign") + PropertyChangeOpIndirectRemove = PropertyChangeOp("indirectRemove") +) + +func init() { + t["PropertyChangeOp"] = reflect.TypeOf((*PropertyChangeOp)(nil)).Elem() +} + +type QuarantineModeFaultFaultType string + +const ( + QuarantineModeFaultFaultTypeNoCompatibleNonQuarantinedHost = QuarantineModeFaultFaultType("NoCompatibleNonQuarantinedHost") + QuarantineModeFaultFaultTypeCorrectionDisallowed = QuarantineModeFaultFaultType("CorrectionDisallowed") + QuarantineModeFaultFaultTypeCorrectionImpact = QuarantineModeFaultFaultType("CorrectionImpact") +) + +func init() { + t["QuarantineModeFaultFaultType"] = reflect.TypeOf((*QuarantineModeFaultFaultType)(nil)).Elem() +} + +type QuiesceMode string + +const ( + QuiesceModeApplication = QuiesceMode("application") + QuiesceModeFilesystem = QuiesceMode("filesystem") + QuiesceModeNone = QuiesceMode("none") +) + +func init() { + t["QuiesceMode"] = reflect.TypeOf((*QuiesceMode)(nil)).Elem() +} + +type RecommendationReasonCode string + +const ( + RecommendationReasonCodeFairnessCpuAvg = RecommendationReasonCode("fairnessCpuAvg") + RecommendationReasonCodeFairnessMemAvg = RecommendationReasonCode("fairnessMemAvg") + RecommendationReasonCodeJointAffin = RecommendationReasonCode("jointAffin") + RecommendationReasonCodeAntiAffin = RecommendationReasonCode("antiAffin") + RecommendationReasonCodeHostMaint = RecommendationReasonCode("hostMaint") + RecommendationReasonCodeEnterStandby = RecommendationReasonCode("enterStandby") + RecommendationReasonCodeReservationCpu = RecommendationReasonCode("reservationCpu") + RecommendationReasonCodeReservationMem = RecommendationReasonCode("reservationMem") + RecommendationReasonCodePowerOnVm = RecommendationReasonCode("powerOnVm") + RecommendationReasonCodePowerSaving = RecommendationReasonCode("powerSaving") + RecommendationReasonCodeIncreaseCapacity = RecommendationReasonCode("increaseCapacity") + RecommendationReasonCodeCheckResource = RecommendationReasonCode("checkResource") + RecommendationReasonCodeUnreservedCapacity = RecommendationReasonCode("unreservedCapacity") + RecommendationReasonCodeVmHostHardAffinity = RecommendationReasonCode("vmHostHardAffinity") + RecommendationReasonCodeVmHostSoftAffinity = RecommendationReasonCode("vmHostSoftAffinity") + RecommendationReasonCodeBalanceDatastoreSpaceUsage = RecommendationReasonCode("balanceDatastoreSpaceUsage") + RecommendationReasonCodeBalanceDatastoreIOLoad = RecommendationReasonCode("balanceDatastoreIOLoad") + 
RecommendationReasonCodeBalanceDatastoreIOPSReservation = RecommendationReasonCode("balanceDatastoreIOPSReservation") + RecommendationReasonCodeDatastoreMaint = RecommendationReasonCode("datastoreMaint") + RecommendationReasonCodeVirtualDiskJointAffin = RecommendationReasonCode("virtualDiskJointAffin") + RecommendationReasonCodeVirtualDiskAntiAffin = RecommendationReasonCode("virtualDiskAntiAffin") + RecommendationReasonCodeDatastoreSpaceOutage = RecommendationReasonCode("datastoreSpaceOutage") + RecommendationReasonCodeStoragePlacement = RecommendationReasonCode("storagePlacement") + RecommendationReasonCodeIolbDisabledInternal = RecommendationReasonCode("iolbDisabledInternal") + RecommendationReasonCodeXvmotionPlacement = RecommendationReasonCode("xvmotionPlacement") + RecommendationReasonCodeNetworkBandwidthReservation = RecommendationReasonCode("networkBandwidthReservation") + RecommendationReasonCodeHostInDegradation = RecommendationReasonCode("hostInDegradation") + RecommendationReasonCodeHostExitDegradation = RecommendationReasonCode("hostExitDegradation") + RecommendationReasonCodeMaxVmsConstraint = RecommendationReasonCode("maxVmsConstraint") + RecommendationReasonCodeFtConstraints = RecommendationReasonCode("ftConstraints") +) + +func init() { + t["RecommendationReasonCode"] = reflect.TypeOf((*RecommendationReasonCode)(nil)).Elem() +} + +type RecommendationType string + +const ( + RecommendationTypeV1 = RecommendationType("V1") +) + +func init() { + t["RecommendationType"] = reflect.TypeOf((*RecommendationType)(nil)).Elem() +} + +type ReplicationDiskConfigFaultReasonForFault string + +const ( + ReplicationDiskConfigFaultReasonForFaultDiskNotFound = ReplicationDiskConfigFaultReasonForFault("diskNotFound") + ReplicationDiskConfigFaultReasonForFaultDiskTypeNotSupported = ReplicationDiskConfigFaultReasonForFault("diskTypeNotSupported") + ReplicationDiskConfigFaultReasonForFaultInvalidDiskKey = ReplicationDiskConfigFaultReasonForFault("invalidDiskKey") + ReplicationDiskConfigFaultReasonForFaultInvalidDiskReplicationId = ReplicationDiskConfigFaultReasonForFault("invalidDiskReplicationId") + ReplicationDiskConfigFaultReasonForFaultDuplicateDiskReplicationId = ReplicationDiskConfigFaultReasonForFault("duplicateDiskReplicationId") + ReplicationDiskConfigFaultReasonForFaultInvalidPersistentFilePath = ReplicationDiskConfigFaultReasonForFault("invalidPersistentFilePath") + ReplicationDiskConfigFaultReasonForFaultReconfigureDiskReplicationIdNotAllowed = ReplicationDiskConfigFaultReasonForFault("reconfigureDiskReplicationIdNotAllowed") +) + +func init() { + t["ReplicationDiskConfigFaultReasonForFault"] = reflect.TypeOf((*ReplicationDiskConfigFaultReasonForFault)(nil)).Elem() +} + +type ReplicationVmConfigFaultReasonForFault string + +const ( + ReplicationVmConfigFaultReasonForFaultIncompatibleHwVersion = ReplicationVmConfigFaultReasonForFault("incompatibleHwVersion") + ReplicationVmConfigFaultReasonForFaultInvalidVmReplicationId = ReplicationVmConfigFaultReasonForFault("invalidVmReplicationId") + ReplicationVmConfigFaultReasonForFaultInvalidGenerationNumber = ReplicationVmConfigFaultReasonForFault("invalidGenerationNumber") + ReplicationVmConfigFaultReasonForFaultOutOfBoundsRpoValue = ReplicationVmConfigFaultReasonForFault("outOfBoundsRpoValue") + ReplicationVmConfigFaultReasonForFaultInvalidDestinationIpAddress = ReplicationVmConfigFaultReasonForFault("invalidDestinationIpAddress") + ReplicationVmConfigFaultReasonForFaultInvalidDestinationPort = 
ReplicationVmConfigFaultReasonForFault("invalidDestinationPort") + ReplicationVmConfigFaultReasonForFaultInvalidExtraVmOptions = ReplicationVmConfigFaultReasonForFault("invalidExtraVmOptions") + ReplicationVmConfigFaultReasonForFaultStaleGenerationNumber = ReplicationVmConfigFaultReasonForFault("staleGenerationNumber") + ReplicationVmConfigFaultReasonForFaultReconfigureVmReplicationIdNotAllowed = ReplicationVmConfigFaultReasonForFault("reconfigureVmReplicationIdNotAllowed") + ReplicationVmConfigFaultReasonForFaultCannotRetrieveVmReplicationConfiguration = ReplicationVmConfigFaultReasonForFault("cannotRetrieveVmReplicationConfiguration") + ReplicationVmConfigFaultReasonForFaultReplicationAlreadyEnabled = ReplicationVmConfigFaultReasonForFault("replicationAlreadyEnabled") + ReplicationVmConfigFaultReasonForFaultInvalidPriorConfiguration = ReplicationVmConfigFaultReasonForFault("invalidPriorConfiguration") + ReplicationVmConfigFaultReasonForFaultReplicationNotEnabled = ReplicationVmConfigFaultReasonForFault("replicationNotEnabled") + ReplicationVmConfigFaultReasonForFaultReplicationConfigurationFailed = ReplicationVmConfigFaultReasonForFault("replicationConfigurationFailed") + ReplicationVmConfigFaultReasonForFaultEncryptedVm = ReplicationVmConfigFaultReasonForFault("encryptedVm") + ReplicationVmConfigFaultReasonForFaultInvalidThumbprint = ReplicationVmConfigFaultReasonForFault("invalidThumbprint") + ReplicationVmConfigFaultReasonForFaultIncompatibleDevice = ReplicationVmConfigFaultReasonForFault("incompatibleDevice") +) + +func init() { + t["ReplicationVmConfigFaultReasonForFault"] = reflect.TypeOf((*ReplicationVmConfigFaultReasonForFault)(nil)).Elem() +} + +type ReplicationVmFaultReasonForFault string + +const ( + ReplicationVmFaultReasonForFaultNotConfigured = ReplicationVmFaultReasonForFault("notConfigured") + ReplicationVmFaultReasonForFaultPoweredOff = ReplicationVmFaultReasonForFault("poweredOff") + ReplicationVmFaultReasonForFaultSuspended = ReplicationVmFaultReasonForFault("suspended") + ReplicationVmFaultReasonForFaultPoweredOn = ReplicationVmFaultReasonForFault("poweredOn") + ReplicationVmFaultReasonForFaultOfflineReplicating = ReplicationVmFaultReasonForFault("offlineReplicating") + ReplicationVmFaultReasonForFaultInvalidState = ReplicationVmFaultReasonForFault("invalidState") + ReplicationVmFaultReasonForFaultInvalidInstanceId = ReplicationVmFaultReasonForFault("invalidInstanceId") + ReplicationVmFaultReasonForFaultCloseDiskError = ReplicationVmFaultReasonForFault("closeDiskError") + ReplicationVmFaultReasonForFaultGroupExist = ReplicationVmFaultReasonForFault("groupExist") +) + +func init() { + t["ReplicationVmFaultReasonForFault"] = reflect.TypeOf((*ReplicationVmFaultReasonForFault)(nil)).Elem() +} + +type ReplicationVmInProgressFaultActivity string + +const ( + ReplicationVmInProgressFaultActivityFullSync = ReplicationVmInProgressFaultActivity("fullSync") + ReplicationVmInProgressFaultActivityDelta = ReplicationVmInProgressFaultActivity("delta") +) + +func init() { + t["ReplicationVmInProgressFaultActivity"] = reflect.TypeOf((*ReplicationVmInProgressFaultActivity)(nil)).Elem() +} + +type ReplicationVmState string + +const ( + ReplicationVmStateNone = ReplicationVmState("none") + ReplicationVmStatePaused = ReplicationVmState("paused") + ReplicationVmStateSyncing = ReplicationVmState("syncing") + ReplicationVmStateIdle = ReplicationVmState("idle") + ReplicationVmStateActive = ReplicationVmState("active") + ReplicationVmStateError = ReplicationVmState("error") +) + +func init() { 
+ t["ReplicationVmState"] = reflect.TypeOf((*ReplicationVmState)(nil)).Elem() +} + +type ScheduledHardwareUpgradeInfoHardwareUpgradePolicy string + +const ( + ScheduledHardwareUpgradeInfoHardwareUpgradePolicyNever = ScheduledHardwareUpgradeInfoHardwareUpgradePolicy("never") + ScheduledHardwareUpgradeInfoHardwareUpgradePolicyOnSoftPowerOff = ScheduledHardwareUpgradeInfoHardwareUpgradePolicy("onSoftPowerOff") + ScheduledHardwareUpgradeInfoHardwareUpgradePolicyAlways = ScheduledHardwareUpgradeInfoHardwareUpgradePolicy("always") +) + +func init() { + t["ScheduledHardwareUpgradeInfoHardwareUpgradePolicy"] = reflect.TypeOf((*ScheduledHardwareUpgradeInfoHardwareUpgradePolicy)(nil)).Elem() +} + +type ScheduledHardwareUpgradeInfoHardwareUpgradeStatus string + +const ( + ScheduledHardwareUpgradeInfoHardwareUpgradeStatusNone = ScheduledHardwareUpgradeInfoHardwareUpgradeStatus("none") + ScheduledHardwareUpgradeInfoHardwareUpgradeStatusPending = ScheduledHardwareUpgradeInfoHardwareUpgradeStatus("pending") + ScheduledHardwareUpgradeInfoHardwareUpgradeStatusSuccess = ScheduledHardwareUpgradeInfoHardwareUpgradeStatus("success") + ScheduledHardwareUpgradeInfoHardwareUpgradeStatusFailed = ScheduledHardwareUpgradeInfoHardwareUpgradeStatus("failed") +) + +func init() { + t["ScheduledHardwareUpgradeInfoHardwareUpgradeStatus"] = reflect.TypeOf((*ScheduledHardwareUpgradeInfoHardwareUpgradeStatus)(nil)).Elem() +} + +type ScsiDiskType string + +const ( + ScsiDiskTypeNative512 = ScsiDiskType("native512") + ScsiDiskTypeEmulated512 = ScsiDiskType("emulated512") + ScsiDiskTypeNative4k = ScsiDiskType("native4k") + ScsiDiskTypeSoftwareEmulated4k = ScsiDiskType("SoftwareEmulated4k") + ScsiDiskTypeUnknown = ScsiDiskType("unknown") +) + +func init() { + t["ScsiDiskType"] = reflect.TypeOf((*ScsiDiskType)(nil)).Elem() +} + +type ScsiLunDescriptorQuality string + +const ( + ScsiLunDescriptorQualityHighQuality = ScsiLunDescriptorQuality("highQuality") + ScsiLunDescriptorQualityMediumQuality = ScsiLunDescriptorQuality("mediumQuality") + ScsiLunDescriptorQualityLowQuality = ScsiLunDescriptorQuality("lowQuality") + ScsiLunDescriptorQualityUnknownQuality = ScsiLunDescriptorQuality("unknownQuality") +) + +func init() { + t["ScsiLunDescriptorQuality"] = reflect.TypeOf((*ScsiLunDescriptorQuality)(nil)).Elem() +} + +type ScsiLunState string + +const ( + ScsiLunStateUnknownState = ScsiLunState("unknownState") + ScsiLunStateOk = ScsiLunState("ok") + ScsiLunStateError = ScsiLunState("error") + ScsiLunStateOff = ScsiLunState("off") + ScsiLunStateQuiesced = ScsiLunState("quiesced") + ScsiLunStateDegraded = ScsiLunState("degraded") + ScsiLunStateLostCommunication = ScsiLunState("lostCommunication") + ScsiLunStateTimeout = ScsiLunState("timeout") +) + +func init() { + t["ScsiLunState"] = reflect.TypeOf((*ScsiLunState)(nil)).Elem() +} + +type ScsiLunType string + +const ( + ScsiLunTypeDisk = ScsiLunType("disk") + ScsiLunTypeTape = ScsiLunType("tape") + ScsiLunTypePrinter = ScsiLunType("printer") + ScsiLunTypeProcessor = ScsiLunType("processor") + ScsiLunTypeWorm = ScsiLunType("worm") + ScsiLunTypeCdrom = ScsiLunType("cdrom") + ScsiLunTypeScanner = ScsiLunType("scanner") + ScsiLunTypeOpticalDevice = ScsiLunType("opticalDevice") + ScsiLunTypeMediaChanger = ScsiLunType("mediaChanger") + ScsiLunTypeCommunications = ScsiLunType("communications") + ScsiLunTypeStorageArrayController = ScsiLunType("storageArrayController") + ScsiLunTypeEnclosure = ScsiLunType("enclosure") + ScsiLunTypeUnknown = ScsiLunType("unknown") +) + +func init() { + 
t["ScsiLunType"] = reflect.TypeOf((*ScsiLunType)(nil)).Elem() +} + +type ScsiLunVStorageSupportStatus string + +const ( + ScsiLunVStorageSupportStatusVStorageSupported = ScsiLunVStorageSupportStatus("vStorageSupported") + ScsiLunVStorageSupportStatusVStorageUnsupported = ScsiLunVStorageSupportStatus("vStorageUnsupported") + ScsiLunVStorageSupportStatusVStorageUnknown = ScsiLunVStorageSupportStatus("vStorageUnknown") +) + +func init() { + t["ScsiLunVStorageSupportStatus"] = reflect.TypeOf((*ScsiLunVStorageSupportStatus)(nil)).Elem() +} + +type SessionManagerHttpServiceRequestSpecMethod string + +const ( + SessionManagerHttpServiceRequestSpecMethodHttpOptions = SessionManagerHttpServiceRequestSpecMethod("httpOptions") + SessionManagerHttpServiceRequestSpecMethodHttpGet = SessionManagerHttpServiceRequestSpecMethod("httpGet") + SessionManagerHttpServiceRequestSpecMethodHttpHead = SessionManagerHttpServiceRequestSpecMethod("httpHead") + SessionManagerHttpServiceRequestSpecMethodHttpPost = SessionManagerHttpServiceRequestSpecMethod("httpPost") + SessionManagerHttpServiceRequestSpecMethodHttpPut = SessionManagerHttpServiceRequestSpecMethod("httpPut") + SessionManagerHttpServiceRequestSpecMethodHttpDelete = SessionManagerHttpServiceRequestSpecMethod("httpDelete") + SessionManagerHttpServiceRequestSpecMethodHttpTrace = SessionManagerHttpServiceRequestSpecMethod("httpTrace") + SessionManagerHttpServiceRequestSpecMethodHttpConnect = SessionManagerHttpServiceRequestSpecMethod("httpConnect") +) + +func init() { + t["SessionManagerHttpServiceRequestSpecMethod"] = reflect.TypeOf((*SessionManagerHttpServiceRequestSpecMethod)(nil)).Elem() +} + +type SharesLevel string + +const ( + SharesLevelLow = SharesLevel("low") + SharesLevelNormal = SharesLevel("normal") + SharesLevelHigh = SharesLevel("high") + SharesLevelCustom = SharesLevel("custom") +) + +func init() { + t["SharesLevel"] = reflect.TypeOf((*SharesLevel)(nil)).Elem() +} + +type SimpleCommandEncoding string + +const ( + SimpleCommandEncodingCSV = SimpleCommandEncoding("CSV") + SimpleCommandEncodingHEX = SimpleCommandEncoding("HEX") + SimpleCommandEncodingSTRING = SimpleCommandEncoding("STRING") +) + +func init() { + t["SimpleCommandEncoding"] = reflect.TypeOf((*SimpleCommandEncoding)(nil)).Elem() +} + +type SlpDiscoveryMethod string + +const ( + SlpDiscoveryMethodSlpDhcp = SlpDiscoveryMethod("slpDhcp") + SlpDiscoveryMethodSlpAutoUnicast = SlpDiscoveryMethod("slpAutoUnicast") + SlpDiscoveryMethodSlpAutoMulticast = SlpDiscoveryMethod("slpAutoMulticast") + SlpDiscoveryMethodSlpManual = SlpDiscoveryMethod("slpManual") +) + +func init() { + t["SlpDiscoveryMethod"] = reflect.TypeOf((*SlpDiscoveryMethod)(nil)).Elem() +} + +type SoftwarePackageConstraint string + +const ( + SoftwarePackageConstraintEquals = SoftwarePackageConstraint("equals") + SoftwarePackageConstraintLessThan = SoftwarePackageConstraint("lessThan") + SoftwarePackageConstraintLessThanEqual = SoftwarePackageConstraint("lessThanEqual") + SoftwarePackageConstraintGreaterThanEqual = SoftwarePackageConstraint("greaterThanEqual") + SoftwarePackageConstraintGreaterThan = SoftwarePackageConstraint("greaterThan") +) + +func init() { + t["SoftwarePackageConstraint"] = reflect.TypeOf((*SoftwarePackageConstraint)(nil)).Elem() +} + +type SoftwarePackageVibType string + +const ( + SoftwarePackageVibTypeBootbank = SoftwarePackageVibType("bootbank") + SoftwarePackageVibTypeTools = SoftwarePackageVibType("tools") + SoftwarePackageVibTypeMeta = SoftwarePackageVibType("meta") +) + +func init() { + 
t["SoftwarePackageVibType"] = reflect.TypeOf((*SoftwarePackageVibType)(nil)).Elem() +} + +type StateAlarmOperator string + +const ( + StateAlarmOperatorIsEqual = StateAlarmOperator("isEqual") + StateAlarmOperatorIsUnequal = StateAlarmOperator("isUnequal") +) + +func init() { + t["StateAlarmOperator"] = reflect.TypeOf((*StateAlarmOperator)(nil)).Elem() +} + +type StorageDrsPodConfigInfoBehavior string + +const ( + StorageDrsPodConfigInfoBehaviorManual = StorageDrsPodConfigInfoBehavior("manual") + StorageDrsPodConfigInfoBehaviorAutomated = StorageDrsPodConfigInfoBehavior("automated") +) + +func init() { + t["StorageDrsPodConfigInfoBehavior"] = reflect.TypeOf((*StorageDrsPodConfigInfoBehavior)(nil)).Elem() +} + +type StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode string + +const ( + StorageDrsSpaceLoadBalanceConfigSpaceThresholdModeUtilization = StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode("utilization") + StorageDrsSpaceLoadBalanceConfigSpaceThresholdModeFreeSpace = StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode("freeSpace") +) + +func init() { + t["StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode"] = reflect.TypeOf((*StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode)(nil)).Elem() +} + +type StorageIORMThresholdMode string + +const ( + StorageIORMThresholdModeAutomatic = StorageIORMThresholdMode("automatic") + StorageIORMThresholdModeManual = StorageIORMThresholdMode("manual") +) + +func init() { + t["StorageIORMThresholdMode"] = reflect.TypeOf((*StorageIORMThresholdMode)(nil)).Elem() +} + +type StoragePlacementSpecPlacementType string + +const ( + StoragePlacementSpecPlacementTypeCreate = StoragePlacementSpecPlacementType("create") + StoragePlacementSpecPlacementTypeReconfigure = StoragePlacementSpecPlacementType("reconfigure") + StoragePlacementSpecPlacementTypeRelocate = StoragePlacementSpecPlacementType("relocate") + StoragePlacementSpecPlacementTypeClone = StoragePlacementSpecPlacementType("clone") +) + +func init() { + t["StoragePlacementSpecPlacementType"] = reflect.TypeOf((*StoragePlacementSpecPlacementType)(nil)).Elem() +} + +type TaskFilterSpecRecursionOption string + +const ( + TaskFilterSpecRecursionOptionSelf = TaskFilterSpecRecursionOption("self") + TaskFilterSpecRecursionOptionChildren = TaskFilterSpecRecursionOption("children") + TaskFilterSpecRecursionOptionAll = TaskFilterSpecRecursionOption("all") +) + +func init() { + t["TaskFilterSpecRecursionOption"] = reflect.TypeOf((*TaskFilterSpecRecursionOption)(nil)).Elem() +} + +type TaskFilterSpecTimeOption string + +const ( + TaskFilterSpecTimeOptionQueuedTime = TaskFilterSpecTimeOption("queuedTime") + TaskFilterSpecTimeOptionStartedTime = TaskFilterSpecTimeOption("startedTime") + TaskFilterSpecTimeOptionCompletedTime = TaskFilterSpecTimeOption("completedTime") +) + +func init() { + t["TaskFilterSpecTimeOption"] = reflect.TypeOf((*TaskFilterSpecTimeOption)(nil)).Elem() +} + +type TaskInfoState string + +const ( + TaskInfoStateQueued = TaskInfoState("queued") + TaskInfoStateRunning = TaskInfoState("running") + TaskInfoStateSuccess = TaskInfoState("success") + TaskInfoStateError = TaskInfoState("error") +) + +func init() { + t["TaskInfoState"] = reflect.TypeOf((*TaskInfoState)(nil)).Elem() +} + +type ThirdPartyLicenseAssignmentFailedReason string + +const ( + ThirdPartyLicenseAssignmentFailedReasonLicenseAssignmentFailed = ThirdPartyLicenseAssignmentFailedReason("licenseAssignmentFailed") + ThirdPartyLicenseAssignmentFailedReasonModuleNotInstalled = ThirdPartyLicenseAssignmentFailedReason("moduleNotInstalled") +) 
+ +func init() { + t["ThirdPartyLicenseAssignmentFailedReason"] = reflect.TypeOf((*ThirdPartyLicenseAssignmentFailedReason)(nil)).Elem() +} + +type UpgradePolicy string + +const ( + UpgradePolicyManual = UpgradePolicy("manual") + UpgradePolicyUpgradeAtPowerCycle = UpgradePolicy("upgradeAtPowerCycle") +) + +func init() { + t["UpgradePolicy"] = reflect.TypeOf((*UpgradePolicy)(nil)).Elem() +} + +type VAppAutoStartAction string + +const ( + VAppAutoStartActionNone = VAppAutoStartAction("none") + VAppAutoStartActionPowerOn = VAppAutoStartAction("powerOn") + VAppAutoStartActionPowerOff = VAppAutoStartAction("powerOff") + VAppAutoStartActionGuestShutdown = VAppAutoStartAction("guestShutdown") + VAppAutoStartActionSuspend = VAppAutoStartAction("suspend") +) + +func init() { + t["VAppAutoStartAction"] = reflect.TypeOf((*VAppAutoStartAction)(nil)).Elem() +} + +type VAppCloneSpecProvisioningType string + +const ( + VAppCloneSpecProvisioningTypeSameAsSource = VAppCloneSpecProvisioningType("sameAsSource") + VAppCloneSpecProvisioningTypeThin = VAppCloneSpecProvisioningType("thin") + VAppCloneSpecProvisioningTypeThick = VAppCloneSpecProvisioningType("thick") +) + +func init() { + t["VAppCloneSpecProvisioningType"] = reflect.TypeOf((*VAppCloneSpecProvisioningType)(nil)).Elem() +} + +type VAppIPAssignmentInfoAllocationSchemes string + +const ( + VAppIPAssignmentInfoAllocationSchemesDhcp = VAppIPAssignmentInfoAllocationSchemes("dhcp") + VAppIPAssignmentInfoAllocationSchemesOvfenv = VAppIPAssignmentInfoAllocationSchemes("ovfenv") +) + +func init() { + t["VAppIPAssignmentInfoAllocationSchemes"] = reflect.TypeOf((*VAppIPAssignmentInfoAllocationSchemes)(nil)).Elem() +} + +type VAppIPAssignmentInfoIpAllocationPolicy string + +const ( + VAppIPAssignmentInfoIpAllocationPolicyDhcpPolicy = VAppIPAssignmentInfoIpAllocationPolicy("dhcpPolicy") + VAppIPAssignmentInfoIpAllocationPolicyTransientPolicy = VAppIPAssignmentInfoIpAllocationPolicy("transientPolicy") + VAppIPAssignmentInfoIpAllocationPolicyFixedPolicy = VAppIPAssignmentInfoIpAllocationPolicy("fixedPolicy") + VAppIPAssignmentInfoIpAllocationPolicyFixedAllocatedPolicy = VAppIPAssignmentInfoIpAllocationPolicy("fixedAllocatedPolicy") +) + +func init() { + t["VAppIPAssignmentInfoIpAllocationPolicy"] = reflect.TypeOf((*VAppIPAssignmentInfoIpAllocationPolicy)(nil)).Elem() +} + +type VAppIPAssignmentInfoProtocols string + +const ( + VAppIPAssignmentInfoProtocolsIPv4 = VAppIPAssignmentInfoProtocols("IPv4") + VAppIPAssignmentInfoProtocolsIPv6 = VAppIPAssignmentInfoProtocols("IPv6") +) + +func init() { + t["VAppIPAssignmentInfoProtocols"] = reflect.TypeOf((*VAppIPAssignmentInfoProtocols)(nil)).Elem() +} + +type VFlashModuleNotSupportedReason string + +const ( + VFlashModuleNotSupportedReasonCacheModeNotSupported = VFlashModuleNotSupportedReason("CacheModeNotSupported") + VFlashModuleNotSupportedReasonCacheConsistencyTypeNotSupported = VFlashModuleNotSupportedReason("CacheConsistencyTypeNotSupported") + VFlashModuleNotSupportedReasonCacheBlockSizeNotSupported = VFlashModuleNotSupportedReason("CacheBlockSizeNotSupported") + VFlashModuleNotSupportedReasonCacheReservationNotSupported = VFlashModuleNotSupportedReason("CacheReservationNotSupported") + VFlashModuleNotSupportedReasonDiskSizeNotSupported = VFlashModuleNotSupportedReason("DiskSizeNotSupported") +) + +func init() { + t["VFlashModuleNotSupportedReason"] = reflect.TypeOf((*VFlashModuleNotSupportedReason)(nil)).Elem() +} + +type VMotionCompatibilityType string + +const ( + VMotionCompatibilityTypeCpu = 
VMotionCompatibilityType("cpu") + VMotionCompatibilityTypeSoftware = VMotionCompatibilityType("software") +) + +func init() { + t["VMotionCompatibilityType"] = reflect.TypeOf((*VMotionCompatibilityType)(nil)).Elem() +} + +type VMwareDVSTeamingMatchStatus string + +const ( + VMwareDVSTeamingMatchStatusIphashMatch = VMwareDVSTeamingMatchStatus("iphashMatch") + VMwareDVSTeamingMatchStatusNonIphashMatch = VMwareDVSTeamingMatchStatus("nonIphashMatch") + VMwareDVSTeamingMatchStatusIphashMismatch = VMwareDVSTeamingMatchStatus("iphashMismatch") + VMwareDVSTeamingMatchStatusNonIphashMismatch = VMwareDVSTeamingMatchStatus("nonIphashMismatch") +) + +func init() { + t["VMwareDVSTeamingMatchStatus"] = reflect.TypeOf((*VMwareDVSTeamingMatchStatus)(nil)).Elem() +} + +type VMwareDVSVspanSessionEncapType string + +const ( + VMwareDVSVspanSessionEncapTypeGre = VMwareDVSVspanSessionEncapType("gre") + VMwareDVSVspanSessionEncapTypeErspan2 = VMwareDVSVspanSessionEncapType("erspan2") + VMwareDVSVspanSessionEncapTypeErspan3 = VMwareDVSVspanSessionEncapType("erspan3") +) + +func init() { + t["VMwareDVSVspanSessionEncapType"] = reflect.TypeOf((*VMwareDVSVspanSessionEncapType)(nil)).Elem() +} + +type VMwareDVSVspanSessionType string + +const ( + VMwareDVSVspanSessionTypeMixedDestMirror = VMwareDVSVspanSessionType("mixedDestMirror") + VMwareDVSVspanSessionTypeDvPortMirror = VMwareDVSVspanSessionType("dvPortMirror") + VMwareDVSVspanSessionTypeRemoteMirrorSource = VMwareDVSVspanSessionType("remoteMirrorSource") + VMwareDVSVspanSessionTypeRemoteMirrorDest = VMwareDVSVspanSessionType("remoteMirrorDest") + VMwareDVSVspanSessionTypeEncapsulatedRemoteMirrorSource = VMwareDVSVspanSessionType("encapsulatedRemoteMirrorSource") +) + +func init() { + t["VMwareDVSVspanSessionType"] = reflect.TypeOf((*VMwareDVSVspanSessionType)(nil)).Elem() +} + +type VMwareDvsLacpApiVersion string + +const ( + VMwareDvsLacpApiVersionSingleLag = VMwareDvsLacpApiVersion("singleLag") + VMwareDvsLacpApiVersionMultipleLag = VMwareDvsLacpApiVersion("multipleLag") +) + +func init() { + t["VMwareDvsLacpApiVersion"] = reflect.TypeOf((*VMwareDvsLacpApiVersion)(nil)).Elem() +} + +type VMwareDvsLacpLoadBalanceAlgorithm string + +const ( + VMwareDvsLacpLoadBalanceAlgorithmSrcMac = VMwareDvsLacpLoadBalanceAlgorithm("srcMac") + VMwareDvsLacpLoadBalanceAlgorithmDestMac = VMwareDvsLacpLoadBalanceAlgorithm("destMac") + VMwareDvsLacpLoadBalanceAlgorithmSrcDestMac = VMwareDvsLacpLoadBalanceAlgorithm("srcDestMac") + VMwareDvsLacpLoadBalanceAlgorithmDestIpVlan = VMwareDvsLacpLoadBalanceAlgorithm("destIpVlan") + VMwareDvsLacpLoadBalanceAlgorithmSrcIpVlan = VMwareDvsLacpLoadBalanceAlgorithm("srcIpVlan") + VMwareDvsLacpLoadBalanceAlgorithmSrcDestIpVlan = VMwareDvsLacpLoadBalanceAlgorithm("srcDestIpVlan") + VMwareDvsLacpLoadBalanceAlgorithmDestTcpUdpPort = VMwareDvsLacpLoadBalanceAlgorithm("destTcpUdpPort") + VMwareDvsLacpLoadBalanceAlgorithmSrcTcpUdpPort = VMwareDvsLacpLoadBalanceAlgorithm("srcTcpUdpPort") + VMwareDvsLacpLoadBalanceAlgorithmSrcDestTcpUdpPort = VMwareDvsLacpLoadBalanceAlgorithm("srcDestTcpUdpPort") + VMwareDvsLacpLoadBalanceAlgorithmDestIpTcpUdpPort = VMwareDvsLacpLoadBalanceAlgorithm("destIpTcpUdpPort") + VMwareDvsLacpLoadBalanceAlgorithmSrcIpTcpUdpPort = VMwareDvsLacpLoadBalanceAlgorithm("srcIpTcpUdpPort") + VMwareDvsLacpLoadBalanceAlgorithmSrcDestIpTcpUdpPort = VMwareDvsLacpLoadBalanceAlgorithm("srcDestIpTcpUdpPort") + VMwareDvsLacpLoadBalanceAlgorithmDestIpTcpUdpPortVlan = VMwareDvsLacpLoadBalanceAlgorithm("destIpTcpUdpPortVlan") + 
VMwareDvsLacpLoadBalanceAlgorithmSrcIpTcpUdpPortVlan = VMwareDvsLacpLoadBalanceAlgorithm("srcIpTcpUdpPortVlan") + VMwareDvsLacpLoadBalanceAlgorithmSrcDestIpTcpUdpPortVlan = VMwareDvsLacpLoadBalanceAlgorithm("srcDestIpTcpUdpPortVlan") + VMwareDvsLacpLoadBalanceAlgorithmDestIp = VMwareDvsLacpLoadBalanceAlgorithm("destIp") + VMwareDvsLacpLoadBalanceAlgorithmSrcIp = VMwareDvsLacpLoadBalanceAlgorithm("srcIp") + VMwareDvsLacpLoadBalanceAlgorithmSrcDestIp = VMwareDvsLacpLoadBalanceAlgorithm("srcDestIp") + VMwareDvsLacpLoadBalanceAlgorithmVlan = VMwareDvsLacpLoadBalanceAlgorithm("vlan") + VMwareDvsLacpLoadBalanceAlgorithmSrcPortId = VMwareDvsLacpLoadBalanceAlgorithm("srcPortId") +) + +func init() { + t["VMwareDvsLacpLoadBalanceAlgorithm"] = reflect.TypeOf((*VMwareDvsLacpLoadBalanceAlgorithm)(nil)).Elem() +} + +type VMwareDvsMulticastFilteringMode string + +const ( + VMwareDvsMulticastFilteringModeLegacyFiltering = VMwareDvsMulticastFilteringMode("legacyFiltering") + VMwareDvsMulticastFilteringModeSnooping = VMwareDvsMulticastFilteringMode("snooping") +) + +func init() { + t["VMwareDvsMulticastFilteringMode"] = reflect.TypeOf((*VMwareDvsMulticastFilteringMode)(nil)).Elem() +} + +type VMwareUplinkLacpMode string + +const ( + VMwareUplinkLacpModeActive = VMwareUplinkLacpMode("active") + VMwareUplinkLacpModePassive = VMwareUplinkLacpMode("passive") +) + +func init() { + t["VMwareUplinkLacpMode"] = reflect.TypeOf((*VMwareUplinkLacpMode)(nil)).Elem() +} + +type VStorageObjectConsumptionType string + +const ( + VStorageObjectConsumptionTypeDisk = VStorageObjectConsumptionType("disk") +) + +func init() { + t["VStorageObjectConsumptionType"] = reflect.TypeOf((*VStorageObjectConsumptionType)(nil)).Elem() +} + +type ValidateMigrationTestType string + +const ( + ValidateMigrationTestTypeSourceTests = ValidateMigrationTestType("sourceTests") + ValidateMigrationTestTypeCompatibilityTests = ValidateMigrationTestType("compatibilityTests") + ValidateMigrationTestTypeDiskAccessibilityTests = ValidateMigrationTestType("diskAccessibilityTests") + ValidateMigrationTestTypeResourceTests = ValidateMigrationTestType("resourceTests") +) + +func init() { + t["ValidateMigrationTestType"] = reflect.TypeOf((*ValidateMigrationTestType)(nil)).Elem() +} + +type VchaClusterMode string + +const ( + VchaClusterModeEnabled = VchaClusterMode("enabled") + VchaClusterModeDisabled = VchaClusterMode("disabled") + VchaClusterModeMaintenance = VchaClusterMode("maintenance") +) + +func init() { + t["VchaClusterMode"] = reflect.TypeOf((*VchaClusterMode)(nil)).Elem() +} + +type VchaClusterState string + +const ( + VchaClusterStateHealthy = VchaClusterState("healthy") + VchaClusterStateDegraded = VchaClusterState("degraded") + VchaClusterStateIsolated = VchaClusterState("isolated") +) + +func init() { + t["VchaClusterState"] = reflect.TypeOf((*VchaClusterState)(nil)).Elem() +} + +type VchaNodeRole string + +const ( + VchaNodeRoleActive = VchaNodeRole("active") + VchaNodeRolePassive = VchaNodeRole("passive") + VchaNodeRoleWitness = VchaNodeRole("witness") +) + +func init() { + t["VchaNodeRole"] = reflect.TypeOf((*VchaNodeRole)(nil)).Elem() +} + +type VchaNodeState string + +const ( + VchaNodeStateUp = VchaNodeState("up") + VchaNodeStateDown = VchaNodeState("down") +) + +func init() { + t["VchaNodeState"] = reflect.TypeOf((*VchaNodeState)(nil)).Elem() +} + +type VchaState string + +const ( + VchaStateConfigured = VchaState("configured") + VchaStateNotConfigured = VchaState("notConfigured") + VchaStateInvalid = VchaState("invalid") + 
VchaStatePrepared = VchaState("prepared") +) + +func init() { + t["VchaState"] = reflect.TypeOf((*VchaState)(nil)).Elem() +} + +type VirtualAppVAppState string + +const ( + VirtualAppVAppStateStarted = VirtualAppVAppState("started") + VirtualAppVAppStateStopped = VirtualAppVAppState("stopped") + VirtualAppVAppStateStarting = VirtualAppVAppState("starting") + VirtualAppVAppStateStopping = VirtualAppVAppState("stopping") +) + +func init() { + t["VirtualAppVAppState"] = reflect.TypeOf((*VirtualAppVAppState)(nil)).Elem() +} + +type VirtualDeviceConfigSpecFileOperation string + +const ( + VirtualDeviceConfigSpecFileOperationCreate = VirtualDeviceConfigSpecFileOperation("create") + VirtualDeviceConfigSpecFileOperationDestroy = VirtualDeviceConfigSpecFileOperation("destroy") + VirtualDeviceConfigSpecFileOperationReplace = VirtualDeviceConfigSpecFileOperation("replace") +) + +func init() { + t["VirtualDeviceConfigSpecFileOperation"] = reflect.TypeOf((*VirtualDeviceConfigSpecFileOperation)(nil)).Elem() +} + +type VirtualDeviceConfigSpecOperation string + +const ( + VirtualDeviceConfigSpecOperationAdd = VirtualDeviceConfigSpecOperation("add") + VirtualDeviceConfigSpecOperationRemove = VirtualDeviceConfigSpecOperation("remove") + VirtualDeviceConfigSpecOperationEdit = VirtualDeviceConfigSpecOperation("edit") +) + +func init() { + t["VirtualDeviceConfigSpecOperation"] = reflect.TypeOf((*VirtualDeviceConfigSpecOperation)(nil)).Elem() +} + +type VirtualDeviceConnectInfoMigrateConnectOp string + +const ( + VirtualDeviceConnectInfoMigrateConnectOpConnect = VirtualDeviceConnectInfoMigrateConnectOp("connect") + VirtualDeviceConnectInfoMigrateConnectOpDisconnect = VirtualDeviceConnectInfoMigrateConnectOp("disconnect") + VirtualDeviceConnectInfoMigrateConnectOpUnset = VirtualDeviceConnectInfoMigrateConnectOp("unset") +) + +func init() { + t["VirtualDeviceConnectInfoMigrateConnectOp"] = reflect.TypeOf((*VirtualDeviceConnectInfoMigrateConnectOp)(nil)).Elem() +} + +type VirtualDeviceConnectInfoStatus string + +const ( + VirtualDeviceConnectInfoStatusOk = VirtualDeviceConnectInfoStatus("ok") + VirtualDeviceConnectInfoStatusRecoverableError = VirtualDeviceConnectInfoStatus("recoverableError") + VirtualDeviceConnectInfoStatusUnrecoverableError = VirtualDeviceConnectInfoStatus("unrecoverableError") + VirtualDeviceConnectInfoStatusUntried = VirtualDeviceConnectInfoStatus("untried") +) + +func init() { + t["VirtualDeviceConnectInfoStatus"] = reflect.TypeOf((*VirtualDeviceConnectInfoStatus)(nil)).Elem() +} + +type VirtualDeviceFileExtension string + +const ( + VirtualDeviceFileExtensionIso = VirtualDeviceFileExtension("iso") + VirtualDeviceFileExtensionFlp = VirtualDeviceFileExtension("flp") + VirtualDeviceFileExtensionVmdk = VirtualDeviceFileExtension("vmdk") + VirtualDeviceFileExtensionDsk = VirtualDeviceFileExtension("dsk") + VirtualDeviceFileExtensionRdm = VirtualDeviceFileExtension("rdm") +) + +func init() { + t["VirtualDeviceFileExtension"] = reflect.TypeOf((*VirtualDeviceFileExtension)(nil)).Elem() +} + +type VirtualDeviceURIBackingOptionDirection string + +const ( + VirtualDeviceURIBackingOptionDirectionServer = VirtualDeviceURIBackingOptionDirection("server") + VirtualDeviceURIBackingOptionDirectionClient = VirtualDeviceURIBackingOptionDirection("client") +) + +func init() { + t["VirtualDeviceURIBackingOptionDirection"] = reflect.TypeOf((*VirtualDeviceURIBackingOptionDirection)(nil)).Elem() +} + +type VirtualDiskAdapterType string + +const ( + VirtualDiskAdapterTypeIde = VirtualDiskAdapterType("ide") + 
VirtualDiskAdapterTypeBusLogic = VirtualDiskAdapterType("busLogic") + VirtualDiskAdapterTypeLsiLogic = VirtualDiskAdapterType("lsiLogic") +) + +func init() { + t["VirtualDiskAdapterType"] = reflect.TypeOf((*VirtualDiskAdapterType)(nil)).Elem() +} + +type VirtualDiskCompatibilityMode string + +const ( + VirtualDiskCompatibilityModeVirtualMode = VirtualDiskCompatibilityMode("virtualMode") + VirtualDiskCompatibilityModePhysicalMode = VirtualDiskCompatibilityMode("physicalMode") +) + +func init() { + t["VirtualDiskCompatibilityMode"] = reflect.TypeOf((*VirtualDiskCompatibilityMode)(nil)).Elem() +} + +type VirtualDiskDeltaDiskFormat string + +const ( + VirtualDiskDeltaDiskFormatRedoLogFormat = VirtualDiskDeltaDiskFormat("redoLogFormat") + VirtualDiskDeltaDiskFormatNativeFormat = VirtualDiskDeltaDiskFormat("nativeFormat") + VirtualDiskDeltaDiskFormatSeSparseFormat = VirtualDiskDeltaDiskFormat("seSparseFormat") +) + +func init() { + t["VirtualDiskDeltaDiskFormat"] = reflect.TypeOf((*VirtualDiskDeltaDiskFormat)(nil)).Elem() +} + +type VirtualDiskDeltaDiskFormatVariant string + +const ( + VirtualDiskDeltaDiskFormatVariantVmfsSparseVariant = VirtualDiskDeltaDiskFormatVariant("vmfsSparseVariant") + VirtualDiskDeltaDiskFormatVariantVsanSparseVariant = VirtualDiskDeltaDiskFormatVariant("vsanSparseVariant") +) + +func init() { + t["VirtualDiskDeltaDiskFormatVariant"] = reflect.TypeOf((*VirtualDiskDeltaDiskFormatVariant)(nil)).Elem() +} + +type VirtualDiskMode string + +const ( + VirtualDiskModePersistent = VirtualDiskMode("persistent") + VirtualDiskModeNonpersistent = VirtualDiskMode("nonpersistent") + VirtualDiskModeUndoable = VirtualDiskMode("undoable") + VirtualDiskModeIndependent_persistent = VirtualDiskMode("independent_persistent") + VirtualDiskModeIndependent_nonpersistent = VirtualDiskMode("independent_nonpersistent") + VirtualDiskModeAppend = VirtualDiskMode("append") +) + +func init() { + t["VirtualDiskMode"] = reflect.TypeOf((*VirtualDiskMode)(nil)).Elem() +} + +type VirtualDiskRuleSpecRuleType string + +const ( + VirtualDiskRuleSpecRuleTypeAffinity = VirtualDiskRuleSpecRuleType("affinity") + VirtualDiskRuleSpecRuleTypeAntiAffinity = VirtualDiskRuleSpecRuleType("antiAffinity") + VirtualDiskRuleSpecRuleTypeDisabled = VirtualDiskRuleSpecRuleType("disabled") +) + +func init() { + t["VirtualDiskRuleSpecRuleType"] = reflect.TypeOf((*VirtualDiskRuleSpecRuleType)(nil)).Elem() +} + +type VirtualDiskSharing string + +const ( + VirtualDiskSharingSharingNone = VirtualDiskSharing("sharingNone") + VirtualDiskSharingSharingMultiWriter = VirtualDiskSharing("sharingMultiWriter") +) + +func init() { + t["VirtualDiskSharing"] = reflect.TypeOf((*VirtualDiskSharing)(nil)).Elem() +} + +type VirtualDiskType string + +const ( + VirtualDiskTypePreallocated = VirtualDiskType("preallocated") + VirtualDiskTypeThin = VirtualDiskType("thin") + VirtualDiskTypeSeSparse = VirtualDiskType("seSparse") + VirtualDiskTypeRdm = VirtualDiskType("rdm") + VirtualDiskTypeRdmp = VirtualDiskType("rdmp") + VirtualDiskTypeRaw = VirtualDiskType("raw") + VirtualDiskTypeDelta = VirtualDiskType("delta") + VirtualDiskTypeSparse2Gb = VirtualDiskType("sparse2Gb") + VirtualDiskTypeThick2Gb = VirtualDiskType("thick2Gb") + VirtualDiskTypeEagerZeroedThick = VirtualDiskType("eagerZeroedThick") + VirtualDiskTypeSparseMonolithic = VirtualDiskType("sparseMonolithic") + VirtualDiskTypeFlatMonolithic = VirtualDiskType("flatMonolithic") + VirtualDiskTypeThick = VirtualDiskType("thick") +) + +func init() { + t["VirtualDiskType"] = 
reflect.TypeOf((*VirtualDiskType)(nil)).Elem() +} + +type VirtualDiskVFlashCacheConfigInfoCacheConsistencyType string + +const ( + VirtualDiskVFlashCacheConfigInfoCacheConsistencyTypeStrong = VirtualDiskVFlashCacheConfigInfoCacheConsistencyType("strong") + VirtualDiskVFlashCacheConfigInfoCacheConsistencyTypeWeak = VirtualDiskVFlashCacheConfigInfoCacheConsistencyType("weak") +) + +func init() { + t["VirtualDiskVFlashCacheConfigInfoCacheConsistencyType"] = reflect.TypeOf((*VirtualDiskVFlashCacheConfigInfoCacheConsistencyType)(nil)).Elem() +} + +type VirtualDiskVFlashCacheConfigInfoCacheMode string + +const ( + VirtualDiskVFlashCacheConfigInfoCacheModeWrite_thru = VirtualDiskVFlashCacheConfigInfoCacheMode("write_thru") + VirtualDiskVFlashCacheConfigInfoCacheModeWrite_back = VirtualDiskVFlashCacheConfigInfoCacheMode("write_back") +) + +func init() { + t["VirtualDiskVFlashCacheConfigInfoCacheMode"] = reflect.TypeOf((*VirtualDiskVFlashCacheConfigInfoCacheMode)(nil)).Elem() +} + +type VirtualEthernetCardLegacyNetworkDeviceName string + +const ( + VirtualEthernetCardLegacyNetworkDeviceNameBridged = VirtualEthernetCardLegacyNetworkDeviceName("bridged") + VirtualEthernetCardLegacyNetworkDeviceNameNat = VirtualEthernetCardLegacyNetworkDeviceName("nat") + VirtualEthernetCardLegacyNetworkDeviceNameHostonly = VirtualEthernetCardLegacyNetworkDeviceName("hostonly") +) + +func init() { + t["VirtualEthernetCardLegacyNetworkDeviceName"] = reflect.TypeOf((*VirtualEthernetCardLegacyNetworkDeviceName)(nil)).Elem() +} + +type VirtualEthernetCardMacType string + +const ( + VirtualEthernetCardMacTypeManual = VirtualEthernetCardMacType("manual") + VirtualEthernetCardMacTypeGenerated = VirtualEthernetCardMacType("generated") + VirtualEthernetCardMacTypeAssigned = VirtualEthernetCardMacType("assigned") +) + +func init() { + t["VirtualEthernetCardMacType"] = reflect.TypeOf((*VirtualEthernetCardMacType)(nil)).Elem() +} + +type VirtualMachineAppHeartbeatStatusType string + +const ( + VirtualMachineAppHeartbeatStatusTypeAppStatusGray = VirtualMachineAppHeartbeatStatusType("appStatusGray") + VirtualMachineAppHeartbeatStatusTypeAppStatusGreen = VirtualMachineAppHeartbeatStatusType("appStatusGreen") + VirtualMachineAppHeartbeatStatusTypeAppStatusRed = VirtualMachineAppHeartbeatStatusType("appStatusRed") +) + +func init() { + t["VirtualMachineAppHeartbeatStatusType"] = reflect.TypeOf((*VirtualMachineAppHeartbeatStatusType)(nil)).Elem() +} + +type VirtualMachineBootOptionsNetworkBootProtocolType string + +const ( + VirtualMachineBootOptionsNetworkBootProtocolTypeIpv4 = VirtualMachineBootOptionsNetworkBootProtocolType("ipv4") + VirtualMachineBootOptionsNetworkBootProtocolTypeIpv6 = VirtualMachineBootOptionsNetworkBootProtocolType("ipv6") +) + +func init() { + t["VirtualMachineBootOptionsNetworkBootProtocolType"] = reflect.TypeOf((*VirtualMachineBootOptionsNetworkBootProtocolType)(nil)).Elem() +} + +type VirtualMachineConfigInfoNpivWwnType string + +const ( + VirtualMachineConfigInfoNpivWwnTypeVc = VirtualMachineConfigInfoNpivWwnType("vc") + VirtualMachineConfigInfoNpivWwnTypeHost = VirtualMachineConfigInfoNpivWwnType("host") + VirtualMachineConfigInfoNpivWwnTypeExternal = VirtualMachineConfigInfoNpivWwnType("external") +) + +func init() { + t["VirtualMachineConfigInfoNpivWwnType"] = reflect.TypeOf((*VirtualMachineConfigInfoNpivWwnType)(nil)).Elem() +} + +type VirtualMachineConfigInfoSwapPlacementType string + +const ( + VirtualMachineConfigInfoSwapPlacementTypeInherit = VirtualMachineConfigInfoSwapPlacementType("inherit") + 
VirtualMachineConfigInfoSwapPlacementTypeVmDirectory = VirtualMachineConfigInfoSwapPlacementType("vmDirectory") + VirtualMachineConfigInfoSwapPlacementTypeHostLocal = VirtualMachineConfigInfoSwapPlacementType("hostLocal") +) + +func init() { + t["VirtualMachineConfigInfoSwapPlacementType"] = reflect.TypeOf((*VirtualMachineConfigInfoSwapPlacementType)(nil)).Elem() +} + +type VirtualMachineConfigSpecEncryptedVMotionModes string + +const ( + VirtualMachineConfigSpecEncryptedVMotionModesDisabled = VirtualMachineConfigSpecEncryptedVMotionModes("disabled") + VirtualMachineConfigSpecEncryptedVMotionModesOpportunistic = VirtualMachineConfigSpecEncryptedVMotionModes("opportunistic") + VirtualMachineConfigSpecEncryptedVMotionModesRequired = VirtualMachineConfigSpecEncryptedVMotionModes("required") +) + +func init() { + t["VirtualMachineConfigSpecEncryptedVMotionModes"] = reflect.TypeOf((*VirtualMachineConfigSpecEncryptedVMotionModes)(nil)).Elem() +} + +type VirtualMachineConfigSpecNpivWwnOp string + +const ( + VirtualMachineConfigSpecNpivWwnOpGenerate = VirtualMachineConfigSpecNpivWwnOp("generate") + VirtualMachineConfigSpecNpivWwnOpSet = VirtualMachineConfigSpecNpivWwnOp("set") + VirtualMachineConfigSpecNpivWwnOpRemove = VirtualMachineConfigSpecNpivWwnOp("remove") + VirtualMachineConfigSpecNpivWwnOpExtend = VirtualMachineConfigSpecNpivWwnOp("extend") +) + +func init() { + t["VirtualMachineConfigSpecNpivWwnOp"] = reflect.TypeOf((*VirtualMachineConfigSpecNpivWwnOp)(nil)).Elem() +} + +type VirtualMachineConnectionState string + +const ( + VirtualMachineConnectionStateConnected = VirtualMachineConnectionState("connected") + VirtualMachineConnectionStateDisconnected = VirtualMachineConnectionState("disconnected") + VirtualMachineConnectionStateOrphaned = VirtualMachineConnectionState("orphaned") + VirtualMachineConnectionStateInaccessible = VirtualMachineConnectionState("inaccessible") + VirtualMachineConnectionStateInvalid = VirtualMachineConnectionState("invalid") +) + +func init() { + t["VirtualMachineConnectionState"] = reflect.TypeOf((*VirtualMachineConnectionState)(nil)).Elem() +} + +type VirtualMachineCryptoState string + +const ( + VirtualMachineCryptoStateUnlocked = VirtualMachineCryptoState("unlocked") + VirtualMachineCryptoStateLocked = VirtualMachineCryptoState("locked") +) + +func init() { + t["VirtualMachineCryptoState"] = reflect.TypeOf((*VirtualMachineCryptoState)(nil)).Elem() +} + +type VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther string + +const ( + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOtherVmNptIncompatibleHost = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther("vmNptIncompatibleHost") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOtherVmNptIncompatibleNetwork = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther("vmNptIncompatibleNetwork") +) + +func init() { + t["VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther)(nil)).Elem() +} + +type VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm string + +const ( + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleGuest = 
VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptIncompatibleGuest") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleGuestDriver = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptIncompatibleGuestDriver") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleAdapterType = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptIncompatibleAdapterType") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptDisabledOrDisconnectedAdapter = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptDisabledOrDisconnectedAdapter") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleAdapterFeatures = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptIncompatibleAdapterFeatures") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleBackingType = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptIncompatibleBackingType") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptInsufficientMemoryReservation = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptInsufficientMemoryReservation") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptFaultToleranceOrRecordReplayConfigured = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptFaultToleranceOrRecordReplayConfigured") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptConflictingIOChainConfigured = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptConflictingIOChainConfigured") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptMonitorBlocks = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptMonitorBlocks") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptConflictingOperationInProgress = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptConflictingOperationInProgress") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptRuntimeError = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptRuntimeError") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptOutOfIntrVector = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptOutOfIntrVector") + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptVMCIActive = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptVMCIActive") +) + +func init() { + t["VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm"] = 
reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm)(nil)).Elem() +} + +type VirtualMachineFaultToleranceState string + +const ( + VirtualMachineFaultToleranceStateNotConfigured = VirtualMachineFaultToleranceState("notConfigured") + VirtualMachineFaultToleranceStateDisabled = VirtualMachineFaultToleranceState("disabled") + VirtualMachineFaultToleranceStateEnabled = VirtualMachineFaultToleranceState("enabled") + VirtualMachineFaultToleranceStateNeedSecondary = VirtualMachineFaultToleranceState("needSecondary") + VirtualMachineFaultToleranceStateStarting = VirtualMachineFaultToleranceState("starting") + VirtualMachineFaultToleranceStateRunning = VirtualMachineFaultToleranceState("running") +) + +func init() { + t["VirtualMachineFaultToleranceState"] = reflect.TypeOf((*VirtualMachineFaultToleranceState)(nil)).Elem() +} + +type VirtualMachineFaultToleranceType string + +const ( + VirtualMachineFaultToleranceTypeUnset = VirtualMachineFaultToleranceType("unset") + VirtualMachineFaultToleranceTypeRecordReplay = VirtualMachineFaultToleranceType("recordReplay") + VirtualMachineFaultToleranceTypeCheckpointing = VirtualMachineFaultToleranceType("checkpointing") +) + +func init() { + t["VirtualMachineFaultToleranceType"] = reflect.TypeOf((*VirtualMachineFaultToleranceType)(nil)).Elem() +} + +type VirtualMachineFileLayoutExFileType string + +const ( + VirtualMachineFileLayoutExFileTypeConfig = VirtualMachineFileLayoutExFileType("config") + VirtualMachineFileLayoutExFileTypeExtendedConfig = VirtualMachineFileLayoutExFileType("extendedConfig") + VirtualMachineFileLayoutExFileTypeDiskDescriptor = VirtualMachineFileLayoutExFileType("diskDescriptor") + VirtualMachineFileLayoutExFileTypeDiskExtent = VirtualMachineFileLayoutExFileType("diskExtent") + VirtualMachineFileLayoutExFileTypeDigestDescriptor = VirtualMachineFileLayoutExFileType("digestDescriptor") + VirtualMachineFileLayoutExFileTypeDigestExtent = VirtualMachineFileLayoutExFileType("digestExtent") + VirtualMachineFileLayoutExFileTypeDiskReplicationState = VirtualMachineFileLayoutExFileType("diskReplicationState") + VirtualMachineFileLayoutExFileTypeLog = VirtualMachineFileLayoutExFileType("log") + VirtualMachineFileLayoutExFileTypeStat = VirtualMachineFileLayoutExFileType("stat") + VirtualMachineFileLayoutExFileTypeNamespaceData = VirtualMachineFileLayoutExFileType("namespaceData") + VirtualMachineFileLayoutExFileTypeNvram = VirtualMachineFileLayoutExFileType("nvram") + VirtualMachineFileLayoutExFileTypeSnapshotData = VirtualMachineFileLayoutExFileType("snapshotData") + VirtualMachineFileLayoutExFileTypeSnapshotMemory = VirtualMachineFileLayoutExFileType("snapshotMemory") + VirtualMachineFileLayoutExFileTypeSnapshotList = VirtualMachineFileLayoutExFileType("snapshotList") + VirtualMachineFileLayoutExFileTypeSnapshotManifestList = VirtualMachineFileLayoutExFileType("snapshotManifestList") + VirtualMachineFileLayoutExFileTypeSuspend = VirtualMachineFileLayoutExFileType("suspend") + VirtualMachineFileLayoutExFileTypeSuspendMemory = VirtualMachineFileLayoutExFileType("suspendMemory") + VirtualMachineFileLayoutExFileTypeSwap = VirtualMachineFileLayoutExFileType("swap") + VirtualMachineFileLayoutExFileTypeUwswap = VirtualMachineFileLayoutExFileType("uwswap") + VirtualMachineFileLayoutExFileTypeCore = VirtualMachineFileLayoutExFileType("core") + VirtualMachineFileLayoutExFileTypeScreenshot = VirtualMachineFileLayoutExFileType("screenshot") + VirtualMachineFileLayoutExFileTypeFtMetadata = 
VirtualMachineFileLayoutExFileType("ftMetadata") + VirtualMachineFileLayoutExFileTypeGuestCustomization = VirtualMachineFileLayoutExFileType("guestCustomization") +) + +func init() { + t["VirtualMachineFileLayoutExFileType"] = reflect.TypeOf((*VirtualMachineFileLayoutExFileType)(nil)).Elem() +} + +type VirtualMachineFlagInfoMonitorType string + +const ( + VirtualMachineFlagInfoMonitorTypeRelease = VirtualMachineFlagInfoMonitorType("release") + VirtualMachineFlagInfoMonitorTypeDebug = VirtualMachineFlagInfoMonitorType("debug") + VirtualMachineFlagInfoMonitorTypeStats = VirtualMachineFlagInfoMonitorType("stats") +) + +func init() { + t["VirtualMachineFlagInfoMonitorType"] = reflect.TypeOf((*VirtualMachineFlagInfoMonitorType)(nil)).Elem() +} + +type VirtualMachineFlagInfoVirtualExecUsage string + +const ( + VirtualMachineFlagInfoVirtualExecUsageHvAuto = VirtualMachineFlagInfoVirtualExecUsage("hvAuto") + VirtualMachineFlagInfoVirtualExecUsageHvOn = VirtualMachineFlagInfoVirtualExecUsage("hvOn") + VirtualMachineFlagInfoVirtualExecUsageHvOff = VirtualMachineFlagInfoVirtualExecUsage("hvOff") +) + +func init() { + t["VirtualMachineFlagInfoVirtualExecUsage"] = reflect.TypeOf((*VirtualMachineFlagInfoVirtualExecUsage)(nil)).Elem() +} + +type VirtualMachineFlagInfoVirtualMmuUsage string + +const ( + VirtualMachineFlagInfoVirtualMmuUsageAutomatic = VirtualMachineFlagInfoVirtualMmuUsage("automatic") + VirtualMachineFlagInfoVirtualMmuUsageOn = VirtualMachineFlagInfoVirtualMmuUsage("on") + VirtualMachineFlagInfoVirtualMmuUsageOff = VirtualMachineFlagInfoVirtualMmuUsage("off") +) + +func init() { + t["VirtualMachineFlagInfoVirtualMmuUsage"] = reflect.TypeOf((*VirtualMachineFlagInfoVirtualMmuUsage)(nil)).Elem() +} + +type VirtualMachineForkConfigInfoChildType string + +const ( + VirtualMachineForkConfigInfoChildTypeNone = VirtualMachineForkConfigInfoChildType("none") + VirtualMachineForkConfigInfoChildTypePersistent = VirtualMachineForkConfigInfoChildType("persistent") + VirtualMachineForkConfigInfoChildTypeNonpersistent = VirtualMachineForkConfigInfoChildType("nonpersistent") +) + +func init() { + t["VirtualMachineForkConfigInfoChildType"] = reflect.TypeOf((*VirtualMachineForkConfigInfoChildType)(nil)).Elem() +} + +type VirtualMachineGuestOsFamily string + +const ( + VirtualMachineGuestOsFamilyWindowsGuest = VirtualMachineGuestOsFamily("windowsGuest") + VirtualMachineGuestOsFamilyLinuxGuest = VirtualMachineGuestOsFamily("linuxGuest") + VirtualMachineGuestOsFamilyNetwareGuest = VirtualMachineGuestOsFamily("netwareGuest") + VirtualMachineGuestOsFamilySolarisGuest = VirtualMachineGuestOsFamily("solarisGuest") + VirtualMachineGuestOsFamilyDarwinGuestFamily = VirtualMachineGuestOsFamily("darwinGuestFamily") + VirtualMachineGuestOsFamilyOtherGuestFamily = VirtualMachineGuestOsFamily("otherGuestFamily") +) + +func init() { + t["VirtualMachineGuestOsFamily"] = reflect.TypeOf((*VirtualMachineGuestOsFamily)(nil)).Elem() +} + +type VirtualMachineGuestOsIdentifier string + +const ( + VirtualMachineGuestOsIdentifierDosGuest = VirtualMachineGuestOsIdentifier("dosGuest") + VirtualMachineGuestOsIdentifierWin31Guest = VirtualMachineGuestOsIdentifier("win31Guest") + VirtualMachineGuestOsIdentifierWin95Guest = VirtualMachineGuestOsIdentifier("win95Guest") + VirtualMachineGuestOsIdentifierWin98Guest = VirtualMachineGuestOsIdentifier("win98Guest") + VirtualMachineGuestOsIdentifierWinMeGuest = VirtualMachineGuestOsIdentifier("winMeGuest") + VirtualMachineGuestOsIdentifierWinNTGuest = 
VirtualMachineGuestOsIdentifier("winNTGuest") + VirtualMachineGuestOsIdentifierWin2000ProGuest = VirtualMachineGuestOsIdentifier("win2000ProGuest") + VirtualMachineGuestOsIdentifierWin2000ServGuest = VirtualMachineGuestOsIdentifier("win2000ServGuest") + VirtualMachineGuestOsIdentifierWin2000AdvServGuest = VirtualMachineGuestOsIdentifier("win2000AdvServGuest") + VirtualMachineGuestOsIdentifierWinXPHomeGuest = VirtualMachineGuestOsIdentifier("winXPHomeGuest") + VirtualMachineGuestOsIdentifierWinXPProGuest = VirtualMachineGuestOsIdentifier("winXPProGuest") + VirtualMachineGuestOsIdentifierWinXPPro64Guest = VirtualMachineGuestOsIdentifier("winXPPro64Guest") + VirtualMachineGuestOsIdentifierWinNetWebGuest = VirtualMachineGuestOsIdentifier("winNetWebGuest") + VirtualMachineGuestOsIdentifierWinNetStandardGuest = VirtualMachineGuestOsIdentifier("winNetStandardGuest") + VirtualMachineGuestOsIdentifierWinNetEnterpriseGuest = VirtualMachineGuestOsIdentifier("winNetEnterpriseGuest") + VirtualMachineGuestOsIdentifierWinNetDatacenterGuest = VirtualMachineGuestOsIdentifier("winNetDatacenterGuest") + VirtualMachineGuestOsIdentifierWinNetBusinessGuest = VirtualMachineGuestOsIdentifier("winNetBusinessGuest") + VirtualMachineGuestOsIdentifierWinNetStandard64Guest = VirtualMachineGuestOsIdentifier("winNetStandard64Guest") + VirtualMachineGuestOsIdentifierWinNetEnterprise64Guest = VirtualMachineGuestOsIdentifier("winNetEnterprise64Guest") + VirtualMachineGuestOsIdentifierWinLonghornGuest = VirtualMachineGuestOsIdentifier("winLonghornGuest") + VirtualMachineGuestOsIdentifierWinLonghorn64Guest = VirtualMachineGuestOsIdentifier("winLonghorn64Guest") + VirtualMachineGuestOsIdentifierWinNetDatacenter64Guest = VirtualMachineGuestOsIdentifier("winNetDatacenter64Guest") + VirtualMachineGuestOsIdentifierWinVistaGuest = VirtualMachineGuestOsIdentifier("winVistaGuest") + VirtualMachineGuestOsIdentifierWinVista64Guest = VirtualMachineGuestOsIdentifier("winVista64Guest") + VirtualMachineGuestOsIdentifierWindows7Guest = VirtualMachineGuestOsIdentifier("windows7Guest") + VirtualMachineGuestOsIdentifierWindows7_64Guest = VirtualMachineGuestOsIdentifier("windows7_64Guest") + VirtualMachineGuestOsIdentifierWindows7Server64Guest = VirtualMachineGuestOsIdentifier("windows7Server64Guest") + VirtualMachineGuestOsIdentifierWindows8Guest = VirtualMachineGuestOsIdentifier("windows8Guest") + VirtualMachineGuestOsIdentifierWindows8_64Guest = VirtualMachineGuestOsIdentifier("windows8_64Guest") + VirtualMachineGuestOsIdentifierWindows8Server64Guest = VirtualMachineGuestOsIdentifier("windows8Server64Guest") + VirtualMachineGuestOsIdentifierWindows9Guest = VirtualMachineGuestOsIdentifier("windows9Guest") + VirtualMachineGuestOsIdentifierWindows9_64Guest = VirtualMachineGuestOsIdentifier("windows9_64Guest") + VirtualMachineGuestOsIdentifierWindows9Server64Guest = VirtualMachineGuestOsIdentifier("windows9Server64Guest") + VirtualMachineGuestOsIdentifierWindowsHyperVGuest = VirtualMachineGuestOsIdentifier("windowsHyperVGuest") + VirtualMachineGuestOsIdentifierFreebsdGuest = VirtualMachineGuestOsIdentifier("freebsdGuest") + VirtualMachineGuestOsIdentifierFreebsd64Guest = VirtualMachineGuestOsIdentifier("freebsd64Guest") + VirtualMachineGuestOsIdentifierFreebsd11Guest = VirtualMachineGuestOsIdentifier("freebsd11Guest") + VirtualMachineGuestOsIdentifierFreebsd11_64Guest = VirtualMachineGuestOsIdentifier("freebsd11_64Guest") + VirtualMachineGuestOsIdentifierFreebsd12Guest = VirtualMachineGuestOsIdentifier("freebsd12Guest") + 
VirtualMachineGuestOsIdentifierFreebsd12_64Guest = VirtualMachineGuestOsIdentifier("freebsd12_64Guest") + VirtualMachineGuestOsIdentifierRedhatGuest = VirtualMachineGuestOsIdentifier("redhatGuest") + VirtualMachineGuestOsIdentifierRhel2Guest = VirtualMachineGuestOsIdentifier("rhel2Guest") + VirtualMachineGuestOsIdentifierRhel3Guest = VirtualMachineGuestOsIdentifier("rhel3Guest") + VirtualMachineGuestOsIdentifierRhel3_64Guest = VirtualMachineGuestOsIdentifier("rhel3_64Guest") + VirtualMachineGuestOsIdentifierRhel4Guest = VirtualMachineGuestOsIdentifier("rhel4Guest") + VirtualMachineGuestOsIdentifierRhel4_64Guest = VirtualMachineGuestOsIdentifier("rhel4_64Guest") + VirtualMachineGuestOsIdentifierRhel5Guest = VirtualMachineGuestOsIdentifier("rhel5Guest") + VirtualMachineGuestOsIdentifierRhel5_64Guest = VirtualMachineGuestOsIdentifier("rhel5_64Guest") + VirtualMachineGuestOsIdentifierRhel6Guest = VirtualMachineGuestOsIdentifier("rhel6Guest") + VirtualMachineGuestOsIdentifierRhel6_64Guest = VirtualMachineGuestOsIdentifier("rhel6_64Guest") + VirtualMachineGuestOsIdentifierRhel7Guest = VirtualMachineGuestOsIdentifier("rhel7Guest") + VirtualMachineGuestOsIdentifierRhel7_64Guest = VirtualMachineGuestOsIdentifier("rhel7_64Guest") + VirtualMachineGuestOsIdentifierRhel8_64Guest = VirtualMachineGuestOsIdentifier("rhel8_64Guest") + VirtualMachineGuestOsIdentifierCentosGuest = VirtualMachineGuestOsIdentifier("centosGuest") + VirtualMachineGuestOsIdentifierCentos64Guest = VirtualMachineGuestOsIdentifier("centos64Guest") + VirtualMachineGuestOsIdentifierCentos6Guest = VirtualMachineGuestOsIdentifier("centos6Guest") + VirtualMachineGuestOsIdentifierCentos6_64Guest = VirtualMachineGuestOsIdentifier("centos6_64Guest") + VirtualMachineGuestOsIdentifierCentos7Guest = VirtualMachineGuestOsIdentifier("centos7Guest") + VirtualMachineGuestOsIdentifierCentos7_64Guest = VirtualMachineGuestOsIdentifier("centos7_64Guest") + VirtualMachineGuestOsIdentifierCentos8_64Guest = VirtualMachineGuestOsIdentifier("centos8_64Guest") + VirtualMachineGuestOsIdentifierOracleLinuxGuest = VirtualMachineGuestOsIdentifier("oracleLinuxGuest") + VirtualMachineGuestOsIdentifierOracleLinux64Guest = VirtualMachineGuestOsIdentifier("oracleLinux64Guest") + VirtualMachineGuestOsIdentifierOracleLinux6Guest = VirtualMachineGuestOsIdentifier("oracleLinux6Guest") + VirtualMachineGuestOsIdentifierOracleLinux6_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux6_64Guest") + VirtualMachineGuestOsIdentifierOracleLinux7Guest = VirtualMachineGuestOsIdentifier("oracleLinux7Guest") + VirtualMachineGuestOsIdentifierOracleLinux7_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux7_64Guest") + VirtualMachineGuestOsIdentifierOracleLinux8_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux8_64Guest") + VirtualMachineGuestOsIdentifierSuseGuest = VirtualMachineGuestOsIdentifier("suseGuest") + VirtualMachineGuestOsIdentifierSuse64Guest = VirtualMachineGuestOsIdentifier("suse64Guest") + VirtualMachineGuestOsIdentifierSlesGuest = VirtualMachineGuestOsIdentifier("slesGuest") + VirtualMachineGuestOsIdentifierSles64Guest = VirtualMachineGuestOsIdentifier("sles64Guest") + VirtualMachineGuestOsIdentifierSles10Guest = VirtualMachineGuestOsIdentifier("sles10Guest") + VirtualMachineGuestOsIdentifierSles10_64Guest = VirtualMachineGuestOsIdentifier("sles10_64Guest") + VirtualMachineGuestOsIdentifierSles11Guest = VirtualMachineGuestOsIdentifier("sles11Guest") + VirtualMachineGuestOsIdentifierSles11_64Guest = VirtualMachineGuestOsIdentifier("sles11_64Guest") + 
VirtualMachineGuestOsIdentifierSles12Guest = VirtualMachineGuestOsIdentifier("sles12Guest") + VirtualMachineGuestOsIdentifierSles12_64Guest = VirtualMachineGuestOsIdentifier("sles12_64Guest") + VirtualMachineGuestOsIdentifierSles15_64Guest = VirtualMachineGuestOsIdentifier("sles15_64Guest") + VirtualMachineGuestOsIdentifierNld9Guest = VirtualMachineGuestOsIdentifier("nld9Guest") + VirtualMachineGuestOsIdentifierOesGuest = VirtualMachineGuestOsIdentifier("oesGuest") + VirtualMachineGuestOsIdentifierSjdsGuest = VirtualMachineGuestOsIdentifier("sjdsGuest") + VirtualMachineGuestOsIdentifierMandrakeGuest = VirtualMachineGuestOsIdentifier("mandrakeGuest") + VirtualMachineGuestOsIdentifierMandrivaGuest = VirtualMachineGuestOsIdentifier("mandrivaGuest") + VirtualMachineGuestOsIdentifierMandriva64Guest = VirtualMachineGuestOsIdentifier("mandriva64Guest") + VirtualMachineGuestOsIdentifierTurboLinuxGuest = VirtualMachineGuestOsIdentifier("turboLinuxGuest") + VirtualMachineGuestOsIdentifierTurboLinux64Guest = VirtualMachineGuestOsIdentifier("turboLinux64Guest") + VirtualMachineGuestOsIdentifierUbuntuGuest = VirtualMachineGuestOsIdentifier("ubuntuGuest") + VirtualMachineGuestOsIdentifierUbuntu64Guest = VirtualMachineGuestOsIdentifier("ubuntu64Guest") + VirtualMachineGuestOsIdentifierDebian4Guest = VirtualMachineGuestOsIdentifier("debian4Guest") + VirtualMachineGuestOsIdentifierDebian4_64Guest = VirtualMachineGuestOsIdentifier("debian4_64Guest") + VirtualMachineGuestOsIdentifierDebian5Guest = VirtualMachineGuestOsIdentifier("debian5Guest") + VirtualMachineGuestOsIdentifierDebian5_64Guest = VirtualMachineGuestOsIdentifier("debian5_64Guest") + VirtualMachineGuestOsIdentifierDebian6Guest = VirtualMachineGuestOsIdentifier("debian6Guest") + VirtualMachineGuestOsIdentifierDebian6_64Guest = VirtualMachineGuestOsIdentifier("debian6_64Guest") + VirtualMachineGuestOsIdentifierDebian7Guest = VirtualMachineGuestOsIdentifier("debian7Guest") + VirtualMachineGuestOsIdentifierDebian7_64Guest = VirtualMachineGuestOsIdentifier("debian7_64Guest") + VirtualMachineGuestOsIdentifierDebian8Guest = VirtualMachineGuestOsIdentifier("debian8Guest") + VirtualMachineGuestOsIdentifierDebian8_64Guest = VirtualMachineGuestOsIdentifier("debian8_64Guest") + VirtualMachineGuestOsIdentifierDebian9Guest = VirtualMachineGuestOsIdentifier("debian9Guest") + VirtualMachineGuestOsIdentifierDebian9_64Guest = VirtualMachineGuestOsIdentifier("debian9_64Guest") + VirtualMachineGuestOsIdentifierDebian10Guest = VirtualMachineGuestOsIdentifier("debian10Guest") + VirtualMachineGuestOsIdentifierDebian10_64Guest = VirtualMachineGuestOsIdentifier("debian10_64Guest") + VirtualMachineGuestOsIdentifierAsianux3Guest = VirtualMachineGuestOsIdentifier("asianux3Guest") + VirtualMachineGuestOsIdentifierAsianux3_64Guest = VirtualMachineGuestOsIdentifier("asianux3_64Guest") + VirtualMachineGuestOsIdentifierAsianux4Guest = VirtualMachineGuestOsIdentifier("asianux4Guest") + VirtualMachineGuestOsIdentifierAsianux4_64Guest = VirtualMachineGuestOsIdentifier("asianux4_64Guest") + VirtualMachineGuestOsIdentifierAsianux5_64Guest = VirtualMachineGuestOsIdentifier("asianux5_64Guest") + VirtualMachineGuestOsIdentifierAsianux7_64Guest = VirtualMachineGuestOsIdentifier("asianux7_64Guest") + VirtualMachineGuestOsIdentifierAsianux8_64Guest = VirtualMachineGuestOsIdentifier("asianux8_64Guest") + VirtualMachineGuestOsIdentifierOpensuseGuest = VirtualMachineGuestOsIdentifier("opensuseGuest") + VirtualMachineGuestOsIdentifierOpensuse64Guest = 
VirtualMachineGuestOsIdentifier("opensuse64Guest") + VirtualMachineGuestOsIdentifierFedoraGuest = VirtualMachineGuestOsIdentifier("fedoraGuest") + VirtualMachineGuestOsIdentifierFedora64Guest = VirtualMachineGuestOsIdentifier("fedora64Guest") + VirtualMachineGuestOsIdentifierCoreos64Guest = VirtualMachineGuestOsIdentifier("coreos64Guest") + VirtualMachineGuestOsIdentifierVmwarePhoton64Guest = VirtualMachineGuestOsIdentifier("vmwarePhoton64Guest") + VirtualMachineGuestOsIdentifierOther24xLinuxGuest = VirtualMachineGuestOsIdentifier("other24xLinuxGuest") + VirtualMachineGuestOsIdentifierOther26xLinuxGuest = VirtualMachineGuestOsIdentifier("other26xLinuxGuest") + VirtualMachineGuestOsIdentifierOtherLinuxGuest = VirtualMachineGuestOsIdentifier("otherLinuxGuest") + VirtualMachineGuestOsIdentifierOther3xLinuxGuest = VirtualMachineGuestOsIdentifier("other3xLinuxGuest") + VirtualMachineGuestOsIdentifierOther4xLinuxGuest = VirtualMachineGuestOsIdentifier("other4xLinuxGuest") + VirtualMachineGuestOsIdentifierGenericLinuxGuest = VirtualMachineGuestOsIdentifier("genericLinuxGuest") + VirtualMachineGuestOsIdentifierOther24xLinux64Guest = VirtualMachineGuestOsIdentifier("other24xLinux64Guest") + VirtualMachineGuestOsIdentifierOther26xLinux64Guest = VirtualMachineGuestOsIdentifier("other26xLinux64Guest") + VirtualMachineGuestOsIdentifierOther3xLinux64Guest = VirtualMachineGuestOsIdentifier("other3xLinux64Guest") + VirtualMachineGuestOsIdentifierOther4xLinux64Guest = VirtualMachineGuestOsIdentifier("other4xLinux64Guest") + VirtualMachineGuestOsIdentifierOtherLinux64Guest = VirtualMachineGuestOsIdentifier("otherLinux64Guest") + VirtualMachineGuestOsIdentifierSolaris6Guest = VirtualMachineGuestOsIdentifier("solaris6Guest") + VirtualMachineGuestOsIdentifierSolaris7Guest = VirtualMachineGuestOsIdentifier("solaris7Guest") + VirtualMachineGuestOsIdentifierSolaris8Guest = VirtualMachineGuestOsIdentifier("solaris8Guest") + VirtualMachineGuestOsIdentifierSolaris9Guest = VirtualMachineGuestOsIdentifier("solaris9Guest") + VirtualMachineGuestOsIdentifierSolaris10Guest = VirtualMachineGuestOsIdentifier("solaris10Guest") + VirtualMachineGuestOsIdentifierSolaris10_64Guest = VirtualMachineGuestOsIdentifier("solaris10_64Guest") + VirtualMachineGuestOsIdentifierSolaris11_64Guest = VirtualMachineGuestOsIdentifier("solaris11_64Guest") + VirtualMachineGuestOsIdentifierOs2Guest = VirtualMachineGuestOsIdentifier("os2Guest") + VirtualMachineGuestOsIdentifierEComStationGuest = VirtualMachineGuestOsIdentifier("eComStationGuest") + VirtualMachineGuestOsIdentifierEComStation2Guest = VirtualMachineGuestOsIdentifier("eComStation2Guest") + VirtualMachineGuestOsIdentifierNetware4Guest = VirtualMachineGuestOsIdentifier("netware4Guest") + VirtualMachineGuestOsIdentifierNetware5Guest = VirtualMachineGuestOsIdentifier("netware5Guest") + VirtualMachineGuestOsIdentifierNetware6Guest = VirtualMachineGuestOsIdentifier("netware6Guest") + VirtualMachineGuestOsIdentifierOpenServer5Guest = VirtualMachineGuestOsIdentifier("openServer5Guest") + VirtualMachineGuestOsIdentifierOpenServer6Guest = VirtualMachineGuestOsIdentifier("openServer6Guest") + VirtualMachineGuestOsIdentifierUnixWare7Guest = VirtualMachineGuestOsIdentifier("unixWare7Guest") + VirtualMachineGuestOsIdentifierDarwinGuest = VirtualMachineGuestOsIdentifier("darwinGuest") + VirtualMachineGuestOsIdentifierDarwin64Guest = VirtualMachineGuestOsIdentifier("darwin64Guest") + VirtualMachineGuestOsIdentifierDarwin10Guest = VirtualMachineGuestOsIdentifier("darwin10Guest") + 
VirtualMachineGuestOsIdentifierDarwin10_64Guest = VirtualMachineGuestOsIdentifier("darwin10_64Guest") + VirtualMachineGuestOsIdentifierDarwin11Guest = VirtualMachineGuestOsIdentifier("darwin11Guest") + VirtualMachineGuestOsIdentifierDarwin11_64Guest = VirtualMachineGuestOsIdentifier("darwin11_64Guest") + VirtualMachineGuestOsIdentifierDarwin12_64Guest = VirtualMachineGuestOsIdentifier("darwin12_64Guest") + VirtualMachineGuestOsIdentifierDarwin13_64Guest = VirtualMachineGuestOsIdentifier("darwin13_64Guest") + VirtualMachineGuestOsIdentifierDarwin14_64Guest = VirtualMachineGuestOsIdentifier("darwin14_64Guest") + VirtualMachineGuestOsIdentifierDarwin15_64Guest = VirtualMachineGuestOsIdentifier("darwin15_64Guest") + VirtualMachineGuestOsIdentifierDarwin16_64Guest = VirtualMachineGuestOsIdentifier("darwin16_64Guest") + VirtualMachineGuestOsIdentifierDarwin17_64Guest = VirtualMachineGuestOsIdentifier("darwin17_64Guest") + VirtualMachineGuestOsIdentifierDarwin18_64Guest = VirtualMachineGuestOsIdentifier("darwin18_64Guest") + VirtualMachineGuestOsIdentifierVmkernelGuest = VirtualMachineGuestOsIdentifier("vmkernelGuest") + VirtualMachineGuestOsIdentifierVmkernel5Guest = VirtualMachineGuestOsIdentifier("vmkernel5Guest") + VirtualMachineGuestOsIdentifierVmkernel6Guest = VirtualMachineGuestOsIdentifier("vmkernel6Guest") + VirtualMachineGuestOsIdentifierVmkernel65Guest = VirtualMachineGuestOsIdentifier("vmkernel65Guest") + VirtualMachineGuestOsIdentifierOtherGuest = VirtualMachineGuestOsIdentifier("otherGuest") + VirtualMachineGuestOsIdentifierOtherGuest64 = VirtualMachineGuestOsIdentifier("otherGuest64") +) + +func init() { + t["VirtualMachineGuestOsIdentifier"] = reflect.TypeOf((*VirtualMachineGuestOsIdentifier)(nil)).Elem() +} + +type VirtualMachineGuestState string + +const ( + VirtualMachineGuestStateRunning = VirtualMachineGuestState("running") + VirtualMachineGuestStateShuttingDown = VirtualMachineGuestState("shuttingDown") + VirtualMachineGuestStateResetting = VirtualMachineGuestState("resetting") + VirtualMachineGuestStateStandby = VirtualMachineGuestState("standby") + VirtualMachineGuestStateNotRunning = VirtualMachineGuestState("notRunning") + VirtualMachineGuestStateUnknown = VirtualMachineGuestState("unknown") +) + +func init() { + t["VirtualMachineGuestState"] = reflect.TypeOf((*VirtualMachineGuestState)(nil)).Elem() +} + +type VirtualMachineHtSharing string + +const ( + VirtualMachineHtSharingAny = VirtualMachineHtSharing("any") + VirtualMachineHtSharingNone = VirtualMachineHtSharing("none") + VirtualMachineHtSharingInternal = VirtualMachineHtSharing("internal") +) + +func init() { + t["VirtualMachineHtSharing"] = reflect.TypeOf((*VirtualMachineHtSharing)(nil)).Elem() +} + +type VirtualMachineMemoryAllocationPolicy string + +const ( + VirtualMachineMemoryAllocationPolicySwapNone = VirtualMachineMemoryAllocationPolicy("swapNone") + VirtualMachineMemoryAllocationPolicySwapSome = VirtualMachineMemoryAllocationPolicy("swapSome") + VirtualMachineMemoryAllocationPolicySwapMost = VirtualMachineMemoryAllocationPolicy("swapMost") +) + +func init() { + t["VirtualMachineMemoryAllocationPolicy"] = reflect.TypeOf((*VirtualMachineMemoryAllocationPolicy)(nil)).Elem() +} + +type VirtualMachineMetadataManagerVmMetadataOp string + +const ( + VirtualMachineMetadataManagerVmMetadataOpUpdate = VirtualMachineMetadataManagerVmMetadataOp("Update") + VirtualMachineMetadataManagerVmMetadataOpRemove = VirtualMachineMetadataManagerVmMetadataOp("Remove") +) + +func init() { + 
t["VirtualMachineMetadataManagerVmMetadataOp"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataOp)(nil)).Elem() +} + +type VirtualMachineMetadataManagerVmMetadataOwnerOwner string + +const ( + VirtualMachineMetadataManagerVmMetadataOwnerOwnerComVmwareVsphereHA = VirtualMachineMetadataManagerVmMetadataOwnerOwner("ComVmwareVsphereHA") +) + +func init() { + t["VirtualMachineMetadataManagerVmMetadataOwnerOwner"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataOwnerOwner)(nil)).Elem() +} + +type VirtualMachineMovePriority string + +const ( + VirtualMachineMovePriorityLowPriority = VirtualMachineMovePriority("lowPriority") + VirtualMachineMovePriorityHighPriority = VirtualMachineMovePriority("highPriority") + VirtualMachineMovePriorityDefaultPriority = VirtualMachineMovePriority("defaultPriority") +) + +func init() { + t["VirtualMachineMovePriority"] = reflect.TypeOf((*VirtualMachineMovePriority)(nil)).Elem() +} + +type VirtualMachineNeedSecondaryReason string + +const ( + VirtualMachineNeedSecondaryReasonInitializing = VirtualMachineNeedSecondaryReason("initializing") + VirtualMachineNeedSecondaryReasonDivergence = VirtualMachineNeedSecondaryReason("divergence") + VirtualMachineNeedSecondaryReasonLostConnection = VirtualMachineNeedSecondaryReason("lostConnection") + VirtualMachineNeedSecondaryReasonPartialHardwareFailure = VirtualMachineNeedSecondaryReason("partialHardwareFailure") + VirtualMachineNeedSecondaryReasonUserAction = VirtualMachineNeedSecondaryReason("userAction") + VirtualMachineNeedSecondaryReasonCheckpointError = VirtualMachineNeedSecondaryReason("checkpointError") + VirtualMachineNeedSecondaryReasonOther = VirtualMachineNeedSecondaryReason("other") +) + +func init() { + t["VirtualMachineNeedSecondaryReason"] = reflect.TypeOf((*VirtualMachineNeedSecondaryReason)(nil)).Elem() +} + +type VirtualMachinePowerOffBehavior string + +const ( + VirtualMachinePowerOffBehaviorPowerOff = VirtualMachinePowerOffBehavior("powerOff") + VirtualMachinePowerOffBehaviorRevert = VirtualMachinePowerOffBehavior("revert") + VirtualMachinePowerOffBehaviorPrompt = VirtualMachinePowerOffBehavior("prompt") + VirtualMachinePowerOffBehaviorTake = VirtualMachinePowerOffBehavior("take") +) + +func init() { + t["VirtualMachinePowerOffBehavior"] = reflect.TypeOf((*VirtualMachinePowerOffBehavior)(nil)).Elem() +} + +type VirtualMachinePowerOpType string + +const ( + VirtualMachinePowerOpTypeSoft = VirtualMachinePowerOpType("soft") + VirtualMachinePowerOpTypeHard = VirtualMachinePowerOpType("hard") + VirtualMachinePowerOpTypePreset = VirtualMachinePowerOpType("preset") +) + +func init() { + t["VirtualMachinePowerOpType"] = reflect.TypeOf((*VirtualMachinePowerOpType)(nil)).Elem() +} + +type VirtualMachinePowerState string + +const ( + VirtualMachinePowerStatePoweredOff = VirtualMachinePowerState("poweredOff") + VirtualMachinePowerStatePoweredOn = VirtualMachinePowerState("poweredOn") + VirtualMachinePowerStateSuspended = VirtualMachinePowerState("suspended") +) + +func init() { + t["VirtualMachinePowerState"] = reflect.TypeOf((*VirtualMachinePowerState)(nil)).Elem() +} + +type VirtualMachineRecordReplayState string + +const ( + VirtualMachineRecordReplayStateRecording = VirtualMachineRecordReplayState("recording") + VirtualMachineRecordReplayStateReplaying = VirtualMachineRecordReplayState("replaying") + VirtualMachineRecordReplayStateInactive = VirtualMachineRecordReplayState("inactive") +) + +func init() { + t["VirtualMachineRecordReplayState"] = 
reflect.TypeOf((*VirtualMachineRecordReplayState)(nil)).Elem() +} + +type VirtualMachineRelocateDiskMoveOptions string + +const ( + VirtualMachineRelocateDiskMoveOptionsMoveAllDiskBackingsAndAllowSharing = VirtualMachineRelocateDiskMoveOptions("moveAllDiskBackingsAndAllowSharing") + VirtualMachineRelocateDiskMoveOptionsMoveAllDiskBackingsAndDisallowSharing = VirtualMachineRelocateDiskMoveOptions("moveAllDiskBackingsAndDisallowSharing") + VirtualMachineRelocateDiskMoveOptionsMoveChildMostDiskBacking = VirtualMachineRelocateDiskMoveOptions("moveChildMostDiskBacking") + VirtualMachineRelocateDiskMoveOptionsCreateNewChildDiskBacking = VirtualMachineRelocateDiskMoveOptions("createNewChildDiskBacking") + VirtualMachineRelocateDiskMoveOptionsMoveAllDiskBackingsAndConsolidate = VirtualMachineRelocateDiskMoveOptions("moveAllDiskBackingsAndConsolidate") +) + +func init() { + t["VirtualMachineRelocateDiskMoveOptions"] = reflect.TypeOf((*VirtualMachineRelocateDiskMoveOptions)(nil)).Elem() +} + +type VirtualMachineRelocateTransformation string + +const ( + VirtualMachineRelocateTransformationFlat = VirtualMachineRelocateTransformation("flat") + VirtualMachineRelocateTransformationSparse = VirtualMachineRelocateTransformation("sparse") +) + +func init() { + t["VirtualMachineRelocateTransformation"] = reflect.TypeOf((*VirtualMachineRelocateTransformation)(nil)).Elem() +} + +type VirtualMachineScsiPassthroughType string + +const ( + VirtualMachineScsiPassthroughTypeDisk = VirtualMachineScsiPassthroughType("disk") + VirtualMachineScsiPassthroughTypeTape = VirtualMachineScsiPassthroughType("tape") + VirtualMachineScsiPassthroughTypePrinter = VirtualMachineScsiPassthroughType("printer") + VirtualMachineScsiPassthroughTypeProcessor = VirtualMachineScsiPassthroughType("processor") + VirtualMachineScsiPassthroughTypeWorm = VirtualMachineScsiPassthroughType("worm") + VirtualMachineScsiPassthroughTypeCdrom = VirtualMachineScsiPassthroughType("cdrom") + VirtualMachineScsiPassthroughTypeScanner = VirtualMachineScsiPassthroughType("scanner") + VirtualMachineScsiPassthroughTypeOptical = VirtualMachineScsiPassthroughType("optical") + VirtualMachineScsiPassthroughTypeMedia = VirtualMachineScsiPassthroughType("media") + VirtualMachineScsiPassthroughTypeCom = VirtualMachineScsiPassthroughType("com") + VirtualMachineScsiPassthroughTypeRaid = VirtualMachineScsiPassthroughType("raid") + VirtualMachineScsiPassthroughTypeUnknown = VirtualMachineScsiPassthroughType("unknown") +) + +func init() { + t["VirtualMachineScsiPassthroughType"] = reflect.TypeOf((*VirtualMachineScsiPassthroughType)(nil)).Elem() +} + +type VirtualMachineStandbyActionType string + +const ( + VirtualMachineStandbyActionTypeCheckpoint = VirtualMachineStandbyActionType("checkpoint") + VirtualMachineStandbyActionTypePowerOnSuspend = VirtualMachineStandbyActionType("powerOnSuspend") +) + +func init() { + t["VirtualMachineStandbyActionType"] = reflect.TypeOf((*VirtualMachineStandbyActionType)(nil)).Elem() +} + +type VirtualMachineTargetInfoConfigurationTag string + +const ( + VirtualMachineTargetInfoConfigurationTagCompliant = VirtualMachineTargetInfoConfigurationTag("compliant") + VirtualMachineTargetInfoConfigurationTagClusterWide = VirtualMachineTargetInfoConfigurationTag("clusterWide") +) + +func init() { + t["VirtualMachineTargetInfoConfigurationTag"] = reflect.TypeOf((*VirtualMachineTargetInfoConfigurationTag)(nil)).Elem() +} + +type VirtualMachineTicketType string + +const ( + VirtualMachineTicketTypeMks = VirtualMachineTicketType("mks") + 
VirtualMachineTicketTypeDevice = VirtualMachineTicketType("device") + VirtualMachineTicketTypeGuestControl = VirtualMachineTicketType("guestControl") + VirtualMachineTicketTypeWebmks = VirtualMachineTicketType("webmks") + VirtualMachineTicketTypeGuestIntegrity = VirtualMachineTicketType("guestIntegrity") +) + +func init() { + t["VirtualMachineTicketType"] = reflect.TypeOf((*VirtualMachineTicketType)(nil)).Elem() +} + +type VirtualMachineToolsInstallType string + +const ( + VirtualMachineToolsInstallTypeGuestToolsTypeUnknown = VirtualMachineToolsInstallType("guestToolsTypeUnknown") + VirtualMachineToolsInstallTypeGuestToolsTypeMSI = VirtualMachineToolsInstallType("guestToolsTypeMSI") + VirtualMachineToolsInstallTypeGuestToolsTypeTar = VirtualMachineToolsInstallType("guestToolsTypeTar") + VirtualMachineToolsInstallTypeGuestToolsTypeOSP = VirtualMachineToolsInstallType("guestToolsTypeOSP") + VirtualMachineToolsInstallTypeGuestToolsTypeOpenVMTools = VirtualMachineToolsInstallType("guestToolsTypeOpenVMTools") +) + +func init() { + t["VirtualMachineToolsInstallType"] = reflect.TypeOf((*VirtualMachineToolsInstallType)(nil)).Elem() +} + +type VirtualMachineToolsRunningStatus string + +const ( + VirtualMachineToolsRunningStatusGuestToolsNotRunning = VirtualMachineToolsRunningStatus("guestToolsNotRunning") + VirtualMachineToolsRunningStatusGuestToolsRunning = VirtualMachineToolsRunningStatus("guestToolsRunning") + VirtualMachineToolsRunningStatusGuestToolsExecutingScripts = VirtualMachineToolsRunningStatus("guestToolsExecutingScripts") +) + +func init() { + t["VirtualMachineToolsRunningStatus"] = reflect.TypeOf((*VirtualMachineToolsRunningStatus)(nil)).Elem() +} + +type VirtualMachineToolsStatus string + +const ( + VirtualMachineToolsStatusToolsNotInstalled = VirtualMachineToolsStatus("toolsNotInstalled") + VirtualMachineToolsStatusToolsNotRunning = VirtualMachineToolsStatus("toolsNotRunning") + VirtualMachineToolsStatusToolsOld = VirtualMachineToolsStatus("toolsOld") + VirtualMachineToolsStatusToolsOk = VirtualMachineToolsStatus("toolsOk") +) + +func init() { + t["VirtualMachineToolsStatus"] = reflect.TypeOf((*VirtualMachineToolsStatus)(nil)).Elem() +} + +type VirtualMachineToolsVersionStatus string + +const ( + VirtualMachineToolsVersionStatusGuestToolsNotInstalled = VirtualMachineToolsVersionStatus("guestToolsNotInstalled") + VirtualMachineToolsVersionStatusGuestToolsNeedUpgrade = VirtualMachineToolsVersionStatus("guestToolsNeedUpgrade") + VirtualMachineToolsVersionStatusGuestToolsCurrent = VirtualMachineToolsVersionStatus("guestToolsCurrent") + VirtualMachineToolsVersionStatusGuestToolsUnmanaged = VirtualMachineToolsVersionStatus("guestToolsUnmanaged") + VirtualMachineToolsVersionStatusGuestToolsTooOld = VirtualMachineToolsVersionStatus("guestToolsTooOld") + VirtualMachineToolsVersionStatusGuestToolsSupportedOld = VirtualMachineToolsVersionStatus("guestToolsSupportedOld") + VirtualMachineToolsVersionStatusGuestToolsSupportedNew = VirtualMachineToolsVersionStatus("guestToolsSupportedNew") + VirtualMachineToolsVersionStatusGuestToolsTooNew = VirtualMachineToolsVersionStatus("guestToolsTooNew") + VirtualMachineToolsVersionStatusGuestToolsBlacklisted = VirtualMachineToolsVersionStatus("guestToolsBlacklisted") +) + +func init() { + t["VirtualMachineToolsVersionStatus"] = reflect.TypeOf((*VirtualMachineToolsVersionStatus)(nil)).Elem() +} + +type VirtualMachineUsbInfoFamily string + +const ( + VirtualMachineUsbInfoFamilyAudio = VirtualMachineUsbInfoFamily("audio") + VirtualMachineUsbInfoFamilyHid = 
VirtualMachineUsbInfoFamily("hid") + VirtualMachineUsbInfoFamilyHid_bootable = VirtualMachineUsbInfoFamily("hid_bootable") + VirtualMachineUsbInfoFamilyPhysical = VirtualMachineUsbInfoFamily("physical") + VirtualMachineUsbInfoFamilyCommunication = VirtualMachineUsbInfoFamily("communication") + VirtualMachineUsbInfoFamilyImaging = VirtualMachineUsbInfoFamily("imaging") + VirtualMachineUsbInfoFamilyPrinter = VirtualMachineUsbInfoFamily("printer") + VirtualMachineUsbInfoFamilyStorage = VirtualMachineUsbInfoFamily("storage") + VirtualMachineUsbInfoFamilyHub = VirtualMachineUsbInfoFamily("hub") + VirtualMachineUsbInfoFamilySmart_card = VirtualMachineUsbInfoFamily("smart_card") + VirtualMachineUsbInfoFamilySecurity = VirtualMachineUsbInfoFamily("security") + VirtualMachineUsbInfoFamilyVideo = VirtualMachineUsbInfoFamily("video") + VirtualMachineUsbInfoFamilyWireless = VirtualMachineUsbInfoFamily("wireless") + VirtualMachineUsbInfoFamilyBluetooth = VirtualMachineUsbInfoFamily("bluetooth") + VirtualMachineUsbInfoFamilyWusb = VirtualMachineUsbInfoFamily("wusb") + VirtualMachineUsbInfoFamilyPda = VirtualMachineUsbInfoFamily("pda") + VirtualMachineUsbInfoFamilyVendor_specific = VirtualMachineUsbInfoFamily("vendor_specific") + VirtualMachineUsbInfoFamilyOther = VirtualMachineUsbInfoFamily("other") + VirtualMachineUsbInfoFamilyUnknownFamily = VirtualMachineUsbInfoFamily("unknownFamily") +) + +func init() { + t["VirtualMachineUsbInfoFamily"] = reflect.TypeOf((*VirtualMachineUsbInfoFamily)(nil)).Elem() +} + +type VirtualMachineUsbInfoSpeed string + +const ( + VirtualMachineUsbInfoSpeedLow = VirtualMachineUsbInfoSpeed("low") + VirtualMachineUsbInfoSpeedFull = VirtualMachineUsbInfoSpeed("full") + VirtualMachineUsbInfoSpeedHigh = VirtualMachineUsbInfoSpeed("high") + VirtualMachineUsbInfoSpeedSuperSpeed = VirtualMachineUsbInfoSpeed("superSpeed") + VirtualMachineUsbInfoSpeedUnknownSpeed = VirtualMachineUsbInfoSpeed("unknownSpeed") +) + +func init() { + t["VirtualMachineUsbInfoSpeed"] = reflect.TypeOf((*VirtualMachineUsbInfoSpeed)(nil)).Elem() +} + +type VirtualMachineVMCIDeviceAction string + +const ( + VirtualMachineVMCIDeviceActionAllow = VirtualMachineVMCIDeviceAction("allow") + VirtualMachineVMCIDeviceActionDeny = VirtualMachineVMCIDeviceAction("deny") +) + +func init() { + t["VirtualMachineVMCIDeviceAction"] = reflect.TypeOf((*VirtualMachineVMCIDeviceAction)(nil)).Elem() +} + +type VirtualMachineVMCIDeviceDirection string + +const ( + VirtualMachineVMCIDeviceDirectionGuest = VirtualMachineVMCIDeviceDirection("guest") + VirtualMachineVMCIDeviceDirectionHost = VirtualMachineVMCIDeviceDirection("host") + VirtualMachineVMCIDeviceDirectionAnyDirection = VirtualMachineVMCIDeviceDirection("anyDirection") +) + +func init() { + t["VirtualMachineVMCIDeviceDirection"] = reflect.TypeOf((*VirtualMachineVMCIDeviceDirection)(nil)).Elem() +} + +type VirtualMachineVMCIDeviceProtocol string + +const ( + VirtualMachineVMCIDeviceProtocolHypervisor = VirtualMachineVMCIDeviceProtocol("hypervisor") + VirtualMachineVMCIDeviceProtocolDoorbell = VirtualMachineVMCIDeviceProtocol("doorbell") + VirtualMachineVMCIDeviceProtocolQueuepair = VirtualMachineVMCIDeviceProtocol("queuepair") + VirtualMachineVMCIDeviceProtocolDatagram = VirtualMachineVMCIDeviceProtocol("datagram") + VirtualMachineVMCIDeviceProtocolStream = VirtualMachineVMCIDeviceProtocol("stream") + VirtualMachineVMCIDeviceProtocolAnyProtocol = VirtualMachineVMCIDeviceProtocol("anyProtocol") +) + +func init() { + t["VirtualMachineVMCIDeviceProtocol"] = 
reflect.TypeOf((*VirtualMachineVMCIDeviceProtocol)(nil)).Elem() +} + +type VirtualMachineVideoCardUse3dRenderer string + +const ( + VirtualMachineVideoCardUse3dRendererAutomatic = VirtualMachineVideoCardUse3dRenderer("automatic") + VirtualMachineVideoCardUse3dRendererSoftware = VirtualMachineVideoCardUse3dRenderer("software") + VirtualMachineVideoCardUse3dRendererHardware = VirtualMachineVideoCardUse3dRenderer("hardware") +) + +func init() { + t["VirtualMachineVideoCardUse3dRenderer"] = reflect.TypeOf((*VirtualMachineVideoCardUse3dRenderer)(nil)).Elem() +} + +type VirtualMachineWindowsQuiesceSpecVssBackupContext string + +const ( + VirtualMachineWindowsQuiesceSpecVssBackupContextCtx_auto = VirtualMachineWindowsQuiesceSpecVssBackupContext("ctx_auto") + VirtualMachineWindowsQuiesceSpecVssBackupContextCtx_backup = VirtualMachineWindowsQuiesceSpecVssBackupContext("ctx_backup") + VirtualMachineWindowsQuiesceSpecVssBackupContextCtx_file_share_backup = VirtualMachineWindowsQuiesceSpecVssBackupContext("ctx_file_share_backup") +) + +func init() { + t["VirtualMachineWindowsQuiesceSpecVssBackupContext"] = reflect.TypeOf((*VirtualMachineWindowsQuiesceSpecVssBackupContext)(nil)).Elem() +} + +type VirtualPointingDeviceHostChoice string + +const ( + VirtualPointingDeviceHostChoiceAutodetect = VirtualPointingDeviceHostChoice("autodetect") + VirtualPointingDeviceHostChoiceIntellimouseExplorer = VirtualPointingDeviceHostChoice("intellimouseExplorer") + VirtualPointingDeviceHostChoiceIntellimousePs2 = VirtualPointingDeviceHostChoice("intellimousePs2") + VirtualPointingDeviceHostChoiceLogitechMouseman = VirtualPointingDeviceHostChoice("logitechMouseman") + VirtualPointingDeviceHostChoiceMicrosoft_serial = VirtualPointingDeviceHostChoice("microsoft_serial") + VirtualPointingDeviceHostChoiceMouseSystems = VirtualPointingDeviceHostChoice("mouseSystems") + VirtualPointingDeviceHostChoiceMousemanSerial = VirtualPointingDeviceHostChoice("mousemanSerial") + VirtualPointingDeviceHostChoicePs2 = VirtualPointingDeviceHostChoice("ps2") +) + +func init() { + t["VirtualPointingDeviceHostChoice"] = reflect.TypeOf((*VirtualPointingDeviceHostChoice)(nil)).Elem() +} + +type VirtualSCSISharing string + +const ( + VirtualSCSISharingNoSharing = VirtualSCSISharing("noSharing") + VirtualSCSISharingVirtualSharing = VirtualSCSISharing("virtualSharing") + VirtualSCSISharingPhysicalSharing = VirtualSCSISharing("physicalSharing") +) + +func init() { + t["VirtualSCSISharing"] = reflect.TypeOf((*VirtualSCSISharing)(nil)).Elem() +} + +type VirtualSerialPortEndPoint string + +const ( + VirtualSerialPortEndPointClient = VirtualSerialPortEndPoint("client") + VirtualSerialPortEndPointServer = VirtualSerialPortEndPoint("server") +) + +func init() { + t["VirtualSerialPortEndPoint"] = reflect.TypeOf((*VirtualSerialPortEndPoint)(nil)).Elem() +} + +type VirtualVmxnet3VrdmaOptionDeviceProtocols string + +const ( + VirtualVmxnet3VrdmaOptionDeviceProtocolsRocev1 = VirtualVmxnet3VrdmaOptionDeviceProtocols("rocev1") + VirtualVmxnet3VrdmaOptionDeviceProtocolsRocev2 = VirtualVmxnet3VrdmaOptionDeviceProtocols("rocev2") +) + +func init() { + t["VirtualVmxnet3VrdmaOptionDeviceProtocols"] = reflect.TypeOf((*VirtualVmxnet3VrdmaOptionDeviceProtocols)(nil)).Elem() +} + +type VmDasBeingResetEventReasonCode string + +const ( + VmDasBeingResetEventReasonCodeVmtoolsHeartbeatFailure = VmDasBeingResetEventReasonCode("vmtoolsHeartbeatFailure") + VmDasBeingResetEventReasonCodeAppHeartbeatFailure = VmDasBeingResetEventReasonCode("appHeartbeatFailure") + 
VmDasBeingResetEventReasonCodeAppImmediateResetRequest = VmDasBeingResetEventReasonCode("appImmediateResetRequest") + VmDasBeingResetEventReasonCodeVmcpResetApdCleared = VmDasBeingResetEventReasonCode("vmcpResetApdCleared") +) + +func init() { + t["VmDasBeingResetEventReasonCode"] = reflect.TypeOf((*VmDasBeingResetEventReasonCode)(nil)).Elem() +} + +type VmFailedStartingSecondaryEventFailureReason string + +const ( + VmFailedStartingSecondaryEventFailureReasonIncompatibleHost = VmFailedStartingSecondaryEventFailureReason("incompatibleHost") + VmFailedStartingSecondaryEventFailureReasonLoginFailed = VmFailedStartingSecondaryEventFailureReason("loginFailed") + VmFailedStartingSecondaryEventFailureReasonRegisterVmFailed = VmFailedStartingSecondaryEventFailureReason("registerVmFailed") + VmFailedStartingSecondaryEventFailureReasonMigrateFailed = VmFailedStartingSecondaryEventFailureReason("migrateFailed") +) + +func init() { + t["VmFailedStartingSecondaryEventFailureReason"] = reflect.TypeOf((*VmFailedStartingSecondaryEventFailureReason)(nil)).Elem() +} + +type VmFaultToleranceConfigIssueReasonForIssue string + +const ( + VmFaultToleranceConfigIssueReasonForIssueHaNotEnabled = VmFaultToleranceConfigIssueReasonForIssue("haNotEnabled") + VmFaultToleranceConfigIssueReasonForIssueMoreThanOneSecondary = VmFaultToleranceConfigIssueReasonForIssue("moreThanOneSecondary") + VmFaultToleranceConfigIssueReasonForIssueRecordReplayNotSupported = VmFaultToleranceConfigIssueReasonForIssue("recordReplayNotSupported") + VmFaultToleranceConfigIssueReasonForIssueReplayNotSupported = VmFaultToleranceConfigIssueReasonForIssue("replayNotSupported") + VmFaultToleranceConfigIssueReasonForIssueTemplateVm = VmFaultToleranceConfigIssueReasonForIssue("templateVm") + VmFaultToleranceConfigIssueReasonForIssueMultipleVCPU = VmFaultToleranceConfigIssueReasonForIssue("multipleVCPU") + VmFaultToleranceConfigIssueReasonForIssueHostInactive = VmFaultToleranceConfigIssueReasonForIssue("hostInactive") + VmFaultToleranceConfigIssueReasonForIssueFtUnsupportedHardware = VmFaultToleranceConfigIssueReasonForIssue("ftUnsupportedHardware") + VmFaultToleranceConfigIssueReasonForIssueFtUnsupportedProduct = VmFaultToleranceConfigIssueReasonForIssue("ftUnsupportedProduct") + VmFaultToleranceConfigIssueReasonForIssueMissingVMotionNic = VmFaultToleranceConfigIssueReasonForIssue("missingVMotionNic") + VmFaultToleranceConfigIssueReasonForIssueMissingFTLoggingNic = VmFaultToleranceConfigIssueReasonForIssue("missingFTLoggingNic") + VmFaultToleranceConfigIssueReasonForIssueThinDisk = VmFaultToleranceConfigIssueReasonForIssue("thinDisk") + VmFaultToleranceConfigIssueReasonForIssueVerifySSLCertificateFlagNotSet = VmFaultToleranceConfigIssueReasonForIssue("verifySSLCertificateFlagNotSet") + VmFaultToleranceConfigIssueReasonForIssueHasSnapshots = VmFaultToleranceConfigIssueReasonForIssue("hasSnapshots") + VmFaultToleranceConfigIssueReasonForIssueNoConfig = VmFaultToleranceConfigIssueReasonForIssue("noConfig") + VmFaultToleranceConfigIssueReasonForIssueFtSecondaryVm = VmFaultToleranceConfigIssueReasonForIssue("ftSecondaryVm") + VmFaultToleranceConfigIssueReasonForIssueHasLocalDisk = VmFaultToleranceConfigIssueReasonForIssue("hasLocalDisk") + VmFaultToleranceConfigIssueReasonForIssueEsxAgentVm = VmFaultToleranceConfigIssueReasonForIssue("esxAgentVm") + VmFaultToleranceConfigIssueReasonForIssueVideo3dEnabled = VmFaultToleranceConfigIssueReasonForIssue("video3dEnabled") + VmFaultToleranceConfigIssueReasonForIssueHasUnsupportedDisk = 
VmFaultToleranceConfigIssueReasonForIssue("hasUnsupportedDisk") + VmFaultToleranceConfigIssueReasonForIssueInsufficientBandwidth = VmFaultToleranceConfigIssueReasonForIssue("insufficientBandwidth") + VmFaultToleranceConfigIssueReasonForIssueHasNestedHVConfiguration = VmFaultToleranceConfigIssueReasonForIssue("hasNestedHVConfiguration") + VmFaultToleranceConfigIssueReasonForIssueHasVFlashConfiguration = VmFaultToleranceConfigIssueReasonForIssue("hasVFlashConfiguration") + VmFaultToleranceConfigIssueReasonForIssueUnsupportedProduct = VmFaultToleranceConfigIssueReasonForIssue("unsupportedProduct") + VmFaultToleranceConfigIssueReasonForIssueCpuHvUnsupported = VmFaultToleranceConfigIssueReasonForIssue("cpuHvUnsupported") + VmFaultToleranceConfigIssueReasonForIssueCpuHwmmuUnsupported = VmFaultToleranceConfigIssueReasonForIssue("cpuHwmmuUnsupported") + VmFaultToleranceConfigIssueReasonForIssueCpuHvDisabled = VmFaultToleranceConfigIssueReasonForIssue("cpuHvDisabled") + VmFaultToleranceConfigIssueReasonForIssueHasEFIFirmware = VmFaultToleranceConfigIssueReasonForIssue("hasEFIFirmware") + VmFaultToleranceConfigIssueReasonForIssueTooManyVCPUs = VmFaultToleranceConfigIssueReasonForIssue("tooManyVCPUs") + VmFaultToleranceConfigIssueReasonForIssueTooMuchMemory = VmFaultToleranceConfigIssueReasonForIssue("tooMuchMemory") +) + +func init() { + t["VmFaultToleranceConfigIssueReasonForIssue"] = reflect.TypeOf((*VmFaultToleranceConfigIssueReasonForIssue)(nil)).Elem() +} + +type VmFaultToleranceInvalidFileBackingDeviceType string + +const ( + VmFaultToleranceInvalidFileBackingDeviceTypeVirtualFloppy = VmFaultToleranceInvalidFileBackingDeviceType("virtualFloppy") + VmFaultToleranceInvalidFileBackingDeviceTypeVirtualCdrom = VmFaultToleranceInvalidFileBackingDeviceType("virtualCdrom") + VmFaultToleranceInvalidFileBackingDeviceTypeVirtualSerialPort = VmFaultToleranceInvalidFileBackingDeviceType("virtualSerialPort") + VmFaultToleranceInvalidFileBackingDeviceTypeVirtualParallelPort = VmFaultToleranceInvalidFileBackingDeviceType("virtualParallelPort") + VmFaultToleranceInvalidFileBackingDeviceTypeVirtualDisk = VmFaultToleranceInvalidFileBackingDeviceType("virtualDisk") +) + +func init() { + t["VmFaultToleranceInvalidFileBackingDeviceType"] = reflect.TypeOf((*VmFaultToleranceInvalidFileBackingDeviceType)(nil)).Elem() +} + +type VmShutdownOnIsolationEventOperation string + +const ( + VmShutdownOnIsolationEventOperationShutdown = VmShutdownOnIsolationEventOperation("shutdown") + VmShutdownOnIsolationEventOperationPoweredOff = VmShutdownOnIsolationEventOperation("poweredOff") +) + +func init() { + t["VmShutdownOnIsolationEventOperation"] = reflect.TypeOf((*VmShutdownOnIsolationEventOperation)(nil)).Elem() +} + +type VmwareDistributedVirtualSwitchPvlanPortType string + +const ( + VmwareDistributedVirtualSwitchPvlanPortTypePromiscuous = VmwareDistributedVirtualSwitchPvlanPortType("promiscuous") + VmwareDistributedVirtualSwitchPvlanPortTypeIsolated = VmwareDistributedVirtualSwitchPvlanPortType("isolated") + VmwareDistributedVirtualSwitchPvlanPortTypeCommunity = VmwareDistributedVirtualSwitchPvlanPortType("community") +) + +func init() { + t["VmwareDistributedVirtualSwitchPvlanPortType"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchPvlanPortType)(nil)).Elem() +} + +type VsanDiskIssueType string + +const ( + VsanDiskIssueTypeNonExist = VsanDiskIssueType("nonExist") + VsanDiskIssueTypeStampMismatch = VsanDiskIssueType("stampMismatch") + VsanDiskIssueTypeUnknown = VsanDiskIssueType("unknown") +) + +func init() { + 
t["VsanDiskIssueType"] = reflect.TypeOf((*VsanDiskIssueType)(nil)).Elem() +} + +type VsanHostDecommissionModeObjectAction string + +const ( + VsanHostDecommissionModeObjectActionNoAction = VsanHostDecommissionModeObjectAction("noAction") + VsanHostDecommissionModeObjectActionEnsureObjectAccessibility = VsanHostDecommissionModeObjectAction("ensureObjectAccessibility") + VsanHostDecommissionModeObjectActionEvacuateAllData = VsanHostDecommissionModeObjectAction("evacuateAllData") +) + +func init() { + t["VsanHostDecommissionModeObjectAction"] = reflect.TypeOf((*VsanHostDecommissionModeObjectAction)(nil)).Elem() +} + +type VsanHostDiskResultState string + +const ( + VsanHostDiskResultStateInUse = VsanHostDiskResultState("inUse") + VsanHostDiskResultStateEligible = VsanHostDiskResultState("eligible") + VsanHostDiskResultStateIneligible = VsanHostDiskResultState("ineligible") +) + +func init() { + t["VsanHostDiskResultState"] = reflect.TypeOf((*VsanHostDiskResultState)(nil)).Elem() +} + +type VsanHostHealthState string + +const ( + VsanHostHealthStateUnknown = VsanHostHealthState("unknown") + VsanHostHealthStateHealthy = VsanHostHealthState("healthy") + VsanHostHealthStateUnhealthy = VsanHostHealthState("unhealthy") +) + +func init() { + t["VsanHostHealthState"] = reflect.TypeOf((*VsanHostHealthState)(nil)).Elem() +} + +type VsanHostNodeState string + +const ( + VsanHostNodeStateError = VsanHostNodeState("error") + VsanHostNodeStateDisabled = VsanHostNodeState("disabled") + VsanHostNodeStateAgent = VsanHostNodeState("agent") + VsanHostNodeStateMaster = VsanHostNodeState("master") + VsanHostNodeStateBackup = VsanHostNodeState("backup") + VsanHostNodeStateStarting = VsanHostNodeState("starting") + VsanHostNodeStateStopping = VsanHostNodeState("stopping") + VsanHostNodeStateEnteringMaintenanceMode = VsanHostNodeState("enteringMaintenanceMode") + VsanHostNodeStateExitingMaintenanceMode = VsanHostNodeState("exitingMaintenanceMode") + VsanHostNodeStateDecommissioning = VsanHostNodeState("decommissioning") +) + +func init() { + t["VsanHostNodeState"] = reflect.TypeOf((*VsanHostNodeState)(nil)).Elem() +} + +type VsanUpgradeSystemUpgradeHistoryDiskGroupOpType string + +const ( + VsanUpgradeSystemUpgradeHistoryDiskGroupOpTypeAdd = VsanUpgradeSystemUpgradeHistoryDiskGroupOpType("add") + VsanUpgradeSystemUpgradeHistoryDiskGroupOpTypeRemove = VsanUpgradeSystemUpgradeHistoryDiskGroupOpType("remove") +) + +func init() { + t["VsanUpgradeSystemUpgradeHistoryDiskGroupOpType"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryDiskGroupOpType)(nil)).Elem() +} + +type WeekOfMonth string + +const ( + WeekOfMonthFirst = WeekOfMonth("first") + WeekOfMonthSecond = WeekOfMonth("second") + WeekOfMonthThird = WeekOfMonth("third") + WeekOfMonthFourth = WeekOfMonth("fourth") + WeekOfMonthLast = WeekOfMonth("last") +) + +func init() { + t["WeekOfMonth"] = reflect.TypeOf((*WeekOfMonth)(nil)).Elem() +} + +type WillLoseHAProtectionResolution string + +const ( + WillLoseHAProtectionResolutionSvmotion = WillLoseHAProtectionResolution("svmotion") + WillLoseHAProtectionResolutionRelocate = WillLoseHAProtectionResolution("relocate") +) + +func init() { + t["WillLoseHAProtectionResolution"] = reflect.TypeOf((*WillLoseHAProtectionResolution)(nil)).Elem() +} + +type VslmVStorageObjectControlFlag string + +const ( + VslmVStorageObjectControlFlagKeepAfterDeleteVm = VslmVStorageObjectControlFlag("keepAfterDeleteVm") + VslmVStorageObjectControlFlagDisableRelocation = VslmVStorageObjectControlFlag("disableRelocation") + 
	VslmVStorageObjectControlFlagEnableChangedBlockTracking = VslmVStorageObjectControlFlag("enableChangedBlockTracking")
+)
+
+func init() {
+	t["vslmVStorageObjectControlFlag"] = reflect.TypeOf((*VslmVStorageObjectControlFlag)(nil)).Elem()
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/types/fault.go b/vendor/github.com/vmware/govmomi/vim25/types/fault.go
new file mode 100644
index 000000000000..c2503fa5cb72
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/types/fault.go
@@ -0,0 +1,32 @@
+/*
+Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+type HasFault interface {
+	Fault() BaseMethodFault
+}
+
+func IsFileNotFound(err error) bool {
+	if f, ok := err.(HasFault); ok {
+		switch f.Fault().(type) {
+		case *FileNotFound:
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/vmware/govmomi/vim25/types/helpers.go b/vendor/github.com/vmware/govmomi/vim25/types/helpers.go
new file mode 100644
index 000000000000..7ccfd29b634a
--- /dev/null
+++ b/vendor/github.com/vmware/govmomi/vim25/types/helpers.go
@@ -0,0 +1,95 @@
+/*
+Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+import (
+	"reflect"
+	"strings"
+	"time"
+)
+
+func NewBool(v bool) *bool {
+	return &v
+}
+
+func NewInt32(v int32) *int32 {
+	return &v
+}
+
+func NewInt64(v int64) *int64 {
+	return &v
+}
+
+func NewTime(v time.Time) *time.Time {
+	return &v
+}
+
+func NewReference(r ManagedObjectReference) *ManagedObjectReference {
+	return &r
+}
+
+func (r ManagedObjectReference) Reference() ManagedObjectReference {
+	return r
+}
+
+func (r ManagedObjectReference) String() string {
+	return strings.Join([]string{r.Type, r.Value}, ":")
+}
+
+func (r *ManagedObjectReference) FromString(o string) bool {
+	s := strings.SplitN(o, ":", 2)
+
+	if len(s) < 2 {
+		return false
+	}
+
+	r.Type = s[0]
+	r.Value = s[1]
+
+	return true
+}
+
+func (c *PerfCounterInfo) Name() string {
+	return c.GroupInfo.GetElementDescription().Key + "." + c.NameInfo.GetElementDescription().Key + "." + string(c.RollupType)
+}
+
+func defaultResourceAllocationInfo() ResourceAllocationInfo {
+	return ResourceAllocationInfo{
+		Reservation:           NewInt64(0),
+		ExpandableReservation: NewBool(true),
+		Limit:                 NewInt64(-1),
+		Shares: &SharesInfo{
+			Level: SharesLevelNormal,
+		},
+	}
+}
+
+// DefaultResourceConfigSpec returns a ResourceConfigSpec populated with the same default field values as vCenter.
+// Note that the wsdl marks these fields as optional, but they are required to be set when creating a resource pool. +// They are only optional when updating a resource pool. +func DefaultResourceConfigSpec() ResourceConfigSpec { + return ResourceConfigSpec{ + CpuAllocation: defaultResourceAllocationInfo(), + MemoryAllocation: defaultResourceAllocationInfo(), + } +} + +func init() { + // Known 6.5 issue where this event type is sent even though it is internal. + // This workaround allows us to unmarshal and avoid NPEs. + t["HostSubSpecificationUpdateEvent"] = reflect.TypeOf((*HostEvent)(nil)).Elem() +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/if.go b/vendor/github.com/vmware/govmomi/vim25/types/if.go new file mode 100644 index 000000000000..89d02f234bb9 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/if.go @@ -0,0 +1,3449 @@ +/* +Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import "reflect" + +func (b *Action) GetAction() *Action { return b } + +type BaseAction interface { + GetAction() *Action +} + +func init() { + t["BaseAction"] = reflect.TypeOf((*Action)(nil)).Elem() +} + +func (b *ActiveDirectoryFault) GetActiveDirectoryFault() *ActiveDirectoryFault { return b } + +type BaseActiveDirectoryFault interface { + GetActiveDirectoryFault() *ActiveDirectoryFault +} + +func init() { + t["BaseActiveDirectoryFault"] = reflect.TypeOf((*ActiveDirectoryFault)(nil)).Elem() +} + +func (b *AlarmAction) GetAlarmAction() *AlarmAction { return b } + +type BaseAlarmAction interface { + GetAlarmAction() *AlarmAction +} + +func init() { + t["BaseAlarmAction"] = reflect.TypeOf((*AlarmAction)(nil)).Elem() +} + +func (b *AlarmEvent) GetAlarmEvent() *AlarmEvent { return b } + +type BaseAlarmEvent interface { + GetAlarmEvent() *AlarmEvent +} + +func init() { + t["BaseAlarmEvent"] = reflect.TypeOf((*AlarmEvent)(nil)).Elem() +} + +func (b *AlarmExpression) GetAlarmExpression() *AlarmExpression { return b } + +type BaseAlarmExpression interface { + GetAlarmExpression() *AlarmExpression +} + +func init() { + t["BaseAlarmExpression"] = reflect.TypeOf((*AlarmExpression)(nil)).Elem() +} + +func (b *AlarmSpec) GetAlarmSpec() *AlarmSpec { return b } + +type BaseAlarmSpec interface { + GetAlarmSpec() *AlarmSpec +} + +func init() { + t["BaseAlarmSpec"] = reflect.TypeOf((*AlarmSpec)(nil)).Elem() +} + +func (b *AnswerFileCreateSpec) GetAnswerFileCreateSpec() *AnswerFileCreateSpec { return b } + +type BaseAnswerFileCreateSpec interface { + GetAnswerFileCreateSpec() *AnswerFileCreateSpec +} + +func init() { + t["BaseAnswerFileCreateSpec"] = reflect.TypeOf((*AnswerFileCreateSpec)(nil)).Elem() +} + +func (b *ApplyProfile) GetApplyProfile() *ApplyProfile { return b } + +type BaseApplyProfile interface { + GetApplyProfile() *ApplyProfile +} + +func init() { + t["BaseApplyProfile"] = reflect.TypeOf((*ApplyProfile)(nil)).Elem() +} + +func (b *ArrayUpdateSpec) GetArrayUpdateSpec() *ArrayUpdateSpec { return b } + +type BaseArrayUpdateSpec 
interface { + GetArrayUpdateSpec() *ArrayUpdateSpec +} + +func init() { + t["BaseArrayUpdateSpec"] = reflect.TypeOf((*ArrayUpdateSpec)(nil)).Elem() +} + +func (b *AuthorizationEvent) GetAuthorizationEvent() *AuthorizationEvent { return b } + +type BaseAuthorizationEvent interface { + GetAuthorizationEvent() *AuthorizationEvent +} + +func init() { + t["BaseAuthorizationEvent"] = reflect.TypeOf((*AuthorizationEvent)(nil)).Elem() +} + +func (b *BaseConfigInfo) GetBaseConfigInfo() *BaseConfigInfo { return b } + +type BaseBaseConfigInfo interface { + GetBaseConfigInfo() *BaseConfigInfo +} + +func init() { + t["BaseBaseConfigInfo"] = reflect.TypeOf((*BaseConfigInfo)(nil)).Elem() +} + +func (b *BaseConfigInfoBackingInfo) GetBaseConfigInfoBackingInfo() *BaseConfigInfoBackingInfo { + return b +} + +type BaseBaseConfigInfoBackingInfo interface { + GetBaseConfigInfoBackingInfo() *BaseConfigInfoBackingInfo +} + +func init() { + t["BaseBaseConfigInfoBackingInfo"] = reflect.TypeOf((*BaseConfigInfoBackingInfo)(nil)).Elem() +} + +func (b *BaseConfigInfoFileBackingInfo) GetBaseConfigInfoFileBackingInfo() *BaseConfigInfoFileBackingInfo { + return b +} + +type BaseBaseConfigInfoFileBackingInfo interface { + GetBaseConfigInfoFileBackingInfo() *BaseConfigInfoFileBackingInfo +} + +func init() { + t["BaseBaseConfigInfoFileBackingInfo"] = reflect.TypeOf((*BaseConfigInfoFileBackingInfo)(nil)).Elem() +} + +func (b *CannotAccessNetwork) GetCannotAccessNetwork() *CannotAccessNetwork { return b } + +type BaseCannotAccessNetwork interface { + GetCannotAccessNetwork() *CannotAccessNetwork +} + +func init() { + t["BaseCannotAccessNetwork"] = reflect.TypeOf((*CannotAccessNetwork)(nil)).Elem() +} + +func (b *CannotAccessVmComponent) GetCannotAccessVmComponent() *CannotAccessVmComponent { return b } + +type BaseCannotAccessVmComponent interface { + GetCannotAccessVmComponent() *CannotAccessVmComponent +} + +func init() { + t["BaseCannotAccessVmComponent"] = reflect.TypeOf((*CannotAccessVmComponent)(nil)).Elem() +} + +func (b *CannotAccessVmDevice) GetCannotAccessVmDevice() *CannotAccessVmDevice { return b } + +type BaseCannotAccessVmDevice interface { + GetCannotAccessVmDevice() *CannotAccessVmDevice +} + +func init() { + t["BaseCannotAccessVmDevice"] = reflect.TypeOf((*CannotAccessVmDevice)(nil)).Elem() +} + +func (b *CannotAccessVmDisk) GetCannotAccessVmDisk() *CannotAccessVmDisk { return b } + +type BaseCannotAccessVmDisk interface { + GetCannotAccessVmDisk() *CannotAccessVmDisk +} + +func init() { + t["BaseCannotAccessVmDisk"] = reflect.TypeOf((*CannotAccessVmDisk)(nil)).Elem() +} + +func (b *CannotMoveVsanEnabledHost) GetCannotMoveVsanEnabledHost() *CannotMoveVsanEnabledHost { + return b +} + +type BaseCannotMoveVsanEnabledHost interface { + GetCannotMoveVsanEnabledHost() *CannotMoveVsanEnabledHost +} + +func init() { + t["BaseCannotMoveVsanEnabledHost"] = reflect.TypeOf((*CannotMoveVsanEnabledHost)(nil)).Elem() +} + +func (b *ClusterAction) GetClusterAction() *ClusterAction { return b } + +type BaseClusterAction interface { + GetClusterAction() *ClusterAction +} + +func init() { + t["BaseClusterAction"] = reflect.TypeOf((*ClusterAction)(nil)).Elem() +} + +func (b *ClusterDasAdmissionControlInfo) GetClusterDasAdmissionControlInfo() *ClusterDasAdmissionControlInfo { + return b +} + +type BaseClusterDasAdmissionControlInfo interface { + GetClusterDasAdmissionControlInfo() *ClusterDasAdmissionControlInfo +} + +func init() { + t["BaseClusterDasAdmissionControlInfo"] = 
reflect.TypeOf((*ClusterDasAdmissionControlInfo)(nil)).Elem() +} + +func (b *ClusterDasAdmissionControlPolicy) GetClusterDasAdmissionControlPolicy() *ClusterDasAdmissionControlPolicy { + return b +} + +type BaseClusterDasAdmissionControlPolicy interface { + GetClusterDasAdmissionControlPolicy() *ClusterDasAdmissionControlPolicy +} + +func init() { + t["BaseClusterDasAdmissionControlPolicy"] = reflect.TypeOf((*ClusterDasAdmissionControlPolicy)(nil)).Elem() +} + +func (b *ClusterDasAdvancedRuntimeInfo) GetClusterDasAdvancedRuntimeInfo() *ClusterDasAdvancedRuntimeInfo { + return b +} + +type BaseClusterDasAdvancedRuntimeInfo interface { + GetClusterDasAdvancedRuntimeInfo() *ClusterDasAdvancedRuntimeInfo +} + +func init() { + t["BaseClusterDasAdvancedRuntimeInfo"] = reflect.TypeOf((*ClusterDasAdvancedRuntimeInfo)(nil)).Elem() +} + +func (b *ClusterDasData) GetClusterDasData() *ClusterDasData { return b } + +type BaseClusterDasData interface { + GetClusterDasData() *ClusterDasData +} + +func init() { + t["BaseClusterDasData"] = reflect.TypeOf((*ClusterDasData)(nil)).Elem() +} + +func (b *ClusterDasHostInfo) GetClusterDasHostInfo() *ClusterDasHostInfo { return b } + +type BaseClusterDasHostInfo interface { + GetClusterDasHostInfo() *ClusterDasHostInfo +} + +func init() { + t["BaseClusterDasHostInfo"] = reflect.TypeOf((*ClusterDasHostInfo)(nil)).Elem() +} + +func (b *ClusterDrsFaultsFaultsByVm) GetClusterDrsFaultsFaultsByVm() *ClusterDrsFaultsFaultsByVm { + return b +} + +type BaseClusterDrsFaultsFaultsByVm interface { + GetClusterDrsFaultsFaultsByVm() *ClusterDrsFaultsFaultsByVm +} + +func init() { + t["BaseClusterDrsFaultsFaultsByVm"] = reflect.TypeOf((*ClusterDrsFaultsFaultsByVm)(nil)).Elem() +} + +func (b *ClusterEvent) GetClusterEvent() *ClusterEvent { return b } + +type BaseClusterEvent interface { + GetClusterEvent() *ClusterEvent +} + +func init() { + t["BaseClusterEvent"] = reflect.TypeOf((*ClusterEvent)(nil)).Elem() +} + +func (b *ClusterGroupInfo) GetClusterGroupInfo() *ClusterGroupInfo { return b } + +type BaseClusterGroupInfo interface { + GetClusterGroupInfo() *ClusterGroupInfo +} + +func init() { + t["BaseClusterGroupInfo"] = reflect.TypeOf((*ClusterGroupInfo)(nil)).Elem() +} + +func (b *ClusterOvercommittedEvent) GetClusterOvercommittedEvent() *ClusterOvercommittedEvent { + return b +} + +type BaseClusterOvercommittedEvent interface { + GetClusterOvercommittedEvent() *ClusterOvercommittedEvent +} + +func init() { + t["BaseClusterOvercommittedEvent"] = reflect.TypeOf((*ClusterOvercommittedEvent)(nil)).Elem() +} + +func (b *ClusterProfileConfigSpec) GetClusterProfileConfigSpec() *ClusterProfileConfigSpec { return b } + +type BaseClusterProfileConfigSpec interface { + GetClusterProfileConfigSpec() *ClusterProfileConfigSpec +} + +func init() { + t["BaseClusterProfileConfigSpec"] = reflect.TypeOf((*ClusterProfileConfigSpec)(nil)).Elem() +} + +func (b *ClusterProfileCreateSpec) GetClusterProfileCreateSpec() *ClusterProfileCreateSpec { return b } + +type BaseClusterProfileCreateSpec interface { + GetClusterProfileCreateSpec() *ClusterProfileCreateSpec +} + +func init() { + t["BaseClusterProfileCreateSpec"] = reflect.TypeOf((*ClusterProfileCreateSpec)(nil)).Elem() +} + +func (b *ClusterRuleInfo) GetClusterRuleInfo() *ClusterRuleInfo { return b } + +type BaseClusterRuleInfo interface { + GetClusterRuleInfo() *ClusterRuleInfo +} + +func init() { + t["BaseClusterRuleInfo"] = reflect.TypeOf((*ClusterRuleInfo)(nil)).Elem() +} + +func (b *ClusterSlotPolicy) GetClusterSlotPolicy() 
*ClusterSlotPolicy { return b } + +type BaseClusterSlotPolicy interface { + GetClusterSlotPolicy() *ClusterSlotPolicy +} + +func init() { + t["BaseClusterSlotPolicy"] = reflect.TypeOf((*ClusterSlotPolicy)(nil)).Elem() +} + +func (b *ClusterStatusChangedEvent) GetClusterStatusChangedEvent() *ClusterStatusChangedEvent { + return b +} + +type BaseClusterStatusChangedEvent interface { + GetClusterStatusChangedEvent() *ClusterStatusChangedEvent +} + +func init() { + t["BaseClusterStatusChangedEvent"] = reflect.TypeOf((*ClusterStatusChangedEvent)(nil)).Elem() +} + +func (b *ComputeResourceConfigInfo) GetComputeResourceConfigInfo() *ComputeResourceConfigInfo { + return b +} + +type BaseComputeResourceConfigInfo interface { + GetComputeResourceConfigInfo() *ComputeResourceConfigInfo +} + +func init() { + t["BaseComputeResourceConfigInfo"] = reflect.TypeOf((*ComputeResourceConfigInfo)(nil)).Elem() +} + +func (b *ComputeResourceConfigSpec) GetComputeResourceConfigSpec() *ComputeResourceConfigSpec { + return b +} + +type BaseComputeResourceConfigSpec interface { + GetComputeResourceConfigSpec() *ComputeResourceConfigSpec +} + +func init() { + t["BaseComputeResourceConfigSpec"] = reflect.TypeOf((*ComputeResourceConfigSpec)(nil)).Elem() +} + +func (b *ComputeResourceSummary) GetComputeResourceSummary() *ComputeResourceSummary { return b } + +type BaseComputeResourceSummary interface { + GetComputeResourceSummary() *ComputeResourceSummary +} + +func init() { + t["BaseComputeResourceSummary"] = reflect.TypeOf((*ComputeResourceSummary)(nil)).Elem() +} + +func (b *CpuIncompatible) GetCpuIncompatible() *CpuIncompatible { return b } + +type BaseCpuIncompatible interface { + GetCpuIncompatible() *CpuIncompatible +} + +func init() { + t["BaseCpuIncompatible"] = reflect.TypeOf((*CpuIncompatible)(nil)).Elem() +} + +func (b *CryptoSpec) GetCryptoSpec() *CryptoSpec { return b } + +type BaseCryptoSpec interface { + GetCryptoSpec() *CryptoSpec +} + +func init() { + t["BaseCryptoSpec"] = reflect.TypeOf((*CryptoSpec)(nil)).Elem() +} + +func (b *CryptoSpecNoOp) GetCryptoSpecNoOp() *CryptoSpecNoOp { return b } + +type BaseCryptoSpecNoOp interface { + GetCryptoSpecNoOp() *CryptoSpecNoOp +} + +func init() { + t["BaseCryptoSpecNoOp"] = reflect.TypeOf((*CryptoSpecNoOp)(nil)).Elem() +} + +func (b *CustomFieldDefEvent) GetCustomFieldDefEvent() *CustomFieldDefEvent { return b } + +type BaseCustomFieldDefEvent interface { + GetCustomFieldDefEvent() *CustomFieldDefEvent +} + +func init() { + t["BaseCustomFieldDefEvent"] = reflect.TypeOf((*CustomFieldDefEvent)(nil)).Elem() +} + +func (b *CustomFieldEvent) GetCustomFieldEvent() *CustomFieldEvent { return b } + +type BaseCustomFieldEvent interface { + GetCustomFieldEvent() *CustomFieldEvent +} + +func init() { + t["BaseCustomFieldEvent"] = reflect.TypeOf((*CustomFieldEvent)(nil)).Elem() +} + +func (b *CustomFieldValue) GetCustomFieldValue() *CustomFieldValue { return b } + +type BaseCustomFieldValue interface { + GetCustomFieldValue() *CustomFieldValue +} + +func init() { + t["BaseCustomFieldValue"] = reflect.TypeOf((*CustomFieldValue)(nil)).Elem() +} + +func (b *CustomizationEvent) GetCustomizationEvent() *CustomizationEvent { return b } + +type BaseCustomizationEvent interface { + GetCustomizationEvent() *CustomizationEvent +} + +func init() { + t["BaseCustomizationEvent"] = reflect.TypeOf((*CustomizationEvent)(nil)).Elem() +} + +func (b *CustomizationFailed) GetCustomizationFailed() *CustomizationFailed { return b } + +type BaseCustomizationFailed interface { + 
GetCustomizationFailed() *CustomizationFailed +} + +func init() { + t["BaseCustomizationFailed"] = reflect.TypeOf((*CustomizationFailed)(nil)).Elem() +} + +func (b *CustomizationFault) GetCustomizationFault() *CustomizationFault { return b } + +type BaseCustomizationFault interface { + GetCustomizationFault() *CustomizationFault +} + +func init() { + t["BaseCustomizationFault"] = reflect.TypeOf((*CustomizationFault)(nil)).Elem() +} + +func (b *CustomizationIdentitySettings) GetCustomizationIdentitySettings() *CustomizationIdentitySettings { + return b +} + +type BaseCustomizationIdentitySettings interface { + GetCustomizationIdentitySettings() *CustomizationIdentitySettings +} + +func init() { + t["BaseCustomizationIdentitySettings"] = reflect.TypeOf((*CustomizationIdentitySettings)(nil)).Elem() +} + +func (b *CustomizationIpGenerator) GetCustomizationIpGenerator() *CustomizationIpGenerator { return b } + +type BaseCustomizationIpGenerator interface { + GetCustomizationIpGenerator() *CustomizationIpGenerator +} + +func init() { + t["BaseCustomizationIpGenerator"] = reflect.TypeOf((*CustomizationIpGenerator)(nil)).Elem() +} + +func (b *CustomizationIpV6Generator) GetCustomizationIpV6Generator() *CustomizationIpV6Generator { + return b +} + +type BaseCustomizationIpV6Generator interface { + GetCustomizationIpV6Generator() *CustomizationIpV6Generator +} + +func init() { + t["BaseCustomizationIpV6Generator"] = reflect.TypeOf((*CustomizationIpV6Generator)(nil)).Elem() +} + +func (b *CustomizationName) GetCustomizationName() *CustomizationName { return b } + +type BaseCustomizationName interface { + GetCustomizationName() *CustomizationName +} + +func init() { + t["BaseCustomizationName"] = reflect.TypeOf((*CustomizationName)(nil)).Elem() +} + +func (b *CustomizationOptions) GetCustomizationOptions() *CustomizationOptions { return b } + +type BaseCustomizationOptions interface { + GetCustomizationOptions() *CustomizationOptions +} + +func init() { + t["BaseCustomizationOptions"] = reflect.TypeOf((*CustomizationOptions)(nil)).Elem() +} + +func (b *DVPortSetting) GetDVPortSetting() *DVPortSetting { return b } + +type BaseDVPortSetting interface { + GetDVPortSetting() *DVPortSetting +} + +func init() { + t["BaseDVPortSetting"] = reflect.TypeOf((*DVPortSetting)(nil)).Elem() +} + +func (b *DVPortgroupEvent) GetDVPortgroupEvent() *DVPortgroupEvent { return b } + +type BaseDVPortgroupEvent interface { + GetDVPortgroupEvent() *DVPortgroupEvent +} + +func init() { + t["BaseDVPortgroupEvent"] = reflect.TypeOf((*DVPortgroupEvent)(nil)).Elem() +} + +func (b *DVPortgroupPolicy) GetDVPortgroupPolicy() *DVPortgroupPolicy { return b } + +type BaseDVPortgroupPolicy interface { + GetDVPortgroupPolicy() *DVPortgroupPolicy +} + +func init() { + t["BaseDVPortgroupPolicy"] = reflect.TypeOf((*DVPortgroupPolicy)(nil)).Elem() +} + +func (b *DVSConfigInfo) GetDVSConfigInfo() *DVSConfigInfo { return b } + +type BaseDVSConfigInfo interface { + GetDVSConfigInfo() *DVSConfigInfo +} + +func init() { + t["BaseDVSConfigInfo"] = reflect.TypeOf((*DVSConfigInfo)(nil)).Elem() +} + +func (b *DVSConfigSpec) GetDVSConfigSpec() *DVSConfigSpec { return b } + +type BaseDVSConfigSpec interface { + GetDVSConfigSpec() *DVSConfigSpec +} + +func init() { + t["BaseDVSConfigSpec"] = reflect.TypeOf((*DVSConfigSpec)(nil)).Elem() +} + +func (b *DVSFeatureCapability) GetDVSFeatureCapability() *DVSFeatureCapability { return b } + +type BaseDVSFeatureCapability interface { + GetDVSFeatureCapability() *DVSFeatureCapability +} + +func init() { + 
t["BaseDVSFeatureCapability"] = reflect.TypeOf((*DVSFeatureCapability)(nil)).Elem() +} + +func (b *DVSHealthCheckCapability) GetDVSHealthCheckCapability() *DVSHealthCheckCapability { return b } + +type BaseDVSHealthCheckCapability interface { + GetDVSHealthCheckCapability() *DVSHealthCheckCapability +} + +func init() { + t["BaseDVSHealthCheckCapability"] = reflect.TypeOf((*DVSHealthCheckCapability)(nil)).Elem() +} + +func (b *DVSHealthCheckConfig) GetDVSHealthCheckConfig() *DVSHealthCheckConfig { return b } + +type BaseDVSHealthCheckConfig interface { + GetDVSHealthCheckConfig() *DVSHealthCheckConfig +} + +func init() { + t["BaseDVSHealthCheckConfig"] = reflect.TypeOf((*DVSHealthCheckConfig)(nil)).Elem() +} + +func (b *DVSUplinkPortPolicy) GetDVSUplinkPortPolicy() *DVSUplinkPortPolicy { return b } + +type BaseDVSUplinkPortPolicy interface { + GetDVSUplinkPortPolicy() *DVSUplinkPortPolicy +} + +func init() { + t["BaseDVSUplinkPortPolicy"] = reflect.TypeOf((*DVSUplinkPortPolicy)(nil)).Elem() +} + +func (b *DailyTaskScheduler) GetDailyTaskScheduler() *DailyTaskScheduler { return b } + +type BaseDailyTaskScheduler interface { + GetDailyTaskScheduler() *DailyTaskScheduler +} + +func init() { + t["BaseDailyTaskScheduler"] = reflect.TypeOf((*DailyTaskScheduler)(nil)).Elem() +} + +func (b *DatacenterEvent) GetDatacenterEvent() *DatacenterEvent { return b } + +type BaseDatacenterEvent interface { + GetDatacenterEvent() *DatacenterEvent +} + +func init() { + t["BaseDatacenterEvent"] = reflect.TypeOf((*DatacenterEvent)(nil)).Elem() +} + +func (b *DatastoreEvent) GetDatastoreEvent() *DatastoreEvent { return b } + +type BaseDatastoreEvent interface { + GetDatastoreEvent() *DatastoreEvent +} + +func init() { + t["BaseDatastoreEvent"] = reflect.TypeOf((*DatastoreEvent)(nil)).Elem() +} + +func (b *DatastoreFileEvent) GetDatastoreFileEvent() *DatastoreFileEvent { return b } + +type BaseDatastoreFileEvent interface { + GetDatastoreFileEvent() *DatastoreFileEvent +} + +func init() { + t["BaseDatastoreFileEvent"] = reflect.TypeOf((*DatastoreFileEvent)(nil)).Elem() +} + +func (b *DatastoreInfo) GetDatastoreInfo() *DatastoreInfo { return b } + +type BaseDatastoreInfo interface { + GetDatastoreInfo() *DatastoreInfo +} + +func init() { + t["BaseDatastoreInfo"] = reflect.TypeOf((*DatastoreInfo)(nil)).Elem() +} + +func (b *DatastoreNotWritableOnHost) GetDatastoreNotWritableOnHost() *DatastoreNotWritableOnHost { + return b +} + +type BaseDatastoreNotWritableOnHost interface { + GetDatastoreNotWritableOnHost() *DatastoreNotWritableOnHost +} + +func init() { + t["BaseDatastoreNotWritableOnHost"] = reflect.TypeOf((*DatastoreNotWritableOnHost)(nil)).Elem() +} + +func (b *Description) GetDescription() *Description { return b } + +type BaseDescription interface { + GetDescription() *Description +} + +func init() { + t["BaseDescription"] = reflect.TypeOf((*Description)(nil)).Elem() +} + +func (b *DeviceBackingNotSupported) GetDeviceBackingNotSupported() *DeviceBackingNotSupported { + return b +} + +type BaseDeviceBackingNotSupported interface { + GetDeviceBackingNotSupported() *DeviceBackingNotSupported +} + +func init() { + t["BaseDeviceBackingNotSupported"] = reflect.TypeOf((*DeviceBackingNotSupported)(nil)).Elem() +} + +func (b *DeviceNotSupported) GetDeviceNotSupported() *DeviceNotSupported { return b } + +type BaseDeviceNotSupported interface { + GetDeviceNotSupported() *DeviceNotSupported +} + +func init() { + t["BaseDeviceNotSupported"] = reflect.TypeOf((*DeviceNotSupported)(nil)).Elem() +} + +func (b 
*DiskNotSupported) GetDiskNotSupported() *DiskNotSupported { return b } + +type BaseDiskNotSupported interface { + GetDiskNotSupported() *DiskNotSupported +} + +func init() { + t["BaseDiskNotSupported"] = reflect.TypeOf((*DiskNotSupported)(nil)).Elem() +} + +func (b *DistributedVirtualSwitchHostMemberBacking) GetDistributedVirtualSwitchHostMemberBacking() *DistributedVirtualSwitchHostMemberBacking { + return b +} + +type BaseDistributedVirtualSwitchHostMemberBacking interface { + GetDistributedVirtualSwitchHostMemberBacking() *DistributedVirtualSwitchHostMemberBacking +} + +func init() { + t["BaseDistributedVirtualSwitchHostMemberBacking"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberBacking)(nil)).Elem() +} + +func (b *DistributedVirtualSwitchManagerHostDvsFilterSpec) GetDistributedVirtualSwitchManagerHostDvsFilterSpec() *DistributedVirtualSwitchManagerHostDvsFilterSpec { + return b +} + +type BaseDistributedVirtualSwitchManagerHostDvsFilterSpec interface { + GetDistributedVirtualSwitchManagerHostDvsFilterSpec() *DistributedVirtualSwitchManagerHostDvsFilterSpec +} + +func init() { + t["BaseDistributedVirtualSwitchManagerHostDvsFilterSpec"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostDvsFilterSpec)(nil)).Elem() +} + +func (b *DvsEvent) GetDvsEvent() *DvsEvent { return b } + +type BaseDvsEvent interface { + GetDvsEvent() *DvsEvent +} + +func init() { + t["BaseDvsEvent"] = reflect.TypeOf((*DvsEvent)(nil)).Elem() +} + +func (b *DvsFault) GetDvsFault() *DvsFault { return b } + +type BaseDvsFault interface { + GetDvsFault() *DvsFault +} + +func init() { + t["BaseDvsFault"] = reflect.TypeOf((*DvsFault)(nil)).Elem() +} + +func (b *DvsFilterConfig) GetDvsFilterConfig() *DvsFilterConfig { return b } + +type BaseDvsFilterConfig interface { + GetDvsFilterConfig() *DvsFilterConfig +} + +func init() { + t["BaseDvsFilterConfig"] = reflect.TypeOf((*DvsFilterConfig)(nil)).Elem() +} + +func (b *DvsHealthStatusChangeEvent) GetDvsHealthStatusChangeEvent() *DvsHealthStatusChangeEvent { + return b +} + +type BaseDvsHealthStatusChangeEvent interface { + GetDvsHealthStatusChangeEvent() *DvsHealthStatusChangeEvent +} + +func init() { + t["BaseDvsHealthStatusChangeEvent"] = reflect.TypeOf((*DvsHealthStatusChangeEvent)(nil)).Elem() +} + +func (b *DvsIpPort) GetDvsIpPort() *DvsIpPort { return b } + +type BaseDvsIpPort interface { + GetDvsIpPort() *DvsIpPort +} + +func init() { + t["BaseDvsIpPort"] = reflect.TypeOf((*DvsIpPort)(nil)).Elem() +} + +func (b *DvsNetworkRuleAction) GetDvsNetworkRuleAction() *DvsNetworkRuleAction { return b } + +type BaseDvsNetworkRuleAction interface { + GetDvsNetworkRuleAction() *DvsNetworkRuleAction +} + +func init() { + t["BaseDvsNetworkRuleAction"] = reflect.TypeOf((*DvsNetworkRuleAction)(nil)).Elem() +} + +func (b *DvsNetworkRuleQualifier) GetDvsNetworkRuleQualifier() *DvsNetworkRuleQualifier { return b } + +type BaseDvsNetworkRuleQualifier interface { + GetDvsNetworkRuleQualifier() *DvsNetworkRuleQualifier +} + +func init() { + t["BaseDvsNetworkRuleQualifier"] = reflect.TypeOf((*DvsNetworkRuleQualifier)(nil)).Elem() +} + +func (b *DvsTrafficFilterConfig) GetDvsTrafficFilterConfig() *DvsTrafficFilterConfig { return b } + +type BaseDvsTrafficFilterConfig interface { + GetDvsTrafficFilterConfig() *DvsTrafficFilterConfig +} + +func init() { + t["BaseDvsTrafficFilterConfig"] = reflect.TypeOf((*DvsTrafficFilterConfig)(nil)).Elem() +} + +func (b *DvsVNicProfile) GetDvsVNicProfile() *DvsVNicProfile { return b } + +type BaseDvsVNicProfile interface { + GetDvsVNicProfile() 
*DvsVNicProfile +} + +func init() { + t["BaseDvsVNicProfile"] = reflect.TypeOf((*DvsVNicProfile)(nil)).Elem() +} + +func (b *DynamicData) GetDynamicData() *DynamicData { return b } + +type BaseDynamicData interface { + GetDynamicData() *DynamicData +} + +func init() { + t["BaseDynamicData"] = reflect.TypeOf((*DynamicData)(nil)).Elem() +} + +func (b *EVCAdmissionFailed) GetEVCAdmissionFailed() *EVCAdmissionFailed { return b } + +type BaseEVCAdmissionFailed interface { + GetEVCAdmissionFailed() *EVCAdmissionFailed +} + +func init() { + t["BaseEVCAdmissionFailed"] = reflect.TypeOf((*EVCAdmissionFailed)(nil)).Elem() +} + +func (b *EVCConfigFault) GetEVCConfigFault() *EVCConfigFault { return b } + +type BaseEVCConfigFault interface { + GetEVCConfigFault() *EVCConfigFault +} + +func init() { + t["BaseEVCConfigFault"] = reflect.TypeOf((*EVCConfigFault)(nil)).Elem() +} + +func (b *ElementDescription) GetElementDescription() *ElementDescription { return b } + +type BaseElementDescription interface { + GetElementDescription() *ElementDescription +} + +func init() { + t["BaseElementDescription"] = reflect.TypeOf((*ElementDescription)(nil)).Elem() +} + +func (b *EnteredStandbyModeEvent) GetEnteredStandbyModeEvent() *EnteredStandbyModeEvent { return b } + +type BaseEnteredStandbyModeEvent interface { + GetEnteredStandbyModeEvent() *EnteredStandbyModeEvent +} + +func init() { + t["BaseEnteredStandbyModeEvent"] = reflect.TypeOf((*EnteredStandbyModeEvent)(nil)).Elem() +} + +func (b *EnteringStandbyModeEvent) GetEnteringStandbyModeEvent() *EnteringStandbyModeEvent { return b } + +type BaseEnteringStandbyModeEvent interface { + GetEnteringStandbyModeEvent() *EnteringStandbyModeEvent +} + +func init() { + t["BaseEnteringStandbyModeEvent"] = reflect.TypeOf((*EnteringStandbyModeEvent)(nil)).Elem() +} + +func (b *EntityEventArgument) GetEntityEventArgument() *EntityEventArgument { return b } + +type BaseEntityEventArgument interface { + GetEntityEventArgument() *EntityEventArgument +} + +func init() { + t["BaseEntityEventArgument"] = reflect.TypeOf((*EntityEventArgument)(nil)).Elem() +} + +func (b *Event) GetEvent() *Event { return b } + +type BaseEvent interface { + GetEvent() *Event +} + +func init() { + t["BaseEvent"] = reflect.TypeOf((*Event)(nil)).Elem() +} + +func (b *EventArgument) GetEventArgument() *EventArgument { return b } + +type BaseEventArgument interface { + GetEventArgument() *EventArgument +} + +func init() { + t["BaseEventArgument"] = reflect.TypeOf((*EventArgument)(nil)).Elem() +} + +func (b *ExitStandbyModeFailedEvent) GetExitStandbyModeFailedEvent() *ExitStandbyModeFailedEvent { + return b +} + +type BaseExitStandbyModeFailedEvent interface { + GetExitStandbyModeFailedEvent() *ExitStandbyModeFailedEvent +} + +func init() { + t["BaseExitStandbyModeFailedEvent"] = reflect.TypeOf((*ExitStandbyModeFailedEvent)(nil)).Elem() +} + +func (b *ExitedStandbyModeEvent) GetExitedStandbyModeEvent() *ExitedStandbyModeEvent { return b } + +type BaseExitedStandbyModeEvent interface { + GetExitedStandbyModeEvent() *ExitedStandbyModeEvent +} + +func init() { + t["BaseExitedStandbyModeEvent"] = reflect.TypeOf((*ExitedStandbyModeEvent)(nil)).Elem() +} + +func (b *ExitingStandbyModeEvent) GetExitingStandbyModeEvent() *ExitingStandbyModeEvent { return b } + +type BaseExitingStandbyModeEvent interface { + GetExitingStandbyModeEvent() *ExitingStandbyModeEvent +} + +func init() { + t["BaseExitingStandbyModeEvent"] = reflect.TypeOf((*ExitingStandbyModeEvent)(nil)).Elem() +} + +func (b *ExpiredFeatureLicense) 
GetExpiredFeatureLicense() *ExpiredFeatureLicense { return b } + +type BaseExpiredFeatureLicense interface { + GetExpiredFeatureLicense() *ExpiredFeatureLicense +} + +func init() { + t["BaseExpiredFeatureLicense"] = reflect.TypeOf((*ExpiredFeatureLicense)(nil)).Elem() +} + +func (b *FaultToleranceConfigInfo) GetFaultToleranceConfigInfo() *FaultToleranceConfigInfo { return b } + +type BaseFaultToleranceConfigInfo interface { + GetFaultToleranceConfigInfo() *FaultToleranceConfigInfo +} + +func init() { + t["BaseFaultToleranceConfigInfo"] = reflect.TypeOf((*FaultToleranceConfigInfo)(nil)).Elem() +} + +func (b *FcoeFault) GetFcoeFault() *FcoeFault { return b } + +type BaseFcoeFault interface { + GetFcoeFault() *FcoeFault +} + +func init() { + t["BaseFcoeFault"] = reflect.TypeOf((*FcoeFault)(nil)).Elem() +} + +func (b *FileBackedVirtualDiskSpec) GetFileBackedVirtualDiskSpec() *FileBackedVirtualDiskSpec { + return b +} + +type BaseFileBackedVirtualDiskSpec interface { + GetFileBackedVirtualDiskSpec() *FileBackedVirtualDiskSpec +} + +func init() { + t["BaseFileBackedVirtualDiskSpec"] = reflect.TypeOf((*FileBackedVirtualDiskSpec)(nil)).Elem() +} + +func (b *FileFault) GetFileFault() *FileFault { return b } + +type BaseFileFault interface { + GetFileFault() *FileFault +} + +func init() { + t["BaseFileFault"] = reflect.TypeOf((*FileFault)(nil)).Elem() +} + +func (b *FileInfo) GetFileInfo() *FileInfo { return b } + +type BaseFileInfo interface { + GetFileInfo() *FileInfo +} + +func init() { + t["BaseFileInfo"] = reflect.TypeOf((*FileInfo)(nil)).Elem() +} + +func (b *FileQuery) GetFileQuery() *FileQuery { return b } + +type BaseFileQuery interface { + GetFileQuery() *FileQuery +} + +func init() { + t["BaseFileQuery"] = reflect.TypeOf((*FileQuery)(nil)).Elem() +} + +func (b *GatewayConnectFault) GetGatewayConnectFault() *GatewayConnectFault { return b } + +type BaseGatewayConnectFault interface { + GetGatewayConnectFault() *GatewayConnectFault +} + +func init() { + t["BaseGatewayConnectFault"] = reflect.TypeOf((*GatewayConnectFault)(nil)).Elem() +} + +func (b *GatewayToHostConnectFault) GetGatewayToHostConnectFault() *GatewayToHostConnectFault { + return b +} + +type BaseGatewayToHostConnectFault interface { + GetGatewayToHostConnectFault() *GatewayToHostConnectFault +} + +func init() { + t["BaseGatewayToHostConnectFault"] = reflect.TypeOf((*GatewayToHostConnectFault)(nil)).Elem() +} + +func (b *GeneralEvent) GetGeneralEvent() *GeneralEvent { return b } + +type BaseGeneralEvent interface { + GetGeneralEvent() *GeneralEvent +} + +func init() { + t["BaseGeneralEvent"] = reflect.TypeOf((*GeneralEvent)(nil)).Elem() +} + +func (b *GuestAuthSubject) GetGuestAuthSubject() *GuestAuthSubject { return b } + +type BaseGuestAuthSubject interface { + GetGuestAuthSubject() *GuestAuthSubject +} + +func init() { + t["BaseGuestAuthSubject"] = reflect.TypeOf((*GuestAuthSubject)(nil)).Elem() +} + +func (b *GuestAuthentication) GetGuestAuthentication() *GuestAuthentication { return b } + +type BaseGuestAuthentication interface { + GetGuestAuthentication() *GuestAuthentication +} + +func init() { + t["BaseGuestAuthentication"] = reflect.TypeOf((*GuestAuthentication)(nil)).Elem() +} + +func (b *GuestFileAttributes) GetGuestFileAttributes() *GuestFileAttributes { return b } + +type BaseGuestFileAttributes interface { + GetGuestFileAttributes() *GuestFileAttributes +} + +func init() { + t["BaseGuestFileAttributes"] = reflect.TypeOf((*GuestFileAttributes)(nil)).Elem() +} + +func (b *GuestOperationsFault) 
GetGuestOperationsFault() *GuestOperationsFault { return b } + +type BaseGuestOperationsFault interface { + GetGuestOperationsFault() *GuestOperationsFault +} + +func init() { + t["BaseGuestOperationsFault"] = reflect.TypeOf((*GuestOperationsFault)(nil)).Elem() +} + +func (b *GuestProgramSpec) GetGuestProgramSpec() *GuestProgramSpec { return b } + +type BaseGuestProgramSpec interface { + GetGuestProgramSpec() *GuestProgramSpec +} + +func init() { + t["BaseGuestProgramSpec"] = reflect.TypeOf((*GuestProgramSpec)(nil)).Elem() +} + +func (b *GuestRegValueDataSpec) GetGuestRegValueDataSpec() *GuestRegValueDataSpec { return b } + +type BaseGuestRegValueDataSpec interface { + GetGuestRegValueDataSpec() *GuestRegValueDataSpec +} + +func init() { + t["BaseGuestRegValueDataSpec"] = reflect.TypeOf((*GuestRegValueDataSpec)(nil)).Elem() +} + +func (b *GuestRegistryFault) GetGuestRegistryFault() *GuestRegistryFault { return b } + +type BaseGuestRegistryFault interface { + GetGuestRegistryFault() *GuestRegistryFault +} + +func init() { + t["BaseGuestRegistryFault"] = reflect.TypeOf((*GuestRegistryFault)(nil)).Elem() +} + +func (b *GuestRegistryKeyFault) GetGuestRegistryKeyFault() *GuestRegistryKeyFault { return b } + +type BaseGuestRegistryKeyFault interface { + GetGuestRegistryKeyFault() *GuestRegistryKeyFault +} + +func init() { + t["BaseGuestRegistryKeyFault"] = reflect.TypeOf((*GuestRegistryKeyFault)(nil)).Elem() +} + +func (b *GuestRegistryValueFault) GetGuestRegistryValueFault() *GuestRegistryValueFault { return b } + +type BaseGuestRegistryValueFault interface { + GetGuestRegistryValueFault() *GuestRegistryValueFault +} + +func init() { + t["BaseGuestRegistryValueFault"] = reflect.TypeOf((*GuestRegistryValueFault)(nil)).Elem() +} + +func (b *HostAccountSpec) GetHostAccountSpec() *HostAccountSpec { return b } + +type BaseHostAccountSpec interface { + GetHostAccountSpec() *HostAccountSpec +} + +func init() { + t["BaseHostAccountSpec"] = reflect.TypeOf((*HostAccountSpec)(nil)).Elem() +} + +func (b *HostAuthenticationStoreInfo) GetHostAuthenticationStoreInfo() *HostAuthenticationStoreInfo { + return b +} + +type BaseHostAuthenticationStoreInfo interface { + GetHostAuthenticationStoreInfo() *HostAuthenticationStoreInfo +} + +func init() { + t["BaseHostAuthenticationStoreInfo"] = reflect.TypeOf((*HostAuthenticationStoreInfo)(nil)).Elem() +} + +func (b *HostCommunication) GetHostCommunication() *HostCommunication { return b } + +type BaseHostCommunication interface { + GetHostCommunication() *HostCommunication +} + +func init() { + t["BaseHostCommunication"] = reflect.TypeOf((*HostCommunication)(nil)).Elem() +} + +func (b *HostConfigFault) GetHostConfigFault() *HostConfigFault { return b } + +type BaseHostConfigFault interface { + GetHostConfigFault() *HostConfigFault +} + +func init() { + t["BaseHostConfigFault"] = reflect.TypeOf((*HostConfigFault)(nil)).Elem() +} + +func (b *HostConnectFault) GetHostConnectFault() *HostConnectFault { return b } + +type BaseHostConnectFault interface { + GetHostConnectFault() *HostConnectFault +} + +func init() { + t["BaseHostConnectFault"] = reflect.TypeOf((*HostConnectFault)(nil)).Elem() +} + +func (b *HostConnectInfoNetworkInfo) GetHostConnectInfoNetworkInfo() *HostConnectInfoNetworkInfo { + return b +} + +type BaseHostConnectInfoNetworkInfo interface { + GetHostConnectInfoNetworkInfo() *HostConnectInfoNetworkInfo +} + +func init() { + t["BaseHostConnectInfoNetworkInfo"] = reflect.TypeOf((*HostConnectInfoNetworkInfo)(nil)).Elem() +} + +func (b *HostDasEvent) 
GetHostDasEvent() *HostDasEvent { return b } + +type BaseHostDasEvent interface { + GetHostDasEvent() *HostDasEvent +} + +func init() { + t["BaseHostDasEvent"] = reflect.TypeOf((*HostDasEvent)(nil)).Elem() +} + +func (b *HostDatastoreConnectInfo) GetHostDatastoreConnectInfo() *HostDatastoreConnectInfo { return b } + +type BaseHostDatastoreConnectInfo interface { + GetHostDatastoreConnectInfo() *HostDatastoreConnectInfo +} + +func init() { + t["BaseHostDatastoreConnectInfo"] = reflect.TypeOf((*HostDatastoreConnectInfo)(nil)).Elem() +} + +func (b *HostDevice) GetHostDevice() *HostDevice { return b } + +type BaseHostDevice interface { + GetHostDevice() *HostDevice +} + +func init() { + t["BaseHostDevice"] = reflect.TypeOf((*HostDevice)(nil)).Elem() +} + +func (b *HostDigestInfo) GetHostDigestInfo() *HostDigestInfo { return b } + +type BaseHostDigestInfo interface { + GetHostDigestInfo() *HostDigestInfo +} + +func init() { + t["BaseHostDigestInfo"] = reflect.TypeOf((*HostDigestInfo)(nil)).Elem() +} + +func (b *HostDirectoryStoreInfo) GetHostDirectoryStoreInfo() *HostDirectoryStoreInfo { return b } + +type BaseHostDirectoryStoreInfo interface { + GetHostDirectoryStoreInfo() *HostDirectoryStoreInfo +} + +func init() { + t["BaseHostDirectoryStoreInfo"] = reflect.TypeOf((*HostDirectoryStoreInfo)(nil)).Elem() +} + +func (b *HostDnsConfig) GetHostDnsConfig() *HostDnsConfig { return b } + +type BaseHostDnsConfig interface { + GetHostDnsConfig() *HostDnsConfig +} + +func init() { + t["BaseHostDnsConfig"] = reflect.TypeOf((*HostDnsConfig)(nil)).Elem() +} + +func (b *HostEvent) GetHostEvent() *HostEvent { return b } + +type BaseHostEvent interface { + GetHostEvent() *HostEvent +} + +func init() { + t["BaseHostEvent"] = reflect.TypeOf((*HostEvent)(nil)).Elem() +} + +func (b *HostFibreChannelHba) GetHostFibreChannelHba() *HostFibreChannelHba { return b } + +type BaseHostFibreChannelHba interface { + GetHostFibreChannelHba() *HostFibreChannelHba +} + +func init() { + t["BaseHostFibreChannelHba"] = reflect.TypeOf((*HostFibreChannelHba)(nil)).Elem() +} + +func (b *HostFibreChannelTargetTransport) GetHostFibreChannelTargetTransport() *HostFibreChannelTargetTransport { + return b +} + +type BaseHostFibreChannelTargetTransport interface { + GetHostFibreChannelTargetTransport() *HostFibreChannelTargetTransport +} + +func init() { + t["BaseHostFibreChannelTargetTransport"] = reflect.TypeOf((*HostFibreChannelTargetTransport)(nil)).Elem() +} + +func (b *HostFileSystemVolume) GetHostFileSystemVolume() *HostFileSystemVolume { return b } + +type BaseHostFileSystemVolume interface { + GetHostFileSystemVolume() *HostFileSystemVolume +} + +func init() { + t["BaseHostFileSystemVolume"] = reflect.TypeOf((*HostFileSystemVolume)(nil)).Elem() +} + +func (b *HostHardwareElementInfo) GetHostHardwareElementInfo() *HostHardwareElementInfo { return b } + +type BaseHostHardwareElementInfo interface { + GetHostHardwareElementInfo() *HostHardwareElementInfo +} + +func init() { + t["BaseHostHardwareElementInfo"] = reflect.TypeOf((*HostHardwareElementInfo)(nil)).Elem() +} + +func (b *HostHostBusAdapter) GetHostHostBusAdapter() *HostHostBusAdapter { return b } + +type BaseHostHostBusAdapter interface { + GetHostHostBusAdapter() *HostHostBusAdapter +} + +func init() { + t["BaseHostHostBusAdapter"] = reflect.TypeOf((*HostHostBusAdapter)(nil)).Elem() +} + +func (b *HostIpRouteConfig) GetHostIpRouteConfig() *HostIpRouteConfig { return b } + +type BaseHostIpRouteConfig interface { + GetHostIpRouteConfig() *HostIpRouteConfig +} + +func init() 
{ + t["BaseHostIpRouteConfig"] = reflect.TypeOf((*HostIpRouteConfig)(nil)).Elem() +} + +func (b *HostMemberHealthCheckResult) GetHostMemberHealthCheckResult() *HostMemberHealthCheckResult { + return b +} + +type BaseHostMemberHealthCheckResult interface { + GetHostMemberHealthCheckResult() *HostMemberHealthCheckResult +} + +func init() { + t["BaseHostMemberHealthCheckResult"] = reflect.TypeOf((*HostMemberHealthCheckResult)(nil)).Elem() +} + +func (b *HostMemberUplinkHealthCheckResult) GetHostMemberUplinkHealthCheckResult() *HostMemberUplinkHealthCheckResult { + return b +} + +type BaseHostMemberUplinkHealthCheckResult interface { + GetHostMemberUplinkHealthCheckResult() *HostMemberUplinkHealthCheckResult +} + +func init() { + t["BaseHostMemberUplinkHealthCheckResult"] = reflect.TypeOf((*HostMemberUplinkHealthCheckResult)(nil)).Elem() +} + +func (b *HostMultipathInfoLogicalUnitPolicy) GetHostMultipathInfoLogicalUnitPolicy() *HostMultipathInfoLogicalUnitPolicy { + return b +} + +type BaseHostMultipathInfoLogicalUnitPolicy interface { + GetHostMultipathInfoLogicalUnitPolicy() *HostMultipathInfoLogicalUnitPolicy +} + +func init() { + t["BaseHostMultipathInfoLogicalUnitPolicy"] = reflect.TypeOf((*HostMultipathInfoLogicalUnitPolicy)(nil)).Elem() +} + +func (b *HostPciPassthruConfig) GetHostPciPassthruConfig() *HostPciPassthruConfig { return b } + +type BaseHostPciPassthruConfig interface { + GetHostPciPassthruConfig() *HostPciPassthruConfig +} + +func init() { + t["BaseHostPciPassthruConfig"] = reflect.TypeOf((*HostPciPassthruConfig)(nil)).Elem() +} + +func (b *HostPciPassthruInfo) GetHostPciPassthruInfo() *HostPciPassthruInfo { return b } + +type BaseHostPciPassthruInfo interface { + GetHostPciPassthruInfo() *HostPciPassthruInfo +} + +func init() { + t["BaseHostPciPassthruInfo"] = reflect.TypeOf((*HostPciPassthruInfo)(nil)).Elem() +} + +func (b *HostPowerOpFailed) GetHostPowerOpFailed() *HostPowerOpFailed { return b } + +type BaseHostPowerOpFailed interface { + GetHostPowerOpFailed() *HostPowerOpFailed +} + +func init() { + t["BaseHostPowerOpFailed"] = reflect.TypeOf((*HostPowerOpFailed)(nil)).Elem() +} + +func (b *HostProfileConfigSpec) GetHostProfileConfigSpec() *HostProfileConfigSpec { return b } + +type BaseHostProfileConfigSpec interface { + GetHostProfileConfigSpec() *HostProfileConfigSpec +} + +func init() { + t["BaseHostProfileConfigSpec"] = reflect.TypeOf((*HostProfileConfigSpec)(nil)).Elem() +} + +func (b *HostProfilesEntityCustomizations) GetHostProfilesEntityCustomizations() *HostProfilesEntityCustomizations { + return b +} + +type BaseHostProfilesEntityCustomizations interface { + GetHostProfilesEntityCustomizations() *HostProfilesEntityCustomizations +} + +func init() { + t["BaseHostProfilesEntityCustomizations"] = reflect.TypeOf((*HostProfilesEntityCustomizations)(nil)).Elem() +} + +func (b *HostSriovDevicePoolInfo) GetHostSriovDevicePoolInfo() *HostSriovDevicePoolInfo { return b } + +type BaseHostSriovDevicePoolInfo interface { + GetHostSriovDevicePoolInfo() *HostSriovDevicePoolInfo +} + +func init() { + t["BaseHostSriovDevicePoolInfo"] = reflect.TypeOf((*HostSriovDevicePoolInfo)(nil)).Elem() +} + +func (b *HostSystemSwapConfigurationSystemSwapOption) GetHostSystemSwapConfigurationSystemSwapOption() *HostSystemSwapConfigurationSystemSwapOption { + return b +} + +type BaseHostSystemSwapConfigurationSystemSwapOption interface { + GetHostSystemSwapConfigurationSystemSwapOption() *HostSystemSwapConfigurationSystemSwapOption +} + +func init() { + 
t["BaseHostSystemSwapConfigurationSystemSwapOption"] = reflect.TypeOf((*HostSystemSwapConfigurationSystemSwapOption)(nil)).Elem() +} + +func (b *HostTargetTransport) GetHostTargetTransport() *HostTargetTransport { return b } + +type BaseHostTargetTransport interface { + GetHostTargetTransport() *HostTargetTransport +} + +func init() { + t["BaseHostTargetTransport"] = reflect.TypeOf((*HostTargetTransport)(nil)).Elem() +} + +func (b *HostTpmEventDetails) GetHostTpmEventDetails() *HostTpmEventDetails { return b } + +type BaseHostTpmEventDetails interface { + GetHostTpmEventDetails() *HostTpmEventDetails +} + +func init() { + t["BaseHostTpmEventDetails"] = reflect.TypeOf((*HostTpmEventDetails)(nil)).Elem() +} + +func (b *HostVirtualSwitchBridge) GetHostVirtualSwitchBridge() *HostVirtualSwitchBridge { return b } + +type BaseHostVirtualSwitchBridge interface { + GetHostVirtualSwitchBridge() *HostVirtualSwitchBridge +} + +func init() { + t["BaseHostVirtualSwitchBridge"] = reflect.TypeOf((*HostVirtualSwitchBridge)(nil)).Elem() +} + +func (b *HourlyTaskScheduler) GetHourlyTaskScheduler() *HourlyTaskScheduler { return b } + +type BaseHourlyTaskScheduler interface { + GetHourlyTaskScheduler() *HourlyTaskScheduler +} + +func init() { + t["BaseHourlyTaskScheduler"] = reflect.TypeOf((*HourlyTaskScheduler)(nil)).Elem() +} + +func (b *ImportSpec) GetImportSpec() *ImportSpec { return b } + +type BaseImportSpec interface { + GetImportSpec() *ImportSpec +} + +func init() { + t["BaseImportSpec"] = reflect.TypeOf((*ImportSpec)(nil)).Elem() +} + +func (b *InaccessibleDatastore) GetInaccessibleDatastore() *InaccessibleDatastore { return b } + +type BaseInaccessibleDatastore interface { + GetInaccessibleDatastore() *InaccessibleDatastore +} + +func init() { + t["BaseInaccessibleDatastore"] = reflect.TypeOf((*InaccessibleDatastore)(nil)).Elem() +} + +func (b *InheritablePolicy) GetInheritablePolicy() *InheritablePolicy { return b } + +type BaseInheritablePolicy interface { + GetInheritablePolicy() *InheritablePolicy +} + +func init() { + t["BaseInheritablePolicy"] = reflect.TypeOf((*InheritablePolicy)(nil)).Elem() +} + +func (b *InsufficientHostCapacityFault) GetInsufficientHostCapacityFault() *InsufficientHostCapacityFault { + return b +} + +type BaseInsufficientHostCapacityFault interface { + GetInsufficientHostCapacityFault() *InsufficientHostCapacityFault +} + +func init() { + t["BaseInsufficientHostCapacityFault"] = reflect.TypeOf((*InsufficientHostCapacityFault)(nil)).Elem() +} + +func (b *InsufficientResourcesFault) GetInsufficientResourcesFault() *InsufficientResourcesFault { + return b +} + +type BaseInsufficientResourcesFault interface { + GetInsufficientResourcesFault() *InsufficientResourcesFault +} + +func init() { + t["BaseInsufficientResourcesFault"] = reflect.TypeOf((*InsufficientResourcesFault)(nil)).Elem() +} + +func (b *InsufficientStandbyResource) GetInsufficientStandbyResource() *InsufficientStandbyResource { + return b +} + +type BaseInsufficientStandbyResource interface { + GetInsufficientStandbyResource() *InsufficientStandbyResource +} + +func init() { + t["BaseInsufficientStandbyResource"] = reflect.TypeOf((*InsufficientStandbyResource)(nil)).Elem() +} + +func (b *InvalidArgument) GetInvalidArgument() *InvalidArgument { return b } + +type BaseInvalidArgument interface { + GetInvalidArgument() *InvalidArgument +} + +func init() { + t["BaseInvalidArgument"] = reflect.TypeOf((*InvalidArgument)(nil)).Elem() +} + +func (b *InvalidCAMServer) GetInvalidCAMServer() *InvalidCAMServer { return b } 
+ +type BaseInvalidCAMServer interface { + GetInvalidCAMServer() *InvalidCAMServer +} + +func init() { + t["BaseInvalidCAMServer"] = reflect.TypeOf((*InvalidCAMServer)(nil)).Elem() +} + +func (b *InvalidDatastore) GetInvalidDatastore() *InvalidDatastore { return b } + +type BaseInvalidDatastore interface { + GetInvalidDatastore() *InvalidDatastore +} + +func init() { + t["BaseInvalidDatastore"] = reflect.TypeOf((*InvalidDatastore)(nil)).Elem() +} + +func (b *InvalidDeviceSpec) GetInvalidDeviceSpec() *InvalidDeviceSpec { return b } + +type BaseInvalidDeviceSpec interface { + GetInvalidDeviceSpec() *InvalidDeviceSpec +} + +func init() { + t["BaseInvalidDeviceSpec"] = reflect.TypeOf((*InvalidDeviceSpec)(nil)).Elem() +} + +func (b *InvalidFolder) GetInvalidFolder() *InvalidFolder { return b } + +type BaseInvalidFolder interface { + GetInvalidFolder() *InvalidFolder +} + +func init() { + t["BaseInvalidFolder"] = reflect.TypeOf((*InvalidFolder)(nil)).Elem() +} + +func (b *InvalidFormat) GetInvalidFormat() *InvalidFormat { return b } + +type BaseInvalidFormat interface { + GetInvalidFormat() *InvalidFormat +} + +func init() { + t["BaseInvalidFormat"] = reflect.TypeOf((*InvalidFormat)(nil)).Elem() +} + +func (b *InvalidHostState) GetInvalidHostState() *InvalidHostState { return b } + +type BaseInvalidHostState interface { + GetInvalidHostState() *InvalidHostState +} + +func init() { + t["BaseInvalidHostState"] = reflect.TypeOf((*InvalidHostState)(nil)).Elem() +} + +func (b *InvalidLogin) GetInvalidLogin() *InvalidLogin { return b } + +type BaseInvalidLogin interface { + GetInvalidLogin() *InvalidLogin +} + +func init() { + t["BaseInvalidLogin"] = reflect.TypeOf((*InvalidLogin)(nil)).Elem() +} + +func (b *InvalidPropertyValue) GetInvalidPropertyValue() *InvalidPropertyValue { return b } + +type BaseInvalidPropertyValue interface { + GetInvalidPropertyValue() *InvalidPropertyValue +} + +func init() { + t["BaseInvalidPropertyValue"] = reflect.TypeOf((*InvalidPropertyValue)(nil)).Elem() +} + +func (b *InvalidRequest) GetInvalidRequest() *InvalidRequest { return b } + +type BaseInvalidRequest interface { + GetInvalidRequest() *InvalidRequest +} + +func init() { + t["BaseInvalidRequest"] = reflect.TypeOf((*InvalidRequest)(nil)).Elem() +} + +func (b *InvalidState) GetInvalidState() *InvalidState { return b } + +type BaseInvalidState interface { + GetInvalidState() *InvalidState +} + +func init() { + t["BaseInvalidState"] = reflect.TypeOf((*InvalidState)(nil)).Elem() +} + +func (b *InvalidVmConfig) GetInvalidVmConfig() *InvalidVmConfig { return b } + +type BaseInvalidVmConfig interface { + GetInvalidVmConfig() *InvalidVmConfig +} + +func init() { + t["BaseInvalidVmConfig"] = reflect.TypeOf((*InvalidVmConfig)(nil)).Elem() +} + +func (b *IoFilterInfo) GetIoFilterInfo() *IoFilterInfo { return b } + +type BaseIoFilterInfo interface { + GetIoFilterInfo() *IoFilterInfo +} + +func init() { + t["BaseIoFilterInfo"] = reflect.TypeOf((*IoFilterInfo)(nil)).Elem() +} + +func (b *IpAddress) GetIpAddress() *IpAddress { return b } + +type BaseIpAddress interface { + GetIpAddress() *IpAddress +} + +func init() { + t["BaseIpAddress"] = reflect.TypeOf((*IpAddress)(nil)).Elem() +} + +func (b *IscsiFault) GetIscsiFault() *IscsiFault { return b } + +type BaseIscsiFault interface { + GetIscsiFault() *IscsiFault +} + +func init() { + t["BaseIscsiFault"] = reflect.TypeOf((*IscsiFault)(nil)).Elem() +} + +func (b *LicenseEvent) GetLicenseEvent() *LicenseEvent { return b } + +type BaseLicenseEvent interface { + GetLicenseEvent() 
*LicenseEvent +} + +func init() { + t["BaseLicenseEvent"] = reflect.TypeOf((*LicenseEvent)(nil)).Elem() +} + +func (b *LicenseSource) GetLicenseSource() *LicenseSource { return b } + +type BaseLicenseSource interface { + GetLicenseSource() *LicenseSource +} + +func init() { + t["BaseLicenseSource"] = reflect.TypeOf((*LicenseSource)(nil)).Elem() +} + +func (b *MacAddress) GetMacAddress() *MacAddress { return b } + +type BaseMacAddress interface { + GetMacAddress() *MacAddress +} + +func init() { + t["BaseMacAddress"] = reflect.TypeOf((*MacAddress)(nil)).Elem() +} + +func (b *MethodFault) GetMethodFault() *MethodFault { return b } + +type BaseMethodFault interface { + GetMethodFault() *MethodFault +} + +func init() { + t["BaseMethodFault"] = reflect.TypeOf((*MethodFault)(nil)).Elem() +} + +func (b *MigrationEvent) GetMigrationEvent() *MigrationEvent { return b } + +type BaseMigrationEvent interface { + GetMigrationEvent() *MigrationEvent +} + +func init() { + t["BaseMigrationEvent"] = reflect.TypeOf((*MigrationEvent)(nil)).Elem() +} + +func (b *MigrationFault) GetMigrationFault() *MigrationFault { return b } + +type BaseMigrationFault interface { + GetMigrationFault() *MigrationFault +} + +func init() { + t["BaseMigrationFault"] = reflect.TypeOf((*MigrationFault)(nil)).Elem() +} + +func (b *MigrationFeatureNotSupported) GetMigrationFeatureNotSupported() *MigrationFeatureNotSupported { + return b +} + +type BaseMigrationFeatureNotSupported interface { + GetMigrationFeatureNotSupported() *MigrationFeatureNotSupported +} + +func init() { + t["BaseMigrationFeatureNotSupported"] = reflect.TypeOf((*MigrationFeatureNotSupported)(nil)).Elem() +} + +func (b *MonthlyTaskScheduler) GetMonthlyTaskScheduler() *MonthlyTaskScheduler { return b } + +type BaseMonthlyTaskScheduler interface { + GetMonthlyTaskScheduler() *MonthlyTaskScheduler +} + +func init() { + t["BaseMonthlyTaskScheduler"] = reflect.TypeOf((*MonthlyTaskScheduler)(nil)).Elem() +} + +func (b *NasConfigFault) GetNasConfigFault() *NasConfigFault { return b } + +type BaseNasConfigFault interface { + GetNasConfigFault() *NasConfigFault +} + +func init() { + t["BaseNasConfigFault"] = reflect.TypeOf((*NasConfigFault)(nil)).Elem() +} + +func (b *NegatableExpression) GetNegatableExpression() *NegatableExpression { return b } + +type BaseNegatableExpression interface { + GetNegatableExpression() *NegatableExpression +} + +func init() { + t["BaseNegatableExpression"] = reflect.TypeOf((*NegatableExpression)(nil)).Elem() +} + +func (b *NetBIOSConfigInfo) GetNetBIOSConfigInfo() *NetBIOSConfigInfo { return b } + +type BaseNetBIOSConfigInfo interface { + GetNetBIOSConfigInfo() *NetBIOSConfigInfo +} + +func init() { + t["BaseNetBIOSConfigInfo"] = reflect.TypeOf((*NetBIOSConfigInfo)(nil)).Elem() +} + +func (b *NetworkSummary) GetNetworkSummary() *NetworkSummary { return b } + +type BaseNetworkSummary interface { + GetNetworkSummary() *NetworkSummary +} + +func init() { + t["BaseNetworkSummary"] = reflect.TypeOf((*NetworkSummary)(nil)).Elem() +} + +func (b *NoCompatibleHost) GetNoCompatibleHost() *NoCompatibleHost { return b } + +type BaseNoCompatibleHost interface { + GetNoCompatibleHost() *NoCompatibleHost +} + +func init() { + t["BaseNoCompatibleHost"] = reflect.TypeOf((*NoCompatibleHost)(nil)).Elem() +} + +func (b *NoPermission) GetNoPermission() *NoPermission { return b } + +type BaseNoPermission interface { + GetNoPermission() *NoPermission +} + +func init() { + t["BaseNoPermission"] = reflect.TypeOf((*NoPermission)(nil)).Elem() +} + +func (b 
*NodeDeploymentSpec) GetNodeDeploymentSpec() *NodeDeploymentSpec { return b } + +type BaseNodeDeploymentSpec interface { + GetNodeDeploymentSpec() *NodeDeploymentSpec +} + +func init() { + t["BaseNodeDeploymentSpec"] = reflect.TypeOf((*NodeDeploymentSpec)(nil)).Elem() +} + +func (b *NodeNetworkSpec) GetNodeNetworkSpec() *NodeNetworkSpec { return b } + +type BaseNodeNetworkSpec interface { + GetNodeNetworkSpec() *NodeNetworkSpec +} + +func init() { + t["BaseNodeNetworkSpec"] = reflect.TypeOf((*NodeNetworkSpec)(nil)).Elem() +} + +func (b *NotEnoughCpus) GetNotEnoughCpus() *NotEnoughCpus { return b } + +type BaseNotEnoughCpus interface { + GetNotEnoughCpus() *NotEnoughCpus +} + +func init() { + t["BaseNotEnoughCpus"] = reflect.TypeOf((*NotEnoughCpus)(nil)).Elem() +} + +func (b *NotEnoughLicenses) GetNotEnoughLicenses() *NotEnoughLicenses { return b } + +type BaseNotEnoughLicenses interface { + GetNotEnoughLicenses() *NotEnoughLicenses +} + +func init() { + t["BaseNotEnoughLicenses"] = reflect.TypeOf((*NotEnoughLicenses)(nil)).Elem() +} + +func (b *NotSupported) GetNotSupported() *NotSupported { return b } + +type BaseNotSupported interface { + GetNotSupported() *NotSupported +} + +func init() { + t["BaseNotSupported"] = reflect.TypeOf((*NotSupported)(nil)).Elem() +} + +func (b *NotSupportedHost) GetNotSupportedHost() *NotSupportedHost { return b } + +type BaseNotSupportedHost interface { + GetNotSupportedHost() *NotSupportedHost +} + +func init() { + t["BaseNotSupportedHost"] = reflect.TypeOf((*NotSupportedHost)(nil)).Elem() +} + +func (b *NotSupportedHostInCluster) GetNotSupportedHostInCluster() *NotSupportedHostInCluster { + return b +} + +type BaseNotSupportedHostInCluster interface { + GetNotSupportedHostInCluster() *NotSupportedHostInCluster +} + +func init() { + t["BaseNotSupportedHostInCluster"] = reflect.TypeOf((*NotSupportedHostInCluster)(nil)).Elem() +} + +func (b *OptionType) GetOptionType() *OptionType { return b } + +type BaseOptionType interface { + GetOptionType() *OptionType +} + +func init() { + t["BaseOptionType"] = reflect.TypeOf((*OptionType)(nil)).Elem() +} + +func (b *OptionValue) GetOptionValue() *OptionValue { return b } + +type BaseOptionValue interface { + GetOptionValue() *OptionValue +} + +func init() { + t["BaseOptionValue"] = reflect.TypeOf((*OptionValue)(nil)).Elem() +} + +func (b *OvfAttribute) GetOvfAttribute() *OvfAttribute { return b } + +type BaseOvfAttribute interface { + GetOvfAttribute() *OvfAttribute +} + +func init() { + t["BaseOvfAttribute"] = reflect.TypeOf((*OvfAttribute)(nil)).Elem() +} + +func (b *OvfConnectedDevice) GetOvfConnectedDevice() *OvfConnectedDevice { return b } + +type BaseOvfConnectedDevice interface { + GetOvfConnectedDevice() *OvfConnectedDevice +} + +func init() { + t["BaseOvfConnectedDevice"] = reflect.TypeOf((*OvfConnectedDevice)(nil)).Elem() +} + +func (b *OvfConstraint) GetOvfConstraint() *OvfConstraint { return b } + +type BaseOvfConstraint interface { + GetOvfConstraint() *OvfConstraint +} + +func init() { + t["BaseOvfConstraint"] = reflect.TypeOf((*OvfConstraint)(nil)).Elem() +} + +func (b *OvfConsumerCallbackFault) GetOvfConsumerCallbackFault() *OvfConsumerCallbackFault { return b } + +type BaseOvfConsumerCallbackFault interface { + GetOvfConsumerCallbackFault() *OvfConsumerCallbackFault +} + +func init() { + t["BaseOvfConsumerCallbackFault"] = reflect.TypeOf((*OvfConsumerCallbackFault)(nil)).Elem() +} + +func (b *OvfElement) GetOvfElement() *OvfElement { return b } + +type BaseOvfElement interface { + GetOvfElement() 
*OvfElement +} + +func init() { + t["BaseOvfElement"] = reflect.TypeOf((*OvfElement)(nil)).Elem() +} + +func (b *OvfExport) GetOvfExport() *OvfExport { return b } + +type BaseOvfExport interface { + GetOvfExport() *OvfExport +} + +func init() { + t["BaseOvfExport"] = reflect.TypeOf((*OvfExport)(nil)).Elem() +} + +func (b *OvfFault) GetOvfFault() *OvfFault { return b } + +type BaseOvfFault interface { + GetOvfFault() *OvfFault +} + +func init() { + t["BaseOvfFault"] = reflect.TypeOf((*OvfFault)(nil)).Elem() +} + +func (b *OvfHardwareExport) GetOvfHardwareExport() *OvfHardwareExport { return b } + +type BaseOvfHardwareExport interface { + GetOvfHardwareExport() *OvfHardwareExport +} + +func init() { + t["BaseOvfHardwareExport"] = reflect.TypeOf((*OvfHardwareExport)(nil)).Elem() +} + +func (b *OvfImport) GetOvfImport() *OvfImport { return b } + +type BaseOvfImport interface { + GetOvfImport() *OvfImport +} + +func init() { + t["BaseOvfImport"] = reflect.TypeOf((*OvfImport)(nil)).Elem() +} + +func (b *OvfInvalidPackage) GetOvfInvalidPackage() *OvfInvalidPackage { return b } + +type BaseOvfInvalidPackage interface { + GetOvfInvalidPackage() *OvfInvalidPackage +} + +func init() { + t["BaseOvfInvalidPackage"] = reflect.TypeOf((*OvfInvalidPackage)(nil)).Elem() +} + +func (b *OvfInvalidValue) GetOvfInvalidValue() *OvfInvalidValue { return b } + +type BaseOvfInvalidValue interface { + GetOvfInvalidValue() *OvfInvalidValue +} + +func init() { + t["BaseOvfInvalidValue"] = reflect.TypeOf((*OvfInvalidValue)(nil)).Elem() +} + +func (b *OvfManagerCommonParams) GetOvfManagerCommonParams() *OvfManagerCommonParams { return b } + +type BaseOvfManagerCommonParams interface { + GetOvfManagerCommonParams() *OvfManagerCommonParams +} + +func init() { + t["BaseOvfManagerCommonParams"] = reflect.TypeOf((*OvfManagerCommonParams)(nil)).Elem() +} + +func (b *OvfMissingElement) GetOvfMissingElement() *OvfMissingElement { return b } + +type BaseOvfMissingElement interface { + GetOvfMissingElement() *OvfMissingElement +} + +func init() { + t["BaseOvfMissingElement"] = reflect.TypeOf((*OvfMissingElement)(nil)).Elem() +} + +func (b *OvfProperty) GetOvfProperty() *OvfProperty { return b } + +type BaseOvfProperty interface { + GetOvfProperty() *OvfProperty +} + +func init() { + t["BaseOvfProperty"] = reflect.TypeOf((*OvfProperty)(nil)).Elem() +} + +func (b *OvfSystemFault) GetOvfSystemFault() *OvfSystemFault { return b } + +type BaseOvfSystemFault interface { + GetOvfSystemFault() *OvfSystemFault +} + +func init() { + t["BaseOvfSystemFault"] = reflect.TypeOf((*OvfSystemFault)(nil)).Elem() +} + +func (b *OvfUnsupportedAttribute) GetOvfUnsupportedAttribute() *OvfUnsupportedAttribute { return b } + +type BaseOvfUnsupportedAttribute interface { + GetOvfUnsupportedAttribute() *OvfUnsupportedAttribute +} + +func init() { + t["BaseOvfUnsupportedAttribute"] = reflect.TypeOf((*OvfUnsupportedAttribute)(nil)).Elem() +} + +func (b *OvfUnsupportedElement) GetOvfUnsupportedElement() *OvfUnsupportedElement { return b } + +type BaseOvfUnsupportedElement interface { + GetOvfUnsupportedElement() *OvfUnsupportedElement +} + +func init() { + t["BaseOvfUnsupportedElement"] = reflect.TypeOf((*OvfUnsupportedElement)(nil)).Elem() +} + +func (b *OvfUnsupportedPackage) GetOvfUnsupportedPackage() *OvfUnsupportedPackage { return b } + +type BaseOvfUnsupportedPackage interface { + GetOvfUnsupportedPackage() *OvfUnsupportedPackage +} + +func init() { + t["BaseOvfUnsupportedPackage"] = reflect.TypeOf((*OvfUnsupportedPackage)(nil)).Elem() +} + +func (b 
*PatchMetadataInvalid) GetPatchMetadataInvalid() *PatchMetadataInvalid { return b } + +type BasePatchMetadataInvalid interface { + GetPatchMetadataInvalid() *PatchMetadataInvalid +} + +func init() { + t["BasePatchMetadataInvalid"] = reflect.TypeOf((*PatchMetadataInvalid)(nil)).Elem() +} + +func (b *PatchNotApplicable) GetPatchNotApplicable() *PatchNotApplicable { return b } + +type BasePatchNotApplicable interface { + GetPatchNotApplicable() *PatchNotApplicable +} + +func init() { + t["BasePatchNotApplicable"] = reflect.TypeOf((*PatchNotApplicable)(nil)).Elem() +} + +func (b *PerfEntityMetricBase) GetPerfEntityMetricBase() *PerfEntityMetricBase { return b } + +type BasePerfEntityMetricBase interface { + GetPerfEntityMetricBase() *PerfEntityMetricBase +} + +func init() { + t["BasePerfEntityMetricBase"] = reflect.TypeOf((*PerfEntityMetricBase)(nil)).Elem() +} + +func (b *PerfMetricSeries) GetPerfMetricSeries() *PerfMetricSeries { return b } + +type BasePerfMetricSeries interface { + GetPerfMetricSeries() *PerfMetricSeries +} + +func init() { + t["BasePerfMetricSeries"] = reflect.TypeOf((*PerfMetricSeries)(nil)).Elem() +} + +func (b *PermissionEvent) GetPermissionEvent() *PermissionEvent { return b } + +type BasePermissionEvent interface { + GetPermissionEvent() *PermissionEvent +} + +func init() { + t["BasePermissionEvent"] = reflect.TypeOf((*PermissionEvent)(nil)).Elem() +} + +func (b *PhysicalNicHint) GetPhysicalNicHint() *PhysicalNicHint { return b } + +type BasePhysicalNicHint interface { + GetPhysicalNicHint() *PhysicalNicHint +} + +func init() { + t["BasePhysicalNicHint"] = reflect.TypeOf((*PhysicalNicHint)(nil)).Elem() +} + +func (b *PlatformConfigFault) GetPlatformConfigFault() *PlatformConfigFault { return b } + +type BasePlatformConfigFault interface { + GetPlatformConfigFault() *PlatformConfigFault +} + +func init() { + t["BasePlatformConfigFault"] = reflect.TypeOf((*PlatformConfigFault)(nil)).Elem() +} + +func (b *PolicyOption) GetPolicyOption() *PolicyOption { return b } + +type BasePolicyOption interface { + GetPolicyOption() *PolicyOption +} + +func init() { + t["BasePolicyOption"] = reflect.TypeOf((*PolicyOption)(nil)).Elem() +} + +func (b *PortGroupProfile) GetPortGroupProfile() *PortGroupProfile { return b } + +type BasePortGroupProfile interface { + GetPortGroupProfile() *PortGroupProfile +} + +func init() { + t["BasePortGroupProfile"] = reflect.TypeOf((*PortGroupProfile)(nil)).Elem() +} + +func (b *ProfileConfigInfo) GetProfileConfigInfo() *ProfileConfigInfo { return b } + +type BaseProfileConfigInfo interface { + GetProfileConfigInfo() *ProfileConfigInfo +} + +func init() { + t["BaseProfileConfigInfo"] = reflect.TypeOf((*ProfileConfigInfo)(nil)).Elem() +} + +func (b *ProfileCreateSpec) GetProfileCreateSpec() *ProfileCreateSpec { return b } + +type BaseProfileCreateSpec interface { + GetProfileCreateSpec() *ProfileCreateSpec +} + +func init() { + t["BaseProfileCreateSpec"] = reflect.TypeOf((*ProfileCreateSpec)(nil)).Elem() +} + +func (b *ProfileEvent) GetProfileEvent() *ProfileEvent { return b } + +type BaseProfileEvent interface { + GetProfileEvent() *ProfileEvent +} + +func init() { + t["BaseProfileEvent"] = reflect.TypeOf((*ProfileEvent)(nil)).Elem() +} + +func (b *ProfileExecuteResult) GetProfileExecuteResult() *ProfileExecuteResult { return b } + +type BaseProfileExecuteResult interface { + GetProfileExecuteResult() *ProfileExecuteResult +} + +func init() { + t["BaseProfileExecuteResult"] = reflect.TypeOf((*ProfileExecuteResult)(nil)).Elem() +} + +func (b 
*ProfileExpression) GetProfileExpression() *ProfileExpression { return b } + +type BaseProfileExpression interface { + GetProfileExpression() *ProfileExpression +} + +func init() { + t["BaseProfileExpression"] = reflect.TypeOf((*ProfileExpression)(nil)).Elem() +} + +func (b *ProfilePolicyOptionMetadata) GetProfilePolicyOptionMetadata() *ProfilePolicyOptionMetadata { + return b +} + +type BaseProfilePolicyOptionMetadata interface { + GetProfilePolicyOptionMetadata() *ProfilePolicyOptionMetadata +} + +func init() { + t["BaseProfilePolicyOptionMetadata"] = reflect.TypeOf((*ProfilePolicyOptionMetadata)(nil)).Elem() +} + +func (b *ProfileSerializedCreateSpec) GetProfileSerializedCreateSpec() *ProfileSerializedCreateSpec { + return b +} + +type BaseProfileSerializedCreateSpec interface { + GetProfileSerializedCreateSpec() *ProfileSerializedCreateSpec +} + +func init() { + t["BaseProfileSerializedCreateSpec"] = reflect.TypeOf((*ProfileSerializedCreateSpec)(nil)).Elem() +} + +func (b *RDMNotSupported) GetRDMNotSupported() *RDMNotSupported { return b } + +type BaseRDMNotSupported interface { + GetRDMNotSupported() *RDMNotSupported +} + +func init() { + t["BaseRDMNotSupported"] = reflect.TypeOf((*RDMNotSupported)(nil)).Elem() +} + +func (b *RecurrentTaskScheduler) GetRecurrentTaskScheduler() *RecurrentTaskScheduler { return b } + +type BaseRecurrentTaskScheduler interface { + GetRecurrentTaskScheduler() *RecurrentTaskScheduler +} + +func init() { + t["BaseRecurrentTaskScheduler"] = reflect.TypeOf((*RecurrentTaskScheduler)(nil)).Elem() +} + +func (b *ReplicationConfigFault) GetReplicationConfigFault() *ReplicationConfigFault { return b } + +type BaseReplicationConfigFault interface { + GetReplicationConfigFault() *ReplicationConfigFault +} + +func init() { + t["BaseReplicationConfigFault"] = reflect.TypeOf((*ReplicationConfigFault)(nil)).Elem() +} + +func (b *ReplicationFault) GetReplicationFault() *ReplicationFault { return b } + +type BaseReplicationFault interface { + GetReplicationFault() *ReplicationFault +} + +func init() { + t["BaseReplicationFault"] = reflect.TypeOf((*ReplicationFault)(nil)).Elem() +} + +func (b *ReplicationVmFault) GetReplicationVmFault() *ReplicationVmFault { return b } + +type BaseReplicationVmFault interface { + GetReplicationVmFault() *ReplicationVmFault +} + +func init() { + t["BaseReplicationVmFault"] = reflect.TypeOf((*ReplicationVmFault)(nil)).Elem() +} + +func (b *ResourceInUse) GetResourceInUse() *ResourceInUse { return b } + +type BaseResourceInUse interface { + GetResourceInUse() *ResourceInUse +} + +func init() { + t["BaseResourceInUse"] = reflect.TypeOf((*ResourceInUse)(nil)).Elem() +} + +func (b *ResourcePoolEvent) GetResourcePoolEvent() *ResourcePoolEvent { return b } + +type BaseResourcePoolEvent interface { + GetResourcePoolEvent() *ResourcePoolEvent +} + +func init() { + t["BaseResourcePoolEvent"] = reflect.TypeOf((*ResourcePoolEvent)(nil)).Elem() +} + +func (b *ResourcePoolSummary) GetResourcePoolSummary() *ResourcePoolSummary { return b } + +type BaseResourcePoolSummary interface { + GetResourcePoolSummary() *ResourcePoolSummary +} + +func init() { + t["BaseResourcePoolSummary"] = reflect.TypeOf((*ResourcePoolSummary)(nil)).Elem() +} + +func (b *RoleEvent) GetRoleEvent() *RoleEvent { return b } + +type BaseRoleEvent interface { + GetRoleEvent() *RoleEvent +} + +func init() { + t["BaseRoleEvent"] = reflect.TypeOf((*RoleEvent)(nil)).Elem() +} + +func (b *RuntimeFault) GetRuntimeFault() *RuntimeFault { return b } + +type BaseRuntimeFault interface { + 
GetRuntimeFault() *RuntimeFault +} + +func init() { + t["BaseRuntimeFault"] = reflect.TypeOf((*RuntimeFault)(nil)).Elem() +} + +func (b *ScheduledTaskEvent) GetScheduledTaskEvent() *ScheduledTaskEvent { return b } + +type BaseScheduledTaskEvent interface { + GetScheduledTaskEvent() *ScheduledTaskEvent +} + +func init() { + t["BaseScheduledTaskEvent"] = reflect.TypeOf((*ScheduledTaskEvent)(nil)).Elem() +} + +func (b *ScheduledTaskSpec) GetScheduledTaskSpec() *ScheduledTaskSpec { return b } + +type BaseScheduledTaskSpec interface { + GetScheduledTaskSpec() *ScheduledTaskSpec +} + +func init() { + t["BaseScheduledTaskSpec"] = reflect.TypeOf((*ScheduledTaskSpec)(nil)).Elem() +} + +func (b *ScsiLun) GetScsiLun() *ScsiLun { return b } + +type BaseScsiLun interface { + GetScsiLun() *ScsiLun +} + +func init() { + t["BaseScsiLun"] = reflect.TypeOf((*ScsiLun)(nil)).Elem() +} + +func (b *SecurityError) GetSecurityError() *SecurityError { return b } + +type BaseSecurityError interface { + GetSecurityError() *SecurityError +} + +func init() { + t["BaseSecurityError"] = reflect.TypeOf((*SecurityError)(nil)).Elem() +} + +func (b *SelectionSet) GetSelectionSet() *SelectionSet { return b } + +type BaseSelectionSet interface { + GetSelectionSet() *SelectionSet +} + +func init() { + t["BaseSelectionSet"] = reflect.TypeOf((*SelectionSet)(nil)).Elem() +} + +func (b *SelectionSpec) GetSelectionSpec() *SelectionSpec { return b } + +type BaseSelectionSpec interface { + GetSelectionSpec() *SelectionSpec +} + +func init() { + t["BaseSelectionSpec"] = reflect.TypeOf((*SelectionSpec)(nil)).Elem() +} + +func (b *ServiceLocatorCredential) GetServiceLocatorCredential() *ServiceLocatorCredential { return b } + +type BaseServiceLocatorCredential interface { + GetServiceLocatorCredential() *ServiceLocatorCredential +} + +func init() { + t["BaseServiceLocatorCredential"] = reflect.TypeOf((*ServiceLocatorCredential)(nil)).Elem() +} + +func (b *SessionEvent) GetSessionEvent() *SessionEvent { return b } + +type BaseSessionEvent interface { + GetSessionEvent() *SessionEvent +} + +func init() { + t["BaseSessionEvent"] = reflect.TypeOf((*SessionEvent)(nil)).Elem() +} + +func (b *SessionManagerServiceRequestSpec) GetSessionManagerServiceRequestSpec() *SessionManagerServiceRequestSpec { + return b +} + +type BaseSessionManagerServiceRequestSpec interface { + GetSessionManagerServiceRequestSpec() *SessionManagerServiceRequestSpec +} + +func init() { + t["BaseSessionManagerServiceRequestSpec"] = reflect.TypeOf((*SessionManagerServiceRequestSpec)(nil)).Elem() +} + +func (b *SnapshotCopyNotSupported) GetSnapshotCopyNotSupported() *SnapshotCopyNotSupported { return b } + +type BaseSnapshotCopyNotSupported interface { + GetSnapshotCopyNotSupported() *SnapshotCopyNotSupported +} + +func init() { + t["BaseSnapshotCopyNotSupported"] = reflect.TypeOf((*SnapshotCopyNotSupported)(nil)).Elem() +} + +func (b *SnapshotFault) GetSnapshotFault() *SnapshotFault { return b } + +type BaseSnapshotFault interface { + GetSnapshotFault() *SnapshotFault +} + +func init() { + t["BaseSnapshotFault"] = reflect.TypeOf((*SnapshotFault)(nil)).Elem() +} + +func (b *TaskEvent) GetTaskEvent() *TaskEvent { return b } + +type BaseTaskEvent interface { + GetTaskEvent() *TaskEvent +} + +func init() { + t["BaseTaskEvent"] = reflect.TypeOf((*TaskEvent)(nil)).Elem() +} + +func (b *TaskInProgress) GetTaskInProgress() *TaskInProgress { return b } + +type BaseTaskInProgress interface { + GetTaskInProgress() *TaskInProgress +} + +func init() { + t["BaseTaskInProgress"] = 
reflect.TypeOf((*TaskInProgress)(nil)).Elem() +} + +func (b *TaskReason) GetTaskReason() *TaskReason { return b } + +type BaseTaskReason interface { + GetTaskReason() *TaskReason +} + +func init() { + t["BaseTaskReason"] = reflect.TypeOf((*TaskReason)(nil)).Elem() +} + +func (b *TaskScheduler) GetTaskScheduler() *TaskScheduler { return b } + +type BaseTaskScheduler interface { + GetTaskScheduler() *TaskScheduler +} + +func init() { + t["BaseTaskScheduler"] = reflect.TypeOf((*TaskScheduler)(nil)).Elem() +} + +func (b *TemplateUpgradeEvent) GetTemplateUpgradeEvent() *TemplateUpgradeEvent { return b } + +type BaseTemplateUpgradeEvent interface { + GetTemplateUpgradeEvent() *TemplateUpgradeEvent +} + +func init() { + t["BaseTemplateUpgradeEvent"] = reflect.TypeOf((*TemplateUpgradeEvent)(nil)).Elem() +} + +func (b *Timedout) GetTimedout() *Timedout { return b } + +type BaseTimedout interface { + GetTimedout() *Timedout +} + +func init() { + t["BaseTimedout"] = reflect.TypeOf((*Timedout)(nil)).Elem() +} + +func (b *TypeDescription) GetTypeDescription() *TypeDescription { return b } + +type BaseTypeDescription interface { + GetTypeDescription() *TypeDescription +} + +func init() { + t["BaseTypeDescription"] = reflect.TypeOf((*TypeDescription)(nil)).Elem() +} + +func (b *UnsupportedDatastore) GetUnsupportedDatastore() *UnsupportedDatastore { return b } + +type BaseUnsupportedDatastore interface { + GetUnsupportedDatastore() *UnsupportedDatastore +} + +func init() { + t["BaseUnsupportedDatastore"] = reflect.TypeOf((*UnsupportedDatastore)(nil)).Elem() +} + +func (b *UpgradeEvent) GetUpgradeEvent() *UpgradeEvent { return b } + +type BaseUpgradeEvent interface { + GetUpgradeEvent() *UpgradeEvent +} + +func init() { + t["BaseUpgradeEvent"] = reflect.TypeOf((*UpgradeEvent)(nil)).Elem() +} + +func (b *UserSearchResult) GetUserSearchResult() *UserSearchResult { return b } + +type BaseUserSearchResult interface { + GetUserSearchResult() *UserSearchResult +} + +func init() { + t["BaseUserSearchResult"] = reflect.TypeOf((*UserSearchResult)(nil)).Elem() +} + +func (b *VAppConfigFault) GetVAppConfigFault() *VAppConfigFault { return b } + +type BaseVAppConfigFault interface { + GetVAppConfigFault() *VAppConfigFault +} + +func init() { + t["BaseVAppConfigFault"] = reflect.TypeOf((*VAppConfigFault)(nil)).Elem() +} + +func (b *VAppPropertyFault) GetVAppPropertyFault() *VAppPropertyFault { return b } + +type BaseVAppPropertyFault interface { + GetVAppPropertyFault() *VAppPropertyFault +} + +func init() { + t["BaseVAppPropertyFault"] = reflect.TypeOf((*VAppPropertyFault)(nil)).Elem() +} + +func (b *VMotionInterfaceIssue) GetVMotionInterfaceIssue() *VMotionInterfaceIssue { return b } + +type BaseVMotionInterfaceIssue interface { + GetVMotionInterfaceIssue() *VMotionInterfaceIssue +} + +func init() { + t["BaseVMotionInterfaceIssue"] = reflect.TypeOf((*VMotionInterfaceIssue)(nil)).Elem() +} + +func (b *VMwareDVSHealthCheckConfig) GetVMwareDVSHealthCheckConfig() *VMwareDVSHealthCheckConfig { + return b +} + +type BaseVMwareDVSHealthCheckConfig interface { + GetVMwareDVSHealthCheckConfig() *VMwareDVSHealthCheckConfig +} + +func init() { + t["BaseVMwareDVSHealthCheckConfig"] = reflect.TypeOf((*VMwareDVSHealthCheckConfig)(nil)).Elem() +} + +func (b *VimFault) GetVimFault() *VimFault { return b } + +type BaseVimFault interface { + GetVimFault() *VimFault +} + +func init() { + t["BaseVimFault"] = reflect.TypeOf((*VimFault)(nil)).Elem() +} + +func (b *VirtualController) GetVirtualController() *VirtualController { return b } + 
+type BaseVirtualController interface { + GetVirtualController() *VirtualController +} + +func init() { + t["BaseVirtualController"] = reflect.TypeOf((*VirtualController)(nil)).Elem() +} + +func (b *VirtualControllerOption) GetVirtualControllerOption() *VirtualControllerOption { return b } + +type BaseVirtualControllerOption interface { + GetVirtualControllerOption() *VirtualControllerOption +} + +func init() { + t["BaseVirtualControllerOption"] = reflect.TypeOf((*VirtualControllerOption)(nil)).Elem() +} + +func (b *VirtualDevice) GetVirtualDevice() *VirtualDevice { return b } + +type BaseVirtualDevice interface { + GetVirtualDevice() *VirtualDevice +} + +func init() { + t["BaseVirtualDevice"] = reflect.TypeOf((*VirtualDevice)(nil)).Elem() +} + +func (b *VirtualDeviceBackingInfo) GetVirtualDeviceBackingInfo() *VirtualDeviceBackingInfo { return b } + +type BaseVirtualDeviceBackingInfo interface { + GetVirtualDeviceBackingInfo() *VirtualDeviceBackingInfo +} + +func init() { + t["BaseVirtualDeviceBackingInfo"] = reflect.TypeOf((*VirtualDeviceBackingInfo)(nil)).Elem() +} + +func (b *VirtualDeviceBackingOption) GetVirtualDeviceBackingOption() *VirtualDeviceBackingOption { + return b +} + +type BaseVirtualDeviceBackingOption interface { + GetVirtualDeviceBackingOption() *VirtualDeviceBackingOption +} + +func init() { + t["BaseVirtualDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceBackingOption)(nil)).Elem() +} + +func (b *VirtualDeviceBusSlotInfo) GetVirtualDeviceBusSlotInfo() *VirtualDeviceBusSlotInfo { return b } + +type BaseVirtualDeviceBusSlotInfo interface { + GetVirtualDeviceBusSlotInfo() *VirtualDeviceBusSlotInfo +} + +func init() { + t["BaseVirtualDeviceBusSlotInfo"] = reflect.TypeOf((*VirtualDeviceBusSlotInfo)(nil)).Elem() +} + +func (b *VirtualDeviceConfigSpec) GetVirtualDeviceConfigSpec() *VirtualDeviceConfigSpec { return b } + +type BaseVirtualDeviceConfigSpec interface { + GetVirtualDeviceConfigSpec() *VirtualDeviceConfigSpec +} + +func init() { + t["BaseVirtualDeviceConfigSpec"] = reflect.TypeOf((*VirtualDeviceConfigSpec)(nil)).Elem() +} + +func (b *VirtualDeviceDeviceBackingInfo) GetVirtualDeviceDeviceBackingInfo() *VirtualDeviceDeviceBackingInfo { + return b +} + +type BaseVirtualDeviceDeviceBackingInfo interface { + GetVirtualDeviceDeviceBackingInfo() *VirtualDeviceDeviceBackingInfo +} + +func init() { + t["BaseVirtualDeviceDeviceBackingInfo"] = reflect.TypeOf((*VirtualDeviceDeviceBackingInfo)(nil)).Elem() +} + +func (b *VirtualDeviceDeviceBackingOption) GetVirtualDeviceDeviceBackingOption() *VirtualDeviceDeviceBackingOption { + return b +} + +type BaseVirtualDeviceDeviceBackingOption interface { + GetVirtualDeviceDeviceBackingOption() *VirtualDeviceDeviceBackingOption +} + +func init() { + t["BaseVirtualDeviceDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceDeviceBackingOption)(nil)).Elem() +} + +func (b *VirtualDeviceFileBackingInfo) GetVirtualDeviceFileBackingInfo() *VirtualDeviceFileBackingInfo { + return b +} + +type BaseVirtualDeviceFileBackingInfo interface { + GetVirtualDeviceFileBackingInfo() *VirtualDeviceFileBackingInfo +} + +func init() { + t["BaseVirtualDeviceFileBackingInfo"] = reflect.TypeOf((*VirtualDeviceFileBackingInfo)(nil)).Elem() +} + +func (b *VirtualDeviceFileBackingOption) GetVirtualDeviceFileBackingOption() *VirtualDeviceFileBackingOption { + return b +} + +type BaseVirtualDeviceFileBackingOption interface { + GetVirtualDeviceFileBackingOption() *VirtualDeviceFileBackingOption +} + +func init() { + t["BaseVirtualDeviceFileBackingOption"] = 
reflect.TypeOf((*VirtualDeviceFileBackingOption)(nil)).Elem() +} + +func (b *VirtualDeviceOption) GetVirtualDeviceOption() *VirtualDeviceOption { return b } + +type BaseVirtualDeviceOption interface { + GetVirtualDeviceOption() *VirtualDeviceOption +} + +func init() { + t["BaseVirtualDeviceOption"] = reflect.TypeOf((*VirtualDeviceOption)(nil)).Elem() +} + +func (b *VirtualDevicePciBusSlotInfo) GetVirtualDevicePciBusSlotInfo() *VirtualDevicePciBusSlotInfo { + return b +} + +type BaseVirtualDevicePciBusSlotInfo interface { + GetVirtualDevicePciBusSlotInfo() *VirtualDevicePciBusSlotInfo +} + +func init() { + t["BaseVirtualDevicePciBusSlotInfo"] = reflect.TypeOf((*VirtualDevicePciBusSlotInfo)(nil)).Elem() +} + +func (b *VirtualDevicePipeBackingInfo) GetVirtualDevicePipeBackingInfo() *VirtualDevicePipeBackingInfo { + return b +} + +type BaseVirtualDevicePipeBackingInfo interface { + GetVirtualDevicePipeBackingInfo() *VirtualDevicePipeBackingInfo +} + +func init() { + t["BaseVirtualDevicePipeBackingInfo"] = reflect.TypeOf((*VirtualDevicePipeBackingInfo)(nil)).Elem() +} + +func (b *VirtualDevicePipeBackingOption) GetVirtualDevicePipeBackingOption() *VirtualDevicePipeBackingOption { + return b +} + +type BaseVirtualDevicePipeBackingOption interface { + GetVirtualDevicePipeBackingOption() *VirtualDevicePipeBackingOption +} + +func init() { + t["BaseVirtualDevicePipeBackingOption"] = reflect.TypeOf((*VirtualDevicePipeBackingOption)(nil)).Elem() +} + +func (b *VirtualDeviceRemoteDeviceBackingInfo) GetVirtualDeviceRemoteDeviceBackingInfo() *VirtualDeviceRemoteDeviceBackingInfo { + return b +} + +type BaseVirtualDeviceRemoteDeviceBackingInfo interface { + GetVirtualDeviceRemoteDeviceBackingInfo() *VirtualDeviceRemoteDeviceBackingInfo +} + +func init() { + t["BaseVirtualDeviceRemoteDeviceBackingInfo"] = reflect.TypeOf((*VirtualDeviceRemoteDeviceBackingInfo)(nil)).Elem() +} + +func (b *VirtualDeviceRemoteDeviceBackingOption) GetVirtualDeviceRemoteDeviceBackingOption() *VirtualDeviceRemoteDeviceBackingOption { + return b +} + +type BaseVirtualDeviceRemoteDeviceBackingOption interface { + GetVirtualDeviceRemoteDeviceBackingOption() *VirtualDeviceRemoteDeviceBackingOption +} + +func init() { + t["BaseVirtualDeviceRemoteDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceRemoteDeviceBackingOption)(nil)).Elem() +} + +func (b *VirtualDeviceURIBackingInfo) GetVirtualDeviceURIBackingInfo() *VirtualDeviceURIBackingInfo { + return b +} + +type BaseVirtualDeviceURIBackingInfo interface { + GetVirtualDeviceURIBackingInfo() *VirtualDeviceURIBackingInfo +} + +func init() { + t["BaseVirtualDeviceURIBackingInfo"] = reflect.TypeOf((*VirtualDeviceURIBackingInfo)(nil)).Elem() +} + +func (b *VirtualDeviceURIBackingOption) GetVirtualDeviceURIBackingOption() *VirtualDeviceURIBackingOption { + return b +} + +type BaseVirtualDeviceURIBackingOption interface { + GetVirtualDeviceURIBackingOption() *VirtualDeviceURIBackingOption +} + +func init() { + t["BaseVirtualDeviceURIBackingOption"] = reflect.TypeOf((*VirtualDeviceURIBackingOption)(nil)).Elem() +} + +func (b *VirtualDiskRawDiskVer2BackingInfo) GetVirtualDiskRawDiskVer2BackingInfo() *VirtualDiskRawDiskVer2BackingInfo { + return b +} + +type BaseVirtualDiskRawDiskVer2BackingInfo interface { + GetVirtualDiskRawDiskVer2BackingInfo() *VirtualDiskRawDiskVer2BackingInfo +} + +func init() { + t["BaseVirtualDiskRawDiskVer2BackingInfo"] = reflect.TypeOf((*VirtualDiskRawDiskVer2BackingInfo)(nil)).Elem() +} + +func (b *VirtualDiskRawDiskVer2BackingOption) 
GetVirtualDiskRawDiskVer2BackingOption() *VirtualDiskRawDiskVer2BackingOption { + return b +} + +type BaseVirtualDiskRawDiskVer2BackingOption interface { + GetVirtualDiskRawDiskVer2BackingOption() *VirtualDiskRawDiskVer2BackingOption +} + +func init() { + t["BaseVirtualDiskRawDiskVer2BackingOption"] = reflect.TypeOf((*VirtualDiskRawDiskVer2BackingOption)(nil)).Elem() +} + +func (b *VirtualDiskSpec) GetVirtualDiskSpec() *VirtualDiskSpec { return b } + +type BaseVirtualDiskSpec interface { + GetVirtualDiskSpec() *VirtualDiskSpec +} + +func init() { + t["BaseVirtualDiskSpec"] = reflect.TypeOf((*VirtualDiskSpec)(nil)).Elem() +} + +func (b *VirtualEthernetCard) GetVirtualEthernetCard() *VirtualEthernetCard { return b } + +type BaseVirtualEthernetCard interface { + GetVirtualEthernetCard() *VirtualEthernetCard +} + +func init() { + t["BaseVirtualEthernetCard"] = reflect.TypeOf((*VirtualEthernetCard)(nil)).Elem() +} + +func (b *VirtualEthernetCardOption) GetVirtualEthernetCardOption() *VirtualEthernetCardOption { + return b +} + +type BaseVirtualEthernetCardOption interface { + GetVirtualEthernetCardOption() *VirtualEthernetCardOption +} + +func init() { + t["BaseVirtualEthernetCardOption"] = reflect.TypeOf((*VirtualEthernetCardOption)(nil)).Elem() +} + +func (b *VirtualHardwareCompatibilityIssue) GetVirtualHardwareCompatibilityIssue() *VirtualHardwareCompatibilityIssue { + return b +} + +type BaseVirtualHardwareCompatibilityIssue interface { + GetVirtualHardwareCompatibilityIssue() *VirtualHardwareCompatibilityIssue +} + +func init() { + t["BaseVirtualHardwareCompatibilityIssue"] = reflect.TypeOf((*VirtualHardwareCompatibilityIssue)(nil)).Elem() +} + +func (b *VirtualMachineBootOptionsBootableDevice) GetVirtualMachineBootOptionsBootableDevice() *VirtualMachineBootOptionsBootableDevice { + return b +} + +type BaseVirtualMachineBootOptionsBootableDevice interface { + GetVirtualMachineBootOptionsBootableDevice() *VirtualMachineBootOptionsBootableDevice +} + +func init() { + t["BaseVirtualMachineBootOptionsBootableDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableDevice)(nil)).Elem() +} + +func (b *VirtualMachineDeviceRuntimeInfoDeviceRuntimeState) GetVirtualMachineDeviceRuntimeInfoDeviceRuntimeState() *VirtualMachineDeviceRuntimeInfoDeviceRuntimeState { + return b +} + +type BaseVirtualMachineDeviceRuntimeInfoDeviceRuntimeState interface { + GetVirtualMachineDeviceRuntimeInfoDeviceRuntimeState() *VirtualMachineDeviceRuntimeInfoDeviceRuntimeState +} + +func init() { + t["BaseVirtualMachineDeviceRuntimeInfoDeviceRuntimeState"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoDeviceRuntimeState)(nil)).Elem() +} + +func (b *VirtualMachineDiskDeviceInfo) GetVirtualMachineDiskDeviceInfo() *VirtualMachineDiskDeviceInfo { + return b +} + +type BaseVirtualMachineDiskDeviceInfo interface { + GetVirtualMachineDiskDeviceInfo() *VirtualMachineDiskDeviceInfo +} + +func init() { + t["BaseVirtualMachineDiskDeviceInfo"] = reflect.TypeOf((*VirtualMachineDiskDeviceInfo)(nil)).Elem() +} + +func (b *VirtualMachineGuestQuiesceSpec) GetVirtualMachineGuestQuiesceSpec() *VirtualMachineGuestQuiesceSpec { + return b +} + +type BaseVirtualMachineGuestQuiesceSpec interface { + GetVirtualMachineGuestQuiesceSpec() *VirtualMachineGuestQuiesceSpec +} + +func init() { + t["BaseVirtualMachineGuestQuiesceSpec"] = reflect.TypeOf((*VirtualMachineGuestQuiesceSpec)(nil)).Elem() +} + +func (b *VirtualMachinePciPassthroughInfo) GetVirtualMachinePciPassthroughInfo() *VirtualMachinePciPassthroughInfo { + return b +} + +type 
BaseVirtualMachinePciPassthroughInfo interface { + GetVirtualMachinePciPassthroughInfo() *VirtualMachinePciPassthroughInfo +} + +func init() { + t["BaseVirtualMachinePciPassthroughInfo"] = reflect.TypeOf((*VirtualMachinePciPassthroughInfo)(nil)).Elem() +} + +func (b *VirtualMachineProfileSpec) GetVirtualMachineProfileSpec() *VirtualMachineProfileSpec { + return b +} + +type BaseVirtualMachineProfileSpec interface { + GetVirtualMachineProfileSpec() *VirtualMachineProfileSpec +} + +func init() { + t["BaseVirtualMachineProfileSpec"] = reflect.TypeOf((*VirtualMachineProfileSpec)(nil)).Elem() +} + +func (b *VirtualMachineSriovDevicePoolInfo) GetVirtualMachineSriovDevicePoolInfo() *VirtualMachineSriovDevicePoolInfo { + return b +} + +type BaseVirtualMachineSriovDevicePoolInfo interface { + GetVirtualMachineSriovDevicePoolInfo() *VirtualMachineSriovDevicePoolInfo +} + +func init() { + t["BaseVirtualMachineSriovDevicePoolInfo"] = reflect.TypeOf((*VirtualMachineSriovDevicePoolInfo)(nil)).Elem() +} + +func (b *VirtualMachineTargetInfo) GetVirtualMachineTargetInfo() *VirtualMachineTargetInfo { return b } + +type BaseVirtualMachineTargetInfo interface { + GetVirtualMachineTargetInfo() *VirtualMachineTargetInfo +} + +func init() { + t["BaseVirtualMachineTargetInfo"] = reflect.TypeOf((*VirtualMachineTargetInfo)(nil)).Elem() +} + +func (b *VirtualPCIPassthroughPluginBackingInfo) GetVirtualPCIPassthroughPluginBackingInfo() *VirtualPCIPassthroughPluginBackingInfo { + return b +} + +type BaseVirtualPCIPassthroughPluginBackingInfo interface { + GetVirtualPCIPassthroughPluginBackingInfo() *VirtualPCIPassthroughPluginBackingInfo +} + +func init() { + t["BaseVirtualPCIPassthroughPluginBackingInfo"] = reflect.TypeOf((*VirtualPCIPassthroughPluginBackingInfo)(nil)).Elem() +} + +func (b *VirtualPCIPassthroughPluginBackingOption) GetVirtualPCIPassthroughPluginBackingOption() *VirtualPCIPassthroughPluginBackingOption { + return b +} + +type BaseVirtualPCIPassthroughPluginBackingOption interface { + GetVirtualPCIPassthroughPluginBackingOption() *VirtualPCIPassthroughPluginBackingOption +} + +func init() { + t["BaseVirtualPCIPassthroughPluginBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughPluginBackingOption)(nil)).Elem() +} + +func (b *VirtualSATAController) GetVirtualSATAController() *VirtualSATAController { return b } + +type BaseVirtualSATAController interface { + GetVirtualSATAController() *VirtualSATAController +} + +func init() { + t["BaseVirtualSATAController"] = reflect.TypeOf((*VirtualSATAController)(nil)).Elem() +} + +func (b *VirtualSATAControllerOption) GetVirtualSATAControllerOption() *VirtualSATAControllerOption { + return b +} + +type BaseVirtualSATAControllerOption interface { + GetVirtualSATAControllerOption() *VirtualSATAControllerOption +} + +func init() { + t["BaseVirtualSATAControllerOption"] = reflect.TypeOf((*VirtualSATAControllerOption)(nil)).Elem() +} + +func (b *VirtualSCSIController) GetVirtualSCSIController() *VirtualSCSIController { return b } + +type BaseVirtualSCSIController interface { + GetVirtualSCSIController() *VirtualSCSIController +} + +func init() { + t["BaseVirtualSCSIController"] = reflect.TypeOf((*VirtualSCSIController)(nil)).Elem() +} + +func (b *VirtualSCSIControllerOption) GetVirtualSCSIControllerOption() *VirtualSCSIControllerOption { + return b +} + +type BaseVirtualSCSIControllerOption interface { + GetVirtualSCSIControllerOption() *VirtualSCSIControllerOption +} + +func init() { + t["BaseVirtualSCSIControllerOption"] = 
reflect.TypeOf((*VirtualSCSIControllerOption)(nil)).Elem() +} + +func (b *VirtualSoundCard) GetVirtualSoundCard() *VirtualSoundCard { return b } + +type BaseVirtualSoundCard interface { + GetVirtualSoundCard() *VirtualSoundCard +} + +func init() { + t["BaseVirtualSoundCard"] = reflect.TypeOf((*VirtualSoundCard)(nil)).Elem() +} + +func (b *VirtualSoundCardOption) GetVirtualSoundCardOption() *VirtualSoundCardOption { return b } + +type BaseVirtualSoundCardOption interface { + GetVirtualSoundCardOption() *VirtualSoundCardOption +} + +func init() { + t["BaseVirtualSoundCardOption"] = reflect.TypeOf((*VirtualSoundCardOption)(nil)).Elem() +} + +func (b *VirtualVmxnet) GetVirtualVmxnet() *VirtualVmxnet { return b } + +type BaseVirtualVmxnet interface { + GetVirtualVmxnet() *VirtualVmxnet +} + +func init() { + t["BaseVirtualVmxnet"] = reflect.TypeOf((*VirtualVmxnet)(nil)).Elem() +} + +func (b *VirtualVmxnet3) GetVirtualVmxnet3() *VirtualVmxnet3 { return b } + +type BaseVirtualVmxnet3 interface { + GetVirtualVmxnet3() *VirtualVmxnet3 +} + +func init() { + t["BaseVirtualVmxnet3"] = reflect.TypeOf((*VirtualVmxnet3)(nil)).Elem() +} + +func (b *VirtualVmxnet3Option) GetVirtualVmxnet3Option() *VirtualVmxnet3Option { return b } + +type BaseVirtualVmxnet3Option interface { + GetVirtualVmxnet3Option() *VirtualVmxnet3Option +} + +func init() { + t["BaseVirtualVmxnet3Option"] = reflect.TypeOf((*VirtualVmxnet3Option)(nil)).Elem() +} + +func (b *VirtualVmxnetOption) GetVirtualVmxnetOption() *VirtualVmxnetOption { return b } + +type BaseVirtualVmxnetOption interface { + GetVirtualVmxnetOption() *VirtualVmxnetOption +} + +func init() { + t["BaseVirtualVmxnetOption"] = reflect.TypeOf((*VirtualVmxnetOption)(nil)).Elem() +} + +func (b *VmCloneEvent) GetVmCloneEvent() *VmCloneEvent { return b } + +type BaseVmCloneEvent interface { + GetVmCloneEvent() *VmCloneEvent +} + +func init() { + t["BaseVmCloneEvent"] = reflect.TypeOf((*VmCloneEvent)(nil)).Elem() +} + +func (b *VmConfigFault) GetVmConfigFault() *VmConfigFault { return b } + +type BaseVmConfigFault interface { + GetVmConfigFault() *VmConfigFault +} + +func init() { + t["BaseVmConfigFault"] = reflect.TypeOf((*VmConfigFault)(nil)).Elem() +} + +func (b *VmConfigFileInfo) GetVmConfigFileInfo() *VmConfigFileInfo { return b } + +type BaseVmConfigFileInfo interface { + GetVmConfigFileInfo() *VmConfigFileInfo +} + +func init() { + t["BaseVmConfigFileInfo"] = reflect.TypeOf((*VmConfigFileInfo)(nil)).Elem() +} + +func (b *VmConfigFileQuery) GetVmConfigFileQuery() *VmConfigFileQuery { return b } + +type BaseVmConfigFileQuery interface { + GetVmConfigFileQuery() *VmConfigFileQuery +} + +func init() { + t["BaseVmConfigFileQuery"] = reflect.TypeOf((*VmConfigFileQuery)(nil)).Elem() +} + +func (b *VmConfigInfo) GetVmConfigInfo() *VmConfigInfo { return b } + +type BaseVmConfigInfo interface { + GetVmConfigInfo() *VmConfigInfo +} + +func init() { + t["BaseVmConfigInfo"] = reflect.TypeOf((*VmConfigInfo)(nil)).Elem() +} + +func (b *VmConfigSpec) GetVmConfigSpec() *VmConfigSpec { return b } + +type BaseVmConfigSpec interface { + GetVmConfigSpec() *VmConfigSpec +} + +func init() { + t["BaseVmConfigSpec"] = reflect.TypeOf((*VmConfigSpec)(nil)).Elem() +} + +func (b *VmDasBeingResetEvent) GetVmDasBeingResetEvent() *VmDasBeingResetEvent { return b } + +type BaseVmDasBeingResetEvent interface { + GetVmDasBeingResetEvent() *VmDasBeingResetEvent +} + +func init() { + t["BaseVmDasBeingResetEvent"] = reflect.TypeOf((*VmDasBeingResetEvent)(nil)).Elem() +} + +func (b *VmEvent) GetVmEvent() 
*VmEvent { return b } + +type BaseVmEvent interface { + GetVmEvent() *VmEvent +} + +func init() { + t["BaseVmEvent"] = reflect.TypeOf((*VmEvent)(nil)).Elem() +} + +func (b *VmFaultToleranceIssue) GetVmFaultToleranceIssue() *VmFaultToleranceIssue { return b } + +type BaseVmFaultToleranceIssue interface { + GetVmFaultToleranceIssue() *VmFaultToleranceIssue +} + +func init() { + t["BaseVmFaultToleranceIssue"] = reflect.TypeOf((*VmFaultToleranceIssue)(nil)).Elem() +} + +func (b *VmMigratedEvent) GetVmMigratedEvent() *VmMigratedEvent { return b } + +type BaseVmMigratedEvent interface { + GetVmMigratedEvent() *VmMigratedEvent +} + +func init() { + t["BaseVmMigratedEvent"] = reflect.TypeOf((*VmMigratedEvent)(nil)).Elem() +} + +func (b *VmPoweredOffEvent) GetVmPoweredOffEvent() *VmPoweredOffEvent { return b } + +type BaseVmPoweredOffEvent interface { + GetVmPoweredOffEvent() *VmPoweredOffEvent +} + +func init() { + t["BaseVmPoweredOffEvent"] = reflect.TypeOf((*VmPoweredOffEvent)(nil)).Elem() +} + +func (b *VmPoweredOnEvent) GetVmPoweredOnEvent() *VmPoweredOnEvent { return b } + +type BaseVmPoweredOnEvent interface { + GetVmPoweredOnEvent() *VmPoweredOnEvent +} + +func init() { + t["BaseVmPoweredOnEvent"] = reflect.TypeOf((*VmPoweredOnEvent)(nil)).Elem() +} + +func (b *VmRelocateSpecEvent) GetVmRelocateSpecEvent() *VmRelocateSpecEvent { return b } + +type BaseVmRelocateSpecEvent interface { + GetVmRelocateSpecEvent() *VmRelocateSpecEvent +} + +func init() { + t["BaseVmRelocateSpecEvent"] = reflect.TypeOf((*VmRelocateSpecEvent)(nil)).Elem() +} + +func (b *VmStartingEvent) GetVmStartingEvent() *VmStartingEvent { return b } + +type BaseVmStartingEvent interface { + GetVmStartingEvent() *VmStartingEvent +} + +func init() { + t["BaseVmStartingEvent"] = reflect.TypeOf((*VmStartingEvent)(nil)).Elem() +} + +func (b *VmToolsUpgradeFault) GetVmToolsUpgradeFault() *VmToolsUpgradeFault { return b } + +type BaseVmToolsUpgradeFault interface { + GetVmToolsUpgradeFault() *VmToolsUpgradeFault +} + +func init() { + t["BaseVmToolsUpgradeFault"] = reflect.TypeOf((*VmToolsUpgradeFault)(nil)).Elem() +} + +func (b *VmfsDatastoreBaseOption) GetVmfsDatastoreBaseOption() *VmfsDatastoreBaseOption { return b } + +type BaseVmfsDatastoreBaseOption interface { + GetVmfsDatastoreBaseOption() *VmfsDatastoreBaseOption +} + +func init() { + t["BaseVmfsDatastoreBaseOption"] = reflect.TypeOf((*VmfsDatastoreBaseOption)(nil)).Elem() +} + +func (b *VmfsDatastoreSingleExtentOption) GetVmfsDatastoreSingleExtentOption() *VmfsDatastoreSingleExtentOption { + return b +} + +type BaseVmfsDatastoreSingleExtentOption interface { + GetVmfsDatastoreSingleExtentOption() *VmfsDatastoreSingleExtentOption +} + +func init() { + t["BaseVmfsDatastoreSingleExtentOption"] = reflect.TypeOf((*VmfsDatastoreSingleExtentOption)(nil)).Elem() +} + +func (b *VmfsDatastoreSpec) GetVmfsDatastoreSpec() *VmfsDatastoreSpec { return b } + +type BaseVmfsDatastoreSpec interface { + GetVmfsDatastoreSpec() *VmfsDatastoreSpec +} + +func init() { + t["BaseVmfsDatastoreSpec"] = reflect.TypeOf((*VmfsDatastoreSpec)(nil)).Elem() +} + +func (b *VmfsMountFault) GetVmfsMountFault() *VmfsMountFault { return b } + +type BaseVmfsMountFault interface { + GetVmfsMountFault() *VmfsMountFault +} + +func init() { + t["BaseVmfsMountFault"] = reflect.TypeOf((*VmfsMountFault)(nil)).Elem() +} + +func (b *VmwareDistributedVirtualSwitchVlanSpec) GetVmwareDistributedVirtualSwitchVlanSpec() *VmwareDistributedVirtualSwitchVlanSpec { + return b +} + +type BaseVmwareDistributedVirtualSwitchVlanSpec 
interface { + GetVmwareDistributedVirtualSwitchVlanSpec() *VmwareDistributedVirtualSwitchVlanSpec +} + +func init() { + t["BaseVmwareDistributedVirtualSwitchVlanSpec"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchVlanSpec)(nil)).Elem() +} + +func (b *VsanDiskFault) GetVsanDiskFault() *VsanDiskFault { return b } + +type BaseVsanDiskFault interface { + GetVsanDiskFault() *VsanDiskFault +} + +func init() { + t["BaseVsanDiskFault"] = reflect.TypeOf((*VsanDiskFault)(nil)).Elem() +} + +func (b *VsanFault) GetVsanFault() *VsanFault { return b } + +type BaseVsanFault interface { + GetVsanFault() *VsanFault +} + +func init() { + t["BaseVsanFault"] = reflect.TypeOf((*VsanFault)(nil)).Elem() +} + +func (b *VsanUpgradeSystemPreflightCheckIssue) GetVsanUpgradeSystemPreflightCheckIssue() *VsanUpgradeSystemPreflightCheckIssue { + return b +} + +type BaseVsanUpgradeSystemPreflightCheckIssue interface { + GetVsanUpgradeSystemPreflightCheckIssue() *VsanUpgradeSystemPreflightCheckIssue +} + +func init() { + t["BaseVsanUpgradeSystemPreflightCheckIssue"] = reflect.TypeOf((*VsanUpgradeSystemPreflightCheckIssue)(nil)).Elem() +} + +func (b *VsanUpgradeSystemUpgradeHistoryItem) GetVsanUpgradeSystemUpgradeHistoryItem() *VsanUpgradeSystemUpgradeHistoryItem { + return b +} + +type BaseVsanUpgradeSystemUpgradeHistoryItem interface { + GetVsanUpgradeSystemUpgradeHistoryItem() *VsanUpgradeSystemUpgradeHistoryItem +} + +func init() { + t["BaseVsanUpgradeSystemUpgradeHistoryItem"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryItem)(nil)).Elem() +} + +func (b *VslmCreateSpecBackingSpec) GetVslmCreateSpecBackingSpec() *VslmCreateSpecBackingSpec { + return b +} + +type BaseVslmCreateSpecBackingSpec interface { + GetVslmCreateSpecBackingSpec() *VslmCreateSpecBackingSpec +} + +func init() { + t["BaseVslmCreateSpecBackingSpec"] = reflect.TypeOf((*VslmCreateSpecBackingSpec)(nil)).Elem() +} + +func (b *VslmMigrateSpec) GetVslmMigrateSpec() *VslmMigrateSpec { return b } + +type BaseVslmMigrateSpec interface { + GetVslmMigrateSpec() *VslmMigrateSpec +} + +func init() { + t["BaseVslmMigrateSpec"] = reflect.TypeOf((*VslmMigrateSpec)(nil)).Elem() +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/registry.go b/vendor/github.com/vmware/govmomi/vim25/types/registry.go new file mode 100644 index 000000000000..ff7c302d3185 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/registry.go @@ -0,0 +1,43 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "reflect" + "strings" +) + +var t = map[string]reflect.Type{} + +func Add(name string, kind reflect.Type) { + t[name] = kind +} + +type Func func(string) (reflect.Type, bool) + +func TypeFunc() Func { + return func(name string) (reflect.Type, bool) { + typ, ok := t[name] + if !ok { + // The /sdk endpoint does not prefix types with the namespace, + // but extension endpoints, such as /pbm/sdk do. 
+ name = strings.TrimPrefix(name, "vim25:") + typ, ok = t[name] + } + return typ, ok + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/types.go b/vendor/github.com/vmware/govmomi/vim25/types/types.go new file mode 100644 index 000000000000..e990e273e222 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/types.go @@ -0,0 +1,55433 @@ +/* +Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "reflect" + "time" +) + +type AbdicateDomOwnership AbdicateDomOwnershipRequestType + +func init() { + t["AbdicateDomOwnership"] = reflect.TypeOf((*AbdicateDomOwnership)(nil)).Elem() +} + +type AbdicateDomOwnershipRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuids []string `xml:"uuids"` +} + +func init() { + t["AbdicateDomOwnershipRequestType"] = reflect.TypeOf((*AbdicateDomOwnershipRequestType)(nil)).Elem() +} + +type AbdicateDomOwnershipResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type AboutInfo struct { + DynamicData + + Name string `xml:"name"` + FullName string `xml:"fullName"` + Vendor string `xml:"vendor"` + Version string `xml:"version"` + Build string `xml:"build"` + LocaleVersion string `xml:"localeVersion,omitempty"` + LocaleBuild string `xml:"localeBuild,omitempty"` + OsType string `xml:"osType"` + ProductLineId string `xml:"productLineId"` + ApiType string `xml:"apiType"` + ApiVersion string `xml:"apiVersion"` + InstanceUuid string `xml:"instanceUuid,omitempty"` + LicenseProductName string `xml:"licenseProductName,omitempty"` + LicenseProductVersion string `xml:"licenseProductVersion,omitempty"` +} + +func init() { + t["AboutInfo"] = reflect.TypeOf((*AboutInfo)(nil)).Elem() +} + +type AccountCreatedEvent struct { + HostEvent + + Spec BaseHostAccountSpec `xml:"spec,typeattr"` + Group bool `xml:"group"` +} + +func init() { + t["AccountCreatedEvent"] = reflect.TypeOf((*AccountCreatedEvent)(nil)).Elem() +} + +type AccountRemovedEvent struct { + HostEvent + + Account string `xml:"account"` + Group bool `xml:"group"` +} + +func init() { + t["AccountRemovedEvent"] = reflect.TypeOf((*AccountRemovedEvent)(nil)).Elem() +} + +type AccountUpdatedEvent struct { + HostEvent + + Spec BaseHostAccountSpec `xml:"spec,typeattr"` + Group bool `xml:"group"` + PrevDescription string `xml:"prevDescription,omitempty"` +} + +func init() { + t["AccountUpdatedEvent"] = reflect.TypeOf((*AccountUpdatedEvent)(nil)).Elem() +} + +type AcknowledgeAlarm AcknowledgeAlarmRequestType + +func init() { + t["AcknowledgeAlarm"] = reflect.TypeOf((*AcknowledgeAlarm)(nil)).Elem() +} + +type AcknowledgeAlarmRequestType struct { + This ManagedObjectReference `xml:"_this"` + Alarm ManagedObjectReference `xml:"alarm"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["AcknowledgeAlarmRequestType"] = reflect.TypeOf((*AcknowledgeAlarmRequestType)(nil)).Elem() +} + +type AcknowledgeAlarmResponse struct { +} + +type AcquireCimServicesTicket AcquireCimServicesTicketRequestType + +func 
init() { + t["AcquireCimServicesTicket"] = reflect.TypeOf((*AcquireCimServicesTicket)(nil)).Elem() +} + +type AcquireCimServicesTicketRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["AcquireCimServicesTicketRequestType"] = reflect.TypeOf((*AcquireCimServicesTicketRequestType)(nil)).Elem() +} + +type AcquireCimServicesTicketResponse struct { + Returnval HostServiceTicket `xml:"returnval"` +} + +type AcquireCloneTicket AcquireCloneTicketRequestType + +func init() { + t["AcquireCloneTicket"] = reflect.TypeOf((*AcquireCloneTicket)(nil)).Elem() +} + +type AcquireCloneTicketRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["AcquireCloneTicketRequestType"] = reflect.TypeOf((*AcquireCloneTicketRequestType)(nil)).Elem() +} + +type AcquireCloneTicketResponse struct { + Returnval string `xml:"returnval"` +} + +type AcquireCredentialsInGuest AcquireCredentialsInGuestRequestType + +func init() { + t["AcquireCredentialsInGuest"] = reflect.TypeOf((*AcquireCredentialsInGuest)(nil)).Elem() +} + +type AcquireCredentialsInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + RequestedAuth BaseGuestAuthentication `xml:"requestedAuth,typeattr"` + SessionID int64 `xml:"sessionID,omitempty"` +} + +func init() { + t["AcquireCredentialsInGuestRequestType"] = reflect.TypeOf((*AcquireCredentialsInGuestRequestType)(nil)).Elem() +} + +type AcquireCredentialsInGuestResponse struct { + Returnval BaseGuestAuthentication `xml:"returnval,typeattr"` +} + +type AcquireGenericServiceTicket AcquireGenericServiceTicketRequestType + +func init() { + t["AcquireGenericServiceTicket"] = reflect.TypeOf((*AcquireGenericServiceTicket)(nil)).Elem() +} + +type AcquireGenericServiceTicketRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec BaseSessionManagerServiceRequestSpec `xml:"spec,typeattr"` +} + +func init() { + t["AcquireGenericServiceTicketRequestType"] = reflect.TypeOf((*AcquireGenericServiceTicketRequestType)(nil)).Elem() +} + +type AcquireGenericServiceTicketResponse struct { + Returnval SessionManagerGenericServiceTicket `xml:"returnval"` +} + +type AcquireLocalTicket AcquireLocalTicketRequestType + +func init() { + t["AcquireLocalTicket"] = reflect.TypeOf((*AcquireLocalTicket)(nil)).Elem() +} + +type AcquireLocalTicketRequestType struct { + This ManagedObjectReference `xml:"_this"` + UserName string `xml:"userName"` +} + +func init() { + t["AcquireLocalTicketRequestType"] = reflect.TypeOf((*AcquireLocalTicketRequestType)(nil)).Elem() +} + +type AcquireLocalTicketResponse struct { + Returnval SessionManagerLocalTicket `xml:"returnval"` +} + +type AcquireMksTicket AcquireMksTicketRequestType + +func init() { + t["AcquireMksTicket"] = reflect.TypeOf((*AcquireMksTicket)(nil)).Elem() +} + +type AcquireMksTicketRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["AcquireMksTicketRequestType"] = reflect.TypeOf((*AcquireMksTicketRequestType)(nil)).Elem() +} + +type AcquireMksTicketResponse struct { + Returnval VirtualMachineMksTicket `xml:"returnval"` +} + +type AcquireTicket AcquireTicketRequestType + +func init() { + t["AcquireTicket"] = reflect.TypeOf((*AcquireTicket)(nil)).Elem() +} + +type AcquireTicketRequestType struct { + This ManagedObjectReference `xml:"_this"` + TicketType string `xml:"ticketType"` +} + +func init() { + t["AcquireTicketRequestType"] = reflect.TypeOf((*AcquireTicketRequestType)(nil)).Elem() +} + +type 
AcquireTicketResponse struct { + Returnval VirtualMachineTicket `xml:"returnval"` +} + +type Action struct { + DynamicData +} + +func init() { + t["Action"] = reflect.TypeOf((*Action)(nil)).Elem() +} + +type ActiveDirectoryFault struct { + VimFault + + ErrorCode int32 `xml:"errorCode,omitempty"` +} + +func init() { + t["ActiveDirectoryFault"] = reflect.TypeOf((*ActiveDirectoryFault)(nil)).Elem() +} + +type ActiveDirectoryFaultFault BaseActiveDirectoryFault + +func init() { + t["ActiveDirectoryFaultFault"] = reflect.TypeOf((*ActiveDirectoryFaultFault)(nil)).Elem() +} + +type ActiveDirectoryProfile struct { + ApplyProfile +} + +func init() { + t["ActiveDirectoryProfile"] = reflect.TypeOf((*ActiveDirectoryProfile)(nil)).Elem() +} + +type ActiveVMsBlockingEVC struct { + EVCConfigFault + + EvcMode string `xml:"evcMode,omitempty"` + Host []ManagedObjectReference `xml:"host,omitempty"` + HostName []string `xml:"hostName,omitempty"` +} + +func init() { + t["ActiveVMsBlockingEVC"] = reflect.TypeOf((*ActiveVMsBlockingEVC)(nil)).Elem() +} + +type ActiveVMsBlockingEVCFault ActiveVMsBlockingEVC + +func init() { + t["ActiveVMsBlockingEVCFault"] = reflect.TypeOf((*ActiveVMsBlockingEVCFault)(nil)).Elem() +} + +type AddAuthorizationRole AddAuthorizationRoleRequestType + +func init() { + t["AddAuthorizationRole"] = reflect.TypeOf((*AddAuthorizationRole)(nil)).Elem() +} + +type AddAuthorizationRoleRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + PrivIds []string `xml:"privIds,omitempty"` +} + +func init() { + t["AddAuthorizationRoleRequestType"] = reflect.TypeOf((*AddAuthorizationRoleRequestType)(nil)).Elem() +} + +type AddAuthorizationRoleResponse struct { + Returnval int32 `xml:"returnval"` +} + +type AddCustomFieldDef AddCustomFieldDefRequestType + +func init() { + t["AddCustomFieldDef"] = reflect.TypeOf((*AddCustomFieldDef)(nil)).Elem() +} + +type AddCustomFieldDefRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + MoType string `xml:"moType,omitempty"` + FieldDefPolicy *PrivilegePolicyDef `xml:"fieldDefPolicy,omitempty"` + FieldPolicy *PrivilegePolicyDef `xml:"fieldPolicy,omitempty"` +} + +func init() { + t["AddCustomFieldDefRequestType"] = reflect.TypeOf((*AddCustomFieldDefRequestType)(nil)).Elem() +} + +type AddCustomFieldDefResponse struct { + Returnval CustomFieldDef `xml:"returnval"` +} + +type AddDVPortgroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec []DVPortgroupConfigSpec `xml:"spec"` +} + +func init() { + t["AddDVPortgroupRequestType"] = reflect.TypeOf((*AddDVPortgroupRequestType)(nil)).Elem() +} + +type AddDVPortgroup_Task AddDVPortgroupRequestType + +func init() { + t["AddDVPortgroup_Task"] = reflect.TypeOf((*AddDVPortgroup_Task)(nil)).Elem() +} + +type AddDVPortgroup_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type AddDisksRequestType struct { + This ManagedObjectReference `xml:"_this"` + Disk []HostScsiDisk `xml:"disk"` +} + +func init() { + t["AddDisksRequestType"] = reflect.TypeOf((*AddDisksRequestType)(nil)).Elem() +} + +type AddDisks_Task AddDisksRequestType + +func init() { + t["AddDisks_Task"] = reflect.TypeOf((*AddDisks_Task)(nil)).Elem() +} + +type AddDisks_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type AddFilter AddFilterRequestType + +func init() { + t["AddFilter"] = reflect.TypeOf((*AddFilter)(nil)).Elem() +} + +type AddFilterEntities AddFilterEntitiesRequestType + +func init() { + 
t["AddFilterEntities"] = reflect.TypeOf((*AddFilterEntities)(nil)).Elem() +} + +type AddFilterEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + FilterId string `xml:"filterId"` + Entities []ManagedObjectReference `xml:"entities,omitempty"` +} + +func init() { + t["AddFilterEntitiesRequestType"] = reflect.TypeOf((*AddFilterEntitiesRequestType)(nil)).Elem() +} + +type AddFilterEntitiesResponse struct { +} + +type AddFilterRequestType struct { + This ManagedObjectReference `xml:"_this"` + ProviderId string `xml:"providerId"` + FilterName string `xml:"filterName"` + InfoIds []string `xml:"infoIds,omitempty"` +} + +func init() { + t["AddFilterRequestType"] = reflect.TypeOf((*AddFilterRequestType)(nil)).Elem() +} + +type AddFilterResponse struct { + Returnval string `xml:"returnval"` +} + +type AddGuestAlias AddGuestAliasRequestType + +func init() { + t["AddGuestAlias"] = reflect.TypeOf((*AddGuestAlias)(nil)).Elem() +} + +type AddGuestAliasRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Username string `xml:"username"` + MapCert bool `xml:"mapCert"` + Base64Cert string `xml:"base64Cert"` + AliasInfo GuestAuthAliasInfo `xml:"aliasInfo"` +} + +func init() { + t["AddGuestAliasRequestType"] = reflect.TypeOf((*AddGuestAliasRequestType)(nil)).Elem() +} + +type AddGuestAliasResponse struct { +} + +type AddHostRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostConnectSpec `xml:"spec"` + AsConnected bool `xml:"asConnected"` + ResourcePool *ManagedObjectReference `xml:"resourcePool,omitempty"` + License string `xml:"license,omitempty"` +} + +func init() { + t["AddHostRequestType"] = reflect.TypeOf((*AddHostRequestType)(nil)).Elem() +} + +type AddHost_Task AddHostRequestType + +func init() { + t["AddHost_Task"] = reflect.TypeOf((*AddHost_Task)(nil)).Elem() +} + +type AddHost_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type AddInternetScsiSendTargets AddInternetScsiSendTargetsRequestType + +func init() { + t["AddInternetScsiSendTargets"] = reflect.TypeOf((*AddInternetScsiSendTargets)(nil)).Elem() +} + +type AddInternetScsiSendTargetsRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + Targets []HostInternetScsiHbaSendTarget `xml:"targets"` +} + +func init() { + t["AddInternetScsiSendTargetsRequestType"] = reflect.TypeOf((*AddInternetScsiSendTargetsRequestType)(nil)).Elem() +} + +type AddInternetScsiSendTargetsResponse struct { +} + +type AddInternetScsiStaticTargets AddInternetScsiStaticTargetsRequestType + +func init() { + t["AddInternetScsiStaticTargets"] = reflect.TypeOf((*AddInternetScsiStaticTargets)(nil)).Elem() +} + +type AddInternetScsiStaticTargetsRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + Targets []HostInternetScsiHbaStaticTarget `xml:"targets"` +} + +func init() { + t["AddInternetScsiStaticTargetsRequestType"] = reflect.TypeOf((*AddInternetScsiStaticTargetsRequestType)(nil)).Elem() +} + +type AddInternetScsiStaticTargetsResponse struct { +} + +type AddKey AddKeyRequestType + +func init() { + t["AddKey"] = reflect.TypeOf((*AddKey)(nil)).Elem() +} + +type AddKeyRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key CryptoKeyPlain `xml:"key"` +} + +func init() { + t["AddKeyRequestType"] = reflect.TypeOf((*AddKeyRequestType)(nil)).Elem() +} + +type 
AddKeyResponse struct { +} + +type AddKeys AddKeysRequestType + +func init() { + t["AddKeys"] = reflect.TypeOf((*AddKeys)(nil)).Elem() +} + +type AddKeysRequestType struct { + This ManagedObjectReference `xml:"_this"` + Keys []CryptoKeyPlain `xml:"keys,omitempty"` +} + +func init() { + t["AddKeysRequestType"] = reflect.TypeOf((*AddKeysRequestType)(nil)).Elem() +} + +type AddKeysResponse struct { + Returnval []CryptoKeyResult `xml:"returnval,omitempty"` +} + +type AddLicense AddLicenseRequestType + +func init() { + t["AddLicense"] = reflect.TypeOf((*AddLicense)(nil)).Elem() +} + +type AddLicenseRequestType struct { + This ManagedObjectReference `xml:"_this"` + LicenseKey string `xml:"licenseKey"` + Labels []KeyValue `xml:"labels,omitempty"` +} + +func init() { + t["AddLicenseRequestType"] = reflect.TypeOf((*AddLicenseRequestType)(nil)).Elem() +} + +type AddLicenseResponse struct { + Returnval LicenseManagerLicenseInfo `xml:"returnval"` +} + +type AddMonitoredEntities AddMonitoredEntitiesRequestType + +func init() { + t["AddMonitoredEntities"] = reflect.TypeOf((*AddMonitoredEntities)(nil)).Elem() +} + +type AddMonitoredEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + ProviderId string `xml:"providerId"` + Entities []ManagedObjectReference `xml:"entities,omitempty"` +} + +func init() { + t["AddMonitoredEntitiesRequestType"] = reflect.TypeOf((*AddMonitoredEntitiesRequestType)(nil)).Elem() +} + +type AddMonitoredEntitiesResponse struct { +} + +type AddNetworkResourcePool AddNetworkResourcePoolRequestType + +func init() { + t["AddNetworkResourcePool"] = reflect.TypeOf((*AddNetworkResourcePool)(nil)).Elem() +} + +type AddNetworkResourcePoolRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConfigSpec []DVSNetworkResourcePoolConfigSpec `xml:"configSpec"` +} + +func init() { + t["AddNetworkResourcePoolRequestType"] = reflect.TypeOf((*AddNetworkResourcePoolRequestType)(nil)).Elem() +} + +type AddNetworkResourcePoolResponse struct { +} + +type AddPortGroup AddPortGroupRequestType + +func init() { + t["AddPortGroup"] = reflect.TypeOf((*AddPortGroup)(nil)).Elem() +} + +type AddPortGroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + Portgrp HostPortGroupSpec `xml:"portgrp"` +} + +func init() { + t["AddPortGroupRequestType"] = reflect.TypeOf((*AddPortGroupRequestType)(nil)).Elem() +} + +type AddPortGroupResponse struct { +} + +type AddServiceConsoleVirtualNic AddServiceConsoleVirtualNicRequestType + +func init() { + t["AddServiceConsoleVirtualNic"] = reflect.TypeOf((*AddServiceConsoleVirtualNic)(nil)).Elem() +} + +type AddServiceConsoleVirtualNicRequestType struct { + This ManagedObjectReference `xml:"_this"` + Portgroup string `xml:"portgroup"` + Nic HostVirtualNicSpec `xml:"nic"` +} + +func init() { + t["AddServiceConsoleVirtualNicRequestType"] = reflect.TypeOf((*AddServiceConsoleVirtualNicRequestType)(nil)).Elem() +} + +type AddServiceConsoleVirtualNicResponse struct { + Returnval string `xml:"returnval"` +} + +type AddStandaloneHostRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostConnectSpec `xml:"spec"` + CompResSpec BaseComputeResourceConfigSpec `xml:"compResSpec,omitempty,typeattr"` + AddConnected bool `xml:"addConnected"` + License string `xml:"license,omitempty"` +} + +func init() { + t["AddStandaloneHostRequestType"] = reflect.TypeOf((*AddStandaloneHostRequestType)(nil)).Elem() +} + +type AddStandaloneHost_Task AddStandaloneHostRequestType + +func init() { + t["AddStandaloneHost_Task"] = 
reflect.TypeOf((*AddStandaloneHost_Task)(nil)).Elem() +} + +type AddStandaloneHost_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type AddVirtualNic AddVirtualNicRequestType + +func init() { + t["AddVirtualNic"] = reflect.TypeOf((*AddVirtualNic)(nil)).Elem() +} + +type AddVirtualNicRequestType struct { + This ManagedObjectReference `xml:"_this"` + Portgroup string `xml:"portgroup"` + Nic HostVirtualNicSpec `xml:"nic"` +} + +func init() { + t["AddVirtualNicRequestType"] = reflect.TypeOf((*AddVirtualNicRequestType)(nil)).Elem() +} + +type AddVirtualNicResponse struct { + Returnval string `xml:"returnval"` +} + +type AddVirtualSwitch AddVirtualSwitchRequestType + +func init() { + t["AddVirtualSwitch"] = reflect.TypeOf((*AddVirtualSwitch)(nil)).Elem() +} + +type AddVirtualSwitchRequestType struct { + This ManagedObjectReference `xml:"_this"` + VswitchName string `xml:"vswitchName"` + Spec *HostVirtualSwitchSpec `xml:"spec,omitempty"` +} + +func init() { + t["AddVirtualSwitchRequestType"] = reflect.TypeOf((*AddVirtualSwitchRequestType)(nil)).Elem() +} + +type AddVirtualSwitchResponse struct { +} + +type AdminDisabled struct { + HostConfigFault +} + +func init() { + t["AdminDisabled"] = reflect.TypeOf((*AdminDisabled)(nil)).Elem() +} + +type AdminDisabledFault AdminDisabled + +func init() { + t["AdminDisabledFault"] = reflect.TypeOf((*AdminDisabledFault)(nil)).Elem() +} + +type AdminNotDisabled struct { + HostConfigFault +} + +func init() { + t["AdminNotDisabled"] = reflect.TypeOf((*AdminNotDisabled)(nil)).Elem() +} + +type AdminNotDisabledFault AdminNotDisabled + +func init() { + t["AdminNotDisabledFault"] = reflect.TypeOf((*AdminNotDisabledFault)(nil)).Elem() +} + +type AdminPasswordNotChangedEvent struct { + HostEvent +} + +func init() { + t["AdminPasswordNotChangedEvent"] = reflect.TypeOf((*AdminPasswordNotChangedEvent)(nil)).Elem() +} + +type AffinityConfigured struct { + MigrationFault + + ConfiguredAffinity []string `xml:"configuredAffinity"` +} + +func init() { + t["AffinityConfigured"] = reflect.TypeOf((*AffinityConfigured)(nil)).Elem() +} + +type AffinityConfiguredFault AffinityConfigured + +func init() { + t["AffinityConfiguredFault"] = reflect.TypeOf((*AffinityConfiguredFault)(nil)).Elem() +} + +type AfterStartupTaskScheduler struct { + TaskScheduler + + Minute int32 `xml:"minute"` +} + +func init() { + t["AfterStartupTaskScheduler"] = reflect.TypeOf((*AfterStartupTaskScheduler)(nil)).Elem() +} + +type AgentInstallFailed struct { + HostConnectFault + + Reason string `xml:"reason,omitempty"` + StatusCode int32 `xml:"statusCode,omitempty"` + InstallerOutput string `xml:"installerOutput,omitempty"` +} + +func init() { + t["AgentInstallFailed"] = reflect.TypeOf((*AgentInstallFailed)(nil)).Elem() +} + +type AgentInstallFailedFault AgentInstallFailed + +func init() { + t["AgentInstallFailedFault"] = reflect.TypeOf((*AgentInstallFailedFault)(nil)).Elem() +} + +type AlarmAcknowledgedEvent struct { + AlarmEvent + + Source ManagedEntityEventArgument `xml:"source"` + Entity ManagedEntityEventArgument `xml:"entity"` +} + +func init() { + t["AlarmAcknowledgedEvent"] = reflect.TypeOf((*AlarmAcknowledgedEvent)(nil)).Elem() +} + +type AlarmAction struct { + DynamicData +} + +func init() { + t["AlarmAction"] = reflect.TypeOf((*AlarmAction)(nil)).Elem() +} + +type AlarmActionTriggeredEvent struct { + AlarmEvent + + Source ManagedEntityEventArgument `xml:"source"` + Entity ManagedEntityEventArgument `xml:"entity"` +} + +func init() { + t["AlarmActionTriggeredEvent"] = 
reflect.TypeOf((*AlarmActionTriggeredEvent)(nil)).Elem() +} + +type AlarmClearedEvent struct { + AlarmEvent + + Source ManagedEntityEventArgument `xml:"source"` + Entity ManagedEntityEventArgument `xml:"entity"` + From string `xml:"from"` +} + +func init() { + t["AlarmClearedEvent"] = reflect.TypeOf((*AlarmClearedEvent)(nil)).Elem() +} + +type AlarmCreatedEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` +} + +func init() { + t["AlarmCreatedEvent"] = reflect.TypeOf((*AlarmCreatedEvent)(nil)).Elem() +} + +type AlarmDescription struct { + DynamicData + + Expr []BaseTypeDescription `xml:"expr,typeattr"` + StateOperator []BaseElementDescription `xml:"stateOperator,typeattr"` + MetricOperator []BaseElementDescription `xml:"metricOperator,typeattr"` + HostSystemConnectionState []BaseElementDescription `xml:"hostSystemConnectionState,typeattr"` + VirtualMachinePowerState []BaseElementDescription `xml:"virtualMachinePowerState,typeattr"` + DatastoreConnectionState []BaseElementDescription `xml:"datastoreConnectionState,omitempty,typeattr"` + HostSystemPowerState []BaseElementDescription `xml:"hostSystemPowerState,omitempty,typeattr"` + VirtualMachineGuestHeartbeatStatus []BaseElementDescription `xml:"virtualMachineGuestHeartbeatStatus,omitempty,typeattr"` + EntityStatus []BaseElementDescription `xml:"entityStatus,typeattr"` + Action []BaseTypeDescription `xml:"action,typeattr"` +} + +func init() { + t["AlarmDescription"] = reflect.TypeOf((*AlarmDescription)(nil)).Elem() +} + +type AlarmEmailCompletedEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` + To string `xml:"to"` +} + +func init() { + t["AlarmEmailCompletedEvent"] = reflect.TypeOf((*AlarmEmailCompletedEvent)(nil)).Elem() +} + +type AlarmEmailFailedEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` + To string `xml:"to"` + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["AlarmEmailFailedEvent"] = reflect.TypeOf((*AlarmEmailFailedEvent)(nil)).Elem() +} + +type AlarmEvent struct { + Event + + Alarm AlarmEventArgument `xml:"alarm"` +} + +func init() { + t["AlarmEvent"] = reflect.TypeOf((*AlarmEvent)(nil)).Elem() +} + +type AlarmEventArgument struct { + EntityEventArgument + + Alarm ManagedObjectReference `xml:"alarm"` +} + +func init() { + t["AlarmEventArgument"] = reflect.TypeOf((*AlarmEventArgument)(nil)).Elem() +} + +type AlarmExpression struct { + DynamicData +} + +func init() { + t["AlarmExpression"] = reflect.TypeOf((*AlarmExpression)(nil)).Elem() +} + +type AlarmFilterSpec struct { + DynamicData + + Status []ManagedEntityStatus `xml:"status,omitempty"` + TypeEntity string `xml:"typeEntity,omitempty"` + TypeTrigger string `xml:"typeTrigger,omitempty"` +} + +func init() { + t["AlarmFilterSpec"] = reflect.TypeOf((*AlarmFilterSpec)(nil)).Elem() +} + +type AlarmInfo struct { + AlarmSpec + + Key string `xml:"key"` + Alarm ManagedObjectReference `xml:"alarm"` + Entity ManagedObjectReference `xml:"entity"` + LastModifiedTime time.Time `xml:"lastModifiedTime"` + LastModifiedUser string `xml:"lastModifiedUser"` + CreationEventId int32 `xml:"creationEventId"` +} + +func init() { + t["AlarmInfo"] = reflect.TypeOf((*AlarmInfo)(nil)).Elem() +} + +type AlarmReconfiguredEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty"` +} + +func init() { + t["AlarmReconfiguredEvent"] = reflect.TypeOf((*AlarmReconfiguredEvent)(nil)).Elem() +} + +type 
AlarmRemovedEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` +} + +func init() { + t["AlarmRemovedEvent"] = reflect.TypeOf((*AlarmRemovedEvent)(nil)).Elem() +} + +type AlarmScriptCompleteEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` + Script string `xml:"script"` +} + +func init() { + t["AlarmScriptCompleteEvent"] = reflect.TypeOf((*AlarmScriptCompleteEvent)(nil)).Elem() +} + +type AlarmScriptFailedEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` + Script string `xml:"script"` + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["AlarmScriptFailedEvent"] = reflect.TypeOf((*AlarmScriptFailedEvent)(nil)).Elem() +} + +type AlarmSetting struct { + DynamicData + + ToleranceRange int32 `xml:"toleranceRange"` + ReportingFrequency int32 `xml:"reportingFrequency"` +} + +func init() { + t["AlarmSetting"] = reflect.TypeOf((*AlarmSetting)(nil)).Elem() +} + +type AlarmSnmpCompletedEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` +} + +func init() { + t["AlarmSnmpCompletedEvent"] = reflect.TypeOf((*AlarmSnmpCompletedEvent)(nil)).Elem() +} + +type AlarmSnmpFailedEvent struct { + AlarmEvent + + Entity ManagedEntityEventArgument `xml:"entity"` + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["AlarmSnmpFailedEvent"] = reflect.TypeOf((*AlarmSnmpFailedEvent)(nil)).Elem() +} + +type AlarmSpec struct { + DynamicData + + Name string `xml:"name"` + SystemName string `xml:"systemName,omitempty"` + Description string `xml:"description"` + Enabled bool `xml:"enabled"` + Expression BaseAlarmExpression `xml:"expression,typeattr"` + Action BaseAlarmAction `xml:"action,omitempty,typeattr"` + ActionFrequency int32 `xml:"actionFrequency,omitempty"` + Setting *AlarmSetting `xml:"setting,omitempty"` +} + +func init() { + t["AlarmSpec"] = reflect.TypeOf((*AlarmSpec)(nil)).Elem() +} + +type AlarmState struct { + DynamicData + + Key string `xml:"key"` + Entity ManagedObjectReference `xml:"entity"` + Alarm ManagedObjectReference `xml:"alarm"` + OverallStatus ManagedEntityStatus `xml:"overallStatus"` + Time time.Time `xml:"time"` + Acknowledged *bool `xml:"acknowledged"` + AcknowledgedByUser string `xml:"acknowledgedByUser,omitempty"` + AcknowledgedTime *time.Time `xml:"acknowledgedTime"` + EventKey int32 `xml:"eventKey,omitempty"` +} + +func init() { + t["AlarmState"] = reflect.TypeOf((*AlarmState)(nil)).Elem() +} + +type AlarmStatusChangedEvent struct { + AlarmEvent + + Source ManagedEntityEventArgument `xml:"source"` + Entity ManagedEntityEventArgument `xml:"entity"` + From string `xml:"from"` + To string `xml:"to"` +} + +func init() { + t["AlarmStatusChangedEvent"] = reflect.TypeOf((*AlarmStatusChangedEvent)(nil)).Elem() +} + +type AlarmTriggeringAction struct { + AlarmAction + + Action BaseAction `xml:"action,typeattr"` + TransitionSpecs []AlarmTriggeringActionTransitionSpec `xml:"transitionSpecs,omitempty"` + Green2yellow bool `xml:"green2yellow"` + Yellow2red bool `xml:"yellow2red"` + Red2yellow bool `xml:"red2yellow"` + Yellow2green bool `xml:"yellow2green"` +} + +func init() { + t["AlarmTriggeringAction"] = reflect.TypeOf((*AlarmTriggeringAction)(nil)).Elem() +} + +type AlarmTriggeringActionTransitionSpec struct { + DynamicData + + StartState ManagedEntityStatus `xml:"startState"` + FinalState ManagedEntityStatus `xml:"finalState"` + Repeats bool `xml:"repeats"` +} + +func init() { + t["AlarmTriggeringActionTransitionSpec"] = 
reflect.TypeOf((*AlarmTriggeringActionTransitionSpec)(nil)).Elem() +} + +type AllVirtualMachinesLicensedEvent struct { + LicenseEvent +} + +func init() { + t["AllVirtualMachinesLicensedEvent"] = reflect.TypeOf((*AllVirtualMachinesLicensedEvent)(nil)).Elem() +} + +type AllocateIpv4Address AllocateIpv4AddressRequestType + +func init() { + t["AllocateIpv4Address"] = reflect.TypeOf((*AllocateIpv4Address)(nil)).Elem() +} + +type AllocateIpv4AddressRequestType struct { + This ManagedObjectReference `xml:"_this"` + Dc ManagedObjectReference `xml:"dc"` + PoolId int32 `xml:"poolId"` + AllocationId string `xml:"allocationId"` +} + +func init() { + t["AllocateIpv4AddressRequestType"] = reflect.TypeOf((*AllocateIpv4AddressRequestType)(nil)).Elem() +} + +type AllocateIpv4AddressResponse struct { + Returnval string `xml:"returnval"` +} + +type AllocateIpv6Address AllocateIpv6AddressRequestType + +func init() { + t["AllocateIpv6Address"] = reflect.TypeOf((*AllocateIpv6Address)(nil)).Elem() +} + +type AllocateIpv6AddressRequestType struct { + This ManagedObjectReference `xml:"_this"` + Dc ManagedObjectReference `xml:"dc"` + PoolId int32 `xml:"poolId"` + AllocationId string `xml:"allocationId"` +} + +func init() { + t["AllocateIpv6AddressRequestType"] = reflect.TypeOf((*AllocateIpv6AddressRequestType)(nil)).Elem() +} + +type AllocateIpv6AddressResponse struct { + Returnval string `xml:"returnval"` +} + +type AlreadyAuthenticatedSessionEvent struct { + SessionEvent +} + +func init() { + t["AlreadyAuthenticatedSessionEvent"] = reflect.TypeOf((*AlreadyAuthenticatedSessionEvent)(nil)).Elem() +} + +type AlreadyBeingManaged struct { + HostConnectFault + + IpAddress string `xml:"ipAddress"` +} + +func init() { + t["AlreadyBeingManaged"] = reflect.TypeOf((*AlreadyBeingManaged)(nil)).Elem() +} + +type AlreadyBeingManagedFault AlreadyBeingManaged + +func init() { + t["AlreadyBeingManagedFault"] = reflect.TypeOf((*AlreadyBeingManagedFault)(nil)).Elem() +} + +type AlreadyConnected struct { + HostConnectFault + + Name string `xml:"name"` +} + +func init() { + t["AlreadyConnected"] = reflect.TypeOf((*AlreadyConnected)(nil)).Elem() +} + +type AlreadyConnectedFault AlreadyConnected + +func init() { + t["AlreadyConnectedFault"] = reflect.TypeOf((*AlreadyConnectedFault)(nil)).Elem() +} + +type AlreadyExists struct { + VimFault + + Name string `xml:"name,omitempty"` +} + +func init() { + t["AlreadyExists"] = reflect.TypeOf((*AlreadyExists)(nil)).Elem() +} + +type AlreadyExistsFault AlreadyExists + +func init() { + t["AlreadyExistsFault"] = reflect.TypeOf((*AlreadyExistsFault)(nil)).Elem() +} + +type AlreadyUpgraded struct { + VimFault +} + +func init() { + t["AlreadyUpgraded"] = reflect.TypeOf((*AlreadyUpgraded)(nil)).Elem() +} + +type AlreadyUpgradedFault AlreadyUpgraded + +func init() { + t["AlreadyUpgradedFault"] = reflect.TypeOf((*AlreadyUpgradedFault)(nil)).Elem() +} + +type AndAlarmExpression struct { + AlarmExpression + + Expression []BaseAlarmExpression `xml:"expression,typeattr"` +} + +func init() { + t["AndAlarmExpression"] = reflect.TypeOf((*AndAlarmExpression)(nil)).Elem() +} + +type AnswerFile struct { + DynamicData + + UserInput []ProfileDeferredPolicyOptionParameter `xml:"userInput,omitempty"` + CreatedTime time.Time `xml:"createdTime"` + ModifiedTime time.Time `xml:"modifiedTime"` +} + +func init() { + t["AnswerFile"] = reflect.TypeOf((*AnswerFile)(nil)).Elem() +} + +type AnswerFileCreateSpec struct { + DynamicData + + Validating *bool `xml:"validating"` +} + +func init() { + t["AnswerFileCreateSpec"] = 
reflect.TypeOf((*AnswerFileCreateSpec)(nil)).Elem() +} + +type AnswerFileOptionsCreateSpec struct { + AnswerFileCreateSpec + + UserInput []ProfileDeferredPolicyOptionParameter `xml:"userInput,omitempty"` +} + +func init() { + t["AnswerFileOptionsCreateSpec"] = reflect.TypeOf((*AnswerFileOptionsCreateSpec)(nil)).Elem() +} + +type AnswerFileSerializedCreateSpec struct { + AnswerFileCreateSpec + + AnswerFileConfigString string `xml:"answerFileConfigString"` +} + +func init() { + t["AnswerFileSerializedCreateSpec"] = reflect.TypeOf((*AnswerFileSerializedCreateSpec)(nil)).Elem() +} + +type AnswerFileStatusError struct { + DynamicData + + UserInputPath ProfilePropertyPath `xml:"userInputPath"` + ErrMsg LocalizableMessage `xml:"errMsg"` +} + +func init() { + t["AnswerFileStatusError"] = reflect.TypeOf((*AnswerFileStatusError)(nil)).Elem() +} + +type AnswerFileStatusResult struct { + DynamicData + + CheckedTime time.Time `xml:"checkedTime"` + Host ManagedObjectReference `xml:"host"` + Status string `xml:"status"` + Error []AnswerFileStatusError `xml:"error,omitempty"` +} + +func init() { + t["AnswerFileStatusResult"] = reflect.TypeOf((*AnswerFileStatusResult)(nil)).Elem() +} + +type AnswerFileUpdateFailed struct { + VimFault + + Failure []AnswerFileUpdateFailure `xml:"failure"` +} + +func init() { + t["AnswerFileUpdateFailed"] = reflect.TypeOf((*AnswerFileUpdateFailed)(nil)).Elem() +} + +type AnswerFileUpdateFailedFault AnswerFileUpdateFailed + +func init() { + t["AnswerFileUpdateFailedFault"] = reflect.TypeOf((*AnswerFileUpdateFailedFault)(nil)).Elem() +} + +type AnswerFileUpdateFailure struct { + DynamicData + + UserInputPath ProfilePropertyPath `xml:"userInputPath"` + ErrMsg LocalizableMessage `xml:"errMsg"` +} + +func init() { + t["AnswerFileUpdateFailure"] = reflect.TypeOf((*AnswerFileUpdateFailure)(nil)).Elem() +} + +type AnswerVM AnswerVMRequestType + +func init() { + t["AnswerVM"] = reflect.TypeOf((*AnswerVM)(nil)).Elem() +} + +type AnswerVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + QuestionId string `xml:"questionId"` + AnswerChoice string `xml:"answerChoice"` +} + +func init() { + t["AnswerVMRequestType"] = reflect.TypeOf((*AnswerVMRequestType)(nil)).Elem() +} + +type AnswerVMResponse struct { +} + +type ApplicationQuiesceFault struct { + SnapshotFault +} + +func init() { + t["ApplicationQuiesceFault"] = reflect.TypeOf((*ApplicationQuiesceFault)(nil)).Elem() +} + +type ApplicationQuiesceFaultFault ApplicationQuiesceFault + +func init() { + t["ApplicationQuiesceFaultFault"] = reflect.TypeOf((*ApplicationQuiesceFaultFault)(nil)).Elem() +} + +type ApplyEntitiesConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + ApplyConfigSpecs []ApplyHostProfileConfigurationSpec `xml:"applyConfigSpecs,omitempty"` +} + +func init() { + t["ApplyEntitiesConfigRequestType"] = reflect.TypeOf((*ApplyEntitiesConfigRequestType)(nil)).Elem() +} + +type ApplyEntitiesConfig_Task ApplyEntitiesConfigRequestType + +func init() { + t["ApplyEntitiesConfig_Task"] = reflect.TypeOf((*ApplyEntitiesConfig_Task)(nil)).Elem() +} + +type ApplyEntitiesConfig_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ApplyEvcModeVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Mask []HostFeatureMask `xml:"mask,omitempty"` + CompleteMasks *bool `xml:"completeMasks"` +} + +func init() { + t["ApplyEvcModeVMRequestType"] = reflect.TypeOf((*ApplyEvcModeVMRequestType)(nil)).Elem() +} + +type ApplyEvcModeVM_Task ApplyEvcModeVMRequestType + +func 
init() { + t["ApplyEvcModeVM_Task"] = reflect.TypeOf((*ApplyEvcModeVM_Task)(nil)).Elem() +} + +type ApplyEvcModeVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ApplyHostConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` + ConfigSpec HostConfigSpec `xml:"configSpec"` + UserInput []ProfileDeferredPolicyOptionParameter `xml:"userInput,omitempty"` +} + +func init() { + t["ApplyHostConfigRequestType"] = reflect.TypeOf((*ApplyHostConfigRequestType)(nil)).Elem() +} + +type ApplyHostConfig_Task ApplyHostConfigRequestType + +func init() { + t["ApplyHostConfig_Task"] = reflect.TypeOf((*ApplyHostConfig_Task)(nil)).Elem() +} + +type ApplyHostConfig_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ApplyHostProfileConfigurationResult struct { + DynamicData + + StartTime time.Time `xml:"startTime"` + CompleteTime time.Time `xml:"completeTime"` + Host ManagedObjectReference `xml:"host"` + Status string `xml:"status"` + Errors []LocalizedMethodFault `xml:"errors,omitempty"` +} + +func init() { + t["ApplyHostProfileConfigurationResult"] = reflect.TypeOf((*ApplyHostProfileConfigurationResult)(nil)).Elem() +} + +type ApplyHostProfileConfigurationSpec struct { + ProfileExecuteResult + + Host ManagedObjectReference `xml:"host"` + TaskListRequirement []string `xml:"taskListRequirement,omitempty"` + TaskDescription []LocalizableMessage `xml:"taskDescription,omitempty"` + RebootStateless *bool `xml:"rebootStateless"` + RebootHost *bool `xml:"rebootHost"` + FaultData *LocalizedMethodFault `xml:"faultData,omitempty"` +} + +func init() { + t["ApplyHostProfileConfigurationSpec"] = reflect.TypeOf((*ApplyHostProfileConfigurationSpec)(nil)).Elem() +} + +type ApplyProfile struct { + DynamicData + + Enabled bool `xml:"enabled"` + Policy []ProfilePolicy `xml:"policy,omitempty"` + ProfileTypeName string `xml:"profileTypeName,omitempty"` + ProfileVersion string `xml:"profileVersion,omitempty"` + Property []ProfileApplyProfileProperty `xml:"property,omitempty"` + Favorite *bool `xml:"favorite"` + ToBeMerged *bool `xml:"toBeMerged"` + ToReplaceWith *bool `xml:"toReplaceWith"` + ToBeDeleted *bool `xml:"toBeDeleted"` + CopyEnableStatus *bool `xml:"copyEnableStatus"` + Hidden *bool `xml:"hidden"` +} + +func init() { + t["ApplyProfile"] = reflect.TypeOf((*ApplyProfile)(nil)).Elem() +} + +type ApplyRecommendation ApplyRecommendationRequestType + +func init() { + t["ApplyRecommendation"] = reflect.TypeOf((*ApplyRecommendation)(nil)).Elem() +} + +type ApplyRecommendationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key string `xml:"key"` +} + +func init() { + t["ApplyRecommendationRequestType"] = reflect.TypeOf((*ApplyRecommendationRequestType)(nil)).Elem() +} + +type ApplyRecommendationResponse struct { +} + +type ApplyStorageDrsRecommendationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key []string `xml:"key"` +} + +func init() { + t["ApplyStorageDrsRecommendationRequestType"] = reflect.TypeOf((*ApplyStorageDrsRecommendationRequestType)(nil)).Elem() +} + +type ApplyStorageDrsRecommendationToPodRequestType struct { + This ManagedObjectReference `xml:"_this"` + Pod ManagedObjectReference `xml:"pod"` + Key string `xml:"key"` +} + +func init() { + t["ApplyStorageDrsRecommendationToPodRequestType"] = reflect.TypeOf((*ApplyStorageDrsRecommendationToPodRequestType)(nil)).Elem() +} + +type ApplyStorageDrsRecommendationToPod_Task 
ApplyStorageDrsRecommendationToPodRequestType + +func init() { + t["ApplyStorageDrsRecommendationToPod_Task"] = reflect.TypeOf((*ApplyStorageDrsRecommendationToPod_Task)(nil)).Elem() +} + +type ApplyStorageDrsRecommendationToPod_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ApplyStorageDrsRecommendation_Task ApplyStorageDrsRecommendationRequestType + +func init() { + t["ApplyStorageDrsRecommendation_Task"] = reflect.TypeOf((*ApplyStorageDrsRecommendation_Task)(nil)).Elem() +} + +type ApplyStorageDrsRecommendation_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ApplyStorageRecommendationResult struct { + DynamicData + + Vm *ManagedObjectReference `xml:"vm,omitempty"` +} + +func init() { + t["ApplyStorageRecommendationResult"] = reflect.TypeOf((*ApplyStorageRecommendationResult)(nil)).Elem() +} + +type AreAlarmActionsEnabled AreAlarmActionsEnabledRequestType + +func init() { + t["AreAlarmActionsEnabled"] = reflect.TypeOf((*AreAlarmActionsEnabled)(nil)).Elem() +} + +type AreAlarmActionsEnabledRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["AreAlarmActionsEnabledRequestType"] = reflect.TypeOf((*AreAlarmActionsEnabledRequestType)(nil)).Elem() +} + +type AreAlarmActionsEnabledResponse struct { + Returnval bool `xml:"returnval"` +} + +type ArrayOfAlarmAction struct { + AlarmAction []BaseAlarmAction `xml:"AlarmAction,omitempty,typeattr"` +} + +func init() { + t["ArrayOfAlarmAction"] = reflect.TypeOf((*ArrayOfAlarmAction)(nil)).Elem() +} + +type ArrayOfAlarmExpression struct { + AlarmExpression []BaseAlarmExpression `xml:"AlarmExpression,omitempty,typeattr"` +} + +func init() { + t["ArrayOfAlarmExpression"] = reflect.TypeOf((*ArrayOfAlarmExpression)(nil)).Elem() +} + +type ArrayOfAlarmState struct { + AlarmState []AlarmState `xml:"AlarmState,omitempty"` +} + +func init() { + t["ArrayOfAlarmState"] = reflect.TypeOf((*ArrayOfAlarmState)(nil)).Elem() +} + +type ArrayOfAlarmTriggeringActionTransitionSpec struct { + AlarmTriggeringActionTransitionSpec []AlarmTriggeringActionTransitionSpec `xml:"AlarmTriggeringActionTransitionSpec,omitempty"` +} + +func init() { + t["ArrayOfAlarmTriggeringActionTransitionSpec"] = reflect.TypeOf((*ArrayOfAlarmTriggeringActionTransitionSpec)(nil)).Elem() +} + +type ArrayOfAnswerFileStatusError struct { + AnswerFileStatusError []AnswerFileStatusError `xml:"AnswerFileStatusError,omitempty"` +} + +func init() { + t["ArrayOfAnswerFileStatusError"] = reflect.TypeOf((*ArrayOfAnswerFileStatusError)(nil)).Elem() +} + +type ArrayOfAnswerFileStatusResult struct { + AnswerFileStatusResult []AnswerFileStatusResult `xml:"AnswerFileStatusResult,omitempty"` +} + +func init() { + t["ArrayOfAnswerFileStatusResult"] = reflect.TypeOf((*ArrayOfAnswerFileStatusResult)(nil)).Elem() +} + +type ArrayOfAnswerFileUpdateFailure struct { + AnswerFileUpdateFailure []AnswerFileUpdateFailure `xml:"AnswerFileUpdateFailure,omitempty"` +} + +func init() { + t["ArrayOfAnswerFileUpdateFailure"] = reflect.TypeOf((*ArrayOfAnswerFileUpdateFailure)(nil)).Elem() +} + +type ArrayOfAnyType struct { + AnyType []AnyType `xml:"anyType,omitempty,typeattr"` +} + +func init() { + t["ArrayOfAnyType"] = reflect.TypeOf((*ArrayOfAnyType)(nil)).Elem() +} + +type ArrayOfAnyURI struct { + AnyURI []string `xml:"anyURI,omitempty"` +} + +func init() { + t["ArrayOfAnyURI"] = reflect.TypeOf((*ArrayOfAnyURI)(nil)).Elem() +} + +type 
ArrayOfApplyHostProfileConfigurationResult struct { + ApplyHostProfileConfigurationResult []ApplyHostProfileConfigurationResult `xml:"ApplyHostProfileConfigurationResult,omitempty"` +} + +func init() { + t["ArrayOfApplyHostProfileConfigurationResult"] = reflect.TypeOf((*ArrayOfApplyHostProfileConfigurationResult)(nil)).Elem() +} + +type ArrayOfApplyHostProfileConfigurationSpec struct { + ApplyHostProfileConfigurationSpec []ApplyHostProfileConfigurationSpec `xml:"ApplyHostProfileConfigurationSpec,omitempty"` +} + +func init() { + t["ArrayOfApplyHostProfileConfigurationSpec"] = reflect.TypeOf((*ArrayOfApplyHostProfileConfigurationSpec)(nil)).Elem() +} + +type ArrayOfApplyProfile struct { + ApplyProfile []BaseApplyProfile `xml:"ApplyProfile,omitempty,typeattr"` +} + +func init() { + t["ArrayOfApplyProfile"] = reflect.TypeOf((*ArrayOfApplyProfile)(nil)).Elem() +} + +type ArrayOfAuthorizationPrivilege struct { + AuthorizationPrivilege []AuthorizationPrivilege `xml:"AuthorizationPrivilege,omitempty"` +} + +func init() { + t["ArrayOfAuthorizationPrivilege"] = reflect.TypeOf((*ArrayOfAuthorizationPrivilege)(nil)).Elem() +} + +type ArrayOfAuthorizationRole struct { + AuthorizationRole []AuthorizationRole `xml:"AuthorizationRole,omitempty"` +} + +func init() { + t["ArrayOfAuthorizationRole"] = reflect.TypeOf((*ArrayOfAuthorizationRole)(nil)).Elem() +} + +type ArrayOfAutoStartPowerInfo struct { + AutoStartPowerInfo []AutoStartPowerInfo `xml:"AutoStartPowerInfo,omitempty"` +} + +func init() { + t["ArrayOfAutoStartPowerInfo"] = reflect.TypeOf((*ArrayOfAutoStartPowerInfo)(nil)).Elem() +} + +type ArrayOfBase64Binary struct { + Base64Binary [][]byte `xml:"base64Binary,omitempty"` +} + +func init() { + t["ArrayOfBase64Binary"] = reflect.TypeOf((*ArrayOfBase64Binary)(nil)).Elem() +} + +type ArrayOfBoolean struct { + Boolean []bool `xml:"boolean,omitempty"` +} + +func init() { + t["ArrayOfBoolean"] = reflect.TypeOf((*ArrayOfBoolean)(nil)).Elem() +} + +type ArrayOfByte struct { + Byte []byte `xml:"byte,omitempty"` +} + +func init() { + t["ArrayOfByte"] = reflect.TypeOf((*ArrayOfByte)(nil)).Elem() +} + +type ArrayOfChangesInfoEventArgument struct { + ChangesInfoEventArgument []ChangesInfoEventArgument `xml:"ChangesInfoEventArgument,omitempty"` +} + +func init() { + t["ArrayOfChangesInfoEventArgument"] = reflect.TypeOf((*ArrayOfChangesInfoEventArgument)(nil)).Elem() +} + +type ArrayOfCheckResult struct { + CheckResult []CheckResult `xml:"CheckResult,omitempty"` +} + +func init() { + t["ArrayOfCheckResult"] = reflect.TypeOf((*ArrayOfCheckResult)(nil)).Elem() +} + +type ArrayOfClusterAction struct { + ClusterAction []BaseClusterAction `xml:"ClusterAction,omitempty,typeattr"` +} + +func init() { + t["ArrayOfClusterAction"] = reflect.TypeOf((*ArrayOfClusterAction)(nil)).Elem() +} + +type ArrayOfClusterActionHistory struct { + ClusterActionHistory []ClusterActionHistory `xml:"ClusterActionHistory,omitempty"` +} + +func init() { + t["ArrayOfClusterActionHistory"] = reflect.TypeOf((*ArrayOfClusterActionHistory)(nil)).Elem() +} + +type ArrayOfClusterAttemptedVmInfo struct { + ClusterAttemptedVmInfo []ClusterAttemptedVmInfo `xml:"ClusterAttemptedVmInfo,omitempty"` +} + +func init() { + t["ArrayOfClusterAttemptedVmInfo"] = reflect.TypeOf((*ArrayOfClusterAttemptedVmInfo)(nil)).Elem() +} + +type ArrayOfClusterDasAamNodeState struct { + ClusterDasAamNodeState []ClusterDasAamNodeState `xml:"ClusterDasAamNodeState,omitempty"` +} + +func init() { + t["ArrayOfClusterDasAamNodeState"] = 
reflect.TypeOf((*ArrayOfClusterDasAamNodeState)(nil)).Elem() +} + +type ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots struct { + ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots []ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots `xml:"ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots,omitempty"` +} + +func init() { + t["ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots"] = reflect.TypeOf((*ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots)(nil)).Elem() +} + +type ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots struct { + ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots []ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots `xml:"ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots,omitempty"` +} + +func init() { + t["ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots"] = reflect.TypeOf((*ArrayOfClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots)(nil)).Elem() +} + +type ArrayOfClusterDasVmConfigInfo struct { + ClusterDasVmConfigInfo []ClusterDasVmConfigInfo `xml:"ClusterDasVmConfigInfo,omitempty"` +} + +func init() { + t["ArrayOfClusterDasVmConfigInfo"] = reflect.TypeOf((*ArrayOfClusterDasVmConfigInfo)(nil)).Elem() +} + +type ArrayOfClusterDasVmConfigSpec struct { + ClusterDasVmConfigSpec []ClusterDasVmConfigSpec `xml:"ClusterDasVmConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfClusterDasVmConfigSpec"] = reflect.TypeOf((*ArrayOfClusterDasVmConfigSpec)(nil)).Elem() +} + +type ArrayOfClusterDpmHostConfigInfo struct { + ClusterDpmHostConfigInfo []ClusterDpmHostConfigInfo `xml:"ClusterDpmHostConfigInfo,omitempty"` +} + +func init() { + t["ArrayOfClusterDpmHostConfigInfo"] = reflect.TypeOf((*ArrayOfClusterDpmHostConfigInfo)(nil)).Elem() +} + +type ArrayOfClusterDpmHostConfigSpec struct { + ClusterDpmHostConfigSpec []ClusterDpmHostConfigSpec `xml:"ClusterDpmHostConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfClusterDpmHostConfigSpec"] = reflect.TypeOf((*ArrayOfClusterDpmHostConfigSpec)(nil)).Elem() +} + +type ArrayOfClusterDrsFaults struct { + ClusterDrsFaults []ClusterDrsFaults `xml:"ClusterDrsFaults,omitempty"` +} + +func init() { + t["ArrayOfClusterDrsFaults"] = reflect.TypeOf((*ArrayOfClusterDrsFaults)(nil)).Elem() +} + +type ArrayOfClusterDrsFaultsFaultsByVm struct { + ClusterDrsFaultsFaultsByVm []BaseClusterDrsFaultsFaultsByVm `xml:"ClusterDrsFaultsFaultsByVm,omitempty,typeattr"` +} + +func init() { + t["ArrayOfClusterDrsFaultsFaultsByVm"] = reflect.TypeOf((*ArrayOfClusterDrsFaultsFaultsByVm)(nil)).Elem() +} + +type ArrayOfClusterDrsMigration struct { + ClusterDrsMigration []ClusterDrsMigration `xml:"ClusterDrsMigration,omitempty"` +} + +func init() { + t["ArrayOfClusterDrsMigration"] = reflect.TypeOf((*ArrayOfClusterDrsMigration)(nil)).Elem() +} + +type ArrayOfClusterDrsRecommendation struct { + ClusterDrsRecommendation []ClusterDrsRecommendation `xml:"ClusterDrsRecommendation,omitempty"` +} + +func init() { + t["ArrayOfClusterDrsRecommendation"] = reflect.TypeOf((*ArrayOfClusterDrsRecommendation)(nil)).Elem() +} + +type ArrayOfClusterDrsVmConfigInfo struct { + ClusterDrsVmConfigInfo []ClusterDrsVmConfigInfo `xml:"ClusterDrsVmConfigInfo,omitempty"` +} + +func init() { + t["ArrayOfClusterDrsVmConfigInfo"] = reflect.TypeOf((*ArrayOfClusterDrsVmConfigInfo)(nil)).Elem() +} + +type ArrayOfClusterDrsVmConfigSpec struct { + ClusterDrsVmConfigSpec []ClusterDrsVmConfigSpec `xml:"ClusterDrsVmConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfClusterDrsVmConfigSpec"] = reflect.TypeOf((*ArrayOfClusterDrsVmConfigSpec)(nil)).Elem() +} + 
+type ArrayOfClusterEVCManagerCheckResult struct { + ClusterEVCManagerCheckResult []ClusterEVCManagerCheckResult `xml:"ClusterEVCManagerCheckResult,omitempty"` +} + +func init() { + t["ArrayOfClusterEVCManagerCheckResult"] = reflect.TypeOf((*ArrayOfClusterEVCManagerCheckResult)(nil)).Elem() +} + +type ArrayOfClusterFailoverHostAdmissionControlInfoHostStatus struct { + ClusterFailoverHostAdmissionControlInfoHostStatus []ClusterFailoverHostAdmissionControlInfoHostStatus `xml:"ClusterFailoverHostAdmissionControlInfoHostStatus,omitempty"` +} + +func init() { + t["ArrayOfClusterFailoverHostAdmissionControlInfoHostStatus"] = reflect.TypeOf((*ArrayOfClusterFailoverHostAdmissionControlInfoHostStatus)(nil)).Elem() +} + +type ArrayOfClusterGroupInfo struct { + ClusterGroupInfo []BaseClusterGroupInfo `xml:"ClusterGroupInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfClusterGroupInfo"] = reflect.TypeOf((*ArrayOfClusterGroupInfo)(nil)).Elem() +} + +type ArrayOfClusterGroupSpec struct { + ClusterGroupSpec []ClusterGroupSpec `xml:"ClusterGroupSpec,omitempty"` +} + +func init() { + t["ArrayOfClusterGroupSpec"] = reflect.TypeOf((*ArrayOfClusterGroupSpec)(nil)).Elem() +} + +type ArrayOfClusterHostRecommendation struct { + ClusterHostRecommendation []ClusterHostRecommendation `xml:"ClusterHostRecommendation,omitempty"` +} + +func init() { + t["ArrayOfClusterHostRecommendation"] = reflect.TypeOf((*ArrayOfClusterHostRecommendation)(nil)).Elem() +} + +type ArrayOfClusterIoFilterInfo struct { + ClusterIoFilterInfo []ClusterIoFilterInfo `xml:"ClusterIoFilterInfo,omitempty"` +} + +func init() { + t["ArrayOfClusterIoFilterInfo"] = reflect.TypeOf((*ArrayOfClusterIoFilterInfo)(nil)).Elem() +} + +type ArrayOfClusterNotAttemptedVmInfo struct { + ClusterNotAttemptedVmInfo []ClusterNotAttemptedVmInfo `xml:"ClusterNotAttemptedVmInfo,omitempty"` +} + +func init() { + t["ArrayOfClusterNotAttemptedVmInfo"] = reflect.TypeOf((*ArrayOfClusterNotAttemptedVmInfo)(nil)).Elem() +} + +type ArrayOfClusterRecommendation struct { + ClusterRecommendation []ClusterRecommendation `xml:"ClusterRecommendation,omitempty"` +} + +func init() { + t["ArrayOfClusterRecommendation"] = reflect.TypeOf((*ArrayOfClusterRecommendation)(nil)).Elem() +} + +type ArrayOfClusterRuleInfo struct { + ClusterRuleInfo []BaseClusterRuleInfo `xml:"ClusterRuleInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfClusterRuleInfo"] = reflect.TypeOf((*ArrayOfClusterRuleInfo)(nil)).Elem() +} + +type ArrayOfClusterRuleSpec struct { + ClusterRuleSpec []ClusterRuleSpec `xml:"ClusterRuleSpec,omitempty"` +} + +func init() { + t["ArrayOfClusterRuleSpec"] = reflect.TypeOf((*ArrayOfClusterRuleSpec)(nil)).Elem() +} + +type ArrayOfClusterVmOrchestrationInfo struct { + ClusterVmOrchestrationInfo []ClusterVmOrchestrationInfo `xml:"ClusterVmOrchestrationInfo,omitempty"` +} + +func init() { + t["ArrayOfClusterVmOrchestrationInfo"] = reflect.TypeOf((*ArrayOfClusterVmOrchestrationInfo)(nil)).Elem() +} + +type ArrayOfClusterVmOrchestrationSpec struct { + ClusterVmOrchestrationSpec []ClusterVmOrchestrationSpec `xml:"ClusterVmOrchestrationSpec,omitempty"` +} + +func init() { + t["ArrayOfClusterVmOrchestrationSpec"] = reflect.TypeOf((*ArrayOfClusterVmOrchestrationSpec)(nil)).Elem() +} + +type ArrayOfComplianceFailure struct { + ComplianceFailure []ComplianceFailure `xml:"ComplianceFailure,omitempty"` +} + +func init() { + t["ArrayOfComplianceFailure"] = reflect.TypeOf((*ArrayOfComplianceFailure)(nil)).Elem() +} + +type ArrayOfComplianceFailureComplianceFailureValues struct { 
+ ComplianceFailureComplianceFailureValues []ComplianceFailureComplianceFailureValues `xml:"ComplianceFailureComplianceFailureValues,omitempty"` +} + +func init() { + t["ArrayOfComplianceFailureComplianceFailureValues"] = reflect.TypeOf((*ArrayOfComplianceFailureComplianceFailureValues)(nil)).Elem() +} + +type ArrayOfComplianceLocator struct { + ComplianceLocator []ComplianceLocator `xml:"ComplianceLocator,omitempty"` +} + +func init() { + t["ArrayOfComplianceLocator"] = reflect.TypeOf((*ArrayOfComplianceLocator)(nil)).Elem() +} + +type ArrayOfComplianceResult struct { + ComplianceResult []ComplianceResult `xml:"ComplianceResult,omitempty"` +} + +func init() { + t["ArrayOfComplianceResult"] = reflect.TypeOf((*ArrayOfComplianceResult)(nil)).Elem() +} + +type ArrayOfComputeResourceHostSPBMLicenseInfo struct { + ComputeResourceHostSPBMLicenseInfo []ComputeResourceHostSPBMLicenseInfo `xml:"ComputeResourceHostSPBMLicenseInfo,omitempty"` +} + +func init() { + t["ArrayOfComputeResourceHostSPBMLicenseInfo"] = reflect.TypeOf((*ArrayOfComputeResourceHostSPBMLicenseInfo)(nil)).Elem() +} + +type ArrayOfConflictingConfigurationConfig struct { + ConflictingConfigurationConfig []ConflictingConfigurationConfig `xml:"ConflictingConfigurationConfig,omitempty"` +} + +func init() { + t["ArrayOfConflictingConfigurationConfig"] = reflect.TypeOf((*ArrayOfConflictingConfigurationConfig)(nil)).Elem() +} + +type ArrayOfCryptoKeyId struct { + CryptoKeyId []CryptoKeyId `xml:"CryptoKeyId,omitempty"` +} + +func init() { + t["ArrayOfCryptoKeyId"] = reflect.TypeOf((*ArrayOfCryptoKeyId)(nil)).Elem() +} + +type ArrayOfCryptoKeyPlain struct { + CryptoKeyPlain []CryptoKeyPlain `xml:"CryptoKeyPlain,omitempty"` +} + +func init() { + t["ArrayOfCryptoKeyPlain"] = reflect.TypeOf((*ArrayOfCryptoKeyPlain)(nil)).Elem() +} + +type ArrayOfCryptoKeyResult struct { + CryptoKeyResult []CryptoKeyResult `xml:"CryptoKeyResult,omitempty"` +} + +func init() { + t["ArrayOfCryptoKeyResult"] = reflect.TypeOf((*ArrayOfCryptoKeyResult)(nil)).Elem() +} + +type ArrayOfCryptoManagerKmipClusterStatus struct { + CryptoManagerKmipClusterStatus []CryptoManagerKmipClusterStatus `xml:"CryptoManagerKmipClusterStatus,omitempty"` +} + +func init() { + t["ArrayOfCryptoManagerKmipClusterStatus"] = reflect.TypeOf((*ArrayOfCryptoManagerKmipClusterStatus)(nil)).Elem() +} + +type ArrayOfCryptoManagerKmipServerStatus struct { + CryptoManagerKmipServerStatus []CryptoManagerKmipServerStatus `xml:"CryptoManagerKmipServerStatus,omitempty"` +} + +func init() { + t["ArrayOfCryptoManagerKmipServerStatus"] = reflect.TypeOf((*ArrayOfCryptoManagerKmipServerStatus)(nil)).Elem() +} + +type ArrayOfCustomFieldDef struct { + CustomFieldDef []CustomFieldDef `xml:"CustomFieldDef,omitempty"` +} + +func init() { + t["ArrayOfCustomFieldDef"] = reflect.TypeOf((*ArrayOfCustomFieldDef)(nil)).Elem() +} + +type ArrayOfCustomFieldValue struct { + CustomFieldValue []BaseCustomFieldValue `xml:"CustomFieldValue,omitempty,typeattr"` +} + +func init() { + t["ArrayOfCustomFieldValue"] = reflect.TypeOf((*ArrayOfCustomFieldValue)(nil)).Elem() +} + +type ArrayOfCustomizationAdapterMapping struct { + CustomizationAdapterMapping []CustomizationAdapterMapping `xml:"CustomizationAdapterMapping,omitempty"` +} + +func init() { + t["ArrayOfCustomizationAdapterMapping"] = reflect.TypeOf((*ArrayOfCustomizationAdapterMapping)(nil)).Elem() +} + +type ArrayOfCustomizationIpV6Generator struct { + CustomizationIpV6Generator []BaseCustomizationIpV6Generator `xml:"CustomizationIpV6Generator,omitempty,typeattr"` +} + 
+func init() { + t["ArrayOfCustomizationIpV6Generator"] = reflect.TypeOf((*ArrayOfCustomizationIpV6Generator)(nil)).Elem() +} + +type ArrayOfCustomizationSpecInfo struct { + CustomizationSpecInfo []CustomizationSpecInfo `xml:"CustomizationSpecInfo,omitempty"` +} + +func init() { + t["ArrayOfCustomizationSpecInfo"] = reflect.TypeOf((*ArrayOfCustomizationSpecInfo)(nil)).Elem() +} + +type ArrayOfDVPortConfigSpec struct { + DVPortConfigSpec []DVPortConfigSpec `xml:"DVPortConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfDVPortConfigSpec"] = reflect.TypeOf((*ArrayOfDVPortConfigSpec)(nil)).Elem() +} + +type ArrayOfDVPortgroupConfigSpec struct { + DVPortgroupConfigSpec []DVPortgroupConfigSpec `xml:"DVPortgroupConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfDVPortgroupConfigSpec"] = reflect.TypeOf((*ArrayOfDVPortgroupConfigSpec)(nil)).Elem() +} + +type ArrayOfDVSHealthCheckConfig struct { + DVSHealthCheckConfig []BaseDVSHealthCheckConfig `xml:"DVSHealthCheckConfig,omitempty,typeattr"` +} + +func init() { + t["ArrayOfDVSHealthCheckConfig"] = reflect.TypeOf((*ArrayOfDVSHealthCheckConfig)(nil)).Elem() +} + +type ArrayOfDVSNetworkResourcePool struct { + DVSNetworkResourcePool []DVSNetworkResourcePool `xml:"DVSNetworkResourcePool,omitempty"` +} + +func init() { + t["ArrayOfDVSNetworkResourcePool"] = reflect.TypeOf((*ArrayOfDVSNetworkResourcePool)(nil)).Elem() +} + +type ArrayOfDVSNetworkResourcePoolConfigSpec struct { + DVSNetworkResourcePoolConfigSpec []DVSNetworkResourcePoolConfigSpec `xml:"DVSNetworkResourcePoolConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfDVSNetworkResourcePoolConfigSpec"] = reflect.TypeOf((*ArrayOfDVSNetworkResourcePoolConfigSpec)(nil)).Elem() +} + +type ArrayOfDVSVmVnicNetworkResourcePool struct { + DVSVmVnicNetworkResourcePool []DVSVmVnicNetworkResourcePool `xml:"DVSVmVnicNetworkResourcePool,omitempty"` +} + +func init() { + t["ArrayOfDVSVmVnicNetworkResourcePool"] = reflect.TypeOf((*ArrayOfDVSVmVnicNetworkResourcePool)(nil)).Elem() +} + +type ArrayOfDasHeartbeatDatastoreInfo struct { + DasHeartbeatDatastoreInfo []DasHeartbeatDatastoreInfo `xml:"DasHeartbeatDatastoreInfo,omitempty"` +} + +func init() { + t["ArrayOfDasHeartbeatDatastoreInfo"] = reflect.TypeOf((*ArrayOfDasHeartbeatDatastoreInfo)(nil)).Elem() +} + +type ArrayOfDatacenterMismatchArgument struct { + DatacenterMismatchArgument []DatacenterMismatchArgument `xml:"DatacenterMismatchArgument,omitempty"` +} + +func init() { + t["ArrayOfDatacenterMismatchArgument"] = reflect.TypeOf((*ArrayOfDatacenterMismatchArgument)(nil)).Elem() +} + +type ArrayOfDatastoreHostMount struct { + DatastoreHostMount []DatastoreHostMount `xml:"DatastoreHostMount,omitempty"` +} + +func init() { + t["ArrayOfDatastoreHostMount"] = reflect.TypeOf((*ArrayOfDatastoreHostMount)(nil)).Elem() +} + +type ArrayOfDatastoreMountPathDatastorePair struct { + DatastoreMountPathDatastorePair []DatastoreMountPathDatastorePair `xml:"DatastoreMountPathDatastorePair,omitempty"` +} + +func init() { + t["ArrayOfDatastoreMountPathDatastorePair"] = reflect.TypeOf((*ArrayOfDatastoreMountPathDatastorePair)(nil)).Elem() +} + +type ArrayOfDatastoreVVolContainerFailoverPair struct { + DatastoreVVolContainerFailoverPair []DatastoreVVolContainerFailoverPair `xml:"DatastoreVVolContainerFailoverPair,omitempty"` +} + +func init() { + t["ArrayOfDatastoreVVolContainerFailoverPair"] = reflect.TypeOf((*ArrayOfDatastoreVVolContainerFailoverPair)(nil)).Elem() +} + +type ArrayOfDiagnosticManagerBundleInfo struct { + DiagnosticManagerBundleInfo 
[]DiagnosticManagerBundleInfo `xml:"DiagnosticManagerBundleInfo,omitempty"` +} + +func init() { + t["ArrayOfDiagnosticManagerBundleInfo"] = reflect.TypeOf((*ArrayOfDiagnosticManagerBundleInfo)(nil)).Elem() +} + +type ArrayOfDiagnosticManagerLogDescriptor struct { + DiagnosticManagerLogDescriptor []DiagnosticManagerLogDescriptor `xml:"DiagnosticManagerLogDescriptor,omitempty"` +} + +func init() { + t["ArrayOfDiagnosticManagerLogDescriptor"] = reflect.TypeOf((*ArrayOfDiagnosticManagerLogDescriptor)(nil)).Elem() +} + +type ArrayOfDiskChangeExtent struct { + DiskChangeExtent []DiskChangeExtent `xml:"DiskChangeExtent,omitempty"` +} + +func init() { + t["ArrayOfDiskChangeExtent"] = reflect.TypeOf((*ArrayOfDiskChangeExtent)(nil)).Elem() +} + +type ArrayOfDistributedVirtualPort struct { + DistributedVirtualPort []DistributedVirtualPort `xml:"DistributedVirtualPort,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualPort"] = reflect.TypeOf((*ArrayOfDistributedVirtualPort)(nil)).Elem() +} + +type ArrayOfDistributedVirtualPortgroupInfo struct { + DistributedVirtualPortgroupInfo []DistributedVirtualPortgroupInfo `xml:"DistributedVirtualPortgroupInfo,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualPortgroupInfo"] = reflect.TypeOf((*ArrayOfDistributedVirtualPortgroupInfo)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchHostMember struct { + DistributedVirtualSwitchHostMember []DistributedVirtualSwitchHostMember `xml:"DistributedVirtualSwitchHostMember,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchHostMember"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostMember)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchHostMemberConfigSpec struct { + DistributedVirtualSwitchHostMemberConfigSpec []DistributedVirtualSwitchHostMemberConfigSpec `xml:"DistributedVirtualSwitchHostMemberConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchHostMemberConfigSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostMemberConfigSpec)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchHostMemberPnicSpec struct { + DistributedVirtualSwitchHostMemberPnicSpec []DistributedVirtualSwitchHostMemberPnicSpec `xml:"DistributedVirtualSwitchHostMemberPnicSpec,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchHostMemberPnicSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostMemberPnicSpec)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchHostProductSpec struct { + DistributedVirtualSwitchHostProductSpec []DistributedVirtualSwitchHostProductSpec `xml:"DistributedVirtualSwitchHostProductSpec,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchHostProductSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostProductSpec)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchInfo struct { + DistributedVirtualSwitchInfo []DistributedVirtualSwitchInfo `xml:"DistributedVirtualSwitchInfo,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchInfo"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchInfo)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchKeyedOpaqueBlob struct { + DistributedVirtualSwitchKeyedOpaqueBlob []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"DistributedVirtualSwitchKeyedOpaqueBlob,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchKeyedOpaqueBlob"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchKeyedOpaqueBlob)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchManagerCompatibilityResult struct { + 
DistributedVirtualSwitchManagerCompatibilityResult []DistributedVirtualSwitchManagerCompatibilityResult `xml:"DistributedVirtualSwitchManagerCompatibilityResult,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchManagerCompatibilityResult"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchManagerCompatibilityResult)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchManagerHostDvsFilterSpec struct { + DistributedVirtualSwitchManagerHostDvsFilterSpec []BaseDistributedVirtualSwitchManagerHostDvsFilterSpec `xml:"DistributedVirtualSwitchManagerHostDvsFilterSpec,omitempty,typeattr"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchManagerHostDvsFilterSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchManagerHostDvsFilterSpec)(nil)).Elem() +} + +type ArrayOfDistributedVirtualSwitchProductSpec struct { + DistributedVirtualSwitchProductSpec []DistributedVirtualSwitchProductSpec `xml:"DistributedVirtualSwitchProductSpec,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchProductSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchProductSpec)(nil)).Elem() +} + +type ArrayOfDouble struct { + Double []float64 `xml:"double,omitempty"` +} + +func init() { + t["ArrayOfDouble"] = reflect.TypeOf((*ArrayOfDouble)(nil)).Elem() +} + +type ArrayOfDvsApplyOperationFaultFaultOnObject struct { + DvsApplyOperationFaultFaultOnObject []DvsApplyOperationFaultFaultOnObject `xml:"DvsApplyOperationFaultFaultOnObject,omitempty"` +} + +func init() { + t["ArrayOfDvsApplyOperationFaultFaultOnObject"] = reflect.TypeOf((*ArrayOfDvsApplyOperationFaultFaultOnObject)(nil)).Elem() +} + +type ArrayOfDvsFilterConfig struct { + DvsFilterConfig []BaseDvsFilterConfig `xml:"DvsFilterConfig,omitempty,typeattr"` +} + +func init() { + t["ArrayOfDvsFilterConfig"] = reflect.TypeOf((*ArrayOfDvsFilterConfig)(nil)).Elem() +} + +type ArrayOfDvsHostInfrastructureTrafficResource struct { + DvsHostInfrastructureTrafficResource []DvsHostInfrastructureTrafficResource `xml:"DvsHostInfrastructureTrafficResource,omitempty"` +} + +func init() { + t["ArrayOfDvsHostInfrastructureTrafficResource"] = reflect.TypeOf((*ArrayOfDvsHostInfrastructureTrafficResource)(nil)).Elem() +} + +type ArrayOfDvsHostVNicProfile struct { + DvsHostVNicProfile []DvsHostVNicProfile `xml:"DvsHostVNicProfile,omitempty"` +} + +func init() { + t["ArrayOfDvsHostVNicProfile"] = reflect.TypeOf((*ArrayOfDvsHostVNicProfile)(nil)).Elem() +} + +type ArrayOfDvsNetworkRuleQualifier struct { + DvsNetworkRuleQualifier []BaseDvsNetworkRuleQualifier `xml:"DvsNetworkRuleQualifier,omitempty,typeattr"` +} + +func init() { + t["ArrayOfDvsNetworkRuleQualifier"] = reflect.TypeOf((*ArrayOfDvsNetworkRuleQualifier)(nil)).Elem() +} + +type ArrayOfDvsOperationBulkFaultFaultOnHost struct { + DvsOperationBulkFaultFaultOnHost []DvsOperationBulkFaultFaultOnHost `xml:"DvsOperationBulkFaultFaultOnHost,omitempty"` +} + +func init() { + t["ArrayOfDvsOperationBulkFaultFaultOnHost"] = reflect.TypeOf((*ArrayOfDvsOperationBulkFaultFaultOnHost)(nil)).Elem() +} + +type ArrayOfDvsOutOfSyncHostArgument struct { + DvsOutOfSyncHostArgument []DvsOutOfSyncHostArgument `xml:"DvsOutOfSyncHostArgument,omitempty"` +} + +func init() { + t["ArrayOfDvsOutOfSyncHostArgument"] = reflect.TypeOf((*ArrayOfDvsOutOfSyncHostArgument)(nil)).Elem() +} + +type ArrayOfDvsProfile struct { + DvsProfile []DvsProfile `xml:"DvsProfile,omitempty"` +} + +func init() { + t["ArrayOfDvsProfile"] = reflect.TypeOf((*ArrayOfDvsProfile)(nil)).Elem() +} + +type ArrayOfDvsServiceConsoleVNicProfile struct 
{ + DvsServiceConsoleVNicProfile []DvsServiceConsoleVNicProfile `xml:"DvsServiceConsoleVNicProfile,omitempty"` +} + +func init() { + t["ArrayOfDvsServiceConsoleVNicProfile"] = reflect.TypeOf((*ArrayOfDvsServiceConsoleVNicProfile)(nil)).Elem() +} + +type ArrayOfDvsTrafficRule struct { + DvsTrafficRule []DvsTrafficRule `xml:"DvsTrafficRule,omitempty"` +} + +func init() { + t["ArrayOfDvsTrafficRule"] = reflect.TypeOf((*ArrayOfDvsTrafficRule)(nil)).Elem() +} + +type ArrayOfDvsVmVnicNetworkResourcePoolRuntimeInfo struct { + DvsVmVnicNetworkResourcePoolRuntimeInfo []DvsVmVnicNetworkResourcePoolRuntimeInfo `xml:"DvsVmVnicNetworkResourcePoolRuntimeInfo,omitempty"` +} + +func init() { + t["ArrayOfDvsVmVnicNetworkResourcePoolRuntimeInfo"] = reflect.TypeOf((*ArrayOfDvsVmVnicNetworkResourcePoolRuntimeInfo)(nil)).Elem() +} + +type ArrayOfDvsVmVnicResourcePoolConfigSpec struct { + DvsVmVnicResourcePoolConfigSpec []DvsVmVnicResourcePoolConfigSpec `xml:"DvsVmVnicResourcePoolConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfDvsVmVnicResourcePoolConfigSpec"] = reflect.TypeOf((*ArrayOfDvsVmVnicResourcePoolConfigSpec)(nil)).Elem() +} + +type ArrayOfDvsVnicAllocatedResource struct { + DvsVnicAllocatedResource []DvsVnicAllocatedResource `xml:"DvsVnicAllocatedResource,omitempty"` +} + +func init() { + t["ArrayOfDvsVnicAllocatedResource"] = reflect.TypeOf((*ArrayOfDvsVnicAllocatedResource)(nil)).Elem() +} + +type ArrayOfDynamicProperty struct { + DynamicProperty []DynamicProperty `xml:"DynamicProperty,omitempty"` +} + +func init() { + t["ArrayOfDynamicProperty"] = reflect.TypeOf((*ArrayOfDynamicProperty)(nil)).Elem() +} + +type ArrayOfEVCMode struct { + EVCMode []EVCMode `xml:"EVCMode,omitempty"` +} + +func init() { + t["ArrayOfEVCMode"] = reflect.TypeOf((*ArrayOfEVCMode)(nil)).Elem() +} + +type ArrayOfElementDescription struct { + ElementDescription []BaseElementDescription `xml:"ElementDescription,omitempty,typeattr"` +} + +func init() { + t["ArrayOfElementDescription"] = reflect.TypeOf((*ArrayOfElementDescription)(nil)).Elem() +} + +type ArrayOfEntityBackupConfig struct { + EntityBackupConfig []EntityBackupConfig `xml:"EntityBackupConfig,omitempty"` +} + +func init() { + t["ArrayOfEntityBackupConfig"] = reflect.TypeOf((*ArrayOfEntityBackupConfig)(nil)).Elem() +} + +type ArrayOfEntityPrivilege struct { + EntityPrivilege []EntityPrivilege `xml:"EntityPrivilege,omitempty"` +} + +func init() { + t["ArrayOfEntityPrivilege"] = reflect.TypeOf((*ArrayOfEntityPrivilege)(nil)).Elem() +} + +type ArrayOfEnumDescription struct { + EnumDescription []EnumDescription `xml:"EnumDescription,omitempty"` +} + +func init() { + t["ArrayOfEnumDescription"] = reflect.TypeOf((*ArrayOfEnumDescription)(nil)).Elem() +} + +type ArrayOfEvent struct { + Event []BaseEvent `xml:"Event,omitempty,typeattr"` +} + +func init() { + t["ArrayOfEvent"] = reflect.TypeOf((*ArrayOfEvent)(nil)).Elem() +} + +type ArrayOfEventAlarmExpressionComparison struct { + EventAlarmExpressionComparison []EventAlarmExpressionComparison `xml:"EventAlarmExpressionComparison,omitempty"` +} + +func init() { + t["ArrayOfEventAlarmExpressionComparison"] = reflect.TypeOf((*ArrayOfEventAlarmExpressionComparison)(nil)).Elem() +} + +type ArrayOfEventArgDesc struct { + EventArgDesc []EventArgDesc `xml:"EventArgDesc,omitempty"` +} + +func init() { + t["ArrayOfEventArgDesc"] = reflect.TypeOf((*ArrayOfEventArgDesc)(nil)).Elem() +} + +type ArrayOfEventDescriptionEventDetail struct { + EventDescriptionEventDetail []EventDescriptionEventDetail 
`xml:"EventDescriptionEventDetail,omitempty"` +} + +func init() { + t["ArrayOfEventDescriptionEventDetail"] = reflect.TypeOf((*ArrayOfEventDescriptionEventDetail)(nil)).Elem() +} + +type ArrayOfExtManagedEntityInfo struct { + ExtManagedEntityInfo []ExtManagedEntityInfo `xml:"ExtManagedEntityInfo,omitempty"` +} + +func init() { + t["ArrayOfExtManagedEntityInfo"] = reflect.TypeOf((*ArrayOfExtManagedEntityInfo)(nil)).Elem() +} + +type ArrayOfExtSolutionManagerInfoTabInfo struct { + ExtSolutionManagerInfoTabInfo []ExtSolutionManagerInfoTabInfo `xml:"ExtSolutionManagerInfoTabInfo,omitempty"` +} + +func init() { + t["ArrayOfExtSolutionManagerInfoTabInfo"] = reflect.TypeOf((*ArrayOfExtSolutionManagerInfoTabInfo)(nil)).Elem() +} + +type ArrayOfExtendedEventPair struct { + ExtendedEventPair []ExtendedEventPair `xml:"ExtendedEventPair,omitempty"` +} + +func init() { + t["ArrayOfExtendedEventPair"] = reflect.TypeOf((*ArrayOfExtendedEventPair)(nil)).Elem() +} + +type ArrayOfExtension struct { + Extension []Extension `xml:"Extension,omitempty"` +} + +func init() { + t["ArrayOfExtension"] = reflect.TypeOf((*ArrayOfExtension)(nil)).Elem() +} + +type ArrayOfExtensionClientInfo struct { + ExtensionClientInfo []ExtensionClientInfo `xml:"ExtensionClientInfo,omitempty"` +} + +func init() { + t["ArrayOfExtensionClientInfo"] = reflect.TypeOf((*ArrayOfExtensionClientInfo)(nil)).Elem() +} + +type ArrayOfExtensionEventTypeInfo struct { + ExtensionEventTypeInfo []ExtensionEventTypeInfo `xml:"ExtensionEventTypeInfo,omitempty"` +} + +func init() { + t["ArrayOfExtensionEventTypeInfo"] = reflect.TypeOf((*ArrayOfExtensionEventTypeInfo)(nil)).Elem() +} + +type ArrayOfExtensionFaultTypeInfo struct { + ExtensionFaultTypeInfo []ExtensionFaultTypeInfo `xml:"ExtensionFaultTypeInfo,omitempty"` +} + +func init() { + t["ArrayOfExtensionFaultTypeInfo"] = reflect.TypeOf((*ArrayOfExtensionFaultTypeInfo)(nil)).Elem() +} + +type ArrayOfExtensionManagerIpAllocationUsage struct { + ExtensionManagerIpAllocationUsage []ExtensionManagerIpAllocationUsage `xml:"ExtensionManagerIpAllocationUsage,omitempty"` +} + +func init() { + t["ArrayOfExtensionManagerIpAllocationUsage"] = reflect.TypeOf((*ArrayOfExtensionManagerIpAllocationUsage)(nil)).Elem() +} + +type ArrayOfExtensionPrivilegeInfo struct { + ExtensionPrivilegeInfo []ExtensionPrivilegeInfo `xml:"ExtensionPrivilegeInfo,omitempty"` +} + +func init() { + t["ArrayOfExtensionPrivilegeInfo"] = reflect.TypeOf((*ArrayOfExtensionPrivilegeInfo)(nil)).Elem() +} + +type ArrayOfExtensionResourceInfo struct { + ExtensionResourceInfo []ExtensionResourceInfo `xml:"ExtensionResourceInfo,omitempty"` +} + +func init() { + t["ArrayOfExtensionResourceInfo"] = reflect.TypeOf((*ArrayOfExtensionResourceInfo)(nil)).Elem() +} + +type ArrayOfExtensionServerInfo struct { + ExtensionServerInfo []ExtensionServerInfo `xml:"ExtensionServerInfo,omitempty"` +} + +func init() { + t["ArrayOfExtensionServerInfo"] = reflect.TypeOf((*ArrayOfExtensionServerInfo)(nil)).Elem() +} + +type ArrayOfExtensionTaskTypeInfo struct { + ExtensionTaskTypeInfo []ExtensionTaskTypeInfo `xml:"ExtensionTaskTypeInfo,omitempty"` +} + +func init() { + t["ArrayOfExtensionTaskTypeInfo"] = reflect.TypeOf((*ArrayOfExtensionTaskTypeInfo)(nil)).Elem() +} + +type ArrayOfFaultToleranceDiskSpec struct { + FaultToleranceDiskSpec []FaultToleranceDiskSpec `xml:"FaultToleranceDiskSpec,omitempty"` +} + +func init() { + t["ArrayOfFaultToleranceDiskSpec"] = reflect.TypeOf((*ArrayOfFaultToleranceDiskSpec)(nil)).Elem() +} + +type ArrayOfFaultsByHost struct { + 
FaultsByHost []FaultsByHost `xml:"FaultsByHost,omitempty"` +} + +func init() { + t["ArrayOfFaultsByHost"] = reflect.TypeOf((*ArrayOfFaultsByHost)(nil)).Elem() +} + +type ArrayOfFaultsByVM struct { + FaultsByVM []FaultsByVM `xml:"FaultsByVM,omitempty"` +} + +func init() { + t["ArrayOfFaultsByVM"] = reflect.TypeOf((*ArrayOfFaultsByVM)(nil)).Elem() +} + +type ArrayOfFcoeConfigVlanRange struct { + FcoeConfigVlanRange []FcoeConfigVlanRange `xml:"FcoeConfigVlanRange,omitempty"` +} + +func init() { + t["ArrayOfFcoeConfigVlanRange"] = reflect.TypeOf((*ArrayOfFcoeConfigVlanRange)(nil)).Elem() +} + +type ArrayOfFileInfo struct { + FileInfo []BaseFileInfo `xml:"FileInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfFileInfo"] = reflect.TypeOf((*ArrayOfFileInfo)(nil)).Elem() +} + +type ArrayOfFileQuery struct { + FileQuery []BaseFileQuery `xml:"FileQuery,omitempty,typeattr"` +} + +func init() { + t["ArrayOfFileQuery"] = reflect.TypeOf((*ArrayOfFileQuery)(nil)).Elem() +} + +type ArrayOfFirewallProfileRulesetProfile struct { + FirewallProfileRulesetProfile []FirewallProfileRulesetProfile `xml:"FirewallProfileRulesetProfile,omitempty"` +} + +func init() { + t["ArrayOfFirewallProfileRulesetProfile"] = reflect.TypeOf((*ArrayOfFirewallProfileRulesetProfile)(nil)).Elem() +} + +type ArrayOfGuestAliases struct { + GuestAliases []GuestAliases `xml:"GuestAliases,omitempty"` +} + +func init() { + t["ArrayOfGuestAliases"] = reflect.TypeOf((*ArrayOfGuestAliases)(nil)).Elem() +} + +type ArrayOfGuestAuthAliasInfo struct { + GuestAuthAliasInfo []GuestAuthAliasInfo `xml:"GuestAuthAliasInfo,omitempty"` +} + +func init() { + t["ArrayOfGuestAuthAliasInfo"] = reflect.TypeOf((*ArrayOfGuestAuthAliasInfo)(nil)).Elem() +} + +type ArrayOfGuestAuthSubject struct { + GuestAuthSubject []BaseGuestAuthSubject `xml:"GuestAuthSubject,omitempty,typeattr"` +} + +func init() { + t["ArrayOfGuestAuthSubject"] = reflect.TypeOf((*ArrayOfGuestAuthSubject)(nil)).Elem() +} + +type ArrayOfGuestDiskInfo struct { + GuestDiskInfo []GuestDiskInfo `xml:"GuestDiskInfo,omitempty"` +} + +func init() { + t["ArrayOfGuestDiskInfo"] = reflect.TypeOf((*ArrayOfGuestDiskInfo)(nil)).Elem() +} + +type ArrayOfGuestFileInfo struct { + GuestFileInfo []GuestFileInfo `xml:"GuestFileInfo,omitempty"` +} + +func init() { + t["ArrayOfGuestFileInfo"] = reflect.TypeOf((*ArrayOfGuestFileInfo)(nil)).Elem() +} + +type ArrayOfGuestInfoNamespaceGenerationInfo struct { + GuestInfoNamespaceGenerationInfo []GuestInfoNamespaceGenerationInfo `xml:"GuestInfoNamespaceGenerationInfo,omitempty"` +} + +func init() { + t["ArrayOfGuestInfoNamespaceGenerationInfo"] = reflect.TypeOf((*ArrayOfGuestInfoNamespaceGenerationInfo)(nil)).Elem() +} + +type ArrayOfGuestMappedAliases struct { + GuestMappedAliases []GuestMappedAliases `xml:"GuestMappedAliases,omitempty"` +} + +func init() { + t["ArrayOfGuestMappedAliases"] = reflect.TypeOf((*ArrayOfGuestMappedAliases)(nil)).Elem() +} + +type ArrayOfGuestNicInfo struct { + GuestNicInfo []GuestNicInfo `xml:"GuestNicInfo,omitempty"` +} + +func init() { + t["ArrayOfGuestNicInfo"] = reflect.TypeOf((*ArrayOfGuestNicInfo)(nil)).Elem() +} + +type ArrayOfGuestOsDescriptor struct { + GuestOsDescriptor []GuestOsDescriptor `xml:"GuestOsDescriptor,omitempty"` +} + +func init() { + t["ArrayOfGuestOsDescriptor"] = reflect.TypeOf((*ArrayOfGuestOsDescriptor)(nil)).Elem() +} + +type ArrayOfGuestProcessInfo struct { + GuestProcessInfo []GuestProcessInfo `xml:"GuestProcessInfo,omitempty"` +} + +func init() { + t["ArrayOfGuestProcessInfo"] = 
reflect.TypeOf((*ArrayOfGuestProcessInfo)(nil)).Elem() +} + +type ArrayOfGuestRegKeyRecordSpec struct { + GuestRegKeyRecordSpec []GuestRegKeyRecordSpec `xml:"GuestRegKeyRecordSpec,omitempty"` +} + +func init() { + t["ArrayOfGuestRegKeyRecordSpec"] = reflect.TypeOf((*ArrayOfGuestRegKeyRecordSpec)(nil)).Elem() +} + +type ArrayOfGuestRegValueSpec struct { + GuestRegValueSpec []GuestRegValueSpec `xml:"GuestRegValueSpec,omitempty"` +} + +func init() { + t["ArrayOfGuestRegValueSpec"] = reflect.TypeOf((*ArrayOfGuestRegValueSpec)(nil)).Elem() +} + +type ArrayOfGuestStackInfo struct { + GuestStackInfo []GuestStackInfo `xml:"GuestStackInfo,omitempty"` +} + +func init() { + t["ArrayOfGuestStackInfo"] = reflect.TypeOf((*ArrayOfGuestStackInfo)(nil)).Elem() +} + +type ArrayOfHbrManagerVmReplicationCapability struct { + HbrManagerVmReplicationCapability []HbrManagerVmReplicationCapability `xml:"HbrManagerVmReplicationCapability,omitempty"` +} + +func init() { + t["ArrayOfHbrManagerVmReplicationCapability"] = reflect.TypeOf((*ArrayOfHbrManagerVmReplicationCapability)(nil)).Elem() +} + +type ArrayOfHealthUpdate struct { + HealthUpdate []HealthUpdate `xml:"HealthUpdate,omitempty"` +} + +func init() { + t["ArrayOfHealthUpdate"] = reflect.TypeOf((*ArrayOfHealthUpdate)(nil)).Elem() +} + +type ArrayOfHealthUpdateInfo struct { + HealthUpdateInfo []HealthUpdateInfo `xml:"HealthUpdateInfo,omitempty"` +} + +func init() { + t["ArrayOfHealthUpdateInfo"] = reflect.TypeOf((*ArrayOfHealthUpdateInfo)(nil)).Elem() +} + +type ArrayOfHostAccessControlEntry struct { + HostAccessControlEntry []HostAccessControlEntry `xml:"HostAccessControlEntry,omitempty"` +} + +func init() { + t["ArrayOfHostAccessControlEntry"] = reflect.TypeOf((*ArrayOfHostAccessControlEntry)(nil)).Elem() +} + +type ArrayOfHostAccountSpec struct { + HostAccountSpec []BaseHostAccountSpec `xml:"HostAccountSpec,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostAccountSpec"] = reflect.TypeOf((*ArrayOfHostAccountSpec)(nil)).Elem() +} + +type ArrayOfHostActiveDirectory struct { + HostActiveDirectory []HostActiveDirectory `xml:"HostActiveDirectory,omitempty"` +} + +func init() { + t["ArrayOfHostActiveDirectory"] = reflect.TypeOf((*ArrayOfHostActiveDirectory)(nil)).Elem() +} + +type ArrayOfHostAuthenticationStoreInfo struct { + HostAuthenticationStoreInfo []BaseHostAuthenticationStoreInfo `xml:"HostAuthenticationStoreInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostAuthenticationStoreInfo"] = reflect.TypeOf((*ArrayOfHostAuthenticationStoreInfo)(nil)).Elem() +} + +type ArrayOfHostBootDevice struct { + HostBootDevice []HostBootDevice `xml:"HostBootDevice,omitempty"` +} + +func init() { + t["ArrayOfHostBootDevice"] = reflect.TypeOf((*ArrayOfHostBootDevice)(nil)).Elem() +} + +type ArrayOfHostCacheConfigurationInfo struct { + HostCacheConfigurationInfo []HostCacheConfigurationInfo `xml:"HostCacheConfigurationInfo,omitempty"` +} + +func init() { + t["ArrayOfHostCacheConfigurationInfo"] = reflect.TypeOf((*ArrayOfHostCacheConfigurationInfo)(nil)).Elem() +} + +type ArrayOfHostConnectInfoNetworkInfo struct { + HostConnectInfoNetworkInfo []BaseHostConnectInfoNetworkInfo `xml:"HostConnectInfoNetworkInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostConnectInfoNetworkInfo"] = reflect.TypeOf((*ArrayOfHostConnectInfoNetworkInfo)(nil)).Elem() +} + +type ArrayOfHostCpuIdInfo struct { + HostCpuIdInfo []HostCpuIdInfo `xml:"HostCpuIdInfo,omitempty"` +} + +func init() { + t["ArrayOfHostCpuIdInfo"] = reflect.TypeOf((*ArrayOfHostCpuIdInfo)(nil)).Elem() +} + 
+type ArrayOfHostCpuPackage struct { + HostCpuPackage []HostCpuPackage `xml:"HostCpuPackage,omitempty"` +} + +func init() { + t["ArrayOfHostCpuPackage"] = reflect.TypeOf((*ArrayOfHostCpuPackage)(nil)).Elem() +} + +type ArrayOfHostDatastoreBrowserSearchResults struct { + HostDatastoreBrowserSearchResults []HostDatastoreBrowserSearchResults `xml:"HostDatastoreBrowserSearchResults,omitempty"` +} + +func init() { + t["ArrayOfHostDatastoreBrowserSearchResults"] = reflect.TypeOf((*ArrayOfHostDatastoreBrowserSearchResults)(nil)).Elem() +} + +type ArrayOfHostDatastoreConnectInfo struct { + HostDatastoreConnectInfo []BaseHostDatastoreConnectInfo `xml:"HostDatastoreConnectInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostDatastoreConnectInfo"] = reflect.TypeOf((*ArrayOfHostDatastoreConnectInfo)(nil)).Elem() +} + +type ArrayOfHostDatastoreSystemDatastoreResult struct { + HostDatastoreSystemDatastoreResult []HostDatastoreSystemDatastoreResult `xml:"HostDatastoreSystemDatastoreResult,omitempty"` +} + +func init() { + t["ArrayOfHostDatastoreSystemDatastoreResult"] = reflect.TypeOf((*ArrayOfHostDatastoreSystemDatastoreResult)(nil)).Elem() +} + +type ArrayOfHostDateTimeSystemTimeZone struct { + HostDateTimeSystemTimeZone []HostDateTimeSystemTimeZone `xml:"HostDateTimeSystemTimeZone,omitempty"` +} + +func init() { + t["ArrayOfHostDateTimeSystemTimeZone"] = reflect.TypeOf((*ArrayOfHostDateTimeSystemTimeZone)(nil)).Elem() +} + +type ArrayOfHostDhcpService struct { + HostDhcpService []HostDhcpService `xml:"HostDhcpService,omitempty"` +} + +func init() { + t["ArrayOfHostDhcpService"] = reflect.TypeOf((*ArrayOfHostDhcpService)(nil)).Elem() +} + +type ArrayOfHostDhcpServiceConfig struct { + HostDhcpServiceConfig []HostDhcpServiceConfig `xml:"HostDhcpServiceConfig,omitempty"` +} + +func init() { + t["ArrayOfHostDhcpServiceConfig"] = reflect.TypeOf((*ArrayOfHostDhcpServiceConfig)(nil)).Elem() +} + +type ArrayOfHostDiagnosticPartition struct { + HostDiagnosticPartition []HostDiagnosticPartition `xml:"HostDiagnosticPartition,omitempty"` +} + +func init() { + t["ArrayOfHostDiagnosticPartition"] = reflect.TypeOf((*ArrayOfHostDiagnosticPartition)(nil)).Elem() +} + +type ArrayOfHostDiagnosticPartitionCreateOption struct { + HostDiagnosticPartitionCreateOption []HostDiagnosticPartitionCreateOption `xml:"HostDiagnosticPartitionCreateOption,omitempty"` +} + +func init() { + t["ArrayOfHostDiagnosticPartitionCreateOption"] = reflect.TypeOf((*ArrayOfHostDiagnosticPartitionCreateOption)(nil)).Elem() +} + +type ArrayOfHostDiskConfigurationResult struct { + HostDiskConfigurationResult []HostDiskConfigurationResult `xml:"HostDiskConfigurationResult,omitempty"` +} + +func init() { + t["ArrayOfHostDiskConfigurationResult"] = reflect.TypeOf((*ArrayOfHostDiskConfigurationResult)(nil)).Elem() +} + +type ArrayOfHostDiskMappingPartitionOption struct { + HostDiskMappingPartitionOption []HostDiskMappingPartitionOption `xml:"HostDiskMappingPartitionOption,omitempty"` +} + +func init() { + t["ArrayOfHostDiskMappingPartitionOption"] = reflect.TypeOf((*ArrayOfHostDiskMappingPartitionOption)(nil)).Elem() +} + +type ArrayOfHostDiskPartitionAttributes struct { + HostDiskPartitionAttributes []HostDiskPartitionAttributes `xml:"HostDiskPartitionAttributes,omitempty"` +} + +func init() { + t["ArrayOfHostDiskPartitionAttributes"] = reflect.TypeOf((*ArrayOfHostDiskPartitionAttributes)(nil)).Elem() +} + +type ArrayOfHostDiskPartitionBlockRange struct { + HostDiskPartitionBlockRange []HostDiskPartitionBlockRange 
`xml:"HostDiskPartitionBlockRange,omitempty"` +} + +func init() { + t["ArrayOfHostDiskPartitionBlockRange"] = reflect.TypeOf((*ArrayOfHostDiskPartitionBlockRange)(nil)).Elem() +} + +type ArrayOfHostDiskPartitionInfo struct { + HostDiskPartitionInfo []HostDiskPartitionInfo `xml:"HostDiskPartitionInfo,omitempty"` +} + +func init() { + t["ArrayOfHostDiskPartitionInfo"] = reflect.TypeOf((*ArrayOfHostDiskPartitionInfo)(nil)).Elem() +} + +type ArrayOfHostEventArgument struct { + HostEventArgument []HostEventArgument `xml:"HostEventArgument,omitempty"` +} + +func init() { + t["ArrayOfHostEventArgument"] = reflect.TypeOf((*ArrayOfHostEventArgument)(nil)).Elem() +} + +type ArrayOfHostFeatureCapability struct { + HostFeatureCapability []HostFeatureCapability `xml:"HostFeatureCapability,omitempty"` +} + +func init() { + t["ArrayOfHostFeatureCapability"] = reflect.TypeOf((*ArrayOfHostFeatureCapability)(nil)).Elem() +} + +type ArrayOfHostFeatureMask struct { + HostFeatureMask []HostFeatureMask `xml:"HostFeatureMask,omitempty"` +} + +func init() { + t["ArrayOfHostFeatureMask"] = reflect.TypeOf((*ArrayOfHostFeatureMask)(nil)).Elem() +} + +type ArrayOfHostFeatureVersionInfo struct { + HostFeatureVersionInfo []HostFeatureVersionInfo `xml:"HostFeatureVersionInfo,omitempty"` +} + +func init() { + t["ArrayOfHostFeatureVersionInfo"] = reflect.TypeOf((*ArrayOfHostFeatureVersionInfo)(nil)).Elem() +} + +type ArrayOfHostFileSystemMountInfo struct { + HostFileSystemMountInfo []HostFileSystemMountInfo `xml:"HostFileSystemMountInfo,omitempty"` +} + +func init() { + t["ArrayOfHostFileSystemMountInfo"] = reflect.TypeOf((*ArrayOfHostFileSystemMountInfo)(nil)).Elem() +} + +type ArrayOfHostFirewallConfigRuleSetConfig struct { + HostFirewallConfigRuleSetConfig []HostFirewallConfigRuleSetConfig `xml:"HostFirewallConfigRuleSetConfig,omitempty"` +} + +func init() { + t["ArrayOfHostFirewallConfigRuleSetConfig"] = reflect.TypeOf((*ArrayOfHostFirewallConfigRuleSetConfig)(nil)).Elem() +} + +type ArrayOfHostFirewallRule struct { + HostFirewallRule []HostFirewallRule `xml:"HostFirewallRule,omitempty"` +} + +func init() { + t["ArrayOfHostFirewallRule"] = reflect.TypeOf((*ArrayOfHostFirewallRule)(nil)).Elem() +} + +type ArrayOfHostFirewallRuleset struct { + HostFirewallRuleset []HostFirewallRuleset `xml:"HostFirewallRuleset,omitempty"` +} + +func init() { + t["ArrayOfHostFirewallRuleset"] = reflect.TypeOf((*ArrayOfHostFirewallRuleset)(nil)).Elem() +} + +type ArrayOfHostFirewallRulesetIpNetwork struct { + HostFirewallRulesetIpNetwork []HostFirewallRulesetIpNetwork `xml:"HostFirewallRulesetIpNetwork,omitempty"` +} + +func init() { + t["ArrayOfHostFirewallRulesetIpNetwork"] = reflect.TypeOf((*ArrayOfHostFirewallRulesetIpNetwork)(nil)).Elem() +} + +type ArrayOfHostGraphicsConfigDeviceType struct { + HostGraphicsConfigDeviceType []HostGraphicsConfigDeviceType `xml:"HostGraphicsConfigDeviceType,omitempty"` +} + +func init() { + t["ArrayOfHostGraphicsConfigDeviceType"] = reflect.TypeOf((*ArrayOfHostGraphicsConfigDeviceType)(nil)).Elem() +} + +type ArrayOfHostGraphicsInfo struct { + HostGraphicsInfo []HostGraphicsInfo `xml:"HostGraphicsInfo,omitempty"` +} + +func init() { + t["ArrayOfHostGraphicsInfo"] = reflect.TypeOf((*ArrayOfHostGraphicsInfo)(nil)).Elem() +} + +type ArrayOfHostHardwareElementInfo struct { + HostHardwareElementInfo []BaseHostHardwareElementInfo `xml:"HostHardwareElementInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostHardwareElementInfo"] = reflect.TypeOf((*ArrayOfHostHardwareElementInfo)(nil)).Elem() +} + 
+type ArrayOfHostHostBusAdapter struct { + HostHostBusAdapter []BaseHostHostBusAdapter `xml:"HostHostBusAdapter,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostHostBusAdapter"] = reflect.TypeOf((*ArrayOfHostHostBusAdapter)(nil)).Elem() +} + +type ArrayOfHostInternetScsiHbaIscsiIpv6Address struct { + HostInternetScsiHbaIscsiIpv6Address []HostInternetScsiHbaIscsiIpv6Address `xml:"HostInternetScsiHbaIscsiIpv6Address,omitempty"` +} + +func init() { + t["ArrayOfHostInternetScsiHbaIscsiIpv6Address"] = reflect.TypeOf((*ArrayOfHostInternetScsiHbaIscsiIpv6Address)(nil)).Elem() +} + +type ArrayOfHostInternetScsiHbaParamValue struct { + HostInternetScsiHbaParamValue []HostInternetScsiHbaParamValue `xml:"HostInternetScsiHbaParamValue,omitempty"` +} + +func init() { + t["ArrayOfHostInternetScsiHbaParamValue"] = reflect.TypeOf((*ArrayOfHostInternetScsiHbaParamValue)(nil)).Elem() +} + +type ArrayOfHostInternetScsiHbaSendTarget struct { + HostInternetScsiHbaSendTarget []HostInternetScsiHbaSendTarget `xml:"HostInternetScsiHbaSendTarget,omitempty"` +} + +func init() { + t["ArrayOfHostInternetScsiHbaSendTarget"] = reflect.TypeOf((*ArrayOfHostInternetScsiHbaSendTarget)(nil)).Elem() +} + +type ArrayOfHostInternetScsiHbaStaticTarget struct { + HostInternetScsiHbaStaticTarget []HostInternetScsiHbaStaticTarget `xml:"HostInternetScsiHbaStaticTarget,omitempty"` +} + +func init() { + t["ArrayOfHostInternetScsiHbaStaticTarget"] = reflect.TypeOf((*ArrayOfHostInternetScsiHbaStaticTarget)(nil)).Elem() +} + +type ArrayOfHostIoFilterInfo struct { + HostIoFilterInfo []HostIoFilterInfo `xml:"HostIoFilterInfo,omitempty"` +} + +func init() { + t["ArrayOfHostIoFilterInfo"] = reflect.TypeOf((*ArrayOfHostIoFilterInfo)(nil)).Elem() +} + +type ArrayOfHostIpConfigIpV6Address struct { + HostIpConfigIpV6Address []HostIpConfigIpV6Address `xml:"HostIpConfigIpV6Address,omitempty"` +} + +func init() { + t["ArrayOfHostIpConfigIpV6Address"] = reflect.TypeOf((*ArrayOfHostIpConfigIpV6Address)(nil)).Elem() +} + +type ArrayOfHostIpRouteEntry struct { + HostIpRouteEntry []HostIpRouteEntry `xml:"HostIpRouteEntry,omitempty"` +} + +func init() { + t["ArrayOfHostIpRouteEntry"] = reflect.TypeOf((*ArrayOfHostIpRouteEntry)(nil)).Elem() +} + +type ArrayOfHostIpRouteOp struct { + HostIpRouteOp []HostIpRouteOp `xml:"HostIpRouteOp,omitempty"` +} + +func init() { + t["ArrayOfHostIpRouteOp"] = reflect.TypeOf((*ArrayOfHostIpRouteOp)(nil)).Elem() +} + +type ArrayOfHostLowLevelProvisioningManagerDiskLayoutSpec struct { + HostLowLevelProvisioningManagerDiskLayoutSpec []HostLowLevelProvisioningManagerDiskLayoutSpec `xml:"HostLowLevelProvisioningManagerDiskLayoutSpec,omitempty"` +} + +func init() { + t["ArrayOfHostLowLevelProvisioningManagerDiskLayoutSpec"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerDiskLayoutSpec)(nil)).Elem() +} + +type ArrayOfHostLowLevelProvisioningManagerFileDeleteResult struct { + HostLowLevelProvisioningManagerFileDeleteResult []HostLowLevelProvisioningManagerFileDeleteResult `xml:"HostLowLevelProvisioningManagerFileDeleteResult,omitempty"` +} + +func init() { + t["ArrayOfHostLowLevelProvisioningManagerFileDeleteResult"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerFileDeleteResult)(nil)).Elem() +} + +type ArrayOfHostLowLevelProvisioningManagerFileDeleteSpec struct { + HostLowLevelProvisioningManagerFileDeleteSpec []HostLowLevelProvisioningManagerFileDeleteSpec `xml:"HostLowLevelProvisioningManagerFileDeleteSpec,omitempty"` +} + +func init() { + t["ArrayOfHostLowLevelProvisioningManagerFileDeleteSpec"] = 
reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerFileDeleteSpec)(nil)).Elem() +} + +type ArrayOfHostLowLevelProvisioningManagerFileReserveResult struct { + HostLowLevelProvisioningManagerFileReserveResult []HostLowLevelProvisioningManagerFileReserveResult `xml:"HostLowLevelProvisioningManagerFileReserveResult,omitempty"` +} + +func init() { + t["ArrayOfHostLowLevelProvisioningManagerFileReserveResult"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerFileReserveResult)(nil)).Elem() +} + +type ArrayOfHostLowLevelProvisioningManagerFileReserveSpec struct { + HostLowLevelProvisioningManagerFileReserveSpec []HostLowLevelProvisioningManagerFileReserveSpec `xml:"HostLowLevelProvisioningManagerFileReserveSpec,omitempty"` +} + +func init() { + t["ArrayOfHostLowLevelProvisioningManagerFileReserveSpec"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerFileReserveSpec)(nil)).Elem() +} + +type ArrayOfHostLowLevelProvisioningManagerSnapshotLayoutSpec struct { + HostLowLevelProvisioningManagerSnapshotLayoutSpec []HostLowLevelProvisioningManagerSnapshotLayoutSpec `xml:"HostLowLevelProvisioningManagerSnapshotLayoutSpec,omitempty"` +} + +func init() { + t["ArrayOfHostLowLevelProvisioningManagerSnapshotLayoutSpec"] = reflect.TypeOf((*ArrayOfHostLowLevelProvisioningManagerSnapshotLayoutSpec)(nil)).Elem() +} + +type ArrayOfHostMemberHealthCheckResult struct { + HostMemberHealthCheckResult []BaseHostMemberHealthCheckResult `xml:"HostMemberHealthCheckResult,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostMemberHealthCheckResult"] = reflect.TypeOf((*ArrayOfHostMemberHealthCheckResult)(nil)).Elem() +} + +type ArrayOfHostMemberRuntimeInfo struct { + HostMemberRuntimeInfo []HostMemberRuntimeInfo `xml:"HostMemberRuntimeInfo,omitempty"` +} + +func init() { + t["ArrayOfHostMemberRuntimeInfo"] = reflect.TypeOf((*ArrayOfHostMemberRuntimeInfo)(nil)).Elem() +} + +type ArrayOfHostMultipathInfoLogicalUnit struct { + HostMultipathInfoLogicalUnit []HostMultipathInfoLogicalUnit `xml:"HostMultipathInfoLogicalUnit,omitempty"` +} + +func init() { + t["ArrayOfHostMultipathInfoLogicalUnit"] = reflect.TypeOf((*ArrayOfHostMultipathInfoLogicalUnit)(nil)).Elem() +} + +type ArrayOfHostMultipathInfoPath struct { + HostMultipathInfoPath []HostMultipathInfoPath `xml:"HostMultipathInfoPath,omitempty"` +} + +func init() { + t["ArrayOfHostMultipathInfoPath"] = reflect.TypeOf((*ArrayOfHostMultipathInfoPath)(nil)).Elem() +} + +type ArrayOfHostMultipathStateInfoPath struct { + HostMultipathStateInfoPath []HostMultipathStateInfoPath `xml:"HostMultipathStateInfoPath,omitempty"` +} + +func init() { + t["ArrayOfHostMultipathStateInfoPath"] = reflect.TypeOf((*ArrayOfHostMultipathStateInfoPath)(nil)).Elem() +} + +type ArrayOfHostNasVolumeConfig struct { + HostNasVolumeConfig []HostNasVolumeConfig `xml:"HostNasVolumeConfig,omitempty"` +} + +func init() { + t["ArrayOfHostNasVolumeConfig"] = reflect.TypeOf((*ArrayOfHostNasVolumeConfig)(nil)).Elem() +} + +type ArrayOfHostNatService struct { + HostNatService []HostNatService `xml:"HostNatService,omitempty"` +} + +func init() { + t["ArrayOfHostNatService"] = reflect.TypeOf((*ArrayOfHostNatService)(nil)).Elem() +} + +type ArrayOfHostNatServiceConfig struct { + HostNatServiceConfig []HostNatServiceConfig `xml:"HostNatServiceConfig,omitempty"` +} + +func init() { + t["ArrayOfHostNatServiceConfig"] = reflect.TypeOf((*ArrayOfHostNatServiceConfig)(nil)).Elem() +} + +type ArrayOfHostNatServicePortForwardSpec struct { + HostNatServicePortForwardSpec []HostNatServicePortForwardSpec 
`xml:"HostNatServicePortForwardSpec,omitempty"` +} + +func init() { + t["ArrayOfHostNatServicePortForwardSpec"] = reflect.TypeOf((*ArrayOfHostNatServicePortForwardSpec)(nil)).Elem() +} + +type ArrayOfHostNetStackInstance struct { + HostNetStackInstance []HostNetStackInstance `xml:"HostNetStackInstance,omitempty"` +} + +func init() { + t["ArrayOfHostNetStackInstance"] = reflect.TypeOf((*ArrayOfHostNetStackInstance)(nil)).Elem() +} + +type ArrayOfHostNetworkConfigNetStackSpec struct { + HostNetworkConfigNetStackSpec []HostNetworkConfigNetStackSpec `xml:"HostNetworkConfigNetStackSpec,omitempty"` +} + +func init() { + t["ArrayOfHostNetworkConfigNetStackSpec"] = reflect.TypeOf((*ArrayOfHostNetworkConfigNetStackSpec)(nil)).Elem() +} + +type ArrayOfHostNumaNode struct { + HostNumaNode []HostNumaNode `xml:"HostNumaNode,omitempty"` +} + +func init() { + t["ArrayOfHostNumaNode"] = reflect.TypeOf((*ArrayOfHostNumaNode)(nil)).Elem() +} + +type ArrayOfHostNumericSensorInfo struct { + HostNumericSensorInfo []HostNumericSensorInfo `xml:"HostNumericSensorInfo,omitempty"` +} + +func init() { + t["ArrayOfHostNumericSensorInfo"] = reflect.TypeOf((*ArrayOfHostNumericSensorInfo)(nil)).Elem() +} + +type ArrayOfHostOpaqueNetworkInfo struct { + HostOpaqueNetworkInfo []HostOpaqueNetworkInfo `xml:"HostOpaqueNetworkInfo,omitempty"` +} + +func init() { + t["ArrayOfHostOpaqueNetworkInfo"] = reflect.TypeOf((*ArrayOfHostOpaqueNetworkInfo)(nil)).Elem() +} + +type ArrayOfHostOpaqueSwitch struct { + HostOpaqueSwitch []HostOpaqueSwitch `xml:"HostOpaqueSwitch,omitempty"` +} + +func init() { + t["ArrayOfHostOpaqueSwitch"] = reflect.TypeOf((*ArrayOfHostOpaqueSwitch)(nil)).Elem() +} + +type ArrayOfHostOpaqueSwitchPhysicalNicZone struct { + HostOpaqueSwitchPhysicalNicZone []HostOpaqueSwitchPhysicalNicZone `xml:"HostOpaqueSwitchPhysicalNicZone,omitempty"` +} + +func init() { + t["ArrayOfHostOpaqueSwitchPhysicalNicZone"] = reflect.TypeOf((*ArrayOfHostOpaqueSwitchPhysicalNicZone)(nil)).Elem() +} + +type ArrayOfHostPatchManagerStatus struct { + HostPatchManagerStatus []HostPatchManagerStatus `xml:"HostPatchManagerStatus,omitempty"` +} + +func init() { + t["ArrayOfHostPatchManagerStatus"] = reflect.TypeOf((*ArrayOfHostPatchManagerStatus)(nil)).Elem() +} + +type ArrayOfHostPatchManagerStatusPrerequisitePatch struct { + HostPatchManagerStatusPrerequisitePatch []HostPatchManagerStatusPrerequisitePatch `xml:"HostPatchManagerStatusPrerequisitePatch,omitempty"` +} + +func init() { + t["ArrayOfHostPatchManagerStatusPrerequisitePatch"] = reflect.TypeOf((*ArrayOfHostPatchManagerStatusPrerequisitePatch)(nil)).Elem() +} + +type ArrayOfHostPathSelectionPolicyOption struct { + HostPathSelectionPolicyOption []HostPathSelectionPolicyOption `xml:"HostPathSelectionPolicyOption,omitempty"` +} + +func init() { + t["ArrayOfHostPathSelectionPolicyOption"] = reflect.TypeOf((*ArrayOfHostPathSelectionPolicyOption)(nil)).Elem() +} + +type ArrayOfHostPciDevice struct { + HostPciDevice []HostPciDevice `xml:"HostPciDevice,omitempty"` +} + +func init() { + t["ArrayOfHostPciDevice"] = reflect.TypeOf((*ArrayOfHostPciDevice)(nil)).Elem() +} + +type ArrayOfHostPciPassthruConfig struct { + HostPciPassthruConfig []BaseHostPciPassthruConfig `xml:"HostPciPassthruConfig,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostPciPassthruConfig"] = reflect.TypeOf((*ArrayOfHostPciPassthruConfig)(nil)).Elem() +} + +type ArrayOfHostPciPassthruInfo struct { + HostPciPassthruInfo []BaseHostPciPassthruInfo `xml:"HostPciPassthruInfo,omitempty,typeattr"` +} + +func init() { + 
t["ArrayOfHostPciPassthruInfo"] = reflect.TypeOf((*ArrayOfHostPciPassthruInfo)(nil)).Elem() +} + +type ArrayOfHostPlacedVirtualNicIdentifier struct { + HostPlacedVirtualNicIdentifier []HostPlacedVirtualNicIdentifier `xml:"HostPlacedVirtualNicIdentifier,omitempty"` +} + +func init() { + t["ArrayOfHostPlacedVirtualNicIdentifier"] = reflect.TypeOf((*ArrayOfHostPlacedVirtualNicIdentifier)(nil)).Elem() +} + +type ArrayOfHostPlugStoreTopologyAdapter struct { + HostPlugStoreTopologyAdapter []HostPlugStoreTopologyAdapter `xml:"HostPlugStoreTopologyAdapter,omitempty"` +} + +func init() { + t["ArrayOfHostPlugStoreTopologyAdapter"] = reflect.TypeOf((*ArrayOfHostPlugStoreTopologyAdapter)(nil)).Elem() +} + +type ArrayOfHostPlugStoreTopologyDevice struct { + HostPlugStoreTopologyDevice []HostPlugStoreTopologyDevice `xml:"HostPlugStoreTopologyDevice,omitempty"` +} + +func init() { + t["ArrayOfHostPlugStoreTopologyDevice"] = reflect.TypeOf((*ArrayOfHostPlugStoreTopologyDevice)(nil)).Elem() +} + +type ArrayOfHostPlugStoreTopologyPath struct { + HostPlugStoreTopologyPath []HostPlugStoreTopologyPath `xml:"HostPlugStoreTopologyPath,omitempty"` +} + +func init() { + t["ArrayOfHostPlugStoreTopologyPath"] = reflect.TypeOf((*ArrayOfHostPlugStoreTopologyPath)(nil)).Elem() +} + +type ArrayOfHostPlugStoreTopologyPlugin struct { + HostPlugStoreTopologyPlugin []HostPlugStoreTopologyPlugin `xml:"HostPlugStoreTopologyPlugin,omitempty"` +} + +func init() { + t["ArrayOfHostPlugStoreTopologyPlugin"] = reflect.TypeOf((*ArrayOfHostPlugStoreTopologyPlugin)(nil)).Elem() +} + +type ArrayOfHostPlugStoreTopologyTarget struct { + HostPlugStoreTopologyTarget []HostPlugStoreTopologyTarget `xml:"HostPlugStoreTopologyTarget,omitempty"` +} + +func init() { + t["ArrayOfHostPlugStoreTopologyTarget"] = reflect.TypeOf((*ArrayOfHostPlugStoreTopologyTarget)(nil)).Elem() +} + +type ArrayOfHostPnicNetworkResourceInfo struct { + HostPnicNetworkResourceInfo []HostPnicNetworkResourceInfo `xml:"HostPnicNetworkResourceInfo,omitempty"` +} + +func init() { + t["ArrayOfHostPnicNetworkResourceInfo"] = reflect.TypeOf((*ArrayOfHostPnicNetworkResourceInfo)(nil)).Elem() +} + +type ArrayOfHostPortGroup struct { + HostPortGroup []HostPortGroup `xml:"HostPortGroup,omitempty"` +} + +func init() { + t["ArrayOfHostPortGroup"] = reflect.TypeOf((*ArrayOfHostPortGroup)(nil)).Elem() +} + +type ArrayOfHostPortGroupConfig struct { + HostPortGroupConfig []HostPortGroupConfig `xml:"HostPortGroupConfig,omitempty"` +} + +func init() { + t["ArrayOfHostPortGroupConfig"] = reflect.TypeOf((*ArrayOfHostPortGroupConfig)(nil)).Elem() +} + +type ArrayOfHostPortGroupPort struct { + HostPortGroupPort []HostPortGroupPort `xml:"HostPortGroupPort,omitempty"` +} + +func init() { + t["ArrayOfHostPortGroupPort"] = reflect.TypeOf((*ArrayOfHostPortGroupPort)(nil)).Elem() +} + +type ArrayOfHostPortGroupProfile struct { + HostPortGroupProfile []HostPortGroupProfile `xml:"HostPortGroupProfile,omitempty"` +} + +func init() { + t["ArrayOfHostPortGroupProfile"] = reflect.TypeOf((*ArrayOfHostPortGroupProfile)(nil)).Elem() +} + +type ArrayOfHostPowerPolicy struct { + HostPowerPolicy []HostPowerPolicy `xml:"HostPowerPolicy,omitempty"` +} + +func init() { + t["ArrayOfHostPowerPolicy"] = reflect.TypeOf((*ArrayOfHostPowerPolicy)(nil)).Elem() +} + +type ArrayOfHostProfileManagerCompositionResultResultElement struct { + HostProfileManagerCompositionResultResultElement []HostProfileManagerCompositionResultResultElement `xml:"HostProfileManagerCompositionResultResultElement,omitempty"` +} + +func init() { 
+ t["ArrayOfHostProfileManagerCompositionResultResultElement"] = reflect.TypeOf((*ArrayOfHostProfileManagerCompositionResultResultElement)(nil)).Elem() +} + +type ArrayOfHostProfileManagerCompositionValidationResultResultElement struct { + HostProfileManagerCompositionValidationResultResultElement []HostProfileManagerCompositionValidationResultResultElement `xml:"HostProfileManagerCompositionValidationResultResultElement,omitempty"` +} + +func init() { + t["ArrayOfHostProfileManagerCompositionValidationResultResultElement"] = reflect.TypeOf((*ArrayOfHostProfileManagerCompositionValidationResultResultElement)(nil)).Elem() +} + +type ArrayOfHostProfileManagerHostToConfigSpecMap struct { + HostProfileManagerHostToConfigSpecMap []HostProfileManagerHostToConfigSpecMap `xml:"HostProfileManagerHostToConfigSpecMap,omitempty"` +} + +func init() { + t["ArrayOfHostProfileManagerHostToConfigSpecMap"] = reflect.TypeOf((*ArrayOfHostProfileManagerHostToConfigSpecMap)(nil)).Elem() +} + +type ArrayOfHostProfilesEntityCustomizations struct { + HostProfilesEntityCustomizations []BaseHostProfilesEntityCustomizations `xml:"HostProfilesEntityCustomizations,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostProfilesEntityCustomizations"] = reflect.TypeOf((*ArrayOfHostProfilesEntityCustomizations)(nil)).Elem() +} + +type ArrayOfHostProtocolEndpoint struct { + HostProtocolEndpoint []HostProtocolEndpoint `xml:"HostProtocolEndpoint,omitempty"` +} + +func init() { + t["ArrayOfHostProtocolEndpoint"] = reflect.TypeOf((*ArrayOfHostProtocolEndpoint)(nil)).Elem() +} + +type ArrayOfHostProxySwitch struct { + HostProxySwitch []HostProxySwitch `xml:"HostProxySwitch,omitempty"` +} + +func init() { + t["ArrayOfHostProxySwitch"] = reflect.TypeOf((*ArrayOfHostProxySwitch)(nil)).Elem() +} + +type ArrayOfHostProxySwitchConfig struct { + HostProxySwitchConfig []HostProxySwitchConfig `xml:"HostProxySwitchConfig,omitempty"` +} + +func init() { + t["ArrayOfHostProxySwitchConfig"] = reflect.TypeOf((*ArrayOfHostProxySwitchConfig)(nil)).Elem() +} + +type ArrayOfHostProxySwitchHostLagConfig struct { + HostProxySwitchHostLagConfig []HostProxySwitchHostLagConfig `xml:"HostProxySwitchHostLagConfig,omitempty"` +} + +func init() { + t["ArrayOfHostProxySwitchHostLagConfig"] = reflect.TypeOf((*ArrayOfHostProxySwitchHostLagConfig)(nil)).Elem() +} + +type ArrayOfHostRuntimeInfoNetStackInstanceRuntimeInfo struct { + HostRuntimeInfoNetStackInstanceRuntimeInfo []HostRuntimeInfoNetStackInstanceRuntimeInfo `xml:"HostRuntimeInfoNetStackInstanceRuntimeInfo,omitempty"` +} + +func init() { + t["ArrayOfHostRuntimeInfoNetStackInstanceRuntimeInfo"] = reflect.TypeOf((*ArrayOfHostRuntimeInfoNetStackInstanceRuntimeInfo)(nil)).Elem() +} + +type ArrayOfHostScsiDisk struct { + HostScsiDisk []HostScsiDisk `xml:"HostScsiDisk,omitempty"` +} + +func init() { + t["ArrayOfHostScsiDisk"] = reflect.TypeOf((*ArrayOfHostScsiDisk)(nil)).Elem() +} + +type ArrayOfHostScsiDiskPartition struct { + HostScsiDiskPartition []HostScsiDiskPartition `xml:"HostScsiDiskPartition,omitempty"` +} + +func init() { + t["ArrayOfHostScsiDiskPartition"] = reflect.TypeOf((*ArrayOfHostScsiDiskPartition)(nil)).Elem() +} + +type ArrayOfHostScsiTopologyInterface struct { + HostScsiTopologyInterface []HostScsiTopologyInterface `xml:"HostScsiTopologyInterface,omitempty"` +} + +func init() { + t["ArrayOfHostScsiTopologyInterface"] = reflect.TypeOf((*ArrayOfHostScsiTopologyInterface)(nil)).Elem() +} + +type ArrayOfHostScsiTopologyLun struct { + HostScsiTopologyLun []HostScsiTopologyLun 
`xml:"HostScsiTopologyLun,omitempty"` +} + +func init() { + t["ArrayOfHostScsiTopologyLun"] = reflect.TypeOf((*ArrayOfHostScsiTopologyLun)(nil)).Elem() +} + +type ArrayOfHostScsiTopologyTarget struct { + HostScsiTopologyTarget []HostScsiTopologyTarget `xml:"HostScsiTopologyTarget,omitempty"` +} + +func init() { + t["ArrayOfHostScsiTopologyTarget"] = reflect.TypeOf((*ArrayOfHostScsiTopologyTarget)(nil)).Elem() +} + +type ArrayOfHostService struct { + HostService []HostService `xml:"HostService,omitempty"` +} + +func init() { + t["ArrayOfHostService"] = reflect.TypeOf((*ArrayOfHostService)(nil)).Elem() +} + +type ArrayOfHostServiceConfig struct { + HostServiceConfig []HostServiceConfig `xml:"HostServiceConfig,omitempty"` +} + +func init() { + t["ArrayOfHostServiceConfig"] = reflect.TypeOf((*ArrayOfHostServiceConfig)(nil)).Elem() +} + +type ArrayOfHostSharedGpuCapabilities struct { + HostSharedGpuCapabilities []HostSharedGpuCapabilities `xml:"HostSharedGpuCapabilities,omitempty"` +} + +func init() { + t["ArrayOfHostSharedGpuCapabilities"] = reflect.TypeOf((*ArrayOfHostSharedGpuCapabilities)(nil)).Elem() +} + +type ArrayOfHostSnmpDestination struct { + HostSnmpDestination []HostSnmpDestination `xml:"HostSnmpDestination,omitempty"` +} + +func init() { + t["ArrayOfHostSnmpDestination"] = reflect.TypeOf((*ArrayOfHostSnmpDestination)(nil)).Elem() +} + +type ArrayOfHostSriovDevicePoolInfo struct { + HostSriovDevicePoolInfo []BaseHostSriovDevicePoolInfo `xml:"HostSriovDevicePoolInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostSriovDevicePoolInfo"] = reflect.TypeOf((*ArrayOfHostSriovDevicePoolInfo)(nil)).Elem() +} + +type ArrayOfHostSslThumbprintInfo struct { + HostSslThumbprintInfo []HostSslThumbprintInfo `xml:"HostSslThumbprintInfo,omitempty"` +} + +func init() { + t["ArrayOfHostSslThumbprintInfo"] = reflect.TypeOf((*ArrayOfHostSslThumbprintInfo)(nil)).Elem() +} + +type ArrayOfHostStorageArrayTypePolicyOption struct { + HostStorageArrayTypePolicyOption []HostStorageArrayTypePolicyOption `xml:"HostStorageArrayTypePolicyOption,omitempty"` +} + +func init() { + t["ArrayOfHostStorageArrayTypePolicyOption"] = reflect.TypeOf((*ArrayOfHostStorageArrayTypePolicyOption)(nil)).Elem() +} + +type ArrayOfHostStorageElementInfo struct { + HostStorageElementInfo []HostStorageElementInfo `xml:"HostStorageElementInfo,omitempty"` +} + +func init() { + t["ArrayOfHostStorageElementInfo"] = reflect.TypeOf((*ArrayOfHostStorageElementInfo)(nil)).Elem() +} + +type ArrayOfHostStorageOperationalInfo struct { + HostStorageOperationalInfo []HostStorageOperationalInfo `xml:"HostStorageOperationalInfo,omitempty"` +} + +func init() { + t["ArrayOfHostStorageOperationalInfo"] = reflect.TypeOf((*ArrayOfHostStorageOperationalInfo)(nil)).Elem() +} + +type ArrayOfHostStorageSystemDiskLocatorLedResult struct { + HostStorageSystemDiskLocatorLedResult []HostStorageSystemDiskLocatorLedResult `xml:"HostStorageSystemDiskLocatorLedResult,omitempty"` +} + +func init() { + t["ArrayOfHostStorageSystemDiskLocatorLedResult"] = reflect.TypeOf((*ArrayOfHostStorageSystemDiskLocatorLedResult)(nil)).Elem() +} + +type ArrayOfHostStorageSystemScsiLunResult struct { + HostStorageSystemScsiLunResult []HostStorageSystemScsiLunResult `xml:"HostStorageSystemScsiLunResult,omitempty"` +} + +func init() { + t["ArrayOfHostStorageSystemScsiLunResult"] = reflect.TypeOf((*ArrayOfHostStorageSystemScsiLunResult)(nil)).Elem() +} + +type ArrayOfHostStorageSystemVmfsVolumeResult struct { + HostStorageSystemVmfsVolumeResult 
[]HostStorageSystemVmfsVolumeResult `xml:"HostStorageSystemVmfsVolumeResult,omitempty"` +} + +func init() { + t["ArrayOfHostStorageSystemVmfsVolumeResult"] = reflect.TypeOf((*ArrayOfHostStorageSystemVmfsVolumeResult)(nil)).Elem() +} + +type ArrayOfHostSubSpecification struct { + HostSubSpecification []HostSubSpecification `xml:"HostSubSpecification,omitempty"` +} + +func init() { + t["ArrayOfHostSubSpecification"] = reflect.TypeOf((*ArrayOfHostSubSpecification)(nil)).Elem() +} + +type ArrayOfHostSystemIdentificationInfo struct { + HostSystemIdentificationInfo []HostSystemIdentificationInfo `xml:"HostSystemIdentificationInfo,omitempty"` +} + +func init() { + t["ArrayOfHostSystemIdentificationInfo"] = reflect.TypeOf((*ArrayOfHostSystemIdentificationInfo)(nil)).Elem() +} + +type ArrayOfHostSystemResourceInfo struct { + HostSystemResourceInfo []HostSystemResourceInfo `xml:"HostSystemResourceInfo,omitempty"` +} + +func init() { + t["ArrayOfHostSystemResourceInfo"] = reflect.TypeOf((*ArrayOfHostSystemResourceInfo)(nil)).Elem() +} + +type ArrayOfHostSystemSwapConfigurationSystemSwapOption struct { + HostSystemSwapConfigurationSystemSwapOption []BaseHostSystemSwapConfigurationSystemSwapOption `xml:"HostSystemSwapConfigurationSystemSwapOption,omitempty,typeattr"` +} + +func init() { + t["ArrayOfHostSystemSwapConfigurationSystemSwapOption"] = reflect.TypeOf((*ArrayOfHostSystemSwapConfigurationSystemSwapOption)(nil)).Elem() +} + +type ArrayOfHostTpmDigestInfo struct { + HostTpmDigestInfo []HostTpmDigestInfo `xml:"HostTpmDigestInfo,omitempty"` +} + +func init() { + t["ArrayOfHostTpmDigestInfo"] = reflect.TypeOf((*ArrayOfHostTpmDigestInfo)(nil)).Elem() +} + +type ArrayOfHostTpmEventLogEntry struct { + HostTpmEventLogEntry []HostTpmEventLogEntry `xml:"HostTpmEventLogEntry,omitempty"` +} + +func init() { + t["ArrayOfHostTpmEventLogEntry"] = reflect.TypeOf((*ArrayOfHostTpmEventLogEntry)(nil)).Elem() +} + +type ArrayOfHostUnresolvedVmfsExtent struct { + HostUnresolvedVmfsExtent []HostUnresolvedVmfsExtent `xml:"HostUnresolvedVmfsExtent,omitempty"` +} + +func init() { + t["ArrayOfHostUnresolvedVmfsExtent"] = reflect.TypeOf((*ArrayOfHostUnresolvedVmfsExtent)(nil)).Elem() +} + +type ArrayOfHostUnresolvedVmfsResolutionResult struct { + HostUnresolvedVmfsResolutionResult []HostUnresolvedVmfsResolutionResult `xml:"HostUnresolvedVmfsResolutionResult,omitempty"` +} + +func init() { + t["ArrayOfHostUnresolvedVmfsResolutionResult"] = reflect.TypeOf((*ArrayOfHostUnresolvedVmfsResolutionResult)(nil)).Elem() +} + +type ArrayOfHostUnresolvedVmfsResolutionSpec struct { + HostUnresolvedVmfsResolutionSpec []HostUnresolvedVmfsResolutionSpec `xml:"HostUnresolvedVmfsResolutionSpec,omitempty"` +} + +func init() { + t["ArrayOfHostUnresolvedVmfsResolutionSpec"] = reflect.TypeOf((*ArrayOfHostUnresolvedVmfsResolutionSpec)(nil)).Elem() +} + +type ArrayOfHostUnresolvedVmfsVolume struct { + HostUnresolvedVmfsVolume []HostUnresolvedVmfsVolume `xml:"HostUnresolvedVmfsVolume,omitempty"` +} + +func init() { + t["ArrayOfHostUnresolvedVmfsVolume"] = reflect.TypeOf((*ArrayOfHostUnresolvedVmfsVolume)(nil)).Elem() +} + +type ArrayOfHostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption struct { + HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption []HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption `xml:"HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption,omitempty"` +} + +func init() { + t["ArrayOfHostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption"] = 
reflect.TypeOf((*ArrayOfHostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption)(nil)).Elem() +} + +type ArrayOfHostVMotionCompatibility struct { + HostVMotionCompatibility []HostVMotionCompatibility `xml:"HostVMotionCompatibility,omitempty"` +} + +func init() { + t["ArrayOfHostVMotionCompatibility"] = reflect.TypeOf((*ArrayOfHostVMotionCompatibility)(nil)).Elem() +} + +type ArrayOfHostVirtualNic struct { + HostVirtualNic []HostVirtualNic `xml:"HostVirtualNic,omitempty"` +} + +func init() { + t["ArrayOfHostVirtualNic"] = reflect.TypeOf((*ArrayOfHostVirtualNic)(nil)).Elem() +} + +type ArrayOfHostVirtualNicConfig struct { + HostVirtualNicConfig []HostVirtualNicConfig `xml:"HostVirtualNicConfig,omitempty"` +} + +func init() { + t["ArrayOfHostVirtualNicConfig"] = reflect.TypeOf((*ArrayOfHostVirtualNicConfig)(nil)).Elem() +} + +type ArrayOfHostVirtualNicManagerNicTypeSelection struct { + HostVirtualNicManagerNicTypeSelection []HostVirtualNicManagerNicTypeSelection `xml:"HostVirtualNicManagerNicTypeSelection,omitempty"` +} + +func init() { + t["ArrayOfHostVirtualNicManagerNicTypeSelection"] = reflect.TypeOf((*ArrayOfHostVirtualNicManagerNicTypeSelection)(nil)).Elem() +} + +type ArrayOfHostVirtualSwitch struct { + HostVirtualSwitch []HostVirtualSwitch `xml:"HostVirtualSwitch,omitempty"` +} + +func init() { + t["ArrayOfHostVirtualSwitch"] = reflect.TypeOf((*ArrayOfHostVirtualSwitch)(nil)).Elem() +} + +type ArrayOfHostVirtualSwitchConfig struct { + HostVirtualSwitchConfig []HostVirtualSwitchConfig `xml:"HostVirtualSwitchConfig,omitempty"` +} + +func init() { + t["ArrayOfHostVirtualSwitchConfig"] = reflect.TypeOf((*ArrayOfHostVirtualSwitchConfig)(nil)).Elem() +} + +type ArrayOfHostVmciAccessManagerAccessSpec struct { + HostVmciAccessManagerAccessSpec []HostVmciAccessManagerAccessSpec `xml:"HostVmciAccessManagerAccessSpec,omitempty"` +} + +func init() { + t["ArrayOfHostVmciAccessManagerAccessSpec"] = reflect.TypeOf((*ArrayOfHostVmciAccessManagerAccessSpec)(nil)).Elem() +} + +type ArrayOfHostVmfsRescanResult struct { + HostVmfsRescanResult []HostVmfsRescanResult `xml:"HostVmfsRescanResult,omitempty"` +} + +func init() { + t["ArrayOfHostVmfsRescanResult"] = reflect.TypeOf((*ArrayOfHostVmfsRescanResult)(nil)).Elem() +} + +type ArrayOfHostVsanInternalSystemCmmdsQuery struct { + HostVsanInternalSystemCmmdsQuery []HostVsanInternalSystemCmmdsQuery `xml:"HostVsanInternalSystemCmmdsQuery,omitempty"` +} + +func init() { + t["ArrayOfHostVsanInternalSystemCmmdsQuery"] = reflect.TypeOf((*ArrayOfHostVsanInternalSystemCmmdsQuery)(nil)).Elem() +} + +type ArrayOfHostVsanInternalSystemDeleteVsanObjectsResult struct { + HostVsanInternalSystemDeleteVsanObjectsResult []HostVsanInternalSystemDeleteVsanObjectsResult `xml:"HostVsanInternalSystemDeleteVsanObjectsResult,omitempty"` +} + +func init() { + t["ArrayOfHostVsanInternalSystemDeleteVsanObjectsResult"] = reflect.TypeOf((*ArrayOfHostVsanInternalSystemDeleteVsanObjectsResult)(nil)).Elem() +} + +type ArrayOfHostVsanInternalSystemVsanObjectOperationResult struct { + HostVsanInternalSystemVsanObjectOperationResult []HostVsanInternalSystemVsanObjectOperationResult `xml:"HostVsanInternalSystemVsanObjectOperationResult,omitempty"` +} + +func init() { + t["ArrayOfHostVsanInternalSystemVsanObjectOperationResult"] = reflect.TypeOf((*ArrayOfHostVsanInternalSystemVsanObjectOperationResult)(nil)).Elem() +} + +type ArrayOfHostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult struct { + HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult 
[]HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult `xml:"HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult,omitempty"` +} + +func init() { + t["ArrayOfHostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult"] = reflect.TypeOf((*ArrayOfHostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult)(nil)).Elem() +} + +type ArrayOfHttpNfcLeaseDatastoreLeaseInfo struct { + HttpNfcLeaseDatastoreLeaseInfo []HttpNfcLeaseDatastoreLeaseInfo `xml:"HttpNfcLeaseDatastoreLeaseInfo,omitempty"` +} + +func init() { + t["ArrayOfHttpNfcLeaseDatastoreLeaseInfo"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseDatastoreLeaseInfo)(nil)).Elem() +} + +type ArrayOfHttpNfcLeaseDeviceUrl struct { + HttpNfcLeaseDeviceUrl []HttpNfcLeaseDeviceUrl `xml:"HttpNfcLeaseDeviceUrl,omitempty"` +} + +func init() { + t["ArrayOfHttpNfcLeaseDeviceUrl"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseDeviceUrl)(nil)).Elem() +} + +type ArrayOfHttpNfcLeaseHostInfo struct { + HttpNfcLeaseHostInfo []HttpNfcLeaseHostInfo `xml:"HttpNfcLeaseHostInfo,omitempty"` +} + +func init() { + t["ArrayOfHttpNfcLeaseHostInfo"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseHostInfo)(nil)).Elem() +} + +type ArrayOfHttpNfcLeaseManifestEntry struct { + HttpNfcLeaseManifestEntry []HttpNfcLeaseManifestEntry `xml:"HttpNfcLeaseManifestEntry,omitempty"` +} + +func init() { + t["ArrayOfHttpNfcLeaseManifestEntry"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseManifestEntry)(nil)).Elem() +} + +type ArrayOfHttpNfcLeaseSourceFile struct { + HttpNfcLeaseSourceFile []HttpNfcLeaseSourceFile `xml:"HttpNfcLeaseSourceFile,omitempty"` +} + +func init() { + t["ArrayOfHttpNfcLeaseSourceFile"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseSourceFile)(nil)).Elem() +} + +type ArrayOfID struct { + ID []ID `xml:"ID,omitempty"` +} + +func init() { + t["ArrayOfID"] = reflect.TypeOf((*ArrayOfID)(nil)).Elem() +} + +type ArrayOfImportOperationBulkFaultFaultOnImport struct { + ImportOperationBulkFaultFaultOnImport []ImportOperationBulkFaultFaultOnImport `xml:"ImportOperationBulkFaultFaultOnImport,omitempty"` +} + +func init() { + t["ArrayOfImportOperationBulkFaultFaultOnImport"] = reflect.TypeOf((*ArrayOfImportOperationBulkFaultFaultOnImport)(nil)).Elem() +} + +type ArrayOfImportSpec struct { + ImportSpec []BaseImportSpec `xml:"ImportSpec,omitempty,typeattr"` +} + +func init() { + t["ArrayOfImportSpec"] = reflect.TypeOf((*ArrayOfImportSpec)(nil)).Elem() +} + +type ArrayOfInt struct { + Int []int32 `xml:"int,omitempty"` +} + +func init() { + t["ArrayOfInt"] = reflect.TypeOf((*ArrayOfInt)(nil)).Elem() +} + +type ArrayOfIoFilterHostIssue struct { + IoFilterHostIssue []IoFilterHostIssue `xml:"IoFilterHostIssue,omitempty"` +} + +func init() { + t["ArrayOfIoFilterHostIssue"] = reflect.TypeOf((*ArrayOfIoFilterHostIssue)(nil)).Elem() +} + +type ArrayOfIpPool struct { + IpPool []IpPool `xml:"IpPool,omitempty"` +} + +func init() { + t["ArrayOfIpPool"] = reflect.TypeOf((*ArrayOfIpPool)(nil)).Elem() +} + +type ArrayOfIpPoolAssociation struct { + IpPoolAssociation []IpPoolAssociation `xml:"IpPoolAssociation,omitempty"` +} + +func init() { + t["ArrayOfIpPoolAssociation"] = reflect.TypeOf((*ArrayOfIpPoolAssociation)(nil)).Elem() +} + +type ArrayOfIpPoolManagerIpAllocation struct { + IpPoolManagerIpAllocation []IpPoolManagerIpAllocation `xml:"IpPoolManagerIpAllocation,omitempty"` +} + +func init() { + t["ArrayOfIpPoolManagerIpAllocation"] = reflect.TypeOf((*ArrayOfIpPoolManagerIpAllocation)(nil)).Elem() +} + +type ArrayOfIscsiDependencyEntity struct { + IscsiDependencyEntity []IscsiDependencyEntity 
`xml:"IscsiDependencyEntity,omitempty"` +} + +func init() { + t["ArrayOfIscsiDependencyEntity"] = reflect.TypeOf((*ArrayOfIscsiDependencyEntity)(nil)).Elem() +} + +type ArrayOfIscsiPortInfo struct { + IscsiPortInfo []IscsiPortInfo `xml:"IscsiPortInfo,omitempty"` +} + +func init() { + t["ArrayOfIscsiPortInfo"] = reflect.TypeOf((*ArrayOfIscsiPortInfo)(nil)).Elem() +} + +type ArrayOfKernelModuleInfo struct { + KernelModuleInfo []KernelModuleInfo `xml:"KernelModuleInfo,omitempty"` +} + +func init() { + t["ArrayOfKernelModuleInfo"] = reflect.TypeOf((*ArrayOfKernelModuleInfo)(nil)).Elem() +} + +type ArrayOfKeyAnyValue struct { + KeyAnyValue []KeyAnyValue `xml:"KeyAnyValue,omitempty"` +} + +func init() { + t["ArrayOfKeyAnyValue"] = reflect.TypeOf((*ArrayOfKeyAnyValue)(nil)).Elem() +} + +type ArrayOfKeyValue struct { + KeyValue []KeyValue `xml:"KeyValue,omitempty"` +} + +func init() { + t["ArrayOfKeyValue"] = reflect.TypeOf((*ArrayOfKeyValue)(nil)).Elem() +} + +type ArrayOfKmipClusterInfo struct { + KmipClusterInfo []KmipClusterInfo `xml:"KmipClusterInfo,omitempty"` +} + +func init() { + t["ArrayOfKmipClusterInfo"] = reflect.TypeOf((*ArrayOfKmipClusterInfo)(nil)).Elem() +} + +type ArrayOfKmipServerInfo struct { + KmipServerInfo []KmipServerInfo `xml:"KmipServerInfo,omitempty"` +} + +func init() { + t["ArrayOfKmipServerInfo"] = reflect.TypeOf((*ArrayOfKmipServerInfo)(nil)).Elem() +} + +type ArrayOfLicenseAssignmentManagerLicenseAssignment struct { + LicenseAssignmentManagerLicenseAssignment []LicenseAssignmentManagerLicenseAssignment `xml:"LicenseAssignmentManagerLicenseAssignment,omitempty"` +} + +func init() { + t["ArrayOfLicenseAssignmentManagerLicenseAssignment"] = reflect.TypeOf((*ArrayOfLicenseAssignmentManagerLicenseAssignment)(nil)).Elem() +} + +type ArrayOfLicenseAvailabilityInfo struct { + LicenseAvailabilityInfo []LicenseAvailabilityInfo `xml:"LicenseAvailabilityInfo,omitempty"` +} + +func init() { + t["ArrayOfLicenseAvailabilityInfo"] = reflect.TypeOf((*ArrayOfLicenseAvailabilityInfo)(nil)).Elem() +} + +type ArrayOfLicenseFeatureInfo struct { + LicenseFeatureInfo []LicenseFeatureInfo `xml:"LicenseFeatureInfo,omitempty"` +} + +func init() { + t["ArrayOfLicenseFeatureInfo"] = reflect.TypeOf((*ArrayOfLicenseFeatureInfo)(nil)).Elem() +} + +type ArrayOfLicenseManagerLicenseInfo struct { + LicenseManagerLicenseInfo []LicenseManagerLicenseInfo `xml:"LicenseManagerLicenseInfo,omitempty"` +} + +func init() { + t["ArrayOfLicenseManagerLicenseInfo"] = reflect.TypeOf((*ArrayOfLicenseManagerLicenseInfo)(nil)).Elem() +} + +type ArrayOfLicenseReservationInfo struct { + LicenseReservationInfo []LicenseReservationInfo `xml:"LicenseReservationInfo,omitempty"` +} + +func init() { + t["ArrayOfLicenseReservationInfo"] = reflect.TypeOf((*ArrayOfLicenseReservationInfo)(nil)).Elem() +} + +type ArrayOfLocalizableMessage struct { + LocalizableMessage []LocalizableMessage `xml:"LocalizableMessage,omitempty"` +} + +func init() { + t["ArrayOfLocalizableMessage"] = reflect.TypeOf((*ArrayOfLocalizableMessage)(nil)).Elem() +} + +type ArrayOfLocalizationManagerMessageCatalog struct { + LocalizationManagerMessageCatalog []LocalizationManagerMessageCatalog `xml:"LocalizationManagerMessageCatalog,omitempty"` +} + +func init() { + t["ArrayOfLocalizationManagerMessageCatalog"] = reflect.TypeOf((*ArrayOfLocalizationManagerMessageCatalog)(nil)).Elem() +} + +type ArrayOfLong struct { + Long []int64 `xml:"long,omitempty"` +} + +func init() { + t["ArrayOfLong"] = reflect.TypeOf((*ArrayOfLong)(nil)).Elem() +} + +type 
ArrayOfManagedEntityStatus struct { + ManagedEntityStatus []ManagedEntityStatus `xml:"ManagedEntityStatus,omitempty"` +} + +func init() { + t["ArrayOfManagedEntityStatus"] = reflect.TypeOf((*ArrayOfManagedEntityStatus)(nil)).Elem() +} + +type ArrayOfManagedObjectReference struct { + ManagedObjectReference []ManagedObjectReference `xml:"ManagedObjectReference,omitempty"` +} + +func init() { + t["ArrayOfManagedObjectReference"] = reflect.TypeOf((*ArrayOfManagedObjectReference)(nil)).Elem() +} + +type ArrayOfMethodActionArgument struct { + MethodActionArgument []MethodActionArgument `xml:"MethodActionArgument,omitempty"` +} + +func init() { + t["ArrayOfMethodActionArgument"] = reflect.TypeOf((*ArrayOfMethodActionArgument)(nil)).Elem() +} + +type ArrayOfMethodFault struct { + MethodFault []BaseMethodFault `xml:"MethodFault,omitempty,typeattr"` +} + +func init() { + t["ArrayOfMethodFault"] = reflect.TypeOf((*ArrayOfMethodFault)(nil)).Elem() +} + +type ArrayOfMissingObject struct { + MissingObject []MissingObject `xml:"MissingObject,omitempty"` +} + +func init() { + t["ArrayOfMissingObject"] = reflect.TypeOf((*ArrayOfMissingObject)(nil)).Elem() +} + +type ArrayOfMissingProperty struct { + MissingProperty []MissingProperty `xml:"MissingProperty,omitempty"` +} + +func init() { + t["ArrayOfMissingProperty"] = reflect.TypeOf((*ArrayOfMissingProperty)(nil)).Elem() +} + +type ArrayOfMultipleCertificatesVerifyFaultThumbprintData struct { + MultipleCertificatesVerifyFaultThumbprintData []MultipleCertificatesVerifyFaultThumbprintData `xml:"MultipleCertificatesVerifyFaultThumbprintData,omitempty"` +} + +func init() { + t["ArrayOfMultipleCertificatesVerifyFaultThumbprintData"] = reflect.TypeOf((*ArrayOfMultipleCertificatesVerifyFaultThumbprintData)(nil)).Elem() +} + +type ArrayOfNasStorageProfile struct { + NasStorageProfile []NasStorageProfile `xml:"NasStorageProfile,omitempty"` +} + +func init() { + t["ArrayOfNasStorageProfile"] = reflect.TypeOf((*ArrayOfNasStorageProfile)(nil)).Elem() +} + +type ArrayOfNetIpConfigInfoIpAddress struct { + NetIpConfigInfoIpAddress []NetIpConfigInfoIpAddress `xml:"NetIpConfigInfoIpAddress,omitempty"` +} + +func init() { + t["ArrayOfNetIpConfigInfoIpAddress"] = reflect.TypeOf((*ArrayOfNetIpConfigInfoIpAddress)(nil)).Elem() +} + +type ArrayOfNetIpConfigSpecIpAddressSpec struct { + NetIpConfigSpecIpAddressSpec []NetIpConfigSpecIpAddressSpec `xml:"NetIpConfigSpecIpAddressSpec,omitempty"` +} + +func init() { + t["ArrayOfNetIpConfigSpecIpAddressSpec"] = reflect.TypeOf((*ArrayOfNetIpConfigSpecIpAddressSpec)(nil)).Elem() +} + +type ArrayOfNetIpRouteConfigInfoIpRoute struct { + NetIpRouteConfigInfoIpRoute []NetIpRouteConfigInfoIpRoute `xml:"NetIpRouteConfigInfoIpRoute,omitempty"` +} + +func init() { + t["ArrayOfNetIpRouteConfigInfoIpRoute"] = reflect.TypeOf((*ArrayOfNetIpRouteConfigInfoIpRoute)(nil)).Elem() +} + +type ArrayOfNetIpRouteConfigSpecIpRouteSpec struct { + NetIpRouteConfigSpecIpRouteSpec []NetIpRouteConfigSpecIpRouteSpec `xml:"NetIpRouteConfigSpecIpRouteSpec,omitempty"` +} + +func init() { + t["ArrayOfNetIpRouteConfigSpecIpRouteSpec"] = reflect.TypeOf((*ArrayOfNetIpRouteConfigSpecIpRouteSpec)(nil)).Elem() +} + +type ArrayOfNetIpStackInfoDefaultRouter struct { + NetIpStackInfoDefaultRouter []NetIpStackInfoDefaultRouter `xml:"NetIpStackInfoDefaultRouter,omitempty"` +} + +func init() { + t["ArrayOfNetIpStackInfoDefaultRouter"] = reflect.TypeOf((*ArrayOfNetIpStackInfoDefaultRouter)(nil)).Elem() +} + +type ArrayOfNetIpStackInfoNetToMedia struct { + NetIpStackInfoNetToMedia 
[]NetIpStackInfoNetToMedia `xml:"NetIpStackInfoNetToMedia,omitempty"` +} + +func init() { + t["ArrayOfNetIpStackInfoNetToMedia"] = reflect.TypeOf((*ArrayOfNetIpStackInfoNetToMedia)(nil)).Elem() +} + +type ArrayOfNetStackInstanceProfile struct { + NetStackInstanceProfile []NetStackInstanceProfile `xml:"NetStackInstanceProfile,omitempty"` +} + +func init() { + t["ArrayOfNetStackInstanceProfile"] = reflect.TypeOf((*ArrayOfNetStackInstanceProfile)(nil)).Elem() +} + +type ArrayOfNsxHostVNicProfile struct { + NsxHostVNicProfile []NsxHostVNicProfile `xml:"NsxHostVNicProfile,omitempty"` +} + +func init() { + t["ArrayOfNsxHostVNicProfile"] = reflect.TypeOf((*ArrayOfNsxHostVNicProfile)(nil)).Elem() +} + +type ArrayOfNumericRange struct { + NumericRange []NumericRange `xml:"NumericRange,omitempty"` +} + +func init() { + t["ArrayOfNumericRange"] = reflect.TypeOf((*ArrayOfNumericRange)(nil)).Elem() +} + +type ArrayOfNvdimmDimmInfo struct { + NvdimmDimmInfo []NvdimmDimmInfo `xml:"NvdimmDimmInfo,omitempty"` +} + +func init() { + t["ArrayOfNvdimmDimmInfo"] = reflect.TypeOf((*ArrayOfNvdimmDimmInfo)(nil)).Elem() +} + +type ArrayOfNvdimmGuid struct { + NvdimmGuid []NvdimmGuid `xml:"NvdimmGuid,omitempty"` +} + +func init() { + t["ArrayOfNvdimmGuid"] = reflect.TypeOf((*ArrayOfNvdimmGuid)(nil)).Elem() +} + +type ArrayOfNvdimmInterleaveSetInfo struct { + NvdimmInterleaveSetInfo []NvdimmInterleaveSetInfo `xml:"NvdimmInterleaveSetInfo,omitempty"` +} + +func init() { + t["ArrayOfNvdimmInterleaveSetInfo"] = reflect.TypeOf((*ArrayOfNvdimmInterleaveSetInfo)(nil)).Elem() +} + +type ArrayOfNvdimmNamespaceInfo struct { + NvdimmNamespaceInfo []NvdimmNamespaceInfo `xml:"NvdimmNamespaceInfo,omitempty"` +} + +func init() { + t["ArrayOfNvdimmNamespaceInfo"] = reflect.TypeOf((*ArrayOfNvdimmNamespaceInfo)(nil)).Elem() +} + +type ArrayOfNvdimmRegionInfo struct { + NvdimmRegionInfo []NvdimmRegionInfo `xml:"NvdimmRegionInfo,omitempty"` +} + +func init() { + t["ArrayOfNvdimmRegionInfo"] = reflect.TypeOf((*ArrayOfNvdimmRegionInfo)(nil)).Elem() +} + +type ArrayOfObjectContent struct { + ObjectContent []ObjectContent `xml:"ObjectContent,omitempty"` +} + +func init() { + t["ArrayOfObjectContent"] = reflect.TypeOf((*ArrayOfObjectContent)(nil)).Elem() +} + +type ArrayOfObjectSpec struct { + ObjectSpec []ObjectSpec `xml:"ObjectSpec,omitempty"` +} + +func init() { + t["ArrayOfObjectSpec"] = reflect.TypeOf((*ArrayOfObjectSpec)(nil)).Elem() +} + +type ArrayOfObjectUpdate struct { + ObjectUpdate []ObjectUpdate `xml:"ObjectUpdate,omitempty"` +} + +func init() { + t["ArrayOfObjectUpdate"] = reflect.TypeOf((*ArrayOfObjectUpdate)(nil)).Elem() +} + +type ArrayOfOpaqueNetworkTargetInfo struct { + OpaqueNetworkTargetInfo []OpaqueNetworkTargetInfo `xml:"OpaqueNetworkTargetInfo,omitempty"` +} + +func init() { + t["ArrayOfOpaqueNetworkTargetInfo"] = reflect.TypeOf((*ArrayOfOpaqueNetworkTargetInfo)(nil)).Elem() +} + +type ArrayOfOptionDef struct { + OptionDef []OptionDef `xml:"OptionDef,omitempty"` +} + +func init() { + t["ArrayOfOptionDef"] = reflect.TypeOf((*ArrayOfOptionDef)(nil)).Elem() +} + +type ArrayOfOptionProfile struct { + OptionProfile []OptionProfile `xml:"OptionProfile,omitempty"` +} + +func init() { + t["ArrayOfOptionProfile"] = reflect.TypeOf((*ArrayOfOptionProfile)(nil)).Elem() +} + +type ArrayOfOptionValue struct { + OptionValue []BaseOptionValue `xml:"OptionValue,omitempty,typeattr"` +} + +func init() { + t["ArrayOfOptionValue"] = reflect.TypeOf((*ArrayOfOptionValue)(nil)).Elem() +} + +type ArrayOfOvfConsumerOstNode struct { + 
OvfConsumerOstNode []OvfConsumerOstNode `xml:"OvfConsumerOstNode,omitempty"` +} + +func init() { + t["ArrayOfOvfConsumerOstNode"] = reflect.TypeOf((*ArrayOfOvfConsumerOstNode)(nil)).Elem() +} + +type ArrayOfOvfConsumerOvfSection struct { + OvfConsumerOvfSection []OvfConsumerOvfSection `xml:"OvfConsumerOvfSection,omitempty"` +} + +func init() { + t["ArrayOfOvfConsumerOvfSection"] = reflect.TypeOf((*ArrayOfOvfConsumerOvfSection)(nil)).Elem() +} + +type ArrayOfOvfDeploymentOption struct { + OvfDeploymentOption []OvfDeploymentOption `xml:"OvfDeploymentOption,omitempty"` +} + +func init() { + t["ArrayOfOvfDeploymentOption"] = reflect.TypeOf((*ArrayOfOvfDeploymentOption)(nil)).Elem() +} + +type ArrayOfOvfFile struct { + OvfFile []OvfFile `xml:"OvfFile,omitempty"` +} + +func init() { + t["ArrayOfOvfFile"] = reflect.TypeOf((*ArrayOfOvfFile)(nil)).Elem() +} + +type ArrayOfOvfFileItem struct { + OvfFileItem []OvfFileItem `xml:"OvfFileItem,omitempty"` +} + +func init() { + t["ArrayOfOvfFileItem"] = reflect.TypeOf((*ArrayOfOvfFileItem)(nil)).Elem() +} + +type ArrayOfOvfNetworkInfo struct { + OvfNetworkInfo []OvfNetworkInfo `xml:"OvfNetworkInfo,omitempty"` +} + +func init() { + t["ArrayOfOvfNetworkInfo"] = reflect.TypeOf((*ArrayOfOvfNetworkInfo)(nil)).Elem() +} + +type ArrayOfOvfNetworkMapping struct { + OvfNetworkMapping []OvfNetworkMapping `xml:"OvfNetworkMapping,omitempty"` +} + +func init() { + t["ArrayOfOvfNetworkMapping"] = reflect.TypeOf((*ArrayOfOvfNetworkMapping)(nil)).Elem() +} + +type ArrayOfOvfOptionInfo struct { + OvfOptionInfo []OvfOptionInfo `xml:"OvfOptionInfo,omitempty"` +} + +func init() { + t["ArrayOfOvfOptionInfo"] = reflect.TypeOf((*ArrayOfOvfOptionInfo)(nil)).Elem() +} + +type ArrayOfOvfResourceMap struct { + OvfResourceMap []OvfResourceMap `xml:"OvfResourceMap,omitempty"` +} + +func init() { + t["ArrayOfOvfResourceMap"] = reflect.TypeOf((*ArrayOfOvfResourceMap)(nil)).Elem() +} + +type ArrayOfPerfCounterInfo struct { + PerfCounterInfo []PerfCounterInfo `xml:"PerfCounterInfo,omitempty"` +} + +func init() { + t["ArrayOfPerfCounterInfo"] = reflect.TypeOf((*ArrayOfPerfCounterInfo)(nil)).Elem() +} + +type ArrayOfPerfEntityMetricBase struct { + PerfEntityMetricBase []BasePerfEntityMetricBase `xml:"PerfEntityMetricBase,omitempty,typeattr"` +} + +func init() { + t["ArrayOfPerfEntityMetricBase"] = reflect.TypeOf((*ArrayOfPerfEntityMetricBase)(nil)).Elem() +} + +type ArrayOfPerfInterval struct { + PerfInterval []PerfInterval `xml:"PerfInterval,omitempty"` +} + +func init() { + t["ArrayOfPerfInterval"] = reflect.TypeOf((*ArrayOfPerfInterval)(nil)).Elem() +} + +type ArrayOfPerfMetricId struct { + PerfMetricId []PerfMetricId `xml:"PerfMetricId,omitempty"` +} + +func init() { + t["ArrayOfPerfMetricId"] = reflect.TypeOf((*ArrayOfPerfMetricId)(nil)).Elem() +} + +type ArrayOfPerfMetricSeries struct { + PerfMetricSeries []BasePerfMetricSeries `xml:"PerfMetricSeries,omitempty,typeattr"` +} + +func init() { + t["ArrayOfPerfMetricSeries"] = reflect.TypeOf((*ArrayOfPerfMetricSeries)(nil)).Elem() +} + +type ArrayOfPerfMetricSeriesCSV struct { + PerfMetricSeriesCSV []PerfMetricSeriesCSV `xml:"PerfMetricSeriesCSV,omitempty"` +} + +func init() { + t["ArrayOfPerfMetricSeriesCSV"] = reflect.TypeOf((*ArrayOfPerfMetricSeriesCSV)(nil)).Elem() +} + +type ArrayOfPerfQuerySpec struct { + PerfQuerySpec []PerfQuerySpec `xml:"PerfQuerySpec,omitempty"` +} + +func init() { + t["ArrayOfPerfQuerySpec"] = reflect.TypeOf((*ArrayOfPerfQuerySpec)(nil)).Elem() +} + +type ArrayOfPerfSampleInfo struct { + PerfSampleInfo 
[]PerfSampleInfo `xml:"PerfSampleInfo,omitempty"` +} + +func init() { + t["ArrayOfPerfSampleInfo"] = reflect.TypeOf((*ArrayOfPerfSampleInfo)(nil)).Elem() +} + +type ArrayOfPerformanceManagerCounterLevelMapping struct { + PerformanceManagerCounterLevelMapping []PerformanceManagerCounterLevelMapping `xml:"PerformanceManagerCounterLevelMapping,omitempty"` +} + +func init() { + t["ArrayOfPerformanceManagerCounterLevelMapping"] = reflect.TypeOf((*ArrayOfPerformanceManagerCounterLevelMapping)(nil)).Elem() +} + +type ArrayOfPermission struct { + Permission []Permission `xml:"Permission,omitempty"` +} + +func init() { + t["ArrayOfPermission"] = reflect.TypeOf((*ArrayOfPermission)(nil)).Elem() +} + +type ArrayOfPermissionProfile struct { + PermissionProfile []PermissionProfile `xml:"PermissionProfile,omitempty"` +} + +func init() { + t["ArrayOfPermissionProfile"] = reflect.TypeOf((*ArrayOfPermissionProfile)(nil)).Elem() +} + +type ArrayOfPhysicalNic struct { + PhysicalNic []PhysicalNic `xml:"PhysicalNic,omitempty"` +} + +func init() { + t["ArrayOfPhysicalNic"] = reflect.TypeOf((*ArrayOfPhysicalNic)(nil)).Elem() +} + +type ArrayOfPhysicalNicConfig struct { + PhysicalNicConfig []PhysicalNicConfig `xml:"PhysicalNicConfig,omitempty"` +} + +func init() { + t["ArrayOfPhysicalNicConfig"] = reflect.TypeOf((*ArrayOfPhysicalNicConfig)(nil)).Elem() +} + +type ArrayOfPhysicalNicHintInfo struct { + PhysicalNicHintInfo []PhysicalNicHintInfo `xml:"PhysicalNicHintInfo,omitempty"` +} + +func init() { + t["ArrayOfPhysicalNicHintInfo"] = reflect.TypeOf((*ArrayOfPhysicalNicHintInfo)(nil)).Elem() +} + +type ArrayOfPhysicalNicIpHint struct { + PhysicalNicIpHint []PhysicalNicIpHint `xml:"PhysicalNicIpHint,omitempty"` +} + +func init() { + t["ArrayOfPhysicalNicIpHint"] = reflect.TypeOf((*ArrayOfPhysicalNicIpHint)(nil)).Elem() +} + +type ArrayOfPhysicalNicLinkInfo struct { + PhysicalNicLinkInfo []PhysicalNicLinkInfo `xml:"PhysicalNicLinkInfo,omitempty"` +} + +func init() { + t["ArrayOfPhysicalNicLinkInfo"] = reflect.TypeOf((*ArrayOfPhysicalNicLinkInfo)(nil)).Elem() +} + +type ArrayOfPhysicalNicNameHint struct { + PhysicalNicNameHint []PhysicalNicNameHint `xml:"PhysicalNicNameHint,omitempty"` +} + +func init() { + t["ArrayOfPhysicalNicNameHint"] = reflect.TypeOf((*ArrayOfPhysicalNicNameHint)(nil)).Elem() +} + +type ArrayOfPhysicalNicProfile struct { + PhysicalNicProfile []PhysicalNicProfile `xml:"PhysicalNicProfile,omitempty"` +} + +func init() { + t["ArrayOfPhysicalNicProfile"] = reflect.TypeOf((*ArrayOfPhysicalNicProfile)(nil)).Elem() +} + +type ArrayOfPlacementAffinityRule struct { + PlacementAffinityRule []PlacementAffinityRule `xml:"PlacementAffinityRule,omitempty"` +} + +func init() { + t["ArrayOfPlacementAffinityRule"] = reflect.TypeOf((*ArrayOfPlacementAffinityRule)(nil)).Elem() +} + +type ArrayOfPlacementSpec struct { + PlacementSpec []PlacementSpec `xml:"PlacementSpec,omitempty"` +} + +func init() { + t["ArrayOfPlacementSpec"] = reflect.TypeOf((*ArrayOfPlacementSpec)(nil)).Elem() +} + +type ArrayOfPnicUplinkProfile struct { + PnicUplinkProfile []PnicUplinkProfile `xml:"PnicUplinkProfile,omitempty"` +} + +func init() { + t["ArrayOfPnicUplinkProfile"] = reflect.TypeOf((*ArrayOfPnicUplinkProfile)(nil)).Elem() +} + +type ArrayOfPodDiskLocator struct { + PodDiskLocator []PodDiskLocator `xml:"PodDiskLocator,omitempty"` +} + +func init() { + t["ArrayOfPodDiskLocator"] = reflect.TypeOf((*ArrayOfPodDiskLocator)(nil)).Elem() +} + +type ArrayOfPolicyOption struct { + PolicyOption []BasePolicyOption 
`xml:"PolicyOption,omitempty,typeattr"` +} + +func init() { + t["ArrayOfPolicyOption"] = reflect.TypeOf((*ArrayOfPolicyOption)(nil)).Elem() +} + +type ArrayOfPrivilegeAvailability struct { + PrivilegeAvailability []PrivilegeAvailability `xml:"PrivilegeAvailability,omitempty"` +} + +func init() { + t["ArrayOfPrivilegeAvailability"] = reflect.TypeOf((*ArrayOfPrivilegeAvailability)(nil)).Elem() +} + +type ArrayOfProductComponentInfo struct { + ProductComponentInfo []ProductComponentInfo `xml:"ProductComponentInfo,omitempty"` +} + +func init() { + t["ArrayOfProductComponentInfo"] = reflect.TypeOf((*ArrayOfProductComponentInfo)(nil)).Elem() +} + +type ArrayOfProfileApplyProfileProperty struct { + ProfileApplyProfileProperty []ProfileApplyProfileProperty `xml:"ProfileApplyProfileProperty,omitempty"` +} + +func init() { + t["ArrayOfProfileApplyProfileProperty"] = reflect.TypeOf((*ArrayOfProfileApplyProfileProperty)(nil)).Elem() +} + +type ArrayOfProfileDeferredPolicyOptionParameter struct { + ProfileDeferredPolicyOptionParameter []ProfileDeferredPolicyOptionParameter `xml:"ProfileDeferredPolicyOptionParameter,omitempty"` +} + +func init() { + t["ArrayOfProfileDeferredPolicyOptionParameter"] = reflect.TypeOf((*ArrayOfProfileDeferredPolicyOptionParameter)(nil)).Elem() +} + +type ArrayOfProfileDescriptionSection struct { + ProfileDescriptionSection []ProfileDescriptionSection `xml:"ProfileDescriptionSection,omitempty"` +} + +func init() { + t["ArrayOfProfileDescriptionSection"] = reflect.TypeOf((*ArrayOfProfileDescriptionSection)(nil)).Elem() +} + +type ArrayOfProfileExecuteError struct { + ProfileExecuteError []ProfileExecuteError `xml:"ProfileExecuteError,omitempty"` +} + +func init() { + t["ArrayOfProfileExecuteError"] = reflect.TypeOf((*ArrayOfProfileExecuteError)(nil)).Elem() +} + +type ArrayOfProfileExpression struct { + ProfileExpression []BaseProfileExpression `xml:"ProfileExpression,omitempty,typeattr"` +} + +func init() { + t["ArrayOfProfileExpression"] = reflect.TypeOf((*ArrayOfProfileExpression)(nil)).Elem() +} + +type ArrayOfProfileExpressionMetadata struct { + ProfileExpressionMetadata []ProfileExpressionMetadata `xml:"ProfileExpressionMetadata,omitempty"` +} + +func init() { + t["ArrayOfProfileExpressionMetadata"] = reflect.TypeOf((*ArrayOfProfileExpressionMetadata)(nil)).Elem() +} + +type ArrayOfProfileMetadata struct { + ProfileMetadata []ProfileMetadata `xml:"ProfileMetadata,omitempty"` +} + +func init() { + t["ArrayOfProfileMetadata"] = reflect.TypeOf((*ArrayOfProfileMetadata)(nil)).Elem() +} + +type ArrayOfProfileMetadataProfileOperationMessage struct { + ProfileMetadataProfileOperationMessage []ProfileMetadataProfileOperationMessage `xml:"ProfileMetadataProfileOperationMessage,omitempty"` +} + +func init() { + t["ArrayOfProfileMetadataProfileOperationMessage"] = reflect.TypeOf((*ArrayOfProfileMetadataProfileOperationMessage)(nil)).Elem() +} + +type ArrayOfProfileMetadataProfileSortSpec struct { + ProfileMetadataProfileSortSpec []ProfileMetadataProfileSortSpec `xml:"ProfileMetadataProfileSortSpec,omitempty"` +} + +func init() { + t["ArrayOfProfileMetadataProfileSortSpec"] = reflect.TypeOf((*ArrayOfProfileMetadataProfileSortSpec)(nil)).Elem() +} + +type ArrayOfProfileParameterMetadata struct { + ProfileParameterMetadata []ProfileParameterMetadata `xml:"ProfileParameterMetadata,omitempty"` +} + +func init() { + t["ArrayOfProfileParameterMetadata"] = reflect.TypeOf((*ArrayOfProfileParameterMetadata)(nil)).Elem() +} + +type ArrayOfProfileParameterMetadataParameterRelationMetadata 
struct { + ProfileParameterMetadataParameterRelationMetadata []ProfileParameterMetadataParameterRelationMetadata `xml:"ProfileParameterMetadataParameterRelationMetadata,omitempty"` +} + +func init() { + t["ArrayOfProfileParameterMetadataParameterRelationMetadata"] = reflect.TypeOf((*ArrayOfProfileParameterMetadataParameterRelationMetadata)(nil)).Elem() +} + +type ArrayOfProfilePolicy struct { + ProfilePolicy []ProfilePolicy `xml:"ProfilePolicy,omitempty"` +} + +func init() { + t["ArrayOfProfilePolicy"] = reflect.TypeOf((*ArrayOfProfilePolicy)(nil)).Elem() +} + +type ArrayOfProfilePolicyMetadata struct { + ProfilePolicyMetadata []ProfilePolicyMetadata `xml:"ProfilePolicyMetadata,omitempty"` +} + +func init() { + t["ArrayOfProfilePolicyMetadata"] = reflect.TypeOf((*ArrayOfProfilePolicyMetadata)(nil)).Elem() +} + +type ArrayOfProfilePolicyOptionMetadata struct { + ProfilePolicyOptionMetadata []BaseProfilePolicyOptionMetadata `xml:"ProfilePolicyOptionMetadata,omitempty,typeattr"` +} + +func init() { + t["ArrayOfProfilePolicyOptionMetadata"] = reflect.TypeOf((*ArrayOfProfilePolicyOptionMetadata)(nil)).Elem() +} + +type ArrayOfProfileProfileStructureProperty struct { + ProfileProfileStructureProperty []ProfileProfileStructureProperty `xml:"ProfileProfileStructureProperty,omitempty"` +} + +func init() { + t["ArrayOfProfileProfileStructureProperty"] = reflect.TypeOf((*ArrayOfProfileProfileStructureProperty)(nil)).Elem() +} + +type ArrayOfProfilePropertyPath struct { + ProfilePropertyPath []ProfilePropertyPath `xml:"ProfilePropertyPath,omitempty"` +} + +func init() { + t["ArrayOfProfilePropertyPath"] = reflect.TypeOf((*ArrayOfProfilePropertyPath)(nil)).Elem() +} + +type ArrayOfProfileUpdateFailedUpdateFailure struct { + ProfileUpdateFailedUpdateFailure []ProfileUpdateFailedUpdateFailure `xml:"ProfileUpdateFailedUpdateFailure,omitempty"` +} + +func init() { + t["ArrayOfProfileUpdateFailedUpdateFailure"] = reflect.TypeOf((*ArrayOfProfileUpdateFailedUpdateFailure)(nil)).Elem() +} + +type ArrayOfPropertyChange struct { + PropertyChange []PropertyChange `xml:"PropertyChange,omitempty"` +} + +func init() { + t["ArrayOfPropertyChange"] = reflect.TypeOf((*ArrayOfPropertyChange)(nil)).Elem() +} + +type ArrayOfPropertyFilterSpec struct { + PropertyFilterSpec []PropertyFilterSpec `xml:"PropertyFilterSpec,omitempty"` +} + +func init() { + t["ArrayOfPropertyFilterSpec"] = reflect.TypeOf((*ArrayOfPropertyFilterSpec)(nil)).Elem() +} + +type ArrayOfPropertyFilterUpdate struct { + PropertyFilterUpdate []PropertyFilterUpdate `xml:"PropertyFilterUpdate,omitempty"` +} + +func init() { + t["ArrayOfPropertyFilterUpdate"] = reflect.TypeOf((*ArrayOfPropertyFilterUpdate)(nil)).Elem() +} + +type ArrayOfPropertySpec struct { + PropertySpec []PropertySpec `xml:"PropertySpec,omitempty"` +} + +func init() { + t["ArrayOfPropertySpec"] = reflect.TypeOf((*ArrayOfPropertySpec)(nil)).Elem() +} + +type ArrayOfRelation struct { + Relation []Relation `xml:"Relation,omitempty"` +} + +func init() { + t["ArrayOfRelation"] = reflect.TypeOf((*ArrayOfRelation)(nil)).Elem() +} + +type ArrayOfReplicationInfoDiskSettings struct { + ReplicationInfoDiskSettings []ReplicationInfoDiskSettings `xml:"ReplicationInfoDiskSettings,omitempty"` +} + +func init() { + t["ArrayOfReplicationInfoDiskSettings"] = reflect.TypeOf((*ArrayOfReplicationInfoDiskSettings)(nil)).Elem() +} + +type ArrayOfResourceConfigSpec struct { + ResourceConfigSpec []ResourceConfigSpec `xml:"ResourceConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfResourceConfigSpec"] = 
reflect.TypeOf((*ArrayOfResourceConfigSpec)(nil)).Elem() +} + +type ArrayOfRetrieveVStorageObjSpec struct { + RetrieveVStorageObjSpec []RetrieveVStorageObjSpec `xml:"RetrieveVStorageObjSpec,omitempty"` +} + +func init() { + t["ArrayOfRetrieveVStorageObjSpec"] = reflect.TypeOf((*ArrayOfRetrieveVStorageObjSpec)(nil)).Elem() +} + +type ArrayOfScheduledTaskDetail struct { + ScheduledTaskDetail []ScheduledTaskDetail `xml:"ScheduledTaskDetail,omitempty"` +} + +func init() { + t["ArrayOfScheduledTaskDetail"] = reflect.TypeOf((*ArrayOfScheduledTaskDetail)(nil)).Elem() +} + +type ArrayOfScsiLun struct { + ScsiLun []BaseScsiLun `xml:"ScsiLun,omitempty,typeattr"` +} + +func init() { + t["ArrayOfScsiLun"] = reflect.TypeOf((*ArrayOfScsiLun)(nil)).Elem() +} + +type ArrayOfScsiLunDescriptor struct { + ScsiLunDescriptor []ScsiLunDescriptor `xml:"ScsiLunDescriptor,omitempty"` +} + +func init() { + t["ArrayOfScsiLunDescriptor"] = reflect.TypeOf((*ArrayOfScsiLunDescriptor)(nil)).Elem() +} + +type ArrayOfScsiLunDurableName struct { + ScsiLunDurableName []ScsiLunDurableName `xml:"ScsiLunDurableName,omitempty"` +} + +func init() { + t["ArrayOfScsiLunDurableName"] = reflect.TypeOf((*ArrayOfScsiLunDurableName)(nil)).Elem() +} + +type ArrayOfSelectionSet struct { + SelectionSet []BaseSelectionSet `xml:"SelectionSet,omitempty,typeattr"` +} + +func init() { + t["ArrayOfSelectionSet"] = reflect.TypeOf((*ArrayOfSelectionSet)(nil)).Elem() +} + +type ArrayOfSelectionSpec struct { + SelectionSpec []BaseSelectionSpec `xml:"SelectionSpec,omitempty,typeattr"` +} + +func init() { + t["ArrayOfSelectionSpec"] = reflect.TypeOf((*ArrayOfSelectionSpec)(nil)).Elem() +} + +type ArrayOfServiceConsolePortGroupProfile struct { + ServiceConsolePortGroupProfile []ServiceConsolePortGroupProfile `xml:"ServiceConsolePortGroupProfile,omitempty"` +} + +func init() { + t["ArrayOfServiceConsolePortGroupProfile"] = reflect.TypeOf((*ArrayOfServiceConsolePortGroupProfile)(nil)).Elem() +} + +type ArrayOfServiceLocator struct { + ServiceLocator []ServiceLocator `xml:"ServiceLocator,omitempty"` +} + +func init() { + t["ArrayOfServiceLocator"] = reflect.TypeOf((*ArrayOfServiceLocator)(nil)).Elem() +} + +type ArrayOfServiceManagerServiceInfo struct { + ServiceManagerServiceInfo []ServiceManagerServiceInfo `xml:"ServiceManagerServiceInfo,omitempty"` +} + +func init() { + t["ArrayOfServiceManagerServiceInfo"] = reflect.TypeOf((*ArrayOfServiceManagerServiceInfo)(nil)).Elem() +} + +type ArrayOfServiceProfile struct { + ServiceProfile []ServiceProfile `xml:"ServiceProfile,omitempty"` +} + +func init() { + t["ArrayOfServiceProfile"] = reflect.TypeOf((*ArrayOfServiceProfile)(nil)).Elem() +} + +type ArrayOfShort struct { + Short []int16 `xml:"short,omitempty"` +} + +func init() { + t["ArrayOfShort"] = reflect.TypeOf((*ArrayOfShort)(nil)).Elem() +} + +type ArrayOfSoftwarePackage struct { + SoftwarePackage []SoftwarePackage `xml:"SoftwarePackage,omitempty"` +} + +func init() { + t["ArrayOfSoftwarePackage"] = reflect.TypeOf((*ArrayOfSoftwarePackage)(nil)).Elem() +} + +type ArrayOfStaticRouteProfile struct { + StaticRouteProfile []StaticRouteProfile `xml:"StaticRouteProfile,omitempty"` +} + +func init() { + t["ArrayOfStaticRouteProfile"] = reflect.TypeOf((*ArrayOfStaticRouteProfile)(nil)).Elem() +} + +type ArrayOfStorageDrsOptionSpec struct { + StorageDrsOptionSpec []StorageDrsOptionSpec `xml:"StorageDrsOptionSpec,omitempty"` +} + +func init() { + t["ArrayOfStorageDrsOptionSpec"] = reflect.TypeOf((*ArrayOfStorageDrsOptionSpec)(nil)).Elem() +} + +type 
ArrayOfStorageDrsPlacementRankVmSpec struct { + StorageDrsPlacementRankVmSpec []StorageDrsPlacementRankVmSpec `xml:"StorageDrsPlacementRankVmSpec,omitempty"` +} + +func init() { + t["ArrayOfStorageDrsPlacementRankVmSpec"] = reflect.TypeOf((*ArrayOfStorageDrsPlacementRankVmSpec)(nil)).Elem() +} + +type ArrayOfStorageDrsVmConfigInfo struct { + StorageDrsVmConfigInfo []StorageDrsVmConfigInfo `xml:"StorageDrsVmConfigInfo,omitempty"` +} + +func init() { + t["ArrayOfStorageDrsVmConfigInfo"] = reflect.TypeOf((*ArrayOfStorageDrsVmConfigInfo)(nil)).Elem() +} + +type ArrayOfStorageDrsVmConfigSpec struct { + StorageDrsVmConfigSpec []StorageDrsVmConfigSpec `xml:"StorageDrsVmConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfStorageDrsVmConfigSpec"] = reflect.TypeOf((*ArrayOfStorageDrsVmConfigSpec)(nil)).Elem() +} + +type ArrayOfStoragePerformanceSummary struct { + StoragePerformanceSummary []StoragePerformanceSummary `xml:"StoragePerformanceSummary,omitempty"` +} + +func init() { + t["ArrayOfStoragePerformanceSummary"] = reflect.TypeOf((*ArrayOfStoragePerformanceSummary)(nil)).Elem() +} + +type ArrayOfStorageRequirement struct { + StorageRequirement []StorageRequirement `xml:"StorageRequirement,omitempty"` +} + +func init() { + t["ArrayOfStorageRequirement"] = reflect.TypeOf((*ArrayOfStorageRequirement)(nil)).Elem() +} + +type ArrayOfString struct { + String []string `xml:"string,omitempty"` +} + +func init() { + t["ArrayOfString"] = reflect.TypeOf((*ArrayOfString)(nil)).Elem() +} + +type ArrayOfStructuredCustomizations struct { + StructuredCustomizations []StructuredCustomizations `xml:"StructuredCustomizations,omitempty"` +} + +func init() { + t["ArrayOfStructuredCustomizations"] = reflect.TypeOf((*ArrayOfStructuredCustomizations)(nil)).Elem() +} + +type ArrayOfSystemEventInfo struct { + SystemEventInfo []SystemEventInfo `xml:"SystemEventInfo,omitempty"` +} + +func init() { + t["ArrayOfSystemEventInfo"] = reflect.TypeOf((*ArrayOfSystemEventInfo)(nil)).Elem() +} + +type ArrayOfTag struct { + Tag []Tag `xml:"Tag,omitempty"` +} + +func init() { + t["ArrayOfTag"] = reflect.TypeOf((*ArrayOfTag)(nil)).Elem() +} + +type ArrayOfTaskInfo struct { + TaskInfo []TaskInfo `xml:"TaskInfo,omitempty"` +} + +func init() { + t["ArrayOfTaskInfo"] = reflect.TypeOf((*ArrayOfTaskInfo)(nil)).Elem() +} + +type ArrayOfTaskInfoState struct { + TaskInfoState []TaskInfoState `xml:"TaskInfoState,omitempty"` +} + +func init() { + t["ArrayOfTaskInfoState"] = reflect.TypeOf((*ArrayOfTaskInfoState)(nil)).Elem() +} + +type ArrayOfTypeDescription struct { + TypeDescription []BaseTypeDescription `xml:"TypeDescription,omitempty,typeattr"` +} + +func init() { + t["ArrayOfTypeDescription"] = reflect.TypeOf((*ArrayOfTypeDescription)(nil)).Elem() +} + +type ArrayOfUpdateVirtualMachineFilesResultFailedVmFileInfo struct { + UpdateVirtualMachineFilesResultFailedVmFileInfo []UpdateVirtualMachineFilesResultFailedVmFileInfo `xml:"UpdateVirtualMachineFilesResultFailedVmFileInfo,omitempty"` +} + +func init() { + t["ArrayOfUpdateVirtualMachineFilesResultFailedVmFileInfo"] = reflect.TypeOf((*ArrayOfUpdateVirtualMachineFilesResultFailedVmFileInfo)(nil)).Elem() +} + +type ArrayOfUsbScanCodeSpecKeyEvent struct { + UsbScanCodeSpecKeyEvent []UsbScanCodeSpecKeyEvent `xml:"UsbScanCodeSpecKeyEvent,omitempty"` +} + +func init() { + t["ArrayOfUsbScanCodeSpecKeyEvent"] = reflect.TypeOf((*ArrayOfUsbScanCodeSpecKeyEvent)(nil)).Elem() +} + +type ArrayOfUserGroupProfile struct { + UserGroupProfile []UserGroupProfile `xml:"UserGroupProfile,omitempty"` +} + 
+func init() { + t["ArrayOfUserGroupProfile"] = reflect.TypeOf((*ArrayOfUserGroupProfile)(nil)).Elem() +} + +type ArrayOfUserPrivilegeResult struct { + UserPrivilegeResult []UserPrivilegeResult `xml:"UserPrivilegeResult,omitempty"` +} + +func init() { + t["ArrayOfUserPrivilegeResult"] = reflect.TypeOf((*ArrayOfUserPrivilegeResult)(nil)).Elem() +} + +type ArrayOfUserProfile struct { + UserProfile []UserProfile `xml:"UserProfile,omitempty"` +} + +func init() { + t["ArrayOfUserProfile"] = reflect.TypeOf((*ArrayOfUserProfile)(nil)).Elem() +} + +type ArrayOfUserSearchResult struct { + UserSearchResult []BaseUserSearchResult `xml:"UserSearchResult,omitempty,typeattr"` +} + +func init() { + t["ArrayOfUserSearchResult"] = reflect.TypeOf((*ArrayOfUserSearchResult)(nil)).Elem() +} + +type ArrayOfUserSession struct { + UserSession []UserSession `xml:"UserSession,omitempty"` +} + +func init() { + t["ArrayOfUserSession"] = reflect.TypeOf((*ArrayOfUserSession)(nil)).Elem() +} + +type ArrayOfVASAStorageArray struct { + VASAStorageArray []VASAStorageArray `xml:"VASAStorageArray,omitempty"` +} + +func init() { + t["ArrayOfVASAStorageArray"] = reflect.TypeOf((*ArrayOfVASAStorageArray)(nil)).Elem() +} + +type ArrayOfVAppCloneSpecNetworkMappingPair struct { + VAppCloneSpecNetworkMappingPair []VAppCloneSpecNetworkMappingPair `xml:"VAppCloneSpecNetworkMappingPair,omitempty"` +} + +func init() { + t["ArrayOfVAppCloneSpecNetworkMappingPair"] = reflect.TypeOf((*ArrayOfVAppCloneSpecNetworkMappingPair)(nil)).Elem() +} + +type ArrayOfVAppCloneSpecResourceMap struct { + VAppCloneSpecResourceMap []VAppCloneSpecResourceMap `xml:"VAppCloneSpecResourceMap,omitempty"` +} + +func init() { + t["ArrayOfVAppCloneSpecResourceMap"] = reflect.TypeOf((*ArrayOfVAppCloneSpecResourceMap)(nil)).Elem() +} + +type ArrayOfVAppEntityConfigInfo struct { + VAppEntityConfigInfo []VAppEntityConfigInfo `xml:"VAppEntityConfigInfo,omitempty"` +} + +func init() { + t["ArrayOfVAppEntityConfigInfo"] = reflect.TypeOf((*ArrayOfVAppEntityConfigInfo)(nil)).Elem() +} + +type ArrayOfVAppOvfSectionInfo struct { + VAppOvfSectionInfo []VAppOvfSectionInfo `xml:"VAppOvfSectionInfo,omitempty"` +} + +func init() { + t["ArrayOfVAppOvfSectionInfo"] = reflect.TypeOf((*ArrayOfVAppOvfSectionInfo)(nil)).Elem() +} + +type ArrayOfVAppOvfSectionSpec struct { + VAppOvfSectionSpec []VAppOvfSectionSpec `xml:"VAppOvfSectionSpec,omitempty"` +} + +func init() { + t["ArrayOfVAppOvfSectionSpec"] = reflect.TypeOf((*ArrayOfVAppOvfSectionSpec)(nil)).Elem() +} + +type ArrayOfVAppProductInfo struct { + VAppProductInfo []VAppProductInfo `xml:"VAppProductInfo,omitempty"` +} + +func init() { + t["ArrayOfVAppProductInfo"] = reflect.TypeOf((*ArrayOfVAppProductInfo)(nil)).Elem() +} + +type ArrayOfVAppProductSpec struct { + VAppProductSpec []VAppProductSpec `xml:"VAppProductSpec,omitempty"` +} + +func init() { + t["ArrayOfVAppProductSpec"] = reflect.TypeOf((*ArrayOfVAppProductSpec)(nil)).Elem() +} + +type ArrayOfVAppPropertyInfo struct { + VAppPropertyInfo []VAppPropertyInfo `xml:"VAppPropertyInfo,omitempty"` +} + +func init() { + t["ArrayOfVAppPropertyInfo"] = reflect.TypeOf((*ArrayOfVAppPropertyInfo)(nil)).Elem() +} + +type ArrayOfVAppPropertySpec struct { + VAppPropertySpec []VAppPropertySpec `xml:"VAppPropertySpec,omitempty"` +} + +func init() { + t["ArrayOfVAppPropertySpec"] = reflect.TypeOf((*ArrayOfVAppPropertySpec)(nil)).Elem() +} + +type ArrayOfVMwareDVSPvlanConfigSpec struct { + VMwareDVSPvlanConfigSpec []VMwareDVSPvlanConfigSpec `xml:"VMwareDVSPvlanConfigSpec,omitempty"` +} + 
+func init() { + t["ArrayOfVMwareDVSPvlanConfigSpec"] = reflect.TypeOf((*ArrayOfVMwareDVSPvlanConfigSpec)(nil)).Elem() +} + +type ArrayOfVMwareDVSPvlanMapEntry struct { + VMwareDVSPvlanMapEntry []VMwareDVSPvlanMapEntry `xml:"VMwareDVSPvlanMapEntry,omitempty"` +} + +func init() { + t["ArrayOfVMwareDVSPvlanMapEntry"] = reflect.TypeOf((*ArrayOfVMwareDVSPvlanMapEntry)(nil)).Elem() +} + +type ArrayOfVMwareDVSVspanConfigSpec struct { + VMwareDVSVspanConfigSpec []VMwareDVSVspanConfigSpec `xml:"VMwareDVSVspanConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfVMwareDVSVspanConfigSpec"] = reflect.TypeOf((*ArrayOfVMwareDVSVspanConfigSpec)(nil)).Elem() +} + +type ArrayOfVMwareDvsLacpGroupConfig struct { + VMwareDvsLacpGroupConfig []VMwareDvsLacpGroupConfig `xml:"VMwareDvsLacpGroupConfig,omitempty"` +} + +func init() { + t["ArrayOfVMwareDvsLacpGroupConfig"] = reflect.TypeOf((*ArrayOfVMwareDvsLacpGroupConfig)(nil)).Elem() +} + +type ArrayOfVMwareDvsLacpGroupSpec struct { + VMwareDvsLacpGroupSpec []VMwareDvsLacpGroupSpec `xml:"VMwareDvsLacpGroupSpec,omitempty"` +} + +func init() { + t["ArrayOfVMwareDvsLacpGroupSpec"] = reflect.TypeOf((*ArrayOfVMwareDvsLacpGroupSpec)(nil)).Elem() +} + +type ArrayOfVMwareVspanSession struct { + VMwareVspanSession []VMwareVspanSession `xml:"VMwareVspanSession,omitempty"` +} + +func init() { + t["ArrayOfVMwareVspanSession"] = reflect.TypeOf((*ArrayOfVMwareVspanSession)(nil)).Elem() +} + +type ArrayOfVStorageObjectAssociations struct { + VStorageObjectAssociations []VStorageObjectAssociations `xml:"VStorageObjectAssociations,omitempty"` +} + +func init() { + t["ArrayOfVStorageObjectAssociations"] = reflect.TypeOf((*ArrayOfVStorageObjectAssociations)(nil)).Elem() +} + +type ArrayOfVStorageObjectAssociationsVmDiskAssociations struct { + VStorageObjectAssociationsVmDiskAssociations []VStorageObjectAssociationsVmDiskAssociations `xml:"VStorageObjectAssociationsVmDiskAssociations,omitempty"` +} + +func init() { + t["ArrayOfVStorageObjectAssociationsVmDiskAssociations"] = reflect.TypeOf((*ArrayOfVStorageObjectAssociationsVmDiskAssociations)(nil)).Elem() +} + +type ArrayOfVStorageObjectSnapshotInfoVStorageObjectSnapshot struct { + VStorageObjectSnapshotInfoVStorageObjectSnapshot []VStorageObjectSnapshotInfoVStorageObjectSnapshot `xml:"VStorageObjectSnapshotInfoVStorageObjectSnapshot,omitempty"` +} + +func init() { + t["ArrayOfVStorageObjectSnapshotInfoVStorageObjectSnapshot"] = reflect.TypeOf((*ArrayOfVStorageObjectSnapshotInfoVStorageObjectSnapshot)(nil)).Elem() +} + +type ArrayOfVVolHostPE struct { + VVolHostPE []VVolHostPE `xml:"VVolHostPE,omitempty"` +} + +func init() { + t["ArrayOfVVolHostPE"] = reflect.TypeOf((*ArrayOfVVolHostPE)(nil)).Elem() +} + +type ArrayOfVVolVmConfigFileUpdateResultFailedVmConfigFileInfo struct { + VVolVmConfigFileUpdateResultFailedVmConfigFileInfo []VVolVmConfigFileUpdateResultFailedVmConfigFileInfo `xml:"VVolVmConfigFileUpdateResultFailedVmConfigFileInfo,omitempty"` +} + +func init() { + t["ArrayOfVVolVmConfigFileUpdateResultFailedVmConfigFileInfo"] = reflect.TypeOf((*ArrayOfVVolVmConfigFileUpdateResultFailedVmConfigFileInfo)(nil)).Elem() +} + +type ArrayOfVchaNodeRuntimeInfo struct { + VchaNodeRuntimeInfo []VchaNodeRuntimeInfo `xml:"VchaNodeRuntimeInfo,omitempty"` +} + +func init() { + t["ArrayOfVchaNodeRuntimeInfo"] = reflect.TypeOf((*ArrayOfVchaNodeRuntimeInfo)(nil)).Elem() +} + +type ArrayOfVimVasaProviderInfo struct { + VimVasaProviderInfo []VimVasaProviderInfo `xml:"VimVasaProviderInfo,omitempty"` +} + +func init() { + 
t["ArrayOfVimVasaProviderInfo"] = reflect.TypeOf((*ArrayOfVimVasaProviderInfo)(nil)).Elem() +} + +type ArrayOfVimVasaProviderStatePerArray struct { + VimVasaProviderStatePerArray []VimVasaProviderStatePerArray `xml:"VimVasaProviderStatePerArray,omitempty"` +} + +func init() { + t["ArrayOfVimVasaProviderStatePerArray"] = reflect.TypeOf((*ArrayOfVimVasaProviderStatePerArray)(nil)).Elem() +} + +type ArrayOfVirtualAppLinkInfo struct { + VirtualAppLinkInfo []VirtualAppLinkInfo `xml:"VirtualAppLinkInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualAppLinkInfo"] = reflect.TypeOf((*ArrayOfVirtualAppLinkInfo)(nil)).Elem() +} + +type ArrayOfVirtualDevice struct { + VirtualDevice []BaseVirtualDevice `xml:"VirtualDevice,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualDevice"] = reflect.TypeOf((*ArrayOfVirtualDevice)(nil)).Elem() +} + +type ArrayOfVirtualDeviceBackingOption struct { + VirtualDeviceBackingOption []BaseVirtualDeviceBackingOption `xml:"VirtualDeviceBackingOption,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualDeviceBackingOption"] = reflect.TypeOf((*ArrayOfVirtualDeviceBackingOption)(nil)).Elem() +} + +type ArrayOfVirtualDeviceConfigSpec struct { + VirtualDeviceConfigSpec []BaseVirtualDeviceConfigSpec `xml:"VirtualDeviceConfigSpec,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualDeviceConfigSpec"] = reflect.TypeOf((*ArrayOfVirtualDeviceConfigSpec)(nil)).Elem() +} + +type ArrayOfVirtualDeviceOption struct { + VirtualDeviceOption []BaseVirtualDeviceOption `xml:"VirtualDeviceOption,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualDeviceOption"] = reflect.TypeOf((*ArrayOfVirtualDeviceOption)(nil)).Elem() +} + +type ArrayOfVirtualDisk struct { + VirtualDisk []VirtualDisk `xml:"VirtualDisk,omitempty"` +} + +func init() { + t["ArrayOfVirtualDisk"] = reflect.TypeOf((*ArrayOfVirtualDisk)(nil)).Elem() +} + +type ArrayOfVirtualDiskDeltaDiskFormatsSupported struct { + VirtualDiskDeltaDiskFormatsSupported []VirtualDiskDeltaDiskFormatsSupported `xml:"VirtualDiskDeltaDiskFormatsSupported,omitempty"` +} + +func init() { + t["ArrayOfVirtualDiskDeltaDiskFormatsSupported"] = reflect.TypeOf((*ArrayOfVirtualDiskDeltaDiskFormatsSupported)(nil)).Elem() +} + +type ArrayOfVirtualDiskId struct { + VirtualDiskId []VirtualDiskId `xml:"VirtualDiskId,omitempty"` +} + +func init() { + t["ArrayOfVirtualDiskId"] = reflect.TypeOf((*ArrayOfVirtualDiskId)(nil)).Elem() +} + +type ArrayOfVirtualDiskRuleSpec struct { + VirtualDiskRuleSpec []VirtualDiskRuleSpec `xml:"VirtualDiskRuleSpec,omitempty"` +} + +func init() { + t["ArrayOfVirtualDiskRuleSpec"] = reflect.TypeOf((*ArrayOfVirtualDiskRuleSpec)(nil)).Elem() +} + +type ArrayOfVirtualMachineBootOptionsBootableDevice struct { + VirtualMachineBootOptionsBootableDevice []BaseVirtualMachineBootOptionsBootableDevice `xml:"VirtualMachineBootOptionsBootableDevice,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualMachineBootOptionsBootableDevice"] = reflect.TypeOf((*ArrayOfVirtualMachineBootOptionsBootableDevice)(nil)).Elem() +} + +type ArrayOfVirtualMachineCdromInfo struct { + VirtualMachineCdromInfo []VirtualMachineCdromInfo `xml:"VirtualMachineCdromInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineCdromInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineCdromInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineConfigInfoDatastoreUrlPair struct { + VirtualMachineConfigInfoDatastoreUrlPair []VirtualMachineConfigInfoDatastoreUrlPair `xml:"VirtualMachineConfigInfoDatastoreUrlPair,omitempty"` +} + +func init() { + 
t["ArrayOfVirtualMachineConfigInfoDatastoreUrlPair"] = reflect.TypeOf((*ArrayOfVirtualMachineConfigInfoDatastoreUrlPair)(nil)).Elem() +} + +type ArrayOfVirtualMachineConfigOptionDescriptor struct { + VirtualMachineConfigOptionDescriptor []VirtualMachineConfigOptionDescriptor `xml:"VirtualMachineConfigOptionDescriptor,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineConfigOptionDescriptor"] = reflect.TypeOf((*ArrayOfVirtualMachineConfigOptionDescriptor)(nil)).Elem() +} + +type ArrayOfVirtualMachineCpuIdInfoSpec struct { + VirtualMachineCpuIdInfoSpec []VirtualMachineCpuIdInfoSpec `xml:"VirtualMachineCpuIdInfoSpec,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineCpuIdInfoSpec"] = reflect.TypeOf((*ArrayOfVirtualMachineCpuIdInfoSpec)(nil)).Elem() +} + +type ArrayOfVirtualMachineDatastoreInfo struct { + VirtualMachineDatastoreInfo []VirtualMachineDatastoreInfo `xml:"VirtualMachineDatastoreInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineDatastoreInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineDatastoreInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineDatastoreVolumeOption struct { + VirtualMachineDatastoreVolumeOption []VirtualMachineDatastoreVolumeOption `xml:"VirtualMachineDatastoreVolumeOption,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineDatastoreVolumeOption"] = reflect.TypeOf((*ArrayOfVirtualMachineDatastoreVolumeOption)(nil)).Elem() +} + +type ArrayOfVirtualMachineDeviceRuntimeInfo struct { + VirtualMachineDeviceRuntimeInfo []VirtualMachineDeviceRuntimeInfo `xml:"VirtualMachineDeviceRuntimeInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineDeviceRuntimeInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineDeviceRuntimeInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineDisplayTopology struct { + VirtualMachineDisplayTopology []VirtualMachineDisplayTopology `xml:"VirtualMachineDisplayTopology,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineDisplayTopology"] = reflect.TypeOf((*ArrayOfVirtualMachineDisplayTopology)(nil)).Elem() +} + +type ArrayOfVirtualMachineFeatureRequirement struct { + VirtualMachineFeatureRequirement []VirtualMachineFeatureRequirement `xml:"VirtualMachineFeatureRequirement,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineFeatureRequirement"] = reflect.TypeOf((*ArrayOfVirtualMachineFeatureRequirement)(nil)).Elem() +} + +type ArrayOfVirtualMachineFileLayoutDiskLayout struct { + VirtualMachineFileLayoutDiskLayout []VirtualMachineFileLayoutDiskLayout `xml:"VirtualMachineFileLayoutDiskLayout,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineFileLayoutDiskLayout"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutDiskLayout)(nil)).Elem() +} + +type ArrayOfVirtualMachineFileLayoutExDiskLayout struct { + VirtualMachineFileLayoutExDiskLayout []VirtualMachineFileLayoutExDiskLayout `xml:"VirtualMachineFileLayoutExDiskLayout,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineFileLayoutExDiskLayout"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutExDiskLayout)(nil)).Elem() +} + +type ArrayOfVirtualMachineFileLayoutExDiskUnit struct { + VirtualMachineFileLayoutExDiskUnit []VirtualMachineFileLayoutExDiskUnit `xml:"VirtualMachineFileLayoutExDiskUnit,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineFileLayoutExDiskUnit"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutExDiskUnit)(nil)).Elem() +} + +type ArrayOfVirtualMachineFileLayoutExFileInfo struct { + VirtualMachineFileLayoutExFileInfo []VirtualMachineFileLayoutExFileInfo `xml:"VirtualMachineFileLayoutExFileInfo,omitempty"` +} + +func 
init() { + t["ArrayOfVirtualMachineFileLayoutExFileInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutExFileInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineFileLayoutExSnapshotLayout struct { + VirtualMachineFileLayoutExSnapshotLayout []VirtualMachineFileLayoutExSnapshotLayout `xml:"VirtualMachineFileLayoutExSnapshotLayout,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineFileLayoutExSnapshotLayout"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutExSnapshotLayout)(nil)).Elem() +} + +type ArrayOfVirtualMachineFileLayoutSnapshotLayout struct { + VirtualMachineFileLayoutSnapshotLayout []VirtualMachineFileLayoutSnapshotLayout `xml:"VirtualMachineFileLayoutSnapshotLayout,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineFileLayoutSnapshotLayout"] = reflect.TypeOf((*ArrayOfVirtualMachineFileLayoutSnapshotLayout)(nil)).Elem() +} + +type ArrayOfVirtualMachineFloppyInfo struct { + VirtualMachineFloppyInfo []VirtualMachineFloppyInfo `xml:"VirtualMachineFloppyInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineFloppyInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineFloppyInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineIdeDiskDeviceInfo struct { + VirtualMachineIdeDiskDeviceInfo []VirtualMachineIdeDiskDeviceInfo `xml:"VirtualMachineIdeDiskDeviceInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineIdeDiskDeviceInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineIdeDiskDeviceInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineIdeDiskDevicePartitionInfo struct { + VirtualMachineIdeDiskDevicePartitionInfo []VirtualMachineIdeDiskDevicePartitionInfo `xml:"VirtualMachineIdeDiskDevicePartitionInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineIdeDiskDevicePartitionInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineIdeDiskDevicePartitionInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineLegacyNetworkSwitchInfo struct { + VirtualMachineLegacyNetworkSwitchInfo []VirtualMachineLegacyNetworkSwitchInfo `xml:"VirtualMachineLegacyNetworkSwitchInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineLegacyNetworkSwitchInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineLegacyNetworkSwitchInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineMessage struct { + VirtualMachineMessage []VirtualMachineMessage `xml:"VirtualMachineMessage,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineMessage"] = reflect.TypeOf((*ArrayOfVirtualMachineMessage)(nil)).Elem() +} + +type ArrayOfVirtualMachineMetadataManagerVmMetadataInput struct { + VirtualMachineMetadataManagerVmMetadataInput []VirtualMachineMetadataManagerVmMetadataInput `xml:"VirtualMachineMetadataManagerVmMetadataInput,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineMetadataManagerVmMetadataInput"] = reflect.TypeOf((*ArrayOfVirtualMachineMetadataManagerVmMetadataInput)(nil)).Elem() +} + +type ArrayOfVirtualMachineMetadataManagerVmMetadataResult struct { + VirtualMachineMetadataManagerVmMetadataResult []VirtualMachineMetadataManagerVmMetadataResult `xml:"VirtualMachineMetadataManagerVmMetadataResult,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineMetadataManagerVmMetadataResult"] = reflect.TypeOf((*ArrayOfVirtualMachineMetadataManagerVmMetadataResult)(nil)).Elem() +} + +type ArrayOfVirtualMachineNetworkInfo struct { + VirtualMachineNetworkInfo []VirtualMachineNetworkInfo `xml:"VirtualMachineNetworkInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineNetworkInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineNetworkInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineParallelInfo struct { + 
VirtualMachineParallelInfo []VirtualMachineParallelInfo `xml:"VirtualMachineParallelInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineParallelInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineParallelInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachinePciPassthroughInfo struct { + VirtualMachinePciPassthroughInfo []BaseVirtualMachinePciPassthroughInfo `xml:"VirtualMachinePciPassthroughInfo,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualMachinePciPassthroughInfo"] = reflect.TypeOf((*ArrayOfVirtualMachinePciPassthroughInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachinePciSharedGpuPassthroughInfo struct { + VirtualMachinePciSharedGpuPassthroughInfo []VirtualMachinePciSharedGpuPassthroughInfo `xml:"VirtualMachinePciSharedGpuPassthroughInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachinePciSharedGpuPassthroughInfo"] = reflect.TypeOf((*ArrayOfVirtualMachinePciSharedGpuPassthroughInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineProfileDetailsDiskProfileDetails struct { + VirtualMachineProfileDetailsDiskProfileDetails []VirtualMachineProfileDetailsDiskProfileDetails `xml:"VirtualMachineProfileDetailsDiskProfileDetails,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineProfileDetailsDiskProfileDetails"] = reflect.TypeOf((*ArrayOfVirtualMachineProfileDetailsDiskProfileDetails)(nil)).Elem() +} + +type ArrayOfVirtualMachineProfileSpec struct { + VirtualMachineProfileSpec []BaseVirtualMachineProfileSpec `xml:"VirtualMachineProfileSpec,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualMachineProfileSpec"] = reflect.TypeOf((*ArrayOfVirtualMachineProfileSpec)(nil)).Elem() +} + +type ArrayOfVirtualMachinePropertyRelation struct { + VirtualMachinePropertyRelation []VirtualMachinePropertyRelation `xml:"VirtualMachinePropertyRelation,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachinePropertyRelation"] = reflect.TypeOf((*ArrayOfVirtualMachinePropertyRelation)(nil)).Elem() +} + +type ArrayOfVirtualMachineRelocateSpecDiskLocator struct { + VirtualMachineRelocateSpecDiskLocator []VirtualMachineRelocateSpecDiskLocator `xml:"VirtualMachineRelocateSpecDiskLocator,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineRelocateSpecDiskLocator"] = reflect.TypeOf((*ArrayOfVirtualMachineRelocateSpecDiskLocator)(nil)).Elem() +} + +type ArrayOfVirtualMachineScsiDiskDeviceInfo struct { + VirtualMachineScsiDiskDeviceInfo []VirtualMachineScsiDiskDeviceInfo `xml:"VirtualMachineScsiDiskDeviceInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineScsiDiskDeviceInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineScsiDiskDeviceInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineScsiPassthroughInfo struct { + VirtualMachineScsiPassthroughInfo []VirtualMachineScsiPassthroughInfo `xml:"VirtualMachineScsiPassthroughInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineScsiPassthroughInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineScsiPassthroughInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineSerialInfo struct { + VirtualMachineSerialInfo []VirtualMachineSerialInfo `xml:"VirtualMachineSerialInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineSerialInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineSerialInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineSnapshotTree struct { + VirtualMachineSnapshotTree []VirtualMachineSnapshotTree `xml:"VirtualMachineSnapshotTree,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineSnapshotTree"] = reflect.TypeOf((*ArrayOfVirtualMachineSnapshotTree)(nil)).Elem() +} + +type ArrayOfVirtualMachineSoundInfo struct { + 
VirtualMachineSoundInfo []VirtualMachineSoundInfo `xml:"VirtualMachineSoundInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineSoundInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineSoundInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineSriovInfo struct { + VirtualMachineSriovInfo []VirtualMachineSriovInfo `xml:"VirtualMachineSriovInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineSriovInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineSriovInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineSummary struct { + VirtualMachineSummary []VirtualMachineSummary `xml:"VirtualMachineSummary,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineSummary"] = reflect.TypeOf((*ArrayOfVirtualMachineSummary)(nil)).Elem() +} + +type ArrayOfVirtualMachineUsageOnDatastore struct { + VirtualMachineUsageOnDatastore []VirtualMachineUsageOnDatastore `xml:"VirtualMachineUsageOnDatastore,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineUsageOnDatastore"] = reflect.TypeOf((*ArrayOfVirtualMachineUsageOnDatastore)(nil)).Elem() +} + +type ArrayOfVirtualMachineUsbInfo struct { + VirtualMachineUsbInfo []VirtualMachineUsbInfo `xml:"VirtualMachineUsbInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineUsbInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineUsbInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineVFlashModuleInfo struct { + VirtualMachineVFlashModuleInfo []VirtualMachineVFlashModuleInfo `xml:"VirtualMachineVFlashModuleInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineVFlashModuleInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineVFlashModuleInfo)(nil)).Elem() +} + +type ArrayOfVirtualMachineVMCIDeviceFilterSpec struct { + VirtualMachineVMCIDeviceFilterSpec []VirtualMachineVMCIDeviceFilterSpec `xml:"VirtualMachineVMCIDeviceFilterSpec,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineVMCIDeviceFilterSpec"] = reflect.TypeOf((*ArrayOfVirtualMachineVMCIDeviceFilterSpec)(nil)).Elem() +} + +type ArrayOfVirtualNicManagerNetConfig struct { + VirtualNicManagerNetConfig []VirtualNicManagerNetConfig `xml:"VirtualNicManagerNetConfig,omitempty"` +} + +func init() { + t["ArrayOfVirtualNicManagerNetConfig"] = reflect.TypeOf((*ArrayOfVirtualNicManagerNetConfig)(nil)).Elem() +} + +type ArrayOfVirtualSCSISharing struct { + VirtualSCSISharing []VirtualSCSISharing `xml:"VirtualSCSISharing,omitempty"` +} + +func init() { + t["ArrayOfVirtualSCSISharing"] = reflect.TypeOf((*ArrayOfVirtualSCSISharing)(nil)).Elem() +} + +type ArrayOfVirtualSwitchProfile struct { + VirtualSwitchProfile []VirtualSwitchProfile `xml:"VirtualSwitchProfile,omitempty"` +} + +func init() { + t["ArrayOfVirtualSwitchProfile"] = reflect.TypeOf((*ArrayOfVirtualSwitchProfile)(nil)).Elem() +} + +type ArrayOfVmEventArgument struct { + VmEventArgument []VmEventArgument `xml:"VmEventArgument,omitempty"` +} + +func init() { + t["ArrayOfVmEventArgument"] = reflect.TypeOf((*ArrayOfVmEventArgument)(nil)).Elem() +} + +type ArrayOfVmPodConfigForPlacement struct { + VmPodConfigForPlacement []VmPodConfigForPlacement `xml:"VmPodConfigForPlacement,omitempty"` +} + +func init() { + t["ArrayOfVmPodConfigForPlacement"] = reflect.TypeOf((*ArrayOfVmPodConfigForPlacement)(nil)).Elem() +} + +type ArrayOfVmPortGroupProfile struct { + VmPortGroupProfile []VmPortGroupProfile `xml:"VmPortGroupProfile,omitempty"` +} + +func init() { + t["ArrayOfVmPortGroupProfile"] = reflect.TypeOf((*ArrayOfVmPortGroupProfile)(nil)).Elem() +} + +type ArrayOfVmfsConfigOption struct { + VmfsConfigOption []VmfsConfigOption `xml:"VmfsConfigOption,omitempty"` 
+} + +func init() { + t["ArrayOfVmfsConfigOption"] = reflect.TypeOf((*ArrayOfVmfsConfigOption)(nil)).Elem() +} + +type ArrayOfVmfsDatastoreOption struct { + VmfsDatastoreOption []VmfsDatastoreOption `xml:"VmfsDatastoreOption,omitempty"` +} + +func init() { + t["ArrayOfVmfsDatastoreOption"] = reflect.TypeOf((*ArrayOfVmfsDatastoreOption)(nil)).Elem() +} + +type ArrayOfVnicPortArgument struct { + VnicPortArgument []VnicPortArgument `xml:"VnicPortArgument,omitempty"` +} + +func init() { + t["ArrayOfVnicPortArgument"] = reflect.TypeOf((*ArrayOfVnicPortArgument)(nil)).Elem() +} + +type ArrayOfVsanHostConfigInfo struct { + VsanHostConfigInfo []VsanHostConfigInfo `xml:"VsanHostConfigInfo,omitempty"` +} + +func init() { + t["ArrayOfVsanHostConfigInfo"] = reflect.TypeOf((*ArrayOfVsanHostConfigInfo)(nil)).Elem() +} + +type ArrayOfVsanHostConfigInfoNetworkInfoPortConfig struct { + VsanHostConfigInfoNetworkInfoPortConfig []VsanHostConfigInfoNetworkInfoPortConfig `xml:"VsanHostConfigInfoNetworkInfoPortConfig,omitempty"` +} + +func init() { + t["ArrayOfVsanHostConfigInfoNetworkInfoPortConfig"] = reflect.TypeOf((*ArrayOfVsanHostConfigInfoNetworkInfoPortConfig)(nil)).Elem() +} + +type ArrayOfVsanHostDiskMapInfo struct { + VsanHostDiskMapInfo []VsanHostDiskMapInfo `xml:"VsanHostDiskMapInfo,omitempty"` +} + +func init() { + t["ArrayOfVsanHostDiskMapInfo"] = reflect.TypeOf((*ArrayOfVsanHostDiskMapInfo)(nil)).Elem() +} + +type ArrayOfVsanHostDiskMapResult struct { + VsanHostDiskMapResult []VsanHostDiskMapResult `xml:"VsanHostDiskMapResult,omitempty"` +} + +func init() { + t["ArrayOfVsanHostDiskMapResult"] = reflect.TypeOf((*ArrayOfVsanHostDiskMapResult)(nil)).Elem() +} + +type ArrayOfVsanHostDiskMapping struct { + VsanHostDiskMapping []VsanHostDiskMapping `xml:"VsanHostDiskMapping,omitempty"` +} + +func init() { + t["ArrayOfVsanHostDiskMapping"] = reflect.TypeOf((*ArrayOfVsanHostDiskMapping)(nil)).Elem() +} + +type ArrayOfVsanHostDiskResult struct { + VsanHostDiskResult []VsanHostDiskResult `xml:"VsanHostDiskResult,omitempty"` +} + +func init() { + t["ArrayOfVsanHostDiskResult"] = reflect.TypeOf((*ArrayOfVsanHostDiskResult)(nil)).Elem() +} + +type ArrayOfVsanHostMembershipInfo struct { + VsanHostMembershipInfo []VsanHostMembershipInfo `xml:"VsanHostMembershipInfo,omitempty"` +} + +func init() { + t["ArrayOfVsanHostMembershipInfo"] = reflect.TypeOf((*ArrayOfVsanHostMembershipInfo)(nil)).Elem() +} + +type ArrayOfVsanHostRuntimeInfoDiskIssue struct { + VsanHostRuntimeInfoDiskIssue []VsanHostRuntimeInfoDiskIssue `xml:"VsanHostRuntimeInfoDiskIssue,omitempty"` +} + +func init() { + t["ArrayOfVsanHostRuntimeInfoDiskIssue"] = reflect.TypeOf((*ArrayOfVsanHostRuntimeInfoDiskIssue)(nil)).Elem() +} + +type ArrayOfVsanNewPolicyBatch struct { + VsanNewPolicyBatch []VsanNewPolicyBatch `xml:"VsanNewPolicyBatch,omitempty"` +} + +func init() { + t["ArrayOfVsanNewPolicyBatch"] = reflect.TypeOf((*ArrayOfVsanNewPolicyBatch)(nil)).Elem() +} + +type ArrayOfVsanPolicyChangeBatch struct { + VsanPolicyChangeBatch []VsanPolicyChangeBatch `xml:"VsanPolicyChangeBatch,omitempty"` +} + +func init() { + t["ArrayOfVsanPolicyChangeBatch"] = reflect.TypeOf((*ArrayOfVsanPolicyChangeBatch)(nil)).Elem() +} + +type ArrayOfVsanPolicySatisfiability struct { + VsanPolicySatisfiability []VsanPolicySatisfiability `xml:"VsanPolicySatisfiability,omitempty"` +} + +func init() { + t["ArrayOfVsanPolicySatisfiability"] = reflect.TypeOf((*ArrayOfVsanPolicySatisfiability)(nil)).Elem() +} + +type ArrayOfVsanUpgradeSystemNetworkPartitionInfo struct { + 
VsanUpgradeSystemNetworkPartitionInfo []VsanUpgradeSystemNetworkPartitionInfo `xml:"VsanUpgradeSystemNetworkPartitionInfo,omitempty"` +} + +func init() { + t["ArrayOfVsanUpgradeSystemNetworkPartitionInfo"] = reflect.TypeOf((*ArrayOfVsanUpgradeSystemNetworkPartitionInfo)(nil)).Elem() +} + +type ArrayOfVsanUpgradeSystemPreflightCheckIssue struct { + VsanUpgradeSystemPreflightCheckIssue []BaseVsanUpgradeSystemPreflightCheckIssue `xml:"VsanUpgradeSystemPreflightCheckIssue,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVsanUpgradeSystemPreflightCheckIssue"] = reflect.TypeOf((*ArrayOfVsanUpgradeSystemPreflightCheckIssue)(nil)).Elem() +} + +type ArrayOfVsanUpgradeSystemUpgradeHistoryItem struct { + VsanUpgradeSystemUpgradeHistoryItem []BaseVsanUpgradeSystemUpgradeHistoryItem `xml:"VsanUpgradeSystemUpgradeHistoryItem,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVsanUpgradeSystemUpgradeHistoryItem"] = reflect.TypeOf((*ArrayOfVsanUpgradeSystemUpgradeHistoryItem)(nil)).Elem() +} + +type ArrayOfVslmTagEntry struct { + VslmTagEntry []VslmTagEntry `xml:"VslmTagEntry,omitempty"` +} + +func init() { + t["ArrayOfVslmTagEntry"] = reflect.TypeOf((*ArrayOfVslmTagEntry)(nil)).Elem() +} + +type ArrayOfVslmInfrastructureObjectPolicy struct { + VslmInfrastructureObjectPolicy []VslmInfrastructureObjectPolicy `xml:"vslmInfrastructureObjectPolicy,omitempty"` +} + +func init() { + t["ArrayOfvslmInfrastructureObjectPolicy"] = reflect.TypeOf((*ArrayOfVslmInfrastructureObjectPolicy)(nil)).Elem() +} + +type ArrayUpdateSpec struct { + DynamicData + + Operation ArrayUpdateOperation `xml:"operation"` + RemoveKey AnyType `xml:"removeKey,omitempty,typeattr"` +} + +func init() { + t["ArrayUpdateSpec"] = reflect.TypeOf((*ArrayUpdateSpec)(nil)).Elem() +} + +type AssignUserToGroup AssignUserToGroupRequestType + +func init() { + t["AssignUserToGroup"] = reflect.TypeOf((*AssignUserToGroup)(nil)).Elem() +} + +type AssignUserToGroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + User string `xml:"user"` + Group string `xml:"group"` +} + +func init() { + t["AssignUserToGroupRequestType"] = reflect.TypeOf((*AssignUserToGroupRequestType)(nil)).Elem() +} + +type AssignUserToGroupResponse struct { +} + +type AssociateProfile AssociateProfileRequestType + +func init() { + t["AssociateProfile"] = reflect.TypeOf((*AssociateProfile)(nil)).Elem() +} + +type AssociateProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity []ManagedObjectReference `xml:"entity"` +} + +func init() { + t["AssociateProfileRequestType"] = reflect.TypeOf((*AssociateProfileRequestType)(nil)).Elem() +} + +type AssociateProfileResponse struct { +} + +type AttachDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + DiskId ID `xml:"diskId"` + Datastore ManagedObjectReference `xml:"datastore"` + ControllerKey int32 `xml:"controllerKey,omitempty"` + UnitNumber *int32 `xml:"unitNumber"` +} + +func init() { + t["AttachDiskRequestType"] = reflect.TypeOf((*AttachDiskRequestType)(nil)).Elem() +} + +type AttachDisk_Task AttachDiskRequestType + +func init() { + t["AttachDisk_Task"] = reflect.TypeOf((*AttachDisk_Task)(nil)).Elem() +} + +type AttachDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type AttachScsiLun AttachScsiLunRequestType + +func init() { + t["AttachScsiLun"] = reflect.TypeOf((*AttachScsiLun)(nil)).Elem() +} + +type AttachScsiLunExRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunUuid []string `xml:"lunUuid"` +} + +func init() { + 
t["AttachScsiLunExRequestType"] = reflect.TypeOf((*AttachScsiLunExRequestType)(nil)).Elem() +} + +type AttachScsiLunEx_Task AttachScsiLunExRequestType + +func init() { + t["AttachScsiLunEx_Task"] = reflect.TypeOf((*AttachScsiLunEx_Task)(nil)).Elem() +} + +type AttachScsiLunEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type AttachScsiLunRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunUuid string `xml:"lunUuid"` +} + +func init() { + t["AttachScsiLunRequestType"] = reflect.TypeOf((*AttachScsiLunRequestType)(nil)).Elem() +} + +type AttachScsiLunResponse struct { +} + +type AttachTagToVStorageObject AttachTagToVStorageObjectRequestType + +func init() { + t["AttachTagToVStorageObject"] = reflect.TypeOf((*AttachTagToVStorageObject)(nil)).Elem() +} + +type AttachTagToVStorageObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Category string `xml:"category"` + Tag string `xml:"tag"` +} + +func init() { + t["AttachTagToVStorageObjectRequestType"] = reflect.TypeOf((*AttachTagToVStorageObjectRequestType)(nil)).Elem() +} + +type AttachTagToVStorageObjectResponse struct { +} + +type AttachVmfsExtent AttachVmfsExtentRequestType + +func init() { + t["AttachVmfsExtent"] = reflect.TypeOf((*AttachVmfsExtent)(nil)).Elem() +} + +type AttachVmfsExtentRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsPath string `xml:"vmfsPath"` + Extent HostScsiDiskPartition `xml:"extent"` +} + +func init() { + t["AttachVmfsExtentRequestType"] = reflect.TypeOf((*AttachVmfsExtentRequestType)(nil)).Elem() +} + +type AttachVmfsExtentResponse struct { +} + +type AuthMinimumAdminPermission struct { + VimFault +} + +func init() { + t["AuthMinimumAdminPermission"] = reflect.TypeOf((*AuthMinimumAdminPermission)(nil)).Elem() +} + +type AuthMinimumAdminPermissionFault AuthMinimumAdminPermission + +func init() { + t["AuthMinimumAdminPermissionFault"] = reflect.TypeOf((*AuthMinimumAdminPermissionFault)(nil)).Elem() +} + +type AuthenticationProfile struct { + ApplyProfile + + ActiveDirectory *ActiveDirectoryProfile `xml:"activeDirectory,omitempty"` +} + +func init() { + t["AuthenticationProfile"] = reflect.TypeOf((*AuthenticationProfile)(nil)).Elem() +} + +type AuthorizationDescription struct { + DynamicData + + Privilege []BaseElementDescription `xml:"privilege,typeattr"` + PrivilegeGroup []BaseElementDescription `xml:"privilegeGroup,typeattr"` +} + +func init() { + t["AuthorizationDescription"] = reflect.TypeOf((*AuthorizationDescription)(nil)).Elem() +} + +type AuthorizationEvent struct { + Event +} + +func init() { + t["AuthorizationEvent"] = reflect.TypeOf((*AuthorizationEvent)(nil)).Elem() +} + +type AuthorizationPrivilege struct { + DynamicData + + PrivId string `xml:"privId"` + OnParent bool `xml:"onParent"` + Name string `xml:"name"` + PrivGroupName string `xml:"privGroupName"` +} + +func init() { + t["AuthorizationPrivilege"] = reflect.TypeOf((*AuthorizationPrivilege)(nil)).Elem() +} + +type AuthorizationRole struct { + DynamicData + + RoleId int32 `xml:"roleId"` + System bool `xml:"system"` + Name string `xml:"name"` + Info BaseDescription `xml:"info,typeattr"` + Privilege []string `xml:"privilege,omitempty"` +} + +func init() { + t["AuthorizationRole"] = reflect.TypeOf((*AuthorizationRole)(nil)).Elem() +} + +type AutoStartDefaults struct { + DynamicData + + Enabled *bool `xml:"enabled"` + StartDelay int32 `xml:"startDelay,omitempty"` + StopDelay int32 `xml:"stopDelay,omitempty"` + WaitForHeartbeat *bool 
`xml:"waitForHeartbeat"` + StopAction string `xml:"stopAction,omitempty"` +} + +func init() { + t["AutoStartDefaults"] = reflect.TypeOf((*AutoStartDefaults)(nil)).Elem() +} + +type AutoStartPowerInfo struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + StartOrder int32 `xml:"startOrder"` + StartDelay int32 `xml:"startDelay"` + WaitForHeartbeat AutoStartWaitHeartbeatSetting `xml:"waitForHeartbeat"` + StartAction string `xml:"startAction"` + StopDelay int32 `xml:"stopDelay"` + StopAction string `xml:"stopAction"` +} + +func init() { + t["AutoStartPowerInfo"] = reflect.TypeOf((*AutoStartPowerInfo)(nil)).Elem() +} + +type AutoStartPowerOff AutoStartPowerOffRequestType + +func init() { + t["AutoStartPowerOff"] = reflect.TypeOf((*AutoStartPowerOff)(nil)).Elem() +} + +type AutoStartPowerOffRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["AutoStartPowerOffRequestType"] = reflect.TypeOf((*AutoStartPowerOffRequestType)(nil)).Elem() +} + +type AutoStartPowerOffResponse struct { +} + +type AutoStartPowerOn AutoStartPowerOnRequestType + +func init() { + t["AutoStartPowerOn"] = reflect.TypeOf((*AutoStartPowerOn)(nil)).Elem() +} + +type AutoStartPowerOnRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["AutoStartPowerOnRequestType"] = reflect.TypeOf((*AutoStartPowerOnRequestType)(nil)).Elem() +} + +type AutoStartPowerOnResponse struct { +} + +type BackupBlobReadFailure struct { + DvsFault + + EntityName string `xml:"entityName"` + EntityType string `xml:"entityType"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["BackupBlobReadFailure"] = reflect.TypeOf((*BackupBlobReadFailure)(nil)).Elem() +} + +type BackupBlobReadFailureFault BackupBlobReadFailure + +func init() { + t["BackupBlobReadFailureFault"] = reflect.TypeOf((*BackupBlobReadFailureFault)(nil)).Elem() +} + +type BackupBlobWriteFailure struct { + DvsFault + + EntityName string `xml:"entityName"` + EntityType string `xml:"entityType"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["BackupBlobWriteFailure"] = reflect.TypeOf((*BackupBlobWriteFailure)(nil)).Elem() +} + +type BackupBlobWriteFailureFault BackupBlobWriteFailure + +func init() { + t["BackupBlobWriteFailureFault"] = reflect.TypeOf((*BackupBlobWriteFailureFault)(nil)).Elem() +} + +type BackupFirmwareConfiguration BackupFirmwareConfigurationRequestType + +func init() { + t["BackupFirmwareConfiguration"] = reflect.TypeOf((*BackupFirmwareConfiguration)(nil)).Elem() +} + +type BackupFirmwareConfigurationRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["BackupFirmwareConfigurationRequestType"] = reflect.TypeOf((*BackupFirmwareConfigurationRequestType)(nil)).Elem() +} + +type BackupFirmwareConfigurationResponse struct { + Returnval string `xml:"returnval"` +} + +type BadUsernameSessionEvent struct { + SessionEvent + + IpAddress string `xml:"ipAddress"` +} + +func init() { + t["BadUsernameSessionEvent"] = reflect.TypeOf((*BadUsernameSessionEvent)(nil)).Elem() +} + +type BaseConfigInfo struct { + DynamicData + + Id ID `xml:"id"` + Name string `xml:"name"` + CreateTime time.Time `xml:"createTime"` + KeepAfterDeleteVm *bool `xml:"keepAfterDeleteVm"` + RelocationDisabled *bool `xml:"relocationDisabled"` + NativeSnapshotSupported *bool `xml:"nativeSnapshotSupported"` + ChangedBlockTrackingEnabled *bool `xml:"changedBlockTrackingEnabled"` + Backing BaseBaseConfigInfoBackingInfo `xml:"backing,typeattr"` + Iofilter []string 
`xml:"iofilter,omitempty"` +} + +func init() { + t["BaseConfigInfo"] = reflect.TypeOf((*BaseConfigInfo)(nil)).Elem() +} + +type BaseConfigInfoBackingInfo struct { + DynamicData + + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["BaseConfigInfoBackingInfo"] = reflect.TypeOf((*BaseConfigInfoBackingInfo)(nil)).Elem() +} + +type BaseConfigInfoDiskFileBackingInfo struct { + BaseConfigInfoFileBackingInfo + + ProvisioningType string `xml:"provisioningType"` +} + +func init() { + t["BaseConfigInfoDiskFileBackingInfo"] = reflect.TypeOf((*BaseConfigInfoDiskFileBackingInfo)(nil)).Elem() +} + +type BaseConfigInfoFileBackingInfo struct { + BaseConfigInfoBackingInfo + + FilePath string `xml:"filePath"` + BackingObjectId string `xml:"backingObjectId,omitempty"` + Parent BaseBaseConfigInfoFileBackingInfo `xml:"parent,omitempty,typeattr"` + DeltaSizeInMB int64 `xml:"deltaSizeInMB,omitempty"` +} + +func init() { + t["BaseConfigInfoFileBackingInfo"] = reflect.TypeOf((*BaseConfigInfoFileBackingInfo)(nil)).Elem() +} + +type BaseConfigInfoRawDiskMappingBackingInfo struct { + BaseConfigInfoFileBackingInfo + + LunUuid string `xml:"lunUuid"` + CompatibilityMode string `xml:"compatibilityMode"` +} + +func init() { + t["BaseConfigInfoRawDiskMappingBackingInfo"] = reflect.TypeOf((*BaseConfigInfoRawDiskMappingBackingInfo)(nil)).Elem() +} + +type BatchResult struct { + DynamicData + + Result string `xml:"result"` + HostKey string `xml:"hostKey"` + Ds *ManagedObjectReference `xml:"ds,omitempty"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["BatchResult"] = reflect.TypeOf((*BatchResult)(nil)).Elem() +} + +type BindVnic BindVnicRequestType + +func init() { + t["BindVnic"] = reflect.TypeOf((*BindVnic)(nil)).Elem() +} + +type BindVnicRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaName string `xml:"iScsiHbaName"` + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["BindVnicRequestType"] = reflect.TypeOf((*BindVnicRequestType)(nil)).Elem() +} + +type BindVnicResponse struct { +} + +type BlockedByFirewall struct { + HostConfigFault +} + +func init() { + t["BlockedByFirewall"] = reflect.TypeOf((*BlockedByFirewall)(nil)).Elem() +} + +type BlockedByFirewallFault BlockedByFirewall + +func init() { + t["BlockedByFirewallFault"] = reflect.TypeOf((*BlockedByFirewallFault)(nil)).Elem() +} + +type BoolOption struct { + OptionType + + Supported bool `xml:"supported"` + DefaultValue bool `xml:"defaultValue"` +} + +func init() { + t["BoolOption"] = reflect.TypeOf((*BoolOption)(nil)).Elem() +} + +type BoolPolicy struct { + InheritablePolicy + + Value *bool `xml:"value"` +} + +func init() { + t["BoolPolicy"] = reflect.TypeOf((*BoolPolicy)(nil)).Elem() +} + +type BrowseDiagnosticLog BrowseDiagnosticLogRequestType + +func init() { + t["BrowseDiagnosticLog"] = reflect.TypeOf((*BrowseDiagnosticLog)(nil)).Elem() +} + +type BrowseDiagnosticLogRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Key string `xml:"key"` + Start int32 `xml:"start,omitempty"` + Lines int32 `xml:"lines,omitempty"` +} + +func init() { + t["BrowseDiagnosticLogRequestType"] = reflect.TypeOf((*BrowseDiagnosticLogRequestType)(nil)).Elem() +} + +type BrowseDiagnosticLogResponse struct { + Returnval DiagnosticManagerLogHeader `xml:"returnval"` +} + +type CAMServerRefusedConnection struct { + InvalidCAMServer +} + +func init() { + t["CAMServerRefusedConnection"] = 
reflect.TypeOf((*CAMServerRefusedConnection)(nil)).Elem() +} + +type CAMServerRefusedConnectionFault CAMServerRefusedConnection + +func init() { + t["CAMServerRefusedConnectionFault"] = reflect.TypeOf((*CAMServerRefusedConnectionFault)(nil)).Elem() +} + +type CanProvisionObjects CanProvisionObjectsRequestType + +func init() { + t["CanProvisionObjects"] = reflect.TypeOf((*CanProvisionObjects)(nil)).Elem() +} + +type CanProvisionObjectsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Npbs []VsanNewPolicyBatch `xml:"npbs"` + IgnoreSatisfiability *bool `xml:"ignoreSatisfiability"` +} + +func init() { + t["CanProvisionObjectsRequestType"] = reflect.TypeOf((*CanProvisionObjectsRequestType)(nil)).Elem() +} + +type CanProvisionObjectsResponse struct { + Returnval []VsanPolicySatisfiability `xml:"returnval"` +} + +type CancelRecommendation CancelRecommendationRequestType + +func init() { + t["CancelRecommendation"] = reflect.TypeOf((*CancelRecommendation)(nil)).Elem() +} + +type CancelRecommendationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key string `xml:"key"` +} + +func init() { + t["CancelRecommendationRequestType"] = reflect.TypeOf((*CancelRecommendationRequestType)(nil)).Elem() +} + +type CancelRecommendationResponse struct { +} + +type CancelRetrievePropertiesEx CancelRetrievePropertiesExRequestType + +func init() { + t["CancelRetrievePropertiesEx"] = reflect.TypeOf((*CancelRetrievePropertiesEx)(nil)).Elem() +} + +type CancelRetrievePropertiesExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Token string `xml:"token"` +} + +func init() { + t["CancelRetrievePropertiesExRequestType"] = reflect.TypeOf((*CancelRetrievePropertiesExRequestType)(nil)).Elem() +} + +type CancelRetrievePropertiesExResponse struct { +} + +type CancelStorageDrsRecommendation CancelStorageDrsRecommendationRequestType + +func init() { + t["CancelStorageDrsRecommendation"] = reflect.TypeOf((*CancelStorageDrsRecommendation)(nil)).Elem() +} + +type CancelStorageDrsRecommendationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key []string `xml:"key"` +} + +func init() { + t["CancelStorageDrsRecommendationRequestType"] = reflect.TypeOf((*CancelStorageDrsRecommendationRequestType)(nil)).Elem() +} + +type CancelStorageDrsRecommendationResponse struct { +} + +type CancelTask CancelTaskRequestType + +func init() { + t["CancelTask"] = reflect.TypeOf((*CancelTask)(nil)).Elem() +} + +type CancelTaskRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["CancelTaskRequestType"] = reflect.TypeOf((*CancelTaskRequestType)(nil)).Elem() +} + +type CancelTaskResponse struct { +} + +type CancelWaitForUpdates CancelWaitForUpdatesRequestType + +func init() { + t["CancelWaitForUpdates"] = reflect.TypeOf((*CancelWaitForUpdates)(nil)).Elem() +} + +type CancelWaitForUpdatesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["CancelWaitForUpdatesRequestType"] = reflect.TypeOf((*CancelWaitForUpdatesRequestType)(nil)).Elem() +} + +type CancelWaitForUpdatesResponse struct { +} + +type CanceledHostOperationEvent struct { + HostEvent +} + +func init() { + t["CanceledHostOperationEvent"] = reflect.TypeOf((*CanceledHostOperationEvent)(nil)).Elem() +} + +type CannotAccessFile struct { + FileFault +} + +func init() { + t["CannotAccessFile"] = reflect.TypeOf((*CannotAccessFile)(nil)).Elem() +} + +type CannotAccessFileFault CannotAccessFile + +func init() { + t["CannotAccessFileFault"] = 
reflect.TypeOf((*CannotAccessFileFault)(nil)).Elem() +} + +type CannotAccessLocalSource struct { + VimFault +} + +func init() { + t["CannotAccessLocalSource"] = reflect.TypeOf((*CannotAccessLocalSource)(nil)).Elem() +} + +type CannotAccessLocalSourceFault CannotAccessLocalSource + +func init() { + t["CannotAccessLocalSourceFault"] = reflect.TypeOf((*CannotAccessLocalSourceFault)(nil)).Elem() +} + +type CannotAccessNetwork struct { + CannotAccessVmDevice + + Network *ManagedObjectReference `xml:"network,omitempty"` +} + +func init() { + t["CannotAccessNetwork"] = reflect.TypeOf((*CannotAccessNetwork)(nil)).Elem() +} + +type CannotAccessNetworkFault BaseCannotAccessNetwork + +func init() { + t["CannotAccessNetworkFault"] = reflect.TypeOf((*CannotAccessNetworkFault)(nil)).Elem() +} + +type CannotAccessVmComponent struct { + VmConfigFault +} + +func init() { + t["CannotAccessVmComponent"] = reflect.TypeOf((*CannotAccessVmComponent)(nil)).Elem() +} + +type CannotAccessVmComponentFault BaseCannotAccessVmComponent + +func init() { + t["CannotAccessVmComponentFault"] = reflect.TypeOf((*CannotAccessVmComponentFault)(nil)).Elem() +} + +type CannotAccessVmConfig struct { + CannotAccessVmComponent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["CannotAccessVmConfig"] = reflect.TypeOf((*CannotAccessVmConfig)(nil)).Elem() +} + +type CannotAccessVmConfigFault CannotAccessVmConfig + +func init() { + t["CannotAccessVmConfigFault"] = reflect.TypeOf((*CannotAccessVmConfigFault)(nil)).Elem() +} + +type CannotAccessVmDevice struct { + CannotAccessVmComponent + + Device string `xml:"device"` + Backing string `xml:"backing"` + Connected bool `xml:"connected"` +} + +func init() { + t["CannotAccessVmDevice"] = reflect.TypeOf((*CannotAccessVmDevice)(nil)).Elem() +} + +type CannotAccessVmDeviceFault BaseCannotAccessVmDevice + +func init() { + t["CannotAccessVmDeviceFault"] = reflect.TypeOf((*CannotAccessVmDeviceFault)(nil)).Elem() +} + +type CannotAccessVmDisk struct { + CannotAccessVmDevice + + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["CannotAccessVmDisk"] = reflect.TypeOf((*CannotAccessVmDisk)(nil)).Elem() +} + +type CannotAccessVmDiskFault BaseCannotAccessVmDisk + +func init() { + t["CannotAccessVmDiskFault"] = reflect.TypeOf((*CannotAccessVmDiskFault)(nil)).Elem() +} + +type CannotAddHostWithFTVmAsStandalone struct { + HostConnectFault +} + +func init() { + t["CannotAddHostWithFTVmAsStandalone"] = reflect.TypeOf((*CannotAddHostWithFTVmAsStandalone)(nil)).Elem() +} + +type CannotAddHostWithFTVmAsStandaloneFault CannotAddHostWithFTVmAsStandalone + +func init() { + t["CannotAddHostWithFTVmAsStandaloneFault"] = reflect.TypeOf((*CannotAddHostWithFTVmAsStandaloneFault)(nil)).Elem() +} + +type CannotAddHostWithFTVmToDifferentCluster struct { + HostConnectFault +} + +func init() { + t["CannotAddHostWithFTVmToDifferentCluster"] = reflect.TypeOf((*CannotAddHostWithFTVmToDifferentCluster)(nil)).Elem() +} + +type CannotAddHostWithFTVmToDifferentClusterFault CannotAddHostWithFTVmToDifferentCluster + +func init() { + t["CannotAddHostWithFTVmToDifferentClusterFault"] = reflect.TypeOf((*CannotAddHostWithFTVmToDifferentClusterFault)(nil)).Elem() +} + +type CannotAddHostWithFTVmToNonHACluster struct { + HostConnectFault +} + +func init() { + t["CannotAddHostWithFTVmToNonHACluster"] = reflect.TypeOf((*CannotAddHostWithFTVmToNonHACluster)(nil)).Elem() +} + +type CannotAddHostWithFTVmToNonHAClusterFault CannotAddHostWithFTVmToNonHACluster + +func init() { + 
t["CannotAddHostWithFTVmToNonHAClusterFault"] = reflect.TypeOf((*CannotAddHostWithFTVmToNonHAClusterFault)(nil)).Elem() +} + +type CannotChangeDrsBehaviorForFtSecondary struct { + VmFaultToleranceIssue + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` +} + +func init() { + t["CannotChangeDrsBehaviorForFtSecondary"] = reflect.TypeOf((*CannotChangeDrsBehaviorForFtSecondary)(nil)).Elem() +} + +type CannotChangeDrsBehaviorForFtSecondaryFault CannotChangeDrsBehaviorForFtSecondary + +func init() { + t["CannotChangeDrsBehaviorForFtSecondaryFault"] = reflect.TypeOf((*CannotChangeDrsBehaviorForFtSecondaryFault)(nil)).Elem() +} + +type CannotChangeHaSettingsForFtSecondary struct { + VmFaultToleranceIssue + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` +} + +func init() { + t["CannotChangeHaSettingsForFtSecondary"] = reflect.TypeOf((*CannotChangeHaSettingsForFtSecondary)(nil)).Elem() +} + +type CannotChangeHaSettingsForFtSecondaryFault CannotChangeHaSettingsForFtSecondary + +func init() { + t["CannotChangeHaSettingsForFtSecondaryFault"] = reflect.TypeOf((*CannotChangeHaSettingsForFtSecondaryFault)(nil)).Elem() +} + +type CannotChangeVsanClusterUuid struct { + VsanFault +} + +func init() { + t["CannotChangeVsanClusterUuid"] = reflect.TypeOf((*CannotChangeVsanClusterUuid)(nil)).Elem() +} + +type CannotChangeVsanClusterUuidFault CannotChangeVsanClusterUuid + +func init() { + t["CannotChangeVsanClusterUuidFault"] = reflect.TypeOf((*CannotChangeVsanClusterUuidFault)(nil)).Elem() +} + +type CannotChangeVsanNodeUuid struct { + VsanFault +} + +func init() { + t["CannotChangeVsanNodeUuid"] = reflect.TypeOf((*CannotChangeVsanNodeUuid)(nil)).Elem() +} + +type CannotChangeVsanNodeUuidFault CannotChangeVsanNodeUuid + +func init() { + t["CannotChangeVsanNodeUuidFault"] = reflect.TypeOf((*CannotChangeVsanNodeUuidFault)(nil)).Elem() +} + +type CannotComputeFTCompatibleHosts struct { + VmFaultToleranceIssue + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` +} + +func init() { + t["CannotComputeFTCompatibleHosts"] = reflect.TypeOf((*CannotComputeFTCompatibleHosts)(nil)).Elem() +} + +type CannotComputeFTCompatibleHostsFault CannotComputeFTCompatibleHosts + +func init() { + t["CannotComputeFTCompatibleHostsFault"] = reflect.TypeOf((*CannotComputeFTCompatibleHostsFault)(nil)).Elem() +} + +type CannotCreateFile struct { + FileFault +} + +func init() { + t["CannotCreateFile"] = reflect.TypeOf((*CannotCreateFile)(nil)).Elem() +} + +type CannotCreateFileFault CannotCreateFile + +func init() { + t["CannotCreateFileFault"] = reflect.TypeOf((*CannotCreateFileFault)(nil)).Elem() +} + +type CannotDecryptPasswords struct { + CustomizationFault +} + +func init() { + t["CannotDecryptPasswords"] = reflect.TypeOf((*CannotDecryptPasswords)(nil)).Elem() +} + +type CannotDecryptPasswordsFault CannotDecryptPasswords + +func init() { + t["CannotDecryptPasswordsFault"] = reflect.TypeOf((*CannotDecryptPasswordsFault)(nil)).Elem() +} + +type CannotDeleteFile struct { + FileFault +} + +func init() { + t["CannotDeleteFile"] = reflect.TypeOf((*CannotDeleteFile)(nil)).Elem() +} + +type CannotDeleteFileFault CannotDeleteFile + +func init() { + t["CannotDeleteFileFault"] = reflect.TypeOf((*CannotDeleteFileFault)(nil)).Elem() +} + +type CannotDisableDrsOnClustersWithVApps struct { + RuntimeFault +} + +func init() { + t["CannotDisableDrsOnClustersWithVApps"] = reflect.TypeOf((*CannotDisableDrsOnClustersWithVApps)(nil)).Elem() +} + +type CannotDisableDrsOnClustersWithVAppsFault 
CannotDisableDrsOnClustersWithVApps + +func init() { + t["CannotDisableDrsOnClustersWithVAppsFault"] = reflect.TypeOf((*CannotDisableDrsOnClustersWithVAppsFault)(nil)).Elem() +} + +type CannotDisableSnapshot struct { + VmConfigFault +} + +func init() { + t["CannotDisableSnapshot"] = reflect.TypeOf((*CannotDisableSnapshot)(nil)).Elem() +} + +type CannotDisableSnapshotFault CannotDisableSnapshot + +func init() { + t["CannotDisableSnapshotFault"] = reflect.TypeOf((*CannotDisableSnapshotFault)(nil)).Elem() +} + +type CannotDisconnectHostWithFaultToleranceVm struct { + VimFault + + HostName string `xml:"hostName"` +} + +func init() { + t["CannotDisconnectHostWithFaultToleranceVm"] = reflect.TypeOf((*CannotDisconnectHostWithFaultToleranceVm)(nil)).Elem() +} + +type CannotDisconnectHostWithFaultToleranceVmFault CannotDisconnectHostWithFaultToleranceVm + +func init() { + t["CannotDisconnectHostWithFaultToleranceVmFault"] = reflect.TypeOf((*CannotDisconnectHostWithFaultToleranceVmFault)(nil)).Elem() +} + +type CannotEnableVmcpForCluster struct { + VimFault + + Host *ManagedObjectReference `xml:"host,omitempty"` + HostName string `xml:"hostName,omitempty"` + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["CannotEnableVmcpForCluster"] = reflect.TypeOf((*CannotEnableVmcpForCluster)(nil)).Elem() +} + +type CannotEnableVmcpForClusterFault CannotEnableVmcpForCluster + +func init() { + t["CannotEnableVmcpForClusterFault"] = reflect.TypeOf((*CannotEnableVmcpForClusterFault)(nil)).Elem() +} + +type CannotModifyConfigCpuRequirements struct { + MigrationFault +} + +func init() { + t["CannotModifyConfigCpuRequirements"] = reflect.TypeOf((*CannotModifyConfigCpuRequirements)(nil)).Elem() +} + +type CannotModifyConfigCpuRequirementsFault CannotModifyConfigCpuRequirements + +func init() { + t["CannotModifyConfigCpuRequirementsFault"] = reflect.TypeOf((*CannotModifyConfigCpuRequirementsFault)(nil)).Elem() +} + +type CannotMoveFaultToleranceVm struct { + VimFault + + MoveType string `xml:"moveType"` + VmName string `xml:"vmName"` +} + +func init() { + t["CannotMoveFaultToleranceVm"] = reflect.TypeOf((*CannotMoveFaultToleranceVm)(nil)).Elem() +} + +type CannotMoveFaultToleranceVmFault CannotMoveFaultToleranceVm + +func init() { + t["CannotMoveFaultToleranceVmFault"] = reflect.TypeOf((*CannotMoveFaultToleranceVmFault)(nil)).Elem() +} + +type CannotMoveHostWithFaultToleranceVm struct { + VimFault +} + +func init() { + t["CannotMoveHostWithFaultToleranceVm"] = reflect.TypeOf((*CannotMoveHostWithFaultToleranceVm)(nil)).Elem() +} + +type CannotMoveHostWithFaultToleranceVmFault CannotMoveHostWithFaultToleranceVm + +func init() { + t["CannotMoveHostWithFaultToleranceVmFault"] = reflect.TypeOf((*CannotMoveHostWithFaultToleranceVmFault)(nil)).Elem() +} + +type CannotMoveVmWithDeltaDisk struct { + MigrationFault + + Device string `xml:"device"` +} + +func init() { + t["CannotMoveVmWithDeltaDisk"] = reflect.TypeOf((*CannotMoveVmWithDeltaDisk)(nil)).Elem() +} + +type CannotMoveVmWithDeltaDiskFault CannotMoveVmWithDeltaDisk + +func init() { + t["CannotMoveVmWithDeltaDiskFault"] = reflect.TypeOf((*CannotMoveVmWithDeltaDiskFault)(nil)).Elem() +} + +type CannotMoveVmWithNativeDeltaDisk struct { + MigrationFault +} + +func init() { + t["CannotMoveVmWithNativeDeltaDisk"] = reflect.TypeOf((*CannotMoveVmWithNativeDeltaDisk)(nil)).Elem() +} + +type CannotMoveVmWithNativeDeltaDiskFault CannotMoveVmWithNativeDeltaDisk + +func init() { + t["CannotMoveVmWithNativeDeltaDiskFault"] = 
reflect.TypeOf((*CannotMoveVmWithNativeDeltaDiskFault)(nil)).Elem() +} + +type CannotMoveVsanEnabledHost struct { + VsanFault +} + +func init() { + t["CannotMoveVsanEnabledHost"] = reflect.TypeOf((*CannotMoveVsanEnabledHost)(nil)).Elem() +} + +type CannotMoveVsanEnabledHostFault BaseCannotMoveVsanEnabledHost + +func init() { + t["CannotMoveVsanEnabledHostFault"] = reflect.TypeOf((*CannotMoveVsanEnabledHostFault)(nil)).Elem() +} + +type CannotPlaceWithoutPrerequisiteMoves struct { + VimFault +} + +func init() { + t["CannotPlaceWithoutPrerequisiteMoves"] = reflect.TypeOf((*CannotPlaceWithoutPrerequisiteMoves)(nil)).Elem() +} + +type CannotPlaceWithoutPrerequisiteMovesFault CannotPlaceWithoutPrerequisiteMoves + +func init() { + t["CannotPlaceWithoutPrerequisiteMovesFault"] = reflect.TypeOf((*CannotPlaceWithoutPrerequisiteMovesFault)(nil)).Elem() +} + +type CannotPowerOffVmInCluster struct { + InvalidState + + Operation string `xml:"operation"` + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` +} + +func init() { + t["CannotPowerOffVmInCluster"] = reflect.TypeOf((*CannotPowerOffVmInCluster)(nil)).Elem() +} + +type CannotPowerOffVmInClusterFault CannotPowerOffVmInCluster + +func init() { + t["CannotPowerOffVmInClusterFault"] = reflect.TypeOf((*CannotPowerOffVmInClusterFault)(nil)).Elem() +} + +type CannotReconfigureVsanWhenHaEnabled struct { + VsanFault +} + +func init() { + t["CannotReconfigureVsanWhenHaEnabled"] = reflect.TypeOf((*CannotReconfigureVsanWhenHaEnabled)(nil)).Elem() +} + +type CannotReconfigureVsanWhenHaEnabledFault CannotReconfigureVsanWhenHaEnabled + +func init() { + t["CannotReconfigureVsanWhenHaEnabledFault"] = reflect.TypeOf((*CannotReconfigureVsanWhenHaEnabledFault)(nil)).Elem() +} + +type CannotUseNetwork struct { + VmConfigFault + + Device string `xml:"device"` + Backing string `xml:"backing"` + Connected bool `xml:"connected"` + Reason string `xml:"reason"` + Network *ManagedObjectReference `xml:"network,omitempty"` +} + +func init() { + t["CannotUseNetwork"] = reflect.TypeOf((*CannotUseNetwork)(nil)).Elem() +} + +type CannotUseNetworkFault CannotUseNetwork + +func init() { + t["CannotUseNetworkFault"] = reflect.TypeOf((*CannotUseNetworkFault)(nil)).Elem() +} + +type Capability struct { + DynamicData + + ProvisioningSupported bool `xml:"provisioningSupported"` + MultiHostSupported bool `xml:"multiHostSupported"` + UserShellAccessSupported bool `xml:"userShellAccessSupported"` + SupportedEVCMode []EVCMode `xml:"supportedEVCMode,omitempty"` + NetworkBackupAndRestoreSupported *bool `xml:"networkBackupAndRestoreSupported"` + FtDrsWithoutEvcSupported *bool `xml:"ftDrsWithoutEvcSupported"` +} + +func init() { + t["Capability"] = reflect.TypeOf((*Capability)(nil)).Elem() +} + +type CertMgrRefreshCACertificatesAndCRLsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host []ManagedObjectReference `xml:"host"` +} + +func init() { + t["CertMgrRefreshCACertificatesAndCRLsRequestType"] = reflect.TypeOf((*CertMgrRefreshCACertificatesAndCRLsRequestType)(nil)).Elem() +} + +type CertMgrRefreshCACertificatesAndCRLs_Task CertMgrRefreshCACertificatesAndCRLsRequestType + +func init() { + t["CertMgrRefreshCACertificatesAndCRLs_Task"] = reflect.TypeOf((*CertMgrRefreshCACertificatesAndCRLs_Task)(nil)).Elem() +} + +type CertMgrRefreshCACertificatesAndCRLs_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CertMgrRefreshCertificatesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host 
[]ManagedObjectReference `xml:"host"` +} + +func init() { + t["CertMgrRefreshCertificatesRequestType"] = reflect.TypeOf((*CertMgrRefreshCertificatesRequestType)(nil)).Elem() +} + +type CertMgrRefreshCertificates_Task CertMgrRefreshCertificatesRequestType + +func init() { + t["CertMgrRefreshCertificates_Task"] = reflect.TypeOf((*CertMgrRefreshCertificates_Task)(nil)).Elem() +} + +type CertMgrRefreshCertificates_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CertMgrRevokeCertificatesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host []ManagedObjectReference `xml:"host"` +} + +func init() { + t["CertMgrRevokeCertificatesRequestType"] = reflect.TypeOf((*CertMgrRevokeCertificatesRequestType)(nil)).Elem() +} + +type CertMgrRevokeCertificates_Task CertMgrRevokeCertificatesRequestType + +func init() { + t["CertMgrRevokeCertificates_Task"] = reflect.TypeOf((*CertMgrRevokeCertificates_Task)(nil)).Elem() +} + +type CertMgrRevokeCertificates_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ChangeAccessMode ChangeAccessModeRequestType + +func init() { + t["ChangeAccessMode"] = reflect.TypeOf((*ChangeAccessMode)(nil)).Elem() +} + +type ChangeAccessModeRequestType struct { + This ManagedObjectReference `xml:"_this"` + Principal string `xml:"principal"` + IsGroup bool `xml:"isGroup"` + AccessMode HostAccessMode `xml:"accessMode"` +} + +func init() { + t["ChangeAccessModeRequestType"] = reflect.TypeOf((*ChangeAccessModeRequestType)(nil)).Elem() +} + +type ChangeAccessModeResponse struct { +} + +type ChangeFileAttributesInGuest ChangeFileAttributesInGuestRequestType + +func init() { + t["ChangeFileAttributesInGuest"] = reflect.TypeOf((*ChangeFileAttributesInGuest)(nil)).Elem() +} + +type ChangeFileAttributesInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + GuestFilePath string `xml:"guestFilePath"` + FileAttributes BaseGuestFileAttributes `xml:"fileAttributes,typeattr"` +} + +func init() { + t["ChangeFileAttributesInGuestRequestType"] = reflect.TypeOf((*ChangeFileAttributesInGuestRequestType)(nil)).Elem() +} + +type ChangeFileAttributesInGuestResponse struct { +} + +type ChangeKeyRequestType struct { + This ManagedObjectReference `xml:"_this"` + NewKey CryptoKeyPlain `xml:"newKey"` +} + +func init() { + t["ChangeKeyRequestType"] = reflect.TypeOf((*ChangeKeyRequestType)(nil)).Elem() +} + +type ChangeKey_Task ChangeKeyRequestType + +func init() { + t["ChangeKey_Task"] = reflect.TypeOf((*ChangeKey_Task)(nil)).Elem() +} + +type ChangeKey_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ChangeLockdownMode ChangeLockdownModeRequestType + +func init() { + t["ChangeLockdownMode"] = reflect.TypeOf((*ChangeLockdownMode)(nil)).Elem() +} + +type ChangeLockdownModeRequestType struct { + This ManagedObjectReference `xml:"_this"` + Mode HostLockdownMode `xml:"mode"` +} + +func init() { + t["ChangeLockdownModeRequestType"] = reflect.TypeOf((*ChangeLockdownModeRequestType)(nil)).Elem() +} + +type ChangeLockdownModeResponse struct { +} + +type ChangeNFSUserPassword ChangeNFSUserPasswordRequestType + +func init() { + t["ChangeNFSUserPassword"] = reflect.TypeOf((*ChangeNFSUserPassword)(nil)).Elem() +} + +type ChangeNFSUserPasswordRequestType struct { + This ManagedObjectReference `xml:"_this"` + Password string `xml:"password"` +} + +func init() { + 
t["ChangeNFSUserPasswordRequestType"] = reflect.TypeOf((*ChangeNFSUserPasswordRequestType)(nil)).Elem() +} + +type ChangeNFSUserPasswordResponse struct { +} + +type ChangeOwner ChangeOwnerRequestType + +func init() { + t["ChangeOwner"] = reflect.TypeOf((*ChangeOwner)(nil)).Elem() +} + +type ChangeOwnerRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + Owner string `xml:"owner"` +} + +func init() { + t["ChangeOwnerRequestType"] = reflect.TypeOf((*ChangeOwnerRequestType)(nil)).Elem() +} + +type ChangeOwnerResponse struct { +} + +type ChangesInfoEventArgument struct { + DynamicData + + Modified string `xml:"modified,omitempty"` + Added string `xml:"added,omitempty"` + Deleted string `xml:"deleted,omitempty"` +} + +func init() { + t["ChangesInfoEventArgument"] = reflect.TypeOf((*ChangesInfoEventArgument)(nil)).Elem() +} + +type CheckAddHostEvcRequestType struct { + This ManagedObjectReference `xml:"_this"` + CnxSpec HostConnectSpec `xml:"cnxSpec"` +} + +func init() { + t["CheckAddHostEvcRequestType"] = reflect.TypeOf((*CheckAddHostEvcRequestType)(nil)).Elem() +} + +type CheckAddHostEvc_Task CheckAddHostEvcRequestType + +func init() { + t["CheckAddHostEvc_Task"] = reflect.TypeOf((*CheckAddHostEvc_Task)(nil)).Elem() +} + +type CheckAddHostEvc_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckAnswerFileStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host []ManagedObjectReference `xml:"host"` +} + +func init() { + t["CheckAnswerFileStatusRequestType"] = reflect.TypeOf((*CheckAnswerFileStatusRequestType)(nil)).Elem() +} + +type CheckAnswerFileStatus_Task CheckAnswerFileStatusRequestType + +func init() { + t["CheckAnswerFileStatus_Task"] = reflect.TypeOf((*CheckAnswerFileStatus_Task)(nil)).Elem() +} + +type CheckAnswerFileStatus_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckCloneRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Folder ManagedObjectReference `xml:"folder"` + Name string `xml:"name"` + Spec VirtualMachineCloneSpec `xml:"spec"` + TestType []string `xml:"testType,omitempty"` +} + +func init() { + t["CheckCloneRequestType"] = reflect.TypeOf((*CheckCloneRequestType)(nil)).Elem() +} + +type CheckClone_Task CheckCloneRequestType + +func init() { + t["CheckClone_Task"] = reflect.TypeOf((*CheckClone_Task)(nil)).Elem() +} + +type CheckClone_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckCompatibilityRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Pool *ManagedObjectReference `xml:"pool,omitempty"` + TestType []string `xml:"testType,omitempty"` +} + +func init() { + t["CheckCompatibilityRequestType"] = reflect.TypeOf((*CheckCompatibilityRequestType)(nil)).Elem() +} + +type CheckCompatibility_Task CheckCompatibilityRequestType + +func init() { + t["CheckCompatibility_Task"] = reflect.TypeOf((*CheckCompatibility_Task)(nil)).Elem() +} + +type CheckCompatibility_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckComplianceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Profile []ManagedObjectReference `xml:"profile,omitempty"` + Entity []ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + 
t["CheckComplianceRequestType"] = reflect.TypeOf((*CheckComplianceRequestType)(nil)).Elem() +} + +type CheckCompliance_Task CheckComplianceRequestType + +func init() { + t["CheckCompliance_Task"] = reflect.TypeOf((*CheckCompliance_Task)(nil)).Elem() +} + +type CheckCompliance_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckConfigureEvcModeRequestType struct { + This ManagedObjectReference `xml:"_this"` + EvcModeKey string `xml:"evcModeKey"` +} + +func init() { + t["CheckConfigureEvcModeRequestType"] = reflect.TypeOf((*CheckConfigureEvcModeRequestType)(nil)).Elem() +} + +type CheckConfigureEvcMode_Task CheckConfigureEvcModeRequestType + +func init() { + t["CheckConfigureEvcMode_Task"] = reflect.TypeOf((*CheckConfigureEvcMode_Task)(nil)).Elem() +} + +type CheckConfigureEvcMode_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckCustomizationResources CheckCustomizationResourcesRequestType + +func init() { + t["CheckCustomizationResources"] = reflect.TypeOf((*CheckCustomizationResources)(nil)).Elem() +} + +type CheckCustomizationResourcesRequestType struct { + This ManagedObjectReference `xml:"_this"` + GuestOs string `xml:"guestOs"` +} + +func init() { + t["CheckCustomizationResourcesRequestType"] = reflect.TypeOf((*CheckCustomizationResourcesRequestType)(nil)).Elem() +} + +type CheckCustomizationResourcesResponse struct { +} + +type CheckCustomizationSpec CheckCustomizationSpecRequestType + +func init() { + t["CheckCustomizationSpec"] = reflect.TypeOf((*CheckCustomizationSpec)(nil)).Elem() +} + +type CheckCustomizationSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec CustomizationSpec `xml:"spec"` +} + +func init() { + t["CheckCustomizationSpecRequestType"] = reflect.TypeOf((*CheckCustomizationSpecRequestType)(nil)).Elem() +} + +type CheckCustomizationSpecResponse struct { +} + +type CheckForUpdates CheckForUpdatesRequestType + +func init() { + t["CheckForUpdates"] = reflect.TypeOf((*CheckForUpdates)(nil)).Elem() +} + +type CheckForUpdatesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Version string `xml:"version,omitempty"` +} + +func init() { + t["CheckForUpdatesRequestType"] = reflect.TypeOf((*CheckForUpdatesRequestType)(nil)).Elem() +} + +type CheckForUpdatesResponse struct { + Returnval *UpdateSet `xml:"returnval,omitempty"` +} + +type CheckHostPatchRequestType struct { + This ManagedObjectReference `xml:"_this"` + MetaUrls []string `xml:"metaUrls,omitempty"` + BundleUrls []string `xml:"bundleUrls,omitempty"` + Spec *HostPatchManagerPatchManagerOperationSpec `xml:"spec,omitempty"` +} + +func init() { + t["CheckHostPatchRequestType"] = reflect.TypeOf((*CheckHostPatchRequestType)(nil)).Elem() +} + +type CheckHostPatch_Task CheckHostPatchRequestType + +func init() { + t["CheckHostPatch_Task"] = reflect.TypeOf((*CheckHostPatch_Task)(nil)).Elem() +} + +type CheckHostPatch_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckInstantCloneRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Spec VirtualMachineInstantCloneSpec `xml:"spec"` + TestType []string `xml:"testType,omitempty"` +} + +func init() { + t["CheckInstantCloneRequestType"] = reflect.TypeOf((*CheckInstantCloneRequestType)(nil)).Elem() +} + +type CheckInstantClone_Task CheckInstantCloneRequestType + +func init() { + t["CheckInstantClone_Task"] = reflect.TypeOf((*CheckInstantClone_Task)(nil)).Elem() +} + +type 
CheckInstantClone_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckLicenseFeature CheckLicenseFeatureRequestType + +func init() { + t["CheckLicenseFeature"] = reflect.TypeOf((*CheckLicenseFeature)(nil)).Elem() +} + +type CheckLicenseFeatureRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` + FeatureKey string `xml:"featureKey"` +} + +func init() { + t["CheckLicenseFeatureRequestType"] = reflect.TypeOf((*CheckLicenseFeatureRequestType)(nil)).Elem() +} + +type CheckLicenseFeatureResponse struct { + Returnval bool `xml:"returnval"` +} + +type CheckMigrateRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Pool *ManagedObjectReference `xml:"pool,omitempty"` + State VirtualMachinePowerState `xml:"state,omitempty"` + TestType []string `xml:"testType,omitempty"` +} + +func init() { + t["CheckMigrateRequestType"] = reflect.TypeOf((*CheckMigrateRequestType)(nil)).Elem() +} + +type CheckMigrate_Task CheckMigrateRequestType + +func init() { + t["CheckMigrate_Task"] = reflect.TypeOf((*CheckMigrate_Task)(nil)).Elem() +} + +type CheckMigrate_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckPowerOnRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Pool *ManagedObjectReference `xml:"pool,omitempty"` + TestType []string `xml:"testType,omitempty"` +} + +func init() { + t["CheckPowerOnRequestType"] = reflect.TypeOf((*CheckPowerOnRequestType)(nil)).Elem() +} + +type CheckPowerOn_Task CheckPowerOnRequestType + +func init() { + t["CheckPowerOn_Task"] = reflect.TypeOf((*CheckPowerOn_Task)(nil)).Elem() +} + +type CheckPowerOn_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckProfileComplianceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity []ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["CheckProfileComplianceRequestType"] = reflect.TypeOf((*CheckProfileComplianceRequestType)(nil)).Elem() +} + +type CheckProfileCompliance_Task CheckProfileComplianceRequestType + +func init() { + t["CheckProfileCompliance_Task"] = reflect.TypeOf((*CheckProfileCompliance_Task)(nil)).Elem() +} + +type CheckProfileCompliance_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckRelocateRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Spec VirtualMachineRelocateSpec `xml:"spec"` + TestType []string `xml:"testType,omitempty"` +} + +func init() { + t["CheckRelocateRequestType"] = reflect.TypeOf((*CheckRelocateRequestType)(nil)).Elem() +} + +type CheckRelocate_Task CheckRelocateRequestType + +func init() { + t["CheckRelocate_Task"] = reflect.TypeOf((*CheckRelocate_Task)(nil)).Elem() +} + +type CheckRelocate_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CheckResult struct { + DynamicData + + Vm *ManagedObjectReference `xml:"vm,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Warning []LocalizedMethodFault `xml:"warning,omitempty"` + Error []LocalizedMethodFault `xml:"error,omitempty"` +} + +func init() { + t["CheckResult"] = reflect.TypeOf((*CheckResult)(nil)).Elem() +} + +type CheckVmConfigRequestType struct { + This 
ManagedObjectReference `xml:"_this"` + Spec VirtualMachineConfigSpec `xml:"spec"` + Vm *ManagedObjectReference `xml:"vm,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Pool *ManagedObjectReference `xml:"pool,omitempty"` + TestType []string `xml:"testType,omitempty"` +} + +func init() { + t["CheckVmConfigRequestType"] = reflect.TypeOf((*CheckVmConfigRequestType)(nil)).Elem() +} + +type CheckVmConfig_Task CheckVmConfigRequestType + +func init() { + t["CheckVmConfig_Task"] = reflect.TypeOf((*CheckVmConfig_Task)(nil)).Elem() +} + +type CheckVmConfig_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ChoiceOption struct { + OptionType + + ChoiceInfo []BaseElementDescription `xml:"choiceInfo,typeattr"` + DefaultIndex int32 `xml:"defaultIndex,omitempty"` +} + +func init() { + t["ChoiceOption"] = reflect.TypeOf((*ChoiceOption)(nil)).Elem() +} + +type ClearComplianceStatus ClearComplianceStatusRequestType + +func init() { + t["ClearComplianceStatus"] = reflect.TypeOf((*ClearComplianceStatus)(nil)).Elem() +} + +type ClearComplianceStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + Profile []ManagedObjectReference `xml:"profile,omitempty"` + Entity []ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["ClearComplianceStatusRequestType"] = reflect.TypeOf((*ClearComplianceStatusRequestType)(nil)).Elem() +} + +type ClearComplianceStatusResponse struct { +} + +type ClearNFSUser ClearNFSUserRequestType + +func init() { + t["ClearNFSUser"] = reflect.TypeOf((*ClearNFSUser)(nil)).Elem() +} + +type ClearNFSUserRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ClearNFSUserRequestType"] = reflect.TypeOf((*ClearNFSUserRequestType)(nil)).Elem() +} + +type ClearNFSUserResponse struct { +} + +type ClearSystemEventLog ClearSystemEventLogRequestType + +func init() { + t["ClearSystemEventLog"] = reflect.TypeOf((*ClearSystemEventLog)(nil)).Elem() +} + +type ClearSystemEventLogRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ClearSystemEventLogRequestType"] = reflect.TypeOf((*ClearSystemEventLogRequestType)(nil)).Elem() +} + +type ClearSystemEventLogResponse struct { +} + +type ClearTriggeredAlarms ClearTriggeredAlarmsRequestType + +func init() { + t["ClearTriggeredAlarms"] = reflect.TypeOf((*ClearTriggeredAlarms)(nil)).Elem() +} + +type ClearTriggeredAlarmsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Filter AlarmFilterSpec `xml:"filter"` +} + +func init() { + t["ClearTriggeredAlarmsRequestType"] = reflect.TypeOf((*ClearTriggeredAlarmsRequestType)(nil)).Elem() +} + +type ClearTriggeredAlarmsResponse struct { +} + +type ClearVStorageObjectControlFlags ClearVStorageObjectControlFlagsRequestType + +func init() { + t["ClearVStorageObjectControlFlags"] = reflect.TypeOf((*ClearVStorageObjectControlFlags)(nil)).Elem() +} + +type ClearVStorageObjectControlFlagsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + ControlFlags []string `xml:"controlFlags,omitempty"` +} + +func init() { + t["ClearVStorageObjectControlFlagsRequestType"] = reflect.TypeOf((*ClearVStorageObjectControlFlagsRequestType)(nil)).Elem() +} + +type ClearVStorageObjectControlFlagsResponse struct { +} + +type ClockSkew struct { + HostConfigFault +} + +func init() { + t["ClockSkew"] = reflect.TypeOf((*ClockSkew)(nil)).Elem() +} + +type ClockSkewFault ClockSkew + +func init() { + 
t["ClockSkewFault"] = reflect.TypeOf((*ClockSkewFault)(nil)).Elem() +} + +type CloneFromSnapshotNotSupported struct { + MigrationFault +} + +func init() { + t["CloneFromSnapshotNotSupported"] = reflect.TypeOf((*CloneFromSnapshotNotSupported)(nil)).Elem() +} + +type CloneFromSnapshotNotSupportedFault CloneFromSnapshotNotSupported + +func init() { + t["CloneFromSnapshotNotSupportedFault"] = reflect.TypeOf((*CloneFromSnapshotNotSupportedFault)(nil)).Elem() +} + +type CloneSession CloneSessionRequestType + +func init() { + t["CloneSession"] = reflect.TypeOf((*CloneSession)(nil)).Elem() +} + +type CloneSessionRequestType struct { + This ManagedObjectReference `xml:"_this"` + CloneTicket string `xml:"cloneTicket"` +} + +func init() { + t["CloneSessionRequestType"] = reflect.TypeOf((*CloneSessionRequestType)(nil)).Elem() +} + +type CloneSessionResponse struct { + Returnval UserSession `xml:"returnval"` +} + +type CloneVAppRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Target ManagedObjectReference `xml:"target"` + Spec VAppCloneSpec `xml:"spec"` +} + +func init() { + t["CloneVAppRequestType"] = reflect.TypeOf((*CloneVAppRequestType)(nil)).Elem() +} + +type CloneVApp_Task CloneVAppRequestType + +func init() { + t["CloneVApp_Task"] = reflect.TypeOf((*CloneVApp_Task)(nil)).Elem() +} + +type CloneVApp_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CloneVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Folder ManagedObjectReference `xml:"folder"` + Name string `xml:"name"` + Spec VirtualMachineCloneSpec `xml:"spec"` +} + +func init() { + t["CloneVMRequestType"] = reflect.TypeOf((*CloneVMRequestType)(nil)).Elem() +} + +type CloneVM_Task CloneVMRequestType + +func init() { + t["CloneVM_Task"] = reflect.TypeOf((*CloneVM_Task)(nil)).Elem() +} + +type CloneVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CloneVStorageObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Spec VslmCloneSpec `xml:"spec"` +} + +func init() { + t["CloneVStorageObjectRequestType"] = reflect.TypeOf((*CloneVStorageObjectRequestType)(nil)).Elem() +} + +type CloneVStorageObject_Task CloneVStorageObjectRequestType + +func init() { + t["CloneVStorageObject_Task"] = reflect.TypeOf((*CloneVStorageObject_Task)(nil)).Elem() +} + +type CloneVStorageObject_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CloseInventoryViewFolder CloseInventoryViewFolderRequestType + +func init() { + t["CloseInventoryViewFolder"] = reflect.TypeOf((*CloseInventoryViewFolder)(nil)).Elem() +} + +type CloseInventoryViewFolderRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity []ManagedObjectReference `xml:"entity"` +} + +func init() { + t["CloseInventoryViewFolderRequestType"] = reflect.TypeOf((*CloseInventoryViewFolderRequestType)(nil)).Elem() +} + +type CloseInventoryViewFolderResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type ClusterAction struct { + DynamicData + + Type string `xml:"type"` + Target *ManagedObjectReference `xml:"target,omitempty"` +} + +func init() { + t["ClusterAction"] = reflect.TypeOf((*ClusterAction)(nil)).Elem() +} + +type ClusterActionHistory struct { + DynamicData + + Action BaseClusterAction `xml:"action,typeattr"` + Time time.Time `xml:"time"` +} + +func init() { + t["ClusterActionHistory"] = 
reflect.TypeOf((*ClusterActionHistory)(nil)).Elem() +} + +type ClusterAffinityRuleSpec struct { + ClusterRuleInfo + + Vm []ManagedObjectReference `xml:"vm"` +} + +func init() { + t["ClusterAffinityRuleSpec"] = reflect.TypeOf((*ClusterAffinityRuleSpec)(nil)).Elem() +} + +type ClusterAntiAffinityRuleSpec struct { + ClusterRuleInfo + + Vm []ManagedObjectReference `xml:"vm"` +} + +func init() { + t["ClusterAntiAffinityRuleSpec"] = reflect.TypeOf((*ClusterAntiAffinityRuleSpec)(nil)).Elem() +} + +type ClusterAttemptedVmInfo struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + Task *ManagedObjectReference `xml:"task,omitempty"` +} + +func init() { + t["ClusterAttemptedVmInfo"] = reflect.TypeOf((*ClusterAttemptedVmInfo)(nil)).Elem() +} + +type ClusterComplianceCheckedEvent struct { + ClusterEvent + + Profile ProfileEventArgument `xml:"profile"` +} + +func init() { + t["ClusterComplianceCheckedEvent"] = reflect.TypeOf((*ClusterComplianceCheckedEvent)(nil)).Elem() +} + +type ClusterComputeResourceSummary struct { + ComputeResourceSummary + + CurrentFailoverLevel int32 `xml:"currentFailoverLevel"` + AdmissionControlInfo BaseClusterDasAdmissionControlInfo `xml:"admissionControlInfo,omitempty,typeattr"` + NumVmotions int32 `xml:"numVmotions"` + TargetBalance int32 `xml:"targetBalance,omitempty"` + CurrentBalance int32 `xml:"currentBalance,omitempty"` + UsageSummary *ClusterUsageSummary `xml:"usageSummary,omitempty"` + CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty"` + DasData BaseClusterDasData `xml:"dasData,omitempty,typeattr"` +} + +func init() { + t["ClusterComputeResourceSummary"] = reflect.TypeOf((*ClusterComputeResourceSummary)(nil)).Elem() +} + +type ClusterConfigInfo struct { + DynamicData + + DasConfig ClusterDasConfigInfo `xml:"dasConfig"` + DasVmConfig []ClusterDasVmConfigInfo `xml:"dasVmConfig,omitempty"` + DrsConfig ClusterDrsConfigInfo `xml:"drsConfig"` + DrsVmConfig []ClusterDrsVmConfigInfo `xml:"drsVmConfig,omitempty"` + Rule []BaseClusterRuleInfo `xml:"rule,omitempty,typeattr"` +} + +func init() { + t["ClusterConfigInfo"] = reflect.TypeOf((*ClusterConfigInfo)(nil)).Elem() +} + +type ClusterConfigInfoEx struct { + ComputeResourceConfigInfo + + DasConfig ClusterDasConfigInfo `xml:"dasConfig"` + DasVmConfig []ClusterDasVmConfigInfo `xml:"dasVmConfig,omitempty"` + DrsConfig ClusterDrsConfigInfo `xml:"drsConfig"` + DrsVmConfig []ClusterDrsVmConfigInfo `xml:"drsVmConfig,omitempty"` + Rule []BaseClusterRuleInfo `xml:"rule,omitempty,typeattr"` + Orchestration *ClusterOrchestrationInfo `xml:"orchestration,omitempty"` + VmOrchestration []ClusterVmOrchestrationInfo `xml:"vmOrchestration,omitempty"` + DpmConfigInfo *ClusterDpmConfigInfo `xml:"dpmConfigInfo,omitempty"` + DpmHostConfig []ClusterDpmHostConfigInfo `xml:"dpmHostConfig,omitempty"` + VsanConfigInfo *VsanClusterConfigInfo `xml:"vsanConfigInfo,omitempty"` + VsanHostConfig []VsanHostConfigInfo `xml:"vsanHostConfig,omitempty"` + Group []BaseClusterGroupInfo `xml:"group,omitempty,typeattr"` + InfraUpdateHaConfig *ClusterInfraUpdateHaConfigInfo `xml:"infraUpdateHaConfig,omitempty"` + ProactiveDrsConfig *ClusterProactiveDrsConfigInfo `xml:"proactiveDrsConfig,omitempty"` +} + +func init() { + t["ClusterConfigInfoEx"] = reflect.TypeOf((*ClusterConfigInfoEx)(nil)).Elem() +} + +type ClusterConfigSpec struct { + DynamicData + + DasConfig *ClusterDasConfigInfo `xml:"dasConfig,omitempty"` + DasVmConfigSpec []ClusterDasVmConfigSpec `xml:"dasVmConfigSpec,omitempty"` + DrsConfig *ClusterDrsConfigInfo `xml:"drsConfig,omitempty"` + 
DrsVmConfigSpec []ClusterDrsVmConfigSpec `xml:"drsVmConfigSpec,omitempty"` + RulesSpec []ClusterRuleSpec `xml:"rulesSpec,omitempty"` +} + +func init() { + t["ClusterConfigSpec"] = reflect.TypeOf((*ClusterConfigSpec)(nil)).Elem() +} + +type ClusterConfigSpecEx struct { + ComputeResourceConfigSpec + + DasConfig *ClusterDasConfigInfo `xml:"dasConfig,omitempty"` + DasVmConfigSpec []ClusterDasVmConfigSpec `xml:"dasVmConfigSpec,omitempty"` + DrsConfig *ClusterDrsConfigInfo `xml:"drsConfig,omitempty"` + DrsVmConfigSpec []ClusterDrsVmConfigSpec `xml:"drsVmConfigSpec,omitempty"` + RulesSpec []ClusterRuleSpec `xml:"rulesSpec,omitempty"` + Orchestration *ClusterOrchestrationInfo `xml:"orchestration,omitempty"` + VmOrchestrationSpec []ClusterVmOrchestrationSpec `xml:"vmOrchestrationSpec,omitempty"` + DpmConfig *ClusterDpmConfigInfo `xml:"dpmConfig,omitempty"` + DpmHostConfigSpec []ClusterDpmHostConfigSpec `xml:"dpmHostConfigSpec,omitempty"` + VsanConfig *VsanClusterConfigInfo `xml:"vsanConfig,omitempty"` + VsanHostConfigSpec []VsanHostConfigInfo `xml:"vsanHostConfigSpec,omitempty"` + GroupSpec []ClusterGroupSpec `xml:"groupSpec,omitempty"` + InfraUpdateHaConfig *ClusterInfraUpdateHaConfigInfo `xml:"infraUpdateHaConfig,omitempty"` + ProactiveDrsConfig *ClusterProactiveDrsConfigInfo `xml:"proactiveDrsConfig,omitempty"` +} + +func init() { + t["ClusterConfigSpecEx"] = reflect.TypeOf((*ClusterConfigSpecEx)(nil)).Elem() +} + +type ClusterCreatedEvent struct { + ClusterEvent + + Parent FolderEventArgument `xml:"parent"` +} + +func init() { + t["ClusterCreatedEvent"] = reflect.TypeOf((*ClusterCreatedEvent)(nil)).Elem() +} + +type ClusterDasAamHostInfo struct { + ClusterDasHostInfo + + HostDasState []ClusterDasAamNodeState `xml:"hostDasState,omitempty"` + PrimaryHosts []string `xml:"primaryHosts,omitempty"` +} + +func init() { + t["ClusterDasAamHostInfo"] = reflect.TypeOf((*ClusterDasAamHostInfo)(nil)).Elem() +} + +type ClusterDasAamNodeState struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Name string `xml:"name"` + ConfigState string `xml:"configState"` + RuntimeState string `xml:"runtimeState"` +} + +func init() { + t["ClusterDasAamNodeState"] = reflect.TypeOf((*ClusterDasAamNodeState)(nil)).Elem() +} + +type ClusterDasAdmissionControlInfo struct { + DynamicData +} + +func init() { + t["ClusterDasAdmissionControlInfo"] = reflect.TypeOf((*ClusterDasAdmissionControlInfo)(nil)).Elem() +} + +type ClusterDasAdmissionControlPolicy struct { + DynamicData + + ResourceReductionToToleratePercent int32 `xml:"resourceReductionToToleratePercent,omitempty"` +} + +func init() { + t["ClusterDasAdmissionControlPolicy"] = reflect.TypeOf((*ClusterDasAdmissionControlPolicy)(nil)).Elem() +} + +type ClusterDasAdvancedRuntimeInfo struct { + DynamicData + + DasHostInfo BaseClusterDasHostInfo `xml:"dasHostInfo,omitempty,typeattr"` + VmcpSupported *ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo `xml:"vmcpSupported,omitempty"` + HeartbeatDatastoreInfo []DasHeartbeatDatastoreInfo `xml:"heartbeatDatastoreInfo,omitempty"` +} + +func init() { + t["ClusterDasAdvancedRuntimeInfo"] = reflect.TypeOf((*ClusterDasAdvancedRuntimeInfo)(nil)).Elem() +} + +type ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo struct { + DynamicData + + StorageAPDSupported bool `xml:"storageAPDSupported"` + StoragePDLSupported bool `xml:"storagePDLSupported"` +} + +func init() { + t["ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo"] = reflect.TypeOf((*ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo)(nil)).Elem() +} + +type ClusterDasConfigInfo 
struct { + DynamicData + + Enabled *bool `xml:"enabled"` + VmMonitoring string `xml:"vmMonitoring,omitempty"` + HostMonitoring string `xml:"hostMonitoring,omitempty"` + VmComponentProtecting string `xml:"vmComponentProtecting,omitempty"` + FailoverLevel int32 `xml:"failoverLevel,omitempty"` + AdmissionControlPolicy BaseClusterDasAdmissionControlPolicy `xml:"admissionControlPolicy,omitempty,typeattr"` + AdmissionControlEnabled *bool `xml:"admissionControlEnabled"` + DefaultVmSettings *ClusterDasVmSettings `xml:"defaultVmSettings,omitempty"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr"` + HeartbeatDatastore []ManagedObjectReference `xml:"heartbeatDatastore,omitempty"` + HBDatastoreCandidatePolicy string `xml:"hBDatastoreCandidatePolicy,omitempty"` +} + +func init() { + t["ClusterDasConfigInfo"] = reflect.TypeOf((*ClusterDasConfigInfo)(nil)).Elem() +} + +type ClusterDasData struct { + DynamicData +} + +func init() { + t["ClusterDasData"] = reflect.TypeOf((*ClusterDasData)(nil)).Elem() +} + +type ClusterDasDataSummary struct { + ClusterDasData + + HostListVersion int64 `xml:"hostListVersion"` + ClusterConfigVersion int64 `xml:"clusterConfigVersion"` + CompatListVersion int64 `xml:"compatListVersion"` +} + +func init() { + t["ClusterDasDataSummary"] = reflect.TypeOf((*ClusterDasDataSummary)(nil)).Elem() +} + +type ClusterDasFailoverLevelAdvancedRuntimeInfo struct { + ClusterDasAdvancedRuntimeInfo + + SlotInfo ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo `xml:"slotInfo"` + TotalSlots int32 `xml:"totalSlots"` + UsedSlots int32 `xml:"usedSlots"` + UnreservedSlots int32 `xml:"unreservedSlots"` + TotalVms int32 `xml:"totalVms"` + TotalHosts int32 `xml:"totalHosts"` + TotalGoodHosts int32 `xml:"totalGoodHosts"` + HostSlots []ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots `xml:"hostSlots,omitempty"` + VmsRequiringMultipleSlots []ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots `xml:"vmsRequiringMultipleSlots,omitempty"` +} + +func init() { + t["ClusterDasFailoverLevelAdvancedRuntimeInfo"] = reflect.TypeOf((*ClusterDasFailoverLevelAdvancedRuntimeInfo)(nil)).Elem() +} + +type ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Slots int32 `xml:"slots"` +} + +func init() { + t["ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots"] = reflect.TypeOf((*ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots)(nil)).Elem() +} + +type ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo struct { + DynamicData + + NumVcpus int32 `xml:"numVcpus"` + CpuMHz int32 `xml:"cpuMHz"` + MemoryMB int32 `xml:"memoryMB"` +} + +func init() { + t["ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo"] = reflect.TypeOf((*ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo)(nil)).Elem() +} + +type ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + Slots int32 `xml:"slots"` +} + +func init() { + t["ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots"] = reflect.TypeOf((*ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots)(nil)).Elem() +} + +type ClusterDasFdmHostState struct { + DynamicData + + State string `xml:"state"` + StateReporter *ManagedObjectReference `xml:"stateReporter,omitempty"` +} + +func init() { + t["ClusterDasFdmHostState"] = reflect.TypeOf((*ClusterDasFdmHostState)(nil)).Elem() +} + +type ClusterDasHostInfo struct { + DynamicData +} + +func init() { + t["ClusterDasHostInfo"] = reflect.TypeOf((*ClusterDasHostInfo)(nil)).Elem() +} + +type ClusterDasHostRecommendation 
struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + DrsRating int32 `xml:"drsRating,omitempty"` +} + +func init() { + t["ClusterDasHostRecommendation"] = reflect.TypeOf((*ClusterDasHostRecommendation)(nil)).Elem() +} + +type ClusterDasVmConfigInfo struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + RestartPriority DasVmPriority `xml:"restartPriority,omitempty"` + PowerOffOnIsolation *bool `xml:"powerOffOnIsolation"` + DasSettings *ClusterDasVmSettings `xml:"dasSettings,omitempty"` +} + +func init() { + t["ClusterDasVmConfigInfo"] = reflect.TypeOf((*ClusterDasVmConfigInfo)(nil)).Elem() +} + +type ClusterDasVmConfigSpec struct { + ArrayUpdateSpec + + Info *ClusterDasVmConfigInfo `xml:"info,omitempty"` +} + +func init() { + t["ClusterDasVmConfigSpec"] = reflect.TypeOf((*ClusterDasVmConfigSpec)(nil)).Elem() +} + +type ClusterDasVmSettings struct { + DynamicData + + RestartPriority string `xml:"restartPriority,omitempty"` + RestartPriorityTimeout int32 `xml:"restartPriorityTimeout,omitempty"` + IsolationResponse string `xml:"isolationResponse,omitempty"` + VmToolsMonitoringSettings *ClusterVmToolsMonitoringSettings `xml:"vmToolsMonitoringSettings,omitempty"` + VmComponentProtectionSettings *ClusterVmComponentProtectionSettings `xml:"vmComponentProtectionSettings,omitempty"` +} + +func init() { + t["ClusterDasVmSettings"] = reflect.TypeOf((*ClusterDasVmSettings)(nil)).Elem() +} + +type ClusterDependencyRuleInfo struct { + ClusterRuleInfo + + VmGroup string `xml:"vmGroup"` + DependsOnVmGroup string `xml:"dependsOnVmGroup"` +} + +func init() { + t["ClusterDependencyRuleInfo"] = reflect.TypeOf((*ClusterDependencyRuleInfo)(nil)).Elem() +} + +type ClusterDestroyedEvent struct { + ClusterEvent +} + +func init() { + t["ClusterDestroyedEvent"] = reflect.TypeOf((*ClusterDestroyedEvent)(nil)).Elem() +} + +type ClusterDpmConfigInfo struct { + DynamicData + + Enabled *bool `xml:"enabled"` + DefaultDpmBehavior DpmBehavior `xml:"defaultDpmBehavior,omitempty"` + HostPowerActionRate int32 `xml:"hostPowerActionRate,omitempty"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr"` +} + +func init() { + t["ClusterDpmConfigInfo"] = reflect.TypeOf((*ClusterDpmConfigInfo)(nil)).Elem() +} + +type ClusterDpmHostConfigInfo struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + Enabled *bool `xml:"enabled"` + Behavior DpmBehavior `xml:"behavior,omitempty"` +} + +func init() { + t["ClusterDpmHostConfigInfo"] = reflect.TypeOf((*ClusterDpmHostConfigInfo)(nil)).Elem() +} + +type ClusterDpmHostConfigSpec struct { + ArrayUpdateSpec + + Info *ClusterDpmHostConfigInfo `xml:"info,omitempty"` +} + +func init() { + t["ClusterDpmHostConfigSpec"] = reflect.TypeOf((*ClusterDpmHostConfigSpec)(nil)).Elem() +} + +type ClusterDrsConfigInfo struct { + DynamicData + + Enabled *bool `xml:"enabled"` + EnableVmBehaviorOverrides *bool `xml:"enableVmBehaviorOverrides"` + DefaultVmBehavior DrsBehavior `xml:"defaultVmBehavior,omitempty"` + VmotionRate int32 `xml:"vmotionRate,omitempty"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr"` +} + +func init() { + t["ClusterDrsConfigInfo"] = reflect.TypeOf((*ClusterDrsConfigInfo)(nil)).Elem() +} + +type ClusterDrsFaults struct { + DynamicData + + Reason string `xml:"reason"` + FaultsByVm []BaseClusterDrsFaultsFaultsByVm `xml:"faultsByVm,typeattr"` +} + +func init() { + t["ClusterDrsFaults"] = reflect.TypeOf((*ClusterDrsFaults)(nil)).Elem() +} + +type ClusterDrsFaultsFaultsByVirtualDisk struct { + ClusterDrsFaultsFaultsByVm + + Disk 
*VirtualDiskId `xml:"disk,omitempty"` +} + +func init() { + t["ClusterDrsFaultsFaultsByVirtualDisk"] = reflect.TypeOf((*ClusterDrsFaultsFaultsByVirtualDisk)(nil)).Elem() +} + +type ClusterDrsFaultsFaultsByVm struct { + DynamicData + + Vm *ManagedObjectReference `xml:"vm,omitempty"` + Fault []LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["ClusterDrsFaultsFaultsByVm"] = reflect.TypeOf((*ClusterDrsFaultsFaultsByVm)(nil)).Elem() +} + +type ClusterDrsMigration struct { + DynamicData + + Key string `xml:"key"` + Time time.Time `xml:"time"` + Vm ManagedObjectReference `xml:"vm"` + CpuLoad int32 `xml:"cpuLoad,omitempty"` + MemoryLoad int64 `xml:"memoryLoad,omitempty"` + Source ManagedObjectReference `xml:"source"` + SourceCpuLoad int32 `xml:"sourceCpuLoad,omitempty"` + SourceMemoryLoad int64 `xml:"sourceMemoryLoad,omitempty"` + Destination ManagedObjectReference `xml:"destination"` + DestinationCpuLoad int32 `xml:"destinationCpuLoad,omitempty"` + DestinationMemoryLoad int64 `xml:"destinationMemoryLoad,omitempty"` +} + +func init() { + t["ClusterDrsMigration"] = reflect.TypeOf((*ClusterDrsMigration)(nil)).Elem() +} + +type ClusterDrsRecommendation struct { + DynamicData + + Key string `xml:"key"` + Rating int32 `xml:"rating"` + Reason string `xml:"reason"` + ReasonText string `xml:"reasonText"` + MigrationList []ClusterDrsMigration `xml:"migrationList"` +} + +func init() { + t["ClusterDrsRecommendation"] = reflect.TypeOf((*ClusterDrsRecommendation)(nil)).Elem() +} + +type ClusterDrsVmConfigInfo struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + Enabled *bool `xml:"enabled"` + Behavior DrsBehavior `xml:"behavior,omitempty"` +} + +func init() { + t["ClusterDrsVmConfigInfo"] = reflect.TypeOf((*ClusterDrsVmConfigInfo)(nil)).Elem() +} + +type ClusterDrsVmConfigSpec struct { + ArrayUpdateSpec + + Info *ClusterDrsVmConfigInfo `xml:"info,omitempty"` +} + +func init() { + t["ClusterDrsVmConfigSpec"] = reflect.TypeOf((*ClusterDrsVmConfigSpec)(nil)).Elem() +} + +type ClusterEVCManagerCheckResult struct { + DynamicData + + EvcModeKey string `xml:"evcModeKey"` + Error LocalizedMethodFault `xml:"error"` + Host []ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["ClusterEVCManagerCheckResult"] = reflect.TypeOf((*ClusterEVCManagerCheckResult)(nil)).Elem() +} + +type ClusterEVCManagerEVCState struct { + DynamicData + + SupportedEVCMode []EVCMode `xml:"supportedEVCMode"` + CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty"` + GuaranteedCPUFeatures []HostCpuIdInfo `xml:"guaranteedCPUFeatures,omitempty"` + FeatureCapability []HostFeatureCapability `xml:"featureCapability,omitempty"` + FeatureMask []HostFeatureMask `xml:"featureMask,omitempty"` + FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty"` +} + +func init() { + t["ClusterEVCManagerEVCState"] = reflect.TypeOf((*ClusterEVCManagerEVCState)(nil)).Elem() +} + +type ClusterEnterMaintenanceMode ClusterEnterMaintenanceModeRequestType + +func init() { + t["ClusterEnterMaintenanceMode"] = reflect.TypeOf((*ClusterEnterMaintenanceMode)(nil)).Elem() +} + +type ClusterEnterMaintenanceModeRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host []ManagedObjectReference `xml:"host"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr"` +} + +func init() { + t["ClusterEnterMaintenanceModeRequestType"] = reflect.TypeOf((*ClusterEnterMaintenanceModeRequestType)(nil)).Elem() +} + +type ClusterEnterMaintenanceModeResponse struct { + Returnval 
ClusterEnterMaintenanceResult `xml:"returnval"` +} + +type ClusterEnterMaintenanceResult struct { + DynamicData + + Recommendations []ClusterRecommendation `xml:"recommendations,omitempty"` + Fault *ClusterDrsFaults `xml:"fault,omitempty"` +} + +func init() { + t["ClusterEnterMaintenanceResult"] = reflect.TypeOf((*ClusterEnterMaintenanceResult)(nil)).Elem() +} + +type ClusterEvent struct { + Event +} + +func init() { + t["ClusterEvent"] = reflect.TypeOf((*ClusterEvent)(nil)).Elem() +} + +type ClusterFailoverHostAdmissionControlInfo struct { + ClusterDasAdmissionControlInfo + + HostStatus []ClusterFailoverHostAdmissionControlInfoHostStatus `xml:"hostStatus,omitempty"` +} + +func init() { + t["ClusterFailoverHostAdmissionControlInfo"] = reflect.TypeOf((*ClusterFailoverHostAdmissionControlInfo)(nil)).Elem() +} + +type ClusterFailoverHostAdmissionControlInfoHostStatus struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Status ManagedEntityStatus `xml:"status"` +} + +func init() { + t["ClusterFailoverHostAdmissionControlInfoHostStatus"] = reflect.TypeOf((*ClusterFailoverHostAdmissionControlInfoHostStatus)(nil)).Elem() +} + +type ClusterFailoverHostAdmissionControlPolicy struct { + ClusterDasAdmissionControlPolicy + + FailoverHosts []ManagedObjectReference `xml:"failoverHosts,omitempty"` + FailoverLevel int32 `xml:"failoverLevel,omitempty"` +} + +func init() { + t["ClusterFailoverHostAdmissionControlPolicy"] = reflect.TypeOf((*ClusterFailoverHostAdmissionControlPolicy)(nil)).Elem() +} + +type ClusterFailoverLevelAdmissionControlInfo struct { + ClusterDasAdmissionControlInfo + + CurrentFailoverLevel int32 `xml:"currentFailoverLevel"` +} + +func init() { + t["ClusterFailoverLevelAdmissionControlInfo"] = reflect.TypeOf((*ClusterFailoverLevelAdmissionControlInfo)(nil)).Elem() +} + +type ClusterFailoverLevelAdmissionControlPolicy struct { + ClusterDasAdmissionControlPolicy + + FailoverLevel int32 `xml:"failoverLevel"` + SlotPolicy BaseClusterSlotPolicy `xml:"slotPolicy,omitempty,typeattr"` +} + +func init() { + t["ClusterFailoverLevelAdmissionControlPolicy"] = reflect.TypeOf((*ClusterFailoverLevelAdmissionControlPolicy)(nil)).Elem() +} + +type ClusterFailoverResourcesAdmissionControlInfo struct { + ClusterDasAdmissionControlInfo + + CurrentCpuFailoverResourcesPercent int32 `xml:"currentCpuFailoverResourcesPercent"` + CurrentMemoryFailoverResourcesPercent int32 `xml:"currentMemoryFailoverResourcesPercent"` +} + +func init() { + t["ClusterFailoverResourcesAdmissionControlInfo"] = reflect.TypeOf((*ClusterFailoverResourcesAdmissionControlInfo)(nil)).Elem() +} + +type ClusterFailoverResourcesAdmissionControlPolicy struct { + ClusterDasAdmissionControlPolicy + + CpuFailoverResourcesPercent int32 `xml:"cpuFailoverResourcesPercent"` + MemoryFailoverResourcesPercent int32 `xml:"memoryFailoverResourcesPercent"` + FailoverLevel int32 `xml:"failoverLevel,omitempty"` + AutoComputePercentages *bool `xml:"autoComputePercentages"` +} + +func init() { + t["ClusterFailoverResourcesAdmissionControlPolicy"] = reflect.TypeOf((*ClusterFailoverResourcesAdmissionControlPolicy)(nil)).Elem() +} + +type ClusterFixedSizeSlotPolicy struct { + ClusterSlotPolicy + + Cpu int32 `xml:"cpu"` + Memory int32 `xml:"memory"` +} + +func init() { + t["ClusterFixedSizeSlotPolicy"] = reflect.TypeOf((*ClusterFixedSizeSlotPolicy)(nil)).Elem() +} + +type ClusterGroupInfo struct { + DynamicData + + Name string `xml:"name"` + UserCreated *bool `xml:"userCreated"` + UniqueID string `xml:"uniqueID,omitempty"` +} + +func init() { + 
t["ClusterGroupInfo"] = reflect.TypeOf((*ClusterGroupInfo)(nil)).Elem() +} + +type ClusterGroupSpec struct { + ArrayUpdateSpec + + Info BaseClusterGroupInfo `xml:"info,omitempty,typeattr"` +} + +func init() { + t["ClusterGroupSpec"] = reflect.TypeOf((*ClusterGroupSpec)(nil)).Elem() +} + +type ClusterHostGroup struct { + ClusterGroupInfo + + Host []ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["ClusterHostGroup"] = reflect.TypeOf((*ClusterHostGroup)(nil)).Elem() +} + +type ClusterHostInfraUpdateHaModeAction struct { + ClusterAction + + OperationType string `xml:"operationType"` +} + +func init() { + t["ClusterHostInfraUpdateHaModeAction"] = reflect.TypeOf((*ClusterHostInfraUpdateHaModeAction)(nil)).Elem() +} + +type ClusterHostPowerAction struct { + ClusterAction + + OperationType HostPowerOperationType `xml:"operationType"` + PowerConsumptionWatt int32 `xml:"powerConsumptionWatt,omitempty"` + CpuCapacityMHz int32 `xml:"cpuCapacityMHz,omitempty"` + MemCapacityMB int32 `xml:"memCapacityMB,omitempty"` +} + +func init() { + t["ClusterHostPowerAction"] = reflect.TypeOf((*ClusterHostPowerAction)(nil)).Elem() +} + +type ClusterHostRecommendation struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Rating int32 `xml:"rating"` +} + +func init() { + t["ClusterHostRecommendation"] = reflect.TypeOf((*ClusterHostRecommendation)(nil)).Elem() +} + +type ClusterInfraUpdateHaConfigInfo struct { + DynamicData + + Enabled *bool `xml:"enabled"` + Behavior string `xml:"behavior,omitempty"` + ModerateRemediation string `xml:"moderateRemediation,omitempty"` + SevereRemediation string `xml:"severeRemediation,omitempty"` + Providers []string `xml:"providers,omitempty"` +} + +func init() { + t["ClusterInfraUpdateHaConfigInfo"] = reflect.TypeOf((*ClusterInfraUpdateHaConfigInfo)(nil)).Elem() +} + +type ClusterInitialPlacementAction struct { + ClusterAction + + TargetHost ManagedObjectReference `xml:"targetHost"` + Pool *ManagedObjectReference `xml:"pool,omitempty"` +} + +func init() { + t["ClusterInitialPlacementAction"] = reflect.TypeOf((*ClusterInitialPlacementAction)(nil)).Elem() +} + +type ClusterIoFilterInfo struct { + IoFilterInfo + + OpType string `xml:"opType"` + VibUrl string `xml:"vibUrl,omitempty"` +} + +func init() { + t["ClusterIoFilterInfo"] = reflect.TypeOf((*ClusterIoFilterInfo)(nil)).Elem() +} + +type ClusterMigrationAction struct { + ClusterAction + + DrsMigration *ClusterDrsMigration `xml:"drsMigration,omitempty"` +} + +func init() { + t["ClusterMigrationAction"] = reflect.TypeOf((*ClusterMigrationAction)(nil)).Elem() +} + +type ClusterNetworkConfigSpec struct { + DynamicData + + NetworkPortGroup ManagedObjectReference `xml:"networkPortGroup"` + IpSettings CustomizationIPSettings `xml:"ipSettings"` +} + +func init() { + t["ClusterNetworkConfigSpec"] = reflect.TypeOf((*ClusterNetworkConfigSpec)(nil)).Elem() +} + +type ClusterNotAttemptedVmInfo struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["ClusterNotAttemptedVmInfo"] = reflect.TypeOf((*ClusterNotAttemptedVmInfo)(nil)).Elem() +} + +type ClusterOrchestrationInfo struct { + DynamicData + + DefaultVmReadiness *ClusterVmReadiness `xml:"defaultVmReadiness,omitempty"` +} + +func init() { + t["ClusterOrchestrationInfo"] = reflect.TypeOf((*ClusterOrchestrationInfo)(nil)).Elem() +} + +type ClusterOvercommittedEvent struct { + ClusterEvent +} + +func init() { + t["ClusterOvercommittedEvent"] = 
reflect.TypeOf((*ClusterOvercommittedEvent)(nil)).Elem() +} + +type ClusterPowerOnVmResult struct { + DynamicData + + Attempted []ClusterAttemptedVmInfo `xml:"attempted,omitempty"` + NotAttempted []ClusterNotAttemptedVmInfo `xml:"notAttempted,omitempty"` + Recommendations []ClusterRecommendation `xml:"recommendations,omitempty"` +} + +func init() { + t["ClusterPowerOnVmResult"] = reflect.TypeOf((*ClusterPowerOnVmResult)(nil)).Elem() +} + +type ClusterProactiveDrsConfigInfo struct { + DynamicData + + Enabled *bool `xml:"enabled"` +} + +func init() { + t["ClusterProactiveDrsConfigInfo"] = reflect.TypeOf((*ClusterProactiveDrsConfigInfo)(nil)).Elem() +} + +type ClusterProfileCompleteConfigSpec struct { + ClusterProfileConfigSpec + + ComplyProfile *ComplianceProfile `xml:"complyProfile,omitempty"` +} + +func init() { + t["ClusterProfileCompleteConfigSpec"] = reflect.TypeOf((*ClusterProfileCompleteConfigSpec)(nil)).Elem() +} + +type ClusterProfileConfigInfo struct { + ProfileConfigInfo + + ComplyProfile *ComplianceProfile `xml:"complyProfile,omitempty"` +} + +func init() { + t["ClusterProfileConfigInfo"] = reflect.TypeOf((*ClusterProfileConfigInfo)(nil)).Elem() +} + +type ClusterProfileConfigServiceCreateSpec struct { + ClusterProfileConfigSpec + + ServiceType []string `xml:"serviceType,omitempty"` +} + +func init() { + t["ClusterProfileConfigServiceCreateSpec"] = reflect.TypeOf((*ClusterProfileConfigServiceCreateSpec)(nil)).Elem() +} + +type ClusterProfileConfigSpec struct { + ClusterProfileCreateSpec +} + +func init() { + t["ClusterProfileConfigSpec"] = reflect.TypeOf((*ClusterProfileConfigSpec)(nil)).Elem() +} + +type ClusterProfileCreateSpec struct { + ProfileCreateSpec +} + +func init() { + t["ClusterProfileCreateSpec"] = reflect.TypeOf((*ClusterProfileCreateSpec)(nil)).Elem() +} + +type ClusterRecommendation struct { + DynamicData + + Key string `xml:"key"` + Type string `xml:"type"` + Time time.Time `xml:"time"` + Rating int32 `xml:"rating"` + Reason string `xml:"reason"` + ReasonText string `xml:"reasonText"` + WarningText string `xml:"warningText,omitempty"` + WarningDetails *LocalizableMessage `xml:"warningDetails,omitempty"` + Prerequisite []string `xml:"prerequisite,omitempty"` + Action []BaseClusterAction `xml:"action,omitempty,typeattr"` + Target *ManagedObjectReference `xml:"target,omitempty"` +} + +func init() { + t["ClusterRecommendation"] = reflect.TypeOf((*ClusterRecommendation)(nil)).Elem() +} + +type ClusterReconfiguredEvent struct { + ClusterEvent + + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty"` +} + +func init() { + t["ClusterReconfiguredEvent"] = reflect.TypeOf((*ClusterReconfiguredEvent)(nil)).Elem() +} + +type ClusterResourceUsageSummary struct { + DynamicData + + CpuUsedMHz int32 `xml:"cpuUsedMHz"` + CpuCapacityMHz int32 `xml:"cpuCapacityMHz"` + MemUsedMB int32 `xml:"memUsedMB"` + MemCapacityMB int32 `xml:"memCapacityMB"` + PMemAvailableMB int64 `xml:"pMemAvailableMB,omitempty"` + PMemCapacityMB int64 `xml:"pMemCapacityMB,omitempty"` + StorageUsedMB int64 `xml:"storageUsedMB"` + StorageCapacityMB int64 `xml:"storageCapacityMB"` +} + +func init() { + t["ClusterResourceUsageSummary"] = reflect.TypeOf((*ClusterResourceUsageSummary)(nil)).Elem() +} + +type ClusterRuleInfo struct { + DynamicData + + Key int32 `xml:"key,omitempty"` + Status ManagedEntityStatus `xml:"status,omitempty"` + Enabled *bool `xml:"enabled"` + Name string `xml:"name,omitempty"` + Mandatory *bool `xml:"mandatory"` + UserCreated *bool `xml:"userCreated"` + InCompliance *bool 
`xml:"inCompliance"` + RuleUuid string `xml:"ruleUuid,omitempty"` +} + +func init() { + t["ClusterRuleInfo"] = reflect.TypeOf((*ClusterRuleInfo)(nil)).Elem() +} + +type ClusterRuleSpec struct { + ArrayUpdateSpec + + Info BaseClusterRuleInfo `xml:"info,omitempty,typeattr"` +} + +func init() { + t["ClusterRuleSpec"] = reflect.TypeOf((*ClusterRuleSpec)(nil)).Elem() +} + +type ClusterSlotPolicy struct { + DynamicData +} + +func init() { + t["ClusterSlotPolicy"] = reflect.TypeOf((*ClusterSlotPolicy)(nil)).Elem() +} + +type ClusterStatusChangedEvent struct { + ClusterEvent + + OldStatus string `xml:"oldStatus"` + NewStatus string `xml:"newStatus"` +} + +func init() { + t["ClusterStatusChangedEvent"] = reflect.TypeOf((*ClusterStatusChangedEvent)(nil)).Elem() +} + +type ClusterUsageSummary struct { + DynamicData + + TotalCpuCapacityMhz int32 `xml:"totalCpuCapacityMhz"` + TotalMemCapacityMB int32 `xml:"totalMemCapacityMB"` + CpuReservationMhz int32 `xml:"cpuReservationMhz"` + MemReservationMB int32 `xml:"memReservationMB"` + PoweredOffCpuReservationMhz int32 `xml:"poweredOffCpuReservationMhz,omitempty"` + PoweredOffMemReservationMB int32 `xml:"poweredOffMemReservationMB,omitempty"` + CpuDemandMhz int32 `xml:"cpuDemandMhz"` + MemDemandMB int32 `xml:"memDemandMB"` + StatsGenNumber int64 `xml:"statsGenNumber"` + CpuEntitledMhz int32 `xml:"cpuEntitledMhz"` + MemEntitledMB int32 `xml:"memEntitledMB"` + PoweredOffVmCount int32 `xml:"poweredOffVmCount"` + TotalVmCount int32 `xml:"totalVmCount"` +} + +func init() { + t["ClusterUsageSummary"] = reflect.TypeOf((*ClusterUsageSummary)(nil)).Elem() +} + +type ClusterVmComponentProtectionSettings struct { + DynamicData + + VmStorageProtectionForAPD string `xml:"vmStorageProtectionForAPD,omitempty"` + EnableAPDTimeoutForHosts *bool `xml:"enableAPDTimeoutForHosts"` + VmTerminateDelayForAPDSec int32 `xml:"vmTerminateDelayForAPDSec,omitempty"` + VmReactionOnAPDCleared string `xml:"vmReactionOnAPDCleared,omitempty"` + VmStorageProtectionForPDL string `xml:"vmStorageProtectionForPDL,omitempty"` +} + +func init() { + t["ClusterVmComponentProtectionSettings"] = reflect.TypeOf((*ClusterVmComponentProtectionSettings)(nil)).Elem() +} + +type ClusterVmGroup struct { + ClusterGroupInfo + + Vm []ManagedObjectReference `xml:"vm,omitempty"` +} + +func init() { + t["ClusterVmGroup"] = reflect.TypeOf((*ClusterVmGroup)(nil)).Elem() +} + +type ClusterVmHostRuleInfo struct { + ClusterRuleInfo + + VmGroupName string `xml:"vmGroupName,omitempty"` + AffineHostGroupName string `xml:"affineHostGroupName,omitempty"` + AntiAffineHostGroupName string `xml:"antiAffineHostGroupName,omitempty"` +} + +func init() { + t["ClusterVmHostRuleInfo"] = reflect.TypeOf((*ClusterVmHostRuleInfo)(nil)).Elem() +} + +type ClusterVmOrchestrationInfo struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + VmReadiness ClusterVmReadiness `xml:"vmReadiness"` +} + +func init() { + t["ClusterVmOrchestrationInfo"] = reflect.TypeOf((*ClusterVmOrchestrationInfo)(nil)).Elem() +} + +type ClusterVmOrchestrationSpec struct { + ArrayUpdateSpec + + Info *ClusterVmOrchestrationInfo `xml:"info,omitempty"` +} + +func init() { + t["ClusterVmOrchestrationSpec"] = reflect.TypeOf((*ClusterVmOrchestrationSpec)(nil)).Elem() +} + +type ClusterVmReadiness struct { + DynamicData + + ReadyCondition string `xml:"readyCondition,omitempty"` + PostReadyDelay int32 `xml:"postReadyDelay,omitempty"` +} + +func init() { + t["ClusterVmReadiness"] = reflect.TypeOf((*ClusterVmReadiness)(nil)).Elem() +} + +type 
ClusterVmToolsMonitoringSettings struct { + DynamicData + + Enabled *bool `xml:"enabled"` + VmMonitoring string `xml:"vmMonitoring,omitempty"` + ClusterSettings *bool `xml:"clusterSettings"` + FailureInterval int32 `xml:"failureInterval,omitempty"` + MinUpTime int32 `xml:"minUpTime,omitempty"` + MaxFailures int32 `xml:"maxFailures,omitempty"` + MaxFailureWindow int32 `xml:"maxFailureWindow,omitempty"` +} + +func init() { + t["ClusterVmToolsMonitoringSettings"] = reflect.TypeOf((*ClusterVmToolsMonitoringSettings)(nil)).Elem() +} + +type CollectorAddressUnset struct { + DvsFault +} + +func init() { + t["CollectorAddressUnset"] = reflect.TypeOf((*CollectorAddressUnset)(nil)).Elem() +} + +type CollectorAddressUnsetFault CollectorAddressUnset + +func init() { + t["CollectorAddressUnsetFault"] = reflect.TypeOf((*CollectorAddressUnsetFault)(nil)).Elem() +} + +type ComplianceFailure struct { + DynamicData + + FailureType string `xml:"failureType"` + Message LocalizableMessage `xml:"message"` + ExpressionName string `xml:"expressionName,omitempty"` + FailureValues []ComplianceFailureComplianceFailureValues `xml:"failureValues,omitempty"` +} + +func init() { + t["ComplianceFailure"] = reflect.TypeOf((*ComplianceFailure)(nil)).Elem() +} + +type ComplianceFailureComplianceFailureValues struct { + DynamicData + + ComparisonIdentifier string `xml:"comparisonIdentifier"` + ProfileInstance string `xml:"profileInstance,omitempty"` + HostValue AnyType `xml:"hostValue,omitempty,typeattr"` + ProfileValue AnyType `xml:"profileValue,omitempty,typeattr"` +} + +func init() { + t["ComplianceFailureComplianceFailureValues"] = reflect.TypeOf((*ComplianceFailureComplianceFailureValues)(nil)).Elem() +} + +type ComplianceLocator struct { + DynamicData + + ExpressionName string `xml:"expressionName"` + ApplyPath ProfilePropertyPath `xml:"applyPath"` +} + +func init() { + t["ComplianceLocator"] = reflect.TypeOf((*ComplianceLocator)(nil)).Elem() +} + +type ComplianceProfile struct { + DynamicData + + Expression []BaseProfileExpression `xml:"expression,typeattr"` + RootExpression string `xml:"rootExpression"` +} + +func init() { + t["ComplianceProfile"] = reflect.TypeOf((*ComplianceProfile)(nil)).Elem() +} + +type ComplianceResult struct { + DynamicData + + Profile *ManagedObjectReference `xml:"profile,omitempty"` + ComplianceStatus string `xml:"complianceStatus"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` + CheckTime *time.Time `xml:"checkTime"` + Failure []ComplianceFailure `xml:"failure,omitempty"` +} + +func init() { + t["ComplianceResult"] = reflect.TypeOf((*ComplianceResult)(nil)).Elem() +} + +type CompositeHostProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Source ManagedObjectReference `xml:"source"` + Targets []ManagedObjectReference `xml:"targets,omitempty"` + ToBeMerged *HostApplyProfile `xml:"toBeMerged,omitempty"` + ToBeReplacedWith *HostApplyProfile `xml:"toBeReplacedWith,omitempty"` + ToBeDeleted *HostApplyProfile `xml:"toBeDeleted,omitempty"` + EnableStatusToBeCopied *HostApplyProfile `xml:"enableStatusToBeCopied,omitempty"` +} + +func init() { + t["CompositeHostProfileRequestType"] = reflect.TypeOf((*CompositeHostProfileRequestType)(nil)).Elem() +} + +type CompositeHostProfile_Task CompositeHostProfileRequestType + +func init() { + t["CompositeHostProfile_Task"] = reflect.TypeOf((*CompositeHostProfile_Task)(nil)).Elem() +} + +type CompositeHostProfile_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CompositePolicyOption struct { 
+ PolicyOption + + Option []BasePolicyOption `xml:"option,omitempty,typeattr"` +} + +func init() { + t["CompositePolicyOption"] = reflect.TypeOf((*CompositePolicyOption)(nil)).Elem() +} + +type ComputeDiskPartitionInfo ComputeDiskPartitionInfoRequestType + +func init() { + t["ComputeDiskPartitionInfo"] = reflect.TypeOf((*ComputeDiskPartitionInfo)(nil)).Elem() +} + +type ComputeDiskPartitionInfoForResize ComputeDiskPartitionInfoForResizeRequestType + +func init() { + t["ComputeDiskPartitionInfoForResize"] = reflect.TypeOf((*ComputeDiskPartitionInfoForResize)(nil)).Elem() +} + +type ComputeDiskPartitionInfoForResizeRequestType struct { + This ManagedObjectReference `xml:"_this"` + Partition HostScsiDiskPartition `xml:"partition"` + BlockRange HostDiskPartitionBlockRange `xml:"blockRange"` + PartitionFormat string `xml:"partitionFormat,omitempty"` +} + +func init() { + t["ComputeDiskPartitionInfoForResizeRequestType"] = reflect.TypeOf((*ComputeDiskPartitionInfoForResizeRequestType)(nil)).Elem() +} + +type ComputeDiskPartitionInfoForResizeResponse struct { + Returnval HostDiskPartitionInfo `xml:"returnval"` +} + +type ComputeDiskPartitionInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` + DevicePath string `xml:"devicePath"` + Layout HostDiskPartitionLayout `xml:"layout"` + PartitionFormat string `xml:"partitionFormat,omitempty"` +} + +func init() { + t["ComputeDiskPartitionInfoRequestType"] = reflect.TypeOf((*ComputeDiskPartitionInfoRequestType)(nil)).Elem() +} + +type ComputeDiskPartitionInfoResponse struct { + Returnval HostDiskPartitionInfo `xml:"returnval"` +} + +type ComputeResourceConfigInfo struct { + DynamicData + + VmSwapPlacement string `xml:"vmSwapPlacement"` + SpbmEnabled *bool `xml:"spbmEnabled"` + DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` +} + +func init() { + t["ComputeResourceConfigInfo"] = reflect.TypeOf((*ComputeResourceConfigInfo)(nil)).Elem() +} + +type ComputeResourceConfigSpec struct { + DynamicData + + VmSwapPlacement string `xml:"vmSwapPlacement,omitempty"` + SpbmEnabled *bool `xml:"spbmEnabled"` + DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` +} + +func init() { + t["ComputeResourceConfigSpec"] = reflect.TypeOf((*ComputeResourceConfigSpec)(nil)).Elem() +} + +type ComputeResourceEventArgument struct { + EntityEventArgument + + ComputeResource ManagedObjectReference `xml:"computeResource"` +} + +func init() { + t["ComputeResourceEventArgument"] = reflect.TypeOf((*ComputeResourceEventArgument)(nil)).Elem() +} + +type ComputeResourceHostSPBMLicenseInfo struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + LicenseState ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState `xml:"licenseState"` +} + +func init() { + t["ComputeResourceHostSPBMLicenseInfo"] = reflect.TypeOf((*ComputeResourceHostSPBMLicenseInfo)(nil)).Elem() +} + +type ComputeResourceSummary struct { + DynamicData + + TotalCpu int32 `xml:"totalCpu"` + TotalMemory int64 `xml:"totalMemory"` + NumCpuCores int16 `xml:"numCpuCores"` + NumCpuThreads int16 `xml:"numCpuThreads"` + EffectiveCpu int32 `xml:"effectiveCpu"` + EffectiveMemory int64 `xml:"effectiveMemory"` + NumHosts int32 `xml:"numHosts"` + NumEffectiveHosts int32 `xml:"numEffectiveHosts"` + OverallStatus ManagedEntityStatus `xml:"overallStatus"` +} + +func init() { + t["ComputeResourceSummary"] = reflect.TypeOf((*ComputeResourceSummary)(nil)).Elem() +} + +type ConcurrentAccess struct { + VimFault +} + +func init() { + t["ConcurrentAccess"] = 
reflect.TypeOf((*ConcurrentAccess)(nil)).Elem() +} + +type ConcurrentAccessFault ConcurrentAccess + +func init() { + t["ConcurrentAccessFault"] = reflect.TypeOf((*ConcurrentAccessFault)(nil)).Elem() +} + +type ConfigTarget struct { + DynamicData + + NumCpus int32 `xml:"numCpus"` + NumCpuCores int32 `xml:"numCpuCores"` + NumNumaNodes int32 `xml:"numNumaNodes"` + SmcPresent *bool `xml:"smcPresent"` + Datastore []VirtualMachineDatastoreInfo `xml:"datastore,omitempty"` + Network []VirtualMachineNetworkInfo `xml:"network,omitempty"` + OpaqueNetwork []OpaqueNetworkTargetInfo `xml:"opaqueNetwork,omitempty"` + DistributedVirtualPortgroup []DistributedVirtualPortgroupInfo `xml:"distributedVirtualPortgroup,omitempty"` + DistributedVirtualSwitch []DistributedVirtualSwitchInfo `xml:"distributedVirtualSwitch,omitempty"` + CdRom []VirtualMachineCdromInfo `xml:"cdRom,omitempty"` + Serial []VirtualMachineSerialInfo `xml:"serial,omitempty"` + Parallel []VirtualMachineParallelInfo `xml:"parallel,omitempty"` + Sound []VirtualMachineSoundInfo `xml:"sound,omitempty"` + Usb []VirtualMachineUsbInfo `xml:"usb,omitempty"` + Floppy []VirtualMachineFloppyInfo `xml:"floppy,omitempty"` + LegacyNetworkInfo []VirtualMachineLegacyNetworkSwitchInfo `xml:"legacyNetworkInfo,omitempty"` + ScsiPassthrough []VirtualMachineScsiPassthroughInfo `xml:"scsiPassthrough,omitempty"` + ScsiDisk []VirtualMachineScsiDiskDeviceInfo `xml:"scsiDisk,omitempty"` + IdeDisk []VirtualMachineIdeDiskDeviceInfo `xml:"ideDisk,omitempty"` + MaxMemMBOptimalPerf int32 `xml:"maxMemMBOptimalPerf"` + ResourcePool *ResourcePoolRuntimeInfo `xml:"resourcePool,omitempty"` + AutoVmotion *bool `xml:"autoVmotion"` + PciPassthrough []BaseVirtualMachinePciPassthroughInfo `xml:"pciPassthrough,omitempty,typeattr"` + Sriov []VirtualMachineSriovInfo `xml:"sriov,omitempty"` + VFlashModule []VirtualMachineVFlashModuleInfo `xml:"vFlashModule,omitempty"` + SharedGpuPassthroughTypes []VirtualMachinePciSharedGpuPassthroughInfo `xml:"sharedGpuPassthroughTypes,omitempty"` + AvailablePersistentMemoryReservationMB int64 `xml:"availablePersistentMemoryReservationMB,omitempty"` +} + +func init() { + t["ConfigTarget"] = reflect.TypeOf((*ConfigTarget)(nil)).Elem() +} + +type ConfigureCryptoKey ConfigureCryptoKeyRequestType + +func init() { + t["ConfigureCryptoKey"] = reflect.TypeOf((*ConfigureCryptoKey)(nil)).Elem() +} + +type ConfigureCryptoKeyRequestType struct { + This ManagedObjectReference `xml:"_this"` + KeyId *CryptoKeyId `xml:"keyId,omitempty"` +} + +func init() { + t["ConfigureCryptoKeyRequestType"] = reflect.TypeOf((*ConfigureCryptoKeyRequestType)(nil)).Elem() +} + +type ConfigureCryptoKeyResponse struct { +} + +type ConfigureDatastoreIORMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` + Spec StorageIORMConfigSpec `xml:"spec"` +} + +func init() { + t["ConfigureDatastoreIORMRequestType"] = reflect.TypeOf((*ConfigureDatastoreIORMRequestType)(nil)).Elem() +} + +type ConfigureDatastoreIORM_Task ConfigureDatastoreIORMRequestType + +func init() { + t["ConfigureDatastoreIORM_Task"] = reflect.TypeOf((*ConfigureDatastoreIORM_Task)(nil)).Elem() +} + +type ConfigureDatastoreIORM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ConfigureDatastorePrincipal ConfigureDatastorePrincipalRequestType + +func init() { + t["ConfigureDatastorePrincipal"] = reflect.TypeOf((*ConfigureDatastorePrincipal)(nil)).Elem() +} + +type ConfigureDatastorePrincipalRequestType struct { + This 
ManagedObjectReference `xml:"_this"` + UserName string `xml:"userName"` + Password string `xml:"password,omitempty"` +} + +func init() { + t["ConfigureDatastorePrincipalRequestType"] = reflect.TypeOf((*ConfigureDatastorePrincipalRequestType)(nil)).Elem() +} + +type ConfigureDatastorePrincipalResponse struct { +} + +type ConfigureEvcModeRequestType struct { + This ManagedObjectReference `xml:"_this"` + EvcModeKey string `xml:"evcModeKey"` +} + +func init() { + t["ConfigureEvcModeRequestType"] = reflect.TypeOf((*ConfigureEvcModeRequestType)(nil)).Elem() +} + +type ConfigureEvcMode_Task ConfigureEvcModeRequestType + +func init() { + t["ConfigureEvcMode_Task"] = reflect.TypeOf((*ConfigureEvcMode_Task)(nil)).Elem() +} + +type ConfigureEvcMode_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ConfigureHostCacheRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostCacheConfigurationSpec `xml:"spec"` +} + +func init() { + t["ConfigureHostCacheRequestType"] = reflect.TypeOf((*ConfigureHostCacheRequestType)(nil)).Elem() +} + +type ConfigureHostCache_Task ConfigureHostCacheRequestType + +func init() { + t["ConfigureHostCache_Task"] = reflect.TypeOf((*ConfigureHostCache_Task)(nil)).Elem() +} + +type ConfigureHostCache_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ConfigureLicenseSource ConfigureLicenseSourceRequestType + +func init() { + t["ConfigureLicenseSource"] = reflect.TypeOf((*ConfigureLicenseSource)(nil)).Elem() +} + +type ConfigureLicenseSourceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` + LicenseSource BaseLicenseSource `xml:"licenseSource,typeattr"` +} + +func init() { + t["ConfigureLicenseSourceRequestType"] = reflect.TypeOf((*ConfigureLicenseSourceRequestType)(nil)).Elem() +} + +type ConfigureLicenseSourceResponse struct { +} + +type ConfigurePowerPolicy ConfigurePowerPolicyRequestType + +func init() { + t["ConfigurePowerPolicy"] = reflect.TypeOf((*ConfigurePowerPolicy)(nil)).Elem() +} + +type ConfigurePowerPolicyRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key int32 `xml:"key"` +} + +func init() { + t["ConfigurePowerPolicyRequestType"] = reflect.TypeOf((*ConfigurePowerPolicyRequestType)(nil)).Elem() +} + +type ConfigurePowerPolicyResponse struct { +} + +type ConfigureStorageDrsForPodRequestType struct { + This ManagedObjectReference `xml:"_this"` + Pod ManagedObjectReference `xml:"pod"` + Spec StorageDrsConfigSpec `xml:"spec"` + Modify bool `xml:"modify"` +} + +func init() { + t["ConfigureStorageDrsForPodRequestType"] = reflect.TypeOf((*ConfigureStorageDrsForPodRequestType)(nil)).Elem() +} + +type ConfigureStorageDrsForPod_Task ConfigureStorageDrsForPodRequestType + +func init() { + t["ConfigureStorageDrsForPod_Task"] = reflect.TypeOf((*ConfigureStorageDrsForPod_Task)(nil)).Elem() +} + +type ConfigureStorageDrsForPod_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ConfigureVFlashResourceExRequestType struct { + This ManagedObjectReference `xml:"_this"` + DevicePath []string `xml:"devicePath,omitempty"` +} + +func init() { + t["ConfigureVFlashResourceExRequestType"] = reflect.TypeOf((*ConfigureVFlashResourceExRequestType)(nil)).Elem() +} + +type ConfigureVFlashResourceEx_Task ConfigureVFlashResourceExRequestType + +func init() { + t["ConfigureVFlashResourceEx_Task"] = reflect.TypeOf((*ConfigureVFlashResourceEx_Task)(nil)).Elem() +} + +type 
ConfigureVFlashResourceEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ConflictingConfiguration struct { + DvsFault + + ConfigInConflict []ConflictingConfigurationConfig `xml:"configInConflict"` +} + +func init() { + t["ConflictingConfiguration"] = reflect.TypeOf((*ConflictingConfiguration)(nil)).Elem() +} + +type ConflictingConfigurationConfig struct { + DynamicData + + Entity *ManagedObjectReference `xml:"entity,omitempty"` + PropertyPath string `xml:"propertyPath"` +} + +func init() { + t["ConflictingConfigurationConfig"] = reflect.TypeOf((*ConflictingConfigurationConfig)(nil)).Elem() +} + +type ConflictingConfigurationFault ConflictingConfiguration + +func init() { + t["ConflictingConfigurationFault"] = reflect.TypeOf((*ConflictingConfigurationFault)(nil)).Elem() +} + +type ConflictingDatastoreFound struct { + RuntimeFault + + Name string `xml:"name"` + Url string `xml:"url"` +} + +func init() { + t["ConflictingDatastoreFound"] = reflect.TypeOf((*ConflictingDatastoreFound)(nil)).Elem() +} + +type ConflictingDatastoreFoundFault ConflictingDatastoreFound + +func init() { + t["ConflictingDatastoreFoundFault"] = reflect.TypeOf((*ConflictingDatastoreFoundFault)(nil)).Elem() +} + +type ConnectedIso struct { + OvfExport + + Cdrom VirtualCdrom `xml:"cdrom"` + Filename string `xml:"filename"` +} + +func init() { + t["ConnectedIso"] = reflect.TypeOf((*ConnectedIso)(nil)).Elem() +} + +type ConnectedIsoFault ConnectedIso + +func init() { + t["ConnectedIsoFault"] = reflect.TypeOf((*ConnectedIsoFault)(nil)).Elem() +} + +type ConsolidateVMDisksRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ConsolidateVMDisksRequestType"] = reflect.TypeOf((*ConsolidateVMDisksRequestType)(nil)).Elem() +} + +type ConsolidateVMDisks_Task ConsolidateVMDisksRequestType + +func init() { + t["ConsolidateVMDisks_Task"] = reflect.TypeOf((*ConsolidateVMDisks_Task)(nil)).Elem() +} + +type ConsolidateVMDisks_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ContinueRetrievePropertiesEx ContinueRetrievePropertiesExRequestType + +func init() { + t["ContinueRetrievePropertiesEx"] = reflect.TypeOf((*ContinueRetrievePropertiesEx)(nil)).Elem() +} + +type ContinueRetrievePropertiesExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Token string `xml:"token"` +} + +func init() { + t["ContinueRetrievePropertiesExRequestType"] = reflect.TypeOf((*ContinueRetrievePropertiesExRequestType)(nil)).Elem() +} + +type ContinueRetrievePropertiesExResponse struct { + Returnval RetrieveResult `xml:"returnval"` +} + +type ConvertNamespacePathToUuidPath ConvertNamespacePathToUuidPathRequestType + +func init() { + t["ConvertNamespacePathToUuidPath"] = reflect.TypeOf((*ConvertNamespacePathToUuidPath)(nil)).Elem() +} + +type ConvertNamespacePathToUuidPathRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + NamespaceUrl string `xml:"namespaceUrl"` +} + +func init() { + t["ConvertNamespacePathToUuidPathRequestType"] = reflect.TypeOf((*ConvertNamespacePathToUuidPathRequestType)(nil)).Elem() +} + +type ConvertNamespacePathToUuidPathResponse struct { + Returnval string `xml:"returnval"` +} + +type CopyDatastoreFileRequestType struct { + This ManagedObjectReference `xml:"_this"` + SourceName string `xml:"sourceName"` + SourceDatacenter *ManagedObjectReference `xml:"sourceDatacenter,omitempty"` + DestinationName string `xml:"destinationName"` + 
DestinationDatacenter *ManagedObjectReference `xml:"destinationDatacenter,omitempty"` + Force *bool `xml:"force"` +} + +func init() { + t["CopyDatastoreFileRequestType"] = reflect.TypeOf((*CopyDatastoreFileRequestType)(nil)).Elem() +} + +type CopyDatastoreFile_Task CopyDatastoreFileRequestType + +func init() { + t["CopyDatastoreFile_Task"] = reflect.TypeOf((*CopyDatastoreFile_Task)(nil)).Elem() +} + +type CopyDatastoreFile_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CopyVirtualDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + SourceName string `xml:"sourceName"` + SourceDatacenter *ManagedObjectReference `xml:"sourceDatacenter,omitempty"` + DestName string `xml:"destName"` + DestDatacenter *ManagedObjectReference `xml:"destDatacenter,omitempty"` + DestSpec BaseVirtualDiskSpec `xml:"destSpec,omitempty,typeattr"` + Force *bool `xml:"force"` +} + +func init() { + t["CopyVirtualDiskRequestType"] = reflect.TypeOf((*CopyVirtualDiskRequestType)(nil)).Elem() +} + +type CopyVirtualDisk_Task CopyVirtualDiskRequestType + +func init() { + t["CopyVirtualDisk_Task"] = reflect.TypeOf((*CopyVirtualDisk_Task)(nil)).Elem() +} + +type CopyVirtualDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CpuCompatibilityUnknown struct { + CpuIncompatible +} + +func init() { + t["CpuCompatibilityUnknown"] = reflect.TypeOf((*CpuCompatibilityUnknown)(nil)).Elem() +} + +type CpuCompatibilityUnknownFault CpuCompatibilityUnknown + +func init() { + t["CpuCompatibilityUnknownFault"] = reflect.TypeOf((*CpuCompatibilityUnknownFault)(nil)).Elem() +} + +type CpuHotPlugNotSupported struct { + VmConfigFault +} + +func init() { + t["CpuHotPlugNotSupported"] = reflect.TypeOf((*CpuHotPlugNotSupported)(nil)).Elem() +} + +type CpuHotPlugNotSupportedFault CpuHotPlugNotSupported + +func init() { + t["CpuHotPlugNotSupportedFault"] = reflect.TypeOf((*CpuHotPlugNotSupportedFault)(nil)).Elem() +} + +type CpuIncompatible struct { + VirtualHardwareCompatibilityIssue + + Level int32 `xml:"level"` + RegisterName string `xml:"registerName"` + RegisterBits string `xml:"registerBits,omitempty"` + DesiredBits string `xml:"desiredBits,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["CpuIncompatible"] = reflect.TypeOf((*CpuIncompatible)(nil)).Elem() +} + +type CpuIncompatible1ECX struct { + CpuIncompatible + + Sse3 bool `xml:"sse3"` + Pclmulqdq *bool `xml:"pclmulqdq"` + Ssse3 bool `xml:"ssse3"` + Sse41 bool `xml:"sse41"` + Sse42 bool `xml:"sse42"` + Aes *bool `xml:"aes"` + Other bool `xml:"other"` + OtherOnly bool `xml:"otherOnly"` +} + +func init() { + t["CpuIncompatible1ECX"] = reflect.TypeOf((*CpuIncompatible1ECX)(nil)).Elem() +} + +type CpuIncompatible1ECXFault CpuIncompatible1ECX + +func init() { + t["CpuIncompatible1ECXFault"] = reflect.TypeOf((*CpuIncompatible1ECXFault)(nil)).Elem() +} + +type CpuIncompatible81EDX struct { + CpuIncompatible + + Nx bool `xml:"nx"` + Ffxsr bool `xml:"ffxsr"` + Rdtscp bool `xml:"rdtscp"` + Lm bool `xml:"lm"` + Other bool `xml:"other"` + OtherOnly bool `xml:"otherOnly"` +} + +func init() { + t["CpuIncompatible81EDX"] = reflect.TypeOf((*CpuIncompatible81EDX)(nil)).Elem() +} + +type CpuIncompatible81EDXFault CpuIncompatible81EDX + +func init() { + t["CpuIncompatible81EDXFault"] = reflect.TypeOf((*CpuIncompatible81EDXFault)(nil)).Elem() +} + +type CpuIncompatibleFault BaseCpuIncompatible + +func init() { + t["CpuIncompatibleFault"] = 
reflect.TypeOf((*CpuIncompatibleFault)(nil)).Elem() +} + +type CreateAlarm CreateAlarmRequestType + +func init() { + t["CreateAlarm"] = reflect.TypeOf((*CreateAlarm)(nil)).Elem() +} + +type CreateAlarmRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + Spec BaseAlarmSpec `xml:"spec,typeattr"` +} + +func init() { + t["CreateAlarmRequestType"] = reflect.TypeOf((*CreateAlarmRequestType)(nil)).Elem() +} + +type CreateAlarmResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateChildVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config VirtualMachineConfigSpec `xml:"config"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["CreateChildVMRequestType"] = reflect.TypeOf((*CreateChildVMRequestType)(nil)).Elem() +} + +type CreateChildVM_Task CreateChildVMRequestType + +func init() { + t["CreateChildVM_Task"] = reflect.TypeOf((*CreateChildVM_Task)(nil)).Elem() +} + +type CreateChildVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateCluster CreateClusterRequestType + +func init() { + t["CreateCluster"] = reflect.TypeOf((*CreateCluster)(nil)).Elem() +} + +type CreateClusterEx CreateClusterExRequestType + +func init() { + t["CreateClusterEx"] = reflect.TypeOf((*CreateClusterEx)(nil)).Elem() +} + +type CreateClusterExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Spec ClusterConfigSpecEx `xml:"spec"` +} + +func init() { + t["CreateClusterExRequestType"] = reflect.TypeOf((*CreateClusterExRequestType)(nil)).Elem() +} + +type CreateClusterExResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Spec ClusterConfigSpec `xml:"spec"` +} + +func init() { + t["CreateClusterRequestType"] = reflect.TypeOf((*CreateClusterRequestType)(nil)).Elem() +} + +type CreateClusterResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateCollectorForEvents CreateCollectorForEventsRequestType + +func init() { + t["CreateCollectorForEvents"] = reflect.TypeOf((*CreateCollectorForEvents)(nil)).Elem() +} + +type CreateCollectorForEventsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Filter EventFilterSpec `xml:"filter"` +} + +func init() { + t["CreateCollectorForEventsRequestType"] = reflect.TypeOf((*CreateCollectorForEventsRequestType)(nil)).Elem() +} + +type CreateCollectorForEventsResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateCollectorForTasks CreateCollectorForTasksRequestType + +func init() { + t["CreateCollectorForTasks"] = reflect.TypeOf((*CreateCollectorForTasks)(nil)).Elem() +} + +type CreateCollectorForTasksRequestType struct { + This ManagedObjectReference `xml:"_this"` + Filter TaskFilterSpec `xml:"filter"` +} + +func init() { + t["CreateCollectorForTasksRequestType"] = reflect.TypeOf((*CreateCollectorForTasksRequestType)(nil)).Elem() +} + +type CreateCollectorForTasksResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateContainerView CreateContainerViewRequestType + +func init() { + t["CreateContainerView"] = reflect.TypeOf((*CreateContainerView)(nil)).Elem() +} + +type CreateContainerViewRequestType struct { + This ManagedObjectReference `xml:"_this"` + Container ManagedObjectReference `xml:"container"` + Type []string 
`xml:"type,omitempty"` + Recursive bool `xml:"recursive"` +} + +func init() { + t["CreateContainerViewRequestType"] = reflect.TypeOf((*CreateContainerViewRequestType)(nil)).Elem() +} + +type CreateContainerViewResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateCustomizationSpec CreateCustomizationSpecRequestType + +func init() { + t["CreateCustomizationSpec"] = reflect.TypeOf((*CreateCustomizationSpec)(nil)).Elem() +} + +type CreateCustomizationSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + Item CustomizationSpecItem `xml:"item"` +} + +func init() { + t["CreateCustomizationSpecRequestType"] = reflect.TypeOf((*CreateCustomizationSpecRequestType)(nil)).Elem() +} + +type CreateCustomizationSpecResponse struct { +} + +type CreateDVPortgroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec DVPortgroupConfigSpec `xml:"spec"` +} + +func init() { + t["CreateDVPortgroupRequestType"] = reflect.TypeOf((*CreateDVPortgroupRequestType)(nil)).Elem() +} + +type CreateDVPortgroup_Task CreateDVPortgroupRequestType + +func init() { + t["CreateDVPortgroup_Task"] = reflect.TypeOf((*CreateDVPortgroup_Task)(nil)).Elem() +} + +type CreateDVPortgroup_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateDVSRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec DVSCreateSpec `xml:"spec"` +} + +func init() { + t["CreateDVSRequestType"] = reflect.TypeOf((*CreateDVSRequestType)(nil)).Elem() +} + +type CreateDVS_Task CreateDVSRequestType + +func init() { + t["CreateDVS_Task"] = reflect.TypeOf((*CreateDVS_Task)(nil)).Elem() +} + +type CreateDVS_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateDatacenter CreateDatacenterRequestType + +func init() { + t["CreateDatacenter"] = reflect.TypeOf((*CreateDatacenter)(nil)).Elem() +} + +type CreateDatacenterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` +} + +func init() { + t["CreateDatacenterRequestType"] = reflect.TypeOf((*CreateDatacenterRequestType)(nil)).Elem() +} + +type CreateDatacenterResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateDefaultProfile CreateDefaultProfileRequestType + +func init() { + t["CreateDefaultProfile"] = reflect.TypeOf((*CreateDefaultProfile)(nil)).Elem() +} + +type CreateDefaultProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + ProfileType string `xml:"profileType"` + ProfileTypeName string `xml:"profileTypeName,omitempty"` + Profile *ManagedObjectReference `xml:"profile,omitempty"` +} + +func init() { + t["CreateDefaultProfileRequestType"] = reflect.TypeOf((*CreateDefaultProfileRequestType)(nil)).Elem() +} + +type CreateDefaultProfileResponse struct { + Returnval BaseApplyProfile `xml:"returnval,typeattr"` +} + +type CreateDescriptor CreateDescriptorRequestType + +func init() { + t["CreateDescriptor"] = reflect.TypeOf((*CreateDescriptor)(nil)).Elem() +} + +type CreateDescriptorRequestType struct { + This ManagedObjectReference `xml:"_this"` + Obj ManagedObjectReference `xml:"obj"` + Cdp OvfCreateDescriptorParams `xml:"cdp"` +} + +func init() { + t["CreateDescriptorRequestType"] = reflect.TypeOf((*CreateDescriptorRequestType)(nil)).Elem() +} + +type CreateDescriptorResponse struct { + Returnval OvfCreateDescriptorResult `xml:"returnval"` +} + +type CreateDiagnosticPartition CreateDiagnosticPartitionRequestType + +func init() { + t["CreateDiagnosticPartition"] = 
reflect.TypeOf((*CreateDiagnosticPartition)(nil)).Elem() +} + +type CreateDiagnosticPartitionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostDiagnosticPartitionCreateSpec `xml:"spec"` +} + +func init() { + t["CreateDiagnosticPartitionRequestType"] = reflect.TypeOf((*CreateDiagnosticPartitionRequestType)(nil)).Elem() +} + +type CreateDiagnosticPartitionResponse struct { +} + +type CreateDirectory CreateDirectoryRequestType + +func init() { + t["CreateDirectory"] = reflect.TypeOf((*CreateDirectory)(nil)).Elem() +} + +type CreateDirectoryRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` + DisplayName string `xml:"displayName,omitempty"` + Policy string `xml:"policy,omitempty"` +} + +func init() { + t["CreateDirectoryRequestType"] = reflect.TypeOf((*CreateDirectoryRequestType)(nil)).Elem() +} + +type CreateDirectoryResponse struct { + Returnval string `xml:"returnval"` +} + +type CreateDiskFromSnapshotRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + SnapshotId ID `xml:"snapshotId"` + Name string `xml:"name"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr"` + Path string `xml:"path,omitempty"` +} + +func init() { + t["CreateDiskFromSnapshotRequestType"] = reflect.TypeOf((*CreateDiskFromSnapshotRequestType)(nil)).Elem() +} + +type CreateDiskFromSnapshot_Task CreateDiskFromSnapshotRequestType + +func init() { + t["CreateDiskFromSnapshot_Task"] = reflect.TypeOf((*CreateDiskFromSnapshot_Task)(nil)).Elem() +} + +type CreateDiskFromSnapshot_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec VslmCreateSpec `xml:"spec"` +} + +func init() { + t["CreateDiskRequestType"] = reflect.TypeOf((*CreateDiskRequestType)(nil)).Elem() +} + +type CreateDisk_Task CreateDiskRequestType + +func init() { + t["CreateDisk_Task"] = reflect.TypeOf((*CreateDisk_Task)(nil)).Elem() +} + +type CreateDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateFilter CreateFilterRequestType + +func init() { + t["CreateFilter"] = reflect.TypeOf((*CreateFilter)(nil)).Elem() +} + +type CreateFilterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec PropertyFilterSpec `xml:"spec"` + PartialUpdates bool `xml:"partialUpdates"` +} + +func init() { + t["CreateFilterRequestType"] = reflect.TypeOf((*CreateFilterRequestType)(nil)).Elem() +} + +type CreateFilterResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateFolder CreateFolderRequestType + +func init() { + t["CreateFolder"] = reflect.TypeOf((*CreateFolder)(nil)).Elem() +} + +type CreateFolderRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` +} + +func init() { + t["CreateFolderRequestType"] = reflect.TypeOf((*CreateFolderRequestType)(nil)).Elem() +} + +type CreateFolderResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateGroup CreateGroupRequestType + +func init() { + t["CreateGroup"] = reflect.TypeOf((*CreateGroup)(nil)).Elem() +} + +type CreateGroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + Group BaseHostAccountSpec `xml:"group,typeattr"` +} + +func init() { + t["CreateGroupRequestType"] = 
reflect.TypeOf((*CreateGroupRequestType)(nil)).Elem() +} + +type CreateGroupResponse struct { +} + +type CreateImportSpec CreateImportSpecRequestType + +func init() { + t["CreateImportSpec"] = reflect.TypeOf((*CreateImportSpec)(nil)).Elem() +} + +type CreateImportSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + OvfDescriptor string `xml:"ovfDescriptor"` + ResourcePool ManagedObjectReference `xml:"resourcePool"` + Datastore ManagedObjectReference `xml:"datastore"` + Cisp OvfCreateImportSpecParams `xml:"cisp"` +} + +func init() { + t["CreateImportSpecRequestType"] = reflect.TypeOf((*CreateImportSpecRequestType)(nil)).Elem() +} + +type CreateImportSpecResponse struct { + Returnval OvfCreateImportSpecResult `xml:"returnval"` +} + +type CreateInventoryView CreateInventoryViewRequestType + +func init() { + t["CreateInventoryView"] = reflect.TypeOf((*CreateInventoryView)(nil)).Elem() +} + +type CreateInventoryViewRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["CreateInventoryViewRequestType"] = reflect.TypeOf((*CreateInventoryViewRequestType)(nil)).Elem() +} + +type CreateInventoryViewResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateIpPool CreateIpPoolRequestType + +func init() { + t["CreateIpPool"] = reflect.TypeOf((*CreateIpPool)(nil)).Elem() +} + +type CreateIpPoolRequestType struct { + This ManagedObjectReference `xml:"_this"` + Dc ManagedObjectReference `xml:"dc"` + Pool IpPool `xml:"pool"` +} + +func init() { + t["CreateIpPoolRequestType"] = reflect.TypeOf((*CreateIpPoolRequestType)(nil)).Elem() +} + +type CreateIpPoolResponse struct { + Returnval int32 `xml:"returnval"` +} + +type CreateListView CreateListViewRequestType + +func init() { + t["CreateListView"] = reflect.TypeOf((*CreateListView)(nil)).Elem() +} + +type CreateListViewFromView CreateListViewFromViewRequestType + +func init() { + t["CreateListViewFromView"] = reflect.TypeOf((*CreateListViewFromView)(nil)).Elem() +} + +type CreateListViewFromViewRequestType struct { + This ManagedObjectReference `xml:"_this"` + View ManagedObjectReference `xml:"view"` +} + +func init() { + t["CreateListViewFromViewRequestType"] = reflect.TypeOf((*CreateListViewFromViewRequestType)(nil)).Elem() +} + +type CreateListViewFromViewResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateListViewRequestType struct { + This ManagedObjectReference `xml:"_this"` + Obj []ManagedObjectReference `xml:"obj,omitempty"` +} + +func init() { + t["CreateListViewRequestType"] = reflect.TypeOf((*CreateListViewRequestType)(nil)).Elem() +} + +type CreateListViewResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateLocalDatastore CreateLocalDatastoreRequestType + +func init() { + t["CreateLocalDatastore"] = reflect.TypeOf((*CreateLocalDatastore)(nil)).Elem() +} + +type CreateLocalDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Path string `xml:"path"` +} + +func init() { + t["CreateLocalDatastoreRequestType"] = reflect.TypeOf((*CreateLocalDatastoreRequestType)(nil)).Elem() +} + +type CreateLocalDatastoreResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateNasDatastore CreateNasDatastoreRequestType + +func init() { + t["CreateNasDatastore"] = reflect.TypeOf((*CreateNasDatastore)(nil)).Elem() +} + +type CreateNasDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostNasVolumeSpec 
`xml:"spec"` +} + +func init() { + t["CreateNasDatastoreRequestType"] = reflect.TypeOf((*CreateNasDatastoreRequestType)(nil)).Elem() +} + +type CreateNasDatastoreResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateNvdimmNamespaceRequestType struct { + This ManagedObjectReference `xml:"_this"` + CreateSpec NvdimmNamespaceCreateSpec `xml:"createSpec"` +} + +func init() { + t["CreateNvdimmNamespaceRequestType"] = reflect.TypeOf((*CreateNvdimmNamespaceRequestType)(nil)).Elem() +} + +type CreateNvdimmNamespace_Task CreateNvdimmNamespaceRequestType + +func init() { + t["CreateNvdimmNamespace_Task"] = reflect.TypeOf((*CreateNvdimmNamespace_Task)(nil)).Elem() +} + +type CreateNvdimmNamespace_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateObjectScheduledTask CreateObjectScheduledTaskRequestType + +func init() { + t["CreateObjectScheduledTask"] = reflect.TypeOf((*CreateObjectScheduledTask)(nil)).Elem() +} + +type CreateObjectScheduledTaskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Obj ManagedObjectReference `xml:"obj"` + Spec BaseScheduledTaskSpec `xml:"spec,typeattr"` +} + +func init() { + t["CreateObjectScheduledTaskRequestType"] = reflect.TypeOf((*CreateObjectScheduledTaskRequestType)(nil)).Elem() +} + +type CreateObjectScheduledTaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreatePerfInterval CreatePerfIntervalRequestType + +func init() { + t["CreatePerfInterval"] = reflect.TypeOf((*CreatePerfInterval)(nil)).Elem() +} + +type CreatePerfIntervalRequestType struct { + This ManagedObjectReference `xml:"_this"` + IntervalId PerfInterval `xml:"intervalId"` +} + +func init() { + t["CreatePerfIntervalRequestType"] = reflect.TypeOf((*CreatePerfIntervalRequestType)(nil)).Elem() +} + +type CreatePerfIntervalResponse struct { +} + +type CreateProfile CreateProfileRequestType + +func init() { + t["CreateProfile"] = reflect.TypeOf((*CreateProfile)(nil)).Elem() +} + +type CreateProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + CreateSpec BaseProfileCreateSpec `xml:"createSpec,typeattr"` +} + +func init() { + t["CreateProfileRequestType"] = reflect.TypeOf((*CreateProfileRequestType)(nil)).Elem() +} + +type CreateProfileResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreatePropertyCollector CreatePropertyCollectorRequestType + +func init() { + t["CreatePropertyCollector"] = reflect.TypeOf((*CreatePropertyCollector)(nil)).Elem() +} + +type CreatePropertyCollectorRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["CreatePropertyCollectorRequestType"] = reflect.TypeOf((*CreatePropertyCollectorRequestType)(nil)).Elem() +} + +type CreatePropertyCollectorResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateRegistryKeyInGuest CreateRegistryKeyInGuestRequestType + +func init() { + t["CreateRegistryKeyInGuest"] = reflect.TypeOf((*CreateRegistryKeyInGuest)(nil)).Elem() +} + +type CreateRegistryKeyInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + KeyName GuestRegKeyNameSpec `xml:"keyName"` + IsVolatile bool `xml:"isVolatile"` + ClassType string `xml:"classType,omitempty"` +} + +func init() { + t["CreateRegistryKeyInGuestRequestType"] = reflect.TypeOf((*CreateRegistryKeyInGuestRequestType)(nil)).Elem() +} + +type 
CreateRegistryKeyInGuestResponse struct { +} + +type CreateResourcePool CreateResourcePoolRequestType + +func init() { + t["CreateResourcePool"] = reflect.TypeOf((*CreateResourcePool)(nil)).Elem() +} + +type CreateResourcePoolRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Spec ResourceConfigSpec `xml:"spec"` +} + +func init() { + t["CreateResourcePoolRequestType"] = reflect.TypeOf((*CreateResourcePoolRequestType)(nil)).Elem() +} + +type CreateResourcePoolResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateScheduledTask CreateScheduledTaskRequestType + +func init() { + t["CreateScheduledTask"] = reflect.TypeOf((*CreateScheduledTask)(nil)).Elem() +} + +type CreateScheduledTaskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + Spec BaseScheduledTaskSpec `xml:"spec,typeattr"` +} + +func init() { + t["CreateScheduledTaskRequestType"] = reflect.TypeOf((*CreateScheduledTaskRequestType)(nil)).Elem() +} + +type CreateScheduledTaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateScreenshotRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["CreateScreenshotRequestType"] = reflect.TypeOf((*CreateScreenshotRequestType)(nil)).Elem() +} + +type CreateScreenshot_Task CreateScreenshotRequestType + +func init() { + t["CreateScreenshot_Task"] = reflect.TypeOf((*CreateScreenshot_Task)(nil)).Elem() +} + +type CreateScreenshot_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateSecondaryVMExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Spec *FaultToleranceConfigSpec `xml:"spec,omitempty"` +} + +func init() { + t["CreateSecondaryVMExRequestType"] = reflect.TypeOf((*CreateSecondaryVMExRequestType)(nil)).Elem() +} + +type CreateSecondaryVMEx_Task CreateSecondaryVMExRequestType + +func init() { + t["CreateSecondaryVMEx_Task"] = reflect.TypeOf((*CreateSecondaryVMEx_Task)(nil)).Elem() +} + +type CreateSecondaryVMEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateSecondaryVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["CreateSecondaryVMRequestType"] = reflect.TypeOf((*CreateSecondaryVMRequestType)(nil)).Elem() +} + +type CreateSecondaryVM_Task CreateSecondaryVMRequestType + +func init() { + t["CreateSecondaryVM_Task"] = reflect.TypeOf((*CreateSecondaryVM_Task)(nil)).Elem() +} + +type CreateSecondaryVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateSnapshotExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Description string `xml:"description,omitempty"` + Memory bool `xml:"memory"` + QuiesceSpec BaseVirtualMachineGuestQuiesceSpec `xml:"quiesceSpec,omitempty,typeattr"` +} + +func init() { + t["CreateSnapshotExRequestType"] = reflect.TypeOf((*CreateSnapshotExRequestType)(nil)).Elem() +} + +type CreateSnapshotEx_Task CreateSnapshotExRequestType + +func init() { + t["CreateSnapshotEx_Task"] = reflect.TypeOf((*CreateSnapshotEx_Task)(nil)).Elem() +} + +type CreateSnapshotEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateSnapshotRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + 
Description string `xml:"description,omitempty"` + Memory bool `xml:"memory"` + Quiesce bool `xml:"quiesce"` +} + +func init() { + t["CreateSnapshotRequestType"] = reflect.TypeOf((*CreateSnapshotRequestType)(nil)).Elem() +} + +type CreateSnapshot_Task CreateSnapshotRequestType + +func init() { + t["CreateSnapshot_Task"] = reflect.TypeOf((*CreateSnapshot_Task)(nil)).Elem() +} + +type CreateSnapshot_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateStoragePod CreateStoragePodRequestType + +func init() { + t["CreateStoragePod"] = reflect.TypeOf((*CreateStoragePod)(nil)).Elem() +} + +type CreateStoragePodRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` +} + +func init() { + t["CreateStoragePodRequestType"] = reflect.TypeOf((*CreateStoragePodRequestType)(nil)).Elem() +} + +type CreateStoragePodResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateTask CreateTaskRequestType + +func init() { + t["CreateTask"] = reflect.TypeOf((*CreateTask)(nil)).Elem() +} + +type CreateTaskAction struct { + Action + + TaskTypeId string `xml:"taskTypeId"` + Cancelable bool `xml:"cancelable"` +} + +func init() { + t["CreateTaskAction"] = reflect.TypeOf((*CreateTaskAction)(nil)).Elem() +} + +type CreateTaskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Obj ManagedObjectReference `xml:"obj"` + TaskTypeId string `xml:"taskTypeId"` + InitiatedBy string `xml:"initiatedBy,omitempty"` + Cancelable bool `xml:"cancelable"` + ParentTaskKey string `xml:"parentTaskKey,omitempty"` + ActivationId string `xml:"activationId,omitempty"` +} + +func init() { + t["CreateTaskRequestType"] = reflect.TypeOf((*CreateTaskRequestType)(nil)).Elem() +} + +type CreateTaskResponse struct { + Returnval TaskInfo `xml:"returnval"` +} + +type CreateTemporaryDirectoryInGuest CreateTemporaryDirectoryInGuestRequestType + +func init() { + t["CreateTemporaryDirectoryInGuest"] = reflect.TypeOf((*CreateTemporaryDirectoryInGuest)(nil)).Elem() +} + +type CreateTemporaryDirectoryInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Prefix string `xml:"prefix"` + Suffix string `xml:"suffix"` + DirectoryPath string `xml:"directoryPath,omitempty"` +} + +func init() { + t["CreateTemporaryDirectoryInGuestRequestType"] = reflect.TypeOf((*CreateTemporaryDirectoryInGuestRequestType)(nil)).Elem() +} + +type CreateTemporaryDirectoryInGuestResponse struct { + Returnval string `xml:"returnval"` +} + +type CreateTemporaryFileInGuest CreateTemporaryFileInGuestRequestType + +func init() { + t["CreateTemporaryFileInGuest"] = reflect.TypeOf((*CreateTemporaryFileInGuest)(nil)).Elem() +} + +type CreateTemporaryFileInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Prefix string `xml:"prefix"` + Suffix string `xml:"suffix"` + DirectoryPath string `xml:"directoryPath,omitempty"` +} + +func init() { + t["CreateTemporaryFileInGuestRequestType"] = reflect.TypeOf((*CreateTemporaryFileInGuestRequestType)(nil)).Elem() +} + +type CreateTemporaryFileInGuestResponse struct { + Returnval string `xml:"returnval"` +} + +type CreateUser CreateUserRequestType + +func init() { + t["CreateUser"] = reflect.TypeOf((*CreateUser)(nil)).Elem() +} + +type CreateUserRequestType struct { + This ManagedObjectReference `xml:"_this"` + User 
BaseHostAccountSpec `xml:"user,typeattr"` +} + +func init() { + t["CreateUserRequestType"] = reflect.TypeOf((*CreateUserRequestType)(nil)).Elem() +} + +type CreateUserResponse struct { +} + +type CreateVApp CreateVAppRequestType + +func init() { + t["CreateVApp"] = reflect.TypeOf((*CreateVApp)(nil)).Elem() +} + +type CreateVAppRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + ResSpec ResourceConfigSpec `xml:"resSpec"` + ConfigSpec VAppConfigSpec `xml:"configSpec"` + VmFolder *ManagedObjectReference `xml:"vmFolder,omitempty"` +} + +func init() { + t["CreateVAppRequestType"] = reflect.TypeOf((*CreateVAppRequestType)(nil)).Elem() +} + +type CreateVAppResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config VirtualMachineConfigSpec `xml:"config"` + Pool ManagedObjectReference `xml:"pool"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["CreateVMRequestType"] = reflect.TypeOf((*CreateVMRequestType)(nil)).Elem() +} + +type CreateVM_Task CreateVMRequestType + +func init() { + t["CreateVM_Task"] = reflect.TypeOf((*CreateVM_Task)(nil)).Elem() +} + +type CreateVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateVirtualDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + Spec BaseVirtualDiskSpec `xml:"spec,typeattr"` +} + +func init() { + t["CreateVirtualDiskRequestType"] = reflect.TypeOf((*CreateVirtualDiskRequestType)(nil)).Elem() +} + +type CreateVirtualDisk_Task CreateVirtualDiskRequestType + +func init() { + t["CreateVirtualDisk_Task"] = reflect.TypeOf((*CreateVirtualDisk_Task)(nil)).Elem() +} + +type CreateVirtualDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateVmfsDatastore CreateVmfsDatastoreRequestType + +func init() { + t["CreateVmfsDatastore"] = reflect.TypeOf((*CreateVmfsDatastore)(nil)).Elem() +} + +type CreateVmfsDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec VmfsDatastoreCreateSpec `xml:"spec"` +} + +func init() { + t["CreateVmfsDatastoreRequestType"] = reflect.TypeOf((*CreateVmfsDatastoreRequestType)(nil)).Elem() +} + +type CreateVmfsDatastoreResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateVvolDatastore CreateVvolDatastoreRequestType + +func init() { + t["CreateVvolDatastore"] = reflect.TypeOf((*CreateVvolDatastore)(nil)).Elem() +} + +type CreateVvolDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostDatastoreSystemVvolDatastoreSpec `xml:"spec"` +} + +func init() { + t["CreateVvolDatastoreRequestType"] = reflect.TypeOf((*CreateVvolDatastoreRequestType)(nil)).Elem() +} + +type CreateVvolDatastoreResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CryptoKeyId struct { + DynamicData + + KeyId string `xml:"keyId"` + ProviderId *KeyProviderId `xml:"providerId,omitempty"` +} + +func init() { + t["CryptoKeyId"] = reflect.TypeOf((*CryptoKeyId)(nil)).Elem() +} + +type CryptoKeyPlain struct { + DynamicData + + KeyId CryptoKeyId `xml:"keyId"` + Algorithm string `xml:"algorithm"` + KeyData string `xml:"keyData"` +} + +func init() { + t["CryptoKeyPlain"] = reflect.TypeOf((*CryptoKeyPlain)(nil)).Elem() +} + +type CryptoKeyResult struct { + DynamicData + + KeyId CryptoKeyId `xml:"keyId"` + 
Success bool `xml:"success"` + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["CryptoKeyResult"] = reflect.TypeOf((*CryptoKeyResult)(nil)).Elem() +} + +type CryptoManagerHostEnable CryptoManagerHostEnableRequestType + +func init() { + t["CryptoManagerHostEnable"] = reflect.TypeOf((*CryptoManagerHostEnable)(nil)).Elem() +} + +type CryptoManagerHostEnableRequestType struct { + This ManagedObjectReference `xml:"_this"` + InitialKey CryptoKeyPlain `xml:"initialKey"` +} + +func init() { + t["CryptoManagerHostEnableRequestType"] = reflect.TypeOf((*CryptoManagerHostEnableRequestType)(nil)).Elem() +} + +type CryptoManagerHostEnableResponse struct { +} + +type CryptoManagerHostPrepare CryptoManagerHostPrepareRequestType + +func init() { + t["CryptoManagerHostPrepare"] = reflect.TypeOf((*CryptoManagerHostPrepare)(nil)).Elem() +} + +type CryptoManagerHostPrepareRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["CryptoManagerHostPrepareRequestType"] = reflect.TypeOf((*CryptoManagerHostPrepareRequestType)(nil)).Elem() +} + +type CryptoManagerHostPrepareResponse struct { +} + +type CryptoManagerKmipCertificateInfo struct { + DynamicData + + Subject string `xml:"subject"` + Issuer string `xml:"issuer"` + SerialNumber string `xml:"serialNumber"` + NotBefore time.Time `xml:"notBefore"` + NotAfter time.Time `xml:"notAfter"` + Fingerprint string `xml:"fingerprint"` + CheckTime time.Time `xml:"checkTime"` + SecondsSinceValid int32 `xml:"secondsSinceValid,omitempty"` + SecondsBeforeExpire int32 `xml:"secondsBeforeExpire,omitempty"` +} + +func init() { + t["CryptoManagerKmipCertificateInfo"] = reflect.TypeOf((*CryptoManagerKmipCertificateInfo)(nil)).Elem() +} + +type CryptoManagerKmipClusterStatus struct { + DynamicData + + ClusterId KeyProviderId `xml:"clusterId"` + Servers []CryptoManagerKmipServerStatus `xml:"servers"` + ClientCertInfo *CryptoManagerKmipCertificateInfo `xml:"clientCertInfo,omitempty"` +} + +func init() { + t["CryptoManagerKmipClusterStatus"] = reflect.TypeOf((*CryptoManagerKmipClusterStatus)(nil)).Elem() +} + +type CryptoManagerKmipServerCertInfo struct { + DynamicData + + Certificate string `xml:"certificate"` + CertInfo *CryptoManagerKmipCertificateInfo `xml:"certInfo,omitempty"` + ClientTrustServer *bool `xml:"clientTrustServer"` +} + +func init() { + t["CryptoManagerKmipServerCertInfo"] = reflect.TypeOf((*CryptoManagerKmipServerCertInfo)(nil)).Elem() +} + +type CryptoManagerKmipServerStatus struct { + DynamicData + + Name string `xml:"name"` + Status ManagedEntityStatus `xml:"status"` + ConnectionStatus string `xml:"connectionStatus"` + CertInfo *CryptoManagerKmipCertificateInfo `xml:"certInfo,omitempty"` + ClientTrustServer *bool `xml:"clientTrustServer"` + ServerTrustClient *bool `xml:"serverTrustClient"` +} + +func init() { + t["CryptoManagerKmipServerStatus"] = reflect.TypeOf((*CryptoManagerKmipServerStatus)(nil)).Elem() +} + +type CryptoSpec struct { + DynamicData +} + +func init() { + t["CryptoSpec"] = reflect.TypeOf((*CryptoSpec)(nil)).Elem() +} + +type CryptoSpecDecrypt struct { + CryptoSpec +} + +func init() { + t["CryptoSpecDecrypt"] = reflect.TypeOf((*CryptoSpecDecrypt)(nil)).Elem() +} + +type CryptoSpecDeepRecrypt struct { + CryptoSpec + + NewKeyId CryptoKeyId `xml:"newKeyId"` +} + +func init() { + t["CryptoSpecDeepRecrypt"] = reflect.TypeOf((*CryptoSpecDeepRecrypt)(nil)).Elem() +} + +type CryptoSpecEncrypt struct { + CryptoSpec + + CryptoKeyId CryptoKeyId `xml:"cryptoKeyId"` +} + +func init() { + t["CryptoSpecEncrypt"] = 
reflect.TypeOf((*CryptoSpecEncrypt)(nil)).Elem() +} + +type CryptoSpecNoOp struct { + CryptoSpec +} + +func init() { + t["CryptoSpecNoOp"] = reflect.TypeOf((*CryptoSpecNoOp)(nil)).Elem() +} + +type CryptoSpecRegister struct { + CryptoSpecNoOp + + CryptoKeyId CryptoKeyId `xml:"cryptoKeyId"` +} + +func init() { + t["CryptoSpecRegister"] = reflect.TypeOf((*CryptoSpecRegister)(nil)).Elem() +} + +type CryptoSpecShallowRecrypt struct { + CryptoSpec + + NewKeyId CryptoKeyId `xml:"newKeyId"` +} + +func init() { + t["CryptoSpecShallowRecrypt"] = reflect.TypeOf((*CryptoSpecShallowRecrypt)(nil)).Elem() +} + +type CryptoUnlockRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["CryptoUnlockRequestType"] = reflect.TypeOf((*CryptoUnlockRequestType)(nil)).Elem() +} + +type CryptoUnlock_Task CryptoUnlockRequestType + +func init() { + t["CryptoUnlock_Task"] = reflect.TypeOf((*CryptoUnlock_Task)(nil)).Elem() +} + +type CryptoUnlock_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CurrentTime CurrentTimeRequestType + +func init() { + t["CurrentTime"] = reflect.TypeOf((*CurrentTime)(nil)).Elem() +} + +type CurrentTimeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["CurrentTimeRequestType"] = reflect.TypeOf((*CurrentTimeRequestType)(nil)).Elem() +} + +type CurrentTimeResponse struct { + Returnval time.Time `xml:"returnval"` +} + +type CustomFieldDef struct { + DynamicData + + Key int32 `xml:"key"` + Name string `xml:"name"` + Type string `xml:"type"` + ManagedObjectType string `xml:"managedObjectType,omitempty"` + FieldDefPrivileges *PrivilegePolicyDef `xml:"fieldDefPrivileges,omitempty"` + FieldInstancePrivileges *PrivilegePolicyDef `xml:"fieldInstancePrivileges,omitempty"` +} + +func init() { + t["CustomFieldDef"] = reflect.TypeOf((*CustomFieldDef)(nil)).Elem() +} + +type CustomFieldDefAddedEvent struct { + CustomFieldDefEvent +} + +func init() { + t["CustomFieldDefAddedEvent"] = reflect.TypeOf((*CustomFieldDefAddedEvent)(nil)).Elem() +} + +type CustomFieldDefEvent struct { + CustomFieldEvent + + FieldKey int32 `xml:"fieldKey"` + Name string `xml:"name"` +} + +func init() { + t["CustomFieldDefEvent"] = reflect.TypeOf((*CustomFieldDefEvent)(nil)).Elem() +} + +type CustomFieldDefRemovedEvent struct { + CustomFieldDefEvent +} + +func init() { + t["CustomFieldDefRemovedEvent"] = reflect.TypeOf((*CustomFieldDefRemovedEvent)(nil)).Elem() +} + +type CustomFieldDefRenamedEvent struct { + CustomFieldDefEvent + + NewName string `xml:"newName"` +} + +func init() { + t["CustomFieldDefRenamedEvent"] = reflect.TypeOf((*CustomFieldDefRenamedEvent)(nil)).Elem() +} + +type CustomFieldEvent struct { + Event +} + +func init() { + t["CustomFieldEvent"] = reflect.TypeOf((*CustomFieldEvent)(nil)).Elem() +} + +type CustomFieldStringValue struct { + CustomFieldValue + + Value string `xml:"value"` +} + +func init() { + t["CustomFieldStringValue"] = reflect.TypeOf((*CustomFieldStringValue)(nil)).Elem() +} + +type CustomFieldValue struct { + DynamicData + + Key int32 `xml:"key"` +} + +func init() { + t["CustomFieldValue"] = reflect.TypeOf((*CustomFieldValue)(nil)).Elem() +} + +type CustomFieldValueChangedEvent struct { + CustomFieldEvent + + Entity ManagedEntityEventArgument `xml:"entity"` + FieldKey int32 `xml:"fieldKey"` + Name string `xml:"name"` + Value string `xml:"value"` + PrevState string `xml:"prevState,omitempty"` +} + +func init() { + t["CustomFieldValueChangedEvent"] = 
reflect.TypeOf((*CustomFieldValueChangedEvent)(nil)).Elem() +} + +type CustomizationAdapterMapping struct { + DynamicData + + MacAddress string `xml:"macAddress,omitempty"` + Adapter CustomizationIPSettings `xml:"adapter"` +} + +func init() { + t["CustomizationAdapterMapping"] = reflect.TypeOf((*CustomizationAdapterMapping)(nil)).Elem() +} + +type CustomizationAutoIpV6Generator struct { + CustomizationIpV6Generator +} + +func init() { + t["CustomizationAutoIpV6Generator"] = reflect.TypeOf((*CustomizationAutoIpV6Generator)(nil)).Elem() +} + +type CustomizationCustomIpGenerator struct { + CustomizationIpGenerator + + Argument string `xml:"argument,omitempty"` +} + +func init() { + t["CustomizationCustomIpGenerator"] = reflect.TypeOf((*CustomizationCustomIpGenerator)(nil)).Elem() +} + +type CustomizationCustomIpV6Generator struct { + CustomizationIpV6Generator + + Argument string `xml:"argument,omitempty"` +} + +func init() { + t["CustomizationCustomIpV6Generator"] = reflect.TypeOf((*CustomizationCustomIpV6Generator)(nil)).Elem() +} + +type CustomizationCustomName struct { + CustomizationName + + Argument string `xml:"argument,omitempty"` +} + +func init() { + t["CustomizationCustomName"] = reflect.TypeOf((*CustomizationCustomName)(nil)).Elem() +} + +type CustomizationDhcpIpGenerator struct { + CustomizationIpGenerator +} + +func init() { + t["CustomizationDhcpIpGenerator"] = reflect.TypeOf((*CustomizationDhcpIpGenerator)(nil)).Elem() +} + +type CustomizationDhcpIpV6Generator struct { + CustomizationIpV6Generator +} + +func init() { + t["CustomizationDhcpIpV6Generator"] = reflect.TypeOf((*CustomizationDhcpIpV6Generator)(nil)).Elem() +} + +type CustomizationEvent struct { + VmEvent + + LogLocation string `xml:"logLocation,omitempty"` +} + +func init() { + t["CustomizationEvent"] = reflect.TypeOf((*CustomizationEvent)(nil)).Elem() +} + +type CustomizationFailed struct { + CustomizationEvent +} + +func init() { + t["CustomizationFailed"] = reflect.TypeOf((*CustomizationFailed)(nil)).Elem() +} + +type CustomizationFault struct { + VimFault +} + +func init() { + t["CustomizationFault"] = reflect.TypeOf((*CustomizationFault)(nil)).Elem() +} + +type CustomizationFaultFault BaseCustomizationFault + +func init() { + t["CustomizationFaultFault"] = reflect.TypeOf((*CustomizationFaultFault)(nil)).Elem() +} + +type CustomizationFixedIp struct { + CustomizationIpGenerator + + IpAddress string `xml:"ipAddress"` +} + +func init() { + t["CustomizationFixedIp"] = reflect.TypeOf((*CustomizationFixedIp)(nil)).Elem() +} + +type CustomizationFixedIpV6 struct { + CustomizationIpV6Generator + + IpAddress string `xml:"ipAddress"` + SubnetMask int32 `xml:"subnetMask"` +} + +func init() { + t["CustomizationFixedIpV6"] = reflect.TypeOf((*CustomizationFixedIpV6)(nil)).Elem() +} + +type CustomizationFixedName struct { + CustomizationName + + Name string `xml:"name"` +} + +func init() { + t["CustomizationFixedName"] = reflect.TypeOf((*CustomizationFixedName)(nil)).Elem() +} + +type CustomizationGlobalIPSettings struct { + DynamicData + + DnsSuffixList []string `xml:"dnsSuffixList,omitempty"` + DnsServerList []string `xml:"dnsServerList,omitempty"` +} + +func init() { + t["CustomizationGlobalIPSettings"] = reflect.TypeOf((*CustomizationGlobalIPSettings)(nil)).Elem() +} + +type CustomizationGuiRunOnce struct { + DynamicData + + CommandList []string `xml:"commandList"` +} + +func init() { + t["CustomizationGuiRunOnce"] = reflect.TypeOf((*CustomizationGuiRunOnce)(nil)).Elem() +} + +type CustomizationGuiUnattended struct { + 
DynamicData + + Password *CustomizationPassword `xml:"password,omitempty"` + TimeZone int32 `xml:"timeZone"` + AutoLogon bool `xml:"autoLogon"` + AutoLogonCount int32 `xml:"autoLogonCount"` +} + +func init() { + t["CustomizationGuiUnattended"] = reflect.TypeOf((*CustomizationGuiUnattended)(nil)).Elem() +} + +type CustomizationIPSettings struct { + DynamicData + + Ip BaseCustomizationIpGenerator `xml:"ip,typeattr"` + SubnetMask string `xml:"subnetMask,omitempty"` + Gateway []string `xml:"gateway,omitempty"` + IpV6Spec *CustomizationIPSettingsIpV6AddressSpec `xml:"ipV6Spec,omitempty"` + DnsServerList []string `xml:"dnsServerList,omitempty"` + DnsDomain string `xml:"dnsDomain,omitempty"` + PrimaryWINS string `xml:"primaryWINS,omitempty"` + SecondaryWINS string `xml:"secondaryWINS,omitempty"` + NetBIOS CustomizationNetBIOSMode `xml:"netBIOS,omitempty"` +} + +func init() { + t["CustomizationIPSettings"] = reflect.TypeOf((*CustomizationIPSettings)(nil)).Elem() +} + +type CustomizationIPSettingsIpV6AddressSpec struct { + DynamicData + + Ip []BaseCustomizationIpV6Generator `xml:"ip,typeattr"` + Gateway []string `xml:"gateway,omitempty"` +} + +func init() { + t["CustomizationIPSettingsIpV6AddressSpec"] = reflect.TypeOf((*CustomizationIPSettingsIpV6AddressSpec)(nil)).Elem() +} + +type CustomizationIdentification struct { + DynamicData + + JoinWorkgroup string `xml:"joinWorkgroup,omitempty"` + JoinDomain string `xml:"joinDomain,omitempty"` + DomainAdmin string `xml:"domainAdmin,omitempty"` + DomainAdminPassword *CustomizationPassword `xml:"domainAdminPassword,omitempty"` +} + +func init() { + t["CustomizationIdentification"] = reflect.TypeOf((*CustomizationIdentification)(nil)).Elem() +} + +type CustomizationIdentitySettings struct { + DynamicData +} + +func init() { + t["CustomizationIdentitySettings"] = reflect.TypeOf((*CustomizationIdentitySettings)(nil)).Elem() +} + +type CustomizationIpGenerator struct { + DynamicData +} + +func init() { + t["CustomizationIpGenerator"] = reflect.TypeOf((*CustomizationIpGenerator)(nil)).Elem() +} + +type CustomizationIpV6Generator struct { + DynamicData +} + +func init() { + t["CustomizationIpV6Generator"] = reflect.TypeOf((*CustomizationIpV6Generator)(nil)).Elem() +} + +type CustomizationLicenseFilePrintData struct { + DynamicData + + AutoMode CustomizationLicenseDataMode `xml:"autoMode"` + AutoUsers int32 `xml:"autoUsers,omitempty"` +} + +func init() { + t["CustomizationLicenseFilePrintData"] = reflect.TypeOf((*CustomizationLicenseFilePrintData)(nil)).Elem() +} + +type CustomizationLinuxIdentityFailed struct { + CustomizationFailed +} + +func init() { + t["CustomizationLinuxIdentityFailed"] = reflect.TypeOf((*CustomizationLinuxIdentityFailed)(nil)).Elem() +} + +type CustomizationLinuxOptions struct { + CustomizationOptions +} + +func init() { + t["CustomizationLinuxOptions"] = reflect.TypeOf((*CustomizationLinuxOptions)(nil)).Elem() +} + +type CustomizationLinuxPrep struct { + CustomizationIdentitySettings + + HostName BaseCustomizationName `xml:"hostName,typeattr"` + Domain string `xml:"domain"` + TimeZone string `xml:"timeZone,omitempty"` + HwClockUTC *bool `xml:"hwClockUTC"` +} + +func init() { + t["CustomizationLinuxPrep"] = reflect.TypeOf((*CustomizationLinuxPrep)(nil)).Elem() +} + +type CustomizationName struct { + DynamicData +} + +func init() { + t["CustomizationName"] = reflect.TypeOf((*CustomizationName)(nil)).Elem() +} + +type CustomizationNetworkSetupFailed struct { + CustomizationFailed +} + +func init() { + t["CustomizationNetworkSetupFailed"] = 
reflect.TypeOf((*CustomizationNetworkSetupFailed)(nil)).Elem() +} + +type CustomizationOptions struct { + DynamicData +} + +func init() { + t["CustomizationOptions"] = reflect.TypeOf((*CustomizationOptions)(nil)).Elem() +} + +type CustomizationPassword struct { + DynamicData + + Value string `xml:"value"` + PlainText bool `xml:"plainText"` +} + +func init() { + t["CustomizationPassword"] = reflect.TypeOf((*CustomizationPassword)(nil)).Elem() +} + +type CustomizationPending struct { + CustomizationFault +} + +func init() { + t["CustomizationPending"] = reflect.TypeOf((*CustomizationPending)(nil)).Elem() +} + +type CustomizationPendingFault CustomizationPending + +func init() { + t["CustomizationPendingFault"] = reflect.TypeOf((*CustomizationPendingFault)(nil)).Elem() +} + +type CustomizationPrefixName struct { + CustomizationName + + Base string `xml:"base"` +} + +func init() { + t["CustomizationPrefixName"] = reflect.TypeOf((*CustomizationPrefixName)(nil)).Elem() +} + +type CustomizationSpec struct { + DynamicData + + Options BaseCustomizationOptions `xml:"options,omitempty,typeattr"` + Identity BaseCustomizationIdentitySettings `xml:"identity,typeattr"` + GlobalIPSettings CustomizationGlobalIPSettings `xml:"globalIPSettings"` + NicSettingMap []CustomizationAdapterMapping `xml:"nicSettingMap,omitempty"` + EncryptionKey []byte `xml:"encryptionKey,omitempty"` +} + +func init() { + t["CustomizationSpec"] = reflect.TypeOf((*CustomizationSpec)(nil)).Elem() +} + +type CustomizationSpecInfo struct { + DynamicData + + Name string `xml:"name"` + Description string `xml:"description"` + Type string `xml:"type"` + ChangeVersion string `xml:"changeVersion,omitempty"` + LastUpdateTime *time.Time `xml:"lastUpdateTime"` +} + +func init() { + t["CustomizationSpecInfo"] = reflect.TypeOf((*CustomizationSpecInfo)(nil)).Elem() +} + +type CustomizationSpecItem struct { + DynamicData + + Info CustomizationSpecInfo `xml:"info"` + Spec CustomizationSpec `xml:"spec"` +} + +func init() { + t["CustomizationSpecItem"] = reflect.TypeOf((*CustomizationSpecItem)(nil)).Elem() +} + +type CustomizationSpecItemToXml CustomizationSpecItemToXmlRequestType + +func init() { + t["CustomizationSpecItemToXml"] = reflect.TypeOf((*CustomizationSpecItemToXml)(nil)).Elem() +} + +type CustomizationSpecItemToXmlRequestType struct { + This ManagedObjectReference `xml:"_this"` + Item CustomizationSpecItem `xml:"item"` +} + +func init() { + t["CustomizationSpecItemToXmlRequestType"] = reflect.TypeOf((*CustomizationSpecItemToXmlRequestType)(nil)).Elem() +} + +type CustomizationSpecItemToXmlResponse struct { + Returnval string `xml:"returnval"` +} + +type CustomizationStartedEvent struct { + CustomizationEvent +} + +func init() { + t["CustomizationStartedEvent"] = reflect.TypeOf((*CustomizationStartedEvent)(nil)).Elem() +} + +type CustomizationStatelessIpV6Generator struct { + CustomizationIpV6Generator +} + +func init() { + t["CustomizationStatelessIpV6Generator"] = reflect.TypeOf((*CustomizationStatelessIpV6Generator)(nil)).Elem() +} + +type CustomizationSucceeded struct { + CustomizationEvent +} + +func init() { + t["CustomizationSucceeded"] = reflect.TypeOf((*CustomizationSucceeded)(nil)).Elem() +} + +type CustomizationSysprep struct { + CustomizationIdentitySettings + + GuiUnattended CustomizationGuiUnattended `xml:"guiUnattended"` + UserData CustomizationUserData `xml:"userData"` + GuiRunOnce *CustomizationGuiRunOnce `xml:"guiRunOnce,omitempty"` + Identification CustomizationIdentification `xml:"identification"` + LicenseFilePrintData 
*CustomizationLicenseFilePrintData `xml:"licenseFilePrintData,omitempty"` +} + +func init() { + t["CustomizationSysprep"] = reflect.TypeOf((*CustomizationSysprep)(nil)).Elem() +} + +type CustomizationSysprepFailed struct { + CustomizationFailed + + SysprepVersion string `xml:"sysprepVersion"` + SystemVersion string `xml:"systemVersion"` +} + +func init() { + t["CustomizationSysprepFailed"] = reflect.TypeOf((*CustomizationSysprepFailed)(nil)).Elem() +} + +type CustomizationSysprepText struct { + CustomizationIdentitySettings + + Value string `xml:"value"` +} + +func init() { + t["CustomizationSysprepText"] = reflect.TypeOf((*CustomizationSysprepText)(nil)).Elem() +} + +type CustomizationUnknownFailure struct { + CustomizationFailed +} + +func init() { + t["CustomizationUnknownFailure"] = reflect.TypeOf((*CustomizationUnknownFailure)(nil)).Elem() +} + +type CustomizationUnknownIpGenerator struct { + CustomizationIpGenerator +} + +func init() { + t["CustomizationUnknownIpGenerator"] = reflect.TypeOf((*CustomizationUnknownIpGenerator)(nil)).Elem() +} + +type CustomizationUnknownIpV6Generator struct { + CustomizationIpV6Generator +} + +func init() { + t["CustomizationUnknownIpV6Generator"] = reflect.TypeOf((*CustomizationUnknownIpV6Generator)(nil)).Elem() +} + +type CustomizationUnknownName struct { + CustomizationName +} + +func init() { + t["CustomizationUnknownName"] = reflect.TypeOf((*CustomizationUnknownName)(nil)).Elem() +} + +type CustomizationUserData struct { + DynamicData + + FullName string `xml:"fullName"` + OrgName string `xml:"orgName"` + ComputerName BaseCustomizationName `xml:"computerName,typeattr"` + ProductId string `xml:"productId"` +} + +func init() { + t["CustomizationUserData"] = reflect.TypeOf((*CustomizationUserData)(nil)).Elem() +} + +type CustomizationVirtualMachineName struct { + CustomizationName +} + +func init() { + t["CustomizationVirtualMachineName"] = reflect.TypeOf((*CustomizationVirtualMachineName)(nil)).Elem() +} + +type CustomizationWinOptions struct { + CustomizationOptions + + ChangeSID bool `xml:"changeSID"` + DeleteAccounts bool `xml:"deleteAccounts"` + Reboot CustomizationSysprepRebootOption `xml:"reboot,omitempty"` +} + +func init() { + t["CustomizationWinOptions"] = reflect.TypeOf((*CustomizationWinOptions)(nil)).Elem() +} + +type CustomizeVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec CustomizationSpec `xml:"spec"` +} + +func init() { + t["CustomizeVMRequestType"] = reflect.TypeOf((*CustomizeVMRequestType)(nil)).Elem() +} + +type CustomizeVM_Task CustomizeVMRequestType + +func init() { + t["CustomizeVM_Task"] = reflect.TypeOf((*CustomizeVM_Task)(nil)).Elem() +} + +type CustomizeVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DVPortConfigInfo struct { + DynamicData + + Name string `xml:"name,omitempty"` + Scope []ManagedObjectReference `xml:"scope,omitempty"` + Description string `xml:"description,omitempty"` + Setting BaseDVPortSetting `xml:"setting,omitempty,typeattr"` + ConfigVersion string `xml:"configVersion"` +} + +func init() { + t["DVPortConfigInfo"] = reflect.TypeOf((*DVPortConfigInfo)(nil)).Elem() +} + +type DVPortConfigSpec struct { + DynamicData + + Operation string `xml:"operation"` + Key string `xml:"key,omitempty"` + Name string `xml:"name,omitempty"` + Scope []ManagedObjectReference `xml:"scope,omitempty"` + Description string `xml:"description,omitempty"` + Setting BaseDVPortSetting `xml:"setting,omitempty,typeattr"` + ConfigVersion string 
`xml:"configVersion,omitempty"` +} + +func init() { + t["DVPortConfigSpec"] = reflect.TypeOf((*DVPortConfigSpec)(nil)).Elem() +} + +type DVPortNotSupported struct { + DeviceBackingNotSupported +} + +func init() { + t["DVPortNotSupported"] = reflect.TypeOf((*DVPortNotSupported)(nil)).Elem() +} + +type DVPortNotSupportedFault DVPortNotSupported + +func init() { + t["DVPortNotSupportedFault"] = reflect.TypeOf((*DVPortNotSupportedFault)(nil)).Elem() +} + +type DVPortSetting struct { + DynamicData + + Blocked *BoolPolicy `xml:"blocked,omitempty"` + VmDirectPathGen2Allowed *BoolPolicy `xml:"vmDirectPathGen2Allowed,omitempty"` + InShapingPolicy *DVSTrafficShapingPolicy `xml:"inShapingPolicy,omitempty"` + OutShapingPolicy *DVSTrafficShapingPolicy `xml:"outShapingPolicy,omitempty"` + VendorSpecificConfig *DVSVendorSpecificConfig `xml:"vendorSpecificConfig,omitempty"` + NetworkResourcePoolKey *StringPolicy `xml:"networkResourcePoolKey,omitempty"` + FilterPolicy *DvsFilterPolicy `xml:"filterPolicy,omitempty"` +} + +func init() { + t["DVPortSetting"] = reflect.TypeOf((*DVPortSetting)(nil)).Elem() +} + +type DVPortState struct { + DynamicData + + RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty"` + Stats DistributedVirtualSwitchPortStatistics `xml:"stats"` + VendorSpecificState []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificState,omitempty"` +} + +func init() { + t["DVPortState"] = reflect.TypeOf((*DVPortState)(nil)).Elem() +} + +type DVPortStatus struct { + DynamicData + + LinkUp bool `xml:"linkUp"` + Blocked bool `xml:"blocked"` + VlanIds []NumericRange `xml:"vlanIds,omitempty"` + TrunkingMode *bool `xml:"trunkingMode"` + Mtu int32 `xml:"mtu,omitempty"` + LinkPeer string `xml:"linkPeer,omitempty"` + MacAddress string `xml:"macAddress,omitempty"` + StatusDetail string `xml:"statusDetail,omitempty"` + VmDirectPathGen2Active *bool `xml:"vmDirectPathGen2Active"` + VmDirectPathGen2InactiveReasonNetwork []string `xml:"vmDirectPathGen2InactiveReasonNetwork,omitempty"` + VmDirectPathGen2InactiveReasonOther []string `xml:"vmDirectPathGen2InactiveReasonOther,omitempty"` + VmDirectPathGen2InactiveReasonExtended string `xml:"vmDirectPathGen2InactiveReasonExtended,omitempty"` +} + +func init() { + t["DVPortStatus"] = reflect.TypeOf((*DVPortStatus)(nil)).Elem() +} + +type DVPortgroupConfigInfo struct { + DynamicData + + Key string `xml:"key"` + Name string `xml:"name"` + NumPorts int32 `xml:"numPorts"` + DistributedVirtualSwitch *ManagedObjectReference `xml:"distributedVirtualSwitch,omitempty"` + DefaultPortConfig BaseDVPortSetting `xml:"defaultPortConfig,omitempty,typeattr"` + Description string `xml:"description,omitempty"` + Type string `xml:"type"` + Policy BaseDVPortgroupPolicy `xml:"policy,typeattr"` + PortNameFormat string `xml:"portNameFormat,omitempty"` + Scope []ManagedObjectReference `xml:"scope,omitempty"` + VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"` + ConfigVersion string `xml:"configVersion,omitempty"` + AutoExpand *bool `xml:"autoExpand"` + VmVnicNetworkResourcePoolKey string `xml:"vmVnicNetworkResourcePoolKey,omitempty"` + Uplink *bool `xml:"uplink"` +} + +func init() { + t["DVPortgroupConfigInfo"] = reflect.TypeOf((*DVPortgroupConfigInfo)(nil)).Elem() +} + +type DVPortgroupConfigSpec struct { + DynamicData + + ConfigVersion string `xml:"configVersion,omitempty"` + Name string `xml:"name,omitempty"` + NumPorts int32 `xml:"numPorts,omitempty"` + PortNameFormat string `xml:"portNameFormat,omitempty"` + DefaultPortConfig 
BaseDVPortSetting `xml:"defaultPortConfig,omitempty,typeattr"` + Description string `xml:"description,omitempty"` + Type string `xml:"type,omitempty"` + Scope []ManagedObjectReference `xml:"scope,omitempty"` + Policy BaseDVPortgroupPolicy `xml:"policy,omitempty,typeattr"` + VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"` + AutoExpand *bool `xml:"autoExpand"` + VmVnicNetworkResourcePoolKey string `xml:"vmVnicNetworkResourcePoolKey,omitempty"` +} + +func init() { + t["DVPortgroupConfigSpec"] = reflect.TypeOf((*DVPortgroupConfigSpec)(nil)).Elem() +} + +type DVPortgroupCreatedEvent struct { + DVPortgroupEvent +} + +func init() { + t["DVPortgroupCreatedEvent"] = reflect.TypeOf((*DVPortgroupCreatedEvent)(nil)).Elem() +} + +type DVPortgroupDestroyedEvent struct { + DVPortgroupEvent +} + +func init() { + t["DVPortgroupDestroyedEvent"] = reflect.TypeOf((*DVPortgroupDestroyedEvent)(nil)).Elem() +} + +type DVPortgroupEvent struct { + Event +} + +func init() { + t["DVPortgroupEvent"] = reflect.TypeOf((*DVPortgroupEvent)(nil)).Elem() +} + +type DVPortgroupPolicy struct { + DynamicData + + BlockOverrideAllowed bool `xml:"blockOverrideAllowed"` + ShapingOverrideAllowed bool `xml:"shapingOverrideAllowed"` + VendorConfigOverrideAllowed bool `xml:"vendorConfigOverrideAllowed"` + LivePortMovingAllowed bool `xml:"livePortMovingAllowed"` + PortConfigResetAtDisconnect bool `xml:"portConfigResetAtDisconnect"` + NetworkResourcePoolOverrideAllowed *bool `xml:"networkResourcePoolOverrideAllowed"` + TrafficFilterOverrideAllowed *bool `xml:"trafficFilterOverrideAllowed"` +} + +func init() { + t["DVPortgroupPolicy"] = reflect.TypeOf((*DVPortgroupPolicy)(nil)).Elem() +} + +type DVPortgroupReconfiguredEvent struct { + DVPortgroupEvent + + ConfigSpec DVPortgroupConfigSpec `xml:"configSpec"` + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty"` +} + +func init() { + t["DVPortgroupReconfiguredEvent"] = reflect.TypeOf((*DVPortgroupReconfiguredEvent)(nil)).Elem() +} + +type DVPortgroupRenamedEvent struct { + DVPortgroupEvent + + OldName string `xml:"oldName"` + NewName string `xml:"newName"` +} + +func init() { + t["DVPortgroupRenamedEvent"] = reflect.TypeOf((*DVPortgroupRenamedEvent)(nil)).Elem() +} + +type DVPortgroupRollbackRequestType struct { + This ManagedObjectReference `xml:"_this"` + EntityBackup *EntityBackupConfig `xml:"entityBackup,omitempty"` +} + +func init() { + t["DVPortgroupRollbackRequestType"] = reflect.TypeOf((*DVPortgroupRollbackRequestType)(nil)).Elem() +} + +type DVPortgroupRollback_Task DVPortgroupRollbackRequestType + +func init() { + t["DVPortgroupRollback_Task"] = reflect.TypeOf((*DVPortgroupRollback_Task)(nil)).Elem() +} + +type DVPortgroupRollback_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DVPortgroupSelection struct { + SelectionSet + + DvsUuid string `xml:"dvsUuid"` + PortgroupKey []string `xml:"portgroupKey"` +} + +func init() { + t["DVPortgroupSelection"] = reflect.TypeOf((*DVPortgroupSelection)(nil)).Elem() +} + +type DVSBackupRestoreCapability struct { + DynamicData + + BackupRestoreSupported bool `xml:"backupRestoreSupported"` +} + +func init() { + t["DVSBackupRestoreCapability"] = reflect.TypeOf((*DVSBackupRestoreCapability)(nil)).Elem() +} + +type DVSCapability struct { + DynamicData + + DvsOperationSupported *bool `xml:"dvsOperationSupported"` + DvPortGroupOperationSupported *bool `xml:"dvPortGroupOperationSupported"` + DvPortOperationSupported *bool 
`xml:"dvPortOperationSupported"` + CompatibleHostComponentProductInfo []DistributedVirtualSwitchHostProductSpec `xml:"compatibleHostComponentProductInfo,omitempty"` + FeaturesSupported BaseDVSFeatureCapability `xml:"featuresSupported,omitempty,typeattr"` +} + +func init() { + t["DVSCapability"] = reflect.TypeOf((*DVSCapability)(nil)).Elem() +} + +type DVSConfigInfo struct { + DynamicData + + Uuid string `xml:"uuid"` + Name string `xml:"name"` + NumStandalonePorts int32 `xml:"numStandalonePorts"` + NumPorts int32 `xml:"numPorts"` + MaxPorts int32 `xml:"maxPorts"` + UplinkPortPolicy BaseDVSUplinkPortPolicy `xml:"uplinkPortPolicy,typeattr"` + UplinkPortgroup []ManagedObjectReference `xml:"uplinkPortgroup,omitempty"` + DefaultPortConfig BaseDVPortSetting `xml:"defaultPortConfig,typeattr"` + Host []DistributedVirtualSwitchHostMember `xml:"host,omitempty"` + ProductInfo DistributedVirtualSwitchProductSpec `xml:"productInfo"` + TargetInfo *DistributedVirtualSwitchProductSpec `xml:"targetInfo,omitempty"` + ExtensionKey string `xml:"extensionKey,omitempty"` + VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"` + Policy *DVSPolicy `xml:"policy,omitempty"` + Description string `xml:"description,omitempty"` + ConfigVersion string `xml:"configVersion"` + Contact DVSContactInfo `xml:"contact"` + SwitchIpAddress string `xml:"switchIpAddress,omitempty"` + CreateTime time.Time `xml:"createTime"` + NetworkResourceManagementEnabled *bool `xml:"networkResourceManagementEnabled"` + DefaultProxySwitchMaxNumPorts int32 `xml:"defaultProxySwitchMaxNumPorts,omitempty"` + HealthCheckConfig []BaseDVSHealthCheckConfig `xml:"healthCheckConfig,omitempty,typeattr"` + InfrastructureTrafficResourceConfig []DvsHostInfrastructureTrafficResource `xml:"infrastructureTrafficResourceConfig,omitempty"` + NetResourcePoolTrafficResourceConfig []DvsHostInfrastructureTrafficResource `xml:"netResourcePoolTrafficResourceConfig,omitempty"` + NetworkResourceControlVersion string `xml:"networkResourceControlVersion,omitempty"` + VmVnicNetworkResourcePool []DVSVmVnicNetworkResourcePool `xml:"vmVnicNetworkResourcePool,omitempty"` + PnicCapacityRatioForReservation int32 `xml:"pnicCapacityRatioForReservation,omitempty"` +} + +func init() { + t["DVSConfigInfo"] = reflect.TypeOf((*DVSConfigInfo)(nil)).Elem() +} + +type DVSConfigSpec struct { + DynamicData + + ConfigVersion string `xml:"configVersion,omitempty"` + Name string `xml:"name,omitempty"` + NumStandalonePorts int32 `xml:"numStandalonePorts,omitempty"` + MaxPorts int32 `xml:"maxPorts,omitempty"` + UplinkPortPolicy BaseDVSUplinkPortPolicy `xml:"uplinkPortPolicy,omitempty,typeattr"` + UplinkPortgroup []ManagedObjectReference `xml:"uplinkPortgroup,omitempty"` + DefaultPortConfig BaseDVPortSetting `xml:"defaultPortConfig,omitempty,typeattr"` + Host []DistributedVirtualSwitchHostMemberConfigSpec `xml:"host,omitempty"` + ExtensionKey string `xml:"extensionKey,omitempty"` + Description string `xml:"description,omitempty"` + Policy *DVSPolicy `xml:"policy,omitempty"` + VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"` + Contact *DVSContactInfo `xml:"contact,omitempty"` + SwitchIpAddress string `xml:"switchIpAddress,omitempty"` + DefaultProxySwitchMaxNumPorts int32 `xml:"defaultProxySwitchMaxNumPorts,omitempty"` + InfrastructureTrafficResourceConfig []DvsHostInfrastructureTrafficResource `xml:"infrastructureTrafficResourceConfig,omitempty"` + NetResourcePoolTrafficResourceConfig 
[]DvsHostInfrastructureTrafficResource `xml:"netResourcePoolTrafficResourceConfig,omitempty"` + NetworkResourceControlVersion string `xml:"networkResourceControlVersion,omitempty"` +} + +func init() { + t["DVSConfigSpec"] = reflect.TypeOf((*DVSConfigSpec)(nil)).Elem() +} + +type DVSContactInfo struct { + DynamicData + + Name string `xml:"name,omitempty"` + Contact string `xml:"contact,omitempty"` +} + +func init() { + t["DVSContactInfo"] = reflect.TypeOf((*DVSContactInfo)(nil)).Elem() +} + +type DVSCreateSpec struct { + DynamicData + + ConfigSpec BaseDVSConfigSpec `xml:"configSpec,typeattr"` + ProductInfo *DistributedVirtualSwitchProductSpec `xml:"productInfo,omitempty"` + Capability *DVSCapability `xml:"capability,omitempty"` +} + +func init() { + t["DVSCreateSpec"] = reflect.TypeOf((*DVSCreateSpec)(nil)).Elem() +} + +type DVSFailureCriteria struct { + InheritablePolicy + + CheckSpeed *StringPolicy `xml:"checkSpeed,omitempty"` + Speed *IntPolicy `xml:"speed,omitempty"` + CheckDuplex *BoolPolicy `xml:"checkDuplex,omitempty"` + FullDuplex *BoolPolicy `xml:"fullDuplex,omitempty"` + CheckErrorPercent *BoolPolicy `xml:"checkErrorPercent,omitempty"` + Percentage *IntPolicy `xml:"percentage,omitempty"` + CheckBeacon *BoolPolicy `xml:"checkBeacon,omitempty"` +} + +func init() { + t["DVSFailureCriteria"] = reflect.TypeOf((*DVSFailureCriteria)(nil)).Elem() +} + +type DVSFeatureCapability struct { + DynamicData + + NetworkResourceManagementSupported bool `xml:"networkResourceManagementSupported"` + VmDirectPathGen2Supported bool `xml:"vmDirectPathGen2Supported"` + NicTeamingPolicy []string `xml:"nicTeamingPolicy,omitempty"` + NetworkResourcePoolHighShareValue int32 `xml:"networkResourcePoolHighShareValue,omitempty"` + NetworkResourceManagementCapability *DVSNetworkResourceManagementCapability `xml:"networkResourceManagementCapability,omitempty"` + HealthCheckCapability BaseDVSHealthCheckCapability `xml:"healthCheckCapability,omitempty,typeattr"` + RollbackCapability *DVSRollbackCapability `xml:"rollbackCapability,omitempty"` + BackupRestoreCapability *DVSBackupRestoreCapability `xml:"backupRestoreCapability,omitempty"` + NetworkFilterSupported *bool `xml:"networkFilterSupported"` + MacLearningSupported *bool `xml:"macLearningSupported"` +} + +func init() { + t["DVSFeatureCapability"] = reflect.TypeOf((*DVSFeatureCapability)(nil)).Elem() +} + +type DVSHealthCheckCapability struct { + DynamicData +} + +func init() { + t["DVSHealthCheckCapability"] = reflect.TypeOf((*DVSHealthCheckCapability)(nil)).Elem() +} + +type DVSHealthCheckConfig struct { + DynamicData + + Enable *bool `xml:"enable"` + Interval int32 `xml:"interval,omitempty"` +} + +func init() { + t["DVSHealthCheckConfig"] = reflect.TypeOf((*DVSHealthCheckConfig)(nil)).Elem() +} + +type DVSHostLocalPortInfo struct { + DynamicData + + SwitchUuid string `xml:"switchUuid"` + PortKey string `xml:"portKey"` + Setting BaseDVPortSetting `xml:"setting,typeattr"` + Vnic string `xml:"vnic"` +} + +func init() { + t["DVSHostLocalPortInfo"] = reflect.TypeOf((*DVSHostLocalPortInfo)(nil)).Elem() +} + +type DVSMacLearningPolicy struct { + InheritablePolicy + + Enabled bool `xml:"enabled"` + AllowUnicastFlooding *bool `xml:"allowUnicastFlooding"` + Limit *int32 `xml:"limit"` + LimitPolicy string `xml:"limitPolicy,omitempty"` +} + +func init() { + t["DVSMacLearningPolicy"] = reflect.TypeOf((*DVSMacLearningPolicy)(nil)).Elem() +} + +type DVSMacManagementPolicy struct { + InheritablePolicy + + AllowPromiscuous *bool `xml:"allowPromiscuous"` + MacChanges *bool 
`xml:"macChanges"` + ForgedTransmits *bool `xml:"forgedTransmits"` + MacLearningPolicy *DVSMacLearningPolicy `xml:"macLearningPolicy,omitempty"` +} + +func init() { + t["DVSMacManagementPolicy"] = reflect.TypeOf((*DVSMacManagementPolicy)(nil)).Elem() +} + +type DVSManagerDvsConfigTarget struct { + DynamicData + + DistributedVirtualPortgroup []DistributedVirtualPortgroupInfo `xml:"distributedVirtualPortgroup,omitempty"` + DistributedVirtualSwitch []DistributedVirtualSwitchInfo `xml:"distributedVirtualSwitch,omitempty"` +} + +func init() { + t["DVSManagerDvsConfigTarget"] = reflect.TypeOf((*DVSManagerDvsConfigTarget)(nil)).Elem() +} + +type DVSManagerExportEntityRequestType struct { + This ManagedObjectReference `xml:"_this"` + SelectionSet []BaseSelectionSet `xml:"selectionSet,typeattr"` +} + +func init() { + t["DVSManagerExportEntityRequestType"] = reflect.TypeOf((*DVSManagerExportEntityRequestType)(nil)).Elem() +} + +type DVSManagerExportEntity_Task DVSManagerExportEntityRequestType + +func init() { + t["DVSManagerExportEntity_Task"] = reflect.TypeOf((*DVSManagerExportEntity_Task)(nil)).Elem() +} + +type DVSManagerExportEntity_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DVSManagerImportEntityRequestType struct { + This ManagedObjectReference `xml:"_this"` + EntityBackup []EntityBackupConfig `xml:"entityBackup"` + ImportType string `xml:"importType"` +} + +func init() { + t["DVSManagerImportEntityRequestType"] = reflect.TypeOf((*DVSManagerImportEntityRequestType)(nil)).Elem() +} + +type DVSManagerImportEntity_Task DVSManagerImportEntityRequestType + +func init() { + t["DVSManagerImportEntity_Task"] = reflect.TypeOf((*DVSManagerImportEntity_Task)(nil)).Elem() +} + +type DVSManagerImportEntity_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DVSManagerLookupDvPortGroup DVSManagerLookupDvPortGroupRequestType + +func init() { + t["DVSManagerLookupDvPortGroup"] = reflect.TypeOf((*DVSManagerLookupDvPortGroup)(nil)).Elem() +} + +type DVSManagerLookupDvPortGroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + SwitchUuid string `xml:"switchUuid"` + PortgroupKey string `xml:"portgroupKey"` +} + +func init() { + t["DVSManagerLookupDvPortGroupRequestType"] = reflect.TypeOf((*DVSManagerLookupDvPortGroupRequestType)(nil)).Elem() +} + +type DVSManagerLookupDvPortGroupResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type DVSNameArrayUplinkPortPolicy struct { + DVSUplinkPortPolicy + + UplinkPortName []string `xml:"uplinkPortName"` +} + +func init() { + t["DVSNameArrayUplinkPortPolicy"] = reflect.TypeOf((*DVSNameArrayUplinkPortPolicy)(nil)).Elem() +} + +type DVSNetworkResourceManagementCapability struct { + DynamicData + + NetworkResourceManagementSupported bool `xml:"networkResourceManagementSupported"` + NetworkResourcePoolHighShareValue int32 `xml:"networkResourcePoolHighShareValue"` + QosSupported bool `xml:"qosSupported"` + UserDefinedNetworkResourcePoolsSupported bool `xml:"userDefinedNetworkResourcePoolsSupported"` + NetworkResourceControlVersion3Supported *bool `xml:"networkResourceControlVersion3Supported"` + UserDefinedInfraTrafficPoolSupported *bool `xml:"userDefinedInfraTrafficPoolSupported"` +} + +func init() { + t["DVSNetworkResourceManagementCapability"] = reflect.TypeOf((*DVSNetworkResourceManagementCapability)(nil)).Elem() +} + +type DVSNetworkResourcePool struct { + DynamicData + + Key string `xml:"key"` + Name string `xml:"name,omitempty"` + Description 
string `xml:"description,omitempty"` + ConfigVersion string `xml:"configVersion"` + AllocationInfo DVSNetworkResourcePoolAllocationInfo `xml:"allocationInfo"` +} + +func init() { + t["DVSNetworkResourcePool"] = reflect.TypeOf((*DVSNetworkResourcePool)(nil)).Elem() +} + +type DVSNetworkResourcePoolAllocationInfo struct { + DynamicData + + Limit *int64 `xml:"limit"` + Shares *SharesInfo `xml:"shares,omitempty"` + PriorityTag int32 `xml:"priorityTag,omitempty"` +} + +func init() { + t["DVSNetworkResourcePoolAllocationInfo"] = reflect.TypeOf((*DVSNetworkResourcePoolAllocationInfo)(nil)).Elem() +} + +type DVSNetworkResourcePoolConfigSpec struct { + DynamicData + + Key string `xml:"key"` + ConfigVersion string `xml:"configVersion,omitempty"` + AllocationInfo *DVSNetworkResourcePoolAllocationInfo `xml:"allocationInfo,omitempty"` + Name string `xml:"name,omitempty"` + Description string `xml:"description,omitempty"` +} + +func init() { + t["DVSNetworkResourcePoolConfigSpec"] = reflect.TypeOf((*DVSNetworkResourcePoolConfigSpec)(nil)).Elem() +} + +type DVSPolicy struct { + DynamicData + + AutoPreInstallAllowed *bool `xml:"autoPreInstallAllowed"` + AutoUpgradeAllowed *bool `xml:"autoUpgradeAllowed"` + PartialUpgradeAllowed *bool `xml:"partialUpgradeAllowed"` +} + +func init() { + t["DVSPolicy"] = reflect.TypeOf((*DVSPolicy)(nil)).Elem() +} + +type DVSRollbackCapability struct { + DynamicData + + RollbackSupported bool `xml:"rollbackSupported"` +} + +func init() { + t["DVSRollbackCapability"] = reflect.TypeOf((*DVSRollbackCapability)(nil)).Elem() +} + +type DVSRollbackRequestType struct { + This ManagedObjectReference `xml:"_this"` + EntityBackup *EntityBackupConfig `xml:"entityBackup,omitempty"` +} + +func init() { + t["DVSRollbackRequestType"] = reflect.TypeOf((*DVSRollbackRequestType)(nil)).Elem() +} + +type DVSRollback_Task DVSRollbackRequestType + +func init() { + t["DVSRollback_Task"] = reflect.TypeOf((*DVSRollback_Task)(nil)).Elem() +} + +type DVSRollback_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DVSRuntimeInfo struct { + DynamicData + + HostMemberRuntime []HostMemberRuntimeInfo `xml:"hostMemberRuntime,omitempty"` + ResourceRuntimeInfo *DvsResourceRuntimeInfo `xml:"resourceRuntimeInfo,omitempty"` +} + +func init() { + t["DVSRuntimeInfo"] = reflect.TypeOf((*DVSRuntimeInfo)(nil)).Elem() +} + +type DVSSecurityPolicy struct { + InheritablePolicy + + AllowPromiscuous *BoolPolicy `xml:"allowPromiscuous,omitempty"` + MacChanges *BoolPolicy `xml:"macChanges,omitempty"` + ForgedTransmits *BoolPolicy `xml:"forgedTransmits,omitempty"` +} + +func init() { + t["DVSSecurityPolicy"] = reflect.TypeOf((*DVSSecurityPolicy)(nil)).Elem() +} + +type DVSSelection struct { + SelectionSet + + DvsUuid string `xml:"dvsUuid"` +} + +func init() { + t["DVSSelection"] = reflect.TypeOf((*DVSSelection)(nil)).Elem() +} + +type DVSSummary struct { + DynamicData + + Name string `xml:"name"` + Uuid string `xml:"uuid"` + NumPorts int32 `xml:"numPorts"` + ProductInfo *DistributedVirtualSwitchProductSpec `xml:"productInfo,omitempty"` + HostMember []ManagedObjectReference `xml:"hostMember,omitempty"` + Vm []ManagedObjectReference `xml:"vm,omitempty"` + Host []ManagedObjectReference `xml:"host,omitempty"` + PortgroupName []string `xml:"portgroupName,omitempty"` + Description string `xml:"description,omitempty"` + Contact *DVSContactInfo `xml:"contact,omitempty"` + NumHosts int32 `xml:"numHosts,omitempty"` +} + +func init() { + t["DVSSummary"] = reflect.TypeOf((*DVSSummary)(nil)).Elem() +} + 
+type DVSTrafficShapingPolicy struct { + InheritablePolicy + + Enabled *BoolPolicy `xml:"enabled,omitempty"` + AverageBandwidth *LongPolicy `xml:"averageBandwidth,omitempty"` + PeakBandwidth *LongPolicy `xml:"peakBandwidth,omitempty"` + BurstSize *LongPolicy `xml:"burstSize,omitempty"` +} + +func init() { + t["DVSTrafficShapingPolicy"] = reflect.TypeOf((*DVSTrafficShapingPolicy)(nil)).Elem() +} + +type DVSUplinkPortPolicy struct { + DynamicData +} + +func init() { + t["DVSUplinkPortPolicy"] = reflect.TypeOf((*DVSUplinkPortPolicy)(nil)).Elem() +} + +type DVSVendorSpecificConfig struct { + InheritablePolicy + + KeyValue []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"keyValue,omitempty"` +} + +func init() { + t["DVSVendorSpecificConfig"] = reflect.TypeOf((*DVSVendorSpecificConfig)(nil)).Elem() +} + +type DVSVmVnicNetworkResourcePool struct { + DynamicData + + Key string `xml:"key"` + Name string `xml:"name,omitempty"` + Description string `xml:"description,omitempty"` + ConfigVersion string `xml:"configVersion"` + AllocationInfo *DvsVmVnicResourceAllocation `xml:"allocationInfo,omitempty"` +} + +func init() { + t["DVSVmVnicNetworkResourcePool"] = reflect.TypeOf((*DVSVmVnicNetworkResourcePool)(nil)).Elem() +} + +type DailyTaskScheduler struct { + HourlyTaskScheduler + + Hour int32 `xml:"hour"` +} + +func init() { + t["DailyTaskScheduler"] = reflect.TypeOf((*DailyTaskScheduler)(nil)).Elem() +} + +type DasAdmissionControlDisabledEvent struct { + ClusterEvent +} + +func init() { + t["DasAdmissionControlDisabledEvent"] = reflect.TypeOf((*DasAdmissionControlDisabledEvent)(nil)).Elem() +} + +type DasAdmissionControlEnabledEvent struct { + ClusterEvent +} + +func init() { + t["DasAdmissionControlEnabledEvent"] = reflect.TypeOf((*DasAdmissionControlEnabledEvent)(nil)).Elem() +} + +type DasAgentFoundEvent struct { + ClusterEvent +} + +func init() { + t["DasAgentFoundEvent"] = reflect.TypeOf((*DasAgentFoundEvent)(nil)).Elem() +} + +type DasAgentUnavailableEvent struct { + ClusterEvent +} + +func init() { + t["DasAgentUnavailableEvent"] = reflect.TypeOf((*DasAgentUnavailableEvent)(nil)).Elem() +} + +type DasClusterIsolatedEvent struct { + ClusterEvent +} + +func init() { + t["DasClusterIsolatedEvent"] = reflect.TypeOf((*DasClusterIsolatedEvent)(nil)).Elem() +} + +type DasConfigFault struct { + VimFault + + Reason string `xml:"reason,omitempty"` + Output string `xml:"output,omitempty"` + Event []BaseEvent `xml:"event,omitempty,typeattr"` +} + +func init() { + t["DasConfigFault"] = reflect.TypeOf((*DasConfigFault)(nil)).Elem() +} + +type DasConfigFaultFault DasConfigFault + +func init() { + t["DasConfigFaultFault"] = reflect.TypeOf((*DasConfigFaultFault)(nil)).Elem() +} + +type DasDisabledEvent struct { + ClusterEvent +} + +func init() { + t["DasDisabledEvent"] = reflect.TypeOf((*DasDisabledEvent)(nil)).Elem() +} + +type DasEnabledEvent struct { + ClusterEvent +} + +func init() { + t["DasEnabledEvent"] = reflect.TypeOf((*DasEnabledEvent)(nil)).Elem() +} + +type DasHeartbeatDatastoreInfo struct { + DynamicData + + Datastore ManagedObjectReference `xml:"datastore"` + Hosts []ManagedObjectReference `xml:"hosts"` +} + +func init() { + t["DasHeartbeatDatastoreInfo"] = reflect.TypeOf((*DasHeartbeatDatastoreInfo)(nil)).Elem() +} + +type DasHostFailedEvent struct { + ClusterEvent + + FailedHost HostEventArgument `xml:"failedHost"` +} + +func init() { + t["DasHostFailedEvent"] = reflect.TypeOf((*DasHostFailedEvent)(nil)).Elem() +} + +type DasHostIsolatedEvent struct { + ClusterEvent + + IsolatedHost 
HostEventArgument `xml:"isolatedHost"` +} + +func init() { + t["DasHostIsolatedEvent"] = reflect.TypeOf((*DasHostIsolatedEvent)(nil)).Elem() +} + +type DatabaseError struct { + RuntimeFault +} + +func init() { + t["DatabaseError"] = reflect.TypeOf((*DatabaseError)(nil)).Elem() +} + +type DatabaseErrorFault DatabaseError + +func init() { + t["DatabaseErrorFault"] = reflect.TypeOf((*DatabaseErrorFault)(nil)).Elem() +} + +type DatabaseSizeEstimate struct { + DynamicData + + Size int64 `xml:"size"` +} + +func init() { + t["DatabaseSizeEstimate"] = reflect.TypeOf((*DatabaseSizeEstimate)(nil)).Elem() +} + +type DatabaseSizeParam struct { + DynamicData + + InventoryDesc InventoryDescription `xml:"inventoryDesc"` + PerfStatsDesc *PerformanceStatisticsDescription `xml:"perfStatsDesc,omitempty"` +} + +func init() { + t["DatabaseSizeParam"] = reflect.TypeOf((*DatabaseSizeParam)(nil)).Elem() +} + +type DatacenterConfigInfo struct { + DynamicData + + DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` +} + +func init() { + t["DatacenterConfigInfo"] = reflect.TypeOf((*DatacenterConfigInfo)(nil)).Elem() +} + +type DatacenterConfigSpec struct { + DynamicData + + DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` +} + +func init() { + t["DatacenterConfigSpec"] = reflect.TypeOf((*DatacenterConfigSpec)(nil)).Elem() +} + +type DatacenterCreatedEvent struct { + DatacenterEvent + + Parent FolderEventArgument `xml:"parent"` +} + +func init() { + t["DatacenterCreatedEvent"] = reflect.TypeOf((*DatacenterCreatedEvent)(nil)).Elem() +} + +type DatacenterEvent struct { + Event +} + +func init() { + t["DatacenterEvent"] = reflect.TypeOf((*DatacenterEvent)(nil)).Elem() +} + +type DatacenterEventArgument struct { + EntityEventArgument + + Datacenter ManagedObjectReference `xml:"datacenter"` +} + +func init() { + t["DatacenterEventArgument"] = reflect.TypeOf((*DatacenterEventArgument)(nil)).Elem() +} + +type DatacenterMismatch struct { + MigrationFault + + InvalidArgument []DatacenterMismatchArgument `xml:"invalidArgument"` + ExpectedDatacenter ManagedObjectReference `xml:"expectedDatacenter"` +} + +func init() { + t["DatacenterMismatch"] = reflect.TypeOf((*DatacenterMismatch)(nil)).Elem() +} + +type DatacenterMismatchArgument struct { + DynamicData + + Entity ManagedObjectReference `xml:"entity"` + InputDatacenter *ManagedObjectReference `xml:"inputDatacenter,omitempty"` +} + +func init() { + t["DatacenterMismatchArgument"] = reflect.TypeOf((*DatacenterMismatchArgument)(nil)).Elem() +} + +type DatacenterMismatchFault DatacenterMismatch + +func init() { + t["DatacenterMismatchFault"] = reflect.TypeOf((*DatacenterMismatchFault)(nil)).Elem() +} + +type DatacenterRenamedEvent struct { + DatacenterEvent + + OldName string `xml:"oldName"` + NewName string `xml:"newName"` +} + +func init() { + t["DatacenterRenamedEvent"] = reflect.TypeOf((*DatacenterRenamedEvent)(nil)).Elem() +} + +type DatastoreCapability struct { + DynamicData + + DirectoryHierarchySupported bool `xml:"directoryHierarchySupported"` + RawDiskMappingsSupported bool `xml:"rawDiskMappingsSupported"` + PerFileThinProvisioningSupported bool `xml:"perFileThinProvisioningSupported"` + StorageIORMSupported *bool `xml:"storageIORMSupported"` + NativeSnapshotSupported *bool `xml:"nativeSnapshotSupported"` + TopLevelDirectoryCreateSupported *bool `xml:"topLevelDirectoryCreateSupported"` + SeSparseSupported *bool `xml:"seSparseSupported"` + VmfsSparseSupported *bool `xml:"vmfsSparseSupported"` + VsanSparseSupported *bool 
`xml:"vsanSparseSupported"` + UpitSupported *bool `xml:"upitSupported"` + VmdkExpandSupported *bool `xml:"vmdkExpandSupported"` +} + +func init() { + t["DatastoreCapability"] = reflect.TypeOf((*DatastoreCapability)(nil)).Elem() +} + +type DatastoreCapacityIncreasedEvent struct { + DatastoreEvent + + OldCapacity int64 `xml:"oldCapacity"` + NewCapacity int64 `xml:"newCapacity"` +} + +func init() { + t["DatastoreCapacityIncreasedEvent"] = reflect.TypeOf((*DatastoreCapacityIncreasedEvent)(nil)).Elem() +} + +type DatastoreDestroyedEvent struct { + DatastoreEvent +} + +func init() { + t["DatastoreDestroyedEvent"] = reflect.TypeOf((*DatastoreDestroyedEvent)(nil)).Elem() +} + +type DatastoreDiscoveredEvent struct { + HostEvent + + Datastore DatastoreEventArgument `xml:"datastore"` +} + +func init() { + t["DatastoreDiscoveredEvent"] = reflect.TypeOf((*DatastoreDiscoveredEvent)(nil)).Elem() +} + +type DatastoreDuplicatedEvent struct { + DatastoreEvent +} + +func init() { + t["DatastoreDuplicatedEvent"] = reflect.TypeOf((*DatastoreDuplicatedEvent)(nil)).Elem() +} + +type DatastoreEnterMaintenanceMode DatastoreEnterMaintenanceModeRequestType + +func init() { + t["DatastoreEnterMaintenanceMode"] = reflect.TypeOf((*DatastoreEnterMaintenanceMode)(nil)).Elem() +} + +type DatastoreEnterMaintenanceModeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DatastoreEnterMaintenanceModeRequestType"] = reflect.TypeOf((*DatastoreEnterMaintenanceModeRequestType)(nil)).Elem() +} + +type DatastoreEnterMaintenanceModeResponse struct { + Returnval StoragePlacementResult `xml:"returnval"` +} + +type DatastoreEvent struct { + Event + + Datastore *DatastoreEventArgument `xml:"datastore,omitempty"` +} + +func init() { + t["DatastoreEvent"] = reflect.TypeOf((*DatastoreEvent)(nil)).Elem() +} + +type DatastoreEventArgument struct { + EntityEventArgument + + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["DatastoreEventArgument"] = reflect.TypeOf((*DatastoreEventArgument)(nil)).Elem() +} + +type DatastoreExitMaintenanceModeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DatastoreExitMaintenanceModeRequestType"] = reflect.TypeOf((*DatastoreExitMaintenanceModeRequestType)(nil)).Elem() +} + +type DatastoreExitMaintenanceMode_Task DatastoreExitMaintenanceModeRequestType + +func init() { + t["DatastoreExitMaintenanceMode_Task"] = reflect.TypeOf((*DatastoreExitMaintenanceMode_Task)(nil)).Elem() +} + +type DatastoreExitMaintenanceMode_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DatastoreFileCopiedEvent struct { + DatastoreFileEvent + + SourceDatastore DatastoreEventArgument `xml:"sourceDatastore"` + SourceFile string `xml:"sourceFile"` +} + +func init() { + t["DatastoreFileCopiedEvent"] = reflect.TypeOf((*DatastoreFileCopiedEvent)(nil)).Elem() +} + +type DatastoreFileDeletedEvent struct { + DatastoreFileEvent +} + +func init() { + t["DatastoreFileDeletedEvent"] = reflect.TypeOf((*DatastoreFileDeletedEvent)(nil)).Elem() +} + +type DatastoreFileEvent struct { + DatastoreEvent + + TargetFile string `xml:"targetFile"` + SourceOfOperation string `xml:"sourceOfOperation,omitempty"` + Succeeded *bool `xml:"succeeded"` +} + +func init() { + t["DatastoreFileEvent"] = reflect.TypeOf((*DatastoreFileEvent)(nil)).Elem() +} + +type DatastoreFileMovedEvent struct { + DatastoreFileEvent + + SourceDatastore DatastoreEventArgument `xml:"sourceDatastore"` + SourceFile string `xml:"sourceFile"` +} + +func 
init() { + t["DatastoreFileMovedEvent"] = reflect.TypeOf((*DatastoreFileMovedEvent)(nil)).Elem() +} + +type DatastoreHostMount struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + MountInfo HostMountInfo `xml:"mountInfo"` +} + +func init() { + t["DatastoreHostMount"] = reflect.TypeOf((*DatastoreHostMount)(nil)).Elem() +} + +type DatastoreIORMReconfiguredEvent struct { + DatastoreEvent +} + +func init() { + t["DatastoreIORMReconfiguredEvent"] = reflect.TypeOf((*DatastoreIORMReconfiguredEvent)(nil)).Elem() +} + +type DatastoreInfo struct { + DynamicData + + Name string `xml:"name"` + Url string `xml:"url"` + FreeSpace int64 `xml:"freeSpace"` + MaxFileSize int64 `xml:"maxFileSize"` + MaxVirtualDiskCapacity int64 `xml:"maxVirtualDiskCapacity,omitempty"` + MaxMemoryFileSize int64 `xml:"maxMemoryFileSize,omitempty"` + Timestamp *time.Time `xml:"timestamp"` + ContainerId string `xml:"containerId,omitempty"` +} + +func init() { + t["DatastoreInfo"] = reflect.TypeOf((*DatastoreInfo)(nil)).Elem() +} + +type DatastoreMountPathDatastorePair struct { + DynamicData + + OldMountPath string `xml:"oldMountPath"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["DatastoreMountPathDatastorePair"] = reflect.TypeOf((*DatastoreMountPathDatastorePair)(nil)).Elem() +} + +type DatastoreNotWritableOnHost struct { + InvalidDatastore + + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["DatastoreNotWritableOnHost"] = reflect.TypeOf((*DatastoreNotWritableOnHost)(nil)).Elem() +} + +type DatastoreNotWritableOnHostFault BaseDatastoreNotWritableOnHost + +func init() { + t["DatastoreNotWritableOnHostFault"] = reflect.TypeOf((*DatastoreNotWritableOnHostFault)(nil)).Elem() +} + +type DatastoreOption struct { + DynamicData + + UnsupportedVolumes []VirtualMachineDatastoreVolumeOption `xml:"unsupportedVolumes,omitempty"` +} + +func init() { + t["DatastoreOption"] = reflect.TypeOf((*DatastoreOption)(nil)).Elem() +} + +type DatastorePrincipalConfigured struct { + HostEvent + + DatastorePrincipal string `xml:"datastorePrincipal"` +} + +func init() { + t["DatastorePrincipalConfigured"] = reflect.TypeOf((*DatastorePrincipalConfigured)(nil)).Elem() +} + +type DatastoreRemovedOnHostEvent struct { + HostEvent + + Datastore DatastoreEventArgument `xml:"datastore"` +} + +func init() { + t["DatastoreRemovedOnHostEvent"] = reflect.TypeOf((*DatastoreRemovedOnHostEvent)(nil)).Elem() +} + +type DatastoreRenamedEvent struct { + DatastoreEvent + + OldName string `xml:"oldName"` + NewName string `xml:"newName"` +} + +func init() { + t["DatastoreRenamedEvent"] = reflect.TypeOf((*DatastoreRenamedEvent)(nil)).Elem() +} + +type DatastoreRenamedOnHostEvent struct { + HostEvent + + OldName string `xml:"oldName"` + NewName string `xml:"newName"` +} + +func init() { + t["DatastoreRenamedOnHostEvent"] = reflect.TypeOf((*DatastoreRenamedOnHostEvent)(nil)).Elem() +} + +type DatastoreSummary struct { + DynamicData + + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` + Name string `xml:"name"` + Url string `xml:"url"` + Capacity int64 `xml:"capacity"` + FreeSpace int64 `xml:"freeSpace"` + Uncommitted int64 `xml:"uncommitted,omitempty"` + Accessible bool `xml:"accessible"` + MultipleHostAccess *bool `xml:"multipleHostAccess"` + Type string `xml:"type"` + MaintenanceMode string `xml:"maintenanceMode,omitempty"` +} + +func init() { + t["DatastoreSummary"] = reflect.TypeOf((*DatastoreSummary)(nil)).Elem() +} + +type DatastoreVVolContainerFailoverPair struct { + DynamicData + + SrcContainer 
string `xml:"srcContainer,omitempty"` + TgtContainer string `xml:"tgtContainer"` + VvolMapping []KeyValue `xml:"vvolMapping,omitempty"` +} + +func init() { + t["DatastoreVVolContainerFailoverPair"] = reflect.TypeOf((*DatastoreVVolContainerFailoverPair)(nil)).Elem() +} + +type DateTimeProfile struct { + ApplyProfile +} + +func init() { + t["DateTimeProfile"] = reflect.TypeOf((*DateTimeProfile)(nil)).Elem() +} + +type DecodeLicense DecodeLicenseRequestType + +func init() { + t["DecodeLicense"] = reflect.TypeOf((*DecodeLicense)(nil)).Elem() +} + +type DecodeLicenseRequestType struct { + This ManagedObjectReference `xml:"_this"` + LicenseKey string `xml:"licenseKey"` +} + +func init() { + t["DecodeLicenseRequestType"] = reflect.TypeOf((*DecodeLicenseRequestType)(nil)).Elem() +} + +type DecodeLicenseResponse struct { + Returnval LicenseManagerLicenseInfo `xml:"returnval"` +} + +type DefragmentAllDisks DefragmentAllDisksRequestType + +func init() { + t["DefragmentAllDisks"] = reflect.TypeOf((*DefragmentAllDisks)(nil)).Elem() +} + +type DefragmentAllDisksRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DefragmentAllDisksRequestType"] = reflect.TypeOf((*DefragmentAllDisksRequestType)(nil)).Elem() +} + +type DefragmentAllDisksResponse struct { +} + +type DefragmentVirtualDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` +} + +func init() { + t["DefragmentVirtualDiskRequestType"] = reflect.TypeOf((*DefragmentVirtualDiskRequestType)(nil)).Elem() +} + +type DefragmentVirtualDisk_Task DefragmentVirtualDiskRequestType + +func init() { + t["DefragmentVirtualDisk_Task"] = reflect.TypeOf((*DefragmentVirtualDisk_Task)(nil)).Elem() +} + +type DefragmentVirtualDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DeleteCustomizationSpec DeleteCustomizationSpecRequestType + +func init() { + t["DeleteCustomizationSpec"] = reflect.TypeOf((*DeleteCustomizationSpec)(nil)).Elem() +} + +type DeleteCustomizationSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` +} + +func init() { + t["DeleteCustomizationSpecRequestType"] = reflect.TypeOf((*DeleteCustomizationSpecRequestType)(nil)).Elem() +} + +type DeleteCustomizationSpecResponse struct { +} + +type DeleteDatastoreFileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` +} + +func init() { + t["DeleteDatastoreFileRequestType"] = reflect.TypeOf((*DeleteDatastoreFileRequestType)(nil)).Elem() +} + +type DeleteDatastoreFile_Task DeleteDatastoreFileRequestType + +func init() { + t["DeleteDatastoreFile_Task"] = reflect.TypeOf((*DeleteDatastoreFile_Task)(nil)).Elem() +} + +type DeleteDatastoreFile_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DeleteDirectory DeleteDirectoryRequestType + +func init() { + t["DeleteDirectory"] = reflect.TypeOf((*DeleteDirectory)(nil)).Elem() +} + +type DeleteDirectoryInGuest DeleteDirectoryInGuestRequestType + +func init() { + t["DeleteDirectoryInGuest"] = reflect.TypeOf((*DeleteDirectoryInGuest)(nil)).Elem() +} + +type DeleteDirectoryInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + DirectoryPath string `xml:"directoryPath"` + Recursive bool 
`xml:"recursive"` +} + +func init() { + t["DeleteDirectoryInGuestRequestType"] = reflect.TypeOf((*DeleteDirectoryInGuestRequestType)(nil)).Elem() +} + +type DeleteDirectoryInGuestResponse struct { +} + +type DeleteDirectoryRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + DatastorePath string `xml:"datastorePath"` +} + +func init() { + t["DeleteDirectoryRequestType"] = reflect.TypeOf((*DeleteDirectoryRequestType)(nil)).Elem() +} + +type DeleteDirectoryResponse struct { +} + +type DeleteFile DeleteFileRequestType + +func init() { + t["DeleteFile"] = reflect.TypeOf((*DeleteFile)(nil)).Elem() +} + +type DeleteFileInGuest DeleteFileInGuestRequestType + +func init() { + t["DeleteFileInGuest"] = reflect.TypeOf((*DeleteFileInGuest)(nil)).Elem() +} + +type DeleteFileInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + FilePath string `xml:"filePath"` +} + +func init() { + t["DeleteFileInGuestRequestType"] = reflect.TypeOf((*DeleteFileInGuestRequestType)(nil)).Elem() +} + +type DeleteFileInGuestResponse struct { +} + +type DeleteFileRequestType struct { + This ManagedObjectReference `xml:"_this"` + DatastorePath string `xml:"datastorePath"` +} + +func init() { + t["DeleteFileRequestType"] = reflect.TypeOf((*DeleteFileRequestType)(nil)).Elem() +} + +type DeleteFileResponse struct { +} + +type DeleteHostSpecification DeleteHostSpecificationRequestType + +func init() { + t["DeleteHostSpecification"] = reflect.TypeOf((*DeleteHostSpecification)(nil)).Elem() +} + +type DeleteHostSpecificationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["DeleteHostSpecificationRequestType"] = reflect.TypeOf((*DeleteHostSpecificationRequestType)(nil)).Elem() +} + +type DeleteHostSpecificationResponse struct { +} + +type DeleteHostSubSpecification DeleteHostSubSpecificationRequestType + +func init() { + t["DeleteHostSubSpecification"] = reflect.TypeOf((*DeleteHostSubSpecification)(nil)).Elem() +} + +type DeleteHostSubSpecificationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` + SubSpecName string `xml:"subSpecName"` +} + +func init() { + t["DeleteHostSubSpecificationRequestType"] = reflect.TypeOf((*DeleteHostSubSpecificationRequestType)(nil)).Elem() +} + +type DeleteHostSubSpecificationResponse struct { +} + +type DeleteNvdimmBlockNamespacesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DeleteNvdimmBlockNamespacesRequestType"] = reflect.TypeOf((*DeleteNvdimmBlockNamespacesRequestType)(nil)).Elem() +} + +type DeleteNvdimmBlockNamespaces_Task DeleteNvdimmBlockNamespacesRequestType + +func init() { + t["DeleteNvdimmBlockNamespaces_Task"] = reflect.TypeOf((*DeleteNvdimmBlockNamespaces_Task)(nil)).Elem() +} + +type DeleteNvdimmBlockNamespaces_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DeleteNvdimmNamespaceRequestType struct { + This ManagedObjectReference `xml:"_this"` + DeleteSpec NvdimmNamespaceDeleteSpec `xml:"deleteSpec"` +} + +func init() { + t["DeleteNvdimmNamespaceRequestType"] = reflect.TypeOf((*DeleteNvdimmNamespaceRequestType)(nil)).Elem() +} + +type DeleteNvdimmNamespace_Task DeleteNvdimmNamespaceRequestType + +func init() { + t["DeleteNvdimmNamespace_Task"] = 
reflect.TypeOf((*DeleteNvdimmNamespace_Task)(nil)).Elem() +} + +type DeleteNvdimmNamespace_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DeleteRegistryKeyInGuest DeleteRegistryKeyInGuestRequestType + +func init() { + t["DeleteRegistryKeyInGuest"] = reflect.TypeOf((*DeleteRegistryKeyInGuest)(nil)).Elem() +} + +type DeleteRegistryKeyInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + KeyName GuestRegKeyNameSpec `xml:"keyName"` + Recursive bool `xml:"recursive"` +} + +func init() { + t["DeleteRegistryKeyInGuestRequestType"] = reflect.TypeOf((*DeleteRegistryKeyInGuestRequestType)(nil)).Elem() +} + +type DeleteRegistryKeyInGuestResponse struct { +} + +type DeleteRegistryValueInGuest DeleteRegistryValueInGuestRequestType + +func init() { + t["DeleteRegistryValueInGuest"] = reflect.TypeOf((*DeleteRegistryValueInGuest)(nil)).Elem() +} + +type DeleteRegistryValueInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + ValueName GuestRegValueNameSpec `xml:"valueName"` +} + +func init() { + t["DeleteRegistryValueInGuestRequestType"] = reflect.TypeOf((*DeleteRegistryValueInGuestRequestType)(nil)).Elem() +} + +type DeleteRegistryValueInGuestResponse struct { +} + +type DeleteScsiLunState DeleteScsiLunStateRequestType + +func init() { + t["DeleteScsiLunState"] = reflect.TypeOf((*DeleteScsiLunState)(nil)).Elem() +} + +type DeleteScsiLunStateRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunCanonicalName string `xml:"lunCanonicalName"` +} + +func init() { + t["DeleteScsiLunStateRequestType"] = reflect.TypeOf((*DeleteScsiLunStateRequestType)(nil)).Elem() +} + +type DeleteScsiLunStateResponse struct { +} + +type DeleteSnapshotRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + SnapshotId ID `xml:"snapshotId"` +} + +func init() { + t["DeleteSnapshotRequestType"] = reflect.TypeOf((*DeleteSnapshotRequestType)(nil)).Elem() +} + +type DeleteSnapshot_Task DeleteSnapshotRequestType + +func init() { + t["DeleteSnapshot_Task"] = reflect.TypeOf((*DeleteSnapshot_Task)(nil)).Elem() +} + +type DeleteSnapshot_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DeleteVStorageObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["DeleteVStorageObjectRequestType"] = reflect.TypeOf((*DeleteVStorageObjectRequestType)(nil)).Elem() +} + +type DeleteVStorageObject_Task DeleteVStorageObjectRequestType + +func init() { + t["DeleteVStorageObject_Task"] = reflect.TypeOf((*DeleteVStorageObject_Task)(nil)).Elem() +} + +type DeleteVStorageObject_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DeleteVffsVolumeState DeleteVffsVolumeStateRequestType + +func init() { + t["DeleteVffsVolumeState"] = reflect.TypeOf((*DeleteVffsVolumeState)(nil)).Elem() +} + +type DeleteVffsVolumeStateRequestType struct { + This ManagedObjectReference `xml:"_this"` + VffsUuid string `xml:"vffsUuid"` +} + +func init() { + t["DeleteVffsVolumeStateRequestType"] = reflect.TypeOf((*DeleteVffsVolumeStateRequestType)(nil)).Elem() +} + +type DeleteVffsVolumeStateResponse struct { +} + +type DeleteVirtualDiskRequestType 
struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` +} + +func init() { + t["DeleteVirtualDiskRequestType"] = reflect.TypeOf((*DeleteVirtualDiskRequestType)(nil)).Elem() +} + +type DeleteVirtualDisk_Task DeleteVirtualDiskRequestType + +func init() { + t["DeleteVirtualDisk_Task"] = reflect.TypeOf((*DeleteVirtualDisk_Task)(nil)).Elem() +} + +type DeleteVirtualDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DeleteVmfsVolumeState DeleteVmfsVolumeStateRequestType + +func init() { + t["DeleteVmfsVolumeState"] = reflect.TypeOf((*DeleteVmfsVolumeState)(nil)).Elem() +} + +type DeleteVmfsVolumeStateRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsUuid string `xml:"vmfsUuid"` +} + +func init() { + t["DeleteVmfsVolumeStateRequestType"] = reflect.TypeOf((*DeleteVmfsVolumeStateRequestType)(nil)).Elem() +} + +type DeleteVmfsVolumeStateResponse struct { +} + +type DeleteVsanObjects DeleteVsanObjectsRequestType + +func init() { + t["DeleteVsanObjects"] = reflect.TypeOf((*DeleteVsanObjects)(nil)).Elem() +} + +type DeleteVsanObjectsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuids []string `xml:"uuids"` + Force *bool `xml:"force"` +} + +func init() { + t["DeleteVsanObjectsRequestType"] = reflect.TypeOf((*DeleteVsanObjectsRequestType)(nil)).Elem() +} + +type DeleteVsanObjectsResponse struct { + Returnval []HostVsanInternalSystemDeleteVsanObjectsResult `xml:"returnval"` +} + +type DeltaDiskFormatNotSupported struct { + VmConfigFault + + Datastore []ManagedObjectReference `xml:"datastore,omitempty"` + DeltaDiskFormat string `xml:"deltaDiskFormat"` +} + +func init() { + t["DeltaDiskFormatNotSupported"] = reflect.TypeOf((*DeltaDiskFormatNotSupported)(nil)).Elem() +} + +type DeltaDiskFormatNotSupportedFault DeltaDiskFormatNotSupported + +func init() { + t["DeltaDiskFormatNotSupportedFault"] = reflect.TypeOf((*DeltaDiskFormatNotSupportedFault)(nil)).Elem() +} + +type Description struct { + DynamicData + + Label string `xml:"label"` + Summary string `xml:"summary"` +} + +func init() { + t["Description"] = reflect.TypeOf((*Description)(nil)).Elem() +} + +type DeselectVnic DeselectVnicRequestType + +func init() { + t["DeselectVnic"] = reflect.TypeOf((*DeselectVnic)(nil)).Elem() +} + +type DeselectVnicForNicType DeselectVnicForNicTypeRequestType + +func init() { + t["DeselectVnicForNicType"] = reflect.TypeOf((*DeselectVnicForNicType)(nil)).Elem() +} + +type DeselectVnicForNicTypeRequestType struct { + This ManagedObjectReference `xml:"_this"` + NicType string `xml:"nicType"` + Device string `xml:"device"` +} + +func init() { + t["DeselectVnicForNicTypeRequestType"] = reflect.TypeOf((*DeselectVnicForNicTypeRequestType)(nil)).Elem() +} + +type DeselectVnicForNicTypeResponse struct { +} + +type DeselectVnicRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DeselectVnicRequestType"] = reflect.TypeOf((*DeselectVnicRequestType)(nil)).Elem() +} + +type DeselectVnicResponse struct { +} + +type DestinationSwitchFull struct { + CannotAccessNetwork +} + +func init() { + t["DestinationSwitchFull"] = reflect.TypeOf((*DestinationSwitchFull)(nil)).Elem() +} + +type DestinationSwitchFullFault DestinationSwitchFull + +func init() { + t["DestinationSwitchFullFault"] = reflect.TypeOf((*DestinationSwitchFullFault)(nil)).Elem() +} + +type DestinationVsanDisabled struct { + CannotMoveVsanEnabledHost + + DestinationCluster 
string `xml:"destinationCluster"` +} + +func init() { + t["DestinationVsanDisabled"] = reflect.TypeOf((*DestinationVsanDisabled)(nil)).Elem() +} + +type DestinationVsanDisabledFault DestinationVsanDisabled + +func init() { + t["DestinationVsanDisabledFault"] = reflect.TypeOf((*DestinationVsanDisabledFault)(nil)).Elem() +} + +type DestroyChildren DestroyChildrenRequestType + +func init() { + t["DestroyChildren"] = reflect.TypeOf((*DestroyChildren)(nil)).Elem() +} + +type DestroyChildrenRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DestroyChildrenRequestType"] = reflect.TypeOf((*DestroyChildrenRequestType)(nil)).Elem() +} + +type DestroyChildrenResponse struct { +} + +type DestroyCollector DestroyCollectorRequestType + +func init() { + t["DestroyCollector"] = reflect.TypeOf((*DestroyCollector)(nil)).Elem() +} + +type DestroyCollectorRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DestroyCollectorRequestType"] = reflect.TypeOf((*DestroyCollectorRequestType)(nil)).Elem() +} + +type DestroyCollectorResponse struct { +} + +type DestroyDatastore DestroyDatastoreRequestType + +func init() { + t["DestroyDatastore"] = reflect.TypeOf((*DestroyDatastore)(nil)).Elem() +} + +type DestroyDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DestroyDatastoreRequestType"] = reflect.TypeOf((*DestroyDatastoreRequestType)(nil)).Elem() +} + +type DestroyDatastoreResponse struct { +} + +type DestroyIpPool DestroyIpPoolRequestType + +func init() { + t["DestroyIpPool"] = reflect.TypeOf((*DestroyIpPool)(nil)).Elem() +} + +type DestroyIpPoolRequestType struct { + This ManagedObjectReference `xml:"_this"` + Dc ManagedObjectReference `xml:"dc"` + Id int32 `xml:"id"` + Force bool `xml:"force"` +} + +func init() { + t["DestroyIpPoolRequestType"] = reflect.TypeOf((*DestroyIpPoolRequestType)(nil)).Elem() +} + +type DestroyIpPoolResponse struct { +} + +type DestroyNetwork DestroyNetworkRequestType + +func init() { + t["DestroyNetwork"] = reflect.TypeOf((*DestroyNetwork)(nil)).Elem() +} + +type DestroyNetworkRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DestroyNetworkRequestType"] = reflect.TypeOf((*DestroyNetworkRequestType)(nil)).Elem() +} + +type DestroyNetworkResponse struct { +} + +type DestroyProfile DestroyProfileRequestType + +func init() { + t["DestroyProfile"] = reflect.TypeOf((*DestroyProfile)(nil)).Elem() +} + +type DestroyProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DestroyProfileRequestType"] = reflect.TypeOf((*DestroyProfileRequestType)(nil)).Elem() +} + +type DestroyProfileResponse struct { +} + +type DestroyPropertyCollector DestroyPropertyCollectorRequestType + +func init() { + t["DestroyPropertyCollector"] = reflect.TypeOf((*DestroyPropertyCollector)(nil)).Elem() +} + +type DestroyPropertyCollectorRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DestroyPropertyCollectorRequestType"] = reflect.TypeOf((*DestroyPropertyCollectorRequestType)(nil)).Elem() +} + +type DestroyPropertyCollectorResponse struct { +} + +type DestroyPropertyFilter DestroyPropertyFilterRequestType + +func init() { + t["DestroyPropertyFilter"] = reflect.TypeOf((*DestroyPropertyFilter)(nil)).Elem() +} + +type DestroyPropertyFilterRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DestroyPropertyFilterRequestType"] = 
reflect.TypeOf((*DestroyPropertyFilterRequestType)(nil)).Elem() +} + +type DestroyPropertyFilterResponse struct { +} + +type DestroyRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DestroyRequestType"] = reflect.TypeOf((*DestroyRequestType)(nil)).Elem() +} + +type DestroyVffs DestroyVffsRequestType + +func init() { + t["DestroyVffs"] = reflect.TypeOf((*DestroyVffs)(nil)).Elem() +} + +type DestroyVffsRequestType struct { + This ManagedObjectReference `xml:"_this"` + VffsPath string `xml:"vffsPath"` +} + +func init() { + t["DestroyVffsRequestType"] = reflect.TypeOf((*DestroyVffsRequestType)(nil)).Elem() +} + +type DestroyVffsResponse struct { +} + +type DestroyView DestroyViewRequestType + +func init() { + t["DestroyView"] = reflect.TypeOf((*DestroyView)(nil)).Elem() +} + +type DestroyViewRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DestroyViewRequestType"] = reflect.TypeOf((*DestroyViewRequestType)(nil)).Elem() +} + +type DestroyViewResponse struct { +} + +type Destroy_Task DestroyRequestType + +func init() { + t["Destroy_Task"] = reflect.TypeOf((*Destroy_Task)(nil)).Elem() +} + +type Destroy_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DetachDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + DiskId ID `xml:"diskId"` +} + +func init() { + t["DetachDiskRequestType"] = reflect.TypeOf((*DetachDiskRequestType)(nil)).Elem() +} + +type DetachDisk_Task DetachDiskRequestType + +func init() { + t["DetachDisk_Task"] = reflect.TypeOf((*DetachDisk_Task)(nil)).Elem() +} + +type DetachDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DetachScsiLun DetachScsiLunRequestType + +func init() { + t["DetachScsiLun"] = reflect.TypeOf((*DetachScsiLun)(nil)).Elem() +} + +type DetachScsiLunExRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunUuid []string `xml:"lunUuid"` +} + +func init() { + t["DetachScsiLunExRequestType"] = reflect.TypeOf((*DetachScsiLunExRequestType)(nil)).Elem() +} + +type DetachScsiLunEx_Task DetachScsiLunExRequestType + +func init() { + t["DetachScsiLunEx_Task"] = reflect.TypeOf((*DetachScsiLunEx_Task)(nil)).Elem() +} + +type DetachScsiLunEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DetachScsiLunRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunUuid string `xml:"lunUuid"` +} + +func init() { + t["DetachScsiLunRequestType"] = reflect.TypeOf((*DetachScsiLunRequestType)(nil)).Elem() +} + +type DetachScsiLunResponse struct { +} + +type DetachTagFromVStorageObject DetachTagFromVStorageObjectRequestType + +func init() { + t["DetachTagFromVStorageObject"] = reflect.TypeOf((*DetachTagFromVStorageObject)(nil)).Elem() +} + +type DetachTagFromVStorageObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Category string `xml:"category"` + Tag string `xml:"tag"` +} + +func init() { + t["DetachTagFromVStorageObjectRequestType"] = reflect.TypeOf((*DetachTagFromVStorageObjectRequestType)(nil)).Elem() +} + +type DetachTagFromVStorageObjectResponse struct { +} + +type DeviceBackedVirtualDiskSpec struct { + VirtualDiskSpec + + Device string `xml:"device"` +} + +func init() { + t["DeviceBackedVirtualDiskSpec"] = reflect.TypeOf((*DeviceBackedVirtualDiskSpec)(nil)).Elem() +} + +type DeviceBackingNotSupported struct { + DeviceNotSupported + + Backing string `xml:"backing"` +} + +func init() { + 
t["DeviceBackingNotSupported"] = reflect.TypeOf((*DeviceBackingNotSupported)(nil)).Elem() +} + +type DeviceBackingNotSupportedFault BaseDeviceBackingNotSupported + +func init() { + t["DeviceBackingNotSupportedFault"] = reflect.TypeOf((*DeviceBackingNotSupportedFault)(nil)).Elem() +} + +type DeviceControllerNotSupported struct { + DeviceNotSupported + + Controller string `xml:"controller"` +} + +func init() { + t["DeviceControllerNotSupported"] = reflect.TypeOf((*DeviceControllerNotSupported)(nil)).Elem() +} + +type DeviceControllerNotSupportedFault DeviceControllerNotSupported + +func init() { + t["DeviceControllerNotSupportedFault"] = reflect.TypeOf((*DeviceControllerNotSupportedFault)(nil)).Elem() +} + +type DeviceGroupId struct { + DynamicData + + Id string `xml:"id"` +} + +func init() { + t["DeviceGroupId"] = reflect.TypeOf((*DeviceGroupId)(nil)).Elem() +} + +type DeviceHotPlugNotSupported struct { + InvalidDeviceSpec +} + +func init() { + t["DeviceHotPlugNotSupported"] = reflect.TypeOf((*DeviceHotPlugNotSupported)(nil)).Elem() +} + +type DeviceHotPlugNotSupportedFault DeviceHotPlugNotSupported + +func init() { + t["DeviceHotPlugNotSupportedFault"] = reflect.TypeOf((*DeviceHotPlugNotSupportedFault)(nil)).Elem() +} + +type DeviceNotFound struct { + InvalidDeviceSpec +} + +func init() { + t["DeviceNotFound"] = reflect.TypeOf((*DeviceNotFound)(nil)).Elem() +} + +type DeviceNotFoundFault DeviceNotFound + +func init() { + t["DeviceNotFoundFault"] = reflect.TypeOf((*DeviceNotFoundFault)(nil)).Elem() +} + +type DeviceNotSupported struct { + VirtualHardwareCompatibilityIssue + + Device string `xml:"device"` + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["DeviceNotSupported"] = reflect.TypeOf((*DeviceNotSupported)(nil)).Elem() +} + +type DeviceNotSupportedFault BaseDeviceNotSupported + +func init() { + t["DeviceNotSupportedFault"] = reflect.TypeOf((*DeviceNotSupportedFault)(nil)).Elem() +} + +type DeviceUnsupportedForVmPlatform struct { + InvalidDeviceSpec +} + +func init() { + t["DeviceUnsupportedForVmPlatform"] = reflect.TypeOf((*DeviceUnsupportedForVmPlatform)(nil)).Elem() +} + +type DeviceUnsupportedForVmPlatformFault DeviceUnsupportedForVmPlatform + +func init() { + t["DeviceUnsupportedForVmPlatformFault"] = reflect.TypeOf((*DeviceUnsupportedForVmPlatformFault)(nil)).Elem() +} + +type DeviceUnsupportedForVmVersion struct { + InvalidDeviceSpec + + CurrentVersion string `xml:"currentVersion"` + ExpectedVersion string `xml:"expectedVersion"` +} + +func init() { + t["DeviceUnsupportedForVmVersion"] = reflect.TypeOf((*DeviceUnsupportedForVmVersion)(nil)).Elem() +} + +type DeviceUnsupportedForVmVersionFault DeviceUnsupportedForVmVersion + +func init() { + t["DeviceUnsupportedForVmVersionFault"] = reflect.TypeOf((*DeviceUnsupportedForVmVersionFault)(nil)).Elem() +} + +type DiagnosticManagerBundleInfo struct { + DynamicData + + System *ManagedObjectReference `xml:"system,omitempty"` + Url string `xml:"url"` +} + +func init() { + t["DiagnosticManagerBundleInfo"] = reflect.TypeOf((*DiagnosticManagerBundleInfo)(nil)).Elem() +} + +type DiagnosticManagerLogDescriptor struct { + DynamicData + + Key string `xml:"key"` + FileName string `xml:"fileName"` + Creator string `xml:"creator"` + Format string `xml:"format"` + MimeType string `xml:"mimeType"` + Info BaseDescription `xml:"info,typeattr"` +} + +func init() { + t["DiagnosticManagerLogDescriptor"] = reflect.TypeOf((*DiagnosticManagerLogDescriptor)(nil)).Elem() +} + +type DiagnosticManagerLogHeader struct { + DynamicData + + LineStart 
int32 `xml:"lineStart"` + LineEnd int32 `xml:"lineEnd"` + LineText []string `xml:"lineText,omitempty"` +} + +func init() { + t["DiagnosticManagerLogHeader"] = reflect.TypeOf((*DiagnosticManagerLogHeader)(nil)).Elem() +} + +type DigestNotSupported struct { + DeviceNotSupported +} + +func init() { + t["DigestNotSupported"] = reflect.TypeOf((*DigestNotSupported)(nil)).Elem() +} + +type DigestNotSupportedFault DigestNotSupported + +func init() { + t["DigestNotSupportedFault"] = reflect.TypeOf((*DigestNotSupportedFault)(nil)).Elem() +} + +type DirectoryNotEmpty struct { + FileFault +} + +func init() { + t["DirectoryNotEmpty"] = reflect.TypeOf((*DirectoryNotEmpty)(nil)).Elem() +} + +type DirectoryNotEmptyFault DirectoryNotEmpty + +func init() { + t["DirectoryNotEmptyFault"] = reflect.TypeOf((*DirectoryNotEmptyFault)(nil)).Elem() +} + +type DisableAdminNotSupported struct { + HostConfigFault +} + +func init() { + t["DisableAdminNotSupported"] = reflect.TypeOf((*DisableAdminNotSupported)(nil)).Elem() +} + +type DisableAdminNotSupportedFault DisableAdminNotSupported + +func init() { + t["DisableAdminNotSupportedFault"] = reflect.TypeOf((*DisableAdminNotSupportedFault)(nil)).Elem() +} + +type DisableEvcModeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DisableEvcModeRequestType"] = reflect.TypeOf((*DisableEvcModeRequestType)(nil)).Elem() +} + +type DisableEvcMode_Task DisableEvcModeRequestType + +func init() { + t["DisableEvcMode_Task"] = reflect.TypeOf((*DisableEvcMode_Task)(nil)).Elem() +} + +type DisableEvcMode_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DisableFeature DisableFeatureRequestType + +func init() { + t["DisableFeature"] = reflect.TypeOf((*DisableFeature)(nil)).Elem() +} + +type DisableFeatureRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` + FeatureKey string `xml:"featureKey"` +} + +func init() { + t["DisableFeatureRequestType"] = reflect.TypeOf((*DisableFeatureRequestType)(nil)).Elem() +} + +type DisableFeatureResponse struct { + Returnval bool `xml:"returnval"` +} + +type DisableHyperThreading DisableHyperThreadingRequestType + +func init() { + t["DisableHyperThreading"] = reflect.TypeOf((*DisableHyperThreading)(nil)).Elem() +} + +type DisableHyperThreadingRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DisableHyperThreadingRequestType"] = reflect.TypeOf((*DisableHyperThreadingRequestType)(nil)).Elem() +} + +type DisableHyperThreadingResponse struct { +} + +type DisableMultipathPath DisableMultipathPathRequestType + +func init() { + t["DisableMultipathPath"] = reflect.TypeOf((*DisableMultipathPath)(nil)).Elem() +} + +type DisableMultipathPathRequestType struct { + This ManagedObjectReference `xml:"_this"` + PathName string `xml:"pathName"` +} + +func init() { + t["DisableMultipathPathRequestType"] = reflect.TypeOf((*DisableMultipathPathRequestType)(nil)).Elem() +} + +type DisableMultipathPathResponse struct { +} + +type DisableRuleset DisableRulesetRequestType + +func init() { + t["DisableRuleset"] = reflect.TypeOf((*DisableRuleset)(nil)).Elem() +} + +type DisableRulesetRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id string `xml:"id"` +} + +func init() { + t["DisableRulesetRequestType"] = reflect.TypeOf((*DisableRulesetRequestType)(nil)).Elem() +} + +type DisableRulesetResponse struct { +} + +type DisableSecondaryVMRequestType struct { + This ManagedObjectReference 
`xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` +} + +func init() { + t["DisableSecondaryVMRequestType"] = reflect.TypeOf((*DisableSecondaryVMRequestType)(nil)).Elem() +} + +type DisableSecondaryVM_Task DisableSecondaryVMRequestType + +func init() { + t["DisableSecondaryVM_Task"] = reflect.TypeOf((*DisableSecondaryVM_Task)(nil)).Elem() +} + +type DisableSecondaryVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DisableSmartCardAuthentication DisableSmartCardAuthenticationRequestType + +func init() { + t["DisableSmartCardAuthentication"] = reflect.TypeOf((*DisableSmartCardAuthentication)(nil)).Elem() +} + +type DisableSmartCardAuthenticationRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DisableSmartCardAuthenticationRequestType"] = reflect.TypeOf((*DisableSmartCardAuthenticationRequestType)(nil)).Elem() +} + +type DisableSmartCardAuthenticationResponse struct { +} + +type DisallowedChangeByService struct { + RuntimeFault + + ServiceName string `xml:"serviceName"` + DisallowedChange string `xml:"disallowedChange,omitempty"` +} + +func init() { + t["DisallowedChangeByService"] = reflect.TypeOf((*DisallowedChangeByService)(nil)).Elem() +} + +type DisallowedChangeByServiceFault DisallowedChangeByService + +func init() { + t["DisallowedChangeByServiceFault"] = reflect.TypeOf((*DisallowedChangeByServiceFault)(nil)).Elem() +} + +type DisallowedDiskModeChange struct { + InvalidDeviceSpec +} + +func init() { + t["DisallowedDiskModeChange"] = reflect.TypeOf((*DisallowedDiskModeChange)(nil)).Elem() +} + +type DisallowedDiskModeChangeFault DisallowedDiskModeChange + +func init() { + t["DisallowedDiskModeChangeFault"] = reflect.TypeOf((*DisallowedDiskModeChangeFault)(nil)).Elem() +} + +type DisallowedMigrationDeviceAttached struct { + MigrationFault + + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["DisallowedMigrationDeviceAttached"] = reflect.TypeOf((*DisallowedMigrationDeviceAttached)(nil)).Elem() +} + +type DisallowedMigrationDeviceAttachedFault DisallowedMigrationDeviceAttached + +func init() { + t["DisallowedMigrationDeviceAttachedFault"] = reflect.TypeOf((*DisallowedMigrationDeviceAttachedFault)(nil)).Elem() +} + +type DisallowedOperationOnFailoverHost struct { + RuntimeFault + + Host ManagedObjectReference `xml:"host"` + Hostname string `xml:"hostname"` +} + +func init() { + t["DisallowedOperationOnFailoverHost"] = reflect.TypeOf((*DisallowedOperationOnFailoverHost)(nil)).Elem() +} + +type DisallowedOperationOnFailoverHostFault DisallowedOperationOnFailoverHost + +func init() { + t["DisallowedOperationOnFailoverHostFault"] = reflect.TypeOf((*DisallowedOperationOnFailoverHostFault)(nil)).Elem() +} + +type DisconnectHostRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DisconnectHostRequestType"] = reflect.TypeOf((*DisconnectHostRequestType)(nil)).Elem() +} + +type DisconnectHost_Task DisconnectHostRequestType + +func init() { + t["DisconnectHost_Task"] = reflect.TypeOf((*DisconnectHost_Task)(nil)).Elem() +} + +type DisconnectHost_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DisconnectedHostsBlockingEVC struct { + EVCConfigFault +} + +func init() { + t["DisconnectedHostsBlockingEVC"] = reflect.TypeOf((*DisconnectedHostsBlockingEVC)(nil)).Elem() +} + +type DisconnectedHostsBlockingEVCFault DisconnectedHostsBlockingEVC + +func init() { + t["DisconnectedHostsBlockingEVCFault"] = 
reflect.TypeOf((*DisconnectedHostsBlockingEVCFault)(nil)).Elem() +} + +type DiscoverFcoeHbas DiscoverFcoeHbasRequestType + +func init() { + t["DiscoverFcoeHbas"] = reflect.TypeOf((*DiscoverFcoeHbas)(nil)).Elem() +} + +type DiscoverFcoeHbasRequestType struct { + This ManagedObjectReference `xml:"_this"` + FcoeSpec FcoeConfigFcoeSpecification `xml:"fcoeSpec"` +} + +func init() { + t["DiscoverFcoeHbasRequestType"] = reflect.TypeOf((*DiscoverFcoeHbasRequestType)(nil)).Elem() +} + +type DiscoverFcoeHbasResponse struct { +} + +type DiskChangeExtent struct { + DynamicData + + Start int64 `xml:"start"` + Length int64 `xml:"length"` +} + +func init() { + t["DiskChangeExtent"] = reflect.TypeOf((*DiskChangeExtent)(nil)).Elem() +} + +type DiskChangeInfo struct { + DynamicData + + StartOffset int64 `xml:"startOffset"` + Length int64 `xml:"length"` + ChangedArea []DiskChangeExtent `xml:"changedArea,omitempty"` +} + +func init() { + t["DiskChangeInfo"] = reflect.TypeOf((*DiskChangeInfo)(nil)).Elem() +} + +type DiskHasPartitions struct { + VsanDiskFault +} + +func init() { + t["DiskHasPartitions"] = reflect.TypeOf((*DiskHasPartitions)(nil)).Elem() +} + +type DiskHasPartitionsFault DiskHasPartitions + +func init() { + t["DiskHasPartitionsFault"] = reflect.TypeOf((*DiskHasPartitionsFault)(nil)).Elem() +} + +type DiskIsLastRemainingNonSSD struct { + VsanDiskFault +} + +func init() { + t["DiskIsLastRemainingNonSSD"] = reflect.TypeOf((*DiskIsLastRemainingNonSSD)(nil)).Elem() +} + +type DiskIsLastRemainingNonSSDFault DiskIsLastRemainingNonSSD + +func init() { + t["DiskIsLastRemainingNonSSDFault"] = reflect.TypeOf((*DiskIsLastRemainingNonSSDFault)(nil)).Elem() +} + +type DiskIsNonLocal struct { + VsanDiskFault +} + +func init() { + t["DiskIsNonLocal"] = reflect.TypeOf((*DiskIsNonLocal)(nil)).Elem() +} + +type DiskIsNonLocalFault DiskIsNonLocal + +func init() { + t["DiskIsNonLocalFault"] = reflect.TypeOf((*DiskIsNonLocalFault)(nil)).Elem() +} + +type DiskIsUSB struct { + VsanDiskFault +} + +func init() { + t["DiskIsUSB"] = reflect.TypeOf((*DiskIsUSB)(nil)).Elem() +} + +type DiskIsUSBFault DiskIsUSB + +func init() { + t["DiskIsUSBFault"] = reflect.TypeOf((*DiskIsUSBFault)(nil)).Elem() +} + +type DiskMoveTypeNotSupported struct { + MigrationFault +} + +func init() { + t["DiskMoveTypeNotSupported"] = reflect.TypeOf((*DiskMoveTypeNotSupported)(nil)).Elem() +} + +type DiskMoveTypeNotSupportedFault DiskMoveTypeNotSupported + +func init() { + t["DiskMoveTypeNotSupportedFault"] = reflect.TypeOf((*DiskMoveTypeNotSupportedFault)(nil)).Elem() +} + +type DiskNotSupported struct { + VirtualHardwareCompatibilityIssue + + Disk int32 `xml:"disk"` +} + +func init() { + t["DiskNotSupported"] = reflect.TypeOf((*DiskNotSupported)(nil)).Elem() +} + +type DiskNotSupportedFault BaseDiskNotSupported + +func init() { + t["DiskNotSupportedFault"] = reflect.TypeOf((*DiskNotSupportedFault)(nil)).Elem() +} + +type DiskTooSmall struct { + VsanDiskFault +} + +func init() { + t["DiskTooSmall"] = reflect.TypeOf((*DiskTooSmall)(nil)).Elem() +} + +type DiskTooSmallFault DiskTooSmall + +func init() { + t["DiskTooSmallFault"] = reflect.TypeOf((*DiskTooSmallFault)(nil)).Elem() +} + +type DissociateProfile DissociateProfileRequestType + +func init() { + t["DissociateProfile"] = reflect.TypeOf((*DissociateProfile)(nil)).Elem() +} + +type DissociateProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity []ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["DissociateProfileRequestType"] = 
reflect.TypeOf((*DissociateProfileRequestType)(nil)).Elem() +} + +type DissociateProfileResponse struct { +} + +type DistributedVirtualPort struct { + DynamicData + + Key string `xml:"key"` + Config DVPortConfigInfo `xml:"config"` + DvsUuid string `xml:"dvsUuid"` + PortgroupKey string `xml:"portgroupKey,omitempty"` + ProxyHost *ManagedObjectReference `xml:"proxyHost,omitempty"` + Connectee *DistributedVirtualSwitchPortConnectee `xml:"connectee,omitempty"` + Conflict bool `xml:"conflict"` + ConflictPortKey string `xml:"conflictPortKey,omitempty"` + State *DVPortState `xml:"state,omitempty"` + ConnectionCookie int32 `xml:"connectionCookie,omitempty"` + LastStatusChange time.Time `xml:"lastStatusChange"` + HostLocalPort *bool `xml:"hostLocalPort"` +} + +func init() { + t["DistributedVirtualPort"] = reflect.TypeOf((*DistributedVirtualPort)(nil)).Elem() +} + +type DistributedVirtualPortgroupInfo struct { + DynamicData + + SwitchName string `xml:"switchName"` + SwitchUuid string `xml:"switchUuid"` + PortgroupName string `xml:"portgroupName"` + PortgroupKey string `xml:"portgroupKey"` + PortgroupType string `xml:"portgroupType"` + UplinkPortgroup bool `xml:"uplinkPortgroup"` + Portgroup ManagedObjectReference `xml:"portgroup"` + NetworkReservationSupported *bool `xml:"networkReservationSupported"` +} + +func init() { + t["DistributedVirtualPortgroupInfo"] = reflect.TypeOf((*DistributedVirtualPortgroupInfo)(nil)).Elem() +} + +type DistributedVirtualSwitchHostMember struct { + DynamicData + + RuntimeState *DistributedVirtualSwitchHostMemberRuntimeState `xml:"runtimeState,omitempty"` + Config DistributedVirtualSwitchHostMemberConfigInfo `xml:"config"` + ProductInfo *DistributedVirtualSwitchProductSpec `xml:"productInfo,omitempty"` + UplinkPortKey []string `xml:"uplinkPortKey,omitempty"` + Status string `xml:"status"` + StatusDetail string `xml:"statusDetail,omitempty"` +} + +func init() { + t["DistributedVirtualSwitchHostMember"] = reflect.TypeOf((*DistributedVirtualSwitchHostMember)(nil)).Elem() +} + +type DistributedVirtualSwitchHostMemberBacking struct { + DynamicData +} + +func init() { + t["DistributedVirtualSwitchHostMemberBacking"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberBacking)(nil)).Elem() +} + +type DistributedVirtualSwitchHostMemberConfigInfo struct { + DynamicData + + Host *ManagedObjectReference `xml:"host,omitempty"` + MaxProxySwitchPorts int32 `xml:"maxProxySwitchPorts"` + VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"` + Backing BaseDistributedVirtualSwitchHostMemberBacking `xml:"backing,typeattr"` +} + +func init() { + t["DistributedVirtualSwitchHostMemberConfigInfo"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberConfigInfo)(nil)).Elem() +} + +type DistributedVirtualSwitchHostMemberConfigSpec struct { + DynamicData + + Operation string `xml:"operation"` + Host ManagedObjectReference `xml:"host"` + Backing BaseDistributedVirtualSwitchHostMemberBacking `xml:"backing,omitempty,typeattr"` + MaxProxySwitchPorts int32 `xml:"maxProxySwitchPorts,omitempty"` + VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"` +} + +func init() { + t["DistributedVirtualSwitchHostMemberConfigSpec"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberConfigSpec)(nil)).Elem() +} + +type DistributedVirtualSwitchHostMemberPnicBacking struct { + DistributedVirtualSwitchHostMemberBacking + + PnicSpec []DistributedVirtualSwitchHostMemberPnicSpec `xml:"pnicSpec,omitempty"` +} + +func init() { 
+ t["DistributedVirtualSwitchHostMemberPnicBacking"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberPnicBacking)(nil)).Elem() +} + +type DistributedVirtualSwitchHostMemberPnicSpec struct { + DynamicData + + PnicDevice string `xml:"pnicDevice"` + UplinkPortKey string `xml:"uplinkPortKey,omitempty"` + UplinkPortgroupKey string `xml:"uplinkPortgroupKey,omitempty"` + ConnectionCookie int32 `xml:"connectionCookie,omitempty"` +} + +func init() { + t["DistributedVirtualSwitchHostMemberPnicSpec"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberPnicSpec)(nil)).Elem() +} + +type DistributedVirtualSwitchHostMemberRuntimeState struct { + DynamicData + + CurrentMaxProxySwitchPorts int32 `xml:"currentMaxProxySwitchPorts"` +} + +func init() { + t["DistributedVirtualSwitchHostMemberRuntimeState"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberRuntimeState)(nil)).Elem() +} + +type DistributedVirtualSwitchHostProductSpec struct { + DynamicData + + ProductLineId string `xml:"productLineId,omitempty"` + Version string `xml:"version,omitempty"` +} + +func init() { + t["DistributedVirtualSwitchHostProductSpec"] = reflect.TypeOf((*DistributedVirtualSwitchHostProductSpec)(nil)).Elem() +} + +type DistributedVirtualSwitchInfo struct { + DynamicData + + SwitchName string `xml:"switchName"` + SwitchUuid string `xml:"switchUuid"` + DistributedVirtualSwitch ManagedObjectReference `xml:"distributedVirtualSwitch"` + NetworkReservationSupported *bool `xml:"networkReservationSupported"` +} + +func init() { + t["DistributedVirtualSwitchInfo"] = reflect.TypeOf((*DistributedVirtualSwitchInfo)(nil)).Elem() +} + +type DistributedVirtualSwitchKeyedOpaqueBlob struct { + DynamicData + + Key string `xml:"key"` + OpaqueData string `xml:"opaqueData"` +} + +func init() { + t["DistributedVirtualSwitchKeyedOpaqueBlob"] = reflect.TypeOf((*DistributedVirtualSwitchKeyedOpaqueBlob)(nil)).Elem() +} + +type DistributedVirtualSwitchManagerCompatibilityResult struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Error []LocalizedMethodFault `xml:"error,omitempty"` +} + +func init() { + t["DistributedVirtualSwitchManagerCompatibilityResult"] = reflect.TypeOf((*DistributedVirtualSwitchManagerCompatibilityResult)(nil)).Elem() +} + +type DistributedVirtualSwitchManagerDvsProductSpec struct { + DynamicData + + NewSwitchProductSpec *DistributedVirtualSwitchProductSpec `xml:"newSwitchProductSpec,omitempty"` + DistributedVirtualSwitch *ManagedObjectReference `xml:"distributedVirtualSwitch,omitempty"` +} + +func init() { + t["DistributedVirtualSwitchManagerDvsProductSpec"] = reflect.TypeOf((*DistributedVirtualSwitchManagerDvsProductSpec)(nil)).Elem() +} + +type DistributedVirtualSwitchManagerHostArrayFilter struct { + DistributedVirtualSwitchManagerHostDvsFilterSpec + + Host []ManagedObjectReference `xml:"host"` +} + +func init() { + t["DistributedVirtualSwitchManagerHostArrayFilter"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostArrayFilter)(nil)).Elem() +} + +type DistributedVirtualSwitchManagerHostContainer struct { + DynamicData + + Container ManagedObjectReference `xml:"container"` + Recursive bool `xml:"recursive"` +} + +func init() { + t["DistributedVirtualSwitchManagerHostContainer"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostContainer)(nil)).Elem() +} + +type DistributedVirtualSwitchManagerHostContainerFilter struct { + DistributedVirtualSwitchManagerHostDvsFilterSpec + + HostContainer DistributedVirtualSwitchManagerHostContainer `xml:"hostContainer"` +} + +func init() { + 
t["DistributedVirtualSwitchManagerHostContainerFilter"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostContainerFilter)(nil)).Elem() +} + +type DistributedVirtualSwitchManagerHostDvsFilterSpec struct { + DynamicData + + Inclusive bool `xml:"inclusive"` +} + +func init() { + t["DistributedVirtualSwitchManagerHostDvsFilterSpec"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostDvsFilterSpec)(nil)).Elem() +} + +type DistributedVirtualSwitchManagerHostDvsMembershipFilter struct { + DistributedVirtualSwitchManagerHostDvsFilterSpec + + DistributedVirtualSwitch ManagedObjectReference `xml:"distributedVirtualSwitch"` +} + +func init() { + t["DistributedVirtualSwitchManagerHostDvsMembershipFilter"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostDvsMembershipFilter)(nil)).Elem() +} + +type DistributedVirtualSwitchManagerImportResult struct { + DynamicData + + DistributedVirtualSwitch []ManagedObjectReference `xml:"distributedVirtualSwitch,omitempty"` + DistributedVirtualPortgroup []ManagedObjectReference `xml:"distributedVirtualPortgroup,omitempty"` + ImportFault []ImportOperationBulkFaultFaultOnImport `xml:"importFault,omitempty"` +} + +func init() { + t["DistributedVirtualSwitchManagerImportResult"] = reflect.TypeOf((*DistributedVirtualSwitchManagerImportResult)(nil)).Elem() +} + +type DistributedVirtualSwitchPortConnectee struct { + DynamicData + + ConnectedEntity *ManagedObjectReference `xml:"connectedEntity,omitempty"` + NicKey string `xml:"nicKey,omitempty"` + Type string `xml:"type,omitempty"` + AddressHint string `xml:"addressHint,omitempty"` +} + +func init() { + t["DistributedVirtualSwitchPortConnectee"] = reflect.TypeOf((*DistributedVirtualSwitchPortConnectee)(nil)).Elem() +} + +type DistributedVirtualSwitchPortConnection struct { + DynamicData + + SwitchUuid string `xml:"switchUuid"` + PortgroupKey string `xml:"portgroupKey,omitempty"` + PortKey string `xml:"portKey,omitempty"` + ConnectionCookie int32 `xml:"connectionCookie,omitempty"` +} + +func init() { + t["DistributedVirtualSwitchPortConnection"] = reflect.TypeOf((*DistributedVirtualSwitchPortConnection)(nil)).Elem() +} + +type DistributedVirtualSwitchPortCriteria struct { + DynamicData + + Connected *bool `xml:"connected"` + Active *bool `xml:"active"` + UplinkPort *bool `xml:"uplinkPort"` + Scope *ManagedObjectReference `xml:"scope,omitempty"` + PortgroupKey []string `xml:"portgroupKey,omitempty"` + Inside *bool `xml:"inside"` + PortKey []string `xml:"portKey,omitempty"` + Host []ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["DistributedVirtualSwitchPortCriteria"] = reflect.TypeOf((*DistributedVirtualSwitchPortCriteria)(nil)).Elem() +} + +type DistributedVirtualSwitchPortStatistics struct { + DynamicData + + PacketsInMulticast int64 `xml:"packetsInMulticast"` + PacketsOutMulticast int64 `xml:"packetsOutMulticast"` + BytesInMulticast int64 `xml:"bytesInMulticast"` + BytesOutMulticast int64 `xml:"bytesOutMulticast"` + PacketsInUnicast int64 `xml:"packetsInUnicast"` + PacketsOutUnicast int64 `xml:"packetsOutUnicast"` + BytesInUnicast int64 `xml:"bytesInUnicast"` + BytesOutUnicast int64 `xml:"bytesOutUnicast"` + PacketsInBroadcast int64 `xml:"packetsInBroadcast"` + PacketsOutBroadcast int64 `xml:"packetsOutBroadcast"` + BytesInBroadcast int64 `xml:"bytesInBroadcast"` + BytesOutBroadcast int64 `xml:"bytesOutBroadcast"` + PacketsInDropped int64 `xml:"packetsInDropped"` + PacketsOutDropped int64 `xml:"packetsOutDropped"` + PacketsInException int64 `xml:"packetsInException"` + PacketsOutException 
int64 `xml:"packetsOutException"` + BytesInFromPnic int64 `xml:"bytesInFromPnic,omitempty"` + BytesOutToPnic int64 `xml:"bytesOutToPnic,omitempty"` +} + +func init() { + t["DistributedVirtualSwitchPortStatistics"] = reflect.TypeOf((*DistributedVirtualSwitchPortStatistics)(nil)).Elem() +} + +type DistributedVirtualSwitchProductSpec struct { + DynamicData + + Name string `xml:"name,omitempty"` + Vendor string `xml:"vendor,omitempty"` + Version string `xml:"version,omitempty"` + Build string `xml:"build,omitempty"` + ForwardingClass string `xml:"forwardingClass,omitempty"` + BundleId string `xml:"bundleId,omitempty"` + BundleUrl string `xml:"bundleUrl,omitempty"` +} + +func init() { + t["DistributedVirtualSwitchProductSpec"] = reflect.TypeOf((*DistributedVirtualSwitchProductSpec)(nil)).Elem() +} + +type DoesCustomizationSpecExist DoesCustomizationSpecExistRequestType + +func init() { + t["DoesCustomizationSpecExist"] = reflect.TypeOf((*DoesCustomizationSpecExist)(nil)).Elem() +} + +type DoesCustomizationSpecExistRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` +} + +func init() { + t["DoesCustomizationSpecExistRequestType"] = reflect.TypeOf((*DoesCustomizationSpecExistRequestType)(nil)).Elem() +} + +type DoesCustomizationSpecExistResponse struct { + Returnval bool `xml:"returnval"` +} + +type DomainNotFound struct { + ActiveDirectoryFault + + DomainName string `xml:"domainName"` +} + +func init() { + t["DomainNotFound"] = reflect.TypeOf((*DomainNotFound)(nil)).Elem() +} + +type DomainNotFoundFault DomainNotFound + +func init() { + t["DomainNotFoundFault"] = reflect.TypeOf((*DomainNotFoundFault)(nil)).Elem() +} + +type DrsDisabledEvent struct { + ClusterEvent +} + +func init() { + t["DrsDisabledEvent"] = reflect.TypeOf((*DrsDisabledEvent)(nil)).Elem() +} + +type DrsDisabledOnVm struct { + VimFault +} + +func init() { + t["DrsDisabledOnVm"] = reflect.TypeOf((*DrsDisabledOnVm)(nil)).Elem() +} + +type DrsDisabledOnVmFault DrsDisabledOnVm + +func init() { + t["DrsDisabledOnVmFault"] = reflect.TypeOf((*DrsDisabledOnVmFault)(nil)).Elem() +} + +type DrsEnabledEvent struct { + ClusterEvent + + Behavior string `xml:"behavior"` +} + +func init() { + t["DrsEnabledEvent"] = reflect.TypeOf((*DrsEnabledEvent)(nil)).Elem() +} + +type DrsEnteredStandbyModeEvent struct { + EnteredStandbyModeEvent +} + +func init() { + t["DrsEnteredStandbyModeEvent"] = reflect.TypeOf((*DrsEnteredStandbyModeEvent)(nil)).Elem() +} + +type DrsEnteringStandbyModeEvent struct { + EnteringStandbyModeEvent +} + +func init() { + t["DrsEnteringStandbyModeEvent"] = reflect.TypeOf((*DrsEnteringStandbyModeEvent)(nil)).Elem() +} + +type DrsExitStandbyModeFailedEvent struct { + ExitStandbyModeFailedEvent +} + +func init() { + t["DrsExitStandbyModeFailedEvent"] = reflect.TypeOf((*DrsExitStandbyModeFailedEvent)(nil)).Elem() +} + +type DrsExitedStandbyModeEvent struct { + ExitedStandbyModeEvent +} + +func init() { + t["DrsExitedStandbyModeEvent"] = reflect.TypeOf((*DrsExitedStandbyModeEvent)(nil)).Elem() +} + +type DrsExitingStandbyModeEvent struct { + ExitingStandbyModeEvent +} + +func init() { + t["DrsExitingStandbyModeEvent"] = reflect.TypeOf((*DrsExitingStandbyModeEvent)(nil)).Elem() +} + +type DrsInvocationFailedEvent struct { + ClusterEvent +} + +func init() { + t["DrsInvocationFailedEvent"] = reflect.TypeOf((*DrsInvocationFailedEvent)(nil)).Elem() +} + +type DrsRecoveredFromFailureEvent struct { + ClusterEvent +} + +func init() { + t["DrsRecoveredFromFailureEvent"] = 
reflect.TypeOf((*DrsRecoveredFromFailureEvent)(nil)).Elem() +} + +type DrsResourceConfigureFailedEvent struct { + HostEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["DrsResourceConfigureFailedEvent"] = reflect.TypeOf((*DrsResourceConfigureFailedEvent)(nil)).Elem() +} + +type DrsResourceConfigureSyncedEvent struct { + HostEvent +} + +func init() { + t["DrsResourceConfigureSyncedEvent"] = reflect.TypeOf((*DrsResourceConfigureSyncedEvent)(nil)).Elem() +} + +type DrsRuleComplianceEvent struct { + VmEvent +} + +func init() { + t["DrsRuleComplianceEvent"] = reflect.TypeOf((*DrsRuleComplianceEvent)(nil)).Elem() +} + +type DrsRuleViolationEvent struct { + VmEvent +} + +func init() { + t["DrsRuleViolationEvent"] = reflect.TypeOf((*DrsRuleViolationEvent)(nil)).Elem() +} + +type DrsSoftRuleViolationEvent struct { + VmEvent +} + +func init() { + t["DrsSoftRuleViolationEvent"] = reflect.TypeOf((*DrsSoftRuleViolationEvent)(nil)).Elem() +} + +type DrsVmMigratedEvent struct { + VmMigratedEvent +} + +func init() { + t["DrsVmMigratedEvent"] = reflect.TypeOf((*DrsVmMigratedEvent)(nil)).Elem() +} + +type DrsVmPoweredOnEvent struct { + VmPoweredOnEvent +} + +func init() { + t["DrsVmPoweredOnEvent"] = reflect.TypeOf((*DrsVmPoweredOnEvent)(nil)).Elem() +} + +type DrsVmotionIncompatibleFault struct { + VirtualHardwareCompatibilityIssue + + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["DrsVmotionIncompatibleFault"] = reflect.TypeOf((*DrsVmotionIncompatibleFault)(nil)).Elem() +} + +type DrsVmotionIncompatibleFaultFault DrsVmotionIncompatibleFault + +func init() { + t["DrsVmotionIncompatibleFaultFault"] = reflect.TypeOf((*DrsVmotionIncompatibleFaultFault)(nil)).Elem() +} + +type DuplicateCustomizationSpec DuplicateCustomizationSpecRequestType + +func init() { + t["DuplicateCustomizationSpec"] = reflect.TypeOf((*DuplicateCustomizationSpec)(nil)).Elem() +} + +type DuplicateCustomizationSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + NewName string `xml:"newName"` +} + +func init() { + t["DuplicateCustomizationSpecRequestType"] = reflect.TypeOf((*DuplicateCustomizationSpecRequestType)(nil)).Elem() +} + +type DuplicateCustomizationSpecResponse struct { +} + +type DuplicateDisks struct { + VsanDiskFault +} + +func init() { + t["DuplicateDisks"] = reflect.TypeOf((*DuplicateDisks)(nil)).Elem() +} + +type DuplicateDisksFault DuplicateDisks + +func init() { + t["DuplicateDisksFault"] = reflect.TypeOf((*DuplicateDisksFault)(nil)).Elem() +} + +type DuplicateIpDetectedEvent struct { + HostEvent + + DuplicateIP string `xml:"duplicateIP"` + MacAddress string `xml:"macAddress"` +} + +func init() { + t["DuplicateIpDetectedEvent"] = reflect.TypeOf((*DuplicateIpDetectedEvent)(nil)).Elem() +} + +type DuplicateName struct { + VimFault + + Name string `xml:"name"` + Object ManagedObjectReference `xml:"object"` +} + +func init() { + t["DuplicateName"] = reflect.TypeOf((*DuplicateName)(nil)).Elem() +} + +type DuplicateNameFault DuplicateName + +func init() { + t["DuplicateNameFault"] = reflect.TypeOf((*DuplicateNameFault)(nil)).Elem() +} + +type DuplicateVsanNetworkInterface struct { + VsanFault + + Device string `xml:"device"` +} + +func init() { + t["DuplicateVsanNetworkInterface"] = reflect.TypeOf((*DuplicateVsanNetworkInterface)(nil)).Elem() +} + +type DuplicateVsanNetworkInterfaceFault DuplicateVsanNetworkInterface + +func init() { + t["DuplicateVsanNetworkInterfaceFault"] = 
reflect.TypeOf((*DuplicateVsanNetworkInterfaceFault)(nil)).Elem() +} + +type DvpgImportEvent struct { + DVPortgroupEvent + + ImportType string `xml:"importType"` +} + +func init() { + t["DvpgImportEvent"] = reflect.TypeOf((*DvpgImportEvent)(nil)).Elem() +} + +type DvpgRestoreEvent struct { + DVPortgroupEvent +} + +func init() { + t["DvpgRestoreEvent"] = reflect.TypeOf((*DvpgRestoreEvent)(nil)).Elem() +} + +type DvsAcceptNetworkRuleAction struct { + DvsNetworkRuleAction +} + +func init() { + t["DvsAcceptNetworkRuleAction"] = reflect.TypeOf((*DvsAcceptNetworkRuleAction)(nil)).Elem() +} + +type DvsApplyOperationFault struct { + DvsFault + + ObjectFault []DvsApplyOperationFaultFaultOnObject `xml:"objectFault"` +} + +func init() { + t["DvsApplyOperationFault"] = reflect.TypeOf((*DvsApplyOperationFault)(nil)).Elem() +} + +type DvsApplyOperationFaultFault DvsApplyOperationFault + +func init() { + t["DvsApplyOperationFaultFault"] = reflect.TypeOf((*DvsApplyOperationFaultFault)(nil)).Elem() +} + +type DvsApplyOperationFaultFaultOnObject struct { + DynamicData + + ObjectId string `xml:"objectId"` + Type string `xml:"type"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["DvsApplyOperationFaultFaultOnObject"] = reflect.TypeOf((*DvsApplyOperationFaultFaultOnObject)(nil)).Elem() +} + +type DvsCopyNetworkRuleAction struct { + DvsNetworkRuleAction +} + +func init() { + t["DvsCopyNetworkRuleAction"] = reflect.TypeOf((*DvsCopyNetworkRuleAction)(nil)).Elem() +} + +type DvsCreatedEvent struct { + DvsEvent + + Parent FolderEventArgument `xml:"parent"` +} + +func init() { + t["DvsCreatedEvent"] = reflect.TypeOf((*DvsCreatedEvent)(nil)).Elem() +} + +type DvsDestroyedEvent struct { + DvsEvent +} + +func init() { + t["DvsDestroyedEvent"] = reflect.TypeOf((*DvsDestroyedEvent)(nil)).Elem() +} + +type DvsDropNetworkRuleAction struct { + DvsNetworkRuleAction +} + +func init() { + t["DvsDropNetworkRuleAction"] = reflect.TypeOf((*DvsDropNetworkRuleAction)(nil)).Elem() +} + +type DvsEvent struct { + Event +} + +func init() { + t["DvsEvent"] = reflect.TypeOf((*DvsEvent)(nil)).Elem() +} + +type DvsEventArgument struct { + EntityEventArgument + + Dvs ManagedObjectReference `xml:"dvs"` +} + +func init() { + t["DvsEventArgument"] = reflect.TypeOf((*DvsEventArgument)(nil)).Elem() +} + +type DvsFault struct { + VimFault +} + +func init() { + t["DvsFault"] = reflect.TypeOf((*DvsFault)(nil)).Elem() +} + +type DvsFaultFault BaseDvsFault + +func init() { + t["DvsFaultFault"] = reflect.TypeOf((*DvsFaultFault)(nil)).Elem() +} + +type DvsFilterConfig struct { + InheritablePolicy + + Key string `xml:"key,omitempty"` + AgentName string `xml:"agentName,omitempty"` + SlotNumber string `xml:"slotNumber,omitempty"` + Parameters *DvsFilterParameter `xml:"parameters,omitempty"` + OnFailure string `xml:"onFailure,omitempty"` +} + +func init() { + t["DvsFilterConfig"] = reflect.TypeOf((*DvsFilterConfig)(nil)).Elem() +} + +type DvsFilterConfigSpec struct { + DvsFilterConfig + + Operation string `xml:"operation"` +} + +func init() { + t["DvsFilterConfigSpec"] = reflect.TypeOf((*DvsFilterConfigSpec)(nil)).Elem() +} + +type DvsFilterParameter struct { + DynamicData + + Parameters []string `xml:"parameters,omitempty"` +} + +func init() { + t["DvsFilterParameter"] = reflect.TypeOf((*DvsFilterParameter)(nil)).Elem() +} + +type DvsFilterPolicy struct { + InheritablePolicy + + FilterConfig []BaseDvsFilterConfig `xml:"filterConfig,omitempty,typeattr"` +} + +func init() { + t["DvsFilterPolicy"] = 
reflect.TypeOf((*DvsFilterPolicy)(nil)).Elem() +} + +type DvsGreEncapNetworkRuleAction struct { + DvsNetworkRuleAction + + EncapsulationIp SingleIp `xml:"encapsulationIp"` +} + +func init() { + t["DvsGreEncapNetworkRuleAction"] = reflect.TypeOf((*DvsGreEncapNetworkRuleAction)(nil)).Elem() +} + +type DvsHealthStatusChangeEvent struct { + HostEvent + + SwitchUuid string `xml:"switchUuid"` + HealthResult BaseHostMemberHealthCheckResult `xml:"healthResult,omitempty,typeattr"` +} + +func init() { + t["DvsHealthStatusChangeEvent"] = reflect.TypeOf((*DvsHealthStatusChangeEvent)(nil)).Elem() +} + +type DvsHostBackInSyncEvent struct { + DvsEvent + + HostBackInSync HostEventArgument `xml:"hostBackInSync"` +} + +func init() { + t["DvsHostBackInSyncEvent"] = reflect.TypeOf((*DvsHostBackInSyncEvent)(nil)).Elem() +} + +type DvsHostInfrastructureTrafficResource struct { + DynamicData + + Key string `xml:"key"` + Description string `xml:"description,omitempty"` + AllocationInfo DvsHostInfrastructureTrafficResourceAllocation `xml:"allocationInfo"` +} + +func init() { + t["DvsHostInfrastructureTrafficResource"] = reflect.TypeOf((*DvsHostInfrastructureTrafficResource)(nil)).Elem() +} + +type DvsHostInfrastructureTrafficResourceAllocation struct { + DynamicData + + Limit *int64 `xml:"limit"` + Shares *SharesInfo `xml:"shares,omitempty"` + Reservation *int64 `xml:"reservation"` +} + +func init() { + t["DvsHostInfrastructureTrafficResourceAllocation"] = reflect.TypeOf((*DvsHostInfrastructureTrafficResourceAllocation)(nil)).Elem() +} + +type DvsHostJoinedEvent struct { + DvsEvent + + HostJoined HostEventArgument `xml:"hostJoined"` +} + +func init() { + t["DvsHostJoinedEvent"] = reflect.TypeOf((*DvsHostJoinedEvent)(nil)).Elem() +} + +type DvsHostLeftEvent struct { + DvsEvent + + HostLeft HostEventArgument `xml:"hostLeft"` +} + +func init() { + t["DvsHostLeftEvent"] = reflect.TypeOf((*DvsHostLeftEvent)(nil)).Elem() +} + +type DvsHostStatusUpdated struct { + DvsEvent + + HostMember HostEventArgument `xml:"hostMember"` + OldStatus string `xml:"oldStatus,omitempty"` + NewStatus string `xml:"newStatus,omitempty"` + OldStatusDetail string `xml:"oldStatusDetail,omitempty"` + NewStatusDetail string `xml:"newStatusDetail,omitempty"` +} + +func init() { + t["DvsHostStatusUpdated"] = reflect.TypeOf((*DvsHostStatusUpdated)(nil)).Elem() +} + +type DvsHostVNicProfile struct { + DvsVNicProfile +} + +func init() { + t["DvsHostVNicProfile"] = reflect.TypeOf((*DvsHostVNicProfile)(nil)).Elem() +} + +type DvsHostWentOutOfSyncEvent struct { + DvsEvent + + HostOutOfSync DvsOutOfSyncHostArgument `xml:"hostOutOfSync"` +} + +func init() { + t["DvsHostWentOutOfSyncEvent"] = reflect.TypeOf((*DvsHostWentOutOfSyncEvent)(nil)).Elem() +} + +type DvsImportEvent struct { + DvsEvent + + ImportType string `xml:"importType"` +} + +func init() { + t["DvsImportEvent"] = reflect.TypeOf((*DvsImportEvent)(nil)).Elem() +} + +type DvsIpNetworkRuleQualifier struct { + DvsNetworkRuleQualifier + + SourceAddress BaseIpAddress `xml:"sourceAddress,omitempty,typeattr"` + DestinationAddress BaseIpAddress `xml:"destinationAddress,omitempty,typeattr"` + Protocol *IntExpression `xml:"protocol,omitempty"` + SourceIpPort BaseDvsIpPort `xml:"sourceIpPort,omitempty,typeattr"` + DestinationIpPort BaseDvsIpPort `xml:"destinationIpPort,omitempty,typeattr"` + TcpFlags *IntExpression `xml:"tcpFlags,omitempty"` +} + +func init() { + t["DvsIpNetworkRuleQualifier"] = reflect.TypeOf((*DvsIpNetworkRuleQualifier)(nil)).Elem() +} + +type DvsIpPort struct { + NegatableExpression +} 
+ +func init() { + t["DvsIpPort"] = reflect.TypeOf((*DvsIpPort)(nil)).Elem() +} + +type DvsIpPortRange struct { + DvsIpPort + + StartPortNumber int32 `xml:"startPortNumber"` + EndPortNumber int32 `xml:"endPortNumber"` +} + +func init() { + t["DvsIpPortRange"] = reflect.TypeOf((*DvsIpPortRange)(nil)).Elem() +} + +type DvsLogNetworkRuleAction struct { + DvsNetworkRuleAction +} + +func init() { + t["DvsLogNetworkRuleAction"] = reflect.TypeOf((*DvsLogNetworkRuleAction)(nil)).Elem() +} + +type DvsMacNetworkRuleQualifier struct { + DvsNetworkRuleQualifier + + SourceAddress BaseMacAddress `xml:"sourceAddress,omitempty,typeattr"` + DestinationAddress BaseMacAddress `xml:"destinationAddress,omitempty,typeattr"` + Protocol *IntExpression `xml:"protocol,omitempty"` + VlanId *IntExpression `xml:"vlanId,omitempty"` +} + +func init() { + t["DvsMacNetworkRuleQualifier"] = reflect.TypeOf((*DvsMacNetworkRuleQualifier)(nil)).Elem() +} + +type DvsMacRewriteNetworkRuleAction struct { + DvsNetworkRuleAction + + RewriteMac string `xml:"rewriteMac"` +} + +func init() { + t["DvsMacRewriteNetworkRuleAction"] = reflect.TypeOf((*DvsMacRewriteNetworkRuleAction)(nil)).Elem() +} + +type DvsMergedEvent struct { + DvsEvent + + SourceDvs DvsEventArgument `xml:"sourceDvs"` + DestinationDvs DvsEventArgument `xml:"destinationDvs"` +} + +func init() { + t["DvsMergedEvent"] = reflect.TypeOf((*DvsMergedEvent)(nil)).Elem() +} + +type DvsNetworkRuleAction struct { + DynamicData +} + +func init() { + t["DvsNetworkRuleAction"] = reflect.TypeOf((*DvsNetworkRuleAction)(nil)).Elem() +} + +type DvsNetworkRuleQualifier struct { + DynamicData + + Key string `xml:"key,omitempty"` +} + +func init() { + t["DvsNetworkRuleQualifier"] = reflect.TypeOf((*DvsNetworkRuleQualifier)(nil)).Elem() +} + +type DvsNotAuthorized struct { + DvsFault + + SessionExtensionKey string `xml:"sessionExtensionKey,omitempty"` + DvsExtensionKey string `xml:"dvsExtensionKey,omitempty"` +} + +func init() { + t["DvsNotAuthorized"] = reflect.TypeOf((*DvsNotAuthorized)(nil)).Elem() +} + +type DvsNotAuthorizedFault DvsNotAuthorized + +func init() { + t["DvsNotAuthorizedFault"] = reflect.TypeOf((*DvsNotAuthorizedFault)(nil)).Elem() +} + +type DvsOperationBulkFault struct { + DvsFault + + HostFault []DvsOperationBulkFaultFaultOnHost `xml:"hostFault"` +} + +func init() { + t["DvsOperationBulkFault"] = reflect.TypeOf((*DvsOperationBulkFault)(nil)).Elem() +} + +type DvsOperationBulkFaultFault DvsOperationBulkFault + +func init() { + t["DvsOperationBulkFaultFault"] = reflect.TypeOf((*DvsOperationBulkFaultFault)(nil)).Elem() +} + +type DvsOperationBulkFaultFaultOnHost struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["DvsOperationBulkFaultFaultOnHost"] = reflect.TypeOf((*DvsOperationBulkFaultFaultOnHost)(nil)).Elem() +} + +type DvsOutOfSyncHostArgument struct { + DynamicData + + OutOfSyncHost HostEventArgument `xml:"outOfSyncHost"` + ConfigParamters []string `xml:"configParamters"` +} + +func init() { + t["DvsOutOfSyncHostArgument"] = reflect.TypeOf((*DvsOutOfSyncHostArgument)(nil)).Elem() +} + +type DvsPortBlockedEvent struct { + DvsEvent + + PortKey string `xml:"portKey"` + StatusDetail string `xml:"statusDetail,omitempty"` + RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty"` + PrevBlockState string `xml:"prevBlockState,omitempty"` +} + +func init() { + t["DvsPortBlockedEvent"] = reflect.TypeOf((*DvsPortBlockedEvent)(nil)).Elem() +} + +type DvsPortConnectedEvent struct { + DvsEvent + + 
PortKey string `xml:"portKey"` + Connectee *DistributedVirtualSwitchPortConnectee `xml:"connectee,omitempty"` +} + +func init() { + t["DvsPortConnectedEvent"] = reflect.TypeOf((*DvsPortConnectedEvent)(nil)).Elem() +} + +type DvsPortCreatedEvent struct { + DvsEvent + + PortKey []string `xml:"portKey"` +} + +func init() { + t["DvsPortCreatedEvent"] = reflect.TypeOf((*DvsPortCreatedEvent)(nil)).Elem() +} + +type DvsPortDeletedEvent struct { + DvsEvent + + PortKey []string `xml:"portKey"` +} + +func init() { + t["DvsPortDeletedEvent"] = reflect.TypeOf((*DvsPortDeletedEvent)(nil)).Elem() +} + +type DvsPortDisconnectedEvent struct { + DvsEvent + + PortKey string `xml:"portKey"` + Connectee *DistributedVirtualSwitchPortConnectee `xml:"connectee,omitempty"` +} + +func init() { + t["DvsPortDisconnectedEvent"] = reflect.TypeOf((*DvsPortDisconnectedEvent)(nil)).Elem() +} + +type DvsPortEnteredPassthruEvent struct { + DvsEvent + + PortKey string `xml:"portKey"` + RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty"` +} + +func init() { + t["DvsPortEnteredPassthruEvent"] = reflect.TypeOf((*DvsPortEnteredPassthruEvent)(nil)).Elem() +} + +type DvsPortExitedPassthruEvent struct { + DvsEvent + + PortKey string `xml:"portKey"` + RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty"` +} + +func init() { + t["DvsPortExitedPassthruEvent"] = reflect.TypeOf((*DvsPortExitedPassthruEvent)(nil)).Elem() +} + +type DvsPortJoinPortgroupEvent struct { + DvsEvent + + PortKey string `xml:"portKey"` + PortgroupKey string `xml:"portgroupKey"` + PortgroupName string `xml:"portgroupName"` +} + +func init() { + t["DvsPortJoinPortgroupEvent"] = reflect.TypeOf((*DvsPortJoinPortgroupEvent)(nil)).Elem() +} + +type DvsPortLeavePortgroupEvent struct { + DvsEvent + + PortKey string `xml:"portKey"` + PortgroupKey string `xml:"portgroupKey"` + PortgroupName string `xml:"portgroupName"` +} + +func init() { + t["DvsPortLeavePortgroupEvent"] = reflect.TypeOf((*DvsPortLeavePortgroupEvent)(nil)).Elem() +} + +type DvsPortLinkDownEvent struct { + DvsEvent + + PortKey string `xml:"portKey"` + RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty"` +} + +func init() { + t["DvsPortLinkDownEvent"] = reflect.TypeOf((*DvsPortLinkDownEvent)(nil)).Elem() +} + +type DvsPortLinkUpEvent struct { + DvsEvent + + PortKey string `xml:"portKey"` + RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty"` +} + +func init() { + t["DvsPortLinkUpEvent"] = reflect.TypeOf((*DvsPortLinkUpEvent)(nil)).Elem() +} + +type DvsPortReconfiguredEvent struct { + DvsEvent + + PortKey []string `xml:"portKey"` + ConfigChanges []ChangesInfoEventArgument `xml:"configChanges,omitempty"` +} + +func init() { + t["DvsPortReconfiguredEvent"] = reflect.TypeOf((*DvsPortReconfiguredEvent)(nil)).Elem() +} + +type DvsPortRuntimeChangeEvent struct { + DvsEvent + + PortKey string `xml:"portKey"` + RuntimeInfo DVPortStatus `xml:"runtimeInfo"` +} + +func init() { + t["DvsPortRuntimeChangeEvent"] = reflect.TypeOf((*DvsPortRuntimeChangeEvent)(nil)).Elem() +} + +type DvsPortUnblockedEvent struct { + DvsEvent + + PortKey string `xml:"portKey"` + RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty"` + PrevBlockState string `xml:"prevBlockState,omitempty"` +} + +func init() { + t["DvsPortUnblockedEvent"] = reflect.TypeOf((*DvsPortUnblockedEvent)(nil)).Elem() +} + +type DvsPortVendorSpecificStateChangeEvent struct { + DvsEvent + + PortKey string `xml:"portKey"` +} + +func init() { + t["DvsPortVendorSpecificStateChangeEvent"] = reflect.TypeOf((*DvsPortVendorSpecificStateChangeEvent)(nil)).Elem() 
+} + +type DvsProfile struct { + ApplyProfile + + Key string `xml:"key"` + Name string `xml:"name"` + Uplink []PnicUplinkProfile `xml:"uplink,omitempty"` +} + +func init() { + t["DvsProfile"] = reflect.TypeOf((*DvsProfile)(nil)).Elem() +} + +type DvsPuntNetworkRuleAction struct { + DvsNetworkRuleAction +} + +func init() { + t["DvsPuntNetworkRuleAction"] = reflect.TypeOf((*DvsPuntNetworkRuleAction)(nil)).Elem() +} + +type DvsRateLimitNetworkRuleAction struct { + DvsNetworkRuleAction + + PacketsPerSecond int32 `xml:"packetsPerSecond"` +} + +func init() { + t["DvsRateLimitNetworkRuleAction"] = reflect.TypeOf((*DvsRateLimitNetworkRuleAction)(nil)).Elem() +} + +type DvsReconfigureVmVnicNetworkResourcePoolRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConfigSpec []DvsVmVnicResourcePoolConfigSpec `xml:"configSpec"` +} + +func init() { + t["DvsReconfigureVmVnicNetworkResourcePoolRequestType"] = reflect.TypeOf((*DvsReconfigureVmVnicNetworkResourcePoolRequestType)(nil)).Elem() +} + +type DvsReconfigureVmVnicNetworkResourcePool_Task DvsReconfigureVmVnicNetworkResourcePoolRequestType + +func init() { + t["DvsReconfigureVmVnicNetworkResourcePool_Task"] = reflect.TypeOf((*DvsReconfigureVmVnicNetworkResourcePool_Task)(nil)).Elem() +} + +type DvsReconfigureVmVnicNetworkResourcePool_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DvsReconfiguredEvent struct { + DvsEvent + + ConfigSpec BaseDVSConfigSpec `xml:"configSpec,typeattr"` + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty"` +} + +func init() { + t["DvsReconfiguredEvent"] = reflect.TypeOf((*DvsReconfiguredEvent)(nil)).Elem() +} + +type DvsRenamedEvent struct { + DvsEvent + + OldName string `xml:"oldName"` + NewName string `xml:"newName"` +} + +func init() { + t["DvsRenamedEvent"] = reflect.TypeOf((*DvsRenamedEvent)(nil)).Elem() +} + +type DvsResourceRuntimeInfo struct { + DynamicData + + Capacity int32 `xml:"capacity,omitempty"` + Usage int32 `xml:"usage,omitempty"` + Available int32 `xml:"available,omitempty"` + AllocatedResource []DvsVnicAllocatedResource `xml:"allocatedResource,omitempty"` + VmVnicNetworkResourcePoolRuntime []DvsVmVnicNetworkResourcePoolRuntimeInfo `xml:"vmVnicNetworkResourcePoolRuntime,omitempty"` +} + +func init() { + t["DvsResourceRuntimeInfo"] = reflect.TypeOf((*DvsResourceRuntimeInfo)(nil)).Elem() +} + +type DvsRestoreEvent struct { + DvsEvent +} + +func init() { + t["DvsRestoreEvent"] = reflect.TypeOf((*DvsRestoreEvent)(nil)).Elem() +} + +type DvsScopeViolated struct { + DvsFault + + Scope []ManagedObjectReference `xml:"scope"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["DvsScopeViolated"] = reflect.TypeOf((*DvsScopeViolated)(nil)).Elem() +} + +type DvsScopeViolatedFault DvsScopeViolated + +func init() { + t["DvsScopeViolatedFault"] = reflect.TypeOf((*DvsScopeViolatedFault)(nil)).Elem() +} + +type DvsServiceConsoleVNicProfile struct { + DvsVNicProfile +} + +func init() { + t["DvsServiceConsoleVNicProfile"] = reflect.TypeOf((*DvsServiceConsoleVNicProfile)(nil)).Elem() +} + +type DvsSingleIpPort struct { + DvsIpPort + + PortNumber int32 `xml:"portNumber"` +} + +func init() { + t["DvsSingleIpPort"] = reflect.TypeOf((*DvsSingleIpPort)(nil)).Elem() +} + +type DvsSystemTrafficNetworkRuleQualifier struct { + DvsNetworkRuleQualifier + + TypeOfSystemTraffic *StringExpression `xml:"typeOfSystemTraffic,omitempty"` +} + +func init() { + t["DvsSystemTrafficNetworkRuleQualifier"] = 
reflect.TypeOf((*DvsSystemTrafficNetworkRuleQualifier)(nil)).Elem() +} + +type DvsTrafficFilterConfig struct { + DvsFilterConfig + + TrafficRuleset *DvsTrafficRuleset `xml:"trafficRuleset,omitempty"` +} + +func init() { + t["DvsTrafficFilterConfig"] = reflect.TypeOf((*DvsTrafficFilterConfig)(nil)).Elem() +} + +type DvsTrafficFilterConfigSpec struct { + DvsTrafficFilterConfig + + Operation string `xml:"operation"` +} + +func init() { + t["DvsTrafficFilterConfigSpec"] = reflect.TypeOf((*DvsTrafficFilterConfigSpec)(nil)).Elem() +} + +type DvsTrafficRule struct { + DynamicData + + Key string `xml:"key,omitempty"` + Description string `xml:"description,omitempty"` + Sequence int32 `xml:"sequence,omitempty"` + Qualifier []BaseDvsNetworkRuleQualifier `xml:"qualifier,omitempty,typeattr"` + Action BaseDvsNetworkRuleAction `xml:"action,omitempty,typeattr"` + Direction string `xml:"direction,omitempty"` +} + +func init() { + t["DvsTrafficRule"] = reflect.TypeOf((*DvsTrafficRule)(nil)).Elem() +} + +type DvsTrafficRuleset struct { + DynamicData + + Key string `xml:"key,omitempty"` + Enabled *bool `xml:"enabled"` + Precedence int32 `xml:"precedence,omitempty"` + Rules []DvsTrafficRule `xml:"rules,omitempty"` +} + +func init() { + t["DvsTrafficRuleset"] = reflect.TypeOf((*DvsTrafficRuleset)(nil)).Elem() +} + +type DvsUpdateTagNetworkRuleAction struct { + DvsNetworkRuleAction + + QosTag int32 `xml:"qosTag,omitempty"` + DscpTag int32 `xml:"dscpTag,omitempty"` +} + +func init() { + t["DvsUpdateTagNetworkRuleAction"] = reflect.TypeOf((*DvsUpdateTagNetworkRuleAction)(nil)).Elem() +} + +type DvsUpgradeAvailableEvent struct { + DvsEvent + + ProductInfo DistributedVirtualSwitchProductSpec `xml:"productInfo"` +} + +func init() { + t["DvsUpgradeAvailableEvent"] = reflect.TypeOf((*DvsUpgradeAvailableEvent)(nil)).Elem() +} + +type DvsUpgradeInProgressEvent struct { + DvsEvent + + ProductInfo DistributedVirtualSwitchProductSpec `xml:"productInfo"` +} + +func init() { + t["DvsUpgradeInProgressEvent"] = reflect.TypeOf((*DvsUpgradeInProgressEvent)(nil)).Elem() +} + +type DvsUpgradeRejectedEvent struct { + DvsEvent + + ProductInfo DistributedVirtualSwitchProductSpec `xml:"productInfo"` +} + +func init() { + t["DvsUpgradeRejectedEvent"] = reflect.TypeOf((*DvsUpgradeRejectedEvent)(nil)).Elem() +} + +type DvsUpgradedEvent struct { + DvsEvent + + ProductInfo DistributedVirtualSwitchProductSpec `xml:"productInfo"` +} + +func init() { + t["DvsUpgradedEvent"] = reflect.TypeOf((*DvsUpgradedEvent)(nil)).Elem() +} + +type DvsVNicProfile struct { + ApplyProfile + + Key string `xml:"key"` + IpConfig IpAddressProfile `xml:"ipConfig"` +} + +func init() { + t["DvsVNicProfile"] = reflect.TypeOf((*DvsVNicProfile)(nil)).Elem() +} + +type DvsVmVnicNetworkResourcePoolRuntimeInfo struct { + DynamicData + + Key string `xml:"key"` + Name string `xml:"name,omitempty"` + Capacity int32 `xml:"capacity,omitempty"` + Usage int32 `xml:"usage,omitempty"` + Available int32 `xml:"available,omitempty"` + Status string `xml:"status"` + AllocatedResource []DvsVnicAllocatedResource `xml:"allocatedResource,omitempty"` +} + +func init() { + t["DvsVmVnicNetworkResourcePoolRuntimeInfo"] = reflect.TypeOf((*DvsVmVnicNetworkResourcePoolRuntimeInfo)(nil)).Elem() +} + +type DvsVmVnicResourceAllocation struct { + DynamicData + + ReservationQuota int64 `xml:"reservationQuota,omitempty"` +} + +func init() { + t["DvsVmVnicResourceAllocation"] = reflect.TypeOf((*DvsVmVnicResourceAllocation)(nil)).Elem() +} + +type DvsVmVnicResourcePoolConfigSpec struct { + DynamicData + 
+ Operation string `xml:"operation"` + Key string `xml:"key,omitempty"` + ConfigVersion string `xml:"configVersion,omitempty"` + AllocationInfo *DvsVmVnicResourceAllocation `xml:"allocationInfo,omitempty"` + Name string `xml:"name,omitempty"` + Description string `xml:"description,omitempty"` +} + +func init() { + t["DvsVmVnicResourcePoolConfigSpec"] = reflect.TypeOf((*DvsVmVnicResourcePoolConfigSpec)(nil)).Elem() +} + +type DvsVnicAllocatedResource struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + VnicKey string `xml:"vnicKey"` + Reservation *int64 `xml:"reservation"` +} + +func init() { + t["DvsVnicAllocatedResource"] = reflect.TypeOf((*DvsVnicAllocatedResource)(nil)).Elem() +} + +type DynamicArray struct { + Val []AnyType `xml:"val,typeattr"` +} + +func init() { + t["DynamicArray"] = reflect.TypeOf((*DynamicArray)(nil)).Elem() +} + +type DynamicData struct { +} + +func init() { + t["DynamicData"] = reflect.TypeOf((*DynamicData)(nil)).Elem() +} + +type DynamicProperty struct { + Name string `xml:"name"` + Val AnyType `xml:"val,typeattr"` +} + +func init() { + t["DynamicProperty"] = reflect.TypeOf((*DynamicProperty)(nil)).Elem() +} + +type EVCAdmissionFailed struct { + NotSupportedHostInCluster + + Faults []LocalizedMethodFault `xml:"faults,omitempty"` +} + +func init() { + t["EVCAdmissionFailed"] = reflect.TypeOf((*EVCAdmissionFailed)(nil)).Elem() +} + +type EVCAdmissionFailedCPUFeaturesForMode struct { + EVCAdmissionFailed + + CurrentEVCModeKey string `xml:"currentEVCModeKey"` +} + +func init() { + t["EVCAdmissionFailedCPUFeaturesForMode"] = reflect.TypeOf((*EVCAdmissionFailedCPUFeaturesForMode)(nil)).Elem() +} + +type EVCAdmissionFailedCPUFeaturesForModeFault EVCAdmissionFailedCPUFeaturesForMode + +func init() { + t["EVCAdmissionFailedCPUFeaturesForModeFault"] = reflect.TypeOf((*EVCAdmissionFailedCPUFeaturesForModeFault)(nil)).Elem() +} + +type EVCAdmissionFailedCPUModel struct { + EVCAdmissionFailed +} + +func init() { + t["EVCAdmissionFailedCPUModel"] = reflect.TypeOf((*EVCAdmissionFailedCPUModel)(nil)).Elem() +} + +type EVCAdmissionFailedCPUModelFault EVCAdmissionFailedCPUModel + +func init() { + t["EVCAdmissionFailedCPUModelFault"] = reflect.TypeOf((*EVCAdmissionFailedCPUModelFault)(nil)).Elem() +} + +type EVCAdmissionFailedCPUModelForMode struct { + EVCAdmissionFailed + + CurrentEVCModeKey string `xml:"currentEVCModeKey"` +} + +func init() { + t["EVCAdmissionFailedCPUModelForMode"] = reflect.TypeOf((*EVCAdmissionFailedCPUModelForMode)(nil)).Elem() +} + +type EVCAdmissionFailedCPUModelForModeFault EVCAdmissionFailedCPUModelForMode + +func init() { + t["EVCAdmissionFailedCPUModelForModeFault"] = reflect.TypeOf((*EVCAdmissionFailedCPUModelForModeFault)(nil)).Elem() +} + +type EVCAdmissionFailedCPUVendor struct { + EVCAdmissionFailed + + ClusterCPUVendor string `xml:"clusterCPUVendor"` + HostCPUVendor string `xml:"hostCPUVendor"` +} + +func init() { + t["EVCAdmissionFailedCPUVendor"] = reflect.TypeOf((*EVCAdmissionFailedCPUVendor)(nil)).Elem() +} + +type EVCAdmissionFailedCPUVendorFault EVCAdmissionFailedCPUVendor + +func init() { + t["EVCAdmissionFailedCPUVendorFault"] = reflect.TypeOf((*EVCAdmissionFailedCPUVendorFault)(nil)).Elem() +} + +type EVCAdmissionFailedCPUVendorUnknown struct { + EVCAdmissionFailed +} + +func init() { + t["EVCAdmissionFailedCPUVendorUnknown"] = reflect.TypeOf((*EVCAdmissionFailedCPUVendorUnknown)(nil)).Elem() +} + +type EVCAdmissionFailedCPUVendorUnknownFault EVCAdmissionFailedCPUVendorUnknown + +func init() { + 
t["EVCAdmissionFailedCPUVendorUnknownFault"] = reflect.TypeOf((*EVCAdmissionFailedCPUVendorUnknownFault)(nil)).Elem() +} + +type EVCAdmissionFailedFault BaseEVCAdmissionFailed + +func init() { + t["EVCAdmissionFailedFault"] = reflect.TypeOf((*EVCAdmissionFailedFault)(nil)).Elem() +} + +type EVCAdmissionFailedHostDisconnected struct { + EVCAdmissionFailed +} + +func init() { + t["EVCAdmissionFailedHostDisconnected"] = reflect.TypeOf((*EVCAdmissionFailedHostDisconnected)(nil)).Elem() +} + +type EVCAdmissionFailedHostDisconnectedFault EVCAdmissionFailedHostDisconnected + +func init() { + t["EVCAdmissionFailedHostDisconnectedFault"] = reflect.TypeOf((*EVCAdmissionFailedHostDisconnectedFault)(nil)).Elem() +} + +type EVCAdmissionFailedHostSoftware struct { + EVCAdmissionFailed +} + +func init() { + t["EVCAdmissionFailedHostSoftware"] = reflect.TypeOf((*EVCAdmissionFailedHostSoftware)(nil)).Elem() +} + +type EVCAdmissionFailedHostSoftwareFault EVCAdmissionFailedHostSoftware + +func init() { + t["EVCAdmissionFailedHostSoftwareFault"] = reflect.TypeOf((*EVCAdmissionFailedHostSoftwareFault)(nil)).Elem() +} + +type EVCAdmissionFailedHostSoftwareForMode struct { + EVCAdmissionFailed +} + +func init() { + t["EVCAdmissionFailedHostSoftwareForMode"] = reflect.TypeOf((*EVCAdmissionFailedHostSoftwareForMode)(nil)).Elem() +} + +type EVCAdmissionFailedHostSoftwareForModeFault EVCAdmissionFailedHostSoftwareForMode + +func init() { + t["EVCAdmissionFailedHostSoftwareForModeFault"] = reflect.TypeOf((*EVCAdmissionFailedHostSoftwareForModeFault)(nil)).Elem() +} + +type EVCAdmissionFailedVmActive struct { + EVCAdmissionFailed +} + +func init() { + t["EVCAdmissionFailedVmActive"] = reflect.TypeOf((*EVCAdmissionFailedVmActive)(nil)).Elem() +} + +type EVCAdmissionFailedVmActiveFault EVCAdmissionFailedVmActive + +func init() { + t["EVCAdmissionFailedVmActiveFault"] = reflect.TypeOf((*EVCAdmissionFailedVmActiveFault)(nil)).Elem() +} + +type EVCConfigFault struct { + VimFault + + Faults []LocalizedMethodFault `xml:"faults,omitempty"` +} + +func init() { + t["EVCConfigFault"] = reflect.TypeOf((*EVCConfigFault)(nil)).Elem() +} + +type EVCConfigFaultFault BaseEVCConfigFault + +func init() { + t["EVCConfigFaultFault"] = reflect.TypeOf((*EVCConfigFaultFault)(nil)).Elem() +} + +type EVCMode struct { + ElementDescription + + GuaranteedCPUFeatures []HostCpuIdInfo `xml:"guaranteedCPUFeatures,omitempty"` + FeatureCapability []HostFeatureCapability `xml:"featureCapability,omitempty"` + FeatureMask []HostFeatureMask `xml:"featureMask,omitempty"` + FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty"` + Vendor string `xml:"vendor"` + Track []string `xml:"track,omitempty"` + VendorTier int32 `xml:"vendorTier"` +} + +func init() { + t["EVCMode"] = reflect.TypeOf((*EVCMode)(nil)).Elem() +} + +type EVCModeIllegalByVendor struct { + EVCConfigFault + + ClusterCPUVendor string `xml:"clusterCPUVendor"` + ModeCPUVendor string `xml:"modeCPUVendor"` +} + +func init() { + t["EVCModeIllegalByVendor"] = reflect.TypeOf((*EVCModeIllegalByVendor)(nil)).Elem() +} + +type EVCModeIllegalByVendorFault EVCModeIllegalByVendor + +func init() { + t["EVCModeIllegalByVendorFault"] = reflect.TypeOf((*EVCModeIllegalByVendorFault)(nil)).Elem() +} + +type EVCModeUnsupportedByHosts struct { + EVCConfigFault + + EvcMode string `xml:"evcMode,omitempty"` + Host []ManagedObjectReference `xml:"host,omitempty"` + HostName []string `xml:"hostName,omitempty"` +} + +func init() { + t["EVCModeUnsupportedByHosts"] = 
reflect.TypeOf((*EVCModeUnsupportedByHosts)(nil)).Elem() +} + +type EVCModeUnsupportedByHostsFault EVCModeUnsupportedByHosts + +func init() { + t["EVCModeUnsupportedByHostsFault"] = reflect.TypeOf((*EVCModeUnsupportedByHostsFault)(nil)).Elem() +} + +type EVCUnsupportedByHostHardware struct { + EVCConfigFault + + Host []ManagedObjectReference `xml:"host"` + HostName []string `xml:"hostName"` +} + +func init() { + t["EVCUnsupportedByHostHardware"] = reflect.TypeOf((*EVCUnsupportedByHostHardware)(nil)).Elem() +} + +type EVCUnsupportedByHostHardwareFault EVCUnsupportedByHostHardware + +func init() { + t["EVCUnsupportedByHostHardwareFault"] = reflect.TypeOf((*EVCUnsupportedByHostHardwareFault)(nil)).Elem() +} + +type EVCUnsupportedByHostSoftware struct { + EVCConfigFault + + Host []ManagedObjectReference `xml:"host"` + HostName []string `xml:"hostName"` +} + +func init() { + t["EVCUnsupportedByHostSoftware"] = reflect.TypeOf((*EVCUnsupportedByHostSoftware)(nil)).Elem() +} + +type EVCUnsupportedByHostSoftwareFault EVCUnsupportedByHostSoftware + +func init() { + t["EVCUnsupportedByHostSoftwareFault"] = reflect.TypeOf((*EVCUnsupportedByHostSoftwareFault)(nil)).Elem() +} + +type EagerZeroVirtualDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` +} + +func init() { + t["EagerZeroVirtualDiskRequestType"] = reflect.TypeOf((*EagerZeroVirtualDiskRequestType)(nil)).Elem() +} + +type EagerZeroVirtualDisk_Task EagerZeroVirtualDiskRequestType + +func init() { + t["EagerZeroVirtualDisk_Task"] = reflect.TypeOf((*EagerZeroVirtualDisk_Task)(nil)).Elem() +} + +type EagerZeroVirtualDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type EightHostLimitViolated struct { + VmConfigFault +} + +func init() { + t["EightHostLimitViolated"] = reflect.TypeOf((*EightHostLimitViolated)(nil)).Elem() +} + +type EightHostLimitViolatedFault EightHostLimitViolated + +func init() { + t["EightHostLimitViolatedFault"] = reflect.TypeOf((*EightHostLimitViolatedFault)(nil)).Elem() +} + +type ElementDescription struct { + Description + + Key string `xml:"key"` +} + +func init() { + t["ElementDescription"] = reflect.TypeOf((*ElementDescription)(nil)).Elem() +} + +type EnableAlarmActions EnableAlarmActionsRequestType + +func init() { + t["EnableAlarmActions"] = reflect.TypeOf((*EnableAlarmActions)(nil)).Elem() +} + +type EnableAlarmActionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + Enabled bool `xml:"enabled"` +} + +func init() { + t["EnableAlarmActionsRequestType"] = reflect.TypeOf((*EnableAlarmActionsRequestType)(nil)).Elem() +} + +type EnableAlarmActionsResponse struct { +} + +type EnableCrypto EnableCryptoRequestType + +func init() { + t["EnableCrypto"] = reflect.TypeOf((*EnableCrypto)(nil)).Elem() +} + +type EnableCryptoRequestType struct { + This ManagedObjectReference `xml:"_this"` + KeyPlain CryptoKeyPlain `xml:"keyPlain"` +} + +func init() { + t["EnableCryptoRequestType"] = reflect.TypeOf((*EnableCryptoRequestType)(nil)).Elem() +} + +type EnableCryptoResponse struct { +} + +type EnableFeature EnableFeatureRequestType + +func init() { + t["EnableFeature"] = reflect.TypeOf((*EnableFeature)(nil)).Elem() +} + +type EnableFeatureRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` + FeatureKey string `xml:"featureKey"` +} + +func init() { + 
t["EnableFeatureRequestType"] = reflect.TypeOf((*EnableFeatureRequestType)(nil)).Elem() +} + +type EnableFeatureResponse struct { + Returnval bool `xml:"returnval"` +} + +type EnableHyperThreading EnableHyperThreadingRequestType + +func init() { + t["EnableHyperThreading"] = reflect.TypeOf((*EnableHyperThreading)(nil)).Elem() +} + +type EnableHyperThreadingRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["EnableHyperThreadingRequestType"] = reflect.TypeOf((*EnableHyperThreadingRequestType)(nil)).Elem() +} + +type EnableHyperThreadingResponse struct { +} + +type EnableMultipathPath EnableMultipathPathRequestType + +func init() { + t["EnableMultipathPath"] = reflect.TypeOf((*EnableMultipathPath)(nil)).Elem() +} + +type EnableMultipathPathRequestType struct { + This ManagedObjectReference `xml:"_this"` + PathName string `xml:"pathName"` +} + +func init() { + t["EnableMultipathPathRequestType"] = reflect.TypeOf((*EnableMultipathPathRequestType)(nil)).Elem() +} + +type EnableMultipathPathResponse struct { +} + +type EnableNetworkResourceManagement EnableNetworkResourceManagementRequestType + +func init() { + t["EnableNetworkResourceManagement"] = reflect.TypeOf((*EnableNetworkResourceManagement)(nil)).Elem() +} + +type EnableNetworkResourceManagementRequestType struct { + This ManagedObjectReference `xml:"_this"` + Enable bool `xml:"enable"` +} + +func init() { + t["EnableNetworkResourceManagementRequestType"] = reflect.TypeOf((*EnableNetworkResourceManagementRequestType)(nil)).Elem() +} + +type EnableNetworkResourceManagementResponse struct { +} + +type EnableRuleset EnableRulesetRequestType + +func init() { + t["EnableRuleset"] = reflect.TypeOf((*EnableRuleset)(nil)).Elem() +} + +type EnableRulesetRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id string `xml:"id"` +} + +func init() { + t["EnableRulesetRequestType"] = reflect.TypeOf((*EnableRulesetRequestType)(nil)).Elem() +} + +type EnableRulesetResponse struct { +} + +type EnableSecondaryVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["EnableSecondaryVMRequestType"] = reflect.TypeOf((*EnableSecondaryVMRequestType)(nil)).Elem() +} + +type EnableSecondaryVM_Task EnableSecondaryVMRequestType + +func init() { + t["EnableSecondaryVM_Task"] = reflect.TypeOf((*EnableSecondaryVM_Task)(nil)).Elem() +} + +type EnableSecondaryVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type EnableSmartCardAuthentication EnableSmartCardAuthenticationRequestType + +func init() { + t["EnableSmartCardAuthentication"] = reflect.TypeOf((*EnableSmartCardAuthentication)(nil)).Elem() +} + +type EnableSmartCardAuthenticationRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["EnableSmartCardAuthenticationRequestType"] = reflect.TypeOf((*EnableSmartCardAuthenticationRequestType)(nil)).Elem() +} + +type EnableSmartCardAuthenticationResponse struct { +} + +type EncryptionKeyRequired struct { + InvalidState + + RequiredKey []CryptoKeyId `xml:"requiredKey,omitempty"` +} + +func init() { + t["EncryptionKeyRequired"] = reflect.TypeOf((*EncryptionKeyRequired)(nil)).Elem() +} + +type EncryptionKeyRequiredFault EncryptionKeyRequired + +func init() { + t["EncryptionKeyRequiredFault"] = reflect.TypeOf((*EncryptionKeyRequiredFault)(nil)).Elem() +} + +type EnterLockdownMode EnterLockdownModeRequestType + +func init() { + 
t["EnterLockdownMode"] = reflect.TypeOf((*EnterLockdownMode)(nil)).Elem() +} + +type EnterLockdownModeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["EnterLockdownModeRequestType"] = reflect.TypeOf((*EnterLockdownModeRequestType)(nil)).Elem() +} + +type EnterLockdownModeResponse struct { +} + +type EnterMaintenanceModeRequestType struct { + This ManagedObjectReference `xml:"_this"` + Timeout int32 `xml:"timeout"` + EvacuatePoweredOffVms *bool `xml:"evacuatePoweredOffVms"` + MaintenanceSpec *HostMaintenanceSpec `xml:"maintenanceSpec,omitempty"` +} + +func init() { + t["EnterMaintenanceModeRequestType"] = reflect.TypeOf((*EnterMaintenanceModeRequestType)(nil)).Elem() +} + +type EnterMaintenanceMode_Task EnterMaintenanceModeRequestType + +func init() { + t["EnterMaintenanceMode_Task"] = reflect.TypeOf((*EnterMaintenanceMode_Task)(nil)).Elem() +} + +type EnterMaintenanceMode_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type EnteredMaintenanceModeEvent struct { + HostEvent +} + +func init() { + t["EnteredMaintenanceModeEvent"] = reflect.TypeOf((*EnteredMaintenanceModeEvent)(nil)).Elem() +} + +type EnteredStandbyModeEvent struct { + HostEvent +} + +func init() { + t["EnteredStandbyModeEvent"] = reflect.TypeOf((*EnteredStandbyModeEvent)(nil)).Elem() +} + +type EnteringMaintenanceModeEvent struct { + HostEvent +} + +func init() { + t["EnteringMaintenanceModeEvent"] = reflect.TypeOf((*EnteringMaintenanceModeEvent)(nil)).Elem() +} + +type EnteringStandbyModeEvent struct { + HostEvent +} + +func init() { + t["EnteringStandbyModeEvent"] = reflect.TypeOf((*EnteringStandbyModeEvent)(nil)).Elem() +} + +type EntityBackup struct { + DynamicData +} + +func init() { + t["EntityBackup"] = reflect.TypeOf((*EntityBackup)(nil)).Elem() +} + +type EntityBackupConfig struct { + DynamicData + + EntityType string `xml:"entityType"` + ConfigBlob []byte `xml:"configBlob"` + Key string `xml:"key,omitempty"` + Name string `xml:"name,omitempty"` + Container *ManagedObjectReference `xml:"container,omitempty"` + ConfigVersion string `xml:"configVersion,omitempty"` +} + +func init() { + t["EntityBackupConfig"] = reflect.TypeOf((*EntityBackupConfig)(nil)).Elem() +} + +type EntityEventArgument struct { + EventArgument + + Name string `xml:"name"` +} + +func init() { + t["EntityEventArgument"] = reflect.TypeOf((*EntityEventArgument)(nil)).Elem() +} + +type EntityPrivilege struct { + DynamicData + + Entity ManagedObjectReference `xml:"entity"` + PrivAvailability []PrivilegeAvailability `xml:"privAvailability"` +} + +func init() { + t["EntityPrivilege"] = reflect.TypeOf((*EntityPrivilege)(nil)).Elem() +} + +type EnumDescription struct { + DynamicData + + Key string `xml:"key"` + Tags []BaseElementDescription `xml:"tags,typeattr"` +} + +func init() { + t["EnumDescription"] = reflect.TypeOf((*EnumDescription)(nil)).Elem() +} + +type EnvironmentBrowserConfigOptionQuerySpec struct { + DynamicData + + Key string `xml:"key,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + GuestId []string `xml:"guestId,omitempty"` +} + +func init() { + t["EnvironmentBrowserConfigOptionQuerySpec"] = reflect.TypeOf((*EnvironmentBrowserConfigOptionQuerySpec)(nil)).Elem() +} + +type ErrorUpgradeEvent struct { + UpgradeEvent +} + +func init() { + t["ErrorUpgradeEvent"] = reflect.TypeOf((*ErrorUpgradeEvent)(nil)).Elem() +} + +type EstimateDatabaseSize EstimateDatabaseSizeRequestType + +func init() { + t["EstimateDatabaseSize"] = 
reflect.TypeOf((*EstimateDatabaseSize)(nil)).Elem() +} + +type EstimateDatabaseSizeRequestType struct { + This ManagedObjectReference `xml:"_this"` + DbSizeParam DatabaseSizeParam `xml:"dbSizeParam"` +} + +func init() { + t["EstimateDatabaseSizeRequestType"] = reflect.TypeOf((*EstimateDatabaseSizeRequestType)(nil)).Elem() +} + +type EstimateDatabaseSizeResponse struct { + Returnval DatabaseSizeEstimate `xml:"returnval"` +} + +type EstimateStorageForConsolidateSnapshotsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["EstimateStorageForConsolidateSnapshotsRequestType"] = reflect.TypeOf((*EstimateStorageForConsolidateSnapshotsRequestType)(nil)).Elem() +} + +type EstimateStorageForConsolidateSnapshots_Task EstimateStorageForConsolidateSnapshotsRequestType + +func init() { + t["EstimateStorageForConsolidateSnapshots_Task"] = reflect.TypeOf((*EstimateStorageForConsolidateSnapshots_Task)(nil)).Elem() +} + +type EstimateStorageForConsolidateSnapshots_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type EsxAgentHostManagerUpdateConfig EsxAgentHostManagerUpdateConfigRequestType + +func init() { + t["EsxAgentHostManagerUpdateConfig"] = reflect.TypeOf((*EsxAgentHostManagerUpdateConfig)(nil)).Elem() +} + +type EsxAgentHostManagerUpdateConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConfigInfo HostEsxAgentHostManagerConfigInfo `xml:"configInfo"` +} + +func init() { + t["EsxAgentHostManagerUpdateConfigRequestType"] = reflect.TypeOf((*EsxAgentHostManagerUpdateConfigRequestType)(nil)).Elem() +} + +type EsxAgentHostManagerUpdateConfigResponse struct { +} + +type EvacuateVsanNodeRequestType struct { + This ManagedObjectReference `xml:"_this"` + MaintenanceSpec HostMaintenanceSpec `xml:"maintenanceSpec"` + Timeout int32 `xml:"timeout"` +} + +func init() { + t["EvacuateVsanNodeRequestType"] = reflect.TypeOf((*EvacuateVsanNodeRequestType)(nil)).Elem() +} + +type EvacuateVsanNode_Task EvacuateVsanNodeRequestType + +func init() { + t["EvacuateVsanNode_Task"] = reflect.TypeOf((*EvacuateVsanNode_Task)(nil)).Elem() +} + +type EvacuateVsanNode_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type EvaluationLicenseSource struct { + LicenseSource + + RemainingHours int64 `xml:"remainingHours,omitempty"` +} + +func init() { + t["EvaluationLicenseSource"] = reflect.TypeOf((*EvaluationLicenseSource)(nil)).Elem() +} + +type EvcManager EvcManagerRequestType + +func init() { + t["EvcManager"] = reflect.TypeOf((*EvcManager)(nil)).Elem() +} + +type EvcManagerRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["EvcManagerRequestType"] = reflect.TypeOf((*EvcManagerRequestType)(nil)).Elem() +} + +type EvcManagerResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type Event struct { + DynamicData + + Key int32 `xml:"key"` + ChainId int32 `xml:"chainId"` + CreatedTime time.Time `xml:"createdTime"` + UserName string `xml:"userName"` + Datacenter *DatacenterEventArgument `xml:"datacenter,omitempty"` + ComputeResource *ComputeResourceEventArgument `xml:"computeResource,omitempty"` + Host *HostEventArgument `xml:"host,omitempty"` + Vm *VmEventArgument `xml:"vm,omitempty"` + Ds *DatastoreEventArgument `xml:"ds,omitempty"` + Net *NetworkEventArgument `xml:"net,omitempty"` + Dvs *DvsEventArgument `xml:"dvs,omitempty"` + FullFormattedMessage string `xml:"fullFormattedMessage,omitempty"` + ChangeTag string `xml:"changeTag,omitempty"` +} + 
+func init() { + t["Event"] = reflect.TypeOf((*Event)(nil)).Elem() +} + +type EventAlarmExpression struct { + AlarmExpression + + Comparisons []EventAlarmExpressionComparison `xml:"comparisons,omitempty"` + EventType string `xml:"eventType"` + EventTypeId string `xml:"eventTypeId,omitempty"` + ObjectType string `xml:"objectType,omitempty"` + Status ManagedEntityStatus `xml:"status,omitempty"` +} + +func init() { + t["EventAlarmExpression"] = reflect.TypeOf((*EventAlarmExpression)(nil)).Elem() +} + +type EventAlarmExpressionComparison struct { + DynamicData + + AttributeName string `xml:"attributeName"` + Operator string `xml:"operator"` + Value string `xml:"value"` +} + +func init() { + t["EventAlarmExpressionComparison"] = reflect.TypeOf((*EventAlarmExpressionComparison)(nil)).Elem() +} + +type EventArgDesc struct { + DynamicData + + Name string `xml:"name"` + Type string `xml:"type"` + Description BaseElementDescription `xml:"description,omitempty,typeattr"` +} + +func init() { + t["EventArgDesc"] = reflect.TypeOf((*EventArgDesc)(nil)).Elem() +} + +type EventArgument struct { + DynamicData +} + +func init() { + t["EventArgument"] = reflect.TypeOf((*EventArgument)(nil)).Elem() +} + +type EventDescription struct { + DynamicData + + Category []BaseElementDescription `xml:"category,typeattr"` + EventInfo []EventDescriptionEventDetail `xml:"eventInfo"` + EnumeratedTypes []EnumDescription `xml:"enumeratedTypes,omitempty"` +} + +func init() { + t["EventDescription"] = reflect.TypeOf((*EventDescription)(nil)).Elem() +} + +type EventDescriptionEventDetail struct { + DynamicData + + Key string `xml:"key"` + Description string `xml:"description,omitempty"` + Category string `xml:"category"` + FormatOnDatacenter string `xml:"formatOnDatacenter"` + FormatOnComputeResource string `xml:"formatOnComputeResource"` + FormatOnHost string `xml:"formatOnHost"` + FormatOnVm string `xml:"formatOnVm"` + FullFormat string `xml:"fullFormat"` + LongDescription string `xml:"longDescription,omitempty"` +} + +func init() { + t["EventDescriptionEventDetail"] = reflect.TypeOf((*EventDescriptionEventDetail)(nil)).Elem() +} + +type EventEx struct { + Event + + EventTypeId string `xml:"eventTypeId"` + Severity string `xml:"severity,omitempty"` + Message string `xml:"message,omitempty"` + Arguments []KeyAnyValue `xml:"arguments,omitempty"` + ObjectId string `xml:"objectId,omitempty"` + ObjectType string `xml:"objectType,omitempty"` + ObjectName string `xml:"objectName,omitempty"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["EventEx"] = reflect.TypeOf((*EventEx)(nil)).Elem() +} + +type EventFilterSpec struct { + DynamicData + + Entity *EventFilterSpecByEntity `xml:"entity,omitempty"` + Time *EventFilterSpecByTime `xml:"time,omitempty"` + UserName *EventFilterSpecByUsername `xml:"userName,omitempty"` + EventChainId int32 `xml:"eventChainId,omitempty"` + Alarm *ManagedObjectReference `xml:"alarm,omitempty"` + ScheduledTask *ManagedObjectReference `xml:"scheduledTask,omitempty"` + DisableFullMessage *bool `xml:"disableFullMessage"` + Category []string `xml:"category,omitempty"` + Type []string `xml:"type,omitempty"` + Tag []string `xml:"tag,omitempty"` + EventTypeId []string `xml:"eventTypeId,omitempty"` + MaxCount int32 `xml:"maxCount,omitempty"` +} + +func init() { + t["EventFilterSpec"] = reflect.TypeOf((*EventFilterSpec)(nil)).Elem() +} + +type EventFilterSpecByEntity struct { + DynamicData + + Entity ManagedObjectReference `xml:"entity"` + Recursion EventFilterSpecRecursionOption 
`xml:"recursion"` +} + +func init() { + t["EventFilterSpecByEntity"] = reflect.TypeOf((*EventFilterSpecByEntity)(nil)).Elem() +} + +type EventFilterSpecByTime struct { + DynamicData + + BeginTime *time.Time `xml:"beginTime"` + EndTime *time.Time `xml:"endTime"` +} + +func init() { + t["EventFilterSpecByTime"] = reflect.TypeOf((*EventFilterSpecByTime)(nil)).Elem() +} + +type EventFilterSpecByUsername struct { + DynamicData + + SystemUser bool `xml:"systemUser"` + UserList []string `xml:"userList,omitempty"` +} + +func init() { + t["EventFilterSpecByUsername"] = reflect.TypeOf((*EventFilterSpecByUsername)(nil)).Elem() +} + +type ExecuteHostProfile ExecuteHostProfileRequestType + +func init() { + t["ExecuteHostProfile"] = reflect.TypeOf((*ExecuteHostProfile)(nil)).Elem() +} + +type ExecuteHostProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` + DeferredParam []ProfileDeferredPolicyOptionParameter `xml:"deferredParam,omitempty"` +} + +func init() { + t["ExecuteHostProfileRequestType"] = reflect.TypeOf((*ExecuteHostProfileRequestType)(nil)).Elem() +} + +type ExecuteHostProfileResponse struct { + Returnval BaseProfileExecuteResult `xml:"returnval,typeattr"` +} + +type ExecuteSimpleCommand ExecuteSimpleCommandRequestType + +func init() { + t["ExecuteSimpleCommand"] = reflect.TypeOf((*ExecuteSimpleCommand)(nil)).Elem() +} + +type ExecuteSimpleCommandRequestType struct { + This ManagedObjectReference `xml:"_this"` + Arguments []string `xml:"arguments,omitempty"` +} + +func init() { + t["ExecuteSimpleCommandRequestType"] = reflect.TypeOf((*ExecuteSimpleCommandRequestType)(nil)).Elem() +} + +type ExecuteSimpleCommandResponse struct { + Returnval string `xml:"returnval"` +} + +type ExitLockdownMode ExitLockdownModeRequestType + +func init() { + t["ExitLockdownMode"] = reflect.TypeOf((*ExitLockdownMode)(nil)).Elem() +} + +type ExitLockdownModeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ExitLockdownModeRequestType"] = reflect.TypeOf((*ExitLockdownModeRequestType)(nil)).Elem() +} + +type ExitLockdownModeResponse struct { +} + +type ExitMaintenanceModeEvent struct { + HostEvent +} + +func init() { + t["ExitMaintenanceModeEvent"] = reflect.TypeOf((*ExitMaintenanceModeEvent)(nil)).Elem() +} + +type ExitMaintenanceModeRequestType struct { + This ManagedObjectReference `xml:"_this"` + Timeout int32 `xml:"timeout"` +} + +func init() { + t["ExitMaintenanceModeRequestType"] = reflect.TypeOf((*ExitMaintenanceModeRequestType)(nil)).Elem() +} + +type ExitMaintenanceMode_Task ExitMaintenanceModeRequestType + +func init() { + t["ExitMaintenanceMode_Task"] = reflect.TypeOf((*ExitMaintenanceMode_Task)(nil)).Elem() +} + +type ExitMaintenanceMode_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ExitStandbyModeFailedEvent struct { + HostEvent +} + +func init() { + t["ExitStandbyModeFailedEvent"] = reflect.TypeOf((*ExitStandbyModeFailedEvent)(nil)).Elem() +} + +type ExitedStandbyModeEvent struct { + HostEvent +} + +func init() { + t["ExitedStandbyModeEvent"] = reflect.TypeOf((*ExitedStandbyModeEvent)(nil)).Elem() +} + +type ExitingStandbyModeEvent struct { + HostEvent +} + +func init() { + t["ExitingStandbyModeEvent"] = reflect.TypeOf((*ExitingStandbyModeEvent)(nil)).Elem() +} + +type ExpandVmfsDatastore ExpandVmfsDatastoreRequestType + +func init() { + t["ExpandVmfsDatastore"] = reflect.TypeOf((*ExpandVmfsDatastore)(nil)).Elem() +} + +type ExpandVmfsDatastoreRequestType struct 
{ + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` + Spec VmfsDatastoreExpandSpec `xml:"spec"` +} + +func init() { + t["ExpandVmfsDatastoreRequestType"] = reflect.TypeOf((*ExpandVmfsDatastoreRequestType)(nil)).Elem() +} + +type ExpandVmfsDatastoreResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ExpandVmfsExtent ExpandVmfsExtentRequestType + +func init() { + t["ExpandVmfsExtent"] = reflect.TypeOf((*ExpandVmfsExtent)(nil)).Elem() +} + +type ExpandVmfsExtentRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsPath string `xml:"vmfsPath"` + Extent HostScsiDiskPartition `xml:"extent"` +} + +func init() { + t["ExpandVmfsExtentRequestType"] = reflect.TypeOf((*ExpandVmfsExtentRequestType)(nil)).Elem() +} + +type ExpandVmfsExtentResponse struct { +} + +type ExpiredAddonLicense struct { + ExpiredFeatureLicense +} + +func init() { + t["ExpiredAddonLicense"] = reflect.TypeOf((*ExpiredAddonLicense)(nil)).Elem() +} + +type ExpiredAddonLicenseFault ExpiredAddonLicense + +func init() { + t["ExpiredAddonLicenseFault"] = reflect.TypeOf((*ExpiredAddonLicenseFault)(nil)).Elem() +} + +type ExpiredEditionLicense struct { + ExpiredFeatureLicense +} + +func init() { + t["ExpiredEditionLicense"] = reflect.TypeOf((*ExpiredEditionLicense)(nil)).Elem() +} + +type ExpiredEditionLicenseFault ExpiredEditionLicense + +func init() { + t["ExpiredEditionLicenseFault"] = reflect.TypeOf((*ExpiredEditionLicenseFault)(nil)).Elem() +} + +type ExpiredFeatureLicense struct { + NotEnoughLicenses + + Feature string `xml:"feature"` + Count int32 `xml:"count"` + ExpirationDate time.Time `xml:"expirationDate"` +} + +func init() { + t["ExpiredFeatureLicense"] = reflect.TypeOf((*ExpiredFeatureLicense)(nil)).Elem() +} + +type ExpiredFeatureLicenseFault BaseExpiredFeatureLicense + +func init() { + t["ExpiredFeatureLicenseFault"] = reflect.TypeOf((*ExpiredFeatureLicenseFault)(nil)).Elem() +} + +type ExportAnswerFileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["ExportAnswerFileRequestType"] = reflect.TypeOf((*ExportAnswerFileRequestType)(nil)).Elem() +} + +type ExportAnswerFile_Task ExportAnswerFileRequestType + +func init() { + t["ExportAnswerFile_Task"] = reflect.TypeOf((*ExportAnswerFile_Task)(nil)).Elem() +} + +type ExportAnswerFile_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ExportProfile ExportProfileRequestType + +func init() { + t["ExportProfile"] = reflect.TypeOf((*ExportProfile)(nil)).Elem() +} + +type ExportProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ExportProfileRequestType"] = reflect.TypeOf((*ExportProfileRequestType)(nil)).Elem() +} + +type ExportProfileResponse struct { + Returnval string `xml:"returnval"` +} + +type ExportSnapshot ExportSnapshotRequestType + +func init() { + t["ExportSnapshot"] = reflect.TypeOf((*ExportSnapshot)(nil)).Elem() +} + +type ExportSnapshotRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ExportSnapshotRequestType"] = reflect.TypeOf((*ExportSnapshotRequestType)(nil)).Elem() +} + +type ExportSnapshotResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ExportVApp ExportVAppRequestType + +func init() { + t["ExportVApp"] = reflect.TypeOf((*ExportVApp)(nil)).Elem() +} + +type ExportVAppRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func 
init() { + t["ExportVAppRequestType"] = reflect.TypeOf((*ExportVAppRequestType)(nil)).Elem() +} + +type ExportVAppResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ExportVm ExportVmRequestType + +func init() { + t["ExportVm"] = reflect.TypeOf((*ExportVm)(nil)).Elem() +} + +type ExportVmRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ExportVmRequestType"] = reflect.TypeOf((*ExportVmRequestType)(nil)).Elem() +} + +type ExportVmResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ExtExtendedProductInfo struct { + DynamicData + + CompanyUrl string `xml:"companyUrl,omitempty"` + ProductUrl string `xml:"productUrl,omitempty"` + ManagementUrl string `xml:"managementUrl,omitempty"` + Self *ManagedObjectReference `xml:"self,omitempty"` +} + +func init() { + t["ExtExtendedProductInfo"] = reflect.TypeOf((*ExtExtendedProductInfo)(nil)).Elem() +} + +type ExtManagedEntityInfo struct { + DynamicData + + Type string `xml:"type"` + SmallIconUrl string `xml:"smallIconUrl,omitempty"` + IconUrl string `xml:"iconUrl,omitempty"` + Description string `xml:"description,omitempty"` +} + +func init() { + t["ExtManagedEntityInfo"] = reflect.TypeOf((*ExtManagedEntityInfo)(nil)).Elem() +} + +type ExtSolutionManagerInfo struct { + DynamicData + + Tab []ExtSolutionManagerInfoTabInfo `xml:"tab,omitempty"` + SmallIconUrl string `xml:"smallIconUrl,omitempty"` +} + +func init() { + t["ExtSolutionManagerInfo"] = reflect.TypeOf((*ExtSolutionManagerInfo)(nil)).Elem() +} + +type ExtSolutionManagerInfoTabInfo struct { + DynamicData + + Label string `xml:"label"` + Url string `xml:"url"` +} + +func init() { + t["ExtSolutionManagerInfoTabInfo"] = reflect.TypeOf((*ExtSolutionManagerInfoTabInfo)(nil)).Elem() +} + +type ExtendDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + NewCapacityInMB int64 `xml:"newCapacityInMB"` +} + +func init() { + t["ExtendDiskRequestType"] = reflect.TypeOf((*ExtendDiskRequestType)(nil)).Elem() +} + +type ExtendDisk_Task ExtendDiskRequestType + +func init() { + t["ExtendDisk_Task"] = reflect.TypeOf((*ExtendDisk_Task)(nil)).Elem() +} + +type ExtendDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ExtendVffs ExtendVffsRequestType + +func init() { + t["ExtendVffs"] = reflect.TypeOf((*ExtendVffs)(nil)).Elem() +} + +type ExtendVffsRequestType struct { + This ManagedObjectReference `xml:"_this"` + VffsPath string `xml:"vffsPath"` + DevicePath string `xml:"devicePath"` + Spec *HostDiskPartitionSpec `xml:"spec,omitempty"` +} + +func init() { + t["ExtendVffsRequestType"] = reflect.TypeOf((*ExtendVffsRequestType)(nil)).Elem() +} + +type ExtendVffsResponse struct { +} + +type ExtendVirtualDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + NewCapacityKb int64 `xml:"newCapacityKb"` + EagerZero *bool `xml:"eagerZero"` +} + +func init() { + t["ExtendVirtualDiskRequestType"] = reflect.TypeOf((*ExtendVirtualDiskRequestType)(nil)).Elem() +} + +type ExtendVirtualDisk_Task ExtendVirtualDiskRequestType + +func init() { + t["ExtendVirtualDisk_Task"] = reflect.TypeOf((*ExtendVirtualDisk_Task)(nil)).Elem() +} + +type ExtendVirtualDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ExtendVmfsDatastore ExtendVmfsDatastoreRequestType + +func init() { 
+ t["ExtendVmfsDatastore"] = reflect.TypeOf((*ExtendVmfsDatastore)(nil)).Elem() +} + +type ExtendVmfsDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` + Spec VmfsDatastoreExtendSpec `xml:"spec"` +} + +func init() { + t["ExtendVmfsDatastoreRequestType"] = reflect.TypeOf((*ExtendVmfsDatastoreRequestType)(nil)).Elem() +} + +type ExtendVmfsDatastoreResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ExtendedDescription struct { + Description + + MessageCatalogKeyPrefix string `xml:"messageCatalogKeyPrefix"` + MessageArg []KeyAnyValue `xml:"messageArg,omitempty"` +} + +func init() { + t["ExtendedDescription"] = reflect.TypeOf((*ExtendedDescription)(nil)).Elem() +} + +type ExtendedElementDescription struct { + ElementDescription + + MessageCatalogKeyPrefix string `xml:"messageCatalogKeyPrefix"` + MessageArg []KeyAnyValue `xml:"messageArg,omitempty"` +} + +func init() { + t["ExtendedElementDescription"] = reflect.TypeOf((*ExtendedElementDescription)(nil)).Elem() +} + +type ExtendedEvent struct { + GeneralEvent + + EventTypeId string `xml:"eventTypeId"` + ManagedObject ManagedObjectReference `xml:"managedObject"` + Data []ExtendedEventPair `xml:"data,omitempty"` +} + +func init() { + t["ExtendedEvent"] = reflect.TypeOf((*ExtendedEvent)(nil)).Elem() +} + +type ExtendedEventPair struct { + DynamicData + + Key string `xml:"key"` + Value string `xml:"value"` +} + +func init() { + t["ExtendedEventPair"] = reflect.TypeOf((*ExtendedEventPair)(nil)).Elem() +} + +type ExtendedFault struct { + VimFault + + FaultTypeId string `xml:"faultTypeId"` + Data []KeyValue `xml:"data,omitempty"` +} + +func init() { + t["ExtendedFault"] = reflect.TypeOf((*ExtendedFault)(nil)).Elem() +} + +type ExtendedFaultFault ExtendedFault + +func init() { + t["ExtendedFaultFault"] = reflect.TypeOf((*ExtendedFaultFault)(nil)).Elem() +} + +type Extension struct { + DynamicData + + Description BaseDescription `xml:"description,typeattr"` + Key string `xml:"key"` + Company string `xml:"company,omitempty"` + Type string `xml:"type,omitempty"` + Version string `xml:"version"` + SubjectName string `xml:"subjectName,omitempty"` + Server []ExtensionServerInfo `xml:"server,omitempty"` + Client []ExtensionClientInfo `xml:"client,omitempty"` + TaskList []ExtensionTaskTypeInfo `xml:"taskList,omitempty"` + EventList []ExtensionEventTypeInfo `xml:"eventList,omitempty"` + FaultList []ExtensionFaultTypeInfo `xml:"faultList,omitempty"` + PrivilegeList []ExtensionPrivilegeInfo `xml:"privilegeList,omitempty"` + ResourceList []ExtensionResourceInfo `xml:"resourceList,omitempty"` + LastHeartbeatTime time.Time `xml:"lastHeartbeatTime"` + HealthInfo *ExtensionHealthInfo `xml:"healthInfo,omitempty"` + OvfConsumerInfo *ExtensionOvfConsumerInfo `xml:"ovfConsumerInfo,omitempty"` + ExtendedProductInfo *ExtExtendedProductInfo `xml:"extendedProductInfo,omitempty"` + ManagedEntityInfo []ExtManagedEntityInfo `xml:"managedEntityInfo,omitempty"` + ShownInSolutionManager *bool `xml:"shownInSolutionManager"` + SolutionManagerInfo *ExtSolutionManagerInfo `xml:"solutionManagerInfo,omitempty"` +} + +func init() { + t["Extension"] = reflect.TypeOf((*Extension)(nil)).Elem() +} + +type ExtensionClientInfo struct { + DynamicData + + Version string `xml:"version"` + Description BaseDescription `xml:"description,typeattr"` + Company string `xml:"company"` + Type string `xml:"type"` + Url string `xml:"url"` +} + +func init() { + t["ExtensionClientInfo"] = 
reflect.TypeOf((*ExtensionClientInfo)(nil)).Elem() +} + +type ExtensionEventTypeInfo struct { + DynamicData + + EventID string `xml:"eventID"` + EventTypeSchema string `xml:"eventTypeSchema,omitempty"` +} + +func init() { + t["ExtensionEventTypeInfo"] = reflect.TypeOf((*ExtensionEventTypeInfo)(nil)).Elem() +} + +type ExtensionFaultTypeInfo struct { + DynamicData + + FaultID string `xml:"faultID"` +} + +func init() { + t["ExtensionFaultTypeInfo"] = reflect.TypeOf((*ExtensionFaultTypeInfo)(nil)).Elem() +} + +type ExtensionHealthInfo struct { + DynamicData + + Url string `xml:"url"` +} + +func init() { + t["ExtensionHealthInfo"] = reflect.TypeOf((*ExtensionHealthInfo)(nil)).Elem() +} + +type ExtensionManagerIpAllocationUsage struct { + DynamicData + + ExtensionKey string `xml:"extensionKey"` + NumAddresses int32 `xml:"numAddresses"` +} + +func init() { + t["ExtensionManagerIpAllocationUsage"] = reflect.TypeOf((*ExtensionManagerIpAllocationUsage)(nil)).Elem() +} + +type ExtensionOvfConsumerInfo struct { + DynamicData + + CallbackUrl string `xml:"callbackUrl"` + SectionType []string `xml:"sectionType"` +} + +func init() { + t["ExtensionOvfConsumerInfo"] = reflect.TypeOf((*ExtensionOvfConsumerInfo)(nil)).Elem() +} + +type ExtensionPrivilegeInfo struct { + DynamicData + + PrivID string `xml:"privID"` + PrivGroupName string `xml:"privGroupName"` +} + +func init() { + t["ExtensionPrivilegeInfo"] = reflect.TypeOf((*ExtensionPrivilegeInfo)(nil)).Elem() +} + +type ExtensionResourceInfo struct { + DynamicData + + Locale string `xml:"locale"` + Module string `xml:"module"` + Data []KeyValue `xml:"data"` +} + +func init() { + t["ExtensionResourceInfo"] = reflect.TypeOf((*ExtensionResourceInfo)(nil)).Elem() +} + +type ExtensionServerInfo struct { + DynamicData + + Url string `xml:"url"` + Description BaseDescription `xml:"description,typeattr"` + Company string `xml:"company"` + Type string `xml:"type"` + AdminEmail []string `xml:"adminEmail"` + ServerThumbprint string `xml:"serverThumbprint,omitempty"` +} + +func init() { + t["ExtensionServerInfo"] = reflect.TypeOf((*ExtensionServerInfo)(nil)).Elem() +} + +type ExtensionTaskTypeInfo struct { + DynamicData + + TaskID string `xml:"taskID"` +} + +func init() { + t["ExtensionTaskTypeInfo"] = reflect.TypeOf((*ExtensionTaskTypeInfo)(nil)).Elem() +} + +type ExtractOvfEnvironment ExtractOvfEnvironmentRequestType + +func init() { + t["ExtractOvfEnvironment"] = reflect.TypeOf((*ExtractOvfEnvironment)(nil)).Elem() +} + +type ExtractOvfEnvironmentRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ExtractOvfEnvironmentRequestType"] = reflect.TypeOf((*ExtractOvfEnvironmentRequestType)(nil)).Elem() +} + +type ExtractOvfEnvironmentResponse struct { + Returnval string `xml:"returnval"` +} + +type FailToEnableSPBM struct { + NotEnoughLicenses + + Cs ManagedObjectReference `xml:"cs"` + CsName string `xml:"csName"` + HostLicenseStates []ComputeResourceHostSPBMLicenseInfo `xml:"hostLicenseStates"` +} + +func init() { + t["FailToEnableSPBM"] = reflect.TypeOf((*FailToEnableSPBM)(nil)).Elem() +} + +type FailToEnableSPBMFault FailToEnableSPBM + +func init() { + t["FailToEnableSPBMFault"] = reflect.TypeOf((*FailToEnableSPBMFault)(nil)).Elem() +} + +type FailToLockFaultToleranceVMs struct { + RuntimeFault + + VmName string `xml:"vmName"` + Vm ManagedObjectReference `xml:"vm"` + AlreadyLockedVm ManagedObjectReference `xml:"alreadyLockedVm"` +} + +func init() { + t["FailToLockFaultToleranceVMs"] = 
reflect.TypeOf((*FailToLockFaultToleranceVMs)(nil)).Elem() +} + +type FailToLockFaultToleranceVMsFault FailToLockFaultToleranceVMs + +func init() { + t["FailToLockFaultToleranceVMsFault"] = reflect.TypeOf((*FailToLockFaultToleranceVMsFault)(nil)).Elem() +} + +type FailoverLevelRestored struct { + ClusterEvent +} + +func init() { + t["FailoverLevelRestored"] = reflect.TypeOf((*FailoverLevelRestored)(nil)).Elem() +} + +type FailoverNodeInfo struct { + DynamicData + + ClusterIpSettings CustomizationIPSettings `xml:"clusterIpSettings"` + FailoverIp *CustomizationIPSettings `xml:"failoverIp,omitempty"` + BiosUuid string `xml:"biosUuid,omitempty"` +} + +func init() { + t["FailoverNodeInfo"] = reflect.TypeOf((*FailoverNodeInfo)(nil)).Elem() +} + +type FaultDomainId struct { + DynamicData + + Id string `xml:"id"` +} + +func init() { + t["FaultDomainId"] = reflect.TypeOf((*FaultDomainId)(nil)).Elem() +} + +type FaultToleranceAntiAffinityViolated struct { + MigrationFault + + HostName string `xml:"hostName"` + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["FaultToleranceAntiAffinityViolated"] = reflect.TypeOf((*FaultToleranceAntiAffinityViolated)(nil)).Elem() +} + +type FaultToleranceAntiAffinityViolatedFault FaultToleranceAntiAffinityViolated + +func init() { + t["FaultToleranceAntiAffinityViolatedFault"] = reflect.TypeOf((*FaultToleranceAntiAffinityViolatedFault)(nil)).Elem() +} + +type FaultToleranceCannotEditMem struct { + VmConfigFault + + VmName string `xml:"vmName"` + Vm ManagedObjectReference `xml:"vm"` +} + +func init() { + t["FaultToleranceCannotEditMem"] = reflect.TypeOf((*FaultToleranceCannotEditMem)(nil)).Elem() +} + +type FaultToleranceCannotEditMemFault FaultToleranceCannotEditMem + +func init() { + t["FaultToleranceCannotEditMemFault"] = reflect.TypeOf((*FaultToleranceCannotEditMemFault)(nil)).Elem() +} + +type FaultToleranceConfigInfo struct { + DynamicData + + Role int32 `xml:"role"` + InstanceUuids []string `xml:"instanceUuids"` + ConfigPaths []string `xml:"configPaths"` + Orphaned *bool `xml:"orphaned"` +} + +func init() { + t["FaultToleranceConfigInfo"] = reflect.TypeOf((*FaultToleranceConfigInfo)(nil)).Elem() +} + +type FaultToleranceConfigSpec struct { + DynamicData + + MetaDataPath *FaultToleranceMetaSpec `xml:"metaDataPath,omitempty"` + SecondaryVmSpec *FaultToleranceVMConfigSpec `xml:"secondaryVmSpec,omitempty"` +} + +func init() { + t["FaultToleranceConfigSpec"] = reflect.TypeOf((*FaultToleranceConfigSpec)(nil)).Elem() +} + +type FaultToleranceCpuIncompatible struct { + CpuIncompatible + + Model bool `xml:"model"` + Family bool `xml:"family"` + Stepping bool `xml:"stepping"` +} + +func init() { + t["FaultToleranceCpuIncompatible"] = reflect.TypeOf((*FaultToleranceCpuIncompatible)(nil)).Elem() +} + +type FaultToleranceCpuIncompatibleFault FaultToleranceCpuIncompatible + +func init() { + t["FaultToleranceCpuIncompatibleFault"] = reflect.TypeOf((*FaultToleranceCpuIncompatibleFault)(nil)).Elem() +} + +type FaultToleranceDiskSpec struct { + DynamicData + + Disk BaseVirtualDevice `xml:"disk,typeattr"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["FaultToleranceDiskSpec"] = reflect.TypeOf((*FaultToleranceDiskSpec)(nil)).Elem() +} + +type FaultToleranceMetaSpec struct { + DynamicData + + MetaDataDatastore ManagedObjectReference `xml:"metaDataDatastore"` +} + +func init() { + t["FaultToleranceMetaSpec"] = reflect.TypeOf((*FaultToleranceMetaSpec)(nil)).Elem() +} + +type FaultToleranceNeedsThickDisk struct { + MigrationFault + + 
VmName string `xml:"vmName"` +} + +func init() { + t["FaultToleranceNeedsThickDisk"] = reflect.TypeOf((*FaultToleranceNeedsThickDisk)(nil)).Elem() +} + +type FaultToleranceNeedsThickDiskFault FaultToleranceNeedsThickDisk + +func init() { + t["FaultToleranceNeedsThickDiskFault"] = reflect.TypeOf((*FaultToleranceNeedsThickDiskFault)(nil)).Elem() +} + +type FaultToleranceNotLicensed struct { + VmFaultToleranceIssue + + HostName string `xml:"hostName,omitempty"` +} + +func init() { + t["FaultToleranceNotLicensed"] = reflect.TypeOf((*FaultToleranceNotLicensed)(nil)).Elem() +} + +type FaultToleranceNotLicensedFault FaultToleranceNotLicensed + +func init() { + t["FaultToleranceNotLicensedFault"] = reflect.TypeOf((*FaultToleranceNotLicensedFault)(nil)).Elem() +} + +type FaultToleranceNotSameBuild struct { + MigrationFault + + Build string `xml:"build"` +} + +func init() { + t["FaultToleranceNotSameBuild"] = reflect.TypeOf((*FaultToleranceNotSameBuild)(nil)).Elem() +} + +type FaultToleranceNotSameBuildFault FaultToleranceNotSameBuild + +func init() { + t["FaultToleranceNotSameBuildFault"] = reflect.TypeOf((*FaultToleranceNotSameBuildFault)(nil)).Elem() +} + +type FaultTolerancePrimaryConfigInfo struct { + FaultToleranceConfigInfo + + Secondaries []ManagedObjectReference `xml:"secondaries"` +} + +func init() { + t["FaultTolerancePrimaryConfigInfo"] = reflect.TypeOf((*FaultTolerancePrimaryConfigInfo)(nil)).Elem() +} + +type FaultTolerancePrimaryPowerOnNotAttempted struct { + VmFaultToleranceIssue + + SecondaryVm ManagedObjectReference `xml:"secondaryVm"` + PrimaryVm ManagedObjectReference `xml:"primaryVm"` +} + +func init() { + t["FaultTolerancePrimaryPowerOnNotAttempted"] = reflect.TypeOf((*FaultTolerancePrimaryPowerOnNotAttempted)(nil)).Elem() +} + +type FaultTolerancePrimaryPowerOnNotAttemptedFault FaultTolerancePrimaryPowerOnNotAttempted + +func init() { + t["FaultTolerancePrimaryPowerOnNotAttemptedFault"] = reflect.TypeOf((*FaultTolerancePrimaryPowerOnNotAttemptedFault)(nil)).Elem() +} + +type FaultToleranceSecondaryConfigInfo struct { + FaultToleranceConfigInfo + + PrimaryVM ManagedObjectReference `xml:"primaryVM"` +} + +func init() { + t["FaultToleranceSecondaryConfigInfo"] = reflect.TypeOf((*FaultToleranceSecondaryConfigInfo)(nil)).Elem() +} + +type FaultToleranceSecondaryOpResult struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + PowerOnAttempted bool `xml:"powerOnAttempted"` + PowerOnResult *ClusterPowerOnVmResult `xml:"powerOnResult,omitempty"` +} + +func init() { + t["FaultToleranceSecondaryOpResult"] = reflect.TypeOf((*FaultToleranceSecondaryOpResult)(nil)).Elem() +} + +type FaultToleranceVMConfigSpec struct { + DynamicData + + VmConfig *ManagedObjectReference `xml:"vmConfig,omitempty"` + Disks []FaultToleranceDiskSpec `xml:"disks,omitempty"` +} + +func init() { + t["FaultToleranceVMConfigSpec"] = reflect.TypeOf((*FaultToleranceVMConfigSpec)(nil)).Elem() +} + +type FaultToleranceVmNotDasProtected struct { + VimFault + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` +} + +func init() { + t["FaultToleranceVmNotDasProtected"] = reflect.TypeOf((*FaultToleranceVmNotDasProtected)(nil)).Elem() +} + +type FaultToleranceVmNotDasProtectedFault FaultToleranceVmNotDasProtected + +func init() { + t["FaultToleranceVmNotDasProtectedFault"] = reflect.TypeOf((*FaultToleranceVmNotDasProtectedFault)(nil)).Elem() +} + +type FaultsByHost struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Faults []LocalizedMethodFault `xml:"faults,omitempty"` +} + +func 
init() { + t["FaultsByHost"] = reflect.TypeOf((*FaultsByHost)(nil)).Elem() +} + +type FaultsByVM struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + Faults []LocalizedMethodFault `xml:"faults,omitempty"` +} + +func init() { + t["FaultsByVM"] = reflect.TypeOf((*FaultsByVM)(nil)).Elem() +} + +type FcoeConfig struct { + DynamicData + + PriorityClass int32 `xml:"priorityClass"` + SourceMac string `xml:"sourceMac"` + VlanRange []FcoeConfigVlanRange `xml:"vlanRange"` + Capabilities FcoeConfigFcoeCapabilities `xml:"capabilities"` + FcoeActive bool `xml:"fcoeActive"` +} + +func init() { + t["FcoeConfig"] = reflect.TypeOf((*FcoeConfig)(nil)).Elem() +} + +type FcoeConfigFcoeCapabilities struct { + DynamicData + + PriorityClass bool `xml:"priorityClass"` + SourceMacAddress bool `xml:"sourceMacAddress"` + VlanRange bool `xml:"vlanRange"` +} + +func init() { + t["FcoeConfigFcoeCapabilities"] = reflect.TypeOf((*FcoeConfigFcoeCapabilities)(nil)).Elem() +} + +type FcoeConfigFcoeSpecification struct { + DynamicData + + UnderlyingPnic string `xml:"underlyingPnic"` + PriorityClass int32 `xml:"priorityClass,omitempty"` + SourceMac string `xml:"sourceMac,omitempty"` + VlanRange []FcoeConfigVlanRange `xml:"vlanRange,omitempty"` +} + +func init() { + t["FcoeConfigFcoeSpecification"] = reflect.TypeOf((*FcoeConfigFcoeSpecification)(nil)).Elem() +} + +type FcoeConfigVlanRange struct { + DynamicData + + VlanLow int32 `xml:"vlanLow"` + VlanHigh int32 `xml:"vlanHigh"` +} + +func init() { + t["FcoeConfigVlanRange"] = reflect.TypeOf((*FcoeConfigVlanRange)(nil)).Elem() +} + +type FcoeFault struct { + VimFault +} + +func init() { + t["FcoeFault"] = reflect.TypeOf((*FcoeFault)(nil)).Elem() +} + +type FcoeFaultFault BaseFcoeFault + +func init() { + t["FcoeFaultFault"] = reflect.TypeOf((*FcoeFaultFault)(nil)).Elem() +} + +type FcoeFaultPnicHasNoPortSet struct { + FcoeFault + + NicDevice string `xml:"nicDevice"` +} + +func init() { + t["FcoeFaultPnicHasNoPortSet"] = reflect.TypeOf((*FcoeFaultPnicHasNoPortSet)(nil)).Elem() +} + +type FcoeFaultPnicHasNoPortSetFault FcoeFaultPnicHasNoPortSet + +func init() { + t["FcoeFaultPnicHasNoPortSetFault"] = reflect.TypeOf((*FcoeFaultPnicHasNoPortSetFault)(nil)).Elem() +} + +type FeatureRequirementsNotMet struct { + VirtualHardwareCompatibilityIssue + + FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty"` + Vm *ManagedObjectReference `xml:"vm,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["FeatureRequirementsNotMet"] = reflect.TypeOf((*FeatureRequirementsNotMet)(nil)).Elem() +} + +type FeatureRequirementsNotMetFault FeatureRequirementsNotMet + +func init() { + t["FeatureRequirementsNotMetFault"] = reflect.TypeOf((*FeatureRequirementsNotMetFault)(nil)).Elem() +} + +type FetchDVPortKeys FetchDVPortKeysRequestType + +func init() { + t["FetchDVPortKeys"] = reflect.TypeOf((*FetchDVPortKeys)(nil)).Elem() +} + +type FetchDVPortKeysRequestType struct { + This ManagedObjectReference `xml:"_this"` + Criteria *DistributedVirtualSwitchPortCriteria `xml:"criteria,omitempty"` +} + +func init() { + t["FetchDVPortKeysRequestType"] = reflect.TypeOf((*FetchDVPortKeysRequestType)(nil)).Elem() +} + +type FetchDVPortKeysResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type FetchDVPorts FetchDVPortsRequestType + +func init() { + t["FetchDVPorts"] = reflect.TypeOf((*FetchDVPorts)(nil)).Elem() +} + +type FetchDVPortsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Criteria 
*DistributedVirtualSwitchPortCriteria `xml:"criteria,omitempty"` +} + +func init() { + t["FetchDVPortsRequestType"] = reflect.TypeOf((*FetchDVPortsRequestType)(nil)).Elem() +} + +type FetchDVPortsResponse struct { + Returnval []DistributedVirtualPort `xml:"returnval,omitempty"` +} + +type FetchSystemEventLog FetchSystemEventLogRequestType + +func init() { + t["FetchSystemEventLog"] = reflect.TypeOf((*FetchSystemEventLog)(nil)).Elem() +} + +type FetchSystemEventLogRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["FetchSystemEventLogRequestType"] = reflect.TypeOf((*FetchSystemEventLogRequestType)(nil)).Elem() +} + +type FetchSystemEventLogResponse struct { + Returnval []SystemEventInfo `xml:"returnval,omitempty"` +} + +type FetchUserPrivilegeOnEntities FetchUserPrivilegeOnEntitiesRequestType + +func init() { + t["FetchUserPrivilegeOnEntities"] = reflect.TypeOf((*FetchUserPrivilegeOnEntities)(nil)).Elem() +} + +type FetchUserPrivilegeOnEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entities []ManagedObjectReference `xml:"entities"` + UserName string `xml:"userName"` +} + +func init() { + t["FetchUserPrivilegeOnEntitiesRequestType"] = reflect.TypeOf((*FetchUserPrivilegeOnEntitiesRequestType)(nil)).Elem() +} + +type FetchUserPrivilegeOnEntitiesResponse struct { + Returnval []UserPrivilegeResult `xml:"returnval,omitempty"` +} + +type FileAlreadyExists struct { + FileFault +} + +func init() { + t["FileAlreadyExists"] = reflect.TypeOf((*FileAlreadyExists)(nil)).Elem() +} + +type FileAlreadyExistsFault FileAlreadyExists + +func init() { + t["FileAlreadyExistsFault"] = reflect.TypeOf((*FileAlreadyExistsFault)(nil)).Elem() +} + +type FileBackedPortNotSupported struct { + DeviceNotSupported +} + +func init() { + t["FileBackedPortNotSupported"] = reflect.TypeOf((*FileBackedPortNotSupported)(nil)).Elem() +} + +type FileBackedPortNotSupportedFault FileBackedPortNotSupported + +func init() { + t["FileBackedPortNotSupportedFault"] = reflect.TypeOf((*FileBackedPortNotSupportedFault)(nil)).Elem() +} + +type FileBackedVirtualDiskSpec struct { + VirtualDiskSpec + + CapacityKb int64 `xml:"capacityKb"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr"` +} + +func init() { + t["FileBackedVirtualDiskSpec"] = reflect.TypeOf((*FileBackedVirtualDiskSpec)(nil)).Elem() +} + +type FileFault struct { + VimFault + + File string `xml:"file"` +} + +func init() { + t["FileFault"] = reflect.TypeOf((*FileFault)(nil)).Elem() +} + +type FileFaultFault BaseFileFault + +func init() { + t["FileFaultFault"] = reflect.TypeOf((*FileFaultFault)(nil)).Elem() +} + +type FileInfo struct { + DynamicData + + Path string `xml:"path"` + FriendlyName string `xml:"friendlyName,omitempty"` + FileSize int64 `xml:"fileSize,omitempty"` + Modification *time.Time `xml:"modification"` + Owner string `xml:"owner,omitempty"` +} + +func init() { + t["FileInfo"] = reflect.TypeOf((*FileInfo)(nil)).Elem() +} + +type FileLocked struct { + FileFault +} + +func init() { + t["FileLocked"] = reflect.TypeOf((*FileLocked)(nil)).Elem() +} + +type FileLockedFault FileLocked + +func init() { + t["FileLockedFault"] = reflect.TypeOf((*FileLockedFault)(nil)).Elem() +} + +type FileNameTooLong struct { + FileFault +} + +func init() { + t["FileNameTooLong"] = reflect.TypeOf((*FileNameTooLong)(nil)).Elem() +} + +type FileNameTooLongFault FileNameTooLong + +func init() { + t["FileNameTooLongFault"] = 
reflect.TypeOf((*FileNameTooLongFault)(nil)).Elem() +} + +type FileNotFound struct { + FileFault +} + +func init() { + t["FileNotFound"] = reflect.TypeOf((*FileNotFound)(nil)).Elem() +} + +type FileNotFoundFault FileNotFound + +func init() { + t["FileNotFoundFault"] = reflect.TypeOf((*FileNotFoundFault)(nil)).Elem() +} + +type FileNotWritable struct { + FileFault +} + +func init() { + t["FileNotWritable"] = reflect.TypeOf((*FileNotWritable)(nil)).Elem() +} + +type FileNotWritableFault FileNotWritable + +func init() { + t["FileNotWritableFault"] = reflect.TypeOf((*FileNotWritableFault)(nil)).Elem() +} + +type FileQuery struct { + DynamicData +} + +func init() { + t["FileQuery"] = reflect.TypeOf((*FileQuery)(nil)).Elem() +} + +type FileQueryFlags struct { + DynamicData + + FileType bool `xml:"fileType"` + FileSize bool `xml:"fileSize"` + Modification bool `xml:"modification"` + FileOwner *bool `xml:"fileOwner"` +} + +func init() { + t["FileQueryFlags"] = reflect.TypeOf((*FileQueryFlags)(nil)).Elem() +} + +type FileTooLarge struct { + FileFault + + Datastore string `xml:"datastore"` + FileSize int64 `xml:"fileSize"` + MaxFileSize int64 `xml:"maxFileSize,omitempty"` +} + +func init() { + t["FileTooLarge"] = reflect.TypeOf((*FileTooLarge)(nil)).Elem() +} + +type FileTooLargeFault FileTooLarge + +func init() { + t["FileTooLargeFault"] = reflect.TypeOf((*FileTooLargeFault)(nil)).Elem() +} + +type FileTransferInformation struct { + DynamicData + + Attributes BaseGuestFileAttributes `xml:"attributes,typeattr"` + Size int64 `xml:"size"` + Url string `xml:"url"` +} + +func init() { + t["FileTransferInformation"] = reflect.TypeOf((*FileTransferInformation)(nil)).Elem() +} + +type FilesystemQuiesceFault struct { + SnapshotFault +} + +func init() { + t["FilesystemQuiesceFault"] = reflect.TypeOf((*FilesystemQuiesceFault)(nil)).Elem() +} + +type FilesystemQuiesceFaultFault FilesystemQuiesceFault + +func init() { + t["FilesystemQuiesceFaultFault"] = reflect.TypeOf((*FilesystemQuiesceFaultFault)(nil)).Elem() +} + +type FilterInUse struct { + ResourceInUse + + Disk []VirtualDiskId `xml:"disk,omitempty"` +} + +func init() { + t["FilterInUse"] = reflect.TypeOf((*FilterInUse)(nil)).Elem() +} + +type FilterInUseFault FilterInUse + +func init() { + t["FilterInUseFault"] = reflect.TypeOf((*FilterInUseFault)(nil)).Elem() +} + +type FindAllByDnsName FindAllByDnsNameRequestType + +func init() { + t["FindAllByDnsName"] = reflect.TypeOf((*FindAllByDnsName)(nil)).Elem() +} + +type FindAllByDnsNameRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + DnsName string `xml:"dnsName"` + VmSearch bool `xml:"vmSearch"` +} + +func init() { + t["FindAllByDnsNameRequestType"] = reflect.TypeOf((*FindAllByDnsNameRequestType)(nil)).Elem() +} + +type FindAllByDnsNameResponse struct { + Returnval []ManagedObjectReference `xml:"returnval"` +} + +type FindAllByIp FindAllByIpRequestType + +func init() { + t["FindAllByIp"] = reflect.TypeOf((*FindAllByIp)(nil)).Elem() +} + +type FindAllByIpRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + Ip string `xml:"ip"` + VmSearch bool `xml:"vmSearch"` +} + +func init() { + t["FindAllByIpRequestType"] = reflect.TypeOf((*FindAllByIpRequestType)(nil)).Elem() +} + +type FindAllByIpResponse struct { + Returnval []ManagedObjectReference `xml:"returnval"` +} + +type FindAllByUuid FindAllByUuidRequestType + +func init() { + t["FindAllByUuid"] = 
reflect.TypeOf((*FindAllByUuid)(nil)).Elem() +} + +type FindAllByUuidRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + Uuid string `xml:"uuid"` + VmSearch bool `xml:"vmSearch"` + InstanceUuid *bool `xml:"instanceUuid"` +} + +func init() { + t["FindAllByUuidRequestType"] = reflect.TypeOf((*FindAllByUuidRequestType)(nil)).Elem() +} + +type FindAllByUuidResponse struct { + Returnval []ManagedObjectReference `xml:"returnval"` +} + +type FindAssociatedProfile FindAssociatedProfileRequestType + +func init() { + t["FindAssociatedProfile"] = reflect.TypeOf((*FindAssociatedProfile)(nil)).Elem() +} + +type FindAssociatedProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["FindAssociatedProfileRequestType"] = reflect.TypeOf((*FindAssociatedProfileRequestType)(nil)).Elem() +} + +type FindAssociatedProfileResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type FindByDatastorePath FindByDatastorePathRequestType + +func init() { + t["FindByDatastorePath"] = reflect.TypeOf((*FindByDatastorePath)(nil)).Elem() +} + +type FindByDatastorePathRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datacenter ManagedObjectReference `xml:"datacenter"` + Path string `xml:"path"` +} + +func init() { + t["FindByDatastorePathRequestType"] = reflect.TypeOf((*FindByDatastorePathRequestType)(nil)).Elem() +} + +type FindByDatastorePathResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type FindByDnsName FindByDnsNameRequestType + +func init() { + t["FindByDnsName"] = reflect.TypeOf((*FindByDnsName)(nil)).Elem() +} + +type FindByDnsNameRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + DnsName string `xml:"dnsName"` + VmSearch bool `xml:"vmSearch"` +} + +func init() { + t["FindByDnsNameRequestType"] = reflect.TypeOf((*FindByDnsNameRequestType)(nil)).Elem() +} + +type FindByDnsNameResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type FindByInventoryPath FindByInventoryPathRequestType + +func init() { + t["FindByInventoryPath"] = reflect.TypeOf((*FindByInventoryPath)(nil)).Elem() +} + +type FindByInventoryPathRequestType struct { + This ManagedObjectReference `xml:"_this"` + InventoryPath string `xml:"inventoryPath"` +} + +func init() { + t["FindByInventoryPathRequestType"] = reflect.TypeOf((*FindByInventoryPathRequestType)(nil)).Elem() +} + +type FindByInventoryPathResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type FindByIp FindByIpRequestType + +func init() { + t["FindByIp"] = reflect.TypeOf((*FindByIp)(nil)).Elem() +} + +type FindByIpRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + Ip string `xml:"ip"` + VmSearch bool `xml:"vmSearch"` +} + +func init() { + t["FindByIpRequestType"] = reflect.TypeOf((*FindByIpRequestType)(nil)).Elem() +} + +type FindByIpResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type FindByUuid FindByUuidRequestType + +func init() { + t["FindByUuid"] = reflect.TypeOf((*FindByUuid)(nil)).Elem() +} + +type FindByUuidRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + Uuid string 
`xml:"uuid"` + VmSearch bool `xml:"vmSearch"` + InstanceUuid *bool `xml:"instanceUuid"` +} + +func init() { + t["FindByUuidRequestType"] = reflect.TypeOf((*FindByUuidRequestType)(nil)).Elem() +} + +type FindByUuidResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type FindChild FindChildRequestType + +func init() { + t["FindChild"] = reflect.TypeOf((*FindChild)(nil)).Elem() +} + +type FindChildRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + Name string `xml:"name"` +} + +func init() { + t["FindChildRequestType"] = reflect.TypeOf((*FindChildRequestType)(nil)).Elem() +} + +type FindChildResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type FindExtension FindExtensionRequestType + +func init() { + t["FindExtension"] = reflect.TypeOf((*FindExtension)(nil)).Elem() +} + +type FindExtensionRequestType struct { + This ManagedObjectReference `xml:"_this"` + ExtensionKey string `xml:"extensionKey"` +} + +func init() { + t["FindExtensionRequestType"] = reflect.TypeOf((*FindExtensionRequestType)(nil)).Elem() +} + +type FindExtensionResponse struct { + Returnval *Extension `xml:"returnval,omitempty"` +} + +type FindRulesForVm FindRulesForVmRequestType + +func init() { + t["FindRulesForVm"] = reflect.TypeOf((*FindRulesForVm)(nil)).Elem() +} + +type FindRulesForVmRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` +} + +func init() { + t["FindRulesForVmRequestType"] = reflect.TypeOf((*FindRulesForVmRequestType)(nil)).Elem() +} + +type FindRulesForVmResponse struct { + Returnval []BaseClusterRuleInfo `xml:"returnval,omitempty,typeattr"` +} + +type FirewallProfile struct { + ApplyProfile + + Ruleset []FirewallProfileRulesetProfile `xml:"ruleset,omitempty"` +} + +func init() { + t["FirewallProfile"] = reflect.TypeOf((*FirewallProfile)(nil)).Elem() +} + +type FirewallProfileRulesetProfile struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["FirewallProfileRulesetProfile"] = reflect.TypeOf((*FirewallProfileRulesetProfile)(nil)).Elem() +} + +type FloatOption struct { + OptionType + + Min float32 `xml:"min"` + Max float32 `xml:"max"` + DefaultValue float32 `xml:"defaultValue"` +} + +func init() { + t["FloatOption"] = reflect.TypeOf((*FloatOption)(nil)).Elem() +} + +type FloppyImageFileInfo struct { + FileInfo +} + +func init() { + t["FloppyImageFileInfo"] = reflect.TypeOf((*FloppyImageFileInfo)(nil)).Elem() +} + +type FloppyImageFileQuery struct { + FileQuery +} + +func init() { + t["FloppyImageFileQuery"] = reflect.TypeOf((*FloppyImageFileQuery)(nil)).Elem() +} + +type FolderEventArgument struct { + EntityEventArgument + + Folder ManagedObjectReference `xml:"folder"` +} + +func init() { + t["FolderEventArgument"] = reflect.TypeOf((*FolderEventArgument)(nil)).Elem() +} + +type FolderFileInfo struct { + FileInfo +} + +func init() { + t["FolderFileInfo"] = reflect.TypeOf((*FolderFileInfo)(nil)).Elem() +} + +type FolderFileQuery struct { + FileQuery +} + +func init() { + t["FolderFileQuery"] = reflect.TypeOf((*FolderFileQuery)(nil)).Elem() +} + +type FormatVffs FormatVffsRequestType + +func init() { + t["FormatVffs"] = reflect.TypeOf((*FormatVffs)(nil)).Elem() +} + +type FormatVffsRequestType struct { + This ManagedObjectReference `xml:"_this"` + CreateSpec HostVffsSpec `xml:"createSpec"` +} + +func init() { + t["FormatVffsRequestType"] = reflect.TypeOf((*FormatVffsRequestType)(nil)).Elem() +} + +type 
FormatVffsResponse struct { + Returnval HostVffsVolume `xml:"returnval"` +} + +type FormatVmfs FormatVmfsRequestType + +func init() { + t["FormatVmfs"] = reflect.TypeOf((*FormatVmfs)(nil)).Elem() +} + +type FormatVmfsRequestType struct { + This ManagedObjectReference `xml:"_this"` + CreateSpec HostVmfsSpec `xml:"createSpec"` +} + +func init() { + t["FormatVmfsRequestType"] = reflect.TypeOf((*FormatVmfsRequestType)(nil)).Elem() +} + +type FormatVmfsResponse struct { + Returnval HostVmfsVolume `xml:"returnval"` +} + +type FtIssuesOnHost struct { + VmFaultToleranceIssue + + Host ManagedObjectReference `xml:"host"` + HostName string `xml:"hostName"` + Errors []LocalizedMethodFault `xml:"errors,omitempty"` +} + +func init() { + t["FtIssuesOnHost"] = reflect.TypeOf((*FtIssuesOnHost)(nil)).Elem() +} + +type FtIssuesOnHostFault FtIssuesOnHost + +func init() { + t["FtIssuesOnHostFault"] = reflect.TypeOf((*FtIssuesOnHostFault)(nil)).Elem() +} + +type FullStorageVMotionNotSupported struct { + MigrationFeatureNotSupported +} + +func init() { + t["FullStorageVMotionNotSupported"] = reflect.TypeOf((*FullStorageVMotionNotSupported)(nil)).Elem() +} + +type FullStorageVMotionNotSupportedFault FullStorageVMotionNotSupported + +func init() { + t["FullStorageVMotionNotSupportedFault"] = reflect.TypeOf((*FullStorageVMotionNotSupportedFault)(nil)).Elem() +} + +type GatewayConnectFault struct { + HostConnectFault + + GatewayType string `xml:"gatewayType"` + GatewayId string `xml:"gatewayId"` + GatewayInfo string `xml:"gatewayInfo"` + Details *LocalizableMessage `xml:"details,omitempty"` +} + +func init() { + t["GatewayConnectFault"] = reflect.TypeOf((*GatewayConnectFault)(nil)).Elem() +} + +type GatewayConnectFaultFault BaseGatewayConnectFault + +func init() { + t["GatewayConnectFaultFault"] = reflect.TypeOf((*GatewayConnectFaultFault)(nil)).Elem() +} + +type GatewayHostNotReachable struct { + GatewayToHostConnectFault +} + +func init() { + t["GatewayHostNotReachable"] = reflect.TypeOf((*GatewayHostNotReachable)(nil)).Elem() +} + +type GatewayHostNotReachableFault GatewayHostNotReachable + +func init() { + t["GatewayHostNotReachableFault"] = reflect.TypeOf((*GatewayHostNotReachableFault)(nil)).Elem() +} + +type GatewayNotFound struct { + GatewayConnectFault +} + +func init() { + t["GatewayNotFound"] = reflect.TypeOf((*GatewayNotFound)(nil)).Elem() +} + +type GatewayNotFoundFault GatewayNotFound + +func init() { + t["GatewayNotFoundFault"] = reflect.TypeOf((*GatewayNotFoundFault)(nil)).Elem() +} + +type GatewayNotReachable struct { + GatewayConnectFault +} + +func init() { + t["GatewayNotReachable"] = reflect.TypeOf((*GatewayNotReachable)(nil)).Elem() +} + +type GatewayNotReachableFault GatewayNotReachable + +func init() { + t["GatewayNotReachableFault"] = reflect.TypeOf((*GatewayNotReachableFault)(nil)).Elem() +} + +type GatewayOperationRefused struct { + GatewayConnectFault +} + +func init() { + t["GatewayOperationRefused"] = reflect.TypeOf((*GatewayOperationRefused)(nil)).Elem() +} + +type GatewayOperationRefusedFault GatewayOperationRefused + +func init() { + t["GatewayOperationRefusedFault"] = reflect.TypeOf((*GatewayOperationRefusedFault)(nil)).Elem() +} + +type GatewayToHostAuthFault struct { + GatewayToHostConnectFault + + InvalidProperties []string `xml:"invalidProperties"` + MissingProperties []string `xml:"missingProperties"` +} + +func init() { + t["GatewayToHostAuthFault"] = reflect.TypeOf((*GatewayToHostAuthFault)(nil)).Elem() +} + +type GatewayToHostAuthFaultFault GatewayToHostAuthFault + +func 
init() { + t["GatewayToHostAuthFaultFault"] = reflect.TypeOf((*GatewayToHostAuthFaultFault)(nil)).Elem() +} + +type GatewayToHostConnectFault struct { + GatewayConnectFault + + Hostname string `xml:"hostname"` + Port int32 `xml:"port,omitempty"` +} + +func init() { + t["GatewayToHostConnectFault"] = reflect.TypeOf((*GatewayToHostConnectFault)(nil)).Elem() +} + +type GatewayToHostConnectFaultFault BaseGatewayToHostConnectFault + +func init() { + t["GatewayToHostConnectFaultFault"] = reflect.TypeOf((*GatewayToHostConnectFaultFault)(nil)).Elem() +} + +type GatewayToHostTrustVerifyFault struct { + GatewayToHostConnectFault + + VerificationToken string `xml:"verificationToken"` + PropertiesToVerify []KeyValue `xml:"propertiesToVerify"` +} + +func init() { + t["GatewayToHostTrustVerifyFault"] = reflect.TypeOf((*GatewayToHostTrustVerifyFault)(nil)).Elem() +} + +type GatewayToHostTrustVerifyFaultFault GatewayToHostTrustVerifyFault + +func init() { + t["GatewayToHostTrustVerifyFaultFault"] = reflect.TypeOf((*GatewayToHostTrustVerifyFaultFault)(nil)).Elem() +} + +type GeneralEvent struct { + Event + + Message string `xml:"message"` +} + +func init() { + t["GeneralEvent"] = reflect.TypeOf((*GeneralEvent)(nil)).Elem() +} + +type GeneralHostErrorEvent struct { + GeneralEvent +} + +func init() { + t["GeneralHostErrorEvent"] = reflect.TypeOf((*GeneralHostErrorEvent)(nil)).Elem() +} + +type GeneralHostInfoEvent struct { + GeneralEvent +} + +func init() { + t["GeneralHostInfoEvent"] = reflect.TypeOf((*GeneralHostInfoEvent)(nil)).Elem() +} + +type GeneralHostWarningEvent struct { + GeneralEvent +} + +func init() { + t["GeneralHostWarningEvent"] = reflect.TypeOf((*GeneralHostWarningEvent)(nil)).Elem() +} + +type GeneralUserEvent struct { + GeneralEvent + + Entity *ManagedEntityEventArgument `xml:"entity,omitempty"` +} + +func init() { + t["GeneralUserEvent"] = reflect.TypeOf((*GeneralUserEvent)(nil)).Elem() +} + +type GeneralVmErrorEvent struct { + GeneralEvent +} + +func init() { + t["GeneralVmErrorEvent"] = reflect.TypeOf((*GeneralVmErrorEvent)(nil)).Elem() +} + +type GeneralVmInfoEvent struct { + GeneralEvent +} + +func init() { + t["GeneralVmInfoEvent"] = reflect.TypeOf((*GeneralVmInfoEvent)(nil)).Elem() +} + +type GeneralVmWarningEvent struct { + GeneralEvent +} + +func init() { + t["GeneralVmWarningEvent"] = reflect.TypeOf((*GeneralVmWarningEvent)(nil)).Elem() +} + +type GenerateCertificateSigningRequest GenerateCertificateSigningRequestRequestType + +func init() { + t["GenerateCertificateSigningRequest"] = reflect.TypeOf((*GenerateCertificateSigningRequest)(nil)).Elem() +} + +type GenerateCertificateSigningRequestByDn GenerateCertificateSigningRequestByDnRequestType + +func init() { + t["GenerateCertificateSigningRequestByDn"] = reflect.TypeOf((*GenerateCertificateSigningRequestByDn)(nil)).Elem() +} + +type GenerateCertificateSigningRequestByDnRequestType struct { + This ManagedObjectReference `xml:"_this"` + DistinguishedName string `xml:"distinguishedName"` +} + +func init() { + t["GenerateCertificateSigningRequestByDnRequestType"] = reflect.TypeOf((*GenerateCertificateSigningRequestByDnRequestType)(nil)).Elem() +} + +type GenerateCertificateSigningRequestByDnResponse struct { + Returnval string `xml:"returnval"` +} + +type GenerateCertificateSigningRequestRequestType struct { + This ManagedObjectReference `xml:"_this"` + UseIpAddressAsCommonName bool `xml:"useIpAddressAsCommonName"` +} + +func init() { + t["GenerateCertificateSigningRequestRequestType"] = 
reflect.TypeOf((*GenerateCertificateSigningRequestRequestType)(nil)).Elem() +} + +type GenerateCertificateSigningRequestResponse struct { + Returnval string `xml:"returnval"` +} + +type GenerateClientCsr GenerateClientCsrRequestType + +func init() { + t["GenerateClientCsr"] = reflect.TypeOf((*GenerateClientCsr)(nil)).Elem() +} + +type GenerateClientCsrRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster KeyProviderId `xml:"cluster"` +} + +func init() { + t["GenerateClientCsrRequestType"] = reflect.TypeOf((*GenerateClientCsrRequestType)(nil)).Elem() +} + +type GenerateClientCsrResponse struct { + Returnval string `xml:"returnval"` +} + +type GenerateConfigTaskList GenerateConfigTaskListRequestType + +func init() { + t["GenerateConfigTaskList"] = reflect.TypeOf((*GenerateConfigTaskList)(nil)).Elem() +} + +type GenerateConfigTaskListRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConfigSpec HostConfigSpec `xml:"configSpec"` + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["GenerateConfigTaskListRequestType"] = reflect.TypeOf((*GenerateConfigTaskListRequestType)(nil)).Elem() +} + +type GenerateConfigTaskListResponse struct { + Returnval HostProfileManagerConfigTaskList `xml:"returnval"` +} + +type GenerateHostConfigTaskSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + HostsInfo []StructuredCustomizations `xml:"hostsInfo,omitempty"` +} + +func init() { + t["GenerateHostConfigTaskSpecRequestType"] = reflect.TypeOf((*GenerateHostConfigTaskSpecRequestType)(nil)).Elem() +} + +type GenerateHostConfigTaskSpec_Task GenerateHostConfigTaskSpecRequestType + +func init() { + t["GenerateHostConfigTaskSpec_Task"] = reflect.TypeOf((*GenerateHostConfigTaskSpec_Task)(nil)).Elem() +} + +type GenerateHostConfigTaskSpec_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type GenerateHostProfileTaskListRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConfigSpec HostConfigSpec `xml:"configSpec"` + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["GenerateHostProfileTaskListRequestType"] = reflect.TypeOf((*GenerateHostProfileTaskListRequestType)(nil)).Elem() +} + +type GenerateHostProfileTaskList_Task GenerateHostProfileTaskListRequestType + +func init() { + t["GenerateHostProfileTaskList_Task"] = reflect.TypeOf((*GenerateHostProfileTaskList_Task)(nil)).Elem() +} + +type GenerateHostProfileTaskList_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type GenerateKey GenerateKeyRequestType + +func init() { + t["GenerateKey"] = reflect.TypeOf((*GenerateKey)(nil)).Elem() +} + +type GenerateKeyRequestType struct { + This ManagedObjectReference `xml:"_this"` + KeyProvider *KeyProviderId `xml:"keyProvider,omitempty"` +} + +func init() { + t["GenerateKeyRequestType"] = reflect.TypeOf((*GenerateKeyRequestType)(nil)).Elem() +} + +type GenerateKeyResponse struct { + Returnval CryptoKeyResult `xml:"returnval"` +} + +type GenerateLogBundlesRequestType struct { + This ManagedObjectReference `xml:"_this"` + IncludeDefault bool `xml:"includeDefault"` + Host []ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["GenerateLogBundlesRequestType"] = reflect.TypeOf((*GenerateLogBundlesRequestType)(nil)).Elem() +} + +type GenerateLogBundles_Task GenerateLogBundlesRequestType + +func init() { + t["GenerateLogBundles_Task"] = reflect.TypeOf((*GenerateLogBundles_Task)(nil)).Elem() +} + +type GenerateLogBundles_TaskResponse struct { + Returnval 
ManagedObjectReference `xml:"returnval"` +} + +type GenerateSelfSignedClientCert GenerateSelfSignedClientCertRequestType + +func init() { + t["GenerateSelfSignedClientCert"] = reflect.TypeOf((*GenerateSelfSignedClientCert)(nil)).Elem() +} + +type GenerateSelfSignedClientCertRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster KeyProviderId `xml:"cluster"` +} + +func init() { + t["GenerateSelfSignedClientCertRequestType"] = reflect.TypeOf((*GenerateSelfSignedClientCertRequestType)(nil)).Elem() +} + +type GenerateSelfSignedClientCertResponse struct { + Returnval string `xml:"returnval"` +} + +type GenericDrsFault struct { + VimFault + + HostFaults []LocalizedMethodFault `xml:"hostFaults,omitempty"` +} + +func init() { + t["GenericDrsFault"] = reflect.TypeOf((*GenericDrsFault)(nil)).Elem() +} + +type GenericDrsFaultFault GenericDrsFault + +func init() { + t["GenericDrsFaultFault"] = reflect.TypeOf((*GenericDrsFaultFault)(nil)).Elem() +} + +type GenericVmConfigFault struct { + VmConfigFault + + Reason string `xml:"reason"` +} + +func init() { + t["GenericVmConfigFault"] = reflect.TypeOf((*GenericVmConfigFault)(nil)).Elem() +} + +type GenericVmConfigFaultFault GenericVmConfigFault + +func init() { + t["GenericVmConfigFaultFault"] = reflect.TypeOf((*GenericVmConfigFaultFault)(nil)).Elem() +} + +type GetAlarm GetAlarmRequestType + +func init() { + t["GetAlarm"] = reflect.TypeOf((*GetAlarm)(nil)).Elem() +} + +type GetAlarmRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["GetAlarmRequestType"] = reflect.TypeOf((*GetAlarmRequestType)(nil)).Elem() +} + +type GetAlarmResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type GetAlarmState GetAlarmStateRequestType + +func init() { + t["GetAlarmState"] = reflect.TypeOf((*GetAlarmState)(nil)).Elem() +} + +type GetAlarmStateRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["GetAlarmStateRequestType"] = reflect.TypeOf((*GetAlarmStateRequestType)(nil)).Elem() +} + +type GetAlarmStateResponse struct { + Returnval []AlarmState `xml:"returnval,omitempty"` +} + +type GetCustomizationSpec GetCustomizationSpecRequestType + +func init() { + t["GetCustomizationSpec"] = reflect.TypeOf((*GetCustomizationSpec)(nil)).Elem() +} + +type GetCustomizationSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` +} + +func init() { + t["GetCustomizationSpecRequestType"] = reflect.TypeOf((*GetCustomizationSpecRequestType)(nil)).Elem() +} + +type GetCustomizationSpecResponse struct { + Returnval CustomizationSpecItem `xml:"returnval"` +} + +type GetPublicKey GetPublicKeyRequestType + +func init() { + t["GetPublicKey"] = reflect.TypeOf((*GetPublicKey)(nil)).Elem() +} + +type GetPublicKeyRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["GetPublicKeyRequestType"] = reflect.TypeOf((*GetPublicKeyRequestType)(nil)).Elem() +} + +type GetPublicKeyResponse struct { + Returnval string `xml:"returnval"` +} + +type GetResourceUsage GetResourceUsageRequestType + +func init() { + t["GetResourceUsage"] = reflect.TypeOf((*GetResourceUsage)(nil)).Elem() +} + +type GetResourceUsageRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["GetResourceUsageRequestType"] = reflect.TypeOf((*GetResourceUsageRequestType)(nil)).Elem() +} + +type 
GetResourceUsageResponse struct { + Returnval ClusterResourceUsageSummary `xml:"returnval"` +} + +type GetVchaClusterHealth GetVchaClusterHealthRequestType + +func init() { + t["GetVchaClusterHealth"] = reflect.TypeOf((*GetVchaClusterHealth)(nil)).Elem() +} + +type GetVchaClusterHealthRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["GetVchaClusterHealthRequestType"] = reflect.TypeOf((*GetVchaClusterHealthRequestType)(nil)).Elem() +} + +type GetVchaClusterHealthResponse struct { + Returnval VchaClusterHealth `xml:"returnval"` +} + +type GetVsanObjExtAttrs GetVsanObjExtAttrsRequestType + +func init() { + t["GetVsanObjExtAttrs"] = reflect.TypeOf((*GetVsanObjExtAttrs)(nil)).Elem() +} + +type GetVsanObjExtAttrsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuids []string `xml:"uuids"` +} + +func init() { + t["GetVsanObjExtAttrsRequestType"] = reflect.TypeOf((*GetVsanObjExtAttrsRequestType)(nil)).Elem() +} + +type GetVsanObjExtAttrsResponse struct { + Returnval string `xml:"returnval"` +} + +type GhostDvsProxySwitchDetectedEvent struct { + HostEvent + + SwitchUuid []string `xml:"switchUuid"` +} + +func init() { + t["GhostDvsProxySwitchDetectedEvent"] = reflect.TypeOf((*GhostDvsProxySwitchDetectedEvent)(nil)).Elem() +} + +type GhostDvsProxySwitchRemovedEvent struct { + HostEvent + + SwitchUuid []string `xml:"switchUuid"` +} + +func init() { + t["GhostDvsProxySwitchRemovedEvent"] = reflect.TypeOf((*GhostDvsProxySwitchRemovedEvent)(nil)).Elem() +} + +type GlobalMessageChangedEvent struct { + SessionEvent + + Message string `xml:"message"` + PrevMessage string `xml:"prevMessage,omitempty"` +} + +func init() { + t["GlobalMessageChangedEvent"] = reflect.TypeOf((*GlobalMessageChangedEvent)(nil)).Elem() +} + +type GroupAlarmAction struct { + AlarmAction + + Action []BaseAlarmAction `xml:"action,typeattr"` +} + +func init() { + t["GroupAlarmAction"] = reflect.TypeOf((*GroupAlarmAction)(nil)).Elem() +} + +type GuestAliases struct { + DynamicData + + Base64Cert string `xml:"base64Cert"` + Aliases []GuestAuthAliasInfo `xml:"aliases"` +} + +func init() { + t["GuestAliases"] = reflect.TypeOf((*GuestAliases)(nil)).Elem() +} + +type GuestAuthAliasInfo struct { + DynamicData + + Subject BaseGuestAuthSubject `xml:"subject,typeattr"` + Comment string `xml:"comment"` +} + +func init() { + t["GuestAuthAliasInfo"] = reflect.TypeOf((*GuestAuthAliasInfo)(nil)).Elem() +} + +type GuestAuthAnySubject struct { + GuestAuthSubject +} + +func init() { + t["GuestAuthAnySubject"] = reflect.TypeOf((*GuestAuthAnySubject)(nil)).Elem() +} + +type GuestAuthNamedSubject struct { + GuestAuthSubject + + Name string `xml:"name"` +} + +func init() { + t["GuestAuthNamedSubject"] = reflect.TypeOf((*GuestAuthNamedSubject)(nil)).Elem() +} + +type GuestAuthSubject struct { + DynamicData +} + +func init() { + t["GuestAuthSubject"] = reflect.TypeOf((*GuestAuthSubject)(nil)).Elem() +} + +type GuestAuthentication struct { + DynamicData + + InteractiveSession bool `xml:"interactiveSession"` +} + +func init() { + t["GuestAuthentication"] = reflect.TypeOf((*GuestAuthentication)(nil)).Elem() +} + +type GuestAuthenticationChallenge struct { + GuestOperationsFault + + ServerChallenge BaseGuestAuthentication `xml:"serverChallenge,typeattr"` + SessionID int64 `xml:"sessionID"` +} + +func init() { + t["GuestAuthenticationChallenge"] = reflect.TypeOf((*GuestAuthenticationChallenge)(nil)).Elem() +} + +type GuestAuthenticationChallengeFault GuestAuthenticationChallenge + +func init() { + 
t["GuestAuthenticationChallengeFault"] = reflect.TypeOf((*GuestAuthenticationChallengeFault)(nil)).Elem() +} + +type GuestComponentsOutOfDate struct { + GuestOperationsFault +} + +func init() { + t["GuestComponentsOutOfDate"] = reflect.TypeOf((*GuestComponentsOutOfDate)(nil)).Elem() +} + +type GuestComponentsOutOfDateFault GuestComponentsOutOfDate + +func init() { + t["GuestComponentsOutOfDateFault"] = reflect.TypeOf((*GuestComponentsOutOfDateFault)(nil)).Elem() +} + +type GuestDiskInfo struct { + DynamicData + + DiskPath string `xml:"diskPath,omitempty"` + Capacity int64 `xml:"capacity,omitempty"` + FreeSpace int64 `xml:"freeSpace,omitempty"` +} + +func init() { + t["GuestDiskInfo"] = reflect.TypeOf((*GuestDiskInfo)(nil)).Elem() +} + +type GuestFileAttributes struct { + DynamicData + + ModificationTime *time.Time `xml:"modificationTime"` + AccessTime *time.Time `xml:"accessTime"` + SymlinkTarget string `xml:"symlinkTarget,omitempty"` +} + +func init() { + t["GuestFileAttributes"] = reflect.TypeOf((*GuestFileAttributes)(nil)).Elem() +} + +type GuestFileInfo struct { + DynamicData + + Path string `xml:"path"` + Type string `xml:"type"` + Size int64 `xml:"size"` + Attributes BaseGuestFileAttributes `xml:"attributes,typeattr"` +} + +func init() { + t["GuestFileInfo"] = reflect.TypeOf((*GuestFileInfo)(nil)).Elem() +} + +type GuestInfo struct { + DynamicData + + ToolsStatus VirtualMachineToolsStatus `xml:"toolsStatus,omitempty"` + ToolsVersionStatus string `xml:"toolsVersionStatus,omitempty"` + ToolsVersionStatus2 string `xml:"toolsVersionStatus2,omitempty"` + ToolsRunningStatus string `xml:"toolsRunningStatus,omitempty"` + ToolsVersion string `xml:"toolsVersion,omitempty"` + ToolsInstallType string `xml:"toolsInstallType,omitempty"` + GuestId string `xml:"guestId,omitempty"` + GuestFamily string `xml:"guestFamily,omitempty"` + GuestFullName string `xml:"guestFullName,omitempty"` + HostName string `xml:"hostName,omitempty"` + IpAddress string `xml:"ipAddress,omitempty"` + Net []GuestNicInfo `xml:"net,omitempty"` + IpStack []GuestStackInfo `xml:"ipStack,omitempty"` + Disk []GuestDiskInfo `xml:"disk,omitempty"` + Screen *GuestScreenInfo `xml:"screen,omitempty"` + GuestState string `xml:"guestState"` + AppHeartbeatStatus string `xml:"appHeartbeatStatus,omitempty"` + GuestKernelCrashed *bool `xml:"guestKernelCrashed"` + AppState string `xml:"appState,omitempty"` + GuestOperationsReady *bool `xml:"guestOperationsReady"` + InteractiveGuestOperationsReady *bool `xml:"interactiveGuestOperationsReady"` + GuestStateChangeSupported *bool `xml:"guestStateChangeSupported"` + GenerationInfo []GuestInfoNamespaceGenerationInfo `xml:"generationInfo,omitempty"` +} + +func init() { + t["GuestInfo"] = reflect.TypeOf((*GuestInfo)(nil)).Elem() +} + +type GuestInfoNamespaceGenerationInfo struct { + DynamicData + + Key string `xml:"key"` + GenerationNo int32 `xml:"generationNo"` +} + +func init() { + t["GuestInfoNamespaceGenerationInfo"] = reflect.TypeOf((*GuestInfoNamespaceGenerationInfo)(nil)).Elem() +} + +type GuestListFileInfo struct { + DynamicData + + Files []GuestFileInfo `xml:"files,omitempty"` + Remaining int32 `xml:"remaining"` +} + +func init() { + t["GuestListFileInfo"] = reflect.TypeOf((*GuestListFileInfo)(nil)).Elem() +} + +type GuestMappedAliases struct { + DynamicData + + Base64Cert string `xml:"base64Cert"` + Username string `xml:"username"` + Subjects []BaseGuestAuthSubject `xml:"subjects,typeattr"` +} + +func init() { + t["GuestMappedAliases"] = reflect.TypeOf((*GuestMappedAliases)(nil)).Elem() +} + 
+type GuestMultipleMappings struct { + GuestOperationsFault +} + +func init() { + t["GuestMultipleMappings"] = reflect.TypeOf((*GuestMultipleMappings)(nil)).Elem() +} + +type GuestMultipleMappingsFault GuestMultipleMappings + +func init() { + t["GuestMultipleMappingsFault"] = reflect.TypeOf((*GuestMultipleMappingsFault)(nil)).Elem() +} + +type GuestNicInfo struct { + DynamicData + + Network string `xml:"network,omitempty"` + IpAddress []string `xml:"ipAddress,omitempty"` + MacAddress string `xml:"macAddress,omitempty"` + Connected bool `xml:"connected"` + DeviceConfigId int32 `xml:"deviceConfigId"` + DnsConfig *NetDnsConfigInfo `xml:"dnsConfig,omitempty"` + IpConfig *NetIpConfigInfo `xml:"ipConfig,omitempty"` + NetBIOSConfig BaseNetBIOSConfigInfo `xml:"netBIOSConfig,omitempty,typeattr"` +} + +func init() { + t["GuestNicInfo"] = reflect.TypeOf((*GuestNicInfo)(nil)).Elem() +} + +type GuestOperationsFault struct { + VimFault +} + +func init() { + t["GuestOperationsFault"] = reflect.TypeOf((*GuestOperationsFault)(nil)).Elem() +} + +type GuestOperationsFaultFault BaseGuestOperationsFault + +func init() { + t["GuestOperationsFaultFault"] = reflect.TypeOf((*GuestOperationsFaultFault)(nil)).Elem() +} + +type GuestOperationsUnavailable struct { + GuestOperationsFault +} + +func init() { + t["GuestOperationsUnavailable"] = reflect.TypeOf((*GuestOperationsUnavailable)(nil)).Elem() +} + +type GuestOperationsUnavailableFault GuestOperationsUnavailable + +func init() { + t["GuestOperationsUnavailableFault"] = reflect.TypeOf((*GuestOperationsUnavailableFault)(nil)).Elem() +} + +type GuestOsDescriptor struct { + DynamicData + + Id string `xml:"id"` + Family string `xml:"family"` + FullName string `xml:"fullName"` + SupportedMaxCPUs int32 `xml:"supportedMaxCPUs"` + NumSupportedPhysicalSockets int32 `xml:"numSupportedPhysicalSockets,omitempty"` + NumSupportedCoresPerSocket int32 `xml:"numSupportedCoresPerSocket,omitempty"` + SupportedMinMemMB int32 `xml:"supportedMinMemMB"` + SupportedMaxMemMB int32 `xml:"supportedMaxMemMB"` + RecommendedMemMB int32 `xml:"recommendedMemMB"` + RecommendedColorDepth int32 `xml:"recommendedColorDepth"` + SupportedDiskControllerList []string `xml:"supportedDiskControllerList"` + RecommendedSCSIController string `xml:"recommendedSCSIController,omitempty"` + RecommendedDiskController string `xml:"recommendedDiskController"` + SupportedNumDisks int32 `xml:"supportedNumDisks"` + RecommendedDiskSizeMB int32 `xml:"recommendedDiskSizeMB"` + RecommendedCdromController string `xml:"recommendedCdromController,omitempty"` + SupportedEthernetCard []string `xml:"supportedEthernetCard"` + RecommendedEthernetCard string `xml:"recommendedEthernetCard,omitempty"` + SupportsSlaveDisk *bool `xml:"supportsSlaveDisk"` + CpuFeatureMask []HostCpuIdInfo `xml:"cpuFeatureMask,omitempty"` + SmcRequired *bool `xml:"smcRequired"` + SupportsWakeOnLan bool `xml:"supportsWakeOnLan"` + SupportsVMI *bool `xml:"supportsVMI"` + SupportsMemoryHotAdd *bool `xml:"supportsMemoryHotAdd"` + SupportsCpuHotAdd *bool `xml:"supportsCpuHotAdd"` + SupportsCpuHotRemove *bool `xml:"supportsCpuHotRemove"` + SupportedFirmware []string `xml:"supportedFirmware,omitempty"` + RecommendedFirmware string `xml:"recommendedFirmware,omitempty"` + SupportedUSBControllerList []string `xml:"supportedUSBControllerList,omitempty"` + RecommendedUSBController string `xml:"recommendedUSBController,omitempty"` + Supports3D *bool `xml:"supports3D"` + Recommended3D *bool `xml:"recommended3D"` + SmcRecommended *bool `xml:"smcRecommended"` + 
Ich7mRecommended *bool `xml:"ich7mRecommended"` + UsbRecommended *bool `xml:"usbRecommended"` + SupportLevel string `xml:"supportLevel,omitempty"` + SupportedForCreate *bool `xml:"supportedForCreate"` + VRAMSizeInKB *IntOption `xml:"vRAMSizeInKB,omitempty"` + NumSupportedFloppyDevices int32 `xml:"numSupportedFloppyDevices,omitempty"` + WakeOnLanEthernetCard []string `xml:"wakeOnLanEthernetCard,omitempty"` + SupportsPvscsiControllerForBoot *bool `xml:"supportsPvscsiControllerForBoot"` + DiskUuidEnabled *bool `xml:"diskUuidEnabled"` + SupportsHotPlugPCI *bool `xml:"supportsHotPlugPCI"` + SupportsSecureBoot *bool `xml:"supportsSecureBoot"` + DefaultSecureBoot *bool `xml:"defaultSecureBoot"` + PersistentMemorySupported *bool `xml:"persistentMemorySupported"` + SupportedMinPersistentMemoryMB int64 `xml:"supportedMinPersistentMemoryMB,omitempty"` + SupportedMaxPersistentMemoryMB int64 `xml:"supportedMaxPersistentMemoryMB,omitempty"` + RecommendedPersistentMemoryMB int64 `xml:"recommendedPersistentMemoryMB,omitempty"` + PersistentMemoryHotAddSupported *bool `xml:"persistentMemoryHotAddSupported"` + PersistentMemoryHotRemoveSupported *bool `xml:"persistentMemoryHotRemoveSupported"` + PersistentMemoryColdGrowthSupported *bool `xml:"persistentMemoryColdGrowthSupported"` + PersistentMemoryColdGrowthGranularityMB int64 `xml:"persistentMemoryColdGrowthGranularityMB,omitempty"` + PersistentMemoryHotGrowthSupported *bool `xml:"persistentMemoryHotGrowthSupported"` + PersistentMemoryHotGrowthGranularityMB int64 `xml:"persistentMemoryHotGrowthGranularityMB,omitempty"` + NumRecommendedPhysicalSockets int32 `xml:"numRecommendedPhysicalSockets,omitempty"` + NumRecommendedCoresPerSocket int32 `xml:"numRecommendedCoresPerSocket,omitempty"` + VvtdSupported *BoolOption `xml:"vvtdSupported,omitempty"` + VbsSupported *BoolOption `xml:"vbsSupported,omitempty"` + SupportsTPM20 *bool `xml:"supportsTPM20"` +} + +func init() { + t["GuestOsDescriptor"] = reflect.TypeOf((*GuestOsDescriptor)(nil)).Elem() +} + +type GuestPermissionDenied struct { + GuestOperationsFault +} + +func init() { + t["GuestPermissionDenied"] = reflect.TypeOf((*GuestPermissionDenied)(nil)).Elem() +} + +type GuestPermissionDeniedFault GuestPermissionDenied + +func init() { + t["GuestPermissionDeniedFault"] = reflect.TypeOf((*GuestPermissionDeniedFault)(nil)).Elem() +} + +type GuestPosixFileAttributes struct { + GuestFileAttributes + + OwnerId *int32 `xml:"ownerId"` + GroupId *int32 `xml:"groupId"` + Permissions int64 `xml:"permissions,omitempty"` +} + +func init() { + t["GuestPosixFileAttributes"] = reflect.TypeOf((*GuestPosixFileAttributes)(nil)).Elem() +} + +type GuestProcessInfo struct { + DynamicData + + Name string `xml:"name"` + Pid int64 `xml:"pid"` + Owner string `xml:"owner"` + CmdLine string `xml:"cmdLine"` + StartTime time.Time `xml:"startTime"` + EndTime *time.Time `xml:"endTime"` + ExitCode int32 `xml:"exitCode,omitempty"` +} + +func init() { + t["GuestProcessInfo"] = reflect.TypeOf((*GuestProcessInfo)(nil)).Elem() +} + +type GuestProcessNotFound struct { + GuestOperationsFault + + Pid int64 `xml:"pid"` +} + +func init() { + t["GuestProcessNotFound"] = reflect.TypeOf((*GuestProcessNotFound)(nil)).Elem() +} + +type GuestProcessNotFoundFault GuestProcessNotFound + +func init() { + t["GuestProcessNotFoundFault"] = reflect.TypeOf((*GuestProcessNotFoundFault)(nil)).Elem() +} + +type GuestProgramSpec struct { + DynamicData + + ProgramPath string `xml:"programPath"` + Arguments string `xml:"arguments"` + WorkingDirectory string 
`xml:"workingDirectory,omitempty"` + EnvVariables []string `xml:"envVariables,omitempty"` +} + +func init() { + t["GuestProgramSpec"] = reflect.TypeOf((*GuestProgramSpec)(nil)).Elem() +} + +type GuestRegKeyNameSpec struct { + DynamicData + + RegistryPath string `xml:"registryPath"` + WowBitness string `xml:"wowBitness"` +} + +func init() { + t["GuestRegKeyNameSpec"] = reflect.TypeOf((*GuestRegKeyNameSpec)(nil)).Elem() +} + +type GuestRegKeyRecordSpec struct { + DynamicData + + Key GuestRegKeySpec `xml:"key"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["GuestRegKeyRecordSpec"] = reflect.TypeOf((*GuestRegKeyRecordSpec)(nil)).Elem() +} + +type GuestRegKeySpec struct { + DynamicData + + KeyName GuestRegKeyNameSpec `xml:"keyName"` + ClassType string `xml:"classType"` + LastWritten time.Time `xml:"lastWritten"` +} + +func init() { + t["GuestRegKeySpec"] = reflect.TypeOf((*GuestRegKeySpec)(nil)).Elem() +} + +type GuestRegValueBinarySpec struct { + GuestRegValueDataSpec + + Value []byte `xml:"value,omitempty"` +} + +func init() { + t["GuestRegValueBinarySpec"] = reflect.TypeOf((*GuestRegValueBinarySpec)(nil)).Elem() +} + +type GuestRegValueDataSpec struct { + DynamicData +} + +func init() { + t["GuestRegValueDataSpec"] = reflect.TypeOf((*GuestRegValueDataSpec)(nil)).Elem() +} + +type GuestRegValueDwordSpec struct { + GuestRegValueDataSpec + + Value int32 `xml:"value"` +} + +func init() { + t["GuestRegValueDwordSpec"] = reflect.TypeOf((*GuestRegValueDwordSpec)(nil)).Elem() +} + +type GuestRegValueExpandStringSpec struct { + GuestRegValueDataSpec + + Value string `xml:"value,omitempty"` +} + +func init() { + t["GuestRegValueExpandStringSpec"] = reflect.TypeOf((*GuestRegValueExpandStringSpec)(nil)).Elem() +} + +type GuestRegValueMultiStringSpec struct { + GuestRegValueDataSpec + + Value []string `xml:"value,omitempty"` +} + +func init() { + t["GuestRegValueMultiStringSpec"] = reflect.TypeOf((*GuestRegValueMultiStringSpec)(nil)).Elem() +} + +type GuestRegValueNameSpec struct { + DynamicData + + KeyName GuestRegKeyNameSpec `xml:"keyName"` + Name string `xml:"name"` +} + +func init() { + t["GuestRegValueNameSpec"] = reflect.TypeOf((*GuestRegValueNameSpec)(nil)).Elem() +} + +type GuestRegValueQwordSpec struct { + GuestRegValueDataSpec + + Value int64 `xml:"value"` +} + +func init() { + t["GuestRegValueQwordSpec"] = reflect.TypeOf((*GuestRegValueQwordSpec)(nil)).Elem() +} + +type GuestRegValueSpec struct { + DynamicData + + Name GuestRegValueNameSpec `xml:"name"` + Data BaseGuestRegValueDataSpec `xml:"data,typeattr"` +} + +func init() { + t["GuestRegValueSpec"] = reflect.TypeOf((*GuestRegValueSpec)(nil)).Elem() +} + +type GuestRegValueStringSpec struct { + GuestRegValueDataSpec + + Value string `xml:"value,omitempty"` +} + +func init() { + t["GuestRegValueStringSpec"] = reflect.TypeOf((*GuestRegValueStringSpec)(nil)).Elem() +} + +type GuestRegistryFault struct { + GuestOperationsFault + + WindowsSystemErrorCode int64 `xml:"windowsSystemErrorCode"` +} + +func init() { + t["GuestRegistryFault"] = reflect.TypeOf((*GuestRegistryFault)(nil)).Elem() +} + +type GuestRegistryFaultFault BaseGuestRegistryFault + +func init() { + t["GuestRegistryFaultFault"] = reflect.TypeOf((*GuestRegistryFaultFault)(nil)).Elem() +} + +type GuestRegistryKeyAlreadyExists struct { + GuestRegistryKeyFault +} + +func init() { + t["GuestRegistryKeyAlreadyExists"] = reflect.TypeOf((*GuestRegistryKeyAlreadyExists)(nil)).Elem() +} + +type GuestRegistryKeyAlreadyExistsFault GuestRegistryKeyAlreadyExists + 
+func init() { + t["GuestRegistryKeyAlreadyExistsFault"] = reflect.TypeOf((*GuestRegistryKeyAlreadyExistsFault)(nil)).Elem() +} + +type GuestRegistryKeyFault struct { + GuestRegistryFault + + KeyName string `xml:"keyName"` +} + +func init() { + t["GuestRegistryKeyFault"] = reflect.TypeOf((*GuestRegistryKeyFault)(nil)).Elem() +} + +type GuestRegistryKeyFaultFault BaseGuestRegistryKeyFault + +func init() { + t["GuestRegistryKeyFaultFault"] = reflect.TypeOf((*GuestRegistryKeyFaultFault)(nil)).Elem() +} + +type GuestRegistryKeyHasSubkeys struct { + GuestRegistryKeyFault +} + +func init() { + t["GuestRegistryKeyHasSubkeys"] = reflect.TypeOf((*GuestRegistryKeyHasSubkeys)(nil)).Elem() +} + +type GuestRegistryKeyHasSubkeysFault GuestRegistryKeyHasSubkeys + +func init() { + t["GuestRegistryKeyHasSubkeysFault"] = reflect.TypeOf((*GuestRegistryKeyHasSubkeysFault)(nil)).Elem() +} + +type GuestRegistryKeyInvalid struct { + GuestRegistryKeyFault +} + +func init() { + t["GuestRegistryKeyInvalid"] = reflect.TypeOf((*GuestRegistryKeyInvalid)(nil)).Elem() +} + +type GuestRegistryKeyInvalidFault GuestRegistryKeyInvalid + +func init() { + t["GuestRegistryKeyInvalidFault"] = reflect.TypeOf((*GuestRegistryKeyInvalidFault)(nil)).Elem() +} + +type GuestRegistryKeyParentVolatile struct { + GuestRegistryKeyFault +} + +func init() { + t["GuestRegistryKeyParentVolatile"] = reflect.TypeOf((*GuestRegistryKeyParentVolatile)(nil)).Elem() +} + +type GuestRegistryKeyParentVolatileFault GuestRegistryKeyParentVolatile + +func init() { + t["GuestRegistryKeyParentVolatileFault"] = reflect.TypeOf((*GuestRegistryKeyParentVolatileFault)(nil)).Elem() +} + +type GuestRegistryValueFault struct { + GuestRegistryFault + + KeyName string `xml:"keyName"` + ValueName string `xml:"valueName"` +} + +func init() { + t["GuestRegistryValueFault"] = reflect.TypeOf((*GuestRegistryValueFault)(nil)).Elem() +} + +type GuestRegistryValueFaultFault BaseGuestRegistryValueFault + +func init() { + t["GuestRegistryValueFaultFault"] = reflect.TypeOf((*GuestRegistryValueFaultFault)(nil)).Elem() +} + +type GuestRegistryValueNotFound struct { + GuestRegistryValueFault +} + +func init() { + t["GuestRegistryValueNotFound"] = reflect.TypeOf((*GuestRegistryValueNotFound)(nil)).Elem() +} + +type GuestRegistryValueNotFoundFault GuestRegistryValueNotFound + +func init() { + t["GuestRegistryValueNotFoundFault"] = reflect.TypeOf((*GuestRegistryValueNotFoundFault)(nil)).Elem() +} + +type GuestScreenInfo struct { + DynamicData + + Width int32 `xml:"width"` + Height int32 `xml:"height"` +} + +func init() { + t["GuestScreenInfo"] = reflect.TypeOf((*GuestScreenInfo)(nil)).Elem() +} + +type GuestStackInfo struct { + DynamicData + + DnsConfig *NetDnsConfigInfo `xml:"dnsConfig,omitempty"` + IpRouteConfig *NetIpRouteConfigInfo `xml:"ipRouteConfig,omitempty"` + IpStackConfig []KeyValue `xml:"ipStackConfig,omitempty"` + DhcpConfig *NetDhcpConfigInfo `xml:"dhcpConfig,omitempty"` +} + +func init() { + t["GuestStackInfo"] = reflect.TypeOf((*GuestStackInfo)(nil)).Elem() +} + +type GuestWindowsFileAttributes struct { + GuestFileAttributes + + Hidden *bool `xml:"hidden"` + ReadOnly *bool `xml:"readOnly"` + CreateTime *time.Time `xml:"createTime"` +} + +func init() { + t["GuestWindowsFileAttributes"] = reflect.TypeOf((*GuestWindowsFileAttributes)(nil)).Elem() +} + +type GuestWindowsProgramSpec struct { + GuestProgramSpec + + StartMinimized bool `xml:"startMinimized"` +} + +func init() { + t["GuestWindowsProgramSpec"] = reflect.TypeOf((*GuestWindowsProgramSpec)(nil)).Elem() +} + +type 
HAErrorsAtDest struct { + MigrationFault +} + +func init() { + t["HAErrorsAtDest"] = reflect.TypeOf((*HAErrorsAtDest)(nil)).Elem() +} + +type HAErrorsAtDestFault HAErrorsAtDest + +func init() { + t["HAErrorsAtDestFault"] = reflect.TypeOf((*HAErrorsAtDestFault)(nil)).Elem() +} + +type HasMonitoredEntity HasMonitoredEntityRequestType + +func init() { + t["HasMonitoredEntity"] = reflect.TypeOf((*HasMonitoredEntity)(nil)).Elem() +} + +type HasMonitoredEntityRequestType struct { + This ManagedObjectReference `xml:"_this"` + ProviderId string `xml:"providerId"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["HasMonitoredEntityRequestType"] = reflect.TypeOf((*HasMonitoredEntityRequestType)(nil)).Elem() +} + +type HasMonitoredEntityResponse struct { + Returnval bool `xml:"returnval"` +} + +type HasPrivilegeOnEntities HasPrivilegeOnEntitiesRequestType + +func init() { + t["HasPrivilegeOnEntities"] = reflect.TypeOf((*HasPrivilegeOnEntities)(nil)).Elem() +} + +type HasPrivilegeOnEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity []ManagedObjectReference `xml:"entity"` + SessionId string `xml:"sessionId"` + PrivId []string `xml:"privId,omitempty"` +} + +func init() { + t["HasPrivilegeOnEntitiesRequestType"] = reflect.TypeOf((*HasPrivilegeOnEntitiesRequestType)(nil)).Elem() +} + +type HasPrivilegeOnEntitiesResponse struct { + Returnval []EntityPrivilege `xml:"returnval,omitempty"` +} + +type HasPrivilegeOnEntity HasPrivilegeOnEntityRequestType + +func init() { + t["HasPrivilegeOnEntity"] = reflect.TypeOf((*HasPrivilegeOnEntity)(nil)).Elem() +} + +type HasPrivilegeOnEntityRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + SessionId string `xml:"sessionId"` + PrivId []string `xml:"privId,omitempty"` +} + +func init() { + t["HasPrivilegeOnEntityRequestType"] = reflect.TypeOf((*HasPrivilegeOnEntityRequestType)(nil)).Elem() +} + +type HasPrivilegeOnEntityResponse struct { + Returnval []bool `xml:"returnval,omitempty"` +} + +type HasProvider HasProviderRequestType + +func init() { + t["HasProvider"] = reflect.TypeOf((*HasProvider)(nil)).Elem() +} + +type HasProviderRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id string `xml:"id"` +} + +func init() { + t["HasProviderRequestType"] = reflect.TypeOf((*HasProviderRequestType)(nil)).Elem() +} + +type HasProviderResponse struct { + Returnval bool `xml:"returnval"` +} + +type HasUserPrivilegeOnEntities HasUserPrivilegeOnEntitiesRequestType + +func init() { + t["HasUserPrivilegeOnEntities"] = reflect.TypeOf((*HasUserPrivilegeOnEntities)(nil)).Elem() +} + +type HasUserPrivilegeOnEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entities []ManagedObjectReference `xml:"entities"` + UserName string `xml:"userName"` + PrivId []string `xml:"privId,omitempty"` +} + +func init() { + t["HasUserPrivilegeOnEntitiesRequestType"] = reflect.TypeOf((*HasUserPrivilegeOnEntitiesRequestType)(nil)).Elem() +} + +type HasUserPrivilegeOnEntitiesResponse struct { + Returnval []EntityPrivilege `xml:"returnval,omitempty"` +} + +type HbrDiskMigrationAction struct { + ClusterAction + + CollectionId string `xml:"collectionId"` + CollectionName string `xml:"collectionName"` + DiskIds []string `xml:"diskIds"` + Source ManagedObjectReference `xml:"source"` + Destination ManagedObjectReference `xml:"destination"` + SizeTransferred int64 `xml:"sizeTransferred"` + SpaceUtilSrcBefore float32 `xml:"spaceUtilSrcBefore,omitempty"` + 
SpaceUtilDstBefore float32 `xml:"spaceUtilDstBefore,omitempty"` + SpaceUtilSrcAfter float32 `xml:"spaceUtilSrcAfter,omitempty"` + SpaceUtilDstAfter float32 `xml:"spaceUtilDstAfter,omitempty"` + IoLatencySrcBefore float32 `xml:"ioLatencySrcBefore,omitempty"` + IoLatencyDstBefore float32 `xml:"ioLatencyDstBefore,omitempty"` +} + +func init() { + t["HbrDiskMigrationAction"] = reflect.TypeOf((*HbrDiskMigrationAction)(nil)).Elem() +} + +type HbrManagerReplicationVmInfo struct { + DynamicData + + State string `xml:"state"` + ProgressInfo *ReplicationVmProgressInfo `xml:"progressInfo,omitempty"` + ImageId string `xml:"imageId,omitempty"` + LastError *LocalizedMethodFault `xml:"lastError,omitempty"` +} + +func init() { + t["HbrManagerReplicationVmInfo"] = reflect.TypeOf((*HbrManagerReplicationVmInfo)(nil)).Elem() +} + +type HbrManagerVmReplicationCapability struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + SupportedQuiesceMode string `xml:"supportedQuiesceMode"` + CompressionSupported bool `xml:"compressionSupported"` + MaxSupportedSourceDiskCapacity int64 `xml:"maxSupportedSourceDiskCapacity"` + MinRpo int64 `xml:"minRpo,omitempty"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["HbrManagerVmReplicationCapability"] = reflect.TypeOf((*HbrManagerVmReplicationCapability)(nil)).Elem() +} + +type HealthStatusChangedEvent struct { + Event + + ComponentId string `xml:"componentId"` + OldStatus string `xml:"oldStatus"` + NewStatus string `xml:"newStatus"` + ComponentName string `xml:"componentName"` + ServiceId string `xml:"serviceId,omitempty"` +} + +func init() { + t["HealthStatusChangedEvent"] = reflect.TypeOf((*HealthStatusChangedEvent)(nil)).Elem() +} + +type HealthSystemRuntime struct { + DynamicData + + SystemHealthInfo *HostSystemHealthInfo `xml:"systemHealthInfo,omitempty"` + HardwareStatusInfo *HostHardwareStatusInfo `xml:"hardwareStatusInfo,omitempty"` +} + +func init() { + t["HealthSystemRuntime"] = reflect.TypeOf((*HealthSystemRuntime)(nil)).Elem() +} + +type HealthUpdate struct { + DynamicData + + Entity ManagedObjectReference `xml:"entity"` + HealthUpdateInfoId string `xml:"healthUpdateInfoId"` + Id string `xml:"id"` + Status ManagedEntityStatus `xml:"status"` + Remediation string `xml:"remediation"` +} + +func init() { + t["HealthUpdate"] = reflect.TypeOf((*HealthUpdate)(nil)).Elem() +} + +type HealthUpdateInfo struct { + DynamicData + + Id string `xml:"id"` + ComponentType string `xml:"componentType"` + Description string `xml:"description"` +} + +func init() { + t["HealthUpdateInfo"] = reflect.TypeOf((*HealthUpdateInfo)(nil)).Elem() +} + +type HeterogenousHostsBlockingEVC struct { + EVCConfigFault +} + +func init() { + t["HeterogenousHostsBlockingEVC"] = reflect.TypeOf((*HeterogenousHostsBlockingEVC)(nil)).Elem() +} + +type HeterogenousHostsBlockingEVCFault HeterogenousHostsBlockingEVC + +func init() { + t["HeterogenousHostsBlockingEVCFault"] = reflect.TypeOf((*HeterogenousHostsBlockingEVCFault)(nil)).Elem() +} + +type HostAccessControlEntry struct { + DynamicData + + Principal string `xml:"principal"` + Group bool `xml:"group"` + AccessMode HostAccessMode `xml:"accessMode"` +} + +func init() { + t["HostAccessControlEntry"] = reflect.TypeOf((*HostAccessControlEntry)(nil)).Elem() +} + +type HostAccessRestrictedToManagementServer struct { + NotSupported + + ManagementServer string `xml:"managementServer"` +} + +func init() { + t["HostAccessRestrictedToManagementServer"] = reflect.TypeOf((*HostAccessRestrictedToManagementServer)(nil)).Elem() 
+} + +type HostAccessRestrictedToManagementServerFault HostAccessRestrictedToManagementServer + +func init() { + t["HostAccessRestrictedToManagementServerFault"] = reflect.TypeOf((*HostAccessRestrictedToManagementServerFault)(nil)).Elem() +} + +type HostAccountSpec struct { + DynamicData + + Id string `xml:"id"` + Password string `xml:"password,omitempty"` + Description string `xml:"description,omitempty"` +} + +func init() { + t["HostAccountSpec"] = reflect.TypeOf((*HostAccountSpec)(nil)).Elem() +} + +type HostActiveDirectory struct { + DynamicData + + ChangeOperation string `xml:"changeOperation"` + Spec *HostActiveDirectorySpec `xml:"spec,omitempty"` +} + +func init() { + t["HostActiveDirectory"] = reflect.TypeOf((*HostActiveDirectory)(nil)).Elem() +} + +type HostActiveDirectoryInfo struct { + HostDirectoryStoreInfo + + JoinedDomain string `xml:"joinedDomain,omitempty"` + TrustedDomain []string `xml:"trustedDomain,omitempty"` + DomainMembershipStatus string `xml:"domainMembershipStatus,omitempty"` + SmartCardAuthenticationEnabled *bool `xml:"smartCardAuthenticationEnabled"` +} + +func init() { + t["HostActiveDirectoryInfo"] = reflect.TypeOf((*HostActiveDirectoryInfo)(nil)).Elem() +} + +type HostActiveDirectorySpec struct { + DynamicData + + DomainName string `xml:"domainName,omitempty"` + UserName string `xml:"userName,omitempty"` + Password string `xml:"password,omitempty"` + CamServer string `xml:"camServer,omitempty"` + Thumbprint string `xml:"thumbprint,omitempty"` + SmartCardAuthenticationEnabled *bool `xml:"smartCardAuthenticationEnabled"` + SmartCardTrustAnchors []string `xml:"smartCardTrustAnchors,omitempty"` +} + +func init() { + t["HostActiveDirectorySpec"] = reflect.TypeOf((*HostActiveDirectorySpec)(nil)).Elem() +} + +type HostAddFailedEvent struct { + HostEvent + + Hostname string `xml:"hostname"` +} + +func init() { + t["HostAddFailedEvent"] = reflect.TypeOf((*HostAddFailedEvent)(nil)).Elem() +} + +type HostAddedEvent struct { + HostEvent +} + +func init() { + t["HostAddedEvent"] = reflect.TypeOf((*HostAddedEvent)(nil)).Elem() +} + +type HostAdminDisableEvent struct { + HostEvent +} + +func init() { + t["HostAdminDisableEvent"] = reflect.TypeOf((*HostAdminDisableEvent)(nil)).Elem() +} + +type HostAdminEnableEvent struct { + HostEvent +} + +func init() { + t["HostAdminEnableEvent"] = reflect.TypeOf((*HostAdminEnableEvent)(nil)).Elem() +} + +type HostApplyProfile struct { + ApplyProfile + + Memory *HostMemoryProfile `xml:"memory,omitempty"` + Storage *StorageProfile `xml:"storage,omitempty"` + Network *NetworkProfile `xml:"network,omitempty"` + Datetime *DateTimeProfile `xml:"datetime,omitempty"` + Firewall *FirewallProfile `xml:"firewall,omitempty"` + Security *SecurityProfile `xml:"security,omitempty"` + Service []ServiceProfile `xml:"service,omitempty"` + Option []OptionProfile `xml:"option,omitempty"` + UserAccount []UserProfile `xml:"userAccount,omitempty"` + UsergroupAccount []UserGroupProfile `xml:"usergroupAccount,omitempty"` + Authentication *AuthenticationProfile `xml:"authentication,omitempty"` +} + +func init() { + t["HostApplyProfile"] = reflect.TypeOf((*HostApplyProfile)(nil)).Elem() +} + +type HostAuthenticationManagerInfo struct { + DynamicData + + AuthConfig []BaseHostAuthenticationStoreInfo `xml:"authConfig,typeattr"` +} + +func init() { + t["HostAuthenticationManagerInfo"] = reflect.TypeOf((*HostAuthenticationManagerInfo)(nil)).Elem() +} + +type HostAuthenticationStoreInfo struct { + DynamicData + + Enabled bool `xml:"enabled"` +} + +func init() { + 
t["HostAuthenticationStoreInfo"] = reflect.TypeOf((*HostAuthenticationStoreInfo)(nil)).Elem() +} + +type HostAutoStartManagerConfig struct { + DynamicData + + Defaults *AutoStartDefaults `xml:"defaults,omitempty"` + PowerInfo []AutoStartPowerInfo `xml:"powerInfo,omitempty"` +} + +func init() { + t["HostAutoStartManagerConfig"] = reflect.TypeOf((*HostAutoStartManagerConfig)(nil)).Elem() +} + +type HostBIOSInfo struct { + DynamicData + + BiosVersion string `xml:"biosVersion,omitempty"` + ReleaseDate *time.Time `xml:"releaseDate"` + Vendor string `xml:"vendor,omitempty"` + MajorRelease int32 `xml:"majorRelease,omitempty"` + MinorRelease int32 `xml:"minorRelease,omitempty"` + FirmwareMajorRelease int32 `xml:"firmwareMajorRelease,omitempty"` + FirmwareMinorRelease int32 `xml:"firmwareMinorRelease,omitempty"` +} + +func init() { + t["HostBIOSInfo"] = reflect.TypeOf((*HostBIOSInfo)(nil)).Elem() +} + +type HostBlockAdapterTargetTransport struct { + HostTargetTransport +} + +func init() { + t["HostBlockAdapterTargetTransport"] = reflect.TypeOf((*HostBlockAdapterTargetTransport)(nil)).Elem() +} + +type HostBlockHba struct { + HostHostBusAdapter +} + +func init() { + t["HostBlockHba"] = reflect.TypeOf((*HostBlockHba)(nil)).Elem() +} + +type HostBootDevice struct { + DynamicData + + Key string `xml:"key"` + Description string `xml:"description"` +} + +func init() { + t["HostBootDevice"] = reflect.TypeOf((*HostBootDevice)(nil)).Elem() +} + +type HostBootDeviceInfo struct { + DynamicData + + BootDevices []HostBootDevice `xml:"bootDevices,omitempty"` + CurrentBootDeviceKey string `xml:"currentBootDeviceKey,omitempty"` +} + +func init() { + t["HostBootDeviceInfo"] = reflect.TypeOf((*HostBootDeviceInfo)(nil)).Elem() +} + +type HostCacheConfigurationInfo struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + SwapSize int64 `xml:"swapSize"` +} + +func init() { + t["HostCacheConfigurationInfo"] = reflect.TypeOf((*HostCacheConfigurationInfo)(nil)).Elem() +} + +type HostCacheConfigurationSpec struct { + DynamicData + + Datastore ManagedObjectReference `xml:"datastore"` + SwapSize int64 `xml:"swapSize"` +} + +func init() { + t["HostCacheConfigurationSpec"] = reflect.TypeOf((*HostCacheConfigurationSpec)(nil)).Elem() +} + +type HostCapability struct { + DynamicData + + RecursiveResourcePoolsSupported bool `xml:"recursiveResourcePoolsSupported"` + CpuMemoryResourceConfigurationSupported bool `xml:"cpuMemoryResourceConfigurationSupported"` + RebootSupported bool `xml:"rebootSupported"` + ShutdownSupported bool `xml:"shutdownSupported"` + VmotionSupported bool `xml:"vmotionSupported"` + StandbySupported bool `xml:"standbySupported"` + IpmiSupported *bool `xml:"ipmiSupported"` + MaxSupportedVMs int32 `xml:"maxSupportedVMs,omitempty"` + MaxRunningVMs int32 `xml:"maxRunningVMs,omitempty"` + MaxSupportedVcpus int32 `xml:"maxSupportedVcpus,omitempty"` + MaxRegisteredVMs int32 `xml:"maxRegisteredVMs,omitempty"` + DatastorePrincipalSupported bool `xml:"datastorePrincipalSupported"` + SanSupported bool `xml:"sanSupported"` + NfsSupported bool `xml:"nfsSupported"` + IscsiSupported bool `xml:"iscsiSupported"` + VlanTaggingSupported bool `xml:"vlanTaggingSupported"` + NicTeamingSupported bool `xml:"nicTeamingSupported"` + HighGuestMemSupported bool `xml:"highGuestMemSupported"` + MaintenanceModeSupported bool `xml:"maintenanceModeSupported"` + SuspendedRelocateSupported bool `xml:"suspendedRelocateSupported"` + RestrictedSnapshotRelocateSupported bool `xml:"restrictedSnapshotRelocateSupported"` + PerVmSwapFiles 
bool `xml:"perVmSwapFiles"` + LocalSwapDatastoreSupported bool `xml:"localSwapDatastoreSupported"` + UnsharedSwapVMotionSupported bool `xml:"unsharedSwapVMotionSupported"` + BackgroundSnapshotsSupported bool `xml:"backgroundSnapshotsSupported"` + PreAssignedPCIUnitNumbersSupported bool `xml:"preAssignedPCIUnitNumbersSupported"` + ScreenshotSupported bool `xml:"screenshotSupported"` + ScaledScreenshotSupported bool `xml:"scaledScreenshotSupported"` + StorageVMotionSupported *bool `xml:"storageVMotionSupported"` + VmotionWithStorageVMotionSupported *bool `xml:"vmotionWithStorageVMotionSupported"` + VmotionAcrossNetworkSupported *bool `xml:"vmotionAcrossNetworkSupported"` + MaxNumDisksSVMotion int32 `xml:"maxNumDisksSVMotion,omitempty"` + HbrNicSelectionSupported *bool `xml:"hbrNicSelectionSupported"` + VrNfcNicSelectionSupported *bool `xml:"vrNfcNicSelectionSupported"` + RecordReplaySupported *bool `xml:"recordReplaySupported"` + FtSupported *bool `xml:"ftSupported"` + ReplayUnsupportedReason string `xml:"replayUnsupportedReason,omitempty"` + ReplayCompatibilityIssues []string `xml:"replayCompatibilityIssues,omitempty"` + SmpFtSupported *bool `xml:"smpFtSupported"` + FtCompatibilityIssues []string `xml:"ftCompatibilityIssues,omitempty"` + SmpFtCompatibilityIssues []string `xml:"smpFtCompatibilityIssues,omitempty"` + MaxVcpusPerFtVm int32 `xml:"maxVcpusPerFtVm,omitempty"` + LoginBySSLThumbprintSupported *bool `xml:"loginBySSLThumbprintSupported"` + CloneFromSnapshotSupported *bool `xml:"cloneFromSnapshotSupported"` + DeltaDiskBackingsSupported *bool `xml:"deltaDiskBackingsSupported"` + PerVMNetworkTrafficShapingSupported *bool `xml:"perVMNetworkTrafficShapingSupported"` + TpmSupported *bool `xml:"tpmSupported"` + TpmVersion string `xml:"tpmVersion,omitempty"` + TxtEnabled *bool `xml:"txtEnabled"` + SupportedCpuFeature []HostCpuIdInfo `xml:"supportedCpuFeature,omitempty"` + VirtualExecUsageSupported *bool `xml:"virtualExecUsageSupported"` + StorageIORMSupported *bool `xml:"storageIORMSupported"` + VmDirectPathGen2Supported *bool `xml:"vmDirectPathGen2Supported"` + VmDirectPathGen2UnsupportedReason []string `xml:"vmDirectPathGen2UnsupportedReason,omitempty"` + VmDirectPathGen2UnsupportedReasonExtended string `xml:"vmDirectPathGen2UnsupportedReasonExtended,omitempty"` + SupportedVmfsMajorVersion []int32 `xml:"supportedVmfsMajorVersion,omitempty"` + VStorageCapable *bool `xml:"vStorageCapable"` + SnapshotRelayoutSupported *bool `xml:"snapshotRelayoutSupported"` + FirewallIpRulesSupported *bool `xml:"firewallIpRulesSupported"` + ServicePackageInfoSupported *bool `xml:"servicePackageInfoSupported"` + MaxHostRunningVms int32 `xml:"maxHostRunningVms,omitempty"` + MaxHostSupportedVcpus int32 `xml:"maxHostSupportedVcpus,omitempty"` + VmfsDatastoreMountCapable *bool `xml:"vmfsDatastoreMountCapable"` + EightPlusHostVmfsSharedAccessSupported *bool `xml:"eightPlusHostVmfsSharedAccessSupported"` + NestedHVSupported *bool `xml:"nestedHVSupported"` + VPMCSupported *bool `xml:"vPMCSupported"` + InterVMCommunicationThroughVMCISupported *bool `xml:"interVMCommunicationThroughVMCISupported"` + ScheduledHardwareUpgradeSupported *bool `xml:"scheduledHardwareUpgradeSupported"` + FeatureCapabilitiesSupported *bool `xml:"featureCapabilitiesSupported"` + LatencySensitivitySupported *bool `xml:"latencySensitivitySupported"` + StoragePolicySupported *bool `xml:"storagePolicySupported"` + Accel3dSupported *bool `xml:"accel3dSupported"` + ReliableMemoryAware *bool `xml:"reliableMemoryAware"` + 
MultipleNetworkStackInstanceSupported *bool `xml:"multipleNetworkStackInstanceSupported"` + MessageBusProxySupported *bool `xml:"messageBusProxySupported"` + VsanSupported *bool `xml:"vsanSupported"` + VFlashSupported *bool `xml:"vFlashSupported"` + HostAccessManagerSupported *bool `xml:"hostAccessManagerSupported"` + ProvisioningNicSelectionSupported *bool `xml:"provisioningNicSelectionSupported"` + Nfs41Supported *bool `xml:"nfs41Supported"` + Nfs41Krb5iSupported *bool `xml:"nfs41Krb5iSupported"` + TurnDiskLocatorLedSupported *bool `xml:"turnDiskLocatorLedSupported"` + VirtualVolumeDatastoreSupported *bool `xml:"virtualVolumeDatastoreSupported"` + MarkAsSsdSupported *bool `xml:"markAsSsdSupported"` + MarkAsLocalSupported *bool `xml:"markAsLocalSupported"` + SmartCardAuthenticationSupported *bool `xml:"smartCardAuthenticationSupported"` + PMemSupported *bool `xml:"pMemSupported"` + PMemSnapshotSupported *bool `xml:"pMemSnapshotSupported"` + CryptoSupported *bool `xml:"cryptoSupported"` + OneKVolumeAPIsSupported *bool `xml:"oneKVolumeAPIsSupported"` + GatewayOnNicSupported *bool `xml:"gatewayOnNicSupported"` + UpitSupported *bool `xml:"upitSupported"` + CpuHwMmuSupported *bool `xml:"cpuHwMmuSupported"` + EncryptedVMotionSupported *bool `xml:"encryptedVMotionSupported"` + EncryptionChangeOnAddRemoveSupported *bool `xml:"encryptionChangeOnAddRemoveSupported"` + EncryptionHotOperationSupported *bool `xml:"encryptionHotOperationSupported"` + EncryptionWithSnapshotsSupported *bool `xml:"encryptionWithSnapshotsSupported"` + EncryptionFaultToleranceSupported *bool `xml:"encryptionFaultToleranceSupported"` + EncryptionMemorySaveSupported *bool `xml:"encryptionMemorySaveSupported"` + EncryptionRDMSupported *bool `xml:"encryptionRDMSupported"` + EncryptionVFlashSupported *bool `xml:"encryptionVFlashSupported"` + EncryptionCBRCSupported *bool `xml:"encryptionCBRCSupported"` + EncryptionHBRSupported *bool `xml:"encryptionHBRSupported"` + FtEfiSupported *bool `xml:"ftEfiSupported"` + UnmapMethodSupported string `xml:"unmapMethodSupported,omitempty"` + MaxMemMBPerFtVm int32 `xml:"maxMemMBPerFtVm,omitempty"` + VirtualMmuUsageIgnored *bool `xml:"virtualMmuUsageIgnored"` + VirtualExecUsageIgnored *bool `xml:"virtualExecUsageIgnored"` + VmCreateDateSupported *bool `xml:"vmCreateDateSupported"` + Vmfs3EOLSupported *bool `xml:"vmfs3EOLSupported"` + FtVmcpSupported *bool `xml:"ftVmcpSupported"` +} + +func init() { + t["HostCapability"] = reflect.TypeOf((*HostCapability)(nil)).Elem() +} + +type HostCertificateManagerCertificateInfo struct { + DynamicData + + Issuer string `xml:"issuer,omitempty"` + NotBefore *time.Time `xml:"notBefore"` + NotAfter *time.Time `xml:"notAfter"` + Subject string `xml:"subject,omitempty"` + Status string `xml:"status"` +} + +func init() { + t["HostCertificateManagerCertificateInfo"] = reflect.TypeOf((*HostCertificateManagerCertificateInfo)(nil)).Elem() +} + +type HostClearVStorageObjectControlFlags HostClearVStorageObjectControlFlagsRequestType + +func init() { + t["HostClearVStorageObjectControlFlags"] = reflect.TypeOf((*HostClearVStorageObjectControlFlags)(nil)).Elem() +} + +type HostClearVStorageObjectControlFlagsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + ControlFlags []string `xml:"controlFlags,omitempty"` +} + +func init() { + t["HostClearVStorageObjectControlFlagsRequestType"] = reflect.TypeOf((*HostClearVStorageObjectControlFlagsRequestType)(nil)).Elem() +} + +type 
HostClearVStorageObjectControlFlagsResponse struct { +} + +type HostCloneVStorageObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Spec VslmCloneSpec `xml:"spec"` +} + +func init() { + t["HostCloneVStorageObjectRequestType"] = reflect.TypeOf((*HostCloneVStorageObjectRequestType)(nil)).Elem() +} + +type HostCloneVStorageObject_Task HostCloneVStorageObjectRequestType + +func init() { + t["HostCloneVStorageObject_Task"] = reflect.TypeOf((*HostCloneVStorageObject_Task)(nil)).Elem() +} + +type HostCloneVStorageObject_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type HostCnxFailedAccountFailedEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedAccountFailedEvent"] = reflect.TypeOf((*HostCnxFailedAccountFailedEvent)(nil)).Elem() +} + +type HostCnxFailedAlreadyManagedEvent struct { + HostEvent + + ServerName string `xml:"serverName"` +} + +func init() { + t["HostCnxFailedAlreadyManagedEvent"] = reflect.TypeOf((*HostCnxFailedAlreadyManagedEvent)(nil)).Elem() +} + +type HostCnxFailedBadCcagentEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedBadCcagentEvent"] = reflect.TypeOf((*HostCnxFailedBadCcagentEvent)(nil)).Elem() +} + +type HostCnxFailedBadUsernameEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedBadUsernameEvent"] = reflect.TypeOf((*HostCnxFailedBadUsernameEvent)(nil)).Elem() +} + +type HostCnxFailedBadVersionEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedBadVersionEvent"] = reflect.TypeOf((*HostCnxFailedBadVersionEvent)(nil)).Elem() +} + +type HostCnxFailedCcagentUpgradeEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedCcagentUpgradeEvent"] = reflect.TypeOf((*HostCnxFailedCcagentUpgradeEvent)(nil)).Elem() +} + +type HostCnxFailedEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedEvent"] = reflect.TypeOf((*HostCnxFailedEvent)(nil)).Elem() +} + +type HostCnxFailedNetworkErrorEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedNetworkErrorEvent"] = reflect.TypeOf((*HostCnxFailedNetworkErrorEvent)(nil)).Elem() +} + +type HostCnxFailedNoAccessEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedNoAccessEvent"] = reflect.TypeOf((*HostCnxFailedNoAccessEvent)(nil)).Elem() +} + +type HostCnxFailedNoConnectionEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedNoConnectionEvent"] = reflect.TypeOf((*HostCnxFailedNoConnectionEvent)(nil)).Elem() +} + +type HostCnxFailedNoLicenseEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedNoLicenseEvent"] = reflect.TypeOf((*HostCnxFailedNoLicenseEvent)(nil)).Elem() +} + +type HostCnxFailedNotFoundEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedNotFoundEvent"] = reflect.TypeOf((*HostCnxFailedNotFoundEvent)(nil)).Elem() +} + +type HostCnxFailedTimeoutEvent struct { + HostEvent +} + +func init() { + t["HostCnxFailedTimeoutEvent"] = reflect.TypeOf((*HostCnxFailedTimeoutEvent)(nil)).Elem() +} + +type HostCommunication struct { + RuntimeFault +} + +func init() { + t["HostCommunication"] = reflect.TypeOf((*HostCommunication)(nil)).Elem() +} + +type HostCommunicationFault BaseHostCommunication + +func init() { + t["HostCommunicationFault"] = reflect.TypeOf((*HostCommunicationFault)(nil)).Elem() +} + +type HostComplianceCheckedEvent struct { + HostEvent + + Profile ProfileEventArgument `xml:"profile"` +} + +func init() { + t["HostComplianceCheckedEvent"] = 
reflect.TypeOf((*HostComplianceCheckedEvent)(nil)).Elem() +} + +type HostCompliantEvent struct { + HostEvent +} + +func init() { + t["HostCompliantEvent"] = reflect.TypeOf((*HostCompliantEvent)(nil)).Elem() +} + +type HostConfigAppliedEvent struct { + HostEvent +} + +func init() { + t["HostConfigAppliedEvent"] = reflect.TypeOf((*HostConfigAppliedEvent)(nil)).Elem() +} + +type HostConfigChange struct { + DynamicData +} + +func init() { + t["HostConfigChange"] = reflect.TypeOf((*HostConfigChange)(nil)).Elem() +} + +type HostConfigFailed struct { + HostConfigFault + + Failure []LocalizedMethodFault `xml:"failure"` +} + +func init() { + t["HostConfigFailed"] = reflect.TypeOf((*HostConfigFailed)(nil)).Elem() +} + +type HostConfigFailedFault HostConfigFailed + +func init() { + t["HostConfigFailedFault"] = reflect.TypeOf((*HostConfigFailedFault)(nil)).Elem() +} + +type HostConfigFault struct { + VimFault +} + +func init() { + t["HostConfigFault"] = reflect.TypeOf((*HostConfigFault)(nil)).Elem() +} + +type HostConfigFaultFault BaseHostConfigFault + +func init() { + t["HostConfigFaultFault"] = reflect.TypeOf((*HostConfigFaultFault)(nil)).Elem() +} + +type HostConfigInfo struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Product AboutInfo `xml:"product"` + DeploymentInfo *HostDeploymentInfo `xml:"deploymentInfo,omitempty"` + HyperThread *HostHyperThreadScheduleInfo `xml:"hyperThread,omitempty"` + ConsoleReservation *ServiceConsoleReservationInfo `xml:"consoleReservation,omitempty"` + VirtualMachineReservation *VirtualMachineMemoryReservationInfo `xml:"virtualMachineReservation,omitempty"` + StorageDevice *HostStorageDeviceInfo `xml:"storageDevice,omitempty"` + MultipathState *HostMultipathStateInfo `xml:"multipathState,omitempty"` + FileSystemVolume *HostFileSystemVolumeInfo `xml:"fileSystemVolume,omitempty"` + SystemFile []string `xml:"systemFile,omitempty"` + Network *HostNetworkInfo `xml:"network,omitempty"` + Vmotion *HostVMotionInfo `xml:"vmotion,omitempty"` + VirtualNicManagerInfo *HostVirtualNicManagerInfo `xml:"virtualNicManagerInfo,omitempty"` + Capabilities *HostNetCapabilities `xml:"capabilities,omitempty"` + DatastoreCapabilities *HostDatastoreSystemCapabilities `xml:"datastoreCapabilities,omitempty"` + OffloadCapabilities *HostNetOffloadCapabilities `xml:"offloadCapabilities,omitempty"` + Service *HostServiceInfo `xml:"service,omitempty"` + Firewall *HostFirewallInfo `xml:"firewall,omitempty"` + AutoStart *HostAutoStartManagerConfig `xml:"autoStart,omitempty"` + ActiveDiagnosticPartition *HostDiagnosticPartition `xml:"activeDiagnosticPartition,omitempty"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr"` + OptionDef []OptionDef `xml:"optionDef,omitempty"` + DatastorePrincipal string `xml:"datastorePrincipal,omitempty"` + LocalSwapDatastore *ManagedObjectReference `xml:"localSwapDatastore,omitempty"` + SystemSwapConfiguration *HostSystemSwapConfiguration `xml:"systemSwapConfiguration,omitempty"` + SystemResources *HostSystemResourceInfo `xml:"systemResources,omitempty"` + DateTimeInfo *HostDateTimeInfo `xml:"dateTimeInfo,omitempty"` + Flags *HostFlagInfo `xml:"flags,omitempty"` + AdminDisabled *bool `xml:"adminDisabled"` + LockdownMode HostLockdownMode `xml:"lockdownMode,omitempty"` + Ipmi *HostIpmiInfo `xml:"ipmi,omitempty"` + SslThumbprintInfo *HostSslThumbprintInfo `xml:"sslThumbprintInfo,omitempty"` + SslThumbprintData []HostSslThumbprintInfo `xml:"sslThumbprintData,omitempty"` + Certificate []byte `xml:"certificate,omitempty"` + PciPassthruInfo 
[]BaseHostPciPassthruInfo `xml:"pciPassthruInfo,omitempty,typeattr"` + AuthenticationManagerInfo *HostAuthenticationManagerInfo `xml:"authenticationManagerInfo,omitempty"` + FeatureVersion []HostFeatureVersionInfo `xml:"featureVersion,omitempty"` + PowerSystemCapability *PowerSystemCapability `xml:"powerSystemCapability,omitempty"` + PowerSystemInfo *PowerSystemInfo `xml:"powerSystemInfo,omitempty"` + CacheConfigurationInfo []HostCacheConfigurationInfo `xml:"cacheConfigurationInfo,omitempty"` + WakeOnLanCapable *bool `xml:"wakeOnLanCapable"` + FeatureCapability []HostFeatureCapability `xml:"featureCapability,omitempty"` + MaskedFeatureCapability []HostFeatureCapability `xml:"maskedFeatureCapability,omitempty"` + VFlashConfigInfo *HostVFlashManagerVFlashConfigInfo `xml:"vFlashConfigInfo,omitempty"` + VsanHostConfig *VsanHostConfigInfo `xml:"vsanHostConfig,omitempty"` + DomainList []string `xml:"domainList,omitempty"` + ScriptCheckSum []byte `xml:"scriptCheckSum,omitempty"` + HostConfigCheckSum []byte `xml:"hostConfigCheckSum,omitempty"` + GraphicsInfo []HostGraphicsInfo `xml:"graphicsInfo,omitempty"` + SharedPassthruGpuTypes []string `xml:"sharedPassthruGpuTypes,omitempty"` + GraphicsConfig *HostGraphicsConfig `xml:"graphicsConfig,omitempty"` + SharedGpuCapabilities []HostSharedGpuCapabilities `xml:"sharedGpuCapabilities,omitempty"` + IoFilterInfo []HostIoFilterInfo `xml:"ioFilterInfo,omitempty"` + SriovDevicePool []BaseHostSriovDevicePoolInfo `xml:"sriovDevicePool,omitempty,typeattr"` +} + +func init() { + t["HostConfigInfo"] = reflect.TypeOf((*HostConfigInfo)(nil)).Elem() +} + +type HostConfigManager struct { + DynamicData + + CpuScheduler *ManagedObjectReference `xml:"cpuScheduler,omitempty"` + DatastoreSystem *ManagedObjectReference `xml:"datastoreSystem,omitempty"` + MemoryManager *ManagedObjectReference `xml:"memoryManager,omitempty"` + StorageSystem *ManagedObjectReference `xml:"storageSystem,omitempty"` + NetworkSystem *ManagedObjectReference `xml:"networkSystem,omitempty"` + VmotionSystem *ManagedObjectReference `xml:"vmotionSystem,omitempty"` + VirtualNicManager *ManagedObjectReference `xml:"virtualNicManager,omitempty"` + ServiceSystem *ManagedObjectReference `xml:"serviceSystem,omitempty"` + FirewallSystem *ManagedObjectReference `xml:"firewallSystem,omitempty"` + AdvancedOption *ManagedObjectReference `xml:"advancedOption,omitempty"` + DiagnosticSystem *ManagedObjectReference `xml:"diagnosticSystem,omitempty"` + AutoStartManager *ManagedObjectReference `xml:"autoStartManager,omitempty"` + SnmpSystem *ManagedObjectReference `xml:"snmpSystem,omitempty"` + DateTimeSystem *ManagedObjectReference `xml:"dateTimeSystem,omitempty"` + PatchManager *ManagedObjectReference `xml:"patchManager,omitempty"` + ImageConfigManager *ManagedObjectReference `xml:"imageConfigManager,omitempty"` + BootDeviceSystem *ManagedObjectReference `xml:"bootDeviceSystem,omitempty"` + FirmwareSystem *ManagedObjectReference `xml:"firmwareSystem,omitempty"` + HealthStatusSystem *ManagedObjectReference `xml:"healthStatusSystem,omitempty"` + PciPassthruSystem *ManagedObjectReference `xml:"pciPassthruSystem,omitempty"` + LicenseManager *ManagedObjectReference `xml:"licenseManager,omitempty"` + KernelModuleSystem *ManagedObjectReference `xml:"kernelModuleSystem,omitempty"` + AuthenticationManager *ManagedObjectReference `xml:"authenticationManager,omitempty"` + PowerSystem *ManagedObjectReference `xml:"powerSystem,omitempty"` + CacheConfigurationManager *ManagedObjectReference 
`xml:"cacheConfigurationManager,omitempty"` + EsxAgentHostManager *ManagedObjectReference `xml:"esxAgentHostManager,omitempty"` + IscsiManager *ManagedObjectReference `xml:"iscsiManager,omitempty"` + VFlashManager *ManagedObjectReference `xml:"vFlashManager,omitempty"` + VsanSystem *ManagedObjectReference `xml:"vsanSystem,omitempty"` + MessageBusProxy *ManagedObjectReference `xml:"messageBusProxy,omitempty"` + UserDirectory *ManagedObjectReference `xml:"userDirectory,omitempty"` + AccountManager *ManagedObjectReference `xml:"accountManager,omitempty"` + HostAccessManager *ManagedObjectReference `xml:"hostAccessManager,omitempty"` + GraphicsManager *ManagedObjectReference `xml:"graphicsManager,omitempty"` + VsanInternalSystem *ManagedObjectReference `xml:"vsanInternalSystem,omitempty"` + CertificateManager *ManagedObjectReference `xml:"certificateManager,omitempty"` + CryptoManager *ManagedObjectReference `xml:"cryptoManager,omitempty"` + NvdimmSystem *ManagedObjectReference `xml:"nvdimmSystem,omitempty"` +} + +func init() { + t["HostConfigManager"] = reflect.TypeOf((*HostConfigManager)(nil)).Elem() +} + +type HostConfigSpec struct { + DynamicData + + NasDatastore []HostNasVolumeConfig `xml:"nasDatastore,omitempty"` + Network *HostNetworkConfig `xml:"network,omitempty"` + NicTypeSelection []HostVirtualNicManagerNicTypeSelection `xml:"nicTypeSelection,omitempty"` + Service []HostServiceConfig `xml:"service,omitempty"` + Firewall *HostFirewallConfig `xml:"firewall,omitempty"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr"` + DatastorePrincipal string `xml:"datastorePrincipal,omitempty"` + DatastorePrincipalPasswd string `xml:"datastorePrincipalPasswd,omitempty"` + Datetime *HostDateTimeConfig `xml:"datetime,omitempty"` + StorageDevice *HostStorageDeviceInfo `xml:"storageDevice,omitempty"` + License *HostLicenseSpec `xml:"license,omitempty"` + Security *HostSecuritySpec `xml:"security,omitempty"` + UserAccount []BaseHostAccountSpec `xml:"userAccount,omitempty,typeattr"` + UsergroupAccount []BaseHostAccountSpec `xml:"usergroupAccount,omitempty,typeattr"` + Memory *HostMemorySpec `xml:"memory,omitempty"` + ActiveDirectory []HostActiveDirectory `xml:"activeDirectory,omitempty"` + GenericConfig []KeyAnyValue `xml:"genericConfig,omitempty"` + GraphicsConfig *HostGraphicsConfig `xml:"graphicsConfig,omitempty"` +} + +func init() { + t["HostConfigSpec"] = reflect.TypeOf((*HostConfigSpec)(nil)).Elem() +} + +type HostConfigSummary struct { + DynamicData + + Name string `xml:"name"` + Port int32 `xml:"port"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` + Product *AboutInfo `xml:"product,omitempty"` + VmotionEnabled bool `xml:"vmotionEnabled"` + FaultToleranceEnabled *bool `xml:"faultToleranceEnabled"` + FeatureVersion []HostFeatureVersionInfo `xml:"featureVersion,omitempty"` + AgentVmDatastore *ManagedObjectReference `xml:"agentVmDatastore,omitempty"` + AgentVmNetwork *ManagedObjectReference `xml:"agentVmNetwork,omitempty"` +} + +func init() { + t["HostConfigSummary"] = reflect.TypeOf((*HostConfigSummary)(nil)).Elem() +} + +type HostConfigVFlashCache HostConfigVFlashCacheRequestType + +func init() { + t["HostConfigVFlashCache"] = reflect.TypeOf((*HostConfigVFlashCache)(nil)).Elem() +} + +type HostConfigVFlashCacheRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostVFlashManagerVFlashCacheConfigSpec `xml:"spec"` +} + +func init() { + t["HostConfigVFlashCacheRequestType"] = reflect.TypeOf((*HostConfigVFlashCacheRequestType)(nil)).Elem() +} + +type 
HostConfigVFlashCacheResponse struct { +} + +type HostConfigureVFlashResource HostConfigureVFlashResourceRequestType + +func init() { + t["HostConfigureVFlashResource"] = reflect.TypeOf((*HostConfigureVFlashResource)(nil)).Elem() +} + +type HostConfigureVFlashResourceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostVFlashManagerVFlashResourceConfigSpec `xml:"spec"` +} + +func init() { + t["HostConfigureVFlashResourceRequestType"] = reflect.TypeOf((*HostConfigureVFlashResourceRequestType)(nil)).Elem() +} + +type HostConfigureVFlashResourceResponse struct { +} + +type HostConnectFault struct { + VimFault +} + +func init() { + t["HostConnectFault"] = reflect.TypeOf((*HostConnectFault)(nil)).Elem() +} + +type HostConnectFaultFault BaseHostConnectFault + +func init() { + t["HostConnectFaultFault"] = reflect.TypeOf((*HostConnectFaultFault)(nil)).Elem() +} + +type HostConnectInfo struct { + DynamicData + + ServerIp string `xml:"serverIp,omitempty"` + InDasCluster *bool `xml:"inDasCluster"` + Host HostListSummary `xml:"host"` + Vm []VirtualMachineSummary `xml:"vm,omitempty"` + VimAccountNameRequired *bool `xml:"vimAccountNameRequired"` + ClusterSupported *bool `xml:"clusterSupported"` + Network []BaseHostConnectInfoNetworkInfo `xml:"network,omitempty,typeattr"` + Datastore []BaseHostDatastoreConnectInfo `xml:"datastore,omitempty,typeattr"` + License *HostLicenseConnectInfo `xml:"license,omitempty"` + Capability *HostCapability `xml:"capability,omitempty"` +} + +func init() { + t["HostConnectInfo"] = reflect.TypeOf((*HostConnectInfo)(nil)).Elem() +} + +type HostConnectInfoNetworkInfo struct { + DynamicData + + Summary BaseNetworkSummary `xml:"summary,typeattr"` +} + +func init() { + t["HostConnectInfoNetworkInfo"] = reflect.TypeOf((*HostConnectInfoNetworkInfo)(nil)).Elem() +} + +type HostConnectSpec struct { + DynamicData + + HostName string `xml:"hostName,omitempty"` + Port int32 `xml:"port,omitempty"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` + UserName string `xml:"userName,omitempty"` + Password string `xml:"password,omitempty"` + VmFolder *ManagedObjectReference `xml:"vmFolder,omitempty"` + Force bool `xml:"force"` + VimAccountName string `xml:"vimAccountName,omitempty"` + VimAccountPassword string `xml:"vimAccountPassword,omitempty"` + ManagementIp string `xml:"managementIp,omitempty"` + LockdownMode HostLockdownMode `xml:"lockdownMode,omitempty"` + HostGateway *HostGatewaySpec `xml:"hostGateway,omitempty"` +} + +func init() { + t["HostConnectSpec"] = reflect.TypeOf((*HostConnectSpec)(nil)).Elem() +} + +type HostConnectedEvent struct { + HostEvent +} + +func init() { + t["HostConnectedEvent"] = reflect.TypeOf((*HostConnectedEvent)(nil)).Elem() +} + +type HostConnectionLostEvent struct { + HostEvent +} + +func init() { + t["HostConnectionLostEvent"] = reflect.TypeOf((*HostConnectionLostEvent)(nil)).Elem() +} + +type HostCpuIdInfo struct { + DynamicData + + Level int32 `xml:"level"` + Vendor string `xml:"vendor,omitempty"` + Eax string `xml:"eax,omitempty"` + Ebx string `xml:"ebx,omitempty"` + Ecx string `xml:"ecx,omitempty"` + Edx string `xml:"edx,omitempty"` +} + +func init() { + t["HostCpuIdInfo"] = reflect.TypeOf((*HostCpuIdInfo)(nil)).Elem() +} + +type HostCpuInfo struct { + DynamicData + + NumCpuPackages int16 `xml:"numCpuPackages"` + NumCpuCores int16 `xml:"numCpuCores"` + NumCpuThreads int16 `xml:"numCpuThreads"` + Hz int64 `xml:"hz"` +} + +func init() { + t["HostCpuInfo"] = reflect.TypeOf((*HostCpuInfo)(nil)).Elem() +} + +type HostCpuPackage 
struct { + DynamicData + + Index int16 `xml:"index"` + Vendor string `xml:"vendor"` + Hz int64 `xml:"hz"` + BusHz int64 `xml:"busHz"` + Description string `xml:"description"` + ThreadId []int16 `xml:"threadId"` + CpuFeature []HostCpuIdInfo `xml:"cpuFeature,omitempty"` +} + +func init() { + t["HostCpuPackage"] = reflect.TypeOf((*HostCpuPackage)(nil)).Elem() +} + +type HostCpuPowerManagementInfo struct { + DynamicData + + CurrentPolicy string `xml:"currentPolicy,omitempty"` + HardwareSupport string `xml:"hardwareSupport,omitempty"` +} + +func init() { + t["HostCpuPowerManagementInfo"] = reflect.TypeOf((*HostCpuPowerManagementInfo)(nil)).Elem() +} + +type HostCreateDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec VslmCreateSpec `xml:"spec"` +} + +func init() { + t["HostCreateDiskRequestType"] = reflect.TypeOf((*HostCreateDiskRequestType)(nil)).Elem() +} + +type HostCreateDisk_Task HostCreateDiskRequestType + +func init() { + t["HostCreateDisk_Task"] = reflect.TypeOf((*HostCreateDisk_Task)(nil)).Elem() +} + +type HostCreateDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type HostDasDisabledEvent struct { + HostEvent +} + +func init() { + t["HostDasDisabledEvent"] = reflect.TypeOf((*HostDasDisabledEvent)(nil)).Elem() +} + +type HostDasDisablingEvent struct { + HostEvent +} + +func init() { + t["HostDasDisablingEvent"] = reflect.TypeOf((*HostDasDisablingEvent)(nil)).Elem() +} + +type HostDasEnabledEvent struct { + HostEvent +} + +func init() { + t["HostDasEnabledEvent"] = reflect.TypeOf((*HostDasEnabledEvent)(nil)).Elem() +} + +type HostDasEnablingEvent struct { + HostEvent +} + +func init() { + t["HostDasEnablingEvent"] = reflect.TypeOf((*HostDasEnablingEvent)(nil)).Elem() +} + +type HostDasErrorEvent struct { + HostEvent + + Message string `xml:"message,omitempty"` + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["HostDasErrorEvent"] = reflect.TypeOf((*HostDasErrorEvent)(nil)).Elem() +} + +type HostDasEvent struct { + HostEvent +} + +func init() { + t["HostDasEvent"] = reflect.TypeOf((*HostDasEvent)(nil)).Elem() +} + +type HostDasOkEvent struct { + HostEvent +} + +func init() { + t["HostDasOkEvent"] = reflect.TypeOf((*HostDasOkEvent)(nil)).Elem() +} + +type HostDatastoreBrowserSearchResults struct { + DynamicData + + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` + FolderPath string `xml:"folderPath,omitempty"` + File []BaseFileInfo `xml:"file,omitempty,typeattr"` +} + +func init() { + t["HostDatastoreBrowserSearchResults"] = reflect.TypeOf((*HostDatastoreBrowserSearchResults)(nil)).Elem() +} + +type HostDatastoreBrowserSearchSpec struct { + DynamicData + + Query []BaseFileQuery `xml:"query,omitempty,typeattr"` + Details *FileQueryFlags `xml:"details,omitempty"` + SearchCaseInsensitive *bool `xml:"searchCaseInsensitive"` + MatchPattern []string `xml:"matchPattern,omitempty"` + SortFoldersFirst *bool `xml:"sortFoldersFirst"` +} + +func init() { + t["HostDatastoreBrowserSearchSpec"] = reflect.TypeOf((*HostDatastoreBrowserSearchSpec)(nil)).Elem() +} + +type HostDatastoreConnectInfo struct { + DynamicData + + Summary DatastoreSummary `xml:"summary"` +} + +func init() { + t["HostDatastoreConnectInfo"] = reflect.TypeOf((*HostDatastoreConnectInfo)(nil)).Elem() +} + +type HostDatastoreExistsConnectInfo struct { + HostDatastoreConnectInfo + + NewDatastoreName string `xml:"newDatastoreName"` +} + +func init() { + t["HostDatastoreExistsConnectInfo"] = 
reflect.TypeOf((*HostDatastoreExistsConnectInfo)(nil)).Elem() +} + +type HostDatastoreNameConflictConnectInfo struct { + HostDatastoreConnectInfo + + NewDatastoreName string `xml:"newDatastoreName"` +} + +func init() { + t["HostDatastoreNameConflictConnectInfo"] = reflect.TypeOf((*HostDatastoreNameConflictConnectInfo)(nil)).Elem() +} + +type HostDatastoreSystemCapabilities struct { + DynamicData + + NfsMountCreationRequired bool `xml:"nfsMountCreationRequired"` + NfsMountCreationSupported bool `xml:"nfsMountCreationSupported"` + LocalDatastoreSupported bool `xml:"localDatastoreSupported"` + VmfsExtentExpansionSupported *bool `xml:"vmfsExtentExpansionSupported"` +} + +func init() { + t["HostDatastoreSystemCapabilities"] = reflect.TypeOf((*HostDatastoreSystemCapabilities)(nil)).Elem() +} + +type HostDatastoreSystemDatastoreResult struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["HostDatastoreSystemDatastoreResult"] = reflect.TypeOf((*HostDatastoreSystemDatastoreResult)(nil)).Elem() +} + +type HostDatastoreSystemVvolDatastoreSpec struct { + DynamicData + + Name string `xml:"name"` + ScId string `xml:"scId"` +} + +func init() { + t["HostDatastoreSystemVvolDatastoreSpec"] = reflect.TypeOf((*HostDatastoreSystemVvolDatastoreSpec)(nil)).Elem() +} + +type HostDateTimeConfig struct { + DynamicData + + TimeZone string `xml:"timeZone,omitempty"` + NtpConfig *HostNtpConfig `xml:"ntpConfig,omitempty"` +} + +func init() { + t["HostDateTimeConfig"] = reflect.TypeOf((*HostDateTimeConfig)(nil)).Elem() +} + +type HostDateTimeInfo struct { + DynamicData + + TimeZone HostDateTimeSystemTimeZone `xml:"timeZone"` + NtpConfig *HostNtpConfig `xml:"ntpConfig,omitempty"` +} + +func init() { + t["HostDateTimeInfo"] = reflect.TypeOf((*HostDateTimeInfo)(nil)).Elem() +} + +type HostDateTimeSystemTimeZone struct { + DynamicData + + Key string `xml:"key"` + Name string `xml:"name"` + Description string `xml:"description"` + GmtOffset int32 `xml:"gmtOffset"` +} + +func init() { + t["HostDateTimeSystemTimeZone"] = reflect.TypeOf((*HostDateTimeSystemTimeZone)(nil)).Elem() +} + +type HostDeleteVStorageObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["HostDeleteVStorageObjectRequestType"] = reflect.TypeOf((*HostDeleteVStorageObjectRequestType)(nil)).Elem() +} + +type HostDeleteVStorageObject_Task HostDeleteVStorageObjectRequestType + +func init() { + t["HostDeleteVStorageObject_Task"] = reflect.TypeOf((*HostDeleteVStorageObject_Task)(nil)).Elem() +} + +type HostDeleteVStorageObject_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type HostDeploymentInfo struct { + DynamicData + + BootedFromStatelessCache *bool `xml:"bootedFromStatelessCache"` +} + +func init() { + t["HostDeploymentInfo"] = reflect.TypeOf((*HostDeploymentInfo)(nil)).Elem() +} + +type HostDevice struct { + DynamicData + + DeviceName string `xml:"deviceName"` + DeviceType string `xml:"deviceType"` +} + +func init() { + t["HostDevice"] = reflect.TypeOf((*HostDevice)(nil)).Elem() +} + +type HostDhcpService struct { + DynamicData + + Key string `xml:"key"` + Spec HostDhcpServiceSpec `xml:"spec"` +} + +func init() { + t["HostDhcpService"] = reflect.TypeOf((*HostDhcpService)(nil)).Elem() +} + +type HostDhcpServiceConfig struct { + DynamicData + + ChangeOperation string `xml:"changeOperation,omitempty"` + Key string `xml:"key"` + 
Spec HostDhcpServiceSpec `xml:"spec"` +} + +func init() { + t["HostDhcpServiceConfig"] = reflect.TypeOf((*HostDhcpServiceConfig)(nil)).Elem() +} + +type HostDhcpServiceSpec struct { + DynamicData + + VirtualSwitch string `xml:"virtualSwitch"` + DefaultLeaseDuration int32 `xml:"defaultLeaseDuration"` + LeaseBeginIp string `xml:"leaseBeginIp"` + LeaseEndIp string `xml:"leaseEndIp"` + MaxLeaseDuration int32 `xml:"maxLeaseDuration"` + UnlimitedLease bool `xml:"unlimitedLease"` + IpSubnetAddr string `xml:"ipSubnetAddr"` + IpSubnetMask string `xml:"ipSubnetMask"` +} + +func init() { + t["HostDhcpServiceSpec"] = reflect.TypeOf((*HostDhcpServiceSpec)(nil)).Elem() +} + +type HostDiagnosticPartition struct { + DynamicData + + StorageType string `xml:"storageType"` + DiagnosticType string `xml:"diagnosticType"` + Slots int32 `xml:"slots"` + Id HostScsiDiskPartition `xml:"id"` +} + +func init() { + t["HostDiagnosticPartition"] = reflect.TypeOf((*HostDiagnosticPartition)(nil)).Elem() +} + +type HostDiagnosticPartitionCreateDescription struct { + DynamicData + + Layout HostDiskPartitionLayout `xml:"layout"` + DiskUuid string `xml:"diskUuid"` + Spec HostDiagnosticPartitionCreateSpec `xml:"spec"` +} + +func init() { + t["HostDiagnosticPartitionCreateDescription"] = reflect.TypeOf((*HostDiagnosticPartitionCreateDescription)(nil)).Elem() +} + +type HostDiagnosticPartitionCreateOption struct { + DynamicData + + StorageType string `xml:"storageType"` + DiagnosticType string `xml:"diagnosticType"` + Disk HostScsiDisk `xml:"disk"` +} + +func init() { + t["HostDiagnosticPartitionCreateOption"] = reflect.TypeOf((*HostDiagnosticPartitionCreateOption)(nil)).Elem() +} + +type HostDiagnosticPartitionCreateSpec struct { + DynamicData + + StorageType string `xml:"storageType"` + DiagnosticType string `xml:"diagnosticType"` + Id HostScsiDiskPartition `xml:"id"` + Partition HostDiskPartitionSpec `xml:"partition"` + Active *bool `xml:"active"` +} + +func init() { + t["HostDiagnosticPartitionCreateSpec"] = reflect.TypeOf((*HostDiagnosticPartitionCreateSpec)(nil)).Elem() +} + +type HostDigestInfo struct { + DynamicData + + DigestMethod string `xml:"digestMethod"` + DigestValue []byte `xml:"digestValue"` + ObjectName string `xml:"objectName,omitempty"` +} + +func init() { + t["HostDigestInfo"] = reflect.TypeOf((*HostDigestInfo)(nil)).Elem() +} + +type HostDirectoryStoreInfo struct { + HostAuthenticationStoreInfo +} + +func init() { + t["HostDirectoryStoreInfo"] = reflect.TypeOf((*HostDirectoryStoreInfo)(nil)).Elem() +} + +type HostDisconnectedEvent struct { + HostEvent + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["HostDisconnectedEvent"] = reflect.TypeOf((*HostDisconnectedEvent)(nil)).Elem() +} + +type HostDiskConfigurationResult struct { + DynamicData + + DevicePath string `xml:"devicePath,omitempty"` + Success *bool `xml:"success"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["HostDiskConfigurationResult"] = reflect.TypeOf((*HostDiskConfigurationResult)(nil)).Elem() +} + +type HostDiskDimensions struct { + DynamicData +} + +func init() { + t["HostDiskDimensions"] = reflect.TypeOf((*HostDiskDimensions)(nil)).Elem() +} + +type HostDiskDimensionsChs struct { + DynamicData + + Cylinder int64 `xml:"cylinder"` + Head int32 `xml:"head"` + Sector int32 `xml:"sector"` +} + +func init() { + t["HostDiskDimensionsChs"] = reflect.TypeOf((*HostDiskDimensionsChs)(nil)).Elem() +} + +type HostDiskDimensionsLba struct { + DynamicData + + BlockSize int32 `xml:"blockSize"` + Block int64 
`xml:"block"` +} + +func init() { + t["HostDiskDimensionsLba"] = reflect.TypeOf((*HostDiskDimensionsLba)(nil)).Elem() +} + +type HostDiskMappingInfo struct { + DynamicData + + PhysicalPartition *HostDiskMappingPartitionInfo `xml:"physicalPartition,omitempty"` + Name string `xml:"name"` + Exclusive *bool `xml:"exclusive"` +} + +func init() { + t["HostDiskMappingInfo"] = reflect.TypeOf((*HostDiskMappingInfo)(nil)).Elem() +} + +type HostDiskMappingOption struct { + DynamicData + + PhysicalPartition []HostDiskMappingPartitionOption `xml:"physicalPartition,omitempty"` + Name string `xml:"name"` +} + +func init() { + t["HostDiskMappingOption"] = reflect.TypeOf((*HostDiskMappingOption)(nil)).Elem() +} + +type HostDiskMappingPartitionInfo struct { + DynamicData + + Name string `xml:"name"` + FileSystem string `xml:"fileSystem"` + CapacityInKb int64 `xml:"capacityInKb"` +} + +func init() { + t["HostDiskMappingPartitionInfo"] = reflect.TypeOf((*HostDiskMappingPartitionInfo)(nil)).Elem() +} + +type HostDiskMappingPartitionOption struct { + DynamicData + + Name string `xml:"name"` + FileSystem string `xml:"fileSystem"` + CapacityInKb int64 `xml:"capacityInKb"` +} + +func init() { + t["HostDiskMappingPartitionOption"] = reflect.TypeOf((*HostDiskMappingPartitionOption)(nil)).Elem() +} + +type HostDiskPartitionAttributes struct { + DynamicData + + Partition int32 `xml:"partition"` + StartSector int64 `xml:"startSector"` + EndSector int64 `xml:"endSector"` + Type string `xml:"type"` + Guid string `xml:"guid,omitempty"` + Logical bool `xml:"logical"` + Attributes byte `xml:"attributes"` + PartitionAlignment int64 `xml:"partitionAlignment,omitempty"` +} + +func init() { + t["HostDiskPartitionAttributes"] = reflect.TypeOf((*HostDiskPartitionAttributes)(nil)).Elem() +} + +type HostDiskPartitionBlockRange struct { + DynamicData + + Partition int32 `xml:"partition,omitempty"` + Type string `xml:"type"` + Start HostDiskDimensionsLba `xml:"start"` + End HostDiskDimensionsLba `xml:"end"` +} + +func init() { + t["HostDiskPartitionBlockRange"] = reflect.TypeOf((*HostDiskPartitionBlockRange)(nil)).Elem() +} + +type HostDiskPartitionInfo struct { + DynamicData + + DeviceName string `xml:"deviceName"` + Spec HostDiskPartitionSpec `xml:"spec"` + Layout HostDiskPartitionLayout `xml:"layout"` +} + +func init() { + t["HostDiskPartitionInfo"] = reflect.TypeOf((*HostDiskPartitionInfo)(nil)).Elem() +} + +type HostDiskPartitionLayout struct { + DynamicData + + Total *HostDiskDimensionsLba `xml:"total,omitempty"` + Partition []HostDiskPartitionBlockRange `xml:"partition"` +} + +func init() { + t["HostDiskPartitionLayout"] = reflect.TypeOf((*HostDiskPartitionLayout)(nil)).Elem() +} + +type HostDiskPartitionSpec struct { + DynamicData + + PartitionFormat string `xml:"partitionFormat,omitempty"` + Chs *HostDiskDimensionsChs `xml:"chs,omitempty"` + TotalSectors int64 `xml:"totalSectors,omitempty"` + Partition []HostDiskPartitionAttributes `xml:"partition,omitempty"` +} + +func init() { + t["HostDiskPartitionSpec"] = reflect.TypeOf((*HostDiskPartitionSpec)(nil)).Elem() +} + +type HostDnsConfig struct { + DynamicData + + Dhcp bool `xml:"dhcp"` + VirtualNicDevice string `xml:"virtualNicDevice,omitempty"` + Ipv6VirtualNicDevice string `xml:"ipv6VirtualNicDevice,omitempty"` + HostName string `xml:"hostName"` + DomainName string `xml:"domainName"` + Address []string `xml:"address,omitempty"` + SearchDomain []string `xml:"searchDomain,omitempty"` +} + +func init() { + t["HostDnsConfig"] = reflect.TypeOf((*HostDnsConfig)(nil)).Elem() +} + 
+type HostDnsConfigSpec struct { + HostDnsConfig + + VirtualNicConnection *HostVirtualNicConnection `xml:"virtualNicConnection,omitempty"` + VirtualNicConnectionV6 *HostVirtualNicConnection `xml:"virtualNicConnectionV6,omitempty"` +} + +func init() { + t["HostDnsConfigSpec"] = reflect.TypeOf((*HostDnsConfigSpec)(nil)).Elem() +} + +type HostEnableAdminFailedEvent struct { + HostEvent + + Permissions []Permission `xml:"permissions"` +} + +func init() { + t["HostEnableAdminFailedEvent"] = reflect.TypeOf((*HostEnableAdminFailedEvent)(nil)).Elem() +} + +type HostEnterMaintenanceResult struct { + DynamicData + + VmFaults []FaultsByVM `xml:"vmFaults,omitempty"` + HostFaults []FaultsByHost `xml:"hostFaults,omitempty"` +} + +func init() { + t["HostEnterMaintenanceResult"] = reflect.TypeOf((*HostEnterMaintenanceResult)(nil)).Elem() +} + +type HostEsxAgentHostManagerConfigInfo struct { + DynamicData + + AgentVmDatastore *ManagedObjectReference `xml:"agentVmDatastore,omitempty"` + AgentVmNetwork *ManagedObjectReference `xml:"agentVmNetwork,omitempty"` +} + +func init() { + t["HostEsxAgentHostManagerConfigInfo"] = reflect.TypeOf((*HostEsxAgentHostManagerConfigInfo)(nil)).Elem() +} + +type HostEvent struct { + Event +} + +func init() { + t["HostEvent"] = reflect.TypeOf((*HostEvent)(nil)).Elem() +} + +type HostEventArgument struct { + EntityEventArgument + + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["HostEventArgument"] = reflect.TypeOf((*HostEventArgument)(nil)).Elem() +} + +type HostExtendDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + NewCapacityInMB int64 `xml:"newCapacityInMB"` +} + +func init() { + t["HostExtendDiskRequestType"] = reflect.TypeOf((*HostExtendDiskRequestType)(nil)).Elem() +} + +type HostExtendDisk_Task HostExtendDiskRequestType + +func init() { + t["HostExtendDisk_Task"] = reflect.TypeOf((*HostExtendDisk_Task)(nil)).Elem() +} + +type HostExtendDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type HostExtraNetworksEvent struct { + HostDasEvent + + Ips string `xml:"ips,omitempty"` +} + +func init() { + t["HostExtraNetworksEvent"] = reflect.TypeOf((*HostExtraNetworksEvent)(nil)).Elem() +} + +type HostFaultToleranceManagerComponentHealthInfo struct { + DynamicData + + IsStorageHealthy bool `xml:"isStorageHealthy"` + IsNetworkHealthy bool `xml:"isNetworkHealthy"` +} + +func init() { + t["HostFaultToleranceManagerComponentHealthInfo"] = reflect.TypeOf((*HostFaultToleranceManagerComponentHealthInfo)(nil)).Elem() +} + +type HostFeatureCapability struct { + DynamicData + + Key string `xml:"key"` + FeatureName string `xml:"featureName"` + Value string `xml:"value"` +} + +func init() { + t["HostFeatureCapability"] = reflect.TypeOf((*HostFeatureCapability)(nil)).Elem() +} + +type HostFeatureMask struct { + DynamicData + + Key string `xml:"key"` + FeatureName string `xml:"featureName"` + Value string `xml:"value"` +} + +func init() { + t["HostFeatureMask"] = reflect.TypeOf((*HostFeatureMask)(nil)).Elem() +} + +type HostFeatureVersionInfo struct { + DynamicData + + Key string `xml:"key"` + Value string `xml:"value"` +} + +func init() { + t["HostFeatureVersionInfo"] = reflect.TypeOf((*HostFeatureVersionInfo)(nil)).Elem() +} + +type HostFibreChannelHba struct { + HostHostBusAdapter + + PortWorldWideName int64 `xml:"portWorldWideName"` + NodeWorldWideName int64 `xml:"nodeWorldWideName"` + PortType FibreChannelPortType `xml:"portType"` + Speed int64 
`xml:"speed"` +} + +func init() { + t["HostFibreChannelHba"] = reflect.TypeOf((*HostFibreChannelHba)(nil)).Elem() +} + +type HostFibreChannelOverEthernetHba struct { + HostFibreChannelHba + + UnderlyingNic string `xml:"underlyingNic"` + LinkInfo HostFibreChannelOverEthernetHbaLinkInfo `xml:"linkInfo"` + IsSoftwareFcoe bool `xml:"isSoftwareFcoe"` + MarkedForRemoval bool `xml:"markedForRemoval"` +} + +func init() { + t["HostFibreChannelOverEthernetHba"] = reflect.TypeOf((*HostFibreChannelOverEthernetHba)(nil)).Elem() +} + +type HostFibreChannelOverEthernetHbaLinkInfo struct { + DynamicData + + VnportMac string `xml:"vnportMac"` + FcfMac string `xml:"fcfMac"` + VlanId int32 `xml:"vlanId"` +} + +func init() { + t["HostFibreChannelOverEthernetHbaLinkInfo"] = reflect.TypeOf((*HostFibreChannelOverEthernetHbaLinkInfo)(nil)).Elem() +} + +type HostFibreChannelOverEthernetTargetTransport struct { + HostFibreChannelTargetTransport + + VnportMac string `xml:"vnportMac"` + FcfMac string `xml:"fcfMac"` + VlanId int32 `xml:"vlanId"` +} + +func init() { + t["HostFibreChannelOverEthernetTargetTransport"] = reflect.TypeOf((*HostFibreChannelOverEthernetTargetTransport)(nil)).Elem() +} + +type HostFibreChannelTargetTransport struct { + HostTargetTransport + + PortWorldWideName int64 `xml:"portWorldWideName"` + NodeWorldWideName int64 `xml:"nodeWorldWideName"` +} + +func init() { + t["HostFibreChannelTargetTransport"] = reflect.TypeOf((*HostFibreChannelTargetTransport)(nil)).Elem() +} + +type HostFileAccess struct { + DynamicData + + Who string `xml:"who"` + What string `xml:"what"` +} + +func init() { + t["HostFileAccess"] = reflect.TypeOf((*HostFileAccess)(nil)).Elem() +} + +type HostFileSystemMountInfo struct { + DynamicData + + MountInfo HostMountInfo `xml:"mountInfo"` + Volume BaseHostFileSystemVolume `xml:"volume,typeattr"` + VStorageSupport string `xml:"vStorageSupport,omitempty"` +} + +func init() { + t["HostFileSystemMountInfo"] = reflect.TypeOf((*HostFileSystemMountInfo)(nil)).Elem() +} + +type HostFileSystemVolume struct { + DynamicData + + Type string `xml:"type"` + Name string `xml:"name"` + Capacity int64 `xml:"capacity"` +} + +func init() { + t["HostFileSystemVolume"] = reflect.TypeOf((*HostFileSystemVolume)(nil)).Elem() +} + +type HostFileSystemVolumeInfo struct { + DynamicData + + VolumeTypeList []string `xml:"volumeTypeList,omitempty"` + MountInfo []HostFileSystemMountInfo `xml:"mountInfo,omitempty"` +} + +func init() { + t["HostFileSystemVolumeInfo"] = reflect.TypeOf((*HostFileSystemVolumeInfo)(nil)).Elem() +} + +type HostFirewallConfig struct { + DynamicData + + Rule []HostFirewallConfigRuleSetConfig `xml:"rule,omitempty"` + DefaultBlockingPolicy HostFirewallDefaultPolicy `xml:"defaultBlockingPolicy"` +} + +func init() { + t["HostFirewallConfig"] = reflect.TypeOf((*HostFirewallConfig)(nil)).Elem() +} + +type HostFirewallConfigRuleSetConfig struct { + DynamicData + + RulesetId string `xml:"rulesetId"` + Enabled bool `xml:"enabled"` + AllowedHosts *HostFirewallRulesetIpList `xml:"allowedHosts,omitempty"` +} + +func init() { + t["HostFirewallConfigRuleSetConfig"] = reflect.TypeOf((*HostFirewallConfigRuleSetConfig)(nil)).Elem() +} + +type HostFirewallDefaultPolicy struct { + DynamicData + + IncomingBlocked *bool `xml:"incomingBlocked"` + OutgoingBlocked *bool `xml:"outgoingBlocked"` +} + +func init() { + t["HostFirewallDefaultPolicy"] = reflect.TypeOf((*HostFirewallDefaultPolicy)(nil)).Elem() +} + +type HostFirewallInfo struct { + DynamicData + + DefaultPolicy HostFirewallDefaultPolicy 
`xml:"defaultPolicy"` + Ruleset []HostFirewallRuleset `xml:"ruleset,omitempty"` +} + +func init() { + t["HostFirewallInfo"] = reflect.TypeOf((*HostFirewallInfo)(nil)).Elem() +} + +type HostFirewallRule struct { + DynamicData + + Port int32 `xml:"port"` + EndPort int32 `xml:"endPort,omitempty"` + Direction HostFirewallRuleDirection `xml:"direction"` + PortType HostFirewallRulePortType `xml:"portType,omitempty"` + Protocol string `xml:"protocol"` +} + +func init() { + t["HostFirewallRule"] = reflect.TypeOf((*HostFirewallRule)(nil)).Elem() +} + +type HostFirewallRuleset struct { + DynamicData + + Key string `xml:"key"` + Label string `xml:"label"` + Required bool `xml:"required"` + Rule []HostFirewallRule `xml:"rule"` + Service string `xml:"service,omitempty"` + Enabled bool `xml:"enabled"` + AllowedHosts *HostFirewallRulesetIpList `xml:"allowedHosts,omitempty"` +} + +func init() { + t["HostFirewallRuleset"] = reflect.TypeOf((*HostFirewallRuleset)(nil)).Elem() +} + +type HostFirewallRulesetIpList struct { + DynamicData + + IpAddress []string `xml:"ipAddress,omitempty"` + IpNetwork []HostFirewallRulesetIpNetwork `xml:"ipNetwork,omitempty"` + AllIp bool `xml:"allIp"` +} + +func init() { + t["HostFirewallRulesetIpList"] = reflect.TypeOf((*HostFirewallRulesetIpList)(nil)).Elem() +} + +type HostFirewallRulesetIpNetwork struct { + DynamicData + + Network string `xml:"network"` + PrefixLength int32 `xml:"prefixLength"` +} + +func init() { + t["HostFirewallRulesetIpNetwork"] = reflect.TypeOf((*HostFirewallRulesetIpNetwork)(nil)).Elem() +} + +type HostFirewallRulesetRulesetSpec struct { + DynamicData + + AllowedHosts HostFirewallRulesetIpList `xml:"allowedHosts"` +} + +func init() { + t["HostFirewallRulesetRulesetSpec"] = reflect.TypeOf((*HostFirewallRulesetRulesetSpec)(nil)).Elem() +} + +type HostFlagInfo struct { + DynamicData + + BackgroundSnapshotsEnabled *bool `xml:"backgroundSnapshotsEnabled"` +} + +func init() { + t["HostFlagInfo"] = reflect.TypeOf((*HostFlagInfo)(nil)).Elem() +} + +type HostForceMountedInfo struct { + DynamicData + + Persist bool `xml:"persist"` + Mounted bool `xml:"mounted"` +} + +func init() { + t["HostForceMountedInfo"] = reflect.TypeOf((*HostForceMountedInfo)(nil)).Elem() +} + +type HostGatewaySpec struct { + DynamicData + + GatewayType string `xml:"gatewayType"` + GatewayId string `xml:"gatewayId,omitempty"` + TrustVerificationToken string `xml:"trustVerificationToken,omitempty"` + HostAuthParams []KeyValue `xml:"hostAuthParams,omitempty"` +} + +func init() { + t["HostGatewaySpec"] = reflect.TypeOf((*HostGatewaySpec)(nil)).Elem() +} + +type HostGetShortNameFailedEvent struct { + HostEvent +} + +func init() { + t["HostGetShortNameFailedEvent"] = reflect.TypeOf((*HostGetShortNameFailedEvent)(nil)).Elem() +} + +type HostGetVFlashModuleDefaultConfig HostGetVFlashModuleDefaultConfigRequestType + +func init() { + t["HostGetVFlashModuleDefaultConfig"] = reflect.TypeOf((*HostGetVFlashModuleDefaultConfig)(nil)).Elem() +} + +type HostGetVFlashModuleDefaultConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + VFlashModule string `xml:"vFlashModule"` +} + +func init() { + t["HostGetVFlashModuleDefaultConfigRequestType"] = reflect.TypeOf((*HostGetVFlashModuleDefaultConfigRequestType)(nil)).Elem() +} + +type HostGetVFlashModuleDefaultConfigResponse struct { + Returnval VirtualDiskVFlashCacheConfigInfo `xml:"returnval"` +} + +type HostGraphicsConfig struct { + DynamicData + + HostDefaultGraphicsType string `xml:"hostDefaultGraphicsType"` + 
SharedPassthruAssignmentPolicy string `xml:"sharedPassthruAssignmentPolicy"` + DeviceType []HostGraphicsConfigDeviceType `xml:"deviceType,omitempty"` +} + +func init() { + t["HostGraphicsConfig"] = reflect.TypeOf((*HostGraphicsConfig)(nil)).Elem() +} + +type HostGraphicsConfigDeviceType struct { + DynamicData + + DeviceId string `xml:"deviceId"` + GraphicsType string `xml:"graphicsType"` +} + +func init() { + t["HostGraphicsConfigDeviceType"] = reflect.TypeOf((*HostGraphicsConfigDeviceType)(nil)).Elem() +} + +type HostGraphicsInfo struct { + DynamicData + + DeviceName string `xml:"deviceName"` + VendorName string `xml:"vendorName"` + PciId string `xml:"pciId"` + GraphicsType string `xml:"graphicsType"` + MemorySizeInKB int64 `xml:"memorySizeInKB"` + Vm []ManagedObjectReference `xml:"vm,omitempty"` +} + +func init() { + t["HostGraphicsInfo"] = reflect.TypeOf((*HostGraphicsInfo)(nil)).Elem() +} + +type HostHardwareElementInfo struct { + DynamicData + + Name string `xml:"name"` + Status BaseElementDescription `xml:"status,typeattr"` +} + +func init() { + t["HostHardwareElementInfo"] = reflect.TypeOf((*HostHardwareElementInfo)(nil)).Elem() +} + +type HostHardwareInfo struct { + DynamicData + + SystemInfo HostSystemInfo `xml:"systemInfo"` + CpuPowerManagementInfo *HostCpuPowerManagementInfo `xml:"cpuPowerManagementInfo,omitempty"` + CpuInfo HostCpuInfo `xml:"cpuInfo"` + CpuPkg []HostCpuPackage `xml:"cpuPkg"` + MemorySize int64 `xml:"memorySize"` + NumaInfo *HostNumaInfo `xml:"numaInfo,omitempty"` + SmcPresent *bool `xml:"smcPresent"` + PciDevice []HostPciDevice `xml:"pciDevice,omitempty"` + CpuFeature []HostCpuIdInfo `xml:"cpuFeature,omitempty"` + BiosInfo *HostBIOSInfo `xml:"biosInfo,omitempty"` + ReliableMemoryInfo *HostReliableMemoryInfo `xml:"reliableMemoryInfo,omitempty"` + PersistentMemoryInfo *HostPersistentMemoryInfo `xml:"persistentMemoryInfo,omitempty"` +} + +func init() { + t["HostHardwareInfo"] = reflect.TypeOf((*HostHardwareInfo)(nil)).Elem() +} + +type HostHardwareStatusInfo struct { + DynamicData + + MemoryStatusInfo []BaseHostHardwareElementInfo `xml:"memoryStatusInfo,omitempty,typeattr"` + CpuStatusInfo []BaseHostHardwareElementInfo `xml:"cpuStatusInfo,omitempty,typeattr"` + StorageStatusInfo []HostStorageElementInfo `xml:"storageStatusInfo,omitempty"` +} + +func init() { + t["HostHardwareStatusInfo"] = reflect.TypeOf((*HostHardwareStatusInfo)(nil)).Elem() +} + +type HostHardwareSummary struct { + DynamicData + + Vendor string `xml:"vendor"` + Model string `xml:"model"` + Uuid string `xml:"uuid"` + OtherIdentifyingInfo []HostSystemIdentificationInfo `xml:"otherIdentifyingInfo,omitempty"` + MemorySize int64 `xml:"memorySize"` + CpuModel string `xml:"cpuModel"` + CpuMhz int32 `xml:"cpuMhz"` + NumCpuPkgs int16 `xml:"numCpuPkgs"` + NumCpuCores int16 `xml:"numCpuCores"` + NumCpuThreads int16 `xml:"numCpuThreads"` + NumNics int32 `xml:"numNics"` + NumHBAs int32 `xml:"numHBAs"` +} + +func init() { + t["HostHardwareSummary"] = reflect.TypeOf((*HostHardwareSummary)(nil)).Elem() +} + +type HostHasComponentFailure struct { + VimFault + + HostName string `xml:"hostName"` + ComponentType string `xml:"componentType"` + ComponentName string `xml:"componentName"` +} + +func init() { + t["HostHasComponentFailure"] = reflect.TypeOf((*HostHasComponentFailure)(nil)).Elem() +} + +type HostHasComponentFailureFault HostHasComponentFailure + +func init() { + t["HostHasComponentFailureFault"] = reflect.TypeOf((*HostHasComponentFailureFault)(nil)).Elem() +} + +type HostHostBusAdapter struct { + 
DynamicData + + Key string `xml:"key,omitempty"` + Device string `xml:"device"` + Bus int32 `xml:"bus"` + Status string `xml:"status"` + Model string `xml:"model"` + Driver string `xml:"driver,omitempty"` + Pci string `xml:"pci,omitempty"` +} + +func init() { + t["HostHostBusAdapter"] = reflect.TypeOf((*HostHostBusAdapter)(nil)).Elem() +} + +type HostHyperThreadScheduleInfo struct { + DynamicData + + Available bool `xml:"available"` + Active bool `xml:"active"` + Config bool `xml:"config"` +} + +func init() { + t["HostHyperThreadScheduleInfo"] = reflect.TypeOf((*HostHyperThreadScheduleInfo)(nil)).Elem() +} + +type HostImageConfigGetAcceptance HostImageConfigGetAcceptanceRequestType + +func init() { + t["HostImageConfigGetAcceptance"] = reflect.TypeOf((*HostImageConfigGetAcceptance)(nil)).Elem() +} + +type HostImageConfigGetAcceptanceRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["HostImageConfigGetAcceptanceRequestType"] = reflect.TypeOf((*HostImageConfigGetAcceptanceRequestType)(nil)).Elem() +} + +type HostImageConfigGetAcceptanceResponse struct { + Returnval string `xml:"returnval"` +} + +type HostImageConfigGetProfile HostImageConfigGetProfileRequestType + +func init() { + t["HostImageConfigGetProfile"] = reflect.TypeOf((*HostImageConfigGetProfile)(nil)).Elem() +} + +type HostImageConfigGetProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["HostImageConfigGetProfileRequestType"] = reflect.TypeOf((*HostImageConfigGetProfileRequestType)(nil)).Elem() +} + +type HostImageConfigGetProfileResponse struct { + Returnval HostImageProfileSummary `xml:"returnval"` +} + +type HostImageProfileSummary struct { + DynamicData + + Name string `xml:"name"` + Vendor string `xml:"vendor"` +} + +func init() { + t["HostImageProfileSummary"] = reflect.TypeOf((*HostImageProfileSummary)(nil)).Elem() +} + +type HostInAuditModeEvent struct { + HostEvent +} + +func init() { + t["HostInAuditModeEvent"] = reflect.TypeOf((*HostInAuditModeEvent)(nil)).Elem() +} + +type HostInDomain struct { + HostConfigFault +} + +func init() { + t["HostInDomain"] = reflect.TypeOf((*HostInDomain)(nil)).Elem() +} + +type HostInDomainFault HostInDomain + +func init() { + t["HostInDomainFault"] = reflect.TypeOf((*HostInDomainFault)(nil)).Elem() +} + +type HostIncompatibleForFaultTolerance struct { + VmFaultToleranceIssue + + HostName string `xml:"hostName,omitempty"` + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["HostIncompatibleForFaultTolerance"] = reflect.TypeOf((*HostIncompatibleForFaultTolerance)(nil)).Elem() +} + +type HostIncompatibleForFaultToleranceFault HostIncompatibleForFaultTolerance + +func init() { + t["HostIncompatibleForFaultToleranceFault"] = reflect.TypeOf((*HostIncompatibleForFaultToleranceFault)(nil)).Elem() +} + +type HostIncompatibleForRecordReplay struct { + VimFault + + HostName string `xml:"hostName,omitempty"` + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["HostIncompatibleForRecordReplay"] = reflect.TypeOf((*HostIncompatibleForRecordReplay)(nil)).Elem() +} + +type HostIncompatibleForRecordReplayFault HostIncompatibleForRecordReplay + +func init() { + t["HostIncompatibleForRecordReplayFault"] = reflect.TypeOf((*HostIncompatibleForRecordReplayFault)(nil)).Elem() +} + +type HostInflateDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["HostInflateDiskRequestType"] = 
reflect.TypeOf((*HostInflateDiskRequestType)(nil)).Elem() +} + +type HostInflateDisk_Task HostInflateDiskRequestType + +func init() { + t["HostInflateDisk_Task"] = reflect.TypeOf((*HostInflateDisk_Task)(nil)).Elem() +} + +type HostInflateDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type HostInternetScsiHba struct { + HostHostBusAdapter + + IsSoftwareBased bool `xml:"isSoftwareBased"` + CanBeDisabled *bool `xml:"canBeDisabled"` + NetworkBindingSupport HostInternetScsiHbaNetworkBindingSupportType `xml:"networkBindingSupport,omitempty"` + DiscoveryCapabilities HostInternetScsiHbaDiscoveryCapabilities `xml:"discoveryCapabilities"` + DiscoveryProperties HostInternetScsiHbaDiscoveryProperties `xml:"discoveryProperties"` + AuthenticationCapabilities HostInternetScsiHbaAuthenticationCapabilities `xml:"authenticationCapabilities"` + AuthenticationProperties HostInternetScsiHbaAuthenticationProperties `xml:"authenticationProperties"` + DigestCapabilities *HostInternetScsiHbaDigestCapabilities `xml:"digestCapabilities,omitempty"` + DigestProperties *HostInternetScsiHbaDigestProperties `xml:"digestProperties,omitempty"` + IpCapabilities HostInternetScsiHbaIPCapabilities `xml:"ipCapabilities"` + IpProperties HostInternetScsiHbaIPProperties `xml:"ipProperties"` + SupportedAdvancedOptions []OptionDef `xml:"supportedAdvancedOptions,omitempty"` + AdvancedOptions []HostInternetScsiHbaParamValue `xml:"advancedOptions,omitempty"` + IScsiName string `xml:"iScsiName"` + IScsiAlias string `xml:"iScsiAlias,omitempty"` + ConfiguredSendTarget []HostInternetScsiHbaSendTarget `xml:"configuredSendTarget,omitempty"` + ConfiguredStaticTarget []HostInternetScsiHbaStaticTarget `xml:"configuredStaticTarget,omitempty"` + MaxSpeedMb int32 `xml:"maxSpeedMb,omitempty"` + CurrentSpeedMb int32 `xml:"currentSpeedMb,omitempty"` +} + +func init() { + t["HostInternetScsiHba"] = reflect.TypeOf((*HostInternetScsiHba)(nil)).Elem() +} + +type HostInternetScsiHbaAuthenticationCapabilities struct { + DynamicData + + ChapAuthSettable bool `xml:"chapAuthSettable"` + Krb5AuthSettable bool `xml:"krb5AuthSettable"` + SrpAuthSettable bool `xml:"srpAuthSettable"` + SpkmAuthSettable bool `xml:"spkmAuthSettable"` + MutualChapSettable *bool `xml:"mutualChapSettable"` + TargetChapSettable *bool `xml:"targetChapSettable"` + TargetMutualChapSettable *bool `xml:"targetMutualChapSettable"` +} + +func init() { + t["HostInternetScsiHbaAuthenticationCapabilities"] = reflect.TypeOf((*HostInternetScsiHbaAuthenticationCapabilities)(nil)).Elem() +} + +type HostInternetScsiHbaAuthenticationProperties struct { + DynamicData + + ChapAuthEnabled bool `xml:"chapAuthEnabled"` + ChapName string `xml:"chapName,omitempty"` + ChapSecret string `xml:"chapSecret,omitempty"` + ChapAuthenticationType string `xml:"chapAuthenticationType,omitempty"` + ChapInherited *bool `xml:"chapInherited"` + MutualChapName string `xml:"mutualChapName,omitempty"` + MutualChapSecret string `xml:"mutualChapSecret,omitempty"` + MutualChapAuthenticationType string `xml:"mutualChapAuthenticationType,omitempty"` + MutualChapInherited *bool `xml:"mutualChapInherited"` +} + +func init() { + t["HostInternetScsiHbaAuthenticationProperties"] = reflect.TypeOf((*HostInternetScsiHbaAuthenticationProperties)(nil)).Elem() +} + +type HostInternetScsiHbaDigestCapabilities struct { + DynamicData + + HeaderDigestSettable *bool `xml:"headerDigestSettable"` + DataDigestSettable *bool `xml:"dataDigestSettable"` + TargetHeaderDigestSettable *bool 
`xml:"targetHeaderDigestSettable"` + TargetDataDigestSettable *bool `xml:"targetDataDigestSettable"` +} + +func init() { + t["HostInternetScsiHbaDigestCapabilities"] = reflect.TypeOf((*HostInternetScsiHbaDigestCapabilities)(nil)).Elem() +} + +type HostInternetScsiHbaDigestProperties struct { + DynamicData + + HeaderDigestType string `xml:"headerDigestType,omitempty"` + HeaderDigestInherited *bool `xml:"headerDigestInherited"` + DataDigestType string `xml:"dataDigestType,omitempty"` + DataDigestInherited *bool `xml:"dataDigestInherited"` +} + +func init() { + t["HostInternetScsiHbaDigestProperties"] = reflect.TypeOf((*HostInternetScsiHbaDigestProperties)(nil)).Elem() +} + +type HostInternetScsiHbaDiscoveryCapabilities struct { + DynamicData + + ISnsDiscoverySettable bool `xml:"iSnsDiscoverySettable"` + SlpDiscoverySettable bool `xml:"slpDiscoverySettable"` + StaticTargetDiscoverySettable bool `xml:"staticTargetDiscoverySettable"` + SendTargetsDiscoverySettable bool `xml:"sendTargetsDiscoverySettable"` +} + +func init() { + t["HostInternetScsiHbaDiscoveryCapabilities"] = reflect.TypeOf((*HostInternetScsiHbaDiscoveryCapabilities)(nil)).Elem() +} + +type HostInternetScsiHbaDiscoveryProperties struct { + DynamicData + + ISnsDiscoveryEnabled bool `xml:"iSnsDiscoveryEnabled"` + ISnsDiscoveryMethod string `xml:"iSnsDiscoveryMethod,omitempty"` + ISnsHost string `xml:"iSnsHost,omitempty"` + SlpDiscoveryEnabled bool `xml:"slpDiscoveryEnabled"` + SlpDiscoveryMethod string `xml:"slpDiscoveryMethod,omitempty"` + SlpHost string `xml:"slpHost,omitempty"` + StaticTargetDiscoveryEnabled bool `xml:"staticTargetDiscoveryEnabled"` + SendTargetsDiscoveryEnabled bool `xml:"sendTargetsDiscoveryEnabled"` +} + +func init() { + t["HostInternetScsiHbaDiscoveryProperties"] = reflect.TypeOf((*HostInternetScsiHbaDiscoveryProperties)(nil)).Elem() +} + +type HostInternetScsiHbaIPCapabilities struct { + DynamicData + + AddressSettable bool `xml:"addressSettable"` + IpConfigurationMethodSettable bool `xml:"ipConfigurationMethodSettable"` + SubnetMaskSettable bool `xml:"subnetMaskSettable"` + DefaultGatewaySettable bool `xml:"defaultGatewaySettable"` + PrimaryDnsServerAddressSettable bool `xml:"primaryDnsServerAddressSettable"` + AlternateDnsServerAddressSettable bool `xml:"alternateDnsServerAddressSettable"` + Ipv6Supported *bool `xml:"ipv6Supported"` + ArpRedirectSettable *bool `xml:"arpRedirectSettable"` + MtuSettable *bool `xml:"mtuSettable"` + HostNameAsTargetAddress *bool `xml:"hostNameAsTargetAddress"` + NameAliasSettable *bool `xml:"nameAliasSettable"` + Ipv4EnableSettable *bool `xml:"ipv4EnableSettable"` + Ipv6EnableSettable *bool `xml:"ipv6EnableSettable"` + Ipv6PrefixLengthSettable *bool `xml:"ipv6PrefixLengthSettable"` + Ipv6PrefixLength int32 `xml:"ipv6PrefixLength,omitempty"` + Ipv6DhcpConfigurationSettable *bool `xml:"ipv6DhcpConfigurationSettable"` + Ipv6LinkLocalAutoConfigurationSettable *bool `xml:"ipv6LinkLocalAutoConfigurationSettable"` + Ipv6RouterAdvertisementConfigurationSettable *bool `xml:"ipv6RouterAdvertisementConfigurationSettable"` + Ipv6DefaultGatewaySettable *bool `xml:"ipv6DefaultGatewaySettable"` + Ipv6MaxStaticAddressesSupported int32 `xml:"ipv6MaxStaticAddressesSupported,omitempty"` +} + +func init() { + t["HostInternetScsiHbaIPCapabilities"] = reflect.TypeOf((*HostInternetScsiHbaIPCapabilities)(nil)).Elem() +} + +type HostInternetScsiHbaIPProperties struct { + DynamicData + + Mac string `xml:"mac,omitempty"` + Address string `xml:"address,omitempty"` + DhcpConfigurationEnabled bool 
`xml:"dhcpConfigurationEnabled"` + SubnetMask string `xml:"subnetMask,omitempty"` + DefaultGateway string `xml:"defaultGateway,omitempty"` + PrimaryDnsServerAddress string `xml:"primaryDnsServerAddress,omitempty"` + AlternateDnsServerAddress string `xml:"alternateDnsServerAddress,omitempty"` + Ipv6Address string `xml:"ipv6Address,omitempty"` + Ipv6SubnetMask string `xml:"ipv6SubnetMask,omitempty"` + Ipv6DefaultGateway string `xml:"ipv6DefaultGateway,omitempty"` + ArpRedirectEnabled *bool `xml:"arpRedirectEnabled"` + Mtu int32 `xml:"mtu,omitempty"` + JumboFramesEnabled *bool `xml:"jumboFramesEnabled"` + Ipv4Enabled *bool `xml:"ipv4Enabled"` + Ipv6Enabled *bool `xml:"ipv6Enabled"` + Ipv6properties *HostInternetScsiHbaIPv6Properties `xml:"ipv6properties,omitempty"` +} + +func init() { + t["HostInternetScsiHbaIPProperties"] = reflect.TypeOf((*HostInternetScsiHbaIPProperties)(nil)).Elem() +} + +type HostInternetScsiHbaIPv6Properties struct { + DynamicData + + IscsiIpv6Address []HostInternetScsiHbaIscsiIpv6Address `xml:"iscsiIpv6Address,omitempty"` + Ipv6DhcpConfigurationEnabled *bool `xml:"ipv6DhcpConfigurationEnabled"` + Ipv6LinkLocalAutoConfigurationEnabled *bool `xml:"ipv6LinkLocalAutoConfigurationEnabled"` + Ipv6RouterAdvertisementConfigurationEnabled *bool `xml:"ipv6RouterAdvertisementConfigurationEnabled"` + Ipv6DefaultGateway string `xml:"ipv6DefaultGateway,omitempty"` +} + +func init() { + t["HostInternetScsiHbaIPv6Properties"] = reflect.TypeOf((*HostInternetScsiHbaIPv6Properties)(nil)).Elem() +} + +type HostInternetScsiHbaIscsiIpv6Address struct { + DynamicData + + Address string `xml:"address"` + PrefixLength int32 `xml:"prefixLength"` + Origin string `xml:"origin"` + Operation string `xml:"operation,omitempty"` +} + +func init() { + t["HostInternetScsiHbaIscsiIpv6Address"] = reflect.TypeOf((*HostInternetScsiHbaIscsiIpv6Address)(nil)).Elem() +} + +type HostInternetScsiHbaParamValue struct { + OptionValue + + IsInherited *bool `xml:"isInherited"` +} + +func init() { + t["HostInternetScsiHbaParamValue"] = reflect.TypeOf((*HostInternetScsiHbaParamValue)(nil)).Elem() +} + +type HostInternetScsiHbaSendTarget struct { + DynamicData + + Address string `xml:"address"` + Port int32 `xml:"port,omitempty"` + AuthenticationProperties *HostInternetScsiHbaAuthenticationProperties `xml:"authenticationProperties,omitempty"` + DigestProperties *HostInternetScsiHbaDigestProperties `xml:"digestProperties,omitempty"` + SupportedAdvancedOptions []OptionDef `xml:"supportedAdvancedOptions,omitempty"` + AdvancedOptions []HostInternetScsiHbaParamValue `xml:"advancedOptions,omitempty"` + Parent string `xml:"parent,omitempty"` +} + +func init() { + t["HostInternetScsiHbaSendTarget"] = reflect.TypeOf((*HostInternetScsiHbaSendTarget)(nil)).Elem() +} + +type HostInternetScsiHbaStaticTarget struct { + DynamicData + + Address string `xml:"address"` + Port int32 `xml:"port,omitempty"` + IScsiName string `xml:"iScsiName"` + DiscoveryMethod string `xml:"discoveryMethod,omitempty"` + AuthenticationProperties *HostInternetScsiHbaAuthenticationProperties `xml:"authenticationProperties,omitempty"` + DigestProperties *HostInternetScsiHbaDigestProperties `xml:"digestProperties,omitempty"` + SupportedAdvancedOptions []OptionDef `xml:"supportedAdvancedOptions,omitempty"` + AdvancedOptions []HostInternetScsiHbaParamValue `xml:"advancedOptions,omitempty"` + Parent string `xml:"parent,omitempty"` +} + +func init() { + t["HostInternetScsiHbaStaticTarget"] = reflect.TypeOf((*HostInternetScsiHbaStaticTarget)(nil)).Elem() +} + +type 
HostInternetScsiHbaTargetSet struct { + DynamicData + + StaticTargets []HostInternetScsiHbaStaticTarget `xml:"staticTargets,omitempty"` + SendTargets []HostInternetScsiHbaSendTarget `xml:"sendTargets,omitempty"` +} + +func init() { + t["HostInternetScsiHbaTargetSet"] = reflect.TypeOf((*HostInternetScsiHbaTargetSet)(nil)).Elem() +} + +type HostInternetScsiTargetTransport struct { + HostTargetTransport + + IScsiName string `xml:"iScsiName"` + IScsiAlias string `xml:"iScsiAlias"` + Address []string `xml:"address,omitempty"` +} + +func init() { + t["HostInternetScsiTargetTransport"] = reflect.TypeOf((*HostInternetScsiTargetTransport)(nil)).Elem() +} + +type HostInventoryFull struct { + NotEnoughLicenses + + Capacity int32 `xml:"capacity"` +} + +func init() { + t["HostInventoryFull"] = reflect.TypeOf((*HostInventoryFull)(nil)).Elem() +} + +type HostInventoryFullEvent struct { + LicenseEvent + + Capacity int32 `xml:"capacity"` +} + +func init() { + t["HostInventoryFullEvent"] = reflect.TypeOf((*HostInventoryFullEvent)(nil)).Elem() +} + +type HostInventoryFullFault HostInventoryFull + +func init() { + t["HostInventoryFullFault"] = reflect.TypeOf((*HostInventoryFullFault)(nil)).Elem() +} + +type HostInventoryUnreadableEvent struct { + Event +} + +func init() { + t["HostInventoryUnreadableEvent"] = reflect.TypeOf((*HostInventoryUnreadableEvent)(nil)).Elem() +} + +type HostIoFilterInfo struct { + IoFilterInfo + + Available bool `xml:"available"` +} + +func init() { + t["HostIoFilterInfo"] = reflect.TypeOf((*HostIoFilterInfo)(nil)).Elem() +} + +type HostIpChangedEvent struct { + HostEvent + + OldIP string `xml:"oldIP"` + NewIP string `xml:"newIP"` +} + +func init() { + t["HostIpChangedEvent"] = reflect.TypeOf((*HostIpChangedEvent)(nil)).Elem() +} + +type HostIpConfig struct { + DynamicData + + Dhcp bool `xml:"dhcp"` + IpAddress string `xml:"ipAddress,omitempty"` + SubnetMask string `xml:"subnetMask,omitempty"` + IpV6Config *HostIpConfigIpV6AddressConfiguration `xml:"ipV6Config,omitempty"` +} + +func init() { + t["HostIpConfig"] = reflect.TypeOf((*HostIpConfig)(nil)).Elem() +} + +type HostIpConfigIpV6Address struct { + DynamicData + + IpAddress string `xml:"ipAddress"` + PrefixLength int32 `xml:"prefixLength"` + Origin string `xml:"origin,omitempty"` + DadState string `xml:"dadState,omitempty"` + Lifetime *time.Time `xml:"lifetime"` + Operation string `xml:"operation,omitempty"` +} + +func init() { + t["HostIpConfigIpV6Address"] = reflect.TypeOf((*HostIpConfigIpV6Address)(nil)).Elem() +} + +type HostIpConfigIpV6AddressConfiguration struct { + DynamicData + + IpV6Address []HostIpConfigIpV6Address `xml:"ipV6Address,omitempty"` + AutoConfigurationEnabled *bool `xml:"autoConfigurationEnabled"` + DhcpV6Enabled *bool `xml:"dhcpV6Enabled"` +} + +func init() { + t["HostIpConfigIpV6AddressConfiguration"] = reflect.TypeOf((*HostIpConfigIpV6AddressConfiguration)(nil)).Elem() +} + +type HostIpInconsistentEvent struct { + HostEvent + + IpAddress string `xml:"ipAddress"` + IpAddress2 string `xml:"ipAddress2"` +} + +func init() { + t["HostIpInconsistentEvent"] = reflect.TypeOf((*HostIpInconsistentEvent)(nil)).Elem() +} + +type HostIpRouteConfig struct { + DynamicData + + DefaultGateway string `xml:"defaultGateway,omitempty"` + GatewayDevice string `xml:"gatewayDevice,omitempty"` + IpV6DefaultGateway string `xml:"ipV6DefaultGateway,omitempty"` + IpV6GatewayDevice string `xml:"ipV6GatewayDevice,omitempty"` +} + +func init() { + t["HostIpRouteConfig"] = reflect.TypeOf((*HostIpRouteConfig)(nil)).Elem() +} + +type 
HostIpRouteConfigSpec struct { + HostIpRouteConfig + + GatewayDeviceConnection *HostVirtualNicConnection `xml:"gatewayDeviceConnection,omitempty"` + IpV6GatewayDeviceConnection *HostVirtualNicConnection `xml:"ipV6GatewayDeviceConnection,omitempty"` +} + +func init() { + t["HostIpRouteConfigSpec"] = reflect.TypeOf((*HostIpRouteConfigSpec)(nil)).Elem() +} + +type HostIpRouteEntry struct { + DynamicData + + Network string `xml:"network"` + PrefixLength int32 `xml:"prefixLength"` + Gateway string `xml:"gateway"` + DeviceName string `xml:"deviceName,omitempty"` +} + +func init() { + t["HostIpRouteEntry"] = reflect.TypeOf((*HostIpRouteEntry)(nil)).Elem() +} + +type HostIpRouteOp struct { + DynamicData + + ChangeOperation string `xml:"changeOperation"` + Route HostIpRouteEntry `xml:"route"` +} + +func init() { + t["HostIpRouteOp"] = reflect.TypeOf((*HostIpRouteOp)(nil)).Elem() +} + +type HostIpRouteTableConfig struct { + DynamicData + + IpRoute []HostIpRouteOp `xml:"ipRoute,omitempty"` + Ipv6Route []HostIpRouteOp `xml:"ipv6Route,omitempty"` +} + +func init() { + t["HostIpRouteTableConfig"] = reflect.TypeOf((*HostIpRouteTableConfig)(nil)).Elem() +} + +type HostIpRouteTableInfo struct { + DynamicData + + IpRoute []HostIpRouteEntry `xml:"ipRoute,omitempty"` + Ipv6Route []HostIpRouteEntry `xml:"ipv6Route,omitempty"` +} + +func init() { + t["HostIpRouteTableInfo"] = reflect.TypeOf((*HostIpRouteTableInfo)(nil)).Elem() +} + +type HostIpToShortNameFailedEvent struct { + HostEvent +} + +func init() { + t["HostIpToShortNameFailedEvent"] = reflect.TypeOf((*HostIpToShortNameFailedEvent)(nil)).Elem() +} + +type HostIpmiInfo struct { + DynamicData + + BmcIpAddress string `xml:"bmcIpAddress,omitempty"` + BmcMacAddress string `xml:"bmcMacAddress,omitempty"` + Login string `xml:"login,omitempty"` + Password string `xml:"password,omitempty"` +} + +func init() { + t["HostIpmiInfo"] = reflect.TypeOf((*HostIpmiInfo)(nil)).Elem() +} + +type HostIsolationIpPingFailedEvent struct { + HostDasEvent + + IsolationIp string `xml:"isolationIp"` +} + +func init() { + t["HostIsolationIpPingFailedEvent"] = reflect.TypeOf((*HostIsolationIpPingFailedEvent)(nil)).Elem() +} + +type HostLicensableResourceInfo struct { + DynamicData + + Resource []KeyAnyValue `xml:"resource"` +} + +func init() { + t["HostLicensableResourceInfo"] = reflect.TypeOf((*HostLicensableResourceInfo)(nil)).Elem() +} + +type HostLicenseConnectInfo struct { + DynamicData + + License LicenseManagerLicenseInfo `xml:"license"` + Evaluation LicenseManagerEvaluationInfo `xml:"evaluation"` + Resource *HostLicensableResourceInfo `xml:"resource,omitempty"` +} + +func init() { + t["HostLicenseConnectInfo"] = reflect.TypeOf((*HostLicenseConnectInfo)(nil)).Elem() +} + +type HostLicenseExpiredEvent struct { + LicenseEvent +} + +func init() { + t["HostLicenseExpiredEvent"] = reflect.TypeOf((*HostLicenseExpiredEvent)(nil)).Elem() +} + +type HostLicenseSpec struct { + DynamicData + + Source BaseLicenseSource `xml:"source,omitempty,typeattr"` + EditionKey string `xml:"editionKey,omitempty"` + DisabledFeatureKey []string `xml:"disabledFeatureKey,omitempty"` + EnabledFeatureKey []string `xml:"enabledFeatureKey,omitempty"` +} + +func init() { + t["HostLicenseSpec"] = reflect.TypeOf((*HostLicenseSpec)(nil)).Elem() +} + +type HostListSummary struct { + DynamicData + + Host *ManagedObjectReference `xml:"host,omitempty"` + Hardware *HostHardwareSummary `xml:"hardware,omitempty"` + Runtime *HostRuntimeInfo `xml:"runtime,omitempty"` + Config HostConfigSummary `xml:"config"` + QuickStats 
HostListSummaryQuickStats `xml:"quickStats"` + OverallStatus ManagedEntityStatus `xml:"overallStatus"` + RebootRequired bool `xml:"rebootRequired"` + CustomValue []BaseCustomFieldValue `xml:"customValue,omitempty,typeattr"` + ManagementServerIp string `xml:"managementServerIp,omitempty"` + MaxEVCModeKey string `xml:"maxEVCModeKey,omitempty"` + CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty"` + Gateway *HostListSummaryGatewaySummary `xml:"gateway,omitempty"` + TpmAttestation *HostTpmAttestationInfo `xml:"tpmAttestation,omitempty"` +} + +func init() { + t["HostListSummary"] = reflect.TypeOf((*HostListSummary)(nil)).Elem() +} + +type HostListSummaryGatewaySummary struct { + DynamicData + + GatewayType string `xml:"gatewayType"` + GatewayId string `xml:"gatewayId"` +} + +func init() { + t["HostListSummaryGatewaySummary"] = reflect.TypeOf((*HostListSummaryGatewaySummary)(nil)).Elem() +} + +type HostListSummaryQuickStats struct { + DynamicData + + OverallCpuUsage int32 `xml:"overallCpuUsage,omitempty"` + OverallMemoryUsage int32 `xml:"overallMemoryUsage,omitempty"` + DistributedCpuFairness int32 `xml:"distributedCpuFairness,omitempty"` + DistributedMemoryFairness int32 `xml:"distributedMemoryFairness,omitempty"` + AvailablePMemCapacity int32 `xml:"availablePMemCapacity,omitempty"` + Uptime int32 `xml:"uptime,omitempty"` +} + +func init() { + t["HostListSummaryQuickStats"] = reflect.TypeOf((*HostListSummaryQuickStats)(nil)).Elem() +} + +type HostListVStorageObject HostListVStorageObjectRequestType + +func init() { + t["HostListVStorageObject"] = reflect.TypeOf((*HostListVStorageObject)(nil)).Elem() +} + +type HostListVStorageObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["HostListVStorageObjectRequestType"] = reflect.TypeOf((*HostListVStorageObjectRequestType)(nil)).Elem() +} + +type HostListVStorageObjectResponse struct { + Returnval []ID `xml:"returnval,omitempty"` +} + +type HostLocalAuthenticationInfo struct { + HostAuthenticationStoreInfo +} + +func init() { + t["HostLocalAuthenticationInfo"] = reflect.TypeOf((*HostLocalAuthenticationInfo)(nil)).Elem() +} + +type HostLocalFileSystemVolume struct { + HostFileSystemVolume + + Device string `xml:"device"` +} + +func init() { + t["HostLocalFileSystemVolume"] = reflect.TypeOf((*HostLocalFileSystemVolume)(nil)).Elem() +} + +type HostLocalFileSystemVolumeSpec struct { + DynamicData + + Device string `xml:"device"` + LocalPath string `xml:"localPath"` +} + +func init() { + t["HostLocalFileSystemVolumeSpec"] = reflect.TypeOf((*HostLocalFileSystemVolumeSpec)(nil)).Elem() +} + +type HostLocalPortCreatedEvent struct { + DvsEvent + + HostLocalPort DVSHostLocalPortInfo `xml:"hostLocalPort"` +} + +func init() { + t["HostLocalPortCreatedEvent"] = reflect.TypeOf((*HostLocalPortCreatedEvent)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerDiskLayoutSpec struct { + DynamicData + + ControllerType string `xml:"controllerType"` + BusNumber int32 `xml:"busNumber"` + UnitNumber *int32 `xml:"unitNumber"` + SrcFilename string `xml:"srcFilename"` + DstFilename string `xml:"dstFilename"` +} + +func init() { + t["HostLowLevelProvisioningManagerDiskLayoutSpec"] = reflect.TypeOf((*HostLowLevelProvisioningManagerDiskLayoutSpec)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerFileDeleteResult struct { + DynamicData + + FileName string `xml:"fileName"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + 
t["HostLowLevelProvisioningManagerFileDeleteResult"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileDeleteResult)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerFileDeleteSpec struct { + DynamicData + + FileName string `xml:"fileName"` + FileType string `xml:"fileType"` +} + +func init() { + t["HostLowLevelProvisioningManagerFileDeleteSpec"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileDeleteSpec)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerFileReserveResult struct { + DynamicData + + BaseName string `xml:"baseName"` + ParentDir string `xml:"parentDir"` + ReservedName string `xml:"reservedName"` +} + +func init() { + t["HostLowLevelProvisioningManagerFileReserveResult"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileReserveResult)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerFileReserveSpec struct { + DynamicData + + BaseName string `xml:"baseName"` + ParentDir string `xml:"parentDir"` + FileType string `xml:"fileType"` + StorageProfile string `xml:"storageProfile"` +} + +func init() { + t["HostLowLevelProvisioningManagerFileReserveSpec"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileReserveSpec)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerSnapshotLayoutSpec struct { + DynamicData + + Id int32 `xml:"id"` + SrcFilename string `xml:"srcFilename"` + DstFilename string `xml:"dstFilename"` + Disk []HostLowLevelProvisioningManagerDiskLayoutSpec `xml:"disk,omitempty"` +} + +func init() { + t["HostLowLevelProvisioningManagerSnapshotLayoutSpec"] = reflect.TypeOf((*HostLowLevelProvisioningManagerSnapshotLayoutSpec)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerVmMigrationStatus struct { + DynamicData + + MigrationId int64 `xml:"migrationId"` + Type string `xml:"type"` + Source bool `xml:"source"` + ConsideredSuccessful bool `xml:"consideredSuccessful"` +} + +func init() { + t["HostLowLevelProvisioningManagerVmMigrationStatus"] = reflect.TypeOf((*HostLowLevelProvisioningManagerVmMigrationStatus)(nil)).Elem() +} + +type HostLowLevelProvisioningManagerVmRecoveryInfo struct { + DynamicData + + Version string `xml:"version"` + BiosUUID string `xml:"biosUUID"` + InstanceUUID string `xml:"instanceUUID"` + FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr"` +} + +func init() { + t["HostLowLevelProvisioningManagerVmRecoveryInfo"] = reflect.TypeOf((*HostLowLevelProvisioningManagerVmRecoveryInfo)(nil)).Elem() +} + +type HostMaintenanceSpec struct { + DynamicData + + VsanMode *VsanHostDecommissionMode `xml:"vsanMode,omitempty"` +} + +func init() { + t["HostMaintenanceSpec"] = reflect.TypeOf((*HostMaintenanceSpec)(nil)).Elem() +} + +type HostMemberHealthCheckResult struct { + DynamicData + + Summary string `xml:"summary,omitempty"` +} + +func init() { + t["HostMemberHealthCheckResult"] = reflect.TypeOf((*HostMemberHealthCheckResult)(nil)).Elem() +} + +type HostMemberRuntimeInfo struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Status string `xml:"status,omitempty"` + StatusDetail string `xml:"statusDetail,omitempty"` + HealthCheckResult []BaseHostMemberHealthCheckResult `xml:"healthCheckResult,omitempty,typeattr"` +} + +func init() { + t["HostMemberRuntimeInfo"] = reflect.TypeOf((*HostMemberRuntimeInfo)(nil)).Elem() +} + +type HostMemberUplinkHealthCheckResult struct { + HostMemberHealthCheckResult + + UplinkPortKey string `xml:"uplinkPortKey"` +} + +func init() { + t["HostMemberUplinkHealthCheckResult"] = reflect.TypeOf((*HostMemberUplinkHealthCheckResult)(nil)).Elem() +} + +type 
HostMemoryProfile struct { + ApplyProfile +} + +func init() { + t["HostMemoryProfile"] = reflect.TypeOf((*HostMemoryProfile)(nil)).Elem() +} + +type HostMemorySpec struct { + DynamicData + + ServiceConsoleReservation int64 `xml:"serviceConsoleReservation,omitempty"` +} + +func init() { + t["HostMemorySpec"] = reflect.TypeOf((*HostMemorySpec)(nil)).Elem() +} + +type HostMissingNetworksEvent struct { + HostDasEvent + + Ips string `xml:"ips,omitempty"` +} + +func init() { + t["HostMissingNetworksEvent"] = reflect.TypeOf((*HostMissingNetworksEvent)(nil)).Elem() +} + +type HostMonitoringStateChangedEvent struct { + ClusterEvent + + State string `xml:"state"` + PrevState string `xml:"prevState,omitempty"` +} + +func init() { + t["HostMonitoringStateChangedEvent"] = reflect.TypeOf((*HostMonitoringStateChangedEvent)(nil)).Elem() +} + +type HostMountInfo struct { + DynamicData + + Path string `xml:"path,omitempty"` + AccessMode string `xml:"accessMode"` + Mounted *bool `xml:"mounted"` + Accessible *bool `xml:"accessible"` + InaccessibleReason string `xml:"inaccessibleReason,omitempty"` +} + +func init() { + t["HostMountInfo"] = reflect.TypeOf((*HostMountInfo)(nil)).Elem() +} + +type HostMultipathInfo struct { + DynamicData + + Lun []HostMultipathInfoLogicalUnit `xml:"lun,omitempty"` +} + +func init() { + t["HostMultipathInfo"] = reflect.TypeOf((*HostMultipathInfo)(nil)).Elem() +} + +type HostMultipathInfoFixedLogicalUnitPolicy struct { + HostMultipathInfoLogicalUnitPolicy + + Prefer string `xml:"prefer"` +} + +func init() { + t["HostMultipathInfoFixedLogicalUnitPolicy"] = reflect.TypeOf((*HostMultipathInfoFixedLogicalUnitPolicy)(nil)).Elem() +} + +type HostMultipathInfoLogicalUnit struct { + DynamicData + + Key string `xml:"key"` + Id string `xml:"id"` + Lun string `xml:"lun"` + Path []HostMultipathInfoPath `xml:"path"` + Policy BaseHostMultipathInfoLogicalUnitPolicy `xml:"policy,typeattr"` + StorageArrayTypePolicy *HostMultipathInfoLogicalUnitStorageArrayTypePolicy `xml:"storageArrayTypePolicy,omitempty"` +} + +func init() { + t["HostMultipathInfoLogicalUnit"] = reflect.TypeOf((*HostMultipathInfoLogicalUnit)(nil)).Elem() +} + +type HostMultipathInfoLogicalUnitPolicy struct { + DynamicData + + Policy string `xml:"policy"` +} + +func init() { + t["HostMultipathInfoLogicalUnitPolicy"] = reflect.TypeOf((*HostMultipathInfoLogicalUnitPolicy)(nil)).Elem() +} + +type HostMultipathInfoLogicalUnitStorageArrayTypePolicy struct { + DynamicData + + Policy string `xml:"policy"` +} + +func init() { + t["HostMultipathInfoLogicalUnitStorageArrayTypePolicy"] = reflect.TypeOf((*HostMultipathInfoLogicalUnitStorageArrayTypePolicy)(nil)).Elem() +} + +type HostMultipathInfoPath struct { + DynamicData + + Key string `xml:"key"` + Name string `xml:"name"` + PathState string `xml:"pathState"` + State string `xml:"state,omitempty"` + IsWorkingPath *bool `xml:"isWorkingPath"` + Adapter string `xml:"adapter"` + Lun string `xml:"lun"` + Transport BaseHostTargetTransport `xml:"transport,omitempty,typeattr"` +} + +func init() { + t["HostMultipathInfoPath"] = reflect.TypeOf((*HostMultipathInfoPath)(nil)).Elem() +} + +type HostMultipathStateInfo struct { + DynamicData + + Path []HostMultipathStateInfoPath `xml:"path,omitempty"` +} + +func init() { + t["HostMultipathStateInfo"] = reflect.TypeOf((*HostMultipathStateInfo)(nil)).Elem() +} + +type HostMultipathStateInfoPath struct { + DynamicData + + Name string `xml:"name"` + PathState string `xml:"pathState"` +} + +func init() { + t["HostMultipathStateInfoPath"] = 
reflect.TypeOf((*HostMultipathStateInfoPath)(nil)).Elem() +} + +type HostNasVolume struct { + HostFileSystemVolume + + RemoteHost string `xml:"remoteHost"` + RemotePath string `xml:"remotePath"` + UserName string `xml:"userName,omitempty"` + RemoteHostNames []string `xml:"remoteHostNames,omitempty"` + SecurityType string `xml:"securityType,omitempty"` + ProtocolEndpoint *bool `xml:"protocolEndpoint"` +} + +func init() { + t["HostNasVolume"] = reflect.TypeOf((*HostNasVolume)(nil)).Elem() +} + +type HostNasVolumeConfig struct { + DynamicData + + ChangeOperation string `xml:"changeOperation,omitempty"` + Spec *HostNasVolumeSpec `xml:"spec,omitempty"` +} + +func init() { + t["HostNasVolumeConfig"] = reflect.TypeOf((*HostNasVolumeConfig)(nil)).Elem() +} + +type HostNasVolumeSpec struct { + DynamicData + + RemoteHost string `xml:"remoteHost"` + RemotePath string `xml:"remotePath"` + LocalPath string `xml:"localPath"` + AccessMode string `xml:"accessMode"` + Type string `xml:"type,omitempty"` + UserName string `xml:"userName,omitempty"` + Password string `xml:"password,omitempty"` + RemoteHostNames []string `xml:"remoteHostNames,omitempty"` + SecurityType string `xml:"securityType,omitempty"` +} + +func init() { + t["HostNasVolumeSpec"] = reflect.TypeOf((*HostNasVolumeSpec)(nil)).Elem() +} + +type HostNasVolumeUserInfo struct { + DynamicData + + User string `xml:"user"` +} + +func init() { + t["HostNasVolumeUserInfo"] = reflect.TypeOf((*HostNasVolumeUserInfo)(nil)).Elem() +} + +type HostNatService struct { + DynamicData + + Key string `xml:"key"` + Spec HostNatServiceSpec `xml:"spec"` +} + +func init() { + t["HostNatService"] = reflect.TypeOf((*HostNatService)(nil)).Elem() +} + +type HostNatServiceConfig struct { + DynamicData + + ChangeOperation string `xml:"changeOperation,omitempty"` + Key string `xml:"key"` + Spec HostNatServiceSpec `xml:"spec"` +} + +func init() { + t["HostNatServiceConfig"] = reflect.TypeOf((*HostNatServiceConfig)(nil)).Elem() +} + +type HostNatServiceNameServiceSpec struct { + DynamicData + + DnsAutoDetect bool `xml:"dnsAutoDetect"` + DnsPolicy string `xml:"dnsPolicy"` + DnsRetries int32 `xml:"dnsRetries"` + DnsTimeout int32 `xml:"dnsTimeout"` + DnsNameServer []string `xml:"dnsNameServer,omitempty"` + NbdsTimeout int32 `xml:"nbdsTimeout"` + NbnsRetries int32 `xml:"nbnsRetries"` + NbnsTimeout int32 `xml:"nbnsTimeout"` +} + +func init() { + t["HostNatServiceNameServiceSpec"] = reflect.TypeOf((*HostNatServiceNameServiceSpec)(nil)).Elem() +} + +type HostNatServicePortForwardSpec struct { + DynamicData + + Type string `xml:"type"` + Name string `xml:"name"` + HostPort int32 `xml:"hostPort"` + GuestPort int32 `xml:"guestPort"` + GuestIpAddress string `xml:"guestIpAddress"` +} + +func init() { + t["HostNatServicePortForwardSpec"] = reflect.TypeOf((*HostNatServicePortForwardSpec)(nil)).Elem() +} + +type HostNatServiceSpec struct { + DynamicData + + VirtualSwitch string `xml:"virtualSwitch"` + ActiveFtp bool `xml:"activeFtp"` + AllowAnyOui bool `xml:"allowAnyOui"` + ConfigPort bool `xml:"configPort"` + IpGatewayAddress string `xml:"ipGatewayAddress"` + UdpTimeout int32 `xml:"udpTimeout"` + PortForward []HostNatServicePortForwardSpec `xml:"portForward,omitempty"` + NameService *HostNatServiceNameServiceSpec `xml:"nameService,omitempty"` +} + +func init() { + t["HostNatServiceSpec"] = reflect.TypeOf((*HostNatServiceSpec)(nil)).Elem() +} + +type HostNetCapabilities struct { + DynamicData + + CanSetPhysicalNicLinkSpeed bool `xml:"canSetPhysicalNicLinkSpeed"` + SupportsNicTeaming bool 
`xml:"supportsNicTeaming"` + NicTeamingPolicy []string `xml:"nicTeamingPolicy,omitempty"` + SupportsVlan bool `xml:"supportsVlan"` + UsesServiceConsoleNic bool `xml:"usesServiceConsoleNic"` + SupportsNetworkHints bool `xml:"supportsNetworkHints"` + MaxPortGroupsPerVswitch int32 `xml:"maxPortGroupsPerVswitch,omitempty"` + VswitchConfigSupported bool `xml:"vswitchConfigSupported"` + VnicConfigSupported bool `xml:"vnicConfigSupported"` + IpRouteConfigSupported bool `xml:"ipRouteConfigSupported"` + DnsConfigSupported bool `xml:"dnsConfigSupported"` + DhcpOnVnicSupported bool `xml:"dhcpOnVnicSupported"` + IpV6Supported *bool `xml:"ipV6Supported"` +} + +func init() { + t["HostNetCapabilities"] = reflect.TypeOf((*HostNetCapabilities)(nil)).Elem() +} + +type HostNetOffloadCapabilities struct { + DynamicData + + CsumOffload *bool `xml:"csumOffload"` + TcpSegmentation *bool `xml:"tcpSegmentation"` + ZeroCopyXmit *bool `xml:"zeroCopyXmit"` +} + +func init() { + t["HostNetOffloadCapabilities"] = reflect.TypeOf((*HostNetOffloadCapabilities)(nil)).Elem() +} + +type HostNetStackInstance struct { + DynamicData + + Key string `xml:"key,omitempty"` + Name string `xml:"name,omitempty"` + DnsConfig BaseHostDnsConfig `xml:"dnsConfig,omitempty,typeattr"` + IpRouteConfig BaseHostIpRouteConfig `xml:"ipRouteConfig,omitempty,typeattr"` + RequestedMaxNumberOfConnections int32 `xml:"requestedMaxNumberOfConnections,omitempty"` + CongestionControlAlgorithm string `xml:"congestionControlAlgorithm,omitempty"` + IpV6Enabled *bool `xml:"ipV6Enabled"` + RouteTableConfig *HostIpRouteTableConfig `xml:"routeTableConfig,omitempty"` +} + +func init() { + t["HostNetStackInstance"] = reflect.TypeOf((*HostNetStackInstance)(nil)).Elem() +} + +type HostNetworkConfig struct { + DynamicData + + Vswitch []HostVirtualSwitchConfig `xml:"vswitch,omitempty"` + ProxySwitch []HostProxySwitchConfig `xml:"proxySwitch,omitempty"` + Portgroup []HostPortGroupConfig `xml:"portgroup,omitempty"` + Pnic []PhysicalNicConfig `xml:"pnic,omitempty"` + Vnic []HostVirtualNicConfig `xml:"vnic,omitempty"` + ConsoleVnic []HostVirtualNicConfig `xml:"consoleVnic,omitempty"` + DnsConfig BaseHostDnsConfig `xml:"dnsConfig,omitempty,typeattr"` + IpRouteConfig BaseHostIpRouteConfig `xml:"ipRouteConfig,omitempty,typeattr"` + ConsoleIpRouteConfig BaseHostIpRouteConfig `xml:"consoleIpRouteConfig,omitempty,typeattr"` + RouteTableConfig *HostIpRouteTableConfig `xml:"routeTableConfig,omitempty"` + Dhcp []HostDhcpServiceConfig `xml:"dhcp,omitempty"` + Nat []HostNatServiceConfig `xml:"nat,omitempty"` + IpV6Enabled *bool `xml:"ipV6Enabled"` + NetStackSpec []HostNetworkConfigNetStackSpec `xml:"netStackSpec,omitempty"` +} + +func init() { + t["HostNetworkConfig"] = reflect.TypeOf((*HostNetworkConfig)(nil)).Elem() +} + +type HostNetworkConfigNetStackSpec struct { + DynamicData + + NetStackInstance HostNetStackInstance `xml:"netStackInstance"` + Operation string `xml:"operation,omitempty"` +} + +func init() { + t["HostNetworkConfigNetStackSpec"] = reflect.TypeOf((*HostNetworkConfigNetStackSpec)(nil)).Elem() +} + +type HostNetworkConfigResult struct { + DynamicData + + VnicDevice []string `xml:"vnicDevice,omitempty"` + ConsoleVnicDevice []string `xml:"consoleVnicDevice,omitempty"` +} + +func init() { + t["HostNetworkConfigResult"] = reflect.TypeOf((*HostNetworkConfigResult)(nil)).Elem() +} + +type HostNetworkInfo struct { + DynamicData + + Vswitch []HostVirtualSwitch `xml:"vswitch,omitempty"` + ProxySwitch []HostProxySwitch `xml:"proxySwitch,omitempty"` + Portgroup []HostPortGroup 
`xml:"portgroup,omitempty"` + Pnic []PhysicalNic `xml:"pnic,omitempty"` + Vnic []HostVirtualNic `xml:"vnic,omitempty"` + ConsoleVnic []HostVirtualNic `xml:"consoleVnic,omitempty"` + DnsConfig BaseHostDnsConfig `xml:"dnsConfig,omitempty,typeattr"` + IpRouteConfig BaseHostIpRouteConfig `xml:"ipRouteConfig,omitempty,typeattr"` + ConsoleIpRouteConfig BaseHostIpRouteConfig `xml:"consoleIpRouteConfig,omitempty,typeattr"` + RouteTableInfo *HostIpRouteTableInfo `xml:"routeTableInfo,omitempty"` + Dhcp []HostDhcpService `xml:"dhcp,omitempty"` + Nat []HostNatService `xml:"nat,omitempty"` + IpV6Enabled *bool `xml:"ipV6Enabled"` + AtBootIpV6Enabled *bool `xml:"atBootIpV6Enabled"` + NetStackInstance []HostNetStackInstance `xml:"netStackInstance,omitempty"` + OpaqueSwitch []HostOpaqueSwitch `xml:"opaqueSwitch,omitempty"` + OpaqueNetwork []HostOpaqueNetworkInfo `xml:"opaqueNetwork,omitempty"` +} + +func init() { + t["HostNetworkInfo"] = reflect.TypeOf((*HostNetworkInfo)(nil)).Elem() +} + +type HostNetworkPolicy struct { + DynamicData + + Security *HostNetworkSecurityPolicy `xml:"security,omitempty"` + NicTeaming *HostNicTeamingPolicy `xml:"nicTeaming,omitempty"` + OffloadPolicy *HostNetOffloadCapabilities `xml:"offloadPolicy,omitempty"` + ShapingPolicy *HostNetworkTrafficShapingPolicy `xml:"shapingPolicy,omitempty"` +} + +func init() { + t["HostNetworkPolicy"] = reflect.TypeOf((*HostNetworkPolicy)(nil)).Elem() +} + +type HostNetworkResourceRuntime struct { + DynamicData + + PnicResourceInfo []HostPnicNetworkResourceInfo `xml:"pnicResourceInfo"` +} + +func init() { + t["HostNetworkResourceRuntime"] = reflect.TypeOf((*HostNetworkResourceRuntime)(nil)).Elem() +} + +type HostNetworkSecurityPolicy struct { + DynamicData + + AllowPromiscuous *bool `xml:"allowPromiscuous"` + MacChanges *bool `xml:"macChanges"` + ForgedTransmits *bool `xml:"forgedTransmits"` +} + +func init() { + t["HostNetworkSecurityPolicy"] = reflect.TypeOf((*HostNetworkSecurityPolicy)(nil)).Elem() +} + +type HostNetworkTrafficShapingPolicy struct { + DynamicData + + Enabled *bool `xml:"enabled"` + AverageBandwidth int64 `xml:"averageBandwidth,omitempty"` + PeakBandwidth int64 `xml:"peakBandwidth,omitempty"` + BurstSize int64 `xml:"burstSize,omitempty"` +} + +func init() { + t["HostNetworkTrafficShapingPolicy"] = reflect.TypeOf((*HostNetworkTrafficShapingPolicy)(nil)).Elem() +} + +type HostNewNetworkConnectInfo struct { + HostConnectInfoNetworkInfo +} + +func init() { + t["HostNewNetworkConnectInfo"] = reflect.TypeOf((*HostNewNetworkConnectInfo)(nil)).Elem() +} + +type HostNicFailureCriteria struct { + DynamicData + + CheckSpeed string `xml:"checkSpeed,omitempty"` + Speed int32 `xml:"speed,omitempty"` + CheckDuplex *bool `xml:"checkDuplex"` + FullDuplex *bool `xml:"fullDuplex"` + CheckErrorPercent *bool `xml:"checkErrorPercent"` + Percentage int32 `xml:"percentage,omitempty"` + CheckBeacon *bool `xml:"checkBeacon"` +} + +func init() { + t["HostNicFailureCriteria"] = reflect.TypeOf((*HostNicFailureCriteria)(nil)).Elem() +} + +type HostNicOrderPolicy struct { + DynamicData + + ActiveNic []string `xml:"activeNic,omitempty"` + StandbyNic []string `xml:"standbyNic,omitempty"` +} + +func init() { + t["HostNicOrderPolicy"] = reflect.TypeOf((*HostNicOrderPolicy)(nil)).Elem() +} + +type HostNicTeamingPolicy struct { + DynamicData + + Policy string `xml:"policy,omitempty"` + ReversePolicy *bool `xml:"reversePolicy"` + NotifySwitches *bool `xml:"notifySwitches"` + RollingOrder *bool `xml:"rollingOrder"` + FailureCriteria *HostNicFailureCriteria 
`xml:"failureCriteria,omitempty"` + NicOrder *HostNicOrderPolicy `xml:"nicOrder,omitempty"` +} + +func init() { + t["HostNicTeamingPolicy"] = reflect.TypeOf((*HostNicTeamingPolicy)(nil)).Elem() +} + +type HostNoAvailableNetworksEvent struct { + HostDasEvent + + Ips string `xml:"ips,omitempty"` +} + +func init() { + t["HostNoAvailableNetworksEvent"] = reflect.TypeOf((*HostNoAvailableNetworksEvent)(nil)).Elem() +} + +type HostNoHAEnabledPortGroupsEvent struct { + HostDasEvent +} + +func init() { + t["HostNoHAEnabledPortGroupsEvent"] = reflect.TypeOf((*HostNoHAEnabledPortGroupsEvent)(nil)).Elem() +} + +type HostNoRedundantManagementNetworkEvent struct { + HostDasEvent +} + +func init() { + t["HostNoRedundantManagementNetworkEvent"] = reflect.TypeOf((*HostNoRedundantManagementNetworkEvent)(nil)).Elem() +} + +type HostNonCompliantEvent struct { + HostEvent +} + +func init() { + t["HostNonCompliantEvent"] = reflect.TypeOf((*HostNonCompliantEvent)(nil)).Elem() +} + +type HostNotConnected struct { + HostCommunication +} + +func init() { + t["HostNotConnected"] = reflect.TypeOf((*HostNotConnected)(nil)).Elem() +} + +type HostNotConnectedFault HostNotConnected + +func init() { + t["HostNotConnectedFault"] = reflect.TypeOf((*HostNotConnectedFault)(nil)).Elem() +} + +type HostNotInClusterEvent struct { + HostDasEvent +} + +func init() { + t["HostNotInClusterEvent"] = reflect.TypeOf((*HostNotInClusterEvent)(nil)).Elem() +} + +type HostNotReachable struct { + HostCommunication +} + +func init() { + t["HostNotReachable"] = reflect.TypeOf((*HostNotReachable)(nil)).Elem() +} + +type HostNotReachableFault HostNotReachable + +func init() { + t["HostNotReachableFault"] = reflect.TypeOf((*HostNotReachableFault)(nil)).Elem() +} + +type HostNtpConfig struct { + DynamicData + + Server []string `xml:"server,omitempty"` + ConfigFile []string `xml:"configFile,omitempty"` +} + +func init() { + t["HostNtpConfig"] = reflect.TypeOf((*HostNtpConfig)(nil)).Elem() +} + +type HostNumaInfo struct { + DynamicData + + Type string `xml:"type"` + NumNodes int32 `xml:"numNodes"` + NumaNode []HostNumaNode `xml:"numaNode,omitempty"` +} + +func init() { + t["HostNumaInfo"] = reflect.TypeOf((*HostNumaInfo)(nil)).Elem() +} + +type HostNumaNode struct { + DynamicData + + TypeId byte `xml:"typeId"` + CpuID []int16 `xml:"cpuID"` + MemoryRangeBegin int64 `xml:"memoryRangeBegin"` + MemoryRangeLength int64 `xml:"memoryRangeLength"` + PciId []string `xml:"pciId,omitempty"` +} + +func init() { + t["HostNumaNode"] = reflect.TypeOf((*HostNumaNode)(nil)).Elem() +} + +type HostNumericSensorInfo struct { + DynamicData + + Name string `xml:"name"` + HealthState BaseElementDescription `xml:"healthState,omitempty,typeattr"` + CurrentReading int64 `xml:"currentReading"` + UnitModifier int32 `xml:"unitModifier"` + BaseUnits string `xml:"baseUnits"` + RateUnits string `xml:"rateUnits,omitempty"` + SensorType string `xml:"sensorType"` + Id string `xml:"id,omitempty"` + TimeStamp string `xml:"timeStamp,omitempty"` +} + +func init() { + t["HostNumericSensorInfo"] = reflect.TypeOf((*HostNumericSensorInfo)(nil)).Elem() +} + +type HostOpaqueNetworkInfo struct { + DynamicData + + OpaqueNetworkId string `xml:"opaqueNetworkId"` + OpaqueNetworkName string `xml:"opaqueNetworkName"` + OpaqueNetworkType string `xml:"opaqueNetworkType"` + PnicZone []string `xml:"pnicZone,omitempty"` + Capability *OpaqueNetworkCapability `xml:"capability,omitempty"` + ExtraConfig []BaseOptionValue `xml:"extraConfig,omitempty,typeattr"` +} + +func init() { + t["HostOpaqueNetworkInfo"] 
= reflect.TypeOf((*HostOpaqueNetworkInfo)(nil)).Elem() +} + +type HostOpaqueSwitch struct { + DynamicData + + Key string `xml:"key"` + Name string `xml:"name,omitempty"` + Pnic []string `xml:"pnic,omitempty"` + PnicZone []HostOpaqueSwitchPhysicalNicZone `xml:"pnicZone,omitempty"` + Status string `xml:"status,omitempty"` + Vtep []HostVirtualNic `xml:"vtep,omitempty"` + ExtraConfig []BaseOptionValue `xml:"extraConfig,omitempty,typeattr"` + FeatureCapability []HostFeatureCapability `xml:"featureCapability,omitempty"` +} + +func init() { + t["HostOpaqueSwitch"] = reflect.TypeOf((*HostOpaqueSwitch)(nil)).Elem() +} + +type HostOpaqueSwitchPhysicalNicZone struct { + DynamicData + + Key string `xml:"key"` + PnicDevice []string `xml:"pnicDevice,omitempty"` +} + +func init() { + t["HostOpaqueSwitchPhysicalNicZone"] = reflect.TypeOf((*HostOpaqueSwitchPhysicalNicZone)(nil)).Elem() +} + +type HostOvercommittedEvent struct { + ClusterOvercommittedEvent +} + +func init() { + t["HostOvercommittedEvent"] = reflect.TypeOf((*HostOvercommittedEvent)(nil)).Elem() +} + +type HostPMemVolume struct { + HostFileSystemVolume + + Uuid string `xml:"uuid"` + Version string `xml:"version"` +} + +func init() { + t["HostPMemVolume"] = reflect.TypeOf((*HostPMemVolume)(nil)).Elem() +} + +type HostParallelScsiHba struct { + HostHostBusAdapter +} + +func init() { + t["HostParallelScsiHba"] = reflect.TypeOf((*HostParallelScsiHba)(nil)).Elem() +} + +type HostParallelScsiTargetTransport struct { + HostTargetTransport +} + +func init() { + t["HostParallelScsiTargetTransport"] = reflect.TypeOf((*HostParallelScsiTargetTransport)(nil)).Elem() +} + +type HostPatchManagerLocator struct { + DynamicData + + Url string `xml:"url"` + Proxy string `xml:"proxy,omitempty"` +} + +func init() { + t["HostPatchManagerLocator"] = reflect.TypeOf((*HostPatchManagerLocator)(nil)).Elem() +} + +type HostPatchManagerPatchManagerOperationSpec struct { + DynamicData + + Proxy string `xml:"proxy,omitempty"` + Port int32 `xml:"port,omitempty"` + UserName string `xml:"userName,omitempty"` + Password string `xml:"password,omitempty"` + CmdOption string `xml:"cmdOption,omitempty"` +} + +func init() { + t["HostPatchManagerPatchManagerOperationSpec"] = reflect.TypeOf((*HostPatchManagerPatchManagerOperationSpec)(nil)).Elem() +} + +type HostPatchManagerResult struct { + DynamicData + + Version string `xml:"version"` + Status []HostPatchManagerStatus `xml:"status,omitempty"` + XmlResult string `xml:"xmlResult,omitempty"` +} + +func init() { + t["HostPatchManagerResult"] = reflect.TypeOf((*HostPatchManagerResult)(nil)).Elem() +} + +type HostPatchManagerStatus struct { + DynamicData + + Id string `xml:"id"` + Applicable bool `xml:"applicable"` + Reason []string `xml:"reason,omitempty"` + Integrity string `xml:"integrity,omitempty"` + Installed bool `xml:"installed"` + InstallState []string `xml:"installState,omitempty"` + PrerequisitePatch []HostPatchManagerStatusPrerequisitePatch `xml:"prerequisitePatch,omitempty"` + RestartRequired bool `xml:"restartRequired"` + ReconnectRequired bool `xml:"reconnectRequired"` + VmOffRequired bool `xml:"vmOffRequired"` + SupersededPatchIds []string `xml:"supersededPatchIds,omitempty"` +} + +func init() { + t["HostPatchManagerStatus"] = reflect.TypeOf((*HostPatchManagerStatus)(nil)).Elem() +} + +type HostPatchManagerStatusPrerequisitePatch struct { + DynamicData + + Id string `xml:"id"` + InstallState []string `xml:"installState,omitempty"` +} + +func init() { + t["HostPatchManagerStatusPrerequisitePatch"] = 
reflect.TypeOf((*HostPatchManagerStatusPrerequisitePatch)(nil)).Elem() +} + +type HostPathSelectionPolicyOption struct { + DynamicData + + Policy BaseElementDescription `xml:"policy,typeattr"` +} + +func init() { + t["HostPathSelectionPolicyOption"] = reflect.TypeOf((*HostPathSelectionPolicyOption)(nil)).Elem() +} + +type HostPciDevice struct { + DynamicData + + Id string `xml:"id"` + ClassId int16 `xml:"classId"` + Bus byte `xml:"bus"` + Slot byte `xml:"slot"` + Function byte `xml:"function"` + VendorId int16 `xml:"vendorId"` + SubVendorId int16 `xml:"subVendorId"` + VendorName string `xml:"vendorName"` + DeviceId int16 `xml:"deviceId"` + SubDeviceId int16 `xml:"subDeviceId"` + ParentBridge string `xml:"parentBridge,omitempty"` + DeviceName string `xml:"deviceName"` +} + +func init() { + t["HostPciDevice"] = reflect.TypeOf((*HostPciDevice)(nil)).Elem() +} + +type HostPciPassthruConfig struct { + DynamicData + + Id string `xml:"id"` + PassthruEnabled bool `xml:"passthruEnabled"` +} + +func init() { + t["HostPciPassthruConfig"] = reflect.TypeOf((*HostPciPassthruConfig)(nil)).Elem() +} + +type HostPciPassthruInfo struct { + DynamicData + + Id string `xml:"id"` + DependentDevice string `xml:"dependentDevice"` + PassthruEnabled bool `xml:"passthruEnabled"` + PassthruCapable bool `xml:"passthruCapable"` + PassthruActive bool `xml:"passthruActive"` +} + +func init() { + t["HostPciPassthruInfo"] = reflect.TypeOf((*HostPciPassthruInfo)(nil)).Elem() +} + +type HostPersistentMemoryInfo struct { + DynamicData + + CapacityInMB int64 `xml:"capacityInMB,omitempty"` + VolumeUUID string `xml:"volumeUUID,omitempty"` +} + +func init() { + t["HostPersistentMemoryInfo"] = reflect.TypeOf((*HostPersistentMemoryInfo)(nil)).Elem() +} + +type HostPlacedVirtualNicIdentifier struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + VnicKey string `xml:"vnicKey"` + Reservation *int32 `xml:"reservation"` +} + +func init() { + t["HostPlacedVirtualNicIdentifier"] = reflect.TypeOf((*HostPlacedVirtualNicIdentifier)(nil)).Elem() +} + +type HostPlugStoreTopology struct { + DynamicData + + Adapter []HostPlugStoreTopologyAdapter `xml:"adapter,omitempty"` + Path []HostPlugStoreTopologyPath `xml:"path,omitempty"` + Target []HostPlugStoreTopologyTarget `xml:"target,omitempty"` + Device []HostPlugStoreTopologyDevice `xml:"device,omitempty"` + Plugin []HostPlugStoreTopologyPlugin `xml:"plugin,omitempty"` +} + +func init() { + t["HostPlugStoreTopology"] = reflect.TypeOf((*HostPlugStoreTopology)(nil)).Elem() +} + +type HostPlugStoreTopologyAdapter struct { + DynamicData + + Key string `xml:"key"` + Adapter string `xml:"adapter"` + Path []string `xml:"path,omitempty"` +} + +func init() { + t["HostPlugStoreTopologyAdapter"] = reflect.TypeOf((*HostPlugStoreTopologyAdapter)(nil)).Elem() +} + +type HostPlugStoreTopologyDevice struct { + DynamicData + + Key string `xml:"key"` + Lun string `xml:"lun"` + Path []string `xml:"path,omitempty"` +} + +func init() { + t["HostPlugStoreTopologyDevice"] = reflect.TypeOf((*HostPlugStoreTopologyDevice)(nil)).Elem() +} + +type HostPlugStoreTopologyPath struct { + DynamicData + + Key string `xml:"key"` + Name string `xml:"name"` + ChannelNumber int32 `xml:"channelNumber,omitempty"` + TargetNumber int32 `xml:"targetNumber,omitempty"` + LunNumber int32 `xml:"lunNumber,omitempty"` + Adapter string `xml:"adapter,omitempty"` + Target string `xml:"target,omitempty"` + Device string `xml:"device,omitempty"` +} + +func init() { + t["HostPlugStoreTopologyPath"] = 
reflect.TypeOf((*HostPlugStoreTopologyPath)(nil)).Elem() +} + +type HostPlugStoreTopologyPlugin struct { + DynamicData + + Key string `xml:"key"` + Name string `xml:"name"` + Device []string `xml:"device,omitempty"` + ClaimedPath []string `xml:"claimedPath,omitempty"` +} + +func init() { + t["HostPlugStoreTopologyPlugin"] = reflect.TypeOf((*HostPlugStoreTopologyPlugin)(nil)).Elem() +} + +type HostPlugStoreTopologyTarget struct { + DynamicData + + Key string `xml:"key"` + Transport BaseHostTargetTransport `xml:"transport,omitempty,typeattr"` +} + +func init() { + t["HostPlugStoreTopologyTarget"] = reflect.TypeOf((*HostPlugStoreTopologyTarget)(nil)).Elem() +} + +type HostPnicNetworkResourceInfo struct { + DynamicData + + PnicDevice string `xml:"pnicDevice"` + AvailableBandwidthForVMTraffic int64 `xml:"availableBandwidthForVMTraffic,omitempty"` + UnusedBandwidthForVMTraffic int64 `xml:"unusedBandwidthForVMTraffic,omitempty"` + PlacedVirtualNics []HostPlacedVirtualNicIdentifier `xml:"placedVirtualNics,omitempty"` +} + +func init() { + t["HostPnicNetworkResourceInfo"] = reflect.TypeOf((*HostPnicNetworkResourceInfo)(nil)).Elem() +} + +type HostPortGroup struct { + DynamicData + + Key string `xml:"key,omitempty"` + Port []HostPortGroupPort `xml:"port,omitempty"` + Vswitch string `xml:"vswitch,omitempty"` + ComputedPolicy HostNetworkPolicy `xml:"computedPolicy"` + Spec HostPortGroupSpec `xml:"spec"` +} + +func init() { + t["HostPortGroup"] = reflect.TypeOf((*HostPortGroup)(nil)).Elem() +} + +type HostPortGroupConfig struct { + DynamicData + + ChangeOperation string `xml:"changeOperation,omitempty"` + Spec *HostPortGroupSpec `xml:"spec,omitempty"` +} + +func init() { + t["HostPortGroupConfig"] = reflect.TypeOf((*HostPortGroupConfig)(nil)).Elem() +} + +type HostPortGroupPort struct { + DynamicData + + Key string `xml:"key,omitempty"` + Mac []string `xml:"mac,omitempty"` + Type string `xml:"type"` +} + +func init() { + t["HostPortGroupPort"] = reflect.TypeOf((*HostPortGroupPort)(nil)).Elem() +} + +type HostPortGroupProfile struct { + PortGroupProfile + + IpConfig IpAddressProfile `xml:"ipConfig"` +} + +func init() { + t["HostPortGroupProfile"] = reflect.TypeOf((*HostPortGroupProfile)(nil)).Elem() +} + +type HostPortGroupSpec struct { + DynamicData + + Name string `xml:"name"` + VlanId int32 `xml:"vlanId"` + VswitchName string `xml:"vswitchName"` + Policy HostNetworkPolicy `xml:"policy"` +} + +func init() { + t["HostPortGroupSpec"] = reflect.TypeOf((*HostPortGroupSpec)(nil)).Elem() +} + +type HostPosixAccountSpec struct { + HostAccountSpec + + PosixId int32 `xml:"posixId,omitempty"` + ShellAccess *bool `xml:"shellAccess"` +} + +func init() { + t["HostPosixAccountSpec"] = reflect.TypeOf((*HostPosixAccountSpec)(nil)).Elem() +} + +type HostPowerOpFailed struct { + VimFault +} + +func init() { + t["HostPowerOpFailed"] = reflect.TypeOf((*HostPowerOpFailed)(nil)).Elem() +} + +type HostPowerOpFailedFault BaseHostPowerOpFailed + +func init() { + t["HostPowerOpFailedFault"] = reflect.TypeOf((*HostPowerOpFailedFault)(nil)).Elem() +} + +type HostPowerPolicy struct { + DynamicData + + Key int32 `xml:"key"` + Name string `xml:"name"` + ShortName string `xml:"shortName"` + Description string `xml:"description"` +} + +func init() { + t["HostPowerPolicy"] = reflect.TypeOf((*HostPowerPolicy)(nil)).Elem() +} + +type HostPrimaryAgentNotShortNameEvent struct { + HostDasEvent + + PrimaryAgent string `xml:"primaryAgent"` +} + +func init() { + t["HostPrimaryAgentNotShortNameEvent"] = 
reflect.TypeOf((*HostPrimaryAgentNotShortNameEvent)(nil)).Elem() +} + +type HostProfileAppliedEvent struct { + HostEvent + + Profile ProfileEventArgument `xml:"profile"` +} + +func init() { + t["HostProfileAppliedEvent"] = reflect.TypeOf((*HostProfileAppliedEvent)(nil)).Elem() +} + +type HostProfileCompleteConfigSpec struct { + HostProfileConfigSpec + + ApplyProfile *HostApplyProfile `xml:"applyProfile,omitempty"` + CustomComplyProfile *ComplianceProfile `xml:"customComplyProfile,omitempty"` + DisabledExpressionListChanged bool `xml:"disabledExpressionListChanged"` + DisabledExpressionList []string `xml:"disabledExpressionList,omitempty"` + ValidatorHost *ManagedObjectReference `xml:"validatorHost,omitempty"` + Validating *bool `xml:"validating"` + HostConfig *HostProfileConfigInfo `xml:"hostConfig,omitempty"` +} + +func init() { + t["HostProfileCompleteConfigSpec"] = reflect.TypeOf((*HostProfileCompleteConfigSpec)(nil)).Elem() +} + +type HostProfileConfigInfo struct { + ProfileConfigInfo + + ApplyProfile *HostApplyProfile `xml:"applyProfile,omitempty"` + DefaultComplyProfile *ComplianceProfile `xml:"defaultComplyProfile,omitempty"` + DefaultComplyLocator []ComplianceLocator `xml:"defaultComplyLocator,omitempty"` + CustomComplyProfile *ComplianceProfile `xml:"customComplyProfile,omitempty"` + DisabledExpressionList []string `xml:"disabledExpressionList,omitempty"` + Description *ProfileDescription `xml:"description,omitempty"` +} + +func init() { + t["HostProfileConfigInfo"] = reflect.TypeOf((*HostProfileConfigInfo)(nil)).Elem() +} + +type HostProfileConfigSpec struct { + ProfileCreateSpec +} + +func init() { + t["HostProfileConfigSpec"] = reflect.TypeOf((*HostProfileConfigSpec)(nil)).Elem() +} + +type HostProfileHostBasedConfigSpec struct { + HostProfileConfigSpec + + Host ManagedObjectReference `xml:"host"` + UseHostProfileEngine *bool `xml:"useHostProfileEngine"` +} + +func init() { + t["HostProfileHostBasedConfigSpec"] = reflect.TypeOf((*HostProfileHostBasedConfigSpec)(nil)).Elem() +} + +type HostProfileManagerCompositionResult struct { + DynamicData + + Errors []LocalizableMessage `xml:"errors,omitempty"` + Results []HostProfileManagerCompositionResultResultElement `xml:"results,omitempty"` +} + +func init() { + t["HostProfileManagerCompositionResult"] = reflect.TypeOf((*HostProfileManagerCompositionResult)(nil)).Elem() +} + +type HostProfileManagerCompositionResultResultElement struct { + DynamicData + + Target ManagedObjectReference `xml:"target"` + Status string `xml:"status"` + Errors []LocalizableMessage `xml:"errors,omitempty"` +} + +func init() { + t["HostProfileManagerCompositionResultResultElement"] = reflect.TypeOf((*HostProfileManagerCompositionResultResultElement)(nil)).Elem() +} + +type HostProfileManagerCompositionValidationResult struct { + DynamicData + + Results []HostProfileManagerCompositionValidationResultResultElement `xml:"results,omitempty"` + Errors []LocalizableMessage `xml:"errors,omitempty"` +} + +func init() { + t["HostProfileManagerCompositionValidationResult"] = reflect.TypeOf((*HostProfileManagerCompositionValidationResult)(nil)).Elem() +} + +type HostProfileManagerCompositionValidationResultResultElement struct { + DynamicData + + Target ManagedObjectReference `xml:"target"` + Status string `xml:"status"` + Errors []LocalizableMessage `xml:"errors,omitempty"` + SourceDiffForToBeMerged *HostApplyProfile `xml:"sourceDiffForToBeMerged,omitempty"` + TargetDiffForToBeMerged *HostApplyProfile `xml:"targetDiffForToBeMerged,omitempty"` + ToBeAdded 
*HostApplyProfile `xml:"toBeAdded,omitempty"` + ToBeDeleted *HostApplyProfile `xml:"toBeDeleted,omitempty"` + ToBeDisabled *HostApplyProfile `xml:"toBeDisabled,omitempty"` + ToBeEnabled *HostApplyProfile `xml:"toBeEnabled,omitempty"` + ToBeReenableCC *HostApplyProfile `xml:"toBeReenableCC,omitempty"` +} + +func init() { + t["HostProfileManagerCompositionValidationResultResultElement"] = reflect.TypeOf((*HostProfileManagerCompositionValidationResultResultElement)(nil)).Elem() +} + +type HostProfileManagerConfigTaskList struct { + DynamicData + + ConfigSpec *HostConfigSpec `xml:"configSpec,omitempty"` + TaskDescription []LocalizableMessage `xml:"taskDescription,omitempty"` + TaskListRequirement []string `xml:"taskListRequirement,omitempty"` +} + +func init() { + t["HostProfileManagerConfigTaskList"] = reflect.TypeOf((*HostProfileManagerConfigTaskList)(nil)).Elem() +} + +type HostProfileManagerHostToConfigSpecMap struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + ConfigSpec BaseAnswerFileCreateSpec `xml:"configSpec,typeattr"` +} + +func init() { + t["HostProfileManagerHostToConfigSpecMap"] = reflect.TypeOf((*HostProfileManagerHostToConfigSpecMap)(nil)).Elem() +} + +type HostProfileResetValidationState HostProfileResetValidationStateRequestType + +func init() { + t["HostProfileResetValidationState"] = reflect.TypeOf((*HostProfileResetValidationState)(nil)).Elem() +} + +type HostProfileResetValidationStateRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["HostProfileResetValidationStateRequestType"] = reflect.TypeOf((*HostProfileResetValidationStateRequestType)(nil)).Elem() +} + +type HostProfileResetValidationStateResponse struct { +} + +type HostProfileSerializedHostProfileSpec struct { + ProfileSerializedCreateSpec + + ValidatorHost *ManagedObjectReference `xml:"validatorHost,omitempty"` + Validating *bool `xml:"validating"` +} + +func init() { + t["HostProfileSerializedHostProfileSpec"] = reflect.TypeOf((*HostProfileSerializedHostProfileSpec)(nil)).Elem() +} + +type HostProfileValidationFailureInfo struct { + DynamicData + + Name string `xml:"name"` + Annotation string `xml:"annotation"` + UpdateType string `xml:"updateType"` + Host *ManagedObjectReference `xml:"host,omitempty"` + ApplyProfile *HostApplyProfile `xml:"applyProfile,omitempty"` + Failures []ProfileUpdateFailedUpdateFailure `xml:"failures,omitempty"` + Faults []LocalizedMethodFault `xml:"faults,omitempty"` +} + +func init() { + t["HostProfileValidationFailureInfo"] = reflect.TypeOf((*HostProfileValidationFailureInfo)(nil)).Elem() +} + +type HostProfilesEntityCustomizations struct { + DynamicData +} + +func init() { + t["HostProfilesEntityCustomizations"] = reflect.TypeOf((*HostProfilesEntityCustomizations)(nil)).Elem() +} + +type HostProtocolEndpoint struct { + DynamicData + + PeType string `xml:"peType"` + Type string `xml:"type,omitempty"` + Uuid string `xml:"uuid"` + HostKey []ManagedObjectReference `xml:"hostKey,omitempty"` + StorageArray string `xml:"storageArray,omitempty"` + NfsServer string `xml:"nfsServer,omitempty"` + NfsDir string `xml:"nfsDir,omitempty"` + NfsServerScope string `xml:"nfsServerScope,omitempty"` + NfsServerMajor string `xml:"nfsServerMajor,omitempty"` + NfsServerAuthType string `xml:"nfsServerAuthType,omitempty"` + NfsServerUser string `xml:"nfsServerUser,omitempty"` + DeviceId string `xml:"deviceId,omitempty"` +} + +func init() { + t["HostProtocolEndpoint"] = reflect.TypeOf((*HostProtocolEndpoint)(nil)).Elem() +} + +type HostProxySwitch struct 
{ + DynamicData + + DvsUuid string `xml:"dvsUuid"` + DvsName string `xml:"dvsName"` + Key string `xml:"key"` + NumPorts int32 `xml:"numPorts"` + ConfigNumPorts int32 `xml:"configNumPorts,omitempty"` + NumPortsAvailable int32 `xml:"numPortsAvailable"` + UplinkPort []KeyValue `xml:"uplinkPort,omitempty"` + Mtu int32 `xml:"mtu,omitempty"` + Pnic []string `xml:"pnic,omitempty"` + Spec HostProxySwitchSpec `xml:"spec"` + HostLag []HostProxySwitchHostLagConfig `xml:"hostLag,omitempty"` + NetworkReservationSupported *bool `xml:"networkReservationSupported"` +} + +func init() { + t["HostProxySwitch"] = reflect.TypeOf((*HostProxySwitch)(nil)).Elem() +} + +type HostProxySwitchConfig struct { + DynamicData + + ChangeOperation string `xml:"changeOperation,omitempty"` + Uuid string `xml:"uuid"` + Spec *HostProxySwitchSpec `xml:"spec,omitempty"` +} + +func init() { + t["HostProxySwitchConfig"] = reflect.TypeOf((*HostProxySwitchConfig)(nil)).Elem() +} + +type HostProxySwitchHostLagConfig struct { + DynamicData + + LagKey string `xml:"lagKey"` + LagName string `xml:"lagName,omitempty"` + UplinkPort []KeyValue `xml:"uplinkPort,omitempty"` +} + +func init() { + t["HostProxySwitchHostLagConfig"] = reflect.TypeOf((*HostProxySwitchHostLagConfig)(nil)).Elem() +} + +type HostProxySwitchSpec struct { + DynamicData + + Backing BaseDistributedVirtualSwitchHostMemberBacking `xml:"backing,omitempty,typeattr"` +} + +func init() { + t["HostProxySwitchSpec"] = reflect.TypeOf((*HostProxySwitchSpec)(nil)).Elem() +} + +type HostReconcileDatastoreInventoryRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["HostReconcileDatastoreInventoryRequestType"] = reflect.TypeOf((*HostReconcileDatastoreInventoryRequestType)(nil)).Elem() +} + +type HostReconcileDatastoreInventory_Task HostReconcileDatastoreInventoryRequestType + +func init() { + t["HostReconcileDatastoreInventory_Task"] = reflect.TypeOf((*HostReconcileDatastoreInventory_Task)(nil)).Elem() +} + +type HostReconcileDatastoreInventory_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type HostReconnectionFailedEvent struct { + HostEvent +} + +func init() { + t["HostReconnectionFailedEvent"] = reflect.TypeOf((*HostReconnectionFailedEvent)(nil)).Elem() +} + +type HostRegisterDisk HostRegisterDiskRequestType + +func init() { + t["HostRegisterDisk"] = reflect.TypeOf((*HostRegisterDisk)(nil)).Elem() +} + +type HostRegisterDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Path string `xml:"path"` + Name string `xml:"name,omitempty"` +} + +func init() { + t["HostRegisterDiskRequestType"] = reflect.TypeOf((*HostRegisterDiskRequestType)(nil)).Elem() +} + +type HostRegisterDiskResponse struct { + Returnval VStorageObject `xml:"returnval"` +} + +type HostReliableMemoryInfo struct { + DynamicData + + MemorySize int64 `xml:"memorySize"` +} + +func init() { + t["HostReliableMemoryInfo"] = reflect.TypeOf((*HostReliableMemoryInfo)(nil)).Elem() +} + +type HostRelocateVStorageObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Spec VslmRelocateSpec `xml:"spec"` +} + +func init() { + t["HostRelocateVStorageObjectRequestType"] = reflect.TypeOf((*HostRelocateVStorageObjectRequestType)(nil)).Elem() +} + +type HostRelocateVStorageObject_Task HostRelocateVStorageObjectRequestType + +func init() { + t["HostRelocateVStorageObject_Task"] = 
reflect.TypeOf((*HostRelocateVStorageObject_Task)(nil)).Elem() +} + +type HostRelocateVStorageObject_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type HostRemoveVFlashResource HostRemoveVFlashResourceRequestType + +func init() { + t["HostRemoveVFlashResource"] = reflect.TypeOf((*HostRemoveVFlashResource)(nil)).Elem() +} + +type HostRemoveVFlashResourceRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["HostRemoveVFlashResourceRequestType"] = reflect.TypeOf((*HostRemoveVFlashResourceRequestType)(nil)).Elem() +} + +type HostRemoveVFlashResourceResponse struct { +} + +type HostRemovedEvent struct { + HostEvent +} + +func init() { + t["HostRemovedEvent"] = reflect.TypeOf((*HostRemovedEvent)(nil)).Elem() +} + +type HostRenameVStorageObject HostRenameVStorageObjectRequestType + +func init() { + t["HostRenameVStorageObject"] = reflect.TypeOf((*HostRenameVStorageObject)(nil)).Elem() +} + +type HostRenameVStorageObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Name string `xml:"name"` +} + +func init() { + t["HostRenameVStorageObjectRequestType"] = reflect.TypeOf((*HostRenameVStorageObjectRequestType)(nil)).Elem() +} + +type HostRenameVStorageObjectResponse struct { +} + +type HostResignatureRescanResult struct { + DynamicData + + Rescan []HostVmfsRescanResult `xml:"rescan,omitempty"` + Result ManagedObjectReference `xml:"result"` +} + +func init() { + t["HostResignatureRescanResult"] = reflect.TypeOf((*HostResignatureRescanResult)(nil)).Elem() +} + +type HostRetrieveVStorageInfrastructureObjectPolicy HostRetrieveVStorageInfrastructureObjectPolicyRequestType + +func init() { + t["HostRetrieveVStorageInfrastructureObjectPolicy"] = reflect.TypeOf((*HostRetrieveVStorageInfrastructureObjectPolicy)(nil)).Elem() +} + +type HostRetrieveVStorageInfrastructureObjectPolicyRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["HostRetrieveVStorageInfrastructureObjectPolicyRequestType"] = reflect.TypeOf((*HostRetrieveVStorageInfrastructureObjectPolicyRequestType)(nil)).Elem() +} + +type HostRetrieveVStorageInfrastructureObjectPolicyResponse struct { + Returnval []VslmInfrastructureObjectPolicy `xml:"returnval,omitempty"` +} + +type HostRetrieveVStorageObject HostRetrieveVStorageObjectRequestType + +func init() { + t["HostRetrieveVStorageObject"] = reflect.TypeOf((*HostRetrieveVStorageObject)(nil)).Elem() +} + +type HostRetrieveVStorageObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["HostRetrieveVStorageObjectRequestType"] = reflect.TypeOf((*HostRetrieveVStorageObjectRequestType)(nil)).Elem() +} + +type HostRetrieveVStorageObjectResponse struct { + Returnval VStorageObject `xml:"returnval"` +} + +type HostRetrieveVStorageObjectState HostRetrieveVStorageObjectStateRequestType + +func init() { + t["HostRetrieveVStorageObjectState"] = reflect.TypeOf((*HostRetrieveVStorageObjectState)(nil)).Elem() +} + +type HostRetrieveVStorageObjectStateRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["HostRetrieveVStorageObjectStateRequestType"] = reflect.TypeOf((*HostRetrieveVStorageObjectStateRequestType)(nil)).Elem() +} + +type 
HostRetrieveVStorageObjectStateResponse struct { + Returnval VStorageObjectStateInfo `xml:"returnval"` +} + +type HostRuntimeInfo struct { + DynamicData + + ConnectionState HostSystemConnectionState `xml:"connectionState"` + PowerState HostSystemPowerState `xml:"powerState"` + StandbyMode string `xml:"standbyMode,omitempty"` + InMaintenanceMode bool `xml:"inMaintenanceMode"` + InQuarantineMode *bool `xml:"inQuarantineMode"` + BootTime *time.Time `xml:"bootTime"` + HealthSystemRuntime *HealthSystemRuntime `xml:"healthSystemRuntime,omitempty"` + DasHostState *ClusterDasFdmHostState `xml:"dasHostState,omitempty"` + TpmPcrValues []HostTpmDigestInfo `xml:"tpmPcrValues,omitempty"` + VsanRuntimeInfo *VsanHostRuntimeInfo `xml:"vsanRuntimeInfo,omitempty"` + NetworkRuntimeInfo *HostRuntimeInfoNetworkRuntimeInfo `xml:"networkRuntimeInfo,omitempty"` + VFlashResourceRuntimeInfo *HostVFlashManagerVFlashResourceRunTimeInfo `xml:"vFlashResourceRuntimeInfo,omitempty"` + HostMaxVirtualDiskCapacity int64 `xml:"hostMaxVirtualDiskCapacity,omitempty"` + CryptoState string `xml:"cryptoState,omitempty"` + CryptoKeyId *CryptoKeyId `xml:"cryptoKeyId,omitempty"` +} + +func init() { + t["HostRuntimeInfo"] = reflect.TypeOf((*HostRuntimeInfo)(nil)).Elem() +} + +type HostRuntimeInfoNetStackInstanceRuntimeInfo struct { + DynamicData + + NetStackInstanceKey string `xml:"netStackInstanceKey"` + State string `xml:"state,omitempty"` + VmknicKeys []string `xml:"vmknicKeys,omitempty"` + MaxNumberOfConnections int32 `xml:"maxNumberOfConnections,omitempty"` + CurrentIpV6Enabled *bool `xml:"currentIpV6Enabled"` +} + +func init() { + t["HostRuntimeInfoNetStackInstanceRuntimeInfo"] = reflect.TypeOf((*HostRuntimeInfoNetStackInstanceRuntimeInfo)(nil)).Elem() +} + +type HostRuntimeInfoNetworkRuntimeInfo struct { + DynamicData + + NetStackInstanceRuntimeInfo []HostRuntimeInfoNetStackInstanceRuntimeInfo `xml:"netStackInstanceRuntimeInfo,omitempty"` + NetworkResourceRuntime *HostNetworkResourceRuntime `xml:"networkResourceRuntime,omitempty"` +} + +func init() { + t["HostRuntimeInfoNetworkRuntimeInfo"] = reflect.TypeOf((*HostRuntimeInfoNetworkRuntimeInfo)(nil)).Elem() +} + +type HostScheduleReconcileDatastoreInventory HostScheduleReconcileDatastoreInventoryRequestType + +func init() { + t["HostScheduleReconcileDatastoreInventory"] = reflect.TypeOf((*HostScheduleReconcileDatastoreInventory)(nil)).Elem() +} + +type HostScheduleReconcileDatastoreInventoryRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["HostScheduleReconcileDatastoreInventoryRequestType"] = reflect.TypeOf((*HostScheduleReconcileDatastoreInventoryRequestType)(nil)).Elem() +} + +type HostScheduleReconcileDatastoreInventoryResponse struct { +} + +type HostScsiDisk struct { + ScsiLun + + Capacity HostDiskDimensionsLba `xml:"capacity"` + DevicePath string `xml:"devicePath"` + Ssd *bool `xml:"ssd"` + LocalDisk *bool `xml:"localDisk"` + PhysicalLocation []string `xml:"physicalLocation,omitempty"` + EmulatedDIXDIFEnabled *bool `xml:"emulatedDIXDIFEnabled"` + VsanDiskInfo *VsanHostVsanDiskInfo `xml:"vsanDiskInfo,omitempty"` + ScsiDiskType string `xml:"scsiDiskType,omitempty"` +} + +func init() { + t["HostScsiDisk"] = reflect.TypeOf((*HostScsiDisk)(nil)).Elem() +} + +type HostScsiDiskPartition struct { + DynamicData + + DiskName string `xml:"diskName"` + Partition int32 `xml:"partition"` +} + +func init() { + t["HostScsiDiskPartition"] = reflect.TypeOf((*HostScsiDiskPartition)(nil)).Elem() +} 
+ +type HostScsiTopology struct { + DynamicData + + Adapter []HostScsiTopologyInterface `xml:"adapter,omitempty"` +} + +func init() { + t["HostScsiTopology"] = reflect.TypeOf((*HostScsiTopology)(nil)).Elem() +} + +type HostScsiTopologyInterface struct { + DynamicData + + Key string `xml:"key"` + Adapter string `xml:"adapter"` + Target []HostScsiTopologyTarget `xml:"target,omitempty"` +} + +func init() { + t["HostScsiTopologyInterface"] = reflect.TypeOf((*HostScsiTopologyInterface)(nil)).Elem() +} + +type HostScsiTopologyLun struct { + DynamicData + + Key string `xml:"key"` + Lun int32 `xml:"lun"` + ScsiLun string `xml:"scsiLun"` +} + +func init() { + t["HostScsiTopologyLun"] = reflect.TypeOf((*HostScsiTopologyLun)(nil)).Elem() +} + +type HostScsiTopologyTarget struct { + DynamicData + + Key string `xml:"key"` + Target int32 `xml:"target"` + Lun []HostScsiTopologyLun `xml:"lun,omitempty"` + Transport BaseHostTargetTransport `xml:"transport,omitempty,typeattr"` +} + +func init() { + t["HostScsiTopologyTarget"] = reflect.TypeOf((*HostScsiTopologyTarget)(nil)).Elem() +} + +type HostSecuritySpec struct { + DynamicData + + AdminPassword string `xml:"adminPassword,omitempty"` + RemovePermission []Permission `xml:"removePermission,omitempty"` + AddPermission []Permission `xml:"addPermission,omitempty"` +} + +func init() { + t["HostSecuritySpec"] = reflect.TypeOf((*HostSecuritySpec)(nil)).Elem() +} + +type HostSerialAttachedHba struct { + HostHostBusAdapter + + NodeWorldWideName string `xml:"nodeWorldWideName"` +} + +func init() { + t["HostSerialAttachedHba"] = reflect.TypeOf((*HostSerialAttachedHba)(nil)).Elem() +} + +type HostSerialAttachedTargetTransport struct { + HostTargetTransport +} + +func init() { + t["HostSerialAttachedTargetTransport"] = reflect.TypeOf((*HostSerialAttachedTargetTransport)(nil)).Elem() +} + +type HostService struct { + DynamicData + + Key string `xml:"key"` + Label string `xml:"label"` + Required bool `xml:"required"` + Uninstallable bool `xml:"uninstallable"` + Running bool `xml:"running"` + Ruleset []string `xml:"ruleset,omitempty"` + Policy string `xml:"policy"` + SourcePackage *HostServiceSourcePackage `xml:"sourcePackage,omitempty"` +} + +func init() { + t["HostService"] = reflect.TypeOf((*HostService)(nil)).Elem() +} + +type HostServiceConfig struct { + DynamicData + + ServiceId string `xml:"serviceId"` + StartupPolicy string `xml:"startupPolicy"` +} + +func init() { + t["HostServiceConfig"] = reflect.TypeOf((*HostServiceConfig)(nil)).Elem() +} + +type HostServiceInfo struct { + DynamicData + + Service []HostService `xml:"service,omitempty"` +} + +func init() { + t["HostServiceInfo"] = reflect.TypeOf((*HostServiceInfo)(nil)).Elem() +} + +type HostServiceSourcePackage struct { + DynamicData + + SourcePackageName string `xml:"sourcePackageName"` + Description string `xml:"description"` +} + +func init() { + t["HostServiceSourcePackage"] = reflect.TypeOf((*HostServiceSourcePackage)(nil)).Elem() +} + +type HostServiceTicket struct { + DynamicData + + Host string `xml:"host,omitempty"` + Port int32 `xml:"port,omitempty"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` + Service string `xml:"service"` + ServiceVersion string `xml:"serviceVersion"` + SessionId string `xml:"sessionId"` +} + +func init() { + t["HostServiceTicket"] = reflect.TypeOf((*HostServiceTicket)(nil)).Elem() +} + +type HostSetVStorageObjectControlFlags HostSetVStorageObjectControlFlagsRequestType + +func init() { + t["HostSetVStorageObjectControlFlags"] = 
reflect.TypeOf((*HostSetVStorageObjectControlFlags)(nil)).Elem() +} + +type HostSetVStorageObjectControlFlagsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + ControlFlags []string `xml:"controlFlags,omitempty"` +} + +func init() { + t["HostSetVStorageObjectControlFlagsRequestType"] = reflect.TypeOf((*HostSetVStorageObjectControlFlagsRequestType)(nil)).Elem() +} + +type HostSetVStorageObjectControlFlagsResponse struct { +} + +type HostSharedGpuCapabilities struct { + DynamicData + + Vgpu string `xml:"vgpu"` + DiskSnapshotSupported bool `xml:"diskSnapshotSupported"` + MemorySnapshotSupported bool `xml:"memorySnapshotSupported"` + SuspendSupported bool `xml:"suspendSupported"` + MigrateSupported bool `xml:"migrateSupported"` +} + +func init() { + t["HostSharedGpuCapabilities"] = reflect.TypeOf((*HostSharedGpuCapabilities)(nil)).Elem() +} + +type HostShortNameInconsistentEvent struct { + HostDasEvent + + ShortName string `xml:"shortName"` + ShortName2 string `xml:"shortName2"` +} + +func init() { + t["HostShortNameInconsistentEvent"] = reflect.TypeOf((*HostShortNameInconsistentEvent)(nil)).Elem() +} + +type HostShortNameToIpFailedEvent struct { + HostEvent + + ShortName string `xml:"shortName"` +} + +func init() { + t["HostShortNameToIpFailedEvent"] = reflect.TypeOf((*HostShortNameToIpFailedEvent)(nil)).Elem() +} + +type HostShutdownEvent struct { + HostEvent + + Reason string `xml:"reason"` +} + +func init() { + t["HostShutdownEvent"] = reflect.TypeOf((*HostShutdownEvent)(nil)).Elem() +} + +type HostSnmpConfigSpec struct { + DynamicData + + Enabled *bool `xml:"enabled"` + Port int32 `xml:"port,omitempty"` + ReadOnlyCommunities []string `xml:"readOnlyCommunities,omitempty"` + TrapTargets []HostSnmpDestination `xml:"trapTargets,omitempty"` + Option []KeyValue `xml:"option,omitempty"` +} + +func init() { + t["HostSnmpConfigSpec"] = reflect.TypeOf((*HostSnmpConfigSpec)(nil)).Elem() +} + +type HostSnmpDestination struct { + DynamicData + + HostName string `xml:"hostName"` + Port int32 `xml:"port"` + Community string `xml:"community"` +} + +func init() { + t["HostSnmpDestination"] = reflect.TypeOf((*HostSnmpDestination)(nil)).Elem() +} + +type HostSnmpSystemAgentLimits struct { + DynamicData + + MaxReadOnlyCommunities int32 `xml:"maxReadOnlyCommunities"` + MaxTrapDestinations int32 `xml:"maxTrapDestinations"` + MaxCommunityLength int32 `xml:"maxCommunityLength"` + MaxBufferSize int32 `xml:"maxBufferSize"` + Capability HostSnmpAgentCapability `xml:"capability,omitempty"` +} + +func init() { + t["HostSnmpSystemAgentLimits"] = reflect.TypeOf((*HostSnmpSystemAgentLimits)(nil)).Elem() +} + +type HostSpecGetUpdatedHosts HostSpecGetUpdatedHostsRequestType + +func init() { + t["HostSpecGetUpdatedHosts"] = reflect.TypeOf((*HostSpecGetUpdatedHosts)(nil)).Elem() +} + +type HostSpecGetUpdatedHostsRequestType struct { + This ManagedObjectReference `xml:"_this"` + StartChangeID string `xml:"startChangeID,omitempty"` + EndChangeID string `xml:"endChangeID,omitempty"` +} + +func init() { + t["HostSpecGetUpdatedHostsRequestType"] = reflect.TypeOf((*HostSpecGetUpdatedHostsRequestType)(nil)).Elem() +} + +type HostSpecGetUpdatedHostsResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type HostSpecification struct { + DynamicData + + CreatedTime time.Time `xml:"createdTime"` + LastModified *time.Time `xml:"lastModified"` + Host ManagedObjectReference `xml:"host"` + SubSpecs 
[]HostSubSpecification `xml:"subSpecs,omitempty"` + ChangeID string `xml:"changeID,omitempty"` +} + +func init() { + t["HostSpecification"] = reflect.TypeOf((*HostSpecification)(nil)).Elem() +} + +type HostSpecificationOperationFailed struct { + VimFault + + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["HostSpecificationOperationFailed"] = reflect.TypeOf((*HostSpecificationOperationFailed)(nil)).Elem() +} + +type HostSpecificationOperationFailedFault HostSpecificationOperationFailed + +func init() { + t["HostSpecificationOperationFailedFault"] = reflect.TypeOf((*HostSpecificationOperationFailedFault)(nil)).Elem() +} + +type HostSriovConfig struct { + HostPciPassthruConfig + + SriovEnabled bool `xml:"sriovEnabled"` + NumVirtualFunction int32 `xml:"numVirtualFunction"` +} + +func init() { + t["HostSriovConfig"] = reflect.TypeOf((*HostSriovConfig)(nil)).Elem() +} + +type HostSriovDevicePoolInfo struct { + DynamicData + + Key string `xml:"key"` +} + +func init() { + t["HostSriovDevicePoolInfo"] = reflect.TypeOf((*HostSriovDevicePoolInfo)(nil)).Elem() +} + +type HostSriovInfo struct { + HostPciPassthruInfo + + SriovEnabled bool `xml:"sriovEnabled"` + SriovCapable bool `xml:"sriovCapable"` + SriovActive bool `xml:"sriovActive"` + NumVirtualFunctionRequested int32 `xml:"numVirtualFunctionRequested"` + NumVirtualFunction int32 `xml:"numVirtualFunction"` + MaxVirtualFunctionSupported int32 `xml:"maxVirtualFunctionSupported"` +} + +func init() { + t["HostSriovInfo"] = reflect.TypeOf((*HostSriovInfo)(nil)).Elem() +} + +type HostSriovNetworkDevicePoolInfo struct { + HostSriovDevicePoolInfo + + SwitchKey string `xml:"switchKey,omitempty"` + SwitchUuid string `xml:"switchUuid,omitempty"` + Pnic []PhysicalNic `xml:"pnic,omitempty"` +} + +func init() { + t["HostSriovNetworkDevicePoolInfo"] = reflect.TypeOf((*HostSriovNetworkDevicePoolInfo)(nil)).Elem() +} + +type HostSslThumbprintInfo struct { + DynamicData + + Principal string `xml:"principal"` + OwnerTag string `xml:"ownerTag,omitempty"` + SslThumbprints []string `xml:"sslThumbprints,omitempty"` +} + +func init() { + t["HostSslThumbprintInfo"] = reflect.TypeOf((*HostSslThumbprintInfo)(nil)).Elem() +} + +type HostStatusChangedEvent struct { + ClusterStatusChangedEvent +} + +func init() { + t["HostStatusChangedEvent"] = reflect.TypeOf((*HostStatusChangedEvent)(nil)).Elem() +} + +type HostStorageArrayTypePolicyOption struct { + DynamicData + + Policy BaseElementDescription `xml:"policy,typeattr"` +} + +func init() { + t["HostStorageArrayTypePolicyOption"] = reflect.TypeOf((*HostStorageArrayTypePolicyOption)(nil)).Elem() +} + +type HostStorageDeviceInfo struct { + DynamicData + + HostBusAdapter []BaseHostHostBusAdapter `xml:"hostBusAdapter,omitempty,typeattr"` + ScsiLun []BaseScsiLun `xml:"scsiLun,omitempty,typeattr"` + ScsiTopology *HostScsiTopology `xml:"scsiTopology,omitempty"` + MultipathInfo *HostMultipathInfo `xml:"multipathInfo,omitempty"` + PlugStoreTopology *HostPlugStoreTopology `xml:"plugStoreTopology,omitempty"` + SoftwareInternetScsiEnabled bool `xml:"softwareInternetScsiEnabled"` +} + +func init() { + t["HostStorageDeviceInfo"] = reflect.TypeOf((*HostStorageDeviceInfo)(nil)).Elem() +} + +type HostStorageElementInfo struct { + HostHardwareElementInfo + + OperationalInfo []HostStorageOperationalInfo `xml:"operationalInfo,omitempty"` +} + +func init() { + t["HostStorageElementInfo"] = reflect.TypeOf((*HostStorageElementInfo)(nil)).Elem() +} + +type HostStorageOperationalInfo struct { + DynamicData + + Property string 
`xml:"property"` + Value string `xml:"value"` +} + +func init() { + t["HostStorageOperationalInfo"] = reflect.TypeOf((*HostStorageOperationalInfo)(nil)).Elem() +} + +type HostStorageSystemDiskLocatorLedResult struct { + DynamicData + + Key string `xml:"key"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["HostStorageSystemDiskLocatorLedResult"] = reflect.TypeOf((*HostStorageSystemDiskLocatorLedResult)(nil)).Elem() +} + +type HostStorageSystemScsiLunResult struct { + DynamicData + + Key string `xml:"key"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["HostStorageSystemScsiLunResult"] = reflect.TypeOf((*HostStorageSystemScsiLunResult)(nil)).Elem() +} + +type HostStorageSystemVmfsVolumeResult struct { + DynamicData + + Key string `xml:"key"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["HostStorageSystemVmfsVolumeResult"] = reflect.TypeOf((*HostStorageSystemVmfsVolumeResult)(nil)).Elem() +} + +type HostSubSpecification struct { + DynamicData + + Name string `xml:"name"` + CreatedTime time.Time `xml:"createdTime"` + Data []byte `xml:"data,omitempty"` + BinaryData []byte `xml:"binaryData,omitempty"` +} + +func init() { + t["HostSubSpecification"] = reflect.TypeOf((*HostSubSpecification)(nil)).Elem() +} + +type HostSyncFailedEvent struct { + HostEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["HostSyncFailedEvent"] = reflect.TypeOf((*HostSyncFailedEvent)(nil)).Elem() +} + +type HostSystemComplianceCheckState struct { + DynamicData + + State string `xml:"state"` + CheckTime time.Time `xml:"checkTime"` +} + +func init() { + t["HostSystemComplianceCheckState"] = reflect.TypeOf((*HostSystemComplianceCheckState)(nil)).Elem() +} + +type HostSystemHealthInfo struct { + DynamicData + + NumericSensorInfo []HostNumericSensorInfo `xml:"numericSensorInfo,omitempty"` +} + +func init() { + t["HostSystemHealthInfo"] = reflect.TypeOf((*HostSystemHealthInfo)(nil)).Elem() +} + +type HostSystemIdentificationInfo struct { + DynamicData + + IdentifierValue string `xml:"identifierValue"` + IdentifierType BaseElementDescription `xml:"identifierType,typeattr"` +} + +func init() { + t["HostSystemIdentificationInfo"] = reflect.TypeOf((*HostSystemIdentificationInfo)(nil)).Elem() +} + +type HostSystemInfo struct { + DynamicData + + Vendor string `xml:"vendor"` + Model string `xml:"model"` + Uuid string `xml:"uuid"` + OtherIdentifyingInfo []HostSystemIdentificationInfo `xml:"otherIdentifyingInfo,omitempty"` + SerialNumber string `xml:"serialNumber,omitempty"` +} + +func init() { + t["HostSystemInfo"] = reflect.TypeOf((*HostSystemInfo)(nil)).Elem() +} + +type HostSystemReconnectSpec struct { + DynamicData + + SyncState *bool `xml:"syncState"` +} + +func init() { + t["HostSystemReconnectSpec"] = reflect.TypeOf((*HostSystemReconnectSpec)(nil)).Elem() +} + +type HostSystemRemediationState struct { + DynamicData + + State string `xml:"state"` + OperationTime time.Time `xml:"operationTime"` +} + +func init() { + t["HostSystemRemediationState"] = reflect.TypeOf((*HostSystemRemediationState)(nil)).Elem() +} + +type HostSystemResourceInfo struct { + DynamicData + + Key string `xml:"key"` + Config *ResourceConfigSpec `xml:"config,omitempty"` + Child []HostSystemResourceInfo `xml:"child,omitempty"` +} + +func init() { + t["HostSystemResourceInfo"] = reflect.TypeOf((*HostSystemResourceInfo)(nil)).Elem() +} + +type HostSystemSwapConfiguration struct { + DynamicData + + Option []BaseHostSystemSwapConfigurationSystemSwapOption 
`xml:"option,omitempty,typeattr"` +} + +func init() { + t["HostSystemSwapConfiguration"] = reflect.TypeOf((*HostSystemSwapConfiguration)(nil)).Elem() +} + +type HostSystemSwapConfigurationDatastoreOption struct { + HostSystemSwapConfigurationSystemSwapOption + + Datastore string `xml:"datastore"` +} + +func init() { + t["HostSystemSwapConfigurationDatastoreOption"] = reflect.TypeOf((*HostSystemSwapConfigurationDatastoreOption)(nil)).Elem() +} + +type HostSystemSwapConfigurationDisabledOption struct { + HostSystemSwapConfigurationSystemSwapOption +} + +func init() { + t["HostSystemSwapConfigurationDisabledOption"] = reflect.TypeOf((*HostSystemSwapConfigurationDisabledOption)(nil)).Elem() +} + +type HostSystemSwapConfigurationHostCacheOption struct { + HostSystemSwapConfigurationSystemSwapOption +} + +func init() { + t["HostSystemSwapConfigurationHostCacheOption"] = reflect.TypeOf((*HostSystemSwapConfigurationHostCacheOption)(nil)).Elem() +} + +type HostSystemSwapConfigurationHostLocalSwapOption struct { + HostSystemSwapConfigurationSystemSwapOption +} + +func init() { + t["HostSystemSwapConfigurationHostLocalSwapOption"] = reflect.TypeOf((*HostSystemSwapConfigurationHostLocalSwapOption)(nil)).Elem() +} + +type HostSystemSwapConfigurationSystemSwapOption struct { + DynamicData + + Key int32 `xml:"key"` +} + +func init() { + t["HostSystemSwapConfigurationSystemSwapOption"] = reflect.TypeOf((*HostSystemSwapConfigurationSystemSwapOption)(nil)).Elem() +} + +type HostTargetTransport struct { + DynamicData +} + +func init() { + t["HostTargetTransport"] = reflect.TypeOf((*HostTargetTransport)(nil)).Elem() +} + +type HostTpmAttestationInfo struct { + DynamicData + + Time time.Time `xml:"time"` + Status HostTpmAttestationInfoAcceptanceStatus `xml:"status"` + Message *LocalizableMessage `xml:"message,omitempty"` +} + +func init() { + t["HostTpmAttestationInfo"] = reflect.TypeOf((*HostTpmAttestationInfo)(nil)).Elem() +} + +type HostTpmAttestationReport struct { + DynamicData + + TpmPcrValues []HostTpmDigestInfo `xml:"tpmPcrValues"` + TpmEvents []HostTpmEventLogEntry `xml:"tpmEvents"` + TpmLogReliable bool `xml:"tpmLogReliable"` +} + +func init() { + t["HostTpmAttestationReport"] = reflect.TypeOf((*HostTpmAttestationReport)(nil)).Elem() +} + +type HostTpmBootSecurityOptionEventDetails struct { + HostTpmEventDetails + + BootSecurityOption string `xml:"bootSecurityOption"` +} + +func init() { + t["HostTpmBootSecurityOptionEventDetails"] = reflect.TypeOf((*HostTpmBootSecurityOptionEventDetails)(nil)).Elem() +} + +type HostTpmCommandEventDetails struct { + HostTpmEventDetails + + CommandLine string `xml:"commandLine"` +} + +func init() { + t["HostTpmCommandEventDetails"] = reflect.TypeOf((*HostTpmCommandEventDetails)(nil)).Elem() +} + +type HostTpmDigestInfo struct { + HostDigestInfo + + PcrNumber int32 `xml:"pcrNumber"` +} + +func init() { + t["HostTpmDigestInfo"] = reflect.TypeOf((*HostTpmDigestInfo)(nil)).Elem() +} + +type HostTpmEventDetails struct { + DynamicData + + DataHash []byte `xml:"dataHash"` + DataHashMethod string `xml:"dataHashMethod,omitempty"` +} + +func init() { + t["HostTpmEventDetails"] = reflect.TypeOf((*HostTpmEventDetails)(nil)).Elem() +} + +type HostTpmEventLogEntry struct { + DynamicData + + PcrIndex int32 `xml:"pcrIndex"` + EventDetails BaseHostTpmEventDetails `xml:"eventDetails,typeattr"` +} + +func init() { + t["HostTpmEventLogEntry"] = reflect.TypeOf((*HostTpmEventLogEntry)(nil)).Elem() +} + +type HostTpmOptionEventDetails struct { + HostTpmEventDetails + + OptionsFileName 
string `xml:"optionsFileName"` + BootOptions []byte `xml:"bootOptions,omitempty"` +} + +func init() { + t["HostTpmOptionEventDetails"] = reflect.TypeOf((*HostTpmOptionEventDetails)(nil)).Elem() +} + +type HostTpmSoftwareComponentEventDetails struct { + HostTpmEventDetails + + ComponentName string `xml:"componentName"` + VibName string `xml:"vibName"` + VibVersion string `xml:"vibVersion"` + VibVendor string `xml:"vibVendor"` +} + +func init() { + t["HostTpmSoftwareComponentEventDetails"] = reflect.TypeOf((*HostTpmSoftwareComponentEventDetails)(nil)).Elem() +} + +type HostUnresolvedVmfsExtent struct { + DynamicData + + Device HostScsiDiskPartition `xml:"device"` + DevicePath string `xml:"devicePath"` + VmfsUuid string `xml:"vmfsUuid"` + IsHeadExtent bool `xml:"isHeadExtent"` + Ordinal int32 `xml:"ordinal"` + StartBlock int32 `xml:"startBlock"` + EndBlock int32 `xml:"endBlock"` + Reason string `xml:"reason"` +} + +func init() { + t["HostUnresolvedVmfsExtent"] = reflect.TypeOf((*HostUnresolvedVmfsExtent)(nil)).Elem() +} + +type HostUnresolvedVmfsResignatureSpec struct { + DynamicData + + ExtentDevicePath []string `xml:"extentDevicePath"` +} + +func init() { + t["HostUnresolvedVmfsResignatureSpec"] = reflect.TypeOf((*HostUnresolvedVmfsResignatureSpec)(nil)).Elem() +} + +type HostUnresolvedVmfsResolutionResult struct { + DynamicData + + Spec HostUnresolvedVmfsResolutionSpec `xml:"spec"` + Vmfs *HostVmfsVolume `xml:"vmfs,omitempty"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["HostUnresolvedVmfsResolutionResult"] = reflect.TypeOf((*HostUnresolvedVmfsResolutionResult)(nil)).Elem() +} + +type HostUnresolvedVmfsResolutionSpec struct { + DynamicData + + ExtentDevicePath []string `xml:"extentDevicePath"` + UuidResolution string `xml:"uuidResolution"` +} + +func init() { + t["HostUnresolvedVmfsResolutionSpec"] = reflect.TypeOf((*HostUnresolvedVmfsResolutionSpec)(nil)).Elem() +} + +type HostUnresolvedVmfsVolume struct { + DynamicData + + Extent []HostUnresolvedVmfsExtent `xml:"extent"` + VmfsLabel string `xml:"vmfsLabel"` + VmfsUuid string `xml:"vmfsUuid"` + TotalBlocks int32 `xml:"totalBlocks"` + ResolveStatus HostUnresolvedVmfsVolumeResolveStatus `xml:"resolveStatus"` +} + +func init() { + t["HostUnresolvedVmfsVolume"] = reflect.TypeOf((*HostUnresolvedVmfsVolume)(nil)).Elem() +} + +type HostUnresolvedVmfsVolumeResolveStatus struct { + DynamicData + + Resolvable bool `xml:"resolvable"` + IncompleteExtents *bool `xml:"incompleteExtents"` + MultipleCopies *bool `xml:"multipleCopies"` +} + +func init() { + t["HostUnresolvedVmfsVolumeResolveStatus"] = reflect.TypeOf((*HostUnresolvedVmfsVolumeResolveStatus)(nil)).Elem() +} + +type HostUpgradeFailedEvent struct { + HostEvent +} + +func init() { + t["HostUpgradeFailedEvent"] = reflect.TypeOf((*HostUpgradeFailedEvent)(nil)).Elem() +} + +type HostUserWorldSwapNotEnabledEvent struct { + HostEvent +} + +func init() { + t["HostUserWorldSwapNotEnabledEvent"] = reflect.TypeOf((*HostUserWorldSwapNotEnabledEvent)(nil)).Elem() +} + +type HostVFlashManagerVFlashCacheConfigInfo struct { + DynamicData + + VFlashModuleConfigOption []HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption `xml:"vFlashModuleConfigOption,omitempty"` + DefaultVFlashModule string `xml:"defaultVFlashModule,omitempty"` + SwapCacheReservationInGB int64 `xml:"swapCacheReservationInGB,omitempty"` +} + +func init() { + t["HostVFlashManagerVFlashCacheConfigInfo"] = reflect.TypeOf((*HostVFlashManagerVFlashCacheConfigInfo)(nil)).Elem() +} + +type 
HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption struct { + DynamicData + + VFlashModule string `xml:"vFlashModule"` + VFlashModuleVersion string `xml:"vFlashModuleVersion"` + MinSupportedModuleVersion string `xml:"minSupportedModuleVersion"` + CacheConsistencyType ChoiceOption `xml:"cacheConsistencyType"` + CacheMode ChoiceOption `xml:"cacheMode"` + BlockSizeInKBOption LongOption `xml:"blockSizeInKBOption"` + ReservationInMBOption LongOption `xml:"reservationInMBOption"` + MaxDiskSizeInKB int64 `xml:"maxDiskSizeInKB"` +} + +func init() { + t["HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption"] = reflect.TypeOf((*HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption)(nil)).Elem() +} + +type HostVFlashManagerVFlashCacheConfigSpec struct { + DynamicData + + DefaultVFlashModule string `xml:"defaultVFlashModule"` + SwapCacheReservationInGB int64 `xml:"swapCacheReservationInGB"` +} + +func init() { + t["HostVFlashManagerVFlashCacheConfigSpec"] = reflect.TypeOf((*HostVFlashManagerVFlashCacheConfigSpec)(nil)).Elem() +} + +type HostVFlashManagerVFlashConfigInfo struct { + DynamicData + + VFlashResourceConfigInfo *HostVFlashManagerVFlashResourceConfigInfo `xml:"vFlashResourceConfigInfo,omitempty"` + VFlashCacheConfigInfo *HostVFlashManagerVFlashCacheConfigInfo `xml:"vFlashCacheConfigInfo,omitempty"` +} + +func init() { + t["HostVFlashManagerVFlashConfigInfo"] = reflect.TypeOf((*HostVFlashManagerVFlashConfigInfo)(nil)).Elem() +} + +type HostVFlashManagerVFlashResourceConfigInfo struct { + DynamicData + + Vffs *HostVffsVolume `xml:"vffs,omitempty"` + Capacity int64 `xml:"capacity"` +} + +func init() { + t["HostVFlashManagerVFlashResourceConfigInfo"] = reflect.TypeOf((*HostVFlashManagerVFlashResourceConfigInfo)(nil)).Elem() +} + +type HostVFlashManagerVFlashResourceConfigSpec struct { + DynamicData + + VffsUuid string `xml:"vffsUuid"` +} + +func init() { + t["HostVFlashManagerVFlashResourceConfigSpec"] = reflect.TypeOf((*HostVFlashManagerVFlashResourceConfigSpec)(nil)).Elem() +} + +type HostVFlashManagerVFlashResourceRunTimeInfo struct { + DynamicData + + Usage int64 `xml:"usage"` + Capacity int64 `xml:"capacity"` + Accessible bool `xml:"accessible"` + CapacityForVmCache int64 `xml:"capacityForVmCache"` + FreeForVmCache int64 `xml:"freeForVmCache"` +} + +func init() { + t["HostVFlashManagerVFlashResourceRunTimeInfo"] = reflect.TypeOf((*HostVFlashManagerVFlashResourceRunTimeInfo)(nil)).Elem() +} + +type HostVFlashResourceConfigurationResult struct { + DynamicData + + DevicePath []string `xml:"devicePath,omitempty"` + Vffs *HostVffsVolume `xml:"vffs,omitempty"` + DiskConfigurationResult []HostDiskConfigurationResult `xml:"diskConfigurationResult,omitempty"` +} + +func init() { + t["HostVFlashResourceConfigurationResult"] = reflect.TypeOf((*HostVFlashResourceConfigurationResult)(nil)).Elem() +} + +type HostVMotionCompatibility struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Compatibility []string `xml:"compatibility,omitempty"` +} + +func init() { + t["HostVMotionCompatibility"] = reflect.TypeOf((*HostVMotionCompatibility)(nil)).Elem() +} + +type HostVMotionConfig struct { + DynamicData + + VmotionNicKey string `xml:"vmotionNicKey,omitempty"` + Enabled bool `xml:"enabled"` +} + +func init() { + t["HostVMotionConfig"] = reflect.TypeOf((*HostVMotionConfig)(nil)).Elem() +} + +type HostVMotionInfo struct { + DynamicData + + NetConfig *HostVMotionNetConfig `xml:"netConfig,omitempty"` + IpConfig *HostIpConfig `xml:"ipConfig,omitempty"` +} + +func init() 
{ + t["HostVMotionInfo"] = reflect.TypeOf((*HostVMotionInfo)(nil)).Elem() +} + +type HostVMotionNetConfig struct { + DynamicData + + CandidateVnic []HostVirtualNic `xml:"candidateVnic,omitempty"` + SelectedVnic string `xml:"selectedVnic,omitempty"` +} + +func init() { + t["HostVMotionNetConfig"] = reflect.TypeOf((*HostVMotionNetConfig)(nil)).Elem() +} + +type HostVStorageObjectCreateDiskFromSnapshotRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + SnapshotId ID `xml:"snapshotId"` + Name string `xml:"name"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr"` + Path string `xml:"path,omitempty"` +} + +func init() { + t["HostVStorageObjectCreateDiskFromSnapshotRequestType"] = reflect.TypeOf((*HostVStorageObjectCreateDiskFromSnapshotRequestType)(nil)).Elem() +} + +type HostVStorageObjectCreateDiskFromSnapshot_Task HostVStorageObjectCreateDiskFromSnapshotRequestType + +func init() { + t["HostVStorageObjectCreateDiskFromSnapshot_Task"] = reflect.TypeOf((*HostVStorageObjectCreateDiskFromSnapshot_Task)(nil)).Elem() +} + +type HostVStorageObjectCreateDiskFromSnapshot_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type HostVStorageObjectCreateSnapshotRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Description string `xml:"description"` +} + +func init() { + t["HostVStorageObjectCreateSnapshotRequestType"] = reflect.TypeOf((*HostVStorageObjectCreateSnapshotRequestType)(nil)).Elem() +} + +type HostVStorageObjectCreateSnapshot_Task HostVStorageObjectCreateSnapshotRequestType + +func init() { + t["HostVStorageObjectCreateSnapshot_Task"] = reflect.TypeOf((*HostVStorageObjectCreateSnapshot_Task)(nil)).Elem() +} + +type HostVStorageObjectCreateSnapshot_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type HostVStorageObjectDeleteSnapshotRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + SnapshotId ID `xml:"snapshotId"` +} + +func init() { + t["HostVStorageObjectDeleteSnapshotRequestType"] = reflect.TypeOf((*HostVStorageObjectDeleteSnapshotRequestType)(nil)).Elem() +} + +type HostVStorageObjectDeleteSnapshot_Task HostVStorageObjectDeleteSnapshotRequestType + +func init() { + t["HostVStorageObjectDeleteSnapshot_Task"] = reflect.TypeOf((*HostVStorageObjectDeleteSnapshot_Task)(nil)).Elem() +} + +type HostVStorageObjectDeleteSnapshot_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type HostVStorageObjectRetrieveSnapshotInfo HostVStorageObjectRetrieveSnapshotInfoRequestType + +func init() { + t["HostVStorageObjectRetrieveSnapshotInfo"] = reflect.TypeOf((*HostVStorageObjectRetrieveSnapshotInfo)(nil)).Elem() +} + +type HostVStorageObjectRetrieveSnapshotInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["HostVStorageObjectRetrieveSnapshotInfoRequestType"] = reflect.TypeOf((*HostVStorageObjectRetrieveSnapshotInfoRequestType)(nil)).Elem() +} + +type HostVStorageObjectRetrieveSnapshotInfoResponse struct { + Returnval VStorageObjectSnapshotInfo `xml:"returnval"` +} + +type HostVStorageObjectRevertRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id 
ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + SnapshotId ID `xml:"snapshotId"` +} + +func init() { + t["HostVStorageObjectRevertRequestType"] = reflect.TypeOf((*HostVStorageObjectRevertRequestType)(nil)).Elem() +} + +type HostVStorageObjectRevert_Task HostVStorageObjectRevertRequestType + +func init() { + t["HostVStorageObjectRevert_Task"] = reflect.TypeOf((*HostVStorageObjectRevert_Task)(nil)).Elem() +} + +type HostVStorageObjectRevert_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type HostVfatVolume struct { + HostFileSystemVolume +} + +func init() { + t["HostVfatVolume"] = reflect.TypeOf((*HostVfatVolume)(nil)).Elem() +} + +type HostVffsSpec struct { + DynamicData + + DevicePath string `xml:"devicePath"` + Partition *HostDiskPartitionSpec `xml:"partition,omitempty"` + MajorVersion int32 `xml:"majorVersion"` + VolumeName string `xml:"volumeName"` +} + +func init() { + t["HostVffsSpec"] = reflect.TypeOf((*HostVffsSpec)(nil)).Elem() +} + +type HostVffsVolume struct { + HostFileSystemVolume + + MajorVersion int32 `xml:"majorVersion"` + Version string `xml:"version"` + Uuid string `xml:"uuid"` + Extent []HostScsiDiskPartition `xml:"extent"` +} + +func init() { + t["HostVffsVolume"] = reflect.TypeOf((*HostVffsVolume)(nil)).Elem() +} + +type HostVirtualNic struct { + DynamicData + + Device string `xml:"device"` + Key string `xml:"key"` + Portgroup string `xml:"portgroup"` + Spec HostVirtualNicSpec `xml:"spec"` + Port string `xml:"port,omitempty"` +} + +func init() { + t["HostVirtualNic"] = reflect.TypeOf((*HostVirtualNic)(nil)).Elem() +} + +type HostVirtualNicConfig struct { + DynamicData + + ChangeOperation string `xml:"changeOperation,omitempty"` + Device string `xml:"device,omitempty"` + Portgroup string `xml:"portgroup"` + Spec *HostVirtualNicSpec `xml:"spec,omitempty"` +} + +func init() { + t["HostVirtualNicConfig"] = reflect.TypeOf((*HostVirtualNicConfig)(nil)).Elem() +} + +type HostVirtualNicConnection struct { + DynamicData + + Portgroup string `xml:"portgroup,omitempty"` + DvPort *DistributedVirtualSwitchPortConnection `xml:"dvPort,omitempty"` + OpNetwork *HostVirtualNicOpaqueNetworkSpec `xml:"opNetwork,omitempty"` +} + +func init() { + t["HostVirtualNicConnection"] = reflect.TypeOf((*HostVirtualNicConnection)(nil)).Elem() +} + +type HostVirtualNicIpRouteSpec struct { + DynamicData + + IpRouteConfig BaseHostIpRouteConfig `xml:"ipRouteConfig,omitempty,typeattr"` +} + +func init() { + t["HostVirtualNicIpRouteSpec"] = reflect.TypeOf((*HostVirtualNicIpRouteSpec)(nil)).Elem() +} + +type HostVirtualNicManagerInfo struct { + DynamicData + + NetConfig []VirtualNicManagerNetConfig `xml:"netConfig,omitempty"` +} + +func init() { + t["HostVirtualNicManagerInfo"] = reflect.TypeOf((*HostVirtualNicManagerInfo)(nil)).Elem() +} + +type HostVirtualNicManagerNicTypeSelection struct { + DynamicData + + Vnic HostVirtualNicConnection `xml:"vnic"` + NicType []string `xml:"nicType,omitempty"` +} + +func init() { + t["HostVirtualNicManagerNicTypeSelection"] = reflect.TypeOf((*HostVirtualNicManagerNicTypeSelection)(nil)).Elem() +} + +type HostVirtualNicOpaqueNetworkSpec struct { + DynamicData + + OpaqueNetworkId string `xml:"opaqueNetworkId"` + OpaqueNetworkType string `xml:"opaqueNetworkType"` +} + +func init() { + t["HostVirtualNicOpaqueNetworkSpec"] = reflect.TypeOf((*HostVirtualNicOpaqueNetworkSpec)(nil)).Elem() +} + +type HostVirtualNicSpec struct { + DynamicData + + Ip *HostIpConfig `xml:"ip,omitempty"` + Mac string `xml:"mac,omitempty"` + 
DistributedVirtualPort *DistributedVirtualSwitchPortConnection `xml:"distributedVirtualPort,omitempty"` + Portgroup string `xml:"portgroup,omitempty"` + Mtu int32 `xml:"mtu,omitempty"` + TsoEnabled *bool `xml:"tsoEnabled"` + NetStackInstanceKey string `xml:"netStackInstanceKey,omitempty"` + OpaqueNetwork *HostVirtualNicOpaqueNetworkSpec `xml:"opaqueNetwork,omitempty"` + ExternalId string `xml:"externalId,omitempty"` + PinnedPnic string `xml:"pinnedPnic,omitempty"` + IpRouteSpec *HostVirtualNicIpRouteSpec `xml:"ipRouteSpec,omitempty"` +} + +func init() { + t["HostVirtualNicSpec"] = reflect.TypeOf((*HostVirtualNicSpec)(nil)).Elem() +} + +type HostVirtualSwitch struct { + DynamicData + + Name string `xml:"name"` + Key string `xml:"key"` + NumPorts int32 `xml:"numPorts"` + NumPortsAvailable int32 `xml:"numPortsAvailable"` + Mtu int32 `xml:"mtu,omitempty"` + Portgroup []string `xml:"portgroup,omitempty"` + Pnic []string `xml:"pnic,omitempty"` + Spec HostVirtualSwitchSpec `xml:"spec"` +} + +func init() { + t["HostVirtualSwitch"] = reflect.TypeOf((*HostVirtualSwitch)(nil)).Elem() +} + +type HostVirtualSwitchAutoBridge struct { + HostVirtualSwitchBridge + + ExcludedNicDevice []string `xml:"excludedNicDevice,omitempty"` +} + +func init() { + t["HostVirtualSwitchAutoBridge"] = reflect.TypeOf((*HostVirtualSwitchAutoBridge)(nil)).Elem() +} + +type HostVirtualSwitchBeaconConfig struct { + DynamicData + + Interval int32 `xml:"interval"` +} + +func init() { + t["HostVirtualSwitchBeaconConfig"] = reflect.TypeOf((*HostVirtualSwitchBeaconConfig)(nil)).Elem() +} + +type HostVirtualSwitchBondBridge struct { + HostVirtualSwitchBridge + + NicDevice []string `xml:"nicDevice"` + Beacon *HostVirtualSwitchBeaconConfig `xml:"beacon,omitempty"` + LinkDiscoveryProtocolConfig *LinkDiscoveryProtocolConfig `xml:"linkDiscoveryProtocolConfig,omitempty"` +} + +func init() { + t["HostVirtualSwitchBondBridge"] = reflect.TypeOf((*HostVirtualSwitchBondBridge)(nil)).Elem() +} + +type HostVirtualSwitchBridge struct { + DynamicData +} + +func init() { + t["HostVirtualSwitchBridge"] = reflect.TypeOf((*HostVirtualSwitchBridge)(nil)).Elem() +} + +type HostVirtualSwitchConfig struct { + DynamicData + + ChangeOperation string `xml:"changeOperation,omitempty"` + Name string `xml:"name"` + Spec *HostVirtualSwitchSpec `xml:"spec,omitempty"` +} + +func init() { + t["HostVirtualSwitchConfig"] = reflect.TypeOf((*HostVirtualSwitchConfig)(nil)).Elem() +} + +type HostVirtualSwitchSimpleBridge struct { + HostVirtualSwitchBridge + + NicDevice string `xml:"nicDevice"` +} + +func init() { + t["HostVirtualSwitchSimpleBridge"] = reflect.TypeOf((*HostVirtualSwitchSimpleBridge)(nil)).Elem() +} + +type HostVirtualSwitchSpec struct { + DynamicData + + NumPorts int32 `xml:"numPorts"` + Bridge BaseHostVirtualSwitchBridge `xml:"bridge,omitempty,typeattr"` + Policy *HostNetworkPolicy `xml:"policy,omitempty"` + Mtu int32 `xml:"mtu,omitempty"` +} + +func init() { + t["HostVirtualSwitchSpec"] = reflect.TypeOf((*HostVirtualSwitchSpec)(nil)).Elem() +} + +type HostVmciAccessManagerAccessSpec struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + Services []string `xml:"services,omitempty"` + Mode string `xml:"mode"` +} + +func init() { + t["HostVmciAccessManagerAccessSpec"] = reflect.TypeOf((*HostVmciAccessManagerAccessSpec)(nil)).Elem() +} + +type HostVmfsRescanResult struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["HostVmfsRescanResult"] = 
reflect.TypeOf((*HostVmfsRescanResult)(nil)).Elem() +} + +type HostVmfsSpec struct { + DynamicData + + Extent HostScsiDiskPartition `xml:"extent"` + BlockSizeMb int32 `xml:"blockSizeMb,omitempty"` + MajorVersion int32 `xml:"majorVersion"` + VolumeName string `xml:"volumeName"` + BlockSize int32 `xml:"blockSize,omitempty"` + UnmapGranularity int32 `xml:"unmapGranularity,omitempty"` + UnmapPriority string `xml:"unmapPriority,omitempty"` + UnmapBandwidthSpec *VmfsUnmapBandwidthSpec `xml:"unmapBandwidthSpec,omitempty"` +} + +func init() { + t["HostVmfsSpec"] = reflect.TypeOf((*HostVmfsSpec)(nil)).Elem() +} + +type HostVmfsVolume struct { + HostFileSystemVolume + + BlockSizeMb int32 `xml:"blockSizeMb"` + BlockSize int32 `xml:"blockSize,omitempty"` + UnmapGranularity int32 `xml:"unmapGranularity,omitempty"` + UnmapPriority string `xml:"unmapPriority,omitempty"` + UnmapBandwidthSpec *VmfsUnmapBandwidthSpec `xml:"unmapBandwidthSpec,omitempty"` + MaxBlocks int32 `xml:"maxBlocks"` + MajorVersion int32 `xml:"majorVersion"` + Version string `xml:"version"` + Uuid string `xml:"uuid"` + Extent []HostScsiDiskPartition `xml:"extent"` + VmfsUpgradable bool `xml:"vmfsUpgradable"` + ForceMountedInfo *HostForceMountedInfo `xml:"forceMountedInfo,omitempty"` + Ssd *bool `xml:"ssd"` + Local *bool `xml:"local"` + ScsiDiskType string `xml:"scsiDiskType,omitempty"` +} + +func init() { + t["HostVmfsVolume"] = reflect.TypeOf((*HostVmfsVolume)(nil)).Elem() +} + +type HostVnicConnectedToCustomizedDVPortEvent struct { + HostEvent + + Vnic VnicPortArgument `xml:"vnic"` + PrevPortKey string `xml:"prevPortKey,omitempty"` +} + +func init() { + t["HostVnicConnectedToCustomizedDVPortEvent"] = reflect.TypeOf((*HostVnicConnectedToCustomizedDVPortEvent)(nil)).Elem() +} + +type HostVsanInternalSystemCmmdsQuery struct { + DynamicData + + Type string `xml:"type,omitempty"` + Uuid string `xml:"uuid,omitempty"` + Owner string `xml:"owner,omitempty"` +} + +func init() { + t["HostVsanInternalSystemCmmdsQuery"] = reflect.TypeOf((*HostVsanInternalSystemCmmdsQuery)(nil)).Elem() +} + +type HostVsanInternalSystemDeleteVsanObjectsResult struct { + DynamicData + + Uuid string `xml:"uuid"` + Success bool `xml:"success"` + FailureReason []LocalizableMessage `xml:"failureReason,omitempty"` +} + +func init() { + t["HostVsanInternalSystemDeleteVsanObjectsResult"] = reflect.TypeOf((*HostVsanInternalSystemDeleteVsanObjectsResult)(nil)).Elem() +} + +type HostVsanInternalSystemVsanObjectOperationResult struct { + DynamicData + + Uuid string `xml:"uuid"` + FailureReason []LocalizableMessage `xml:"failureReason,omitempty"` +} + +func init() { + t["HostVsanInternalSystemVsanObjectOperationResult"] = reflect.TypeOf((*HostVsanInternalSystemVsanObjectOperationResult)(nil)).Elem() +} + +type HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult struct { + DynamicData + + DiskUuid string `xml:"diskUuid"` + Success bool `xml:"success"` + FailureReason string `xml:"failureReason,omitempty"` +} + +func init() { + t["HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult"] = reflect.TypeOf((*HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult)(nil)).Elem() +} + +type HostVvolVolume struct { + HostFileSystemVolume + + ScId string `xml:"scId"` + HostPE []VVolHostPE `xml:"hostPE,omitempty"` + VasaProviderInfo []VimVasaProviderInfo `xml:"vasaProviderInfo,omitempty"` + StorageArray []VASAStorageArray `xml:"storageArray,omitempty"` +} + +func init() { + t["HostVvolVolume"] = reflect.TypeOf((*HostVvolVolume)(nil)).Elem() +} + +type HostVvolVolumeSpecification 
struct { + DynamicData + + MaxSizeInMB int64 `xml:"maxSizeInMB"` + VolumeName string `xml:"volumeName"` + VasaProviderInfo []VimVasaProviderInfo `xml:"vasaProviderInfo,omitempty"` + StorageArray []VASAStorageArray `xml:"storageArray,omitempty"` + Uuid string `xml:"uuid"` +} + +func init() { + t["HostVvolVolumeSpecification"] = reflect.TypeOf((*HostVvolVolumeSpecification)(nil)).Elem() +} + +type HostWwnChangedEvent struct { + HostEvent + + OldNodeWwns []int64 `xml:"oldNodeWwns,omitempty"` + OldPortWwns []int64 `xml:"oldPortWwns,omitempty"` + NewNodeWwns []int64 `xml:"newNodeWwns,omitempty"` + NewPortWwns []int64 `xml:"newPortWwns,omitempty"` +} + +func init() { + t["HostWwnChangedEvent"] = reflect.TypeOf((*HostWwnChangedEvent)(nil)).Elem() +} + +type HostWwnConflictEvent struct { + HostEvent + + ConflictedVms []VmEventArgument `xml:"conflictedVms,omitempty"` + ConflictedHosts []HostEventArgument `xml:"conflictedHosts,omitempty"` + Wwn int64 `xml:"wwn"` +} + +func init() { + t["HostWwnConflictEvent"] = reflect.TypeOf((*HostWwnConflictEvent)(nil)).Elem() +} + +type HotSnapshotMoveNotSupported struct { + SnapshotCopyNotSupported +} + +func init() { + t["HotSnapshotMoveNotSupported"] = reflect.TypeOf((*HotSnapshotMoveNotSupported)(nil)).Elem() +} + +type HotSnapshotMoveNotSupportedFault HotSnapshotMoveNotSupported + +func init() { + t["HotSnapshotMoveNotSupportedFault"] = reflect.TypeOf((*HotSnapshotMoveNotSupportedFault)(nil)).Elem() +} + +type HourlyTaskScheduler struct { + RecurrentTaskScheduler + + Minute int32 `xml:"minute"` +} + +func init() { + t["HourlyTaskScheduler"] = reflect.TypeOf((*HourlyTaskScheduler)(nil)).Elem() +} + +type HttpFault struct { + VimFault + + StatusCode int32 `xml:"statusCode"` + StatusMessage string `xml:"statusMessage"` +} + +func init() { + t["HttpFault"] = reflect.TypeOf((*HttpFault)(nil)).Elem() +} + +type HttpFaultFault HttpFault + +func init() { + t["HttpFaultFault"] = reflect.TypeOf((*HttpFaultFault)(nil)).Elem() +} + +type HttpNfcLeaseAbort HttpNfcLeaseAbortRequestType + +func init() { + t["HttpNfcLeaseAbort"] = reflect.TypeOf((*HttpNfcLeaseAbort)(nil)).Elem() +} + +type HttpNfcLeaseAbortRequestType struct { + This ManagedObjectReference `xml:"_this"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["HttpNfcLeaseAbortRequestType"] = reflect.TypeOf((*HttpNfcLeaseAbortRequestType)(nil)).Elem() +} + +type HttpNfcLeaseAbortResponse struct { +} + +type HttpNfcLeaseCapabilities struct { + DynamicData + + PullModeSupported bool `xml:"pullModeSupported"` + CorsSupported bool `xml:"corsSupported"` +} + +func init() { + t["HttpNfcLeaseCapabilities"] = reflect.TypeOf((*HttpNfcLeaseCapabilities)(nil)).Elem() +} + +type HttpNfcLeaseComplete HttpNfcLeaseCompleteRequestType + +func init() { + t["HttpNfcLeaseComplete"] = reflect.TypeOf((*HttpNfcLeaseComplete)(nil)).Elem() +} + +type HttpNfcLeaseCompleteRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["HttpNfcLeaseCompleteRequestType"] = reflect.TypeOf((*HttpNfcLeaseCompleteRequestType)(nil)).Elem() +} + +type HttpNfcLeaseCompleteResponse struct { +} + +type HttpNfcLeaseDatastoreLeaseInfo struct { + DynamicData + + DatastoreKey string `xml:"datastoreKey"` + Hosts []HttpNfcLeaseHostInfo `xml:"hosts"` +} + +func init() { + t["HttpNfcLeaseDatastoreLeaseInfo"] = reflect.TypeOf((*HttpNfcLeaseDatastoreLeaseInfo)(nil)).Elem() +} + +type HttpNfcLeaseDeviceUrl struct { + DynamicData + + Key string `xml:"key"` + ImportKey string `xml:"importKey"` + Url string 
`xml:"url"` + SslThumbprint string `xml:"sslThumbprint"` + Disk *bool `xml:"disk"` + TargetId string `xml:"targetId,omitempty"` + DatastoreKey string `xml:"datastoreKey,omitempty"` + FileSize int64 `xml:"fileSize,omitempty"` +} + +func init() { + t["HttpNfcLeaseDeviceUrl"] = reflect.TypeOf((*HttpNfcLeaseDeviceUrl)(nil)).Elem() +} + +type HttpNfcLeaseGetManifest HttpNfcLeaseGetManifestRequestType + +func init() { + t["HttpNfcLeaseGetManifest"] = reflect.TypeOf((*HttpNfcLeaseGetManifest)(nil)).Elem() +} + +type HttpNfcLeaseGetManifestRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["HttpNfcLeaseGetManifestRequestType"] = reflect.TypeOf((*HttpNfcLeaseGetManifestRequestType)(nil)).Elem() +} + +type HttpNfcLeaseGetManifestResponse struct { + Returnval []HttpNfcLeaseManifestEntry `xml:"returnval,omitempty"` +} + +type HttpNfcLeaseHostInfo struct { + DynamicData + + Url string `xml:"url"` + SslThumbprint string `xml:"sslThumbprint"` +} + +func init() { + t["HttpNfcLeaseHostInfo"] = reflect.TypeOf((*HttpNfcLeaseHostInfo)(nil)).Elem() +} + +type HttpNfcLeaseInfo struct { + DynamicData + + Lease ManagedObjectReference `xml:"lease"` + Entity ManagedObjectReference `xml:"entity"` + DeviceUrl []HttpNfcLeaseDeviceUrl `xml:"deviceUrl,omitempty"` + TotalDiskCapacityInKB int64 `xml:"totalDiskCapacityInKB"` + LeaseTimeout int32 `xml:"leaseTimeout"` + HostMap []HttpNfcLeaseDatastoreLeaseInfo `xml:"hostMap,omitempty"` +} + +func init() { + t["HttpNfcLeaseInfo"] = reflect.TypeOf((*HttpNfcLeaseInfo)(nil)).Elem() +} + +type HttpNfcLeaseManifestEntry struct { + DynamicData + + Key string `xml:"key"` + Sha1 string `xml:"sha1"` + Checksum string `xml:"checksum,omitempty"` + ChecksumType string `xml:"checksumType,omitempty"` + Size int64 `xml:"size"` + Disk bool `xml:"disk"` + Capacity int64 `xml:"capacity,omitempty"` + PopulatedSize int64 `xml:"populatedSize,omitempty"` +} + +func init() { + t["HttpNfcLeaseManifestEntry"] = reflect.TypeOf((*HttpNfcLeaseManifestEntry)(nil)).Elem() +} + +type HttpNfcLeaseProgress HttpNfcLeaseProgressRequestType + +func init() { + t["HttpNfcLeaseProgress"] = reflect.TypeOf((*HttpNfcLeaseProgress)(nil)).Elem() +} + +type HttpNfcLeaseProgressRequestType struct { + This ManagedObjectReference `xml:"_this"` + Percent int32 `xml:"percent"` +} + +func init() { + t["HttpNfcLeaseProgressRequestType"] = reflect.TypeOf((*HttpNfcLeaseProgressRequestType)(nil)).Elem() +} + +type HttpNfcLeaseProgressResponse struct { +} + +type HttpNfcLeasePullFromUrlsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Files []HttpNfcLeaseSourceFile `xml:"files,omitempty"` +} + +func init() { + t["HttpNfcLeasePullFromUrlsRequestType"] = reflect.TypeOf((*HttpNfcLeasePullFromUrlsRequestType)(nil)).Elem() +} + +type HttpNfcLeasePullFromUrls_Task HttpNfcLeasePullFromUrlsRequestType + +func init() { + t["HttpNfcLeasePullFromUrls_Task"] = reflect.TypeOf((*HttpNfcLeasePullFromUrls_Task)(nil)).Elem() +} + +type HttpNfcLeasePullFromUrls_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type HttpNfcLeaseSetManifestChecksumType HttpNfcLeaseSetManifestChecksumTypeRequestType + +func init() { + t["HttpNfcLeaseSetManifestChecksumType"] = reflect.TypeOf((*HttpNfcLeaseSetManifestChecksumType)(nil)).Elem() +} + +type HttpNfcLeaseSetManifestChecksumTypeRequestType struct { + This ManagedObjectReference `xml:"_this"` + DeviceUrlsToChecksumTypes []KeyValue `xml:"deviceUrlsToChecksumTypes,omitempty"` +} + +func init() { + 
t["HttpNfcLeaseSetManifestChecksumTypeRequestType"] = reflect.TypeOf((*HttpNfcLeaseSetManifestChecksumTypeRequestType)(nil)).Elem() +} + +type HttpNfcLeaseSetManifestChecksumTypeResponse struct { +} + +type HttpNfcLeaseSourceFile struct { + DynamicData + + TargetDeviceId string `xml:"targetDeviceId"` + Url string `xml:"url"` + MemberName string `xml:"memberName,omitempty"` + Create bool `xml:"create"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` + HttpHeaders []KeyValue `xml:"httpHeaders,omitempty"` + Size int64 `xml:"size,omitempty"` +} + +func init() { + t["HttpNfcLeaseSourceFile"] = reflect.TypeOf((*HttpNfcLeaseSourceFile)(nil)).Elem() +} + +type ID struct { + DynamicData + + Id string `xml:"id"` +} + +func init() { + t["ID"] = reflect.TypeOf((*ID)(nil)).Elem() +} + +type IDEDiskNotSupported struct { + DiskNotSupported +} + +func init() { + t["IDEDiskNotSupported"] = reflect.TypeOf((*IDEDiskNotSupported)(nil)).Elem() +} + +type IDEDiskNotSupportedFault IDEDiskNotSupported + +func init() { + t["IDEDiskNotSupportedFault"] = reflect.TypeOf((*IDEDiskNotSupportedFault)(nil)).Elem() +} + +type IORMNotSupportedHostOnDatastore struct { + VimFault + + Datastore ManagedObjectReference `xml:"datastore"` + DatastoreName string `xml:"datastoreName"` + Host []ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["IORMNotSupportedHostOnDatastore"] = reflect.TypeOf((*IORMNotSupportedHostOnDatastore)(nil)).Elem() +} + +type IORMNotSupportedHostOnDatastoreFault IORMNotSupportedHostOnDatastore + +func init() { + t["IORMNotSupportedHostOnDatastoreFault"] = reflect.TypeOf((*IORMNotSupportedHostOnDatastoreFault)(nil)).Elem() +} + +type IScsiBootFailureEvent struct { + HostEvent +} + +func init() { + t["IScsiBootFailureEvent"] = reflect.TypeOf((*IScsiBootFailureEvent)(nil)).Elem() +} + +type ImpersonateUser ImpersonateUserRequestType + +func init() { + t["ImpersonateUser"] = reflect.TypeOf((*ImpersonateUser)(nil)).Elem() +} + +type ImpersonateUserRequestType struct { + This ManagedObjectReference `xml:"_this"` + UserName string `xml:"userName"` + Locale string `xml:"locale,omitempty"` +} + +func init() { + t["ImpersonateUserRequestType"] = reflect.TypeOf((*ImpersonateUserRequestType)(nil)).Elem() +} + +type ImpersonateUserResponse struct { + Returnval UserSession `xml:"returnval"` +} + +type ImportCertificateForCAMRequestType struct { + This ManagedObjectReference `xml:"_this"` + CertPath string `xml:"certPath"` + CamServer string `xml:"camServer"` +} + +func init() { + t["ImportCertificateForCAMRequestType"] = reflect.TypeOf((*ImportCertificateForCAMRequestType)(nil)).Elem() +} + +type ImportCertificateForCAM_Task ImportCertificateForCAMRequestType + +func init() { + t["ImportCertificateForCAM_Task"] = reflect.TypeOf((*ImportCertificateForCAM_Task)(nil)).Elem() +} + +type ImportCertificateForCAM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ImportHostAddFailure struct { + DvsFault + + HostIp []string `xml:"hostIp"` +} + +func init() { + t["ImportHostAddFailure"] = reflect.TypeOf((*ImportHostAddFailure)(nil)).Elem() +} + +type ImportHostAddFailureFault ImportHostAddFailure + +func init() { + t["ImportHostAddFailureFault"] = reflect.TypeOf((*ImportHostAddFailureFault)(nil)).Elem() +} + +type ImportOperationBulkFault struct { + DvsFault + + ImportFaults []ImportOperationBulkFaultFaultOnImport `xml:"importFaults"` +} + +func init() { + t["ImportOperationBulkFault"] = reflect.TypeOf((*ImportOperationBulkFault)(nil)).Elem() +} + +type 
ImportOperationBulkFaultFault ImportOperationBulkFault + +func init() { + t["ImportOperationBulkFaultFault"] = reflect.TypeOf((*ImportOperationBulkFaultFault)(nil)).Elem() +} + +type ImportOperationBulkFaultFaultOnImport struct { + DynamicData + + EntityType string `xml:"entityType,omitempty"` + Key string `xml:"key,omitempty"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["ImportOperationBulkFaultFaultOnImport"] = reflect.TypeOf((*ImportOperationBulkFaultFaultOnImport)(nil)).Elem() +} + +type ImportSpec struct { + DynamicData + + EntityConfig *VAppEntityConfigInfo `xml:"entityConfig,omitempty"` + InstantiationOst *OvfConsumerOstNode `xml:"instantiationOst,omitempty"` +} + +func init() { + t["ImportSpec"] = reflect.TypeOf((*ImportSpec)(nil)).Elem() +} + +type ImportUnmanagedSnapshot ImportUnmanagedSnapshotRequestType + +func init() { + t["ImportUnmanagedSnapshot"] = reflect.TypeOf((*ImportUnmanagedSnapshot)(nil)).Elem() +} + +type ImportUnmanagedSnapshotRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vdisk string `xml:"vdisk"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + VvolId string `xml:"vvolId"` +} + +func init() { + t["ImportUnmanagedSnapshotRequestType"] = reflect.TypeOf((*ImportUnmanagedSnapshotRequestType)(nil)).Elem() +} + +type ImportUnmanagedSnapshotResponse struct { +} + +type ImportVApp ImportVAppRequestType + +func init() { + t["ImportVApp"] = reflect.TypeOf((*ImportVApp)(nil)).Elem() +} + +type ImportVAppRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec BaseImportSpec `xml:"spec,typeattr"` + Folder *ManagedObjectReference `xml:"folder,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["ImportVAppRequestType"] = reflect.TypeOf((*ImportVAppRequestType)(nil)).Elem() +} + +type ImportVAppResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type InUseFeatureManipulationDisallowed struct { + NotEnoughLicenses +} + +func init() { + t["InUseFeatureManipulationDisallowed"] = reflect.TypeOf((*InUseFeatureManipulationDisallowed)(nil)).Elem() +} + +type InUseFeatureManipulationDisallowedFault InUseFeatureManipulationDisallowed + +func init() { + t["InUseFeatureManipulationDisallowedFault"] = reflect.TypeOf((*InUseFeatureManipulationDisallowedFault)(nil)).Elem() +} + +type InaccessibleDatastore struct { + InvalidDatastore + + Detail string `xml:"detail,omitempty"` +} + +func init() { + t["InaccessibleDatastore"] = reflect.TypeOf((*InaccessibleDatastore)(nil)).Elem() +} + +type InaccessibleDatastoreFault BaseInaccessibleDatastore + +func init() { + t["InaccessibleDatastoreFault"] = reflect.TypeOf((*InaccessibleDatastoreFault)(nil)).Elem() +} + +type InaccessibleFTMetadataDatastore struct { + InaccessibleDatastore +} + +func init() { + t["InaccessibleFTMetadataDatastore"] = reflect.TypeOf((*InaccessibleFTMetadataDatastore)(nil)).Elem() +} + +type InaccessibleFTMetadataDatastoreFault InaccessibleFTMetadataDatastore + +func init() { + t["InaccessibleFTMetadataDatastoreFault"] = reflect.TypeOf((*InaccessibleFTMetadataDatastoreFault)(nil)).Elem() +} + +type InaccessibleVFlashSource struct { + VimFault + + HostName string `xml:"hostName"` +} + +func init() { + t["InaccessibleVFlashSource"] = reflect.TypeOf((*InaccessibleVFlashSource)(nil)).Elem() +} + +type InaccessibleVFlashSourceFault InaccessibleVFlashSource + +func init() { + t["InaccessibleVFlashSourceFault"] = reflect.TypeOf((*InaccessibleVFlashSourceFault)(nil)).Elem() +} + +type 
IncompatibleDefaultDevice struct { + MigrationFault + + Device string `xml:"device"` +} + +func init() { + t["IncompatibleDefaultDevice"] = reflect.TypeOf((*IncompatibleDefaultDevice)(nil)).Elem() +} + +type IncompatibleDefaultDeviceFault IncompatibleDefaultDevice + +func init() { + t["IncompatibleDefaultDeviceFault"] = reflect.TypeOf((*IncompatibleDefaultDeviceFault)(nil)).Elem() +} + +type IncompatibleHostForFtSecondary struct { + VmFaultToleranceIssue + + Host ManagedObjectReference `xml:"host"` + Error []LocalizedMethodFault `xml:"error,omitempty"` +} + +func init() { + t["IncompatibleHostForFtSecondary"] = reflect.TypeOf((*IncompatibleHostForFtSecondary)(nil)).Elem() +} + +type IncompatibleHostForFtSecondaryFault IncompatibleHostForFtSecondary + +func init() { + t["IncompatibleHostForFtSecondaryFault"] = reflect.TypeOf((*IncompatibleHostForFtSecondaryFault)(nil)).Elem() +} + +type IncompatibleHostForVmReplication struct { + ReplicationFault + + VmName string `xml:"vmName"` + HostName string `xml:"hostName"` + Reason string `xml:"reason"` +} + +func init() { + t["IncompatibleHostForVmReplication"] = reflect.TypeOf((*IncompatibleHostForVmReplication)(nil)).Elem() +} + +type IncompatibleHostForVmReplicationFault IncompatibleHostForVmReplication + +func init() { + t["IncompatibleHostForVmReplicationFault"] = reflect.TypeOf((*IncompatibleHostForVmReplicationFault)(nil)).Elem() +} + +type IncompatibleSetting struct { + InvalidArgument + + ConflictingProperty string `xml:"conflictingProperty"` +} + +func init() { + t["IncompatibleSetting"] = reflect.TypeOf((*IncompatibleSetting)(nil)).Elem() +} + +type IncompatibleSettingFault IncompatibleSetting + +func init() { + t["IncompatibleSettingFault"] = reflect.TypeOf((*IncompatibleSettingFault)(nil)).Elem() +} + +type IncorrectFileType struct { + FileFault +} + +func init() { + t["IncorrectFileType"] = reflect.TypeOf((*IncorrectFileType)(nil)).Elem() +} + +type IncorrectFileTypeFault IncorrectFileType + +func init() { + t["IncorrectFileTypeFault"] = reflect.TypeOf((*IncorrectFileTypeFault)(nil)).Elem() +} + +type IncorrectHostInformation struct { + NotEnoughLicenses +} + +func init() { + t["IncorrectHostInformation"] = reflect.TypeOf((*IncorrectHostInformation)(nil)).Elem() +} + +type IncorrectHostInformationEvent struct { + LicenseEvent +} + +func init() { + t["IncorrectHostInformationEvent"] = reflect.TypeOf((*IncorrectHostInformationEvent)(nil)).Elem() +} + +type IncorrectHostInformationFault IncorrectHostInformation + +func init() { + t["IncorrectHostInformationFault"] = reflect.TypeOf((*IncorrectHostInformationFault)(nil)).Elem() +} + +type IndependentDiskVMotionNotSupported struct { + MigrationFeatureNotSupported +} + +func init() { + t["IndependentDiskVMotionNotSupported"] = reflect.TypeOf((*IndependentDiskVMotionNotSupported)(nil)).Elem() +} + +type IndependentDiskVMotionNotSupportedFault IndependentDiskVMotionNotSupported + +func init() { + t["IndependentDiskVMotionNotSupportedFault"] = reflect.TypeOf((*IndependentDiskVMotionNotSupportedFault)(nil)).Elem() +} + +type InflateDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["InflateDiskRequestType"] = reflect.TypeOf((*InflateDiskRequestType)(nil)).Elem() +} + +type InflateDisk_Task InflateDiskRequestType + +func init() { + t["InflateDisk_Task"] = reflect.TypeOf((*InflateDisk_Task)(nil)).Elem() +} + +type InflateDisk_TaskResponse struct { + Returnval ManagedObjectReference 
`xml:"returnval"` +} + +type InflateVirtualDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` +} + +func init() { + t["InflateVirtualDiskRequestType"] = reflect.TypeOf((*InflateVirtualDiskRequestType)(nil)).Elem() +} + +type InflateVirtualDisk_Task InflateVirtualDiskRequestType + +func init() { + t["InflateVirtualDisk_Task"] = reflect.TypeOf((*InflateVirtualDisk_Task)(nil)).Elem() +} + +type InflateVirtualDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type InfoUpgradeEvent struct { + UpgradeEvent +} + +func init() { + t["InfoUpgradeEvent"] = reflect.TypeOf((*InfoUpgradeEvent)(nil)).Elem() +} + +type InheritablePolicy struct { + DynamicData + + Inherited bool `xml:"inherited"` +} + +func init() { + t["InheritablePolicy"] = reflect.TypeOf((*InheritablePolicy)(nil)).Elem() +} + +type InitializeDisksRequestType struct { + This ManagedObjectReference `xml:"_this"` + Mapping []VsanHostDiskMapping `xml:"mapping"` +} + +func init() { + t["InitializeDisksRequestType"] = reflect.TypeOf((*InitializeDisksRequestType)(nil)).Elem() +} + +type InitializeDisks_Task InitializeDisksRequestType + +func init() { + t["InitializeDisks_Task"] = reflect.TypeOf((*InitializeDisks_Task)(nil)).Elem() +} + +type InitializeDisks_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type InitiateFileTransferFromGuest InitiateFileTransferFromGuestRequestType + +func init() { + t["InitiateFileTransferFromGuest"] = reflect.TypeOf((*InitiateFileTransferFromGuest)(nil)).Elem() +} + +type InitiateFileTransferFromGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + GuestFilePath string `xml:"guestFilePath"` +} + +func init() { + t["InitiateFileTransferFromGuestRequestType"] = reflect.TypeOf((*InitiateFileTransferFromGuestRequestType)(nil)).Elem() +} + +type InitiateFileTransferFromGuestResponse struct { + Returnval FileTransferInformation `xml:"returnval"` +} + +type InitiateFileTransferToGuest InitiateFileTransferToGuestRequestType + +func init() { + t["InitiateFileTransferToGuest"] = reflect.TypeOf((*InitiateFileTransferToGuest)(nil)).Elem() +} + +type InitiateFileTransferToGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + GuestFilePath string `xml:"guestFilePath"` + FileAttributes BaseGuestFileAttributes `xml:"fileAttributes,typeattr"` + FileSize int64 `xml:"fileSize"` + Overwrite bool `xml:"overwrite"` +} + +func init() { + t["InitiateFileTransferToGuestRequestType"] = reflect.TypeOf((*InitiateFileTransferToGuestRequestType)(nil)).Elem() +} + +type InitiateFileTransferToGuestResponse struct { + Returnval string `xml:"returnval"` +} + +type InstallHostPatchRequestType struct { + This ManagedObjectReference `xml:"_this"` + Repository HostPatchManagerLocator `xml:"repository"` + UpdateID string `xml:"updateID"` + Force *bool `xml:"force"` +} + +func init() { + t["InstallHostPatchRequestType"] = reflect.TypeOf((*InstallHostPatchRequestType)(nil)).Elem() +} + +type InstallHostPatchV2RequestType struct { + This ManagedObjectReference `xml:"_this"` + MetaUrls []string `xml:"metaUrls,omitempty"` + BundleUrls []string `xml:"bundleUrls,omitempty"` + VibUrls []string `xml:"vibUrls,omitempty"` + Spec 
*HostPatchManagerPatchManagerOperationSpec `xml:"spec,omitempty"` +} + +func init() { + t["InstallHostPatchV2RequestType"] = reflect.TypeOf((*InstallHostPatchV2RequestType)(nil)).Elem() +} + +type InstallHostPatchV2_Task InstallHostPatchV2RequestType + +func init() { + t["InstallHostPatchV2_Task"] = reflect.TypeOf((*InstallHostPatchV2_Task)(nil)).Elem() +} + +type InstallHostPatchV2_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type InstallHostPatch_Task InstallHostPatchRequestType + +func init() { + t["InstallHostPatch_Task"] = reflect.TypeOf((*InstallHostPatch_Task)(nil)).Elem() +} + +type InstallHostPatch_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type InstallIoFilterRequestType struct { + This ManagedObjectReference `xml:"_this"` + VibUrl string `xml:"vibUrl"` + CompRes ManagedObjectReference `xml:"compRes"` +} + +func init() { + t["InstallIoFilterRequestType"] = reflect.TypeOf((*InstallIoFilterRequestType)(nil)).Elem() +} + +type InstallIoFilter_Task InstallIoFilterRequestType + +func init() { + t["InstallIoFilter_Task"] = reflect.TypeOf((*InstallIoFilter_Task)(nil)).Elem() +} + +type InstallIoFilter_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type InstallServerCertificate InstallServerCertificateRequestType + +func init() { + t["InstallServerCertificate"] = reflect.TypeOf((*InstallServerCertificate)(nil)).Elem() +} + +type InstallServerCertificateRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cert string `xml:"cert"` +} + +func init() { + t["InstallServerCertificateRequestType"] = reflect.TypeOf((*InstallServerCertificateRequestType)(nil)).Elem() +} + +type InstallServerCertificateResponse struct { +} + +type InstallSmartCardTrustAnchor InstallSmartCardTrustAnchorRequestType + +func init() { + t["InstallSmartCardTrustAnchor"] = reflect.TypeOf((*InstallSmartCardTrustAnchor)(nil)).Elem() +} + +type InstallSmartCardTrustAnchorRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cert string `xml:"cert"` +} + +func init() { + t["InstallSmartCardTrustAnchorRequestType"] = reflect.TypeOf((*InstallSmartCardTrustAnchorRequestType)(nil)).Elem() +} + +type InstallSmartCardTrustAnchorResponse struct { +} + +type InstantCloneRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec VirtualMachineInstantCloneSpec `xml:"spec"` +} + +func init() { + t["InstantCloneRequestType"] = reflect.TypeOf((*InstantCloneRequestType)(nil)).Elem() +} + +type InstantClone_Task InstantCloneRequestType + +func init() { + t["InstantClone_Task"] = reflect.TypeOf((*InstantClone_Task)(nil)).Elem() +} + +type InstantClone_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type InsufficientAgentVmsDeployed struct { + InsufficientResourcesFault + + HostName string `xml:"hostName"` + RequiredNumAgentVms int32 `xml:"requiredNumAgentVms"` + CurrentNumAgentVms int32 `xml:"currentNumAgentVms"` +} + +func init() { + t["InsufficientAgentVmsDeployed"] = reflect.TypeOf((*InsufficientAgentVmsDeployed)(nil)).Elem() +} + +type InsufficientAgentVmsDeployedFault InsufficientAgentVmsDeployed + +func init() { + t["InsufficientAgentVmsDeployedFault"] = reflect.TypeOf((*InsufficientAgentVmsDeployedFault)(nil)).Elem() +} + +type InsufficientCpuResourcesFault struct { + InsufficientResourcesFault + + Unreserved int64 `xml:"unreserved"` + Requested int64 `xml:"requested"` +} + +func init() { + t["InsufficientCpuResourcesFault"] = 
reflect.TypeOf((*InsufficientCpuResourcesFault)(nil)).Elem() +} + +type InsufficientCpuResourcesFaultFault InsufficientCpuResourcesFault + +func init() { + t["InsufficientCpuResourcesFaultFault"] = reflect.TypeOf((*InsufficientCpuResourcesFaultFault)(nil)).Elem() +} + +type InsufficientDisks struct { + VsanDiskFault +} + +func init() { + t["InsufficientDisks"] = reflect.TypeOf((*InsufficientDisks)(nil)).Elem() +} + +type InsufficientDisksFault InsufficientDisks + +func init() { + t["InsufficientDisksFault"] = reflect.TypeOf((*InsufficientDisksFault)(nil)).Elem() +} + +type InsufficientFailoverResourcesEvent struct { + ClusterEvent +} + +func init() { + t["InsufficientFailoverResourcesEvent"] = reflect.TypeOf((*InsufficientFailoverResourcesEvent)(nil)).Elem() +} + +type InsufficientFailoverResourcesFault struct { + InsufficientResourcesFault +} + +func init() { + t["InsufficientFailoverResourcesFault"] = reflect.TypeOf((*InsufficientFailoverResourcesFault)(nil)).Elem() +} + +type InsufficientFailoverResourcesFaultFault InsufficientFailoverResourcesFault + +func init() { + t["InsufficientFailoverResourcesFaultFault"] = reflect.TypeOf((*InsufficientFailoverResourcesFaultFault)(nil)).Elem() +} + +type InsufficientGraphicsResourcesFault struct { + InsufficientResourcesFault +} + +func init() { + t["InsufficientGraphicsResourcesFault"] = reflect.TypeOf((*InsufficientGraphicsResourcesFault)(nil)).Elem() +} + +type InsufficientGraphicsResourcesFaultFault InsufficientGraphicsResourcesFault + +func init() { + t["InsufficientGraphicsResourcesFaultFault"] = reflect.TypeOf((*InsufficientGraphicsResourcesFaultFault)(nil)).Elem() +} + +type InsufficientHostCapacityFault struct { + InsufficientResourcesFault + + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["InsufficientHostCapacityFault"] = reflect.TypeOf((*InsufficientHostCapacityFault)(nil)).Elem() +} + +type InsufficientHostCapacityFaultFault BaseInsufficientHostCapacityFault + +func init() { + t["InsufficientHostCapacityFaultFault"] = reflect.TypeOf((*InsufficientHostCapacityFaultFault)(nil)).Elem() +} + +type InsufficientHostCpuCapacityFault struct { + InsufficientHostCapacityFault + + Unreserved int64 `xml:"unreserved"` + Requested int64 `xml:"requested"` +} + +func init() { + t["InsufficientHostCpuCapacityFault"] = reflect.TypeOf((*InsufficientHostCpuCapacityFault)(nil)).Elem() +} + +type InsufficientHostCpuCapacityFaultFault InsufficientHostCpuCapacityFault + +func init() { + t["InsufficientHostCpuCapacityFaultFault"] = reflect.TypeOf((*InsufficientHostCpuCapacityFaultFault)(nil)).Elem() +} + +type InsufficientHostMemoryCapacityFault struct { + InsufficientHostCapacityFault + + Unreserved int64 `xml:"unreserved"` + Requested int64 `xml:"requested"` +} + +func init() { + t["InsufficientHostMemoryCapacityFault"] = reflect.TypeOf((*InsufficientHostMemoryCapacityFault)(nil)).Elem() +} + +type InsufficientHostMemoryCapacityFaultFault InsufficientHostMemoryCapacityFault + +func init() { + t["InsufficientHostMemoryCapacityFaultFault"] = reflect.TypeOf((*InsufficientHostMemoryCapacityFaultFault)(nil)).Elem() +} + +type InsufficientMemoryResourcesFault struct { + InsufficientResourcesFault + + Unreserved int64 `xml:"unreserved"` + Requested int64 `xml:"requested"` +} + +func init() { + t["InsufficientMemoryResourcesFault"] = reflect.TypeOf((*InsufficientMemoryResourcesFault)(nil)).Elem() +} + +type InsufficientMemoryResourcesFaultFault InsufficientMemoryResourcesFault + +func init() { + 
t["InsufficientMemoryResourcesFaultFault"] = reflect.TypeOf((*InsufficientMemoryResourcesFaultFault)(nil)).Elem() +} + +type InsufficientNetworkCapacity struct { + InsufficientResourcesFault +} + +func init() { + t["InsufficientNetworkCapacity"] = reflect.TypeOf((*InsufficientNetworkCapacity)(nil)).Elem() +} + +type InsufficientNetworkCapacityFault InsufficientNetworkCapacity + +func init() { + t["InsufficientNetworkCapacityFault"] = reflect.TypeOf((*InsufficientNetworkCapacityFault)(nil)).Elem() +} + +type InsufficientNetworkResourcePoolCapacity struct { + InsufficientResourcesFault + + DvsName string `xml:"dvsName"` + DvsUuid string `xml:"dvsUuid"` + ResourcePoolKey string `xml:"resourcePoolKey"` + Available int64 `xml:"available"` + Requested int64 `xml:"requested"` + Device []string `xml:"device"` +} + +func init() { + t["InsufficientNetworkResourcePoolCapacity"] = reflect.TypeOf((*InsufficientNetworkResourcePoolCapacity)(nil)).Elem() +} + +type InsufficientNetworkResourcePoolCapacityFault InsufficientNetworkResourcePoolCapacity + +func init() { + t["InsufficientNetworkResourcePoolCapacityFault"] = reflect.TypeOf((*InsufficientNetworkResourcePoolCapacityFault)(nil)).Elem() +} + +type InsufficientPerCpuCapacity struct { + InsufficientHostCapacityFault +} + +func init() { + t["InsufficientPerCpuCapacity"] = reflect.TypeOf((*InsufficientPerCpuCapacity)(nil)).Elem() +} + +type InsufficientPerCpuCapacityFault InsufficientPerCpuCapacity + +func init() { + t["InsufficientPerCpuCapacityFault"] = reflect.TypeOf((*InsufficientPerCpuCapacityFault)(nil)).Elem() +} + +type InsufficientResourcesFault struct { + VimFault +} + +func init() { + t["InsufficientResourcesFault"] = reflect.TypeOf((*InsufficientResourcesFault)(nil)).Elem() +} + +type InsufficientResourcesFaultFault BaseInsufficientResourcesFault + +func init() { + t["InsufficientResourcesFaultFault"] = reflect.TypeOf((*InsufficientResourcesFaultFault)(nil)).Elem() +} + +type InsufficientStandbyCpuResource struct { + InsufficientStandbyResource + + Available int64 `xml:"available"` + Requested int64 `xml:"requested"` +} + +func init() { + t["InsufficientStandbyCpuResource"] = reflect.TypeOf((*InsufficientStandbyCpuResource)(nil)).Elem() +} + +type InsufficientStandbyCpuResourceFault InsufficientStandbyCpuResource + +func init() { + t["InsufficientStandbyCpuResourceFault"] = reflect.TypeOf((*InsufficientStandbyCpuResourceFault)(nil)).Elem() +} + +type InsufficientStandbyMemoryResource struct { + InsufficientStandbyResource + + Available int64 `xml:"available"` + Requested int64 `xml:"requested"` +} + +func init() { + t["InsufficientStandbyMemoryResource"] = reflect.TypeOf((*InsufficientStandbyMemoryResource)(nil)).Elem() +} + +type InsufficientStandbyMemoryResourceFault InsufficientStandbyMemoryResource + +func init() { + t["InsufficientStandbyMemoryResourceFault"] = reflect.TypeOf((*InsufficientStandbyMemoryResourceFault)(nil)).Elem() +} + +type InsufficientStandbyResource struct { + InsufficientResourcesFault +} + +func init() { + t["InsufficientStandbyResource"] = reflect.TypeOf((*InsufficientStandbyResource)(nil)).Elem() +} + +type InsufficientStandbyResourceFault BaseInsufficientStandbyResource + +func init() { + t["InsufficientStandbyResourceFault"] = reflect.TypeOf((*InsufficientStandbyResourceFault)(nil)).Elem() +} + +type InsufficientStorageIops struct { + VimFault + + UnreservedIops int64 `xml:"unreservedIops"` + RequestedIops int64 `xml:"requestedIops"` + DatastoreName string `xml:"datastoreName"` +} + +func init() { + 
t["InsufficientStorageIops"] = reflect.TypeOf((*InsufficientStorageIops)(nil)).Elem() +} + +type InsufficientStorageIopsFault InsufficientStorageIops + +func init() { + t["InsufficientStorageIopsFault"] = reflect.TypeOf((*InsufficientStorageIopsFault)(nil)).Elem() +} + +type InsufficientStorageSpace struct { + InsufficientResourcesFault +} + +func init() { + t["InsufficientStorageSpace"] = reflect.TypeOf((*InsufficientStorageSpace)(nil)).Elem() +} + +type InsufficientStorageSpaceFault InsufficientStorageSpace + +func init() { + t["InsufficientStorageSpaceFault"] = reflect.TypeOf((*InsufficientStorageSpaceFault)(nil)).Elem() +} + +type InsufficientVFlashResourcesFault struct { + InsufficientResourcesFault + + FreeSpaceInMB int64 `xml:"freeSpaceInMB,omitempty"` + FreeSpace int64 `xml:"freeSpace"` + RequestedSpaceInMB int64 `xml:"requestedSpaceInMB,omitempty"` + RequestedSpace int64 `xml:"requestedSpace"` +} + +func init() { + t["InsufficientVFlashResourcesFault"] = reflect.TypeOf((*InsufficientVFlashResourcesFault)(nil)).Elem() +} + +type InsufficientVFlashResourcesFaultFault InsufficientVFlashResourcesFault + +func init() { + t["InsufficientVFlashResourcesFaultFault"] = reflect.TypeOf((*InsufficientVFlashResourcesFaultFault)(nil)).Elem() +} + +type IntExpression struct { + NegatableExpression + + Value int32 `xml:"value,omitempty"` +} + +func init() { + t["IntExpression"] = reflect.TypeOf((*IntExpression)(nil)).Elem() +} + +type IntOption struct { + OptionType + + Min int32 `xml:"min"` + Max int32 `xml:"max"` + DefaultValue int32 `xml:"defaultValue"` +} + +func init() { + t["IntOption"] = reflect.TypeOf((*IntOption)(nil)).Elem() +} + +type IntPolicy struct { + InheritablePolicy + + Value int32 `xml:"value,omitempty"` +} + +func init() { + t["IntPolicy"] = reflect.TypeOf((*IntPolicy)(nil)).Elem() +} + +type InvalidAffinitySettingFault struct { + VimFault +} + +func init() { + t["InvalidAffinitySettingFault"] = reflect.TypeOf((*InvalidAffinitySettingFault)(nil)).Elem() +} + +type InvalidAffinitySettingFaultFault InvalidAffinitySettingFault + +func init() { + t["InvalidAffinitySettingFaultFault"] = reflect.TypeOf((*InvalidAffinitySettingFaultFault)(nil)).Elem() +} + +type InvalidArgument struct { + RuntimeFault + + InvalidProperty string `xml:"invalidProperty,omitempty"` +} + +func init() { + t["InvalidArgument"] = reflect.TypeOf((*InvalidArgument)(nil)).Elem() +} + +type InvalidArgumentFault BaseInvalidArgument + +func init() { + t["InvalidArgumentFault"] = reflect.TypeOf((*InvalidArgumentFault)(nil)).Elem() +} + +type InvalidBmcRole struct { + VimFault +} + +func init() { + t["InvalidBmcRole"] = reflect.TypeOf((*InvalidBmcRole)(nil)).Elem() +} + +type InvalidBmcRoleFault InvalidBmcRole + +func init() { + t["InvalidBmcRoleFault"] = reflect.TypeOf((*InvalidBmcRoleFault)(nil)).Elem() +} + +type InvalidBundle struct { + PlatformConfigFault +} + +func init() { + t["InvalidBundle"] = reflect.TypeOf((*InvalidBundle)(nil)).Elem() +} + +type InvalidBundleFault InvalidBundle + +func init() { + t["InvalidBundleFault"] = reflect.TypeOf((*InvalidBundleFault)(nil)).Elem() +} + +type InvalidCAMCertificate struct { + InvalidCAMServer +} + +func init() { + t["InvalidCAMCertificate"] = reflect.TypeOf((*InvalidCAMCertificate)(nil)).Elem() +} + +type InvalidCAMCertificateFault InvalidCAMCertificate + +func init() { + t["InvalidCAMCertificateFault"] = reflect.TypeOf((*InvalidCAMCertificateFault)(nil)).Elem() +} + +type InvalidCAMServer struct { + ActiveDirectoryFault + + CamServer string `xml:"camServer"` +} + 
+func init() { + t["InvalidCAMServer"] = reflect.TypeOf((*InvalidCAMServer)(nil)).Elem() +} + +type InvalidCAMServerFault BaseInvalidCAMServer + +func init() { + t["InvalidCAMServerFault"] = reflect.TypeOf((*InvalidCAMServerFault)(nil)).Elem() +} + +type InvalidClientCertificate struct { + InvalidLogin +} + +func init() { + t["InvalidClientCertificate"] = reflect.TypeOf((*InvalidClientCertificate)(nil)).Elem() +} + +type InvalidClientCertificateFault InvalidClientCertificate + +func init() { + t["InvalidClientCertificateFault"] = reflect.TypeOf((*InvalidClientCertificateFault)(nil)).Elem() +} + +type InvalidCollectorVersion struct { + MethodFault +} + +func init() { + t["InvalidCollectorVersion"] = reflect.TypeOf((*InvalidCollectorVersion)(nil)).Elem() +} + +type InvalidCollectorVersionFault InvalidCollectorVersion + +func init() { + t["InvalidCollectorVersionFault"] = reflect.TypeOf((*InvalidCollectorVersionFault)(nil)).Elem() +} + +type InvalidController struct { + InvalidDeviceSpec + + ControllerKey int32 `xml:"controllerKey"` +} + +func init() { + t["InvalidController"] = reflect.TypeOf((*InvalidController)(nil)).Elem() +} + +type InvalidControllerFault InvalidController + +func init() { + t["InvalidControllerFault"] = reflect.TypeOf((*InvalidControllerFault)(nil)).Elem() +} + +type InvalidDasConfigArgument struct { + InvalidArgument + + Entry string `xml:"entry,omitempty"` + ClusterName string `xml:"clusterName,omitempty"` +} + +func init() { + t["InvalidDasConfigArgument"] = reflect.TypeOf((*InvalidDasConfigArgument)(nil)).Elem() +} + +type InvalidDasConfigArgumentFault InvalidDasConfigArgument + +func init() { + t["InvalidDasConfigArgumentFault"] = reflect.TypeOf((*InvalidDasConfigArgumentFault)(nil)).Elem() +} + +type InvalidDasRestartPriorityForFtVm struct { + InvalidArgument + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` +} + +func init() { + t["InvalidDasRestartPriorityForFtVm"] = reflect.TypeOf((*InvalidDasRestartPriorityForFtVm)(nil)).Elem() +} + +type InvalidDasRestartPriorityForFtVmFault InvalidDasRestartPriorityForFtVm + +func init() { + t["InvalidDasRestartPriorityForFtVmFault"] = reflect.TypeOf((*InvalidDasRestartPriorityForFtVmFault)(nil)).Elem() +} + +type InvalidDatastore struct { + VimFault + + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` + Name string `xml:"name,omitempty"` +} + +func init() { + t["InvalidDatastore"] = reflect.TypeOf((*InvalidDatastore)(nil)).Elem() +} + +type InvalidDatastoreFault BaseInvalidDatastore + +func init() { + t["InvalidDatastoreFault"] = reflect.TypeOf((*InvalidDatastoreFault)(nil)).Elem() +} + +type InvalidDatastorePath struct { + InvalidDatastore + + DatastorePath string `xml:"datastorePath"` +} + +func init() { + t["InvalidDatastorePath"] = reflect.TypeOf((*InvalidDatastorePath)(nil)).Elem() +} + +type InvalidDatastorePathFault InvalidDatastorePath + +func init() { + t["InvalidDatastorePathFault"] = reflect.TypeOf((*InvalidDatastorePathFault)(nil)).Elem() +} + +type InvalidDatastoreState struct { + InvalidState + + DatastoreName string `xml:"datastoreName,omitempty"` +} + +func init() { + t["InvalidDatastoreState"] = reflect.TypeOf((*InvalidDatastoreState)(nil)).Elem() +} + +type InvalidDatastoreStateFault InvalidDatastoreState + +func init() { + t["InvalidDatastoreStateFault"] = reflect.TypeOf((*InvalidDatastoreStateFault)(nil)).Elem() +} + +type InvalidDeviceBacking struct { + InvalidDeviceSpec +} + +func init() { + t["InvalidDeviceBacking"] = 
reflect.TypeOf((*InvalidDeviceBacking)(nil)).Elem() +} + +type InvalidDeviceBackingFault InvalidDeviceBacking + +func init() { + t["InvalidDeviceBackingFault"] = reflect.TypeOf((*InvalidDeviceBackingFault)(nil)).Elem() +} + +type InvalidDeviceOperation struct { + InvalidDeviceSpec + + BadOp VirtualDeviceConfigSpecOperation `xml:"badOp,omitempty"` + BadFileOp VirtualDeviceConfigSpecFileOperation `xml:"badFileOp,omitempty"` +} + +func init() { + t["InvalidDeviceOperation"] = reflect.TypeOf((*InvalidDeviceOperation)(nil)).Elem() +} + +type InvalidDeviceOperationFault InvalidDeviceOperation + +func init() { + t["InvalidDeviceOperationFault"] = reflect.TypeOf((*InvalidDeviceOperationFault)(nil)).Elem() +} + +type InvalidDeviceSpec struct { + InvalidVmConfig + + DeviceIndex int32 `xml:"deviceIndex"` +} + +func init() { + t["InvalidDeviceSpec"] = reflect.TypeOf((*InvalidDeviceSpec)(nil)).Elem() +} + +type InvalidDeviceSpecFault BaseInvalidDeviceSpec + +func init() { + t["InvalidDeviceSpecFault"] = reflect.TypeOf((*InvalidDeviceSpecFault)(nil)).Elem() +} + +type InvalidDiskFormat struct { + InvalidFormat +} + +func init() { + t["InvalidDiskFormat"] = reflect.TypeOf((*InvalidDiskFormat)(nil)).Elem() +} + +type InvalidDiskFormatFault InvalidDiskFormat + +func init() { + t["InvalidDiskFormatFault"] = reflect.TypeOf((*InvalidDiskFormatFault)(nil)).Elem() +} + +type InvalidDrsBehaviorForFtVm struct { + InvalidArgument + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` +} + +func init() { + t["InvalidDrsBehaviorForFtVm"] = reflect.TypeOf((*InvalidDrsBehaviorForFtVm)(nil)).Elem() +} + +type InvalidDrsBehaviorForFtVmFault InvalidDrsBehaviorForFtVm + +func init() { + t["InvalidDrsBehaviorForFtVmFault"] = reflect.TypeOf((*InvalidDrsBehaviorForFtVmFault)(nil)).Elem() +} + +type InvalidEditionEvent struct { + LicenseEvent + + Feature string `xml:"feature"` +} + +func init() { + t["InvalidEditionEvent"] = reflect.TypeOf((*InvalidEditionEvent)(nil)).Elem() +} + +type InvalidEditionLicense struct { + NotEnoughLicenses + + Feature string `xml:"feature"` +} + +func init() { + t["InvalidEditionLicense"] = reflect.TypeOf((*InvalidEditionLicense)(nil)).Elem() +} + +type InvalidEditionLicenseFault InvalidEditionLicense + +func init() { + t["InvalidEditionLicenseFault"] = reflect.TypeOf((*InvalidEditionLicenseFault)(nil)).Elem() +} + +type InvalidEvent struct { + VimFault +} + +func init() { + t["InvalidEvent"] = reflect.TypeOf((*InvalidEvent)(nil)).Elem() +} + +type InvalidEventFault InvalidEvent + +func init() { + t["InvalidEventFault"] = reflect.TypeOf((*InvalidEventFault)(nil)).Elem() +} + +type InvalidFolder struct { + VimFault + + Target ManagedObjectReference `xml:"target"` +} + +func init() { + t["InvalidFolder"] = reflect.TypeOf((*InvalidFolder)(nil)).Elem() +} + +type InvalidFolderFault BaseInvalidFolder + +func init() { + t["InvalidFolderFault"] = reflect.TypeOf((*InvalidFolderFault)(nil)).Elem() +} + +type InvalidFormat struct { + VmConfigFault +} + +func init() { + t["InvalidFormat"] = reflect.TypeOf((*InvalidFormat)(nil)).Elem() +} + +type InvalidFormatFault BaseInvalidFormat + +func init() { + t["InvalidFormatFault"] = reflect.TypeOf((*InvalidFormatFault)(nil)).Elem() +} + +type InvalidGuestLogin struct { + GuestOperationsFault +} + +func init() { + t["InvalidGuestLogin"] = reflect.TypeOf((*InvalidGuestLogin)(nil)).Elem() +} + +type InvalidGuestLoginFault InvalidGuestLogin + +func init() { + t["InvalidGuestLoginFault"] = reflect.TypeOf((*InvalidGuestLoginFault)(nil)).Elem() +} + 
+type InvalidHostConnectionState struct { + InvalidHostState +} + +func init() { + t["InvalidHostConnectionState"] = reflect.TypeOf((*InvalidHostConnectionState)(nil)).Elem() +} + +type InvalidHostConnectionStateFault InvalidHostConnectionState + +func init() { + t["InvalidHostConnectionStateFault"] = reflect.TypeOf((*InvalidHostConnectionStateFault)(nil)).Elem() +} + +type InvalidHostName struct { + HostConfigFault +} + +func init() { + t["InvalidHostName"] = reflect.TypeOf((*InvalidHostName)(nil)).Elem() +} + +type InvalidHostNameFault InvalidHostName + +func init() { + t["InvalidHostNameFault"] = reflect.TypeOf((*InvalidHostNameFault)(nil)).Elem() +} + +type InvalidHostState struct { + InvalidState + + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["InvalidHostState"] = reflect.TypeOf((*InvalidHostState)(nil)).Elem() +} + +type InvalidHostStateFault BaseInvalidHostState + +func init() { + t["InvalidHostStateFault"] = reflect.TypeOf((*InvalidHostStateFault)(nil)).Elem() +} + +type InvalidIndexArgument struct { + InvalidArgument + + Key string `xml:"key"` +} + +func init() { + t["InvalidIndexArgument"] = reflect.TypeOf((*InvalidIndexArgument)(nil)).Elem() +} + +type InvalidIndexArgumentFault InvalidIndexArgument + +func init() { + t["InvalidIndexArgumentFault"] = reflect.TypeOf((*InvalidIndexArgumentFault)(nil)).Elem() +} + +type InvalidIpfixConfig struct { + DvsFault + + Property string `xml:"property,omitempty"` +} + +func init() { + t["InvalidIpfixConfig"] = reflect.TypeOf((*InvalidIpfixConfig)(nil)).Elem() +} + +type InvalidIpfixConfigFault InvalidIpfixConfig + +func init() { + t["InvalidIpfixConfigFault"] = reflect.TypeOf((*InvalidIpfixConfigFault)(nil)).Elem() +} + +type InvalidIpmiLoginInfo struct { + VimFault +} + +func init() { + t["InvalidIpmiLoginInfo"] = reflect.TypeOf((*InvalidIpmiLoginInfo)(nil)).Elem() +} + +type InvalidIpmiLoginInfoFault InvalidIpmiLoginInfo + +func init() { + t["InvalidIpmiLoginInfoFault"] = reflect.TypeOf((*InvalidIpmiLoginInfoFault)(nil)).Elem() +} + +type InvalidIpmiMacAddress struct { + VimFault + + UserProvidedMacAddress string `xml:"userProvidedMacAddress"` + ObservedMacAddress string `xml:"observedMacAddress"` +} + +func init() { + t["InvalidIpmiMacAddress"] = reflect.TypeOf((*InvalidIpmiMacAddress)(nil)).Elem() +} + +type InvalidIpmiMacAddressFault InvalidIpmiMacAddress + +func init() { + t["InvalidIpmiMacAddressFault"] = reflect.TypeOf((*InvalidIpmiMacAddressFault)(nil)).Elem() +} + +type InvalidLicense struct { + VimFault + + LicenseContent string `xml:"licenseContent"` +} + +func init() { + t["InvalidLicense"] = reflect.TypeOf((*InvalidLicense)(nil)).Elem() +} + +type InvalidLicenseFault InvalidLicense + +func init() { + t["InvalidLicenseFault"] = reflect.TypeOf((*InvalidLicenseFault)(nil)).Elem() +} + +type InvalidLocale struct { + VimFault +} + +func init() { + t["InvalidLocale"] = reflect.TypeOf((*InvalidLocale)(nil)).Elem() +} + +type InvalidLocaleFault InvalidLocale + +func init() { + t["InvalidLocaleFault"] = reflect.TypeOf((*InvalidLocaleFault)(nil)).Elem() +} + +type InvalidLogin struct { + VimFault +} + +func init() { + t["InvalidLogin"] = reflect.TypeOf((*InvalidLogin)(nil)).Elem() +} + +type InvalidLoginFault BaseInvalidLogin + +func init() { + t["InvalidLoginFault"] = reflect.TypeOf((*InvalidLoginFault)(nil)).Elem() +} + +type InvalidName struct { + VimFault + + Name string `xml:"name"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["InvalidName"] = 
reflect.TypeOf((*InvalidName)(nil)).Elem() +} + +type InvalidNameFault InvalidName + +func init() { + t["InvalidNameFault"] = reflect.TypeOf((*InvalidNameFault)(nil)).Elem() +} + +type InvalidNasCredentials struct { + NasConfigFault + + UserName string `xml:"userName"` +} + +func init() { + t["InvalidNasCredentials"] = reflect.TypeOf((*InvalidNasCredentials)(nil)).Elem() +} + +type InvalidNasCredentialsFault InvalidNasCredentials + +func init() { + t["InvalidNasCredentialsFault"] = reflect.TypeOf((*InvalidNasCredentialsFault)(nil)).Elem() +} + +type InvalidNetworkInType struct { + VAppPropertyFault +} + +func init() { + t["InvalidNetworkInType"] = reflect.TypeOf((*InvalidNetworkInType)(nil)).Elem() +} + +type InvalidNetworkInTypeFault InvalidNetworkInType + +func init() { + t["InvalidNetworkInTypeFault"] = reflect.TypeOf((*InvalidNetworkInTypeFault)(nil)).Elem() +} + +type InvalidNetworkResource struct { + NasConfigFault + + RemoteHost string `xml:"remoteHost"` + RemotePath string `xml:"remotePath"` +} + +func init() { + t["InvalidNetworkResource"] = reflect.TypeOf((*InvalidNetworkResource)(nil)).Elem() +} + +type InvalidNetworkResourceFault InvalidNetworkResource + +func init() { + t["InvalidNetworkResourceFault"] = reflect.TypeOf((*InvalidNetworkResourceFault)(nil)).Elem() +} + +type InvalidOperationOnSecondaryVm struct { + VmFaultToleranceIssue + + InstanceUuid string `xml:"instanceUuid,omitempty"` +} + +func init() { + t["InvalidOperationOnSecondaryVm"] = reflect.TypeOf((*InvalidOperationOnSecondaryVm)(nil)).Elem() +} + +type InvalidOperationOnSecondaryVmFault InvalidOperationOnSecondaryVm + +func init() { + t["InvalidOperationOnSecondaryVmFault"] = reflect.TypeOf((*InvalidOperationOnSecondaryVmFault)(nil)).Elem() +} + +type InvalidPowerState struct { + InvalidState + + RequestedState VirtualMachinePowerState `xml:"requestedState,omitempty"` + ExistingState VirtualMachinePowerState `xml:"existingState"` +} + +func init() { + t["InvalidPowerState"] = reflect.TypeOf((*InvalidPowerState)(nil)).Elem() +} + +type InvalidPowerStateFault InvalidPowerState + +func init() { + t["InvalidPowerStateFault"] = reflect.TypeOf((*InvalidPowerStateFault)(nil)).Elem() +} + +type InvalidPrivilege struct { + VimFault + + Privilege string `xml:"privilege"` +} + +func init() { + t["InvalidPrivilege"] = reflect.TypeOf((*InvalidPrivilege)(nil)).Elem() +} + +type InvalidPrivilegeFault InvalidPrivilege + +func init() { + t["InvalidPrivilegeFault"] = reflect.TypeOf((*InvalidPrivilegeFault)(nil)).Elem() +} + +type InvalidProfileReferenceHost struct { + RuntimeFault + + Reason string `xml:"reason,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Profile *ManagedObjectReference `xml:"profile,omitempty"` + ProfileName string `xml:"profileName,omitempty"` +} + +func init() { + t["InvalidProfileReferenceHost"] = reflect.TypeOf((*InvalidProfileReferenceHost)(nil)).Elem() +} + +type InvalidProfileReferenceHostFault InvalidProfileReferenceHost + +func init() { + t["InvalidProfileReferenceHostFault"] = reflect.TypeOf((*InvalidProfileReferenceHostFault)(nil)).Elem() +} + +type InvalidProperty struct { + MethodFault + + Name string `xml:"name"` +} + +func init() { + t["InvalidProperty"] = reflect.TypeOf((*InvalidProperty)(nil)).Elem() +} + +type InvalidPropertyFault InvalidProperty + +func init() { + t["InvalidPropertyFault"] = reflect.TypeOf((*InvalidPropertyFault)(nil)).Elem() +} + +type InvalidPropertyType struct { + VAppPropertyFault +} + +func init() { + t["InvalidPropertyType"] = 
reflect.TypeOf((*InvalidPropertyType)(nil)).Elem() +} + +type InvalidPropertyTypeFault InvalidPropertyType + +func init() { + t["InvalidPropertyTypeFault"] = reflect.TypeOf((*InvalidPropertyTypeFault)(nil)).Elem() +} + +type InvalidPropertyValue struct { + VAppPropertyFault +} + +func init() { + t["InvalidPropertyValue"] = reflect.TypeOf((*InvalidPropertyValue)(nil)).Elem() +} + +type InvalidPropertyValueFault BaseInvalidPropertyValue + +func init() { + t["InvalidPropertyValueFault"] = reflect.TypeOf((*InvalidPropertyValueFault)(nil)).Elem() +} + +type InvalidRequest struct { + RuntimeFault +} + +func init() { + t["InvalidRequest"] = reflect.TypeOf((*InvalidRequest)(nil)).Elem() +} + +type InvalidRequestFault BaseInvalidRequest + +func init() { + t["InvalidRequestFault"] = reflect.TypeOf((*InvalidRequestFault)(nil)).Elem() +} + +type InvalidResourcePoolStructureFault struct { + InsufficientResourcesFault +} + +func init() { + t["InvalidResourcePoolStructureFault"] = reflect.TypeOf((*InvalidResourcePoolStructureFault)(nil)).Elem() +} + +type InvalidResourcePoolStructureFaultFault InvalidResourcePoolStructureFault + +func init() { + t["InvalidResourcePoolStructureFaultFault"] = reflect.TypeOf((*InvalidResourcePoolStructureFaultFault)(nil)).Elem() +} + +type InvalidSnapshotFormat struct { + InvalidFormat +} + +func init() { + t["InvalidSnapshotFormat"] = reflect.TypeOf((*InvalidSnapshotFormat)(nil)).Elem() +} + +type InvalidSnapshotFormatFault InvalidSnapshotFormat + +func init() { + t["InvalidSnapshotFormatFault"] = reflect.TypeOf((*InvalidSnapshotFormatFault)(nil)).Elem() +} + +type InvalidState struct { + VimFault +} + +func init() { + t["InvalidState"] = reflect.TypeOf((*InvalidState)(nil)).Elem() +} + +type InvalidStateFault BaseInvalidState + +func init() { + t["InvalidStateFault"] = reflect.TypeOf((*InvalidStateFault)(nil)).Elem() +} + +type InvalidType struct { + InvalidRequest + + Argument string `xml:"argument,omitempty"` +} + +func init() { + t["InvalidType"] = reflect.TypeOf((*InvalidType)(nil)).Elem() +} + +type InvalidTypeFault InvalidType + +func init() { + t["InvalidTypeFault"] = reflect.TypeOf((*InvalidTypeFault)(nil)).Elem() +} + +type InvalidVmConfig struct { + VmConfigFault + + Property string `xml:"property,omitempty"` +} + +func init() { + t["InvalidVmConfig"] = reflect.TypeOf((*InvalidVmConfig)(nil)).Elem() +} + +type InvalidVmConfigFault BaseInvalidVmConfig + +func init() { + t["InvalidVmConfigFault"] = reflect.TypeOf((*InvalidVmConfigFault)(nil)).Elem() +} + +type InvalidVmState struct { + InvalidState + + Vm ManagedObjectReference `xml:"vm"` +} + +func init() { + t["InvalidVmState"] = reflect.TypeOf((*InvalidVmState)(nil)).Elem() +} + +type InvalidVmStateFault InvalidVmState + +func init() { + t["InvalidVmStateFault"] = reflect.TypeOf((*InvalidVmStateFault)(nil)).Elem() +} + +type InventoryDescription struct { + DynamicData + + NumHosts int32 `xml:"numHosts"` + NumVirtualMachines int32 `xml:"numVirtualMachines"` + NumResourcePools int32 `xml:"numResourcePools,omitempty"` + NumClusters int32 `xml:"numClusters,omitempty"` + NumCpuDev int32 `xml:"numCpuDev,omitempty"` + NumNetDev int32 `xml:"numNetDev,omitempty"` + NumDiskDev int32 `xml:"numDiskDev,omitempty"` + NumvCpuDev int32 `xml:"numvCpuDev,omitempty"` + NumvNetDev int32 `xml:"numvNetDev,omitempty"` + NumvDiskDev int32 `xml:"numvDiskDev,omitempty"` +} + +func init() { + t["InventoryDescription"] = reflect.TypeOf((*InventoryDescription)(nil)).Elem() +} + +type InventoryHasStandardAloneHosts struct { + 
NotEnoughLicenses + + Hosts []string `xml:"hosts"` +} + +func init() { + t["InventoryHasStandardAloneHosts"] = reflect.TypeOf((*InventoryHasStandardAloneHosts)(nil)).Elem() +} + +type InventoryHasStandardAloneHostsFault InventoryHasStandardAloneHosts + +func init() { + t["InventoryHasStandardAloneHostsFault"] = reflect.TypeOf((*InventoryHasStandardAloneHostsFault)(nil)).Elem() +} + +type IoFilterHostIssue struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + Issue []LocalizedMethodFault `xml:"issue"` +} + +func init() { + t["IoFilterHostIssue"] = reflect.TypeOf((*IoFilterHostIssue)(nil)).Elem() +} + +type IoFilterInfo struct { + DynamicData + + Id string `xml:"id"` + Name string `xml:"name"` + Vendor string `xml:"vendor"` + Version string `xml:"version"` + Type string `xml:"type,omitempty"` + Summary string `xml:"summary,omitempty"` + ReleaseDate string `xml:"releaseDate,omitempty"` +} + +func init() { + t["IoFilterInfo"] = reflect.TypeOf((*IoFilterInfo)(nil)).Elem() +} + +type IoFilterQueryIssueResult struct { + DynamicData + + OpType string `xml:"opType"` + HostIssue []IoFilterHostIssue `xml:"hostIssue,omitempty"` +} + +func init() { + t["IoFilterQueryIssueResult"] = reflect.TypeOf((*IoFilterQueryIssueResult)(nil)).Elem() +} + +type IpAddress struct { + NegatableExpression +} + +func init() { + t["IpAddress"] = reflect.TypeOf((*IpAddress)(nil)).Elem() +} + +type IpAddressProfile struct { + ApplyProfile +} + +func init() { + t["IpAddressProfile"] = reflect.TypeOf((*IpAddressProfile)(nil)).Elem() +} + +type IpHostnameGeneratorError struct { + CustomizationFault +} + +func init() { + t["IpHostnameGeneratorError"] = reflect.TypeOf((*IpHostnameGeneratorError)(nil)).Elem() +} + +type IpHostnameGeneratorErrorFault IpHostnameGeneratorError + +func init() { + t["IpHostnameGeneratorErrorFault"] = reflect.TypeOf((*IpHostnameGeneratorErrorFault)(nil)).Elem() +} + +type IpPool struct { + DynamicData + + Id int32 `xml:"id,omitempty"` + Name string `xml:"name,omitempty"` + Ipv4Config *IpPoolIpPoolConfigInfo `xml:"ipv4Config,omitempty"` + Ipv6Config *IpPoolIpPoolConfigInfo `xml:"ipv6Config,omitempty"` + DnsDomain string `xml:"dnsDomain,omitempty"` + DnsSearchPath string `xml:"dnsSearchPath,omitempty"` + HostPrefix string `xml:"hostPrefix,omitempty"` + HttpProxy string `xml:"httpProxy,omitempty"` + NetworkAssociation []IpPoolAssociation `xml:"networkAssociation,omitempty"` + AvailableIpv4Addresses int32 `xml:"availableIpv4Addresses,omitempty"` + AvailableIpv6Addresses int32 `xml:"availableIpv6Addresses,omitempty"` + AllocatedIpv4Addresses int32 `xml:"allocatedIpv4Addresses,omitempty"` + AllocatedIpv6Addresses int32 `xml:"allocatedIpv6Addresses,omitempty"` +} + +func init() { + t["IpPool"] = reflect.TypeOf((*IpPool)(nil)).Elem() +} + +type IpPoolAssociation struct { + DynamicData + + Network *ManagedObjectReference `xml:"network,omitempty"` + NetworkName string `xml:"networkName"` +} + +func init() { + t["IpPoolAssociation"] = reflect.TypeOf((*IpPoolAssociation)(nil)).Elem() +} + +type IpPoolIpPoolConfigInfo struct { + DynamicData + + SubnetAddress string `xml:"subnetAddress,omitempty"` + Netmask string `xml:"netmask,omitempty"` + Gateway string `xml:"gateway,omitempty"` + Range string `xml:"range,omitempty"` + Dns []string `xml:"dns,omitempty"` + DhcpServerAvailable *bool `xml:"dhcpServerAvailable"` + IpPoolEnabled *bool `xml:"ipPoolEnabled"` +} + +func init() { + t["IpPoolIpPoolConfigInfo"] = reflect.TypeOf((*IpPoolIpPoolConfigInfo)(nil)).Elem() +} + +type IpPoolManagerIpAllocation 
struct { + DynamicData + + IpAddress string `xml:"ipAddress"` + AllocationId string `xml:"allocationId"` +} + +func init() { + t["IpPoolManagerIpAllocation"] = reflect.TypeOf((*IpPoolManagerIpAllocation)(nil)).Elem() +} + +type IpRange struct { + IpAddress + + AddressPrefix string `xml:"addressPrefix"` + PrefixLength int32 `xml:"prefixLength,omitempty"` +} + +func init() { + t["IpRange"] = reflect.TypeOf((*IpRange)(nil)).Elem() +} + +type IpRouteProfile struct { + ApplyProfile + + StaticRoute []StaticRouteProfile `xml:"staticRoute,omitempty"` +} + +func init() { + t["IpRouteProfile"] = reflect.TypeOf((*IpRouteProfile)(nil)).Elem() +} + +type IsSharedGraphicsActive IsSharedGraphicsActiveRequestType + +func init() { + t["IsSharedGraphicsActive"] = reflect.TypeOf((*IsSharedGraphicsActive)(nil)).Elem() +} + +type IsSharedGraphicsActiveRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["IsSharedGraphicsActiveRequestType"] = reflect.TypeOf((*IsSharedGraphicsActiveRequestType)(nil)).Elem() +} + +type IsSharedGraphicsActiveResponse struct { + Returnval bool `xml:"returnval"` +} + +type IscsiDependencyEntity struct { + DynamicData + + PnicDevice string `xml:"pnicDevice"` + VnicDevice string `xml:"vnicDevice"` + VmhbaName string `xml:"vmhbaName"` +} + +func init() { + t["IscsiDependencyEntity"] = reflect.TypeOf((*IscsiDependencyEntity)(nil)).Elem() +} + +type IscsiFault struct { + VimFault +} + +func init() { + t["IscsiFault"] = reflect.TypeOf((*IscsiFault)(nil)).Elem() +} + +type IscsiFaultFault BaseIscsiFault + +func init() { + t["IscsiFaultFault"] = reflect.TypeOf((*IscsiFaultFault)(nil)).Elem() +} + +type IscsiFaultInvalidVnic struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultInvalidVnic"] = reflect.TypeOf((*IscsiFaultInvalidVnic)(nil)).Elem() +} + +type IscsiFaultInvalidVnicFault IscsiFaultInvalidVnic + +func init() { + t["IscsiFaultInvalidVnicFault"] = reflect.TypeOf((*IscsiFaultInvalidVnicFault)(nil)).Elem() +} + +type IscsiFaultPnicInUse struct { + IscsiFault + + PnicDevice string `xml:"pnicDevice"` +} + +func init() { + t["IscsiFaultPnicInUse"] = reflect.TypeOf((*IscsiFaultPnicInUse)(nil)).Elem() +} + +type IscsiFaultPnicInUseFault IscsiFaultPnicInUse + +func init() { + t["IscsiFaultPnicInUseFault"] = reflect.TypeOf((*IscsiFaultPnicInUseFault)(nil)).Elem() +} + +type IscsiFaultVnicAlreadyBound struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicAlreadyBound"] = reflect.TypeOf((*IscsiFaultVnicAlreadyBound)(nil)).Elem() +} + +type IscsiFaultVnicAlreadyBoundFault IscsiFaultVnicAlreadyBound + +func init() { + t["IscsiFaultVnicAlreadyBoundFault"] = reflect.TypeOf((*IscsiFaultVnicAlreadyBoundFault)(nil)).Elem() +} + +type IscsiFaultVnicHasActivePaths struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicHasActivePaths"] = reflect.TypeOf((*IscsiFaultVnicHasActivePaths)(nil)).Elem() +} + +type IscsiFaultVnicHasActivePathsFault IscsiFaultVnicHasActivePaths + +func init() { + t["IscsiFaultVnicHasActivePathsFault"] = reflect.TypeOf((*IscsiFaultVnicHasActivePathsFault)(nil)).Elem() +} + +type IscsiFaultVnicHasMultipleUplinks struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicHasMultipleUplinks"] = reflect.TypeOf((*IscsiFaultVnicHasMultipleUplinks)(nil)).Elem() +} + +type IscsiFaultVnicHasMultipleUplinksFault IscsiFaultVnicHasMultipleUplinks + +func init() { + 
t["IscsiFaultVnicHasMultipleUplinksFault"] = reflect.TypeOf((*IscsiFaultVnicHasMultipleUplinksFault)(nil)).Elem() +} + +type IscsiFaultVnicHasNoUplinks struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicHasNoUplinks"] = reflect.TypeOf((*IscsiFaultVnicHasNoUplinks)(nil)).Elem() +} + +type IscsiFaultVnicHasNoUplinksFault IscsiFaultVnicHasNoUplinks + +func init() { + t["IscsiFaultVnicHasNoUplinksFault"] = reflect.TypeOf((*IscsiFaultVnicHasNoUplinksFault)(nil)).Elem() +} + +type IscsiFaultVnicHasWrongUplink struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicHasWrongUplink"] = reflect.TypeOf((*IscsiFaultVnicHasWrongUplink)(nil)).Elem() +} + +type IscsiFaultVnicHasWrongUplinkFault IscsiFaultVnicHasWrongUplink + +func init() { + t["IscsiFaultVnicHasWrongUplinkFault"] = reflect.TypeOf((*IscsiFaultVnicHasWrongUplinkFault)(nil)).Elem() +} + +type IscsiFaultVnicInUse struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicInUse"] = reflect.TypeOf((*IscsiFaultVnicInUse)(nil)).Elem() +} + +type IscsiFaultVnicInUseFault IscsiFaultVnicInUse + +func init() { + t["IscsiFaultVnicInUseFault"] = reflect.TypeOf((*IscsiFaultVnicInUseFault)(nil)).Elem() +} + +type IscsiFaultVnicIsLastPath struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicIsLastPath"] = reflect.TypeOf((*IscsiFaultVnicIsLastPath)(nil)).Elem() +} + +type IscsiFaultVnicIsLastPathFault IscsiFaultVnicIsLastPath + +func init() { + t["IscsiFaultVnicIsLastPathFault"] = reflect.TypeOf((*IscsiFaultVnicIsLastPathFault)(nil)).Elem() +} + +type IscsiFaultVnicNotBound struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicNotBound"] = reflect.TypeOf((*IscsiFaultVnicNotBound)(nil)).Elem() +} + +type IscsiFaultVnicNotBoundFault IscsiFaultVnicNotBound + +func init() { + t["IscsiFaultVnicNotBoundFault"] = reflect.TypeOf((*IscsiFaultVnicNotBoundFault)(nil)).Elem() +} + +type IscsiFaultVnicNotFound struct { + IscsiFault + + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["IscsiFaultVnicNotFound"] = reflect.TypeOf((*IscsiFaultVnicNotFound)(nil)).Elem() +} + +type IscsiFaultVnicNotFoundFault IscsiFaultVnicNotFound + +func init() { + t["IscsiFaultVnicNotFoundFault"] = reflect.TypeOf((*IscsiFaultVnicNotFoundFault)(nil)).Elem() +} + +type IscsiMigrationDependency struct { + DynamicData + + MigrationAllowed bool `xml:"migrationAllowed"` + DisallowReason *IscsiStatus `xml:"disallowReason,omitempty"` + Dependency []IscsiDependencyEntity `xml:"dependency,omitempty"` +} + +func init() { + t["IscsiMigrationDependency"] = reflect.TypeOf((*IscsiMigrationDependency)(nil)).Elem() +} + +type IscsiPortInfo struct { + DynamicData + + VnicDevice string `xml:"vnicDevice,omitempty"` + Vnic *HostVirtualNic `xml:"vnic,omitempty"` + PnicDevice string `xml:"pnicDevice,omitempty"` + Pnic *PhysicalNic `xml:"pnic,omitempty"` + SwitchName string `xml:"switchName,omitempty"` + SwitchUuid string `xml:"switchUuid,omitempty"` + PortgroupName string `xml:"portgroupName,omitempty"` + PortgroupKey string `xml:"portgroupKey,omitempty"` + PortKey string `xml:"portKey,omitempty"` + OpaqueNetworkId string `xml:"opaqueNetworkId,omitempty"` + OpaqueNetworkType string `xml:"opaqueNetworkType,omitempty"` + OpaqueNetworkName string `xml:"opaqueNetworkName,omitempty"` + ExternalId string `xml:"externalId,omitempty"` + ComplianceStatus *IscsiStatus 
`xml:"complianceStatus,omitempty"` + PathStatus string `xml:"pathStatus,omitempty"` +} + +func init() { + t["IscsiPortInfo"] = reflect.TypeOf((*IscsiPortInfo)(nil)).Elem() +} + +type IscsiStatus struct { + DynamicData + + Reason []LocalizedMethodFault `xml:"reason,omitempty"` +} + +func init() { + t["IscsiStatus"] = reflect.TypeOf((*IscsiStatus)(nil)).Elem() +} + +type IsoImageFileInfo struct { + FileInfo +} + +func init() { + t["IsoImageFileInfo"] = reflect.TypeOf((*IsoImageFileInfo)(nil)).Elem() +} + +type IsoImageFileQuery struct { + FileQuery +} + +func init() { + t["IsoImageFileQuery"] = reflect.TypeOf((*IsoImageFileQuery)(nil)).Elem() +} + +type JoinDomainRequestType struct { + This ManagedObjectReference `xml:"_this"` + DomainName string `xml:"domainName"` + UserName string `xml:"userName"` + Password string `xml:"password"` +} + +func init() { + t["JoinDomainRequestType"] = reflect.TypeOf((*JoinDomainRequestType)(nil)).Elem() +} + +type JoinDomainWithCAMRequestType struct { + This ManagedObjectReference `xml:"_this"` + DomainName string `xml:"domainName"` + CamServer string `xml:"camServer"` +} + +func init() { + t["JoinDomainWithCAMRequestType"] = reflect.TypeOf((*JoinDomainWithCAMRequestType)(nil)).Elem() +} + +type JoinDomainWithCAM_Task JoinDomainWithCAMRequestType + +func init() { + t["JoinDomainWithCAM_Task"] = reflect.TypeOf((*JoinDomainWithCAM_Task)(nil)).Elem() +} + +type JoinDomainWithCAM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type JoinDomain_Task JoinDomainRequestType + +func init() { + t["JoinDomain_Task"] = reflect.TypeOf((*JoinDomain_Task)(nil)).Elem() +} + +type JoinDomain_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type KernelModuleInfo struct { + DynamicData + + Id int32 `xml:"id"` + Name string `xml:"name"` + Version string `xml:"version"` + Filename string `xml:"filename"` + OptionString string `xml:"optionString"` + Loaded bool `xml:"loaded"` + Enabled bool `xml:"enabled"` + UseCount int32 `xml:"useCount"` + ReadOnlySection KernelModuleSectionInfo `xml:"readOnlySection"` + WritableSection KernelModuleSectionInfo `xml:"writableSection"` + TextSection KernelModuleSectionInfo `xml:"textSection"` + DataSection KernelModuleSectionInfo `xml:"dataSection"` + BssSection KernelModuleSectionInfo `xml:"bssSection"` +} + +func init() { + t["KernelModuleInfo"] = reflect.TypeOf((*KernelModuleInfo)(nil)).Elem() +} + +type KernelModuleSectionInfo struct { + DynamicData + + Address int64 `xml:"address"` + Length int32 `xml:"length,omitempty"` +} + +func init() { + t["KernelModuleSectionInfo"] = reflect.TypeOf((*KernelModuleSectionInfo)(nil)).Elem() +} + +type KeyAnyValue struct { + DynamicData + + Key string `xml:"key"` + Value AnyType `xml:"value,typeattr"` +} + +func init() { + t["KeyAnyValue"] = reflect.TypeOf((*KeyAnyValue)(nil)).Elem() +} + +type KeyProviderId struct { + DynamicData + + Id string `xml:"id"` +} + +func init() { + t["KeyProviderId"] = reflect.TypeOf((*KeyProviderId)(nil)).Elem() +} + +type KeyValue struct { + DynamicData + + Key string `xml:"key"` + Value string `xml:"value"` +} + +func init() { + t["KeyValue"] = reflect.TypeOf((*KeyValue)(nil)).Elem() +} + +type KmipClusterInfo struct { + DynamicData + + ClusterId KeyProviderId `xml:"clusterId"` + Servers []KmipServerInfo `xml:"servers,omitempty"` + UseAsDefault bool `xml:"useAsDefault"` +} + +func init() { + t["KmipClusterInfo"] = reflect.TypeOf((*KmipClusterInfo)(nil)).Elem() +} + +type KmipServerInfo struct { + DynamicData + + 
Name string `xml:"name"` + Address string `xml:"address"` + Port int32 `xml:"port"` + ProxyAddress string `xml:"proxyAddress,omitempty"` + ProxyPort int32 `xml:"proxyPort,omitempty"` + Reconnect int32 `xml:"reconnect,omitempty"` + Protocol string `xml:"protocol,omitempty"` + Nbio int32 `xml:"nbio,omitempty"` + Timeout int32 `xml:"timeout,omitempty"` + UserName string `xml:"userName,omitempty"` +} + +func init() { + t["KmipServerInfo"] = reflect.TypeOf((*KmipServerInfo)(nil)).Elem() +} + +type KmipServerSpec struct { + DynamicData + + ClusterId KeyProviderId `xml:"clusterId"` + Info KmipServerInfo `xml:"info"` + Password string `xml:"password,omitempty"` +} + +func init() { + t["KmipServerSpec"] = reflect.TypeOf((*KmipServerSpec)(nil)).Elem() +} + +type KmipServerStatus struct { + DynamicData + + ClusterId KeyProviderId `xml:"clusterId"` + Name string `xml:"name"` + Status ManagedEntityStatus `xml:"status"` + Description string `xml:"description"` +} + +func init() { + t["KmipServerStatus"] = reflect.TypeOf((*KmipServerStatus)(nil)).Elem() +} + +type LargeRDMConversionNotSupported struct { + MigrationFault + + Device string `xml:"device"` +} + +func init() { + t["LargeRDMConversionNotSupported"] = reflect.TypeOf((*LargeRDMConversionNotSupported)(nil)).Elem() +} + +type LargeRDMConversionNotSupportedFault LargeRDMConversionNotSupported + +func init() { + t["LargeRDMConversionNotSupportedFault"] = reflect.TypeOf((*LargeRDMConversionNotSupportedFault)(nil)).Elem() +} + +type LargeRDMNotSupportedOnDatastore struct { + VmConfigFault + + Device string `xml:"device"` + Datastore ManagedObjectReference `xml:"datastore"` + DatastoreName string `xml:"datastoreName"` +} + +func init() { + t["LargeRDMNotSupportedOnDatastore"] = reflect.TypeOf((*LargeRDMNotSupportedOnDatastore)(nil)).Elem() +} + +type LargeRDMNotSupportedOnDatastoreFault LargeRDMNotSupportedOnDatastore + +func init() { + t["LargeRDMNotSupportedOnDatastoreFault"] = reflect.TypeOf((*LargeRDMNotSupportedOnDatastoreFault)(nil)).Elem() +} + +type LatencySensitivity struct { + DynamicData + + Level LatencySensitivitySensitivityLevel `xml:"level"` + Sensitivity int32 `xml:"sensitivity,omitempty"` +} + +func init() { + t["LatencySensitivity"] = reflect.TypeOf((*LatencySensitivity)(nil)).Elem() +} + +type LeaveCurrentDomainRequestType struct { + This ManagedObjectReference `xml:"_this"` + Force bool `xml:"force"` +} + +func init() { + t["LeaveCurrentDomainRequestType"] = reflect.TypeOf((*LeaveCurrentDomainRequestType)(nil)).Elem() +} + +type LeaveCurrentDomain_Task LeaveCurrentDomainRequestType + +func init() { + t["LeaveCurrentDomain_Task"] = reflect.TypeOf((*LeaveCurrentDomain_Task)(nil)).Elem() +} + +type LeaveCurrentDomain_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type LegacyNetworkInterfaceInUse struct { + CannotAccessNetwork +} + +func init() { + t["LegacyNetworkInterfaceInUse"] = reflect.TypeOf((*LegacyNetworkInterfaceInUse)(nil)).Elem() +} + +type LegacyNetworkInterfaceInUseFault LegacyNetworkInterfaceInUse + +func init() { + t["LegacyNetworkInterfaceInUseFault"] = reflect.TypeOf((*LegacyNetworkInterfaceInUseFault)(nil)).Elem() +} + +type LicenseAssignmentFailed struct { + RuntimeFault + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["LicenseAssignmentFailed"] = reflect.TypeOf((*LicenseAssignmentFailed)(nil)).Elem() +} + +type LicenseAssignmentFailedFault LicenseAssignmentFailed + +func init() { + t["LicenseAssignmentFailedFault"] = 
reflect.TypeOf((*LicenseAssignmentFailedFault)(nil)).Elem() +} + +type LicenseAssignmentManagerLicenseAssignment struct { + DynamicData + + EntityId string `xml:"entityId"` + Scope string `xml:"scope,omitempty"` + EntityDisplayName string `xml:"entityDisplayName,omitempty"` + AssignedLicense LicenseManagerLicenseInfo `xml:"assignedLicense"` + Properties []KeyAnyValue `xml:"properties,omitempty"` +} + +func init() { + t["LicenseAssignmentManagerLicenseAssignment"] = reflect.TypeOf((*LicenseAssignmentManagerLicenseAssignment)(nil)).Elem() +} + +type LicenseAvailabilityInfo struct { + DynamicData + + Feature LicenseFeatureInfo `xml:"feature"` + Total int32 `xml:"total"` + Available int32 `xml:"available"` +} + +func init() { + t["LicenseAvailabilityInfo"] = reflect.TypeOf((*LicenseAvailabilityInfo)(nil)).Elem() +} + +type LicenseDiagnostics struct { + DynamicData + + SourceLastChanged time.Time `xml:"sourceLastChanged"` + SourceLost string `xml:"sourceLost"` + SourceLatency float32 `xml:"sourceLatency"` + LicenseRequests string `xml:"licenseRequests"` + LicenseRequestFailures string `xml:"licenseRequestFailures"` + LicenseFeatureUnknowns string `xml:"licenseFeatureUnknowns"` + OpState LicenseManagerState `xml:"opState"` + LastStatusUpdate time.Time `xml:"lastStatusUpdate"` + OpFailureMessage string `xml:"opFailureMessage"` +} + +func init() { + t["LicenseDiagnostics"] = reflect.TypeOf((*LicenseDiagnostics)(nil)).Elem() +} + +type LicenseDowngradeDisallowed struct { + NotEnoughLicenses + + Edition string `xml:"edition"` + EntityId string `xml:"entityId"` + Features []KeyAnyValue `xml:"features"` +} + +func init() { + t["LicenseDowngradeDisallowed"] = reflect.TypeOf((*LicenseDowngradeDisallowed)(nil)).Elem() +} + +type LicenseDowngradeDisallowedFault LicenseDowngradeDisallowed + +func init() { + t["LicenseDowngradeDisallowedFault"] = reflect.TypeOf((*LicenseDowngradeDisallowedFault)(nil)).Elem() +} + +type LicenseEntityNotFound struct { + VimFault + + EntityId string `xml:"entityId"` +} + +func init() { + t["LicenseEntityNotFound"] = reflect.TypeOf((*LicenseEntityNotFound)(nil)).Elem() +} + +type LicenseEntityNotFoundFault LicenseEntityNotFound + +func init() { + t["LicenseEntityNotFoundFault"] = reflect.TypeOf((*LicenseEntityNotFoundFault)(nil)).Elem() +} + +type LicenseEvent struct { + Event +} + +func init() { + t["LicenseEvent"] = reflect.TypeOf((*LicenseEvent)(nil)).Elem() +} + +type LicenseExpired struct { + NotEnoughLicenses + + LicenseKey string `xml:"licenseKey"` +} + +func init() { + t["LicenseExpired"] = reflect.TypeOf((*LicenseExpired)(nil)).Elem() +} + +type LicenseExpiredEvent struct { + Event + + Feature LicenseFeatureInfo `xml:"feature"` +} + +func init() { + t["LicenseExpiredEvent"] = reflect.TypeOf((*LicenseExpiredEvent)(nil)).Elem() +} + +type LicenseExpiredFault LicenseExpired + +func init() { + t["LicenseExpiredFault"] = reflect.TypeOf((*LicenseExpiredFault)(nil)).Elem() +} + +type LicenseFeatureInfo struct { + DynamicData + + Key string `xml:"key"` + FeatureName string `xml:"featureName"` + FeatureDescription string `xml:"featureDescription,omitempty"` + State LicenseFeatureInfoState `xml:"state,omitempty"` + CostUnit string `xml:"costUnit"` + SourceRestriction string `xml:"sourceRestriction,omitempty"` + DependentKey []string `xml:"dependentKey,omitempty"` + Edition *bool `xml:"edition"` + ExpiresOn *time.Time `xml:"expiresOn"` +} + +func init() { + t["LicenseFeatureInfo"] = reflect.TypeOf((*LicenseFeatureInfo)(nil)).Elem() +} + +type LicenseKeyEntityMismatch struct { + 
NotEnoughLicenses +} + +func init() { + t["LicenseKeyEntityMismatch"] = reflect.TypeOf((*LicenseKeyEntityMismatch)(nil)).Elem() +} + +type LicenseKeyEntityMismatchFault LicenseKeyEntityMismatch + +func init() { + t["LicenseKeyEntityMismatchFault"] = reflect.TypeOf((*LicenseKeyEntityMismatchFault)(nil)).Elem() +} + +type LicenseManagerEvaluationInfo struct { + DynamicData + + Properties []KeyAnyValue `xml:"properties"` +} + +func init() { + t["LicenseManagerEvaluationInfo"] = reflect.TypeOf((*LicenseManagerEvaluationInfo)(nil)).Elem() +} + +type LicenseManagerLicenseInfo struct { + DynamicData + + LicenseKey string `xml:"licenseKey"` + EditionKey string `xml:"editionKey"` + Name string `xml:"name"` + Total int32 `xml:"total"` + Used int32 `xml:"used,omitempty"` + CostUnit string `xml:"costUnit"` + Properties []KeyAnyValue `xml:"properties,omitempty"` + Labels []KeyValue `xml:"labels,omitempty"` +} + +func init() { + t["LicenseManagerLicenseInfo"] = reflect.TypeOf((*LicenseManagerLicenseInfo)(nil)).Elem() +} + +type LicenseNonComplianceEvent struct { + LicenseEvent + + Url string `xml:"url"` +} + +func init() { + t["LicenseNonComplianceEvent"] = reflect.TypeOf((*LicenseNonComplianceEvent)(nil)).Elem() +} + +type LicenseReservationInfo struct { + DynamicData + + Key string `xml:"key"` + State LicenseReservationInfoState `xml:"state"` + Required int32 `xml:"required"` +} + +func init() { + t["LicenseReservationInfo"] = reflect.TypeOf((*LicenseReservationInfo)(nil)).Elem() +} + +type LicenseRestricted struct { + NotEnoughLicenses +} + +func init() { + t["LicenseRestricted"] = reflect.TypeOf((*LicenseRestricted)(nil)).Elem() +} + +type LicenseRestrictedEvent struct { + LicenseEvent +} + +func init() { + t["LicenseRestrictedEvent"] = reflect.TypeOf((*LicenseRestrictedEvent)(nil)).Elem() +} + +type LicenseRestrictedFault LicenseRestricted + +func init() { + t["LicenseRestrictedFault"] = reflect.TypeOf((*LicenseRestrictedFault)(nil)).Elem() +} + +type LicenseServerAvailableEvent struct { + LicenseEvent + + LicenseServer string `xml:"licenseServer"` +} + +func init() { + t["LicenseServerAvailableEvent"] = reflect.TypeOf((*LicenseServerAvailableEvent)(nil)).Elem() +} + +type LicenseServerSource struct { + LicenseSource + + LicenseServer string `xml:"licenseServer"` +} + +func init() { + t["LicenseServerSource"] = reflect.TypeOf((*LicenseServerSource)(nil)).Elem() +} + +type LicenseServerUnavailable struct { + VimFault + + LicenseServer string `xml:"licenseServer"` +} + +func init() { + t["LicenseServerUnavailable"] = reflect.TypeOf((*LicenseServerUnavailable)(nil)).Elem() +} + +type LicenseServerUnavailableEvent struct { + LicenseEvent + + LicenseServer string `xml:"licenseServer"` +} + +func init() { + t["LicenseServerUnavailableEvent"] = reflect.TypeOf((*LicenseServerUnavailableEvent)(nil)).Elem() +} + +type LicenseServerUnavailableFault LicenseServerUnavailable + +func init() { + t["LicenseServerUnavailableFault"] = reflect.TypeOf((*LicenseServerUnavailableFault)(nil)).Elem() +} + +type LicenseSource struct { + DynamicData +} + +func init() { + t["LicenseSource"] = reflect.TypeOf((*LicenseSource)(nil)).Elem() +} + +type LicenseSourceUnavailable struct { + NotEnoughLicenses + + LicenseSource BaseLicenseSource `xml:"licenseSource,typeattr"` +} + +func init() { + t["LicenseSourceUnavailable"] = reflect.TypeOf((*LicenseSourceUnavailable)(nil)).Elem() +} + +type LicenseSourceUnavailableFault LicenseSourceUnavailable + +func init() { + t["LicenseSourceUnavailableFault"] = 
reflect.TypeOf((*LicenseSourceUnavailableFault)(nil)).Elem() +} + +type LicenseUsageInfo struct { + DynamicData + + Source BaseLicenseSource `xml:"source,typeattr"` + SourceAvailable bool `xml:"sourceAvailable"` + ReservationInfo []LicenseReservationInfo `xml:"reservationInfo,omitempty"` + FeatureInfo []LicenseFeatureInfo `xml:"featureInfo,omitempty"` +} + +func init() { + t["LicenseUsageInfo"] = reflect.TypeOf((*LicenseUsageInfo)(nil)).Elem() +} + +type LimitExceeded struct { + VimFault + + Property string `xml:"property,omitempty"` + Limit *int32 `xml:"limit"` +} + +func init() { + t["LimitExceeded"] = reflect.TypeOf((*LimitExceeded)(nil)).Elem() +} + +type LimitExceededFault LimitExceeded + +func init() { + t["LimitExceededFault"] = reflect.TypeOf((*LimitExceededFault)(nil)).Elem() +} + +type LinkDiscoveryProtocolConfig struct { + DynamicData + + Protocol string `xml:"protocol"` + Operation string `xml:"operation"` +} + +func init() { + t["LinkDiscoveryProtocolConfig"] = reflect.TypeOf((*LinkDiscoveryProtocolConfig)(nil)).Elem() +} + +type LinkLayerDiscoveryProtocolInfo struct { + DynamicData + + ChassisId string `xml:"chassisId"` + PortId string `xml:"portId"` + TimeToLive int32 `xml:"timeToLive"` + Parameter []KeyAnyValue `xml:"parameter,omitempty"` +} + +func init() { + t["LinkLayerDiscoveryProtocolInfo"] = reflect.TypeOf((*LinkLayerDiscoveryProtocolInfo)(nil)).Elem() +} + +type LinkProfile struct { + ApplyProfile +} + +func init() { + t["LinkProfile"] = reflect.TypeOf((*LinkProfile)(nil)).Elem() +} + +type LinuxVolumeNotClean struct { + CustomizationFault +} + +func init() { + t["LinuxVolumeNotClean"] = reflect.TypeOf((*LinuxVolumeNotClean)(nil)).Elem() +} + +type LinuxVolumeNotCleanFault LinuxVolumeNotClean + +func init() { + t["LinuxVolumeNotCleanFault"] = reflect.TypeOf((*LinuxVolumeNotCleanFault)(nil)).Elem() +} + +type ListCACertificateRevocationLists ListCACertificateRevocationListsRequestType + +func init() { + t["ListCACertificateRevocationLists"] = reflect.TypeOf((*ListCACertificateRevocationLists)(nil)).Elem() +} + +type ListCACertificateRevocationListsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ListCACertificateRevocationListsRequestType"] = reflect.TypeOf((*ListCACertificateRevocationListsRequestType)(nil)).Elem() +} + +type ListCACertificateRevocationListsResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type ListCACertificates ListCACertificatesRequestType + +func init() { + t["ListCACertificates"] = reflect.TypeOf((*ListCACertificates)(nil)).Elem() +} + +type ListCACertificatesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ListCACertificatesRequestType"] = reflect.TypeOf((*ListCACertificatesRequestType)(nil)).Elem() +} + +type ListCACertificatesResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type ListFilesInGuest ListFilesInGuestRequestType + +func init() { + t["ListFilesInGuest"] = reflect.TypeOf((*ListFilesInGuest)(nil)).Elem() +} + +type ListFilesInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + FilePath string `xml:"filePath"` + Index int32 `xml:"index,omitempty"` + MaxResults int32 `xml:"maxResults,omitempty"` + MatchPattern string `xml:"matchPattern,omitempty"` +} + +func init() { + t["ListFilesInGuestRequestType"] = reflect.TypeOf((*ListFilesInGuestRequestType)(nil)).Elem() +} + +type ListFilesInGuestResponse 
struct { + Returnval GuestListFileInfo `xml:"returnval"` +} + +type ListGuestAliases ListGuestAliasesRequestType + +func init() { + t["ListGuestAliases"] = reflect.TypeOf((*ListGuestAliases)(nil)).Elem() +} + +type ListGuestAliasesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Username string `xml:"username"` +} + +func init() { + t["ListGuestAliasesRequestType"] = reflect.TypeOf((*ListGuestAliasesRequestType)(nil)).Elem() +} + +type ListGuestAliasesResponse struct { + Returnval []GuestAliases `xml:"returnval,omitempty"` +} + +type ListGuestMappedAliases ListGuestMappedAliasesRequestType + +func init() { + t["ListGuestMappedAliases"] = reflect.TypeOf((*ListGuestMappedAliases)(nil)).Elem() +} + +type ListGuestMappedAliasesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` +} + +func init() { + t["ListGuestMappedAliasesRequestType"] = reflect.TypeOf((*ListGuestMappedAliasesRequestType)(nil)).Elem() +} + +type ListGuestMappedAliasesResponse struct { + Returnval []GuestMappedAliases `xml:"returnval,omitempty"` +} + +type ListKeys ListKeysRequestType + +func init() { + t["ListKeys"] = reflect.TypeOf((*ListKeys)(nil)).Elem() +} + +type ListKeysRequestType struct { + This ManagedObjectReference `xml:"_this"` + Limit *int32 `xml:"limit"` +} + +func init() { + t["ListKeysRequestType"] = reflect.TypeOf((*ListKeysRequestType)(nil)).Elem() +} + +type ListKeysResponse struct { + Returnval []CryptoKeyId `xml:"returnval,omitempty"` +} + +type ListKmipServers ListKmipServersRequestType + +func init() { + t["ListKmipServers"] = reflect.TypeOf((*ListKmipServers)(nil)).Elem() +} + +type ListKmipServersRequestType struct { + This ManagedObjectReference `xml:"_this"` + Limit *int32 `xml:"limit"` +} + +func init() { + t["ListKmipServersRequestType"] = reflect.TypeOf((*ListKmipServersRequestType)(nil)).Elem() +} + +type ListKmipServersResponse struct { + Returnval []KmipClusterInfo `xml:"returnval,omitempty"` +} + +type ListProcessesInGuest ListProcessesInGuestRequestType + +func init() { + t["ListProcessesInGuest"] = reflect.TypeOf((*ListProcessesInGuest)(nil)).Elem() +} + +type ListProcessesInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Pids []int64 `xml:"pids,omitempty"` +} + +func init() { + t["ListProcessesInGuestRequestType"] = reflect.TypeOf((*ListProcessesInGuestRequestType)(nil)).Elem() +} + +type ListProcessesInGuestResponse struct { + Returnval []GuestProcessInfo `xml:"returnval,omitempty"` +} + +type ListRegistryKeysInGuest ListRegistryKeysInGuestRequestType + +func init() { + t["ListRegistryKeysInGuest"] = reflect.TypeOf((*ListRegistryKeysInGuest)(nil)).Elem() +} + +type ListRegistryKeysInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + KeyName GuestRegKeyNameSpec `xml:"keyName"` + Recursive bool `xml:"recursive"` + MatchPattern string `xml:"matchPattern,omitempty"` +} + +func init() { + t["ListRegistryKeysInGuestRequestType"] = reflect.TypeOf((*ListRegistryKeysInGuestRequestType)(nil)).Elem() +} + +type ListRegistryKeysInGuestResponse struct { + Returnval []GuestRegKeyRecordSpec `xml:"returnval,omitempty"` +} + +type ListRegistryValuesInGuest 
ListRegistryValuesInGuestRequestType + +func init() { + t["ListRegistryValuesInGuest"] = reflect.TypeOf((*ListRegistryValuesInGuest)(nil)).Elem() +} + +type ListRegistryValuesInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + KeyName GuestRegKeyNameSpec `xml:"keyName"` + ExpandStrings bool `xml:"expandStrings"` + MatchPattern string `xml:"matchPattern,omitempty"` +} + +func init() { + t["ListRegistryValuesInGuestRequestType"] = reflect.TypeOf((*ListRegistryValuesInGuestRequestType)(nil)).Elem() +} + +type ListRegistryValuesInGuestResponse struct { + Returnval []GuestRegValueSpec `xml:"returnval,omitempty"` +} + +type ListSmartCardTrustAnchors ListSmartCardTrustAnchorsRequestType + +func init() { + t["ListSmartCardTrustAnchors"] = reflect.TypeOf((*ListSmartCardTrustAnchors)(nil)).Elem() +} + +type ListSmartCardTrustAnchorsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ListSmartCardTrustAnchorsRequestType"] = reflect.TypeOf((*ListSmartCardTrustAnchorsRequestType)(nil)).Elem() +} + +type ListSmartCardTrustAnchorsResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type ListTagsAttachedToVStorageObject ListTagsAttachedToVStorageObjectRequestType + +func init() { + t["ListTagsAttachedToVStorageObject"] = reflect.TypeOf((*ListTagsAttachedToVStorageObject)(nil)).Elem() +} + +type ListTagsAttachedToVStorageObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` +} + +func init() { + t["ListTagsAttachedToVStorageObjectRequestType"] = reflect.TypeOf((*ListTagsAttachedToVStorageObjectRequestType)(nil)).Elem() +} + +type ListTagsAttachedToVStorageObjectResponse struct { + Returnval []VslmTagEntry `xml:"returnval,omitempty"` +} + +type ListVStorageObject ListVStorageObjectRequestType + +func init() { + t["ListVStorageObject"] = reflect.TypeOf((*ListVStorageObject)(nil)).Elem() +} + +type ListVStorageObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["ListVStorageObjectRequestType"] = reflect.TypeOf((*ListVStorageObjectRequestType)(nil)).Elem() +} + +type ListVStorageObjectResponse struct { + Returnval []ID `xml:"returnval,omitempty"` +} + +type ListVStorageObjectsAttachedToTag ListVStorageObjectsAttachedToTagRequestType + +func init() { + t["ListVStorageObjectsAttachedToTag"] = reflect.TypeOf((*ListVStorageObjectsAttachedToTag)(nil)).Elem() +} + +type ListVStorageObjectsAttachedToTagRequestType struct { + This ManagedObjectReference `xml:"_this"` + Category string `xml:"category"` + Tag string `xml:"tag"` +} + +func init() { + t["ListVStorageObjectsAttachedToTagRequestType"] = reflect.TypeOf((*ListVStorageObjectsAttachedToTagRequestType)(nil)).Elem() +} + +type ListVStorageObjectsAttachedToTagResponse struct { + Returnval []ID `xml:"returnval,omitempty"` +} + +type LocalDatastoreCreatedEvent struct { + HostEvent + + Datastore DatastoreEventArgument `xml:"datastore"` + DatastoreUrl string `xml:"datastoreUrl,omitempty"` +} + +func init() { + t["LocalDatastoreCreatedEvent"] = reflect.TypeOf((*LocalDatastoreCreatedEvent)(nil)).Elem() +} + +type LocalDatastoreInfo struct { + DatastoreInfo + + Path string `xml:"path,omitempty"` +} + +func init() { + t["LocalDatastoreInfo"] = reflect.TypeOf((*LocalDatastoreInfo)(nil)).Elem() +} + +type LocalLicenseSource struct { + LicenseSource + + LicenseKeys string 
`xml:"licenseKeys"` +} + +func init() { + t["LocalLicenseSource"] = reflect.TypeOf((*LocalLicenseSource)(nil)).Elem() +} + +type LocalTSMEnabledEvent struct { + HostEvent +} + +func init() { + t["LocalTSMEnabledEvent"] = reflect.TypeOf((*LocalTSMEnabledEvent)(nil)).Elem() +} + +type LocalizableMessage struct { + DynamicData + + Key string `xml:"key"` + Arg []KeyAnyValue `xml:"arg,omitempty"` + Message string `xml:"message,omitempty"` +} + +func init() { + t["LocalizableMessage"] = reflect.TypeOf((*LocalizableMessage)(nil)).Elem() +} + +type LocalizationManagerMessageCatalog struct { + DynamicData + + ModuleName string `xml:"moduleName"` + CatalogName string `xml:"catalogName"` + Locale string `xml:"locale"` + CatalogUri string `xml:"catalogUri"` + LastModified *time.Time `xml:"lastModified"` + Md5sum string `xml:"md5sum,omitempty"` + Version string `xml:"version,omitempty"` +} + +func init() { + t["LocalizationManagerMessageCatalog"] = reflect.TypeOf((*LocalizationManagerMessageCatalog)(nil)).Elem() +} + +type LocalizedMethodFault struct { + DynamicData + + Fault BaseMethodFault `xml:"fault,typeattr"` + LocalizedMessage string `xml:"localizedMessage,omitempty"` +} + +func init() { + t["LocalizedMethodFault"] = reflect.TypeOf((*LocalizedMethodFault)(nil)).Elem() +} + +type LockerMisconfiguredEvent struct { + Event + + Datastore DatastoreEventArgument `xml:"datastore"` +} + +func init() { + t["LockerMisconfiguredEvent"] = reflect.TypeOf((*LockerMisconfiguredEvent)(nil)).Elem() +} + +type LockerReconfiguredEvent struct { + Event + + OldDatastore *DatastoreEventArgument `xml:"oldDatastore,omitempty"` + NewDatastore *DatastoreEventArgument `xml:"newDatastore,omitempty"` +} + +func init() { + t["LockerReconfiguredEvent"] = reflect.TypeOf((*LockerReconfiguredEvent)(nil)).Elem() +} + +type LogBundlingFailed struct { + VimFault +} + +func init() { + t["LogBundlingFailed"] = reflect.TypeOf((*LogBundlingFailed)(nil)).Elem() +} + +type LogBundlingFailedFault LogBundlingFailed + +func init() { + t["LogBundlingFailedFault"] = reflect.TypeOf((*LogBundlingFailedFault)(nil)).Elem() +} + +type LogUserEvent LogUserEventRequestType + +func init() { + t["LogUserEvent"] = reflect.TypeOf((*LogUserEvent)(nil)).Elem() +} + +type LogUserEventRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + Msg string `xml:"msg"` +} + +func init() { + t["LogUserEventRequestType"] = reflect.TypeOf((*LogUserEventRequestType)(nil)).Elem() +} + +type LogUserEventResponse struct { +} + +type Login LoginRequestType + +func init() { + t["Login"] = reflect.TypeOf((*Login)(nil)).Elem() +} + +type LoginBySSPI LoginBySSPIRequestType + +func init() { + t["LoginBySSPI"] = reflect.TypeOf((*LoginBySSPI)(nil)).Elem() +} + +type LoginBySSPIRequestType struct { + This ManagedObjectReference `xml:"_this"` + Base64Token string `xml:"base64Token"` + Locale string `xml:"locale,omitempty"` +} + +func init() { + t["LoginBySSPIRequestType"] = reflect.TypeOf((*LoginBySSPIRequestType)(nil)).Elem() +} + +type LoginBySSPIResponse struct { + Returnval UserSession `xml:"returnval"` +} + +type LoginByToken LoginByTokenRequestType + +func init() { + t["LoginByToken"] = reflect.TypeOf((*LoginByToken)(nil)).Elem() +} + +type LoginByTokenRequestType struct { + This ManagedObjectReference `xml:"_this"` + Locale string `xml:"locale,omitempty"` +} + +func init() { + t["LoginByTokenRequestType"] = reflect.TypeOf((*LoginByTokenRequestType)(nil)).Elem() +} + +type LoginByTokenResponse struct { + Returnval 
UserSession `xml:"returnval"` +} + +type LoginExtensionByCertificate LoginExtensionByCertificateRequestType + +func init() { + t["LoginExtensionByCertificate"] = reflect.TypeOf((*LoginExtensionByCertificate)(nil)).Elem() +} + +type LoginExtensionByCertificateRequestType struct { + This ManagedObjectReference `xml:"_this"` + ExtensionKey string `xml:"extensionKey"` + Locale string `xml:"locale,omitempty"` +} + +func init() { + t["LoginExtensionByCertificateRequestType"] = reflect.TypeOf((*LoginExtensionByCertificateRequestType)(nil)).Elem() +} + +type LoginExtensionByCertificateResponse struct { + Returnval UserSession `xml:"returnval"` +} + +type LoginExtensionBySubjectName LoginExtensionBySubjectNameRequestType + +func init() { + t["LoginExtensionBySubjectName"] = reflect.TypeOf((*LoginExtensionBySubjectName)(nil)).Elem() +} + +type LoginExtensionBySubjectNameRequestType struct { + This ManagedObjectReference `xml:"_this"` + ExtensionKey string `xml:"extensionKey"` + Locale string `xml:"locale,omitempty"` +} + +func init() { + t["LoginExtensionBySubjectNameRequestType"] = reflect.TypeOf((*LoginExtensionBySubjectNameRequestType)(nil)).Elem() +} + +type LoginExtensionBySubjectNameResponse struct { + Returnval UserSession `xml:"returnval"` +} + +type LoginRequestType struct { + This ManagedObjectReference `xml:"_this"` + UserName string `xml:"userName"` + Password string `xml:"password"` + Locale string `xml:"locale,omitempty"` +} + +func init() { + t["LoginRequestType"] = reflect.TypeOf((*LoginRequestType)(nil)).Elem() +} + +type LoginResponse struct { + Returnval UserSession `xml:"returnval"` +} + +type Logout LogoutRequestType + +func init() { + t["Logout"] = reflect.TypeOf((*Logout)(nil)).Elem() +} + +type LogoutRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["LogoutRequestType"] = reflect.TypeOf((*LogoutRequestType)(nil)).Elem() +} + +type LogoutResponse struct { +} + +type LongOption struct { + OptionType + + Min int64 `xml:"min"` + Max int64 `xml:"max"` + DefaultValue int64 `xml:"defaultValue"` +} + +func init() { + t["LongOption"] = reflect.TypeOf((*LongOption)(nil)).Elem() +} + +type LongPolicy struct { + InheritablePolicy + + Value int64 `xml:"value,omitempty"` +} + +func init() { + t["LongPolicy"] = reflect.TypeOf((*LongPolicy)(nil)).Elem() +} + +type LookupDvPortGroup LookupDvPortGroupRequestType + +func init() { + t["LookupDvPortGroup"] = reflect.TypeOf((*LookupDvPortGroup)(nil)).Elem() +} + +type LookupDvPortGroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + PortgroupKey string `xml:"portgroupKey"` +} + +func init() { + t["LookupDvPortGroupRequestType"] = reflect.TypeOf((*LookupDvPortGroupRequestType)(nil)).Elem() +} + +type LookupDvPortGroupResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type LookupVmOverheadMemory LookupVmOverheadMemoryRequestType + +func init() { + t["LookupVmOverheadMemory"] = reflect.TypeOf((*LookupVmOverheadMemory)(nil)).Elem() +} + +type LookupVmOverheadMemoryRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["LookupVmOverheadMemoryRequestType"] = reflect.TypeOf((*LookupVmOverheadMemoryRequestType)(nil)).Elem() +} + +type LookupVmOverheadMemoryResponse struct { + Returnval int64 `xml:"returnval"` +} + +type MacAddress struct { + NegatableExpression +} + +func init() { + t["MacAddress"] = reflect.TypeOf((*MacAddress)(nil)).Elem() +} + 
+type MacRange struct { + MacAddress + + Address string `xml:"address"` + Mask string `xml:"mask"` +} + +func init() { + t["MacRange"] = reflect.TypeOf((*MacRange)(nil)).Elem() +} + +type MaintenanceModeFileMove struct { + MigrationFault +} + +func init() { + t["MaintenanceModeFileMove"] = reflect.TypeOf((*MaintenanceModeFileMove)(nil)).Elem() +} + +type MaintenanceModeFileMoveFault MaintenanceModeFileMove + +func init() { + t["MaintenanceModeFileMoveFault"] = reflect.TypeOf((*MaintenanceModeFileMoveFault)(nil)).Elem() +} + +type MakeDirectory MakeDirectoryRequestType + +func init() { + t["MakeDirectory"] = reflect.TypeOf((*MakeDirectory)(nil)).Elem() +} + +type MakeDirectoryInGuest MakeDirectoryInGuestRequestType + +func init() { + t["MakeDirectoryInGuest"] = reflect.TypeOf((*MakeDirectoryInGuest)(nil)).Elem() +} + +type MakeDirectoryInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + DirectoryPath string `xml:"directoryPath"` + CreateParentDirectories bool `xml:"createParentDirectories"` +} + +func init() { + t["MakeDirectoryInGuestRequestType"] = reflect.TypeOf((*MakeDirectoryInGuestRequestType)(nil)).Elem() +} + +type MakeDirectoryInGuestResponse struct { +} + +type MakeDirectoryRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + CreateParentDirectories *bool `xml:"createParentDirectories"` +} + +func init() { + t["MakeDirectoryRequestType"] = reflect.TypeOf((*MakeDirectoryRequestType)(nil)).Elem() +} + +type MakeDirectoryResponse struct { +} + +type MakePrimaryVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` +} + +func init() { + t["MakePrimaryVMRequestType"] = reflect.TypeOf((*MakePrimaryVMRequestType)(nil)).Elem() +} + +type MakePrimaryVM_Task MakePrimaryVMRequestType + +func init() { + t["MakePrimaryVM_Task"] = reflect.TypeOf((*MakePrimaryVM_Task)(nil)).Elem() +} + +type MakePrimaryVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ManagedByInfo struct { + DynamicData + + ExtensionKey string `xml:"extensionKey"` + Type string `xml:"type"` +} + +func init() { + t["ManagedByInfo"] = reflect.TypeOf((*ManagedByInfo)(nil)).Elem() +} + +type ManagedEntityEventArgument struct { + EntityEventArgument + + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["ManagedEntityEventArgument"] = reflect.TypeOf((*ManagedEntityEventArgument)(nil)).Elem() +} + +type ManagedObjectNotFound struct { + RuntimeFault + + Obj ManagedObjectReference `xml:"obj"` +} + +func init() { + t["ManagedObjectNotFound"] = reflect.TypeOf((*ManagedObjectNotFound)(nil)).Elem() +} + +type ManagedObjectNotFoundFault ManagedObjectNotFound + +func init() { + t["ManagedObjectNotFoundFault"] = reflect.TypeOf((*ManagedObjectNotFoundFault)(nil)).Elem() +} + +type ManagedObjectReference struct { + Type string `xml:"type,attr"` + Value string `xml:",chardata"` +} + +func init() { + t["ManagedObjectReference"] = reflect.TypeOf((*ManagedObjectReference)(nil)).Elem() +} + +type MarkAsLocalRequestType struct { + This ManagedObjectReference `xml:"_this"` + ScsiDiskUuid string `xml:"scsiDiskUuid"` +} + +func init() { + t["MarkAsLocalRequestType"] = reflect.TypeOf((*MarkAsLocalRequestType)(nil)).Elem() +} + +type MarkAsLocal_Task MarkAsLocalRequestType + +func init() { + t["MarkAsLocal_Task"] = 
reflect.TypeOf((*MarkAsLocal_Task)(nil)).Elem() +} + +type MarkAsLocal_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type MarkAsNonLocalRequestType struct { + This ManagedObjectReference `xml:"_this"` + ScsiDiskUuid string `xml:"scsiDiskUuid"` +} + +func init() { + t["MarkAsNonLocalRequestType"] = reflect.TypeOf((*MarkAsNonLocalRequestType)(nil)).Elem() +} + +type MarkAsNonLocal_Task MarkAsNonLocalRequestType + +func init() { + t["MarkAsNonLocal_Task"] = reflect.TypeOf((*MarkAsNonLocal_Task)(nil)).Elem() +} + +type MarkAsNonLocal_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type MarkAsNonSsdRequestType struct { + This ManagedObjectReference `xml:"_this"` + ScsiDiskUuid string `xml:"scsiDiskUuid"` +} + +func init() { + t["MarkAsNonSsdRequestType"] = reflect.TypeOf((*MarkAsNonSsdRequestType)(nil)).Elem() +} + +type MarkAsNonSsd_Task MarkAsNonSsdRequestType + +func init() { + t["MarkAsNonSsd_Task"] = reflect.TypeOf((*MarkAsNonSsd_Task)(nil)).Elem() +} + +type MarkAsNonSsd_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type MarkAsSsdRequestType struct { + This ManagedObjectReference `xml:"_this"` + ScsiDiskUuid string `xml:"scsiDiskUuid"` +} + +func init() { + t["MarkAsSsdRequestType"] = reflect.TypeOf((*MarkAsSsdRequestType)(nil)).Elem() +} + +type MarkAsSsd_Task MarkAsSsdRequestType + +func init() { + t["MarkAsSsd_Task"] = reflect.TypeOf((*MarkAsSsd_Task)(nil)).Elem() +} + +type MarkAsSsd_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type MarkAsTemplate MarkAsTemplateRequestType + +func init() { + t["MarkAsTemplate"] = reflect.TypeOf((*MarkAsTemplate)(nil)).Elem() +} + +type MarkAsTemplateRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["MarkAsTemplateRequestType"] = reflect.TypeOf((*MarkAsTemplateRequestType)(nil)).Elem() +} + +type MarkAsTemplateResponse struct { +} + +type MarkAsVirtualMachine MarkAsVirtualMachineRequestType + +func init() { + t["MarkAsVirtualMachine"] = reflect.TypeOf((*MarkAsVirtualMachine)(nil)).Elem() +} + +type MarkAsVirtualMachineRequestType struct { + This ManagedObjectReference `xml:"_this"` + Pool ManagedObjectReference `xml:"pool"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["MarkAsVirtualMachineRequestType"] = reflect.TypeOf((*MarkAsVirtualMachineRequestType)(nil)).Elem() +} + +type MarkAsVirtualMachineResponse struct { +} + +type MarkDefault MarkDefaultRequestType + +func init() { + t["MarkDefault"] = reflect.TypeOf((*MarkDefault)(nil)).Elem() +} + +type MarkDefaultRequestType struct { + This ManagedObjectReference `xml:"_this"` + ClusterId KeyProviderId `xml:"clusterId"` +} + +func init() { + t["MarkDefaultRequestType"] = reflect.TypeOf((*MarkDefaultRequestType)(nil)).Elem() +} + +type MarkDefaultResponse struct { +} + +type MarkForRemoval MarkForRemovalRequestType + +func init() { + t["MarkForRemoval"] = reflect.TypeOf((*MarkForRemoval)(nil)).Elem() +} + +type MarkForRemovalRequestType struct { + This ManagedObjectReference `xml:"_this"` + HbaName string `xml:"hbaName"` + Remove bool `xml:"remove"` +} + +func init() { + t["MarkForRemovalRequestType"] = reflect.TypeOf((*MarkForRemovalRequestType)(nil)).Elem() +} + +type MarkForRemovalResponse struct { +} + +type MemoryFileFormatNotSupportedByDatastore struct { + UnsupportedDatastore + + DatastoreName string `xml:"datastoreName"` + Type string `xml:"type"` +} + +func init() { + 
t["MemoryFileFormatNotSupportedByDatastore"] = reflect.TypeOf((*MemoryFileFormatNotSupportedByDatastore)(nil)).Elem() +} + +type MemoryFileFormatNotSupportedByDatastoreFault MemoryFileFormatNotSupportedByDatastore + +func init() { + t["MemoryFileFormatNotSupportedByDatastoreFault"] = reflect.TypeOf((*MemoryFileFormatNotSupportedByDatastoreFault)(nil)).Elem() +} + +type MemoryHotPlugNotSupported struct { + VmConfigFault +} + +func init() { + t["MemoryHotPlugNotSupported"] = reflect.TypeOf((*MemoryHotPlugNotSupported)(nil)).Elem() +} + +type MemoryHotPlugNotSupportedFault MemoryHotPlugNotSupported + +func init() { + t["MemoryHotPlugNotSupportedFault"] = reflect.TypeOf((*MemoryHotPlugNotSupportedFault)(nil)).Elem() +} + +type MemorySizeNotRecommended struct { + VirtualHardwareCompatibilityIssue + + MemorySizeMB int32 `xml:"memorySizeMB"` + MinMemorySizeMB int32 `xml:"minMemorySizeMB"` + MaxMemorySizeMB int32 `xml:"maxMemorySizeMB"` +} + +func init() { + t["MemorySizeNotRecommended"] = reflect.TypeOf((*MemorySizeNotRecommended)(nil)).Elem() +} + +type MemorySizeNotRecommendedFault MemorySizeNotRecommended + +func init() { + t["MemorySizeNotRecommendedFault"] = reflect.TypeOf((*MemorySizeNotRecommendedFault)(nil)).Elem() +} + +type MemorySizeNotSupported struct { + VirtualHardwareCompatibilityIssue + + MemorySizeMB int32 `xml:"memorySizeMB"` + MinMemorySizeMB int32 `xml:"minMemorySizeMB"` + MaxMemorySizeMB int32 `xml:"maxMemorySizeMB"` +} + +func init() { + t["MemorySizeNotSupported"] = reflect.TypeOf((*MemorySizeNotSupported)(nil)).Elem() +} + +type MemorySizeNotSupportedByDatastore struct { + VirtualHardwareCompatibilityIssue + + Datastore ManagedObjectReference `xml:"datastore"` + MemorySizeMB int32 `xml:"memorySizeMB"` + MaxMemorySizeMB int32 `xml:"maxMemorySizeMB"` +} + +func init() { + t["MemorySizeNotSupportedByDatastore"] = reflect.TypeOf((*MemorySizeNotSupportedByDatastore)(nil)).Elem() +} + +type MemorySizeNotSupportedByDatastoreFault MemorySizeNotSupportedByDatastore + +func init() { + t["MemorySizeNotSupportedByDatastoreFault"] = reflect.TypeOf((*MemorySizeNotSupportedByDatastoreFault)(nil)).Elem() +} + +type MemorySizeNotSupportedFault MemorySizeNotSupported + +func init() { + t["MemorySizeNotSupportedFault"] = reflect.TypeOf((*MemorySizeNotSupportedFault)(nil)).Elem() +} + +type MemorySnapshotOnIndependentDisk struct { + SnapshotFault +} + +func init() { + t["MemorySnapshotOnIndependentDisk"] = reflect.TypeOf((*MemorySnapshotOnIndependentDisk)(nil)).Elem() +} + +type MemorySnapshotOnIndependentDiskFault MemorySnapshotOnIndependentDisk + +func init() { + t["MemorySnapshotOnIndependentDiskFault"] = reflect.TypeOf((*MemorySnapshotOnIndependentDiskFault)(nil)).Elem() +} + +type MergeDvsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Dvs ManagedObjectReference `xml:"dvs"` +} + +func init() { + t["MergeDvsRequestType"] = reflect.TypeOf((*MergeDvsRequestType)(nil)).Elem() +} + +type MergeDvs_Task MergeDvsRequestType + +func init() { + t["MergeDvs_Task"] = reflect.TypeOf((*MergeDvs_Task)(nil)).Elem() +} + +type MergeDvs_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type MergePermissions MergePermissionsRequestType + +func init() { + t["MergePermissions"] = reflect.TypeOf((*MergePermissions)(nil)).Elem() +} + +type MergePermissionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + SrcRoleId int32 `xml:"srcRoleId"` + DstRoleId int32 `xml:"dstRoleId"` +} + +func init() { + t["MergePermissionsRequestType"] = 
reflect.TypeOf((*MergePermissionsRequestType)(nil)).Elem() +} + +type MergePermissionsResponse struct { +} + +type MethodAction struct { + Action + + Name string `xml:"name"` + Argument []MethodActionArgument `xml:"argument,omitempty"` +} + +func init() { + t["MethodAction"] = reflect.TypeOf((*MethodAction)(nil)).Elem() +} + +type MethodActionArgument struct { + DynamicData + + Value AnyType `xml:"value,typeattr"` +} + +func init() { + t["MethodActionArgument"] = reflect.TypeOf((*MethodActionArgument)(nil)).Elem() +} + +type MethodAlreadyDisabledFault struct { + RuntimeFault + + SourceId string `xml:"sourceId"` +} + +func init() { + t["MethodAlreadyDisabledFault"] = reflect.TypeOf((*MethodAlreadyDisabledFault)(nil)).Elem() +} + +type MethodAlreadyDisabledFaultFault MethodAlreadyDisabledFault + +func init() { + t["MethodAlreadyDisabledFaultFault"] = reflect.TypeOf((*MethodAlreadyDisabledFaultFault)(nil)).Elem() +} + +type MethodDescription struct { + Description + + Key string `xml:"key"` +} + +func init() { + t["MethodDescription"] = reflect.TypeOf((*MethodDescription)(nil)).Elem() +} + +type MethodDisabled struct { + RuntimeFault + + Source string `xml:"source,omitempty"` +} + +func init() { + t["MethodDisabled"] = reflect.TypeOf((*MethodDisabled)(nil)).Elem() +} + +type MethodDisabledFault MethodDisabled + +func init() { + t["MethodDisabledFault"] = reflect.TypeOf((*MethodDisabledFault)(nil)).Elem() +} + +type MethodFault struct { + FaultCause *LocalizedMethodFault `xml:"faultCause,omitempty"` + FaultMessage []LocalizableMessage `xml:"faultMessage,omitempty"` +} + +func init() { + t["MethodFault"] = reflect.TypeOf((*MethodFault)(nil)).Elem() +} + +type MethodFaultFault BaseMethodFault + +func init() { + t["MethodFaultFault"] = reflect.TypeOf((*MethodFaultFault)(nil)).Elem() +} + +type MethodNotFound struct { + InvalidRequest + + Receiver ManagedObjectReference `xml:"receiver"` + Method string `xml:"method"` +} + +func init() { + t["MethodNotFound"] = reflect.TypeOf((*MethodNotFound)(nil)).Elem() +} + +type MethodNotFoundFault MethodNotFound + +func init() { + t["MethodNotFoundFault"] = reflect.TypeOf((*MethodNotFoundFault)(nil)).Elem() +} + +type MetricAlarmExpression struct { + AlarmExpression + + Operator MetricAlarmOperator `xml:"operator"` + Type string `xml:"type"` + Metric PerfMetricId `xml:"metric"` + Yellow int32 `xml:"yellow,omitempty"` + YellowInterval int32 `xml:"yellowInterval,omitempty"` + Red int32 `xml:"red,omitempty"` + RedInterval int32 `xml:"redInterval,omitempty"` +} + +func init() { + t["MetricAlarmExpression"] = reflect.TypeOf((*MetricAlarmExpression)(nil)).Elem() +} + +type MigrateVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Pool *ManagedObjectReference `xml:"pool,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Priority VirtualMachineMovePriority `xml:"priority"` + State VirtualMachinePowerState `xml:"state,omitempty"` +} + +func init() { + t["MigrateVMRequestType"] = reflect.TypeOf((*MigrateVMRequestType)(nil)).Elem() +} + +type MigrateVM_Task MigrateVMRequestType + +func init() { + t["MigrateVM_Task"] = reflect.TypeOf((*MigrateVM_Task)(nil)).Elem() +} + +type MigrateVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type MigrationDisabled struct { + MigrationFault +} + +func init() { + t["MigrationDisabled"] = reflect.TypeOf((*MigrationDisabled)(nil)).Elem() +} + +type MigrationDisabledFault MigrationDisabled + +func init() { + t["MigrationDisabledFault"] = 
reflect.TypeOf((*MigrationDisabledFault)(nil)).Elem() +} + +type MigrationErrorEvent struct { + MigrationEvent +} + +func init() { + t["MigrationErrorEvent"] = reflect.TypeOf((*MigrationErrorEvent)(nil)).Elem() +} + +type MigrationEvent struct { + VmEvent + + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["MigrationEvent"] = reflect.TypeOf((*MigrationEvent)(nil)).Elem() +} + +type MigrationFault struct { + VimFault +} + +func init() { + t["MigrationFault"] = reflect.TypeOf((*MigrationFault)(nil)).Elem() +} + +type MigrationFaultFault BaseMigrationFault + +func init() { + t["MigrationFaultFault"] = reflect.TypeOf((*MigrationFaultFault)(nil)).Elem() +} + +type MigrationFeatureNotSupported struct { + MigrationFault + + AtSourceHost bool `xml:"atSourceHost"` + FailedHostName string `xml:"failedHostName"` + FailedHost ManagedObjectReference `xml:"failedHost"` +} + +func init() { + t["MigrationFeatureNotSupported"] = reflect.TypeOf((*MigrationFeatureNotSupported)(nil)).Elem() +} + +type MigrationFeatureNotSupportedFault BaseMigrationFeatureNotSupported + +func init() { + t["MigrationFeatureNotSupportedFault"] = reflect.TypeOf((*MigrationFeatureNotSupportedFault)(nil)).Elem() +} + +type MigrationHostErrorEvent struct { + MigrationEvent + + DstHost HostEventArgument `xml:"dstHost"` +} + +func init() { + t["MigrationHostErrorEvent"] = reflect.TypeOf((*MigrationHostErrorEvent)(nil)).Elem() +} + +type MigrationHostWarningEvent struct { + MigrationEvent + + DstHost HostEventArgument `xml:"dstHost"` +} + +func init() { + t["MigrationHostWarningEvent"] = reflect.TypeOf((*MigrationHostWarningEvent)(nil)).Elem() +} + +type MigrationNotReady struct { + MigrationFault + + Reason string `xml:"reason"` +} + +func init() { + t["MigrationNotReady"] = reflect.TypeOf((*MigrationNotReady)(nil)).Elem() +} + +type MigrationNotReadyFault MigrationNotReady + +func init() { + t["MigrationNotReadyFault"] = reflect.TypeOf((*MigrationNotReadyFault)(nil)).Elem() +} + +type MigrationResourceErrorEvent struct { + MigrationEvent + + DstPool ResourcePoolEventArgument `xml:"dstPool"` + DstHost HostEventArgument `xml:"dstHost"` +} + +func init() { + t["MigrationResourceErrorEvent"] = reflect.TypeOf((*MigrationResourceErrorEvent)(nil)).Elem() +} + +type MigrationResourceWarningEvent struct { + MigrationEvent + + DstPool ResourcePoolEventArgument `xml:"dstPool"` + DstHost HostEventArgument `xml:"dstHost"` +} + +func init() { + t["MigrationResourceWarningEvent"] = reflect.TypeOf((*MigrationResourceWarningEvent)(nil)).Elem() +} + +type MigrationWarningEvent struct { + MigrationEvent +} + +func init() { + t["MigrationWarningEvent"] = reflect.TypeOf((*MigrationWarningEvent)(nil)).Elem() +} + +type MismatchedBundle struct { + VimFault + + BundleUuid string `xml:"bundleUuid"` + HostUuid string `xml:"hostUuid"` + BundleBuildNumber int32 `xml:"bundleBuildNumber"` + HostBuildNumber int32 `xml:"hostBuildNumber"` +} + +func init() { + t["MismatchedBundle"] = reflect.TypeOf((*MismatchedBundle)(nil)).Elem() +} + +type MismatchedBundleFault MismatchedBundle + +func init() { + t["MismatchedBundleFault"] = reflect.TypeOf((*MismatchedBundleFault)(nil)).Elem() +} + +type MismatchedNetworkPolicies struct { + MigrationFault + + Device string `xml:"device"` + Backing string `xml:"backing"` + Connected bool `xml:"connected"` +} + +func init() { + t["MismatchedNetworkPolicies"] = reflect.TypeOf((*MismatchedNetworkPolicies)(nil)).Elem() +} + +type MismatchedNetworkPoliciesFault MismatchedNetworkPolicies + +func init() { + 
t["MismatchedNetworkPoliciesFault"] = reflect.TypeOf((*MismatchedNetworkPoliciesFault)(nil)).Elem() +} + +type MismatchedVMotionNetworkNames struct { + MigrationFault + + SourceNetwork string `xml:"sourceNetwork"` + DestNetwork string `xml:"destNetwork"` +} + +func init() { + t["MismatchedVMotionNetworkNames"] = reflect.TypeOf((*MismatchedVMotionNetworkNames)(nil)).Elem() +} + +type MismatchedVMotionNetworkNamesFault MismatchedVMotionNetworkNames + +func init() { + t["MismatchedVMotionNetworkNamesFault"] = reflect.TypeOf((*MismatchedVMotionNetworkNamesFault)(nil)).Elem() +} + +type MissingBmcSupport struct { + VimFault +} + +func init() { + t["MissingBmcSupport"] = reflect.TypeOf((*MissingBmcSupport)(nil)).Elem() +} + +type MissingBmcSupportFault MissingBmcSupport + +func init() { + t["MissingBmcSupportFault"] = reflect.TypeOf((*MissingBmcSupportFault)(nil)).Elem() +} + +type MissingController struct { + InvalidDeviceSpec +} + +func init() { + t["MissingController"] = reflect.TypeOf((*MissingController)(nil)).Elem() +} + +type MissingControllerFault MissingController + +func init() { + t["MissingControllerFault"] = reflect.TypeOf((*MissingControllerFault)(nil)).Elem() +} + +type MissingIpPool struct { + VAppPropertyFault +} + +func init() { + t["MissingIpPool"] = reflect.TypeOf((*MissingIpPool)(nil)).Elem() +} + +type MissingIpPoolFault MissingIpPool + +func init() { + t["MissingIpPoolFault"] = reflect.TypeOf((*MissingIpPoolFault)(nil)).Elem() +} + +type MissingLinuxCustResources struct { + CustomizationFault +} + +func init() { + t["MissingLinuxCustResources"] = reflect.TypeOf((*MissingLinuxCustResources)(nil)).Elem() +} + +type MissingLinuxCustResourcesFault MissingLinuxCustResources + +func init() { + t["MissingLinuxCustResourcesFault"] = reflect.TypeOf((*MissingLinuxCustResourcesFault)(nil)).Elem() +} + +type MissingNetworkIpConfig struct { + VAppPropertyFault +} + +func init() { + t["MissingNetworkIpConfig"] = reflect.TypeOf((*MissingNetworkIpConfig)(nil)).Elem() +} + +type MissingNetworkIpConfigFault MissingNetworkIpConfig + +func init() { + t["MissingNetworkIpConfigFault"] = reflect.TypeOf((*MissingNetworkIpConfigFault)(nil)).Elem() +} + +type MissingObject struct { + DynamicData + + Obj ManagedObjectReference `xml:"obj"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["MissingObject"] = reflect.TypeOf((*MissingObject)(nil)).Elem() +} + +type MissingPowerOffConfiguration struct { + VAppConfigFault +} + +func init() { + t["MissingPowerOffConfiguration"] = reflect.TypeOf((*MissingPowerOffConfiguration)(nil)).Elem() +} + +type MissingPowerOffConfigurationFault MissingPowerOffConfiguration + +func init() { + t["MissingPowerOffConfigurationFault"] = reflect.TypeOf((*MissingPowerOffConfigurationFault)(nil)).Elem() +} + +type MissingPowerOnConfiguration struct { + VAppConfigFault +} + +func init() { + t["MissingPowerOnConfiguration"] = reflect.TypeOf((*MissingPowerOnConfiguration)(nil)).Elem() +} + +type MissingPowerOnConfigurationFault MissingPowerOnConfiguration + +func init() { + t["MissingPowerOnConfigurationFault"] = reflect.TypeOf((*MissingPowerOnConfigurationFault)(nil)).Elem() +} + +type MissingProperty struct { + DynamicData + + Path string `xml:"path"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["MissingProperty"] = reflect.TypeOf((*MissingProperty)(nil)).Elem() +} + +type MissingWindowsCustResources struct { + CustomizationFault +} + +func init() { + t["MissingWindowsCustResources"] = 
reflect.TypeOf((*MissingWindowsCustResources)(nil)).Elem() +} + +type MissingWindowsCustResourcesFault MissingWindowsCustResources + +func init() { + t["MissingWindowsCustResourcesFault"] = reflect.TypeOf((*MissingWindowsCustResourcesFault)(nil)).Elem() +} + +type MksConnectionLimitReached struct { + InvalidState + + ConnectionLimit int32 `xml:"connectionLimit"` +} + +func init() { + t["MksConnectionLimitReached"] = reflect.TypeOf((*MksConnectionLimitReached)(nil)).Elem() +} + +type MksConnectionLimitReachedFault MksConnectionLimitReached + +func init() { + t["MksConnectionLimitReachedFault"] = reflect.TypeOf((*MksConnectionLimitReachedFault)(nil)).Elem() +} + +type ModeInfo struct { + DynamicData + + Browse string `xml:"browse,omitempty"` + Read string `xml:"read"` + Modify string `xml:"modify"` + Use string `xml:"use"` + Admin string `xml:"admin,omitempty"` + Full string `xml:"full"` +} + +func init() { + t["ModeInfo"] = reflect.TypeOf((*ModeInfo)(nil)).Elem() +} + +type ModifyListView ModifyListViewRequestType + +func init() { + t["ModifyListView"] = reflect.TypeOf((*ModifyListView)(nil)).Elem() +} + +type ModifyListViewRequestType struct { + This ManagedObjectReference `xml:"_this"` + Add []ManagedObjectReference `xml:"add,omitempty"` + Remove []ManagedObjectReference `xml:"remove,omitempty"` +} + +func init() { + t["ModifyListViewRequestType"] = reflect.TypeOf((*ModifyListViewRequestType)(nil)).Elem() +} + +type ModifyListViewResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type MonthlyByDayTaskScheduler struct { + MonthlyTaskScheduler + + Day int32 `xml:"day"` +} + +func init() { + t["MonthlyByDayTaskScheduler"] = reflect.TypeOf((*MonthlyByDayTaskScheduler)(nil)).Elem() +} + +type MonthlyByWeekdayTaskScheduler struct { + MonthlyTaskScheduler + + Offset WeekOfMonth `xml:"offset"` + Weekday DayOfWeek `xml:"weekday"` +} + +func init() { + t["MonthlyByWeekdayTaskScheduler"] = reflect.TypeOf((*MonthlyByWeekdayTaskScheduler)(nil)).Elem() +} + +type MonthlyTaskScheduler struct { + DailyTaskScheduler +} + +func init() { + t["MonthlyTaskScheduler"] = reflect.TypeOf((*MonthlyTaskScheduler)(nil)).Elem() +} + +type MountError struct { + CustomizationFault + + Vm ManagedObjectReference `xml:"vm"` + DiskIndex int32 `xml:"diskIndex"` +} + +func init() { + t["MountError"] = reflect.TypeOf((*MountError)(nil)).Elem() +} + +type MountErrorFault MountError + +func init() { + t["MountErrorFault"] = reflect.TypeOf((*MountErrorFault)(nil)).Elem() +} + +type MountToolsInstaller MountToolsInstallerRequestType + +func init() { + t["MountToolsInstaller"] = reflect.TypeOf((*MountToolsInstaller)(nil)).Elem() +} + +type MountToolsInstallerRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["MountToolsInstallerRequestType"] = reflect.TypeOf((*MountToolsInstallerRequestType)(nil)).Elem() +} + +type MountToolsInstallerResponse struct { +} + +type MountVffsVolume MountVffsVolumeRequestType + +func init() { + t["MountVffsVolume"] = reflect.TypeOf((*MountVffsVolume)(nil)).Elem() +} + +type MountVffsVolumeRequestType struct { + This ManagedObjectReference `xml:"_this"` + VffsUuid string `xml:"vffsUuid"` +} + +func init() { + t["MountVffsVolumeRequestType"] = reflect.TypeOf((*MountVffsVolumeRequestType)(nil)).Elem() +} + +type MountVffsVolumeResponse struct { +} + +type MountVmfsVolume MountVmfsVolumeRequestType + +func init() { + t["MountVmfsVolume"] = reflect.TypeOf((*MountVmfsVolume)(nil)).Elem() +} + +type MountVmfsVolumeExRequestType struct 
{ + This ManagedObjectReference `xml:"_this"` + VmfsUuid []string `xml:"vmfsUuid"` +} + +func init() { + t["MountVmfsVolumeExRequestType"] = reflect.TypeOf((*MountVmfsVolumeExRequestType)(nil)).Elem() +} + +type MountVmfsVolumeEx_Task MountVmfsVolumeExRequestType + +func init() { + t["MountVmfsVolumeEx_Task"] = reflect.TypeOf((*MountVmfsVolumeEx_Task)(nil)).Elem() +} + +type MountVmfsVolumeEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type MountVmfsVolumeRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsUuid string `xml:"vmfsUuid"` +} + +func init() { + t["MountVmfsVolumeRequestType"] = reflect.TypeOf((*MountVmfsVolumeRequestType)(nil)).Elem() +} + +type MountVmfsVolumeResponse struct { +} + +type MoveDVPortRequestType struct { + This ManagedObjectReference `xml:"_this"` + PortKey []string `xml:"portKey"` + DestinationPortgroupKey string `xml:"destinationPortgroupKey,omitempty"` +} + +func init() { + t["MoveDVPortRequestType"] = reflect.TypeOf((*MoveDVPortRequestType)(nil)).Elem() +} + +type MoveDVPort_Task MoveDVPortRequestType + +func init() { + t["MoveDVPort_Task"] = reflect.TypeOf((*MoveDVPort_Task)(nil)).Elem() +} + +type MoveDVPort_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type MoveDatastoreFileRequestType struct { + This ManagedObjectReference `xml:"_this"` + SourceName string `xml:"sourceName"` + SourceDatacenter *ManagedObjectReference `xml:"sourceDatacenter,omitempty"` + DestinationName string `xml:"destinationName"` + DestinationDatacenter *ManagedObjectReference `xml:"destinationDatacenter,omitempty"` + Force *bool `xml:"force"` +} + +func init() { + t["MoveDatastoreFileRequestType"] = reflect.TypeOf((*MoveDatastoreFileRequestType)(nil)).Elem() +} + +type MoveDatastoreFile_Task MoveDatastoreFileRequestType + +func init() { + t["MoveDatastoreFile_Task"] = reflect.TypeOf((*MoveDatastoreFile_Task)(nil)).Elem() +} + +type MoveDatastoreFile_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type MoveDirectoryInGuest MoveDirectoryInGuestRequestType + +func init() { + t["MoveDirectoryInGuest"] = reflect.TypeOf((*MoveDirectoryInGuest)(nil)).Elem() +} + +type MoveDirectoryInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + SrcDirectoryPath string `xml:"srcDirectoryPath"` + DstDirectoryPath string `xml:"dstDirectoryPath"` +} + +func init() { + t["MoveDirectoryInGuestRequestType"] = reflect.TypeOf((*MoveDirectoryInGuestRequestType)(nil)).Elem() +} + +type MoveDirectoryInGuestResponse struct { +} + +type MoveFileInGuest MoveFileInGuestRequestType + +func init() { + t["MoveFileInGuest"] = reflect.TypeOf((*MoveFileInGuest)(nil)).Elem() +} + +type MoveFileInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + SrcFilePath string `xml:"srcFilePath"` + DstFilePath string `xml:"dstFilePath"` + Overwrite bool `xml:"overwrite"` +} + +func init() { + t["MoveFileInGuestRequestType"] = reflect.TypeOf((*MoveFileInGuestRequestType)(nil)).Elem() +} + +type MoveFileInGuestResponse struct { +} + +type MoveHostIntoRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` + ResourcePool *ManagedObjectReference `xml:"resourcePool,omitempty"` +} + +func init() { + t["MoveHostIntoRequestType"] = 
reflect.TypeOf((*MoveHostIntoRequestType)(nil)).Elem() +} + +type MoveHostInto_Task MoveHostIntoRequestType + +func init() { + t["MoveHostInto_Task"] = reflect.TypeOf((*MoveHostInto_Task)(nil)).Elem() +} + +type MoveHostInto_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type MoveIntoFolderRequestType struct { + This ManagedObjectReference `xml:"_this"` + List []ManagedObjectReference `xml:"list"` +} + +func init() { + t["MoveIntoFolderRequestType"] = reflect.TypeOf((*MoveIntoFolderRequestType)(nil)).Elem() +} + +type MoveIntoFolder_Task MoveIntoFolderRequestType + +func init() { + t["MoveIntoFolder_Task"] = reflect.TypeOf((*MoveIntoFolder_Task)(nil)).Elem() +} + +type MoveIntoFolder_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type MoveIntoRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host []ManagedObjectReference `xml:"host"` +} + +func init() { + t["MoveIntoRequestType"] = reflect.TypeOf((*MoveIntoRequestType)(nil)).Elem() +} + +type MoveIntoResourcePool MoveIntoResourcePoolRequestType + +func init() { + t["MoveIntoResourcePool"] = reflect.TypeOf((*MoveIntoResourcePool)(nil)).Elem() +} + +type MoveIntoResourcePoolRequestType struct { + This ManagedObjectReference `xml:"_this"` + List []ManagedObjectReference `xml:"list"` +} + +func init() { + t["MoveIntoResourcePoolRequestType"] = reflect.TypeOf((*MoveIntoResourcePoolRequestType)(nil)).Elem() +} + +type MoveIntoResourcePoolResponse struct { +} + +type MoveInto_Task MoveIntoRequestType + +func init() { + t["MoveInto_Task"] = reflect.TypeOf((*MoveInto_Task)(nil)).Elem() +} + +type MoveInto_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type MoveVirtualDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + SourceName string `xml:"sourceName"` + SourceDatacenter *ManagedObjectReference `xml:"sourceDatacenter,omitempty"` + DestName string `xml:"destName"` + DestDatacenter *ManagedObjectReference `xml:"destDatacenter,omitempty"` + Force *bool `xml:"force"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` +} + +func init() { + t["MoveVirtualDiskRequestType"] = reflect.TypeOf((*MoveVirtualDiskRequestType)(nil)).Elem() +} + +type MoveVirtualDisk_Task MoveVirtualDiskRequestType + +func init() { + t["MoveVirtualDisk_Task"] = reflect.TypeOf((*MoveVirtualDisk_Task)(nil)).Elem() +} + +type MoveVirtualDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type MtuMatchEvent struct { + DvsHealthStatusChangeEvent +} + +func init() { + t["MtuMatchEvent"] = reflect.TypeOf((*MtuMatchEvent)(nil)).Elem() +} + +type MtuMismatchEvent struct { + DvsHealthStatusChangeEvent +} + +func init() { + t["MtuMismatchEvent"] = reflect.TypeOf((*MtuMismatchEvent)(nil)).Elem() +} + +type MultiWriterNotSupported struct { + DeviceNotSupported +} + +func init() { + t["MultiWriterNotSupported"] = reflect.TypeOf((*MultiWriterNotSupported)(nil)).Elem() +} + +type MultiWriterNotSupportedFault MultiWriterNotSupported + +func init() { + t["MultiWriterNotSupportedFault"] = reflect.TypeOf((*MultiWriterNotSupportedFault)(nil)).Elem() +} + +type MultipleCertificatesVerifyFault struct { + HostConnectFault + + ThumbprintData []MultipleCertificatesVerifyFaultThumbprintData `xml:"thumbprintData"` +} + +func init() { + t["MultipleCertificatesVerifyFault"] = reflect.TypeOf((*MultipleCertificatesVerifyFault)(nil)).Elem() +} + +type MultipleCertificatesVerifyFaultFault 
MultipleCertificatesVerifyFault + +func init() { + t["MultipleCertificatesVerifyFaultFault"] = reflect.TypeOf((*MultipleCertificatesVerifyFaultFault)(nil)).Elem() +} + +type MultipleCertificatesVerifyFaultThumbprintData struct { + DynamicData + + Port int32 `xml:"port"` + Thumbprint string `xml:"thumbprint"` +} + +func init() { + t["MultipleCertificatesVerifyFaultThumbprintData"] = reflect.TypeOf((*MultipleCertificatesVerifyFaultThumbprintData)(nil)).Elem() +} + +type MultipleSnapshotsNotSupported struct { + SnapshotFault +} + +func init() { + t["MultipleSnapshotsNotSupported"] = reflect.TypeOf((*MultipleSnapshotsNotSupported)(nil)).Elem() +} + +type MultipleSnapshotsNotSupportedFault MultipleSnapshotsNotSupported + +func init() { + t["MultipleSnapshotsNotSupportedFault"] = reflect.TypeOf((*MultipleSnapshotsNotSupportedFault)(nil)).Elem() +} + +type NASDatastoreCreatedEvent struct { + HostEvent + + Datastore DatastoreEventArgument `xml:"datastore"` + DatastoreUrl string `xml:"datastoreUrl,omitempty"` +} + +func init() { + t["NASDatastoreCreatedEvent"] = reflect.TypeOf((*NASDatastoreCreatedEvent)(nil)).Elem() +} + +type NamePasswordAuthentication struct { + GuestAuthentication + + Username string `xml:"username"` + Password string `xml:"password"` +} + +func init() { + t["NamePasswordAuthentication"] = reflect.TypeOf((*NamePasswordAuthentication)(nil)).Elem() +} + +type NamespaceFull struct { + VimFault + + Name string `xml:"name"` + CurrentMaxSize int64 `xml:"currentMaxSize"` + RequiredSize int64 `xml:"requiredSize,omitempty"` +} + +func init() { + t["NamespaceFull"] = reflect.TypeOf((*NamespaceFull)(nil)).Elem() +} + +type NamespaceFullFault NamespaceFull + +func init() { + t["NamespaceFullFault"] = reflect.TypeOf((*NamespaceFullFault)(nil)).Elem() +} + +type NamespaceLimitReached struct { + VimFault + + Limit *int32 `xml:"limit"` +} + +func init() { + t["NamespaceLimitReached"] = reflect.TypeOf((*NamespaceLimitReached)(nil)).Elem() +} + +type NamespaceLimitReachedFault NamespaceLimitReached + +func init() { + t["NamespaceLimitReachedFault"] = reflect.TypeOf((*NamespaceLimitReachedFault)(nil)).Elem() +} + +type NamespaceWriteProtected struct { + VimFault + + Name string `xml:"name"` +} + +func init() { + t["NamespaceWriteProtected"] = reflect.TypeOf((*NamespaceWriteProtected)(nil)).Elem() +} + +type NamespaceWriteProtectedFault NamespaceWriteProtected + +func init() { + t["NamespaceWriteProtectedFault"] = reflect.TypeOf((*NamespaceWriteProtectedFault)(nil)).Elem() +} + +type NasConfigFault struct { + HostConfigFault + + Name string `xml:"name"` +} + +func init() { + t["NasConfigFault"] = reflect.TypeOf((*NasConfigFault)(nil)).Elem() +} + +type NasConfigFaultFault BaseNasConfigFault + +func init() { + t["NasConfigFaultFault"] = reflect.TypeOf((*NasConfigFaultFault)(nil)).Elem() +} + +type NasConnectionLimitReached struct { + NasConfigFault + + RemoteHost string `xml:"remoteHost"` + RemotePath string `xml:"remotePath"` +} + +func init() { + t["NasConnectionLimitReached"] = reflect.TypeOf((*NasConnectionLimitReached)(nil)).Elem() +} + +type NasConnectionLimitReachedFault NasConnectionLimitReached + +func init() { + t["NasConnectionLimitReachedFault"] = reflect.TypeOf((*NasConnectionLimitReachedFault)(nil)).Elem() +} + +type NasDatastoreInfo struct { + DatastoreInfo + + Nas *HostNasVolume `xml:"nas,omitempty"` +} + +func init() { + t["NasDatastoreInfo"] = reflect.TypeOf((*NasDatastoreInfo)(nil)).Elem() +} + +type NasSessionCredentialConflict struct { + NasConfigFault + + RemoteHost string 
`xml:"remoteHost"` + RemotePath string `xml:"remotePath"` + UserName string `xml:"userName"` +} + +func init() { + t["NasSessionCredentialConflict"] = reflect.TypeOf((*NasSessionCredentialConflict)(nil)).Elem() +} + +type NasSessionCredentialConflictFault NasSessionCredentialConflict + +func init() { + t["NasSessionCredentialConflictFault"] = reflect.TypeOf((*NasSessionCredentialConflictFault)(nil)).Elem() +} + +type NasStorageProfile struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["NasStorageProfile"] = reflect.TypeOf((*NasStorageProfile)(nil)).Elem() +} + +type NasVolumeNotMounted struct { + NasConfigFault + + RemoteHost string `xml:"remoteHost"` + RemotePath string `xml:"remotePath"` +} + +func init() { + t["NasVolumeNotMounted"] = reflect.TypeOf((*NasVolumeNotMounted)(nil)).Elem() +} + +type NasVolumeNotMountedFault NasVolumeNotMounted + +func init() { + t["NasVolumeNotMountedFault"] = reflect.TypeOf((*NasVolumeNotMountedFault)(nil)).Elem() +} + +type NegatableExpression struct { + DynamicData + + Negate *bool `xml:"negate"` +} + +func init() { + t["NegatableExpression"] = reflect.TypeOf((*NegatableExpression)(nil)).Elem() +} + +type NetBIOSConfigInfo struct { + DynamicData + + Mode string `xml:"mode"` +} + +func init() { + t["NetBIOSConfigInfo"] = reflect.TypeOf((*NetBIOSConfigInfo)(nil)).Elem() +} + +type NetDhcpConfigInfo struct { + DynamicData + + Ipv6 *NetDhcpConfigInfoDhcpOptions `xml:"ipv6,omitempty"` + Ipv4 *NetDhcpConfigInfoDhcpOptions `xml:"ipv4,omitempty"` +} + +func init() { + t["NetDhcpConfigInfo"] = reflect.TypeOf((*NetDhcpConfigInfo)(nil)).Elem() +} + +type NetDhcpConfigInfoDhcpOptions struct { + DynamicData + + Enable bool `xml:"enable"` + Config []KeyValue `xml:"config,omitempty"` +} + +func init() { + t["NetDhcpConfigInfoDhcpOptions"] = reflect.TypeOf((*NetDhcpConfigInfoDhcpOptions)(nil)).Elem() +} + +type NetDhcpConfigSpec struct { + DynamicData + + Ipv6 *NetDhcpConfigSpecDhcpOptionsSpec `xml:"ipv6,omitempty"` + Ipv4 *NetDhcpConfigSpecDhcpOptionsSpec `xml:"ipv4,omitempty"` +} + +func init() { + t["NetDhcpConfigSpec"] = reflect.TypeOf((*NetDhcpConfigSpec)(nil)).Elem() +} + +type NetDhcpConfigSpecDhcpOptionsSpec struct { + DynamicData + + Enable *bool `xml:"enable"` + Config []KeyValue `xml:"config"` + Operation string `xml:"operation"` +} + +func init() { + t["NetDhcpConfigSpecDhcpOptionsSpec"] = reflect.TypeOf((*NetDhcpConfigSpecDhcpOptionsSpec)(nil)).Elem() +} + +type NetDnsConfigInfo struct { + DynamicData + + Dhcp bool `xml:"dhcp"` + HostName string `xml:"hostName"` + DomainName string `xml:"domainName"` + IpAddress []string `xml:"ipAddress,omitempty"` + SearchDomain []string `xml:"searchDomain,omitempty"` +} + +func init() { + t["NetDnsConfigInfo"] = reflect.TypeOf((*NetDnsConfigInfo)(nil)).Elem() +} + +type NetDnsConfigSpec struct { + DynamicData + + Dhcp *bool `xml:"dhcp"` + HostName string `xml:"hostName,omitempty"` + DomainName string `xml:"domainName,omitempty"` + IpAddress []string `xml:"ipAddress,omitempty"` + SearchDomain []string `xml:"searchDomain,omitempty"` +} + +func init() { + t["NetDnsConfigSpec"] = reflect.TypeOf((*NetDnsConfigSpec)(nil)).Elem() +} + +type NetIpConfigInfo struct { + DynamicData + + IpAddress []NetIpConfigInfoIpAddress `xml:"ipAddress,omitempty"` + Dhcp *NetDhcpConfigInfo `xml:"dhcp,omitempty"` + AutoConfigurationEnabled *bool `xml:"autoConfigurationEnabled"` +} + +func init() { + t["NetIpConfigInfo"] = reflect.TypeOf((*NetIpConfigInfo)(nil)).Elem() +} + +type NetIpConfigInfoIpAddress struct { + 
DynamicData + + IpAddress string `xml:"ipAddress"` + PrefixLength int32 `xml:"prefixLength"` + Origin string `xml:"origin,omitempty"` + State string `xml:"state,omitempty"` + Lifetime *time.Time `xml:"lifetime"` +} + +func init() { + t["NetIpConfigInfoIpAddress"] = reflect.TypeOf((*NetIpConfigInfoIpAddress)(nil)).Elem() +} + +type NetIpConfigSpec struct { + DynamicData + + IpAddress []NetIpConfigSpecIpAddressSpec `xml:"ipAddress,omitempty"` + Dhcp *NetDhcpConfigSpec `xml:"dhcp,omitempty"` + AutoConfigurationEnabled *bool `xml:"autoConfigurationEnabled"` +} + +func init() { + t["NetIpConfigSpec"] = reflect.TypeOf((*NetIpConfigSpec)(nil)).Elem() +} + +type NetIpConfigSpecIpAddressSpec struct { + DynamicData + + IpAddress string `xml:"ipAddress"` + PrefixLength int32 `xml:"prefixLength"` + Operation string `xml:"operation"` +} + +func init() { + t["NetIpConfigSpecIpAddressSpec"] = reflect.TypeOf((*NetIpConfigSpecIpAddressSpec)(nil)).Elem() +} + +type NetIpRouteConfigInfo struct { + DynamicData + + IpRoute []NetIpRouteConfigInfoIpRoute `xml:"ipRoute,omitempty"` +} + +func init() { + t["NetIpRouteConfigInfo"] = reflect.TypeOf((*NetIpRouteConfigInfo)(nil)).Elem() +} + +type NetIpRouteConfigInfoGateway struct { + DynamicData + + IpAddress string `xml:"ipAddress,omitempty"` + Device string `xml:"device,omitempty"` +} + +func init() { + t["NetIpRouteConfigInfoGateway"] = reflect.TypeOf((*NetIpRouteConfigInfoGateway)(nil)).Elem() +} + +type NetIpRouteConfigInfoIpRoute struct { + DynamicData + + Network string `xml:"network"` + PrefixLength int32 `xml:"prefixLength"` + Gateway NetIpRouteConfigInfoGateway `xml:"gateway"` +} + +func init() { + t["NetIpRouteConfigInfoIpRoute"] = reflect.TypeOf((*NetIpRouteConfigInfoIpRoute)(nil)).Elem() +} + +type NetIpRouteConfigSpec struct { + DynamicData + + IpRoute []NetIpRouteConfigSpecIpRouteSpec `xml:"ipRoute,omitempty"` +} + +func init() { + t["NetIpRouteConfigSpec"] = reflect.TypeOf((*NetIpRouteConfigSpec)(nil)).Elem() +} + +type NetIpRouteConfigSpecGatewaySpec struct { + DynamicData + + IpAddress string `xml:"ipAddress,omitempty"` + Device string `xml:"device,omitempty"` +} + +func init() { + t["NetIpRouteConfigSpecGatewaySpec"] = reflect.TypeOf((*NetIpRouteConfigSpecGatewaySpec)(nil)).Elem() +} + +type NetIpRouteConfigSpecIpRouteSpec struct { + DynamicData + + Network string `xml:"network"` + PrefixLength int32 `xml:"prefixLength"` + Gateway NetIpRouteConfigSpecGatewaySpec `xml:"gateway"` + Operation string `xml:"operation"` +} + +func init() { + t["NetIpRouteConfigSpecIpRouteSpec"] = reflect.TypeOf((*NetIpRouteConfigSpecIpRouteSpec)(nil)).Elem() +} + +type NetIpStackInfo struct { + DynamicData + + Neighbor []NetIpStackInfoNetToMedia `xml:"neighbor,omitempty"` + DefaultRouter []NetIpStackInfoDefaultRouter `xml:"defaultRouter,omitempty"` +} + +func init() { + t["NetIpStackInfo"] = reflect.TypeOf((*NetIpStackInfo)(nil)).Elem() +} + +type NetIpStackInfoDefaultRouter struct { + DynamicData + + IpAddress string `xml:"ipAddress"` + Device string `xml:"device"` + Lifetime time.Time `xml:"lifetime"` + Preference string `xml:"preference"` +} + +func init() { + t["NetIpStackInfoDefaultRouter"] = reflect.TypeOf((*NetIpStackInfoDefaultRouter)(nil)).Elem() +} + +type NetIpStackInfoNetToMedia struct { + DynamicData + + IpAddress string `xml:"ipAddress"` + PhysicalAddress string `xml:"physicalAddress"` + Device string `xml:"device"` + Type string `xml:"type"` +} + +func init() { + t["NetIpStackInfoNetToMedia"] = reflect.TypeOf((*NetIpStackInfoNetToMedia)(nil)).Elem() +} + 
+type NetStackInstanceProfile struct { + ApplyProfile + + Key string `xml:"key"` + DnsConfig NetworkProfileDnsConfigProfile `xml:"dnsConfig"` + IpRouteConfig IpRouteProfile `xml:"ipRouteConfig"` +} + +func init() { + t["NetStackInstanceProfile"] = reflect.TypeOf((*NetStackInstanceProfile)(nil)).Elem() +} + +type NetworkCopyFault struct { + FileFault +} + +func init() { + t["NetworkCopyFault"] = reflect.TypeOf((*NetworkCopyFault)(nil)).Elem() +} + +type NetworkCopyFaultFault NetworkCopyFault + +func init() { + t["NetworkCopyFaultFault"] = reflect.TypeOf((*NetworkCopyFaultFault)(nil)).Elem() +} + +type NetworkDisruptedAndConfigRolledBack struct { + VimFault + + Host string `xml:"host"` +} + +func init() { + t["NetworkDisruptedAndConfigRolledBack"] = reflect.TypeOf((*NetworkDisruptedAndConfigRolledBack)(nil)).Elem() +} + +type NetworkDisruptedAndConfigRolledBackFault NetworkDisruptedAndConfigRolledBack + +func init() { + t["NetworkDisruptedAndConfigRolledBackFault"] = reflect.TypeOf((*NetworkDisruptedAndConfigRolledBackFault)(nil)).Elem() +} + +type NetworkEventArgument struct { + EntityEventArgument + + Network ManagedObjectReference `xml:"network"` +} + +func init() { + t["NetworkEventArgument"] = reflect.TypeOf((*NetworkEventArgument)(nil)).Elem() +} + +type NetworkInaccessible struct { + NasConfigFault +} + +func init() { + t["NetworkInaccessible"] = reflect.TypeOf((*NetworkInaccessible)(nil)).Elem() +} + +type NetworkInaccessibleFault NetworkInaccessible + +func init() { + t["NetworkInaccessibleFault"] = reflect.TypeOf((*NetworkInaccessibleFault)(nil)).Elem() +} + +type NetworkPolicyProfile struct { + ApplyProfile +} + +func init() { + t["NetworkPolicyProfile"] = reflect.TypeOf((*NetworkPolicyProfile)(nil)).Elem() +} + +type NetworkProfile struct { + ApplyProfile + + Vswitch []VirtualSwitchProfile `xml:"vswitch,omitempty"` + VmPortGroup []VmPortGroupProfile `xml:"vmPortGroup,omitempty"` + HostPortGroup []HostPortGroupProfile `xml:"hostPortGroup,omitempty"` + ServiceConsolePortGroup []ServiceConsolePortGroupProfile `xml:"serviceConsolePortGroup,omitempty"` + DnsConfig *NetworkProfileDnsConfigProfile `xml:"dnsConfig,omitempty"` + IpRouteConfig *IpRouteProfile `xml:"ipRouteConfig,omitempty"` + ConsoleIpRouteConfig *IpRouteProfile `xml:"consoleIpRouteConfig,omitempty"` + Pnic []PhysicalNicProfile `xml:"pnic,omitempty"` + Dvswitch []DvsProfile `xml:"dvswitch,omitempty"` + DvsServiceConsoleNic []DvsServiceConsoleVNicProfile `xml:"dvsServiceConsoleNic,omitempty"` + DvsHostNic []DvsHostVNicProfile `xml:"dvsHostNic,omitempty"` + NsxHostNic []NsxHostVNicProfile `xml:"nsxHostNic,omitempty"` + NetStackInstance []NetStackInstanceProfile `xml:"netStackInstance,omitempty"` +} + +func init() { + t["NetworkProfile"] = reflect.TypeOf((*NetworkProfile)(nil)).Elem() +} + +type NetworkProfileDnsConfigProfile struct { + ApplyProfile +} + +func init() { + t["NetworkProfileDnsConfigProfile"] = reflect.TypeOf((*NetworkProfileDnsConfigProfile)(nil)).Elem() +} + +type NetworkRollbackEvent struct { + Event + + MethodName string `xml:"methodName"` + TransactionId string `xml:"transactionId"` +} + +func init() { + t["NetworkRollbackEvent"] = reflect.TypeOf((*NetworkRollbackEvent)(nil)).Elem() +} + +type NetworkSummary struct { + DynamicData + + Network *ManagedObjectReference `xml:"network,omitempty"` + Name string `xml:"name"` + Accessible bool `xml:"accessible"` + IpPoolName string `xml:"ipPoolName,omitempty"` + IpPoolId int32 `xml:"ipPoolId,omitempty"` +} + +func init() { + t["NetworkSummary"] = 
reflect.TypeOf((*NetworkSummary)(nil)).Elem() +} + +type NetworksMayNotBeTheSame struct { + MigrationFault + + Name string `xml:"name,omitempty"` +} + +func init() { + t["NetworksMayNotBeTheSame"] = reflect.TypeOf((*NetworksMayNotBeTheSame)(nil)).Elem() +} + +type NetworksMayNotBeTheSameFault NetworksMayNotBeTheSame + +func init() { + t["NetworksMayNotBeTheSameFault"] = reflect.TypeOf((*NetworksMayNotBeTheSameFault)(nil)).Elem() +} + +type NicSettingMismatch struct { + CustomizationFault + + NumberOfNicsInSpec int32 `xml:"numberOfNicsInSpec"` + NumberOfNicsInVM int32 `xml:"numberOfNicsInVM"` +} + +func init() { + t["NicSettingMismatch"] = reflect.TypeOf((*NicSettingMismatch)(nil)).Elem() +} + +type NicSettingMismatchFault NicSettingMismatch + +func init() { + t["NicSettingMismatchFault"] = reflect.TypeOf((*NicSettingMismatchFault)(nil)).Elem() +} + +type NoAccessUserEvent struct { + SessionEvent + + IpAddress string `xml:"ipAddress"` +} + +func init() { + t["NoAccessUserEvent"] = reflect.TypeOf((*NoAccessUserEvent)(nil)).Elem() +} + +type NoActiveHostInCluster struct { + InvalidState + + ComputeResource ManagedObjectReference `xml:"computeResource"` +} + +func init() { + t["NoActiveHostInCluster"] = reflect.TypeOf((*NoActiveHostInCluster)(nil)).Elem() +} + +type NoActiveHostInClusterFault NoActiveHostInCluster + +func init() { + t["NoActiveHostInClusterFault"] = reflect.TypeOf((*NoActiveHostInClusterFault)(nil)).Elem() +} + +type NoAvailableIp struct { + VAppPropertyFault + + Network ManagedObjectReference `xml:"network"` +} + +func init() { + t["NoAvailableIp"] = reflect.TypeOf((*NoAvailableIp)(nil)).Elem() +} + +type NoAvailableIpFault NoAvailableIp + +func init() { + t["NoAvailableIpFault"] = reflect.TypeOf((*NoAvailableIpFault)(nil)).Elem() +} + +type NoClientCertificate struct { + VimFault +} + +func init() { + t["NoClientCertificate"] = reflect.TypeOf((*NoClientCertificate)(nil)).Elem() +} + +type NoClientCertificateFault NoClientCertificate + +func init() { + t["NoClientCertificateFault"] = reflect.TypeOf((*NoClientCertificateFault)(nil)).Elem() +} + +type NoCompatibleDatastore struct { + VimFault +} + +func init() { + t["NoCompatibleDatastore"] = reflect.TypeOf((*NoCompatibleDatastore)(nil)).Elem() +} + +type NoCompatibleDatastoreFault NoCompatibleDatastore + +func init() { + t["NoCompatibleDatastoreFault"] = reflect.TypeOf((*NoCompatibleDatastoreFault)(nil)).Elem() +} + +type NoCompatibleHardAffinityHost struct { + VmConfigFault + + VmName string `xml:"vmName"` +} + +func init() { + t["NoCompatibleHardAffinityHost"] = reflect.TypeOf((*NoCompatibleHardAffinityHost)(nil)).Elem() +} + +type NoCompatibleHardAffinityHostFault NoCompatibleHardAffinityHost + +func init() { + t["NoCompatibleHardAffinityHostFault"] = reflect.TypeOf((*NoCompatibleHardAffinityHostFault)(nil)).Elem() +} + +type NoCompatibleHost struct { + VimFault + + Host []ManagedObjectReference `xml:"host,omitempty"` + Error []LocalizedMethodFault `xml:"error,omitempty"` +} + +func init() { + t["NoCompatibleHost"] = reflect.TypeOf((*NoCompatibleHost)(nil)).Elem() +} + +type NoCompatibleHostFault BaseNoCompatibleHost + +func init() { + t["NoCompatibleHostFault"] = reflect.TypeOf((*NoCompatibleHostFault)(nil)).Elem() +} + +type NoCompatibleHostWithAccessToDevice struct { + NoCompatibleHost +} + +func init() { + t["NoCompatibleHostWithAccessToDevice"] = reflect.TypeOf((*NoCompatibleHostWithAccessToDevice)(nil)).Elem() +} + +type NoCompatibleHostWithAccessToDeviceFault NoCompatibleHostWithAccessToDevice + +func init() { + 
t["NoCompatibleHostWithAccessToDeviceFault"] = reflect.TypeOf((*NoCompatibleHostWithAccessToDeviceFault)(nil)).Elem() +} + +type NoCompatibleSoftAffinityHost struct { + VmConfigFault + + VmName string `xml:"vmName"` +} + +func init() { + t["NoCompatibleSoftAffinityHost"] = reflect.TypeOf((*NoCompatibleSoftAffinityHost)(nil)).Elem() +} + +type NoCompatibleSoftAffinityHostFault NoCompatibleSoftAffinityHost + +func init() { + t["NoCompatibleSoftAffinityHostFault"] = reflect.TypeOf((*NoCompatibleSoftAffinityHostFault)(nil)).Elem() +} + +type NoConnectedDatastore struct { + VimFault +} + +func init() { + t["NoConnectedDatastore"] = reflect.TypeOf((*NoConnectedDatastore)(nil)).Elem() +} + +type NoConnectedDatastoreFault NoConnectedDatastore + +func init() { + t["NoConnectedDatastoreFault"] = reflect.TypeOf((*NoConnectedDatastoreFault)(nil)).Elem() +} + +type NoDatastoresConfiguredEvent struct { + HostEvent +} + +func init() { + t["NoDatastoresConfiguredEvent"] = reflect.TypeOf((*NoDatastoresConfiguredEvent)(nil)).Elem() +} + +type NoDiskFound struct { + VimFault +} + +func init() { + t["NoDiskFound"] = reflect.TypeOf((*NoDiskFound)(nil)).Elem() +} + +type NoDiskFoundFault NoDiskFound + +func init() { + t["NoDiskFoundFault"] = reflect.TypeOf((*NoDiskFoundFault)(nil)).Elem() +} + +type NoDiskSpace struct { + FileFault + + Datastore string `xml:"datastore"` +} + +func init() { + t["NoDiskSpace"] = reflect.TypeOf((*NoDiskSpace)(nil)).Elem() +} + +type NoDiskSpaceFault NoDiskSpace + +func init() { + t["NoDiskSpaceFault"] = reflect.TypeOf((*NoDiskSpaceFault)(nil)).Elem() +} + +type NoDisksToCustomize struct { + CustomizationFault +} + +func init() { + t["NoDisksToCustomize"] = reflect.TypeOf((*NoDisksToCustomize)(nil)).Elem() +} + +type NoDisksToCustomizeFault NoDisksToCustomize + +func init() { + t["NoDisksToCustomizeFault"] = reflect.TypeOf((*NoDisksToCustomizeFault)(nil)).Elem() +} + +type NoGateway struct { + HostConfigFault +} + +func init() { + t["NoGateway"] = reflect.TypeOf((*NoGateway)(nil)).Elem() +} + +type NoGatewayFault NoGateway + +func init() { + t["NoGatewayFault"] = reflect.TypeOf((*NoGatewayFault)(nil)).Elem() +} + +type NoGuestHeartbeat struct { + MigrationFault +} + +func init() { + t["NoGuestHeartbeat"] = reflect.TypeOf((*NoGuestHeartbeat)(nil)).Elem() +} + +type NoGuestHeartbeatFault NoGuestHeartbeat + +func init() { + t["NoGuestHeartbeatFault"] = reflect.TypeOf((*NoGuestHeartbeatFault)(nil)).Elem() +} + +type NoHost struct { + HostConnectFault + + Name string `xml:"name,omitempty"` +} + +func init() { + t["NoHost"] = reflect.TypeOf((*NoHost)(nil)).Elem() +} + +type NoHostFault NoHost + +func init() { + t["NoHostFault"] = reflect.TypeOf((*NoHostFault)(nil)).Elem() +} + +type NoHostSuitableForFtSecondary struct { + VmFaultToleranceIssue + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` +} + +func init() { + t["NoHostSuitableForFtSecondary"] = reflect.TypeOf((*NoHostSuitableForFtSecondary)(nil)).Elem() +} + +type NoHostSuitableForFtSecondaryFault NoHostSuitableForFtSecondary + +func init() { + t["NoHostSuitableForFtSecondaryFault"] = reflect.TypeOf((*NoHostSuitableForFtSecondaryFault)(nil)).Elem() +} + +type NoLicenseEvent struct { + LicenseEvent + + Feature LicenseFeatureInfo `xml:"feature"` +} + +func init() { + t["NoLicenseEvent"] = reflect.TypeOf((*NoLicenseEvent)(nil)).Elem() +} + +type NoLicenseServerConfigured struct { + NotEnoughLicenses +} + +func init() { + t["NoLicenseServerConfigured"] = reflect.TypeOf((*NoLicenseServerConfigured)(nil)).Elem() +} + 
+type NoLicenseServerConfiguredFault NoLicenseServerConfigured + +func init() { + t["NoLicenseServerConfiguredFault"] = reflect.TypeOf((*NoLicenseServerConfiguredFault)(nil)).Elem() +} + +type NoMaintenanceModeDrsRecommendationForVM struct { + VmEvent +} + +func init() { + t["NoMaintenanceModeDrsRecommendationForVM"] = reflect.TypeOf((*NoMaintenanceModeDrsRecommendationForVM)(nil)).Elem() +} + +type NoPeerHostFound struct { + HostPowerOpFailed +} + +func init() { + t["NoPeerHostFound"] = reflect.TypeOf((*NoPeerHostFound)(nil)).Elem() +} + +type NoPeerHostFoundFault NoPeerHostFound + +func init() { + t["NoPeerHostFoundFault"] = reflect.TypeOf((*NoPeerHostFoundFault)(nil)).Elem() +} + +type NoPermission struct { + SecurityError + + Object ManagedObjectReference `xml:"object"` + PrivilegeId string `xml:"privilegeId"` +} + +func init() { + t["NoPermission"] = reflect.TypeOf((*NoPermission)(nil)).Elem() +} + +type NoPermissionFault BaseNoPermission + +func init() { + t["NoPermissionFault"] = reflect.TypeOf((*NoPermissionFault)(nil)).Elem() +} + +type NoPermissionOnAD struct { + ActiveDirectoryFault +} + +func init() { + t["NoPermissionOnAD"] = reflect.TypeOf((*NoPermissionOnAD)(nil)).Elem() +} + +type NoPermissionOnADFault NoPermissionOnAD + +func init() { + t["NoPermissionOnADFault"] = reflect.TypeOf((*NoPermissionOnADFault)(nil)).Elem() +} + +type NoPermissionOnHost struct { + HostConnectFault +} + +func init() { + t["NoPermissionOnHost"] = reflect.TypeOf((*NoPermissionOnHost)(nil)).Elem() +} + +type NoPermissionOnHostFault NoPermissionOnHost + +func init() { + t["NoPermissionOnHostFault"] = reflect.TypeOf((*NoPermissionOnHostFault)(nil)).Elem() +} + +type NoPermissionOnNasVolume struct { + NasConfigFault + + UserName string `xml:"userName,omitempty"` +} + +func init() { + t["NoPermissionOnNasVolume"] = reflect.TypeOf((*NoPermissionOnNasVolume)(nil)).Elem() +} + +type NoPermissionOnNasVolumeFault NoPermissionOnNasVolume + +func init() { + t["NoPermissionOnNasVolumeFault"] = reflect.TypeOf((*NoPermissionOnNasVolumeFault)(nil)).Elem() +} + +type NoSubjectName struct { + VimFault +} + +func init() { + t["NoSubjectName"] = reflect.TypeOf((*NoSubjectName)(nil)).Elem() +} + +type NoSubjectNameFault NoSubjectName + +func init() { + t["NoSubjectNameFault"] = reflect.TypeOf((*NoSubjectNameFault)(nil)).Elem() +} + +type NoVcManagedIpConfigured struct { + VAppPropertyFault +} + +func init() { + t["NoVcManagedIpConfigured"] = reflect.TypeOf((*NoVcManagedIpConfigured)(nil)).Elem() +} + +type NoVcManagedIpConfiguredFault NoVcManagedIpConfigured + +func init() { + t["NoVcManagedIpConfiguredFault"] = reflect.TypeOf((*NoVcManagedIpConfiguredFault)(nil)).Elem() +} + +type NoVirtualNic struct { + HostConfigFault +} + +func init() { + t["NoVirtualNic"] = reflect.TypeOf((*NoVirtualNic)(nil)).Elem() +} + +type NoVirtualNicFault NoVirtualNic + +func init() { + t["NoVirtualNicFault"] = reflect.TypeOf((*NoVirtualNicFault)(nil)).Elem() +} + +type NoVmInVApp struct { + VAppConfigFault +} + +func init() { + t["NoVmInVApp"] = reflect.TypeOf((*NoVmInVApp)(nil)).Elem() +} + +type NoVmInVAppFault NoVmInVApp + +func init() { + t["NoVmInVAppFault"] = reflect.TypeOf((*NoVmInVAppFault)(nil)).Elem() +} + +type NodeDeploymentSpec struct { + DynamicData + + EsxHost *ManagedObjectReference `xml:"esxHost,omitempty"` + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` + PublicNetworkPortGroup *ManagedObjectReference `xml:"publicNetworkPortGroup,omitempty"` + ClusterNetworkPortGroup *ManagedObjectReference 
`xml:"clusterNetworkPortGroup,omitempty"` + Folder ManagedObjectReference `xml:"folder"` + ResourcePool *ManagedObjectReference `xml:"resourcePool,omitempty"` + ManagementVc *ServiceLocator `xml:"managementVc,omitempty"` + NodeName string `xml:"nodeName"` + IpSettings CustomizationIPSettings `xml:"ipSettings"` +} + +func init() { + t["NodeDeploymentSpec"] = reflect.TypeOf((*NodeDeploymentSpec)(nil)).Elem() +} + +type NodeNetworkSpec struct { + DynamicData + + IpSettings CustomizationIPSettings `xml:"ipSettings"` +} + +func init() { + t["NodeNetworkSpec"] = reflect.TypeOf((*NodeNetworkSpec)(nil)).Elem() +} + +type NonADUserRequired struct { + ActiveDirectoryFault +} + +func init() { + t["NonADUserRequired"] = reflect.TypeOf((*NonADUserRequired)(nil)).Elem() +} + +type NonADUserRequiredFault NonADUserRequired + +func init() { + t["NonADUserRequiredFault"] = reflect.TypeOf((*NonADUserRequiredFault)(nil)).Elem() +} + +type NonHomeRDMVMotionNotSupported struct { + MigrationFeatureNotSupported + + Device string `xml:"device"` +} + +func init() { + t["NonHomeRDMVMotionNotSupported"] = reflect.TypeOf((*NonHomeRDMVMotionNotSupported)(nil)).Elem() +} + +type NonHomeRDMVMotionNotSupportedFault NonHomeRDMVMotionNotSupported + +func init() { + t["NonHomeRDMVMotionNotSupportedFault"] = reflect.TypeOf((*NonHomeRDMVMotionNotSupportedFault)(nil)).Elem() +} + +type NonPersistentDisksNotSupported struct { + DeviceNotSupported +} + +func init() { + t["NonPersistentDisksNotSupported"] = reflect.TypeOf((*NonPersistentDisksNotSupported)(nil)).Elem() +} + +type NonPersistentDisksNotSupportedFault NonPersistentDisksNotSupported + +func init() { + t["NonPersistentDisksNotSupportedFault"] = reflect.TypeOf((*NonPersistentDisksNotSupportedFault)(nil)).Elem() +} + +type NonVIWorkloadDetectedOnDatastoreEvent struct { + DatastoreEvent +} + +func init() { + t["NonVIWorkloadDetectedOnDatastoreEvent"] = reflect.TypeOf((*NonVIWorkloadDetectedOnDatastoreEvent)(nil)).Elem() +} + +type NonVmwareOuiMacNotSupportedHost struct { + NotSupportedHost + + HostName string `xml:"hostName"` +} + +func init() { + t["NonVmwareOuiMacNotSupportedHost"] = reflect.TypeOf((*NonVmwareOuiMacNotSupportedHost)(nil)).Elem() +} + +type NonVmwareOuiMacNotSupportedHostFault NonVmwareOuiMacNotSupportedHost + +func init() { + t["NonVmwareOuiMacNotSupportedHostFault"] = reflect.TypeOf((*NonVmwareOuiMacNotSupportedHostFault)(nil)).Elem() +} + +type NotADirectory struct { + FileFault +} + +func init() { + t["NotADirectory"] = reflect.TypeOf((*NotADirectory)(nil)).Elem() +} + +type NotADirectoryFault NotADirectory + +func init() { + t["NotADirectoryFault"] = reflect.TypeOf((*NotADirectoryFault)(nil)).Elem() +} + +type NotAFile struct { + FileFault +} + +func init() { + t["NotAFile"] = reflect.TypeOf((*NotAFile)(nil)).Elem() +} + +type NotAFileFault NotAFile + +func init() { + t["NotAFileFault"] = reflect.TypeOf((*NotAFileFault)(nil)).Elem() +} + +type NotAuthenticated struct { + NoPermission +} + +func init() { + t["NotAuthenticated"] = reflect.TypeOf((*NotAuthenticated)(nil)).Elem() +} + +type NotAuthenticatedFault NotAuthenticated + +func init() { + t["NotAuthenticatedFault"] = reflect.TypeOf((*NotAuthenticatedFault)(nil)).Elem() +} + +type NotEnoughCpus struct { + VirtualHardwareCompatibilityIssue + + NumCpuDest int32 `xml:"numCpuDest"` + NumCpuVm int32 `xml:"numCpuVm"` +} + +func init() { + t["NotEnoughCpus"] = reflect.TypeOf((*NotEnoughCpus)(nil)).Elem() +} + +type NotEnoughCpusFault BaseNotEnoughCpus + +func init() { + t["NotEnoughCpusFault"] = 
reflect.TypeOf((*NotEnoughCpusFault)(nil)).Elem() +} + +type NotEnoughLicenses struct { + RuntimeFault +} + +func init() { + t["NotEnoughLicenses"] = reflect.TypeOf((*NotEnoughLicenses)(nil)).Elem() +} + +type NotEnoughLicensesFault BaseNotEnoughLicenses + +func init() { + t["NotEnoughLicensesFault"] = reflect.TypeOf((*NotEnoughLicensesFault)(nil)).Elem() +} + +type NotEnoughLogicalCpus struct { + NotEnoughCpus + + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["NotEnoughLogicalCpus"] = reflect.TypeOf((*NotEnoughLogicalCpus)(nil)).Elem() +} + +type NotEnoughLogicalCpusFault NotEnoughLogicalCpus + +func init() { + t["NotEnoughLogicalCpusFault"] = reflect.TypeOf((*NotEnoughLogicalCpusFault)(nil)).Elem() +} + +type NotEnoughResourcesToStartVmEvent struct { + VmEvent + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["NotEnoughResourcesToStartVmEvent"] = reflect.TypeOf((*NotEnoughResourcesToStartVmEvent)(nil)).Elem() +} + +type NotFound struct { + VimFault +} + +func init() { + t["NotFound"] = reflect.TypeOf((*NotFound)(nil)).Elem() +} + +type NotFoundFault NotFound + +func init() { + t["NotFoundFault"] = reflect.TypeOf((*NotFoundFault)(nil)).Elem() +} + +type NotImplemented struct { + RuntimeFault +} + +func init() { + t["NotImplemented"] = reflect.TypeOf((*NotImplemented)(nil)).Elem() +} + +type NotImplementedFault NotImplemented + +func init() { + t["NotImplementedFault"] = reflect.TypeOf((*NotImplementedFault)(nil)).Elem() +} + +type NotSupported struct { + RuntimeFault +} + +func init() { + t["NotSupported"] = reflect.TypeOf((*NotSupported)(nil)).Elem() +} + +type NotSupportedDeviceForFT struct { + VmFaultToleranceIssue + + Host ManagedObjectReference `xml:"host"` + HostName string `xml:"hostName,omitempty"` + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName,omitempty"` + DeviceType string `xml:"deviceType"` + DeviceLabel string `xml:"deviceLabel,omitempty"` +} + +func init() { + t["NotSupportedDeviceForFT"] = reflect.TypeOf((*NotSupportedDeviceForFT)(nil)).Elem() +} + +type NotSupportedDeviceForFTFault NotSupportedDeviceForFT + +func init() { + t["NotSupportedDeviceForFTFault"] = reflect.TypeOf((*NotSupportedDeviceForFTFault)(nil)).Elem() +} + +type NotSupportedFault BaseNotSupported + +func init() { + t["NotSupportedFault"] = reflect.TypeOf((*NotSupportedFault)(nil)).Elem() +} + +type NotSupportedHost struct { + HostConnectFault + + ProductName string `xml:"productName,omitempty"` + ProductVersion string `xml:"productVersion,omitempty"` +} + +func init() { + t["NotSupportedHost"] = reflect.TypeOf((*NotSupportedHost)(nil)).Elem() +} + +type NotSupportedHostFault BaseNotSupportedHost + +func init() { + t["NotSupportedHostFault"] = reflect.TypeOf((*NotSupportedHostFault)(nil)).Elem() +} + +type NotSupportedHostForChecksum struct { + VimFault +} + +func init() { + t["NotSupportedHostForChecksum"] = reflect.TypeOf((*NotSupportedHostForChecksum)(nil)).Elem() +} + +type NotSupportedHostForChecksumFault NotSupportedHostForChecksum + +func init() { + t["NotSupportedHostForChecksumFault"] = reflect.TypeOf((*NotSupportedHostForChecksumFault)(nil)).Elem() +} + +type NotSupportedHostForVFlash struct { + NotSupportedHost + + HostName string `xml:"hostName"` +} + +func init() { + t["NotSupportedHostForVFlash"] = reflect.TypeOf((*NotSupportedHostForVFlash)(nil)).Elem() +} + +type NotSupportedHostForVFlashFault NotSupportedHostForVFlash + +func init() { + t["NotSupportedHostForVFlashFault"] = 
reflect.TypeOf((*NotSupportedHostForVFlashFault)(nil)).Elem() +} + +type NotSupportedHostForVmcp struct { + NotSupportedHost + + HostName string `xml:"hostName"` +} + +func init() { + t["NotSupportedHostForVmcp"] = reflect.TypeOf((*NotSupportedHostForVmcp)(nil)).Elem() +} + +type NotSupportedHostForVmcpFault NotSupportedHostForVmcp + +func init() { + t["NotSupportedHostForVmcpFault"] = reflect.TypeOf((*NotSupportedHostForVmcpFault)(nil)).Elem() +} + +type NotSupportedHostForVmemFile struct { + NotSupportedHost + + HostName string `xml:"hostName"` +} + +func init() { + t["NotSupportedHostForVmemFile"] = reflect.TypeOf((*NotSupportedHostForVmemFile)(nil)).Elem() +} + +type NotSupportedHostForVmemFileFault NotSupportedHostForVmemFile + +func init() { + t["NotSupportedHostForVmemFileFault"] = reflect.TypeOf((*NotSupportedHostForVmemFileFault)(nil)).Elem() +} + +type NotSupportedHostForVsan struct { + NotSupportedHost + + HostName string `xml:"hostName"` +} + +func init() { + t["NotSupportedHostForVsan"] = reflect.TypeOf((*NotSupportedHostForVsan)(nil)).Elem() +} + +type NotSupportedHostForVsanFault NotSupportedHostForVsan + +func init() { + t["NotSupportedHostForVsanFault"] = reflect.TypeOf((*NotSupportedHostForVsanFault)(nil)).Elem() +} + +type NotSupportedHostInCluster struct { + NotSupportedHost +} + +func init() { + t["NotSupportedHostInCluster"] = reflect.TypeOf((*NotSupportedHostInCluster)(nil)).Elem() +} + +type NotSupportedHostInClusterFault BaseNotSupportedHostInCluster + +func init() { + t["NotSupportedHostInClusterFault"] = reflect.TypeOf((*NotSupportedHostInClusterFault)(nil)).Elem() +} + +type NotSupportedHostInDvs struct { + NotSupportedHost + + SwitchProductSpec DistributedVirtualSwitchProductSpec `xml:"switchProductSpec"` +} + +func init() { + t["NotSupportedHostInDvs"] = reflect.TypeOf((*NotSupportedHostInDvs)(nil)).Elem() +} + +type NotSupportedHostInDvsFault NotSupportedHostInDvs + +func init() { + t["NotSupportedHostInDvsFault"] = reflect.TypeOf((*NotSupportedHostInDvsFault)(nil)).Elem() +} + +type NotSupportedHostInHACluster struct { + NotSupportedHost + + HostName string `xml:"hostName"` + Build string `xml:"build"` +} + +func init() { + t["NotSupportedHostInHACluster"] = reflect.TypeOf((*NotSupportedHostInHACluster)(nil)).Elem() +} + +type NotSupportedHostInHAClusterFault NotSupportedHostInHACluster + +func init() { + t["NotSupportedHostInHAClusterFault"] = reflect.TypeOf((*NotSupportedHostInHAClusterFault)(nil)).Elem() +} + +type NotUserConfigurableProperty struct { + VAppPropertyFault +} + +func init() { + t["NotUserConfigurableProperty"] = reflect.TypeOf((*NotUserConfigurableProperty)(nil)).Elem() +} + +type NotUserConfigurablePropertyFault NotUserConfigurableProperty + +func init() { + t["NotUserConfigurablePropertyFault"] = reflect.TypeOf((*NotUserConfigurablePropertyFault)(nil)).Elem() +} + +type NsxHostVNicProfile struct { + ApplyProfile + + Key string `xml:"key"` + IpConfig IpAddressProfile `xml:"ipConfig"` +} + +func init() { + t["NsxHostVNicProfile"] = reflect.TypeOf((*NsxHostVNicProfile)(nil)).Elem() +} + +type NumPortsProfile struct { + ApplyProfile +} + +func init() { + t["NumPortsProfile"] = reflect.TypeOf((*NumPortsProfile)(nil)).Elem() +} + +type NumVirtualCoresPerSocketNotSupported struct { + VirtualHardwareCompatibilityIssue + + MaxSupportedCoresPerSocketDest int32 `xml:"maxSupportedCoresPerSocketDest"` + NumCoresPerSocketVm int32 `xml:"numCoresPerSocketVm"` +} + +func init() { + t["NumVirtualCoresPerSocketNotSupported"] = 
reflect.TypeOf((*NumVirtualCoresPerSocketNotSupported)(nil)).Elem() +} + +type NumVirtualCoresPerSocketNotSupportedFault NumVirtualCoresPerSocketNotSupported + +func init() { + t["NumVirtualCoresPerSocketNotSupportedFault"] = reflect.TypeOf((*NumVirtualCoresPerSocketNotSupportedFault)(nil)).Elem() +} + +type NumVirtualCpusExceedsLimit struct { + InsufficientResourcesFault + + MaxSupportedVcpus int32 `xml:"maxSupportedVcpus"` +} + +func init() { + t["NumVirtualCpusExceedsLimit"] = reflect.TypeOf((*NumVirtualCpusExceedsLimit)(nil)).Elem() +} + +type NumVirtualCpusExceedsLimitFault NumVirtualCpusExceedsLimit + +func init() { + t["NumVirtualCpusExceedsLimitFault"] = reflect.TypeOf((*NumVirtualCpusExceedsLimitFault)(nil)).Elem() +} + +type NumVirtualCpusIncompatible struct { + VmConfigFault + + Reason string `xml:"reason"` + NumCpu int32 `xml:"numCpu"` +} + +func init() { + t["NumVirtualCpusIncompatible"] = reflect.TypeOf((*NumVirtualCpusIncompatible)(nil)).Elem() +} + +type NumVirtualCpusIncompatibleFault NumVirtualCpusIncompatible + +func init() { + t["NumVirtualCpusIncompatibleFault"] = reflect.TypeOf((*NumVirtualCpusIncompatibleFault)(nil)).Elem() +} + +type NumVirtualCpusNotSupported struct { + VirtualHardwareCompatibilityIssue + + MaxSupportedVcpusDest int32 `xml:"maxSupportedVcpusDest"` + NumCpuVm int32 `xml:"numCpuVm"` +} + +func init() { + t["NumVirtualCpusNotSupported"] = reflect.TypeOf((*NumVirtualCpusNotSupported)(nil)).Elem() +} + +type NumVirtualCpusNotSupportedFault NumVirtualCpusNotSupported + +func init() { + t["NumVirtualCpusNotSupportedFault"] = reflect.TypeOf((*NumVirtualCpusNotSupportedFault)(nil)).Elem() +} + +type NumericRange struct { + DynamicData + + Start int32 `xml:"start"` + End int32 `xml:"end"` +} + +func init() { + t["NumericRange"] = reflect.TypeOf((*NumericRange)(nil)).Elem() +} + +type NvdimmDimmInfo struct { + DynamicData + + DimmHandle int32 `xml:"dimmHandle"` + HealthInfo NvdimmHealthInfo `xml:"healthInfo"` + TotalCapacity int64 `xml:"totalCapacity"` + PersistentCapacity int64 `xml:"persistentCapacity"` + AvailablePersistentCapacity int64 `xml:"availablePersistentCapacity"` + VolatileCapacity int64 `xml:"volatileCapacity"` + AvailableVolatileCapacity int64 `xml:"availableVolatileCapacity"` + BlockCapacity int64 `xml:"blockCapacity"` + RegionInfo []NvdimmRegionInfo `xml:"regionInfo,omitempty"` + RepresentationString string `xml:"representationString"` +} + +func init() { + t["NvdimmDimmInfo"] = reflect.TypeOf((*NvdimmDimmInfo)(nil)).Elem() +} + +type NvdimmGuid struct { + DynamicData + + Uuid string `xml:"uuid"` +} + +func init() { + t["NvdimmGuid"] = reflect.TypeOf((*NvdimmGuid)(nil)).Elem() +} + +type NvdimmHealthInfo struct { + DynamicData + + HealthStatus string `xml:"healthStatus"` + HealthInformation string `xml:"healthInformation"` + StateFlagInfo []string `xml:"stateFlagInfo,omitempty"` + DimmTemperature int32 `xml:"dimmTemperature"` + DimmTemperatureThreshold int32 `xml:"dimmTemperatureThreshold"` + SpareBlocksPercentage int32 `xml:"spareBlocksPercentage"` + SpareBlockThreshold int32 `xml:"spareBlockThreshold"` + DimmLifespanPercentage int32 `xml:"dimmLifespanPercentage"` + EsTemperature int32 `xml:"esTemperature,omitempty"` + EsTemperatureThreshold int32 `xml:"esTemperatureThreshold,omitempty"` + EsLifespanPercentage int32 `xml:"esLifespanPercentage,omitempty"` +} + +func init() { + t["NvdimmHealthInfo"] = reflect.TypeOf((*NvdimmHealthInfo)(nil)).Elem() +} + +type NvdimmInterleaveSetInfo struct { + DynamicData + + SetId int32 `xml:"setId"` + 
RangeType string `xml:"rangeType"` + BaseAddress int64 `xml:"baseAddress"` + Size int64 `xml:"size"` + AvailableSize int64 `xml:"availableSize"` + DeviceList []int32 `xml:"deviceList,omitempty"` + State string `xml:"state"` +} + +func init() { + t["NvdimmInterleaveSetInfo"] = reflect.TypeOf((*NvdimmInterleaveSetInfo)(nil)).Elem() +} + +type NvdimmNamespaceCreateSpec struct { + DynamicData + + FriendlyName string `xml:"friendlyName,omitempty"` + BlockSize int64 `xml:"blockSize"` + BlockCount int64 `xml:"blockCount"` + Type string `xml:"type"` + LocationID int32 `xml:"locationID"` +} + +func init() { + t["NvdimmNamespaceCreateSpec"] = reflect.TypeOf((*NvdimmNamespaceCreateSpec)(nil)).Elem() +} + +type NvdimmNamespaceDeleteSpec struct { + DynamicData + + Uuid string `xml:"uuid"` +} + +func init() { + t["NvdimmNamespaceDeleteSpec"] = reflect.TypeOf((*NvdimmNamespaceDeleteSpec)(nil)).Elem() +} + +type NvdimmNamespaceInfo struct { + DynamicData + + Uuid string `xml:"uuid"` + FriendlyName string `xml:"friendlyName"` + BlockSize int64 `xml:"blockSize"` + BlockCount int64 `xml:"blockCount"` + Type string `xml:"type"` + NamespaceHealthStatus string `xml:"namespaceHealthStatus"` + LocationID int32 `xml:"locationID"` + State string `xml:"state"` +} + +func init() { + t["NvdimmNamespaceInfo"] = reflect.TypeOf((*NvdimmNamespaceInfo)(nil)).Elem() +} + +type NvdimmRegionInfo struct { + DynamicData + + RegionId int32 `xml:"regionId"` + SetId int32 `xml:"setId"` + RangeType string `xml:"rangeType"` + StartAddr int64 `xml:"startAddr"` + Size int64 `xml:"size"` + Offset int64 `xml:"offset"` +} + +func init() { + t["NvdimmRegionInfo"] = reflect.TypeOf((*NvdimmRegionInfo)(nil)).Elem() +} + +type NvdimmSummary struct { + DynamicData + + NumDimms int32 `xml:"numDimms"` + HealthStatus string `xml:"healthStatus"` + TotalCapacity int64 `xml:"totalCapacity"` + PersistentCapacity int64 `xml:"persistentCapacity"` + BlockCapacity int64 `xml:"blockCapacity"` + AvailableCapacity int64 `xml:"availableCapacity"` + NumInterleavesets int32 `xml:"numInterleavesets"` + NumNamespaces int32 `xml:"numNamespaces"` +} + +func init() { + t["NvdimmSummary"] = reflect.TypeOf((*NvdimmSummary)(nil)).Elem() +} + +type NvdimmSystemInfo struct { + DynamicData + + Summary *NvdimmSummary `xml:"summary,omitempty"` + Dimms []int32 `xml:"dimms,omitempty"` + DimmInfo []NvdimmDimmInfo `xml:"dimmInfo,omitempty"` + InterleaveSet []int32 `xml:"interleaveSet,omitempty"` + ISetInfo []NvdimmInterleaveSetInfo `xml:"iSetInfo,omitempty"` + Namespace []NvdimmGuid `xml:"namespace,omitempty"` + NsInfo []NvdimmNamespaceInfo `xml:"nsInfo,omitempty"` +} + +func init() { + t["NvdimmSystemInfo"] = reflect.TypeOf((*NvdimmSystemInfo)(nil)).Elem() +} + +type ObjectContent struct { + DynamicData + + Obj ManagedObjectReference `xml:"obj"` + PropSet []DynamicProperty `xml:"propSet,omitempty"` + MissingSet []MissingProperty `xml:"missingSet,omitempty"` +} + +func init() { + t["ObjectContent"] = reflect.TypeOf((*ObjectContent)(nil)).Elem() +} + +type ObjectSpec struct { + DynamicData + + Obj ManagedObjectReference `xml:"obj"` + Skip *bool `xml:"skip"` + SelectSet []BaseSelectionSpec `xml:"selectSet,omitempty,typeattr"` +} + +func init() { + t["ObjectSpec"] = reflect.TypeOf((*ObjectSpec)(nil)).Elem() +} + +type ObjectUpdate struct { + DynamicData + + Kind ObjectUpdateKind `xml:"kind"` + Obj ManagedObjectReference `xml:"obj"` + ChangeSet []PropertyChange `xml:"changeSet,omitempty"` + MissingSet []MissingProperty `xml:"missingSet,omitempty"` +} + +func init() { + 
t["ObjectUpdate"] = reflect.TypeOf((*ObjectUpdate)(nil)).Elem() +} + +type OnceTaskScheduler struct { + TaskScheduler + + RunAt *time.Time `xml:"runAt"` +} + +func init() { + t["OnceTaskScheduler"] = reflect.TypeOf((*OnceTaskScheduler)(nil)).Elem() +} + +type OpaqueNetworkCapability struct { + DynamicData + + NetworkReservationSupported bool `xml:"networkReservationSupported"` +} + +func init() { + t["OpaqueNetworkCapability"] = reflect.TypeOf((*OpaqueNetworkCapability)(nil)).Elem() +} + +type OpaqueNetworkSummary struct { + NetworkSummary + + OpaqueNetworkId string `xml:"opaqueNetworkId"` + OpaqueNetworkType string `xml:"opaqueNetworkType"` +} + +func init() { + t["OpaqueNetworkSummary"] = reflect.TypeOf((*OpaqueNetworkSummary)(nil)).Elem() +} + +type OpaqueNetworkTargetInfo struct { + VirtualMachineTargetInfo + + Network OpaqueNetworkSummary `xml:"network"` + NetworkReservationSupported *bool `xml:"networkReservationSupported"` +} + +func init() { + t["OpaqueNetworkTargetInfo"] = reflect.TypeOf((*OpaqueNetworkTargetInfo)(nil)).Elem() +} + +type OpenInventoryViewFolder OpenInventoryViewFolderRequestType + +func init() { + t["OpenInventoryViewFolder"] = reflect.TypeOf((*OpenInventoryViewFolder)(nil)).Elem() +} + +type OpenInventoryViewFolderRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity []ManagedObjectReference `xml:"entity"` +} + +func init() { + t["OpenInventoryViewFolderRequestType"] = reflect.TypeOf((*OpenInventoryViewFolderRequestType)(nil)).Elem() +} + +type OpenInventoryViewFolderResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type OperationDisabledByGuest struct { + GuestOperationsFault +} + +func init() { + t["OperationDisabledByGuest"] = reflect.TypeOf((*OperationDisabledByGuest)(nil)).Elem() +} + +type OperationDisabledByGuestFault OperationDisabledByGuest + +func init() { + t["OperationDisabledByGuestFault"] = reflect.TypeOf((*OperationDisabledByGuestFault)(nil)).Elem() +} + +type OperationDisallowedOnHost struct { + RuntimeFault +} + +func init() { + t["OperationDisallowedOnHost"] = reflect.TypeOf((*OperationDisallowedOnHost)(nil)).Elem() +} + +type OperationDisallowedOnHostFault OperationDisallowedOnHost + +func init() { + t["OperationDisallowedOnHostFault"] = reflect.TypeOf((*OperationDisallowedOnHostFault)(nil)).Elem() +} + +type OperationNotSupportedByGuest struct { + GuestOperationsFault +} + +func init() { + t["OperationNotSupportedByGuest"] = reflect.TypeOf((*OperationNotSupportedByGuest)(nil)).Elem() +} + +type OperationNotSupportedByGuestFault OperationNotSupportedByGuest + +func init() { + t["OperationNotSupportedByGuestFault"] = reflect.TypeOf((*OperationNotSupportedByGuestFault)(nil)).Elem() +} + +type OptionDef struct { + ElementDescription + + OptionType BaseOptionType `xml:"optionType,typeattr"` +} + +func init() { + t["OptionDef"] = reflect.TypeOf((*OptionDef)(nil)).Elem() +} + +type OptionProfile struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["OptionProfile"] = reflect.TypeOf((*OptionProfile)(nil)).Elem() +} + +type OptionType struct { + DynamicData + + ValueIsReadonly *bool `xml:"valueIsReadonly"` +} + +func init() { + t["OptionType"] = reflect.TypeOf((*OptionType)(nil)).Elem() +} + +type OptionValue struct { + DynamicData + + Key string `xml:"key"` + Value AnyType `xml:"value,typeattr"` +} + +func init() { + t["OptionValue"] = reflect.TypeOf((*OptionValue)(nil)).Elem() +} + +type OrAlarmExpression struct { + AlarmExpression + + Expression []BaseAlarmExpression 
`xml:"expression,typeattr"` +} + +func init() { + t["OrAlarmExpression"] = reflect.TypeOf((*OrAlarmExpression)(nil)).Elem() +} + +type OutOfBounds struct { + VimFault + + ArgumentName string `xml:"argumentName"` +} + +func init() { + t["OutOfBounds"] = reflect.TypeOf((*OutOfBounds)(nil)).Elem() +} + +type OutOfBoundsFault OutOfBounds + +func init() { + t["OutOfBoundsFault"] = reflect.TypeOf((*OutOfBoundsFault)(nil)).Elem() +} + +type OutOfSyncDvsHost struct { + DvsEvent + + HostOutOfSync []DvsOutOfSyncHostArgument `xml:"hostOutOfSync"` +} + +func init() { + t["OutOfSyncDvsHost"] = reflect.TypeOf((*OutOfSyncDvsHost)(nil)).Elem() +} + +type OverwriteCustomizationSpec OverwriteCustomizationSpecRequestType + +func init() { + t["OverwriteCustomizationSpec"] = reflect.TypeOf((*OverwriteCustomizationSpec)(nil)).Elem() +} + +type OverwriteCustomizationSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + Item CustomizationSpecItem `xml:"item"` +} + +func init() { + t["OverwriteCustomizationSpecRequestType"] = reflect.TypeOf((*OverwriteCustomizationSpecRequestType)(nil)).Elem() +} + +type OverwriteCustomizationSpecResponse struct { +} + +type OvfAttribute struct { + OvfInvalidPackage + + ElementName string `xml:"elementName"` + AttributeName string `xml:"attributeName"` +} + +func init() { + t["OvfAttribute"] = reflect.TypeOf((*OvfAttribute)(nil)).Elem() +} + +type OvfAttributeFault BaseOvfAttribute + +func init() { + t["OvfAttributeFault"] = reflect.TypeOf((*OvfAttributeFault)(nil)).Elem() +} + +type OvfConnectedDevice struct { + OvfHardwareExport +} + +func init() { + t["OvfConnectedDevice"] = reflect.TypeOf((*OvfConnectedDevice)(nil)).Elem() +} + +type OvfConnectedDeviceFault BaseOvfConnectedDevice + +func init() { + t["OvfConnectedDeviceFault"] = reflect.TypeOf((*OvfConnectedDeviceFault)(nil)).Elem() +} + +type OvfConnectedDeviceFloppy struct { + OvfConnectedDevice + + Filename string `xml:"filename"` +} + +func init() { + t["OvfConnectedDeviceFloppy"] = reflect.TypeOf((*OvfConnectedDeviceFloppy)(nil)).Elem() +} + +type OvfConnectedDeviceFloppyFault OvfConnectedDeviceFloppy + +func init() { + t["OvfConnectedDeviceFloppyFault"] = reflect.TypeOf((*OvfConnectedDeviceFloppyFault)(nil)).Elem() +} + +type OvfConnectedDeviceIso struct { + OvfConnectedDevice + + Filename string `xml:"filename"` +} + +func init() { + t["OvfConnectedDeviceIso"] = reflect.TypeOf((*OvfConnectedDeviceIso)(nil)).Elem() +} + +type OvfConnectedDeviceIsoFault OvfConnectedDeviceIso + +func init() { + t["OvfConnectedDeviceIsoFault"] = reflect.TypeOf((*OvfConnectedDeviceIsoFault)(nil)).Elem() +} + +type OvfConstraint struct { + OvfInvalidPackage + + Name string `xml:"name"` +} + +func init() { + t["OvfConstraint"] = reflect.TypeOf((*OvfConstraint)(nil)).Elem() +} + +type OvfConstraintFault BaseOvfConstraint + +func init() { + t["OvfConstraintFault"] = reflect.TypeOf((*OvfConstraintFault)(nil)).Elem() +} + +type OvfConsumerCallbackFault struct { + OvfFault + + ExtensionKey string `xml:"extensionKey"` + ExtensionName string `xml:"extensionName"` +} + +func init() { + t["OvfConsumerCallbackFault"] = reflect.TypeOf((*OvfConsumerCallbackFault)(nil)).Elem() +} + +type OvfConsumerCallbackFaultFault BaseOvfConsumerCallbackFault + +func init() { + t["OvfConsumerCallbackFaultFault"] = reflect.TypeOf((*OvfConsumerCallbackFaultFault)(nil)).Elem() +} + +type OvfConsumerCommunicationError struct { + OvfConsumerCallbackFault + + Description string `xml:"description"` +} + +func init() { + t["OvfConsumerCommunicationError"] = 
reflect.TypeOf((*OvfConsumerCommunicationError)(nil)).Elem() +} + +type OvfConsumerCommunicationErrorFault OvfConsumerCommunicationError + +func init() { + t["OvfConsumerCommunicationErrorFault"] = reflect.TypeOf((*OvfConsumerCommunicationErrorFault)(nil)).Elem() +} + +type OvfConsumerFault struct { + OvfConsumerCallbackFault + + ErrorKey string `xml:"errorKey"` + Message string `xml:"message"` + Params []KeyValue `xml:"params,omitempty"` +} + +func init() { + t["OvfConsumerFault"] = reflect.TypeOf((*OvfConsumerFault)(nil)).Elem() +} + +type OvfConsumerFaultFault OvfConsumerFault + +func init() { + t["OvfConsumerFaultFault"] = reflect.TypeOf((*OvfConsumerFaultFault)(nil)).Elem() +} + +type OvfConsumerInvalidSection struct { + OvfConsumerCallbackFault + + LineNumber int32 `xml:"lineNumber"` + Description string `xml:"description"` +} + +func init() { + t["OvfConsumerInvalidSection"] = reflect.TypeOf((*OvfConsumerInvalidSection)(nil)).Elem() +} + +type OvfConsumerInvalidSectionFault OvfConsumerInvalidSection + +func init() { + t["OvfConsumerInvalidSectionFault"] = reflect.TypeOf((*OvfConsumerInvalidSectionFault)(nil)).Elem() +} + +type OvfConsumerOstNode struct { + DynamicData + + Id string `xml:"id"` + Type string `xml:"type"` + Section []OvfConsumerOvfSection `xml:"section,omitempty"` + Child []OvfConsumerOstNode `xml:"child,omitempty"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["OvfConsumerOstNode"] = reflect.TypeOf((*OvfConsumerOstNode)(nil)).Elem() +} + +type OvfConsumerOvfSection struct { + DynamicData + + LineNumber int32 `xml:"lineNumber"` + Xml string `xml:"xml"` +} + +func init() { + t["OvfConsumerOvfSection"] = reflect.TypeOf((*OvfConsumerOvfSection)(nil)).Elem() +} + +type OvfConsumerPowerOnFault struct { + InvalidState + + ExtensionKey string `xml:"extensionKey"` + ExtensionName string `xml:"extensionName"` + Description string `xml:"description"` +} + +func init() { + t["OvfConsumerPowerOnFault"] = reflect.TypeOf((*OvfConsumerPowerOnFault)(nil)).Elem() +} + +type OvfConsumerPowerOnFaultFault OvfConsumerPowerOnFault + +func init() { + t["OvfConsumerPowerOnFaultFault"] = reflect.TypeOf((*OvfConsumerPowerOnFaultFault)(nil)).Elem() +} + +type OvfConsumerUndeclaredSection struct { + OvfConsumerCallbackFault + + QualifiedSectionType string `xml:"qualifiedSectionType"` +} + +func init() { + t["OvfConsumerUndeclaredSection"] = reflect.TypeOf((*OvfConsumerUndeclaredSection)(nil)).Elem() +} + +type OvfConsumerUndeclaredSectionFault OvfConsumerUndeclaredSection + +func init() { + t["OvfConsumerUndeclaredSectionFault"] = reflect.TypeOf((*OvfConsumerUndeclaredSectionFault)(nil)).Elem() +} + +type OvfConsumerUndefinedPrefix struct { + OvfConsumerCallbackFault + + Prefix string `xml:"prefix"` +} + +func init() { + t["OvfConsumerUndefinedPrefix"] = reflect.TypeOf((*OvfConsumerUndefinedPrefix)(nil)).Elem() +} + +type OvfConsumerUndefinedPrefixFault OvfConsumerUndefinedPrefix + +func init() { + t["OvfConsumerUndefinedPrefixFault"] = reflect.TypeOf((*OvfConsumerUndefinedPrefixFault)(nil)).Elem() +} + +type OvfConsumerValidationFault struct { + VmConfigFault + + ExtensionKey string `xml:"extensionKey"` + ExtensionName string `xml:"extensionName"` + Message string `xml:"message"` +} + +func init() { + t["OvfConsumerValidationFault"] = reflect.TypeOf((*OvfConsumerValidationFault)(nil)).Elem() +} + +type OvfConsumerValidationFaultFault OvfConsumerValidationFault + +func init() { + t["OvfConsumerValidationFaultFault"] = 
reflect.TypeOf((*OvfConsumerValidationFaultFault)(nil)).Elem() +} + +type OvfCpuCompatibility struct { + OvfImport + + RegisterName string `xml:"registerName"` + Level int32 `xml:"level"` + RegisterValue string `xml:"registerValue"` + DesiredRegisterValue string `xml:"desiredRegisterValue"` +} + +func init() { + t["OvfCpuCompatibility"] = reflect.TypeOf((*OvfCpuCompatibility)(nil)).Elem() +} + +type OvfCpuCompatibilityCheckNotSupported struct { + OvfImport +} + +func init() { + t["OvfCpuCompatibilityCheckNotSupported"] = reflect.TypeOf((*OvfCpuCompatibilityCheckNotSupported)(nil)).Elem() +} + +type OvfCpuCompatibilityCheckNotSupportedFault OvfCpuCompatibilityCheckNotSupported + +func init() { + t["OvfCpuCompatibilityCheckNotSupportedFault"] = reflect.TypeOf((*OvfCpuCompatibilityCheckNotSupportedFault)(nil)).Elem() +} + +type OvfCpuCompatibilityFault OvfCpuCompatibility + +func init() { + t["OvfCpuCompatibilityFault"] = reflect.TypeOf((*OvfCpuCompatibilityFault)(nil)).Elem() +} + +type OvfCreateDescriptorParams struct { + DynamicData + + OvfFiles []OvfFile `xml:"ovfFiles,omitempty"` + Name string `xml:"name,omitempty"` + Description string `xml:"description,omitempty"` + IncludeImageFiles *bool `xml:"includeImageFiles"` + ExportOption []string `xml:"exportOption,omitempty"` + Snapshot *ManagedObjectReference `xml:"snapshot,omitempty"` +} + +func init() { + t["OvfCreateDescriptorParams"] = reflect.TypeOf((*OvfCreateDescriptorParams)(nil)).Elem() +} + +type OvfCreateDescriptorResult struct { + DynamicData + + OvfDescriptor string `xml:"ovfDescriptor"` + Error []LocalizedMethodFault `xml:"error,omitempty"` + Warning []LocalizedMethodFault `xml:"warning,omitempty"` + IncludeImageFiles *bool `xml:"includeImageFiles"` +} + +func init() { + t["OvfCreateDescriptorResult"] = reflect.TypeOf((*OvfCreateDescriptorResult)(nil)).Elem() +} + +type OvfCreateImportSpecParams struct { + OvfManagerCommonParams + + EntityName string `xml:"entityName"` + HostSystem *ManagedObjectReference `xml:"hostSystem,omitempty"` + NetworkMapping []OvfNetworkMapping `xml:"networkMapping,omitempty"` + IpAllocationPolicy string `xml:"ipAllocationPolicy,omitempty"` + IpProtocol string `xml:"ipProtocol,omitempty"` + PropertyMapping []KeyValue `xml:"propertyMapping,omitempty"` + ResourceMapping []OvfResourceMap `xml:"resourceMapping,omitempty"` + DiskProvisioning string `xml:"diskProvisioning,omitempty"` + InstantiationOst *OvfConsumerOstNode `xml:"instantiationOst,omitempty"` +} + +func init() { + t["OvfCreateImportSpecParams"] = reflect.TypeOf((*OvfCreateImportSpecParams)(nil)).Elem() +} + +type OvfCreateImportSpecResult struct { + DynamicData + + ImportSpec BaseImportSpec `xml:"importSpec,omitempty,typeattr"` + FileItem []OvfFileItem `xml:"fileItem,omitempty"` + Warning []LocalizedMethodFault `xml:"warning,omitempty"` + Error []LocalizedMethodFault `xml:"error,omitempty"` +} + +func init() { + t["OvfCreateImportSpecResult"] = reflect.TypeOf((*OvfCreateImportSpecResult)(nil)).Elem() +} + +type OvfDeploymentOption struct { + DynamicData + + Key string `xml:"key"` + Label string `xml:"label"` + Description string `xml:"description"` +} + +func init() { + t["OvfDeploymentOption"] = reflect.TypeOf((*OvfDeploymentOption)(nil)).Elem() +} + +type OvfDiskMappingNotFound struct { + OvfSystemFault + + DiskName string `xml:"diskName"` + VmName string `xml:"vmName"` +} + +func init() { + t["OvfDiskMappingNotFound"] = reflect.TypeOf((*OvfDiskMappingNotFound)(nil)).Elem() +} + +type OvfDiskMappingNotFoundFault OvfDiskMappingNotFound + +func 
init() { + t["OvfDiskMappingNotFoundFault"] = reflect.TypeOf((*OvfDiskMappingNotFoundFault)(nil)).Elem() +} + +type OvfDiskOrderConstraint struct { + OvfConstraint +} + +func init() { + t["OvfDiskOrderConstraint"] = reflect.TypeOf((*OvfDiskOrderConstraint)(nil)).Elem() +} + +type OvfDiskOrderConstraintFault OvfDiskOrderConstraint + +func init() { + t["OvfDiskOrderConstraintFault"] = reflect.TypeOf((*OvfDiskOrderConstraintFault)(nil)).Elem() +} + +type OvfDuplicateElement struct { + OvfElement +} + +func init() { + t["OvfDuplicateElement"] = reflect.TypeOf((*OvfDuplicateElement)(nil)).Elem() +} + +type OvfDuplicateElementFault OvfDuplicateElement + +func init() { + t["OvfDuplicateElementFault"] = reflect.TypeOf((*OvfDuplicateElementFault)(nil)).Elem() +} + +type OvfDuplicatedElementBoundary struct { + OvfElement + + Boundary string `xml:"boundary"` +} + +func init() { + t["OvfDuplicatedElementBoundary"] = reflect.TypeOf((*OvfDuplicatedElementBoundary)(nil)).Elem() +} + +type OvfDuplicatedElementBoundaryFault OvfDuplicatedElementBoundary + +func init() { + t["OvfDuplicatedElementBoundaryFault"] = reflect.TypeOf((*OvfDuplicatedElementBoundaryFault)(nil)).Elem() +} + +type OvfDuplicatedPropertyIdExport struct { + OvfExport + + Fqid string `xml:"fqid"` +} + +func init() { + t["OvfDuplicatedPropertyIdExport"] = reflect.TypeOf((*OvfDuplicatedPropertyIdExport)(nil)).Elem() +} + +type OvfDuplicatedPropertyIdExportFault OvfDuplicatedPropertyIdExport + +func init() { + t["OvfDuplicatedPropertyIdExportFault"] = reflect.TypeOf((*OvfDuplicatedPropertyIdExportFault)(nil)).Elem() +} + +type OvfDuplicatedPropertyIdImport struct { + OvfExport +} + +func init() { + t["OvfDuplicatedPropertyIdImport"] = reflect.TypeOf((*OvfDuplicatedPropertyIdImport)(nil)).Elem() +} + +type OvfDuplicatedPropertyIdImportFault OvfDuplicatedPropertyIdImport + +func init() { + t["OvfDuplicatedPropertyIdImportFault"] = reflect.TypeOf((*OvfDuplicatedPropertyIdImportFault)(nil)).Elem() +} + +type OvfElement struct { + OvfInvalidPackage + + Name string `xml:"name"` +} + +func init() { + t["OvfElement"] = reflect.TypeOf((*OvfElement)(nil)).Elem() +} + +type OvfElementFault BaseOvfElement + +func init() { + t["OvfElementFault"] = reflect.TypeOf((*OvfElementFault)(nil)).Elem() +} + +type OvfElementInvalidValue struct { + OvfElement + + Value string `xml:"value"` +} + +func init() { + t["OvfElementInvalidValue"] = reflect.TypeOf((*OvfElementInvalidValue)(nil)).Elem() +} + +type OvfElementInvalidValueFault OvfElementInvalidValue + +func init() { + t["OvfElementInvalidValueFault"] = reflect.TypeOf((*OvfElementInvalidValueFault)(nil)).Elem() +} + +type OvfExport struct { + OvfFault +} + +func init() { + t["OvfExport"] = reflect.TypeOf((*OvfExport)(nil)).Elem() +} + +type OvfExportFailed struct { + OvfExport +} + +func init() { + t["OvfExportFailed"] = reflect.TypeOf((*OvfExportFailed)(nil)).Elem() +} + +type OvfExportFailedFault OvfExportFailed + +func init() { + t["OvfExportFailedFault"] = reflect.TypeOf((*OvfExportFailedFault)(nil)).Elem() +} + +type OvfExportFault BaseOvfExport + +func init() { + t["OvfExportFault"] = reflect.TypeOf((*OvfExportFault)(nil)).Elem() +} + +type OvfFault struct { + VimFault +} + +func init() { + t["OvfFault"] = reflect.TypeOf((*OvfFault)(nil)).Elem() +} + +type OvfFaultFault BaseOvfFault + +func init() { + t["OvfFaultFault"] = reflect.TypeOf((*OvfFaultFault)(nil)).Elem() +} + +type OvfFile struct { + DynamicData + + DeviceId string `xml:"deviceId"` + Path string `xml:"path"` + CompressionMethod string 
`xml:"compressionMethod,omitempty"` + ChunkSize int64 `xml:"chunkSize,omitempty"` + Size int64 `xml:"size"` + Capacity int64 `xml:"capacity,omitempty"` + PopulatedSize int64 `xml:"populatedSize,omitempty"` +} + +func init() { + t["OvfFile"] = reflect.TypeOf((*OvfFile)(nil)).Elem() +} + +type OvfFileItem struct { + DynamicData + + DeviceId string `xml:"deviceId"` + Path string `xml:"path"` + CompressionMethod string `xml:"compressionMethod,omitempty"` + ChunkSize int64 `xml:"chunkSize,omitempty"` + Size int64 `xml:"size,omitempty"` + CimType int32 `xml:"cimType"` + Create bool `xml:"create"` +} + +func init() { + t["OvfFileItem"] = reflect.TypeOf((*OvfFileItem)(nil)).Elem() +} + +type OvfHardwareCheck struct { + OvfImport +} + +func init() { + t["OvfHardwareCheck"] = reflect.TypeOf((*OvfHardwareCheck)(nil)).Elem() +} + +type OvfHardwareCheckFault OvfHardwareCheck + +func init() { + t["OvfHardwareCheckFault"] = reflect.TypeOf((*OvfHardwareCheckFault)(nil)).Elem() +} + +type OvfHardwareExport struct { + OvfExport + + Device BaseVirtualDevice `xml:"device,omitempty,typeattr"` + VmPath string `xml:"vmPath"` +} + +func init() { + t["OvfHardwareExport"] = reflect.TypeOf((*OvfHardwareExport)(nil)).Elem() +} + +type OvfHardwareExportFault BaseOvfHardwareExport + +func init() { + t["OvfHardwareExportFault"] = reflect.TypeOf((*OvfHardwareExportFault)(nil)).Elem() +} + +type OvfHostResourceConstraint struct { + OvfConstraint + + Value string `xml:"value"` +} + +func init() { + t["OvfHostResourceConstraint"] = reflect.TypeOf((*OvfHostResourceConstraint)(nil)).Elem() +} + +type OvfHostResourceConstraintFault OvfHostResourceConstraint + +func init() { + t["OvfHostResourceConstraintFault"] = reflect.TypeOf((*OvfHostResourceConstraintFault)(nil)).Elem() +} + +type OvfHostValueNotParsed struct { + OvfSystemFault + + Property string `xml:"property"` + Value string `xml:"value"` +} + +func init() { + t["OvfHostValueNotParsed"] = reflect.TypeOf((*OvfHostValueNotParsed)(nil)).Elem() +} + +type OvfHostValueNotParsedFault OvfHostValueNotParsed + +func init() { + t["OvfHostValueNotParsedFault"] = reflect.TypeOf((*OvfHostValueNotParsedFault)(nil)).Elem() +} + +type OvfImport struct { + OvfFault +} + +func init() { + t["OvfImport"] = reflect.TypeOf((*OvfImport)(nil)).Elem() +} + +type OvfImportFailed struct { + OvfImport +} + +func init() { + t["OvfImportFailed"] = reflect.TypeOf((*OvfImportFailed)(nil)).Elem() +} + +type OvfImportFailedFault OvfImportFailed + +func init() { + t["OvfImportFailedFault"] = reflect.TypeOf((*OvfImportFailedFault)(nil)).Elem() +} + +type OvfImportFault BaseOvfImport + +func init() { + t["OvfImportFault"] = reflect.TypeOf((*OvfImportFault)(nil)).Elem() +} + +type OvfInternalError struct { + OvfSystemFault +} + +func init() { + t["OvfInternalError"] = reflect.TypeOf((*OvfInternalError)(nil)).Elem() +} + +type OvfInternalErrorFault OvfInternalError + +func init() { + t["OvfInternalErrorFault"] = reflect.TypeOf((*OvfInternalErrorFault)(nil)).Elem() +} + +type OvfInvalidPackage struct { + OvfFault + + LineNumber int32 `xml:"lineNumber"` +} + +func init() { + t["OvfInvalidPackage"] = reflect.TypeOf((*OvfInvalidPackage)(nil)).Elem() +} + +type OvfInvalidPackageFault BaseOvfInvalidPackage + +func init() { + t["OvfInvalidPackageFault"] = reflect.TypeOf((*OvfInvalidPackageFault)(nil)).Elem() +} + +type OvfInvalidValue struct { + OvfAttribute + + Value string `xml:"value"` +} + +func init() { + t["OvfInvalidValue"] = reflect.TypeOf((*OvfInvalidValue)(nil)).Elem() +} + +type 
OvfInvalidValueConfiguration struct { + OvfInvalidValue +} + +func init() { + t["OvfInvalidValueConfiguration"] = reflect.TypeOf((*OvfInvalidValueConfiguration)(nil)).Elem() +} + +type OvfInvalidValueConfigurationFault OvfInvalidValueConfiguration + +func init() { + t["OvfInvalidValueConfigurationFault"] = reflect.TypeOf((*OvfInvalidValueConfigurationFault)(nil)).Elem() +} + +type OvfInvalidValueEmpty struct { + OvfInvalidValue +} + +func init() { + t["OvfInvalidValueEmpty"] = reflect.TypeOf((*OvfInvalidValueEmpty)(nil)).Elem() +} + +type OvfInvalidValueEmptyFault OvfInvalidValueEmpty + +func init() { + t["OvfInvalidValueEmptyFault"] = reflect.TypeOf((*OvfInvalidValueEmptyFault)(nil)).Elem() +} + +type OvfInvalidValueFault BaseOvfInvalidValue + +func init() { + t["OvfInvalidValueFault"] = reflect.TypeOf((*OvfInvalidValueFault)(nil)).Elem() +} + +type OvfInvalidValueFormatMalformed struct { + OvfInvalidValue +} + +func init() { + t["OvfInvalidValueFormatMalformed"] = reflect.TypeOf((*OvfInvalidValueFormatMalformed)(nil)).Elem() +} + +type OvfInvalidValueFormatMalformedFault OvfInvalidValueFormatMalformed + +func init() { + t["OvfInvalidValueFormatMalformedFault"] = reflect.TypeOf((*OvfInvalidValueFormatMalformedFault)(nil)).Elem() +} + +type OvfInvalidValueReference struct { + OvfInvalidValue +} + +func init() { + t["OvfInvalidValueReference"] = reflect.TypeOf((*OvfInvalidValueReference)(nil)).Elem() +} + +type OvfInvalidValueReferenceFault OvfInvalidValueReference + +func init() { + t["OvfInvalidValueReferenceFault"] = reflect.TypeOf((*OvfInvalidValueReferenceFault)(nil)).Elem() +} + +type OvfInvalidVmName struct { + OvfUnsupportedPackage + + Name string `xml:"name"` +} + +func init() { + t["OvfInvalidVmName"] = reflect.TypeOf((*OvfInvalidVmName)(nil)).Elem() +} + +type OvfInvalidVmNameFault OvfInvalidVmName + +func init() { + t["OvfInvalidVmNameFault"] = reflect.TypeOf((*OvfInvalidVmNameFault)(nil)).Elem() +} + +type OvfManagerCommonParams struct { + DynamicData + + Locale string `xml:"locale"` + DeploymentOption string `xml:"deploymentOption"` + MsgBundle []KeyValue `xml:"msgBundle,omitempty"` + ImportOption []string `xml:"importOption,omitempty"` +} + +func init() { + t["OvfManagerCommonParams"] = reflect.TypeOf((*OvfManagerCommonParams)(nil)).Elem() +} + +type OvfMappedOsId struct { + OvfImport + + OvfId int32 `xml:"ovfId"` + OvfDescription string `xml:"ovfDescription"` + TargetDescription string `xml:"targetDescription"` +} + +func init() { + t["OvfMappedOsId"] = reflect.TypeOf((*OvfMappedOsId)(nil)).Elem() +} + +type OvfMappedOsIdFault OvfMappedOsId + +func init() { + t["OvfMappedOsIdFault"] = reflect.TypeOf((*OvfMappedOsIdFault)(nil)).Elem() +} + +type OvfMissingAttribute struct { + OvfAttribute +} + +func init() { + t["OvfMissingAttribute"] = reflect.TypeOf((*OvfMissingAttribute)(nil)).Elem() +} + +type OvfMissingAttributeFault OvfMissingAttribute + +func init() { + t["OvfMissingAttributeFault"] = reflect.TypeOf((*OvfMissingAttributeFault)(nil)).Elem() +} + +type OvfMissingElement struct { + OvfElement +} + +func init() { + t["OvfMissingElement"] = reflect.TypeOf((*OvfMissingElement)(nil)).Elem() +} + +type OvfMissingElementFault BaseOvfMissingElement + +func init() { + t["OvfMissingElementFault"] = reflect.TypeOf((*OvfMissingElementFault)(nil)).Elem() +} + +type OvfMissingElementNormalBoundary struct { + OvfMissingElement + + Boundary string `xml:"boundary"` +} + +func init() { + t["OvfMissingElementNormalBoundary"] = reflect.TypeOf((*OvfMissingElementNormalBoundary)(nil)).Elem() 
+} + +type OvfMissingElementNormalBoundaryFault OvfMissingElementNormalBoundary + +func init() { + t["OvfMissingElementNormalBoundaryFault"] = reflect.TypeOf((*OvfMissingElementNormalBoundaryFault)(nil)).Elem() +} + +type OvfMissingHardware struct { + OvfImport + + Name string `xml:"name"` + ResourceType int32 `xml:"resourceType"` +} + +func init() { + t["OvfMissingHardware"] = reflect.TypeOf((*OvfMissingHardware)(nil)).Elem() +} + +type OvfMissingHardwareFault OvfMissingHardware + +func init() { + t["OvfMissingHardwareFault"] = reflect.TypeOf((*OvfMissingHardwareFault)(nil)).Elem() +} + +type OvfNetworkInfo struct { + DynamicData + + Name string `xml:"name"` + Description string `xml:"description"` +} + +func init() { + t["OvfNetworkInfo"] = reflect.TypeOf((*OvfNetworkInfo)(nil)).Elem() +} + +type OvfNetworkMapping struct { + DynamicData + + Name string `xml:"name"` + Network ManagedObjectReference `xml:"network"` +} + +func init() { + t["OvfNetworkMapping"] = reflect.TypeOf((*OvfNetworkMapping)(nil)).Elem() +} + +type OvfNetworkMappingNotSupported struct { + OvfImport +} + +func init() { + t["OvfNetworkMappingNotSupported"] = reflect.TypeOf((*OvfNetworkMappingNotSupported)(nil)).Elem() +} + +type OvfNetworkMappingNotSupportedFault OvfNetworkMappingNotSupported + +func init() { + t["OvfNetworkMappingNotSupportedFault"] = reflect.TypeOf((*OvfNetworkMappingNotSupportedFault)(nil)).Elem() +} + +type OvfNoHostNic struct { + OvfUnsupportedPackage +} + +func init() { + t["OvfNoHostNic"] = reflect.TypeOf((*OvfNoHostNic)(nil)).Elem() +} + +type OvfNoHostNicFault OvfNoHostNic + +func init() { + t["OvfNoHostNicFault"] = reflect.TypeOf((*OvfNoHostNicFault)(nil)).Elem() +} + +type OvfNoSpaceOnController struct { + OvfUnsupportedElement + + Parent string `xml:"parent"` +} + +func init() { + t["OvfNoSpaceOnController"] = reflect.TypeOf((*OvfNoSpaceOnController)(nil)).Elem() +} + +type OvfNoSpaceOnControllerFault OvfNoSpaceOnController + +func init() { + t["OvfNoSpaceOnControllerFault"] = reflect.TypeOf((*OvfNoSpaceOnControllerFault)(nil)).Elem() +} + +type OvfNoSupportedHardwareFamily struct { + OvfUnsupportedPackage + + Version string `xml:"version"` +} + +func init() { + t["OvfNoSupportedHardwareFamily"] = reflect.TypeOf((*OvfNoSupportedHardwareFamily)(nil)).Elem() +} + +type OvfNoSupportedHardwareFamilyFault OvfNoSupportedHardwareFamily + +func init() { + t["OvfNoSupportedHardwareFamilyFault"] = reflect.TypeOf((*OvfNoSupportedHardwareFamilyFault)(nil)).Elem() +} + +type OvfOptionInfo struct { + DynamicData + + Option string `xml:"option"` + Description LocalizableMessage `xml:"description"` +} + +func init() { + t["OvfOptionInfo"] = reflect.TypeOf((*OvfOptionInfo)(nil)).Elem() +} + +type OvfParseDescriptorParams struct { + OvfManagerCommonParams +} + +func init() { + t["OvfParseDescriptorParams"] = reflect.TypeOf((*OvfParseDescriptorParams)(nil)).Elem() +} + +type OvfParseDescriptorResult struct { + DynamicData + + Eula []string `xml:"eula,omitempty"` + Network []OvfNetworkInfo `xml:"network,omitempty"` + IpAllocationScheme []string `xml:"ipAllocationScheme,omitempty"` + IpProtocols []string `xml:"ipProtocols,omitempty"` + Property []VAppPropertyInfo `xml:"property,omitempty"` + ProductInfo *VAppProductInfo `xml:"productInfo,omitempty"` + Annotation string `xml:"annotation"` + ApproximateDownloadSize int64 `xml:"approximateDownloadSize,omitempty"` + ApproximateFlatDeploymentSize int64 `xml:"approximateFlatDeploymentSize,omitempty"` + ApproximateSparseDeploymentSize int64 
`xml:"approximateSparseDeploymentSize,omitempty"` + DefaultEntityName string `xml:"defaultEntityName"` + VirtualApp bool `xml:"virtualApp"` + DeploymentOption []OvfDeploymentOption `xml:"deploymentOption,omitempty"` + DefaultDeploymentOption string `xml:"defaultDeploymentOption"` + EntityName []KeyValue `xml:"entityName,omitempty"` + AnnotatedOst *OvfConsumerOstNode `xml:"annotatedOst,omitempty"` + Error []LocalizedMethodFault `xml:"error,omitempty"` + Warning []LocalizedMethodFault `xml:"warning,omitempty"` +} + +func init() { + t["OvfParseDescriptorResult"] = reflect.TypeOf((*OvfParseDescriptorResult)(nil)).Elem() +} + +type OvfProperty struct { + OvfInvalidPackage + + Type string `xml:"type"` + Value string `xml:"value"` +} + +func init() { + t["OvfProperty"] = reflect.TypeOf((*OvfProperty)(nil)).Elem() +} + +type OvfPropertyExport struct { + OvfExport + + Type string `xml:"type"` + Value string `xml:"value"` +} + +func init() { + t["OvfPropertyExport"] = reflect.TypeOf((*OvfPropertyExport)(nil)).Elem() +} + +type OvfPropertyExportFault OvfPropertyExport + +func init() { + t["OvfPropertyExportFault"] = reflect.TypeOf((*OvfPropertyExportFault)(nil)).Elem() +} + +type OvfPropertyFault BaseOvfProperty + +func init() { + t["OvfPropertyFault"] = reflect.TypeOf((*OvfPropertyFault)(nil)).Elem() +} + +type OvfPropertyNetwork struct { + OvfProperty +} + +func init() { + t["OvfPropertyNetwork"] = reflect.TypeOf((*OvfPropertyNetwork)(nil)).Elem() +} + +type OvfPropertyNetworkExport struct { + OvfExport + + Network string `xml:"network"` +} + +func init() { + t["OvfPropertyNetworkExport"] = reflect.TypeOf((*OvfPropertyNetworkExport)(nil)).Elem() +} + +type OvfPropertyNetworkExportFault OvfPropertyNetworkExport + +func init() { + t["OvfPropertyNetworkExportFault"] = reflect.TypeOf((*OvfPropertyNetworkExportFault)(nil)).Elem() +} + +type OvfPropertyNetworkFault OvfPropertyNetwork + +func init() { + t["OvfPropertyNetworkFault"] = reflect.TypeOf((*OvfPropertyNetworkFault)(nil)).Elem() +} + +type OvfPropertyQualifier struct { + OvfProperty + + Qualifier string `xml:"qualifier"` +} + +func init() { + t["OvfPropertyQualifier"] = reflect.TypeOf((*OvfPropertyQualifier)(nil)).Elem() +} + +type OvfPropertyQualifierDuplicate struct { + OvfProperty + + Qualifier string `xml:"qualifier"` +} + +func init() { + t["OvfPropertyQualifierDuplicate"] = reflect.TypeOf((*OvfPropertyQualifierDuplicate)(nil)).Elem() +} + +type OvfPropertyQualifierDuplicateFault OvfPropertyQualifierDuplicate + +func init() { + t["OvfPropertyQualifierDuplicateFault"] = reflect.TypeOf((*OvfPropertyQualifierDuplicateFault)(nil)).Elem() +} + +type OvfPropertyQualifierFault OvfPropertyQualifier + +func init() { + t["OvfPropertyQualifierFault"] = reflect.TypeOf((*OvfPropertyQualifierFault)(nil)).Elem() +} + +type OvfPropertyQualifierIgnored struct { + OvfProperty + + Qualifier string `xml:"qualifier"` +} + +func init() { + t["OvfPropertyQualifierIgnored"] = reflect.TypeOf((*OvfPropertyQualifierIgnored)(nil)).Elem() +} + +type OvfPropertyQualifierIgnoredFault OvfPropertyQualifierIgnored + +func init() { + t["OvfPropertyQualifierIgnoredFault"] = reflect.TypeOf((*OvfPropertyQualifierIgnoredFault)(nil)).Elem() +} + +type OvfPropertyType struct { + OvfProperty +} + +func init() { + t["OvfPropertyType"] = reflect.TypeOf((*OvfPropertyType)(nil)).Elem() +} + +type OvfPropertyTypeFault OvfPropertyType + +func init() { + t["OvfPropertyTypeFault"] = reflect.TypeOf((*OvfPropertyTypeFault)(nil)).Elem() +} + +type OvfPropertyValue struct { + OvfProperty +} + 
+func init() { + t["OvfPropertyValue"] = reflect.TypeOf((*OvfPropertyValue)(nil)).Elem() +} + +type OvfPropertyValueFault OvfPropertyValue + +func init() { + t["OvfPropertyValueFault"] = reflect.TypeOf((*OvfPropertyValueFault)(nil)).Elem() +} + +type OvfResourceMap struct { + DynamicData + + Source string `xml:"source"` + Parent *ManagedObjectReference `xml:"parent,omitempty"` + ResourceSpec *ResourceConfigSpec `xml:"resourceSpec,omitempty"` + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` +} + +func init() { + t["OvfResourceMap"] = reflect.TypeOf((*OvfResourceMap)(nil)).Elem() +} + +type OvfSystemFault struct { + OvfFault +} + +func init() { + t["OvfSystemFault"] = reflect.TypeOf((*OvfSystemFault)(nil)).Elem() +} + +type OvfSystemFaultFault BaseOvfSystemFault + +func init() { + t["OvfSystemFaultFault"] = reflect.TypeOf((*OvfSystemFaultFault)(nil)).Elem() +} + +type OvfToXmlUnsupportedElement struct { + OvfSystemFault + + Name string `xml:"name,omitempty"` +} + +func init() { + t["OvfToXmlUnsupportedElement"] = reflect.TypeOf((*OvfToXmlUnsupportedElement)(nil)).Elem() +} + +type OvfToXmlUnsupportedElementFault OvfToXmlUnsupportedElement + +func init() { + t["OvfToXmlUnsupportedElementFault"] = reflect.TypeOf((*OvfToXmlUnsupportedElementFault)(nil)).Elem() +} + +type OvfUnableToExportDisk struct { + OvfHardwareExport + + DiskName string `xml:"diskName"` +} + +func init() { + t["OvfUnableToExportDisk"] = reflect.TypeOf((*OvfUnableToExportDisk)(nil)).Elem() +} + +type OvfUnableToExportDiskFault OvfUnableToExportDisk + +func init() { + t["OvfUnableToExportDiskFault"] = reflect.TypeOf((*OvfUnableToExportDiskFault)(nil)).Elem() +} + +type OvfUnexpectedElement struct { + OvfElement +} + +func init() { + t["OvfUnexpectedElement"] = reflect.TypeOf((*OvfUnexpectedElement)(nil)).Elem() +} + +type OvfUnexpectedElementFault OvfUnexpectedElement + +func init() { + t["OvfUnexpectedElementFault"] = reflect.TypeOf((*OvfUnexpectedElementFault)(nil)).Elem() +} + +type OvfUnknownDevice struct { + OvfSystemFault + + Device BaseVirtualDevice `xml:"device,omitempty,typeattr"` + VmName string `xml:"vmName"` +} + +func init() { + t["OvfUnknownDevice"] = reflect.TypeOf((*OvfUnknownDevice)(nil)).Elem() +} + +type OvfUnknownDeviceBacking struct { + OvfHardwareExport + + Backing BaseVirtualDeviceBackingInfo `xml:"backing,typeattr"` +} + +func init() { + t["OvfUnknownDeviceBacking"] = reflect.TypeOf((*OvfUnknownDeviceBacking)(nil)).Elem() +} + +type OvfUnknownDeviceBackingFault OvfUnknownDeviceBacking + +func init() { + t["OvfUnknownDeviceBackingFault"] = reflect.TypeOf((*OvfUnknownDeviceBackingFault)(nil)).Elem() +} + +type OvfUnknownDeviceFault OvfUnknownDevice + +func init() { + t["OvfUnknownDeviceFault"] = reflect.TypeOf((*OvfUnknownDeviceFault)(nil)).Elem() +} + +type OvfUnknownEntity struct { + OvfSystemFault + + LineNumber int32 `xml:"lineNumber"` +} + +func init() { + t["OvfUnknownEntity"] = reflect.TypeOf((*OvfUnknownEntity)(nil)).Elem() +} + +type OvfUnknownEntityFault OvfUnknownEntity + +func init() { + t["OvfUnknownEntityFault"] = reflect.TypeOf((*OvfUnknownEntityFault)(nil)).Elem() +} + +type OvfUnsupportedAttribute struct { + OvfUnsupportedPackage + + ElementName string `xml:"elementName"` + AttributeName string `xml:"attributeName"` +} + +func init() { + t["OvfUnsupportedAttribute"] = reflect.TypeOf((*OvfUnsupportedAttribute)(nil)).Elem() +} + +type OvfUnsupportedAttributeFault BaseOvfUnsupportedAttribute + +func init() { + t["OvfUnsupportedAttributeFault"] = 
reflect.TypeOf((*OvfUnsupportedAttributeFault)(nil)).Elem() +} + +type OvfUnsupportedAttributeValue struct { + OvfUnsupportedAttribute + + Value string `xml:"value"` +} + +func init() { + t["OvfUnsupportedAttributeValue"] = reflect.TypeOf((*OvfUnsupportedAttributeValue)(nil)).Elem() +} + +type OvfUnsupportedAttributeValueFault OvfUnsupportedAttributeValue + +func init() { + t["OvfUnsupportedAttributeValueFault"] = reflect.TypeOf((*OvfUnsupportedAttributeValueFault)(nil)).Elem() +} + +type OvfUnsupportedDeviceBackingInfo struct { + OvfSystemFault + + ElementName string `xml:"elementName,omitempty"` + InstanceId string `xml:"instanceId,omitempty"` + DeviceName string `xml:"deviceName"` + BackingName string `xml:"backingName,omitempty"` +} + +func init() { + t["OvfUnsupportedDeviceBackingInfo"] = reflect.TypeOf((*OvfUnsupportedDeviceBackingInfo)(nil)).Elem() +} + +type OvfUnsupportedDeviceBackingInfoFault OvfUnsupportedDeviceBackingInfo + +func init() { + t["OvfUnsupportedDeviceBackingInfoFault"] = reflect.TypeOf((*OvfUnsupportedDeviceBackingInfoFault)(nil)).Elem() +} + +type OvfUnsupportedDeviceBackingOption struct { + OvfSystemFault + + ElementName string `xml:"elementName,omitempty"` + InstanceId string `xml:"instanceId,omitempty"` + DeviceName string `xml:"deviceName"` + BackingName string `xml:"backingName,omitempty"` +} + +func init() { + t["OvfUnsupportedDeviceBackingOption"] = reflect.TypeOf((*OvfUnsupportedDeviceBackingOption)(nil)).Elem() +} + +type OvfUnsupportedDeviceBackingOptionFault OvfUnsupportedDeviceBackingOption + +func init() { + t["OvfUnsupportedDeviceBackingOptionFault"] = reflect.TypeOf((*OvfUnsupportedDeviceBackingOptionFault)(nil)).Elem() +} + +type OvfUnsupportedDeviceExport struct { + OvfHardwareExport +} + +func init() { + t["OvfUnsupportedDeviceExport"] = reflect.TypeOf((*OvfUnsupportedDeviceExport)(nil)).Elem() +} + +type OvfUnsupportedDeviceExportFault OvfUnsupportedDeviceExport + +func init() { + t["OvfUnsupportedDeviceExportFault"] = reflect.TypeOf((*OvfUnsupportedDeviceExportFault)(nil)).Elem() +} + +type OvfUnsupportedDiskProvisioning struct { + OvfImport + + DiskProvisioning string `xml:"diskProvisioning"` + SupportedDiskProvisioning string `xml:"supportedDiskProvisioning"` +} + +func init() { + t["OvfUnsupportedDiskProvisioning"] = reflect.TypeOf((*OvfUnsupportedDiskProvisioning)(nil)).Elem() +} + +type OvfUnsupportedDiskProvisioningFault OvfUnsupportedDiskProvisioning + +func init() { + t["OvfUnsupportedDiskProvisioningFault"] = reflect.TypeOf((*OvfUnsupportedDiskProvisioningFault)(nil)).Elem() +} + +type OvfUnsupportedElement struct { + OvfUnsupportedPackage + + Name string `xml:"name"` +} + +func init() { + t["OvfUnsupportedElement"] = reflect.TypeOf((*OvfUnsupportedElement)(nil)).Elem() +} + +type OvfUnsupportedElementFault BaseOvfUnsupportedElement + +func init() { + t["OvfUnsupportedElementFault"] = reflect.TypeOf((*OvfUnsupportedElementFault)(nil)).Elem() +} + +type OvfUnsupportedElementValue struct { + OvfUnsupportedElement + + Value string `xml:"value"` +} + +func init() { + t["OvfUnsupportedElementValue"] = reflect.TypeOf((*OvfUnsupportedElementValue)(nil)).Elem() +} + +type OvfUnsupportedElementValueFault OvfUnsupportedElementValue + +func init() { + t["OvfUnsupportedElementValueFault"] = reflect.TypeOf((*OvfUnsupportedElementValueFault)(nil)).Elem() +} + +type OvfUnsupportedPackage struct { + OvfFault + + LineNumber int32 `xml:"lineNumber,omitempty"` +} + +func init() { + t["OvfUnsupportedPackage"] = 
reflect.TypeOf((*OvfUnsupportedPackage)(nil)).Elem() +} + +type OvfUnsupportedPackageFault BaseOvfUnsupportedPackage + +func init() { + t["OvfUnsupportedPackageFault"] = reflect.TypeOf((*OvfUnsupportedPackageFault)(nil)).Elem() +} + +type OvfUnsupportedSection struct { + OvfUnsupportedElement + + Info string `xml:"info"` +} + +func init() { + t["OvfUnsupportedSection"] = reflect.TypeOf((*OvfUnsupportedSection)(nil)).Elem() +} + +type OvfUnsupportedSectionFault OvfUnsupportedSection + +func init() { + t["OvfUnsupportedSectionFault"] = reflect.TypeOf((*OvfUnsupportedSectionFault)(nil)).Elem() +} + +type OvfUnsupportedSubType struct { + OvfUnsupportedPackage + + ElementName string `xml:"elementName"` + InstanceId string `xml:"instanceId"` + DeviceType int32 `xml:"deviceType"` + DeviceSubType string `xml:"deviceSubType"` +} + +func init() { + t["OvfUnsupportedSubType"] = reflect.TypeOf((*OvfUnsupportedSubType)(nil)).Elem() +} + +type OvfUnsupportedSubTypeFault OvfUnsupportedSubType + +func init() { + t["OvfUnsupportedSubTypeFault"] = reflect.TypeOf((*OvfUnsupportedSubTypeFault)(nil)).Elem() +} + +type OvfUnsupportedType struct { + OvfUnsupportedPackage + + Name string `xml:"name"` + InstanceId string `xml:"instanceId"` + DeviceType int32 `xml:"deviceType"` +} + +func init() { + t["OvfUnsupportedType"] = reflect.TypeOf((*OvfUnsupportedType)(nil)).Elem() +} + +type OvfUnsupportedTypeFault OvfUnsupportedType + +func init() { + t["OvfUnsupportedTypeFault"] = reflect.TypeOf((*OvfUnsupportedTypeFault)(nil)).Elem() +} + +type OvfValidateHostParams struct { + OvfManagerCommonParams +} + +func init() { + t["OvfValidateHostParams"] = reflect.TypeOf((*OvfValidateHostParams)(nil)).Elem() +} + +type OvfValidateHostResult struct { + DynamicData + + DownloadSize int64 `xml:"downloadSize,omitempty"` + FlatDeploymentSize int64 `xml:"flatDeploymentSize,omitempty"` + SparseDeploymentSize int64 `xml:"sparseDeploymentSize,omitempty"` + Error []LocalizedMethodFault `xml:"error,omitempty"` + Warning []LocalizedMethodFault `xml:"warning,omitempty"` + SupportedDiskProvisioning []string `xml:"supportedDiskProvisioning,omitempty"` +} + +func init() { + t["OvfValidateHostResult"] = reflect.TypeOf((*OvfValidateHostResult)(nil)).Elem() +} + +type OvfWrongElement struct { + OvfElement +} + +func init() { + t["OvfWrongElement"] = reflect.TypeOf((*OvfWrongElement)(nil)).Elem() +} + +type OvfWrongElementFault OvfWrongElement + +func init() { + t["OvfWrongElementFault"] = reflect.TypeOf((*OvfWrongElementFault)(nil)).Elem() +} + +type OvfWrongNamespace struct { + OvfInvalidPackage + + NamespaceName string `xml:"namespaceName"` +} + +func init() { + t["OvfWrongNamespace"] = reflect.TypeOf((*OvfWrongNamespace)(nil)).Elem() +} + +type OvfWrongNamespaceFault OvfWrongNamespace + +func init() { + t["OvfWrongNamespaceFault"] = reflect.TypeOf((*OvfWrongNamespaceFault)(nil)).Elem() +} + +type OvfXmlFormat struct { + OvfInvalidPackage + + Description string `xml:"description"` +} + +func init() { + t["OvfXmlFormat"] = reflect.TypeOf((*OvfXmlFormat)(nil)).Elem() +} + +type OvfXmlFormatFault OvfXmlFormat + +func init() { + t["OvfXmlFormatFault"] = reflect.TypeOf((*OvfXmlFormatFault)(nil)).Elem() +} + +type PMemDatastoreInfo struct { + DatastoreInfo + + Pmem HostPMemVolume `xml:"pmem"` +} + +func init() { + t["PMemDatastoreInfo"] = reflect.TypeOf((*PMemDatastoreInfo)(nil)).Elem() +} + +type ParaVirtualSCSIController struct { + VirtualSCSIController +} + +func init() { + t["ParaVirtualSCSIController"] = 
reflect.TypeOf((*ParaVirtualSCSIController)(nil)).Elem() +} + +type ParaVirtualSCSIControllerOption struct { + VirtualSCSIControllerOption +} + +func init() { + t["ParaVirtualSCSIControllerOption"] = reflect.TypeOf((*ParaVirtualSCSIControllerOption)(nil)).Elem() +} + +type ParseDescriptor ParseDescriptorRequestType + +func init() { + t["ParseDescriptor"] = reflect.TypeOf((*ParseDescriptor)(nil)).Elem() +} + +type ParseDescriptorRequestType struct { + This ManagedObjectReference `xml:"_this"` + OvfDescriptor string `xml:"ovfDescriptor"` + Pdp OvfParseDescriptorParams `xml:"pdp"` +} + +func init() { + t["ParseDescriptorRequestType"] = reflect.TypeOf((*ParseDescriptorRequestType)(nil)).Elem() +} + +type ParseDescriptorResponse struct { + Returnval OvfParseDescriptorResult `xml:"returnval"` +} + +type PassiveNodeDeploymentSpec struct { + NodeDeploymentSpec + + FailoverIpSettings *CustomizationIPSettings `xml:"failoverIpSettings,omitempty"` +} + +func init() { + t["PassiveNodeDeploymentSpec"] = reflect.TypeOf((*PassiveNodeDeploymentSpec)(nil)).Elem() +} + +type PassiveNodeNetworkSpec struct { + NodeNetworkSpec + + FailoverIpSettings *CustomizationIPSettings `xml:"failoverIpSettings,omitempty"` +} + +func init() { + t["PassiveNodeNetworkSpec"] = reflect.TypeOf((*PassiveNodeNetworkSpec)(nil)).Elem() +} + +type PasswordField struct { + DynamicData + + Value string `xml:"value"` +} + +func init() { + t["PasswordField"] = reflect.TypeOf((*PasswordField)(nil)).Elem() +} + +type PatchAlreadyInstalled struct { + PatchNotApplicable +} + +func init() { + t["PatchAlreadyInstalled"] = reflect.TypeOf((*PatchAlreadyInstalled)(nil)).Elem() +} + +type PatchAlreadyInstalledFault PatchAlreadyInstalled + +func init() { + t["PatchAlreadyInstalledFault"] = reflect.TypeOf((*PatchAlreadyInstalledFault)(nil)).Elem() +} + +type PatchBinariesNotFound struct { + VimFault + + PatchID string `xml:"patchID"` + Binary []string `xml:"binary,omitempty"` +} + +func init() { + t["PatchBinariesNotFound"] = reflect.TypeOf((*PatchBinariesNotFound)(nil)).Elem() +} + +type PatchBinariesNotFoundFault PatchBinariesNotFound + +func init() { + t["PatchBinariesNotFoundFault"] = reflect.TypeOf((*PatchBinariesNotFoundFault)(nil)).Elem() +} + +type PatchInstallFailed struct { + PlatformConfigFault + + RolledBack bool `xml:"rolledBack"` +} + +func init() { + t["PatchInstallFailed"] = reflect.TypeOf((*PatchInstallFailed)(nil)).Elem() +} + +type PatchInstallFailedFault PatchInstallFailed + +func init() { + t["PatchInstallFailedFault"] = reflect.TypeOf((*PatchInstallFailedFault)(nil)).Elem() +} + +type PatchIntegrityError struct { + PlatformConfigFault +} + +func init() { + t["PatchIntegrityError"] = reflect.TypeOf((*PatchIntegrityError)(nil)).Elem() +} + +type PatchIntegrityErrorFault PatchIntegrityError + +func init() { + t["PatchIntegrityErrorFault"] = reflect.TypeOf((*PatchIntegrityErrorFault)(nil)).Elem() +} + +type PatchMetadataCorrupted struct { + PatchMetadataInvalid +} + +func init() { + t["PatchMetadataCorrupted"] = reflect.TypeOf((*PatchMetadataCorrupted)(nil)).Elem() +} + +type PatchMetadataCorruptedFault PatchMetadataCorrupted + +func init() { + t["PatchMetadataCorruptedFault"] = reflect.TypeOf((*PatchMetadataCorruptedFault)(nil)).Elem() +} + +type PatchMetadataInvalid struct { + VimFault + + PatchID string `xml:"patchID"` + MetaData []string `xml:"metaData,omitempty"` +} + +func init() { + t["PatchMetadataInvalid"] = reflect.TypeOf((*PatchMetadataInvalid)(nil)).Elem() +} + +type PatchMetadataInvalidFault BasePatchMetadataInvalid + 
+func init() { + t["PatchMetadataInvalidFault"] = reflect.TypeOf((*PatchMetadataInvalidFault)(nil)).Elem() +} + +type PatchMetadataNotFound struct { + PatchMetadataInvalid +} + +func init() { + t["PatchMetadataNotFound"] = reflect.TypeOf((*PatchMetadataNotFound)(nil)).Elem() +} + +type PatchMetadataNotFoundFault PatchMetadataNotFound + +func init() { + t["PatchMetadataNotFoundFault"] = reflect.TypeOf((*PatchMetadataNotFoundFault)(nil)).Elem() +} + +type PatchMissingDependencies struct { + PatchNotApplicable + + PrerequisitePatch []string `xml:"prerequisitePatch,omitempty"` + PrerequisiteLib []string `xml:"prerequisiteLib,omitempty"` +} + +func init() { + t["PatchMissingDependencies"] = reflect.TypeOf((*PatchMissingDependencies)(nil)).Elem() +} + +type PatchMissingDependenciesFault PatchMissingDependencies + +func init() { + t["PatchMissingDependenciesFault"] = reflect.TypeOf((*PatchMissingDependenciesFault)(nil)).Elem() +} + +type PatchNotApplicable struct { + VimFault + + PatchID string `xml:"patchID"` +} + +func init() { + t["PatchNotApplicable"] = reflect.TypeOf((*PatchNotApplicable)(nil)).Elem() +} + +type PatchNotApplicableFault BasePatchNotApplicable + +func init() { + t["PatchNotApplicableFault"] = reflect.TypeOf((*PatchNotApplicableFault)(nil)).Elem() +} + +type PatchSuperseded struct { + PatchNotApplicable + + Supersede []string `xml:"supersede,omitempty"` +} + +func init() { + t["PatchSuperseded"] = reflect.TypeOf((*PatchSuperseded)(nil)).Elem() +} + +type PatchSupersededFault PatchSuperseded + +func init() { + t["PatchSupersededFault"] = reflect.TypeOf((*PatchSupersededFault)(nil)).Elem() +} + +type PerfCompositeMetric struct { + DynamicData + + Entity BasePerfEntityMetricBase `xml:"entity,omitempty,typeattr"` + ChildEntity []BasePerfEntityMetricBase `xml:"childEntity,omitempty,typeattr"` +} + +func init() { + t["PerfCompositeMetric"] = reflect.TypeOf((*PerfCompositeMetric)(nil)).Elem() +} + +type PerfCounterInfo struct { + DynamicData + + Key int32 `xml:"key"` + NameInfo BaseElementDescription `xml:"nameInfo,typeattr"` + GroupInfo BaseElementDescription `xml:"groupInfo,typeattr"` + UnitInfo BaseElementDescription `xml:"unitInfo,typeattr"` + RollupType PerfSummaryType `xml:"rollupType"` + StatsType PerfStatsType `xml:"statsType"` + Level int32 `xml:"level,omitempty"` + PerDeviceLevel int32 `xml:"perDeviceLevel,omitempty"` + AssociatedCounterId []int32 `xml:"associatedCounterId,omitempty"` +} + +func init() { + t["PerfCounterInfo"] = reflect.TypeOf((*PerfCounterInfo)(nil)).Elem() +} + +type PerfEntityMetric struct { + PerfEntityMetricBase + + SampleInfo []PerfSampleInfo `xml:"sampleInfo,omitempty"` + Value []BasePerfMetricSeries `xml:"value,omitempty,typeattr"` +} + +func init() { + t["PerfEntityMetric"] = reflect.TypeOf((*PerfEntityMetric)(nil)).Elem() +} + +type PerfEntityMetricBase struct { + DynamicData + + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["PerfEntityMetricBase"] = reflect.TypeOf((*PerfEntityMetricBase)(nil)).Elem() +} + +type PerfEntityMetricCSV struct { + PerfEntityMetricBase + + SampleInfoCSV string `xml:"sampleInfoCSV"` + Value []PerfMetricSeriesCSV `xml:"value,omitempty"` +} + +func init() { + t["PerfEntityMetricCSV"] = reflect.TypeOf((*PerfEntityMetricCSV)(nil)).Elem() +} + +type PerfInterval struct { + DynamicData + + Key int32 `xml:"key"` + SamplingPeriod int32 `xml:"samplingPeriod"` + Name string `xml:"name"` + Length int32 `xml:"length"` + Level int32 `xml:"level,omitempty"` + Enabled bool `xml:"enabled"` +} + +func init() { + 
t["PerfInterval"] = reflect.TypeOf((*PerfInterval)(nil)).Elem() +} + +type PerfMetricId struct { + DynamicData + + CounterId int32 `xml:"counterId"` + Instance string `xml:"instance"` +} + +func init() { + t["PerfMetricId"] = reflect.TypeOf((*PerfMetricId)(nil)).Elem() +} + +type PerfMetricIntSeries struct { + PerfMetricSeries + + Value []int64 `xml:"value,omitempty"` +} + +func init() { + t["PerfMetricIntSeries"] = reflect.TypeOf((*PerfMetricIntSeries)(nil)).Elem() +} + +type PerfMetricSeries struct { + DynamicData + + Id PerfMetricId `xml:"id"` +} + +func init() { + t["PerfMetricSeries"] = reflect.TypeOf((*PerfMetricSeries)(nil)).Elem() +} + +type PerfMetricSeriesCSV struct { + PerfMetricSeries + + Value string `xml:"value,omitempty"` +} + +func init() { + t["PerfMetricSeriesCSV"] = reflect.TypeOf((*PerfMetricSeriesCSV)(nil)).Elem() +} + +type PerfProviderSummary struct { + DynamicData + + Entity ManagedObjectReference `xml:"entity"` + CurrentSupported bool `xml:"currentSupported"` + SummarySupported bool `xml:"summarySupported"` + RefreshRate int32 `xml:"refreshRate,omitempty"` +} + +func init() { + t["PerfProviderSummary"] = reflect.TypeOf((*PerfProviderSummary)(nil)).Elem() +} + +type PerfQuerySpec struct { + DynamicData + + Entity ManagedObjectReference `xml:"entity"` + StartTime *time.Time `xml:"startTime"` + EndTime *time.Time `xml:"endTime"` + MaxSample int32 `xml:"maxSample,omitempty"` + MetricId []PerfMetricId `xml:"metricId,omitempty"` + IntervalId int32 `xml:"intervalId,omitempty"` + Format string `xml:"format,omitempty"` +} + +func init() { + t["PerfQuerySpec"] = reflect.TypeOf((*PerfQuerySpec)(nil)).Elem() +} + +type PerfSampleInfo struct { + DynamicData + + Timestamp time.Time `xml:"timestamp"` + Interval int32 `xml:"interval"` +} + +func init() { + t["PerfSampleInfo"] = reflect.TypeOf((*PerfSampleInfo)(nil)).Elem() +} + +type PerformDvsProductSpecOperationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Operation string `xml:"operation"` + ProductSpec *DistributedVirtualSwitchProductSpec `xml:"productSpec,omitempty"` +} + +func init() { + t["PerformDvsProductSpecOperationRequestType"] = reflect.TypeOf((*PerformDvsProductSpecOperationRequestType)(nil)).Elem() +} + +type PerformDvsProductSpecOperation_Task PerformDvsProductSpecOperationRequestType + +func init() { + t["PerformDvsProductSpecOperation_Task"] = reflect.TypeOf((*PerformDvsProductSpecOperation_Task)(nil)).Elem() +} + +type PerformDvsProductSpecOperation_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PerformVsanUpgradePreflightCheck PerformVsanUpgradePreflightCheckRequestType + +func init() { + t["PerformVsanUpgradePreflightCheck"] = reflect.TypeOf((*PerformVsanUpgradePreflightCheck)(nil)).Elem() +} + +type PerformVsanUpgradePreflightCheckRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster ManagedObjectReference `xml:"cluster"` + DowngradeFormat *bool `xml:"downgradeFormat"` +} + +func init() { + t["PerformVsanUpgradePreflightCheckRequestType"] = reflect.TypeOf((*PerformVsanUpgradePreflightCheckRequestType)(nil)).Elem() +} + +type PerformVsanUpgradePreflightCheckResponse struct { + Returnval VsanUpgradeSystemPreflightCheckResult `xml:"returnval"` +} + +type PerformVsanUpgradeRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster ManagedObjectReference `xml:"cluster"` + PerformObjectUpgrade *bool `xml:"performObjectUpgrade"` + DowngradeFormat *bool `xml:"downgradeFormat"` + AllowReducedRedundancy *bool 
`xml:"allowReducedRedundancy"` + ExcludeHosts []ManagedObjectReference `xml:"excludeHosts,omitempty"` +} + +func init() { + t["PerformVsanUpgradeRequestType"] = reflect.TypeOf((*PerformVsanUpgradeRequestType)(nil)).Elem() +} + +type PerformVsanUpgrade_Task PerformVsanUpgradeRequestType + +func init() { + t["PerformVsanUpgrade_Task"] = reflect.TypeOf((*PerformVsanUpgrade_Task)(nil)).Elem() +} + +type PerformVsanUpgrade_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PerformanceDescription struct { + DynamicData + + CounterType []BaseElementDescription `xml:"counterType,typeattr"` + StatsType []BaseElementDescription `xml:"statsType,typeattr"` +} + +func init() { + t["PerformanceDescription"] = reflect.TypeOf((*PerformanceDescription)(nil)).Elem() +} + +type PerformanceManagerCounterLevelMapping struct { + DynamicData + + CounterId int32 `xml:"counterId"` + AggregateLevel int32 `xml:"aggregateLevel,omitempty"` + PerDeviceLevel int32 `xml:"perDeviceLevel,omitempty"` +} + +func init() { + t["PerformanceManagerCounterLevelMapping"] = reflect.TypeOf((*PerformanceManagerCounterLevelMapping)(nil)).Elem() +} + +type PerformanceStatisticsDescription struct { + DynamicData + + Intervals []PerfInterval `xml:"intervals,omitempty"` +} + +func init() { + t["PerformanceStatisticsDescription"] = reflect.TypeOf((*PerformanceStatisticsDescription)(nil)).Elem() +} + +type Permission struct { + DynamicData + + Entity *ManagedObjectReference `xml:"entity,omitempty"` + Principal string `xml:"principal"` + Group bool `xml:"group"` + RoleId int32 `xml:"roleId"` + Propagate bool `xml:"propagate"` +} + +func init() { + t["Permission"] = reflect.TypeOf((*Permission)(nil)).Elem() +} + +type PermissionAddedEvent struct { + PermissionEvent + + Role RoleEventArgument `xml:"role"` + Propagate bool `xml:"propagate"` +} + +func init() { + t["PermissionAddedEvent"] = reflect.TypeOf((*PermissionAddedEvent)(nil)).Elem() +} + +type PermissionEvent struct { + AuthorizationEvent + + Entity ManagedEntityEventArgument `xml:"entity"` + Principal string `xml:"principal"` + Group bool `xml:"group"` +} + +func init() { + t["PermissionEvent"] = reflect.TypeOf((*PermissionEvent)(nil)).Elem() +} + +type PermissionProfile struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["PermissionProfile"] = reflect.TypeOf((*PermissionProfile)(nil)).Elem() +} + +type PermissionRemovedEvent struct { + PermissionEvent +} + +func init() { + t["PermissionRemovedEvent"] = reflect.TypeOf((*PermissionRemovedEvent)(nil)).Elem() +} + +type PermissionUpdatedEvent struct { + PermissionEvent + + Role RoleEventArgument `xml:"role"` + Propagate bool `xml:"propagate"` + PrevRole *RoleEventArgument `xml:"prevRole,omitempty"` + PrevPropagate *bool `xml:"prevPropagate"` +} + +func init() { + t["PermissionUpdatedEvent"] = reflect.TypeOf((*PermissionUpdatedEvent)(nil)).Elem() +} + +type PhysCompatRDMNotSupported struct { + RDMNotSupported +} + +func init() { + t["PhysCompatRDMNotSupported"] = reflect.TypeOf((*PhysCompatRDMNotSupported)(nil)).Elem() +} + +type PhysCompatRDMNotSupportedFault PhysCompatRDMNotSupported + +func init() { + t["PhysCompatRDMNotSupportedFault"] = reflect.TypeOf((*PhysCompatRDMNotSupportedFault)(nil)).Elem() +} + +type PhysicalNic struct { + DynamicData + + Key string `xml:"key,omitempty"` + Device string `xml:"device"` + Pci string `xml:"pci"` + Driver string `xml:"driver,omitempty"` + LinkSpeed *PhysicalNicLinkInfo `xml:"linkSpeed,omitempty"` + ValidLinkSpecification 
[]PhysicalNicLinkInfo `xml:"validLinkSpecification,omitempty"` + Spec PhysicalNicSpec `xml:"spec"` + WakeOnLanSupported bool `xml:"wakeOnLanSupported"` + Mac string `xml:"mac"` + FcoeConfiguration *FcoeConfig `xml:"fcoeConfiguration,omitempty"` + VmDirectPathGen2Supported *bool `xml:"vmDirectPathGen2Supported"` + VmDirectPathGen2SupportedMode string `xml:"vmDirectPathGen2SupportedMode,omitempty"` + ResourcePoolSchedulerAllowed *bool `xml:"resourcePoolSchedulerAllowed"` + ResourcePoolSchedulerDisallowedReason []string `xml:"resourcePoolSchedulerDisallowedReason,omitempty"` + AutoNegotiateSupported *bool `xml:"autoNegotiateSupported"` + EnhancedNetworkingStackSupported *bool `xml:"enhancedNetworkingStackSupported"` +} + +func init() { + t["PhysicalNic"] = reflect.TypeOf((*PhysicalNic)(nil)).Elem() +} + +type PhysicalNicCdpDeviceCapability struct { + DynamicData + + Router bool `xml:"router"` + TransparentBridge bool `xml:"transparentBridge"` + SourceRouteBridge bool `xml:"sourceRouteBridge"` + NetworkSwitch bool `xml:"networkSwitch"` + Host bool `xml:"host"` + IgmpEnabled bool `xml:"igmpEnabled"` + Repeater bool `xml:"repeater"` +} + +func init() { + t["PhysicalNicCdpDeviceCapability"] = reflect.TypeOf((*PhysicalNicCdpDeviceCapability)(nil)).Elem() +} + +type PhysicalNicCdpInfo struct { + DynamicData + + CdpVersion int32 `xml:"cdpVersion,omitempty"` + Timeout int32 `xml:"timeout,omitempty"` + Ttl int32 `xml:"ttl,omitempty"` + Samples int32 `xml:"samples,omitempty"` + DevId string `xml:"devId,omitempty"` + Address string `xml:"address,omitempty"` + PortId string `xml:"portId,omitempty"` + DeviceCapability *PhysicalNicCdpDeviceCapability `xml:"deviceCapability,omitempty"` + SoftwareVersion string `xml:"softwareVersion,omitempty"` + HardwarePlatform string `xml:"hardwarePlatform,omitempty"` + IpPrefix string `xml:"ipPrefix,omitempty"` + IpPrefixLen int32 `xml:"ipPrefixLen,omitempty"` + Vlan int32 `xml:"vlan,omitempty"` + FullDuplex *bool `xml:"fullDuplex"` + Mtu int32 `xml:"mtu,omitempty"` + SystemName string `xml:"systemName,omitempty"` + SystemOID string `xml:"systemOID,omitempty"` + MgmtAddr string `xml:"mgmtAddr,omitempty"` + Location string `xml:"location,omitempty"` +} + +func init() { + t["PhysicalNicCdpInfo"] = reflect.TypeOf((*PhysicalNicCdpInfo)(nil)).Elem() +} + +type PhysicalNicConfig struct { + DynamicData + + Device string `xml:"device"` + Spec PhysicalNicSpec `xml:"spec"` +} + +func init() { + t["PhysicalNicConfig"] = reflect.TypeOf((*PhysicalNicConfig)(nil)).Elem() +} + +type PhysicalNicHint struct { + DynamicData + + VlanId int32 `xml:"vlanId,omitempty"` +} + +func init() { + t["PhysicalNicHint"] = reflect.TypeOf((*PhysicalNicHint)(nil)).Elem() +} + +type PhysicalNicHintInfo struct { + DynamicData + + Device string `xml:"device"` + Subnet []PhysicalNicIpHint `xml:"subnet,omitempty"` + Network []PhysicalNicNameHint `xml:"network,omitempty"` + ConnectedSwitchPort *PhysicalNicCdpInfo `xml:"connectedSwitchPort,omitempty"` + LldpInfo *LinkLayerDiscoveryProtocolInfo `xml:"lldpInfo,omitempty"` +} + +func init() { + t["PhysicalNicHintInfo"] = reflect.TypeOf((*PhysicalNicHintInfo)(nil)).Elem() +} + +type PhysicalNicIpHint struct { + PhysicalNicHint + + IpSubnet string `xml:"ipSubnet"` +} + +func init() { + t["PhysicalNicIpHint"] = reflect.TypeOf((*PhysicalNicIpHint)(nil)).Elem() +} + +type PhysicalNicLinkInfo struct { + DynamicData + + SpeedMb int32 `xml:"speedMb"` + Duplex bool `xml:"duplex"` +} + +func init() { + t["PhysicalNicLinkInfo"] = 
reflect.TypeOf((*PhysicalNicLinkInfo)(nil)).Elem() +} + +type PhysicalNicNameHint struct { + PhysicalNicHint + + Network string `xml:"network"` +} + +func init() { + t["PhysicalNicNameHint"] = reflect.TypeOf((*PhysicalNicNameHint)(nil)).Elem() +} + +type PhysicalNicProfile struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["PhysicalNicProfile"] = reflect.TypeOf((*PhysicalNicProfile)(nil)).Elem() +} + +type PhysicalNicSpec struct { + DynamicData + + Ip *HostIpConfig `xml:"ip,omitempty"` + LinkSpeed *PhysicalNicLinkInfo `xml:"linkSpeed,omitempty"` + EnableEnhancedNetworkingStack *bool `xml:"enableEnhancedNetworkingStack"` +} + +func init() { + t["PhysicalNicSpec"] = reflect.TypeOf((*PhysicalNicSpec)(nil)).Elem() +} + +type PlaceVm PlaceVmRequestType + +func init() { + t["PlaceVm"] = reflect.TypeOf((*PlaceVm)(nil)).Elem() +} + +type PlaceVmRequestType struct { + This ManagedObjectReference `xml:"_this"` + PlacementSpec PlacementSpec `xml:"placementSpec"` +} + +func init() { + t["PlaceVmRequestType"] = reflect.TypeOf((*PlaceVmRequestType)(nil)).Elem() +} + +type PlaceVmResponse struct { + Returnval PlacementResult `xml:"returnval"` +} + +type PlacementAction struct { + ClusterAction + + Vm *ManagedObjectReference `xml:"vm,omitempty"` + TargetHost *ManagedObjectReference `xml:"targetHost,omitempty"` + RelocateSpec *VirtualMachineRelocateSpec `xml:"relocateSpec,omitempty"` +} + +func init() { + t["PlacementAction"] = reflect.TypeOf((*PlacementAction)(nil)).Elem() +} + +type PlacementAffinityRule struct { + DynamicData + + RuleType string `xml:"ruleType"` + RuleScope string `xml:"ruleScope"` + Vms []ManagedObjectReference `xml:"vms,omitempty"` + Keys []string `xml:"keys,omitempty"` +} + +func init() { + t["PlacementAffinityRule"] = reflect.TypeOf((*PlacementAffinityRule)(nil)).Elem() +} + +type PlacementRankResult struct { + DynamicData + + Key string `xml:"key"` + Candidate ManagedObjectReference `xml:"candidate"` + ReservedSpaceMB int64 `xml:"reservedSpaceMB"` + UsedSpaceMB int64 `xml:"usedSpaceMB"` + TotalSpaceMB int64 `xml:"totalSpaceMB"` + Utilization float64 `xml:"utilization"` + Faults []LocalizedMethodFault `xml:"faults,omitempty"` +} + +func init() { + t["PlacementRankResult"] = reflect.TypeOf((*PlacementRankResult)(nil)).Elem() +} + +type PlacementRankSpec struct { + DynamicData + + Specs []PlacementSpec `xml:"specs"` + Clusters []ManagedObjectReference `xml:"clusters"` + Rules []PlacementAffinityRule `xml:"rules,omitempty"` + PlacementRankByVm []StorageDrsPlacementRankVmSpec `xml:"placementRankByVm,omitempty"` +} + +func init() { + t["PlacementRankSpec"] = reflect.TypeOf((*PlacementRankSpec)(nil)).Elem() +} + +type PlacementResult struct { + DynamicData + + Recommendations []ClusterRecommendation `xml:"recommendations,omitempty"` + DrsFault *ClusterDrsFaults `xml:"drsFault,omitempty"` +} + +func init() { + t["PlacementResult"] = reflect.TypeOf((*PlacementResult)(nil)).Elem() +} + +type PlacementSpec struct { + DynamicData + + Priority VirtualMachineMovePriority `xml:"priority,omitempty"` + Vm *ManagedObjectReference `xml:"vm,omitempty"` + ConfigSpec *VirtualMachineConfigSpec `xml:"configSpec,omitempty"` + RelocateSpec *VirtualMachineRelocateSpec `xml:"relocateSpec,omitempty"` + Hosts []ManagedObjectReference `xml:"hosts,omitempty"` + Datastores []ManagedObjectReference `xml:"datastores,omitempty"` + StoragePods []ManagedObjectReference `xml:"storagePods,omitempty"` + DisallowPrerequisiteMoves *bool `xml:"disallowPrerequisiteMoves"` + Rules []BaseClusterRuleInfo 
`xml:"rules,omitempty,typeattr"` + Key string `xml:"key,omitempty"` + PlacementType string `xml:"placementType,omitempty"` + CloneSpec *VirtualMachineCloneSpec `xml:"cloneSpec,omitempty"` + CloneName string `xml:"cloneName,omitempty"` +} + +func init() { + t["PlacementSpec"] = reflect.TypeOf((*PlacementSpec)(nil)).Elem() +} + +type PlatformConfigFault struct { + HostConfigFault + + Text string `xml:"text"` +} + +func init() { + t["PlatformConfigFault"] = reflect.TypeOf((*PlatformConfigFault)(nil)).Elem() +} + +type PlatformConfigFaultFault BasePlatformConfigFault + +func init() { + t["PlatformConfigFaultFault"] = reflect.TypeOf((*PlatformConfigFaultFault)(nil)).Elem() +} + +type PnicUplinkProfile struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["PnicUplinkProfile"] = reflect.TypeOf((*PnicUplinkProfile)(nil)).Elem() +} + +type PodDiskLocator struct { + DynamicData + + DiskId int32 `xml:"diskId"` + DiskMoveType string `xml:"diskMoveType,omitempty"` + DiskBackingInfo BaseVirtualDeviceBackingInfo `xml:"diskBackingInfo,omitempty,typeattr"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` +} + +func init() { + t["PodDiskLocator"] = reflect.TypeOf((*PodDiskLocator)(nil)).Elem() +} + +type PodStorageDrsEntry struct { + DynamicData + + StorageDrsConfig StorageDrsConfigInfo `xml:"storageDrsConfig"` + Recommendation []ClusterRecommendation `xml:"recommendation,omitempty"` + DrsFault []ClusterDrsFaults `xml:"drsFault,omitempty"` + ActionHistory []ClusterActionHistory `xml:"actionHistory,omitempty"` +} + +func init() { + t["PodStorageDrsEntry"] = reflect.TypeOf((*PodStorageDrsEntry)(nil)).Elem() +} + +type PolicyOption struct { + DynamicData + + Id string `xml:"id"` + Parameter []KeyAnyValue `xml:"parameter,omitempty"` +} + +func init() { + t["PolicyOption"] = reflect.TypeOf((*PolicyOption)(nil)).Elem() +} + +type PortGroupProfile struct { + ApplyProfile + + Key string `xml:"key"` + Name string `xml:"name"` + Vlan VlanProfile `xml:"vlan"` + Vswitch VirtualSwitchSelectionProfile `xml:"vswitch"` + NetworkPolicy NetworkPolicyProfile `xml:"networkPolicy"` +} + +func init() { + t["PortGroupProfile"] = reflect.TypeOf((*PortGroupProfile)(nil)).Elem() +} + +type PosixUserSearchResult struct { + UserSearchResult + + Id int32 `xml:"id"` + ShellAccess *bool `xml:"shellAccess"` +} + +func init() { + t["PosixUserSearchResult"] = reflect.TypeOf((*PosixUserSearchResult)(nil)).Elem() +} + +type PostEvent PostEventRequestType + +func init() { + t["PostEvent"] = reflect.TypeOf((*PostEvent)(nil)).Elem() +} + +type PostEventRequestType struct { + This ManagedObjectReference `xml:"_this"` + EventToPost BaseEvent `xml:"eventToPost,typeattr"` + TaskInfo *TaskInfo `xml:"taskInfo,omitempty"` +} + +func init() { + t["PostEventRequestType"] = reflect.TypeOf((*PostEventRequestType)(nil)).Elem() +} + +type PostEventResponse struct { +} + +type PostHealthUpdates PostHealthUpdatesRequestType + +func init() { + t["PostHealthUpdates"] = reflect.TypeOf((*PostHealthUpdates)(nil)).Elem() +} + +type PostHealthUpdatesRequestType struct { + This ManagedObjectReference `xml:"_this"` + ProviderId string `xml:"providerId"` + Updates []HealthUpdate `xml:"updates,omitempty"` +} + +func init() { + t["PostHealthUpdatesRequestType"] = reflect.TypeOf((*PostHealthUpdatesRequestType)(nil)).Elem() +} + +type PostHealthUpdatesResponse struct { +} + +type PowerDownHostToStandByRequestType struct { + This ManagedObjectReference `xml:"_this"` + TimeoutSec int32 `xml:"timeoutSec"` + EvacuatePoweredOffVms 
*bool `xml:"evacuatePoweredOffVms"` +} + +func init() { + t["PowerDownHostToStandByRequestType"] = reflect.TypeOf((*PowerDownHostToStandByRequestType)(nil)).Elem() +} + +type PowerDownHostToStandBy_Task PowerDownHostToStandByRequestType + +func init() { + t["PowerDownHostToStandBy_Task"] = reflect.TypeOf((*PowerDownHostToStandBy_Task)(nil)).Elem() +} + +type PowerDownHostToStandBy_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PowerOffVAppRequestType struct { + This ManagedObjectReference `xml:"_this"` + Force bool `xml:"force"` +} + +func init() { + t["PowerOffVAppRequestType"] = reflect.TypeOf((*PowerOffVAppRequestType)(nil)).Elem() +} + +type PowerOffVApp_Task PowerOffVAppRequestType + +func init() { + t["PowerOffVApp_Task"] = reflect.TypeOf((*PowerOffVApp_Task)(nil)).Elem() +} + +type PowerOffVApp_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PowerOffVMRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["PowerOffVMRequestType"] = reflect.TypeOf((*PowerOffVMRequestType)(nil)).Elem() +} + +type PowerOffVM_Task PowerOffVMRequestType + +func init() { + t["PowerOffVM_Task"] = reflect.TypeOf((*PowerOffVM_Task)(nil)).Elem() +} + +type PowerOffVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PowerOnFtSecondaryFailed struct { + VmFaultToleranceIssue + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` + HostSelectionBy FtIssuesOnHostHostSelectionType `xml:"hostSelectionBy"` + HostErrors []LocalizedMethodFault `xml:"hostErrors,omitempty"` + RootCause LocalizedMethodFault `xml:"rootCause"` +} + +func init() { + t["PowerOnFtSecondaryFailed"] = reflect.TypeOf((*PowerOnFtSecondaryFailed)(nil)).Elem() +} + +type PowerOnFtSecondaryFailedFault PowerOnFtSecondaryFailed + +func init() { + t["PowerOnFtSecondaryFailedFault"] = reflect.TypeOf((*PowerOnFtSecondaryFailedFault)(nil)).Elem() +} + +type PowerOnFtSecondaryTimedout struct { + Timedout + + Vm ManagedObjectReference `xml:"vm"` + VmName string `xml:"vmName"` + Timeout int32 `xml:"timeout"` +} + +func init() { + t["PowerOnFtSecondaryTimedout"] = reflect.TypeOf((*PowerOnFtSecondaryTimedout)(nil)).Elem() +} + +type PowerOnFtSecondaryTimedoutFault PowerOnFtSecondaryTimedout + +func init() { + t["PowerOnFtSecondaryTimedoutFault"] = reflect.TypeOf((*PowerOnFtSecondaryTimedoutFault)(nil)).Elem() +} + +type PowerOnMultiVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm []ManagedObjectReference `xml:"vm"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr"` +} + +func init() { + t["PowerOnMultiVMRequestType"] = reflect.TypeOf((*PowerOnMultiVMRequestType)(nil)).Elem() +} + +type PowerOnMultiVM_Task PowerOnMultiVMRequestType + +func init() { + t["PowerOnMultiVM_Task"] = reflect.TypeOf((*PowerOnMultiVM_Task)(nil)).Elem() +} + +type PowerOnMultiVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PowerOnVAppRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["PowerOnVAppRequestType"] = reflect.TypeOf((*PowerOnVAppRequestType)(nil)).Elem() +} + +type PowerOnVApp_Task PowerOnVAppRequestType + +func init() { + t["PowerOnVApp_Task"] = reflect.TypeOf((*PowerOnVApp_Task)(nil)).Elem() +} + +type PowerOnVApp_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PowerOnVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference 
`xml:"host,omitempty"` +} + +func init() { + t["PowerOnVMRequestType"] = reflect.TypeOf((*PowerOnVMRequestType)(nil)).Elem() +} + +type PowerOnVM_Task PowerOnVMRequestType + +func init() { + t["PowerOnVM_Task"] = reflect.TypeOf((*PowerOnVM_Task)(nil)).Elem() +} + +type PowerOnVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PowerSystemCapability struct { + DynamicData + + AvailablePolicy []HostPowerPolicy `xml:"availablePolicy"` +} + +func init() { + t["PowerSystemCapability"] = reflect.TypeOf((*PowerSystemCapability)(nil)).Elem() +} + +type PowerSystemInfo struct { + DynamicData + + CurrentPolicy HostPowerPolicy `xml:"currentPolicy"` +} + +func init() { + t["PowerSystemInfo"] = reflect.TypeOf((*PowerSystemInfo)(nil)).Elem() +} + +type PowerUpHostFromStandByRequestType struct { + This ManagedObjectReference `xml:"_this"` + TimeoutSec int32 `xml:"timeoutSec"` +} + +func init() { + t["PowerUpHostFromStandByRequestType"] = reflect.TypeOf((*PowerUpHostFromStandByRequestType)(nil)).Elem() +} + +type PowerUpHostFromStandBy_Task PowerUpHostFromStandByRequestType + +func init() { + t["PowerUpHostFromStandBy_Task"] = reflect.TypeOf((*PowerUpHostFromStandBy_Task)(nil)).Elem() +} + +type PowerUpHostFromStandBy_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PrepareCrypto PrepareCryptoRequestType + +func init() { + t["PrepareCrypto"] = reflect.TypeOf((*PrepareCrypto)(nil)).Elem() +} + +type PrepareCryptoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["PrepareCryptoRequestType"] = reflect.TypeOf((*PrepareCryptoRequestType)(nil)).Elem() +} + +type PrepareCryptoResponse struct { +} + +type PrivilegeAvailability struct { + DynamicData + + PrivId string `xml:"privId"` + IsGranted bool `xml:"isGranted"` +} + +func init() { + t["PrivilegeAvailability"] = reflect.TypeOf((*PrivilegeAvailability)(nil)).Elem() +} + +type PrivilegePolicyDef struct { + DynamicData + + CreatePrivilege string `xml:"createPrivilege"` + ReadPrivilege string `xml:"readPrivilege"` + UpdatePrivilege string `xml:"updatePrivilege"` + DeletePrivilege string `xml:"deletePrivilege"` +} + +func init() { + t["PrivilegePolicyDef"] = reflect.TypeOf((*PrivilegePolicyDef)(nil)).Elem() +} + +type ProductComponentInfo struct { + DynamicData + + Id string `xml:"id"` + Name string `xml:"name"` + Version string `xml:"version"` + Release int32 `xml:"release"` +} + +func init() { + t["ProductComponentInfo"] = reflect.TypeOf((*ProductComponentInfo)(nil)).Elem() +} + +type ProfileApplyProfileElement struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["ProfileApplyProfileElement"] = reflect.TypeOf((*ProfileApplyProfileElement)(nil)).Elem() +} + +type ProfileApplyProfileProperty struct { + DynamicData + + PropertyName string `xml:"propertyName"` + Array bool `xml:"array"` + Profile []BaseApplyProfile `xml:"profile,omitempty,typeattr"` +} + +func init() { + t["ProfileApplyProfileProperty"] = reflect.TypeOf((*ProfileApplyProfileProperty)(nil)).Elem() +} + +type ProfileAssociatedEvent struct { + ProfileEvent +} + +func init() { + t["ProfileAssociatedEvent"] = reflect.TypeOf((*ProfileAssociatedEvent)(nil)).Elem() +} + +type ProfileChangedEvent struct { + ProfileEvent +} + +func init() { + t["ProfileChangedEvent"] = reflect.TypeOf((*ProfileChangedEvent)(nil)).Elem() +} + +type ProfileCompositeExpression struct { + ProfileExpression + + Operator string `xml:"operator"` + ExpressionName []string `xml:"expressionName"` +} + 
+func init() { + t["ProfileCompositeExpression"] = reflect.TypeOf((*ProfileCompositeExpression)(nil)).Elem() +} + +type ProfileCompositePolicyOptionMetadata struct { + ProfilePolicyOptionMetadata + + Option []string `xml:"option"` +} + +func init() { + t["ProfileCompositePolicyOptionMetadata"] = reflect.TypeOf((*ProfileCompositePolicyOptionMetadata)(nil)).Elem() +} + +type ProfileConfigInfo struct { + DynamicData + + Name string `xml:"name"` + Annotation string `xml:"annotation,omitempty"` + Enabled bool `xml:"enabled"` +} + +func init() { + t["ProfileConfigInfo"] = reflect.TypeOf((*ProfileConfigInfo)(nil)).Elem() +} + +type ProfileCreateSpec struct { + DynamicData + + Name string `xml:"name,omitempty"` + Annotation string `xml:"annotation,omitempty"` + Enabled *bool `xml:"enabled"` +} + +func init() { + t["ProfileCreateSpec"] = reflect.TypeOf((*ProfileCreateSpec)(nil)).Elem() +} + +type ProfileCreatedEvent struct { + ProfileEvent +} + +func init() { + t["ProfileCreatedEvent"] = reflect.TypeOf((*ProfileCreatedEvent)(nil)).Elem() +} + +type ProfileDeferredPolicyOptionParameter struct { + DynamicData + + InputPath ProfilePropertyPath `xml:"inputPath"` + Parameter []KeyAnyValue `xml:"parameter,omitempty"` +} + +func init() { + t["ProfileDeferredPolicyOptionParameter"] = reflect.TypeOf((*ProfileDeferredPolicyOptionParameter)(nil)).Elem() +} + +type ProfileDescription struct { + DynamicData + + Section []ProfileDescriptionSection `xml:"section"` +} + +func init() { + t["ProfileDescription"] = reflect.TypeOf((*ProfileDescription)(nil)).Elem() +} + +type ProfileDescriptionSection struct { + DynamicData + + Description ExtendedElementDescription `xml:"description"` + Message []LocalizableMessage `xml:"message,omitempty"` +} + +func init() { + t["ProfileDescriptionSection"] = reflect.TypeOf((*ProfileDescriptionSection)(nil)).Elem() +} + +type ProfileDissociatedEvent struct { + ProfileEvent +} + +func init() { + t["ProfileDissociatedEvent"] = reflect.TypeOf((*ProfileDissociatedEvent)(nil)).Elem() +} + +type ProfileEvent struct { + Event + + Profile ProfileEventArgument `xml:"profile"` +} + +func init() { + t["ProfileEvent"] = reflect.TypeOf((*ProfileEvent)(nil)).Elem() +} + +type ProfileEventArgument struct { + EventArgument + + Profile ManagedObjectReference `xml:"profile"` + Name string `xml:"name"` +} + +func init() { + t["ProfileEventArgument"] = reflect.TypeOf((*ProfileEventArgument)(nil)).Elem() +} + +type ProfileExecuteError struct { + DynamicData + + Path *ProfilePropertyPath `xml:"path,omitempty"` + Message LocalizableMessage `xml:"message"` +} + +func init() { + t["ProfileExecuteError"] = reflect.TypeOf((*ProfileExecuteError)(nil)).Elem() +} + +type ProfileExecuteResult struct { + DynamicData + + Status string `xml:"status"` + ConfigSpec *HostConfigSpec `xml:"configSpec,omitempty"` + InapplicablePath []string `xml:"inapplicablePath,omitempty"` + RequireInput []ProfileDeferredPolicyOptionParameter `xml:"requireInput,omitempty"` + Error []ProfileExecuteError `xml:"error,omitempty"` +} + +func init() { + t["ProfileExecuteResult"] = reflect.TypeOf((*ProfileExecuteResult)(nil)).Elem() +} + +type ProfileExpression struct { + DynamicData + + Id string `xml:"id"` + DisplayName string `xml:"displayName"` + Negated bool `xml:"negated"` +} + +func init() { + t["ProfileExpression"] = reflect.TypeOf((*ProfileExpression)(nil)).Elem() +} + +type ProfileExpressionMetadata struct { + DynamicData + + ExpressionId ExtendedElementDescription `xml:"expressionId"` + Parameter []ProfileParameterMetadata 
`xml:"parameter,omitempty"` +} + +func init() { + t["ProfileExpressionMetadata"] = reflect.TypeOf((*ProfileExpressionMetadata)(nil)).Elem() +} + +type ProfileMetadata struct { + DynamicData + + Key string `xml:"key"` + ProfileTypeName string `xml:"profileTypeName,omitempty"` + Description *ExtendedDescription `xml:"description,omitempty"` + SortSpec []ProfileMetadataProfileSortSpec `xml:"sortSpec,omitempty"` + ProfileCategory string `xml:"profileCategory,omitempty"` + ProfileComponent string `xml:"profileComponent,omitempty"` + OperationMessages []ProfileMetadataProfileOperationMessage `xml:"operationMessages,omitempty"` +} + +func init() { + t["ProfileMetadata"] = reflect.TypeOf((*ProfileMetadata)(nil)).Elem() +} + +type ProfileMetadataProfileOperationMessage struct { + DynamicData + + OperationName string `xml:"operationName"` + Message LocalizableMessage `xml:"message"` +} + +func init() { + t["ProfileMetadataProfileOperationMessage"] = reflect.TypeOf((*ProfileMetadataProfileOperationMessage)(nil)).Elem() +} + +type ProfileMetadataProfileSortSpec struct { + DynamicData + + PolicyId string `xml:"policyId"` + Parameter string `xml:"parameter"` +} + +func init() { + t["ProfileMetadataProfileSortSpec"] = reflect.TypeOf((*ProfileMetadataProfileSortSpec)(nil)).Elem() +} + +type ProfileParameterMetadata struct { + DynamicData + + Id ExtendedElementDescription `xml:"id"` + Type string `xml:"type"` + Optional bool `xml:"optional"` + DefaultValue AnyType `xml:"defaultValue,omitempty,typeattr"` + Hidden *bool `xml:"hidden"` + SecuritySensitive *bool `xml:"securitySensitive"` + ReadOnly *bool `xml:"readOnly"` + ParameterRelations []ProfileParameterMetadataParameterRelationMetadata `xml:"parameterRelations,omitempty"` +} + +func init() { + t["ProfileParameterMetadata"] = reflect.TypeOf((*ProfileParameterMetadata)(nil)).Elem() +} + +type ProfileParameterMetadataParameterRelationMetadata struct { + DynamicData + + RelationTypes []string `xml:"relationTypes,omitempty"` + Values []AnyType `xml:"values,omitempty,typeattr"` + Path *ProfilePropertyPath `xml:"path,omitempty"` + MinCount int32 `xml:"minCount"` + MaxCount int32 `xml:"maxCount"` +} + +func init() { + t["ProfileParameterMetadataParameterRelationMetadata"] = reflect.TypeOf((*ProfileParameterMetadataParameterRelationMetadata)(nil)).Elem() +} + +type ProfilePolicy struct { + DynamicData + + Id string `xml:"id"` + PolicyOption BasePolicyOption `xml:"policyOption,typeattr"` +} + +func init() { + t["ProfilePolicy"] = reflect.TypeOf((*ProfilePolicy)(nil)).Elem() +} + +type ProfilePolicyMetadata struct { + DynamicData + + Id ExtendedElementDescription `xml:"id"` + PossibleOption []BaseProfilePolicyOptionMetadata `xml:"possibleOption,typeattr"` +} + +func init() { + t["ProfilePolicyMetadata"] = reflect.TypeOf((*ProfilePolicyMetadata)(nil)).Elem() +} + +type ProfilePolicyOptionMetadata struct { + DynamicData + + Id ExtendedElementDescription `xml:"id"` + Parameter []ProfileParameterMetadata `xml:"parameter,omitempty"` +} + +func init() { + t["ProfilePolicyOptionMetadata"] = reflect.TypeOf((*ProfilePolicyOptionMetadata)(nil)).Elem() +} + +type ProfileProfileStructure struct { + DynamicData + + ProfileTypeName string `xml:"profileTypeName"` + Child []ProfileProfileStructureProperty `xml:"child,omitempty"` +} + +func init() { + t["ProfileProfileStructure"] = reflect.TypeOf((*ProfileProfileStructure)(nil)).Elem() +} + +type ProfileProfileStructureProperty struct { + DynamicData + + PropertyName string `xml:"propertyName"` + Array bool `xml:"array"` + Element 
ProfileProfileStructure `xml:"element"` +} + +func init() { + t["ProfileProfileStructureProperty"] = reflect.TypeOf((*ProfileProfileStructureProperty)(nil)).Elem() +} + +type ProfilePropertyPath struct { + DynamicData + + ProfilePath string `xml:"profilePath"` + PolicyId string `xml:"policyId,omitempty"` + ParameterId string `xml:"parameterId,omitempty"` + PolicyOptionId string `xml:"policyOptionId,omitempty"` +} + +func init() { + t["ProfilePropertyPath"] = reflect.TypeOf((*ProfilePropertyPath)(nil)).Elem() +} + +type ProfileReferenceHostChangedEvent struct { + ProfileEvent + + ReferenceHost *ManagedObjectReference `xml:"referenceHost,omitempty"` + ReferenceHostName string `xml:"referenceHostName,omitempty"` + PrevReferenceHostName string `xml:"prevReferenceHostName,omitempty"` +} + +func init() { + t["ProfileReferenceHostChangedEvent"] = reflect.TypeOf((*ProfileReferenceHostChangedEvent)(nil)).Elem() +} + +type ProfileRemovedEvent struct { + ProfileEvent +} + +func init() { + t["ProfileRemovedEvent"] = reflect.TypeOf((*ProfileRemovedEvent)(nil)).Elem() +} + +type ProfileSerializedCreateSpec struct { + ProfileCreateSpec + + ProfileConfigString string `xml:"profileConfigString"` +} + +func init() { + t["ProfileSerializedCreateSpec"] = reflect.TypeOf((*ProfileSerializedCreateSpec)(nil)).Elem() +} + +type ProfileSimpleExpression struct { + ProfileExpression + + ExpressionType string `xml:"expressionType"` + Parameter []KeyAnyValue `xml:"parameter,omitempty"` +} + +func init() { + t["ProfileSimpleExpression"] = reflect.TypeOf((*ProfileSimpleExpression)(nil)).Elem() +} + +type ProfileUpdateFailed struct { + VimFault + + Failure []ProfileUpdateFailedUpdateFailure `xml:"failure"` + Warnings []ProfileUpdateFailedUpdateFailure `xml:"warnings,omitempty"` +} + +func init() { + t["ProfileUpdateFailed"] = reflect.TypeOf((*ProfileUpdateFailed)(nil)).Elem() +} + +type ProfileUpdateFailedFault ProfileUpdateFailed + +func init() { + t["ProfileUpdateFailedFault"] = reflect.TypeOf((*ProfileUpdateFailedFault)(nil)).Elem() +} + +type ProfileUpdateFailedUpdateFailure struct { + DynamicData + + ProfilePath ProfilePropertyPath `xml:"profilePath"` + ErrMsg LocalizableMessage `xml:"errMsg"` +} + +func init() { + t["ProfileUpdateFailedUpdateFailure"] = reflect.TypeOf((*ProfileUpdateFailedUpdateFailure)(nil)).Elem() +} + +type PromoteDisksRequestType struct { + This ManagedObjectReference `xml:"_this"` + Unlink bool `xml:"unlink"` + Disks []VirtualDisk `xml:"disks,omitempty"` +} + +func init() { + t["PromoteDisksRequestType"] = reflect.TypeOf((*PromoteDisksRequestType)(nil)).Elem() +} + +type PromoteDisks_Task PromoteDisksRequestType + +func init() { + t["PromoteDisks_Task"] = reflect.TypeOf((*PromoteDisks_Task)(nil)).Elem() +} + +type PromoteDisks_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type PropertyChange struct { + DynamicData + + Name string `xml:"name"` + Op PropertyChangeOp `xml:"op"` + Val AnyType `xml:"val,typeattr"` +} + +func init() { + t["PropertyChange"] = reflect.TypeOf((*PropertyChange)(nil)).Elem() +} + +type PropertyFilterSpec struct { + DynamicData + + PropSet []PropertySpec `xml:"propSet"` + ObjectSet []ObjectSpec `xml:"objectSet"` + ReportMissingObjectsInResults *bool `xml:"reportMissingObjectsInResults"` +} + +func init() { + t["PropertyFilterSpec"] = reflect.TypeOf((*PropertyFilterSpec)(nil)).Elem() +} + +type PropertyFilterUpdate struct { + DynamicData + + Filter ManagedObjectReference `xml:"filter"` + ObjectSet []ObjectUpdate `xml:"objectSet,omitempty"` 
+ MissingSet []MissingObject `xml:"missingSet,omitempty"` +} + +func init() { + t["PropertyFilterUpdate"] = reflect.TypeOf((*PropertyFilterUpdate)(nil)).Elem() +} + +type PropertySpec struct { + DynamicData + + Type string `xml:"type"` + All *bool `xml:"all"` + PathSet []string `xml:"pathSet,omitempty"` +} + +func init() { + t["PropertySpec"] = reflect.TypeOf((*PropertySpec)(nil)).Elem() +} + +type PutUsbScanCodes PutUsbScanCodesRequestType + +func init() { + t["PutUsbScanCodes"] = reflect.TypeOf((*PutUsbScanCodes)(nil)).Elem() +} + +type PutUsbScanCodesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec UsbScanCodeSpec `xml:"spec"` +} + +func init() { + t["PutUsbScanCodesRequestType"] = reflect.TypeOf((*PutUsbScanCodesRequestType)(nil)).Elem() +} + +type PutUsbScanCodesResponse struct { + Returnval int32 `xml:"returnval"` +} + +type QuarantineModeFault struct { + VmConfigFault + + VmName string `xml:"vmName"` + FaultType string `xml:"faultType"` +} + +func init() { + t["QuarantineModeFault"] = reflect.TypeOf((*QuarantineModeFault)(nil)).Elem() +} + +type QuarantineModeFaultFault QuarantineModeFault + +func init() { + t["QuarantineModeFaultFault"] = reflect.TypeOf((*QuarantineModeFaultFault)(nil)).Elem() +} + +type QueryAnswerFileStatus QueryAnswerFileStatusRequestType + +func init() { + t["QueryAnswerFileStatus"] = reflect.TypeOf((*QueryAnswerFileStatus)(nil)).Elem() +} + +type QueryAnswerFileStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host []ManagedObjectReference `xml:"host"` +} + +func init() { + t["QueryAnswerFileStatusRequestType"] = reflect.TypeOf((*QueryAnswerFileStatusRequestType)(nil)).Elem() +} + +type QueryAnswerFileStatusResponse struct { + Returnval []AnswerFileStatusResult `xml:"returnval,omitempty"` +} + +type QueryAssignedLicenses QueryAssignedLicensesRequestType + +func init() { + t["QueryAssignedLicenses"] = reflect.TypeOf((*QueryAssignedLicenses)(nil)).Elem() +} + +type QueryAssignedLicensesRequestType struct { + This ManagedObjectReference `xml:"_this"` + EntityId string `xml:"entityId,omitempty"` +} + +func init() { + t["QueryAssignedLicensesRequestType"] = reflect.TypeOf((*QueryAssignedLicensesRequestType)(nil)).Elem() +} + +type QueryAssignedLicensesResponse struct { + Returnval []LicenseAssignmentManagerLicenseAssignment `xml:"returnval,omitempty"` +} + +type QueryAvailableDisksForVmfs QueryAvailableDisksForVmfsRequestType + +func init() { + t["QueryAvailableDisksForVmfs"] = reflect.TypeOf((*QueryAvailableDisksForVmfs)(nil)).Elem() +} + +type QueryAvailableDisksForVmfsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` +} + +func init() { + t["QueryAvailableDisksForVmfsRequestType"] = reflect.TypeOf((*QueryAvailableDisksForVmfsRequestType)(nil)).Elem() +} + +type QueryAvailableDisksForVmfsResponse struct { + Returnval []HostScsiDisk `xml:"returnval,omitempty"` +} + +type QueryAvailableDvsSpec QueryAvailableDvsSpecRequestType + +func init() { + t["QueryAvailableDvsSpec"] = reflect.TypeOf((*QueryAvailableDvsSpec)(nil)).Elem() +} + +type QueryAvailableDvsSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + Recommended *bool `xml:"recommended"` +} + +func init() { + t["QueryAvailableDvsSpecRequestType"] = reflect.TypeOf((*QueryAvailableDvsSpecRequestType)(nil)).Elem() +} + +type QueryAvailableDvsSpecResponse struct { + Returnval []DistributedVirtualSwitchProductSpec `xml:"returnval,omitempty"` +} + +type QueryAvailablePartition 
QueryAvailablePartitionRequestType + +func init() { + t["QueryAvailablePartition"] = reflect.TypeOf((*QueryAvailablePartition)(nil)).Elem() +} + +type QueryAvailablePartitionRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryAvailablePartitionRequestType"] = reflect.TypeOf((*QueryAvailablePartitionRequestType)(nil)).Elem() +} + +type QueryAvailablePartitionResponse struct { + Returnval []HostDiagnosticPartition `xml:"returnval,omitempty"` +} + +type QueryAvailablePerfMetric QueryAvailablePerfMetricRequestType + +func init() { + t["QueryAvailablePerfMetric"] = reflect.TypeOf((*QueryAvailablePerfMetric)(nil)).Elem() +} + +type QueryAvailablePerfMetricRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + BeginTime *time.Time `xml:"beginTime"` + EndTime *time.Time `xml:"endTime"` + IntervalId int32 `xml:"intervalId,omitempty"` +} + +func init() { + t["QueryAvailablePerfMetricRequestType"] = reflect.TypeOf((*QueryAvailablePerfMetricRequestType)(nil)).Elem() +} + +type QueryAvailablePerfMetricResponse struct { + Returnval []PerfMetricId `xml:"returnval,omitempty"` +} + +type QueryAvailableSsds QueryAvailableSsdsRequestType + +func init() { + t["QueryAvailableSsds"] = reflect.TypeOf((*QueryAvailableSsds)(nil)).Elem() +} + +type QueryAvailableSsdsRequestType struct { + This ManagedObjectReference `xml:"_this"` + VffsPath string `xml:"vffsPath,omitempty"` +} + +func init() { + t["QueryAvailableSsdsRequestType"] = reflect.TypeOf((*QueryAvailableSsdsRequestType)(nil)).Elem() +} + +type QueryAvailableSsdsResponse struct { + Returnval []HostScsiDisk `xml:"returnval,omitempty"` +} + +type QueryAvailableTimeZones QueryAvailableTimeZonesRequestType + +func init() { + t["QueryAvailableTimeZones"] = reflect.TypeOf((*QueryAvailableTimeZones)(nil)).Elem() +} + +type QueryAvailableTimeZonesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryAvailableTimeZonesRequestType"] = reflect.TypeOf((*QueryAvailableTimeZonesRequestType)(nil)).Elem() +} + +type QueryAvailableTimeZonesResponse struct { + Returnval []HostDateTimeSystemTimeZone `xml:"returnval,omitempty"` +} + +type QueryBootDevices QueryBootDevicesRequestType + +func init() { + t["QueryBootDevices"] = reflect.TypeOf((*QueryBootDevices)(nil)).Elem() +} + +type QueryBootDevicesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryBootDevicesRequestType"] = reflect.TypeOf((*QueryBootDevicesRequestType)(nil)).Elem() +} + +type QueryBootDevicesResponse struct { + Returnval *HostBootDeviceInfo `xml:"returnval,omitempty"` +} + +type QueryBoundVnics QueryBoundVnicsRequestType + +func init() { + t["QueryBoundVnics"] = reflect.TypeOf((*QueryBoundVnics)(nil)).Elem() +} + +type QueryBoundVnicsRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaName string `xml:"iScsiHbaName"` +} + +func init() { + t["QueryBoundVnicsRequestType"] = reflect.TypeOf((*QueryBoundVnicsRequestType)(nil)).Elem() +} + +type QueryBoundVnicsResponse struct { + Returnval []IscsiPortInfo `xml:"returnval,omitempty"` +} + +type QueryCandidateNics QueryCandidateNicsRequestType + +func init() { + t["QueryCandidateNics"] = reflect.TypeOf((*QueryCandidateNics)(nil)).Elem() +} + +type QueryCandidateNicsRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaName string `xml:"iScsiHbaName"` +} + +func init() { + t["QueryCandidateNicsRequestType"] = 
reflect.TypeOf((*QueryCandidateNicsRequestType)(nil)).Elem() +} + +type QueryCandidateNicsResponse struct { + Returnval []IscsiPortInfo `xml:"returnval,omitempty"` +} + +type QueryChangedDiskAreas QueryChangedDiskAreasRequestType + +func init() { + t["QueryChangedDiskAreas"] = reflect.TypeOf((*QueryChangedDiskAreas)(nil)).Elem() +} + +type QueryChangedDiskAreasRequestType struct { + This ManagedObjectReference `xml:"_this"` + Snapshot *ManagedObjectReference `xml:"snapshot,omitempty"` + DeviceKey int32 `xml:"deviceKey"` + StartOffset int64 `xml:"startOffset"` + ChangeId string `xml:"changeId"` +} + +func init() { + t["QueryChangedDiskAreasRequestType"] = reflect.TypeOf((*QueryChangedDiskAreasRequestType)(nil)).Elem() +} + +type QueryChangedDiskAreasResponse struct { + Returnval DiskChangeInfo `xml:"returnval"` +} + +type QueryCmmds QueryCmmdsRequestType + +func init() { + t["QueryCmmds"] = reflect.TypeOf((*QueryCmmds)(nil)).Elem() +} + +type QueryCmmdsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Queries []HostVsanInternalSystemCmmdsQuery `xml:"queries"` +} + +func init() { + t["QueryCmmdsRequestType"] = reflect.TypeOf((*QueryCmmdsRequestType)(nil)).Elem() +} + +type QueryCmmdsResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryCompatibleHostForExistingDvs QueryCompatibleHostForExistingDvsRequestType + +func init() { + t["QueryCompatibleHostForExistingDvs"] = reflect.TypeOf((*QueryCompatibleHostForExistingDvs)(nil)).Elem() +} + +type QueryCompatibleHostForExistingDvsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Container ManagedObjectReference `xml:"container"` + Recursive bool `xml:"recursive"` + Dvs ManagedObjectReference `xml:"dvs"` +} + +func init() { + t["QueryCompatibleHostForExistingDvsRequestType"] = reflect.TypeOf((*QueryCompatibleHostForExistingDvsRequestType)(nil)).Elem() +} + +type QueryCompatibleHostForExistingDvsResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type QueryCompatibleHostForNewDvs QueryCompatibleHostForNewDvsRequestType + +func init() { + t["QueryCompatibleHostForNewDvs"] = reflect.TypeOf((*QueryCompatibleHostForNewDvs)(nil)).Elem() +} + +type QueryCompatibleHostForNewDvsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Container ManagedObjectReference `xml:"container"` + Recursive bool `xml:"recursive"` + SwitchProductSpec *DistributedVirtualSwitchProductSpec `xml:"switchProductSpec,omitempty"` +} + +func init() { + t["QueryCompatibleHostForNewDvsRequestType"] = reflect.TypeOf((*QueryCompatibleHostForNewDvsRequestType)(nil)).Elem() +} + +type QueryCompatibleHostForNewDvsResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type QueryComplianceStatus QueryComplianceStatusRequestType + +func init() { + t["QueryComplianceStatus"] = reflect.TypeOf((*QueryComplianceStatus)(nil)).Elem() +} + +type QueryComplianceStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + Profile []ManagedObjectReference `xml:"profile,omitempty"` + Entity []ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["QueryComplianceStatusRequestType"] = reflect.TypeOf((*QueryComplianceStatusRequestType)(nil)).Elem() +} + +type QueryComplianceStatusResponse struct { + Returnval []ComplianceResult `xml:"returnval,omitempty"` +} + +type QueryConfigOption QueryConfigOptionRequestType + +func init() { + t["QueryConfigOption"] = reflect.TypeOf((*QueryConfigOption)(nil)).Elem() +} + +type QueryConfigOptionDescriptor 
QueryConfigOptionDescriptorRequestType + +func init() { + t["QueryConfigOptionDescriptor"] = reflect.TypeOf((*QueryConfigOptionDescriptor)(nil)).Elem() +} + +type QueryConfigOptionDescriptorRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryConfigOptionDescriptorRequestType"] = reflect.TypeOf((*QueryConfigOptionDescriptorRequestType)(nil)).Elem() +} + +type QueryConfigOptionDescriptorResponse struct { + Returnval []VirtualMachineConfigOptionDescriptor `xml:"returnval,omitempty"` +} + +type QueryConfigOptionEx QueryConfigOptionExRequestType + +func init() { + t["QueryConfigOptionEx"] = reflect.TypeOf((*QueryConfigOptionEx)(nil)).Elem() +} + +type QueryConfigOptionExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec *EnvironmentBrowserConfigOptionQuerySpec `xml:"spec,omitempty"` +} + +func init() { + t["QueryConfigOptionExRequestType"] = reflect.TypeOf((*QueryConfigOptionExRequestType)(nil)).Elem() +} + +type QueryConfigOptionExResponse struct { + Returnval *VirtualMachineConfigOption `xml:"returnval,omitempty"` +} + +type QueryConfigOptionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key string `xml:"key,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["QueryConfigOptionRequestType"] = reflect.TypeOf((*QueryConfigOptionRequestType)(nil)).Elem() +} + +type QueryConfigOptionResponse struct { + Returnval *VirtualMachineConfigOption `xml:"returnval,omitempty"` +} + +type QueryConfigTarget QueryConfigTargetRequestType + +func init() { + t["QueryConfigTarget"] = reflect.TypeOf((*QueryConfigTarget)(nil)).Elem() +} + +type QueryConfigTargetRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["QueryConfigTargetRequestType"] = reflect.TypeOf((*QueryConfigTargetRequestType)(nil)).Elem() +} + +type QueryConfigTargetResponse struct { + Returnval *ConfigTarget `xml:"returnval,omitempty"` +} + +type QueryConfiguredModuleOptionString QueryConfiguredModuleOptionStringRequestType + +func init() { + t["QueryConfiguredModuleOptionString"] = reflect.TypeOf((*QueryConfiguredModuleOptionString)(nil)).Elem() +} + +type QueryConfiguredModuleOptionStringRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` +} + +func init() { + t["QueryConfiguredModuleOptionStringRequestType"] = reflect.TypeOf((*QueryConfiguredModuleOptionStringRequestType)(nil)).Elem() +} + +type QueryConfiguredModuleOptionStringResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryConnectionInfo QueryConnectionInfoRequestType + +func init() { + t["QueryConnectionInfo"] = reflect.TypeOf((*QueryConnectionInfo)(nil)).Elem() +} + +type QueryConnectionInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` + Hostname string `xml:"hostname"` + Port int32 `xml:"port"` + Username string `xml:"username"` + Password string `xml:"password"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` +} + +func init() { + t["QueryConnectionInfoRequestType"] = reflect.TypeOf((*QueryConnectionInfoRequestType)(nil)).Elem() +} + +type QueryConnectionInfoResponse struct { + Returnval HostConnectInfo `xml:"returnval"` +} + +type QueryConnectionInfoViaSpec QueryConnectionInfoViaSpecRequestType + +func init() { + t["QueryConnectionInfoViaSpec"] = reflect.TypeOf((*QueryConnectionInfoViaSpec)(nil)).Elem() +} + +type QueryConnectionInfoViaSpecRequestType struct { + This ManagedObjectReference 
`xml:"_this"` + Spec HostConnectSpec `xml:"spec"` +} + +func init() { + t["QueryConnectionInfoViaSpecRequestType"] = reflect.TypeOf((*QueryConnectionInfoViaSpecRequestType)(nil)).Elem() +} + +type QueryConnectionInfoViaSpecResponse struct { + Returnval HostConnectInfo `xml:"returnval"` +} + +type QueryDatastorePerformanceSummary QueryDatastorePerformanceSummaryRequestType + +func init() { + t["QueryDatastorePerformanceSummary"] = reflect.TypeOf((*QueryDatastorePerformanceSummary)(nil)).Elem() +} + +type QueryDatastorePerformanceSummaryRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["QueryDatastorePerformanceSummaryRequestType"] = reflect.TypeOf((*QueryDatastorePerformanceSummaryRequestType)(nil)).Elem() +} + +type QueryDatastorePerformanceSummaryResponse struct { + Returnval []StoragePerformanceSummary `xml:"returnval,omitempty"` +} + +type QueryDateTime QueryDateTimeRequestType + +func init() { + t["QueryDateTime"] = reflect.TypeOf((*QueryDateTime)(nil)).Elem() +} + +type QueryDateTimeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryDateTimeRequestType"] = reflect.TypeOf((*QueryDateTimeRequestType)(nil)).Elem() +} + +type QueryDateTimeResponse struct { + Returnval time.Time `xml:"returnval"` +} + +type QueryDescriptions QueryDescriptionsRequestType + +func init() { + t["QueryDescriptions"] = reflect.TypeOf((*QueryDescriptions)(nil)).Elem() +} + +type QueryDescriptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["QueryDescriptionsRequestType"] = reflect.TypeOf((*QueryDescriptionsRequestType)(nil)).Elem() +} + +type QueryDescriptionsResponse struct { + Returnval []DiagnosticManagerLogDescriptor `xml:"returnval,omitempty"` +} + +type QueryDisksForVsan QueryDisksForVsanRequestType + +func init() { + t["QueryDisksForVsan"] = reflect.TypeOf((*QueryDisksForVsan)(nil)).Elem() +} + +type QueryDisksForVsanRequestType struct { + This ManagedObjectReference `xml:"_this"` + CanonicalName []string `xml:"canonicalName,omitempty"` +} + +func init() { + t["QueryDisksForVsanRequestType"] = reflect.TypeOf((*QueryDisksForVsanRequestType)(nil)).Elem() +} + +type QueryDisksForVsanResponse struct { + Returnval []VsanHostDiskResult `xml:"returnval,omitempty"` +} + +type QueryDisksUsingFilter QueryDisksUsingFilterRequestType + +func init() { + t["QueryDisksUsingFilter"] = reflect.TypeOf((*QueryDisksUsingFilter)(nil)).Elem() +} + +type QueryDisksUsingFilterRequestType struct { + This ManagedObjectReference `xml:"_this"` + FilterId string `xml:"filterId"` + CompRes ManagedObjectReference `xml:"compRes"` +} + +func init() { + t["QueryDisksUsingFilterRequestType"] = reflect.TypeOf((*QueryDisksUsingFilterRequestType)(nil)).Elem() +} + +type QueryDisksUsingFilterResponse struct { + Returnval []VirtualDiskId `xml:"returnval"` +} + +type QueryDvsByUuid QueryDvsByUuidRequestType + +func init() { + t["QueryDvsByUuid"] = reflect.TypeOf((*QueryDvsByUuid)(nil)).Elem() +} + +type QueryDvsByUuidRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuid string `xml:"uuid"` +} + +func init() { + t["QueryDvsByUuidRequestType"] = reflect.TypeOf((*QueryDvsByUuidRequestType)(nil)).Elem() +} + +type QueryDvsByUuidResponse struct { + Returnval *ManagedObjectReference `xml:"returnval,omitempty"` +} + +type QueryDvsCheckCompatibility QueryDvsCheckCompatibilityRequestType + +func init() { + 
t["QueryDvsCheckCompatibility"] = reflect.TypeOf((*QueryDvsCheckCompatibility)(nil)).Elem() +} + +type QueryDvsCheckCompatibilityRequestType struct { + This ManagedObjectReference `xml:"_this"` + HostContainer DistributedVirtualSwitchManagerHostContainer `xml:"hostContainer"` + DvsProductSpec *DistributedVirtualSwitchManagerDvsProductSpec `xml:"dvsProductSpec,omitempty"` + HostFilterSpec []BaseDistributedVirtualSwitchManagerHostDvsFilterSpec `xml:"hostFilterSpec,omitempty,typeattr"` +} + +func init() { + t["QueryDvsCheckCompatibilityRequestType"] = reflect.TypeOf((*QueryDvsCheckCompatibilityRequestType)(nil)).Elem() +} + +type QueryDvsCheckCompatibilityResponse struct { + Returnval []DistributedVirtualSwitchManagerCompatibilityResult `xml:"returnval,omitempty"` +} + +type QueryDvsCompatibleHostSpec QueryDvsCompatibleHostSpecRequestType + +func init() { + t["QueryDvsCompatibleHostSpec"] = reflect.TypeOf((*QueryDvsCompatibleHostSpec)(nil)).Elem() +} + +type QueryDvsCompatibleHostSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + SwitchProductSpec *DistributedVirtualSwitchProductSpec `xml:"switchProductSpec,omitempty"` +} + +func init() { + t["QueryDvsCompatibleHostSpecRequestType"] = reflect.TypeOf((*QueryDvsCompatibleHostSpecRequestType)(nil)).Elem() +} + +type QueryDvsCompatibleHostSpecResponse struct { + Returnval []DistributedVirtualSwitchHostProductSpec `xml:"returnval,omitempty"` +} + +type QueryDvsConfigTarget QueryDvsConfigTargetRequestType + +func init() { + t["QueryDvsConfigTarget"] = reflect.TypeOf((*QueryDvsConfigTarget)(nil)).Elem() +} + +type QueryDvsConfigTargetRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Dvs *ManagedObjectReference `xml:"dvs,omitempty"` +} + +func init() { + t["QueryDvsConfigTargetRequestType"] = reflect.TypeOf((*QueryDvsConfigTargetRequestType)(nil)).Elem() +} + +type QueryDvsConfigTargetResponse struct { + Returnval DVSManagerDvsConfigTarget `xml:"returnval"` +} + +type QueryDvsFeatureCapability QueryDvsFeatureCapabilityRequestType + +func init() { + t["QueryDvsFeatureCapability"] = reflect.TypeOf((*QueryDvsFeatureCapability)(nil)).Elem() +} + +type QueryDvsFeatureCapabilityRequestType struct { + This ManagedObjectReference `xml:"_this"` + SwitchProductSpec *DistributedVirtualSwitchProductSpec `xml:"switchProductSpec,omitempty"` +} + +func init() { + t["QueryDvsFeatureCapabilityRequestType"] = reflect.TypeOf((*QueryDvsFeatureCapabilityRequestType)(nil)).Elem() +} + +type QueryDvsFeatureCapabilityResponse struct { + Returnval BaseDVSFeatureCapability `xml:"returnval,omitempty,typeattr"` +} + +type QueryEvents QueryEventsRequestType + +func init() { + t["QueryEvents"] = reflect.TypeOf((*QueryEvents)(nil)).Elem() +} + +type QueryEventsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Filter EventFilterSpec `xml:"filter"` +} + +func init() { + t["QueryEventsRequestType"] = reflect.TypeOf((*QueryEventsRequestType)(nil)).Elem() +} + +type QueryEventsResponse struct { + Returnval []BaseEvent `xml:"returnval,omitempty,typeattr"` +} + +type QueryExpressionMetadata QueryExpressionMetadataRequestType + +func init() { + t["QueryExpressionMetadata"] = reflect.TypeOf((*QueryExpressionMetadata)(nil)).Elem() +} + +type QueryExpressionMetadataRequestType struct { + This ManagedObjectReference `xml:"_this"` + ExpressionName []string `xml:"expressionName,omitempty"` + Profile *ManagedObjectReference `xml:"profile,omitempty"` +} + +func init() { + 
t["QueryExpressionMetadataRequestType"] = reflect.TypeOf((*QueryExpressionMetadataRequestType)(nil)).Elem() +} + +type QueryExpressionMetadataResponse struct { + Returnval []ProfileExpressionMetadata `xml:"returnval,omitempty"` +} + +type QueryExtensionIpAllocationUsage QueryExtensionIpAllocationUsageRequestType + +func init() { + t["QueryExtensionIpAllocationUsage"] = reflect.TypeOf((*QueryExtensionIpAllocationUsage)(nil)).Elem() +} + +type QueryExtensionIpAllocationUsageRequestType struct { + This ManagedObjectReference `xml:"_this"` + ExtensionKeys []string `xml:"extensionKeys,omitempty"` +} + +func init() { + t["QueryExtensionIpAllocationUsageRequestType"] = reflect.TypeOf((*QueryExtensionIpAllocationUsageRequestType)(nil)).Elem() +} + +type QueryExtensionIpAllocationUsageResponse struct { + Returnval []ExtensionManagerIpAllocationUsage `xml:"returnval,omitempty"` +} + +type QueryFaultToleranceCompatibility QueryFaultToleranceCompatibilityRequestType + +func init() { + t["QueryFaultToleranceCompatibility"] = reflect.TypeOf((*QueryFaultToleranceCompatibility)(nil)).Elem() +} + +type QueryFaultToleranceCompatibilityEx QueryFaultToleranceCompatibilityExRequestType + +func init() { + t["QueryFaultToleranceCompatibilityEx"] = reflect.TypeOf((*QueryFaultToleranceCompatibilityEx)(nil)).Elem() +} + +type QueryFaultToleranceCompatibilityExRequestType struct { + This ManagedObjectReference `xml:"_this"` + ForLegacyFt *bool `xml:"forLegacyFt"` +} + +func init() { + t["QueryFaultToleranceCompatibilityExRequestType"] = reflect.TypeOf((*QueryFaultToleranceCompatibilityExRequestType)(nil)).Elem() +} + +type QueryFaultToleranceCompatibilityExResponse struct { + Returnval []LocalizedMethodFault `xml:"returnval,omitempty"` +} + +type QueryFaultToleranceCompatibilityRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryFaultToleranceCompatibilityRequestType"] = reflect.TypeOf((*QueryFaultToleranceCompatibilityRequestType)(nil)).Elem() +} + +type QueryFaultToleranceCompatibilityResponse struct { + Returnval []LocalizedMethodFault `xml:"returnval,omitempty"` +} + +type QueryFilterEntities QueryFilterEntitiesRequestType + +func init() { + t["QueryFilterEntities"] = reflect.TypeOf((*QueryFilterEntities)(nil)).Elem() +} + +type QueryFilterEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + FilterId string `xml:"filterId"` +} + +func init() { + t["QueryFilterEntitiesRequestType"] = reflect.TypeOf((*QueryFilterEntitiesRequestType)(nil)).Elem() +} + +type QueryFilterEntitiesResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type QueryFilterInfoIds QueryFilterInfoIdsRequestType + +func init() { + t["QueryFilterInfoIds"] = reflect.TypeOf((*QueryFilterInfoIds)(nil)).Elem() +} + +type QueryFilterInfoIdsRequestType struct { + This ManagedObjectReference `xml:"_this"` + FilterId string `xml:"filterId"` +} + +func init() { + t["QueryFilterInfoIdsRequestType"] = reflect.TypeOf((*QueryFilterInfoIdsRequestType)(nil)).Elem() +} + +type QueryFilterInfoIdsResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type QueryFilterList QueryFilterListRequestType + +func init() { + t["QueryFilterList"] = reflect.TypeOf((*QueryFilterList)(nil)).Elem() +} + +type QueryFilterListRequestType struct { + This ManagedObjectReference `xml:"_this"` + ProviderId string `xml:"providerId"` +} + +func init() { + t["QueryFilterListRequestType"] = reflect.TypeOf((*QueryFilterListRequestType)(nil)).Elem() +} + +type 
QueryFilterListResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type QueryFilterName QueryFilterNameRequestType + +func init() { + t["QueryFilterName"] = reflect.TypeOf((*QueryFilterName)(nil)).Elem() +} + +type QueryFilterNameRequestType struct { + This ManagedObjectReference `xml:"_this"` + FilterId string `xml:"filterId"` +} + +func init() { + t["QueryFilterNameRequestType"] = reflect.TypeOf((*QueryFilterNameRequestType)(nil)).Elem() +} + +type QueryFilterNameResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryFirmwareConfigUploadURL QueryFirmwareConfigUploadURLRequestType + +func init() { + t["QueryFirmwareConfigUploadURL"] = reflect.TypeOf((*QueryFirmwareConfigUploadURL)(nil)).Elem() +} + +type QueryFirmwareConfigUploadURLRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryFirmwareConfigUploadURLRequestType"] = reflect.TypeOf((*QueryFirmwareConfigUploadURLRequestType)(nil)).Elem() +} + +type QueryFirmwareConfigUploadURLResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryHealthUpdateInfos QueryHealthUpdateInfosRequestType + +func init() { + t["QueryHealthUpdateInfos"] = reflect.TypeOf((*QueryHealthUpdateInfos)(nil)).Elem() +} + +type QueryHealthUpdateInfosRequestType struct { + This ManagedObjectReference `xml:"_this"` + ProviderId string `xml:"providerId"` +} + +func init() { + t["QueryHealthUpdateInfosRequestType"] = reflect.TypeOf((*QueryHealthUpdateInfosRequestType)(nil)).Elem() +} + +type QueryHealthUpdateInfosResponse struct { + Returnval []HealthUpdateInfo `xml:"returnval,omitempty"` +} + +type QueryHealthUpdates QueryHealthUpdatesRequestType + +func init() { + t["QueryHealthUpdates"] = reflect.TypeOf((*QueryHealthUpdates)(nil)).Elem() +} + +type QueryHealthUpdatesRequestType struct { + This ManagedObjectReference `xml:"_this"` + ProviderId string `xml:"providerId"` +} + +func init() { + t["QueryHealthUpdatesRequestType"] = reflect.TypeOf((*QueryHealthUpdatesRequestType)(nil)).Elem() +} + +type QueryHealthUpdatesResponse struct { + Returnval []HealthUpdate `xml:"returnval,omitempty"` +} + +type QueryHostConnectionInfo QueryHostConnectionInfoRequestType + +func init() { + t["QueryHostConnectionInfo"] = reflect.TypeOf((*QueryHostConnectionInfo)(nil)).Elem() +} + +type QueryHostConnectionInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryHostConnectionInfoRequestType"] = reflect.TypeOf((*QueryHostConnectionInfoRequestType)(nil)).Elem() +} + +type QueryHostConnectionInfoResponse struct { + Returnval HostConnectInfo `xml:"returnval"` +} + +type QueryHostPatchRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec *HostPatchManagerPatchManagerOperationSpec `xml:"spec,omitempty"` +} + +func init() { + t["QueryHostPatchRequestType"] = reflect.TypeOf((*QueryHostPatchRequestType)(nil)).Elem() +} + +type QueryHostPatch_Task QueryHostPatchRequestType + +func init() { + t["QueryHostPatch_Task"] = reflect.TypeOf((*QueryHostPatch_Task)(nil)).Elem() +} + +type QueryHostPatch_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type QueryHostProfileMetadata QueryHostProfileMetadataRequestType + +func init() { + t["QueryHostProfileMetadata"] = reflect.TypeOf((*QueryHostProfileMetadata)(nil)).Elem() +} + +type QueryHostProfileMetadataRequestType struct { + This ManagedObjectReference `xml:"_this"` + ProfileName []string `xml:"profileName,omitempty"` + Profile *ManagedObjectReference 
`xml:"profile,omitempty"` +} + +func init() { + t["QueryHostProfileMetadataRequestType"] = reflect.TypeOf((*QueryHostProfileMetadataRequestType)(nil)).Elem() +} + +type QueryHostProfileMetadataResponse struct { + Returnval []ProfileMetadata `xml:"returnval,omitempty"` +} + +type QueryHostStatus QueryHostStatusRequestType + +func init() { + t["QueryHostStatus"] = reflect.TypeOf((*QueryHostStatus)(nil)).Elem() +} + +type QueryHostStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryHostStatusRequestType"] = reflect.TypeOf((*QueryHostStatusRequestType)(nil)).Elem() +} + +type QueryHostStatusResponse struct { + Returnval VsanHostClusterStatus `xml:"returnval"` +} + +type QueryIORMConfigOption QueryIORMConfigOptionRequestType + +func init() { + t["QueryIORMConfigOption"] = reflect.TypeOf((*QueryIORMConfigOption)(nil)).Elem() +} + +type QueryIORMConfigOptionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["QueryIORMConfigOptionRequestType"] = reflect.TypeOf((*QueryIORMConfigOptionRequestType)(nil)).Elem() +} + +type QueryIORMConfigOptionResponse struct { + Returnval StorageIORMConfigOption `xml:"returnval"` +} + +type QueryIPAllocations QueryIPAllocationsRequestType + +func init() { + t["QueryIPAllocations"] = reflect.TypeOf((*QueryIPAllocations)(nil)).Elem() +} + +type QueryIPAllocationsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Dc ManagedObjectReference `xml:"dc"` + PoolId int32 `xml:"poolId"` + ExtensionKey string `xml:"extensionKey"` +} + +func init() { + t["QueryIPAllocationsRequestType"] = reflect.TypeOf((*QueryIPAllocationsRequestType)(nil)).Elem() +} + +type QueryIPAllocationsResponse struct { + Returnval []IpPoolManagerIpAllocation `xml:"returnval"` +} + +type QueryIoFilterInfo QueryIoFilterInfoRequestType + +func init() { + t["QueryIoFilterInfo"] = reflect.TypeOf((*QueryIoFilterInfo)(nil)).Elem() +} + +type QueryIoFilterInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` + CompRes ManagedObjectReference `xml:"compRes"` +} + +func init() { + t["QueryIoFilterInfoRequestType"] = reflect.TypeOf((*QueryIoFilterInfoRequestType)(nil)).Elem() +} + +type QueryIoFilterInfoResponse struct { + Returnval []ClusterIoFilterInfo `xml:"returnval,omitempty"` +} + +type QueryIoFilterIssues QueryIoFilterIssuesRequestType + +func init() { + t["QueryIoFilterIssues"] = reflect.TypeOf((*QueryIoFilterIssues)(nil)).Elem() +} + +type QueryIoFilterIssuesRequestType struct { + This ManagedObjectReference `xml:"_this"` + FilterId string `xml:"filterId"` + CompRes ManagedObjectReference `xml:"compRes"` +} + +func init() { + t["QueryIoFilterIssuesRequestType"] = reflect.TypeOf((*QueryIoFilterIssuesRequestType)(nil)).Elem() +} + +type QueryIoFilterIssuesResponse struct { + Returnval IoFilterQueryIssueResult `xml:"returnval"` +} + +type QueryIpPools QueryIpPoolsRequestType + +func init() { + t["QueryIpPools"] = reflect.TypeOf((*QueryIpPools)(nil)).Elem() +} + +type QueryIpPoolsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Dc ManagedObjectReference `xml:"dc"` +} + +func init() { + t["QueryIpPoolsRequestType"] = reflect.TypeOf((*QueryIpPoolsRequestType)(nil)).Elem() +} + +type QueryIpPoolsResponse struct { + Returnval []IpPool `xml:"returnval,omitempty"` +} + +type QueryLicenseSourceAvailability QueryLicenseSourceAvailabilityRequestType + +func init() { + t["QueryLicenseSourceAvailability"] = 
reflect.TypeOf((*QueryLicenseSourceAvailability)(nil)).Elem() +} + +type QueryLicenseSourceAvailabilityRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["QueryLicenseSourceAvailabilityRequestType"] = reflect.TypeOf((*QueryLicenseSourceAvailabilityRequestType)(nil)).Elem() +} + +type QueryLicenseSourceAvailabilityResponse struct { + Returnval []LicenseAvailabilityInfo `xml:"returnval,omitempty"` +} + +type QueryLicenseUsage QueryLicenseUsageRequestType + +func init() { + t["QueryLicenseUsage"] = reflect.TypeOf((*QueryLicenseUsage)(nil)).Elem() +} + +type QueryLicenseUsageRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["QueryLicenseUsageRequestType"] = reflect.TypeOf((*QueryLicenseUsageRequestType)(nil)).Elem() +} + +type QueryLicenseUsageResponse struct { + Returnval LicenseUsageInfo `xml:"returnval"` +} + +type QueryLockdownExceptions QueryLockdownExceptionsRequestType + +func init() { + t["QueryLockdownExceptions"] = reflect.TypeOf((*QueryLockdownExceptions)(nil)).Elem() +} + +type QueryLockdownExceptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryLockdownExceptionsRequestType"] = reflect.TypeOf((*QueryLockdownExceptionsRequestType)(nil)).Elem() +} + +type QueryLockdownExceptionsResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type QueryManagedBy QueryManagedByRequestType + +func init() { + t["QueryManagedBy"] = reflect.TypeOf((*QueryManagedBy)(nil)).Elem() +} + +type QueryManagedByRequestType struct { + This ManagedObjectReference `xml:"_this"` + ExtensionKey string `xml:"extensionKey"` +} + +func init() { + t["QueryManagedByRequestType"] = reflect.TypeOf((*QueryManagedByRequestType)(nil)).Elem() +} + +type QueryManagedByResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type QueryMemoryOverhead QueryMemoryOverheadRequestType + +func init() { + t["QueryMemoryOverhead"] = reflect.TypeOf((*QueryMemoryOverhead)(nil)).Elem() +} + +type QueryMemoryOverheadEx QueryMemoryOverheadExRequestType + +func init() { + t["QueryMemoryOverheadEx"] = reflect.TypeOf((*QueryMemoryOverheadEx)(nil)).Elem() +} + +type QueryMemoryOverheadExRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmConfigInfo VirtualMachineConfigInfo `xml:"vmConfigInfo"` +} + +func init() { + t["QueryMemoryOverheadExRequestType"] = reflect.TypeOf((*QueryMemoryOverheadExRequestType)(nil)).Elem() +} + +type QueryMemoryOverheadExResponse struct { + Returnval int64 `xml:"returnval"` +} + +type QueryMemoryOverheadRequestType struct { + This ManagedObjectReference `xml:"_this"` + MemorySize int64 `xml:"memorySize"` + VideoRamSize int32 `xml:"videoRamSize,omitempty"` + NumVcpus int32 `xml:"numVcpus"` +} + +func init() { + t["QueryMemoryOverheadRequestType"] = reflect.TypeOf((*QueryMemoryOverheadRequestType)(nil)).Elem() +} + +type QueryMemoryOverheadResponse struct { + Returnval int64 `xml:"returnval"` +} + +type QueryMigrationDependencies QueryMigrationDependenciesRequestType + +func init() { + t["QueryMigrationDependencies"] = reflect.TypeOf((*QueryMigrationDependencies)(nil)).Elem() +} + +type QueryMigrationDependenciesRequestType struct { + This ManagedObjectReference `xml:"_this"` + PnicDevice []string `xml:"pnicDevice"` +} + +func init() { + t["QueryMigrationDependenciesRequestType"] = 
reflect.TypeOf((*QueryMigrationDependenciesRequestType)(nil)).Elem() +} + +type QueryMigrationDependenciesResponse struct { + Returnval IscsiMigrationDependency `xml:"returnval"` +} + +type QueryModules QueryModulesRequestType + +func init() { + t["QueryModules"] = reflect.TypeOf((*QueryModules)(nil)).Elem() +} + +type QueryModulesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryModulesRequestType"] = reflect.TypeOf((*QueryModulesRequestType)(nil)).Elem() +} + +type QueryModulesResponse struct { + Returnval []KernelModuleInfo `xml:"returnval,omitempty"` +} + +type QueryMonitoredEntities QueryMonitoredEntitiesRequestType + +func init() { + t["QueryMonitoredEntities"] = reflect.TypeOf((*QueryMonitoredEntities)(nil)).Elem() +} + +type QueryMonitoredEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + ProviderId string `xml:"providerId"` +} + +func init() { + t["QueryMonitoredEntitiesRequestType"] = reflect.TypeOf((*QueryMonitoredEntitiesRequestType)(nil)).Elem() +} + +type QueryMonitoredEntitiesResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type QueryNFSUser QueryNFSUserRequestType + +func init() { + t["QueryNFSUser"] = reflect.TypeOf((*QueryNFSUser)(nil)).Elem() +} + +type QueryNFSUserRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryNFSUserRequestType"] = reflect.TypeOf((*QueryNFSUserRequestType)(nil)).Elem() +} + +type QueryNFSUserResponse struct { + Returnval *HostNasVolumeUserInfo `xml:"returnval,omitempty"` +} + +type QueryNetConfig QueryNetConfigRequestType + +func init() { + t["QueryNetConfig"] = reflect.TypeOf((*QueryNetConfig)(nil)).Elem() +} + +type QueryNetConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + NicType string `xml:"nicType"` +} + +func init() { + t["QueryNetConfigRequestType"] = reflect.TypeOf((*QueryNetConfigRequestType)(nil)).Elem() +} + +type QueryNetConfigResponse struct { + Returnval *VirtualNicManagerNetConfig `xml:"returnval,omitempty"` +} + +type QueryNetworkHint QueryNetworkHintRequestType + +func init() { + t["QueryNetworkHint"] = reflect.TypeOf((*QueryNetworkHint)(nil)).Elem() +} + +type QueryNetworkHintRequestType struct { + This ManagedObjectReference `xml:"_this"` + Device []string `xml:"device,omitempty"` +} + +func init() { + t["QueryNetworkHintRequestType"] = reflect.TypeOf((*QueryNetworkHintRequestType)(nil)).Elem() +} + +type QueryNetworkHintResponse struct { + Returnval []PhysicalNicHintInfo `xml:"returnval,omitempty"` +} + +type QueryObjectsOnPhysicalVsanDisk QueryObjectsOnPhysicalVsanDiskRequestType + +func init() { + t["QueryObjectsOnPhysicalVsanDisk"] = reflect.TypeOf((*QueryObjectsOnPhysicalVsanDisk)(nil)).Elem() +} + +type QueryObjectsOnPhysicalVsanDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Disks []string `xml:"disks"` +} + +func init() { + t["QueryObjectsOnPhysicalVsanDiskRequestType"] = reflect.TypeOf((*QueryObjectsOnPhysicalVsanDiskRequestType)(nil)).Elem() +} + +type QueryObjectsOnPhysicalVsanDiskResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryOptions QueryOptionsRequestType + +func init() { + t["QueryOptions"] = reflect.TypeOf((*QueryOptions)(nil)).Elem() +} + +type QueryOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name,omitempty"` +} + +func init() { + t["QueryOptionsRequestType"] = reflect.TypeOf((*QueryOptionsRequestType)(nil)).Elem() +} + +type QueryOptionsResponse 
struct { + Returnval []BaseOptionValue `xml:"returnval,omitempty,typeattr"` +} + +type QueryPartitionCreateDesc QueryPartitionCreateDescRequestType + +func init() { + t["QueryPartitionCreateDesc"] = reflect.TypeOf((*QueryPartitionCreateDesc)(nil)).Elem() +} + +type QueryPartitionCreateDescRequestType struct { + This ManagedObjectReference `xml:"_this"` + DiskUuid string `xml:"diskUuid"` + DiagnosticType string `xml:"diagnosticType"` +} + +func init() { + t["QueryPartitionCreateDescRequestType"] = reflect.TypeOf((*QueryPartitionCreateDescRequestType)(nil)).Elem() +} + +type QueryPartitionCreateDescResponse struct { + Returnval HostDiagnosticPartitionCreateDescription `xml:"returnval"` +} + +type QueryPartitionCreateOptions QueryPartitionCreateOptionsRequestType + +func init() { + t["QueryPartitionCreateOptions"] = reflect.TypeOf((*QueryPartitionCreateOptions)(nil)).Elem() +} + +type QueryPartitionCreateOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + StorageType string `xml:"storageType"` + DiagnosticType string `xml:"diagnosticType"` +} + +func init() { + t["QueryPartitionCreateOptionsRequestType"] = reflect.TypeOf((*QueryPartitionCreateOptionsRequestType)(nil)).Elem() +} + +type QueryPartitionCreateOptionsResponse struct { + Returnval []HostDiagnosticPartitionCreateOption `xml:"returnval,omitempty"` +} + +type QueryPathSelectionPolicyOptions QueryPathSelectionPolicyOptionsRequestType + +func init() { + t["QueryPathSelectionPolicyOptions"] = reflect.TypeOf((*QueryPathSelectionPolicyOptions)(nil)).Elem() +} + +type QueryPathSelectionPolicyOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryPathSelectionPolicyOptionsRequestType"] = reflect.TypeOf((*QueryPathSelectionPolicyOptionsRequestType)(nil)).Elem() +} + +type QueryPathSelectionPolicyOptionsResponse struct { + Returnval []HostPathSelectionPolicyOption `xml:"returnval,omitempty"` +} + +type QueryPerf QueryPerfRequestType + +func init() { + t["QueryPerf"] = reflect.TypeOf((*QueryPerf)(nil)).Elem() +} + +type QueryPerfComposite QueryPerfCompositeRequestType + +func init() { + t["QueryPerfComposite"] = reflect.TypeOf((*QueryPerfComposite)(nil)).Elem() +} + +type QueryPerfCompositeRequestType struct { + This ManagedObjectReference `xml:"_this"` + QuerySpec PerfQuerySpec `xml:"querySpec"` +} + +func init() { + t["QueryPerfCompositeRequestType"] = reflect.TypeOf((*QueryPerfCompositeRequestType)(nil)).Elem() +} + +type QueryPerfCompositeResponse struct { + Returnval PerfCompositeMetric `xml:"returnval"` +} + +type QueryPerfCounter QueryPerfCounterRequestType + +func init() { + t["QueryPerfCounter"] = reflect.TypeOf((*QueryPerfCounter)(nil)).Elem() +} + +type QueryPerfCounterByLevel QueryPerfCounterByLevelRequestType + +func init() { + t["QueryPerfCounterByLevel"] = reflect.TypeOf((*QueryPerfCounterByLevel)(nil)).Elem() +} + +type QueryPerfCounterByLevelRequestType struct { + This ManagedObjectReference `xml:"_this"` + Level int32 `xml:"level"` +} + +func init() { + t["QueryPerfCounterByLevelRequestType"] = reflect.TypeOf((*QueryPerfCounterByLevelRequestType)(nil)).Elem() +} + +type QueryPerfCounterByLevelResponse struct { + Returnval []PerfCounterInfo `xml:"returnval"` +} + +type QueryPerfCounterRequestType struct { + This ManagedObjectReference `xml:"_this"` + CounterId []int32 `xml:"counterId"` +} + +func init() { + t["QueryPerfCounterRequestType"] = reflect.TypeOf((*QueryPerfCounterRequestType)(nil)).Elem() +} + +type QueryPerfCounterResponse struct { + Returnval 
[]PerfCounterInfo `xml:"returnval,omitempty"` +} + +type QueryPerfProviderSummary QueryPerfProviderSummaryRequestType + +func init() { + t["QueryPerfProviderSummary"] = reflect.TypeOf((*QueryPerfProviderSummary)(nil)).Elem() +} + +type QueryPerfProviderSummaryRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["QueryPerfProviderSummaryRequestType"] = reflect.TypeOf((*QueryPerfProviderSummaryRequestType)(nil)).Elem() +} + +type QueryPerfProviderSummaryResponse struct { + Returnval PerfProviderSummary `xml:"returnval"` +} + +type QueryPerfRequestType struct { + This ManagedObjectReference `xml:"_this"` + QuerySpec []PerfQuerySpec `xml:"querySpec"` +} + +func init() { + t["QueryPerfRequestType"] = reflect.TypeOf((*QueryPerfRequestType)(nil)).Elem() +} + +type QueryPerfResponse struct { + Returnval []BasePerfEntityMetricBase `xml:"returnval,omitempty,typeattr"` +} + +type QueryPhysicalVsanDisks QueryPhysicalVsanDisksRequestType + +func init() { + t["QueryPhysicalVsanDisks"] = reflect.TypeOf((*QueryPhysicalVsanDisks)(nil)).Elem() +} + +type QueryPhysicalVsanDisksRequestType struct { + This ManagedObjectReference `xml:"_this"` + Props []string `xml:"props,omitempty"` +} + +func init() { + t["QueryPhysicalVsanDisksRequestType"] = reflect.TypeOf((*QueryPhysicalVsanDisksRequestType)(nil)).Elem() +} + +type QueryPhysicalVsanDisksResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryPnicStatus QueryPnicStatusRequestType + +func init() { + t["QueryPnicStatus"] = reflect.TypeOf((*QueryPnicStatus)(nil)).Elem() +} + +type QueryPnicStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + PnicDevice string `xml:"pnicDevice"` +} + +func init() { + t["QueryPnicStatusRequestType"] = reflect.TypeOf((*QueryPnicStatusRequestType)(nil)).Elem() +} + +type QueryPnicStatusResponse struct { + Returnval IscsiStatus `xml:"returnval"` +} + +type QueryPolicyMetadata QueryPolicyMetadataRequestType + +func init() { + t["QueryPolicyMetadata"] = reflect.TypeOf((*QueryPolicyMetadata)(nil)).Elem() +} + +type QueryPolicyMetadataRequestType struct { + This ManagedObjectReference `xml:"_this"` + PolicyName []string `xml:"policyName,omitempty"` + Profile *ManagedObjectReference `xml:"profile,omitempty"` +} + +func init() { + t["QueryPolicyMetadataRequestType"] = reflect.TypeOf((*QueryPolicyMetadataRequestType)(nil)).Elem() +} + +type QueryPolicyMetadataResponse struct { + Returnval []ProfilePolicyMetadata `xml:"returnval,omitempty"` +} + +type QueryProfileStructure QueryProfileStructureRequestType + +func init() { + t["QueryProfileStructure"] = reflect.TypeOf((*QueryProfileStructure)(nil)).Elem() +} + +type QueryProfileStructureRequestType struct { + This ManagedObjectReference `xml:"_this"` + Profile *ManagedObjectReference `xml:"profile,omitempty"` +} + +func init() { + t["QueryProfileStructureRequestType"] = reflect.TypeOf((*QueryProfileStructureRequestType)(nil)).Elem() +} + +type QueryProfileStructureResponse struct { + Returnval ProfileProfileStructure `xml:"returnval"` +} + +type QueryProviderList QueryProviderListRequestType + +func init() { + t["QueryProviderList"] = reflect.TypeOf((*QueryProviderList)(nil)).Elem() +} + +type QueryProviderListRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryProviderListRequestType"] = reflect.TypeOf((*QueryProviderListRequestType)(nil)).Elem() +} + +type QueryProviderListResponse struct { + Returnval []string 
`xml:"returnval,omitempty"` +} + +type QueryProviderName QueryProviderNameRequestType + +func init() { + t["QueryProviderName"] = reflect.TypeOf((*QueryProviderName)(nil)).Elem() +} + +type QueryProviderNameRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id string `xml:"id"` +} + +func init() { + t["QueryProviderNameRequestType"] = reflect.TypeOf((*QueryProviderNameRequestType)(nil)).Elem() +} + +type QueryProviderNameResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryResourceConfigOption QueryResourceConfigOptionRequestType + +func init() { + t["QueryResourceConfigOption"] = reflect.TypeOf((*QueryResourceConfigOption)(nil)).Elem() +} + +type QueryResourceConfigOptionRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryResourceConfigOptionRequestType"] = reflect.TypeOf((*QueryResourceConfigOptionRequestType)(nil)).Elem() +} + +type QueryResourceConfigOptionResponse struct { + Returnval ResourceConfigOption `xml:"returnval"` +} + +type QueryServiceList QueryServiceListRequestType + +func init() { + t["QueryServiceList"] = reflect.TypeOf((*QueryServiceList)(nil)).Elem() +} + +type QueryServiceListRequestType struct { + This ManagedObjectReference `xml:"_this"` + ServiceName string `xml:"serviceName,omitempty"` + Location []string `xml:"location,omitempty"` +} + +func init() { + t["QueryServiceListRequestType"] = reflect.TypeOf((*QueryServiceListRequestType)(nil)).Elem() +} + +type QueryServiceListResponse struct { + Returnval []ServiceManagerServiceInfo `xml:"returnval,omitempty"` +} + +type QueryStorageArrayTypePolicyOptions QueryStorageArrayTypePolicyOptionsRequestType + +func init() { + t["QueryStorageArrayTypePolicyOptions"] = reflect.TypeOf((*QueryStorageArrayTypePolicyOptions)(nil)).Elem() +} + +type QueryStorageArrayTypePolicyOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryStorageArrayTypePolicyOptionsRequestType"] = reflect.TypeOf((*QueryStorageArrayTypePolicyOptionsRequestType)(nil)).Elem() +} + +type QueryStorageArrayTypePolicyOptionsResponse struct { + Returnval []HostStorageArrayTypePolicyOption `xml:"returnval,omitempty"` +} + +type QuerySupportedFeatures QuerySupportedFeaturesRequestType + +func init() { + t["QuerySupportedFeatures"] = reflect.TypeOf((*QuerySupportedFeatures)(nil)).Elem() +} + +type QuerySupportedFeaturesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["QuerySupportedFeaturesRequestType"] = reflect.TypeOf((*QuerySupportedFeaturesRequestType)(nil)).Elem() +} + +type QuerySupportedFeaturesResponse struct { + Returnval []LicenseFeatureInfo `xml:"returnval,omitempty"` +} + +type QuerySyncingVsanObjects QuerySyncingVsanObjectsRequestType + +func init() { + t["QuerySyncingVsanObjects"] = reflect.TypeOf((*QuerySyncingVsanObjects)(nil)).Elem() +} + +type QuerySyncingVsanObjectsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuids []string `xml:"uuids,omitempty"` +} + +func init() { + t["QuerySyncingVsanObjectsRequestType"] = reflect.TypeOf((*QuerySyncingVsanObjectsRequestType)(nil)).Elem() +} + +type QuerySyncingVsanObjectsResponse struct { + Returnval string `xml:"returnval"` +} + +type QuerySystemUsers QuerySystemUsersRequestType + +func init() { + t["QuerySystemUsers"] = reflect.TypeOf((*QuerySystemUsers)(nil)).Elem() +} + +type QuerySystemUsersRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func 
init() { + t["QuerySystemUsersRequestType"] = reflect.TypeOf((*QuerySystemUsersRequestType)(nil)).Elem() +} + +type QuerySystemUsersResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type QueryTargetCapabilities QueryTargetCapabilitiesRequestType + +func init() { + t["QueryTargetCapabilities"] = reflect.TypeOf((*QueryTargetCapabilities)(nil)).Elem() +} + +type QueryTargetCapabilitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["QueryTargetCapabilitiesRequestType"] = reflect.TypeOf((*QueryTargetCapabilitiesRequestType)(nil)).Elem() +} + +type QueryTargetCapabilitiesResponse struct { + Returnval *HostCapability `xml:"returnval,omitempty"` +} + +type QueryTpmAttestationReport QueryTpmAttestationReportRequestType + +func init() { + t["QueryTpmAttestationReport"] = reflect.TypeOf((*QueryTpmAttestationReport)(nil)).Elem() +} + +type QueryTpmAttestationReportRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryTpmAttestationReportRequestType"] = reflect.TypeOf((*QueryTpmAttestationReportRequestType)(nil)).Elem() +} + +type QueryTpmAttestationReportResponse struct { + Returnval *HostTpmAttestationReport `xml:"returnval,omitempty"` +} + +type QueryUnmonitoredHosts QueryUnmonitoredHostsRequestType + +func init() { + t["QueryUnmonitoredHosts"] = reflect.TypeOf((*QueryUnmonitoredHosts)(nil)).Elem() +} + +type QueryUnmonitoredHostsRequestType struct { + This ManagedObjectReference `xml:"_this"` + ProviderId string `xml:"providerId"` + Cluster ManagedObjectReference `xml:"cluster"` +} + +func init() { + t["QueryUnmonitoredHostsRequestType"] = reflect.TypeOf((*QueryUnmonitoredHostsRequestType)(nil)).Elem() +} + +type QueryUnmonitoredHostsResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type QueryUnownedFiles QueryUnownedFilesRequestType + +func init() { + t["QueryUnownedFiles"] = reflect.TypeOf((*QueryUnownedFiles)(nil)).Elem() +} + +type QueryUnownedFilesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryUnownedFilesRequestType"] = reflect.TypeOf((*QueryUnownedFilesRequestType)(nil)).Elem() +} + +type QueryUnownedFilesResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type QueryUnresolvedVmfsVolume QueryUnresolvedVmfsVolumeRequestType + +func init() { + t["QueryUnresolvedVmfsVolume"] = reflect.TypeOf((*QueryUnresolvedVmfsVolume)(nil)).Elem() +} + +type QueryUnresolvedVmfsVolumeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryUnresolvedVmfsVolumeRequestType"] = reflect.TypeOf((*QueryUnresolvedVmfsVolumeRequestType)(nil)).Elem() +} + +type QueryUnresolvedVmfsVolumeResponse struct { + Returnval []HostUnresolvedVmfsVolume `xml:"returnval,omitempty"` +} + +type QueryUnresolvedVmfsVolumes QueryUnresolvedVmfsVolumesRequestType + +func init() { + t["QueryUnresolvedVmfsVolumes"] = reflect.TypeOf((*QueryUnresolvedVmfsVolumes)(nil)).Elem() +} + +type QueryUnresolvedVmfsVolumesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryUnresolvedVmfsVolumesRequestType"] = reflect.TypeOf((*QueryUnresolvedVmfsVolumesRequestType)(nil)).Elem() +} + +type QueryUnresolvedVmfsVolumesResponse struct { + Returnval []HostUnresolvedVmfsVolume `xml:"returnval,omitempty"` +} + +type QueryUsedVlanIdInDvs QueryUsedVlanIdInDvsRequestType + +func init() { + t["QueryUsedVlanIdInDvs"] = 
reflect.TypeOf((*QueryUsedVlanIdInDvs)(nil)).Elem() +} + +type QueryUsedVlanIdInDvsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryUsedVlanIdInDvsRequestType"] = reflect.TypeOf((*QueryUsedVlanIdInDvsRequestType)(nil)).Elem() +} + +type QueryUsedVlanIdInDvsResponse struct { + Returnval []int32 `xml:"returnval,omitempty"` +} + +type QueryVMotionCompatibility QueryVMotionCompatibilityRequestType + +func init() { + t["QueryVMotionCompatibility"] = reflect.TypeOf((*QueryVMotionCompatibility)(nil)).Elem() +} + +type QueryVMotionCompatibilityExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm []ManagedObjectReference `xml:"vm"` + Host []ManagedObjectReference `xml:"host"` +} + +func init() { + t["QueryVMotionCompatibilityExRequestType"] = reflect.TypeOf((*QueryVMotionCompatibilityExRequestType)(nil)).Elem() +} + +type QueryVMotionCompatibilityEx_Task QueryVMotionCompatibilityExRequestType + +func init() { + t["QueryVMotionCompatibilityEx_Task"] = reflect.TypeOf((*QueryVMotionCompatibilityEx_Task)(nil)).Elem() +} + +type QueryVMotionCompatibilityEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type QueryVMotionCompatibilityRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Host []ManagedObjectReference `xml:"host"` + Compatibility []string `xml:"compatibility,omitempty"` +} + +func init() { + t["QueryVMotionCompatibilityRequestType"] = reflect.TypeOf((*QueryVMotionCompatibilityRequestType)(nil)).Elem() +} + +type QueryVMotionCompatibilityResponse struct { + Returnval []HostVMotionCompatibility `xml:"returnval,omitempty"` +} + +type QueryVirtualDiskFragmentation QueryVirtualDiskFragmentationRequestType + +func init() { + t["QueryVirtualDiskFragmentation"] = reflect.TypeOf((*QueryVirtualDiskFragmentation)(nil)).Elem() +} + +type QueryVirtualDiskFragmentationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` +} + +func init() { + t["QueryVirtualDiskFragmentationRequestType"] = reflect.TypeOf((*QueryVirtualDiskFragmentationRequestType)(nil)).Elem() +} + +type QueryVirtualDiskFragmentationResponse struct { + Returnval int32 `xml:"returnval"` +} + +type QueryVirtualDiskGeometry QueryVirtualDiskGeometryRequestType + +func init() { + t["QueryVirtualDiskGeometry"] = reflect.TypeOf((*QueryVirtualDiskGeometry)(nil)).Elem() +} + +type QueryVirtualDiskGeometryRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` +} + +func init() { + t["QueryVirtualDiskGeometryRequestType"] = reflect.TypeOf((*QueryVirtualDiskGeometryRequestType)(nil)).Elem() +} + +type QueryVirtualDiskGeometryResponse struct { + Returnval HostDiskDimensionsChs `xml:"returnval"` +} + +type QueryVirtualDiskUuid QueryVirtualDiskUuidRequestType + +func init() { + t["QueryVirtualDiskUuid"] = reflect.TypeOf((*QueryVirtualDiskUuid)(nil)).Elem() +} + +type QueryVirtualDiskUuidRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` +} + +func init() { + t["QueryVirtualDiskUuidRequestType"] = reflect.TypeOf((*QueryVirtualDiskUuidRequestType)(nil)).Elem() +} + +type QueryVirtualDiskUuidResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryVmfsConfigOption 
QueryVmfsConfigOptionRequestType + +func init() { + t["QueryVmfsConfigOption"] = reflect.TypeOf((*QueryVmfsConfigOption)(nil)).Elem() +} + +type QueryVmfsConfigOptionRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryVmfsConfigOptionRequestType"] = reflect.TypeOf((*QueryVmfsConfigOptionRequestType)(nil)).Elem() +} + +type QueryVmfsConfigOptionResponse struct { + Returnval []VmfsConfigOption `xml:"returnval,omitempty"` +} + +type QueryVmfsDatastoreCreateOptions QueryVmfsDatastoreCreateOptionsRequestType + +func init() { + t["QueryVmfsDatastoreCreateOptions"] = reflect.TypeOf((*QueryVmfsDatastoreCreateOptions)(nil)).Elem() +} + +type QueryVmfsDatastoreCreateOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + DevicePath string `xml:"devicePath"` + VmfsMajorVersion int32 `xml:"vmfsMajorVersion,omitempty"` +} + +func init() { + t["QueryVmfsDatastoreCreateOptionsRequestType"] = reflect.TypeOf((*QueryVmfsDatastoreCreateOptionsRequestType)(nil)).Elem() +} + +type QueryVmfsDatastoreCreateOptionsResponse struct { + Returnval []VmfsDatastoreOption `xml:"returnval,omitempty"` +} + +type QueryVmfsDatastoreExpandOptions QueryVmfsDatastoreExpandOptionsRequestType + +func init() { + t["QueryVmfsDatastoreExpandOptions"] = reflect.TypeOf((*QueryVmfsDatastoreExpandOptions)(nil)).Elem() +} + +type QueryVmfsDatastoreExpandOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["QueryVmfsDatastoreExpandOptionsRequestType"] = reflect.TypeOf((*QueryVmfsDatastoreExpandOptionsRequestType)(nil)).Elem() +} + +type QueryVmfsDatastoreExpandOptionsResponse struct { + Returnval []VmfsDatastoreOption `xml:"returnval,omitempty"` +} + +type QueryVmfsDatastoreExtendOptions QueryVmfsDatastoreExtendOptionsRequestType + +func init() { + t["QueryVmfsDatastoreExtendOptions"] = reflect.TypeOf((*QueryVmfsDatastoreExtendOptions)(nil)).Elem() +} + +type QueryVmfsDatastoreExtendOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` + DevicePath string `xml:"devicePath"` + SuppressExpandCandidates *bool `xml:"suppressExpandCandidates"` +} + +func init() { + t["QueryVmfsDatastoreExtendOptionsRequestType"] = reflect.TypeOf((*QueryVmfsDatastoreExtendOptionsRequestType)(nil)).Elem() +} + +type QueryVmfsDatastoreExtendOptionsResponse struct { + Returnval []VmfsDatastoreOption `xml:"returnval,omitempty"` +} + +type QueryVnicStatus QueryVnicStatusRequestType + +func init() { + t["QueryVnicStatus"] = reflect.TypeOf((*QueryVnicStatus)(nil)).Elem() +} + +type QueryVnicStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + VnicDevice string `xml:"vnicDevice"` +} + +func init() { + t["QueryVnicStatusRequestType"] = reflect.TypeOf((*QueryVnicStatusRequestType)(nil)).Elem() +} + +type QueryVnicStatusResponse struct { + Returnval IscsiStatus `xml:"returnval"` +} + +type QueryVsanObjectUuidsByFilter QueryVsanObjectUuidsByFilterRequestType + +func init() { + t["QueryVsanObjectUuidsByFilter"] = reflect.TypeOf((*QueryVsanObjectUuidsByFilter)(nil)).Elem() +} + +type QueryVsanObjectUuidsByFilterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuids []string `xml:"uuids,omitempty"` + Limit *int32 `xml:"limit"` + Version int32 `xml:"version,omitempty"` +} + +func init() { + t["QueryVsanObjectUuidsByFilterRequestType"] = reflect.TypeOf((*QueryVsanObjectUuidsByFilterRequestType)(nil)).Elem() +} + 
+type QueryVsanObjectUuidsByFilterResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type QueryVsanObjects QueryVsanObjectsRequestType + +func init() { + t["QueryVsanObjects"] = reflect.TypeOf((*QueryVsanObjects)(nil)).Elem() +} + +type QueryVsanObjectsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuids []string `xml:"uuids,omitempty"` +} + +func init() { + t["QueryVsanObjectsRequestType"] = reflect.TypeOf((*QueryVsanObjectsRequestType)(nil)).Elem() +} + +type QueryVsanObjectsResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryVsanStatistics QueryVsanStatisticsRequestType + +func init() { + t["QueryVsanStatistics"] = reflect.TypeOf((*QueryVsanStatistics)(nil)).Elem() +} + +type QueryVsanStatisticsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Labels []string `xml:"labels"` +} + +func init() { + t["QueryVsanStatisticsRequestType"] = reflect.TypeOf((*QueryVsanStatisticsRequestType)(nil)).Elem() +} + +type QueryVsanStatisticsResponse struct { + Returnval string `xml:"returnval"` +} + +type QueryVsanUpgradeStatus QueryVsanUpgradeStatusRequestType + +func init() { + t["QueryVsanUpgradeStatus"] = reflect.TypeOf((*QueryVsanUpgradeStatus)(nil)).Elem() +} + +type QueryVsanUpgradeStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster ManagedObjectReference `xml:"cluster"` +} + +func init() { + t["QueryVsanUpgradeStatusRequestType"] = reflect.TypeOf((*QueryVsanUpgradeStatusRequestType)(nil)).Elem() +} + +type QueryVsanUpgradeStatusResponse struct { + Returnval VsanUpgradeSystemUpgradeStatus `xml:"returnval"` +} + +type QuestionPending struct { + InvalidState + + Text string `xml:"text"` +} + +func init() { + t["QuestionPending"] = reflect.TypeOf((*QuestionPending)(nil)).Elem() +} + +type QuestionPendingFault QuestionPending + +func init() { + t["QuestionPendingFault"] = reflect.TypeOf((*QuestionPendingFault)(nil)).Elem() +} + +type QuiesceDatastoreIOForHAFailed struct { + ResourceInUse + + Host ManagedObjectReference `xml:"host"` + HostName string `xml:"hostName"` + Ds ManagedObjectReference `xml:"ds"` + DsName string `xml:"dsName"` +} + +func init() { + t["QuiesceDatastoreIOForHAFailed"] = reflect.TypeOf((*QuiesceDatastoreIOForHAFailed)(nil)).Elem() +} + +type QuiesceDatastoreIOForHAFailedFault QuiesceDatastoreIOForHAFailed + +func init() { + t["QuiesceDatastoreIOForHAFailedFault"] = reflect.TypeOf((*QuiesceDatastoreIOForHAFailedFault)(nil)).Elem() +} + +type RDMConversionNotSupported struct { + MigrationFault + + Device string `xml:"device"` +} + +func init() { + t["RDMConversionNotSupported"] = reflect.TypeOf((*RDMConversionNotSupported)(nil)).Elem() +} + +type RDMConversionNotSupportedFault RDMConversionNotSupported + +func init() { + t["RDMConversionNotSupportedFault"] = reflect.TypeOf((*RDMConversionNotSupportedFault)(nil)).Elem() +} + +type RDMNotPreserved struct { + MigrationFault + + Device string `xml:"device"` +} + +func init() { + t["RDMNotPreserved"] = reflect.TypeOf((*RDMNotPreserved)(nil)).Elem() +} + +type RDMNotPreservedFault RDMNotPreserved + +func init() { + t["RDMNotPreservedFault"] = reflect.TypeOf((*RDMNotPreservedFault)(nil)).Elem() +} + +type RDMNotSupported struct { + DeviceNotSupported +} + +func init() { + t["RDMNotSupported"] = reflect.TypeOf((*RDMNotSupported)(nil)).Elem() +} + +type RDMNotSupportedFault BaseRDMNotSupported + +func init() { + t["RDMNotSupportedFault"] = reflect.TypeOf((*RDMNotSupportedFault)(nil)).Elem() +} + +type RDMNotSupportedOnDatastore struct 
{ + VmConfigFault + + Device string `xml:"device"` + Datastore ManagedObjectReference `xml:"datastore"` + DatastoreName string `xml:"datastoreName"` +} + +func init() { + t["RDMNotSupportedOnDatastore"] = reflect.TypeOf((*RDMNotSupportedOnDatastore)(nil)).Elem() +} + +type RDMNotSupportedOnDatastoreFault RDMNotSupportedOnDatastore + +func init() { + t["RDMNotSupportedOnDatastoreFault"] = reflect.TypeOf((*RDMNotSupportedOnDatastoreFault)(nil)).Elem() +} + +type RDMPointsToInaccessibleDisk struct { + CannotAccessVmDisk +} + +func init() { + t["RDMPointsToInaccessibleDisk"] = reflect.TypeOf((*RDMPointsToInaccessibleDisk)(nil)).Elem() +} + +type RDMPointsToInaccessibleDiskFault RDMPointsToInaccessibleDisk + +func init() { + t["RDMPointsToInaccessibleDiskFault"] = reflect.TypeOf((*RDMPointsToInaccessibleDiskFault)(nil)).Elem() +} + +type RawDiskNotSupported struct { + DeviceNotSupported +} + +func init() { + t["RawDiskNotSupported"] = reflect.TypeOf((*RawDiskNotSupported)(nil)).Elem() +} + +type RawDiskNotSupportedFault RawDiskNotSupported + +func init() { + t["RawDiskNotSupportedFault"] = reflect.TypeOf((*RawDiskNotSupportedFault)(nil)).Elem() +} + +type ReadEnvironmentVariableInGuest ReadEnvironmentVariableInGuestRequestType + +func init() { + t["ReadEnvironmentVariableInGuest"] = reflect.TypeOf((*ReadEnvironmentVariableInGuest)(nil)).Elem() +} + +type ReadEnvironmentVariableInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Names []string `xml:"names,omitempty"` +} + +func init() { + t["ReadEnvironmentVariableInGuestRequestType"] = reflect.TypeOf((*ReadEnvironmentVariableInGuestRequestType)(nil)).Elem() +} + +type ReadEnvironmentVariableInGuestResponse struct { + Returnval []string `xml:"returnval,omitempty"` +} + +type ReadHostResourcePoolTreeFailed struct { + HostConnectFault +} + +func init() { + t["ReadHostResourcePoolTreeFailed"] = reflect.TypeOf((*ReadHostResourcePoolTreeFailed)(nil)).Elem() +} + +type ReadHostResourcePoolTreeFailedFault ReadHostResourcePoolTreeFailed + +func init() { + t["ReadHostResourcePoolTreeFailedFault"] = reflect.TypeOf((*ReadHostResourcePoolTreeFailedFault)(nil)).Elem() +} + +type ReadNextEvents ReadNextEventsRequestType + +func init() { + t["ReadNextEvents"] = reflect.TypeOf((*ReadNextEvents)(nil)).Elem() +} + +type ReadNextEventsRequestType struct { + This ManagedObjectReference `xml:"_this"` + MaxCount int32 `xml:"maxCount"` +} + +func init() { + t["ReadNextEventsRequestType"] = reflect.TypeOf((*ReadNextEventsRequestType)(nil)).Elem() +} + +type ReadNextEventsResponse struct { + Returnval []BaseEvent `xml:"returnval,omitempty,typeattr"` +} + +type ReadNextTasks ReadNextTasksRequestType + +func init() { + t["ReadNextTasks"] = reflect.TypeOf((*ReadNextTasks)(nil)).Elem() +} + +type ReadNextTasksRequestType struct { + This ManagedObjectReference `xml:"_this"` + MaxCount int32 `xml:"maxCount"` +} + +func init() { + t["ReadNextTasksRequestType"] = reflect.TypeOf((*ReadNextTasksRequestType)(nil)).Elem() +} + +type ReadNextTasksResponse struct { + Returnval []TaskInfo `xml:"returnval,omitempty"` +} + +type ReadOnlyDisksWithLegacyDestination struct { + MigrationFault + + RoDiskCount int32 `xml:"roDiskCount"` + TimeoutDanger bool `xml:"timeoutDanger"` +} + +func init() { + t["ReadOnlyDisksWithLegacyDestination"] = reflect.TypeOf((*ReadOnlyDisksWithLegacyDestination)(nil)).Elem() +} + +type ReadOnlyDisksWithLegacyDestinationFault 
ReadOnlyDisksWithLegacyDestination + +func init() { + t["ReadOnlyDisksWithLegacyDestinationFault"] = reflect.TypeOf((*ReadOnlyDisksWithLegacyDestinationFault)(nil)).Elem() +} + +type ReadPreviousEvents ReadPreviousEventsRequestType + +func init() { + t["ReadPreviousEvents"] = reflect.TypeOf((*ReadPreviousEvents)(nil)).Elem() +} + +type ReadPreviousEventsRequestType struct { + This ManagedObjectReference `xml:"_this"` + MaxCount int32 `xml:"maxCount"` +} + +func init() { + t["ReadPreviousEventsRequestType"] = reflect.TypeOf((*ReadPreviousEventsRequestType)(nil)).Elem() +} + +type ReadPreviousEventsResponse struct { + Returnval []BaseEvent `xml:"returnval,omitempty,typeattr"` +} + +type ReadPreviousTasks ReadPreviousTasksRequestType + +func init() { + t["ReadPreviousTasks"] = reflect.TypeOf((*ReadPreviousTasks)(nil)).Elem() +} + +type ReadPreviousTasksRequestType struct { + This ManagedObjectReference `xml:"_this"` + MaxCount int32 `xml:"maxCount"` +} + +func init() { + t["ReadPreviousTasksRequestType"] = reflect.TypeOf((*ReadPreviousTasksRequestType)(nil)).Elem() +} + +type ReadPreviousTasksResponse struct { + Returnval []TaskInfo `xml:"returnval,omitempty"` +} + +type RebootGuest RebootGuestRequestType + +func init() { + t["RebootGuest"] = reflect.TypeOf((*RebootGuest)(nil)).Elem() +} + +type RebootGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RebootGuestRequestType"] = reflect.TypeOf((*RebootGuestRequestType)(nil)).Elem() +} + +type RebootGuestResponse struct { +} + +type RebootHostRequestType struct { + This ManagedObjectReference `xml:"_this"` + Force bool `xml:"force"` +} + +func init() { + t["RebootHostRequestType"] = reflect.TypeOf((*RebootHostRequestType)(nil)).Elem() +} + +type RebootHost_Task RebootHostRequestType + +func init() { + t["RebootHost_Task"] = reflect.TypeOf((*RebootHost_Task)(nil)).Elem() +} + +type RebootHost_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RebootRequired struct { + VimFault + + Patch string `xml:"patch,omitempty"` +} + +func init() { + t["RebootRequired"] = reflect.TypeOf((*RebootRequired)(nil)).Elem() +} + +type RebootRequiredFault RebootRequired + +func init() { + t["RebootRequiredFault"] = reflect.TypeOf((*RebootRequiredFault)(nil)).Elem() +} + +type RecommendDatastores RecommendDatastoresRequestType + +func init() { + t["RecommendDatastores"] = reflect.TypeOf((*RecommendDatastores)(nil)).Elem() +} + +type RecommendDatastoresRequestType struct { + This ManagedObjectReference `xml:"_this"` + StorageSpec StoragePlacementSpec `xml:"storageSpec"` +} + +func init() { + t["RecommendDatastoresRequestType"] = reflect.TypeOf((*RecommendDatastoresRequestType)(nil)).Elem() +} + +type RecommendDatastoresResponse struct { + Returnval StoragePlacementResult `xml:"returnval"` +} + +type RecommendHostsForVm RecommendHostsForVmRequestType + +func init() { + t["RecommendHostsForVm"] = reflect.TypeOf((*RecommendHostsForVm)(nil)).Elem() +} + +type RecommendHostsForVmRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Pool *ManagedObjectReference `xml:"pool,omitempty"` +} + +func init() { + t["RecommendHostsForVmRequestType"] = reflect.TypeOf((*RecommendHostsForVmRequestType)(nil)).Elem() +} + +type RecommendHostsForVmResponse struct { + Returnval []ClusterHostRecommendation `xml:"returnval,omitempty"` +} + +type RecommissionVsanNodeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + 
t["RecommissionVsanNodeRequestType"] = reflect.TypeOf((*RecommissionVsanNodeRequestType)(nil)).Elem() +} + +type RecommissionVsanNode_Task RecommissionVsanNodeRequestType + +func init() { + t["RecommissionVsanNode_Task"] = reflect.TypeOf((*RecommissionVsanNode_Task)(nil)).Elem() +} + +type RecommissionVsanNode_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconcileDatastoreInventoryRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["ReconcileDatastoreInventoryRequestType"] = reflect.TypeOf((*ReconcileDatastoreInventoryRequestType)(nil)).Elem() +} + +type ReconcileDatastoreInventory_Task ReconcileDatastoreInventoryRequestType + +func init() { + t["ReconcileDatastoreInventory_Task"] = reflect.TypeOf((*ReconcileDatastoreInventory_Task)(nil)).Elem() +} + +type ReconcileDatastoreInventory_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec VirtualMachineConfigSpec `xml:"spec"` +} + +func init() { + t["ReconfigVMRequestType"] = reflect.TypeOf((*ReconfigVMRequestType)(nil)).Elem() +} + +type ReconfigVM_Task ReconfigVMRequestType + +func init() { + t["ReconfigVM_Task"] = reflect.TypeOf((*ReconfigVM_Task)(nil)).Elem() +} + +type ReconfigVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigurationSatisfiable ReconfigurationSatisfiableRequestType + +func init() { + t["ReconfigurationSatisfiable"] = reflect.TypeOf((*ReconfigurationSatisfiable)(nil)).Elem() +} + +type ReconfigurationSatisfiableRequestType struct { + This ManagedObjectReference `xml:"_this"` + Pcbs []VsanPolicyChangeBatch `xml:"pcbs"` + IgnoreSatisfiability *bool `xml:"ignoreSatisfiability"` +} + +func init() { + t["ReconfigurationSatisfiableRequestType"] = reflect.TypeOf((*ReconfigurationSatisfiableRequestType)(nil)).Elem() +} + +type ReconfigurationSatisfiableResponse struct { + Returnval []VsanPolicySatisfiability `xml:"returnval"` +} + +type ReconfigureAlarm ReconfigureAlarmRequestType + +func init() { + t["ReconfigureAlarm"] = reflect.TypeOf((*ReconfigureAlarm)(nil)).Elem() +} + +type ReconfigureAlarmRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec BaseAlarmSpec `xml:"spec,typeattr"` +} + +func init() { + t["ReconfigureAlarmRequestType"] = reflect.TypeOf((*ReconfigureAlarmRequestType)(nil)).Elem() +} + +type ReconfigureAlarmResponse struct { +} + +type ReconfigureAutostart ReconfigureAutostartRequestType + +func init() { + t["ReconfigureAutostart"] = reflect.TypeOf((*ReconfigureAutostart)(nil)).Elem() +} + +type ReconfigureAutostartRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostAutoStartManagerConfig `xml:"spec"` +} + +func init() { + t["ReconfigureAutostartRequestType"] = reflect.TypeOf((*ReconfigureAutostartRequestType)(nil)).Elem() +} + +type ReconfigureAutostartResponse struct { +} + +type ReconfigureClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec ClusterConfigSpec `xml:"spec"` + Modify bool `xml:"modify"` +} + +func init() { + t["ReconfigureClusterRequestType"] = reflect.TypeOf((*ReconfigureClusterRequestType)(nil)).Elem() +} + +type ReconfigureCluster_Task ReconfigureClusterRequestType + +func init() { + t["ReconfigureCluster_Task"] = reflect.TypeOf((*ReconfigureCluster_Task)(nil)).Elem() +} + +type ReconfigureCluster_TaskResponse struct { + Returnval 
ManagedObjectReference `xml:"returnval"` +} + +type ReconfigureComputeResourceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec BaseComputeResourceConfigSpec `xml:"spec,typeattr"` + Modify bool `xml:"modify"` +} + +func init() { + t["ReconfigureComputeResourceRequestType"] = reflect.TypeOf((*ReconfigureComputeResourceRequestType)(nil)).Elem() +} + +type ReconfigureComputeResource_Task ReconfigureComputeResourceRequestType + +func init() { + t["ReconfigureComputeResource_Task"] = reflect.TypeOf((*ReconfigureComputeResource_Task)(nil)).Elem() +} + +type ReconfigureComputeResource_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigureDVPortRequestType struct { + This ManagedObjectReference `xml:"_this"` + Port []DVPortConfigSpec `xml:"port"` +} + +func init() { + t["ReconfigureDVPortRequestType"] = reflect.TypeOf((*ReconfigureDVPortRequestType)(nil)).Elem() +} + +type ReconfigureDVPort_Task ReconfigureDVPortRequestType + +func init() { + t["ReconfigureDVPort_Task"] = reflect.TypeOf((*ReconfigureDVPort_Task)(nil)).Elem() +} + +type ReconfigureDVPort_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigureDVPortgroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec DVPortgroupConfigSpec `xml:"spec"` +} + +func init() { + t["ReconfigureDVPortgroupRequestType"] = reflect.TypeOf((*ReconfigureDVPortgroupRequestType)(nil)).Elem() +} + +type ReconfigureDVPortgroup_Task ReconfigureDVPortgroupRequestType + +func init() { + t["ReconfigureDVPortgroup_Task"] = reflect.TypeOf((*ReconfigureDVPortgroup_Task)(nil)).Elem() +} + +type ReconfigureDVPortgroup_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigureDatacenterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec DatacenterConfigSpec `xml:"spec"` + Modify bool `xml:"modify"` +} + +func init() { + t["ReconfigureDatacenterRequestType"] = reflect.TypeOf((*ReconfigureDatacenterRequestType)(nil)).Elem() +} + +type ReconfigureDatacenter_Task ReconfigureDatacenterRequestType + +func init() { + t["ReconfigureDatacenter_Task"] = reflect.TypeOf((*ReconfigureDatacenter_Task)(nil)).Elem() +} + +type ReconfigureDatacenter_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigureDomObject ReconfigureDomObjectRequestType + +func init() { + t["ReconfigureDomObject"] = reflect.TypeOf((*ReconfigureDomObject)(nil)).Elem() +} + +type ReconfigureDomObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuid string `xml:"uuid"` + Policy string `xml:"policy"` +} + +func init() { + t["ReconfigureDomObjectRequestType"] = reflect.TypeOf((*ReconfigureDomObjectRequestType)(nil)).Elem() +} + +type ReconfigureDomObjectResponse struct { +} + +type ReconfigureDvsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec BaseDVSConfigSpec `xml:"spec,typeattr"` +} + +func init() { + t["ReconfigureDvsRequestType"] = reflect.TypeOf((*ReconfigureDvsRequestType)(nil)).Elem() +} + +type ReconfigureDvs_Task ReconfigureDvsRequestType + +func init() { + t["ReconfigureDvs_Task"] = reflect.TypeOf((*ReconfigureDvs_Task)(nil)).Elem() +} + +type ReconfigureDvs_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigureHostForDASRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ReconfigureHostForDASRequestType"] = 
reflect.TypeOf((*ReconfigureHostForDASRequestType)(nil)).Elem() +} + +type ReconfigureHostForDAS_Task ReconfigureHostForDASRequestType + +func init() { + t["ReconfigureHostForDAS_Task"] = reflect.TypeOf((*ReconfigureHostForDAS_Task)(nil)).Elem() +} + +type ReconfigureHostForDAS_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReconfigureScheduledTask ReconfigureScheduledTaskRequestType + +func init() { + t["ReconfigureScheduledTask"] = reflect.TypeOf((*ReconfigureScheduledTask)(nil)).Elem() +} + +type ReconfigureScheduledTaskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec BaseScheduledTaskSpec `xml:"spec,typeattr"` +} + +func init() { + t["ReconfigureScheduledTaskRequestType"] = reflect.TypeOf((*ReconfigureScheduledTaskRequestType)(nil)).Elem() +} + +type ReconfigureScheduledTaskResponse struct { +} + +type ReconfigureServiceConsoleReservation ReconfigureServiceConsoleReservationRequestType + +func init() { + t["ReconfigureServiceConsoleReservation"] = reflect.TypeOf((*ReconfigureServiceConsoleReservation)(nil)).Elem() +} + +type ReconfigureServiceConsoleReservationRequestType struct { + This ManagedObjectReference `xml:"_this"` + CfgBytes int64 `xml:"cfgBytes"` +} + +func init() { + t["ReconfigureServiceConsoleReservationRequestType"] = reflect.TypeOf((*ReconfigureServiceConsoleReservationRequestType)(nil)).Elem() +} + +type ReconfigureServiceConsoleReservationResponse struct { +} + +type ReconfigureSnmpAgent ReconfigureSnmpAgentRequestType + +func init() { + t["ReconfigureSnmpAgent"] = reflect.TypeOf((*ReconfigureSnmpAgent)(nil)).Elem() +} + +type ReconfigureSnmpAgentRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec HostSnmpConfigSpec `xml:"spec"` +} + +func init() { + t["ReconfigureSnmpAgentRequestType"] = reflect.TypeOf((*ReconfigureSnmpAgentRequestType)(nil)).Elem() +} + +type ReconfigureSnmpAgentResponse struct { +} + +type ReconfigureVirtualMachineReservation ReconfigureVirtualMachineReservationRequestType + +func init() { + t["ReconfigureVirtualMachineReservation"] = reflect.TypeOf((*ReconfigureVirtualMachineReservation)(nil)).Elem() +} + +type ReconfigureVirtualMachineReservationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec VirtualMachineMemoryReservationSpec `xml:"spec"` +} + +func init() { + t["ReconfigureVirtualMachineReservationRequestType"] = reflect.TypeOf((*ReconfigureVirtualMachineReservationRequestType)(nil)).Elem() +} + +type ReconfigureVirtualMachineReservationResponse struct { +} + +type ReconnectHostRequestType struct { + This ManagedObjectReference `xml:"_this"` + CnxSpec *HostConnectSpec `xml:"cnxSpec,omitempty"` + ReconnectSpec *HostSystemReconnectSpec `xml:"reconnectSpec,omitempty"` +} + +func init() { + t["ReconnectHostRequestType"] = reflect.TypeOf((*ReconnectHostRequestType)(nil)).Elem() +} + +type ReconnectHost_Task ReconnectHostRequestType + +func init() { + t["ReconnectHost_Task"] = reflect.TypeOf((*ReconnectHost_Task)(nil)).Elem() +} + +type ReconnectHost_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RecordReplayDisabled struct { + VimFault +} + +func init() { + t["RecordReplayDisabled"] = reflect.TypeOf((*RecordReplayDisabled)(nil)).Elem() +} + +type RecordReplayDisabledFault RecordReplayDisabled + +func init() { + t["RecordReplayDisabledFault"] = reflect.TypeOf((*RecordReplayDisabledFault)(nil)).Elem() +} + +type RecoveryEvent struct { + DvsEvent + + HostName string `xml:"hostName"` + PortKey string 
`xml:"portKey"` + DvsUuid string `xml:"dvsUuid,omitempty"` + Vnic string `xml:"vnic,omitempty"` +} + +func init() { + t["RecoveryEvent"] = reflect.TypeOf((*RecoveryEvent)(nil)).Elem() +} + +type RectifyDvsHostRequestType struct { + This ManagedObjectReference `xml:"_this"` + Hosts []ManagedObjectReference `xml:"hosts,omitempty"` +} + +func init() { + t["RectifyDvsHostRequestType"] = reflect.TypeOf((*RectifyDvsHostRequestType)(nil)).Elem() +} + +type RectifyDvsHost_Task RectifyDvsHostRequestType + +func init() { + t["RectifyDvsHost_Task"] = reflect.TypeOf((*RectifyDvsHost_Task)(nil)).Elem() +} + +type RectifyDvsHost_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RectifyDvsOnHostRequestType struct { + This ManagedObjectReference `xml:"_this"` + Hosts []ManagedObjectReference `xml:"hosts"` +} + +func init() { + t["RectifyDvsOnHostRequestType"] = reflect.TypeOf((*RectifyDvsOnHostRequestType)(nil)).Elem() +} + +type RectifyDvsOnHost_Task RectifyDvsOnHostRequestType + +func init() { + t["RectifyDvsOnHost_Task"] = reflect.TypeOf((*RectifyDvsOnHost_Task)(nil)).Elem() +} + +type RectifyDvsOnHost_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RecurrentTaskScheduler struct { + TaskScheduler + + Interval int32 `xml:"interval"` +} + +func init() { + t["RecurrentTaskScheduler"] = reflect.TypeOf((*RecurrentTaskScheduler)(nil)).Elem() +} + +type Refresh RefreshRequestType + +func init() { + t["Refresh"] = reflect.TypeOf((*Refresh)(nil)).Elem() +} + +type RefreshDVPortState RefreshDVPortStateRequestType + +func init() { + t["RefreshDVPortState"] = reflect.TypeOf((*RefreshDVPortState)(nil)).Elem() +} + +type RefreshDVPortStateRequestType struct { + This ManagedObjectReference `xml:"_this"` + PortKeys []string `xml:"portKeys,omitempty"` +} + +func init() { + t["RefreshDVPortStateRequestType"] = reflect.TypeOf((*RefreshDVPortStateRequestType)(nil)).Elem() +} + +type RefreshDVPortStateResponse struct { +} + +type RefreshDatastore RefreshDatastoreRequestType + +func init() { + t["RefreshDatastore"] = reflect.TypeOf((*RefreshDatastore)(nil)).Elem() +} + +type RefreshDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshDatastoreRequestType"] = reflect.TypeOf((*RefreshDatastoreRequestType)(nil)).Elem() +} + +type RefreshDatastoreResponse struct { +} + +type RefreshDatastoreStorageInfo RefreshDatastoreStorageInfoRequestType + +func init() { + t["RefreshDatastoreStorageInfo"] = reflect.TypeOf((*RefreshDatastoreStorageInfo)(nil)).Elem() +} + +type RefreshDatastoreStorageInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshDatastoreStorageInfoRequestType"] = reflect.TypeOf((*RefreshDatastoreStorageInfoRequestType)(nil)).Elem() +} + +type RefreshDatastoreStorageInfoResponse struct { +} + +type RefreshDateTimeSystem RefreshDateTimeSystemRequestType + +func init() { + t["RefreshDateTimeSystem"] = reflect.TypeOf((*RefreshDateTimeSystem)(nil)).Elem() +} + +type RefreshDateTimeSystemRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshDateTimeSystemRequestType"] = reflect.TypeOf((*RefreshDateTimeSystemRequestType)(nil)).Elem() +} + +type RefreshDateTimeSystemResponse struct { +} + +type RefreshFirewall RefreshFirewallRequestType + +func init() { + t["RefreshFirewall"] = reflect.TypeOf((*RefreshFirewall)(nil)).Elem() +} + +type RefreshFirewallRequestType struct { + This ManagedObjectReference `xml:"_this"` 
+} + +func init() { + t["RefreshFirewallRequestType"] = reflect.TypeOf((*RefreshFirewallRequestType)(nil)).Elem() +} + +type RefreshFirewallResponse struct { +} + +type RefreshGraphicsManager RefreshGraphicsManagerRequestType + +func init() { + t["RefreshGraphicsManager"] = reflect.TypeOf((*RefreshGraphicsManager)(nil)).Elem() +} + +type RefreshGraphicsManagerRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshGraphicsManagerRequestType"] = reflect.TypeOf((*RefreshGraphicsManagerRequestType)(nil)).Elem() +} + +type RefreshGraphicsManagerResponse struct { +} + +type RefreshHealthStatusSystem RefreshHealthStatusSystemRequestType + +func init() { + t["RefreshHealthStatusSystem"] = reflect.TypeOf((*RefreshHealthStatusSystem)(nil)).Elem() +} + +type RefreshHealthStatusSystemRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshHealthStatusSystemRequestType"] = reflect.TypeOf((*RefreshHealthStatusSystemRequestType)(nil)).Elem() +} + +type RefreshHealthStatusSystemResponse struct { +} + +type RefreshNetworkSystem RefreshNetworkSystemRequestType + +func init() { + t["RefreshNetworkSystem"] = reflect.TypeOf((*RefreshNetworkSystem)(nil)).Elem() +} + +type RefreshNetworkSystemRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshNetworkSystemRequestType"] = reflect.TypeOf((*RefreshNetworkSystemRequestType)(nil)).Elem() +} + +type RefreshNetworkSystemResponse struct { +} + +type RefreshRecommendation RefreshRecommendationRequestType + +func init() { + t["RefreshRecommendation"] = reflect.TypeOf((*RefreshRecommendation)(nil)).Elem() +} + +type RefreshRecommendationRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshRecommendationRequestType"] = reflect.TypeOf((*RefreshRecommendationRequestType)(nil)).Elem() +} + +type RefreshRecommendationResponse struct { +} + +type RefreshRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshRequestType"] = reflect.TypeOf((*RefreshRequestType)(nil)).Elem() +} + +type RefreshResponse struct { +} + +type RefreshRuntime RefreshRuntimeRequestType + +func init() { + t["RefreshRuntime"] = reflect.TypeOf((*RefreshRuntime)(nil)).Elem() +} + +type RefreshRuntimeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshRuntimeRequestType"] = reflect.TypeOf((*RefreshRuntimeRequestType)(nil)).Elem() +} + +type RefreshRuntimeResponse struct { +} + +type RefreshServices RefreshServicesRequestType + +func init() { + t["RefreshServices"] = reflect.TypeOf((*RefreshServices)(nil)).Elem() +} + +type RefreshServicesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshServicesRequestType"] = reflect.TypeOf((*RefreshServicesRequestType)(nil)).Elem() +} + +type RefreshServicesResponse struct { +} + +type RefreshStorageDrsRecommendation RefreshStorageDrsRecommendationRequestType + +func init() { + t["RefreshStorageDrsRecommendation"] = reflect.TypeOf((*RefreshStorageDrsRecommendation)(nil)).Elem() +} + +type RefreshStorageDrsRecommendationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Pod ManagedObjectReference `xml:"pod"` +} + +func init() { + t["RefreshStorageDrsRecommendationRequestType"] = reflect.TypeOf((*RefreshStorageDrsRecommendationRequestType)(nil)).Elem() +} + +type RefreshStorageDrsRecommendationResponse struct { +} + +type 
RefreshStorageDrsRecommendationsForPodRequestType struct { + This ManagedObjectReference `xml:"_this"` + Pod ManagedObjectReference `xml:"pod"` +} + +func init() { + t["RefreshStorageDrsRecommendationsForPodRequestType"] = reflect.TypeOf((*RefreshStorageDrsRecommendationsForPodRequestType)(nil)).Elem() +} + +type RefreshStorageDrsRecommendationsForPod_Task RefreshStorageDrsRecommendationsForPodRequestType + +func init() { + t["RefreshStorageDrsRecommendationsForPod_Task"] = reflect.TypeOf((*RefreshStorageDrsRecommendationsForPod_Task)(nil)).Elem() +} + +type RefreshStorageDrsRecommendationsForPod_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RefreshStorageInfo RefreshStorageInfoRequestType + +func init() { + t["RefreshStorageInfo"] = reflect.TypeOf((*RefreshStorageInfo)(nil)).Elem() +} + +type RefreshStorageInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshStorageInfoRequestType"] = reflect.TypeOf((*RefreshStorageInfoRequestType)(nil)).Elem() +} + +type RefreshStorageInfoResponse struct { +} + +type RefreshStorageSystem RefreshStorageSystemRequestType + +func init() { + t["RefreshStorageSystem"] = reflect.TypeOf((*RefreshStorageSystem)(nil)).Elem() +} + +type RefreshStorageSystemRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RefreshStorageSystemRequestType"] = reflect.TypeOf((*RefreshStorageSystemRequestType)(nil)).Elem() +} + +type RefreshStorageSystemResponse struct { +} + +type RegisterChildVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Path string `xml:"path"` + Name string `xml:"name,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["RegisterChildVMRequestType"] = reflect.TypeOf((*RegisterChildVMRequestType)(nil)).Elem() +} + +type RegisterChildVM_Task RegisterChildVMRequestType + +func init() { + t["RegisterChildVM_Task"] = reflect.TypeOf((*RegisterChildVM_Task)(nil)).Elem() +} + +type RegisterChildVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RegisterDisk RegisterDiskRequestType + +func init() { + t["RegisterDisk"] = reflect.TypeOf((*RegisterDisk)(nil)).Elem() +} + +type RegisterDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Path string `xml:"path"` + Name string `xml:"name,omitempty"` +} + +func init() { + t["RegisterDiskRequestType"] = reflect.TypeOf((*RegisterDiskRequestType)(nil)).Elem() +} + +type RegisterDiskResponse struct { + Returnval VStorageObject `xml:"returnval"` +} + +type RegisterExtension RegisterExtensionRequestType + +func init() { + t["RegisterExtension"] = reflect.TypeOf((*RegisterExtension)(nil)).Elem() +} + +type RegisterExtensionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Extension Extension `xml:"extension"` +} + +func init() { + t["RegisterExtensionRequestType"] = reflect.TypeOf((*RegisterExtensionRequestType)(nil)).Elem() +} + +type RegisterExtensionResponse struct { +} + +type RegisterHealthUpdateProvider RegisterHealthUpdateProviderRequestType + +func init() { + t["RegisterHealthUpdateProvider"] = reflect.TypeOf((*RegisterHealthUpdateProvider)(nil)).Elem() +} + +type RegisterHealthUpdateProviderRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + HealthUpdateInfo []HealthUpdateInfo `xml:"healthUpdateInfo,omitempty"` +} + +func init() { + t["RegisterHealthUpdateProviderRequestType"] = 
reflect.TypeOf((*RegisterHealthUpdateProviderRequestType)(nil)).Elem() +} + +type RegisterHealthUpdateProviderResponse struct { + Returnval string `xml:"returnval"` +} + +type RegisterKmipServer RegisterKmipServerRequestType + +func init() { + t["RegisterKmipServer"] = reflect.TypeOf((*RegisterKmipServer)(nil)).Elem() +} + +type RegisterKmipServerRequestType struct { + This ManagedObjectReference `xml:"_this"` + Server KmipServerSpec `xml:"server"` +} + +func init() { + t["RegisterKmipServerRequestType"] = reflect.TypeOf((*RegisterKmipServerRequestType)(nil)).Elem() +} + +type RegisterKmipServerResponse struct { +} + +type RegisterVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Path string `xml:"path"` + Name string `xml:"name,omitempty"` + AsTemplate bool `xml:"asTemplate"` + Pool *ManagedObjectReference `xml:"pool,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["RegisterVMRequestType"] = reflect.TypeOf((*RegisterVMRequestType)(nil)).Elem() +} + +type RegisterVM_Task RegisterVMRequestType + +func init() { + t["RegisterVM_Task"] = reflect.TypeOf((*RegisterVM_Task)(nil)).Elem() +} + +type RegisterVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type Relation struct { + DynamicData + + Constraint string `xml:"constraint,omitempty"` + Name string `xml:"name"` + Version string `xml:"version,omitempty"` +} + +func init() { + t["Relation"] = reflect.TypeOf((*Relation)(nil)).Elem() +} + +type ReleaseCredentialsInGuest ReleaseCredentialsInGuestRequestType + +func init() { + t["ReleaseCredentialsInGuest"] = reflect.TypeOf((*ReleaseCredentialsInGuest)(nil)).Elem() +} + +type ReleaseCredentialsInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` +} + +func init() { + t["ReleaseCredentialsInGuestRequestType"] = reflect.TypeOf((*ReleaseCredentialsInGuestRequestType)(nil)).Elem() +} + +type ReleaseCredentialsInGuestResponse struct { +} + +type ReleaseIpAllocation ReleaseIpAllocationRequestType + +func init() { + t["ReleaseIpAllocation"] = reflect.TypeOf((*ReleaseIpAllocation)(nil)).Elem() +} + +type ReleaseIpAllocationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Dc ManagedObjectReference `xml:"dc"` + PoolId int32 `xml:"poolId"` + AllocationId string `xml:"allocationId"` +} + +func init() { + t["ReleaseIpAllocationRequestType"] = reflect.TypeOf((*ReleaseIpAllocationRequestType)(nil)).Elem() +} + +type ReleaseIpAllocationResponse struct { +} + +type ReleaseManagedSnapshot ReleaseManagedSnapshotRequestType + +func init() { + t["ReleaseManagedSnapshot"] = reflect.TypeOf((*ReleaseManagedSnapshot)(nil)).Elem() +} + +type ReleaseManagedSnapshotRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vdisk string `xml:"vdisk"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` +} + +func init() { + t["ReleaseManagedSnapshotRequestType"] = reflect.TypeOf((*ReleaseManagedSnapshotRequestType)(nil)).Elem() +} + +type ReleaseManagedSnapshotResponse struct { +} + +type Reload ReloadRequestType + +func init() { + t["Reload"] = reflect.TypeOf((*Reload)(nil)).Elem() +} + +type ReloadRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ReloadRequestType"] = reflect.TypeOf((*ReloadRequestType)(nil)).Elem() +} + +type ReloadResponse struct { +} + +type RelocateVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec 
VirtualMachineRelocateSpec `xml:"spec"` + Priority VirtualMachineMovePriority `xml:"priority,omitempty"` +} + +func init() { + t["RelocateVMRequestType"] = reflect.TypeOf((*RelocateVMRequestType)(nil)).Elem() +} + +type RelocateVM_Task RelocateVMRequestType + +func init() { + t["RelocateVM_Task"] = reflect.TypeOf((*RelocateVM_Task)(nil)).Elem() +} + +type RelocateVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RelocateVStorageObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Spec VslmRelocateSpec `xml:"spec"` +} + +func init() { + t["RelocateVStorageObjectRequestType"] = reflect.TypeOf((*RelocateVStorageObjectRequestType)(nil)).Elem() +} + +type RelocateVStorageObject_Task RelocateVStorageObjectRequestType + +func init() { + t["RelocateVStorageObject_Task"] = reflect.TypeOf((*RelocateVStorageObject_Task)(nil)).Elem() +} + +type RelocateVStorageObject_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RemoteDeviceNotSupported struct { + DeviceNotSupported +} + +func init() { + t["RemoteDeviceNotSupported"] = reflect.TypeOf((*RemoteDeviceNotSupported)(nil)).Elem() +} + +type RemoteDeviceNotSupportedFault RemoteDeviceNotSupported + +func init() { + t["RemoteDeviceNotSupportedFault"] = reflect.TypeOf((*RemoteDeviceNotSupportedFault)(nil)).Elem() +} + +type RemoteTSMEnabledEvent struct { + HostEvent +} + +func init() { + t["RemoteTSMEnabledEvent"] = reflect.TypeOf((*RemoteTSMEnabledEvent)(nil)).Elem() +} + +type RemoveAlarm RemoveAlarmRequestType + +func init() { + t["RemoveAlarm"] = reflect.TypeOf((*RemoveAlarm)(nil)).Elem() +} + +type RemoveAlarmRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RemoveAlarmRequestType"] = reflect.TypeOf((*RemoveAlarmRequestType)(nil)).Elem() +} + +type RemoveAlarmResponse struct { +} + +type RemoveAllSnapshotsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Consolidate *bool `xml:"consolidate"` +} + +func init() { + t["RemoveAllSnapshotsRequestType"] = reflect.TypeOf((*RemoveAllSnapshotsRequestType)(nil)).Elem() +} + +type RemoveAllSnapshots_Task RemoveAllSnapshotsRequestType + +func init() { + t["RemoveAllSnapshots_Task"] = reflect.TypeOf((*RemoveAllSnapshots_Task)(nil)).Elem() +} + +type RemoveAllSnapshots_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RemoveAssignedLicense RemoveAssignedLicenseRequestType + +func init() { + t["RemoveAssignedLicense"] = reflect.TypeOf((*RemoveAssignedLicense)(nil)).Elem() +} + +type RemoveAssignedLicenseRequestType struct { + This ManagedObjectReference `xml:"_this"` + EntityId string `xml:"entityId"` +} + +func init() { + t["RemoveAssignedLicenseRequestType"] = reflect.TypeOf((*RemoveAssignedLicenseRequestType)(nil)).Elem() +} + +type RemoveAssignedLicenseResponse struct { +} + +type RemoveAuthorizationRole RemoveAuthorizationRoleRequestType + +func init() { + t["RemoveAuthorizationRole"] = reflect.TypeOf((*RemoveAuthorizationRole)(nil)).Elem() +} + +type RemoveAuthorizationRoleRequestType struct { + This ManagedObjectReference `xml:"_this"` + RoleId int32 `xml:"roleId"` + FailIfUsed bool `xml:"failIfUsed"` +} + +func init() { + t["RemoveAuthorizationRoleRequestType"] = reflect.TypeOf((*RemoveAuthorizationRoleRequestType)(nil)).Elem() +} + +type RemoveAuthorizationRoleResponse struct { +} + +type RemoveCustomFieldDef RemoveCustomFieldDefRequestType + +func 
init() { + t["RemoveCustomFieldDef"] = reflect.TypeOf((*RemoveCustomFieldDef)(nil)).Elem() +} + +type RemoveCustomFieldDefRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key int32 `xml:"key"` +} + +func init() { + t["RemoveCustomFieldDefRequestType"] = reflect.TypeOf((*RemoveCustomFieldDefRequestType)(nil)).Elem() +} + +type RemoveCustomFieldDefResponse struct { +} + +type RemoveDatastore RemoveDatastoreRequestType + +func init() { + t["RemoveDatastore"] = reflect.TypeOf((*RemoveDatastore)(nil)).Elem() +} + +type RemoveDatastoreExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore []ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["RemoveDatastoreExRequestType"] = reflect.TypeOf((*RemoveDatastoreExRequestType)(nil)).Elem() +} + +type RemoveDatastoreEx_Task RemoveDatastoreExRequestType + +func init() { + t["RemoveDatastoreEx_Task"] = reflect.TypeOf((*RemoveDatastoreEx_Task)(nil)).Elem() +} + +type RemoveDatastoreEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RemoveDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["RemoveDatastoreRequestType"] = reflect.TypeOf((*RemoveDatastoreRequestType)(nil)).Elem() +} + +type RemoveDatastoreResponse struct { +} + +type RemoveDiskMappingRequestType struct { + This ManagedObjectReference `xml:"_this"` + Mapping []VsanHostDiskMapping `xml:"mapping"` + MaintenanceSpec *HostMaintenanceSpec `xml:"maintenanceSpec,omitempty"` + Timeout int32 `xml:"timeout,omitempty"` +} + +func init() { + t["RemoveDiskMappingRequestType"] = reflect.TypeOf((*RemoveDiskMappingRequestType)(nil)).Elem() +} + +type RemoveDiskMapping_Task RemoveDiskMappingRequestType + +func init() { + t["RemoveDiskMapping_Task"] = reflect.TypeOf((*RemoveDiskMapping_Task)(nil)).Elem() +} + +type RemoveDiskMapping_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RemoveDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Disk []HostScsiDisk `xml:"disk"` + MaintenanceSpec *HostMaintenanceSpec `xml:"maintenanceSpec,omitempty"` + Timeout int32 `xml:"timeout,omitempty"` +} + +func init() { + t["RemoveDiskRequestType"] = reflect.TypeOf((*RemoveDiskRequestType)(nil)).Elem() +} + +type RemoveDisk_Task RemoveDiskRequestType + +func init() { + t["RemoveDisk_Task"] = reflect.TypeOf((*RemoveDisk_Task)(nil)).Elem() +} + +type RemoveDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RemoveEntityPermission RemoveEntityPermissionRequestType + +func init() { + t["RemoveEntityPermission"] = reflect.TypeOf((*RemoveEntityPermission)(nil)).Elem() +} + +type RemoveEntityPermissionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + User string `xml:"user"` + IsGroup bool `xml:"isGroup"` +} + +func init() { + t["RemoveEntityPermissionRequestType"] = reflect.TypeOf((*RemoveEntityPermissionRequestType)(nil)).Elem() +} + +type RemoveEntityPermissionResponse struct { +} + +type RemoveFailed struct { + VimFault +} + +func init() { + t["RemoveFailed"] = reflect.TypeOf((*RemoveFailed)(nil)).Elem() +} + +type RemoveFailedFault RemoveFailed + +func init() { + t["RemoveFailedFault"] = reflect.TypeOf((*RemoveFailedFault)(nil)).Elem() +} + +type RemoveFilter RemoveFilterRequestType + +func init() { + t["RemoveFilter"] = reflect.TypeOf((*RemoveFilter)(nil)).Elem() +} + +type 
RemoveFilterEntities RemoveFilterEntitiesRequestType + +func init() { + t["RemoveFilterEntities"] = reflect.TypeOf((*RemoveFilterEntities)(nil)).Elem() +} + +type RemoveFilterEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + FilterId string `xml:"filterId"` + Entities []ManagedObjectReference `xml:"entities,omitempty"` +} + +func init() { + t["RemoveFilterEntitiesRequestType"] = reflect.TypeOf((*RemoveFilterEntitiesRequestType)(nil)).Elem() +} + +type RemoveFilterEntitiesResponse struct { +} + +type RemoveFilterRequestType struct { + This ManagedObjectReference `xml:"_this"` + FilterId string `xml:"filterId"` +} + +func init() { + t["RemoveFilterRequestType"] = reflect.TypeOf((*RemoveFilterRequestType)(nil)).Elem() +} + +type RemoveFilterResponse struct { +} + +type RemoveGroup RemoveGroupRequestType + +func init() { + t["RemoveGroup"] = reflect.TypeOf((*RemoveGroup)(nil)).Elem() +} + +type RemoveGroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + GroupName string `xml:"groupName"` +} + +func init() { + t["RemoveGroupRequestType"] = reflect.TypeOf((*RemoveGroupRequestType)(nil)).Elem() +} + +type RemoveGroupResponse struct { +} + +type RemoveGuestAlias RemoveGuestAliasRequestType + +func init() { + t["RemoveGuestAlias"] = reflect.TypeOf((*RemoveGuestAlias)(nil)).Elem() +} + +type RemoveGuestAliasByCert RemoveGuestAliasByCertRequestType + +func init() { + t["RemoveGuestAliasByCert"] = reflect.TypeOf((*RemoveGuestAliasByCert)(nil)).Elem() +} + +type RemoveGuestAliasByCertRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Username string `xml:"username"` + Base64Cert string `xml:"base64Cert"` +} + +func init() { + t["RemoveGuestAliasByCertRequestType"] = reflect.TypeOf((*RemoveGuestAliasByCertRequestType)(nil)).Elem() +} + +type RemoveGuestAliasByCertResponse struct { +} + +type RemoveGuestAliasRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Username string `xml:"username"` + Base64Cert string `xml:"base64Cert"` + Subject BaseGuestAuthSubject `xml:"subject,typeattr"` +} + +func init() { + t["RemoveGuestAliasRequestType"] = reflect.TypeOf((*RemoveGuestAliasRequestType)(nil)).Elem() +} + +type RemoveGuestAliasResponse struct { +} + +type RemoveInternetScsiSendTargets RemoveInternetScsiSendTargetsRequestType + +func init() { + t["RemoveInternetScsiSendTargets"] = reflect.TypeOf((*RemoveInternetScsiSendTargets)(nil)).Elem() +} + +type RemoveInternetScsiSendTargetsRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + Targets []HostInternetScsiHbaSendTarget `xml:"targets"` +} + +func init() { + t["RemoveInternetScsiSendTargetsRequestType"] = reflect.TypeOf((*RemoveInternetScsiSendTargetsRequestType)(nil)).Elem() +} + +type RemoveInternetScsiSendTargetsResponse struct { +} + +type RemoveInternetScsiStaticTargets RemoveInternetScsiStaticTargetsRequestType + +func init() { + t["RemoveInternetScsiStaticTargets"] = reflect.TypeOf((*RemoveInternetScsiStaticTargets)(nil)).Elem() +} + +type RemoveInternetScsiStaticTargetsRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + Targets []HostInternetScsiHbaStaticTarget `xml:"targets"` +} + +func init() { + t["RemoveInternetScsiStaticTargetsRequestType"] = 
reflect.TypeOf((*RemoveInternetScsiStaticTargetsRequestType)(nil)).Elem() +} + +type RemoveInternetScsiStaticTargetsResponse struct { +} + +type RemoveKey RemoveKeyRequestType + +func init() { + t["RemoveKey"] = reflect.TypeOf((*RemoveKey)(nil)).Elem() +} + +type RemoveKeyRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key CryptoKeyId `xml:"key"` + Force bool `xml:"force"` +} + +func init() { + t["RemoveKeyRequestType"] = reflect.TypeOf((*RemoveKeyRequestType)(nil)).Elem() +} + +type RemoveKeyResponse struct { +} + +type RemoveKeys RemoveKeysRequestType + +func init() { + t["RemoveKeys"] = reflect.TypeOf((*RemoveKeys)(nil)).Elem() +} + +type RemoveKeysRequestType struct { + This ManagedObjectReference `xml:"_this"` + Keys []CryptoKeyId `xml:"keys,omitempty"` + Force bool `xml:"force"` +} + +func init() { + t["RemoveKeysRequestType"] = reflect.TypeOf((*RemoveKeysRequestType)(nil)).Elem() +} + +type RemoveKeysResponse struct { + Returnval []CryptoKeyResult `xml:"returnval,omitempty"` +} + +type RemoveKmipServer RemoveKmipServerRequestType + +func init() { + t["RemoveKmipServer"] = reflect.TypeOf((*RemoveKmipServer)(nil)).Elem() +} + +type RemoveKmipServerRequestType struct { + This ManagedObjectReference `xml:"_this"` + ClusterId KeyProviderId `xml:"clusterId"` + ServerName string `xml:"serverName"` +} + +func init() { + t["RemoveKmipServerRequestType"] = reflect.TypeOf((*RemoveKmipServerRequestType)(nil)).Elem() +} + +type RemoveKmipServerResponse struct { +} + +type RemoveLicense RemoveLicenseRequestType + +func init() { + t["RemoveLicense"] = reflect.TypeOf((*RemoveLicense)(nil)).Elem() +} + +type RemoveLicenseLabel RemoveLicenseLabelRequestType + +func init() { + t["RemoveLicenseLabel"] = reflect.TypeOf((*RemoveLicenseLabel)(nil)).Elem() +} + +type RemoveLicenseLabelRequestType struct { + This ManagedObjectReference `xml:"_this"` + LicenseKey string `xml:"licenseKey"` + LabelKey string `xml:"labelKey"` +} + +func init() { + t["RemoveLicenseLabelRequestType"] = reflect.TypeOf((*RemoveLicenseLabelRequestType)(nil)).Elem() +} + +type RemoveLicenseLabelResponse struct { +} + +type RemoveLicenseRequestType struct { + This ManagedObjectReference `xml:"_this"` + LicenseKey string `xml:"licenseKey"` +} + +func init() { + t["RemoveLicenseRequestType"] = reflect.TypeOf((*RemoveLicenseRequestType)(nil)).Elem() +} + +type RemoveLicenseResponse struct { +} + +type RemoveMonitoredEntities RemoveMonitoredEntitiesRequestType + +func init() { + t["RemoveMonitoredEntities"] = reflect.TypeOf((*RemoveMonitoredEntities)(nil)).Elem() +} + +type RemoveMonitoredEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + ProviderId string `xml:"providerId"` + Entities []ManagedObjectReference `xml:"entities,omitempty"` +} + +func init() { + t["RemoveMonitoredEntitiesRequestType"] = reflect.TypeOf((*RemoveMonitoredEntitiesRequestType)(nil)).Elem() +} + +type RemoveMonitoredEntitiesResponse struct { +} + +type RemoveNetworkResourcePool RemoveNetworkResourcePoolRequestType + +func init() { + t["RemoveNetworkResourcePool"] = reflect.TypeOf((*RemoveNetworkResourcePool)(nil)).Elem() +} + +type RemoveNetworkResourcePoolRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key []string `xml:"key"` +} + +func init() { + t["RemoveNetworkResourcePoolRequestType"] = reflect.TypeOf((*RemoveNetworkResourcePoolRequestType)(nil)).Elem() +} + +type RemoveNetworkResourcePoolResponse struct { +} + +type RemovePerfInterval RemovePerfIntervalRequestType + +func init() { + 
t["RemovePerfInterval"] = reflect.TypeOf((*RemovePerfInterval)(nil)).Elem() +} + +type RemovePerfIntervalRequestType struct { + This ManagedObjectReference `xml:"_this"` + SamplePeriod int32 `xml:"samplePeriod"` +} + +func init() { + t["RemovePerfIntervalRequestType"] = reflect.TypeOf((*RemovePerfIntervalRequestType)(nil)).Elem() +} + +type RemovePerfIntervalResponse struct { +} + +type RemovePortGroup RemovePortGroupRequestType + +func init() { + t["RemovePortGroup"] = reflect.TypeOf((*RemovePortGroup)(nil)).Elem() +} + +type RemovePortGroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + PgName string `xml:"pgName"` +} + +func init() { + t["RemovePortGroupRequestType"] = reflect.TypeOf((*RemovePortGroupRequestType)(nil)).Elem() +} + +type RemovePortGroupResponse struct { +} + +type RemoveScheduledTask RemoveScheduledTaskRequestType + +func init() { + t["RemoveScheduledTask"] = reflect.TypeOf((*RemoveScheduledTask)(nil)).Elem() +} + +type RemoveScheduledTaskRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RemoveScheduledTaskRequestType"] = reflect.TypeOf((*RemoveScheduledTaskRequestType)(nil)).Elem() +} + +type RemoveScheduledTaskResponse struct { +} + +type RemoveServiceConsoleVirtualNic RemoveServiceConsoleVirtualNicRequestType + +func init() { + t["RemoveServiceConsoleVirtualNic"] = reflect.TypeOf((*RemoveServiceConsoleVirtualNic)(nil)).Elem() +} + +type RemoveServiceConsoleVirtualNicRequestType struct { + This ManagedObjectReference `xml:"_this"` + Device string `xml:"device"` +} + +func init() { + t["RemoveServiceConsoleVirtualNicRequestType"] = reflect.TypeOf((*RemoveServiceConsoleVirtualNicRequestType)(nil)).Elem() +} + +type RemoveServiceConsoleVirtualNicResponse struct { +} + +type RemoveSmartCardTrustAnchor RemoveSmartCardTrustAnchorRequestType + +func init() { + t["RemoveSmartCardTrustAnchor"] = reflect.TypeOf((*RemoveSmartCardTrustAnchor)(nil)).Elem() +} + +type RemoveSmartCardTrustAnchorByFingerprint RemoveSmartCardTrustAnchorByFingerprintRequestType + +func init() { + t["RemoveSmartCardTrustAnchorByFingerprint"] = reflect.TypeOf((*RemoveSmartCardTrustAnchorByFingerprint)(nil)).Elem() +} + +type RemoveSmartCardTrustAnchorByFingerprintRequestType struct { + This ManagedObjectReference `xml:"_this"` + Fingerprint string `xml:"fingerprint"` + Digest string `xml:"digest"` +} + +func init() { + t["RemoveSmartCardTrustAnchorByFingerprintRequestType"] = reflect.TypeOf((*RemoveSmartCardTrustAnchorByFingerprintRequestType)(nil)).Elem() +} + +type RemoveSmartCardTrustAnchorByFingerprintResponse struct { +} + +type RemoveSmartCardTrustAnchorRequestType struct { + This ManagedObjectReference `xml:"_this"` + Issuer string `xml:"issuer"` + Serial string `xml:"serial"` +} + +func init() { + t["RemoveSmartCardTrustAnchorRequestType"] = reflect.TypeOf((*RemoveSmartCardTrustAnchorRequestType)(nil)).Elem() +} + +type RemoveSmartCardTrustAnchorResponse struct { +} + +type RemoveSnapshotRequestType struct { + This ManagedObjectReference `xml:"_this"` + RemoveChildren bool `xml:"removeChildren"` + Consolidate *bool `xml:"consolidate"` +} + +func init() { + t["RemoveSnapshotRequestType"] = reflect.TypeOf((*RemoveSnapshotRequestType)(nil)).Elem() +} + +type RemoveSnapshot_Task RemoveSnapshotRequestType + +func init() { + t["RemoveSnapshot_Task"] = reflect.TypeOf((*RemoveSnapshot_Task)(nil)).Elem() +} + +type RemoveSnapshot_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RemoveUser RemoveUserRequestType + 
+func init() { + t["RemoveUser"] = reflect.TypeOf((*RemoveUser)(nil)).Elem() +} + +type RemoveUserRequestType struct { + This ManagedObjectReference `xml:"_this"` + UserName string `xml:"userName"` +} + +func init() { + t["RemoveUserRequestType"] = reflect.TypeOf((*RemoveUserRequestType)(nil)).Elem() +} + +type RemoveUserResponse struct { +} + +type RemoveVirtualNic RemoveVirtualNicRequestType + +func init() { + t["RemoveVirtualNic"] = reflect.TypeOf((*RemoveVirtualNic)(nil)).Elem() +} + +type RemoveVirtualNicRequestType struct { + This ManagedObjectReference `xml:"_this"` + Device string `xml:"device"` +} + +func init() { + t["RemoveVirtualNicRequestType"] = reflect.TypeOf((*RemoveVirtualNicRequestType)(nil)).Elem() +} + +type RemoveVirtualNicResponse struct { +} + +type RemoveVirtualSwitch RemoveVirtualSwitchRequestType + +func init() { + t["RemoveVirtualSwitch"] = reflect.TypeOf((*RemoveVirtualSwitch)(nil)).Elem() +} + +type RemoveVirtualSwitchRequestType struct { + This ManagedObjectReference `xml:"_this"` + VswitchName string `xml:"vswitchName"` +} + +func init() { + t["RemoveVirtualSwitchRequestType"] = reflect.TypeOf((*RemoveVirtualSwitchRequestType)(nil)).Elem() +} + +type RemoveVirtualSwitchResponse struct { +} + +type RenameCustomFieldDef RenameCustomFieldDefRequestType + +func init() { + t["RenameCustomFieldDef"] = reflect.TypeOf((*RenameCustomFieldDef)(nil)).Elem() +} + +type RenameCustomFieldDefRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key int32 `xml:"key"` + Name string `xml:"name"` +} + +func init() { + t["RenameCustomFieldDefRequestType"] = reflect.TypeOf((*RenameCustomFieldDefRequestType)(nil)).Elem() +} + +type RenameCustomFieldDefResponse struct { +} + +type RenameCustomizationSpec RenameCustomizationSpecRequestType + +func init() { + t["RenameCustomizationSpec"] = reflect.TypeOf((*RenameCustomizationSpec)(nil)).Elem() +} + +type RenameCustomizationSpecRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + NewName string `xml:"newName"` +} + +func init() { + t["RenameCustomizationSpecRequestType"] = reflect.TypeOf((*RenameCustomizationSpecRequestType)(nil)).Elem() +} + +type RenameCustomizationSpecResponse struct { +} + +type RenameDatastore RenameDatastoreRequestType + +func init() { + t["RenameDatastore"] = reflect.TypeOf((*RenameDatastore)(nil)).Elem() +} + +type RenameDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` + NewName string `xml:"newName"` +} + +func init() { + t["RenameDatastoreRequestType"] = reflect.TypeOf((*RenameDatastoreRequestType)(nil)).Elem() +} + +type RenameDatastoreResponse struct { +} + +type RenameRequestType struct { + This ManagedObjectReference `xml:"_this"` + NewName string `xml:"newName"` +} + +func init() { + t["RenameRequestType"] = reflect.TypeOf((*RenameRequestType)(nil)).Elem() +} + +type RenameSnapshot RenameSnapshotRequestType + +func init() { + t["RenameSnapshot"] = reflect.TypeOf((*RenameSnapshot)(nil)).Elem() +} + +type RenameSnapshotRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name,omitempty"` + Description string `xml:"description,omitempty"` +} + +func init() { + t["RenameSnapshotRequestType"] = reflect.TypeOf((*RenameSnapshotRequestType)(nil)).Elem() +} + +type RenameSnapshotResponse struct { +} + +type RenameVStorageObject RenameVStorageObjectRequestType + +func init() { + t["RenameVStorageObject"] = reflect.TypeOf((*RenameVStorageObject)(nil)).Elem() +} + +type RenameVStorageObjectRequestType 
struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Name string `xml:"name"` +} + +func init() { + t["RenameVStorageObjectRequestType"] = reflect.TypeOf((*RenameVStorageObjectRequestType)(nil)).Elem() +} + +type RenameVStorageObjectResponse struct { +} + +type Rename_Task RenameRequestType + +func init() { + t["Rename_Task"] = reflect.TypeOf((*Rename_Task)(nil)).Elem() +} + +type Rename_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ReplaceCACertificatesAndCRLs ReplaceCACertificatesAndCRLsRequestType + +func init() { + t["ReplaceCACertificatesAndCRLs"] = reflect.TypeOf((*ReplaceCACertificatesAndCRLs)(nil)).Elem() +} + +type ReplaceCACertificatesAndCRLsRequestType struct { + This ManagedObjectReference `xml:"_this"` + CaCert []string `xml:"caCert"` + CaCrl []string `xml:"caCrl,omitempty"` +} + +func init() { + t["ReplaceCACertificatesAndCRLsRequestType"] = reflect.TypeOf((*ReplaceCACertificatesAndCRLsRequestType)(nil)).Elem() +} + +type ReplaceCACertificatesAndCRLsResponse struct { +} + +type ReplaceSmartCardTrustAnchors ReplaceSmartCardTrustAnchorsRequestType + +func init() { + t["ReplaceSmartCardTrustAnchors"] = reflect.TypeOf((*ReplaceSmartCardTrustAnchors)(nil)).Elem() +} + +type ReplaceSmartCardTrustAnchorsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Certs []string `xml:"certs,omitempty"` +} + +func init() { + t["ReplaceSmartCardTrustAnchorsRequestType"] = reflect.TypeOf((*ReplaceSmartCardTrustAnchorsRequestType)(nil)).Elem() +} + +type ReplaceSmartCardTrustAnchorsResponse struct { +} + +type ReplicationConfigFault struct { + ReplicationFault +} + +func init() { + t["ReplicationConfigFault"] = reflect.TypeOf((*ReplicationConfigFault)(nil)).Elem() +} + +type ReplicationConfigFaultFault BaseReplicationConfigFault + +func init() { + t["ReplicationConfigFaultFault"] = reflect.TypeOf((*ReplicationConfigFaultFault)(nil)).Elem() +} + +type ReplicationConfigSpec struct { + DynamicData + + Generation int64 `xml:"generation"` + VmReplicationId string `xml:"vmReplicationId"` + Destination string `xml:"destination"` + Port int32 `xml:"port"` + Rpo int64 `xml:"rpo"` + QuiesceGuestEnabled bool `xml:"quiesceGuestEnabled"` + Paused bool `xml:"paused"` + OppUpdatesEnabled bool `xml:"oppUpdatesEnabled"` + NetCompressionEnabled *bool `xml:"netCompressionEnabled"` + NetEncryptionEnabled *bool `xml:"netEncryptionEnabled"` + EncryptionDestination string `xml:"encryptionDestination,omitempty"` + EncryptionPort int32 `xml:"encryptionPort,omitempty"` + RemoteCertificateThumbprint string `xml:"remoteCertificateThumbprint,omitempty"` + Disk []ReplicationInfoDiskSettings `xml:"disk,omitempty"` +} + +func init() { + t["ReplicationConfigSpec"] = reflect.TypeOf((*ReplicationConfigSpec)(nil)).Elem() +} + +type ReplicationDiskConfigFault struct { + ReplicationConfigFault + + Reason string `xml:"reason,omitempty"` + VmRef *ManagedObjectReference `xml:"vmRef,omitempty"` + Key int32 `xml:"key,omitempty"` +} + +func init() { + t["ReplicationDiskConfigFault"] = reflect.TypeOf((*ReplicationDiskConfigFault)(nil)).Elem() +} + +type ReplicationDiskConfigFaultFault ReplicationDiskConfigFault + +func init() { + t["ReplicationDiskConfigFaultFault"] = reflect.TypeOf((*ReplicationDiskConfigFaultFault)(nil)).Elem() +} + +type ReplicationFault struct { + VimFault +} + +func init() { + t["ReplicationFault"] = reflect.TypeOf((*ReplicationFault)(nil)).Elem() +} + +type ReplicationFaultFault 
BaseReplicationFault + +func init() { + t["ReplicationFaultFault"] = reflect.TypeOf((*ReplicationFaultFault)(nil)).Elem() +} + +type ReplicationGroupId struct { + DynamicData + + FaultDomainId FaultDomainId `xml:"faultDomainId"` + DeviceGroupId DeviceGroupId `xml:"deviceGroupId"` +} + +func init() { + t["ReplicationGroupId"] = reflect.TypeOf((*ReplicationGroupId)(nil)).Elem() +} + +type ReplicationIncompatibleWithFT struct { + ReplicationFault +} + +func init() { + t["ReplicationIncompatibleWithFT"] = reflect.TypeOf((*ReplicationIncompatibleWithFT)(nil)).Elem() +} + +type ReplicationIncompatibleWithFTFault ReplicationIncompatibleWithFT + +func init() { + t["ReplicationIncompatibleWithFTFault"] = reflect.TypeOf((*ReplicationIncompatibleWithFTFault)(nil)).Elem() +} + +type ReplicationInfoDiskSettings struct { + DynamicData + + Key int32 `xml:"key"` + DiskReplicationId string `xml:"diskReplicationId"` +} + +func init() { + t["ReplicationInfoDiskSettings"] = reflect.TypeOf((*ReplicationInfoDiskSettings)(nil)).Elem() +} + +type ReplicationInvalidOptions struct { + ReplicationFault + + Options string `xml:"options"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["ReplicationInvalidOptions"] = reflect.TypeOf((*ReplicationInvalidOptions)(nil)).Elem() +} + +type ReplicationInvalidOptionsFault ReplicationInvalidOptions + +func init() { + t["ReplicationInvalidOptionsFault"] = reflect.TypeOf((*ReplicationInvalidOptionsFault)(nil)).Elem() +} + +type ReplicationNotSupportedOnHost struct { + ReplicationFault +} + +func init() { + t["ReplicationNotSupportedOnHost"] = reflect.TypeOf((*ReplicationNotSupportedOnHost)(nil)).Elem() +} + +type ReplicationNotSupportedOnHostFault ReplicationNotSupportedOnHost + +func init() { + t["ReplicationNotSupportedOnHostFault"] = reflect.TypeOf((*ReplicationNotSupportedOnHostFault)(nil)).Elem() +} + +type ReplicationSpec struct { + DynamicData + + ReplicationGroupId ReplicationGroupId `xml:"replicationGroupId"` +} + +func init() { + t["ReplicationSpec"] = reflect.TypeOf((*ReplicationSpec)(nil)).Elem() +} + +type ReplicationVmConfigFault struct { + ReplicationConfigFault + + Reason string `xml:"reason,omitempty"` + VmRef *ManagedObjectReference `xml:"vmRef,omitempty"` +} + +func init() { + t["ReplicationVmConfigFault"] = reflect.TypeOf((*ReplicationVmConfigFault)(nil)).Elem() +} + +type ReplicationVmConfigFaultFault ReplicationVmConfigFault + +func init() { + t["ReplicationVmConfigFaultFault"] = reflect.TypeOf((*ReplicationVmConfigFaultFault)(nil)).Elem() +} + +type ReplicationVmFault struct { + ReplicationFault + + Reason string `xml:"reason,omitempty"` + State string `xml:"state,omitempty"` + InstanceId string `xml:"instanceId,omitempty"` + Vm *ManagedObjectReference `xml:"vm,omitempty"` +} + +func init() { + t["ReplicationVmFault"] = reflect.TypeOf((*ReplicationVmFault)(nil)).Elem() +} + +type ReplicationVmFaultFault BaseReplicationVmFault + +func init() { + t["ReplicationVmFaultFault"] = reflect.TypeOf((*ReplicationVmFaultFault)(nil)).Elem() +} + +type ReplicationVmInProgressFault struct { + ReplicationVmFault + + RequestedActivity string `xml:"requestedActivity"` + InProgressActivity string `xml:"inProgressActivity"` +} + +func init() { + t["ReplicationVmInProgressFault"] = reflect.TypeOf((*ReplicationVmInProgressFault)(nil)).Elem() +} + +type ReplicationVmInProgressFaultFault ReplicationVmInProgressFault + +func init() { + t["ReplicationVmInProgressFaultFault"] = reflect.TypeOf((*ReplicationVmInProgressFaultFault)(nil)).Elem() +} + 
+type ReplicationVmProgressInfo struct { + DynamicData + + Progress int32 `xml:"progress"` + BytesTransferred int64 `xml:"bytesTransferred"` + BytesToTransfer int64 `xml:"bytesToTransfer"` + ChecksumTotalBytes int64 `xml:"checksumTotalBytes,omitempty"` + ChecksumComparedBytes int64 `xml:"checksumComparedBytes,omitempty"` +} + +func init() { + t["ReplicationVmProgressInfo"] = reflect.TypeOf((*ReplicationVmProgressInfo)(nil)).Elem() +} + +type RequestCanceled struct { + RuntimeFault +} + +func init() { + t["RequestCanceled"] = reflect.TypeOf((*RequestCanceled)(nil)).Elem() +} + +type RequestCanceledFault RequestCanceled + +func init() { + t["RequestCanceledFault"] = reflect.TypeOf((*RequestCanceledFault)(nil)).Elem() +} + +type RescanAllHba RescanAllHbaRequestType + +func init() { + t["RescanAllHba"] = reflect.TypeOf((*RescanAllHba)(nil)).Elem() +} + +type RescanAllHbaRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RescanAllHbaRequestType"] = reflect.TypeOf((*RescanAllHbaRequestType)(nil)).Elem() +} + +type RescanAllHbaResponse struct { +} + +type RescanHba RescanHbaRequestType + +func init() { + t["RescanHba"] = reflect.TypeOf((*RescanHba)(nil)).Elem() +} + +type RescanHbaRequestType struct { + This ManagedObjectReference `xml:"_this"` + HbaDevice string `xml:"hbaDevice"` +} + +func init() { + t["RescanHbaRequestType"] = reflect.TypeOf((*RescanHbaRequestType)(nil)).Elem() +} + +type RescanHbaResponse struct { +} + +type RescanVffs RescanVffsRequestType + +func init() { + t["RescanVffs"] = reflect.TypeOf((*RescanVffs)(nil)).Elem() +} + +type RescanVffsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RescanVffsRequestType"] = reflect.TypeOf((*RescanVffsRequestType)(nil)).Elem() +} + +type RescanVffsResponse struct { +} + +type RescanVmfs RescanVmfsRequestType + +func init() { + t["RescanVmfs"] = reflect.TypeOf((*RescanVmfs)(nil)).Elem() +} + +type RescanVmfsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RescanVmfsRequestType"] = reflect.TypeOf((*RescanVmfsRequestType)(nil)).Elem() +} + +type RescanVmfsResponse struct { +} + +type ResetCollector ResetCollectorRequestType + +func init() { + t["ResetCollector"] = reflect.TypeOf((*ResetCollector)(nil)).Elem() +} + +type ResetCollectorRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ResetCollectorRequestType"] = reflect.TypeOf((*ResetCollectorRequestType)(nil)).Elem() +} + +type ResetCollectorResponse struct { +} + +type ResetCounterLevelMapping ResetCounterLevelMappingRequestType + +func init() { + t["ResetCounterLevelMapping"] = reflect.TypeOf((*ResetCounterLevelMapping)(nil)).Elem() +} + +type ResetCounterLevelMappingRequestType struct { + This ManagedObjectReference `xml:"_this"` + Counters []int32 `xml:"counters"` +} + +func init() { + t["ResetCounterLevelMappingRequestType"] = reflect.TypeOf((*ResetCounterLevelMappingRequestType)(nil)).Elem() +} + +type ResetCounterLevelMappingResponse struct { +} + +type ResetEntityPermissions ResetEntityPermissionsRequestType + +func init() { + t["ResetEntityPermissions"] = reflect.TypeOf((*ResetEntityPermissions)(nil)).Elem() +} + +type ResetEntityPermissionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + Permission []Permission `xml:"permission,omitempty"` +} + +func init() { + t["ResetEntityPermissionsRequestType"] = 
reflect.TypeOf((*ResetEntityPermissionsRequestType)(nil)).Elem() +} + +type ResetEntityPermissionsResponse struct { +} + +type ResetFirmwareToFactoryDefaults ResetFirmwareToFactoryDefaultsRequestType + +func init() { + t["ResetFirmwareToFactoryDefaults"] = reflect.TypeOf((*ResetFirmwareToFactoryDefaults)(nil)).Elem() +} + +type ResetFirmwareToFactoryDefaultsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ResetFirmwareToFactoryDefaultsRequestType"] = reflect.TypeOf((*ResetFirmwareToFactoryDefaultsRequestType)(nil)).Elem() +} + +type ResetFirmwareToFactoryDefaultsResponse struct { +} + +type ResetGuestInformation ResetGuestInformationRequestType + +func init() { + t["ResetGuestInformation"] = reflect.TypeOf((*ResetGuestInformation)(nil)).Elem() +} + +type ResetGuestInformationRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ResetGuestInformationRequestType"] = reflect.TypeOf((*ResetGuestInformationRequestType)(nil)).Elem() +} + +type ResetGuestInformationResponse struct { +} + +type ResetListView ResetListViewRequestType + +func init() { + t["ResetListView"] = reflect.TypeOf((*ResetListView)(nil)).Elem() +} + +type ResetListViewFromView ResetListViewFromViewRequestType + +func init() { + t["ResetListViewFromView"] = reflect.TypeOf((*ResetListViewFromView)(nil)).Elem() +} + +type ResetListViewFromViewRequestType struct { + This ManagedObjectReference `xml:"_this"` + View ManagedObjectReference `xml:"view"` +} + +func init() { + t["ResetListViewFromViewRequestType"] = reflect.TypeOf((*ResetListViewFromViewRequestType)(nil)).Elem() +} + +type ResetListViewFromViewResponse struct { +} + +type ResetListViewRequestType struct { + This ManagedObjectReference `xml:"_this"` + Obj []ManagedObjectReference `xml:"obj,omitempty"` +} + +func init() { + t["ResetListViewRequestType"] = reflect.TypeOf((*ResetListViewRequestType)(nil)).Elem() +} + +type ResetListViewResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type ResetSystemHealthInfo ResetSystemHealthInfoRequestType + +func init() { + t["ResetSystemHealthInfo"] = reflect.TypeOf((*ResetSystemHealthInfo)(nil)).Elem() +} + +type ResetSystemHealthInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ResetSystemHealthInfoRequestType"] = reflect.TypeOf((*ResetSystemHealthInfoRequestType)(nil)).Elem() +} + +type ResetSystemHealthInfoResponse struct { +} + +type ResetVMRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ResetVMRequestType"] = reflect.TypeOf((*ResetVMRequestType)(nil)).Elem() +} + +type ResetVM_Task ResetVMRequestType + +func init() { + t["ResetVM_Task"] = reflect.TypeOf((*ResetVM_Task)(nil)).Elem() +} + +type ResetVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ResignatureUnresolvedVmfsVolumeRequestType struct { + This ManagedObjectReference `xml:"_this"` + ResolutionSpec HostUnresolvedVmfsResignatureSpec `xml:"resolutionSpec"` +} + +func init() { + t["ResignatureUnresolvedVmfsVolumeRequestType"] = reflect.TypeOf((*ResignatureUnresolvedVmfsVolumeRequestType)(nil)).Elem() +} + +type ResignatureUnresolvedVmfsVolume_Task ResignatureUnresolvedVmfsVolumeRequestType + +func init() { + t["ResignatureUnresolvedVmfsVolume_Task"] = reflect.TypeOf((*ResignatureUnresolvedVmfsVolume_Task)(nil)).Elem() +} + +type ResignatureUnresolvedVmfsVolume_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` 
+} + +type ResolveInstallationErrorsOnClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + FilterId string `xml:"filterId"` + Cluster ManagedObjectReference `xml:"cluster"` +} + +func init() { + t["ResolveInstallationErrorsOnClusterRequestType"] = reflect.TypeOf((*ResolveInstallationErrorsOnClusterRequestType)(nil)).Elem() +} + +type ResolveInstallationErrorsOnCluster_Task ResolveInstallationErrorsOnClusterRequestType + +func init() { + t["ResolveInstallationErrorsOnCluster_Task"] = reflect.TypeOf((*ResolveInstallationErrorsOnCluster_Task)(nil)).Elem() +} + +type ResolveInstallationErrorsOnCluster_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ResolveInstallationErrorsOnHostRequestType struct { + This ManagedObjectReference `xml:"_this"` + FilterId string `xml:"filterId"` + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["ResolveInstallationErrorsOnHostRequestType"] = reflect.TypeOf((*ResolveInstallationErrorsOnHostRequestType)(nil)).Elem() +} + +type ResolveInstallationErrorsOnHost_Task ResolveInstallationErrorsOnHostRequestType + +func init() { + t["ResolveInstallationErrorsOnHost_Task"] = reflect.TypeOf((*ResolveInstallationErrorsOnHost_Task)(nil)).Elem() +} + +type ResolveInstallationErrorsOnHost_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ResolveMultipleUnresolvedVmfsVolumes ResolveMultipleUnresolvedVmfsVolumesRequestType + +func init() { + t["ResolveMultipleUnresolvedVmfsVolumes"] = reflect.TypeOf((*ResolveMultipleUnresolvedVmfsVolumes)(nil)).Elem() +} + +type ResolveMultipleUnresolvedVmfsVolumesExRequestType struct { + This ManagedObjectReference `xml:"_this"` + ResolutionSpec []HostUnresolvedVmfsResolutionSpec `xml:"resolutionSpec"` +} + +func init() { + t["ResolveMultipleUnresolvedVmfsVolumesExRequestType"] = reflect.TypeOf((*ResolveMultipleUnresolvedVmfsVolumesExRequestType)(nil)).Elem() +} + +type ResolveMultipleUnresolvedVmfsVolumesEx_Task ResolveMultipleUnresolvedVmfsVolumesExRequestType + +func init() { + t["ResolveMultipleUnresolvedVmfsVolumesEx_Task"] = reflect.TypeOf((*ResolveMultipleUnresolvedVmfsVolumesEx_Task)(nil)).Elem() +} + +type ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ResolveMultipleUnresolvedVmfsVolumesRequestType struct { + This ManagedObjectReference `xml:"_this"` + ResolutionSpec []HostUnresolvedVmfsResolutionSpec `xml:"resolutionSpec"` +} + +func init() { + t["ResolveMultipleUnresolvedVmfsVolumesRequestType"] = reflect.TypeOf((*ResolveMultipleUnresolvedVmfsVolumesRequestType)(nil)).Elem() +} + +type ResolveMultipleUnresolvedVmfsVolumesResponse struct { + Returnval []HostUnresolvedVmfsResolutionResult `xml:"returnval,omitempty"` +} + +type ResourceAllocationInfo struct { + DynamicData + + Reservation *int64 `xml:"reservation"` + ExpandableReservation *bool `xml:"expandableReservation"` + Limit *int64 `xml:"limit"` + Shares *SharesInfo `xml:"shares,omitempty"` + OverheadLimit *int64 `xml:"overheadLimit"` +} + +func init() { + t["ResourceAllocationInfo"] = reflect.TypeOf((*ResourceAllocationInfo)(nil)).Elem() +} + +type ResourceAllocationOption struct { + DynamicData + + SharesOption SharesOption `xml:"sharesOption"` +} + +func init() { + t["ResourceAllocationOption"] = reflect.TypeOf((*ResourceAllocationOption)(nil)).Elem() +} + +type ResourceConfigOption struct { + DynamicData + + CpuAllocationOption ResourceAllocationOption `xml:"cpuAllocationOption"` + 
MemoryAllocationOption ResourceAllocationOption `xml:"memoryAllocationOption"` +} + +func init() { + t["ResourceConfigOption"] = reflect.TypeOf((*ResourceConfigOption)(nil)).Elem() +} + +type ResourceConfigSpec struct { + DynamicData + + Entity *ManagedObjectReference `xml:"entity,omitempty"` + ChangeVersion string `xml:"changeVersion,omitempty"` + LastModified *time.Time `xml:"lastModified"` + CpuAllocation ResourceAllocationInfo `xml:"cpuAllocation"` + MemoryAllocation ResourceAllocationInfo `xml:"memoryAllocation"` +} + +func init() { + t["ResourceConfigSpec"] = reflect.TypeOf((*ResourceConfigSpec)(nil)).Elem() +} + +type ResourceInUse struct { + VimFault + + Type string `xml:"type,omitempty"` + Name string `xml:"name,omitempty"` +} + +func init() { + t["ResourceInUse"] = reflect.TypeOf((*ResourceInUse)(nil)).Elem() +} + +type ResourceInUseFault BaseResourceInUse + +func init() { + t["ResourceInUseFault"] = reflect.TypeOf((*ResourceInUseFault)(nil)).Elem() +} + +type ResourceNotAvailable struct { + VimFault + + ContainerType string `xml:"containerType,omitempty"` + ContainerName string `xml:"containerName,omitempty"` + Type string `xml:"type,omitempty"` +} + +func init() { + t["ResourceNotAvailable"] = reflect.TypeOf((*ResourceNotAvailable)(nil)).Elem() +} + +type ResourceNotAvailableFault ResourceNotAvailable + +func init() { + t["ResourceNotAvailableFault"] = reflect.TypeOf((*ResourceNotAvailableFault)(nil)).Elem() +} + +type ResourcePoolCreatedEvent struct { + ResourcePoolEvent + + Parent ResourcePoolEventArgument `xml:"parent"` +} + +func init() { + t["ResourcePoolCreatedEvent"] = reflect.TypeOf((*ResourcePoolCreatedEvent)(nil)).Elem() +} + +type ResourcePoolDestroyedEvent struct { + ResourcePoolEvent +} + +func init() { + t["ResourcePoolDestroyedEvent"] = reflect.TypeOf((*ResourcePoolDestroyedEvent)(nil)).Elem() +} + +type ResourcePoolEvent struct { + Event + + ResourcePool ResourcePoolEventArgument `xml:"resourcePool"` +} + +func init() { + t["ResourcePoolEvent"] = reflect.TypeOf((*ResourcePoolEvent)(nil)).Elem() +} + +type ResourcePoolEventArgument struct { + EntityEventArgument + + ResourcePool ManagedObjectReference `xml:"resourcePool"` +} + +func init() { + t["ResourcePoolEventArgument"] = reflect.TypeOf((*ResourcePoolEventArgument)(nil)).Elem() +} + +type ResourcePoolMovedEvent struct { + ResourcePoolEvent + + OldParent ResourcePoolEventArgument `xml:"oldParent"` + NewParent ResourcePoolEventArgument `xml:"newParent"` +} + +func init() { + t["ResourcePoolMovedEvent"] = reflect.TypeOf((*ResourcePoolMovedEvent)(nil)).Elem() +} + +type ResourcePoolQuickStats struct { + DynamicData + + OverallCpuUsage int64 `xml:"overallCpuUsage,omitempty"` + OverallCpuDemand int64 `xml:"overallCpuDemand,omitempty"` + GuestMemoryUsage int64 `xml:"guestMemoryUsage,omitempty"` + HostMemoryUsage int64 `xml:"hostMemoryUsage,omitempty"` + DistributedCpuEntitlement int64 `xml:"distributedCpuEntitlement,omitempty"` + DistributedMemoryEntitlement int64 `xml:"distributedMemoryEntitlement,omitempty"` + StaticCpuEntitlement int32 `xml:"staticCpuEntitlement,omitempty"` + StaticMemoryEntitlement int32 `xml:"staticMemoryEntitlement,omitempty"` + PrivateMemory int64 `xml:"privateMemory,omitempty"` + SharedMemory int64 `xml:"sharedMemory,omitempty"` + SwappedMemory int64 `xml:"swappedMemory,omitempty"` + BalloonedMemory int64 `xml:"balloonedMemory,omitempty"` + OverheadMemory int64 `xml:"overheadMemory,omitempty"` + ConsumedOverheadMemory int64 `xml:"consumedOverheadMemory,omitempty"` + CompressedMemory int64 
`xml:"compressedMemory,omitempty"` +} + +func init() { + t["ResourcePoolQuickStats"] = reflect.TypeOf((*ResourcePoolQuickStats)(nil)).Elem() +} + +type ResourcePoolReconfiguredEvent struct { + ResourcePoolEvent + + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty"` +} + +func init() { + t["ResourcePoolReconfiguredEvent"] = reflect.TypeOf((*ResourcePoolReconfiguredEvent)(nil)).Elem() +} + +type ResourcePoolResourceUsage struct { + DynamicData + + ReservationUsed int64 `xml:"reservationUsed"` + ReservationUsedForVm int64 `xml:"reservationUsedForVm"` + UnreservedForPool int64 `xml:"unreservedForPool"` + UnreservedForVm int64 `xml:"unreservedForVm"` + OverallUsage int64 `xml:"overallUsage"` + MaxUsage int64 `xml:"maxUsage"` +} + +func init() { + t["ResourcePoolResourceUsage"] = reflect.TypeOf((*ResourcePoolResourceUsage)(nil)).Elem() +} + +type ResourcePoolRuntimeInfo struct { + DynamicData + + Memory ResourcePoolResourceUsage `xml:"memory"` + Cpu ResourcePoolResourceUsage `xml:"cpu"` + OverallStatus ManagedEntityStatus `xml:"overallStatus"` +} + +func init() { + t["ResourcePoolRuntimeInfo"] = reflect.TypeOf((*ResourcePoolRuntimeInfo)(nil)).Elem() +} + +type ResourcePoolSummary struct { + DynamicData + + Name string `xml:"name"` + Config ResourceConfigSpec `xml:"config"` + Runtime ResourcePoolRuntimeInfo `xml:"runtime"` + QuickStats *ResourcePoolQuickStats `xml:"quickStats,omitempty"` + ConfiguredMemoryMB int32 `xml:"configuredMemoryMB,omitempty"` +} + +func init() { + t["ResourcePoolSummary"] = reflect.TypeOf((*ResourcePoolSummary)(nil)).Elem() +} + +type ResourceViolatedEvent struct { + ResourcePoolEvent +} + +func init() { + t["ResourceViolatedEvent"] = reflect.TypeOf((*ResourceViolatedEvent)(nil)).Elem() +} + +type RestartService RestartServiceRequestType + +func init() { + t["RestartService"] = reflect.TypeOf((*RestartService)(nil)).Elem() +} + +type RestartServiceConsoleVirtualNic RestartServiceConsoleVirtualNicRequestType + +func init() { + t["RestartServiceConsoleVirtualNic"] = reflect.TypeOf((*RestartServiceConsoleVirtualNic)(nil)).Elem() +} + +type RestartServiceConsoleVirtualNicRequestType struct { + This ManagedObjectReference `xml:"_this"` + Device string `xml:"device"` +} + +func init() { + t["RestartServiceConsoleVirtualNicRequestType"] = reflect.TypeOf((*RestartServiceConsoleVirtualNicRequestType)(nil)).Elem() +} + +type RestartServiceConsoleVirtualNicResponse struct { +} + +type RestartServiceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id string `xml:"id"` +} + +func init() { + t["RestartServiceRequestType"] = reflect.TypeOf((*RestartServiceRequestType)(nil)).Elem() +} + +type RestartServiceResponse struct { +} + +type RestoreFirmwareConfiguration RestoreFirmwareConfigurationRequestType + +func init() { + t["RestoreFirmwareConfiguration"] = reflect.TypeOf((*RestoreFirmwareConfiguration)(nil)).Elem() +} + +type RestoreFirmwareConfigurationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Force bool `xml:"force"` +} + +func init() { + t["RestoreFirmwareConfigurationRequestType"] = reflect.TypeOf((*RestoreFirmwareConfigurationRequestType)(nil)).Elem() +} + +type RestoreFirmwareConfigurationResponse struct { +} + +type RestrictedByAdministrator struct { + RuntimeFault + + Details string `xml:"details"` +} + +func init() { + t["RestrictedByAdministrator"] = reflect.TypeOf((*RestrictedByAdministrator)(nil)).Elem() +} + +type RestrictedByAdministratorFault RestrictedByAdministrator + +func init() { + 
t["RestrictedByAdministratorFault"] = reflect.TypeOf((*RestrictedByAdministratorFault)(nil)).Elem() +} + +type RestrictedVersion struct { + SecurityError +} + +func init() { + t["RestrictedVersion"] = reflect.TypeOf((*RestrictedVersion)(nil)).Elem() +} + +type RestrictedVersionFault RestrictedVersion + +func init() { + t["RestrictedVersionFault"] = reflect.TypeOf((*RestrictedVersionFault)(nil)).Elem() +} + +type RetrieveAllPermissions RetrieveAllPermissionsRequestType + +func init() { + t["RetrieveAllPermissions"] = reflect.TypeOf((*RetrieveAllPermissions)(nil)).Elem() +} + +type RetrieveAllPermissionsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveAllPermissionsRequestType"] = reflect.TypeOf((*RetrieveAllPermissionsRequestType)(nil)).Elem() +} + +type RetrieveAllPermissionsResponse struct { + Returnval []Permission `xml:"returnval,omitempty"` +} + +type RetrieveAnswerFile RetrieveAnswerFileRequestType + +func init() { + t["RetrieveAnswerFile"] = reflect.TypeOf((*RetrieveAnswerFile)(nil)).Elem() +} + +type RetrieveAnswerFileForProfile RetrieveAnswerFileForProfileRequestType + +func init() { + t["RetrieveAnswerFileForProfile"] = reflect.TypeOf((*RetrieveAnswerFileForProfile)(nil)).Elem() +} + +type RetrieveAnswerFileForProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` + ApplyProfile HostApplyProfile `xml:"applyProfile"` +} + +func init() { + t["RetrieveAnswerFileForProfileRequestType"] = reflect.TypeOf((*RetrieveAnswerFileForProfileRequestType)(nil)).Elem() +} + +type RetrieveAnswerFileForProfileResponse struct { + Returnval *AnswerFile `xml:"returnval,omitempty"` +} + +type RetrieveAnswerFileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["RetrieveAnswerFileRequestType"] = reflect.TypeOf((*RetrieveAnswerFileRequestType)(nil)).Elem() +} + +type RetrieveAnswerFileResponse struct { + Returnval *AnswerFile `xml:"returnval,omitempty"` +} + +type RetrieveArgumentDescription RetrieveArgumentDescriptionRequestType + +func init() { + t["RetrieveArgumentDescription"] = reflect.TypeOf((*RetrieveArgumentDescription)(nil)).Elem() +} + +type RetrieveArgumentDescriptionRequestType struct { + This ManagedObjectReference `xml:"_this"` + EventTypeId string `xml:"eventTypeId"` +} + +func init() { + t["RetrieveArgumentDescriptionRequestType"] = reflect.TypeOf((*RetrieveArgumentDescriptionRequestType)(nil)).Elem() +} + +type RetrieveArgumentDescriptionResponse struct { + Returnval []EventArgDesc `xml:"returnval,omitempty"` +} + +type RetrieveClientCert RetrieveClientCertRequestType + +func init() { + t["RetrieveClientCert"] = reflect.TypeOf((*RetrieveClientCert)(nil)).Elem() +} + +type RetrieveClientCertRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster KeyProviderId `xml:"cluster"` +} + +func init() { + t["RetrieveClientCertRequestType"] = reflect.TypeOf((*RetrieveClientCertRequestType)(nil)).Elem() +} + +type RetrieveClientCertResponse struct { + Returnval string `xml:"returnval"` +} + +type RetrieveClientCsr RetrieveClientCsrRequestType + +func init() { + t["RetrieveClientCsr"] = reflect.TypeOf((*RetrieveClientCsr)(nil)).Elem() +} + +type RetrieveClientCsrRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster KeyProviderId `xml:"cluster"` +} + +func init() { + t["RetrieveClientCsrRequestType"] = reflect.TypeOf((*RetrieveClientCsrRequestType)(nil)).Elem() +} + +type 
RetrieveClientCsrResponse struct { + Returnval string `xml:"returnval"` +} + +type RetrieveDasAdvancedRuntimeInfo RetrieveDasAdvancedRuntimeInfoRequestType + +func init() { + t["RetrieveDasAdvancedRuntimeInfo"] = reflect.TypeOf((*RetrieveDasAdvancedRuntimeInfo)(nil)).Elem() +} + +type RetrieveDasAdvancedRuntimeInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveDasAdvancedRuntimeInfoRequestType"] = reflect.TypeOf((*RetrieveDasAdvancedRuntimeInfoRequestType)(nil)).Elem() +} + +type RetrieveDasAdvancedRuntimeInfoResponse struct { + Returnval BaseClusterDasAdvancedRuntimeInfo `xml:"returnval,omitempty,typeattr"` +} + +type RetrieveDescription RetrieveDescriptionRequestType + +func init() { + t["RetrieveDescription"] = reflect.TypeOf((*RetrieveDescription)(nil)).Elem() +} + +type RetrieveDescriptionRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveDescriptionRequestType"] = reflect.TypeOf((*RetrieveDescriptionRequestType)(nil)).Elem() +} + +type RetrieveDescriptionResponse struct { + Returnval *ProfileDescription `xml:"returnval,omitempty"` +} + +type RetrieveDiskPartitionInfo RetrieveDiskPartitionInfoRequestType + +func init() { + t["RetrieveDiskPartitionInfo"] = reflect.TypeOf((*RetrieveDiskPartitionInfo)(nil)).Elem() +} + +type RetrieveDiskPartitionInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` + DevicePath []string `xml:"devicePath"` +} + +func init() { + t["RetrieveDiskPartitionInfoRequestType"] = reflect.TypeOf((*RetrieveDiskPartitionInfoRequestType)(nil)).Elem() +} + +type RetrieveDiskPartitionInfoResponse struct { + Returnval []HostDiskPartitionInfo `xml:"returnval,omitempty"` +} + +type RetrieveEntityPermissions RetrieveEntityPermissionsRequestType + +func init() { + t["RetrieveEntityPermissions"] = reflect.TypeOf((*RetrieveEntityPermissions)(nil)).Elem() +} + +type RetrieveEntityPermissionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + Inherited bool `xml:"inherited"` +} + +func init() { + t["RetrieveEntityPermissionsRequestType"] = reflect.TypeOf((*RetrieveEntityPermissionsRequestType)(nil)).Elem() +} + +type RetrieveEntityPermissionsResponse struct { + Returnval []Permission `xml:"returnval,omitempty"` +} + +type RetrieveEntityScheduledTask RetrieveEntityScheduledTaskRequestType + +func init() { + t["RetrieveEntityScheduledTask"] = reflect.TypeOf((*RetrieveEntityScheduledTask)(nil)).Elem() +} + +type RetrieveEntityScheduledTaskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["RetrieveEntityScheduledTaskRequestType"] = reflect.TypeOf((*RetrieveEntityScheduledTaskRequestType)(nil)).Elem() +} + +type RetrieveEntityScheduledTaskResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type RetrieveHardwareUptime RetrieveHardwareUptimeRequestType + +func init() { + t["RetrieveHardwareUptime"] = reflect.TypeOf((*RetrieveHardwareUptime)(nil)).Elem() +} + +type RetrieveHardwareUptimeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveHardwareUptimeRequestType"] = reflect.TypeOf((*RetrieveHardwareUptimeRequestType)(nil)).Elem() +} + +type RetrieveHardwareUptimeResponse struct { + Returnval int64 `xml:"returnval"` +} + +type RetrieveHostAccessControlEntries RetrieveHostAccessControlEntriesRequestType + +func init() { + 
t["RetrieveHostAccessControlEntries"] = reflect.TypeOf((*RetrieveHostAccessControlEntries)(nil)).Elem() +} + +type RetrieveHostAccessControlEntriesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveHostAccessControlEntriesRequestType"] = reflect.TypeOf((*RetrieveHostAccessControlEntriesRequestType)(nil)).Elem() +} + +type RetrieveHostAccessControlEntriesResponse struct { + Returnval []HostAccessControlEntry `xml:"returnval,omitempty"` +} + +type RetrieveHostCustomizations RetrieveHostCustomizationsRequestType + +func init() { + t["RetrieveHostCustomizations"] = reflect.TypeOf((*RetrieveHostCustomizations)(nil)).Elem() +} + +type RetrieveHostCustomizationsForProfile RetrieveHostCustomizationsForProfileRequestType + +func init() { + t["RetrieveHostCustomizationsForProfile"] = reflect.TypeOf((*RetrieveHostCustomizationsForProfile)(nil)).Elem() +} + +type RetrieveHostCustomizationsForProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Hosts []ManagedObjectReference `xml:"hosts,omitempty"` + ApplyProfile HostApplyProfile `xml:"applyProfile"` +} + +func init() { + t["RetrieveHostCustomizationsForProfileRequestType"] = reflect.TypeOf((*RetrieveHostCustomizationsForProfileRequestType)(nil)).Elem() +} + +type RetrieveHostCustomizationsForProfileResponse struct { + Returnval []StructuredCustomizations `xml:"returnval,omitempty"` +} + +type RetrieveHostCustomizationsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Hosts []ManagedObjectReference `xml:"hosts,omitempty"` +} + +func init() { + t["RetrieveHostCustomizationsRequestType"] = reflect.TypeOf((*RetrieveHostCustomizationsRequestType)(nil)).Elem() +} + +type RetrieveHostCustomizationsResponse struct { + Returnval []StructuredCustomizations `xml:"returnval,omitempty"` +} + +type RetrieveHostSpecification RetrieveHostSpecificationRequestType + +func init() { + t["RetrieveHostSpecification"] = reflect.TypeOf((*RetrieveHostSpecification)(nil)).Elem() +} + +type RetrieveHostSpecificationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` + FromHost bool `xml:"fromHost"` +} + +func init() { + t["RetrieveHostSpecificationRequestType"] = reflect.TypeOf((*RetrieveHostSpecificationRequestType)(nil)).Elem() +} + +type RetrieveHostSpecificationResponse struct { + Returnval HostSpecification `xml:"returnval"` +} + +type RetrieveKmipServerCert RetrieveKmipServerCertRequestType + +func init() { + t["RetrieveKmipServerCert"] = reflect.TypeOf((*RetrieveKmipServerCert)(nil)).Elem() +} + +type RetrieveKmipServerCertRequestType struct { + This ManagedObjectReference `xml:"_this"` + KeyProvider KeyProviderId `xml:"keyProvider"` + Server KmipServerInfo `xml:"server"` +} + +func init() { + t["RetrieveKmipServerCertRequestType"] = reflect.TypeOf((*RetrieveKmipServerCertRequestType)(nil)).Elem() +} + +type RetrieveKmipServerCertResponse struct { + Returnval CryptoManagerKmipServerCertInfo `xml:"returnval"` +} + +type RetrieveKmipServersStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + Clusters []KmipClusterInfo `xml:"clusters,omitempty"` +} + +func init() { + t["RetrieveKmipServersStatusRequestType"] = reflect.TypeOf((*RetrieveKmipServersStatusRequestType)(nil)).Elem() +} + +type RetrieveKmipServersStatus_Task RetrieveKmipServersStatusRequestType + +func init() { + t["RetrieveKmipServersStatus_Task"] = reflect.TypeOf((*RetrieveKmipServersStatus_Task)(nil)).Elem() +} + +type 
RetrieveKmipServersStatus_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RetrieveObjectScheduledTask RetrieveObjectScheduledTaskRequestType + +func init() { + t["RetrieveObjectScheduledTask"] = reflect.TypeOf((*RetrieveObjectScheduledTask)(nil)).Elem() +} + +type RetrieveObjectScheduledTaskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Obj *ManagedObjectReference `xml:"obj,omitempty"` +} + +func init() { + t["RetrieveObjectScheduledTaskRequestType"] = reflect.TypeOf((*RetrieveObjectScheduledTaskRequestType)(nil)).Elem() +} + +type RetrieveObjectScheduledTaskResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type RetrieveOptions struct { + DynamicData + + MaxObjects int32 `xml:"maxObjects,omitempty"` +} + +func init() { + t["RetrieveOptions"] = reflect.TypeOf((*RetrieveOptions)(nil)).Elem() +} + +type RetrieveProductComponents RetrieveProductComponentsRequestType + +func init() { + t["RetrieveProductComponents"] = reflect.TypeOf((*RetrieveProductComponents)(nil)).Elem() +} + +type RetrieveProductComponentsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveProductComponentsRequestType"] = reflect.TypeOf((*RetrieveProductComponentsRequestType)(nil)).Elem() +} + +type RetrieveProductComponentsResponse struct { + Returnval []ProductComponentInfo `xml:"returnval,omitempty"` +} + +type RetrieveProperties RetrievePropertiesRequestType + +func init() { + t["RetrieveProperties"] = reflect.TypeOf((*RetrieveProperties)(nil)).Elem() +} + +type RetrievePropertiesEx RetrievePropertiesExRequestType + +func init() { + t["RetrievePropertiesEx"] = reflect.TypeOf((*RetrievePropertiesEx)(nil)).Elem() +} + +type RetrievePropertiesExRequestType struct { + This ManagedObjectReference `xml:"_this"` + SpecSet []PropertyFilterSpec `xml:"specSet"` + Options RetrieveOptions `xml:"options"` +} + +func init() { + t["RetrievePropertiesExRequestType"] = reflect.TypeOf((*RetrievePropertiesExRequestType)(nil)).Elem() +} + +type RetrievePropertiesExResponse struct { + Returnval *RetrieveResult `xml:"returnval,omitempty"` +} + +type RetrievePropertiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + SpecSet []PropertyFilterSpec `xml:"specSet"` +} + +func init() { + t["RetrievePropertiesRequestType"] = reflect.TypeOf((*RetrievePropertiesRequestType)(nil)).Elem() +} + +type RetrievePropertiesResponse struct { + Returnval []ObjectContent `xml:"returnval,omitempty"` +} + +type RetrieveResult struct { + DynamicData + + Token string `xml:"token,omitempty"` + Objects []ObjectContent `xml:"objects"` +} + +func init() { + t["RetrieveResult"] = reflect.TypeOf((*RetrieveResult)(nil)).Elem() +} + +type RetrieveRolePermissions RetrieveRolePermissionsRequestType + +func init() { + t["RetrieveRolePermissions"] = reflect.TypeOf((*RetrieveRolePermissions)(nil)).Elem() +} + +type RetrieveRolePermissionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + RoleId int32 `xml:"roleId"` +} + +func init() { + t["RetrieveRolePermissionsRequestType"] = reflect.TypeOf((*RetrieveRolePermissionsRequestType)(nil)).Elem() +} + +type RetrieveRolePermissionsResponse struct { + Returnval []Permission `xml:"returnval,omitempty"` +} + +type RetrieveSelfSignedClientCert RetrieveSelfSignedClientCertRequestType + +func init() { + t["RetrieveSelfSignedClientCert"] = reflect.TypeOf((*RetrieveSelfSignedClientCert)(nil)).Elem() +} + +type RetrieveSelfSignedClientCertRequestType struct { + This 
ManagedObjectReference `xml:"_this"` + Cluster KeyProviderId `xml:"cluster"` +} + +func init() { + t["RetrieveSelfSignedClientCertRequestType"] = reflect.TypeOf((*RetrieveSelfSignedClientCertRequestType)(nil)).Elem() +} + +type RetrieveSelfSignedClientCertResponse struct { + Returnval string `xml:"returnval"` +} + +type RetrieveServiceContent RetrieveServiceContentRequestType + +func init() { + t["RetrieveServiceContent"] = reflect.TypeOf((*RetrieveServiceContent)(nil)).Elem() +} + +type RetrieveServiceContentRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveServiceContentRequestType"] = reflect.TypeOf((*RetrieveServiceContentRequestType)(nil)).Elem() +} + +type RetrieveServiceContentResponse struct { + Returnval ServiceContent `xml:"returnval"` +} + +type RetrieveSnapshotInfo RetrieveSnapshotInfoRequestType + +func init() { + t["RetrieveSnapshotInfo"] = reflect.TypeOf((*RetrieveSnapshotInfo)(nil)).Elem() +} + +type RetrieveSnapshotInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["RetrieveSnapshotInfoRequestType"] = reflect.TypeOf((*RetrieveSnapshotInfoRequestType)(nil)).Elem() +} + +type RetrieveSnapshotInfoResponse struct { + Returnval VStorageObjectSnapshotInfo `xml:"returnval"` +} + +type RetrieveUserGroups RetrieveUserGroupsRequestType + +func init() { + t["RetrieveUserGroups"] = reflect.TypeOf((*RetrieveUserGroups)(nil)).Elem() +} + +type RetrieveUserGroupsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Domain string `xml:"domain,omitempty"` + SearchStr string `xml:"searchStr"` + BelongsToGroup string `xml:"belongsToGroup,omitempty"` + BelongsToUser string `xml:"belongsToUser,omitempty"` + ExactMatch bool `xml:"exactMatch"` + FindUsers bool `xml:"findUsers"` + FindGroups bool `xml:"findGroups"` +} + +func init() { + t["RetrieveUserGroupsRequestType"] = reflect.TypeOf((*RetrieveUserGroupsRequestType)(nil)).Elem() +} + +type RetrieveUserGroupsResponse struct { + Returnval []BaseUserSearchResult `xml:"returnval,omitempty,typeattr"` +} + +type RetrieveVStorageInfrastructureObjectPolicy RetrieveVStorageInfrastructureObjectPolicyRequestType + +func init() { + t["RetrieveVStorageInfrastructureObjectPolicy"] = reflect.TypeOf((*RetrieveVStorageInfrastructureObjectPolicy)(nil)).Elem() +} + +type RetrieveVStorageInfrastructureObjectPolicyRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["RetrieveVStorageInfrastructureObjectPolicyRequestType"] = reflect.TypeOf((*RetrieveVStorageInfrastructureObjectPolicyRequestType)(nil)).Elem() +} + +type RetrieveVStorageInfrastructureObjectPolicyResponse struct { + Returnval []VslmInfrastructureObjectPolicy `xml:"returnval,omitempty"` +} + +type RetrieveVStorageObjSpec struct { + DynamicData + + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["RetrieveVStorageObjSpec"] = reflect.TypeOf((*RetrieveVStorageObjSpec)(nil)).Elem() +} + +type RetrieveVStorageObject RetrieveVStorageObjectRequestType + +func init() { + t["RetrieveVStorageObject"] = reflect.TypeOf((*RetrieveVStorageObject)(nil)).Elem() +} + +type RetrieveVStorageObjectAssociations RetrieveVStorageObjectAssociationsRequestType + +func init() { + t["RetrieveVStorageObjectAssociations"] = reflect.TypeOf((*RetrieveVStorageObjectAssociations)(nil)).Elem() +} + +type 
RetrieveVStorageObjectAssociationsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Ids []RetrieveVStorageObjSpec `xml:"ids,omitempty"` +} + +func init() { + t["RetrieveVStorageObjectAssociationsRequestType"] = reflect.TypeOf((*RetrieveVStorageObjectAssociationsRequestType)(nil)).Elem() +} + +type RetrieveVStorageObjectAssociationsResponse struct { + Returnval []VStorageObjectAssociations `xml:"returnval,omitempty"` +} + +type RetrieveVStorageObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["RetrieveVStorageObjectRequestType"] = reflect.TypeOf((*RetrieveVStorageObjectRequestType)(nil)).Elem() +} + +type RetrieveVStorageObjectResponse struct { + Returnval VStorageObject `xml:"returnval"` +} + +type RetrieveVStorageObjectState RetrieveVStorageObjectStateRequestType + +func init() { + t["RetrieveVStorageObjectState"] = reflect.TypeOf((*RetrieveVStorageObjectState)(nil)).Elem() +} + +type RetrieveVStorageObjectStateRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["RetrieveVStorageObjectStateRequestType"] = reflect.TypeOf((*RetrieveVStorageObjectStateRequestType)(nil)).Elem() +} + +type RetrieveVStorageObjectStateResponse struct { + Returnval VStorageObjectStateInfo `xml:"returnval"` +} + +type RevertToCurrentSnapshotRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` + SuppressPowerOn *bool `xml:"suppressPowerOn"` +} + +func init() { + t["RevertToCurrentSnapshotRequestType"] = reflect.TypeOf((*RevertToCurrentSnapshotRequestType)(nil)).Elem() +} + +type RevertToCurrentSnapshot_Task RevertToCurrentSnapshotRequestType + +func init() { + t["RevertToCurrentSnapshot_Task"] = reflect.TypeOf((*RevertToCurrentSnapshot_Task)(nil)).Elem() +} + +type RevertToCurrentSnapshot_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RevertToSnapshotRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` + SuppressPowerOn *bool `xml:"suppressPowerOn"` +} + +func init() { + t["RevertToSnapshotRequestType"] = reflect.TypeOf((*RevertToSnapshotRequestType)(nil)).Elem() +} + +type RevertToSnapshot_Task RevertToSnapshotRequestType + +func init() { + t["RevertToSnapshot_Task"] = reflect.TypeOf((*RevertToSnapshot_Task)(nil)).Elem() +} + +type RevertToSnapshot_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RevertVStorageObjectRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + SnapshotId ID `xml:"snapshotId"` +} + +func init() { + t["RevertVStorageObjectRequestType"] = reflect.TypeOf((*RevertVStorageObjectRequestType)(nil)).Elem() +} + +type RevertVStorageObject_Task RevertVStorageObjectRequestType + +func init() { + t["RevertVStorageObject_Task"] = reflect.TypeOf((*RevertVStorageObject_Task)(nil)).Elem() +} + +type RevertVStorageObject_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type RewindCollector RewindCollectorRequestType + +func init() { + t["RewindCollector"] = reflect.TypeOf((*RewindCollector)(nil)).Elem() +} + +type RewindCollectorRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RewindCollectorRequestType"] = 
reflect.TypeOf((*RewindCollectorRequestType)(nil)).Elem() +} + +type RewindCollectorResponse struct { +} + +type RoleAddedEvent struct { + RoleEvent + + PrivilegeList []string `xml:"privilegeList,omitempty"` +} + +func init() { + t["RoleAddedEvent"] = reflect.TypeOf((*RoleAddedEvent)(nil)).Elem() +} + +type RoleEvent struct { + AuthorizationEvent + + Role RoleEventArgument `xml:"role"` +} + +func init() { + t["RoleEvent"] = reflect.TypeOf((*RoleEvent)(nil)).Elem() +} + +type RoleEventArgument struct { + EventArgument + + RoleId int32 `xml:"roleId"` + Name string `xml:"name"` +} + +func init() { + t["RoleEventArgument"] = reflect.TypeOf((*RoleEventArgument)(nil)).Elem() +} + +type RoleRemovedEvent struct { + RoleEvent +} + +func init() { + t["RoleRemovedEvent"] = reflect.TypeOf((*RoleRemovedEvent)(nil)).Elem() +} + +type RoleUpdatedEvent struct { + RoleEvent + + PrivilegeList []string `xml:"privilegeList,omitempty"` + PrevRoleName string `xml:"prevRoleName,omitempty"` + PrivilegesAdded []string `xml:"privilegesAdded,omitempty"` + PrivilegesRemoved []string `xml:"privilegesRemoved,omitempty"` +} + +func init() { + t["RoleUpdatedEvent"] = reflect.TypeOf((*RoleUpdatedEvent)(nil)).Elem() +} + +type RollbackEvent struct { + DvsEvent + + HostName string `xml:"hostName"` + MethodName string `xml:"methodName,omitempty"` +} + +func init() { + t["RollbackEvent"] = reflect.TypeOf((*RollbackEvent)(nil)).Elem() +} + +type RollbackFailure struct { + DvsFault + + EntityName string `xml:"entityName"` + EntityType string `xml:"entityType"` +} + +func init() { + t["RollbackFailure"] = reflect.TypeOf((*RollbackFailure)(nil)).Elem() +} + +type RollbackFailureFault RollbackFailure + +func init() { + t["RollbackFailureFault"] = reflect.TypeOf((*RollbackFailureFault)(nil)).Elem() +} + +type RuleViolation struct { + VmConfigFault + + Host *ManagedObjectReference `xml:"host,omitempty"` + Rule BaseClusterRuleInfo `xml:"rule,omitempty,typeattr"` +} + +func init() { + t["RuleViolation"] = reflect.TypeOf((*RuleViolation)(nil)).Elem() +} + +type RuleViolationFault RuleViolation + +func init() { + t["RuleViolationFault"] = reflect.TypeOf((*RuleViolationFault)(nil)).Elem() +} + +type RunScheduledTask RunScheduledTaskRequestType + +func init() { + t["RunScheduledTask"] = reflect.TypeOf((*RunScheduledTask)(nil)).Elem() +} + +type RunScheduledTaskRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RunScheduledTaskRequestType"] = reflect.TypeOf((*RunScheduledTaskRequestType)(nil)).Elem() +} + +type RunScheduledTaskResponse struct { +} + +type RunScriptAction struct { + Action + + Script string `xml:"script"` +} + +func init() { + t["RunScriptAction"] = reflect.TypeOf((*RunScriptAction)(nil)).Elem() +} + +type RunVsanPhysicalDiskDiagnostics RunVsanPhysicalDiskDiagnosticsRequestType + +func init() { + t["RunVsanPhysicalDiskDiagnostics"] = reflect.TypeOf((*RunVsanPhysicalDiskDiagnostics)(nil)).Elem() +} + +type RunVsanPhysicalDiskDiagnosticsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Disks []string `xml:"disks,omitempty"` +} + +func init() { + t["RunVsanPhysicalDiskDiagnosticsRequestType"] = reflect.TypeOf((*RunVsanPhysicalDiskDiagnosticsRequestType)(nil)).Elem() +} + +type RunVsanPhysicalDiskDiagnosticsResponse struct { + Returnval []HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult `xml:"returnval"` +} + +type RuntimeFault struct { + MethodFault +} + +func init() { + t["RuntimeFault"] = reflect.TypeOf((*RuntimeFault)(nil)).Elem() +} + +type RuntimeFaultFault 
BaseRuntimeFault + +func init() { + t["RuntimeFaultFault"] = reflect.TypeOf((*RuntimeFaultFault)(nil)).Elem() +} + +type SAMLTokenAuthentication struct { + GuestAuthentication + + Token string `xml:"token"` + Username string `xml:"username,omitempty"` +} + +func init() { + t["SAMLTokenAuthentication"] = reflect.TypeOf((*SAMLTokenAuthentication)(nil)).Elem() +} + +type SDDCBase struct { + DynamicData +} + +func init() { + t["SDDCBase"] = reflect.TypeOf((*SDDCBase)(nil)).Elem() +} + +type SSLDisabledFault struct { + HostConnectFault +} + +func init() { + t["SSLDisabledFault"] = reflect.TypeOf((*SSLDisabledFault)(nil)).Elem() +} + +type SSLDisabledFaultFault SSLDisabledFault + +func init() { + t["SSLDisabledFaultFault"] = reflect.TypeOf((*SSLDisabledFaultFault)(nil)).Elem() +} + +type SSLVerifyFault struct { + HostConnectFault + + SelfSigned bool `xml:"selfSigned"` + Thumbprint string `xml:"thumbprint"` +} + +func init() { + t["SSLVerifyFault"] = reflect.TypeOf((*SSLVerifyFault)(nil)).Elem() +} + +type SSLVerifyFaultFault SSLVerifyFault + +func init() { + t["SSLVerifyFaultFault"] = reflect.TypeOf((*SSLVerifyFaultFault)(nil)).Elem() +} + +type SSPIAuthentication struct { + GuestAuthentication + + SspiToken string `xml:"sspiToken"` +} + +func init() { + t["SSPIAuthentication"] = reflect.TypeOf((*SSPIAuthentication)(nil)).Elem() +} + +type SSPIChallenge struct { + VimFault + + Base64Token string `xml:"base64Token"` +} + +func init() { + t["SSPIChallenge"] = reflect.TypeOf((*SSPIChallenge)(nil)).Elem() +} + +type SSPIChallengeFault SSPIChallenge + +func init() { + t["SSPIChallengeFault"] = reflect.TypeOf((*SSPIChallengeFault)(nil)).Elem() +} + +type ScanHostPatchRequestType struct { + This ManagedObjectReference `xml:"_this"` + Repository HostPatchManagerLocator `xml:"repository"` + UpdateID []string `xml:"updateID,omitempty"` +} + +func init() { + t["ScanHostPatchRequestType"] = reflect.TypeOf((*ScanHostPatchRequestType)(nil)).Elem() +} + +type ScanHostPatchV2RequestType struct { + This ManagedObjectReference `xml:"_this"` + MetaUrls []string `xml:"metaUrls,omitempty"` + BundleUrls []string `xml:"bundleUrls,omitempty"` + Spec *HostPatchManagerPatchManagerOperationSpec `xml:"spec,omitempty"` +} + +func init() { + t["ScanHostPatchV2RequestType"] = reflect.TypeOf((*ScanHostPatchV2RequestType)(nil)).Elem() +} + +type ScanHostPatchV2_Task ScanHostPatchV2RequestType + +func init() { + t["ScanHostPatchV2_Task"] = reflect.TypeOf((*ScanHostPatchV2_Task)(nil)).Elem() +} + +type ScanHostPatchV2_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ScanHostPatch_Task ScanHostPatchRequestType + +func init() { + t["ScanHostPatch_Task"] = reflect.TypeOf((*ScanHostPatch_Task)(nil)).Elem() +} + +type ScanHostPatch_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ScheduleReconcileDatastoreInventory ScheduleReconcileDatastoreInventoryRequestType + +func init() { + t["ScheduleReconcileDatastoreInventory"] = reflect.TypeOf((*ScheduleReconcileDatastoreInventory)(nil)).Elem() +} + +type ScheduleReconcileDatastoreInventoryRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["ScheduleReconcileDatastoreInventoryRequestType"] = reflect.TypeOf((*ScheduleReconcileDatastoreInventoryRequestType)(nil)).Elem() +} + +type ScheduleReconcileDatastoreInventoryResponse struct { +} + +type ScheduledHardwareUpgradeInfo struct { + DynamicData + + UpgradePolicy string 
`xml:"upgradePolicy,omitempty"` + VersionKey string `xml:"versionKey,omitempty"` + ScheduledHardwareUpgradeStatus string `xml:"scheduledHardwareUpgradeStatus,omitempty"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["ScheduledHardwareUpgradeInfo"] = reflect.TypeOf((*ScheduledHardwareUpgradeInfo)(nil)).Elem() +} + +type ScheduledTaskCompletedEvent struct { + ScheduledTaskEvent +} + +func init() { + t["ScheduledTaskCompletedEvent"] = reflect.TypeOf((*ScheduledTaskCompletedEvent)(nil)).Elem() +} + +type ScheduledTaskCreatedEvent struct { + ScheduledTaskEvent +} + +func init() { + t["ScheduledTaskCreatedEvent"] = reflect.TypeOf((*ScheduledTaskCreatedEvent)(nil)).Elem() +} + +type ScheduledTaskDescription struct { + DynamicData + + Action []BaseTypeDescription `xml:"action,typeattr"` + SchedulerInfo []ScheduledTaskDetail `xml:"schedulerInfo"` + State []BaseElementDescription `xml:"state,typeattr"` + DayOfWeek []BaseElementDescription `xml:"dayOfWeek,typeattr"` + WeekOfMonth []BaseElementDescription `xml:"weekOfMonth,typeattr"` +} + +func init() { + t["ScheduledTaskDescription"] = reflect.TypeOf((*ScheduledTaskDescription)(nil)).Elem() +} + +type ScheduledTaskDetail struct { + TypeDescription + + Frequency string `xml:"frequency"` +} + +func init() { + t["ScheduledTaskDetail"] = reflect.TypeOf((*ScheduledTaskDetail)(nil)).Elem() +} + +type ScheduledTaskEmailCompletedEvent struct { + ScheduledTaskEvent + + To string `xml:"to"` +} + +func init() { + t["ScheduledTaskEmailCompletedEvent"] = reflect.TypeOf((*ScheduledTaskEmailCompletedEvent)(nil)).Elem() +} + +type ScheduledTaskEmailFailedEvent struct { + ScheduledTaskEvent + + To string `xml:"to"` + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["ScheduledTaskEmailFailedEvent"] = reflect.TypeOf((*ScheduledTaskEmailFailedEvent)(nil)).Elem() +} + +type ScheduledTaskEvent struct { + Event + + ScheduledTask ScheduledTaskEventArgument `xml:"scheduledTask"` + Entity ManagedEntityEventArgument `xml:"entity"` +} + +func init() { + t["ScheduledTaskEvent"] = reflect.TypeOf((*ScheduledTaskEvent)(nil)).Elem() +} + +type ScheduledTaskEventArgument struct { + EntityEventArgument + + ScheduledTask ManagedObjectReference `xml:"scheduledTask"` +} + +func init() { + t["ScheduledTaskEventArgument"] = reflect.TypeOf((*ScheduledTaskEventArgument)(nil)).Elem() +} + +type ScheduledTaskFailedEvent struct { + ScheduledTaskEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["ScheduledTaskFailedEvent"] = reflect.TypeOf((*ScheduledTaskFailedEvent)(nil)).Elem() +} + +type ScheduledTaskInfo struct { + ScheduledTaskSpec + + ScheduledTask ManagedObjectReference `xml:"scheduledTask"` + Entity ManagedObjectReference `xml:"entity"` + LastModifiedTime time.Time `xml:"lastModifiedTime"` + LastModifiedUser string `xml:"lastModifiedUser"` + NextRunTime *time.Time `xml:"nextRunTime"` + PrevRunTime *time.Time `xml:"prevRunTime"` + State TaskInfoState `xml:"state"` + Error *LocalizedMethodFault `xml:"error,omitempty"` + Result AnyType `xml:"result,omitempty,typeattr"` + Progress int32 `xml:"progress,omitempty"` + ActiveTask *ManagedObjectReference `xml:"activeTask,omitempty"` + TaskObject *ManagedObjectReference `xml:"taskObject,omitempty"` +} + +func init() { + t["ScheduledTaskInfo"] = reflect.TypeOf((*ScheduledTaskInfo)(nil)).Elem() +} + +type ScheduledTaskReconfiguredEvent struct { + ScheduledTaskEvent + + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty"` +} + +func init() { + 
t["ScheduledTaskReconfiguredEvent"] = reflect.TypeOf((*ScheduledTaskReconfiguredEvent)(nil)).Elem() +} + +type ScheduledTaskRemovedEvent struct { + ScheduledTaskEvent +} + +func init() { + t["ScheduledTaskRemovedEvent"] = reflect.TypeOf((*ScheduledTaskRemovedEvent)(nil)).Elem() +} + +type ScheduledTaskSpec struct { + DynamicData + + Name string `xml:"name"` + Description string `xml:"description"` + Enabled bool `xml:"enabled"` + Scheduler BaseTaskScheduler `xml:"scheduler,typeattr"` + Action BaseAction `xml:"action,typeattr"` + Notification string `xml:"notification,omitempty"` +} + +func init() { + t["ScheduledTaskSpec"] = reflect.TypeOf((*ScheduledTaskSpec)(nil)).Elem() +} + +type ScheduledTaskStartedEvent struct { + ScheduledTaskEvent +} + +func init() { + t["ScheduledTaskStartedEvent"] = reflect.TypeOf((*ScheduledTaskStartedEvent)(nil)).Elem() +} + +type ScsiLun struct { + HostDevice + + Key string `xml:"key,omitempty"` + Uuid string `xml:"uuid"` + Descriptor []ScsiLunDescriptor `xml:"descriptor,omitempty"` + CanonicalName string `xml:"canonicalName,omitempty"` + DisplayName string `xml:"displayName,omitempty"` + LunType string `xml:"lunType"` + Vendor string `xml:"vendor,omitempty"` + Model string `xml:"model,omitempty"` + Revision string `xml:"revision,omitempty"` + ScsiLevel int32 `xml:"scsiLevel,omitempty"` + SerialNumber string `xml:"serialNumber,omitempty"` + DurableName *ScsiLunDurableName `xml:"durableName,omitempty"` + AlternateName []ScsiLunDurableName `xml:"alternateName,omitempty"` + StandardInquiry []byte `xml:"standardInquiry,omitempty"` + QueueDepth int32 `xml:"queueDepth,omitempty"` + OperationalState []string `xml:"operationalState"` + Capabilities *ScsiLunCapabilities `xml:"capabilities,omitempty"` + VStorageSupport string `xml:"vStorageSupport,omitempty"` + ProtocolEndpoint *bool `xml:"protocolEndpoint"` +} + +func init() { + t["ScsiLun"] = reflect.TypeOf((*ScsiLun)(nil)).Elem() +} + +type ScsiLunCapabilities struct { + DynamicData + + UpdateDisplayNameSupported bool `xml:"updateDisplayNameSupported"` +} + +func init() { + t["ScsiLunCapabilities"] = reflect.TypeOf((*ScsiLunCapabilities)(nil)).Elem() +} + +type ScsiLunDescriptor struct { + DynamicData + + Quality string `xml:"quality"` + Id string `xml:"id"` +} + +func init() { + t["ScsiLunDescriptor"] = reflect.TypeOf((*ScsiLunDescriptor)(nil)).Elem() +} + +type ScsiLunDurableName struct { + DynamicData + + Namespace string `xml:"namespace"` + NamespaceId byte `xml:"namespaceId"` + Data []byte `xml:"data,omitempty"` +} + +func init() { + t["ScsiLunDurableName"] = reflect.TypeOf((*ScsiLunDurableName)(nil)).Elem() +} + +type SeSparseVirtualDiskSpec struct { + FileBackedVirtualDiskSpec + + GrainSizeKb int32 `xml:"grainSizeKb,omitempty"` +} + +func init() { + t["SeSparseVirtualDiskSpec"] = reflect.TypeOf((*SeSparseVirtualDiskSpec)(nil)).Elem() +} + +type SearchDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` + DatastorePath string `xml:"datastorePath"` + SearchSpec *HostDatastoreBrowserSearchSpec `xml:"searchSpec,omitempty"` +} + +func init() { + t["SearchDatastoreRequestType"] = reflect.TypeOf((*SearchDatastoreRequestType)(nil)).Elem() +} + +type SearchDatastoreSubFoldersRequestType struct { + This ManagedObjectReference `xml:"_this"` + DatastorePath string `xml:"datastorePath"` + SearchSpec *HostDatastoreBrowserSearchSpec `xml:"searchSpec,omitempty"` +} + +func init() { + t["SearchDatastoreSubFoldersRequestType"] = reflect.TypeOf((*SearchDatastoreSubFoldersRequestType)(nil)).Elem() +} + +type 
SearchDatastoreSubFolders_Task SearchDatastoreSubFoldersRequestType + +func init() { + t["SearchDatastoreSubFolders_Task"] = reflect.TypeOf((*SearchDatastoreSubFolders_Task)(nil)).Elem() +} + +type SearchDatastoreSubFolders_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type SearchDatastore_Task SearchDatastoreRequestType + +func init() { + t["SearchDatastore_Task"] = reflect.TypeOf((*SearchDatastore_Task)(nil)).Elem() +} + +type SearchDatastore_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type SecondaryVmAlreadyDisabled struct { + VmFaultToleranceIssue + + InstanceUuid string `xml:"instanceUuid"` +} + +func init() { + t["SecondaryVmAlreadyDisabled"] = reflect.TypeOf((*SecondaryVmAlreadyDisabled)(nil)).Elem() +} + +type SecondaryVmAlreadyDisabledFault SecondaryVmAlreadyDisabled + +func init() { + t["SecondaryVmAlreadyDisabledFault"] = reflect.TypeOf((*SecondaryVmAlreadyDisabledFault)(nil)).Elem() +} + +type SecondaryVmAlreadyEnabled struct { + VmFaultToleranceIssue + + InstanceUuid string `xml:"instanceUuid"` +} + +func init() { + t["SecondaryVmAlreadyEnabled"] = reflect.TypeOf((*SecondaryVmAlreadyEnabled)(nil)).Elem() +} + +type SecondaryVmAlreadyEnabledFault SecondaryVmAlreadyEnabled + +func init() { + t["SecondaryVmAlreadyEnabledFault"] = reflect.TypeOf((*SecondaryVmAlreadyEnabledFault)(nil)).Elem() +} + +type SecondaryVmAlreadyRegistered struct { + VmFaultToleranceIssue + + InstanceUuid string `xml:"instanceUuid,omitempty"` +} + +func init() { + t["SecondaryVmAlreadyRegistered"] = reflect.TypeOf((*SecondaryVmAlreadyRegistered)(nil)).Elem() +} + +type SecondaryVmAlreadyRegisteredFault SecondaryVmAlreadyRegistered + +func init() { + t["SecondaryVmAlreadyRegisteredFault"] = reflect.TypeOf((*SecondaryVmAlreadyRegisteredFault)(nil)).Elem() +} + +type SecondaryVmNotRegistered struct { + VmFaultToleranceIssue + + InstanceUuid string `xml:"instanceUuid,omitempty"` +} + +func init() { + t["SecondaryVmNotRegistered"] = reflect.TypeOf((*SecondaryVmNotRegistered)(nil)).Elem() +} + +type SecondaryVmNotRegisteredFault SecondaryVmNotRegistered + +func init() { + t["SecondaryVmNotRegisteredFault"] = reflect.TypeOf((*SecondaryVmNotRegisteredFault)(nil)).Elem() +} + +type SecurityError struct { + RuntimeFault +} + +func init() { + t["SecurityError"] = reflect.TypeOf((*SecurityError)(nil)).Elem() +} + +type SecurityErrorFault BaseSecurityError + +func init() { + t["SecurityErrorFault"] = reflect.TypeOf((*SecurityErrorFault)(nil)).Elem() +} + +type SecurityProfile struct { + ApplyProfile + + Permission []PermissionProfile `xml:"permission,omitempty"` +} + +func init() { + t["SecurityProfile"] = reflect.TypeOf((*SecurityProfile)(nil)).Elem() +} + +type SelectActivePartition SelectActivePartitionRequestType + +func init() { + t["SelectActivePartition"] = reflect.TypeOf((*SelectActivePartition)(nil)).Elem() +} + +type SelectActivePartitionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Partition *HostScsiDiskPartition `xml:"partition,omitempty"` +} + +func init() { + t["SelectActivePartitionRequestType"] = reflect.TypeOf((*SelectActivePartitionRequestType)(nil)).Elem() +} + +type SelectActivePartitionResponse struct { +} + +type SelectVnic SelectVnicRequestType + +func init() { + t["SelectVnic"] = reflect.TypeOf((*SelectVnic)(nil)).Elem() +} + +type SelectVnicForNicType SelectVnicForNicTypeRequestType + +func init() { + t["SelectVnicForNicType"] = reflect.TypeOf((*SelectVnicForNicType)(nil)).Elem() +} + +type 
SelectVnicForNicTypeRequestType struct { + This ManagedObjectReference `xml:"_this"` + NicType string `xml:"nicType"` + Device string `xml:"device"` +} + +func init() { + t["SelectVnicForNicTypeRequestType"] = reflect.TypeOf((*SelectVnicForNicTypeRequestType)(nil)).Elem() +} + +type SelectVnicForNicTypeResponse struct { +} + +type SelectVnicRequestType struct { + This ManagedObjectReference `xml:"_this"` + Device string `xml:"device"` +} + +func init() { + t["SelectVnicRequestType"] = reflect.TypeOf((*SelectVnicRequestType)(nil)).Elem() +} + +type SelectVnicResponse struct { +} + +type SelectionSet struct { + DynamicData +} + +func init() { + t["SelectionSet"] = reflect.TypeOf((*SelectionSet)(nil)).Elem() +} + +type SelectionSpec struct { + DynamicData + + Name string `xml:"name,omitempty"` +} + +func init() { + t["SelectionSpec"] = reflect.TypeOf((*SelectionSpec)(nil)).Elem() +} + +type SendEmailAction struct { + Action + + ToList string `xml:"toList"` + CcList string `xml:"ccList"` + Subject string `xml:"subject"` + Body string `xml:"body"` +} + +func init() { + t["SendEmailAction"] = reflect.TypeOf((*SendEmailAction)(nil)).Elem() +} + +type SendNMI SendNMIRequestType + +func init() { + t["SendNMI"] = reflect.TypeOf((*SendNMI)(nil)).Elem() +} + +type SendNMIRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["SendNMIRequestType"] = reflect.TypeOf((*SendNMIRequestType)(nil)).Elem() +} + +type SendNMIResponse struct { +} + +type SendSNMPAction struct { + Action +} + +func init() { + t["SendSNMPAction"] = reflect.TypeOf((*SendSNMPAction)(nil)).Elem() +} + +type SendTestNotification SendTestNotificationRequestType + +func init() { + t["SendTestNotification"] = reflect.TypeOf((*SendTestNotification)(nil)).Elem() +} + +type SendTestNotificationRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["SendTestNotificationRequestType"] = reflect.TypeOf((*SendTestNotificationRequestType)(nil)).Elem() +} + +type SendTestNotificationResponse struct { +} + +type ServerLicenseExpiredEvent struct { + LicenseEvent + + Product string `xml:"product"` +} + +func init() { + t["ServerLicenseExpiredEvent"] = reflect.TypeOf((*ServerLicenseExpiredEvent)(nil)).Elem() +} + +type ServerStartedSessionEvent struct { + SessionEvent +} + +func init() { + t["ServerStartedSessionEvent"] = reflect.TypeOf((*ServerStartedSessionEvent)(nil)).Elem() +} + +type ServiceConsolePortGroupProfile struct { + PortGroupProfile + + IpConfig IpAddressProfile `xml:"ipConfig"` +} + +func init() { + t["ServiceConsolePortGroupProfile"] = reflect.TypeOf((*ServiceConsolePortGroupProfile)(nil)).Elem() +} + +type ServiceConsoleReservationInfo struct { + DynamicData + + ServiceConsoleReservedCfg int64 `xml:"serviceConsoleReservedCfg"` + ServiceConsoleReserved int64 `xml:"serviceConsoleReserved"` + Unreserved int64 `xml:"unreserved"` +} + +func init() { + t["ServiceConsoleReservationInfo"] = reflect.TypeOf((*ServiceConsoleReservationInfo)(nil)).Elem() +} + +type ServiceContent struct { + DynamicData + + RootFolder ManagedObjectReference `xml:"rootFolder"` + PropertyCollector ManagedObjectReference `xml:"propertyCollector"` + ViewManager *ManagedObjectReference `xml:"viewManager,omitempty"` + About AboutInfo `xml:"about"` + Setting *ManagedObjectReference `xml:"setting,omitempty"` + UserDirectory *ManagedObjectReference `xml:"userDirectory,omitempty"` + SessionManager *ManagedObjectReference `xml:"sessionManager,omitempty"` + AuthorizationManager *ManagedObjectReference 
`xml:"authorizationManager,omitempty"` + ServiceManager *ManagedObjectReference `xml:"serviceManager,omitempty"` + PerfManager *ManagedObjectReference `xml:"perfManager,omitempty"` + ScheduledTaskManager *ManagedObjectReference `xml:"scheduledTaskManager,omitempty"` + AlarmManager *ManagedObjectReference `xml:"alarmManager,omitempty"` + EventManager *ManagedObjectReference `xml:"eventManager,omitempty"` + TaskManager *ManagedObjectReference `xml:"taskManager,omitempty"` + ExtensionManager *ManagedObjectReference `xml:"extensionManager,omitempty"` + CustomizationSpecManager *ManagedObjectReference `xml:"customizationSpecManager,omitempty"` + CustomFieldsManager *ManagedObjectReference `xml:"customFieldsManager,omitempty"` + AccountManager *ManagedObjectReference `xml:"accountManager,omitempty"` + DiagnosticManager *ManagedObjectReference `xml:"diagnosticManager,omitempty"` + LicenseManager *ManagedObjectReference `xml:"licenseManager,omitempty"` + SearchIndex *ManagedObjectReference `xml:"searchIndex,omitempty"` + FileManager *ManagedObjectReference `xml:"fileManager,omitempty"` + DatastoreNamespaceManager *ManagedObjectReference `xml:"datastoreNamespaceManager,omitempty"` + VirtualDiskManager *ManagedObjectReference `xml:"virtualDiskManager,omitempty"` + VirtualizationManager *ManagedObjectReference `xml:"virtualizationManager,omitempty"` + SnmpSystem *ManagedObjectReference `xml:"snmpSystem,omitempty"` + VmProvisioningChecker *ManagedObjectReference `xml:"vmProvisioningChecker,omitempty"` + VmCompatibilityChecker *ManagedObjectReference `xml:"vmCompatibilityChecker,omitempty"` + OvfManager *ManagedObjectReference `xml:"ovfManager,omitempty"` + IpPoolManager *ManagedObjectReference `xml:"ipPoolManager,omitempty"` + DvSwitchManager *ManagedObjectReference `xml:"dvSwitchManager,omitempty"` + HostProfileManager *ManagedObjectReference `xml:"hostProfileManager,omitempty"` + ClusterProfileManager *ManagedObjectReference `xml:"clusterProfileManager,omitempty"` + ComplianceManager *ManagedObjectReference `xml:"complianceManager,omitempty"` + LocalizationManager *ManagedObjectReference `xml:"localizationManager,omitempty"` + StorageResourceManager *ManagedObjectReference `xml:"storageResourceManager,omitempty"` + GuestOperationsManager *ManagedObjectReference `xml:"guestOperationsManager,omitempty"` + OverheadMemoryManager *ManagedObjectReference `xml:"overheadMemoryManager,omitempty"` + CertificateManager *ManagedObjectReference `xml:"certificateManager,omitempty"` + IoFilterManager *ManagedObjectReference `xml:"ioFilterManager,omitempty"` + VStorageObjectManager *ManagedObjectReference `xml:"vStorageObjectManager,omitempty"` + HostSpecManager *ManagedObjectReference `xml:"hostSpecManager,omitempty"` + CryptoManager *ManagedObjectReference `xml:"cryptoManager,omitempty"` + HealthUpdateManager *ManagedObjectReference `xml:"healthUpdateManager,omitempty"` + FailoverClusterConfigurator *ManagedObjectReference `xml:"failoverClusterConfigurator,omitempty"` + FailoverClusterManager *ManagedObjectReference `xml:"failoverClusterManager,omitempty"` +} + +func init() { + t["ServiceContent"] = reflect.TypeOf((*ServiceContent)(nil)).Elem() +} + +type ServiceLocator struct { + DynamicData + + InstanceUuid string `xml:"instanceUuid"` + Url string `xml:"url"` + Credential BaseServiceLocatorCredential `xml:"credential,typeattr"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` +} + +func init() { + t["ServiceLocator"] = reflect.TypeOf((*ServiceLocator)(nil)).Elem() +} + +type ServiceLocatorCredential 
struct { + DynamicData +} + +func init() { + t["ServiceLocatorCredential"] = reflect.TypeOf((*ServiceLocatorCredential)(nil)).Elem() +} + +type ServiceLocatorNamePassword struct { + ServiceLocatorCredential + + Username string `xml:"username"` + Password string `xml:"password"` +} + +func init() { + t["ServiceLocatorNamePassword"] = reflect.TypeOf((*ServiceLocatorNamePassword)(nil)).Elem() +} + +type ServiceLocatorSAMLCredential struct { + ServiceLocatorCredential + + Token string `xml:"token,omitempty"` +} + +func init() { + t["ServiceLocatorSAMLCredential"] = reflect.TypeOf((*ServiceLocatorSAMLCredential)(nil)).Elem() +} + +type ServiceManagerServiceInfo struct { + DynamicData + + ServiceName string `xml:"serviceName"` + Location []string `xml:"location,omitempty"` + Service ManagedObjectReference `xml:"service"` + Description string `xml:"description"` +} + +func init() { + t["ServiceManagerServiceInfo"] = reflect.TypeOf((*ServiceManagerServiceInfo)(nil)).Elem() +} + +type ServiceProfile struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["ServiceProfile"] = reflect.TypeOf((*ServiceProfile)(nil)).Elem() +} + +type SessionEvent struct { + Event +} + +func init() { + t["SessionEvent"] = reflect.TypeOf((*SessionEvent)(nil)).Elem() +} + +type SessionIsActive SessionIsActiveRequestType + +func init() { + t["SessionIsActive"] = reflect.TypeOf((*SessionIsActive)(nil)).Elem() +} + +type SessionIsActiveRequestType struct { + This ManagedObjectReference `xml:"_this"` + SessionID string `xml:"sessionID"` + UserName string `xml:"userName"` +} + +func init() { + t["SessionIsActiveRequestType"] = reflect.TypeOf((*SessionIsActiveRequestType)(nil)).Elem() +} + +type SessionIsActiveResponse struct { + Returnval bool `xml:"returnval"` +} + +type SessionManagerGenericServiceTicket struct { + DynamicData + + Id string `xml:"id"` + HostName string `xml:"hostName,omitempty"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` +} + +func init() { + t["SessionManagerGenericServiceTicket"] = reflect.TypeOf((*SessionManagerGenericServiceTicket)(nil)).Elem() +} + +type SessionManagerHttpServiceRequestSpec struct { + SessionManagerServiceRequestSpec + + Method string `xml:"method,omitempty"` + Url string `xml:"url"` +} + +func init() { + t["SessionManagerHttpServiceRequestSpec"] = reflect.TypeOf((*SessionManagerHttpServiceRequestSpec)(nil)).Elem() +} + +type SessionManagerLocalTicket struct { + DynamicData + + UserName string `xml:"userName"` + PasswordFilePath string `xml:"passwordFilePath"` +} + +func init() { + t["SessionManagerLocalTicket"] = reflect.TypeOf((*SessionManagerLocalTicket)(nil)).Elem() +} + +type SessionManagerServiceRequestSpec struct { + DynamicData +} + +func init() { + t["SessionManagerServiceRequestSpec"] = reflect.TypeOf((*SessionManagerServiceRequestSpec)(nil)).Elem() +} + +type SessionManagerVmomiServiceRequestSpec struct { + SessionManagerServiceRequestSpec + + Method string `xml:"method"` +} + +func init() { + t["SessionManagerVmomiServiceRequestSpec"] = reflect.TypeOf((*SessionManagerVmomiServiceRequestSpec)(nil)).Elem() +} + +type SessionTerminatedEvent struct { + SessionEvent + + SessionId string `xml:"sessionId"` + TerminatedUsername string `xml:"terminatedUsername"` +} + +func init() { + t["SessionTerminatedEvent"] = reflect.TypeOf((*SessionTerminatedEvent)(nil)).Elem() +} + +type SetCollectorPageSize SetCollectorPageSizeRequestType + +func init() { + t["SetCollectorPageSize"] = reflect.TypeOf((*SetCollectorPageSize)(nil)).Elem() +} + +type 
SetCollectorPageSizeRequestType struct { + This ManagedObjectReference `xml:"_this"` + MaxCount int32 `xml:"maxCount"` +} + +func init() { + t["SetCollectorPageSizeRequestType"] = reflect.TypeOf((*SetCollectorPageSizeRequestType)(nil)).Elem() +} + +type SetCollectorPageSizeResponse struct { +} + +type SetDisplayTopology SetDisplayTopologyRequestType + +func init() { + t["SetDisplayTopology"] = reflect.TypeOf((*SetDisplayTopology)(nil)).Elem() +} + +type SetDisplayTopologyRequestType struct { + This ManagedObjectReference `xml:"_this"` + Displays []VirtualMachineDisplayTopology `xml:"displays"` +} + +func init() { + t["SetDisplayTopologyRequestType"] = reflect.TypeOf((*SetDisplayTopologyRequestType)(nil)).Elem() +} + +type SetDisplayTopologyResponse struct { +} + +type SetEntityPermissions SetEntityPermissionsRequestType + +func init() { + t["SetEntityPermissions"] = reflect.TypeOf((*SetEntityPermissions)(nil)).Elem() +} + +type SetEntityPermissionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + Permission []Permission `xml:"permission,omitempty"` +} + +func init() { + t["SetEntityPermissionsRequestType"] = reflect.TypeOf((*SetEntityPermissionsRequestType)(nil)).Elem() +} + +type SetEntityPermissionsResponse struct { +} + +type SetExtensionCertificate SetExtensionCertificateRequestType + +func init() { + t["SetExtensionCertificate"] = reflect.TypeOf((*SetExtensionCertificate)(nil)).Elem() +} + +type SetExtensionCertificateRequestType struct { + This ManagedObjectReference `xml:"_this"` + ExtensionKey string `xml:"extensionKey"` + CertificatePem string `xml:"certificatePem,omitempty"` +} + +func init() { + t["SetExtensionCertificateRequestType"] = reflect.TypeOf((*SetExtensionCertificateRequestType)(nil)).Elem() +} + +type SetExtensionCertificateResponse struct { +} + +type SetField SetFieldRequestType + +func init() { + t["SetField"] = reflect.TypeOf((*SetField)(nil)).Elem() +} + +type SetFieldRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity ManagedObjectReference `xml:"entity"` + Key int32 `xml:"key"` + Value string `xml:"value"` +} + +func init() { + t["SetFieldRequestType"] = reflect.TypeOf((*SetFieldRequestType)(nil)).Elem() +} + +type SetFieldResponse struct { +} + +type SetLicenseEdition SetLicenseEditionRequestType + +func init() { + t["SetLicenseEdition"] = reflect.TypeOf((*SetLicenseEdition)(nil)).Elem() +} + +type SetLicenseEditionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` + FeatureKey string `xml:"featureKey,omitempty"` +} + +func init() { + t["SetLicenseEditionRequestType"] = reflect.TypeOf((*SetLicenseEditionRequestType)(nil)).Elem() +} + +type SetLicenseEditionResponse struct { +} + +type SetLocale SetLocaleRequestType + +func init() { + t["SetLocale"] = reflect.TypeOf((*SetLocale)(nil)).Elem() +} + +type SetLocaleRequestType struct { + This ManagedObjectReference `xml:"_this"` + Locale string `xml:"locale"` +} + +func init() { + t["SetLocaleRequestType"] = reflect.TypeOf((*SetLocaleRequestType)(nil)).Elem() +} + +type SetLocaleResponse struct { +} + +type SetMultipathLunPolicy SetMultipathLunPolicyRequestType + +func init() { + t["SetMultipathLunPolicy"] = reflect.TypeOf((*SetMultipathLunPolicy)(nil)).Elem() +} + +type SetMultipathLunPolicyRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunId string `xml:"lunId"` + Policy BaseHostMultipathInfoLogicalUnitPolicy `xml:"policy,typeattr"` +} + 
+func init() { + t["SetMultipathLunPolicyRequestType"] = reflect.TypeOf((*SetMultipathLunPolicyRequestType)(nil)).Elem() +} + +type SetMultipathLunPolicyResponse struct { +} + +type SetNFSUser SetNFSUserRequestType + +func init() { + t["SetNFSUser"] = reflect.TypeOf((*SetNFSUser)(nil)).Elem() +} + +type SetNFSUserRequestType struct { + This ManagedObjectReference `xml:"_this"` + User string `xml:"user"` + Password string `xml:"password"` +} + +func init() { + t["SetNFSUserRequestType"] = reflect.TypeOf((*SetNFSUserRequestType)(nil)).Elem() +} + +type SetNFSUserResponse struct { +} + +type SetPublicKey SetPublicKeyRequestType + +func init() { + t["SetPublicKey"] = reflect.TypeOf((*SetPublicKey)(nil)).Elem() +} + +type SetPublicKeyRequestType struct { + This ManagedObjectReference `xml:"_this"` + ExtensionKey string `xml:"extensionKey"` + PublicKey string `xml:"publicKey"` +} + +func init() { + t["SetPublicKeyRequestType"] = reflect.TypeOf((*SetPublicKeyRequestType)(nil)).Elem() +} + +type SetPublicKeyResponse struct { +} + +type SetRegistryValueInGuest SetRegistryValueInGuestRequestType + +func init() { + t["SetRegistryValueInGuest"] = reflect.TypeOf((*SetRegistryValueInGuest)(nil)).Elem() +} + +type SetRegistryValueInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Value GuestRegValueSpec `xml:"value"` +} + +func init() { + t["SetRegistryValueInGuestRequestType"] = reflect.TypeOf((*SetRegistryValueInGuestRequestType)(nil)).Elem() +} + +type SetRegistryValueInGuestResponse struct { +} + +type SetScreenResolution SetScreenResolutionRequestType + +func init() { + t["SetScreenResolution"] = reflect.TypeOf((*SetScreenResolution)(nil)).Elem() +} + +type SetScreenResolutionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Width int32 `xml:"width"` + Height int32 `xml:"height"` +} + +func init() { + t["SetScreenResolutionRequestType"] = reflect.TypeOf((*SetScreenResolutionRequestType)(nil)).Elem() +} + +type SetScreenResolutionResponse struct { +} + +type SetTaskDescription SetTaskDescriptionRequestType + +func init() { + t["SetTaskDescription"] = reflect.TypeOf((*SetTaskDescription)(nil)).Elem() +} + +type SetTaskDescriptionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Description LocalizableMessage `xml:"description"` +} + +func init() { + t["SetTaskDescriptionRequestType"] = reflect.TypeOf((*SetTaskDescriptionRequestType)(nil)).Elem() +} + +type SetTaskDescriptionResponse struct { +} + +type SetTaskState SetTaskStateRequestType + +func init() { + t["SetTaskState"] = reflect.TypeOf((*SetTaskState)(nil)).Elem() +} + +type SetTaskStateRequestType struct { + This ManagedObjectReference `xml:"_this"` + State TaskInfoState `xml:"state"` + Result AnyType `xml:"result,omitempty,typeattr"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["SetTaskStateRequestType"] = reflect.TypeOf((*SetTaskStateRequestType)(nil)).Elem() +} + +type SetTaskStateResponse struct { +} + +type SetVStorageObjectControlFlags SetVStorageObjectControlFlagsRequestType + +func init() { + t["SetVStorageObjectControlFlags"] = reflect.TypeOf((*SetVStorageObjectControlFlags)(nil)).Elem() +} + +type SetVStorageObjectControlFlagsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + ControlFlags []string `xml:"controlFlags,omitempty"` +} + +func init() { + 
t["SetVStorageObjectControlFlagsRequestType"] = reflect.TypeOf((*SetVStorageObjectControlFlagsRequestType)(nil)).Elem() +} + +type SetVStorageObjectControlFlagsResponse struct { +} + +type SetVirtualDiskUuid SetVirtualDiskUuidRequestType + +func init() { + t["SetVirtualDiskUuid"] = reflect.TypeOf((*SetVirtualDiskUuid)(nil)).Elem() +} + +type SetVirtualDiskUuidRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + Uuid string `xml:"uuid"` +} + +func init() { + t["SetVirtualDiskUuidRequestType"] = reflect.TypeOf((*SetVirtualDiskUuidRequestType)(nil)).Elem() +} + +type SetVirtualDiskUuidResponse struct { +} + +type SharedBusControllerNotSupported struct { + DeviceNotSupported +} + +func init() { + t["SharedBusControllerNotSupported"] = reflect.TypeOf((*SharedBusControllerNotSupported)(nil)).Elem() +} + +type SharedBusControllerNotSupportedFault SharedBusControllerNotSupported + +func init() { + t["SharedBusControllerNotSupportedFault"] = reflect.TypeOf((*SharedBusControllerNotSupportedFault)(nil)).Elem() +} + +type SharesInfo struct { + DynamicData + + Shares int32 `xml:"shares"` + Level SharesLevel `xml:"level"` +} + +func init() { + t["SharesInfo"] = reflect.TypeOf((*SharesInfo)(nil)).Elem() +} + +type SharesOption struct { + DynamicData + + SharesOption IntOption `xml:"sharesOption"` + DefaultLevel SharesLevel `xml:"defaultLevel"` +} + +func init() { + t["SharesOption"] = reflect.TypeOf((*SharesOption)(nil)).Elem() +} + +type ShrinkDiskFault struct { + VimFault + + DiskId int32 `xml:"diskId,omitempty"` +} + +func init() { + t["ShrinkDiskFault"] = reflect.TypeOf((*ShrinkDiskFault)(nil)).Elem() +} + +type ShrinkDiskFaultFault ShrinkDiskFault + +func init() { + t["ShrinkDiskFaultFault"] = reflect.TypeOf((*ShrinkDiskFaultFault)(nil)).Elem() +} + +type ShrinkVirtualDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` + Copy *bool `xml:"copy"` +} + +func init() { + t["ShrinkVirtualDiskRequestType"] = reflect.TypeOf((*ShrinkVirtualDiskRequestType)(nil)).Elem() +} + +type ShrinkVirtualDisk_Task ShrinkVirtualDiskRequestType + +func init() { + t["ShrinkVirtualDisk_Task"] = reflect.TypeOf((*ShrinkVirtualDisk_Task)(nil)).Elem() +} + +type ShrinkVirtualDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ShutdownGuest ShutdownGuestRequestType + +func init() { + t["ShutdownGuest"] = reflect.TypeOf((*ShutdownGuest)(nil)).Elem() +} + +type ShutdownGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["ShutdownGuestRequestType"] = reflect.TypeOf((*ShutdownGuestRequestType)(nil)).Elem() +} + +type ShutdownGuestResponse struct { +} + +type ShutdownHostRequestType struct { + This ManagedObjectReference `xml:"_this"` + Force bool `xml:"force"` +} + +func init() { + t["ShutdownHostRequestType"] = reflect.TypeOf((*ShutdownHostRequestType)(nil)).Elem() +} + +type ShutdownHost_Task ShutdownHostRequestType + +func init() { + t["ShutdownHost_Task"] = reflect.TypeOf((*ShutdownHost_Task)(nil)).Elem() +} + +type ShutdownHost_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type SingleIp struct { + IpAddress + + Address string `xml:"address"` +} + +func init() { + t["SingleIp"] = reflect.TypeOf((*SingleIp)(nil)).Elem() +} + +type SingleMac struct { + MacAddress + + Address string `xml:"address"` +} + 
+func init() { + t["SingleMac"] = reflect.TypeOf((*SingleMac)(nil)).Elem() +} + +type SnapshotCloneNotSupported struct { + SnapshotCopyNotSupported +} + +func init() { + t["SnapshotCloneNotSupported"] = reflect.TypeOf((*SnapshotCloneNotSupported)(nil)).Elem() +} + +type SnapshotCloneNotSupportedFault SnapshotCloneNotSupported + +func init() { + t["SnapshotCloneNotSupportedFault"] = reflect.TypeOf((*SnapshotCloneNotSupportedFault)(nil)).Elem() +} + +type SnapshotCopyNotSupported struct { + MigrationFault +} + +func init() { + t["SnapshotCopyNotSupported"] = reflect.TypeOf((*SnapshotCopyNotSupported)(nil)).Elem() +} + +type SnapshotCopyNotSupportedFault BaseSnapshotCopyNotSupported + +func init() { + t["SnapshotCopyNotSupportedFault"] = reflect.TypeOf((*SnapshotCopyNotSupportedFault)(nil)).Elem() +} + +type SnapshotDisabled struct { + SnapshotFault +} + +func init() { + t["SnapshotDisabled"] = reflect.TypeOf((*SnapshotDisabled)(nil)).Elem() +} + +type SnapshotDisabledFault SnapshotDisabled + +func init() { + t["SnapshotDisabledFault"] = reflect.TypeOf((*SnapshotDisabledFault)(nil)).Elem() +} + +type SnapshotFault struct { + VimFault +} + +func init() { + t["SnapshotFault"] = reflect.TypeOf((*SnapshotFault)(nil)).Elem() +} + +type SnapshotFaultFault BaseSnapshotFault + +func init() { + t["SnapshotFaultFault"] = reflect.TypeOf((*SnapshotFaultFault)(nil)).Elem() +} + +type SnapshotIncompatibleDeviceInVm struct { + SnapshotFault + + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["SnapshotIncompatibleDeviceInVm"] = reflect.TypeOf((*SnapshotIncompatibleDeviceInVm)(nil)).Elem() +} + +type SnapshotIncompatibleDeviceInVmFault SnapshotIncompatibleDeviceInVm + +func init() { + t["SnapshotIncompatibleDeviceInVmFault"] = reflect.TypeOf((*SnapshotIncompatibleDeviceInVmFault)(nil)).Elem() +} + +type SnapshotLocked struct { + SnapshotFault +} + +func init() { + t["SnapshotLocked"] = reflect.TypeOf((*SnapshotLocked)(nil)).Elem() +} + +type SnapshotLockedFault SnapshotLocked + +func init() { + t["SnapshotLockedFault"] = reflect.TypeOf((*SnapshotLockedFault)(nil)).Elem() +} + +type SnapshotMoveFromNonHomeNotSupported struct { + SnapshotCopyNotSupported +} + +func init() { + t["SnapshotMoveFromNonHomeNotSupported"] = reflect.TypeOf((*SnapshotMoveFromNonHomeNotSupported)(nil)).Elem() +} + +type SnapshotMoveFromNonHomeNotSupportedFault SnapshotMoveFromNonHomeNotSupported + +func init() { + t["SnapshotMoveFromNonHomeNotSupportedFault"] = reflect.TypeOf((*SnapshotMoveFromNonHomeNotSupportedFault)(nil)).Elem() +} + +type SnapshotMoveNotSupported struct { + SnapshotCopyNotSupported +} + +func init() { + t["SnapshotMoveNotSupported"] = reflect.TypeOf((*SnapshotMoveNotSupported)(nil)).Elem() +} + +type SnapshotMoveNotSupportedFault SnapshotMoveNotSupported + +func init() { + t["SnapshotMoveNotSupportedFault"] = reflect.TypeOf((*SnapshotMoveNotSupportedFault)(nil)).Elem() +} + +type SnapshotMoveToNonHomeNotSupported struct { + SnapshotCopyNotSupported +} + +func init() { + t["SnapshotMoveToNonHomeNotSupported"] = reflect.TypeOf((*SnapshotMoveToNonHomeNotSupported)(nil)).Elem() +} + +type SnapshotMoveToNonHomeNotSupportedFault SnapshotMoveToNonHomeNotSupported + +func init() { + t["SnapshotMoveToNonHomeNotSupportedFault"] = reflect.TypeOf((*SnapshotMoveToNonHomeNotSupportedFault)(nil)).Elem() +} + +type SnapshotNoChange struct { + SnapshotFault +} + +func init() { + t["SnapshotNoChange"] = reflect.TypeOf((*SnapshotNoChange)(nil)).Elem() +} + +type SnapshotNoChangeFault SnapshotNoChange + +func init() { 
+ t["SnapshotNoChangeFault"] = reflect.TypeOf((*SnapshotNoChangeFault)(nil)).Elem() +} + +type SnapshotRevertIssue struct { + MigrationFault + + SnapshotName string `xml:"snapshotName,omitempty"` + Event []BaseEvent `xml:"event,omitempty,typeattr"` + Errors bool `xml:"errors"` +} + +func init() { + t["SnapshotRevertIssue"] = reflect.TypeOf((*SnapshotRevertIssue)(nil)).Elem() +} + +type SnapshotRevertIssueFault SnapshotRevertIssue + +func init() { + t["SnapshotRevertIssueFault"] = reflect.TypeOf((*SnapshotRevertIssueFault)(nil)).Elem() +} + +type SoftRuleVioCorrectionDisallowed struct { + VmConfigFault + + VmName string `xml:"vmName"` +} + +func init() { + t["SoftRuleVioCorrectionDisallowed"] = reflect.TypeOf((*SoftRuleVioCorrectionDisallowed)(nil)).Elem() +} + +type SoftRuleVioCorrectionDisallowedFault SoftRuleVioCorrectionDisallowed + +func init() { + t["SoftRuleVioCorrectionDisallowedFault"] = reflect.TypeOf((*SoftRuleVioCorrectionDisallowedFault)(nil)).Elem() +} + +type SoftRuleVioCorrectionImpact struct { + VmConfigFault + + VmName string `xml:"vmName"` +} + +func init() { + t["SoftRuleVioCorrectionImpact"] = reflect.TypeOf((*SoftRuleVioCorrectionImpact)(nil)).Elem() +} + +type SoftRuleVioCorrectionImpactFault SoftRuleVioCorrectionImpact + +func init() { + t["SoftRuleVioCorrectionImpactFault"] = reflect.TypeOf((*SoftRuleVioCorrectionImpactFault)(nil)).Elem() +} + +type SoftwarePackage struct { + DynamicData + + Name string `xml:"name"` + Version string `xml:"version"` + Type string `xml:"type"` + Vendor string `xml:"vendor"` + AcceptanceLevel string `xml:"acceptanceLevel"` + Summary string `xml:"summary"` + Description string `xml:"description"` + ReferenceURL []string `xml:"referenceURL,omitempty"` + CreationDate *time.Time `xml:"creationDate"` + Depends []Relation `xml:"depends,omitempty"` + Conflicts []Relation `xml:"conflicts,omitempty"` + Replaces []Relation `xml:"replaces,omitempty"` + Provides []string `xml:"provides,omitempty"` + MaintenanceModeRequired *bool `xml:"maintenanceModeRequired"` + HardwarePlatformsRequired []string `xml:"hardwarePlatformsRequired,omitempty"` + Capability SoftwarePackageCapability `xml:"capability"` + Tag []string `xml:"tag,omitempty"` + Payload []string `xml:"payload,omitempty"` +} + +func init() { + t["SoftwarePackage"] = reflect.TypeOf((*SoftwarePackage)(nil)).Elem() +} + +type SoftwarePackageCapability struct { + DynamicData + + LiveInstallAllowed *bool `xml:"liveInstallAllowed"` + LiveRemoveAllowed *bool `xml:"liveRemoveAllowed"` + StatelessReady *bool `xml:"statelessReady"` + Overlay *bool `xml:"overlay"` +} + +func init() { + t["SoftwarePackageCapability"] = reflect.TypeOf((*SoftwarePackageCapability)(nil)).Elem() +} + +type SourceNodeSpec struct { + DynamicData + + ManagementVc ServiceLocator `xml:"managementVc"` + ActiveVc ManagedObjectReference `xml:"activeVc"` +} + +func init() { + t["SourceNodeSpec"] = reflect.TypeOf((*SourceNodeSpec)(nil)).Elem() +} + +type SsdDiskNotAvailable struct { + VimFault + + DevicePath string `xml:"devicePath"` +} + +func init() { + t["SsdDiskNotAvailable"] = reflect.TypeOf((*SsdDiskNotAvailable)(nil)).Elem() +} + +type SsdDiskNotAvailableFault SsdDiskNotAvailable + +func init() { + t["SsdDiskNotAvailableFault"] = reflect.TypeOf((*SsdDiskNotAvailableFault)(nil)).Elem() +} + +type StageHostPatchRequestType struct { + This ManagedObjectReference `xml:"_this"` + MetaUrls []string `xml:"metaUrls,omitempty"` + BundleUrls []string `xml:"bundleUrls,omitempty"` + VibUrls []string `xml:"vibUrls,omitempty"` + Spec 
*HostPatchManagerPatchManagerOperationSpec `xml:"spec,omitempty"` +} + +func init() { + t["StageHostPatchRequestType"] = reflect.TypeOf((*StageHostPatchRequestType)(nil)).Elem() +} + +type StageHostPatch_Task StageHostPatchRequestType + +func init() { + t["StageHostPatch_Task"] = reflect.TypeOf((*StageHostPatch_Task)(nil)).Elem() +} + +type StageHostPatch_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type StampAllRulesWithUuidRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["StampAllRulesWithUuidRequestType"] = reflect.TypeOf((*StampAllRulesWithUuidRequestType)(nil)).Elem() +} + +type StampAllRulesWithUuid_Task StampAllRulesWithUuidRequestType + +func init() { + t["StampAllRulesWithUuid_Task"] = reflect.TypeOf((*StampAllRulesWithUuid_Task)(nil)).Elem() +} + +type StampAllRulesWithUuid_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type StandbyGuest StandbyGuestRequestType + +func init() { + t["StandbyGuest"] = reflect.TypeOf((*StandbyGuest)(nil)).Elem() +} + +type StandbyGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["StandbyGuestRequestType"] = reflect.TypeOf((*StandbyGuestRequestType)(nil)).Elem() +} + +type StandbyGuestResponse struct { +} + +type StartProgramInGuest StartProgramInGuestRequestType + +func init() { + t["StartProgramInGuest"] = reflect.TypeOf((*StartProgramInGuest)(nil)).Elem() +} + +type StartProgramInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Spec BaseGuestProgramSpec `xml:"spec,typeattr"` +} + +func init() { + t["StartProgramInGuestRequestType"] = reflect.TypeOf((*StartProgramInGuestRequestType)(nil)).Elem() +} + +type StartProgramInGuestResponse struct { + Returnval int64 `xml:"returnval"` +} + +type StartRecordingRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Description string `xml:"description,omitempty"` +} + +func init() { + t["StartRecordingRequestType"] = reflect.TypeOf((*StartRecordingRequestType)(nil)).Elem() +} + +type StartRecording_Task StartRecordingRequestType + +func init() { + t["StartRecording_Task"] = reflect.TypeOf((*StartRecording_Task)(nil)).Elem() +} + +type StartRecording_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type StartReplayingRequestType struct { + This ManagedObjectReference `xml:"_this"` + ReplaySnapshot ManagedObjectReference `xml:"replaySnapshot"` +} + +func init() { + t["StartReplayingRequestType"] = reflect.TypeOf((*StartReplayingRequestType)(nil)).Elem() +} + +type StartReplaying_Task StartReplayingRequestType + +func init() { + t["StartReplaying_Task"] = reflect.TypeOf((*StartReplaying_Task)(nil)).Elem() +} + +type StartReplaying_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type StartService StartServiceRequestType + +func init() { + t["StartService"] = reflect.TypeOf((*StartService)(nil)).Elem() +} + +type StartServiceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id string `xml:"id"` +} + +func init() { + t["StartServiceRequestType"] = reflect.TypeOf((*StartServiceRequestType)(nil)).Elem() +} + +type StartServiceResponse struct { +} + +type StateAlarmExpression struct { + AlarmExpression + + Operator StateAlarmOperator `xml:"operator"` + Type string `xml:"type"` + StatePath string `xml:"statePath"` + Yellow string 
`xml:"yellow,omitempty"` + Red string `xml:"red,omitempty"` +} + +func init() { + t["StateAlarmExpression"] = reflect.TypeOf((*StateAlarmExpression)(nil)).Elem() +} + +type StaticRouteProfile struct { + ApplyProfile + + Key string `xml:"key,omitempty"` +} + +func init() { + t["StaticRouteProfile"] = reflect.TypeOf((*StaticRouteProfile)(nil)).Elem() +} + +type StopRecordingRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["StopRecordingRequestType"] = reflect.TypeOf((*StopRecordingRequestType)(nil)).Elem() +} + +type StopRecording_Task StopRecordingRequestType + +func init() { + t["StopRecording_Task"] = reflect.TypeOf((*StopRecording_Task)(nil)).Elem() +} + +type StopRecording_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type StopReplayingRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["StopReplayingRequestType"] = reflect.TypeOf((*StopReplayingRequestType)(nil)).Elem() +} + +type StopReplaying_Task StopReplayingRequestType + +func init() { + t["StopReplaying_Task"] = reflect.TypeOf((*StopReplaying_Task)(nil)).Elem() +} + +type StopReplaying_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type StopService StopServiceRequestType + +func init() { + t["StopService"] = reflect.TypeOf((*StopService)(nil)).Elem() +} + +type StopServiceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id string `xml:"id"` +} + +func init() { + t["StopServiceRequestType"] = reflect.TypeOf((*StopServiceRequestType)(nil)).Elem() +} + +type StopServiceResponse struct { +} + +type StorageDrsAutomationConfig struct { + DynamicData + + SpaceLoadBalanceAutomationMode string `xml:"spaceLoadBalanceAutomationMode,omitempty"` + IoLoadBalanceAutomationMode string `xml:"ioLoadBalanceAutomationMode,omitempty"` + RuleEnforcementAutomationMode string `xml:"ruleEnforcementAutomationMode,omitempty"` + PolicyEnforcementAutomationMode string `xml:"policyEnforcementAutomationMode,omitempty"` + VmEvacuationAutomationMode string `xml:"vmEvacuationAutomationMode,omitempty"` +} + +func init() { + t["StorageDrsAutomationConfig"] = reflect.TypeOf((*StorageDrsAutomationConfig)(nil)).Elem() +} + +type StorageDrsCannotMoveDiskInMultiWriterMode struct { + VimFault +} + +func init() { + t["StorageDrsCannotMoveDiskInMultiWriterMode"] = reflect.TypeOf((*StorageDrsCannotMoveDiskInMultiWriterMode)(nil)).Elem() +} + +type StorageDrsCannotMoveDiskInMultiWriterModeFault StorageDrsCannotMoveDiskInMultiWriterMode + +func init() { + t["StorageDrsCannotMoveDiskInMultiWriterModeFault"] = reflect.TypeOf((*StorageDrsCannotMoveDiskInMultiWriterModeFault)(nil)).Elem() +} + +type StorageDrsCannotMoveFTVm struct { + VimFault +} + +func init() { + t["StorageDrsCannotMoveFTVm"] = reflect.TypeOf((*StorageDrsCannotMoveFTVm)(nil)).Elem() +} + +type StorageDrsCannotMoveFTVmFault StorageDrsCannotMoveFTVm + +func init() { + t["StorageDrsCannotMoveFTVmFault"] = reflect.TypeOf((*StorageDrsCannotMoveFTVmFault)(nil)).Elem() +} + +type StorageDrsCannotMoveIndependentDisk struct { + VimFault +} + +func init() { + t["StorageDrsCannotMoveIndependentDisk"] = reflect.TypeOf((*StorageDrsCannotMoveIndependentDisk)(nil)).Elem() +} + +type StorageDrsCannotMoveIndependentDiskFault StorageDrsCannotMoveIndependentDisk + +func init() { + t["StorageDrsCannotMoveIndependentDiskFault"] = reflect.TypeOf((*StorageDrsCannotMoveIndependentDiskFault)(nil)).Elem() +} + +type StorageDrsCannotMoveManuallyPlacedSwapFile struct { + 
VimFault +} + +func init() { + t["StorageDrsCannotMoveManuallyPlacedSwapFile"] = reflect.TypeOf((*StorageDrsCannotMoveManuallyPlacedSwapFile)(nil)).Elem() +} + +type StorageDrsCannotMoveManuallyPlacedSwapFileFault StorageDrsCannotMoveManuallyPlacedSwapFile + +func init() { + t["StorageDrsCannotMoveManuallyPlacedSwapFileFault"] = reflect.TypeOf((*StorageDrsCannotMoveManuallyPlacedSwapFileFault)(nil)).Elem() +} + +type StorageDrsCannotMoveManuallyPlacedVm struct { + VimFault +} + +func init() { + t["StorageDrsCannotMoveManuallyPlacedVm"] = reflect.TypeOf((*StorageDrsCannotMoveManuallyPlacedVm)(nil)).Elem() +} + +type StorageDrsCannotMoveManuallyPlacedVmFault StorageDrsCannotMoveManuallyPlacedVm + +func init() { + t["StorageDrsCannotMoveManuallyPlacedVmFault"] = reflect.TypeOf((*StorageDrsCannotMoveManuallyPlacedVmFault)(nil)).Elem() +} + +type StorageDrsCannotMoveSharedDisk struct { + VimFault +} + +func init() { + t["StorageDrsCannotMoveSharedDisk"] = reflect.TypeOf((*StorageDrsCannotMoveSharedDisk)(nil)).Elem() +} + +type StorageDrsCannotMoveSharedDiskFault StorageDrsCannotMoveSharedDisk + +func init() { + t["StorageDrsCannotMoveSharedDiskFault"] = reflect.TypeOf((*StorageDrsCannotMoveSharedDiskFault)(nil)).Elem() +} + +type StorageDrsCannotMoveTemplate struct { + VimFault +} + +func init() { + t["StorageDrsCannotMoveTemplate"] = reflect.TypeOf((*StorageDrsCannotMoveTemplate)(nil)).Elem() +} + +type StorageDrsCannotMoveTemplateFault StorageDrsCannotMoveTemplate + +func init() { + t["StorageDrsCannotMoveTemplateFault"] = reflect.TypeOf((*StorageDrsCannotMoveTemplateFault)(nil)).Elem() +} + +type StorageDrsCannotMoveVmInUserFolder struct { + VimFault +} + +func init() { + t["StorageDrsCannotMoveVmInUserFolder"] = reflect.TypeOf((*StorageDrsCannotMoveVmInUserFolder)(nil)).Elem() +} + +type StorageDrsCannotMoveVmInUserFolderFault StorageDrsCannotMoveVmInUserFolder + +func init() { + t["StorageDrsCannotMoveVmInUserFolderFault"] = reflect.TypeOf((*StorageDrsCannotMoveVmInUserFolderFault)(nil)).Elem() +} + +type StorageDrsCannotMoveVmWithMountedCDROM struct { + VimFault +} + +func init() { + t["StorageDrsCannotMoveVmWithMountedCDROM"] = reflect.TypeOf((*StorageDrsCannotMoveVmWithMountedCDROM)(nil)).Elem() +} + +type StorageDrsCannotMoveVmWithMountedCDROMFault StorageDrsCannotMoveVmWithMountedCDROM + +func init() { + t["StorageDrsCannotMoveVmWithMountedCDROMFault"] = reflect.TypeOf((*StorageDrsCannotMoveVmWithMountedCDROMFault)(nil)).Elem() +} + +type StorageDrsCannotMoveVmWithNoFilesInLayout struct { + VimFault +} + +func init() { + t["StorageDrsCannotMoveVmWithNoFilesInLayout"] = reflect.TypeOf((*StorageDrsCannotMoveVmWithNoFilesInLayout)(nil)).Elem() +} + +type StorageDrsCannotMoveVmWithNoFilesInLayoutFault StorageDrsCannotMoveVmWithNoFilesInLayout + +func init() { + t["StorageDrsCannotMoveVmWithNoFilesInLayoutFault"] = reflect.TypeOf((*StorageDrsCannotMoveVmWithNoFilesInLayoutFault)(nil)).Elem() +} + +type StorageDrsConfigInfo struct { + DynamicData + + PodConfig StorageDrsPodConfigInfo `xml:"podConfig"` + VmConfig []StorageDrsVmConfigInfo `xml:"vmConfig,omitempty"` +} + +func init() { + t["StorageDrsConfigInfo"] = reflect.TypeOf((*StorageDrsConfigInfo)(nil)).Elem() +} + +type StorageDrsConfigSpec struct { + DynamicData + + PodConfigSpec *StorageDrsPodConfigSpec `xml:"podConfigSpec,omitempty"` + VmConfigSpec []StorageDrsVmConfigSpec `xml:"vmConfigSpec,omitempty"` +} + +func init() { + t["StorageDrsConfigSpec"] = reflect.TypeOf((*StorageDrsConfigSpec)(nil)).Elem() +} + +type 
StorageDrsDatacentersCannotShareDatastore struct { + VimFault +} + +func init() { + t["StorageDrsDatacentersCannotShareDatastore"] = reflect.TypeOf((*StorageDrsDatacentersCannotShareDatastore)(nil)).Elem() +} + +type StorageDrsDatacentersCannotShareDatastoreFault StorageDrsDatacentersCannotShareDatastore + +func init() { + t["StorageDrsDatacentersCannotShareDatastoreFault"] = reflect.TypeOf((*StorageDrsDatacentersCannotShareDatastoreFault)(nil)).Elem() +} + +type StorageDrsDisabledOnVm struct { + VimFault +} + +func init() { + t["StorageDrsDisabledOnVm"] = reflect.TypeOf((*StorageDrsDisabledOnVm)(nil)).Elem() +} + +type StorageDrsDisabledOnVmFault StorageDrsDisabledOnVm + +func init() { + t["StorageDrsDisabledOnVmFault"] = reflect.TypeOf((*StorageDrsDisabledOnVmFault)(nil)).Elem() +} + +type StorageDrsHbrDiskNotMovable struct { + VimFault + + NonMovableDiskIds string `xml:"nonMovableDiskIds"` +} + +func init() { + t["StorageDrsHbrDiskNotMovable"] = reflect.TypeOf((*StorageDrsHbrDiskNotMovable)(nil)).Elem() +} + +type StorageDrsHbrDiskNotMovableFault StorageDrsHbrDiskNotMovable + +func init() { + t["StorageDrsHbrDiskNotMovableFault"] = reflect.TypeOf((*StorageDrsHbrDiskNotMovableFault)(nil)).Elem() +} + +type StorageDrsHmsMoveInProgress struct { + VimFault +} + +func init() { + t["StorageDrsHmsMoveInProgress"] = reflect.TypeOf((*StorageDrsHmsMoveInProgress)(nil)).Elem() +} + +type StorageDrsHmsMoveInProgressFault StorageDrsHmsMoveInProgress + +func init() { + t["StorageDrsHmsMoveInProgressFault"] = reflect.TypeOf((*StorageDrsHmsMoveInProgressFault)(nil)).Elem() +} + +type StorageDrsHmsUnreachable struct { + VimFault +} + +func init() { + t["StorageDrsHmsUnreachable"] = reflect.TypeOf((*StorageDrsHmsUnreachable)(nil)).Elem() +} + +type StorageDrsHmsUnreachableFault StorageDrsHmsUnreachable + +func init() { + t["StorageDrsHmsUnreachableFault"] = reflect.TypeOf((*StorageDrsHmsUnreachableFault)(nil)).Elem() +} + +type StorageDrsIoLoadBalanceConfig struct { + DynamicData + + ReservablePercentThreshold int32 `xml:"reservablePercentThreshold,omitempty"` + ReservableIopsThreshold int32 `xml:"reservableIopsThreshold,omitempty"` + ReservableThresholdMode string `xml:"reservableThresholdMode,omitempty"` + IoLatencyThreshold int32 `xml:"ioLatencyThreshold,omitempty"` + IoLoadImbalanceThreshold int32 `xml:"ioLoadImbalanceThreshold,omitempty"` +} + +func init() { + t["StorageDrsIoLoadBalanceConfig"] = reflect.TypeOf((*StorageDrsIoLoadBalanceConfig)(nil)).Elem() +} + +type StorageDrsIolbDisabledInternally struct { + VimFault +} + +func init() { + t["StorageDrsIolbDisabledInternally"] = reflect.TypeOf((*StorageDrsIolbDisabledInternally)(nil)).Elem() +} + +type StorageDrsIolbDisabledInternallyFault StorageDrsIolbDisabledInternally + +func init() { + t["StorageDrsIolbDisabledInternallyFault"] = reflect.TypeOf((*StorageDrsIolbDisabledInternallyFault)(nil)).Elem() +} + +type StorageDrsOptionSpec struct { + ArrayUpdateSpec + + Option BaseOptionValue `xml:"option,omitempty,typeattr"` +} + +func init() { + t["StorageDrsOptionSpec"] = reflect.TypeOf((*StorageDrsOptionSpec)(nil)).Elem() +} + +type StorageDrsPlacementRankVmSpec struct { + DynamicData + + VmPlacementSpec PlacementSpec `xml:"vmPlacementSpec"` + VmClusters []ManagedObjectReference `xml:"vmClusters"` +} + +func init() { + t["StorageDrsPlacementRankVmSpec"] = reflect.TypeOf((*StorageDrsPlacementRankVmSpec)(nil)).Elem() +} + +type StorageDrsPodConfigInfo struct { + DynamicData + + Enabled bool `xml:"enabled"` + IoLoadBalanceEnabled bool 
`xml:"ioLoadBalanceEnabled"` + DefaultVmBehavior string `xml:"defaultVmBehavior"` + LoadBalanceInterval int32 `xml:"loadBalanceInterval,omitempty"` + DefaultIntraVmAffinity *bool `xml:"defaultIntraVmAffinity"` + SpaceLoadBalanceConfig *StorageDrsSpaceLoadBalanceConfig `xml:"spaceLoadBalanceConfig,omitempty"` + IoLoadBalanceConfig *StorageDrsIoLoadBalanceConfig `xml:"ioLoadBalanceConfig,omitempty"` + AutomationOverrides *StorageDrsAutomationConfig `xml:"automationOverrides,omitempty"` + Rule []BaseClusterRuleInfo `xml:"rule,omitempty,typeattr"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr"` +} + +func init() { + t["StorageDrsPodConfigInfo"] = reflect.TypeOf((*StorageDrsPodConfigInfo)(nil)).Elem() +} + +type StorageDrsPodConfigSpec struct { + DynamicData + + Enabled *bool `xml:"enabled"` + IoLoadBalanceEnabled *bool `xml:"ioLoadBalanceEnabled"` + DefaultVmBehavior string `xml:"defaultVmBehavior,omitempty"` + LoadBalanceInterval int32 `xml:"loadBalanceInterval,omitempty"` + DefaultIntraVmAffinity *bool `xml:"defaultIntraVmAffinity"` + SpaceLoadBalanceConfig *StorageDrsSpaceLoadBalanceConfig `xml:"spaceLoadBalanceConfig,omitempty"` + IoLoadBalanceConfig *StorageDrsIoLoadBalanceConfig `xml:"ioLoadBalanceConfig,omitempty"` + AutomationOverrides *StorageDrsAutomationConfig `xml:"automationOverrides,omitempty"` + Rule []ClusterRuleSpec `xml:"rule,omitempty"` + Option []StorageDrsOptionSpec `xml:"option,omitempty"` +} + +func init() { + t["StorageDrsPodConfigSpec"] = reflect.TypeOf((*StorageDrsPodConfigSpec)(nil)).Elem() +} + +type StorageDrsPodSelectionSpec struct { + DynamicData + + InitialVmConfig []VmPodConfigForPlacement `xml:"initialVmConfig,omitempty"` + StoragePod *ManagedObjectReference `xml:"storagePod,omitempty"` +} + +func init() { + t["StorageDrsPodSelectionSpec"] = reflect.TypeOf((*StorageDrsPodSelectionSpec)(nil)).Elem() +} + +type StorageDrsRelocateDisabled struct { + VimFault +} + +func init() { + t["StorageDrsRelocateDisabled"] = reflect.TypeOf((*StorageDrsRelocateDisabled)(nil)).Elem() +} + +type StorageDrsRelocateDisabledFault StorageDrsRelocateDisabled + +func init() { + t["StorageDrsRelocateDisabledFault"] = reflect.TypeOf((*StorageDrsRelocateDisabledFault)(nil)).Elem() +} + +type StorageDrsSpaceLoadBalanceConfig struct { + DynamicData + + SpaceThresholdMode string `xml:"spaceThresholdMode,omitempty"` + SpaceUtilizationThreshold int32 `xml:"spaceUtilizationThreshold,omitempty"` + FreeSpaceThresholdGB int32 `xml:"freeSpaceThresholdGB,omitempty"` + MinSpaceUtilizationDifference int32 `xml:"minSpaceUtilizationDifference,omitempty"` +} + +func init() { + t["StorageDrsSpaceLoadBalanceConfig"] = reflect.TypeOf((*StorageDrsSpaceLoadBalanceConfig)(nil)).Elem() +} + +type StorageDrsStaleHmsCollection struct { + VimFault +} + +func init() { + t["StorageDrsStaleHmsCollection"] = reflect.TypeOf((*StorageDrsStaleHmsCollection)(nil)).Elem() +} + +type StorageDrsStaleHmsCollectionFault StorageDrsStaleHmsCollection + +func init() { + t["StorageDrsStaleHmsCollectionFault"] = reflect.TypeOf((*StorageDrsStaleHmsCollectionFault)(nil)).Elem() +} + +type StorageDrsUnableToMoveFiles struct { + VimFault +} + +func init() { + t["StorageDrsUnableToMoveFiles"] = reflect.TypeOf((*StorageDrsUnableToMoveFiles)(nil)).Elem() +} + +type StorageDrsUnableToMoveFilesFault StorageDrsUnableToMoveFiles + +func init() { + t["StorageDrsUnableToMoveFilesFault"] = reflect.TypeOf((*StorageDrsUnableToMoveFilesFault)(nil)).Elem() +} + +type StorageDrsVmConfigInfo struct { + DynamicData + + Vm 
*ManagedObjectReference `xml:"vm,omitempty"` + Enabled *bool `xml:"enabled"` + Behavior string `xml:"behavior,omitempty"` + IntraVmAffinity *bool `xml:"intraVmAffinity"` + IntraVmAntiAffinity *VirtualDiskAntiAffinityRuleSpec `xml:"intraVmAntiAffinity,omitempty"` + VirtualDiskRules []VirtualDiskRuleSpec `xml:"virtualDiskRules,omitempty"` +} + +func init() { + t["StorageDrsVmConfigInfo"] = reflect.TypeOf((*StorageDrsVmConfigInfo)(nil)).Elem() +} + +type StorageDrsVmConfigSpec struct { + ArrayUpdateSpec + + Info *StorageDrsVmConfigInfo `xml:"info,omitempty"` +} + +func init() { + t["StorageDrsVmConfigSpec"] = reflect.TypeOf((*StorageDrsVmConfigSpec)(nil)).Elem() +} + +type StorageIOAllocationInfo struct { + DynamicData + + Limit *int64 `xml:"limit"` + Shares *SharesInfo `xml:"shares,omitempty"` + Reservation *int32 `xml:"reservation"` +} + +func init() { + t["StorageIOAllocationInfo"] = reflect.TypeOf((*StorageIOAllocationInfo)(nil)).Elem() +} + +type StorageIOAllocationOption struct { + DynamicData + + LimitOption LongOption `xml:"limitOption"` + SharesOption SharesOption `xml:"sharesOption"` +} + +func init() { + t["StorageIOAllocationOption"] = reflect.TypeOf((*StorageIOAllocationOption)(nil)).Elem() +} + +type StorageIORMConfigOption struct { + DynamicData + + EnabledOption BoolOption `xml:"enabledOption"` + CongestionThresholdOption IntOption `xml:"congestionThresholdOption"` + StatsCollectionEnabledOption *BoolOption `xml:"statsCollectionEnabledOption,omitempty"` + ReservationEnabledOption *BoolOption `xml:"reservationEnabledOption,omitempty"` +} + +func init() { + t["StorageIORMConfigOption"] = reflect.TypeOf((*StorageIORMConfigOption)(nil)).Elem() +} + +type StorageIORMConfigSpec struct { + DynamicData + + Enabled *bool `xml:"enabled"` + CongestionThresholdMode string `xml:"congestionThresholdMode,omitempty"` + CongestionThreshold int32 `xml:"congestionThreshold,omitempty"` + PercentOfPeakThroughput int32 `xml:"percentOfPeakThroughput,omitempty"` + StatsCollectionEnabled *bool `xml:"statsCollectionEnabled"` + ReservationEnabled *bool `xml:"reservationEnabled"` + StatsAggregationDisabled *bool `xml:"statsAggregationDisabled"` + ReservableIopsThreshold int32 `xml:"reservableIopsThreshold,omitempty"` +} + +func init() { + t["StorageIORMConfigSpec"] = reflect.TypeOf((*StorageIORMConfigSpec)(nil)).Elem() +} + +type StorageIORMInfo struct { + DynamicData + + Enabled bool `xml:"enabled"` + CongestionThresholdMode string `xml:"congestionThresholdMode,omitempty"` + CongestionThreshold int32 `xml:"congestionThreshold"` + PercentOfPeakThroughput int32 `xml:"percentOfPeakThroughput,omitempty"` + StatsCollectionEnabled *bool `xml:"statsCollectionEnabled"` + ReservationEnabled *bool `xml:"reservationEnabled"` + StatsAggregationDisabled *bool `xml:"statsAggregationDisabled"` + ReservableIopsThreshold int32 `xml:"reservableIopsThreshold,omitempty"` +} + +func init() { + t["StorageIORMInfo"] = reflect.TypeOf((*StorageIORMInfo)(nil)).Elem() +} + +type StorageMigrationAction struct { + ClusterAction + + Vm ManagedObjectReference `xml:"vm"` + RelocateSpec VirtualMachineRelocateSpec `xml:"relocateSpec"` + Source ManagedObjectReference `xml:"source"` + Destination ManagedObjectReference `xml:"destination"` + SizeTransferred int64 `xml:"sizeTransferred"` + SpaceUtilSrcBefore float32 `xml:"spaceUtilSrcBefore,omitempty"` + SpaceUtilDstBefore float32 `xml:"spaceUtilDstBefore,omitempty"` + SpaceUtilSrcAfter float32 `xml:"spaceUtilSrcAfter,omitempty"` + SpaceUtilDstAfter float32 
`xml:"spaceUtilDstAfter,omitempty"` + IoLatencySrcBefore float32 `xml:"ioLatencySrcBefore,omitempty"` + IoLatencyDstBefore float32 `xml:"ioLatencyDstBefore,omitempty"` +} + +func init() { + t["StorageMigrationAction"] = reflect.TypeOf((*StorageMigrationAction)(nil)).Elem() +} + +type StoragePerformanceSummary struct { + DynamicData + + Interval int32 `xml:"interval"` + Percentile []int32 `xml:"percentile"` + DatastoreReadLatency []float64 `xml:"datastoreReadLatency"` + DatastoreWriteLatency []float64 `xml:"datastoreWriteLatency"` + DatastoreVmLatency []float64 `xml:"datastoreVmLatency"` + DatastoreReadIops []float64 `xml:"datastoreReadIops"` + DatastoreWriteIops []float64 `xml:"datastoreWriteIops"` + SiocActivityDuration int32 `xml:"siocActivityDuration"` +} + +func init() { + t["StoragePerformanceSummary"] = reflect.TypeOf((*StoragePerformanceSummary)(nil)).Elem() +} + +type StoragePlacementAction struct { + ClusterAction + + Vm *ManagedObjectReference `xml:"vm,omitempty"` + RelocateSpec VirtualMachineRelocateSpec `xml:"relocateSpec"` + Destination ManagedObjectReference `xml:"destination"` + SpaceUtilBefore float32 `xml:"spaceUtilBefore,omitempty"` + SpaceDemandBefore float32 `xml:"spaceDemandBefore,omitempty"` + SpaceUtilAfter float32 `xml:"spaceUtilAfter,omitempty"` + SpaceDemandAfter float32 `xml:"spaceDemandAfter,omitempty"` + IoLatencyBefore float32 `xml:"ioLatencyBefore,omitempty"` +} + +func init() { + t["StoragePlacementAction"] = reflect.TypeOf((*StoragePlacementAction)(nil)).Elem() +} + +type StoragePlacementResult struct { + DynamicData + + Recommendations []ClusterRecommendation `xml:"recommendations,omitempty"` + DrsFault *ClusterDrsFaults `xml:"drsFault,omitempty"` + Task *ManagedObjectReference `xml:"task,omitempty"` +} + +func init() { + t["StoragePlacementResult"] = reflect.TypeOf((*StoragePlacementResult)(nil)).Elem() +} + +type StoragePlacementSpec struct { + DynamicData + + Type string `xml:"type"` + Priority VirtualMachineMovePriority `xml:"priority,omitempty"` + Vm *ManagedObjectReference `xml:"vm,omitempty"` + PodSelectionSpec StorageDrsPodSelectionSpec `xml:"podSelectionSpec"` + CloneSpec *VirtualMachineCloneSpec `xml:"cloneSpec,omitempty"` + CloneName string `xml:"cloneName,omitempty"` + ConfigSpec *VirtualMachineConfigSpec `xml:"configSpec,omitempty"` + RelocateSpec *VirtualMachineRelocateSpec `xml:"relocateSpec,omitempty"` + ResourcePool *ManagedObjectReference `xml:"resourcePool,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Folder *ManagedObjectReference `xml:"folder,omitempty"` + DisallowPrerequisiteMoves *bool `xml:"disallowPrerequisiteMoves"` + ResourceLeaseDurationSec int32 `xml:"resourceLeaseDurationSec,omitempty"` +} + +func init() { + t["StoragePlacementSpec"] = reflect.TypeOf((*StoragePlacementSpec)(nil)).Elem() +} + +type StoragePodSummary struct { + DynamicData + + Name string `xml:"name"` + Capacity int64 `xml:"capacity"` + FreeSpace int64 `xml:"freeSpace"` +} + +func init() { + t["StoragePodSummary"] = reflect.TypeOf((*StoragePodSummary)(nil)).Elem() +} + +type StorageProfile struct { + ApplyProfile + + NasStorage []NasStorageProfile `xml:"nasStorage,omitempty"` +} + +func init() { + t["StorageProfile"] = reflect.TypeOf((*StorageProfile)(nil)).Elem() +} + +type StorageRequirement struct { + DynamicData + + Datastore ManagedObjectReference `xml:"datastore"` + FreeSpaceRequiredInKb int64 `xml:"freeSpaceRequiredInKb"` +} + +func init() { + t["StorageRequirement"] = reflect.TypeOf((*StorageRequirement)(nil)).Elem() +} + +type 
StorageResourceManagerStorageProfileStatistics struct { + DynamicData + + ProfileId string `xml:"profileId"` + TotalSpaceMB int64 `xml:"totalSpaceMB"` + UsedSpaceMB int64 `xml:"usedSpaceMB"` +} + +func init() { + t["StorageResourceManagerStorageProfileStatistics"] = reflect.TypeOf((*StorageResourceManagerStorageProfileStatistics)(nil)).Elem() +} + +type StorageVMotionNotSupported struct { + MigrationFeatureNotSupported +} + +func init() { + t["StorageVMotionNotSupported"] = reflect.TypeOf((*StorageVMotionNotSupported)(nil)).Elem() +} + +type StorageVMotionNotSupportedFault StorageVMotionNotSupported + +func init() { + t["StorageVMotionNotSupportedFault"] = reflect.TypeOf((*StorageVMotionNotSupportedFault)(nil)).Elem() +} + +type StorageVmotionIncompatible struct { + VirtualHardwareCompatibilityIssue + + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` +} + +func init() { + t["StorageVmotionIncompatible"] = reflect.TypeOf((*StorageVmotionIncompatible)(nil)).Elem() +} + +type StorageVmotionIncompatibleFault StorageVmotionIncompatible + +func init() { + t["StorageVmotionIncompatibleFault"] = reflect.TypeOf((*StorageVmotionIncompatibleFault)(nil)).Elem() +} + +type StringExpression struct { + NegatableExpression + + Value string `xml:"value,omitempty"` +} + +func init() { + t["StringExpression"] = reflect.TypeOf((*StringExpression)(nil)).Elem() +} + +type StringOption struct { + OptionType + + DefaultValue string `xml:"defaultValue"` + ValidCharacters string `xml:"validCharacters,omitempty"` +} + +func init() { + t["StringOption"] = reflect.TypeOf((*StringOption)(nil)).Elem() +} + +type StringPolicy struct { + InheritablePolicy + + Value string `xml:"value,omitempty"` +} + +func init() { + t["StringPolicy"] = reflect.TypeOf((*StringPolicy)(nil)).Elem() +} + +type StructuredCustomizations struct { + HostProfilesEntityCustomizations + + Entity ManagedObjectReference `xml:"entity"` + Customizations *AnswerFile `xml:"customizations,omitempty"` +} + +func init() { + t["StructuredCustomizations"] = reflect.TypeOf((*StructuredCustomizations)(nil)).Elem() +} + +type SuspendVAppRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["SuspendVAppRequestType"] = reflect.TypeOf((*SuspendVAppRequestType)(nil)).Elem() +} + +type SuspendVApp_Task SuspendVAppRequestType + +func init() { + t["SuspendVApp_Task"] = reflect.TypeOf((*SuspendVApp_Task)(nil)).Elem() +} + +type SuspendVApp_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type SuspendVMRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["SuspendVMRequestType"] = reflect.TypeOf((*SuspendVMRequestType)(nil)).Elem() +} + +type SuspendVM_Task SuspendVMRequestType + +func init() { + t["SuspendVM_Task"] = reflect.TypeOf((*SuspendVM_Task)(nil)).Elem() +} + +type SuspendVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type SuspendedRelocateNotSupported struct { + MigrationFault +} + +func init() { + t["SuspendedRelocateNotSupported"] = reflect.TypeOf((*SuspendedRelocateNotSupported)(nil)).Elem() +} + +type SuspendedRelocateNotSupportedFault SuspendedRelocateNotSupported + +func init() { + t["SuspendedRelocateNotSupportedFault"] = reflect.TypeOf((*SuspendedRelocateNotSupportedFault)(nil)).Elem() +} + +type SwapDatastoreNotWritableOnHost struct { + DatastoreNotWritableOnHost +} + +func init() { + t["SwapDatastoreNotWritableOnHost"] = reflect.TypeOf((*SwapDatastoreNotWritableOnHost)(nil)).Elem() +} + +type 
SwapDatastoreNotWritableOnHostFault SwapDatastoreNotWritableOnHost + +func init() { + t["SwapDatastoreNotWritableOnHostFault"] = reflect.TypeOf((*SwapDatastoreNotWritableOnHostFault)(nil)).Elem() +} + +type SwapDatastoreUnset struct { + VimFault +} + +func init() { + t["SwapDatastoreUnset"] = reflect.TypeOf((*SwapDatastoreUnset)(nil)).Elem() +} + +type SwapDatastoreUnsetFault SwapDatastoreUnset + +func init() { + t["SwapDatastoreUnsetFault"] = reflect.TypeOf((*SwapDatastoreUnsetFault)(nil)).Elem() +} + +type SwapPlacementOverrideNotSupported struct { + InvalidVmConfig +} + +func init() { + t["SwapPlacementOverrideNotSupported"] = reflect.TypeOf((*SwapPlacementOverrideNotSupported)(nil)).Elem() +} + +type SwapPlacementOverrideNotSupportedFault SwapPlacementOverrideNotSupported + +func init() { + t["SwapPlacementOverrideNotSupportedFault"] = reflect.TypeOf((*SwapPlacementOverrideNotSupportedFault)(nil)).Elem() +} + +type SwitchIpUnset struct { + DvsFault +} + +func init() { + t["SwitchIpUnset"] = reflect.TypeOf((*SwitchIpUnset)(nil)).Elem() +} + +type SwitchIpUnsetFault SwitchIpUnset + +func init() { + t["SwitchIpUnsetFault"] = reflect.TypeOf((*SwitchIpUnsetFault)(nil)).Elem() +} + +type SwitchNotInUpgradeMode struct { + DvsFault +} + +func init() { + t["SwitchNotInUpgradeMode"] = reflect.TypeOf((*SwitchNotInUpgradeMode)(nil)).Elem() +} + +type SwitchNotInUpgradeModeFault SwitchNotInUpgradeMode + +func init() { + t["SwitchNotInUpgradeModeFault"] = reflect.TypeOf((*SwitchNotInUpgradeModeFault)(nil)).Elem() +} + +type SystemError struct { + RuntimeFault + + Reason string `xml:"reason"` +} + +func init() { + t["SystemError"] = reflect.TypeOf((*SystemError)(nil)).Elem() +} + +type SystemErrorFault SystemError + +func init() { + t["SystemErrorFault"] = reflect.TypeOf((*SystemErrorFault)(nil)).Elem() +} + +type SystemEventInfo struct { + DynamicData + + RecordId int64 `xml:"recordId"` + When string `xml:"when"` + SelType int64 `xml:"selType"` + Message string `xml:"message"` + SensorNumber int64 `xml:"sensorNumber"` +} + +func init() { + t["SystemEventInfo"] = reflect.TypeOf((*SystemEventInfo)(nil)).Elem() +} + +type Tag struct { + DynamicData + + Key string `xml:"key"` +} + +func init() { + t["Tag"] = reflect.TypeOf((*Tag)(nil)).Elem() +} + +type TaskDescription struct { + DynamicData + + MethodInfo []BaseElementDescription `xml:"methodInfo,typeattr"` + State []BaseElementDescription `xml:"state,typeattr"` + Reason []BaseTypeDescription `xml:"reason,typeattr"` +} + +func init() { + t["TaskDescription"] = reflect.TypeOf((*TaskDescription)(nil)).Elem() +} + +type TaskEvent struct { + Event + + Info TaskInfo `xml:"info"` +} + +func init() { + t["TaskEvent"] = reflect.TypeOf((*TaskEvent)(nil)).Elem() +} + +type TaskFilterSpec struct { + DynamicData + + Entity *TaskFilterSpecByEntity `xml:"entity,omitempty"` + Time *TaskFilterSpecByTime `xml:"time,omitempty"` + UserName *TaskFilterSpecByUsername `xml:"userName,omitempty"` + ActivationId []string `xml:"activationId,omitempty"` + State []TaskInfoState `xml:"state,omitempty"` + Alarm *ManagedObjectReference `xml:"alarm,omitempty"` + ScheduledTask *ManagedObjectReference `xml:"scheduledTask,omitempty"` + EventChainId []int32 `xml:"eventChainId,omitempty"` + Tag []string `xml:"tag,omitempty"` + ParentTaskKey []string `xml:"parentTaskKey,omitempty"` + RootTaskKey []string `xml:"rootTaskKey,omitempty"` +} + +func init() { + t["TaskFilterSpec"] = reflect.TypeOf((*TaskFilterSpec)(nil)).Elem() +} + +type TaskFilterSpecByEntity struct { + DynamicData + + Entity 
ManagedObjectReference `xml:"entity"` + Recursion TaskFilterSpecRecursionOption `xml:"recursion"` +} + +func init() { + t["TaskFilterSpecByEntity"] = reflect.TypeOf((*TaskFilterSpecByEntity)(nil)).Elem() +} + +type TaskFilterSpecByTime struct { + DynamicData + + TimeType TaskFilterSpecTimeOption `xml:"timeType"` + BeginTime *time.Time `xml:"beginTime"` + EndTime *time.Time `xml:"endTime"` +} + +func init() { + t["TaskFilterSpecByTime"] = reflect.TypeOf((*TaskFilterSpecByTime)(nil)).Elem() +} + +type TaskFilterSpecByUsername struct { + DynamicData + + SystemUser bool `xml:"systemUser"` + UserList []string `xml:"userList,omitempty"` +} + +func init() { + t["TaskFilterSpecByUsername"] = reflect.TypeOf((*TaskFilterSpecByUsername)(nil)).Elem() +} + +type TaskInProgress struct { + VimFault + + Task ManagedObjectReference `xml:"task"` +} + +func init() { + t["TaskInProgress"] = reflect.TypeOf((*TaskInProgress)(nil)).Elem() +} + +type TaskInProgressFault BaseTaskInProgress + +func init() { + t["TaskInProgressFault"] = reflect.TypeOf((*TaskInProgressFault)(nil)).Elem() +} + +type TaskInfo struct { + DynamicData + + Key string `xml:"key"` + Task ManagedObjectReference `xml:"task"` + Description *LocalizableMessage `xml:"description,omitempty"` + Name string `xml:"name,omitempty"` + DescriptionId string `xml:"descriptionId"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` + EntityName string `xml:"entityName,omitempty"` + Locked []ManagedObjectReference `xml:"locked,omitempty"` + State TaskInfoState `xml:"state"` + Cancelled bool `xml:"cancelled"` + Cancelable bool `xml:"cancelable"` + Error *LocalizedMethodFault `xml:"error,omitempty"` + Result AnyType `xml:"result,omitempty,typeattr"` + Progress int32 `xml:"progress,omitempty"` + Reason BaseTaskReason `xml:"reason,typeattr"` + QueueTime time.Time `xml:"queueTime"` + StartTime *time.Time `xml:"startTime"` + CompleteTime *time.Time `xml:"completeTime"` + EventChainId int32 `xml:"eventChainId"` + ChangeTag string `xml:"changeTag,omitempty"` + ParentTaskKey string `xml:"parentTaskKey,omitempty"` + RootTaskKey string `xml:"rootTaskKey,omitempty"` + ActivationId string `xml:"activationId,omitempty"` +} + +func init() { + t["TaskInfo"] = reflect.TypeOf((*TaskInfo)(nil)).Elem() +} + +type TaskReason struct { + DynamicData +} + +func init() { + t["TaskReason"] = reflect.TypeOf((*TaskReason)(nil)).Elem() +} + +type TaskReasonAlarm struct { + TaskReason + + AlarmName string `xml:"alarmName"` + Alarm ManagedObjectReference `xml:"alarm"` + EntityName string `xml:"entityName"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["TaskReasonAlarm"] = reflect.TypeOf((*TaskReasonAlarm)(nil)).Elem() +} + +type TaskReasonSchedule struct { + TaskReason + + Name string `xml:"name"` + ScheduledTask ManagedObjectReference `xml:"scheduledTask"` +} + +func init() { + t["TaskReasonSchedule"] = reflect.TypeOf((*TaskReasonSchedule)(nil)).Elem() +} + +type TaskReasonSystem struct { + TaskReason +} + +func init() { + t["TaskReasonSystem"] = reflect.TypeOf((*TaskReasonSystem)(nil)).Elem() +} + +type TaskReasonUser struct { + TaskReason + + UserName string `xml:"userName"` +} + +func init() { + t["TaskReasonUser"] = reflect.TypeOf((*TaskReasonUser)(nil)).Elem() +} + +type TaskScheduler struct { + DynamicData + + ActiveTime *time.Time `xml:"activeTime"` + ExpireTime *time.Time `xml:"expireTime"` +} + +func init() { + t["TaskScheduler"] = reflect.TypeOf((*TaskScheduler)(nil)).Elem() +} + +type TaskTimeoutEvent struct { + TaskEvent +} + +func init() { + 
t["TaskTimeoutEvent"] = reflect.TypeOf((*TaskTimeoutEvent)(nil)).Elem() +} + +type TeamingMatchEvent struct { + DvsHealthStatusChangeEvent +} + +func init() { + t["TeamingMatchEvent"] = reflect.TypeOf((*TeamingMatchEvent)(nil)).Elem() +} + +type TeamingMisMatchEvent struct { + DvsHealthStatusChangeEvent +} + +func init() { + t["TeamingMisMatchEvent"] = reflect.TypeOf((*TeamingMisMatchEvent)(nil)).Elem() +} + +type TemplateBeingUpgradedEvent struct { + TemplateUpgradeEvent +} + +func init() { + t["TemplateBeingUpgradedEvent"] = reflect.TypeOf((*TemplateBeingUpgradedEvent)(nil)).Elem() +} + +type TemplateConfigFileInfo struct { + VmConfigFileInfo +} + +func init() { + t["TemplateConfigFileInfo"] = reflect.TypeOf((*TemplateConfigFileInfo)(nil)).Elem() +} + +type TemplateConfigFileQuery struct { + VmConfigFileQuery +} + +func init() { + t["TemplateConfigFileQuery"] = reflect.TypeOf((*TemplateConfigFileQuery)(nil)).Elem() +} + +type TemplateUpgradeEvent struct { + Event + + LegacyTemplate string `xml:"legacyTemplate"` +} + +func init() { + t["TemplateUpgradeEvent"] = reflect.TypeOf((*TemplateUpgradeEvent)(nil)).Elem() +} + +type TemplateUpgradeFailedEvent struct { + TemplateUpgradeEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["TemplateUpgradeFailedEvent"] = reflect.TypeOf((*TemplateUpgradeFailedEvent)(nil)).Elem() +} + +type TemplateUpgradedEvent struct { + TemplateUpgradeEvent +} + +func init() { + t["TemplateUpgradedEvent"] = reflect.TypeOf((*TemplateUpgradedEvent)(nil)).Elem() +} + +type TerminateFaultTolerantVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm *ManagedObjectReference `xml:"vm,omitempty"` +} + +func init() { + t["TerminateFaultTolerantVMRequestType"] = reflect.TypeOf((*TerminateFaultTolerantVMRequestType)(nil)).Elem() +} + +type TerminateFaultTolerantVM_Task TerminateFaultTolerantVMRequestType + +func init() { + t["TerminateFaultTolerantVM_Task"] = reflect.TypeOf((*TerminateFaultTolerantVM_Task)(nil)).Elem() +} + +type TerminateFaultTolerantVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type TerminateProcessInGuest TerminateProcessInGuestRequestType + +func init() { + t["TerminateProcessInGuest"] = reflect.TypeOf((*TerminateProcessInGuest)(nil)).Elem() +} + +type TerminateProcessInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Pid int64 `xml:"pid"` +} + +func init() { + t["TerminateProcessInGuestRequestType"] = reflect.TypeOf((*TerminateProcessInGuestRequestType)(nil)).Elem() +} + +type TerminateProcessInGuestResponse struct { +} + +type TerminateSession TerminateSessionRequestType + +func init() { + t["TerminateSession"] = reflect.TypeOf((*TerminateSession)(nil)).Elem() +} + +type TerminateSessionRequestType struct { + This ManagedObjectReference `xml:"_this"` + SessionId []string `xml:"sessionId"` +} + +func init() { + t["TerminateSessionRequestType"] = reflect.TypeOf((*TerminateSessionRequestType)(nil)).Elem() +} + +type TerminateSessionResponse struct { +} + +type TerminateVM TerminateVMRequestType + +func init() { + t["TerminateVM"] = reflect.TypeOf((*TerminateVM)(nil)).Elem() +} + +type TerminateVMRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["TerminateVMRequestType"] = reflect.TypeOf((*TerminateVMRequestType)(nil)).Elem() +} + +type TerminateVMResponse struct { +} + +type ThirdPartyLicenseAssignmentFailed struct { + 
RuntimeFault + + Host ManagedObjectReference `xml:"host"` + Module string `xml:"module"` + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["ThirdPartyLicenseAssignmentFailed"] = reflect.TypeOf((*ThirdPartyLicenseAssignmentFailed)(nil)).Elem() +} + +type ThirdPartyLicenseAssignmentFailedFault ThirdPartyLicenseAssignmentFailed + +func init() { + t["ThirdPartyLicenseAssignmentFailedFault"] = reflect.TypeOf((*ThirdPartyLicenseAssignmentFailedFault)(nil)).Elem() +} + +type TicketedSessionAuthentication struct { + GuestAuthentication + + Ticket string `xml:"ticket"` +} + +func init() { + t["TicketedSessionAuthentication"] = reflect.TypeOf((*TicketedSessionAuthentication)(nil)).Elem() +} + +type TimedOutHostOperationEvent struct { + HostEvent +} + +func init() { + t["TimedOutHostOperationEvent"] = reflect.TypeOf((*TimedOutHostOperationEvent)(nil)).Elem() +} + +type Timedout struct { + VimFault +} + +func init() { + t["Timedout"] = reflect.TypeOf((*Timedout)(nil)).Elem() +} + +type TimedoutFault BaseTimedout + +func init() { + t["TimedoutFault"] = reflect.TypeOf((*TimedoutFault)(nil)).Elem() +} + +type TooManyConcurrentNativeClones struct { + FileFault +} + +func init() { + t["TooManyConcurrentNativeClones"] = reflect.TypeOf((*TooManyConcurrentNativeClones)(nil)).Elem() +} + +type TooManyConcurrentNativeClonesFault TooManyConcurrentNativeClones + +func init() { + t["TooManyConcurrentNativeClonesFault"] = reflect.TypeOf((*TooManyConcurrentNativeClonesFault)(nil)).Elem() +} + +type TooManyConsecutiveOverrides struct { + VimFault +} + +func init() { + t["TooManyConsecutiveOverrides"] = reflect.TypeOf((*TooManyConsecutiveOverrides)(nil)).Elem() +} + +type TooManyConsecutiveOverridesFault TooManyConsecutiveOverrides + +func init() { + t["TooManyConsecutiveOverridesFault"] = reflect.TypeOf((*TooManyConsecutiveOverridesFault)(nil)).Elem() +} + +type TooManyDevices struct { + InvalidVmConfig +} + +func init() { + t["TooManyDevices"] = reflect.TypeOf((*TooManyDevices)(nil)).Elem() +} + +type TooManyDevicesFault TooManyDevices + +func init() { + t["TooManyDevicesFault"] = reflect.TypeOf((*TooManyDevicesFault)(nil)).Elem() +} + +type TooManyDisksOnLegacyHost struct { + MigrationFault + + DiskCount int32 `xml:"diskCount"` + TimeoutDanger bool `xml:"timeoutDanger"` +} + +func init() { + t["TooManyDisksOnLegacyHost"] = reflect.TypeOf((*TooManyDisksOnLegacyHost)(nil)).Elem() +} + +type TooManyDisksOnLegacyHostFault TooManyDisksOnLegacyHost + +func init() { + t["TooManyDisksOnLegacyHostFault"] = reflect.TypeOf((*TooManyDisksOnLegacyHostFault)(nil)).Elem() +} + +type TooManyGuestLogons struct { + GuestOperationsFault +} + +func init() { + t["TooManyGuestLogons"] = reflect.TypeOf((*TooManyGuestLogons)(nil)).Elem() +} + +type TooManyGuestLogonsFault TooManyGuestLogons + +func init() { + t["TooManyGuestLogonsFault"] = reflect.TypeOf((*TooManyGuestLogonsFault)(nil)).Elem() +} + +type TooManyHosts struct { + HostConnectFault +} + +func init() { + t["TooManyHosts"] = reflect.TypeOf((*TooManyHosts)(nil)).Elem() +} + +type TooManyHostsFault TooManyHosts + +func init() { + t["TooManyHostsFault"] = reflect.TypeOf((*TooManyHostsFault)(nil)).Elem() +} + +type TooManyNativeCloneLevels struct { + FileFault +} + +func init() { + t["TooManyNativeCloneLevels"] = reflect.TypeOf((*TooManyNativeCloneLevels)(nil)).Elem() +} + +type TooManyNativeCloneLevelsFault TooManyNativeCloneLevels + +func init() { + t["TooManyNativeCloneLevelsFault"] = reflect.TypeOf((*TooManyNativeCloneLevelsFault)(nil)).Elem() +} + +type 
TooManyNativeClonesOnFile struct { + FileFault +} + +func init() { + t["TooManyNativeClonesOnFile"] = reflect.TypeOf((*TooManyNativeClonesOnFile)(nil)).Elem() +} + +type TooManyNativeClonesOnFileFault TooManyNativeClonesOnFile + +func init() { + t["TooManyNativeClonesOnFileFault"] = reflect.TypeOf((*TooManyNativeClonesOnFileFault)(nil)).Elem() +} + +type TooManySnapshotLevels struct { + SnapshotFault +} + +func init() { + t["TooManySnapshotLevels"] = reflect.TypeOf((*TooManySnapshotLevels)(nil)).Elem() +} + +type TooManySnapshotLevelsFault TooManySnapshotLevels + +func init() { + t["TooManySnapshotLevelsFault"] = reflect.TypeOf((*TooManySnapshotLevelsFault)(nil)).Elem() +} + +type ToolsAlreadyUpgraded struct { + VmToolsUpgradeFault +} + +func init() { + t["ToolsAlreadyUpgraded"] = reflect.TypeOf((*ToolsAlreadyUpgraded)(nil)).Elem() +} + +type ToolsAlreadyUpgradedFault ToolsAlreadyUpgraded + +func init() { + t["ToolsAlreadyUpgradedFault"] = reflect.TypeOf((*ToolsAlreadyUpgradedFault)(nil)).Elem() +} + +type ToolsAutoUpgradeNotSupported struct { + VmToolsUpgradeFault +} + +func init() { + t["ToolsAutoUpgradeNotSupported"] = reflect.TypeOf((*ToolsAutoUpgradeNotSupported)(nil)).Elem() +} + +type ToolsAutoUpgradeNotSupportedFault ToolsAutoUpgradeNotSupported + +func init() { + t["ToolsAutoUpgradeNotSupportedFault"] = reflect.TypeOf((*ToolsAutoUpgradeNotSupportedFault)(nil)).Elem() +} + +type ToolsConfigInfo struct { + DynamicData + + ToolsVersion int32 `xml:"toolsVersion,omitempty"` + ToolsInstallType string `xml:"toolsInstallType,omitempty"` + AfterPowerOn *bool `xml:"afterPowerOn"` + AfterResume *bool `xml:"afterResume"` + BeforeGuestStandby *bool `xml:"beforeGuestStandby"` + BeforeGuestShutdown *bool `xml:"beforeGuestShutdown"` + BeforeGuestReboot *bool `xml:"beforeGuestReboot"` + ToolsUpgradePolicy string `xml:"toolsUpgradePolicy,omitempty"` + PendingCustomization string `xml:"pendingCustomization,omitempty"` + CustomizationKeyId *CryptoKeyId `xml:"customizationKeyId,omitempty"` + SyncTimeWithHost *bool `xml:"syncTimeWithHost"` + LastInstallInfo *ToolsConfigInfoToolsLastInstallInfo `xml:"lastInstallInfo,omitempty"` +} + +func init() { + t["ToolsConfigInfo"] = reflect.TypeOf((*ToolsConfigInfo)(nil)).Elem() +} + +type ToolsConfigInfoToolsLastInstallInfo struct { + DynamicData + + Counter int32 `xml:"counter"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["ToolsConfigInfoToolsLastInstallInfo"] = reflect.TypeOf((*ToolsConfigInfoToolsLastInstallInfo)(nil)).Elem() +} + +type ToolsImageCopyFailed struct { + VmToolsUpgradeFault +} + +func init() { + t["ToolsImageCopyFailed"] = reflect.TypeOf((*ToolsImageCopyFailed)(nil)).Elem() +} + +type ToolsImageCopyFailedFault ToolsImageCopyFailed + +func init() { + t["ToolsImageCopyFailedFault"] = reflect.TypeOf((*ToolsImageCopyFailedFault)(nil)).Elem() +} + +type ToolsImageNotAvailable struct { + VmToolsUpgradeFault +} + +func init() { + t["ToolsImageNotAvailable"] = reflect.TypeOf((*ToolsImageNotAvailable)(nil)).Elem() +} + +type ToolsImageNotAvailableFault ToolsImageNotAvailable + +func init() { + t["ToolsImageNotAvailableFault"] = reflect.TypeOf((*ToolsImageNotAvailableFault)(nil)).Elem() +} + +type ToolsImageSignatureCheckFailed struct { + VmToolsUpgradeFault +} + +func init() { + t["ToolsImageSignatureCheckFailed"] = reflect.TypeOf((*ToolsImageSignatureCheckFailed)(nil)).Elem() +} + +type ToolsImageSignatureCheckFailedFault ToolsImageSignatureCheckFailed + +func init() { + t["ToolsImageSignatureCheckFailedFault"] = 
reflect.TypeOf((*ToolsImageSignatureCheckFailedFault)(nil)).Elem() +} + +type ToolsInstallationInProgress struct { + MigrationFault +} + +func init() { + t["ToolsInstallationInProgress"] = reflect.TypeOf((*ToolsInstallationInProgress)(nil)).Elem() +} + +type ToolsInstallationInProgressFault ToolsInstallationInProgress + +func init() { + t["ToolsInstallationInProgressFault"] = reflect.TypeOf((*ToolsInstallationInProgressFault)(nil)).Elem() +} + +type ToolsUnavailable struct { + VimFault +} + +func init() { + t["ToolsUnavailable"] = reflect.TypeOf((*ToolsUnavailable)(nil)).Elem() +} + +type ToolsUnavailableFault ToolsUnavailable + +func init() { + t["ToolsUnavailableFault"] = reflect.TypeOf((*ToolsUnavailableFault)(nil)).Elem() +} + +type ToolsUpgradeCancelled struct { + VmToolsUpgradeFault +} + +func init() { + t["ToolsUpgradeCancelled"] = reflect.TypeOf((*ToolsUpgradeCancelled)(nil)).Elem() +} + +type ToolsUpgradeCancelledFault ToolsUpgradeCancelled + +func init() { + t["ToolsUpgradeCancelledFault"] = reflect.TypeOf((*ToolsUpgradeCancelledFault)(nil)).Elem() +} + +type TraversalSpec struct { + SelectionSpec + + Type string `xml:"type"` + Path string `xml:"path"` + Skip *bool `xml:"skip"` + SelectSet []BaseSelectionSpec `xml:"selectSet,omitempty,typeattr"` +} + +func init() { + t["TraversalSpec"] = reflect.TypeOf((*TraversalSpec)(nil)).Elem() +} + +type TurnDiskLocatorLedOffRequestType struct { + This ManagedObjectReference `xml:"_this"` + ScsiDiskUuids []string `xml:"scsiDiskUuids"` +} + +func init() { + t["TurnDiskLocatorLedOffRequestType"] = reflect.TypeOf((*TurnDiskLocatorLedOffRequestType)(nil)).Elem() +} + +type TurnDiskLocatorLedOff_Task TurnDiskLocatorLedOffRequestType + +func init() { + t["TurnDiskLocatorLedOff_Task"] = reflect.TypeOf((*TurnDiskLocatorLedOff_Task)(nil)).Elem() +} + +type TurnDiskLocatorLedOff_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type TurnDiskLocatorLedOnRequestType struct { + This ManagedObjectReference `xml:"_this"` + ScsiDiskUuids []string `xml:"scsiDiskUuids"` +} + +func init() { + t["TurnDiskLocatorLedOnRequestType"] = reflect.TypeOf((*TurnDiskLocatorLedOnRequestType)(nil)).Elem() +} + +type TurnDiskLocatorLedOn_Task TurnDiskLocatorLedOnRequestType + +func init() { + t["TurnDiskLocatorLedOn_Task"] = reflect.TypeOf((*TurnDiskLocatorLedOn_Task)(nil)).Elem() +} + +type TurnDiskLocatorLedOn_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type TurnOffFaultToleranceForVMRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["TurnOffFaultToleranceForVMRequestType"] = reflect.TypeOf((*TurnOffFaultToleranceForVMRequestType)(nil)).Elem() +} + +type TurnOffFaultToleranceForVM_Task TurnOffFaultToleranceForVMRequestType + +func init() { + t["TurnOffFaultToleranceForVM_Task"] = reflect.TypeOf((*TurnOffFaultToleranceForVM_Task)(nil)).Elem() +} + +type TurnOffFaultToleranceForVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type TypeDescription struct { + Description + + Key string `xml:"key"` +} + +func init() { + t["TypeDescription"] = reflect.TypeOf((*TypeDescription)(nil)).Elem() +} + +type UnSupportedDatastoreForVFlash struct { + UnsupportedDatastore + + DatastoreName string `xml:"datastoreName"` + Type string `xml:"type"` +} + +func init() { + t["UnSupportedDatastoreForVFlash"] = reflect.TypeOf((*UnSupportedDatastoreForVFlash)(nil)).Elem() +} + +type UnSupportedDatastoreForVFlashFault UnSupportedDatastoreForVFlash + +func 
init() { + t["UnSupportedDatastoreForVFlashFault"] = reflect.TypeOf((*UnSupportedDatastoreForVFlashFault)(nil)).Elem() +} + +type UnassignUserFromGroup UnassignUserFromGroupRequestType + +func init() { + t["UnassignUserFromGroup"] = reflect.TypeOf((*UnassignUserFromGroup)(nil)).Elem() +} + +type UnassignUserFromGroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + User string `xml:"user"` + Group string `xml:"group"` +} + +func init() { + t["UnassignUserFromGroupRequestType"] = reflect.TypeOf((*UnassignUserFromGroupRequestType)(nil)).Elem() +} + +type UnassignUserFromGroupResponse struct { +} + +type UnbindVnic UnbindVnicRequestType + +func init() { + t["UnbindVnic"] = reflect.TypeOf((*UnbindVnic)(nil)).Elem() +} + +type UnbindVnicRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaName string `xml:"iScsiHbaName"` + VnicDevice string `xml:"vnicDevice"` + Force bool `xml:"force"` +} + +func init() { + t["UnbindVnicRequestType"] = reflect.TypeOf((*UnbindVnicRequestType)(nil)).Elem() +} + +type UnbindVnicResponse struct { +} + +type UncommittedUndoableDisk struct { + MigrationFault +} + +func init() { + t["UncommittedUndoableDisk"] = reflect.TypeOf((*UncommittedUndoableDisk)(nil)).Elem() +} + +type UncommittedUndoableDiskFault UncommittedUndoableDisk + +func init() { + t["UncommittedUndoableDiskFault"] = reflect.TypeOf((*UncommittedUndoableDiskFault)(nil)).Elem() +} + +type UnconfiguredPropertyValue struct { + InvalidPropertyValue +} + +func init() { + t["UnconfiguredPropertyValue"] = reflect.TypeOf((*UnconfiguredPropertyValue)(nil)).Elem() +} + +type UnconfiguredPropertyValueFault UnconfiguredPropertyValue + +func init() { + t["UnconfiguredPropertyValueFault"] = reflect.TypeOf((*UnconfiguredPropertyValueFault)(nil)).Elem() +} + +type UncustomizableGuest struct { + CustomizationFault + + UncustomizableGuestOS string `xml:"uncustomizableGuestOS"` +} + +func init() { + t["UncustomizableGuest"] = reflect.TypeOf((*UncustomizableGuest)(nil)).Elem() +} + +type UncustomizableGuestFault UncustomizableGuest + +func init() { + t["UncustomizableGuestFault"] = reflect.TypeOf((*UncustomizableGuestFault)(nil)).Elem() +} + +type UnexpectedCustomizationFault struct { + CustomizationFault +} + +func init() { + t["UnexpectedCustomizationFault"] = reflect.TypeOf((*UnexpectedCustomizationFault)(nil)).Elem() +} + +type UnexpectedCustomizationFaultFault UnexpectedCustomizationFault + +func init() { + t["UnexpectedCustomizationFaultFault"] = reflect.TypeOf((*UnexpectedCustomizationFaultFault)(nil)).Elem() +} + +type UnexpectedFault struct { + RuntimeFault + + FaultName string `xml:"faultName"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["UnexpectedFault"] = reflect.TypeOf((*UnexpectedFault)(nil)).Elem() +} + +type UnexpectedFaultFault UnexpectedFault + +func init() { + t["UnexpectedFaultFault"] = reflect.TypeOf((*UnexpectedFaultFault)(nil)).Elem() +} + +type UninstallHostPatchRequestType struct { + This ManagedObjectReference `xml:"_this"` + BulletinIds []string `xml:"bulletinIds,omitempty"` + Spec *HostPatchManagerPatchManagerOperationSpec `xml:"spec,omitempty"` +} + +func init() { + t["UninstallHostPatchRequestType"] = reflect.TypeOf((*UninstallHostPatchRequestType)(nil)).Elem() +} + +type UninstallHostPatch_Task UninstallHostPatchRequestType + +func init() { + t["UninstallHostPatch_Task"] = reflect.TypeOf((*UninstallHostPatch_Task)(nil)).Elem() +} + +type UninstallHostPatch_TaskResponse struct { + Returnval ManagedObjectReference 
`xml:"returnval"` +} + +type UninstallIoFilterRequestType struct { + This ManagedObjectReference `xml:"_this"` + FilterId string `xml:"filterId"` + CompRes ManagedObjectReference `xml:"compRes"` +} + +func init() { + t["UninstallIoFilterRequestType"] = reflect.TypeOf((*UninstallIoFilterRequestType)(nil)).Elem() +} + +type UninstallIoFilter_Task UninstallIoFilterRequestType + +func init() { + t["UninstallIoFilter_Task"] = reflect.TypeOf((*UninstallIoFilter_Task)(nil)).Elem() +} + +type UninstallIoFilter_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UninstallService UninstallServiceRequestType + +func init() { + t["UninstallService"] = reflect.TypeOf((*UninstallService)(nil)).Elem() +} + +type UninstallServiceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id string `xml:"id"` +} + +func init() { + t["UninstallServiceRequestType"] = reflect.TypeOf((*UninstallServiceRequestType)(nil)).Elem() +} + +type UninstallServiceResponse struct { +} + +type UnlicensedVirtualMachinesEvent struct { + LicenseEvent + + Unlicensed int32 `xml:"unlicensed"` + Available int32 `xml:"available"` +} + +func init() { + t["UnlicensedVirtualMachinesEvent"] = reflect.TypeOf((*UnlicensedVirtualMachinesEvent)(nil)).Elem() +} + +type UnlicensedVirtualMachinesFoundEvent struct { + LicenseEvent + + Available int32 `xml:"available"` +} + +func init() { + t["UnlicensedVirtualMachinesFoundEvent"] = reflect.TypeOf((*UnlicensedVirtualMachinesFoundEvent)(nil)).Elem() +} + +type UnmapVmfsVolumeExRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsUuid []string `xml:"vmfsUuid"` +} + +func init() { + t["UnmapVmfsVolumeExRequestType"] = reflect.TypeOf((*UnmapVmfsVolumeExRequestType)(nil)).Elem() +} + +type UnmapVmfsVolumeEx_Task UnmapVmfsVolumeExRequestType + +func init() { + t["UnmapVmfsVolumeEx_Task"] = reflect.TypeOf((*UnmapVmfsVolumeEx_Task)(nil)).Elem() +} + +type UnmapVmfsVolumeEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UnmountDiskMappingRequestType struct { + This ManagedObjectReference `xml:"_this"` + Mapping []VsanHostDiskMapping `xml:"mapping"` +} + +func init() { + t["UnmountDiskMappingRequestType"] = reflect.TypeOf((*UnmountDiskMappingRequestType)(nil)).Elem() +} + +type UnmountDiskMapping_Task UnmountDiskMappingRequestType + +func init() { + t["UnmountDiskMapping_Task"] = reflect.TypeOf((*UnmountDiskMapping_Task)(nil)).Elem() +} + +type UnmountDiskMapping_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UnmountForceMountedVmfsVolume UnmountForceMountedVmfsVolumeRequestType + +func init() { + t["UnmountForceMountedVmfsVolume"] = reflect.TypeOf((*UnmountForceMountedVmfsVolume)(nil)).Elem() +} + +type UnmountForceMountedVmfsVolumeRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsUuid string `xml:"vmfsUuid"` +} + +func init() { + t["UnmountForceMountedVmfsVolumeRequestType"] = reflect.TypeOf((*UnmountForceMountedVmfsVolumeRequestType)(nil)).Elem() +} + +type UnmountForceMountedVmfsVolumeResponse struct { +} + +type UnmountToolsInstaller UnmountToolsInstallerRequestType + +func init() { + t["UnmountToolsInstaller"] = reflect.TypeOf((*UnmountToolsInstaller)(nil)).Elem() +} + +type UnmountToolsInstallerRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["UnmountToolsInstallerRequestType"] = reflect.TypeOf((*UnmountToolsInstallerRequestType)(nil)).Elem() +} + +type UnmountToolsInstallerResponse struct { +} + 
+type UnmountVffsVolume UnmountVffsVolumeRequestType + +func init() { + t["UnmountVffsVolume"] = reflect.TypeOf((*UnmountVffsVolume)(nil)).Elem() +} + +type UnmountVffsVolumeRequestType struct { + This ManagedObjectReference `xml:"_this"` + VffsUuid string `xml:"vffsUuid"` +} + +func init() { + t["UnmountVffsVolumeRequestType"] = reflect.TypeOf((*UnmountVffsVolumeRequestType)(nil)).Elem() +} + +type UnmountVffsVolumeResponse struct { +} + +type UnmountVmfsVolume UnmountVmfsVolumeRequestType + +func init() { + t["UnmountVmfsVolume"] = reflect.TypeOf((*UnmountVmfsVolume)(nil)).Elem() +} + +type UnmountVmfsVolumeExRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsUuid []string `xml:"vmfsUuid"` +} + +func init() { + t["UnmountVmfsVolumeExRequestType"] = reflect.TypeOf((*UnmountVmfsVolumeExRequestType)(nil)).Elem() +} + +type UnmountVmfsVolumeEx_Task UnmountVmfsVolumeExRequestType + +func init() { + t["UnmountVmfsVolumeEx_Task"] = reflect.TypeOf((*UnmountVmfsVolumeEx_Task)(nil)).Elem() +} + +type UnmountVmfsVolumeEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UnmountVmfsVolumeRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsUuid string `xml:"vmfsUuid"` +} + +func init() { + t["UnmountVmfsVolumeRequestType"] = reflect.TypeOf((*UnmountVmfsVolumeRequestType)(nil)).Elem() +} + +type UnmountVmfsVolumeResponse struct { +} + +type UnrecognizedHost struct { + VimFault + + HostName string `xml:"hostName"` +} + +func init() { + t["UnrecognizedHost"] = reflect.TypeOf((*UnrecognizedHost)(nil)).Elem() +} + +type UnrecognizedHostFault UnrecognizedHost + +func init() { + t["UnrecognizedHostFault"] = reflect.TypeOf((*UnrecognizedHostFault)(nil)).Elem() +} + +type UnregisterAndDestroyRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["UnregisterAndDestroyRequestType"] = reflect.TypeOf((*UnregisterAndDestroyRequestType)(nil)).Elem() +} + +type UnregisterAndDestroy_Task UnregisterAndDestroyRequestType + +func init() { + t["UnregisterAndDestroy_Task"] = reflect.TypeOf((*UnregisterAndDestroy_Task)(nil)).Elem() +} + +type UnregisterAndDestroy_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UnregisterExtension UnregisterExtensionRequestType + +func init() { + t["UnregisterExtension"] = reflect.TypeOf((*UnregisterExtension)(nil)).Elem() +} + +type UnregisterExtensionRequestType struct { + This ManagedObjectReference `xml:"_this"` + ExtensionKey string `xml:"extensionKey"` +} + +func init() { + t["UnregisterExtensionRequestType"] = reflect.TypeOf((*UnregisterExtensionRequestType)(nil)).Elem() +} + +type UnregisterExtensionResponse struct { +} + +type UnregisterHealthUpdateProvider UnregisterHealthUpdateProviderRequestType + +func init() { + t["UnregisterHealthUpdateProvider"] = reflect.TypeOf((*UnregisterHealthUpdateProvider)(nil)).Elem() +} + +type UnregisterHealthUpdateProviderRequestType struct { + This ManagedObjectReference `xml:"_this"` + ProviderId string `xml:"providerId"` +} + +func init() { + t["UnregisterHealthUpdateProviderRequestType"] = reflect.TypeOf((*UnregisterHealthUpdateProviderRequestType)(nil)).Elem() +} + +type UnregisterHealthUpdateProviderResponse struct { +} + +type UnregisterVM UnregisterVMRequestType + +func init() { + t["UnregisterVM"] = reflect.TypeOf((*UnregisterVM)(nil)).Elem() +} + +type UnregisterVMRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["UnregisterVMRequestType"] = 
reflect.TypeOf((*UnregisterVMRequestType)(nil)).Elem() +} + +type UnregisterVMResponse struct { +} + +type UnsharedSwapVMotionNotSupported struct { + MigrationFeatureNotSupported +} + +func init() { + t["UnsharedSwapVMotionNotSupported"] = reflect.TypeOf((*UnsharedSwapVMotionNotSupported)(nil)).Elem() +} + +type UnsharedSwapVMotionNotSupportedFault UnsharedSwapVMotionNotSupported + +func init() { + t["UnsharedSwapVMotionNotSupportedFault"] = reflect.TypeOf((*UnsharedSwapVMotionNotSupportedFault)(nil)).Elem() +} + +type UnsupportedDatastore struct { + VmConfigFault + + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` +} + +func init() { + t["UnsupportedDatastore"] = reflect.TypeOf((*UnsupportedDatastore)(nil)).Elem() +} + +type UnsupportedDatastoreFault BaseUnsupportedDatastore + +func init() { + t["UnsupportedDatastoreFault"] = reflect.TypeOf((*UnsupportedDatastoreFault)(nil)).Elem() +} + +type UnsupportedGuest struct { + InvalidVmConfig + + UnsupportedGuestOS string `xml:"unsupportedGuestOS"` +} + +func init() { + t["UnsupportedGuest"] = reflect.TypeOf((*UnsupportedGuest)(nil)).Elem() +} + +type UnsupportedGuestFault UnsupportedGuest + +func init() { + t["UnsupportedGuestFault"] = reflect.TypeOf((*UnsupportedGuestFault)(nil)).Elem() +} + +type UnsupportedVimApiVersion struct { + VimFault + + Version string `xml:"version,omitempty"` +} + +func init() { + t["UnsupportedVimApiVersion"] = reflect.TypeOf((*UnsupportedVimApiVersion)(nil)).Elem() +} + +type UnsupportedVimApiVersionFault UnsupportedVimApiVersion + +func init() { + t["UnsupportedVimApiVersionFault"] = reflect.TypeOf((*UnsupportedVimApiVersionFault)(nil)).Elem() +} + +type UnsupportedVmxLocation struct { + VmConfigFault +} + +func init() { + t["UnsupportedVmxLocation"] = reflect.TypeOf((*UnsupportedVmxLocation)(nil)).Elem() +} + +type UnsupportedVmxLocationFault UnsupportedVmxLocation + +func init() { + t["UnsupportedVmxLocationFault"] = reflect.TypeOf((*UnsupportedVmxLocationFault)(nil)).Elem() +} + +type UnusedVirtualDiskBlocksNotScrubbed struct { + DeviceBackingNotSupported +} + +func init() { + t["UnusedVirtualDiskBlocksNotScrubbed"] = reflect.TypeOf((*UnusedVirtualDiskBlocksNotScrubbed)(nil)).Elem() +} + +type UnusedVirtualDiskBlocksNotScrubbedFault UnusedVirtualDiskBlocksNotScrubbed + +func init() { + t["UnusedVirtualDiskBlocksNotScrubbedFault"] = reflect.TypeOf((*UnusedVirtualDiskBlocksNotScrubbedFault)(nil)).Elem() +} + +type UpdateAnswerFileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` + ConfigSpec BaseAnswerFileCreateSpec `xml:"configSpec,typeattr"` +} + +func init() { + t["UpdateAnswerFileRequestType"] = reflect.TypeOf((*UpdateAnswerFileRequestType)(nil)).Elem() +} + +type UpdateAnswerFile_Task UpdateAnswerFileRequestType + +func init() { + t["UpdateAnswerFile_Task"] = reflect.TypeOf((*UpdateAnswerFile_Task)(nil)).Elem() +} + +type UpdateAnswerFile_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpdateAssignedLicense UpdateAssignedLicenseRequestType + +func init() { + t["UpdateAssignedLicense"] = reflect.TypeOf((*UpdateAssignedLicense)(nil)).Elem() +} + +type UpdateAssignedLicenseRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity string `xml:"entity"` + LicenseKey string `xml:"licenseKey"` + EntityDisplayName string `xml:"entityDisplayName,omitempty"` +} + +func init() { + t["UpdateAssignedLicenseRequestType"] = reflect.TypeOf((*UpdateAssignedLicenseRequestType)(nil)).Elem() +} + +type 
UpdateAssignedLicenseResponse struct { + Returnval LicenseManagerLicenseInfo `xml:"returnval"` +} + +type UpdateAuthorizationRole UpdateAuthorizationRoleRequestType + +func init() { + t["UpdateAuthorizationRole"] = reflect.TypeOf((*UpdateAuthorizationRole)(nil)).Elem() +} + +type UpdateAuthorizationRoleRequestType struct { + This ManagedObjectReference `xml:"_this"` + RoleId int32 `xml:"roleId"` + NewName string `xml:"newName"` + PrivIds []string `xml:"privIds,omitempty"` +} + +func init() { + t["UpdateAuthorizationRoleRequestType"] = reflect.TypeOf((*UpdateAuthorizationRoleRequestType)(nil)).Elem() +} + +type UpdateAuthorizationRoleResponse struct { +} + +type UpdateBootDevice UpdateBootDeviceRequestType + +func init() { + t["UpdateBootDevice"] = reflect.TypeOf((*UpdateBootDevice)(nil)).Elem() +} + +type UpdateBootDeviceRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key string `xml:"key"` +} + +func init() { + t["UpdateBootDeviceRequestType"] = reflect.TypeOf((*UpdateBootDeviceRequestType)(nil)).Elem() +} + +type UpdateBootDeviceResponse struct { +} + +type UpdateChildResourceConfiguration UpdateChildResourceConfigurationRequestType + +func init() { + t["UpdateChildResourceConfiguration"] = reflect.TypeOf((*UpdateChildResourceConfiguration)(nil)).Elem() +} + +type UpdateChildResourceConfigurationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec []ResourceConfigSpec `xml:"spec"` +} + +func init() { + t["UpdateChildResourceConfigurationRequestType"] = reflect.TypeOf((*UpdateChildResourceConfigurationRequestType)(nil)).Elem() +} + +type UpdateChildResourceConfigurationResponse struct { +} + +type UpdateClusterProfile UpdateClusterProfileRequestType + +func init() { + t["UpdateClusterProfile"] = reflect.TypeOf((*UpdateClusterProfile)(nil)).Elem() +} + +type UpdateClusterProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config BaseClusterProfileConfigSpec `xml:"config,typeattr"` +} + +func init() { + t["UpdateClusterProfileRequestType"] = reflect.TypeOf((*UpdateClusterProfileRequestType)(nil)).Elem() +} + +type UpdateClusterProfileResponse struct { +} + +type UpdateConfig UpdateConfigRequestType + +func init() { + t["UpdateConfig"] = reflect.TypeOf((*UpdateConfig)(nil)).Elem() +} + +type UpdateConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name,omitempty"` + Config *ResourceConfigSpec `xml:"config,omitempty"` +} + +func init() { + t["UpdateConfigRequestType"] = reflect.TypeOf((*UpdateConfigRequestType)(nil)).Elem() +} + +type UpdateConfigResponse struct { +} + +type UpdateConsoleIpRouteConfig UpdateConsoleIpRouteConfigRequestType + +func init() { + t["UpdateConsoleIpRouteConfig"] = reflect.TypeOf((*UpdateConsoleIpRouteConfig)(nil)).Elem() +} + +type UpdateConsoleIpRouteConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config BaseHostIpRouteConfig `xml:"config,typeattr"` +} + +func init() { + t["UpdateConsoleIpRouteConfigRequestType"] = reflect.TypeOf((*UpdateConsoleIpRouteConfigRequestType)(nil)).Elem() +} + +type UpdateConsoleIpRouteConfigResponse struct { +} + +type UpdateCounterLevelMapping UpdateCounterLevelMappingRequestType + +func init() { + t["UpdateCounterLevelMapping"] = reflect.TypeOf((*UpdateCounterLevelMapping)(nil)).Elem() +} + +type UpdateCounterLevelMappingRequestType struct { + This ManagedObjectReference `xml:"_this"` + CounterLevelMap []PerformanceManagerCounterLevelMapping `xml:"counterLevelMap"` +} + +func init() { + 
t["UpdateCounterLevelMappingRequestType"] = reflect.TypeOf((*UpdateCounterLevelMappingRequestType)(nil)).Elem() +} + +type UpdateCounterLevelMappingResponse struct { +} + +type UpdateDVSHealthCheckConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + HealthCheckConfig []BaseDVSHealthCheckConfig `xml:"healthCheckConfig,typeattr"` +} + +func init() { + t["UpdateDVSHealthCheckConfigRequestType"] = reflect.TypeOf((*UpdateDVSHealthCheckConfigRequestType)(nil)).Elem() +} + +type UpdateDVSHealthCheckConfig_Task UpdateDVSHealthCheckConfigRequestType + +func init() { + t["UpdateDVSHealthCheckConfig_Task"] = reflect.TypeOf((*UpdateDVSHealthCheckConfig_Task)(nil)).Elem() +} + +type UpdateDVSHealthCheckConfig_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpdateDVSLacpGroupConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + LacpGroupSpec []VMwareDvsLacpGroupSpec `xml:"lacpGroupSpec"` +} + +func init() { + t["UpdateDVSLacpGroupConfigRequestType"] = reflect.TypeOf((*UpdateDVSLacpGroupConfigRequestType)(nil)).Elem() +} + +type UpdateDVSLacpGroupConfig_Task UpdateDVSLacpGroupConfigRequestType + +func init() { + t["UpdateDVSLacpGroupConfig_Task"] = reflect.TypeOf((*UpdateDVSLacpGroupConfig_Task)(nil)).Elem() +} + +type UpdateDVSLacpGroupConfig_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpdateDateTime UpdateDateTimeRequestType + +func init() { + t["UpdateDateTime"] = reflect.TypeOf((*UpdateDateTime)(nil)).Elem() +} + +type UpdateDateTimeConfig UpdateDateTimeConfigRequestType + +func init() { + t["UpdateDateTimeConfig"] = reflect.TypeOf((*UpdateDateTimeConfig)(nil)).Elem() +} + +type UpdateDateTimeConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config HostDateTimeConfig `xml:"config"` +} + +func init() { + t["UpdateDateTimeConfigRequestType"] = reflect.TypeOf((*UpdateDateTimeConfigRequestType)(nil)).Elem() +} + +type UpdateDateTimeConfigResponse struct { +} + +type UpdateDateTimeRequestType struct { + This ManagedObjectReference `xml:"_this"` + DateTime time.Time `xml:"dateTime"` +} + +func init() { + t["UpdateDateTimeRequestType"] = reflect.TypeOf((*UpdateDateTimeRequestType)(nil)).Elem() +} + +type UpdateDateTimeResponse struct { +} + +type UpdateDefaultPolicy UpdateDefaultPolicyRequestType + +func init() { + t["UpdateDefaultPolicy"] = reflect.TypeOf((*UpdateDefaultPolicy)(nil)).Elem() +} + +type UpdateDefaultPolicyRequestType struct { + This ManagedObjectReference `xml:"_this"` + DefaultPolicy HostFirewallDefaultPolicy `xml:"defaultPolicy"` +} + +func init() { + t["UpdateDefaultPolicyRequestType"] = reflect.TypeOf((*UpdateDefaultPolicyRequestType)(nil)).Elem() +} + +type UpdateDefaultPolicyResponse struct { +} + +type UpdateDiskPartitions UpdateDiskPartitionsRequestType + +func init() { + t["UpdateDiskPartitions"] = reflect.TypeOf((*UpdateDiskPartitions)(nil)).Elem() +} + +type UpdateDiskPartitionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + DevicePath string `xml:"devicePath"` + Spec HostDiskPartitionSpec `xml:"spec"` +} + +func init() { + t["UpdateDiskPartitionsRequestType"] = reflect.TypeOf((*UpdateDiskPartitionsRequestType)(nil)).Elem() +} + +type UpdateDiskPartitionsResponse struct { +} + +type UpdateDnsConfig UpdateDnsConfigRequestType + +func init() { + t["UpdateDnsConfig"] = reflect.TypeOf((*UpdateDnsConfig)(nil)).Elem() +} + +type UpdateDnsConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config 
BaseHostDnsConfig `xml:"config,typeattr"` +} + +func init() { + t["UpdateDnsConfigRequestType"] = reflect.TypeOf((*UpdateDnsConfigRequestType)(nil)).Elem() +} + +type UpdateDnsConfigResponse struct { +} + +type UpdateDvsCapability UpdateDvsCapabilityRequestType + +func init() { + t["UpdateDvsCapability"] = reflect.TypeOf((*UpdateDvsCapability)(nil)).Elem() +} + +type UpdateDvsCapabilityRequestType struct { + This ManagedObjectReference `xml:"_this"` + Capability DVSCapability `xml:"capability"` +} + +func init() { + t["UpdateDvsCapabilityRequestType"] = reflect.TypeOf((*UpdateDvsCapabilityRequestType)(nil)).Elem() +} + +type UpdateDvsCapabilityResponse struct { +} + +type UpdateExtension UpdateExtensionRequestType + +func init() { + t["UpdateExtension"] = reflect.TypeOf((*UpdateExtension)(nil)).Elem() +} + +type UpdateExtensionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Extension Extension `xml:"extension"` +} + +func init() { + t["UpdateExtensionRequestType"] = reflect.TypeOf((*UpdateExtensionRequestType)(nil)).Elem() +} + +type UpdateExtensionResponse struct { +} + +type UpdateFlags UpdateFlagsRequestType + +func init() { + t["UpdateFlags"] = reflect.TypeOf((*UpdateFlags)(nil)).Elem() +} + +type UpdateFlagsRequestType struct { + This ManagedObjectReference `xml:"_this"` + FlagInfo HostFlagInfo `xml:"flagInfo"` +} + +func init() { + t["UpdateFlagsRequestType"] = reflect.TypeOf((*UpdateFlagsRequestType)(nil)).Elem() +} + +type UpdateFlagsResponse struct { +} + +type UpdateGraphicsConfig UpdateGraphicsConfigRequestType + +func init() { + t["UpdateGraphicsConfig"] = reflect.TypeOf((*UpdateGraphicsConfig)(nil)).Elem() +} + +type UpdateGraphicsConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config HostGraphicsConfig `xml:"config"` +} + +func init() { + t["UpdateGraphicsConfigRequestType"] = reflect.TypeOf((*UpdateGraphicsConfigRequestType)(nil)).Elem() +} + +type UpdateGraphicsConfigResponse struct { +} + +type UpdateHostCustomizationsRequestType struct { + This ManagedObjectReference `xml:"_this"` + HostToConfigSpecMap []HostProfileManagerHostToConfigSpecMap `xml:"hostToConfigSpecMap,omitempty"` +} + +func init() { + t["UpdateHostCustomizationsRequestType"] = reflect.TypeOf((*UpdateHostCustomizationsRequestType)(nil)).Elem() +} + +type UpdateHostCustomizations_Task UpdateHostCustomizationsRequestType + +func init() { + t["UpdateHostCustomizations_Task"] = reflect.TypeOf((*UpdateHostCustomizations_Task)(nil)).Elem() +} + +type UpdateHostCustomizations_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpdateHostImageAcceptanceLevel UpdateHostImageAcceptanceLevelRequestType + +func init() { + t["UpdateHostImageAcceptanceLevel"] = reflect.TypeOf((*UpdateHostImageAcceptanceLevel)(nil)).Elem() +} + +type UpdateHostImageAcceptanceLevelRequestType struct { + This ManagedObjectReference `xml:"_this"` + NewAcceptanceLevel string `xml:"newAcceptanceLevel"` +} + +func init() { + t["UpdateHostImageAcceptanceLevelRequestType"] = reflect.TypeOf((*UpdateHostImageAcceptanceLevelRequestType)(nil)).Elem() +} + +type UpdateHostImageAcceptanceLevelResponse struct { +} + +type UpdateHostProfile UpdateHostProfileRequestType + +func init() { + t["UpdateHostProfile"] = reflect.TypeOf((*UpdateHostProfile)(nil)).Elem() +} + +type UpdateHostProfileRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config BaseHostProfileConfigSpec `xml:"config,typeattr"` +} + +func init() { + t["UpdateHostProfileRequestType"] = 
reflect.TypeOf((*UpdateHostProfileRequestType)(nil)).Elem() +} + +type UpdateHostProfileResponse struct { +} + +type UpdateHostSpecification UpdateHostSpecificationRequestType + +func init() { + t["UpdateHostSpecification"] = reflect.TypeOf((*UpdateHostSpecification)(nil)).Elem() +} + +type UpdateHostSpecificationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` + HostSpec HostSpecification `xml:"hostSpec"` +} + +func init() { + t["UpdateHostSpecificationRequestType"] = reflect.TypeOf((*UpdateHostSpecificationRequestType)(nil)).Elem() +} + +type UpdateHostSpecificationResponse struct { +} + +type UpdateHostSubSpecification UpdateHostSubSpecificationRequestType + +func init() { + t["UpdateHostSubSpecification"] = reflect.TypeOf((*UpdateHostSubSpecification)(nil)).Elem() +} + +type UpdateHostSubSpecificationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host ManagedObjectReference `xml:"host"` + HostSubSpec HostSubSpecification `xml:"hostSubSpec"` +} + +func init() { + t["UpdateHostSubSpecificationRequestType"] = reflect.TypeOf((*UpdateHostSubSpecificationRequestType)(nil)).Elem() +} + +type UpdateHostSubSpecificationResponse struct { +} + +type UpdateInternetScsiAdvancedOptions UpdateInternetScsiAdvancedOptionsRequestType + +func init() { + t["UpdateInternetScsiAdvancedOptions"] = reflect.TypeOf((*UpdateInternetScsiAdvancedOptions)(nil)).Elem() +} + +type UpdateInternetScsiAdvancedOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + TargetSet *HostInternetScsiHbaTargetSet `xml:"targetSet,omitempty"` + Options []HostInternetScsiHbaParamValue `xml:"options"` +} + +func init() { + t["UpdateInternetScsiAdvancedOptionsRequestType"] = reflect.TypeOf((*UpdateInternetScsiAdvancedOptionsRequestType)(nil)).Elem() +} + +type UpdateInternetScsiAdvancedOptionsResponse struct { +} + +type UpdateInternetScsiAlias UpdateInternetScsiAliasRequestType + +func init() { + t["UpdateInternetScsiAlias"] = reflect.TypeOf((*UpdateInternetScsiAlias)(nil)).Elem() +} + +type UpdateInternetScsiAliasRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + IScsiAlias string `xml:"iScsiAlias"` +} + +func init() { + t["UpdateInternetScsiAliasRequestType"] = reflect.TypeOf((*UpdateInternetScsiAliasRequestType)(nil)).Elem() +} + +type UpdateInternetScsiAliasResponse struct { +} + +type UpdateInternetScsiAuthenticationProperties UpdateInternetScsiAuthenticationPropertiesRequestType + +func init() { + t["UpdateInternetScsiAuthenticationProperties"] = reflect.TypeOf((*UpdateInternetScsiAuthenticationProperties)(nil)).Elem() +} + +type UpdateInternetScsiAuthenticationPropertiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + AuthenticationProperties HostInternetScsiHbaAuthenticationProperties `xml:"authenticationProperties"` + TargetSet *HostInternetScsiHbaTargetSet `xml:"targetSet,omitempty"` +} + +func init() { + t["UpdateInternetScsiAuthenticationPropertiesRequestType"] = reflect.TypeOf((*UpdateInternetScsiAuthenticationPropertiesRequestType)(nil)).Elem() +} + +type UpdateInternetScsiAuthenticationPropertiesResponse struct { +} + +type UpdateInternetScsiDigestProperties UpdateInternetScsiDigestPropertiesRequestType + +func init() { + t["UpdateInternetScsiDigestProperties"] = reflect.TypeOf((*UpdateInternetScsiDigestProperties)(nil)).Elem() +} + +type 
UpdateInternetScsiDigestPropertiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + TargetSet *HostInternetScsiHbaTargetSet `xml:"targetSet,omitempty"` + DigestProperties HostInternetScsiHbaDigestProperties `xml:"digestProperties"` +} + +func init() { + t["UpdateInternetScsiDigestPropertiesRequestType"] = reflect.TypeOf((*UpdateInternetScsiDigestPropertiesRequestType)(nil)).Elem() +} + +type UpdateInternetScsiDigestPropertiesResponse struct { +} + +type UpdateInternetScsiDiscoveryProperties UpdateInternetScsiDiscoveryPropertiesRequestType + +func init() { + t["UpdateInternetScsiDiscoveryProperties"] = reflect.TypeOf((*UpdateInternetScsiDiscoveryProperties)(nil)).Elem() +} + +type UpdateInternetScsiDiscoveryPropertiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + DiscoveryProperties HostInternetScsiHbaDiscoveryProperties `xml:"discoveryProperties"` +} + +func init() { + t["UpdateInternetScsiDiscoveryPropertiesRequestType"] = reflect.TypeOf((*UpdateInternetScsiDiscoveryPropertiesRequestType)(nil)).Elem() +} + +type UpdateInternetScsiDiscoveryPropertiesResponse struct { +} + +type UpdateInternetScsiIPProperties UpdateInternetScsiIPPropertiesRequestType + +func init() { + t["UpdateInternetScsiIPProperties"] = reflect.TypeOf((*UpdateInternetScsiIPProperties)(nil)).Elem() +} + +type UpdateInternetScsiIPPropertiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + IpProperties HostInternetScsiHbaIPProperties `xml:"ipProperties"` +} + +func init() { + t["UpdateInternetScsiIPPropertiesRequestType"] = reflect.TypeOf((*UpdateInternetScsiIPPropertiesRequestType)(nil)).Elem() +} + +type UpdateInternetScsiIPPropertiesResponse struct { +} + +type UpdateInternetScsiName UpdateInternetScsiNameRequestType + +func init() { + t["UpdateInternetScsiName"] = reflect.TypeOf((*UpdateInternetScsiName)(nil)).Elem() +} + +type UpdateInternetScsiNameRequestType struct { + This ManagedObjectReference `xml:"_this"` + IScsiHbaDevice string `xml:"iScsiHbaDevice"` + IScsiName string `xml:"iScsiName"` +} + +func init() { + t["UpdateInternetScsiNameRequestType"] = reflect.TypeOf((*UpdateInternetScsiNameRequestType)(nil)).Elem() +} + +type UpdateInternetScsiNameResponse struct { +} + +type UpdateIpConfig UpdateIpConfigRequestType + +func init() { + t["UpdateIpConfig"] = reflect.TypeOf((*UpdateIpConfig)(nil)).Elem() +} + +type UpdateIpConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + IpConfig HostIpConfig `xml:"ipConfig"` +} + +func init() { + t["UpdateIpConfigRequestType"] = reflect.TypeOf((*UpdateIpConfigRequestType)(nil)).Elem() +} + +type UpdateIpConfigResponse struct { +} + +type UpdateIpPool UpdateIpPoolRequestType + +func init() { + t["UpdateIpPool"] = reflect.TypeOf((*UpdateIpPool)(nil)).Elem() +} + +type UpdateIpPoolRequestType struct { + This ManagedObjectReference `xml:"_this"` + Dc ManagedObjectReference `xml:"dc"` + Pool IpPool `xml:"pool"` +} + +func init() { + t["UpdateIpPoolRequestType"] = reflect.TypeOf((*UpdateIpPoolRequestType)(nil)).Elem() +} + +type UpdateIpPoolResponse struct { +} + +type UpdateIpRouteConfig UpdateIpRouteConfigRequestType + +func init() { + t["UpdateIpRouteConfig"] = reflect.TypeOf((*UpdateIpRouteConfig)(nil)).Elem() +} + +type UpdateIpRouteConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config BaseHostIpRouteConfig `xml:"config,typeattr"` +} + +func 
init() { + t["UpdateIpRouteConfigRequestType"] = reflect.TypeOf((*UpdateIpRouteConfigRequestType)(nil)).Elem() +} + +type UpdateIpRouteConfigResponse struct { +} + +type UpdateIpRouteTableConfig UpdateIpRouteTableConfigRequestType + +func init() { + t["UpdateIpRouteTableConfig"] = reflect.TypeOf((*UpdateIpRouteTableConfig)(nil)).Elem() +} + +type UpdateIpRouteTableConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config HostIpRouteTableConfig `xml:"config"` +} + +func init() { + t["UpdateIpRouteTableConfigRequestType"] = reflect.TypeOf((*UpdateIpRouteTableConfigRequestType)(nil)).Elem() +} + +type UpdateIpRouteTableConfigResponse struct { +} + +type UpdateIpmi UpdateIpmiRequestType + +func init() { + t["UpdateIpmi"] = reflect.TypeOf((*UpdateIpmi)(nil)).Elem() +} + +type UpdateIpmiRequestType struct { + This ManagedObjectReference `xml:"_this"` + IpmiInfo HostIpmiInfo `xml:"ipmiInfo"` +} + +func init() { + t["UpdateIpmiRequestType"] = reflect.TypeOf((*UpdateIpmiRequestType)(nil)).Elem() +} + +type UpdateIpmiResponse struct { +} + +type UpdateKmipServer UpdateKmipServerRequestType + +func init() { + t["UpdateKmipServer"] = reflect.TypeOf((*UpdateKmipServer)(nil)).Elem() +} + +type UpdateKmipServerRequestType struct { + This ManagedObjectReference `xml:"_this"` + Server KmipServerSpec `xml:"server"` +} + +func init() { + t["UpdateKmipServerRequestType"] = reflect.TypeOf((*UpdateKmipServerRequestType)(nil)).Elem() +} + +type UpdateKmipServerResponse struct { +} + +type UpdateKmsSignedCsrClientCert UpdateKmsSignedCsrClientCertRequestType + +func init() { + t["UpdateKmsSignedCsrClientCert"] = reflect.TypeOf((*UpdateKmsSignedCsrClientCert)(nil)).Elem() +} + +type UpdateKmsSignedCsrClientCertRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster KeyProviderId `xml:"cluster"` + Certificate string `xml:"certificate"` +} + +func init() { + t["UpdateKmsSignedCsrClientCertRequestType"] = reflect.TypeOf((*UpdateKmsSignedCsrClientCertRequestType)(nil)).Elem() +} + +type UpdateKmsSignedCsrClientCertResponse struct { +} + +type UpdateLicense UpdateLicenseRequestType + +func init() { + t["UpdateLicense"] = reflect.TypeOf((*UpdateLicense)(nil)).Elem() +} + +type UpdateLicenseLabel UpdateLicenseLabelRequestType + +func init() { + t["UpdateLicenseLabel"] = reflect.TypeOf((*UpdateLicenseLabel)(nil)).Elem() +} + +type UpdateLicenseLabelRequestType struct { + This ManagedObjectReference `xml:"_this"` + LicenseKey string `xml:"licenseKey"` + LabelKey string `xml:"labelKey"` + LabelValue string `xml:"labelValue"` +} + +func init() { + t["UpdateLicenseLabelRequestType"] = reflect.TypeOf((*UpdateLicenseLabelRequestType)(nil)).Elem() +} + +type UpdateLicenseLabelResponse struct { +} + +type UpdateLicenseRequestType struct { + This ManagedObjectReference `xml:"_this"` + LicenseKey string `xml:"licenseKey"` + Labels []KeyValue `xml:"labels,omitempty"` +} + +func init() { + t["UpdateLicenseRequestType"] = reflect.TypeOf((*UpdateLicenseRequestType)(nil)).Elem() +} + +type UpdateLicenseResponse struct { + Returnval LicenseManagerLicenseInfo `xml:"returnval"` +} + +type UpdateLinkedChildren UpdateLinkedChildrenRequestType + +func init() { + t["UpdateLinkedChildren"] = reflect.TypeOf((*UpdateLinkedChildren)(nil)).Elem() +} + +type UpdateLinkedChildrenRequestType struct { + This ManagedObjectReference `xml:"_this"` + AddChangeSet []VirtualAppLinkInfo `xml:"addChangeSet,omitempty"` + RemoveSet []ManagedObjectReference `xml:"removeSet,omitempty"` +} + +func init() { + 
t["UpdateLinkedChildrenRequestType"] = reflect.TypeOf((*UpdateLinkedChildrenRequestType)(nil)).Elem() +} + +type UpdateLinkedChildrenResponse struct { +} + +type UpdateLocalSwapDatastore UpdateLocalSwapDatastoreRequestType + +func init() { + t["UpdateLocalSwapDatastore"] = reflect.TypeOf((*UpdateLocalSwapDatastore)(nil)).Elem() +} + +type UpdateLocalSwapDatastoreRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` +} + +func init() { + t["UpdateLocalSwapDatastoreRequestType"] = reflect.TypeOf((*UpdateLocalSwapDatastoreRequestType)(nil)).Elem() +} + +type UpdateLocalSwapDatastoreResponse struct { +} + +type UpdateLockdownExceptions UpdateLockdownExceptionsRequestType + +func init() { + t["UpdateLockdownExceptions"] = reflect.TypeOf((*UpdateLockdownExceptions)(nil)).Elem() +} + +type UpdateLockdownExceptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Users []string `xml:"users,omitempty"` +} + +func init() { + t["UpdateLockdownExceptionsRequestType"] = reflect.TypeOf((*UpdateLockdownExceptionsRequestType)(nil)).Elem() +} + +type UpdateLockdownExceptionsResponse struct { +} + +type UpdateModuleOptionString UpdateModuleOptionStringRequestType + +func init() { + t["UpdateModuleOptionString"] = reflect.TypeOf((*UpdateModuleOptionString)(nil)).Elem() +} + +type UpdateModuleOptionStringRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Options string `xml:"options"` +} + +func init() { + t["UpdateModuleOptionStringRequestType"] = reflect.TypeOf((*UpdateModuleOptionStringRequestType)(nil)).Elem() +} + +type UpdateModuleOptionStringResponse struct { +} + +type UpdateNetworkConfig UpdateNetworkConfigRequestType + +func init() { + t["UpdateNetworkConfig"] = reflect.TypeOf((*UpdateNetworkConfig)(nil)).Elem() +} + +type UpdateNetworkConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config HostNetworkConfig `xml:"config"` + ChangeMode string `xml:"changeMode"` +} + +func init() { + t["UpdateNetworkConfigRequestType"] = reflect.TypeOf((*UpdateNetworkConfigRequestType)(nil)).Elem() +} + +type UpdateNetworkConfigResponse struct { + Returnval HostNetworkConfigResult `xml:"returnval"` +} + +type UpdateNetworkResourcePool UpdateNetworkResourcePoolRequestType + +func init() { + t["UpdateNetworkResourcePool"] = reflect.TypeOf((*UpdateNetworkResourcePool)(nil)).Elem() +} + +type UpdateNetworkResourcePoolRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConfigSpec []DVSNetworkResourcePoolConfigSpec `xml:"configSpec"` +} + +func init() { + t["UpdateNetworkResourcePoolRequestType"] = reflect.TypeOf((*UpdateNetworkResourcePoolRequestType)(nil)).Elem() +} + +type UpdateNetworkResourcePoolResponse struct { +} + +type UpdateOptions UpdateOptionsRequestType + +func init() { + t["UpdateOptions"] = reflect.TypeOf((*UpdateOptions)(nil)).Elem() +} + +type UpdateOptionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + ChangedValue []BaseOptionValue `xml:"changedValue,typeattr"` +} + +func init() { + t["UpdateOptionsRequestType"] = reflect.TypeOf((*UpdateOptionsRequestType)(nil)).Elem() +} + +type UpdateOptionsResponse struct { +} + +type UpdatePassthruConfig UpdatePassthruConfigRequestType + +func init() { + t["UpdatePassthruConfig"] = reflect.TypeOf((*UpdatePassthruConfig)(nil)).Elem() +} + +type UpdatePassthruConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config []BaseHostPciPassthruConfig 
`xml:"config,typeattr"` +} + +func init() { + t["UpdatePassthruConfigRequestType"] = reflect.TypeOf((*UpdatePassthruConfigRequestType)(nil)).Elem() +} + +type UpdatePassthruConfigResponse struct { +} + +type UpdatePerfInterval UpdatePerfIntervalRequestType + +func init() { + t["UpdatePerfInterval"] = reflect.TypeOf((*UpdatePerfInterval)(nil)).Elem() +} + +type UpdatePerfIntervalRequestType struct { + This ManagedObjectReference `xml:"_this"` + Interval PerfInterval `xml:"interval"` +} + +func init() { + t["UpdatePerfIntervalRequestType"] = reflect.TypeOf((*UpdatePerfIntervalRequestType)(nil)).Elem() +} + +type UpdatePerfIntervalResponse struct { +} + +type UpdatePhysicalNicLinkSpeed UpdatePhysicalNicLinkSpeedRequestType + +func init() { + t["UpdatePhysicalNicLinkSpeed"] = reflect.TypeOf((*UpdatePhysicalNicLinkSpeed)(nil)).Elem() +} + +type UpdatePhysicalNicLinkSpeedRequestType struct { + This ManagedObjectReference `xml:"_this"` + Device string `xml:"device"` + LinkSpeed *PhysicalNicLinkInfo `xml:"linkSpeed,omitempty"` +} + +func init() { + t["UpdatePhysicalNicLinkSpeedRequestType"] = reflect.TypeOf((*UpdatePhysicalNicLinkSpeedRequestType)(nil)).Elem() +} + +type UpdatePhysicalNicLinkSpeedResponse struct { +} + +type UpdatePortGroup UpdatePortGroupRequestType + +func init() { + t["UpdatePortGroup"] = reflect.TypeOf((*UpdatePortGroup)(nil)).Elem() +} + +type UpdatePortGroupRequestType struct { + This ManagedObjectReference `xml:"_this"` + PgName string `xml:"pgName"` + Portgrp HostPortGroupSpec `xml:"portgrp"` +} + +func init() { + t["UpdatePortGroupRequestType"] = reflect.TypeOf((*UpdatePortGroupRequestType)(nil)).Elem() +} + +type UpdatePortGroupResponse struct { +} + +type UpdateProgress UpdateProgressRequestType + +func init() { + t["UpdateProgress"] = reflect.TypeOf((*UpdateProgress)(nil)).Elem() +} + +type UpdateProgressRequestType struct { + This ManagedObjectReference `xml:"_this"` + PercentDone int32 `xml:"percentDone"` +} + +func init() { + t["UpdateProgressRequestType"] = reflect.TypeOf((*UpdateProgressRequestType)(nil)).Elem() +} + +type UpdateProgressResponse struct { +} + +type UpdateReferenceHost UpdateReferenceHostRequestType + +func init() { + t["UpdateReferenceHost"] = reflect.TypeOf((*UpdateReferenceHost)(nil)).Elem() +} + +type UpdateReferenceHostRequestType struct { + This ManagedObjectReference `xml:"_this"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["UpdateReferenceHostRequestType"] = reflect.TypeOf((*UpdateReferenceHostRequestType)(nil)).Elem() +} + +type UpdateReferenceHostResponse struct { +} + +type UpdateRuleset UpdateRulesetRequestType + +func init() { + t["UpdateRuleset"] = reflect.TypeOf((*UpdateRuleset)(nil)).Elem() +} + +type UpdateRulesetRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id string `xml:"id"` + Spec HostFirewallRulesetRulesetSpec `xml:"spec"` +} + +func init() { + t["UpdateRulesetRequestType"] = reflect.TypeOf((*UpdateRulesetRequestType)(nil)).Elem() +} + +type UpdateRulesetResponse struct { +} + +type UpdateScsiLunDisplayName UpdateScsiLunDisplayNameRequestType + +func init() { + t["UpdateScsiLunDisplayName"] = reflect.TypeOf((*UpdateScsiLunDisplayName)(nil)).Elem() +} + +type UpdateScsiLunDisplayNameRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunUuid string `xml:"lunUuid"` + DisplayName string `xml:"displayName"` +} + +func init() { + t["UpdateScsiLunDisplayNameRequestType"] = reflect.TypeOf((*UpdateScsiLunDisplayNameRequestType)(nil)).Elem() +} + +type 
UpdateScsiLunDisplayNameResponse struct { +} + +type UpdateSelfSignedClientCert UpdateSelfSignedClientCertRequestType + +func init() { + t["UpdateSelfSignedClientCert"] = reflect.TypeOf((*UpdateSelfSignedClientCert)(nil)).Elem() +} + +type UpdateSelfSignedClientCertRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster KeyProviderId `xml:"cluster"` + Certificate string `xml:"certificate"` +} + +func init() { + t["UpdateSelfSignedClientCertRequestType"] = reflect.TypeOf((*UpdateSelfSignedClientCertRequestType)(nil)).Elem() +} + +type UpdateSelfSignedClientCertResponse struct { +} + +type UpdateServiceConsoleVirtualNic UpdateServiceConsoleVirtualNicRequestType + +func init() { + t["UpdateServiceConsoleVirtualNic"] = reflect.TypeOf((*UpdateServiceConsoleVirtualNic)(nil)).Elem() +} + +type UpdateServiceConsoleVirtualNicRequestType struct { + This ManagedObjectReference `xml:"_this"` + Device string `xml:"device"` + Nic HostVirtualNicSpec `xml:"nic"` +} + +func init() { + t["UpdateServiceConsoleVirtualNicRequestType"] = reflect.TypeOf((*UpdateServiceConsoleVirtualNicRequestType)(nil)).Elem() +} + +type UpdateServiceConsoleVirtualNicResponse struct { +} + +type UpdateServiceMessage UpdateServiceMessageRequestType + +func init() { + t["UpdateServiceMessage"] = reflect.TypeOf((*UpdateServiceMessage)(nil)).Elem() +} + +type UpdateServiceMessageRequestType struct { + This ManagedObjectReference `xml:"_this"` + Message string `xml:"message"` +} + +func init() { + t["UpdateServiceMessageRequestType"] = reflect.TypeOf((*UpdateServiceMessageRequestType)(nil)).Elem() +} + +type UpdateServiceMessageResponse struct { +} + +type UpdateServicePolicy UpdateServicePolicyRequestType + +func init() { + t["UpdateServicePolicy"] = reflect.TypeOf((*UpdateServicePolicy)(nil)).Elem() +} + +type UpdateServicePolicyRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id string `xml:"id"` + Policy string `xml:"policy"` +} + +func init() { + t["UpdateServicePolicyRequestType"] = reflect.TypeOf((*UpdateServicePolicyRequestType)(nil)).Elem() +} + +type UpdateServicePolicyResponse struct { +} + +type UpdateSet struct { + DynamicData + + Version string `xml:"version"` + FilterSet []PropertyFilterUpdate `xml:"filterSet,omitempty"` + Truncated *bool `xml:"truncated"` +} + +func init() { + t["UpdateSet"] = reflect.TypeOf((*UpdateSet)(nil)).Elem() +} + +type UpdateSoftwareInternetScsiEnabled UpdateSoftwareInternetScsiEnabledRequestType + +func init() { + t["UpdateSoftwareInternetScsiEnabled"] = reflect.TypeOf((*UpdateSoftwareInternetScsiEnabled)(nil)).Elem() +} + +type UpdateSoftwareInternetScsiEnabledRequestType struct { + This ManagedObjectReference `xml:"_this"` + Enabled bool `xml:"enabled"` +} + +func init() { + t["UpdateSoftwareInternetScsiEnabledRequestType"] = reflect.TypeOf((*UpdateSoftwareInternetScsiEnabledRequestType)(nil)).Elem() +} + +type UpdateSoftwareInternetScsiEnabledResponse struct { +} + +type UpdateSystemResources UpdateSystemResourcesRequestType + +func init() { + t["UpdateSystemResources"] = reflect.TypeOf((*UpdateSystemResources)(nil)).Elem() +} + +type UpdateSystemResourcesRequestType struct { + This ManagedObjectReference `xml:"_this"` + ResourceInfo HostSystemResourceInfo `xml:"resourceInfo"` +} + +func init() { + t["UpdateSystemResourcesRequestType"] = reflect.TypeOf((*UpdateSystemResourcesRequestType)(nil)).Elem() +} + +type UpdateSystemResourcesResponse struct { +} + +type UpdateSystemSwapConfiguration UpdateSystemSwapConfigurationRequestType + +func init() { + 
t["UpdateSystemSwapConfiguration"] = reflect.TypeOf((*UpdateSystemSwapConfiguration)(nil)).Elem() +} + +type UpdateSystemSwapConfigurationRequestType struct { + This ManagedObjectReference `xml:"_this"` + SysSwapConfig HostSystemSwapConfiguration `xml:"sysSwapConfig"` +} + +func init() { + t["UpdateSystemSwapConfigurationRequestType"] = reflect.TypeOf((*UpdateSystemSwapConfigurationRequestType)(nil)).Elem() +} + +type UpdateSystemSwapConfigurationResponse struct { +} + +type UpdateSystemUsers UpdateSystemUsersRequestType + +func init() { + t["UpdateSystemUsers"] = reflect.TypeOf((*UpdateSystemUsers)(nil)).Elem() +} + +type UpdateSystemUsersRequestType struct { + This ManagedObjectReference `xml:"_this"` + Users []string `xml:"users,omitempty"` +} + +func init() { + t["UpdateSystemUsersRequestType"] = reflect.TypeOf((*UpdateSystemUsersRequestType)(nil)).Elem() +} + +type UpdateSystemUsersResponse struct { +} + +type UpdateUser UpdateUserRequestType + +func init() { + t["UpdateUser"] = reflect.TypeOf((*UpdateUser)(nil)).Elem() +} + +type UpdateUserRequestType struct { + This ManagedObjectReference `xml:"_this"` + User BaseHostAccountSpec `xml:"user,typeattr"` +} + +func init() { + t["UpdateUserRequestType"] = reflect.TypeOf((*UpdateUserRequestType)(nil)).Elem() +} + +type UpdateUserResponse struct { +} + +type UpdateVAppConfig UpdateVAppConfigRequestType + +func init() { + t["UpdateVAppConfig"] = reflect.TypeOf((*UpdateVAppConfig)(nil)).Elem() +} + +type UpdateVAppConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec VAppConfigSpec `xml:"spec"` +} + +func init() { + t["UpdateVAppConfigRequestType"] = reflect.TypeOf((*UpdateVAppConfigRequestType)(nil)).Elem() +} + +type UpdateVAppConfigResponse struct { +} + +type UpdateVStorageInfrastructureObjectPolicyRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec VslmInfrastructureObjectPolicySpec `xml:"spec"` +} + +func init() { + t["UpdateVStorageInfrastructureObjectPolicyRequestType"] = reflect.TypeOf((*UpdateVStorageInfrastructureObjectPolicyRequestType)(nil)).Elem() +} + +type UpdateVStorageInfrastructureObjectPolicy_Task UpdateVStorageInfrastructureObjectPolicyRequestType + +func init() { + t["UpdateVStorageInfrastructureObjectPolicy_Task"] = reflect.TypeOf((*UpdateVStorageInfrastructureObjectPolicy_Task)(nil)).Elem() +} + +type UpdateVStorageInfrastructureObjectPolicy_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpdateVStorageObjectPolicyRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` +} + +func init() { + t["UpdateVStorageObjectPolicyRequestType"] = reflect.TypeOf((*UpdateVStorageObjectPolicyRequestType)(nil)).Elem() +} + +type UpdateVStorageObjectPolicy_Task UpdateVStorageObjectPolicyRequestType + +func init() { + t["UpdateVStorageObjectPolicy_Task"] = reflect.TypeOf((*UpdateVStorageObjectPolicy_Task)(nil)).Elem() +} + +type UpdateVStorageObjectPolicy_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpdateVVolVirtualMachineFilesRequestType struct { + This ManagedObjectReference `xml:"_this"` + FailoverPair []DatastoreVVolContainerFailoverPair `xml:"failoverPair,omitempty"` +} + +func init() { + t["UpdateVVolVirtualMachineFilesRequestType"] = reflect.TypeOf((*UpdateVVolVirtualMachineFilesRequestType)(nil)).Elem() +} + +type UpdateVVolVirtualMachineFiles_Task 
UpdateVVolVirtualMachineFilesRequestType + +func init() { + t["UpdateVVolVirtualMachineFiles_Task"] = reflect.TypeOf((*UpdateVVolVirtualMachineFiles_Task)(nil)).Elem() +} + +type UpdateVVolVirtualMachineFiles_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpdateVirtualMachineFilesRequestType struct { + This ManagedObjectReference `xml:"_this"` + MountPathDatastoreMapping []DatastoreMountPathDatastorePair `xml:"mountPathDatastoreMapping"` +} + +func init() { + t["UpdateVirtualMachineFilesRequestType"] = reflect.TypeOf((*UpdateVirtualMachineFilesRequestType)(nil)).Elem() +} + +type UpdateVirtualMachineFilesResult struct { + DynamicData + + FailedVmFile []UpdateVirtualMachineFilesResultFailedVmFileInfo `xml:"failedVmFile,omitempty"` +} + +func init() { + t["UpdateVirtualMachineFilesResult"] = reflect.TypeOf((*UpdateVirtualMachineFilesResult)(nil)).Elem() +} + +type UpdateVirtualMachineFilesResultFailedVmFileInfo struct { + DynamicData + + VmFile string `xml:"vmFile"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["UpdateVirtualMachineFilesResultFailedVmFileInfo"] = reflect.TypeOf((*UpdateVirtualMachineFilesResultFailedVmFileInfo)(nil)).Elem() +} + +type UpdateVirtualMachineFiles_Task UpdateVirtualMachineFilesRequestType + +func init() { + t["UpdateVirtualMachineFiles_Task"] = reflect.TypeOf((*UpdateVirtualMachineFiles_Task)(nil)).Elem() +} + +type UpdateVirtualMachineFiles_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpdateVirtualNic UpdateVirtualNicRequestType + +func init() { + t["UpdateVirtualNic"] = reflect.TypeOf((*UpdateVirtualNic)(nil)).Elem() +} + +type UpdateVirtualNicRequestType struct { + This ManagedObjectReference `xml:"_this"` + Device string `xml:"device"` + Nic HostVirtualNicSpec `xml:"nic"` +} + +func init() { + t["UpdateVirtualNicRequestType"] = reflect.TypeOf((*UpdateVirtualNicRequestType)(nil)).Elem() +} + +type UpdateVirtualNicResponse struct { +} + +type UpdateVirtualSwitch UpdateVirtualSwitchRequestType + +func init() { + t["UpdateVirtualSwitch"] = reflect.TypeOf((*UpdateVirtualSwitch)(nil)).Elem() +} + +type UpdateVirtualSwitchRequestType struct { + This ManagedObjectReference `xml:"_this"` + VswitchName string `xml:"vswitchName"` + Spec HostVirtualSwitchSpec `xml:"spec"` +} + +func init() { + t["UpdateVirtualSwitchRequestType"] = reflect.TypeOf((*UpdateVirtualSwitchRequestType)(nil)).Elem() +} + +type UpdateVirtualSwitchResponse struct { +} + +type UpdateVmfsUnmapBandwidth UpdateVmfsUnmapBandwidthRequestType + +func init() { + t["UpdateVmfsUnmapBandwidth"] = reflect.TypeOf((*UpdateVmfsUnmapBandwidth)(nil)).Elem() +} + +type UpdateVmfsUnmapBandwidthRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsUuid string `xml:"vmfsUuid"` + UnmapBandwidthSpec VmfsUnmapBandwidthSpec `xml:"unmapBandwidthSpec"` +} + +func init() { + t["UpdateVmfsUnmapBandwidthRequestType"] = reflect.TypeOf((*UpdateVmfsUnmapBandwidthRequestType)(nil)).Elem() +} + +type UpdateVmfsUnmapBandwidthResponse struct { +} + +type UpdateVmfsUnmapPriority UpdateVmfsUnmapPriorityRequestType + +func init() { + t["UpdateVmfsUnmapPriority"] = reflect.TypeOf((*UpdateVmfsUnmapPriority)(nil)).Elem() +} + +type UpdateVmfsUnmapPriorityRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsUuid string `xml:"vmfsUuid"` + UnmapPriority string `xml:"unmapPriority"` +} + +func init() { + t["UpdateVmfsUnmapPriorityRequestType"] = 
reflect.TypeOf((*UpdateVmfsUnmapPriorityRequestType)(nil)).Elem() +} + +type UpdateVmfsUnmapPriorityResponse struct { +} + +type UpdateVsanRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config VsanHostConfigInfo `xml:"config"` +} + +func init() { + t["UpdateVsanRequestType"] = reflect.TypeOf((*UpdateVsanRequestType)(nil)).Elem() +} + +type UpdateVsan_Task UpdateVsanRequestType + +func init() { + t["UpdateVsan_Task"] = reflect.TypeOf((*UpdateVsan_Task)(nil)).Elem() +} + +type UpdateVsan_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpdatedAgentBeingRestartedEvent struct { + HostEvent +} + +func init() { + t["UpdatedAgentBeingRestartedEvent"] = reflect.TypeOf((*UpdatedAgentBeingRestartedEvent)(nil)).Elem() +} + +type UpgradeEvent struct { + Event + + Message string `xml:"message"` +} + +func init() { + t["UpgradeEvent"] = reflect.TypeOf((*UpgradeEvent)(nil)).Elem() +} + +type UpgradeIoFilterRequestType struct { + This ManagedObjectReference `xml:"_this"` + FilterId string `xml:"filterId"` + CompRes ManagedObjectReference `xml:"compRes"` + VibUrl string `xml:"vibUrl"` +} + +func init() { + t["UpgradeIoFilterRequestType"] = reflect.TypeOf((*UpgradeIoFilterRequestType)(nil)).Elem() +} + +type UpgradeIoFilter_Task UpgradeIoFilterRequestType + +func init() { + t["UpgradeIoFilter_Task"] = reflect.TypeOf((*UpgradeIoFilter_Task)(nil)).Elem() +} + +type UpgradeIoFilter_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpgradeToolsRequestType struct { + This ManagedObjectReference `xml:"_this"` + InstallerOptions string `xml:"installerOptions,omitempty"` +} + +func init() { + t["UpgradeToolsRequestType"] = reflect.TypeOf((*UpgradeToolsRequestType)(nil)).Elem() +} + +type UpgradeTools_Task UpgradeToolsRequestType + +func init() { + t["UpgradeTools_Task"] = reflect.TypeOf((*UpgradeTools_Task)(nil)).Elem() +} + +type UpgradeTools_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpgradeVMRequestType struct { + This ManagedObjectReference `xml:"_this"` + Version string `xml:"version,omitempty"` +} + +func init() { + t["UpgradeVMRequestType"] = reflect.TypeOf((*UpgradeVMRequestType)(nil)).Elem() +} + +type UpgradeVM_Task UpgradeVMRequestType + +func init() { + t["UpgradeVM_Task"] = reflect.TypeOf((*UpgradeVM_Task)(nil)).Elem() +} + +type UpgradeVM_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type UpgradeVmLayout UpgradeVmLayoutRequestType + +func init() { + t["UpgradeVmLayout"] = reflect.TypeOf((*UpgradeVmLayout)(nil)).Elem() +} + +type UpgradeVmLayoutRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["UpgradeVmLayoutRequestType"] = reflect.TypeOf((*UpgradeVmLayoutRequestType)(nil)).Elem() +} + +type UpgradeVmLayoutResponse struct { +} + +type UpgradeVmfs UpgradeVmfsRequestType + +func init() { + t["UpgradeVmfs"] = reflect.TypeOf((*UpgradeVmfs)(nil)).Elem() +} + +type UpgradeVmfsRequestType struct { + This ManagedObjectReference `xml:"_this"` + VmfsPath string `xml:"vmfsPath"` +} + +func init() { + t["UpgradeVmfsRequestType"] = reflect.TypeOf((*UpgradeVmfsRequestType)(nil)).Elem() +} + +type UpgradeVmfsResponse struct { +} + +type UpgradeVsanObjects UpgradeVsanObjectsRequestType + +func init() { + t["UpgradeVsanObjects"] = reflect.TypeOf((*UpgradeVsanObjects)(nil)).Elem() +} + +type UpgradeVsanObjectsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Uuids []string `xml:"uuids"` + 
NewVersion int32 `xml:"newVersion"` +} + +func init() { + t["UpgradeVsanObjectsRequestType"] = reflect.TypeOf((*UpgradeVsanObjectsRequestType)(nil)).Elem() +} + +type UpgradeVsanObjectsResponse struct { + Returnval []HostVsanInternalSystemVsanObjectOperationResult `xml:"returnval,omitempty"` +} + +type UplinkPortMtuNotSupportEvent struct { + DvsHealthStatusChangeEvent +} + +func init() { + t["UplinkPortMtuNotSupportEvent"] = reflect.TypeOf((*UplinkPortMtuNotSupportEvent)(nil)).Elem() +} + +type UplinkPortMtuSupportEvent struct { + DvsHealthStatusChangeEvent +} + +func init() { + t["UplinkPortMtuSupportEvent"] = reflect.TypeOf((*UplinkPortMtuSupportEvent)(nil)).Elem() +} + +type UplinkPortVlanTrunkedEvent struct { + DvsHealthStatusChangeEvent +} + +func init() { + t["UplinkPortVlanTrunkedEvent"] = reflect.TypeOf((*UplinkPortVlanTrunkedEvent)(nil)).Elem() +} + +type UplinkPortVlanUntrunkedEvent struct { + DvsHealthStatusChangeEvent +} + +func init() { + t["UplinkPortVlanUntrunkedEvent"] = reflect.TypeOf((*UplinkPortVlanUntrunkedEvent)(nil)).Elem() +} + +type UploadClientCert UploadClientCertRequestType + +func init() { + t["UploadClientCert"] = reflect.TypeOf((*UploadClientCert)(nil)).Elem() +} + +type UploadClientCertRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster KeyProviderId `xml:"cluster"` + Certificate string `xml:"certificate"` + PrivateKey string `xml:"privateKey"` +} + +func init() { + t["UploadClientCertRequestType"] = reflect.TypeOf((*UploadClientCertRequestType)(nil)).Elem() +} + +type UploadClientCertResponse struct { +} + +type UploadKmipServerCert UploadKmipServerCertRequestType + +func init() { + t["UploadKmipServerCert"] = reflect.TypeOf((*UploadKmipServerCert)(nil)).Elem() +} + +type UploadKmipServerCertRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster KeyProviderId `xml:"cluster"` + Certificate string `xml:"certificate"` +} + +func init() { + t["UploadKmipServerCertRequestType"] = reflect.TypeOf((*UploadKmipServerCertRequestType)(nil)).Elem() +} + +type UploadKmipServerCertResponse struct { +} + +type UsbScanCodeSpec struct { + DynamicData + + KeyEvents []UsbScanCodeSpecKeyEvent `xml:"keyEvents"` +} + +func init() { + t["UsbScanCodeSpec"] = reflect.TypeOf((*UsbScanCodeSpec)(nil)).Elem() +} + +type UsbScanCodeSpecKeyEvent struct { + DynamicData + + UsbHidCode int32 `xml:"usbHidCode"` + Modifiers *UsbScanCodeSpecModifierType `xml:"modifiers,omitempty"` +} + +func init() { + t["UsbScanCodeSpecKeyEvent"] = reflect.TypeOf((*UsbScanCodeSpecKeyEvent)(nil)).Elem() +} + +type UsbScanCodeSpecModifierType struct { + DynamicData + + LeftControl *bool `xml:"leftControl"` + LeftShift *bool `xml:"leftShift"` + LeftAlt *bool `xml:"leftAlt"` + LeftGui *bool `xml:"leftGui"` + RightControl *bool `xml:"rightControl"` + RightShift *bool `xml:"rightShift"` + RightAlt *bool `xml:"rightAlt"` + RightGui *bool `xml:"rightGui"` +} + +func init() { + t["UsbScanCodeSpecModifierType"] = reflect.TypeOf((*UsbScanCodeSpecModifierType)(nil)).Elem() +} + +type UserAssignedToGroup struct { + HostEvent + + UserLogin string `xml:"userLogin"` + Group string `xml:"group"` +} + +func init() { + t["UserAssignedToGroup"] = reflect.TypeOf((*UserAssignedToGroup)(nil)).Elem() +} + +type UserGroupProfile struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["UserGroupProfile"] = reflect.TypeOf((*UserGroupProfile)(nil)).Elem() +} + +type UserInputRequiredParameterMetadata struct { + ProfilePolicyOptionMetadata + + UserInputParameter 
[]ProfileParameterMetadata `xml:"userInputParameter,omitempty"` +} + +func init() { + t["UserInputRequiredParameterMetadata"] = reflect.TypeOf((*UserInputRequiredParameterMetadata)(nil)).Elem() +} + +type UserLoginSessionEvent struct { + SessionEvent + + IpAddress string `xml:"ipAddress"` + UserAgent string `xml:"userAgent,omitempty"` + Locale string `xml:"locale"` + SessionId string `xml:"sessionId"` +} + +func init() { + t["UserLoginSessionEvent"] = reflect.TypeOf((*UserLoginSessionEvent)(nil)).Elem() +} + +type UserLogoutSessionEvent struct { + SessionEvent + + IpAddress string `xml:"ipAddress,omitempty"` + UserAgent string `xml:"userAgent,omitempty"` + CallCount int64 `xml:"callCount,omitempty"` + SessionId string `xml:"sessionId,omitempty"` + LoginTime *time.Time `xml:"loginTime"` +} + +func init() { + t["UserLogoutSessionEvent"] = reflect.TypeOf((*UserLogoutSessionEvent)(nil)).Elem() +} + +type UserNotFound struct { + VimFault + + Principal string `xml:"principal"` + Unresolved bool `xml:"unresolved"` +} + +func init() { + t["UserNotFound"] = reflect.TypeOf((*UserNotFound)(nil)).Elem() +} + +type UserNotFoundFault UserNotFound + +func init() { + t["UserNotFoundFault"] = reflect.TypeOf((*UserNotFoundFault)(nil)).Elem() +} + +type UserPasswordChanged struct { + HostEvent + + UserLogin string `xml:"userLogin"` +} + +func init() { + t["UserPasswordChanged"] = reflect.TypeOf((*UserPasswordChanged)(nil)).Elem() +} + +type UserPrivilegeResult struct { + DynamicData + + Entity ManagedObjectReference `xml:"entity"` + Privileges []string `xml:"privileges,omitempty"` +} + +func init() { + t["UserPrivilegeResult"] = reflect.TypeOf((*UserPrivilegeResult)(nil)).Elem() +} + +type UserProfile struct { + ApplyProfile + + Key string `xml:"key"` +} + +func init() { + t["UserProfile"] = reflect.TypeOf((*UserProfile)(nil)).Elem() +} + +type UserSearchResult struct { + DynamicData + + Principal string `xml:"principal"` + FullName string `xml:"fullName,omitempty"` + Group bool `xml:"group"` +} + +func init() { + t["UserSearchResult"] = reflect.TypeOf((*UserSearchResult)(nil)).Elem() +} + +type UserSession struct { + DynamicData + + Key string `xml:"key"` + UserName string `xml:"userName"` + FullName string `xml:"fullName"` + LoginTime time.Time `xml:"loginTime"` + LastActiveTime time.Time `xml:"lastActiveTime"` + Locale string `xml:"locale"` + MessageLocale string `xml:"messageLocale"` + ExtensionSession *bool `xml:"extensionSession"` + IpAddress string `xml:"ipAddress,omitempty"` + UserAgent string `xml:"userAgent,omitempty"` + CallCount int64 `xml:"callCount,omitempty"` +} + +func init() { + t["UserSession"] = reflect.TypeOf((*UserSession)(nil)).Elem() +} + +type UserUnassignedFromGroup struct { + HostEvent + + UserLogin string `xml:"userLogin"` + Group string `xml:"group"` +} + +func init() { + t["UserUnassignedFromGroup"] = reflect.TypeOf((*UserUnassignedFromGroup)(nil)).Elem() +} + +type UserUpgradeEvent struct { + UpgradeEvent +} + +func init() { + t["UserUpgradeEvent"] = reflect.TypeOf((*UserUpgradeEvent)(nil)).Elem() +} + +type VASAStorageArray struct { + DynamicData + + Name string `xml:"name"` + Uuid string `xml:"uuid"` + VendorId string `xml:"vendorId"` + ModelId string `xml:"modelId"` +} + +func init() { + t["VASAStorageArray"] = reflect.TypeOf((*VASAStorageArray)(nil)).Elem() +} + +type VAppCloneSpec struct { + DynamicData + + Location ManagedObjectReference `xml:"location"` + Host *ManagedObjectReference `xml:"host,omitempty"` + ResourceSpec *ResourceConfigSpec `xml:"resourceSpec,omitempty"` 
+ VmFolder *ManagedObjectReference `xml:"vmFolder,omitempty"` + NetworkMapping []VAppCloneSpecNetworkMappingPair `xml:"networkMapping,omitempty"` + Property []KeyValue `xml:"property,omitempty"` + ResourceMapping []VAppCloneSpecResourceMap `xml:"resourceMapping,omitempty"` + Provisioning string `xml:"provisioning,omitempty"` +} + +func init() { + t["VAppCloneSpec"] = reflect.TypeOf((*VAppCloneSpec)(nil)).Elem() +} + +type VAppCloneSpecNetworkMappingPair struct { + DynamicData + + Source ManagedObjectReference `xml:"source"` + Destination ManagedObjectReference `xml:"destination"` +} + +func init() { + t["VAppCloneSpecNetworkMappingPair"] = reflect.TypeOf((*VAppCloneSpecNetworkMappingPair)(nil)).Elem() +} + +type VAppCloneSpecResourceMap struct { + DynamicData + + Source ManagedObjectReference `xml:"source"` + Parent *ManagedObjectReference `xml:"parent,omitempty"` + ResourceSpec *ResourceConfigSpec `xml:"resourceSpec,omitempty"` + Location *ManagedObjectReference `xml:"location,omitempty"` +} + +func init() { + t["VAppCloneSpecResourceMap"] = reflect.TypeOf((*VAppCloneSpecResourceMap)(nil)).Elem() +} + +type VAppConfigFault struct { + VimFault +} + +func init() { + t["VAppConfigFault"] = reflect.TypeOf((*VAppConfigFault)(nil)).Elem() +} + +type VAppConfigFaultFault BaseVAppConfigFault + +func init() { + t["VAppConfigFaultFault"] = reflect.TypeOf((*VAppConfigFaultFault)(nil)).Elem() +} + +type VAppConfigInfo struct { + VmConfigInfo + + EntityConfig []VAppEntityConfigInfo `xml:"entityConfig,omitempty"` + Annotation string `xml:"annotation"` + InstanceUuid string `xml:"instanceUuid,omitempty"` + ManagedBy *ManagedByInfo `xml:"managedBy,omitempty"` +} + +func init() { + t["VAppConfigInfo"] = reflect.TypeOf((*VAppConfigInfo)(nil)).Elem() +} + +type VAppConfigSpec struct { + VmConfigSpec + + EntityConfig []VAppEntityConfigInfo `xml:"entityConfig,omitempty"` + Annotation string `xml:"annotation,omitempty"` + InstanceUuid string `xml:"instanceUuid,omitempty"` + ManagedBy *ManagedByInfo `xml:"managedBy,omitempty"` +} + +func init() { + t["VAppConfigSpec"] = reflect.TypeOf((*VAppConfigSpec)(nil)).Elem() +} + +type VAppEntityConfigInfo struct { + DynamicData + + Key *ManagedObjectReference `xml:"key,omitempty"` + Tag string `xml:"tag,omitempty"` + StartOrder int32 `xml:"startOrder,omitempty"` + StartDelay int32 `xml:"startDelay,omitempty"` + WaitingForGuest *bool `xml:"waitingForGuest"` + StartAction string `xml:"startAction,omitempty"` + StopDelay int32 `xml:"stopDelay,omitempty"` + StopAction string `xml:"stopAction,omitempty"` + DestroyWithParent *bool `xml:"destroyWithParent"` +} + +func init() { + t["VAppEntityConfigInfo"] = reflect.TypeOf((*VAppEntityConfigInfo)(nil)).Elem() +} + +type VAppIPAssignmentInfo struct { + DynamicData + + SupportedAllocationScheme []string `xml:"supportedAllocationScheme,omitempty"` + IpAllocationPolicy string `xml:"ipAllocationPolicy,omitempty"` + SupportedIpProtocol []string `xml:"supportedIpProtocol,omitempty"` + IpProtocol string `xml:"ipProtocol,omitempty"` +} + +func init() { + t["VAppIPAssignmentInfo"] = reflect.TypeOf((*VAppIPAssignmentInfo)(nil)).Elem() +} + +type VAppNotRunning struct { + VmConfigFault +} + +func init() { + t["VAppNotRunning"] = reflect.TypeOf((*VAppNotRunning)(nil)).Elem() +} + +type VAppNotRunningFault VAppNotRunning + +func init() { + t["VAppNotRunningFault"] = reflect.TypeOf((*VAppNotRunningFault)(nil)).Elem() +} + +type VAppOperationInProgress struct { + RuntimeFault +} + +func init() { + t["VAppOperationInProgress"] = 
reflect.TypeOf((*VAppOperationInProgress)(nil)).Elem() +} + +type VAppOperationInProgressFault VAppOperationInProgress + +func init() { + t["VAppOperationInProgressFault"] = reflect.TypeOf((*VAppOperationInProgressFault)(nil)).Elem() +} + +type VAppOvfSectionInfo struct { + DynamicData + + Key int32 `xml:"key,omitempty"` + Namespace string `xml:"namespace,omitempty"` + Type string `xml:"type,omitempty"` + AtEnvelopeLevel *bool `xml:"atEnvelopeLevel"` + Contents string `xml:"contents,omitempty"` +} + +func init() { + t["VAppOvfSectionInfo"] = reflect.TypeOf((*VAppOvfSectionInfo)(nil)).Elem() +} + +type VAppOvfSectionSpec struct { + ArrayUpdateSpec + + Info *VAppOvfSectionInfo `xml:"info,omitempty"` +} + +func init() { + t["VAppOvfSectionSpec"] = reflect.TypeOf((*VAppOvfSectionSpec)(nil)).Elem() +} + +type VAppProductInfo struct { + DynamicData + + Key int32 `xml:"key"` + ClassId string `xml:"classId,omitempty"` + InstanceId string `xml:"instanceId,omitempty"` + Name string `xml:"name,omitempty"` + Vendor string `xml:"vendor,omitempty"` + Version string `xml:"version,omitempty"` + FullVersion string `xml:"fullVersion,omitempty"` + VendorUrl string `xml:"vendorUrl,omitempty"` + ProductUrl string `xml:"productUrl,omitempty"` + AppUrl string `xml:"appUrl,omitempty"` +} + +func init() { + t["VAppProductInfo"] = reflect.TypeOf((*VAppProductInfo)(nil)).Elem() +} + +type VAppProductSpec struct { + ArrayUpdateSpec + + Info *VAppProductInfo `xml:"info,omitempty"` +} + +func init() { + t["VAppProductSpec"] = reflect.TypeOf((*VAppProductSpec)(nil)).Elem() +} + +type VAppPropertyFault struct { + VmConfigFault + + Id string `xml:"id"` + Category string `xml:"category"` + Label string `xml:"label"` + Type string `xml:"type"` + Value string `xml:"value"` +} + +func init() { + t["VAppPropertyFault"] = reflect.TypeOf((*VAppPropertyFault)(nil)).Elem() +} + +type VAppPropertyFaultFault BaseVAppPropertyFault + +func init() { + t["VAppPropertyFaultFault"] = reflect.TypeOf((*VAppPropertyFaultFault)(nil)).Elem() +} + +type VAppPropertyInfo struct { + DynamicData + + Key int32 `xml:"key"` + ClassId string `xml:"classId,omitempty"` + InstanceId string `xml:"instanceId,omitempty"` + Id string `xml:"id,omitempty"` + Category string `xml:"category,omitempty"` + Label string `xml:"label,omitempty"` + Type string `xml:"type,omitempty"` + TypeReference string `xml:"typeReference,omitempty"` + UserConfigurable *bool `xml:"userConfigurable"` + DefaultValue string `xml:"defaultValue,omitempty"` + Value string `xml:"value,omitempty"` + Description string `xml:"description,omitempty"` +} + +func init() { + t["VAppPropertyInfo"] = reflect.TypeOf((*VAppPropertyInfo)(nil)).Elem() +} + +type VAppPropertySpec struct { + ArrayUpdateSpec + + Info *VAppPropertyInfo `xml:"info,omitempty"` +} + +func init() { + t["VAppPropertySpec"] = reflect.TypeOf((*VAppPropertySpec)(nil)).Elem() +} + +type VAppTaskInProgress struct { + TaskInProgress +} + +func init() { + t["VAppTaskInProgress"] = reflect.TypeOf((*VAppTaskInProgress)(nil)).Elem() +} + +type VAppTaskInProgressFault VAppTaskInProgress + +func init() { + t["VAppTaskInProgressFault"] = reflect.TypeOf((*VAppTaskInProgressFault)(nil)).Elem() +} + +type VFlashCacheHotConfigNotSupported struct { + VmConfigFault +} + +func init() { + t["VFlashCacheHotConfigNotSupported"] = reflect.TypeOf((*VFlashCacheHotConfigNotSupported)(nil)).Elem() +} + +type VFlashCacheHotConfigNotSupportedFault VFlashCacheHotConfigNotSupported + +func init() { + t["VFlashCacheHotConfigNotSupportedFault"] = 
reflect.TypeOf((*VFlashCacheHotConfigNotSupportedFault)(nil)).Elem() +} + +type VFlashModuleNotSupported struct { + VmConfigFault + + VmName string `xml:"vmName"` + ModuleName string `xml:"moduleName"` + Reason string `xml:"reason"` + HostName string `xml:"hostName"` +} + +func init() { + t["VFlashModuleNotSupported"] = reflect.TypeOf((*VFlashModuleNotSupported)(nil)).Elem() +} + +type VFlashModuleNotSupportedFault VFlashModuleNotSupported + +func init() { + t["VFlashModuleNotSupportedFault"] = reflect.TypeOf((*VFlashModuleNotSupportedFault)(nil)).Elem() +} + +type VFlashModuleVersionIncompatible struct { + VimFault + + ModuleName string `xml:"moduleName"` + VmRequestModuleVersion string `xml:"vmRequestModuleVersion"` + HostMinSupportedVerson string `xml:"hostMinSupportedVerson"` + HostModuleVersion string `xml:"hostModuleVersion"` +} + +func init() { + t["VFlashModuleVersionIncompatible"] = reflect.TypeOf((*VFlashModuleVersionIncompatible)(nil)).Elem() +} + +type VFlashModuleVersionIncompatibleFault VFlashModuleVersionIncompatible + +func init() { + t["VFlashModuleVersionIncompatibleFault"] = reflect.TypeOf((*VFlashModuleVersionIncompatibleFault)(nil)).Elem() +} + +type VMFSDatastoreCreatedEvent struct { + HostEvent + + Datastore DatastoreEventArgument `xml:"datastore"` + DatastoreUrl string `xml:"datastoreUrl,omitempty"` +} + +func init() { + t["VMFSDatastoreCreatedEvent"] = reflect.TypeOf((*VMFSDatastoreCreatedEvent)(nil)).Elem() +} + +type VMFSDatastoreExpandedEvent struct { + HostEvent + + Datastore DatastoreEventArgument `xml:"datastore"` +} + +func init() { + t["VMFSDatastoreExpandedEvent"] = reflect.TypeOf((*VMFSDatastoreExpandedEvent)(nil)).Elem() +} + +type VMFSDatastoreExtendedEvent struct { + HostEvent + + Datastore DatastoreEventArgument `xml:"datastore"` +} + +func init() { + t["VMFSDatastoreExtendedEvent"] = reflect.TypeOf((*VMFSDatastoreExtendedEvent)(nil)).Elem() +} + +type VMINotSupported struct { + DeviceNotSupported +} + +func init() { + t["VMINotSupported"] = reflect.TypeOf((*VMINotSupported)(nil)).Elem() +} + +type VMINotSupportedFault VMINotSupported + +func init() { + t["VMINotSupportedFault"] = reflect.TypeOf((*VMINotSupportedFault)(nil)).Elem() +} + +type VMOnConflictDVPort struct { + CannotAccessNetwork +} + +func init() { + t["VMOnConflictDVPort"] = reflect.TypeOf((*VMOnConflictDVPort)(nil)).Elem() +} + +type VMOnConflictDVPortFault VMOnConflictDVPort + +func init() { + t["VMOnConflictDVPortFault"] = reflect.TypeOf((*VMOnConflictDVPortFault)(nil)).Elem() +} + +type VMOnVirtualIntranet struct { + CannotAccessNetwork +} + +func init() { + t["VMOnVirtualIntranet"] = reflect.TypeOf((*VMOnVirtualIntranet)(nil)).Elem() +} + +type VMOnVirtualIntranetFault VMOnVirtualIntranet + +func init() { + t["VMOnVirtualIntranetFault"] = reflect.TypeOf((*VMOnVirtualIntranetFault)(nil)).Elem() +} + +type VMotionAcrossNetworkNotSupported struct { + MigrationFeatureNotSupported +} + +func init() { + t["VMotionAcrossNetworkNotSupported"] = reflect.TypeOf((*VMotionAcrossNetworkNotSupported)(nil)).Elem() +} + +type VMotionAcrossNetworkNotSupportedFault VMotionAcrossNetworkNotSupported + +func init() { + t["VMotionAcrossNetworkNotSupportedFault"] = reflect.TypeOf((*VMotionAcrossNetworkNotSupportedFault)(nil)).Elem() +} + +type VMotionInterfaceIssue struct { + MigrationFault + + AtSourceHost bool `xml:"atSourceHost"` + FailedHost string `xml:"failedHost"` + FailedHostEntity *ManagedObjectReference `xml:"failedHostEntity,omitempty"` +} + +func init() { + t["VMotionInterfaceIssue"] = 
reflect.TypeOf((*VMotionInterfaceIssue)(nil)).Elem() +} + +type VMotionInterfaceIssueFault BaseVMotionInterfaceIssue + +func init() { + t["VMotionInterfaceIssueFault"] = reflect.TypeOf((*VMotionInterfaceIssueFault)(nil)).Elem() +} + +type VMotionLicenseExpiredEvent struct { + LicenseEvent +} + +func init() { + t["VMotionLicenseExpiredEvent"] = reflect.TypeOf((*VMotionLicenseExpiredEvent)(nil)).Elem() +} + +type VMotionLinkCapacityLow struct { + VMotionInterfaceIssue + + Network string `xml:"network"` +} + +func init() { + t["VMotionLinkCapacityLow"] = reflect.TypeOf((*VMotionLinkCapacityLow)(nil)).Elem() +} + +type VMotionLinkCapacityLowFault VMotionLinkCapacityLow + +func init() { + t["VMotionLinkCapacityLowFault"] = reflect.TypeOf((*VMotionLinkCapacityLowFault)(nil)).Elem() +} + +type VMotionLinkDown struct { + VMotionInterfaceIssue + + Network string `xml:"network"` +} + +func init() { + t["VMotionLinkDown"] = reflect.TypeOf((*VMotionLinkDown)(nil)).Elem() +} + +type VMotionLinkDownFault VMotionLinkDown + +func init() { + t["VMotionLinkDownFault"] = reflect.TypeOf((*VMotionLinkDownFault)(nil)).Elem() +} + +type VMotionNotConfigured struct { + VMotionInterfaceIssue +} + +func init() { + t["VMotionNotConfigured"] = reflect.TypeOf((*VMotionNotConfigured)(nil)).Elem() +} + +type VMotionNotConfiguredFault VMotionNotConfigured + +func init() { + t["VMotionNotConfiguredFault"] = reflect.TypeOf((*VMotionNotConfiguredFault)(nil)).Elem() +} + +type VMotionNotLicensed struct { + VMotionInterfaceIssue +} + +func init() { + t["VMotionNotLicensed"] = reflect.TypeOf((*VMotionNotLicensed)(nil)).Elem() +} + +type VMotionNotLicensedFault VMotionNotLicensed + +func init() { + t["VMotionNotLicensedFault"] = reflect.TypeOf((*VMotionNotLicensedFault)(nil)).Elem() +} + +type VMotionNotSupported struct { + VMotionInterfaceIssue +} + +func init() { + t["VMotionNotSupported"] = reflect.TypeOf((*VMotionNotSupported)(nil)).Elem() +} + +type VMotionNotSupportedFault VMotionNotSupported + +func init() { + t["VMotionNotSupportedFault"] = reflect.TypeOf((*VMotionNotSupportedFault)(nil)).Elem() +} + +type VMotionProtocolIncompatible struct { + MigrationFault +} + +func init() { + t["VMotionProtocolIncompatible"] = reflect.TypeOf((*VMotionProtocolIncompatible)(nil)).Elem() +} + +type VMotionProtocolIncompatibleFault VMotionProtocolIncompatible + +func init() { + t["VMotionProtocolIncompatibleFault"] = reflect.TypeOf((*VMotionProtocolIncompatibleFault)(nil)).Elem() +} + +type VMwareDVSConfigInfo struct { + DVSConfigInfo + + VspanSession []VMwareVspanSession `xml:"vspanSession,omitempty"` + PvlanConfig []VMwareDVSPvlanMapEntry `xml:"pvlanConfig,omitempty"` + MaxMtu int32 `xml:"maxMtu"` + LinkDiscoveryProtocolConfig *LinkDiscoveryProtocolConfig `xml:"linkDiscoveryProtocolConfig,omitempty"` + IpfixConfig *VMwareIpfixConfig `xml:"ipfixConfig,omitempty"` + LacpGroupConfig []VMwareDvsLacpGroupConfig `xml:"lacpGroupConfig,omitempty"` + LacpApiVersion string `xml:"lacpApiVersion,omitempty"` + MulticastFilteringMode string `xml:"multicastFilteringMode,omitempty"` +} + +func init() { + t["VMwareDVSConfigInfo"] = reflect.TypeOf((*VMwareDVSConfigInfo)(nil)).Elem() +} + +type VMwareDVSConfigSpec struct { + DVSConfigSpec + + PvlanConfigSpec []VMwareDVSPvlanConfigSpec `xml:"pvlanConfigSpec,omitempty"` + VspanConfigSpec []VMwareDVSVspanConfigSpec `xml:"vspanConfigSpec,omitempty"` + MaxMtu int32 `xml:"maxMtu,omitempty"` + LinkDiscoveryProtocolConfig *LinkDiscoveryProtocolConfig `xml:"linkDiscoveryProtocolConfig,omitempty"` + IpfixConfig 
*VMwareIpfixConfig `xml:"ipfixConfig,omitempty"` + LacpApiVersion string `xml:"lacpApiVersion,omitempty"` + MulticastFilteringMode string `xml:"multicastFilteringMode,omitempty"` +} + +func init() { + t["VMwareDVSConfigSpec"] = reflect.TypeOf((*VMwareDVSConfigSpec)(nil)).Elem() +} + +type VMwareDVSFeatureCapability struct { + DVSFeatureCapability + + VspanSupported *bool `xml:"vspanSupported"` + LldpSupported *bool `xml:"lldpSupported"` + IpfixSupported *bool `xml:"ipfixSupported"` + IpfixCapability *VMwareDvsIpfixCapability `xml:"ipfixCapability,omitempty"` + MulticastSnoopingSupported *bool `xml:"multicastSnoopingSupported"` + VspanCapability *VMwareDVSVspanCapability `xml:"vspanCapability,omitempty"` + LacpCapability *VMwareDvsLacpCapability `xml:"lacpCapability,omitempty"` +} + +func init() { + t["VMwareDVSFeatureCapability"] = reflect.TypeOf((*VMwareDVSFeatureCapability)(nil)).Elem() +} + +type VMwareDVSHealthCheckCapability struct { + DVSHealthCheckCapability + + VlanMtuSupported bool `xml:"vlanMtuSupported"` + TeamingSupported bool `xml:"teamingSupported"` +} + +func init() { + t["VMwareDVSHealthCheckCapability"] = reflect.TypeOf((*VMwareDVSHealthCheckCapability)(nil)).Elem() +} + +type VMwareDVSHealthCheckConfig struct { + DVSHealthCheckConfig +} + +func init() { + t["VMwareDVSHealthCheckConfig"] = reflect.TypeOf((*VMwareDVSHealthCheckConfig)(nil)).Elem() +} + +type VMwareDVSMtuHealthCheckResult struct { + HostMemberUplinkHealthCheckResult + + MtuMismatch bool `xml:"mtuMismatch"` + VlanSupportSwitchMtu []NumericRange `xml:"vlanSupportSwitchMtu,omitempty"` + VlanNotSupportSwitchMtu []NumericRange `xml:"vlanNotSupportSwitchMtu,omitempty"` +} + +func init() { + t["VMwareDVSMtuHealthCheckResult"] = reflect.TypeOf((*VMwareDVSMtuHealthCheckResult)(nil)).Elem() +} + +type VMwareDVSPortSetting struct { + DVPortSetting + + Vlan BaseVmwareDistributedVirtualSwitchVlanSpec `xml:"vlan,omitempty,typeattr"` + QosTag *IntPolicy `xml:"qosTag,omitempty"` + UplinkTeamingPolicy *VmwareUplinkPortTeamingPolicy `xml:"uplinkTeamingPolicy,omitempty"` + SecurityPolicy *DVSSecurityPolicy `xml:"securityPolicy,omitempty"` + IpfixEnabled *BoolPolicy `xml:"ipfixEnabled,omitempty"` + TxUplink *BoolPolicy `xml:"txUplink,omitempty"` + LacpPolicy *VMwareUplinkLacpPolicy `xml:"lacpPolicy,omitempty"` + MacManagementPolicy *DVSMacManagementPolicy `xml:"macManagementPolicy,omitempty"` +} + +func init() { + t["VMwareDVSPortSetting"] = reflect.TypeOf((*VMwareDVSPortSetting)(nil)).Elem() +} + +type VMwareDVSPortgroupPolicy struct { + DVPortgroupPolicy + + VlanOverrideAllowed bool `xml:"vlanOverrideAllowed"` + UplinkTeamingOverrideAllowed bool `xml:"uplinkTeamingOverrideAllowed"` + SecurityPolicyOverrideAllowed bool `xml:"securityPolicyOverrideAllowed"` + IpfixOverrideAllowed *bool `xml:"ipfixOverrideAllowed"` +} + +func init() { + t["VMwareDVSPortgroupPolicy"] = reflect.TypeOf((*VMwareDVSPortgroupPolicy)(nil)).Elem() +} + +type VMwareDVSPvlanConfigSpec struct { + DynamicData + + PvlanEntry VMwareDVSPvlanMapEntry `xml:"pvlanEntry"` + Operation string `xml:"operation"` +} + +func init() { + t["VMwareDVSPvlanConfigSpec"] = reflect.TypeOf((*VMwareDVSPvlanConfigSpec)(nil)).Elem() +} + +type VMwareDVSPvlanMapEntry struct { + DynamicData + + PrimaryVlanId int32 `xml:"primaryVlanId"` + SecondaryVlanId int32 `xml:"secondaryVlanId"` + PvlanType string `xml:"pvlanType"` +} + +func init() { + t["VMwareDVSPvlanMapEntry"] = reflect.TypeOf((*VMwareDVSPvlanMapEntry)(nil)).Elem() +} + +type VMwareDVSTeamingHealthCheckConfig struct { + 
VMwareDVSHealthCheckConfig +} + +func init() { + t["VMwareDVSTeamingHealthCheckConfig"] = reflect.TypeOf((*VMwareDVSTeamingHealthCheckConfig)(nil)).Elem() +} + +type VMwareDVSTeamingHealthCheckResult struct { + HostMemberHealthCheckResult + + TeamingStatus string `xml:"teamingStatus"` +} + +func init() { + t["VMwareDVSTeamingHealthCheckResult"] = reflect.TypeOf((*VMwareDVSTeamingHealthCheckResult)(nil)).Elem() +} + +type VMwareDVSVlanHealthCheckResult struct { + HostMemberUplinkHealthCheckResult + + TrunkedVlan []NumericRange `xml:"trunkedVlan,omitempty"` + UntrunkedVlan []NumericRange `xml:"untrunkedVlan,omitempty"` +} + +func init() { + t["VMwareDVSVlanHealthCheckResult"] = reflect.TypeOf((*VMwareDVSVlanHealthCheckResult)(nil)).Elem() +} + +type VMwareDVSVlanMtuHealthCheckConfig struct { + VMwareDVSHealthCheckConfig +} + +func init() { + t["VMwareDVSVlanMtuHealthCheckConfig"] = reflect.TypeOf((*VMwareDVSVlanMtuHealthCheckConfig)(nil)).Elem() +} + +type VMwareDVSVspanCapability struct { + DynamicData + + MixedDestSupported bool `xml:"mixedDestSupported"` + DvportSupported bool `xml:"dvportSupported"` + RemoteSourceSupported bool `xml:"remoteSourceSupported"` + RemoteDestSupported bool `xml:"remoteDestSupported"` + EncapRemoteSourceSupported bool `xml:"encapRemoteSourceSupported"` + ErspanProtocolSupported *bool `xml:"erspanProtocolSupported"` + MirrorNetstackSupported *bool `xml:"mirrorNetstackSupported"` +} + +func init() { + t["VMwareDVSVspanCapability"] = reflect.TypeOf((*VMwareDVSVspanCapability)(nil)).Elem() +} + +type VMwareDVSVspanConfigSpec struct { + DynamicData + + VspanSession VMwareVspanSession `xml:"vspanSession"` + Operation string `xml:"operation"` +} + +func init() { + t["VMwareDVSVspanConfigSpec"] = reflect.TypeOf((*VMwareDVSVspanConfigSpec)(nil)).Elem() +} + +type VMwareDvsIpfixCapability struct { + DynamicData + + IpfixSupported *bool `xml:"ipfixSupported"` + Ipv6ForIpfixSupported *bool `xml:"ipv6ForIpfixSupported"` + ObservationDomainIdSupported *bool `xml:"observationDomainIdSupported"` +} + +func init() { + t["VMwareDvsIpfixCapability"] = reflect.TypeOf((*VMwareDvsIpfixCapability)(nil)).Elem() +} + +type VMwareDvsLacpCapability struct { + DynamicData + + LacpSupported *bool `xml:"lacpSupported"` + MultiLacpGroupSupported *bool `xml:"multiLacpGroupSupported"` +} + +func init() { + t["VMwareDvsLacpCapability"] = reflect.TypeOf((*VMwareDvsLacpCapability)(nil)).Elem() +} + +type VMwareDvsLacpGroupConfig struct { + DynamicData + + Key string `xml:"key,omitempty"` + Name string `xml:"name,omitempty"` + Mode string `xml:"mode,omitempty"` + UplinkNum int32 `xml:"uplinkNum,omitempty"` + LoadbalanceAlgorithm string `xml:"loadbalanceAlgorithm,omitempty"` + Vlan *VMwareDvsLagVlanConfig `xml:"vlan,omitempty"` + Ipfix *VMwareDvsLagIpfixConfig `xml:"ipfix,omitempty"` + UplinkName []string `xml:"uplinkName,omitempty"` + UplinkPortKey []string `xml:"uplinkPortKey,omitempty"` +} + +func init() { + t["VMwareDvsLacpGroupConfig"] = reflect.TypeOf((*VMwareDvsLacpGroupConfig)(nil)).Elem() +} + +type VMwareDvsLacpGroupSpec struct { + DynamicData + + LacpGroupConfig VMwareDvsLacpGroupConfig `xml:"lacpGroupConfig"` + Operation string `xml:"operation"` +} + +func init() { + t["VMwareDvsLacpGroupSpec"] = reflect.TypeOf((*VMwareDvsLacpGroupSpec)(nil)).Elem() +} + +type VMwareDvsLagIpfixConfig struct { + DynamicData + + IpfixEnabled *bool `xml:"ipfixEnabled"` +} + +func init() { + t["VMwareDvsLagIpfixConfig"] = reflect.TypeOf((*VMwareDvsLagIpfixConfig)(nil)).Elem() +} + +type 
VMwareDvsLagVlanConfig struct { + DynamicData + + VlanId []NumericRange `xml:"vlanId,omitempty"` +} + +func init() { + t["VMwareDvsLagVlanConfig"] = reflect.TypeOf((*VMwareDvsLagVlanConfig)(nil)).Elem() +} + +type VMwareIpfixConfig struct { + DynamicData + + CollectorIpAddress string `xml:"collectorIpAddress,omitempty"` + CollectorPort int32 `xml:"collectorPort,omitempty"` + ObservationDomainId int64 `xml:"observationDomainId,omitempty"` + ActiveFlowTimeout int32 `xml:"activeFlowTimeout"` + IdleFlowTimeout int32 `xml:"idleFlowTimeout"` + SamplingRate int32 `xml:"samplingRate"` + InternalFlowsOnly bool `xml:"internalFlowsOnly"` +} + +func init() { + t["VMwareIpfixConfig"] = reflect.TypeOf((*VMwareIpfixConfig)(nil)).Elem() +} + +type VMwareUplinkLacpPolicy struct { + InheritablePolicy + + Enable *BoolPolicy `xml:"enable,omitempty"` + Mode *StringPolicy `xml:"mode,omitempty"` +} + +func init() { + t["VMwareUplinkLacpPolicy"] = reflect.TypeOf((*VMwareUplinkLacpPolicy)(nil)).Elem() +} + +type VMwareUplinkPortOrderPolicy struct { + InheritablePolicy + + ActiveUplinkPort []string `xml:"activeUplinkPort,omitempty"` + StandbyUplinkPort []string `xml:"standbyUplinkPort,omitempty"` +} + +func init() { + t["VMwareUplinkPortOrderPolicy"] = reflect.TypeOf((*VMwareUplinkPortOrderPolicy)(nil)).Elem() +} + +type VMwareVspanPort struct { + DynamicData + + PortKey []string `xml:"portKey,omitempty"` + UplinkPortName []string `xml:"uplinkPortName,omitempty"` + WildcardPortConnecteeType []string `xml:"wildcardPortConnecteeType,omitempty"` + Vlans []int32 `xml:"vlans,omitempty"` + IpAddress []string `xml:"ipAddress,omitempty"` +} + +func init() { + t["VMwareVspanPort"] = reflect.TypeOf((*VMwareVspanPort)(nil)).Elem() +} + +type VMwareVspanSession struct { + DynamicData + + Key string `xml:"key,omitempty"` + Name string `xml:"name,omitempty"` + Description string `xml:"description,omitempty"` + Enabled bool `xml:"enabled"` + SourcePortTransmitted *VMwareVspanPort `xml:"sourcePortTransmitted,omitempty"` + SourcePortReceived *VMwareVspanPort `xml:"sourcePortReceived,omitempty"` + DestinationPort *VMwareVspanPort `xml:"destinationPort,omitempty"` + EncapsulationVlanId int32 `xml:"encapsulationVlanId,omitempty"` + StripOriginalVlan bool `xml:"stripOriginalVlan"` + MirroredPacketLength int32 `xml:"mirroredPacketLength,omitempty"` + NormalTrafficAllowed bool `xml:"normalTrafficAllowed"` + SessionType string `xml:"sessionType,omitempty"` + SamplingRate int32 `xml:"samplingRate,omitempty"` + EncapType string `xml:"encapType,omitempty"` + ErspanId int32 `xml:"erspanId,omitempty"` + ErspanCOS int32 `xml:"erspanCOS,omitempty"` + ErspanGraNanosec *bool `xml:"erspanGraNanosec"` + Netstack string `xml:"netstack,omitempty"` +} + +func init() { + t["VMwareVspanSession"] = reflect.TypeOf((*VMwareVspanSession)(nil)).Elem() +} + +type VStorageObject struct { + DynamicData + + Config VStorageObjectConfigInfo `xml:"config"` +} + +func init() { + t["VStorageObject"] = reflect.TypeOf((*VStorageObject)(nil)).Elem() +} + +type VStorageObjectAssociations struct { + DynamicData + + Id ID `xml:"id"` + VmDiskAssociations []VStorageObjectAssociationsVmDiskAssociations `xml:"vmDiskAssociations,omitempty"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["VStorageObjectAssociations"] = reflect.TypeOf((*VStorageObjectAssociations)(nil)).Elem() +} + +type VStorageObjectAssociationsVmDiskAssociations struct { + DynamicData + + VmId string `xml:"vmId"` + DiskKey int32 `xml:"diskKey"` +} + +func init() { + 
t["VStorageObjectAssociationsVmDiskAssociations"] = reflect.TypeOf((*VStorageObjectAssociationsVmDiskAssociations)(nil)).Elem() +} + +type VStorageObjectConfigInfo struct { + BaseConfigInfo + + CapacityInMB int64 `xml:"capacityInMB"` + ConsumptionType []string `xml:"consumptionType,omitempty"` + ConsumerId []ID `xml:"consumerId,omitempty"` +} + +func init() { + t["VStorageObjectConfigInfo"] = reflect.TypeOf((*VStorageObjectConfigInfo)(nil)).Elem() +} + +type VStorageObjectCreateSnapshotRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Description string `xml:"description"` +} + +func init() { + t["VStorageObjectCreateSnapshotRequestType"] = reflect.TypeOf((*VStorageObjectCreateSnapshotRequestType)(nil)).Elem() +} + +type VStorageObjectCreateSnapshot_Task VStorageObjectCreateSnapshotRequestType + +func init() { + t["VStorageObjectCreateSnapshot_Task"] = reflect.TypeOf((*VStorageObjectCreateSnapshot_Task)(nil)).Elem() +} + +type VStorageObjectCreateSnapshot_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type VStorageObjectSnapshotInfo struct { + DynamicData + + Snapshots []VStorageObjectSnapshotInfoVStorageObjectSnapshot `xml:"snapshots,omitempty"` +} + +func init() { + t["VStorageObjectSnapshotInfo"] = reflect.TypeOf((*VStorageObjectSnapshotInfo)(nil)).Elem() +} + +type VStorageObjectSnapshotInfoVStorageObjectSnapshot struct { + DynamicData + + Id *ID `xml:"id,omitempty"` + BackingObjectId string `xml:"backingObjectId,omitempty"` + CreateTime time.Time `xml:"createTime"` + Description string `xml:"description"` +} + +func init() { + t["VStorageObjectSnapshotInfoVStorageObjectSnapshot"] = reflect.TypeOf((*VStorageObjectSnapshotInfoVStorageObjectSnapshot)(nil)).Elem() +} + +type VStorageObjectStateInfo struct { + DynamicData + + Tentative *bool `xml:"tentative"` +} + +func init() { + t["VStorageObjectStateInfo"] = reflect.TypeOf((*VStorageObjectStateInfo)(nil)).Elem() +} + +type VVolHostPE struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + ProtocolEndpoint []HostProtocolEndpoint `xml:"protocolEndpoint"` +} + +func init() { + t["VVolHostPE"] = reflect.TypeOf((*VVolHostPE)(nil)).Elem() +} + +type VVolVmConfigFileUpdateResult struct { + DynamicData + + SucceededVmConfigFile []KeyValue `xml:"succeededVmConfigFile,omitempty"` + FailedVmConfigFile []VVolVmConfigFileUpdateResultFailedVmConfigFileInfo `xml:"failedVmConfigFile,omitempty"` +} + +func init() { + t["VVolVmConfigFileUpdateResult"] = reflect.TypeOf((*VVolVmConfigFileUpdateResult)(nil)).Elem() +} + +type VVolVmConfigFileUpdateResultFailedVmConfigFileInfo struct { + DynamicData + + TargetConfigVVolId string `xml:"targetConfigVVolId"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["VVolVmConfigFileUpdateResultFailedVmConfigFileInfo"] = reflect.TypeOf((*VVolVmConfigFileUpdateResultFailedVmConfigFileInfo)(nil)).Elem() +} + +type ValidateCredentialsInGuest ValidateCredentialsInGuestRequestType + +func init() { + t["ValidateCredentialsInGuest"] = reflect.TypeOf((*ValidateCredentialsInGuest)(nil)).Elem() +} + +type ValidateCredentialsInGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` +} + +func init() { + t["ValidateCredentialsInGuestRequestType"] = reflect.TypeOf((*ValidateCredentialsInGuestRequestType)(nil)).Elem() +} + +type ValidateCredentialsInGuestResponse struct { +} + 
+type ValidateHost ValidateHostRequestType + +func init() { + t["ValidateHost"] = reflect.TypeOf((*ValidateHost)(nil)).Elem() +} + +type ValidateHostProfileCompositionRequestType struct { + This ManagedObjectReference `xml:"_this"` + Source ManagedObjectReference `xml:"source"` + Targets []ManagedObjectReference `xml:"targets,omitempty"` + ToBeMerged *HostApplyProfile `xml:"toBeMerged,omitempty"` + ToReplaceWith *HostApplyProfile `xml:"toReplaceWith,omitempty"` + ToBeDeleted *HostApplyProfile `xml:"toBeDeleted,omitempty"` + EnableStatusToBeCopied *HostApplyProfile `xml:"enableStatusToBeCopied,omitempty"` + ErrorOnly *bool `xml:"errorOnly"` +} + +func init() { + t["ValidateHostProfileCompositionRequestType"] = reflect.TypeOf((*ValidateHostProfileCompositionRequestType)(nil)).Elem() +} + +type ValidateHostProfileComposition_Task ValidateHostProfileCompositionRequestType + +func init() { + t["ValidateHostProfileComposition_Task"] = reflect.TypeOf((*ValidateHostProfileComposition_Task)(nil)).Elem() +} + +type ValidateHostProfileComposition_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ValidateHostRequestType struct { + This ManagedObjectReference `xml:"_this"` + OvfDescriptor string `xml:"ovfDescriptor"` + Host ManagedObjectReference `xml:"host"` + Vhp OvfValidateHostParams `xml:"vhp"` +} + +func init() { + t["ValidateHostRequestType"] = reflect.TypeOf((*ValidateHostRequestType)(nil)).Elem() +} + +type ValidateHostResponse struct { + Returnval OvfValidateHostResult `xml:"returnval"` +} + +type ValidateMigration ValidateMigrationRequestType + +func init() { + t["ValidateMigration"] = reflect.TypeOf((*ValidateMigration)(nil)).Elem() +} + +type ValidateMigrationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm []ManagedObjectReference `xml:"vm"` + State VirtualMachinePowerState `xml:"state,omitempty"` + TestType []string `xml:"testType,omitempty"` + Pool *ManagedObjectReference `xml:"pool,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` +} + +func init() { + t["ValidateMigrationRequestType"] = reflect.TypeOf((*ValidateMigrationRequestType)(nil)).Elem() +} + +type ValidateMigrationResponse struct { + Returnval []BaseEvent `xml:"returnval,omitempty,typeattr"` +} + +type ValidateStoragePodConfig ValidateStoragePodConfigRequestType + +func init() { + t["ValidateStoragePodConfig"] = reflect.TypeOf((*ValidateStoragePodConfig)(nil)).Elem() +} + +type ValidateStoragePodConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Pod ManagedObjectReference `xml:"pod"` + Spec StorageDrsConfigSpec `xml:"spec"` +} + +func init() { + t["ValidateStoragePodConfigRequestType"] = reflect.TypeOf((*ValidateStoragePodConfigRequestType)(nil)).Elem() +} + +type ValidateStoragePodConfigResponse struct { + Returnval *LocalizedMethodFault `xml:"returnval,omitempty"` +} + +type VasaProviderContainerSpec struct { + DynamicData + + VasaProviderInfo []VimVasaProviderInfo `xml:"vasaProviderInfo,omitempty"` + ScId string `xml:"scId"` + Deleted bool `xml:"deleted"` +} + +func init() { + t["VasaProviderContainerSpec"] = reflect.TypeOf((*VasaProviderContainerSpec)(nil)).Elem() +} + +type VcAgentUninstallFailedEvent struct { + HostEvent + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["VcAgentUninstallFailedEvent"] = reflect.TypeOf((*VcAgentUninstallFailedEvent)(nil)).Elem() +} + +type VcAgentUninstalledEvent struct { + HostEvent +} + +func init() { + t["VcAgentUninstalledEvent"] = 
reflect.TypeOf((*VcAgentUninstalledEvent)(nil)).Elem() +} + +type VcAgentUpgradeFailedEvent struct { + HostEvent + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["VcAgentUpgradeFailedEvent"] = reflect.TypeOf((*VcAgentUpgradeFailedEvent)(nil)).Elem() +} + +type VcAgentUpgradedEvent struct { + HostEvent +} + +func init() { + t["VcAgentUpgradedEvent"] = reflect.TypeOf((*VcAgentUpgradedEvent)(nil)).Elem() +} + +type VchaClusterConfigInfo struct { + DynamicData + + FailoverNodeInfo1 *FailoverNodeInfo `xml:"failoverNodeInfo1,omitempty"` + FailoverNodeInfo2 *FailoverNodeInfo `xml:"failoverNodeInfo2,omitempty"` + WitnessNodeInfo *WitnessNodeInfo `xml:"witnessNodeInfo,omitempty"` + State string `xml:"state"` +} + +func init() { + t["VchaClusterConfigInfo"] = reflect.TypeOf((*VchaClusterConfigInfo)(nil)).Elem() +} + +type VchaClusterConfigSpec struct { + DynamicData + + PassiveIp string `xml:"passiveIp"` + WitnessIp string `xml:"witnessIp"` +} + +func init() { + t["VchaClusterConfigSpec"] = reflect.TypeOf((*VchaClusterConfigSpec)(nil)).Elem() +} + +type VchaClusterDeploymentSpec struct { + DynamicData + + PassiveDeploymentSpec PassiveNodeDeploymentSpec `xml:"passiveDeploymentSpec"` + WitnessDeploymentSpec BaseNodeDeploymentSpec `xml:"witnessDeploymentSpec,typeattr"` + ActiveVcSpec SourceNodeSpec `xml:"activeVcSpec"` + ActiveVcNetworkConfig *ClusterNetworkConfigSpec `xml:"activeVcNetworkConfig,omitempty"` +} + +func init() { + t["VchaClusterDeploymentSpec"] = reflect.TypeOf((*VchaClusterDeploymentSpec)(nil)).Elem() +} + +type VchaClusterHealth struct { + DynamicData + + RuntimeInfo VchaClusterRuntimeInfo `xml:"runtimeInfo"` + HealthMessages []LocalizableMessage `xml:"healthMessages,omitempty"` + AdditionalInformation []LocalizableMessage `xml:"additionalInformation,omitempty"` +} + +func init() { + t["VchaClusterHealth"] = reflect.TypeOf((*VchaClusterHealth)(nil)).Elem() +} + +type VchaClusterNetworkSpec struct { + DynamicData + + WitnessNetworkSpec BaseNodeNetworkSpec `xml:"witnessNetworkSpec,typeattr"` + PassiveNetworkSpec PassiveNodeNetworkSpec `xml:"passiveNetworkSpec"` +} + +func init() { + t["VchaClusterNetworkSpec"] = reflect.TypeOf((*VchaClusterNetworkSpec)(nil)).Elem() +} + +type VchaClusterRuntimeInfo struct { + DynamicData + + ClusterState string `xml:"clusterState"` + NodeInfo []VchaNodeRuntimeInfo `xml:"nodeInfo,omitempty"` + ClusterMode string `xml:"clusterMode"` +} + +func init() { + t["VchaClusterRuntimeInfo"] = reflect.TypeOf((*VchaClusterRuntimeInfo)(nil)).Elem() +} + +type VchaNodeRuntimeInfo struct { + DynamicData + + NodeState string `xml:"nodeState"` + NodeRole string `xml:"nodeRole"` + NodeIp string `xml:"nodeIp"` +} + +func init() { + t["VchaNodeRuntimeInfo"] = reflect.TypeOf((*VchaNodeRuntimeInfo)(nil)).Elem() +} + +type VimAccountPasswordChangedEvent struct { + HostEvent +} + +func init() { + t["VimAccountPasswordChangedEvent"] = reflect.TypeOf((*VimAccountPasswordChangedEvent)(nil)).Elem() +} + +type VimFault struct { + MethodFault +} + +func init() { + t["VimFault"] = reflect.TypeOf((*VimFault)(nil)).Elem() +} + +type VimFaultFault BaseVimFault + +func init() { + t["VimFaultFault"] = reflect.TypeOf((*VimFaultFault)(nil)).Elem() +} + +type VimVasaProvider struct { + DynamicData + + Uid string `xml:"uid,omitempty"` + Url string `xml:"url"` + Name string `xml:"name,omitempty"` + SelfSignedCertificate string `xml:"selfSignedCertificate,omitempty"` +} + +func init() { + t["VimVasaProvider"] = reflect.TypeOf((*VimVasaProvider)(nil)).Elem() +} + +type 
VimVasaProviderInfo struct { + DynamicData + + Provider VimVasaProvider `xml:"provider"` + ArrayState []VimVasaProviderStatePerArray `xml:"arrayState,omitempty"` +} + +func init() { + t["VimVasaProviderInfo"] = reflect.TypeOf((*VimVasaProviderInfo)(nil)).Elem() +} + +type VimVasaProviderStatePerArray struct { + DynamicData + + Priority int32 `xml:"priority"` + ArrayId string `xml:"arrayId"` + Active bool `xml:"active"` +} + +func init() { + t["VimVasaProviderStatePerArray"] = reflect.TypeOf((*VimVasaProviderStatePerArray)(nil)).Elem() +} + +type VirtualAHCIController struct { + VirtualSATAController +} + +func init() { + t["VirtualAHCIController"] = reflect.TypeOf((*VirtualAHCIController)(nil)).Elem() +} + +type VirtualAHCIControllerOption struct { + VirtualSATAControllerOption +} + +func init() { + t["VirtualAHCIControllerOption"] = reflect.TypeOf((*VirtualAHCIControllerOption)(nil)).Elem() +} + +type VirtualAppImportSpec struct { + ImportSpec + + Name string `xml:"name"` + VAppConfigSpec VAppConfigSpec `xml:"vAppConfigSpec"` + ResourcePoolSpec ResourceConfigSpec `xml:"resourcePoolSpec"` + Child []BaseImportSpec `xml:"child,omitempty,typeattr"` +} + +func init() { + t["VirtualAppImportSpec"] = reflect.TypeOf((*VirtualAppImportSpec)(nil)).Elem() +} + +type VirtualAppLinkInfo struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + DestroyWithParent *bool `xml:"destroyWithParent"` +} + +func init() { + t["VirtualAppLinkInfo"] = reflect.TypeOf((*VirtualAppLinkInfo)(nil)).Elem() +} + +type VirtualAppSummary struct { + ResourcePoolSummary + + Product *VAppProductInfo `xml:"product,omitempty"` + VAppState VirtualAppVAppState `xml:"vAppState,omitempty"` + Suspended *bool `xml:"suspended"` + InstallBootRequired *bool `xml:"installBootRequired"` + InstanceUuid string `xml:"instanceUuid,omitempty"` +} + +func init() { + t["VirtualAppSummary"] = reflect.TypeOf((*VirtualAppSummary)(nil)).Elem() +} + +type VirtualBusLogicController struct { + VirtualSCSIController +} + +func init() { + t["VirtualBusLogicController"] = reflect.TypeOf((*VirtualBusLogicController)(nil)).Elem() +} + +type VirtualBusLogicControllerOption struct { + VirtualSCSIControllerOption +} + +func init() { + t["VirtualBusLogicControllerOption"] = reflect.TypeOf((*VirtualBusLogicControllerOption)(nil)).Elem() +} + +type VirtualCdrom struct { + VirtualDevice +} + +func init() { + t["VirtualCdrom"] = reflect.TypeOf((*VirtualCdrom)(nil)).Elem() +} + +type VirtualCdromAtapiBackingInfo struct { + VirtualDeviceDeviceBackingInfo +} + +func init() { + t["VirtualCdromAtapiBackingInfo"] = reflect.TypeOf((*VirtualCdromAtapiBackingInfo)(nil)).Elem() +} + +type VirtualCdromAtapiBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualCdromAtapiBackingOption"] = reflect.TypeOf((*VirtualCdromAtapiBackingOption)(nil)).Elem() +} + +type VirtualCdromIsoBackingInfo struct { + VirtualDeviceFileBackingInfo +} + +func init() { + t["VirtualCdromIsoBackingInfo"] = reflect.TypeOf((*VirtualCdromIsoBackingInfo)(nil)).Elem() +} + +type VirtualCdromIsoBackingOption struct { + VirtualDeviceFileBackingOption +} + +func init() { + t["VirtualCdromIsoBackingOption"] = reflect.TypeOf((*VirtualCdromIsoBackingOption)(nil)).Elem() +} + +type VirtualCdromOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualCdromOption"] = reflect.TypeOf((*VirtualCdromOption)(nil)).Elem() +} + +type VirtualCdromPassthroughBackingInfo struct { + VirtualDeviceDeviceBackingInfo + + Exclusive bool `xml:"exclusive"` +} + +func init() { 
+ t["VirtualCdromPassthroughBackingInfo"] = reflect.TypeOf((*VirtualCdromPassthroughBackingInfo)(nil)).Elem() +} + +type VirtualCdromPassthroughBackingOption struct { + VirtualDeviceDeviceBackingOption + + Exclusive BoolOption `xml:"exclusive"` +} + +func init() { + t["VirtualCdromPassthroughBackingOption"] = reflect.TypeOf((*VirtualCdromPassthroughBackingOption)(nil)).Elem() +} + +type VirtualCdromRemoteAtapiBackingInfo struct { + VirtualDeviceRemoteDeviceBackingInfo +} + +func init() { + t["VirtualCdromRemoteAtapiBackingInfo"] = reflect.TypeOf((*VirtualCdromRemoteAtapiBackingInfo)(nil)).Elem() +} + +type VirtualCdromRemoteAtapiBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualCdromRemoteAtapiBackingOption"] = reflect.TypeOf((*VirtualCdromRemoteAtapiBackingOption)(nil)).Elem() +} + +type VirtualCdromRemotePassthroughBackingInfo struct { + VirtualDeviceRemoteDeviceBackingInfo + + Exclusive bool `xml:"exclusive"` +} + +func init() { + t["VirtualCdromRemotePassthroughBackingInfo"] = reflect.TypeOf((*VirtualCdromRemotePassthroughBackingInfo)(nil)).Elem() +} + +type VirtualCdromRemotePassthroughBackingOption struct { + VirtualDeviceRemoteDeviceBackingOption + + Exclusive BoolOption `xml:"exclusive"` +} + +func init() { + t["VirtualCdromRemotePassthroughBackingOption"] = reflect.TypeOf((*VirtualCdromRemotePassthroughBackingOption)(nil)).Elem() +} + +type VirtualController struct { + VirtualDevice + + BusNumber int32 `xml:"busNumber"` + Device []int32 `xml:"device,omitempty"` +} + +func init() { + t["VirtualController"] = reflect.TypeOf((*VirtualController)(nil)).Elem() +} + +type VirtualControllerOption struct { + VirtualDeviceOption + + Devices IntOption `xml:"devices"` + SupportedDevice []string `xml:"supportedDevice,omitempty"` +} + +func init() { + t["VirtualControllerOption"] = reflect.TypeOf((*VirtualControllerOption)(nil)).Elem() +} + +type VirtualDevice struct { + DynamicData + + Key int32 `xml:"key"` + DeviceInfo BaseDescription `xml:"deviceInfo,omitempty,typeattr"` + Backing BaseVirtualDeviceBackingInfo `xml:"backing,omitempty,typeattr"` + Connectable *VirtualDeviceConnectInfo `xml:"connectable,omitempty"` + SlotInfo BaseVirtualDeviceBusSlotInfo `xml:"slotInfo,omitempty,typeattr"` + ControllerKey int32 `xml:"controllerKey,omitempty"` + UnitNumber *int32 `xml:"unitNumber"` +} + +func init() { + t["VirtualDevice"] = reflect.TypeOf((*VirtualDevice)(nil)).Elem() +} + +type VirtualDeviceBackingInfo struct { + DynamicData +} + +func init() { + t["VirtualDeviceBackingInfo"] = reflect.TypeOf((*VirtualDeviceBackingInfo)(nil)).Elem() +} + +type VirtualDeviceBackingOption struct { + DynamicData + + Type string `xml:"type"` +} + +func init() { + t["VirtualDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceBackingOption)(nil)).Elem() +} + +type VirtualDeviceBusSlotInfo struct { + DynamicData +} + +func init() { + t["VirtualDeviceBusSlotInfo"] = reflect.TypeOf((*VirtualDeviceBusSlotInfo)(nil)).Elem() +} + +type VirtualDeviceBusSlotOption struct { + DynamicData + + Type string `xml:"type"` +} + +func init() { + t["VirtualDeviceBusSlotOption"] = reflect.TypeOf((*VirtualDeviceBusSlotOption)(nil)).Elem() +} + +type VirtualDeviceConfigSpec struct { + DynamicData + + Operation VirtualDeviceConfigSpecOperation `xml:"operation,omitempty"` + FileOperation VirtualDeviceConfigSpecFileOperation `xml:"fileOperation,omitempty"` + Device BaseVirtualDevice `xml:"device,typeattr"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + Backing 
*VirtualDeviceConfigSpecBackingSpec `xml:"backing,omitempty"` +} + +func init() { + t["VirtualDeviceConfigSpec"] = reflect.TypeOf((*VirtualDeviceConfigSpec)(nil)).Elem() +} + +type VirtualDeviceConfigSpecBackingSpec struct { + DynamicData + + Parent *VirtualDeviceConfigSpecBackingSpec `xml:"parent,omitempty"` + Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr"` +} + +func init() { + t["VirtualDeviceConfigSpecBackingSpec"] = reflect.TypeOf((*VirtualDeviceConfigSpecBackingSpec)(nil)).Elem() +} + +type VirtualDeviceConnectInfo struct { + DynamicData + + MigrateConnect string `xml:"migrateConnect,omitempty"` + StartConnected bool `xml:"startConnected"` + AllowGuestControl bool `xml:"allowGuestControl"` + Connected bool `xml:"connected"` + Status string `xml:"status,omitempty"` +} + +func init() { + t["VirtualDeviceConnectInfo"] = reflect.TypeOf((*VirtualDeviceConnectInfo)(nil)).Elem() +} + +type VirtualDeviceConnectOption struct { + DynamicData + + StartConnected BoolOption `xml:"startConnected"` + AllowGuestControl BoolOption `xml:"allowGuestControl"` +} + +func init() { + t["VirtualDeviceConnectOption"] = reflect.TypeOf((*VirtualDeviceConnectOption)(nil)).Elem() +} + +type VirtualDeviceDeviceBackingInfo struct { + VirtualDeviceBackingInfo + + DeviceName string `xml:"deviceName"` + UseAutoDetect *bool `xml:"useAutoDetect"` +} + +func init() { + t["VirtualDeviceDeviceBackingInfo"] = reflect.TypeOf((*VirtualDeviceDeviceBackingInfo)(nil)).Elem() +} + +type VirtualDeviceDeviceBackingOption struct { + VirtualDeviceBackingOption + + AutoDetectAvailable BoolOption `xml:"autoDetectAvailable"` +} + +func init() { + t["VirtualDeviceDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceDeviceBackingOption)(nil)).Elem() +} + +type VirtualDeviceFileBackingInfo struct { + VirtualDeviceBackingInfo + + FileName string `xml:"fileName"` + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` + BackingObjectId string `xml:"backingObjectId,omitempty"` +} + +func init() { + t["VirtualDeviceFileBackingInfo"] = reflect.TypeOf((*VirtualDeviceFileBackingInfo)(nil)).Elem() +} + +type VirtualDeviceFileBackingOption struct { + VirtualDeviceBackingOption + + FileNameExtensions *ChoiceOption `xml:"fileNameExtensions,omitempty"` +} + +func init() { + t["VirtualDeviceFileBackingOption"] = reflect.TypeOf((*VirtualDeviceFileBackingOption)(nil)).Elem() +} + +type VirtualDeviceOption struct { + DynamicData + + Type string `xml:"type"` + ConnectOption *VirtualDeviceConnectOption `xml:"connectOption,omitempty"` + BusSlotOption *VirtualDeviceBusSlotOption `xml:"busSlotOption,omitempty"` + ControllerType string `xml:"controllerType,omitempty"` + AutoAssignController *BoolOption `xml:"autoAssignController,omitempty"` + BackingOption []BaseVirtualDeviceBackingOption `xml:"backingOption,omitempty,typeattr"` + DefaultBackingOptionIndex int32 `xml:"defaultBackingOptionIndex,omitempty"` + LicensingLimit []string `xml:"licensingLimit,omitempty"` + Deprecated bool `xml:"deprecated"` + PlugAndPlay bool `xml:"plugAndPlay"` + HotRemoveSupported *bool `xml:"hotRemoveSupported"` +} + +func init() { + t["VirtualDeviceOption"] = reflect.TypeOf((*VirtualDeviceOption)(nil)).Elem() +} + +type VirtualDevicePciBusSlotInfo struct { + VirtualDeviceBusSlotInfo + + PciSlotNumber int32 `xml:"pciSlotNumber"` +} + +func init() { + t["VirtualDevicePciBusSlotInfo"] = reflect.TypeOf((*VirtualDevicePciBusSlotInfo)(nil)).Elem() +} + +type VirtualDevicePipeBackingInfo struct { + VirtualDeviceBackingInfo + + PipeName string `xml:"pipeName"` +} + +func 
init() { + t["VirtualDevicePipeBackingInfo"] = reflect.TypeOf((*VirtualDevicePipeBackingInfo)(nil)).Elem() +} + +type VirtualDevicePipeBackingOption struct { + VirtualDeviceBackingOption +} + +func init() { + t["VirtualDevicePipeBackingOption"] = reflect.TypeOf((*VirtualDevicePipeBackingOption)(nil)).Elem() +} + +type VirtualDeviceRemoteDeviceBackingInfo struct { + VirtualDeviceBackingInfo + + DeviceName string `xml:"deviceName"` + UseAutoDetect *bool `xml:"useAutoDetect"` +} + +func init() { + t["VirtualDeviceRemoteDeviceBackingInfo"] = reflect.TypeOf((*VirtualDeviceRemoteDeviceBackingInfo)(nil)).Elem() +} + +type VirtualDeviceRemoteDeviceBackingOption struct { + VirtualDeviceBackingOption + + AutoDetectAvailable BoolOption `xml:"autoDetectAvailable"` +} + +func init() { + t["VirtualDeviceRemoteDeviceBackingOption"] = reflect.TypeOf((*VirtualDeviceRemoteDeviceBackingOption)(nil)).Elem() +} + +type VirtualDeviceURIBackingInfo struct { + VirtualDeviceBackingInfo + + ServiceURI string `xml:"serviceURI"` + Direction string `xml:"direction"` + ProxyURI string `xml:"proxyURI,omitempty"` +} + +func init() { + t["VirtualDeviceURIBackingInfo"] = reflect.TypeOf((*VirtualDeviceURIBackingInfo)(nil)).Elem() +} + +type VirtualDeviceURIBackingOption struct { + VirtualDeviceBackingOption + + Directions ChoiceOption `xml:"directions"` +} + +func init() { + t["VirtualDeviceURIBackingOption"] = reflect.TypeOf((*VirtualDeviceURIBackingOption)(nil)).Elem() +} + +type VirtualDisk struct { + VirtualDevice + + CapacityInKB int64 `xml:"capacityInKB"` + CapacityInBytes int64 `xml:"capacityInBytes,omitempty"` + Shares *SharesInfo `xml:"shares,omitempty"` + StorageIOAllocation *StorageIOAllocationInfo `xml:"storageIOAllocation,omitempty"` + DiskObjectId string `xml:"diskObjectId,omitempty"` + VFlashCacheConfigInfo *VirtualDiskVFlashCacheConfigInfo `xml:"vFlashCacheConfigInfo,omitempty"` + Iofilter []string `xml:"iofilter,omitempty"` + VDiskId *ID `xml:"vDiskId,omitempty"` + NativeUnmanagedLinkedClone *bool `xml:"nativeUnmanagedLinkedClone"` +} + +func init() { + t["VirtualDisk"] = reflect.TypeOf((*VirtualDisk)(nil)).Elem() +} + +type VirtualDiskAntiAffinityRuleSpec struct { + ClusterRuleInfo + + DiskId []int32 `xml:"diskId"` +} + +func init() { + t["VirtualDiskAntiAffinityRuleSpec"] = reflect.TypeOf((*VirtualDiskAntiAffinityRuleSpec)(nil)).Elem() +} + +type VirtualDiskBlocksNotFullyProvisioned struct { + DeviceBackingNotSupported +} + +func init() { + t["VirtualDiskBlocksNotFullyProvisioned"] = reflect.TypeOf((*VirtualDiskBlocksNotFullyProvisioned)(nil)).Elem() +} + +type VirtualDiskBlocksNotFullyProvisionedFault VirtualDiskBlocksNotFullyProvisioned + +func init() { + t["VirtualDiskBlocksNotFullyProvisionedFault"] = reflect.TypeOf((*VirtualDiskBlocksNotFullyProvisionedFault)(nil)).Elem() +} + +type VirtualDiskConfigSpec struct { + VirtualDeviceConfigSpec + + DiskMoveType string `xml:"diskMoveType,omitempty"` + MigrateCache *bool `xml:"migrateCache"` +} + +func init() { + t["VirtualDiskConfigSpec"] = reflect.TypeOf((*VirtualDiskConfigSpec)(nil)).Elem() +} + +type VirtualDiskDeltaDiskFormatsSupported struct { + DynamicData + + DatastoreType string `xml:"datastoreType"` + DeltaDiskFormat ChoiceOption `xml:"deltaDiskFormat"` +} + +func init() { + t["VirtualDiskDeltaDiskFormatsSupported"] = reflect.TypeOf((*VirtualDiskDeltaDiskFormatsSupported)(nil)).Elem() +} + +type VirtualDiskFlatVer1BackingInfo struct { + VirtualDeviceFileBackingInfo + + DiskMode string `xml:"diskMode"` + Split *bool `xml:"split"` + WriteThrough 
*bool `xml:"writeThrough"` + ContentId string `xml:"contentId,omitempty"` + Parent *VirtualDiskFlatVer1BackingInfo `xml:"parent,omitempty"` +} + +func init() { + t["VirtualDiskFlatVer1BackingInfo"] = reflect.TypeOf((*VirtualDiskFlatVer1BackingInfo)(nil)).Elem() +} + +type VirtualDiskFlatVer1BackingOption struct { + VirtualDeviceFileBackingOption + + DiskMode ChoiceOption `xml:"diskMode"` + Split BoolOption `xml:"split"` + WriteThrough BoolOption `xml:"writeThrough"` + Growable bool `xml:"growable"` +} + +func init() { + t["VirtualDiskFlatVer1BackingOption"] = reflect.TypeOf((*VirtualDiskFlatVer1BackingOption)(nil)).Elem() +} + +type VirtualDiskFlatVer2BackingInfo struct { + VirtualDeviceFileBackingInfo + + DiskMode string `xml:"diskMode"` + Split *bool `xml:"split"` + WriteThrough *bool `xml:"writeThrough"` + ThinProvisioned *bool `xml:"thinProvisioned"` + EagerlyScrub *bool `xml:"eagerlyScrub"` + Uuid string `xml:"uuid,omitempty"` + ContentId string `xml:"contentId,omitempty"` + ChangeId string `xml:"changeId,omitempty"` + Parent *VirtualDiskFlatVer2BackingInfo `xml:"parent,omitempty"` + DeltaDiskFormat string `xml:"deltaDiskFormat,omitempty"` + DigestEnabled *bool `xml:"digestEnabled"` + DeltaGrainSize int32 `xml:"deltaGrainSize,omitempty"` + DeltaDiskFormatVariant string `xml:"deltaDiskFormatVariant,omitempty"` + Sharing string `xml:"sharing,omitempty"` + KeyId *CryptoKeyId `xml:"keyId,omitempty"` +} + +func init() { + t["VirtualDiskFlatVer2BackingInfo"] = reflect.TypeOf((*VirtualDiskFlatVer2BackingInfo)(nil)).Elem() +} + +type VirtualDiskFlatVer2BackingOption struct { + VirtualDeviceFileBackingOption + + DiskMode ChoiceOption `xml:"diskMode"` + Split BoolOption `xml:"split"` + WriteThrough BoolOption `xml:"writeThrough"` + Growable bool `xml:"growable"` + HotGrowable bool `xml:"hotGrowable"` + Uuid bool `xml:"uuid"` + ThinProvisioned *BoolOption `xml:"thinProvisioned,omitempty"` + EagerlyScrub *BoolOption `xml:"eagerlyScrub,omitempty"` + DeltaDiskFormat *ChoiceOption `xml:"deltaDiskFormat,omitempty"` + DeltaDiskFormatsSupported []VirtualDiskDeltaDiskFormatsSupported `xml:"deltaDiskFormatsSupported,omitempty"` +} + +func init() { + t["VirtualDiskFlatVer2BackingOption"] = reflect.TypeOf((*VirtualDiskFlatVer2BackingOption)(nil)).Elem() +} + +type VirtualDiskId struct { + DynamicData + + Vm ManagedObjectReference `xml:"vm"` + DiskId int32 `xml:"diskId"` +} + +func init() { + t["VirtualDiskId"] = reflect.TypeOf((*VirtualDiskId)(nil)).Elem() +} + +type VirtualDiskLocalPMemBackingInfo struct { + VirtualDeviceFileBackingInfo + + DiskMode string `xml:"diskMode"` + Uuid string `xml:"uuid,omitempty"` + VolumeUUID string `xml:"volumeUUID,omitempty"` + ContentId string `xml:"contentId,omitempty"` +} + +func init() { + t["VirtualDiskLocalPMemBackingInfo"] = reflect.TypeOf((*VirtualDiskLocalPMemBackingInfo)(nil)).Elem() +} + +type VirtualDiskLocalPMemBackingOption struct { + VirtualDeviceFileBackingOption + + DiskMode ChoiceOption `xml:"diskMode"` + Growable bool `xml:"growable"` + HotGrowable bool `xml:"hotGrowable"` + Uuid bool `xml:"uuid"` +} + +func init() { + t["VirtualDiskLocalPMemBackingOption"] = reflect.TypeOf((*VirtualDiskLocalPMemBackingOption)(nil)).Elem() +} + +type VirtualDiskModeNotSupported struct { + DeviceNotSupported + + Mode string `xml:"mode"` +} + +func init() { + t["VirtualDiskModeNotSupported"] = reflect.TypeOf((*VirtualDiskModeNotSupported)(nil)).Elem() +} + +type VirtualDiskModeNotSupportedFault VirtualDiskModeNotSupported + +func init() { + 
t["VirtualDiskModeNotSupportedFault"] = reflect.TypeOf((*VirtualDiskModeNotSupportedFault)(nil)).Elem() +} + +type VirtualDiskOption struct { + VirtualDeviceOption + + CapacityInKB LongOption `xml:"capacityInKB"` + IoAllocationOption *StorageIOAllocationOption `xml:"ioAllocationOption,omitempty"` + VFlashCacheConfigOption *VirtualDiskOptionVFlashCacheConfigOption `xml:"vFlashCacheConfigOption,omitempty"` +} + +func init() { + t["VirtualDiskOption"] = reflect.TypeOf((*VirtualDiskOption)(nil)).Elem() +} + +type VirtualDiskOptionVFlashCacheConfigOption struct { + DynamicData + + CacheConsistencyType ChoiceOption `xml:"cacheConsistencyType"` + CacheMode ChoiceOption `xml:"cacheMode"` + ReservationInMB LongOption `xml:"reservationInMB"` + BlockSizeInKB LongOption `xml:"blockSizeInKB"` +} + +func init() { + t["VirtualDiskOptionVFlashCacheConfigOption"] = reflect.TypeOf((*VirtualDiskOptionVFlashCacheConfigOption)(nil)).Elem() +} + +type VirtualDiskPartitionedRawDiskVer2BackingInfo struct { + VirtualDiskRawDiskVer2BackingInfo + + Partition []int32 `xml:"partition"` +} + +func init() { + t["VirtualDiskPartitionedRawDiskVer2BackingInfo"] = reflect.TypeOf((*VirtualDiskPartitionedRawDiskVer2BackingInfo)(nil)).Elem() +} + +type VirtualDiskPartitionedRawDiskVer2BackingOption struct { + VirtualDiskRawDiskVer2BackingOption +} + +func init() { + t["VirtualDiskPartitionedRawDiskVer2BackingOption"] = reflect.TypeOf((*VirtualDiskPartitionedRawDiskVer2BackingOption)(nil)).Elem() +} + +type VirtualDiskRawDiskMappingVer1BackingInfo struct { + VirtualDeviceFileBackingInfo + + LunUuid string `xml:"lunUuid,omitempty"` + DeviceName string `xml:"deviceName,omitempty"` + CompatibilityMode string `xml:"compatibilityMode,omitempty"` + DiskMode string `xml:"diskMode,omitempty"` + Uuid string `xml:"uuid,omitempty"` + ContentId string `xml:"contentId,omitempty"` + ChangeId string `xml:"changeId,omitempty"` + Parent *VirtualDiskRawDiskMappingVer1BackingInfo `xml:"parent,omitempty"` + DeltaDiskFormat string `xml:"deltaDiskFormat,omitempty"` + DeltaGrainSize int32 `xml:"deltaGrainSize,omitempty"` + Sharing string `xml:"sharing,omitempty"` +} + +func init() { + t["VirtualDiskRawDiskMappingVer1BackingInfo"] = reflect.TypeOf((*VirtualDiskRawDiskMappingVer1BackingInfo)(nil)).Elem() +} + +type VirtualDiskRawDiskMappingVer1BackingOption struct { + VirtualDeviceDeviceBackingOption + + DescriptorFileNameExtensions *ChoiceOption `xml:"descriptorFileNameExtensions,omitempty"` + CompatibilityMode ChoiceOption `xml:"compatibilityMode"` + DiskMode ChoiceOption `xml:"diskMode"` + Uuid bool `xml:"uuid"` +} + +func init() { + t["VirtualDiskRawDiskMappingVer1BackingOption"] = reflect.TypeOf((*VirtualDiskRawDiskMappingVer1BackingOption)(nil)).Elem() +} + +type VirtualDiskRawDiskVer2BackingInfo struct { + VirtualDeviceDeviceBackingInfo + + DescriptorFileName string `xml:"descriptorFileName"` + Uuid string `xml:"uuid,omitempty"` + ChangeId string `xml:"changeId,omitempty"` + Sharing string `xml:"sharing,omitempty"` +} + +func init() { + t["VirtualDiskRawDiskVer2BackingInfo"] = reflect.TypeOf((*VirtualDiskRawDiskVer2BackingInfo)(nil)).Elem() +} + +type VirtualDiskRawDiskVer2BackingOption struct { + VirtualDeviceDeviceBackingOption + + DescriptorFileNameExtensions ChoiceOption `xml:"descriptorFileNameExtensions"` + Uuid bool `xml:"uuid"` +} + +func init() { + t["VirtualDiskRawDiskVer2BackingOption"] = reflect.TypeOf((*VirtualDiskRawDiskVer2BackingOption)(nil)).Elem() +} + +type VirtualDiskRuleSpec struct { + ClusterRuleInfo + + DiskRuleType string 
`xml:"diskRuleType"` + DiskId []int32 `xml:"diskId,omitempty"` +} + +func init() { + t["VirtualDiskRuleSpec"] = reflect.TypeOf((*VirtualDiskRuleSpec)(nil)).Elem() +} + +type VirtualDiskSeSparseBackingInfo struct { + VirtualDeviceFileBackingInfo + + DiskMode string `xml:"diskMode"` + WriteThrough *bool `xml:"writeThrough"` + Uuid string `xml:"uuid,omitempty"` + ContentId string `xml:"contentId,omitempty"` + ChangeId string `xml:"changeId,omitempty"` + Parent *VirtualDiskSeSparseBackingInfo `xml:"parent,omitempty"` + DeltaDiskFormat string `xml:"deltaDiskFormat,omitempty"` + DigestEnabled *bool `xml:"digestEnabled"` + GrainSize int32 `xml:"grainSize,omitempty"` + KeyId *CryptoKeyId `xml:"keyId,omitempty"` +} + +func init() { + t["VirtualDiskSeSparseBackingInfo"] = reflect.TypeOf((*VirtualDiskSeSparseBackingInfo)(nil)).Elem() +} + +type VirtualDiskSeSparseBackingOption struct { + VirtualDeviceFileBackingOption + + DiskMode ChoiceOption `xml:"diskMode"` + WriteThrough BoolOption `xml:"writeThrough"` + Growable bool `xml:"growable"` + HotGrowable bool `xml:"hotGrowable"` + Uuid bool `xml:"uuid"` + DeltaDiskFormatsSupported []VirtualDiskDeltaDiskFormatsSupported `xml:"deltaDiskFormatsSupported"` +} + +func init() { + t["VirtualDiskSeSparseBackingOption"] = reflect.TypeOf((*VirtualDiskSeSparseBackingOption)(nil)).Elem() +} + +type VirtualDiskSparseVer1BackingInfo struct { + VirtualDeviceFileBackingInfo + + DiskMode string `xml:"diskMode"` + Split *bool `xml:"split"` + WriteThrough *bool `xml:"writeThrough"` + SpaceUsedInKB int64 `xml:"spaceUsedInKB,omitempty"` + ContentId string `xml:"contentId,omitempty"` + Parent *VirtualDiskSparseVer1BackingInfo `xml:"parent,omitempty"` +} + +func init() { + t["VirtualDiskSparseVer1BackingInfo"] = reflect.TypeOf((*VirtualDiskSparseVer1BackingInfo)(nil)).Elem() +} + +type VirtualDiskSparseVer1BackingOption struct { + VirtualDeviceFileBackingOption + + DiskModes ChoiceOption `xml:"diskModes"` + Split BoolOption `xml:"split"` + WriteThrough BoolOption `xml:"writeThrough"` + Growable bool `xml:"growable"` +} + +func init() { + t["VirtualDiskSparseVer1BackingOption"] = reflect.TypeOf((*VirtualDiskSparseVer1BackingOption)(nil)).Elem() +} + +type VirtualDiskSparseVer2BackingInfo struct { + VirtualDeviceFileBackingInfo + + DiskMode string `xml:"diskMode"` + Split *bool `xml:"split"` + WriteThrough *bool `xml:"writeThrough"` + SpaceUsedInKB int64 `xml:"spaceUsedInKB,omitempty"` + Uuid string `xml:"uuid,omitempty"` + ContentId string `xml:"contentId,omitempty"` + ChangeId string `xml:"changeId,omitempty"` + Parent *VirtualDiskSparseVer2BackingInfo `xml:"parent,omitempty"` + KeyId *CryptoKeyId `xml:"keyId,omitempty"` +} + +func init() { + t["VirtualDiskSparseVer2BackingInfo"] = reflect.TypeOf((*VirtualDiskSparseVer2BackingInfo)(nil)).Elem() +} + +type VirtualDiskSparseVer2BackingOption struct { + VirtualDeviceFileBackingOption + + DiskMode ChoiceOption `xml:"diskMode"` + Split BoolOption `xml:"split"` + WriteThrough BoolOption `xml:"writeThrough"` + Growable bool `xml:"growable"` + HotGrowable bool `xml:"hotGrowable"` + Uuid bool `xml:"uuid"` +} + +func init() { + t["VirtualDiskSparseVer2BackingOption"] = reflect.TypeOf((*VirtualDiskSparseVer2BackingOption)(nil)).Elem() +} + +type VirtualDiskSpec struct { + DynamicData + + DiskType string `xml:"diskType"` + AdapterType string `xml:"adapterType"` +} + +func init() { + t["VirtualDiskSpec"] = reflect.TypeOf((*VirtualDiskSpec)(nil)).Elem() +} + +type VirtualDiskVFlashCacheConfigInfo struct { + DynamicData + + VFlashModule 
string `xml:"vFlashModule,omitempty"` + ReservationInMB int64 `xml:"reservationInMB,omitempty"` + CacheConsistencyType string `xml:"cacheConsistencyType,omitempty"` + CacheMode string `xml:"cacheMode,omitempty"` + BlockSizeInKB int64 `xml:"blockSizeInKB,omitempty"` +} + +func init() { + t["VirtualDiskVFlashCacheConfigInfo"] = reflect.TypeOf((*VirtualDiskVFlashCacheConfigInfo)(nil)).Elem() +} + +type VirtualE1000 struct { + VirtualEthernetCard +} + +func init() { + t["VirtualE1000"] = reflect.TypeOf((*VirtualE1000)(nil)).Elem() +} + +type VirtualE1000Option struct { + VirtualEthernetCardOption +} + +func init() { + t["VirtualE1000Option"] = reflect.TypeOf((*VirtualE1000Option)(nil)).Elem() +} + +type VirtualE1000e struct { + VirtualEthernetCard +} + +func init() { + t["VirtualE1000e"] = reflect.TypeOf((*VirtualE1000e)(nil)).Elem() +} + +type VirtualE1000eOption struct { + VirtualEthernetCardOption +} + +func init() { + t["VirtualE1000eOption"] = reflect.TypeOf((*VirtualE1000eOption)(nil)).Elem() +} + +type VirtualEnsoniq1371 struct { + VirtualSoundCard +} + +func init() { + t["VirtualEnsoniq1371"] = reflect.TypeOf((*VirtualEnsoniq1371)(nil)).Elem() +} + +type VirtualEnsoniq1371Option struct { + VirtualSoundCardOption +} + +func init() { + t["VirtualEnsoniq1371Option"] = reflect.TypeOf((*VirtualEnsoniq1371Option)(nil)).Elem() +} + +type VirtualEthernetCard struct { + VirtualDevice + + AddressType string `xml:"addressType,omitempty"` + MacAddress string `xml:"macAddress,omitempty"` + WakeOnLanEnabled *bool `xml:"wakeOnLanEnabled"` + ResourceAllocation *VirtualEthernetCardResourceAllocation `xml:"resourceAllocation,omitempty"` + ExternalId string `xml:"externalId,omitempty"` + UptCompatibilityEnabled *bool `xml:"uptCompatibilityEnabled"` +} + +func init() { + t["VirtualEthernetCard"] = reflect.TypeOf((*VirtualEthernetCard)(nil)).Elem() +} + +type VirtualEthernetCardDVPortBackingOption struct { + VirtualDeviceBackingOption +} + +func init() { + t["VirtualEthernetCardDVPortBackingOption"] = reflect.TypeOf((*VirtualEthernetCardDVPortBackingOption)(nil)).Elem() +} + +type VirtualEthernetCardDistributedVirtualPortBackingInfo struct { + VirtualDeviceBackingInfo + + Port DistributedVirtualSwitchPortConnection `xml:"port"` +} + +func init() { + t["VirtualEthernetCardDistributedVirtualPortBackingInfo"] = reflect.TypeOf((*VirtualEthernetCardDistributedVirtualPortBackingInfo)(nil)).Elem() +} + +type VirtualEthernetCardLegacyNetworkBackingInfo struct { + VirtualDeviceDeviceBackingInfo +} + +func init() { + t["VirtualEthernetCardLegacyNetworkBackingInfo"] = reflect.TypeOf((*VirtualEthernetCardLegacyNetworkBackingInfo)(nil)).Elem() +} + +type VirtualEthernetCardLegacyNetworkBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualEthernetCardLegacyNetworkBackingOption"] = reflect.TypeOf((*VirtualEthernetCardLegacyNetworkBackingOption)(nil)).Elem() +} + +type VirtualEthernetCardNetworkBackingInfo struct { + VirtualDeviceDeviceBackingInfo + + Network *ManagedObjectReference `xml:"network,omitempty"` + InPassthroughMode *bool `xml:"inPassthroughMode"` +} + +func init() { + t["VirtualEthernetCardNetworkBackingInfo"] = reflect.TypeOf((*VirtualEthernetCardNetworkBackingInfo)(nil)).Elem() +} + +type VirtualEthernetCardNetworkBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualEthernetCardNetworkBackingOption"] = reflect.TypeOf((*VirtualEthernetCardNetworkBackingOption)(nil)).Elem() +} + +type VirtualEthernetCardNotSupported struct { + 
DeviceNotSupported +} + +func init() { + t["VirtualEthernetCardNotSupported"] = reflect.TypeOf((*VirtualEthernetCardNotSupported)(nil)).Elem() +} + +type VirtualEthernetCardNotSupportedFault VirtualEthernetCardNotSupported + +func init() { + t["VirtualEthernetCardNotSupportedFault"] = reflect.TypeOf((*VirtualEthernetCardNotSupportedFault)(nil)).Elem() +} + +type VirtualEthernetCardOpaqueNetworkBackingInfo struct { + VirtualDeviceBackingInfo + + OpaqueNetworkId string `xml:"opaqueNetworkId"` + OpaqueNetworkType string `xml:"opaqueNetworkType"` +} + +func init() { + t["VirtualEthernetCardOpaqueNetworkBackingInfo"] = reflect.TypeOf((*VirtualEthernetCardOpaqueNetworkBackingInfo)(nil)).Elem() +} + +type VirtualEthernetCardOpaqueNetworkBackingOption struct { + VirtualDeviceBackingOption +} + +func init() { + t["VirtualEthernetCardOpaqueNetworkBackingOption"] = reflect.TypeOf((*VirtualEthernetCardOpaqueNetworkBackingOption)(nil)).Elem() +} + +type VirtualEthernetCardOption struct { + VirtualDeviceOption + + SupportedOUI ChoiceOption `xml:"supportedOUI"` + MacType ChoiceOption `xml:"macType"` + WakeOnLanEnabled BoolOption `xml:"wakeOnLanEnabled"` + VmDirectPathGen2Supported *bool `xml:"vmDirectPathGen2Supported"` + UptCompatibilityEnabled *BoolOption `xml:"uptCompatibilityEnabled,omitempty"` +} + +func init() { + t["VirtualEthernetCardOption"] = reflect.TypeOf((*VirtualEthernetCardOption)(nil)).Elem() +} + +type VirtualEthernetCardResourceAllocation struct { + DynamicData + + Reservation *int64 `xml:"reservation"` + Share SharesInfo `xml:"share"` + Limit *int64 `xml:"limit"` +} + +func init() { + t["VirtualEthernetCardResourceAllocation"] = reflect.TypeOf((*VirtualEthernetCardResourceAllocation)(nil)).Elem() +} + +type VirtualFloppy struct { + VirtualDevice +} + +func init() { + t["VirtualFloppy"] = reflect.TypeOf((*VirtualFloppy)(nil)).Elem() +} + +type VirtualFloppyDeviceBackingInfo struct { + VirtualDeviceDeviceBackingInfo +} + +func init() { + t["VirtualFloppyDeviceBackingInfo"] = reflect.TypeOf((*VirtualFloppyDeviceBackingInfo)(nil)).Elem() +} + +type VirtualFloppyDeviceBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualFloppyDeviceBackingOption"] = reflect.TypeOf((*VirtualFloppyDeviceBackingOption)(nil)).Elem() +} + +type VirtualFloppyImageBackingInfo struct { + VirtualDeviceFileBackingInfo +} + +func init() { + t["VirtualFloppyImageBackingInfo"] = reflect.TypeOf((*VirtualFloppyImageBackingInfo)(nil)).Elem() +} + +type VirtualFloppyImageBackingOption struct { + VirtualDeviceFileBackingOption +} + +func init() { + t["VirtualFloppyImageBackingOption"] = reflect.TypeOf((*VirtualFloppyImageBackingOption)(nil)).Elem() +} + +type VirtualFloppyOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualFloppyOption"] = reflect.TypeOf((*VirtualFloppyOption)(nil)).Elem() +} + +type VirtualFloppyRemoteDeviceBackingInfo struct { + VirtualDeviceRemoteDeviceBackingInfo +} + +func init() { + t["VirtualFloppyRemoteDeviceBackingInfo"] = reflect.TypeOf((*VirtualFloppyRemoteDeviceBackingInfo)(nil)).Elem() +} + +type VirtualFloppyRemoteDeviceBackingOption struct { + VirtualDeviceRemoteDeviceBackingOption +} + +func init() { + t["VirtualFloppyRemoteDeviceBackingOption"] = reflect.TypeOf((*VirtualFloppyRemoteDeviceBackingOption)(nil)).Elem() +} + +type VirtualHardware struct { + DynamicData + + NumCPU int32 `xml:"numCPU"` + NumCoresPerSocket int32 `xml:"numCoresPerSocket,omitempty"` + MemoryMB int32 `xml:"memoryMB"` + VirtualICH7MPresent *bool 
`xml:"virtualICH7MPresent"` + VirtualSMCPresent *bool `xml:"virtualSMCPresent"` + Device []BaseVirtualDevice `xml:"device,omitempty,typeattr"` +} + +func init() { + t["VirtualHardware"] = reflect.TypeOf((*VirtualHardware)(nil)).Elem() +} + +type VirtualHardwareCompatibilityIssue struct { + VmConfigFault +} + +func init() { + t["VirtualHardwareCompatibilityIssue"] = reflect.TypeOf((*VirtualHardwareCompatibilityIssue)(nil)).Elem() +} + +type VirtualHardwareCompatibilityIssueFault BaseVirtualHardwareCompatibilityIssue + +func init() { + t["VirtualHardwareCompatibilityIssueFault"] = reflect.TypeOf((*VirtualHardwareCompatibilityIssueFault)(nil)).Elem() +} + +type VirtualHardwareOption struct { + DynamicData + + HwVersion int32 `xml:"hwVersion"` + VirtualDeviceOption []BaseVirtualDeviceOption `xml:"virtualDeviceOption,typeattr"` + DeviceListReadonly bool `xml:"deviceListReadonly"` + NumCPU []int32 `xml:"numCPU"` + NumCoresPerSocket *IntOption `xml:"numCoresPerSocket,omitempty"` + NumCpuReadonly bool `xml:"numCpuReadonly"` + MemoryMB LongOption `xml:"memoryMB"` + NumPCIControllers IntOption `xml:"numPCIControllers"` + NumIDEControllers IntOption `xml:"numIDEControllers"` + NumUSBControllers IntOption `xml:"numUSBControllers"` + NumUSBXHCIControllers *IntOption `xml:"numUSBXHCIControllers,omitempty"` + NumSIOControllers IntOption `xml:"numSIOControllers"` + NumPS2Controllers IntOption `xml:"numPS2Controllers"` + LicensingLimit []string `xml:"licensingLimit,omitempty"` + NumSupportedWwnPorts *IntOption `xml:"numSupportedWwnPorts,omitempty"` + NumSupportedWwnNodes *IntOption `xml:"numSupportedWwnNodes,omitempty"` + ResourceConfigOption *ResourceConfigOption `xml:"resourceConfigOption,omitempty"` + NumNVDIMMControllers *IntOption `xml:"numNVDIMMControllers,omitempty"` + NumTPMDevices *IntOption `xml:"numTPMDevices,omitempty"` +} + +func init() { + t["VirtualHardwareOption"] = reflect.TypeOf((*VirtualHardwareOption)(nil)).Elem() +} + +type VirtualHardwareVersionNotSupported struct { + VirtualHardwareCompatibilityIssue + + HostName string `xml:"hostName"` + Host ManagedObjectReference `xml:"host"` +} + +func init() { + t["VirtualHardwareVersionNotSupported"] = reflect.TypeOf((*VirtualHardwareVersionNotSupported)(nil)).Elem() +} + +type VirtualHardwareVersionNotSupportedFault VirtualHardwareVersionNotSupported + +func init() { + t["VirtualHardwareVersionNotSupportedFault"] = reflect.TypeOf((*VirtualHardwareVersionNotSupportedFault)(nil)).Elem() +} + +type VirtualHdAudioCard struct { + VirtualSoundCard +} + +func init() { + t["VirtualHdAudioCard"] = reflect.TypeOf((*VirtualHdAudioCard)(nil)).Elem() +} + +type VirtualHdAudioCardOption struct { + VirtualSoundCardOption +} + +func init() { + t["VirtualHdAudioCardOption"] = reflect.TypeOf((*VirtualHdAudioCardOption)(nil)).Elem() +} + +type VirtualIDEController struct { + VirtualController +} + +func init() { + t["VirtualIDEController"] = reflect.TypeOf((*VirtualIDEController)(nil)).Elem() +} + +type VirtualIDEControllerOption struct { + VirtualControllerOption + + NumIDEDisks IntOption `xml:"numIDEDisks"` + NumIDECdroms IntOption `xml:"numIDECdroms"` +} + +func init() { + t["VirtualIDEControllerOption"] = reflect.TypeOf((*VirtualIDEControllerOption)(nil)).Elem() +} + +type VirtualKeyboard struct { + VirtualDevice +} + +func init() { + t["VirtualKeyboard"] = reflect.TypeOf((*VirtualKeyboard)(nil)).Elem() +} + +type VirtualKeyboardOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualKeyboardOption"] = 
reflect.TypeOf((*VirtualKeyboardOption)(nil)).Elem() +} + +type VirtualLsiLogicController struct { + VirtualSCSIController +} + +func init() { + t["VirtualLsiLogicController"] = reflect.TypeOf((*VirtualLsiLogicController)(nil)).Elem() +} + +type VirtualLsiLogicControllerOption struct { + VirtualSCSIControllerOption +} + +func init() { + t["VirtualLsiLogicControllerOption"] = reflect.TypeOf((*VirtualLsiLogicControllerOption)(nil)).Elem() +} + +type VirtualLsiLogicSASController struct { + VirtualSCSIController +} + +func init() { + t["VirtualLsiLogicSASController"] = reflect.TypeOf((*VirtualLsiLogicSASController)(nil)).Elem() +} + +type VirtualLsiLogicSASControllerOption struct { + VirtualSCSIControllerOption +} + +func init() { + t["VirtualLsiLogicSASControllerOption"] = reflect.TypeOf((*VirtualLsiLogicSASControllerOption)(nil)).Elem() +} + +type VirtualMachineAffinityInfo struct { + DynamicData + + AffinitySet []int32 `xml:"affinitySet"` +} + +func init() { + t["VirtualMachineAffinityInfo"] = reflect.TypeOf((*VirtualMachineAffinityInfo)(nil)).Elem() +} + +type VirtualMachineBootOptions struct { + DynamicData + + BootDelay int64 `xml:"bootDelay,omitempty"` + EnterBIOSSetup *bool `xml:"enterBIOSSetup"` + EfiSecureBootEnabled *bool `xml:"efiSecureBootEnabled"` + BootRetryEnabled *bool `xml:"bootRetryEnabled"` + BootRetryDelay int64 `xml:"bootRetryDelay,omitempty"` + BootOrder []BaseVirtualMachineBootOptionsBootableDevice `xml:"bootOrder,omitempty,typeattr"` + NetworkBootProtocol string `xml:"networkBootProtocol,omitempty"` +} + +func init() { + t["VirtualMachineBootOptions"] = reflect.TypeOf((*VirtualMachineBootOptions)(nil)).Elem() +} + +type VirtualMachineBootOptionsBootableCdromDevice struct { + VirtualMachineBootOptionsBootableDevice +} + +func init() { + t["VirtualMachineBootOptionsBootableCdromDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableCdromDevice)(nil)).Elem() +} + +type VirtualMachineBootOptionsBootableDevice struct { + DynamicData +} + +func init() { + t["VirtualMachineBootOptionsBootableDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableDevice)(nil)).Elem() +} + +type VirtualMachineBootOptionsBootableDiskDevice struct { + VirtualMachineBootOptionsBootableDevice + + DeviceKey int32 `xml:"deviceKey"` +} + +func init() { + t["VirtualMachineBootOptionsBootableDiskDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableDiskDevice)(nil)).Elem() +} + +type VirtualMachineBootOptionsBootableEthernetDevice struct { + VirtualMachineBootOptionsBootableDevice + + DeviceKey int32 `xml:"deviceKey"` +} + +func init() { + t["VirtualMachineBootOptionsBootableEthernetDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableEthernetDevice)(nil)).Elem() +} + +type VirtualMachineBootOptionsBootableFloppyDevice struct { + VirtualMachineBootOptionsBootableDevice +} + +func init() { + t["VirtualMachineBootOptionsBootableFloppyDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableFloppyDevice)(nil)).Elem() +} + +type VirtualMachineCapability struct { + DynamicData + + SnapshotOperationsSupported bool `xml:"snapshotOperationsSupported"` + MultipleSnapshotsSupported bool `xml:"multipleSnapshotsSupported"` + SnapshotConfigSupported bool `xml:"snapshotConfigSupported"` + PoweredOffSnapshotsSupported bool `xml:"poweredOffSnapshotsSupported"` + MemorySnapshotsSupported bool `xml:"memorySnapshotsSupported"` + RevertToSnapshotSupported bool `xml:"revertToSnapshotSupported"` + QuiescedSnapshotsSupported bool `xml:"quiescedSnapshotsSupported"` + DisableSnapshotsSupported 
bool `xml:"disableSnapshotsSupported"` + LockSnapshotsSupported bool `xml:"lockSnapshotsSupported"` + ConsolePreferencesSupported bool `xml:"consolePreferencesSupported"` + CpuFeatureMaskSupported bool `xml:"cpuFeatureMaskSupported"` + S1AcpiManagementSupported bool `xml:"s1AcpiManagementSupported"` + SettingScreenResolutionSupported bool `xml:"settingScreenResolutionSupported"` + ToolsAutoUpdateSupported bool `xml:"toolsAutoUpdateSupported"` + VmNpivWwnSupported bool `xml:"vmNpivWwnSupported"` + NpivWwnOnNonRdmVmSupported bool `xml:"npivWwnOnNonRdmVmSupported"` + VmNpivWwnDisableSupported *bool `xml:"vmNpivWwnDisableSupported"` + VmNpivWwnUpdateSupported *bool `xml:"vmNpivWwnUpdateSupported"` + SwapPlacementSupported bool `xml:"swapPlacementSupported"` + ToolsSyncTimeSupported bool `xml:"toolsSyncTimeSupported"` + VirtualMmuUsageSupported bool `xml:"virtualMmuUsageSupported"` + DiskSharesSupported bool `xml:"diskSharesSupported"` + BootOptionsSupported bool `xml:"bootOptionsSupported"` + BootRetryOptionsSupported *bool `xml:"bootRetryOptionsSupported"` + SettingVideoRamSizeSupported bool `xml:"settingVideoRamSizeSupported"` + SettingDisplayTopologySupported *bool `xml:"settingDisplayTopologySupported"` + RecordReplaySupported *bool `xml:"recordReplaySupported"` + ChangeTrackingSupported *bool `xml:"changeTrackingSupported"` + MultipleCoresPerSocketSupported *bool `xml:"multipleCoresPerSocketSupported"` + HostBasedReplicationSupported *bool `xml:"hostBasedReplicationSupported"` + GuestAutoLockSupported *bool `xml:"guestAutoLockSupported"` + MemoryReservationLockSupported *bool `xml:"memoryReservationLockSupported"` + FeatureRequirementSupported *bool `xml:"featureRequirementSupported"` + PoweredOnMonitorTypeChangeSupported *bool `xml:"poweredOnMonitorTypeChangeSupported"` + SeSparseDiskSupported *bool `xml:"seSparseDiskSupported"` + NestedHVSupported *bool `xml:"nestedHVSupported"` + VPMCSupported *bool `xml:"vPMCSupported"` + SecureBootSupported *bool `xml:"secureBootSupported"` + PerVmEvcSupported *bool `xml:"perVmEvcSupported"` + VirtualMmuUsageIgnored *bool `xml:"virtualMmuUsageIgnored"` + VirtualExecUsageIgnored *bool `xml:"virtualExecUsageIgnored"` + DiskOnlySnapshotOnSuspendedVMSupported *bool `xml:"diskOnlySnapshotOnSuspendedVMSupported"` +} + +func init() { + t["VirtualMachineCapability"] = reflect.TypeOf((*VirtualMachineCapability)(nil)).Elem() +} + +type VirtualMachineCdromInfo struct { + VirtualMachineTargetInfo + + Description string `xml:"description,omitempty"` +} + +func init() { + t["VirtualMachineCdromInfo"] = reflect.TypeOf((*VirtualMachineCdromInfo)(nil)).Elem() +} + +type VirtualMachineCloneSpec struct { + DynamicData + + Location VirtualMachineRelocateSpec `xml:"location"` + Template bool `xml:"template"` + Config *VirtualMachineConfigSpec `xml:"config,omitempty"` + Customization *CustomizationSpec `xml:"customization,omitempty"` + PowerOn bool `xml:"powerOn"` + Snapshot *ManagedObjectReference `xml:"snapshot,omitempty"` + Memory *bool `xml:"memory"` +} + +func init() { + t["VirtualMachineCloneSpec"] = reflect.TypeOf((*VirtualMachineCloneSpec)(nil)).Elem() +} + +type VirtualMachineConfigInfo struct { + DynamicData + + ChangeVersion string `xml:"changeVersion"` + Modified time.Time `xml:"modified"` + Name string `xml:"name"` + GuestFullName string `xml:"guestFullName"` + Version string `xml:"version"` + Uuid string `xml:"uuid"` + CreateDate *time.Time `xml:"createDate"` + InstanceUuid string `xml:"instanceUuid,omitempty"` + NpivNodeWorldWideName []int64 
`xml:"npivNodeWorldWideName,omitempty"` + NpivPortWorldWideName []int64 `xml:"npivPortWorldWideName,omitempty"` + NpivWorldWideNameType string `xml:"npivWorldWideNameType,omitempty"` + NpivDesiredNodeWwns int16 `xml:"npivDesiredNodeWwns,omitempty"` + NpivDesiredPortWwns int16 `xml:"npivDesiredPortWwns,omitempty"` + NpivTemporaryDisabled *bool `xml:"npivTemporaryDisabled"` + NpivOnNonRdmDisks *bool `xml:"npivOnNonRdmDisks"` + LocationId string `xml:"locationId,omitempty"` + Template bool `xml:"template"` + GuestId string `xml:"guestId"` + AlternateGuestName string `xml:"alternateGuestName"` + Annotation string `xml:"annotation,omitempty"` + Files VirtualMachineFileInfo `xml:"files"` + Tools *ToolsConfigInfo `xml:"tools,omitempty"` + Flags VirtualMachineFlagInfo `xml:"flags"` + ConsolePreferences *VirtualMachineConsolePreferences `xml:"consolePreferences,omitempty"` + DefaultPowerOps VirtualMachineDefaultPowerOpInfo `xml:"defaultPowerOps"` + Hardware VirtualHardware `xml:"hardware"` + CpuAllocation *ResourceAllocationInfo `xml:"cpuAllocation,omitempty"` + MemoryAllocation *ResourceAllocationInfo `xml:"memoryAllocation,omitempty"` + LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty"` + MemoryHotAddEnabled *bool `xml:"memoryHotAddEnabled"` + CpuHotAddEnabled *bool `xml:"cpuHotAddEnabled"` + CpuHotRemoveEnabled *bool `xml:"cpuHotRemoveEnabled"` + HotPlugMemoryLimit int64 `xml:"hotPlugMemoryLimit,omitempty"` + HotPlugMemoryIncrementSize int64 `xml:"hotPlugMemoryIncrementSize,omitempty"` + CpuAffinity *VirtualMachineAffinityInfo `xml:"cpuAffinity,omitempty"` + MemoryAffinity *VirtualMachineAffinityInfo `xml:"memoryAffinity,omitempty"` + NetworkShaper *VirtualMachineNetworkShaperInfo `xml:"networkShaper,omitempty"` + ExtraConfig []BaseOptionValue `xml:"extraConfig,omitempty,typeattr"` + CpuFeatureMask []HostCpuIdInfo `xml:"cpuFeatureMask,omitempty"` + DatastoreUrl []VirtualMachineConfigInfoDatastoreUrlPair `xml:"datastoreUrl,omitempty"` + SwapPlacement string `xml:"swapPlacement,omitempty"` + BootOptions *VirtualMachineBootOptions `xml:"bootOptions,omitempty"` + FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr"` + RepConfig *ReplicationConfigSpec `xml:"repConfig,omitempty"` + VAppConfig BaseVmConfigInfo `xml:"vAppConfig,omitempty,typeattr"` + VAssertsEnabled *bool `xml:"vAssertsEnabled"` + ChangeTrackingEnabled *bool `xml:"changeTrackingEnabled"` + Firmware string `xml:"firmware,omitempty"` + MaxMksConnections int32 `xml:"maxMksConnections,omitempty"` + GuestAutoLockEnabled *bool `xml:"guestAutoLockEnabled"` + ManagedBy *ManagedByInfo `xml:"managedBy,omitempty"` + MemoryReservationLockedToMax *bool `xml:"memoryReservationLockedToMax"` + InitialOverhead *VirtualMachineConfigInfoOverheadInfo `xml:"initialOverhead,omitempty"` + NestedHVEnabled *bool `xml:"nestedHVEnabled"` + VPMCEnabled *bool `xml:"vPMCEnabled"` + ScheduledHardwareUpgradeInfo *ScheduledHardwareUpgradeInfo `xml:"scheduledHardwareUpgradeInfo,omitempty"` + ForkConfigInfo *VirtualMachineForkConfigInfo `xml:"forkConfigInfo,omitempty"` + VFlashCacheReservation int64 `xml:"vFlashCacheReservation,omitempty"` + VmxConfigChecksum []byte `xml:"vmxConfigChecksum,omitempty"` + MessageBusTunnelEnabled *bool `xml:"messageBusTunnelEnabled"` + VmStorageObjectId string `xml:"vmStorageObjectId,omitempty"` + SwapStorageObjectId string `xml:"swapStorageObjectId,omitempty"` + KeyId *CryptoKeyId `xml:"keyId,omitempty"` + GuestIntegrityInfo *VirtualMachineGuestIntegrityInfo `xml:"guestIntegrityInfo,omitempty"` + 
MigrateEncryption string `xml:"migrateEncryption,omitempty"` +} + +func init() { + t["VirtualMachineConfigInfo"] = reflect.TypeOf((*VirtualMachineConfigInfo)(nil)).Elem() +} + +type VirtualMachineConfigInfoDatastoreUrlPair struct { + DynamicData + + Name string `xml:"name"` + Url string `xml:"url"` +} + +func init() { + t["VirtualMachineConfigInfoDatastoreUrlPair"] = reflect.TypeOf((*VirtualMachineConfigInfoDatastoreUrlPair)(nil)).Elem() +} + +type VirtualMachineConfigInfoOverheadInfo struct { + DynamicData + + InitialMemoryReservation int64 `xml:"initialMemoryReservation,omitempty"` + InitialSwapReservation int64 `xml:"initialSwapReservation,omitempty"` +} + +func init() { + t["VirtualMachineConfigInfoOverheadInfo"] = reflect.TypeOf((*VirtualMachineConfigInfoOverheadInfo)(nil)).Elem() +} + +type VirtualMachineConfigOption struct { + DynamicData + + Version string `xml:"version"` + Description string `xml:"description"` + GuestOSDescriptor []GuestOsDescriptor `xml:"guestOSDescriptor"` + GuestOSDefaultIndex int32 `xml:"guestOSDefaultIndex"` + HardwareOptions VirtualHardwareOption `xml:"hardwareOptions"` + Capabilities VirtualMachineCapability `xml:"capabilities"` + Datastore DatastoreOption `xml:"datastore"` + DefaultDevice []BaseVirtualDevice `xml:"defaultDevice,omitempty,typeattr"` + SupportedMonitorType []string `xml:"supportedMonitorType"` + SupportedOvfEnvironmentTransport []string `xml:"supportedOvfEnvironmentTransport,omitempty"` + SupportedOvfInstallTransport []string `xml:"supportedOvfInstallTransport,omitempty"` + PropertyRelations []VirtualMachinePropertyRelation `xml:"propertyRelations,omitempty"` +} + +func init() { + t["VirtualMachineConfigOption"] = reflect.TypeOf((*VirtualMachineConfigOption)(nil)).Elem() +} + +type VirtualMachineConfigOptionDescriptor struct { + DynamicData + + Key string `xml:"key"` + Description string `xml:"description,omitempty"` + Host []ManagedObjectReference `xml:"host,omitempty"` + CreateSupported *bool `xml:"createSupported"` + DefaultConfigOption *bool `xml:"defaultConfigOption"` + RunSupported *bool `xml:"runSupported"` + UpgradeSupported *bool `xml:"upgradeSupported"` +} + +func init() { + t["VirtualMachineConfigOptionDescriptor"] = reflect.TypeOf((*VirtualMachineConfigOptionDescriptor)(nil)).Elem() +} + +type VirtualMachineConfigSpec struct { + DynamicData + + ChangeVersion string `xml:"changeVersion,omitempty"` + Name string `xml:"name,omitempty"` + Version string `xml:"version,omitempty"` + CreateDate *time.Time `xml:"createDate"` + Uuid string `xml:"uuid,omitempty"` + InstanceUuid string `xml:"instanceUuid,omitempty"` + NpivNodeWorldWideName []int64 `xml:"npivNodeWorldWideName,omitempty"` + NpivPortWorldWideName []int64 `xml:"npivPortWorldWideName,omitempty"` + NpivWorldWideNameType string `xml:"npivWorldWideNameType,omitempty"` + NpivDesiredNodeWwns int16 `xml:"npivDesiredNodeWwns,omitempty"` + NpivDesiredPortWwns int16 `xml:"npivDesiredPortWwns,omitempty"` + NpivTemporaryDisabled *bool `xml:"npivTemporaryDisabled"` + NpivOnNonRdmDisks *bool `xml:"npivOnNonRdmDisks"` + NpivWorldWideNameOp string `xml:"npivWorldWideNameOp,omitempty"` + LocationId string `xml:"locationId,omitempty"` + GuestId string `xml:"guestId,omitempty"` + AlternateGuestName string `xml:"alternateGuestName,omitempty"` + Annotation string `xml:"annotation,omitempty"` + Files *VirtualMachineFileInfo `xml:"files,omitempty"` + Tools *ToolsConfigInfo `xml:"tools,omitempty"` + Flags *VirtualMachineFlagInfo `xml:"flags,omitempty"` + ConsolePreferences 
*VirtualMachineConsolePreferences `xml:"consolePreferences,omitempty"` + PowerOpInfo *VirtualMachineDefaultPowerOpInfo `xml:"powerOpInfo,omitempty"` + NumCPUs int32 `xml:"numCPUs,omitempty"` + NumCoresPerSocket int32 `xml:"numCoresPerSocket,omitempty"` + MemoryMB int64 `xml:"memoryMB,omitempty"` + MemoryHotAddEnabled *bool `xml:"memoryHotAddEnabled"` + CpuHotAddEnabled *bool `xml:"cpuHotAddEnabled"` + CpuHotRemoveEnabled *bool `xml:"cpuHotRemoveEnabled"` + VirtualICH7MPresent *bool `xml:"virtualICH7MPresent"` + VirtualSMCPresent *bool `xml:"virtualSMCPresent"` + DeviceChange []BaseVirtualDeviceConfigSpec `xml:"deviceChange,omitempty,typeattr"` + CpuAllocation *ResourceAllocationInfo `xml:"cpuAllocation,omitempty"` + MemoryAllocation *ResourceAllocationInfo `xml:"memoryAllocation,omitempty"` + LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty"` + CpuAffinity *VirtualMachineAffinityInfo `xml:"cpuAffinity,omitempty"` + MemoryAffinity *VirtualMachineAffinityInfo `xml:"memoryAffinity,omitempty"` + NetworkShaper *VirtualMachineNetworkShaperInfo `xml:"networkShaper,omitempty"` + CpuFeatureMask []VirtualMachineCpuIdInfoSpec `xml:"cpuFeatureMask,omitempty"` + ExtraConfig []BaseOptionValue `xml:"extraConfig,omitempty,typeattr"` + SwapPlacement string `xml:"swapPlacement,omitempty"` + BootOptions *VirtualMachineBootOptions `xml:"bootOptions,omitempty"` + VAppConfig BaseVmConfigSpec `xml:"vAppConfig,omitempty,typeattr"` + FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr"` + RepConfig *ReplicationConfigSpec `xml:"repConfig,omitempty"` + VAppConfigRemoved *bool `xml:"vAppConfigRemoved"` + VAssertsEnabled *bool `xml:"vAssertsEnabled"` + ChangeTrackingEnabled *bool `xml:"changeTrackingEnabled"` + Firmware string `xml:"firmware,omitempty"` + MaxMksConnections int32 `xml:"maxMksConnections,omitempty"` + GuestAutoLockEnabled *bool `xml:"guestAutoLockEnabled"` + ManagedBy *ManagedByInfo `xml:"managedBy,omitempty"` + MemoryReservationLockedToMax *bool `xml:"memoryReservationLockedToMax"` + NestedHVEnabled *bool `xml:"nestedHVEnabled"` + VPMCEnabled *bool `xml:"vPMCEnabled"` + ScheduledHardwareUpgradeInfo *ScheduledHardwareUpgradeInfo `xml:"scheduledHardwareUpgradeInfo,omitempty"` + VmProfile []BaseVirtualMachineProfileSpec `xml:"vmProfile,omitempty,typeattr"` + MessageBusTunnelEnabled *bool `xml:"messageBusTunnelEnabled"` + Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr"` + MigrateEncryption string `xml:"migrateEncryption,omitempty"` +} + +func init() { + t["VirtualMachineConfigSpec"] = reflect.TypeOf((*VirtualMachineConfigSpec)(nil)).Elem() +} + +type VirtualMachineConfigSummary struct { + DynamicData + + Name string `xml:"name"` + Template bool `xml:"template"` + VmPathName string `xml:"vmPathName"` + MemorySizeMB int32 `xml:"memorySizeMB,omitempty"` + CpuReservation int32 `xml:"cpuReservation,omitempty"` + MemoryReservation int32 `xml:"memoryReservation,omitempty"` + NumCpu int32 `xml:"numCpu,omitempty"` + NumEthernetCards int32 `xml:"numEthernetCards,omitempty"` + NumVirtualDisks int32 `xml:"numVirtualDisks,omitempty"` + Uuid string `xml:"uuid,omitempty"` + InstanceUuid string `xml:"instanceUuid,omitempty"` + GuestId string `xml:"guestId,omitempty"` + GuestFullName string `xml:"guestFullName,omitempty"` + Annotation string `xml:"annotation,omitempty"` + Product *VAppProductInfo `xml:"product,omitempty"` + InstallBootRequired *bool `xml:"installBootRequired"` + FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr"` + ManagedBy *ManagedByInfo 
`xml:"managedBy,omitempty"` + TpmPresent *bool `xml:"tpmPresent"` + NumVmiopBackings int32 `xml:"numVmiopBackings,omitempty"` +} + +func init() { + t["VirtualMachineConfigSummary"] = reflect.TypeOf((*VirtualMachineConfigSummary)(nil)).Elem() +} + +type VirtualMachineConsolePreferences struct { + DynamicData + + PowerOnWhenOpened *bool `xml:"powerOnWhenOpened"` + EnterFullScreenOnPowerOn *bool `xml:"enterFullScreenOnPowerOn"` + CloseOnPowerOffOrSuspend *bool `xml:"closeOnPowerOffOrSuspend"` +} + +func init() { + t["VirtualMachineConsolePreferences"] = reflect.TypeOf((*VirtualMachineConsolePreferences)(nil)).Elem() +} + +type VirtualMachineCpuIdInfoSpec struct { + ArrayUpdateSpec + + Info *HostCpuIdInfo `xml:"info,omitempty"` +} + +func init() { + t["VirtualMachineCpuIdInfoSpec"] = reflect.TypeOf((*VirtualMachineCpuIdInfoSpec)(nil)).Elem() +} + +type VirtualMachineDatastoreInfo struct { + VirtualMachineTargetInfo + + Datastore DatastoreSummary `xml:"datastore"` + Capability DatastoreCapability `xml:"capability"` + MaxFileSize int64 `xml:"maxFileSize"` + MaxVirtualDiskCapacity int64 `xml:"maxVirtualDiskCapacity,omitempty"` + MaxPhysicalRDMFileSize int64 `xml:"maxPhysicalRDMFileSize,omitempty"` + MaxVirtualRDMFileSize int64 `xml:"maxVirtualRDMFileSize,omitempty"` + Mode string `xml:"mode"` + VStorageSupport string `xml:"vStorageSupport,omitempty"` +} + +func init() { + t["VirtualMachineDatastoreInfo"] = reflect.TypeOf((*VirtualMachineDatastoreInfo)(nil)).Elem() +} + +type VirtualMachineDatastoreVolumeOption struct { + DynamicData + + FileSystemType string `xml:"fileSystemType"` + MajorVersion int32 `xml:"majorVersion,omitempty"` +} + +func init() { + t["VirtualMachineDatastoreVolumeOption"] = reflect.TypeOf((*VirtualMachineDatastoreVolumeOption)(nil)).Elem() +} + +type VirtualMachineDefaultPowerOpInfo struct { + DynamicData + + PowerOffType string `xml:"powerOffType,omitempty"` + SuspendType string `xml:"suspendType,omitempty"` + ResetType string `xml:"resetType,omitempty"` + DefaultPowerOffType string `xml:"defaultPowerOffType,omitempty"` + DefaultSuspendType string `xml:"defaultSuspendType,omitempty"` + DefaultResetType string `xml:"defaultResetType,omitempty"` + StandbyAction string `xml:"standbyAction,omitempty"` +} + +func init() { + t["VirtualMachineDefaultPowerOpInfo"] = reflect.TypeOf((*VirtualMachineDefaultPowerOpInfo)(nil)).Elem() +} + +type VirtualMachineDefaultProfileSpec struct { + VirtualMachineProfileSpec +} + +func init() { + t["VirtualMachineDefaultProfileSpec"] = reflect.TypeOf((*VirtualMachineDefaultProfileSpec)(nil)).Elem() +} + +type VirtualMachineDefinedProfileSpec struct { + VirtualMachineProfileSpec + + ProfileId string `xml:"profileId"` + ReplicationSpec *ReplicationSpec `xml:"replicationSpec,omitempty"` + ProfileData *VirtualMachineProfileRawData `xml:"profileData,omitempty"` + ProfileParams []KeyValue `xml:"profileParams,omitempty"` +} + +func init() { + t["VirtualMachineDefinedProfileSpec"] = reflect.TypeOf((*VirtualMachineDefinedProfileSpec)(nil)).Elem() +} + +type VirtualMachineDeviceRuntimeInfo struct { + DynamicData + + RuntimeState BaseVirtualMachineDeviceRuntimeInfoDeviceRuntimeState `xml:"runtimeState,typeattr"` + Key int32 `xml:"key"` +} + +func init() { + t["VirtualMachineDeviceRuntimeInfo"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfo)(nil)).Elem() +} + +type VirtualMachineDeviceRuntimeInfoDeviceRuntimeState struct { + DynamicData +} + +func init() { + t["VirtualMachineDeviceRuntimeInfoDeviceRuntimeState"] = 
reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoDeviceRuntimeState)(nil)).Elem() +} + +type VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeState struct { + VirtualMachineDeviceRuntimeInfoDeviceRuntimeState + + VmDirectPathGen2Active bool `xml:"vmDirectPathGen2Active"` + VmDirectPathGen2InactiveReasonVm []string `xml:"vmDirectPathGen2InactiveReasonVm,omitempty"` + VmDirectPathGen2InactiveReasonOther []string `xml:"vmDirectPathGen2InactiveReasonOther,omitempty"` + VmDirectPathGen2InactiveReasonExtended string `xml:"vmDirectPathGen2InactiveReasonExtended,omitempty"` + ReservationStatus string `xml:"reservationStatus,omitempty"` + AttachmentStatus string `xml:"attachmentStatus,omitempty"` + FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty"` +} + +func init() { + t["VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeState"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeState)(nil)).Elem() +} + +type VirtualMachineDiskDeviceInfo struct { + VirtualMachineTargetInfo + + Capacity int64 `xml:"capacity,omitempty"` + Vm []ManagedObjectReference `xml:"vm,omitempty"` +} + +func init() { + t["VirtualMachineDiskDeviceInfo"] = reflect.TypeOf((*VirtualMachineDiskDeviceInfo)(nil)).Elem() +} + +type VirtualMachineDisplayTopology struct { + DynamicData + + X int32 `xml:"x"` + Y int32 `xml:"y"` + Width int32 `xml:"width"` + Height int32 `xml:"height"` +} + +func init() { + t["VirtualMachineDisplayTopology"] = reflect.TypeOf((*VirtualMachineDisplayTopology)(nil)).Elem() +} + +type VirtualMachineEmptyProfileSpec struct { + VirtualMachineProfileSpec +} + +func init() { + t["VirtualMachineEmptyProfileSpec"] = reflect.TypeOf((*VirtualMachineEmptyProfileSpec)(nil)).Elem() +} + +type VirtualMachineFeatureRequirement struct { + DynamicData + + Key string `xml:"key"` + FeatureName string `xml:"featureName"` + Value string `xml:"value"` +} + +func init() { + t["VirtualMachineFeatureRequirement"] = reflect.TypeOf((*VirtualMachineFeatureRequirement)(nil)).Elem() +} + +type VirtualMachineFileInfo struct { + DynamicData + + VmPathName string `xml:"vmPathName,omitempty"` + SnapshotDirectory string `xml:"snapshotDirectory,omitempty"` + SuspendDirectory string `xml:"suspendDirectory,omitempty"` + LogDirectory string `xml:"logDirectory,omitempty"` + FtMetadataDirectory string `xml:"ftMetadataDirectory,omitempty"` +} + +func init() { + t["VirtualMachineFileInfo"] = reflect.TypeOf((*VirtualMachineFileInfo)(nil)).Elem() +} + +type VirtualMachineFileLayout struct { + DynamicData + + ConfigFile []string `xml:"configFile,omitempty"` + LogFile []string `xml:"logFile,omitempty"` + Disk []VirtualMachineFileLayoutDiskLayout `xml:"disk,omitempty"` + Snapshot []VirtualMachineFileLayoutSnapshotLayout `xml:"snapshot,omitempty"` + SwapFile string `xml:"swapFile,omitempty"` +} + +func init() { + t["VirtualMachineFileLayout"] = reflect.TypeOf((*VirtualMachineFileLayout)(nil)).Elem() +} + +type VirtualMachineFileLayoutDiskLayout struct { + DynamicData + + Key int32 `xml:"key"` + DiskFile []string `xml:"diskFile"` +} + +func init() { + t["VirtualMachineFileLayoutDiskLayout"] = reflect.TypeOf((*VirtualMachineFileLayoutDiskLayout)(nil)).Elem() +} + +type VirtualMachineFileLayoutEx struct { + DynamicData + + File []VirtualMachineFileLayoutExFileInfo `xml:"file,omitempty"` + Disk []VirtualMachineFileLayoutExDiskLayout `xml:"disk,omitempty"` + Snapshot []VirtualMachineFileLayoutExSnapshotLayout `xml:"snapshot,omitempty"` + Timestamp time.Time `xml:"timestamp"` +} + 
+func init() { + t["VirtualMachineFileLayoutEx"] = reflect.TypeOf((*VirtualMachineFileLayoutEx)(nil)).Elem() +} + +type VirtualMachineFileLayoutExDiskLayout struct { + DynamicData + + Key int32 `xml:"key"` + Chain []VirtualMachineFileLayoutExDiskUnit `xml:"chain,omitempty"` +} + +func init() { + t["VirtualMachineFileLayoutExDiskLayout"] = reflect.TypeOf((*VirtualMachineFileLayoutExDiskLayout)(nil)).Elem() +} + +type VirtualMachineFileLayoutExDiskUnit struct { + DynamicData + + FileKey []int32 `xml:"fileKey"` +} + +func init() { + t["VirtualMachineFileLayoutExDiskUnit"] = reflect.TypeOf((*VirtualMachineFileLayoutExDiskUnit)(nil)).Elem() +} + +type VirtualMachineFileLayoutExFileInfo struct { + DynamicData + + Key int32 `xml:"key"` + Name string `xml:"name"` + Type string `xml:"type"` + Size int64 `xml:"size"` + UniqueSize int64 `xml:"uniqueSize,omitempty"` + BackingObjectId string `xml:"backingObjectId,omitempty"` + Accessible *bool `xml:"accessible"` +} + +func init() { + t["VirtualMachineFileLayoutExFileInfo"] = reflect.TypeOf((*VirtualMachineFileLayoutExFileInfo)(nil)).Elem() +} + +type VirtualMachineFileLayoutExSnapshotLayout struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + DataKey int32 `xml:"dataKey"` + MemoryKey int32 `xml:"memoryKey,omitempty"` + Disk []VirtualMachineFileLayoutExDiskLayout `xml:"disk,omitempty"` +} + +func init() { + t["VirtualMachineFileLayoutExSnapshotLayout"] = reflect.TypeOf((*VirtualMachineFileLayoutExSnapshotLayout)(nil)).Elem() +} + +type VirtualMachineFileLayoutSnapshotLayout struct { + DynamicData + + Key ManagedObjectReference `xml:"key"` + SnapshotFile []string `xml:"snapshotFile"` +} + +func init() { + t["VirtualMachineFileLayoutSnapshotLayout"] = reflect.TypeOf((*VirtualMachineFileLayoutSnapshotLayout)(nil)).Elem() +} + +type VirtualMachineFlagInfo struct { + DynamicData + + DisableAcceleration *bool `xml:"disableAcceleration"` + EnableLogging *bool `xml:"enableLogging"` + UseToe *bool `xml:"useToe"` + RunWithDebugInfo *bool `xml:"runWithDebugInfo"` + MonitorType string `xml:"monitorType,omitempty"` + HtSharing string `xml:"htSharing,omitempty"` + SnapshotDisabled *bool `xml:"snapshotDisabled"` + SnapshotLocked *bool `xml:"snapshotLocked"` + DiskUuidEnabled *bool `xml:"diskUuidEnabled"` + VirtualMmuUsage string `xml:"virtualMmuUsage,omitempty"` + VirtualExecUsage string `xml:"virtualExecUsage,omitempty"` + SnapshotPowerOffBehavior string `xml:"snapshotPowerOffBehavior,omitempty"` + RecordReplayEnabled *bool `xml:"recordReplayEnabled"` + FaultToleranceType string `xml:"faultToleranceType,omitempty"` + CbrcCacheEnabled *bool `xml:"cbrcCacheEnabled"` + VvtdEnabled *bool `xml:"vvtdEnabled"` + VbsEnabled *bool `xml:"vbsEnabled"` +} + +func init() { + t["VirtualMachineFlagInfo"] = reflect.TypeOf((*VirtualMachineFlagInfo)(nil)).Elem() +} + +type VirtualMachineFloppyInfo struct { + VirtualMachineTargetInfo +} + +func init() { + t["VirtualMachineFloppyInfo"] = reflect.TypeOf((*VirtualMachineFloppyInfo)(nil)).Elem() +} + +type VirtualMachineForkConfigInfo struct { + DynamicData + + ParentEnabled *bool `xml:"parentEnabled"` + ChildForkGroupId string `xml:"childForkGroupId,omitempty"` + ParentForkGroupId string `xml:"parentForkGroupId,omitempty"` + ChildType string `xml:"childType,omitempty"` +} + +func init() { + t["VirtualMachineForkConfigInfo"] = reflect.TypeOf((*VirtualMachineForkConfigInfo)(nil)).Elem() +} + +type VirtualMachineGuestIntegrityInfo struct { + DynamicData + + Enabled *bool `xml:"enabled"` +} + +func init() { + 
t["VirtualMachineGuestIntegrityInfo"] = reflect.TypeOf((*VirtualMachineGuestIntegrityInfo)(nil)).Elem() +} + +type VirtualMachineGuestQuiesceSpec struct { + DynamicData + + Timeout int32 `xml:"timeout,omitempty"` +} + +func init() { + t["VirtualMachineGuestQuiesceSpec"] = reflect.TypeOf((*VirtualMachineGuestQuiesceSpec)(nil)).Elem() +} + +type VirtualMachineGuestSummary struct { + DynamicData + + GuestId string `xml:"guestId,omitempty"` + GuestFullName string `xml:"guestFullName,omitempty"` + ToolsStatus VirtualMachineToolsStatus `xml:"toolsStatus,omitempty"` + ToolsVersionStatus string `xml:"toolsVersionStatus,omitempty"` + ToolsVersionStatus2 string `xml:"toolsVersionStatus2,omitempty"` + ToolsRunningStatus string `xml:"toolsRunningStatus,omitempty"` + HostName string `xml:"hostName,omitempty"` + IpAddress string `xml:"ipAddress,omitempty"` +} + +func init() { + t["VirtualMachineGuestSummary"] = reflect.TypeOf((*VirtualMachineGuestSummary)(nil)).Elem() +} + +type VirtualMachineIdeDiskDeviceInfo struct { + VirtualMachineDiskDeviceInfo + + PartitionTable []VirtualMachineIdeDiskDevicePartitionInfo `xml:"partitionTable,omitempty"` +} + +func init() { + t["VirtualMachineIdeDiskDeviceInfo"] = reflect.TypeOf((*VirtualMachineIdeDiskDeviceInfo)(nil)).Elem() +} + +type VirtualMachineIdeDiskDevicePartitionInfo struct { + DynamicData + + Id int32 `xml:"id"` + Capacity int32 `xml:"capacity"` +} + +func init() { + t["VirtualMachineIdeDiskDevicePartitionInfo"] = reflect.TypeOf((*VirtualMachineIdeDiskDevicePartitionInfo)(nil)).Elem() +} + +type VirtualMachineImportSpec struct { + ImportSpec + + ConfigSpec VirtualMachineConfigSpec `xml:"configSpec"` + ResPoolEntity *ManagedObjectReference `xml:"resPoolEntity,omitempty"` +} + +func init() { + t["VirtualMachineImportSpec"] = reflect.TypeOf((*VirtualMachineImportSpec)(nil)).Elem() +} + +type VirtualMachineInstantCloneSpec struct { + DynamicData + + Name string `xml:"name"` + Location VirtualMachineRelocateSpec `xml:"location"` + Config []BaseOptionValue `xml:"config,omitempty,typeattr"` + BiosUuid string `xml:"biosUuid,omitempty"` +} + +func init() { + t["VirtualMachineInstantCloneSpec"] = reflect.TypeOf((*VirtualMachineInstantCloneSpec)(nil)).Elem() +} + +type VirtualMachineLegacyNetworkSwitchInfo struct { + DynamicData + + Name string `xml:"name"` +} + +func init() { + t["VirtualMachineLegacyNetworkSwitchInfo"] = reflect.TypeOf((*VirtualMachineLegacyNetworkSwitchInfo)(nil)).Elem() +} + +type VirtualMachineMemoryReservationInfo struct { + DynamicData + + VirtualMachineMin int64 `xml:"virtualMachineMin"` + VirtualMachineMax int64 `xml:"virtualMachineMax"` + VirtualMachineReserved int64 `xml:"virtualMachineReserved"` + AllocationPolicy string `xml:"allocationPolicy"` +} + +func init() { + t["VirtualMachineMemoryReservationInfo"] = reflect.TypeOf((*VirtualMachineMemoryReservationInfo)(nil)).Elem() +} + +type VirtualMachineMemoryReservationSpec struct { + DynamicData + + VirtualMachineReserved int64 `xml:"virtualMachineReserved,omitempty"` + AllocationPolicy string `xml:"allocationPolicy,omitempty"` +} + +func init() { + t["VirtualMachineMemoryReservationSpec"] = reflect.TypeOf((*VirtualMachineMemoryReservationSpec)(nil)).Elem() +} + +type VirtualMachineMessage struct { + DynamicData + + Id string `xml:"id"` + Argument []AnyType `xml:"argument,omitempty,typeattr"` + Text string `xml:"text,omitempty"` +} + +func init() { + t["VirtualMachineMessage"] = reflect.TypeOf((*VirtualMachineMessage)(nil)).Elem() +} + +type VirtualMachineMetadataManagerVmMetadata struct 
{ + DynamicData + + VmId string `xml:"vmId"` + Metadata string `xml:"metadata,omitempty"` +} + +func init() { + t["VirtualMachineMetadataManagerVmMetadata"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadata)(nil)).Elem() +} + +type VirtualMachineMetadataManagerVmMetadataInput struct { + DynamicData + + Operation string `xml:"operation"` + VmMetadata VirtualMachineMetadataManagerVmMetadata `xml:"vmMetadata"` +} + +func init() { + t["VirtualMachineMetadataManagerVmMetadataInput"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataInput)(nil)).Elem() +} + +type VirtualMachineMetadataManagerVmMetadataOwner struct { + DynamicData + + Name string `xml:"name"` +} + +func init() { + t["VirtualMachineMetadataManagerVmMetadataOwner"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataOwner)(nil)).Elem() +} + +type VirtualMachineMetadataManagerVmMetadataResult struct { + DynamicData + + VmMetadata VirtualMachineMetadataManagerVmMetadata `xml:"vmMetadata"` + Error *LocalizedMethodFault `xml:"error,omitempty"` +} + +func init() { + t["VirtualMachineMetadataManagerVmMetadataResult"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataResult)(nil)).Elem() +} + +type VirtualMachineMksTicket struct { + DynamicData + + Ticket string `xml:"ticket"` + CfgFile string `xml:"cfgFile"` + Host string `xml:"host,omitempty"` + Port int32 `xml:"port,omitempty"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` +} + +func init() { + t["VirtualMachineMksTicket"] = reflect.TypeOf((*VirtualMachineMksTicket)(nil)).Elem() +} + +type VirtualMachineNetworkInfo struct { + VirtualMachineTargetInfo + + Network BaseNetworkSummary `xml:"network,typeattr"` + Vswitch string `xml:"vswitch,omitempty"` +} + +func init() { + t["VirtualMachineNetworkInfo"] = reflect.TypeOf((*VirtualMachineNetworkInfo)(nil)).Elem() +} + +type VirtualMachineNetworkShaperInfo struct { + DynamicData + + Enabled *bool `xml:"enabled"` + PeakBps int64 `xml:"peakBps,omitempty"` + AverageBps int64 `xml:"averageBps,omitempty"` + BurstSize int64 `xml:"burstSize,omitempty"` +} + +func init() { + t["VirtualMachineNetworkShaperInfo"] = reflect.TypeOf((*VirtualMachineNetworkShaperInfo)(nil)).Elem() +} + +type VirtualMachineParallelInfo struct { + VirtualMachineTargetInfo +} + +func init() { + t["VirtualMachineParallelInfo"] = reflect.TypeOf((*VirtualMachineParallelInfo)(nil)).Elem() +} + +type VirtualMachinePciPassthroughInfo struct { + VirtualMachineTargetInfo + + PciDevice HostPciDevice `xml:"pciDevice"` + SystemId string `xml:"systemId"` +} + +func init() { + t["VirtualMachinePciPassthroughInfo"] = reflect.TypeOf((*VirtualMachinePciPassthroughInfo)(nil)).Elem() +} + +type VirtualMachinePciSharedGpuPassthroughInfo struct { + VirtualMachineTargetInfo + + Vgpu string `xml:"vgpu"` +} + +func init() { + t["VirtualMachinePciSharedGpuPassthroughInfo"] = reflect.TypeOf((*VirtualMachinePciSharedGpuPassthroughInfo)(nil)).Elem() +} + +type VirtualMachineProfileDetails struct { + DynamicData + + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + DiskProfileDetails []VirtualMachineProfileDetailsDiskProfileDetails `xml:"diskProfileDetails,omitempty"` +} + +func init() { + t["VirtualMachineProfileDetails"] = reflect.TypeOf((*VirtualMachineProfileDetails)(nil)).Elem() +} + +type VirtualMachineProfileDetailsDiskProfileDetails struct { + DynamicData + + DiskId int32 `xml:"diskId"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` +} + +func init() { + 
t["VirtualMachineProfileDetailsDiskProfileDetails"] = reflect.TypeOf((*VirtualMachineProfileDetailsDiskProfileDetails)(nil)).Elem() +} + +type VirtualMachineProfileRawData struct { + DynamicData + + ExtensionKey string `xml:"extensionKey"` + ObjectData string `xml:"objectData,omitempty"` +} + +func init() { + t["VirtualMachineProfileRawData"] = reflect.TypeOf((*VirtualMachineProfileRawData)(nil)).Elem() +} + +type VirtualMachineProfileSpec struct { + DynamicData +} + +func init() { + t["VirtualMachineProfileSpec"] = reflect.TypeOf((*VirtualMachineProfileSpec)(nil)).Elem() +} + +type VirtualMachinePropertyRelation struct { + DynamicData + + Key DynamicProperty `xml:"key"` + Relations []DynamicProperty `xml:"relations,omitempty"` +} + +func init() { + t["VirtualMachinePropertyRelation"] = reflect.TypeOf((*VirtualMachinePropertyRelation)(nil)).Elem() +} + +type VirtualMachineQuestionInfo struct { + DynamicData + + Id string `xml:"id"` + Text string `xml:"text"` + Choice ChoiceOption `xml:"choice"` + Message []VirtualMachineMessage `xml:"message,omitempty"` +} + +func init() { + t["VirtualMachineQuestionInfo"] = reflect.TypeOf((*VirtualMachineQuestionInfo)(nil)).Elem() +} + +type VirtualMachineQuickStats struct { + DynamicData + + OverallCpuUsage int32 `xml:"overallCpuUsage,omitempty"` + OverallCpuDemand int32 `xml:"overallCpuDemand,omitempty"` + GuestMemoryUsage int32 `xml:"guestMemoryUsage,omitempty"` + HostMemoryUsage int32 `xml:"hostMemoryUsage,omitempty"` + GuestHeartbeatStatus ManagedEntityStatus `xml:"guestHeartbeatStatus"` + DistributedCpuEntitlement int32 `xml:"distributedCpuEntitlement,omitempty"` + DistributedMemoryEntitlement int32 `xml:"distributedMemoryEntitlement,omitempty"` + StaticCpuEntitlement int32 `xml:"staticCpuEntitlement,omitempty"` + StaticMemoryEntitlement int32 `xml:"staticMemoryEntitlement,omitempty"` + PrivateMemory int32 `xml:"privateMemory,omitempty"` + SharedMemory int32 `xml:"sharedMemory,omitempty"` + SwappedMemory int32 `xml:"swappedMemory,omitempty"` + BalloonedMemory int32 `xml:"balloonedMemory,omitempty"` + ConsumedOverheadMemory int32 `xml:"consumedOverheadMemory,omitempty"` + FtLogBandwidth int32 `xml:"ftLogBandwidth,omitempty"` + FtSecondaryLatency int32 `xml:"ftSecondaryLatency,omitempty"` + FtLatencyStatus ManagedEntityStatus `xml:"ftLatencyStatus,omitempty"` + CompressedMemory int64 `xml:"compressedMemory,omitempty"` + UptimeSeconds int32 `xml:"uptimeSeconds,omitempty"` + SsdSwappedMemory int64 `xml:"ssdSwappedMemory,omitempty"` +} + +func init() { + t["VirtualMachineQuickStats"] = reflect.TypeOf((*VirtualMachineQuickStats)(nil)).Elem() +} + +type VirtualMachineRelocateSpec struct { + DynamicData + + Service *ServiceLocator `xml:"service,omitempty"` + Folder *ManagedObjectReference `xml:"folder,omitempty"` + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` + DiskMoveType string `xml:"diskMoveType,omitempty"` + Pool *ManagedObjectReference `xml:"pool,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Disk []VirtualMachineRelocateSpecDiskLocator `xml:"disk,omitempty"` + Transform VirtualMachineRelocateTransformation `xml:"transform,omitempty"` + DeviceChange []BaseVirtualDeviceConfigSpec `xml:"deviceChange,omitempty,typeattr"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` +} + +func init() { + t["VirtualMachineRelocateSpec"] = reflect.TypeOf((*VirtualMachineRelocateSpec)(nil)).Elem() +} + +type VirtualMachineRelocateSpecDiskLocator struct { + DynamicData + + DiskId int32 `xml:"diskId"` + 
Datastore ManagedObjectReference `xml:"datastore"` + DiskMoveType string `xml:"diskMoveType,omitempty"` + DiskBackingInfo BaseVirtualDeviceBackingInfo `xml:"diskBackingInfo,omitempty,typeattr"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` +} + +func init() { + t["VirtualMachineRelocateSpecDiskLocator"] = reflect.TypeOf((*VirtualMachineRelocateSpecDiskLocator)(nil)).Elem() +} + +type VirtualMachineRuntimeInfo struct { + DynamicData + + Device []VirtualMachineDeviceRuntimeInfo `xml:"device,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + ConnectionState VirtualMachineConnectionState `xml:"connectionState"` + PowerState VirtualMachinePowerState `xml:"powerState"` + FaultToleranceState VirtualMachineFaultToleranceState `xml:"faultToleranceState,omitempty"` + DasVmProtection *VirtualMachineRuntimeInfoDasProtectionState `xml:"dasVmProtection,omitempty"` + ToolsInstallerMounted bool `xml:"toolsInstallerMounted"` + SuspendTime *time.Time `xml:"suspendTime"` + BootTime *time.Time `xml:"bootTime"` + SuspendInterval int64 `xml:"suspendInterval,omitempty"` + Question *VirtualMachineQuestionInfo `xml:"question,omitempty"` + MemoryOverhead int64 `xml:"memoryOverhead,omitempty"` + MaxCpuUsage int32 `xml:"maxCpuUsage,omitempty"` + MaxMemoryUsage int32 `xml:"maxMemoryUsage,omitempty"` + NumMksConnections int32 `xml:"numMksConnections"` + RecordReplayState VirtualMachineRecordReplayState `xml:"recordReplayState,omitempty"` + CleanPowerOff *bool `xml:"cleanPowerOff"` + NeedSecondaryReason string `xml:"needSecondaryReason,omitempty"` + OnlineStandby *bool `xml:"onlineStandby"` + MinRequiredEVCModeKey string `xml:"minRequiredEVCModeKey,omitempty"` + ConsolidationNeeded *bool `xml:"consolidationNeeded"` + OfflineFeatureRequirement []VirtualMachineFeatureRequirement `xml:"offlineFeatureRequirement,omitempty"` + FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty"` + FeatureMask []HostFeatureMask `xml:"featureMask,omitempty"` + VFlashCacheAllocation int64 `xml:"vFlashCacheAllocation,omitempty"` + Paused *bool `xml:"paused"` + SnapshotInBackground *bool `xml:"snapshotInBackground"` + QuiescedForkParent *bool `xml:"quiescedForkParent"` + InstantCloneFrozen *bool `xml:"instantCloneFrozen"` + CryptoState string `xml:"cryptoState,omitempty"` +} + +func init() { + t["VirtualMachineRuntimeInfo"] = reflect.TypeOf((*VirtualMachineRuntimeInfo)(nil)).Elem() +} + +type VirtualMachineRuntimeInfoDasProtectionState struct { + DynamicData + + DasProtected bool `xml:"dasProtected"` +} + +func init() { + t["VirtualMachineRuntimeInfoDasProtectionState"] = reflect.TypeOf((*VirtualMachineRuntimeInfoDasProtectionState)(nil)).Elem() +} + +type VirtualMachineScsiDiskDeviceInfo struct { + VirtualMachineDiskDeviceInfo + + Disk *HostScsiDisk `xml:"disk,omitempty"` + TransportHint string `xml:"transportHint,omitempty"` + LunNumber int32 `xml:"lunNumber,omitempty"` +} + +func init() { + t["VirtualMachineScsiDiskDeviceInfo"] = reflect.TypeOf((*VirtualMachineScsiDiskDeviceInfo)(nil)).Elem() +} + +type VirtualMachineScsiPassthroughInfo struct { + VirtualMachineTargetInfo + + ScsiClass string `xml:"scsiClass"` + Vendor string `xml:"vendor"` + PhysicalUnitNumber int32 `xml:"physicalUnitNumber"` +} + +func init() { + t["VirtualMachineScsiPassthroughInfo"] = reflect.TypeOf((*VirtualMachineScsiPassthroughInfo)(nil)).Elem() +} + +type VirtualMachineSerialInfo struct { + VirtualMachineTargetInfo +} + +func init() { + t["VirtualMachineSerialInfo"] = 
reflect.TypeOf((*VirtualMachineSerialInfo)(nil)).Elem() +} + +type VirtualMachineSnapshotInfo struct { + DynamicData + + CurrentSnapshot *ManagedObjectReference `xml:"currentSnapshot,omitempty"` + RootSnapshotList []VirtualMachineSnapshotTree `xml:"rootSnapshotList"` +} + +func init() { + t["VirtualMachineSnapshotInfo"] = reflect.TypeOf((*VirtualMachineSnapshotInfo)(nil)).Elem() +} + +type VirtualMachineSnapshotTree struct { + DynamicData + + Snapshot ManagedObjectReference `xml:"snapshot"` + Vm ManagedObjectReference `xml:"vm"` + Name string `xml:"name"` + Description string `xml:"description"` + Id int32 `xml:"id,omitempty"` + CreateTime time.Time `xml:"createTime"` + State VirtualMachinePowerState `xml:"state"` + Quiesced bool `xml:"quiesced"` + BackupManifest string `xml:"backupManifest,omitempty"` + ChildSnapshotList []VirtualMachineSnapshotTree `xml:"childSnapshotList,omitempty"` + ReplaySupported *bool `xml:"replaySupported"` +} + +func init() { + t["VirtualMachineSnapshotTree"] = reflect.TypeOf((*VirtualMachineSnapshotTree)(nil)).Elem() +} + +type VirtualMachineSoundInfo struct { + VirtualMachineTargetInfo +} + +func init() { + t["VirtualMachineSoundInfo"] = reflect.TypeOf((*VirtualMachineSoundInfo)(nil)).Elem() +} + +type VirtualMachineSriovDevicePoolInfo struct { + DynamicData + + Key string `xml:"key"` +} + +func init() { + t["VirtualMachineSriovDevicePoolInfo"] = reflect.TypeOf((*VirtualMachineSriovDevicePoolInfo)(nil)).Elem() +} + +type VirtualMachineSriovInfo struct { + VirtualMachinePciPassthroughInfo + + VirtualFunction bool `xml:"virtualFunction"` + Pnic string `xml:"pnic,omitempty"` + DevicePool BaseVirtualMachineSriovDevicePoolInfo `xml:"devicePool,omitempty,typeattr"` +} + +func init() { + t["VirtualMachineSriovInfo"] = reflect.TypeOf((*VirtualMachineSriovInfo)(nil)).Elem() +} + +type VirtualMachineSriovNetworkDevicePoolInfo struct { + VirtualMachineSriovDevicePoolInfo + + SwitchKey string `xml:"switchKey,omitempty"` + SwitchUuid string `xml:"switchUuid,omitempty"` +} + +func init() { + t["VirtualMachineSriovNetworkDevicePoolInfo"] = reflect.TypeOf((*VirtualMachineSriovNetworkDevicePoolInfo)(nil)).Elem() +} + +type VirtualMachineStorageInfo struct { + DynamicData + + PerDatastoreUsage []VirtualMachineUsageOnDatastore `xml:"perDatastoreUsage,omitempty"` + Timestamp time.Time `xml:"timestamp"` +} + +func init() { + t["VirtualMachineStorageInfo"] = reflect.TypeOf((*VirtualMachineStorageInfo)(nil)).Elem() +} + +type VirtualMachineStorageSummary struct { + DynamicData + + Committed int64 `xml:"committed"` + Uncommitted int64 `xml:"uncommitted"` + Unshared int64 `xml:"unshared"` + Timestamp time.Time `xml:"timestamp"` +} + +func init() { + t["VirtualMachineStorageSummary"] = reflect.TypeOf((*VirtualMachineStorageSummary)(nil)).Elem() +} + +type VirtualMachineSummary struct { + DynamicData + + Vm *ManagedObjectReference `xml:"vm,omitempty"` + Runtime VirtualMachineRuntimeInfo `xml:"runtime"` + Guest *VirtualMachineGuestSummary `xml:"guest,omitempty"` + Config VirtualMachineConfigSummary `xml:"config"` + Storage *VirtualMachineStorageSummary `xml:"storage,omitempty"` + QuickStats VirtualMachineQuickStats `xml:"quickStats"` + OverallStatus ManagedEntityStatus `xml:"overallStatus"` + CustomValue []BaseCustomFieldValue `xml:"customValue,omitempty,typeattr"` +} + +func init() { + t["VirtualMachineSummary"] = reflect.TypeOf((*VirtualMachineSummary)(nil)).Elem() +} + +type VirtualMachineTargetInfo struct { + DynamicData + + Name string `xml:"name"` + ConfigurationTag []string 
`xml:"configurationTag,omitempty"` +} + +func init() { + t["VirtualMachineTargetInfo"] = reflect.TypeOf((*VirtualMachineTargetInfo)(nil)).Elem() +} + +type VirtualMachineTicket struct { + DynamicData + + Ticket string `xml:"ticket"` + CfgFile string `xml:"cfgFile"` + Host string `xml:"host,omitempty"` + Port int32 `xml:"port,omitempty"` + SslThumbprint string `xml:"sslThumbprint,omitempty"` +} + +func init() { + t["VirtualMachineTicket"] = reflect.TypeOf((*VirtualMachineTicket)(nil)).Elem() +} + +type VirtualMachineUsageOnDatastore struct { + DynamicData + + Datastore ManagedObjectReference `xml:"datastore"` + Committed int64 `xml:"committed"` + Uncommitted int64 `xml:"uncommitted"` + Unshared int64 `xml:"unshared"` +} + +func init() { + t["VirtualMachineUsageOnDatastore"] = reflect.TypeOf((*VirtualMachineUsageOnDatastore)(nil)).Elem() +} + +type VirtualMachineUsbInfo struct { + VirtualMachineTargetInfo + + Description string `xml:"description"` + Vendor int32 `xml:"vendor"` + Product int32 `xml:"product"` + PhysicalPath string `xml:"physicalPath"` + Family []string `xml:"family,omitempty"` + Speed []string `xml:"speed,omitempty"` + Summary *VirtualMachineSummary `xml:"summary,omitempty"` +} + +func init() { + t["VirtualMachineUsbInfo"] = reflect.TypeOf((*VirtualMachineUsbInfo)(nil)).Elem() +} + +type VirtualMachineVFlashModuleInfo struct { + VirtualMachineTargetInfo + + VFlashModule HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption `xml:"vFlashModule"` +} + +func init() { + t["VirtualMachineVFlashModuleInfo"] = reflect.TypeOf((*VirtualMachineVFlashModuleInfo)(nil)).Elem() +} + +type VirtualMachineVMCIDevice struct { + VirtualDevice + + Id int64 `xml:"id,omitempty"` + AllowUnrestrictedCommunication *bool `xml:"allowUnrestrictedCommunication"` + FilterEnable *bool `xml:"filterEnable"` + FilterInfo *VirtualMachineVMCIDeviceFilterInfo `xml:"filterInfo,omitempty"` +} + +func init() { + t["VirtualMachineVMCIDevice"] = reflect.TypeOf((*VirtualMachineVMCIDevice)(nil)).Elem() +} + +type VirtualMachineVMCIDeviceFilterInfo struct { + DynamicData + + Filters []VirtualMachineVMCIDeviceFilterSpec `xml:"filters,omitempty"` +} + +func init() { + t["VirtualMachineVMCIDeviceFilterInfo"] = reflect.TypeOf((*VirtualMachineVMCIDeviceFilterInfo)(nil)).Elem() +} + +type VirtualMachineVMCIDeviceFilterSpec struct { + DynamicData + + Rank int64 `xml:"rank"` + Action string `xml:"action"` + Protocol string `xml:"protocol"` + Direction string `xml:"direction"` + LowerDstPortBoundary int64 `xml:"lowerDstPortBoundary,omitempty"` + UpperDstPortBoundary int64 `xml:"upperDstPortBoundary,omitempty"` +} + +func init() { + t["VirtualMachineVMCIDeviceFilterSpec"] = reflect.TypeOf((*VirtualMachineVMCIDeviceFilterSpec)(nil)).Elem() +} + +type VirtualMachineVMCIDeviceOption struct { + VirtualDeviceOption + + AllowUnrestrictedCommunication BoolOption `xml:"allowUnrestrictedCommunication"` + FilterSpecOption *VirtualMachineVMCIDeviceOptionFilterSpecOption `xml:"filterSpecOption,omitempty"` + FilterSupported *BoolOption `xml:"filterSupported,omitempty"` +} + +func init() { + t["VirtualMachineVMCIDeviceOption"] = reflect.TypeOf((*VirtualMachineVMCIDeviceOption)(nil)).Elem() +} + +type VirtualMachineVMCIDeviceOptionFilterSpecOption struct { + DynamicData + + Action ChoiceOption `xml:"action"` + Protocol ChoiceOption `xml:"protocol"` + Direction ChoiceOption `xml:"direction"` + LowerDstPortBoundary LongOption `xml:"lowerDstPortBoundary"` + UpperDstPortBoundary LongOption `xml:"upperDstPortBoundary"` +} + +func init() { + 
t["VirtualMachineVMCIDeviceOptionFilterSpecOption"] = reflect.TypeOf((*VirtualMachineVMCIDeviceOptionFilterSpecOption)(nil)).Elem() +} + +type VirtualMachineVMIROM struct { + VirtualDevice +} + +func init() { + t["VirtualMachineVMIROM"] = reflect.TypeOf((*VirtualMachineVMIROM)(nil)).Elem() +} + +type VirtualMachineVideoCard struct { + VirtualDevice + + VideoRamSizeInKB int64 `xml:"videoRamSizeInKB,omitempty"` + NumDisplays int32 `xml:"numDisplays,omitempty"` + UseAutoDetect *bool `xml:"useAutoDetect"` + Enable3DSupport *bool `xml:"enable3DSupport"` + Use3dRenderer string `xml:"use3dRenderer,omitempty"` + GraphicsMemorySizeInKB int64 `xml:"graphicsMemorySizeInKB,omitempty"` +} + +func init() { + t["VirtualMachineVideoCard"] = reflect.TypeOf((*VirtualMachineVideoCard)(nil)).Elem() +} + +type VirtualMachineWindowsQuiesceSpec struct { + VirtualMachineGuestQuiesceSpec + + VssBackupType int32 `xml:"vssBackupType,omitempty"` + VssBootableSystemState *bool `xml:"vssBootableSystemState"` + VssPartialFileSupport *bool `xml:"vssPartialFileSupport"` + VssBackupContext string `xml:"vssBackupContext,omitempty"` +} + +func init() { + t["VirtualMachineWindowsQuiesceSpec"] = reflect.TypeOf((*VirtualMachineWindowsQuiesceSpec)(nil)).Elem() +} + +type VirtualMachineWipeResult struct { + DynamicData + + DiskId int32 `xml:"diskId"` + ShrinkableDiskSpace int64 `xml:"shrinkableDiskSpace"` +} + +func init() { + t["VirtualMachineWipeResult"] = reflect.TypeOf((*VirtualMachineWipeResult)(nil)).Elem() +} + +type VirtualNVDIMM struct { + VirtualDevice + + CapacityInMB int64 `xml:"capacityInMB"` +} + +func init() { + t["VirtualNVDIMM"] = reflect.TypeOf((*VirtualNVDIMM)(nil)).Elem() +} + +type VirtualNVDIMMBackingInfo struct { + VirtualDeviceFileBackingInfo + + Parent *VirtualNVDIMMBackingInfo `xml:"parent,omitempty"` + ChangeId string `xml:"changeId,omitempty"` +} + +func init() { + t["VirtualNVDIMMBackingInfo"] = reflect.TypeOf((*VirtualNVDIMMBackingInfo)(nil)).Elem() +} + +type VirtualNVDIMMController struct { + VirtualController +} + +func init() { + t["VirtualNVDIMMController"] = reflect.TypeOf((*VirtualNVDIMMController)(nil)).Elem() +} + +type VirtualNVDIMMControllerOption struct { + VirtualControllerOption + + NumNVDIMMControllers IntOption `xml:"numNVDIMMControllers"` +} + +func init() { + t["VirtualNVDIMMControllerOption"] = reflect.TypeOf((*VirtualNVDIMMControllerOption)(nil)).Elem() +} + +type VirtualNVDIMMOption struct { + VirtualDeviceOption + + CapacityInMB LongOption `xml:"capacityInMB"` + Growable bool `xml:"growable"` + HotGrowable bool `xml:"hotGrowable"` + GranularityInMB int64 `xml:"granularityInMB"` +} + +func init() { + t["VirtualNVDIMMOption"] = reflect.TypeOf((*VirtualNVDIMMOption)(nil)).Elem() +} + +type VirtualNVMEController struct { + VirtualController +} + +func init() { + t["VirtualNVMEController"] = reflect.TypeOf((*VirtualNVMEController)(nil)).Elem() +} + +type VirtualNVMEControllerOption struct { + VirtualControllerOption + + NumNVMEDisks IntOption `xml:"numNVMEDisks"` +} + +func init() { + t["VirtualNVMEControllerOption"] = reflect.TypeOf((*VirtualNVMEControllerOption)(nil)).Elem() +} + +type VirtualNicManagerNetConfig struct { + DynamicData + + NicType string `xml:"nicType"` + MultiSelectAllowed bool `xml:"multiSelectAllowed"` + CandidateVnic []HostVirtualNic `xml:"candidateVnic,omitempty"` + SelectedVnic []string `xml:"selectedVnic,omitempty"` +} + +func init() { + t["VirtualNicManagerNetConfig"] = reflect.TypeOf((*VirtualNicManagerNetConfig)(nil)).Elem() +} + +type VirtualPCIController 
struct { + VirtualController +} + +func init() { + t["VirtualPCIController"] = reflect.TypeOf((*VirtualPCIController)(nil)).Elem() +} + +type VirtualPCIControllerOption struct { + VirtualControllerOption + + NumSCSIControllers IntOption `xml:"numSCSIControllers"` + NumEthernetCards IntOption `xml:"numEthernetCards"` + NumVideoCards IntOption `xml:"numVideoCards"` + NumSoundCards IntOption `xml:"numSoundCards"` + NumVmiRoms IntOption `xml:"numVmiRoms"` + NumVmciDevices *IntOption `xml:"numVmciDevices,omitempty"` + NumPCIPassthroughDevices *IntOption `xml:"numPCIPassthroughDevices,omitempty"` + NumSasSCSIControllers *IntOption `xml:"numSasSCSIControllers,omitempty"` + NumVmxnet3EthernetCards *IntOption `xml:"numVmxnet3EthernetCards,omitempty"` + NumParaVirtualSCSIControllers *IntOption `xml:"numParaVirtualSCSIControllers,omitempty"` + NumSATAControllers *IntOption `xml:"numSATAControllers,omitempty"` + NumNVMEControllers *IntOption `xml:"numNVMEControllers,omitempty"` + NumVmxnet3VrdmaEthernetCards *IntOption `xml:"numVmxnet3VrdmaEthernetCards,omitempty"` +} + +func init() { + t["VirtualPCIControllerOption"] = reflect.TypeOf((*VirtualPCIControllerOption)(nil)).Elem() +} + +type VirtualPCIPassthrough struct { + VirtualDevice +} + +func init() { + t["VirtualPCIPassthrough"] = reflect.TypeOf((*VirtualPCIPassthrough)(nil)).Elem() +} + +type VirtualPCIPassthroughDeviceBackingInfo struct { + VirtualDeviceDeviceBackingInfo + + Id string `xml:"id"` + DeviceId string `xml:"deviceId"` + SystemId string `xml:"systemId"` + VendorId int16 `xml:"vendorId"` +} + +func init() { + t["VirtualPCIPassthroughDeviceBackingInfo"] = reflect.TypeOf((*VirtualPCIPassthroughDeviceBackingInfo)(nil)).Elem() +} + +type VirtualPCIPassthroughDeviceBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualPCIPassthroughDeviceBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughDeviceBackingOption)(nil)).Elem() +} + +type VirtualPCIPassthroughOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualPCIPassthroughOption"] = reflect.TypeOf((*VirtualPCIPassthroughOption)(nil)).Elem() +} + +type VirtualPCIPassthroughPluginBackingInfo struct { + VirtualDeviceBackingInfo +} + +func init() { + t["VirtualPCIPassthroughPluginBackingInfo"] = reflect.TypeOf((*VirtualPCIPassthroughPluginBackingInfo)(nil)).Elem() +} + +type VirtualPCIPassthroughPluginBackingOption struct { + VirtualDeviceBackingOption +} + +func init() { + t["VirtualPCIPassthroughPluginBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughPluginBackingOption)(nil)).Elem() +} + +type VirtualPCIPassthroughVmiopBackingInfo struct { + VirtualPCIPassthroughPluginBackingInfo + + Vgpu string `xml:"vgpu,omitempty"` +} + +func init() { + t["VirtualPCIPassthroughVmiopBackingInfo"] = reflect.TypeOf((*VirtualPCIPassthroughVmiopBackingInfo)(nil)).Elem() +} + +type VirtualPCIPassthroughVmiopBackingOption struct { + VirtualPCIPassthroughPluginBackingOption + + Vgpu StringOption `xml:"vgpu"` + MaxInstances int32 `xml:"maxInstances"` +} + +func init() { + t["VirtualPCIPassthroughVmiopBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughVmiopBackingOption)(nil)).Elem() +} + +type VirtualPCNet32 struct { + VirtualEthernetCard +} + +func init() { + t["VirtualPCNet32"] = reflect.TypeOf((*VirtualPCNet32)(nil)).Elem() +} + +type VirtualPCNet32Option struct { + VirtualEthernetCardOption + + SupportsMorphing bool `xml:"supportsMorphing"` +} + +func init() { + t["VirtualPCNet32Option"] = reflect.TypeOf((*VirtualPCNet32Option)(nil)).Elem() +} + 
+type VirtualPS2Controller struct { + VirtualController +} + +func init() { + t["VirtualPS2Controller"] = reflect.TypeOf((*VirtualPS2Controller)(nil)).Elem() +} + +type VirtualPS2ControllerOption struct { + VirtualControllerOption + + NumKeyboards IntOption `xml:"numKeyboards"` + NumPointingDevices IntOption `xml:"numPointingDevices"` +} + +func init() { + t["VirtualPS2ControllerOption"] = reflect.TypeOf((*VirtualPS2ControllerOption)(nil)).Elem() +} + +type VirtualParallelPort struct { + VirtualDevice +} + +func init() { + t["VirtualParallelPort"] = reflect.TypeOf((*VirtualParallelPort)(nil)).Elem() +} + +type VirtualParallelPortDeviceBackingInfo struct { + VirtualDeviceDeviceBackingInfo +} + +func init() { + t["VirtualParallelPortDeviceBackingInfo"] = reflect.TypeOf((*VirtualParallelPortDeviceBackingInfo)(nil)).Elem() +} + +type VirtualParallelPortDeviceBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualParallelPortDeviceBackingOption"] = reflect.TypeOf((*VirtualParallelPortDeviceBackingOption)(nil)).Elem() +} + +type VirtualParallelPortFileBackingInfo struct { + VirtualDeviceFileBackingInfo +} + +func init() { + t["VirtualParallelPortFileBackingInfo"] = reflect.TypeOf((*VirtualParallelPortFileBackingInfo)(nil)).Elem() +} + +type VirtualParallelPortFileBackingOption struct { + VirtualDeviceFileBackingOption +} + +func init() { + t["VirtualParallelPortFileBackingOption"] = reflect.TypeOf((*VirtualParallelPortFileBackingOption)(nil)).Elem() +} + +type VirtualParallelPortOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualParallelPortOption"] = reflect.TypeOf((*VirtualParallelPortOption)(nil)).Elem() +} + +type VirtualPointingDevice struct { + VirtualDevice +} + +func init() { + t["VirtualPointingDevice"] = reflect.TypeOf((*VirtualPointingDevice)(nil)).Elem() +} + +type VirtualPointingDeviceBackingOption struct { + VirtualDeviceDeviceBackingOption + + HostPointingDevice ChoiceOption `xml:"hostPointingDevice"` +} + +func init() { + t["VirtualPointingDeviceBackingOption"] = reflect.TypeOf((*VirtualPointingDeviceBackingOption)(nil)).Elem() +} + +type VirtualPointingDeviceDeviceBackingInfo struct { + VirtualDeviceDeviceBackingInfo + + HostPointingDevice string `xml:"hostPointingDevice"` +} + +func init() { + t["VirtualPointingDeviceDeviceBackingInfo"] = reflect.TypeOf((*VirtualPointingDeviceDeviceBackingInfo)(nil)).Elem() +} + +type VirtualPointingDeviceOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualPointingDeviceOption"] = reflect.TypeOf((*VirtualPointingDeviceOption)(nil)).Elem() +} + +type VirtualSATAController struct { + VirtualController +} + +func init() { + t["VirtualSATAController"] = reflect.TypeOf((*VirtualSATAController)(nil)).Elem() +} + +type VirtualSATAControllerOption struct { + VirtualControllerOption + + NumSATADisks IntOption `xml:"numSATADisks"` + NumSATACdroms IntOption `xml:"numSATACdroms"` +} + +func init() { + t["VirtualSATAControllerOption"] = reflect.TypeOf((*VirtualSATAControllerOption)(nil)).Elem() +} + +type VirtualSCSIController struct { + VirtualController + + HotAddRemove *bool `xml:"hotAddRemove"` + SharedBus VirtualSCSISharing `xml:"sharedBus"` + ScsiCtlrUnitNumber int32 `xml:"scsiCtlrUnitNumber,omitempty"` +} + +func init() { + t["VirtualSCSIController"] = reflect.TypeOf((*VirtualSCSIController)(nil)).Elem() +} + +type VirtualSCSIControllerOption struct { + VirtualControllerOption + + NumSCSIDisks IntOption `xml:"numSCSIDisks"` + NumSCSICdroms IntOption `xml:"numSCSICdroms"` + 
NumSCSIPassthrough IntOption `xml:"numSCSIPassthrough"` + Sharing []VirtualSCSISharing `xml:"sharing"` + DefaultSharedIndex int32 `xml:"defaultSharedIndex"` + HotAddRemove BoolOption `xml:"hotAddRemove"` + ScsiCtlrUnitNumber int32 `xml:"scsiCtlrUnitNumber"` +} + +func init() { + t["VirtualSCSIControllerOption"] = reflect.TypeOf((*VirtualSCSIControllerOption)(nil)).Elem() +} + +type VirtualSCSIPassthrough struct { + VirtualDevice +} + +func init() { + t["VirtualSCSIPassthrough"] = reflect.TypeOf((*VirtualSCSIPassthrough)(nil)).Elem() +} + +type VirtualSCSIPassthroughDeviceBackingInfo struct { + VirtualDeviceDeviceBackingInfo +} + +func init() { + t["VirtualSCSIPassthroughDeviceBackingInfo"] = reflect.TypeOf((*VirtualSCSIPassthroughDeviceBackingInfo)(nil)).Elem() +} + +type VirtualSCSIPassthroughDeviceBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualSCSIPassthroughDeviceBackingOption"] = reflect.TypeOf((*VirtualSCSIPassthroughDeviceBackingOption)(nil)).Elem() +} + +type VirtualSCSIPassthroughOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualSCSIPassthroughOption"] = reflect.TypeOf((*VirtualSCSIPassthroughOption)(nil)).Elem() +} + +type VirtualSIOController struct { + VirtualController +} + +func init() { + t["VirtualSIOController"] = reflect.TypeOf((*VirtualSIOController)(nil)).Elem() +} + +type VirtualSIOControllerOption struct { + VirtualControllerOption + + NumFloppyDrives IntOption `xml:"numFloppyDrives"` + NumSerialPorts IntOption `xml:"numSerialPorts"` + NumParallelPorts IntOption `xml:"numParallelPorts"` +} + +func init() { + t["VirtualSIOControllerOption"] = reflect.TypeOf((*VirtualSIOControllerOption)(nil)).Elem() +} + +type VirtualSerialPort struct { + VirtualDevice + + YieldOnPoll bool `xml:"yieldOnPoll"` +} + +func init() { + t["VirtualSerialPort"] = reflect.TypeOf((*VirtualSerialPort)(nil)).Elem() +} + +type VirtualSerialPortDeviceBackingInfo struct { + VirtualDeviceDeviceBackingInfo +} + +func init() { + t["VirtualSerialPortDeviceBackingInfo"] = reflect.TypeOf((*VirtualSerialPortDeviceBackingInfo)(nil)).Elem() +} + +type VirtualSerialPortDeviceBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualSerialPortDeviceBackingOption"] = reflect.TypeOf((*VirtualSerialPortDeviceBackingOption)(nil)).Elem() +} + +type VirtualSerialPortFileBackingInfo struct { + VirtualDeviceFileBackingInfo +} + +func init() { + t["VirtualSerialPortFileBackingInfo"] = reflect.TypeOf((*VirtualSerialPortFileBackingInfo)(nil)).Elem() +} + +type VirtualSerialPortFileBackingOption struct { + VirtualDeviceFileBackingOption +} + +func init() { + t["VirtualSerialPortFileBackingOption"] = reflect.TypeOf((*VirtualSerialPortFileBackingOption)(nil)).Elem() +} + +type VirtualSerialPortOption struct { + VirtualDeviceOption + + YieldOnPoll BoolOption `xml:"yieldOnPoll"` +} + +func init() { + t["VirtualSerialPortOption"] = reflect.TypeOf((*VirtualSerialPortOption)(nil)).Elem() +} + +type VirtualSerialPortPipeBackingInfo struct { + VirtualDevicePipeBackingInfo + + Endpoint string `xml:"endpoint"` + NoRxLoss *bool `xml:"noRxLoss"` +} + +func init() { + t["VirtualSerialPortPipeBackingInfo"] = reflect.TypeOf((*VirtualSerialPortPipeBackingInfo)(nil)).Elem() +} + +type VirtualSerialPortPipeBackingOption struct { + VirtualDevicePipeBackingOption + + Endpoint ChoiceOption `xml:"endpoint"` + NoRxLoss BoolOption `xml:"noRxLoss"` +} + +func init() { + t["VirtualSerialPortPipeBackingOption"] = 
reflect.TypeOf((*VirtualSerialPortPipeBackingOption)(nil)).Elem() +} + +type VirtualSerialPortThinPrintBackingInfo struct { + VirtualDeviceBackingInfo +} + +func init() { + t["VirtualSerialPortThinPrintBackingInfo"] = reflect.TypeOf((*VirtualSerialPortThinPrintBackingInfo)(nil)).Elem() +} + +type VirtualSerialPortThinPrintBackingOption struct { + VirtualDeviceBackingOption +} + +func init() { + t["VirtualSerialPortThinPrintBackingOption"] = reflect.TypeOf((*VirtualSerialPortThinPrintBackingOption)(nil)).Elem() +} + +type VirtualSerialPortURIBackingInfo struct { + VirtualDeviceURIBackingInfo +} + +func init() { + t["VirtualSerialPortURIBackingInfo"] = reflect.TypeOf((*VirtualSerialPortURIBackingInfo)(nil)).Elem() +} + +type VirtualSerialPortURIBackingOption struct { + VirtualDeviceURIBackingOption +} + +func init() { + t["VirtualSerialPortURIBackingOption"] = reflect.TypeOf((*VirtualSerialPortURIBackingOption)(nil)).Elem() +} + +type VirtualSoundBlaster16 struct { + VirtualSoundCard +} + +func init() { + t["VirtualSoundBlaster16"] = reflect.TypeOf((*VirtualSoundBlaster16)(nil)).Elem() +} + +type VirtualSoundBlaster16Option struct { + VirtualSoundCardOption +} + +func init() { + t["VirtualSoundBlaster16Option"] = reflect.TypeOf((*VirtualSoundBlaster16Option)(nil)).Elem() +} + +type VirtualSoundCard struct { + VirtualDevice +} + +func init() { + t["VirtualSoundCard"] = reflect.TypeOf((*VirtualSoundCard)(nil)).Elem() +} + +type VirtualSoundCardDeviceBackingInfo struct { + VirtualDeviceDeviceBackingInfo +} + +func init() { + t["VirtualSoundCardDeviceBackingInfo"] = reflect.TypeOf((*VirtualSoundCardDeviceBackingInfo)(nil)).Elem() +} + +type VirtualSoundCardDeviceBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualSoundCardDeviceBackingOption"] = reflect.TypeOf((*VirtualSoundCardDeviceBackingOption)(nil)).Elem() +} + +type VirtualSoundCardOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualSoundCardOption"] = reflect.TypeOf((*VirtualSoundCardOption)(nil)).Elem() +} + +type VirtualSriovEthernetCard struct { + VirtualEthernetCard + + AllowGuestOSMtuChange *bool `xml:"allowGuestOSMtuChange"` + SriovBacking *VirtualSriovEthernetCardSriovBackingInfo `xml:"sriovBacking,omitempty"` +} + +func init() { + t["VirtualSriovEthernetCard"] = reflect.TypeOf((*VirtualSriovEthernetCard)(nil)).Elem() +} + +type VirtualSriovEthernetCardOption struct { + VirtualEthernetCardOption +} + +func init() { + t["VirtualSriovEthernetCardOption"] = reflect.TypeOf((*VirtualSriovEthernetCardOption)(nil)).Elem() +} + +type VirtualSriovEthernetCardSriovBackingInfo struct { + VirtualDeviceBackingInfo + + PhysicalFunctionBacking *VirtualPCIPassthroughDeviceBackingInfo `xml:"physicalFunctionBacking,omitempty"` + VirtualFunctionBacking *VirtualPCIPassthroughDeviceBackingInfo `xml:"virtualFunctionBacking,omitempty"` + VirtualFunctionIndex int32 `xml:"virtualFunctionIndex,omitempty"` +} + +func init() { + t["VirtualSriovEthernetCardSriovBackingInfo"] = reflect.TypeOf((*VirtualSriovEthernetCardSriovBackingInfo)(nil)).Elem() +} + +type VirtualSriovEthernetCardSriovBackingOption struct { + VirtualDeviceBackingOption +} + +func init() { + t["VirtualSriovEthernetCardSriovBackingOption"] = reflect.TypeOf((*VirtualSriovEthernetCardSriovBackingOption)(nil)).Elem() +} + +type VirtualSwitchProfile struct { + ApplyProfile + + Key string `xml:"key"` + Name string `xml:"name"` + Link LinkProfile `xml:"link"` + NumPorts NumPortsProfile `xml:"numPorts"` + NetworkPolicy NetworkPolicyProfile 
`xml:"networkPolicy"` +} + +func init() { + t["VirtualSwitchProfile"] = reflect.TypeOf((*VirtualSwitchProfile)(nil)).Elem() +} + +type VirtualSwitchSelectionProfile struct { + ApplyProfile +} + +func init() { + t["VirtualSwitchSelectionProfile"] = reflect.TypeOf((*VirtualSwitchSelectionProfile)(nil)).Elem() +} + +type VirtualTPM struct { + VirtualDevice + + EndorsementKeyCertificateSigningRequest [][]byte `xml:"endorsementKeyCertificateSigningRequest,omitempty"` + EndorsementKeyCertificate [][]byte `xml:"endorsementKeyCertificate,omitempty"` +} + +func init() { + t["VirtualTPM"] = reflect.TypeOf((*VirtualTPM)(nil)).Elem() +} + +type VirtualTPMOption struct { + VirtualDeviceOption + + SupportedFirmware []string `xml:"supportedFirmware,omitempty"` +} + +func init() { + t["VirtualTPMOption"] = reflect.TypeOf((*VirtualTPMOption)(nil)).Elem() +} + +type VirtualUSB struct { + VirtualDevice + + Connected bool `xml:"connected"` + Vendor int32 `xml:"vendor,omitempty"` + Product int32 `xml:"product,omitempty"` + Family []string `xml:"family,omitempty"` + Speed []string `xml:"speed,omitempty"` +} + +func init() { + t["VirtualUSB"] = reflect.TypeOf((*VirtualUSB)(nil)).Elem() +} + +type VirtualUSBController struct { + VirtualController + + AutoConnectDevices *bool `xml:"autoConnectDevices"` + EhciEnabled *bool `xml:"ehciEnabled"` +} + +func init() { + t["VirtualUSBController"] = reflect.TypeOf((*VirtualUSBController)(nil)).Elem() +} + +type VirtualUSBControllerOption struct { + VirtualControllerOption + + AutoConnectDevices BoolOption `xml:"autoConnectDevices"` + EhciSupported BoolOption `xml:"ehciSupported"` + SupportedSpeeds []string `xml:"supportedSpeeds,omitempty"` +} + +func init() { + t["VirtualUSBControllerOption"] = reflect.TypeOf((*VirtualUSBControllerOption)(nil)).Elem() +} + +type VirtualUSBControllerPciBusSlotInfo struct { + VirtualDevicePciBusSlotInfo + + EhciPciSlotNumber int32 `xml:"ehciPciSlotNumber,omitempty"` +} + +func init() { + t["VirtualUSBControllerPciBusSlotInfo"] = reflect.TypeOf((*VirtualUSBControllerPciBusSlotInfo)(nil)).Elem() +} + +type VirtualUSBOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualUSBOption"] = reflect.TypeOf((*VirtualUSBOption)(nil)).Elem() +} + +type VirtualUSBRemoteClientBackingInfo struct { + VirtualDeviceRemoteDeviceBackingInfo + + Hostname string `xml:"hostname"` +} + +func init() { + t["VirtualUSBRemoteClientBackingInfo"] = reflect.TypeOf((*VirtualUSBRemoteClientBackingInfo)(nil)).Elem() +} + +type VirtualUSBRemoteClientBackingOption struct { + VirtualDeviceRemoteDeviceBackingOption +} + +func init() { + t["VirtualUSBRemoteClientBackingOption"] = reflect.TypeOf((*VirtualUSBRemoteClientBackingOption)(nil)).Elem() +} + +type VirtualUSBRemoteHostBackingInfo struct { + VirtualDeviceDeviceBackingInfo + + Hostname string `xml:"hostname"` +} + +func init() { + t["VirtualUSBRemoteHostBackingInfo"] = reflect.TypeOf((*VirtualUSBRemoteHostBackingInfo)(nil)).Elem() +} + +type VirtualUSBRemoteHostBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualUSBRemoteHostBackingOption"] = reflect.TypeOf((*VirtualUSBRemoteHostBackingOption)(nil)).Elem() +} + +type VirtualUSBUSBBackingInfo struct { + VirtualDeviceDeviceBackingInfo +} + +func init() { + t["VirtualUSBUSBBackingInfo"] = reflect.TypeOf((*VirtualUSBUSBBackingInfo)(nil)).Elem() +} + +type VirtualUSBUSBBackingOption struct { + VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualUSBUSBBackingOption"] = 
reflect.TypeOf((*VirtualUSBUSBBackingOption)(nil)).Elem() +} + +type VirtualUSBXHCIController struct { + VirtualController + + AutoConnectDevices *bool `xml:"autoConnectDevices"` +} + +func init() { + t["VirtualUSBXHCIController"] = reflect.TypeOf((*VirtualUSBXHCIController)(nil)).Elem() +} + +type VirtualUSBXHCIControllerOption struct { + VirtualControllerOption + + AutoConnectDevices BoolOption `xml:"autoConnectDevices"` + SupportedSpeeds []string `xml:"supportedSpeeds"` +} + +func init() { + t["VirtualUSBXHCIControllerOption"] = reflect.TypeOf((*VirtualUSBXHCIControllerOption)(nil)).Elem() +} + +type VirtualVMIROMOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualVMIROMOption"] = reflect.TypeOf((*VirtualVMIROMOption)(nil)).Elem() +} + +type VirtualVideoCardOption struct { + VirtualDeviceOption + + VideoRamSizeInKB *LongOption `xml:"videoRamSizeInKB,omitempty"` + NumDisplays *IntOption `xml:"numDisplays,omitempty"` + UseAutoDetect *BoolOption `xml:"useAutoDetect,omitempty"` + Support3D *BoolOption `xml:"support3D,omitempty"` + Use3dRendererSupported *BoolOption `xml:"use3dRendererSupported,omitempty"` + GraphicsMemorySizeInKB *LongOption `xml:"graphicsMemorySizeInKB,omitempty"` + GraphicsMemorySizeSupported *BoolOption `xml:"graphicsMemorySizeSupported,omitempty"` +} + +func init() { + t["VirtualVideoCardOption"] = reflect.TypeOf((*VirtualVideoCardOption)(nil)).Elem() +} + +type VirtualVmxnet struct { + VirtualEthernetCard +} + +func init() { + t["VirtualVmxnet"] = reflect.TypeOf((*VirtualVmxnet)(nil)).Elem() +} + +type VirtualVmxnet2 struct { + VirtualVmxnet +} + +func init() { + t["VirtualVmxnet2"] = reflect.TypeOf((*VirtualVmxnet2)(nil)).Elem() +} + +type VirtualVmxnet2Option struct { + VirtualVmxnetOption +} + +func init() { + t["VirtualVmxnet2Option"] = reflect.TypeOf((*VirtualVmxnet2Option)(nil)).Elem() +} + +type VirtualVmxnet3 struct { + VirtualVmxnet +} + +func init() { + t["VirtualVmxnet3"] = reflect.TypeOf((*VirtualVmxnet3)(nil)).Elem() +} + +type VirtualVmxnet3Option struct { + VirtualVmxnetOption +} + +func init() { + t["VirtualVmxnet3Option"] = reflect.TypeOf((*VirtualVmxnet3Option)(nil)).Elem() +} + +type VirtualVmxnet3Vrdma struct { + VirtualVmxnet3 + + DeviceProtocol string `xml:"deviceProtocol,omitempty"` +} + +func init() { + t["VirtualVmxnet3Vrdma"] = reflect.TypeOf((*VirtualVmxnet3Vrdma)(nil)).Elem() +} + +type VirtualVmxnet3VrdmaOption struct { + VirtualVmxnet3Option + + DeviceProtocol *ChoiceOption `xml:"deviceProtocol,omitempty"` +} + +func init() { + t["VirtualVmxnet3VrdmaOption"] = reflect.TypeOf((*VirtualVmxnet3VrdmaOption)(nil)).Elem() +} + +type VirtualVmxnetOption struct { + VirtualEthernetCardOption +} + +func init() { + t["VirtualVmxnetOption"] = reflect.TypeOf((*VirtualVmxnetOption)(nil)).Elem() +} + +type VlanProfile struct { + ApplyProfile +} + +func init() { + t["VlanProfile"] = reflect.TypeOf((*VlanProfile)(nil)).Elem() +} + +type VmAcquiredMksTicketEvent struct { + VmEvent +} + +func init() { + t["VmAcquiredMksTicketEvent"] = reflect.TypeOf((*VmAcquiredMksTicketEvent)(nil)).Elem() +} + +type VmAcquiredTicketEvent struct { + VmEvent + + TicketType string `xml:"ticketType"` +} + +func init() { + t["VmAcquiredTicketEvent"] = reflect.TypeOf((*VmAcquiredTicketEvent)(nil)).Elem() +} + +type VmAlreadyExistsInDatacenter struct { + InvalidFolder + + Host ManagedObjectReference `xml:"host"` + Hostname string `xml:"hostname"` + Vm []ManagedObjectReference `xml:"vm"` +} + +func init() { + t["VmAlreadyExistsInDatacenter"] = 
reflect.TypeOf((*VmAlreadyExistsInDatacenter)(nil)).Elem() +} + +type VmAlreadyExistsInDatacenterFault VmAlreadyExistsInDatacenter + +func init() { + t["VmAlreadyExistsInDatacenterFault"] = reflect.TypeOf((*VmAlreadyExistsInDatacenterFault)(nil)).Elem() +} + +type VmAutoRenameEvent struct { + VmEvent + + OldName string `xml:"oldName"` + NewName string `xml:"newName"` +} + +func init() { + t["VmAutoRenameEvent"] = reflect.TypeOf((*VmAutoRenameEvent)(nil)).Elem() +} + +type VmBeingClonedEvent struct { + VmCloneEvent + + DestFolder FolderEventArgument `xml:"destFolder"` + DestName string `xml:"destName"` + DestHost HostEventArgument `xml:"destHost"` +} + +func init() { + t["VmBeingClonedEvent"] = reflect.TypeOf((*VmBeingClonedEvent)(nil)).Elem() +} + +type VmBeingClonedNoFolderEvent struct { + VmCloneEvent + + DestName string `xml:"destName"` + DestHost HostEventArgument `xml:"destHost"` +} + +func init() { + t["VmBeingClonedNoFolderEvent"] = reflect.TypeOf((*VmBeingClonedNoFolderEvent)(nil)).Elem() +} + +type VmBeingCreatedEvent struct { + VmEvent + + ConfigSpec *VirtualMachineConfigSpec `xml:"configSpec,omitempty"` +} + +func init() { + t["VmBeingCreatedEvent"] = reflect.TypeOf((*VmBeingCreatedEvent)(nil)).Elem() +} + +type VmBeingDeployedEvent struct { + VmEvent + + SrcTemplate VmEventArgument `xml:"srcTemplate"` +} + +func init() { + t["VmBeingDeployedEvent"] = reflect.TypeOf((*VmBeingDeployedEvent)(nil)).Elem() +} + +type VmBeingHotMigratedEvent struct { + VmEvent + + DestHost HostEventArgument `xml:"destHost"` + DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty"` + DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty"` +} + +func init() { + t["VmBeingHotMigratedEvent"] = reflect.TypeOf((*VmBeingHotMigratedEvent)(nil)).Elem() +} + +type VmBeingMigratedEvent struct { + VmEvent + + DestHost HostEventArgument `xml:"destHost"` + DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty"` + DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty"` +} + +func init() { + t["VmBeingMigratedEvent"] = reflect.TypeOf((*VmBeingMigratedEvent)(nil)).Elem() +} + +type VmBeingRelocatedEvent struct { + VmRelocateSpecEvent + + DestHost HostEventArgument `xml:"destHost"` + DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty"` + DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty"` +} + +func init() { + t["VmBeingRelocatedEvent"] = reflect.TypeOf((*VmBeingRelocatedEvent)(nil)).Elem() +} + +type VmCloneEvent struct { + VmEvent +} + +func init() { + t["VmCloneEvent"] = reflect.TypeOf((*VmCloneEvent)(nil)).Elem() +} + +type VmCloneFailedEvent struct { + VmCloneEvent + + DestFolder FolderEventArgument `xml:"destFolder"` + DestName string `xml:"destName"` + DestHost HostEventArgument `xml:"destHost"` + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmCloneFailedEvent"] = reflect.TypeOf((*VmCloneFailedEvent)(nil)).Elem() +} + +type VmClonedEvent struct { + VmCloneEvent + + SourceVm VmEventArgument `xml:"sourceVm"` +} + +func init() { + t["VmClonedEvent"] = reflect.TypeOf((*VmClonedEvent)(nil)).Elem() +} + +type VmConfigFault struct { + VimFault +} + +func init() { + t["VmConfigFault"] = reflect.TypeOf((*VmConfigFault)(nil)).Elem() +} + +type VmConfigFaultFault BaseVmConfigFault + +func init() { + t["VmConfigFaultFault"] = reflect.TypeOf((*VmConfigFaultFault)(nil)).Elem() +} + +type VmConfigFileEncryptionInfo struct { + DynamicData + + KeyId *CryptoKeyId `xml:"keyId,omitempty"` +} + +func init() { + 
t["VmConfigFileEncryptionInfo"] = reflect.TypeOf((*VmConfigFileEncryptionInfo)(nil)).Elem() +} + +type VmConfigFileInfo struct { + FileInfo + + ConfigVersion int32 `xml:"configVersion,omitempty"` + Encryption *VmConfigFileEncryptionInfo `xml:"encryption,omitempty"` +} + +func init() { + t["VmConfigFileInfo"] = reflect.TypeOf((*VmConfigFileInfo)(nil)).Elem() +} + +type VmConfigFileQuery struct { + FileQuery + + Filter *VmConfigFileQueryFilter `xml:"filter,omitempty"` + Details *VmConfigFileQueryFlags `xml:"details,omitempty"` +} + +func init() { + t["VmConfigFileQuery"] = reflect.TypeOf((*VmConfigFileQuery)(nil)).Elem() +} + +type VmConfigFileQueryFilter struct { + DynamicData + + MatchConfigVersion []int32 `xml:"matchConfigVersion,omitempty"` + Encrypted *bool `xml:"encrypted"` +} + +func init() { + t["VmConfigFileQueryFilter"] = reflect.TypeOf((*VmConfigFileQueryFilter)(nil)).Elem() +} + +type VmConfigFileQueryFlags struct { + DynamicData + + ConfigVersion bool `xml:"configVersion"` + Encryption *bool `xml:"encryption"` +} + +func init() { + t["VmConfigFileQueryFlags"] = reflect.TypeOf((*VmConfigFileQueryFlags)(nil)).Elem() +} + +type VmConfigIncompatibleForFaultTolerance struct { + VmConfigFault + + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["VmConfigIncompatibleForFaultTolerance"] = reflect.TypeOf((*VmConfigIncompatibleForFaultTolerance)(nil)).Elem() +} + +type VmConfigIncompatibleForFaultToleranceFault VmConfigIncompatibleForFaultTolerance + +func init() { + t["VmConfigIncompatibleForFaultToleranceFault"] = reflect.TypeOf((*VmConfigIncompatibleForFaultToleranceFault)(nil)).Elem() +} + +type VmConfigIncompatibleForRecordReplay struct { + VmConfigFault + + Fault *LocalizedMethodFault `xml:"fault,omitempty"` +} + +func init() { + t["VmConfigIncompatibleForRecordReplay"] = reflect.TypeOf((*VmConfigIncompatibleForRecordReplay)(nil)).Elem() +} + +type VmConfigIncompatibleForRecordReplayFault VmConfigIncompatibleForRecordReplay + +func init() { + t["VmConfigIncompatibleForRecordReplayFault"] = reflect.TypeOf((*VmConfigIncompatibleForRecordReplayFault)(nil)).Elem() +} + +type VmConfigInfo struct { + DynamicData + + Product []VAppProductInfo `xml:"product,omitempty"` + Property []VAppPropertyInfo `xml:"property,omitempty"` + IpAssignment VAppIPAssignmentInfo `xml:"ipAssignment"` + Eula []string `xml:"eula,omitempty"` + OvfSection []VAppOvfSectionInfo `xml:"ovfSection,omitempty"` + OvfEnvironmentTransport []string `xml:"ovfEnvironmentTransport,omitempty"` + InstallBootRequired bool `xml:"installBootRequired"` + InstallBootStopDelay int32 `xml:"installBootStopDelay"` +} + +func init() { + t["VmConfigInfo"] = reflect.TypeOf((*VmConfigInfo)(nil)).Elem() +} + +type VmConfigMissingEvent struct { + VmEvent +} + +func init() { + t["VmConfigMissingEvent"] = reflect.TypeOf((*VmConfigMissingEvent)(nil)).Elem() +} + +type VmConfigSpec struct { + DynamicData + + Product []VAppProductSpec `xml:"product,omitempty"` + Property []VAppPropertySpec `xml:"property,omitempty"` + IpAssignment *VAppIPAssignmentInfo `xml:"ipAssignment,omitempty"` + Eula []string `xml:"eula,omitempty"` + OvfSection []VAppOvfSectionSpec `xml:"ovfSection,omitempty"` + OvfEnvironmentTransport []string `xml:"ovfEnvironmentTransport,omitempty"` + InstallBootRequired *bool `xml:"installBootRequired"` + InstallBootStopDelay int32 `xml:"installBootStopDelay,omitempty"` +} + +func init() { + t["VmConfigSpec"] = reflect.TypeOf((*VmConfigSpec)(nil)).Elem() +} + +type VmConnectedEvent struct { + VmEvent +} + +func 
init() { + t["VmConnectedEvent"] = reflect.TypeOf((*VmConnectedEvent)(nil)).Elem() +} + +type VmCreatedEvent struct { + VmEvent +} + +func init() { + t["VmCreatedEvent"] = reflect.TypeOf((*VmCreatedEvent)(nil)).Elem() +} + +type VmDasBeingResetEvent struct { + VmEvent + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["VmDasBeingResetEvent"] = reflect.TypeOf((*VmDasBeingResetEvent)(nil)).Elem() +} + +type VmDasBeingResetWithScreenshotEvent struct { + VmDasBeingResetEvent + + ScreenshotFilePath string `xml:"screenshotFilePath"` +} + +func init() { + t["VmDasBeingResetWithScreenshotEvent"] = reflect.TypeOf((*VmDasBeingResetWithScreenshotEvent)(nil)).Elem() +} + +type VmDasResetFailedEvent struct { + VmEvent +} + +func init() { + t["VmDasResetFailedEvent"] = reflect.TypeOf((*VmDasResetFailedEvent)(nil)).Elem() +} + +type VmDasUpdateErrorEvent struct { + VmEvent +} + +func init() { + t["VmDasUpdateErrorEvent"] = reflect.TypeOf((*VmDasUpdateErrorEvent)(nil)).Elem() +} + +type VmDasUpdateOkEvent struct { + VmEvent +} + +func init() { + t["VmDasUpdateOkEvent"] = reflect.TypeOf((*VmDasUpdateOkEvent)(nil)).Elem() +} + +type VmDateRolledBackEvent struct { + VmEvent +} + +func init() { + t["VmDateRolledBackEvent"] = reflect.TypeOf((*VmDateRolledBackEvent)(nil)).Elem() +} + +type VmDeployFailedEvent struct { + VmEvent + + DestDatastore BaseEntityEventArgument `xml:"destDatastore,typeattr"` + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmDeployFailedEvent"] = reflect.TypeOf((*VmDeployFailedEvent)(nil)).Elem() +} + +type VmDeployedEvent struct { + VmEvent + + SrcTemplate VmEventArgument `xml:"srcTemplate"` +} + +func init() { + t["VmDeployedEvent"] = reflect.TypeOf((*VmDeployedEvent)(nil)).Elem() +} + +type VmDisconnectedEvent struct { + VmEvent +} + +func init() { + t["VmDisconnectedEvent"] = reflect.TypeOf((*VmDisconnectedEvent)(nil)).Elem() +} + +type VmDiscoveredEvent struct { + VmEvent +} + +func init() { + t["VmDiscoveredEvent"] = reflect.TypeOf((*VmDiscoveredEvent)(nil)).Elem() +} + +type VmDiskFailedEvent struct { + VmEvent + + Disk string `xml:"disk"` + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmDiskFailedEvent"] = reflect.TypeOf((*VmDiskFailedEvent)(nil)).Elem() +} + +type VmDiskFileEncryptionInfo struct { + DynamicData + + KeyId *CryptoKeyId `xml:"keyId,omitempty"` +} + +func init() { + t["VmDiskFileEncryptionInfo"] = reflect.TypeOf((*VmDiskFileEncryptionInfo)(nil)).Elem() +} + +type VmDiskFileInfo struct { + FileInfo + + DiskType string `xml:"diskType,omitempty"` + CapacityKb int64 `xml:"capacityKb,omitempty"` + HardwareVersion int32 `xml:"hardwareVersion,omitempty"` + ControllerType string `xml:"controllerType,omitempty"` + DiskExtents []string `xml:"diskExtents,omitempty"` + Thin *bool `xml:"thin"` + Encryption *VmDiskFileEncryptionInfo `xml:"encryption,omitempty"` +} + +func init() { + t["VmDiskFileInfo"] = reflect.TypeOf((*VmDiskFileInfo)(nil)).Elem() +} + +type VmDiskFileQuery struct { + FileQuery + + Filter *VmDiskFileQueryFilter `xml:"filter,omitempty"` + Details *VmDiskFileQueryFlags `xml:"details,omitempty"` +} + +func init() { + t["VmDiskFileQuery"] = reflect.TypeOf((*VmDiskFileQuery)(nil)).Elem() +} + +type VmDiskFileQueryFilter struct { + DynamicData + + DiskType []string `xml:"diskType,omitempty"` + MatchHardwareVersion []int32 `xml:"matchHardwareVersion,omitempty"` + ControllerType []string `xml:"controllerType,omitempty"` + Thin *bool `xml:"thin"` + Encrypted *bool `xml:"encrypted"` +} + +func init() { + 
t["VmDiskFileQueryFilter"] = reflect.TypeOf((*VmDiskFileQueryFilter)(nil)).Elem() +} + +type VmDiskFileQueryFlags struct { + DynamicData + + DiskType bool `xml:"diskType"` + CapacityKb bool `xml:"capacityKb"` + HardwareVersion bool `xml:"hardwareVersion"` + ControllerType *bool `xml:"controllerType"` + DiskExtents *bool `xml:"diskExtents"` + Thin *bool `xml:"thin"` + Encryption *bool `xml:"encryption"` +} + +func init() { + t["VmDiskFileQueryFlags"] = reflect.TypeOf((*VmDiskFileQueryFlags)(nil)).Elem() +} + +type VmEmigratingEvent struct { + VmEvent +} + +func init() { + t["VmEmigratingEvent"] = reflect.TypeOf((*VmEmigratingEvent)(nil)).Elem() +} + +type VmEndRecordingEvent struct { + VmEvent +} + +func init() { + t["VmEndRecordingEvent"] = reflect.TypeOf((*VmEndRecordingEvent)(nil)).Elem() +} + +type VmEndReplayingEvent struct { + VmEvent +} + +func init() { + t["VmEndReplayingEvent"] = reflect.TypeOf((*VmEndReplayingEvent)(nil)).Elem() +} + +type VmEvent struct { + Event + + Template bool `xml:"template"` +} + +func init() { + t["VmEvent"] = reflect.TypeOf((*VmEvent)(nil)).Elem() +} + +type VmEventArgument struct { + EntityEventArgument + + Vm ManagedObjectReference `xml:"vm"` +} + +func init() { + t["VmEventArgument"] = reflect.TypeOf((*VmEventArgument)(nil)).Elem() +} + +type VmFailedMigrateEvent struct { + VmEvent + + DestHost HostEventArgument `xml:"destHost"` + Reason LocalizedMethodFault `xml:"reason"` + DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty"` + DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty"` +} + +func init() { + t["VmFailedMigrateEvent"] = reflect.TypeOf((*VmFailedMigrateEvent)(nil)).Elem() +} + +type VmFailedRelayoutEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedRelayoutEvent"] = reflect.TypeOf((*VmFailedRelayoutEvent)(nil)).Elem() +} + +type VmFailedRelayoutOnVmfs2DatastoreEvent struct { + VmEvent +} + +func init() { + t["VmFailedRelayoutOnVmfs2DatastoreEvent"] = reflect.TypeOf((*VmFailedRelayoutOnVmfs2DatastoreEvent)(nil)).Elem() +} + +type VmFailedStartingSecondaryEvent struct { + VmEvent + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["VmFailedStartingSecondaryEvent"] = reflect.TypeOf((*VmFailedStartingSecondaryEvent)(nil)).Elem() +} + +type VmFailedToPowerOffEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedToPowerOffEvent"] = reflect.TypeOf((*VmFailedToPowerOffEvent)(nil)).Elem() +} + +type VmFailedToPowerOnEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedToPowerOnEvent"] = reflect.TypeOf((*VmFailedToPowerOnEvent)(nil)).Elem() +} + +type VmFailedToRebootGuestEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedToRebootGuestEvent"] = reflect.TypeOf((*VmFailedToRebootGuestEvent)(nil)).Elem() +} + +type VmFailedToResetEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedToResetEvent"] = reflect.TypeOf((*VmFailedToResetEvent)(nil)).Elem() +} + +type VmFailedToShutdownGuestEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedToShutdownGuestEvent"] = reflect.TypeOf((*VmFailedToShutdownGuestEvent)(nil)).Elem() +} + +type VmFailedToStandbyGuestEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedToStandbyGuestEvent"] = 
reflect.TypeOf((*VmFailedToStandbyGuestEvent)(nil)).Elem() +} + +type VmFailedToSuspendEvent struct { + VmEvent + + Reason LocalizedMethodFault `xml:"reason"` +} + +func init() { + t["VmFailedToSuspendEvent"] = reflect.TypeOf((*VmFailedToSuspendEvent)(nil)).Elem() +} + +type VmFailedUpdatingSecondaryConfig struct { + VmEvent +} + +func init() { + t["VmFailedUpdatingSecondaryConfig"] = reflect.TypeOf((*VmFailedUpdatingSecondaryConfig)(nil)).Elem() +} + +type VmFailoverFailed struct { + VmEvent + + Reason *LocalizedMethodFault `xml:"reason,omitempty"` +} + +func init() { + t["VmFailoverFailed"] = reflect.TypeOf((*VmFailoverFailed)(nil)).Elem() +} + +type VmFaultToleranceConfigIssue struct { + VmFaultToleranceIssue + + Reason string `xml:"reason,omitempty"` + EntityName string `xml:"entityName,omitempty"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["VmFaultToleranceConfigIssue"] = reflect.TypeOf((*VmFaultToleranceConfigIssue)(nil)).Elem() +} + +type VmFaultToleranceConfigIssueFault VmFaultToleranceConfigIssue + +func init() { + t["VmFaultToleranceConfigIssueFault"] = reflect.TypeOf((*VmFaultToleranceConfigIssueFault)(nil)).Elem() +} + +type VmFaultToleranceConfigIssueWrapper struct { + VmFaultToleranceIssue + + EntityName string `xml:"entityName,omitempty"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` + Error *LocalizedMethodFault `xml:"error,omitempty"` +} + +func init() { + t["VmFaultToleranceConfigIssueWrapper"] = reflect.TypeOf((*VmFaultToleranceConfigIssueWrapper)(nil)).Elem() +} + +type VmFaultToleranceConfigIssueWrapperFault VmFaultToleranceConfigIssueWrapper + +func init() { + t["VmFaultToleranceConfigIssueWrapperFault"] = reflect.TypeOf((*VmFaultToleranceConfigIssueWrapperFault)(nil)).Elem() +} + +type VmFaultToleranceInvalidFileBacking struct { + VmFaultToleranceIssue + + BackingType string `xml:"backingType,omitempty"` + BackingFilename string `xml:"backingFilename,omitempty"` +} + +func init() { + t["VmFaultToleranceInvalidFileBacking"] = reflect.TypeOf((*VmFaultToleranceInvalidFileBacking)(nil)).Elem() +} + +type VmFaultToleranceInvalidFileBackingFault VmFaultToleranceInvalidFileBacking + +func init() { + t["VmFaultToleranceInvalidFileBackingFault"] = reflect.TypeOf((*VmFaultToleranceInvalidFileBackingFault)(nil)).Elem() +} + +type VmFaultToleranceIssue struct { + VimFault +} + +func init() { + t["VmFaultToleranceIssue"] = reflect.TypeOf((*VmFaultToleranceIssue)(nil)).Elem() +} + +type VmFaultToleranceIssueFault BaseVmFaultToleranceIssue + +func init() { + t["VmFaultToleranceIssueFault"] = reflect.TypeOf((*VmFaultToleranceIssueFault)(nil)).Elem() +} + +type VmFaultToleranceOpIssuesList struct { + VmFaultToleranceIssue + + Errors []LocalizedMethodFault `xml:"errors,omitempty"` + Warnings []LocalizedMethodFault `xml:"warnings,omitempty"` +} + +func init() { + t["VmFaultToleranceOpIssuesList"] = reflect.TypeOf((*VmFaultToleranceOpIssuesList)(nil)).Elem() +} + +type VmFaultToleranceOpIssuesListFault VmFaultToleranceOpIssuesList + +func init() { + t["VmFaultToleranceOpIssuesListFault"] = reflect.TypeOf((*VmFaultToleranceOpIssuesListFault)(nil)).Elem() +} + +type VmFaultToleranceStateChangedEvent struct { + VmEvent + + OldState VirtualMachineFaultToleranceState `xml:"oldState"` + NewState VirtualMachineFaultToleranceState `xml:"newState"` +} + +func init() { + t["VmFaultToleranceStateChangedEvent"] = reflect.TypeOf((*VmFaultToleranceStateChangedEvent)(nil)).Elem() +} + +type VmFaultToleranceTooManyFtVcpusOnHost struct { + 
InsufficientResourcesFault + + HostName string `xml:"hostName,omitempty"` + MaxNumFtVcpus int32 `xml:"maxNumFtVcpus"` +} + +func init() { + t["VmFaultToleranceTooManyFtVcpusOnHost"] = reflect.TypeOf((*VmFaultToleranceTooManyFtVcpusOnHost)(nil)).Elem() +} + +type VmFaultToleranceTooManyFtVcpusOnHostFault VmFaultToleranceTooManyFtVcpusOnHost + +func init() { + t["VmFaultToleranceTooManyFtVcpusOnHostFault"] = reflect.TypeOf((*VmFaultToleranceTooManyFtVcpusOnHostFault)(nil)).Elem() +} + +type VmFaultToleranceTooManyVMsOnHost struct { + InsufficientResourcesFault + + HostName string `xml:"hostName,omitempty"` + MaxNumFtVms int32 `xml:"maxNumFtVms"` +} + +func init() { + t["VmFaultToleranceTooManyVMsOnHost"] = reflect.TypeOf((*VmFaultToleranceTooManyVMsOnHost)(nil)).Elem() +} + +type VmFaultToleranceTooManyVMsOnHostFault VmFaultToleranceTooManyVMsOnHost + +func init() { + t["VmFaultToleranceTooManyVMsOnHostFault"] = reflect.TypeOf((*VmFaultToleranceTooManyVMsOnHostFault)(nil)).Elem() +} + +type VmFaultToleranceTurnedOffEvent struct { + VmEvent +} + +func init() { + t["VmFaultToleranceTurnedOffEvent"] = reflect.TypeOf((*VmFaultToleranceTurnedOffEvent)(nil)).Elem() +} + +type VmFaultToleranceVmTerminatedEvent struct { + VmEvent + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["VmFaultToleranceVmTerminatedEvent"] = reflect.TypeOf((*VmFaultToleranceVmTerminatedEvent)(nil)).Elem() +} + +type VmGuestOSCrashedEvent struct { + VmEvent +} + +func init() { + t["VmGuestOSCrashedEvent"] = reflect.TypeOf((*VmGuestOSCrashedEvent)(nil)).Elem() +} + +type VmGuestRebootEvent struct { + VmEvent +} + +func init() { + t["VmGuestRebootEvent"] = reflect.TypeOf((*VmGuestRebootEvent)(nil)).Elem() +} + +type VmGuestShutdownEvent struct { + VmEvent +} + +func init() { + t["VmGuestShutdownEvent"] = reflect.TypeOf((*VmGuestShutdownEvent)(nil)).Elem() +} + +type VmGuestStandbyEvent struct { + VmEvent +} + +func init() { + t["VmGuestStandbyEvent"] = reflect.TypeOf((*VmGuestStandbyEvent)(nil)).Elem() +} + +type VmHealthMonitoringStateChangedEvent struct { + ClusterEvent + + State string `xml:"state"` + PrevState string `xml:"prevState,omitempty"` +} + +func init() { + t["VmHealthMonitoringStateChangedEvent"] = reflect.TypeOf((*VmHealthMonitoringStateChangedEvent)(nil)).Elem() +} + +type VmHostAffinityRuleViolation struct { + VmConfigFault + + VmName string `xml:"vmName"` + HostName string `xml:"hostName"` +} + +func init() { + t["VmHostAffinityRuleViolation"] = reflect.TypeOf((*VmHostAffinityRuleViolation)(nil)).Elem() +} + +type VmHostAffinityRuleViolationFault VmHostAffinityRuleViolation + +func init() { + t["VmHostAffinityRuleViolationFault"] = reflect.TypeOf((*VmHostAffinityRuleViolationFault)(nil)).Elem() +} + +type VmInstanceUuidAssignedEvent struct { + VmEvent + + InstanceUuid string `xml:"instanceUuid"` +} + +func init() { + t["VmInstanceUuidAssignedEvent"] = reflect.TypeOf((*VmInstanceUuidAssignedEvent)(nil)).Elem() +} + +type VmInstanceUuidChangedEvent struct { + VmEvent + + OldInstanceUuid string `xml:"oldInstanceUuid"` + NewInstanceUuid string `xml:"newInstanceUuid"` +} + +func init() { + t["VmInstanceUuidChangedEvent"] = reflect.TypeOf((*VmInstanceUuidChangedEvent)(nil)).Elem() +} + +type VmInstanceUuidConflictEvent struct { + VmEvent + + ConflictedVm VmEventArgument `xml:"conflictedVm"` + InstanceUuid string `xml:"instanceUuid"` +} + +func init() { + t["VmInstanceUuidConflictEvent"] = reflect.TypeOf((*VmInstanceUuidConflictEvent)(nil)).Elem() +} + +type VmLimitLicense struct { + 
NotEnoughLicenses + + Limit int32 `xml:"limit"` +} + +func init() { + t["VmLimitLicense"] = reflect.TypeOf((*VmLimitLicense)(nil)).Elem() +} + +type VmLimitLicenseFault VmLimitLicense + +func init() { + t["VmLimitLicenseFault"] = reflect.TypeOf((*VmLimitLicenseFault)(nil)).Elem() +} + +type VmLogFileInfo struct { + FileInfo +} + +func init() { + t["VmLogFileInfo"] = reflect.TypeOf((*VmLogFileInfo)(nil)).Elem() +} + +type VmLogFileQuery struct { + FileQuery +} + +func init() { + t["VmLogFileQuery"] = reflect.TypeOf((*VmLogFileQuery)(nil)).Elem() +} + +type VmMacAssignedEvent struct { + VmEvent + + Adapter string `xml:"adapter"` + Mac string `xml:"mac"` +} + +func init() { + t["VmMacAssignedEvent"] = reflect.TypeOf((*VmMacAssignedEvent)(nil)).Elem() +} + +type VmMacChangedEvent struct { + VmEvent + + Adapter string `xml:"adapter"` + OldMac string `xml:"oldMac"` + NewMac string `xml:"newMac"` +} + +func init() { + t["VmMacChangedEvent"] = reflect.TypeOf((*VmMacChangedEvent)(nil)).Elem() +} + +type VmMacConflictEvent struct { + VmEvent + + ConflictedVm VmEventArgument `xml:"conflictedVm"` + Mac string `xml:"mac"` +} + +func init() { + t["VmMacConflictEvent"] = reflect.TypeOf((*VmMacConflictEvent)(nil)).Elem() +} + +type VmMaxFTRestartCountReached struct { + VmEvent +} + +func init() { + t["VmMaxFTRestartCountReached"] = reflect.TypeOf((*VmMaxFTRestartCountReached)(nil)).Elem() +} + +type VmMaxRestartCountReached struct { + VmEvent +} + +func init() { + t["VmMaxRestartCountReached"] = reflect.TypeOf((*VmMaxRestartCountReached)(nil)).Elem() +} + +type VmMessageErrorEvent struct { + VmEvent + + Message string `xml:"message"` + MessageInfo []VirtualMachineMessage `xml:"messageInfo,omitempty"` +} + +func init() { + t["VmMessageErrorEvent"] = reflect.TypeOf((*VmMessageErrorEvent)(nil)).Elem() +} + +type VmMessageEvent struct { + VmEvent + + Message string `xml:"message"` + MessageInfo []VirtualMachineMessage `xml:"messageInfo,omitempty"` +} + +func init() { + t["VmMessageEvent"] = reflect.TypeOf((*VmMessageEvent)(nil)).Elem() +} + +type VmMessageWarningEvent struct { + VmEvent + + Message string `xml:"message"` + MessageInfo []VirtualMachineMessage `xml:"messageInfo,omitempty"` +} + +func init() { + t["VmMessageWarningEvent"] = reflect.TypeOf((*VmMessageWarningEvent)(nil)).Elem() +} + +type VmMetadataManagerFault struct { + VimFault +} + +func init() { + t["VmMetadataManagerFault"] = reflect.TypeOf((*VmMetadataManagerFault)(nil)).Elem() +} + +type VmMetadataManagerFaultFault VmMetadataManagerFault + +func init() { + t["VmMetadataManagerFaultFault"] = reflect.TypeOf((*VmMetadataManagerFaultFault)(nil)).Elem() +} + +type VmMigratedEvent struct { + VmEvent + + SourceHost HostEventArgument `xml:"sourceHost"` + SourceDatacenter *DatacenterEventArgument `xml:"sourceDatacenter,omitempty"` + SourceDatastore *DatastoreEventArgument `xml:"sourceDatastore,omitempty"` +} + +func init() { + t["VmMigratedEvent"] = reflect.TypeOf((*VmMigratedEvent)(nil)).Elem() +} + +type VmMonitorIncompatibleForFaultTolerance struct { + VimFault +} + +func init() { + t["VmMonitorIncompatibleForFaultTolerance"] = reflect.TypeOf((*VmMonitorIncompatibleForFaultTolerance)(nil)).Elem() +} + +type VmMonitorIncompatibleForFaultToleranceFault VmMonitorIncompatibleForFaultTolerance + +func init() { + t["VmMonitorIncompatibleForFaultToleranceFault"] = reflect.TypeOf((*VmMonitorIncompatibleForFaultToleranceFault)(nil)).Elem() +} + +type VmNoCompatibleHostForSecondaryEvent struct { + VmEvent +} + +func init() { + 
t["VmNoCompatibleHostForSecondaryEvent"] = reflect.TypeOf((*VmNoCompatibleHostForSecondaryEvent)(nil)).Elem() +} + +type VmNoNetworkAccessEvent struct { + VmEvent + + DestHost HostEventArgument `xml:"destHost"` +} + +func init() { + t["VmNoNetworkAccessEvent"] = reflect.TypeOf((*VmNoNetworkAccessEvent)(nil)).Elem() +} + +type VmNvramFileInfo struct { + FileInfo +} + +func init() { + t["VmNvramFileInfo"] = reflect.TypeOf((*VmNvramFileInfo)(nil)).Elem() +} + +type VmNvramFileQuery struct { + FileQuery +} + +func init() { + t["VmNvramFileQuery"] = reflect.TypeOf((*VmNvramFileQuery)(nil)).Elem() +} + +type VmOrphanedEvent struct { + VmEvent +} + +func init() { + t["VmOrphanedEvent"] = reflect.TypeOf((*VmOrphanedEvent)(nil)).Elem() +} + +type VmPodConfigForPlacement struct { + DynamicData + + StoragePod ManagedObjectReference `xml:"storagePod"` + Disk []PodDiskLocator `xml:"disk,omitempty"` + VmConfig *StorageDrsVmConfigInfo `xml:"vmConfig,omitempty"` + InterVmRule []BaseClusterRuleInfo `xml:"interVmRule,omitempty,typeattr"` +} + +func init() { + t["VmPodConfigForPlacement"] = reflect.TypeOf((*VmPodConfigForPlacement)(nil)).Elem() +} + +type VmPortGroupProfile struct { + PortGroupProfile +} + +func init() { + t["VmPortGroupProfile"] = reflect.TypeOf((*VmPortGroupProfile)(nil)).Elem() +} + +type VmPowerOffOnIsolationEvent struct { + VmPoweredOffEvent + + IsolatedHost HostEventArgument `xml:"isolatedHost"` +} + +func init() { + t["VmPowerOffOnIsolationEvent"] = reflect.TypeOf((*VmPowerOffOnIsolationEvent)(nil)).Elem() +} + +type VmPowerOnDisabled struct { + InvalidState +} + +func init() { + t["VmPowerOnDisabled"] = reflect.TypeOf((*VmPowerOnDisabled)(nil)).Elem() +} + +type VmPowerOnDisabledFault VmPowerOnDisabled + +func init() { + t["VmPowerOnDisabledFault"] = reflect.TypeOf((*VmPowerOnDisabledFault)(nil)).Elem() +} + +type VmPoweredOffEvent struct { + VmEvent +} + +func init() { + t["VmPoweredOffEvent"] = reflect.TypeOf((*VmPoweredOffEvent)(nil)).Elem() +} + +type VmPoweredOnEvent struct { + VmEvent +} + +func init() { + t["VmPoweredOnEvent"] = reflect.TypeOf((*VmPoweredOnEvent)(nil)).Elem() +} + +type VmPoweringOnWithCustomizedDVPortEvent struct { + VmEvent + + Vnic []VnicPortArgument `xml:"vnic"` +} + +func init() { + t["VmPoweringOnWithCustomizedDVPortEvent"] = reflect.TypeOf((*VmPoweringOnWithCustomizedDVPortEvent)(nil)).Elem() +} + +type VmPrimaryFailoverEvent struct { + VmEvent + + Reason string `xml:"reason,omitempty"` +} + +func init() { + t["VmPrimaryFailoverEvent"] = reflect.TypeOf((*VmPrimaryFailoverEvent)(nil)).Elem() +} + +type VmReconfiguredEvent struct { + VmEvent + + ConfigSpec VirtualMachineConfigSpec `xml:"configSpec"` + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty"` +} + +func init() { + t["VmReconfiguredEvent"] = reflect.TypeOf((*VmReconfiguredEvent)(nil)).Elem() +} + +type VmRegisteredEvent struct { + VmEvent +} + +func init() { + t["VmRegisteredEvent"] = reflect.TypeOf((*VmRegisteredEvent)(nil)).Elem() +} + +type VmRelayoutSuccessfulEvent struct { + VmEvent +} + +func init() { + t["VmRelayoutSuccessfulEvent"] = reflect.TypeOf((*VmRelayoutSuccessfulEvent)(nil)).Elem() +} + +type VmRelayoutUpToDateEvent struct { + VmEvent +} + +func init() { + t["VmRelayoutUpToDateEvent"] = reflect.TypeOf((*VmRelayoutUpToDateEvent)(nil)).Elem() +} + +type VmReloadFromPathEvent struct { + VmEvent + + ConfigPath string `xml:"configPath"` +} + +func init() { + t["VmReloadFromPathEvent"] = reflect.TypeOf((*VmReloadFromPathEvent)(nil)).Elem() +} + +type 
VmReloadFromPathFailedEvent struct { + VmEvent + + ConfigPath string `xml:"configPath"` +} + +func init() { + t["VmReloadFromPathFailedEvent"] = reflect.TypeOf((*VmReloadFromPathFailedEvent)(nil)).Elem() +} + +type VmRelocateFailedEvent struct { + VmRelocateSpecEvent + + DestHost HostEventArgument `xml:"destHost"` + Reason LocalizedMethodFault `xml:"reason"` + DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty"` + DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty"` +} + +func init() { + t["VmRelocateFailedEvent"] = reflect.TypeOf((*VmRelocateFailedEvent)(nil)).Elem() +} + +type VmRelocateSpecEvent struct { + VmEvent +} + +func init() { + t["VmRelocateSpecEvent"] = reflect.TypeOf((*VmRelocateSpecEvent)(nil)).Elem() +} + +type VmRelocatedEvent struct { + VmRelocateSpecEvent + + SourceHost HostEventArgument `xml:"sourceHost"` + SourceDatacenter *DatacenterEventArgument `xml:"sourceDatacenter,omitempty"` + SourceDatastore *DatastoreEventArgument `xml:"sourceDatastore,omitempty"` +} + +func init() { + t["VmRelocatedEvent"] = reflect.TypeOf((*VmRelocatedEvent)(nil)).Elem() +} + +type VmRemoteConsoleConnectedEvent struct { + VmEvent +} + +func init() { + t["VmRemoteConsoleConnectedEvent"] = reflect.TypeOf((*VmRemoteConsoleConnectedEvent)(nil)).Elem() +} + +type VmRemoteConsoleDisconnectedEvent struct { + VmEvent +} + +func init() { + t["VmRemoteConsoleDisconnectedEvent"] = reflect.TypeOf((*VmRemoteConsoleDisconnectedEvent)(nil)).Elem() +} + +type VmRemovedEvent struct { + VmEvent +} + +func init() { + t["VmRemovedEvent"] = reflect.TypeOf((*VmRemovedEvent)(nil)).Elem() +} + +type VmRenamedEvent struct { + VmEvent + + OldName string `xml:"oldName"` + NewName string `xml:"newName"` +} + +func init() { + t["VmRenamedEvent"] = reflect.TypeOf((*VmRenamedEvent)(nil)).Elem() +} + +type VmRequirementsExceedCurrentEVCModeEvent struct { + VmEvent +} + +func init() { + t["VmRequirementsExceedCurrentEVCModeEvent"] = reflect.TypeOf((*VmRequirementsExceedCurrentEVCModeEvent)(nil)).Elem() +} + +type VmResettingEvent struct { + VmEvent +} + +func init() { + t["VmResettingEvent"] = reflect.TypeOf((*VmResettingEvent)(nil)).Elem() +} + +type VmResourcePoolMovedEvent struct { + VmEvent + + OldParent ResourcePoolEventArgument `xml:"oldParent"` + NewParent ResourcePoolEventArgument `xml:"newParent"` +} + +func init() { + t["VmResourcePoolMovedEvent"] = reflect.TypeOf((*VmResourcePoolMovedEvent)(nil)).Elem() +} + +type VmResourceReallocatedEvent struct { + VmEvent + + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty"` +} + +func init() { + t["VmResourceReallocatedEvent"] = reflect.TypeOf((*VmResourceReallocatedEvent)(nil)).Elem() +} + +type VmRestartedOnAlternateHostEvent struct { + VmPoweredOnEvent + + SourceHost HostEventArgument `xml:"sourceHost"` +} + +func init() { + t["VmRestartedOnAlternateHostEvent"] = reflect.TypeOf((*VmRestartedOnAlternateHostEvent)(nil)).Elem() +} + +type VmResumingEvent struct { + VmEvent +} + +func init() { + t["VmResumingEvent"] = reflect.TypeOf((*VmResumingEvent)(nil)).Elem() +} + +type VmSecondaryAddedEvent struct { + VmEvent +} + +func init() { + t["VmSecondaryAddedEvent"] = reflect.TypeOf((*VmSecondaryAddedEvent)(nil)).Elem() +} + +type VmSecondaryDisabledBySystemEvent struct { + VmEvent + + Reason *LocalizedMethodFault `xml:"reason,omitempty"` +} + +func init() { + t["VmSecondaryDisabledBySystemEvent"] = reflect.TypeOf((*VmSecondaryDisabledBySystemEvent)(nil)).Elem() +} + +type VmSecondaryDisabledEvent struct { + VmEvent +} + 
+func init() { + t["VmSecondaryDisabledEvent"] = reflect.TypeOf((*VmSecondaryDisabledEvent)(nil)).Elem() +} + +type VmSecondaryEnabledEvent struct { + VmEvent +} + +func init() { + t["VmSecondaryEnabledEvent"] = reflect.TypeOf((*VmSecondaryEnabledEvent)(nil)).Elem() +} + +type VmSecondaryStartedEvent struct { + VmEvent +} + +func init() { + t["VmSecondaryStartedEvent"] = reflect.TypeOf((*VmSecondaryStartedEvent)(nil)).Elem() +} + +type VmShutdownOnIsolationEvent struct { + VmPoweredOffEvent + + IsolatedHost HostEventArgument `xml:"isolatedHost"` + ShutdownResult string `xml:"shutdownResult,omitempty"` +} + +func init() { + t["VmShutdownOnIsolationEvent"] = reflect.TypeOf((*VmShutdownOnIsolationEvent)(nil)).Elem() +} + +type VmSmpFaultToleranceTooManyVMsOnHost struct { + InsufficientResourcesFault + + HostName string `xml:"hostName,omitempty"` + MaxNumSmpFtVms int32 `xml:"maxNumSmpFtVms"` +} + +func init() { + t["VmSmpFaultToleranceTooManyVMsOnHost"] = reflect.TypeOf((*VmSmpFaultToleranceTooManyVMsOnHost)(nil)).Elem() +} + +type VmSmpFaultToleranceTooManyVMsOnHostFault VmSmpFaultToleranceTooManyVMsOnHost + +func init() { + t["VmSmpFaultToleranceTooManyVMsOnHostFault"] = reflect.TypeOf((*VmSmpFaultToleranceTooManyVMsOnHostFault)(nil)).Elem() +} + +type VmSnapshotFileInfo struct { + FileInfo +} + +func init() { + t["VmSnapshotFileInfo"] = reflect.TypeOf((*VmSnapshotFileInfo)(nil)).Elem() +} + +type VmSnapshotFileQuery struct { + FileQuery +} + +func init() { + t["VmSnapshotFileQuery"] = reflect.TypeOf((*VmSnapshotFileQuery)(nil)).Elem() +} + +type VmStartRecordingEvent struct { + VmEvent +} + +func init() { + t["VmStartRecordingEvent"] = reflect.TypeOf((*VmStartRecordingEvent)(nil)).Elem() +} + +type VmStartReplayingEvent struct { + VmEvent +} + +func init() { + t["VmStartReplayingEvent"] = reflect.TypeOf((*VmStartReplayingEvent)(nil)).Elem() +} + +type VmStartingEvent struct { + VmEvent +} + +func init() { + t["VmStartingEvent"] = reflect.TypeOf((*VmStartingEvent)(nil)).Elem() +} + +type VmStartingSecondaryEvent struct { + VmEvent +} + +func init() { + t["VmStartingSecondaryEvent"] = reflect.TypeOf((*VmStartingSecondaryEvent)(nil)).Elem() +} + +type VmStaticMacConflictEvent struct { + VmEvent + + ConflictedVm VmEventArgument `xml:"conflictedVm"` + Mac string `xml:"mac"` +} + +func init() { + t["VmStaticMacConflictEvent"] = reflect.TypeOf((*VmStaticMacConflictEvent)(nil)).Elem() +} + +type VmStoppingEvent struct { + VmEvent +} + +func init() { + t["VmStoppingEvent"] = reflect.TypeOf((*VmStoppingEvent)(nil)).Elem() +} + +type VmSuspendedEvent struct { + VmEvent +} + +func init() { + t["VmSuspendedEvent"] = reflect.TypeOf((*VmSuspendedEvent)(nil)).Elem() +} + +type VmSuspendingEvent struct { + VmEvent +} + +func init() { + t["VmSuspendingEvent"] = reflect.TypeOf((*VmSuspendingEvent)(nil)).Elem() +} + +type VmTimedoutStartingSecondaryEvent struct { + VmEvent + + Timeout int64 `xml:"timeout,omitempty"` +} + +func init() { + t["VmTimedoutStartingSecondaryEvent"] = reflect.TypeOf((*VmTimedoutStartingSecondaryEvent)(nil)).Elem() +} + +type VmToolsUpgradeFault struct { + VimFault +} + +func init() { + t["VmToolsUpgradeFault"] = reflect.TypeOf((*VmToolsUpgradeFault)(nil)).Elem() +} + +type VmToolsUpgradeFaultFault BaseVmToolsUpgradeFault + +func init() { + t["VmToolsUpgradeFaultFault"] = reflect.TypeOf((*VmToolsUpgradeFaultFault)(nil)).Elem() +} + +type VmUnsupportedStartingEvent struct { + VmStartingEvent + + GuestId string `xml:"guestId"` +} + +func init() { + t["VmUnsupportedStartingEvent"] = 
reflect.TypeOf((*VmUnsupportedStartingEvent)(nil)).Elem() +} + +type VmUpgradeCompleteEvent struct { + VmEvent + + Version string `xml:"version"` +} + +func init() { + t["VmUpgradeCompleteEvent"] = reflect.TypeOf((*VmUpgradeCompleteEvent)(nil)).Elem() +} + +type VmUpgradeFailedEvent struct { + VmEvent +} + +func init() { + t["VmUpgradeFailedEvent"] = reflect.TypeOf((*VmUpgradeFailedEvent)(nil)).Elem() +} + +type VmUpgradingEvent struct { + VmEvent + + Version string `xml:"version"` +} + +func init() { + t["VmUpgradingEvent"] = reflect.TypeOf((*VmUpgradingEvent)(nil)).Elem() +} + +type VmUuidAssignedEvent struct { + VmEvent + + Uuid string `xml:"uuid"` +} + +func init() { + t["VmUuidAssignedEvent"] = reflect.TypeOf((*VmUuidAssignedEvent)(nil)).Elem() +} + +type VmUuidChangedEvent struct { + VmEvent + + OldUuid string `xml:"oldUuid"` + NewUuid string `xml:"newUuid"` +} + +func init() { + t["VmUuidChangedEvent"] = reflect.TypeOf((*VmUuidChangedEvent)(nil)).Elem() +} + +type VmUuidConflictEvent struct { + VmEvent + + ConflictedVm VmEventArgument `xml:"conflictedVm"` + Uuid string `xml:"uuid"` +} + +func init() { + t["VmUuidConflictEvent"] = reflect.TypeOf((*VmUuidConflictEvent)(nil)).Elem() +} + +type VmValidateMaxDevice struct { + VimFault + + Device string `xml:"device"` + Max int32 `xml:"max"` + Count int32 `xml:"count"` +} + +func init() { + t["VmValidateMaxDevice"] = reflect.TypeOf((*VmValidateMaxDevice)(nil)).Elem() +} + +type VmValidateMaxDeviceFault VmValidateMaxDevice + +func init() { + t["VmValidateMaxDeviceFault"] = reflect.TypeOf((*VmValidateMaxDeviceFault)(nil)).Elem() +} + +type VmVnicPoolReservationViolationClearEvent struct { + DvsEvent + + VmVnicResourcePoolKey string `xml:"vmVnicResourcePoolKey"` + VmVnicResourcePoolName string `xml:"vmVnicResourcePoolName,omitempty"` +} + +func init() { + t["VmVnicPoolReservationViolationClearEvent"] = reflect.TypeOf((*VmVnicPoolReservationViolationClearEvent)(nil)).Elem() +} + +type VmVnicPoolReservationViolationRaiseEvent struct { + DvsEvent + + VmVnicResourcePoolKey string `xml:"vmVnicResourcePoolKey"` + VmVnicResourcePoolName string `xml:"vmVnicResourcePoolName,omitempty"` +} + +func init() { + t["VmVnicPoolReservationViolationRaiseEvent"] = reflect.TypeOf((*VmVnicPoolReservationViolationRaiseEvent)(nil)).Elem() +} + +type VmWwnAssignedEvent struct { + VmEvent + + NodeWwns []int64 `xml:"nodeWwns"` + PortWwns []int64 `xml:"portWwns"` +} + +func init() { + t["VmWwnAssignedEvent"] = reflect.TypeOf((*VmWwnAssignedEvent)(nil)).Elem() +} + +type VmWwnChangedEvent struct { + VmEvent + + OldNodeWwns []int64 `xml:"oldNodeWwns,omitempty"` + OldPortWwns []int64 `xml:"oldPortWwns,omitempty"` + NewNodeWwns []int64 `xml:"newNodeWwns,omitempty"` + NewPortWwns []int64 `xml:"newPortWwns,omitempty"` +} + +func init() { + t["VmWwnChangedEvent"] = reflect.TypeOf((*VmWwnChangedEvent)(nil)).Elem() +} + +type VmWwnConflict struct { + InvalidVmConfig + + Vm *ManagedObjectReference `xml:"vm,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Name string `xml:"name,omitempty"` + Wwn int64 `xml:"wwn,omitempty"` +} + +func init() { + t["VmWwnConflict"] = reflect.TypeOf((*VmWwnConflict)(nil)).Elem() +} + +type VmWwnConflictEvent struct { + VmEvent + + ConflictedVms []VmEventArgument `xml:"conflictedVms,omitempty"` + ConflictedHosts []HostEventArgument `xml:"conflictedHosts,omitempty"` + Wwn int64 `xml:"wwn"` +} + +func init() { + t["VmWwnConflictEvent"] = reflect.TypeOf((*VmWwnConflictEvent)(nil)).Elem() +} + +type VmWwnConflictFault VmWwnConflict + 
+func init() { + t["VmWwnConflictFault"] = reflect.TypeOf((*VmWwnConflictFault)(nil)).Elem() +} + +type VmfsAlreadyMounted struct { + VmfsMountFault +} + +func init() { + t["VmfsAlreadyMounted"] = reflect.TypeOf((*VmfsAlreadyMounted)(nil)).Elem() +} + +type VmfsAlreadyMountedFault VmfsAlreadyMounted + +func init() { + t["VmfsAlreadyMountedFault"] = reflect.TypeOf((*VmfsAlreadyMountedFault)(nil)).Elem() +} + +type VmfsAmbiguousMount struct { + VmfsMountFault +} + +func init() { + t["VmfsAmbiguousMount"] = reflect.TypeOf((*VmfsAmbiguousMount)(nil)).Elem() +} + +type VmfsAmbiguousMountFault VmfsAmbiguousMount + +func init() { + t["VmfsAmbiguousMountFault"] = reflect.TypeOf((*VmfsAmbiguousMountFault)(nil)).Elem() +} + +type VmfsConfigOption struct { + DynamicData + + BlockSizeOption int32 `xml:"blockSizeOption"` + UnmapGranularityOption []int32 `xml:"unmapGranularityOption,omitempty"` + UnmapBandwidthFixedValue *LongOption `xml:"unmapBandwidthFixedValue,omitempty"` + UnmapBandwidthDynamicMin *LongOption `xml:"unmapBandwidthDynamicMin,omitempty"` + UnmapBandwidthDynamicMax *LongOption `xml:"unmapBandwidthDynamicMax,omitempty"` + UnmapBandwidthIncrement int64 `xml:"unmapBandwidthIncrement,omitempty"` +} + +func init() { + t["VmfsConfigOption"] = reflect.TypeOf((*VmfsConfigOption)(nil)).Elem() +} + +type VmfsDatastoreAllExtentOption struct { + VmfsDatastoreSingleExtentOption +} + +func init() { + t["VmfsDatastoreAllExtentOption"] = reflect.TypeOf((*VmfsDatastoreAllExtentOption)(nil)).Elem() +} + +type VmfsDatastoreBaseOption struct { + DynamicData + + Layout HostDiskPartitionLayout `xml:"layout"` + PartitionFormatChange *bool `xml:"partitionFormatChange"` +} + +func init() { + t["VmfsDatastoreBaseOption"] = reflect.TypeOf((*VmfsDatastoreBaseOption)(nil)).Elem() +} + +type VmfsDatastoreCreateSpec struct { + VmfsDatastoreSpec + + Partition HostDiskPartitionSpec `xml:"partition"` + Vmfs HostVmfsSpec `xml:"vmfs"` + Extent []HostScsiDiskPartition `xml:"extent,omitempty"` +} + +func init() { + t["VmfsDatastoreCreateSpec"] = reflect.TypeOf((*VmfsDatastoreCreateSpec)(nil)).Elem() +} + +type VmfsDatastoreExpandSpec struct { + VmfsDatastoreSpec + + Partition HostDiskPartitionSpec `xml:"partition"` + Extent HostScsiDiskPartition `xml:"extent"` +} + +func init() { + t["VmfsDatastoreExpandSpec"] = reflect.TypeOf((*VmfsDatastoreExpandSpec)(nil)).Elem() +} + +type VmfsDatastoreExtendSpec struct { + VmfsDatastoreSpec + + Partition HostDiskPartitionSpec `xml:"partition"` + Extent []HostScsiDiskPartition `xml:"extent"` +} + +func init() { + t["VmfsDatastoreExtendSpec"] = reflect.TypeOf((*VmfsDatastoreExtendSpec)(nil)).Elem() +} + +type VmfsDatastoreInfo struct { + DatastoreInfo + + MaxPhysicalRDMFileSize int64 `xml:"maxPhysicalRDMFileSize,omitempty"` + MaxVirtualRDMFileSize int64 `xml:"maxVirtualRDMFileSize,omitempty"` + Vmfs *HostVmfsVolume `xml:"vmfs,omitempty"` +} + +func init() { + t["VmfsDatastoreInfo"] = reflect.TypeOf((*VmfsDatastoreInfo)(nil)).Elem() +} + +type VmfsDatastoreMultipleExtentOption struct { + VmfsDatastoreBaseOption + + VmfsExtent []HostDiskPartitionBlockRange `xml:"vmfsExtent"` +} + +func init() { + t["VmfsDatastoreMultipleExtentOption"] = reflect.TypeOf((*VmfsDatastoreMultipleExtentOption)(nil)).Elem() +} + +type VmfsDatastoreOption struct { + DynamicData + + Info BaseVmfsDatastoreBaseOption `xml:"info,typeattr"` + Spec BaseVmfsDatastoreSpec `xml:"spec,typeattr"` +} + +func init() { + t["VmfsDatastoreOption"] = reflect.TypeOf((*VmfsDatastoreOption)(nil)).Elem() +} + +type 
VmfsDatastoreSingleExtentOption struct { + VmfsDatastoreBaseOption + + VmfsExtent HostDiskPartitionBlockRange `xml:"vmfsExtent"` +} + +func init() { + t["VmfsDatastoreSingleExtentOption"] = reflect.TypeOf((*VmfsDatastoreSingleExtentOption)(nil)).Elem() +} + +type VmfsDatastoreSpec struct { + DynamicData + + DiskUuid string `xml:"diskUuid"` +} + +func init() { + t["VmfsDatastoreSpec"] = reflect.TypeOf((*VmfsDatastoreSpec)(nil)).Elem() +} + +type VmfsMountFault struct { + HostConfigFault + + Uuid string `xml:"uuid"` +} + +func init() { + t["VmfsMountFault"] = reflect.TypeOf((*VmfsMountFault)(nil)).Elem() +} + +type VmfsMountFaultFault BaseVmfsMountFault + +func init() { + t["VmfsMountFaultFault"] = reflect.TypeOf((*VmfsMountFaultFault)(nil)).Elem() +} + +type VmfsUnmapBandwidthSpec struct { + DynamicData + + Policy string `xml:"policy"` + FixedValue int64 `xml:"fixedValue"` + DynamicMin int64 `xml:"dynamicMin"` + DynamicMax int64 `xml:"dynamicMax"` +} + +func init() { + t["VmfsUnmapBandwidthSpec"] = reflect.TypeOf((*VmfsUnmapBandwidthSpec)(nil)).Elem() +} + +type VmotionInterfaceNotEnabled struct { + HostPowerOpFailed +} + +func init() { + t["VmotionInterfaceNotEnabled"] = reflect.TypeOf((*VmotionInterfaceNotEnabled)(nil)).Elem() +} + +type VmotionInterfaceNotEnabledFault VmotionInterfaceNotEnabled + +func init() { + t["VmotionInterfaceNotEnabledFault"] = reflect.TypeOf((*VmotionInterfaceNotEnabledFault)(nil)).Elem() +} + +type VmwareDistributedVirtualSwitchPvlanSpec struct { + VmwareDistributedVirtualSwitchVlanSpec + + PvlanId int32 `xml:"pvlanId"` +} + +func init() { + t["VmwareDistributedVirtualSwitchPvlanSpec"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchPvlanSpec)(nil)).Elem() +} + +type VmwareDistributedVirtualSwitchTrunkVlanSpec struct { + VmwareDistributedVirtualSwitchVlanSpec + + VlanId []NumericRange `xml:"vlanId,omitempty"` +} + +func init() { + t["VmwareDistributedVirtualSwitchTrunkVlanSpec"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchTrunkVlanSpec)(nil)).Elem() +} + +type VmwareDistributedVirtualSwitchVlanIdSpec struct { + VmwareDistributedVirtualSwitchVlanSpec + + VlanId int32 `xml:"vlanId"` +} + +func init() { + t["VmwareDistributedVirtualSwitchVlanIdSpec"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchVlanIdSpec)(nil)).Elem() +} + +type VmwareDistributedVirtualSwitchVlanSpec struct { + InheritablePolicy +} + +func init() { + t["VmwareDistributedVirtualSwitchVlanSpec"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchVlanSpec)(nil)).Elem() +} + +type VmwareUplinkPortTeamingPolicy struct { + InheritablePolicy + + Policy *StringPolicy `xml:"policy,omitempty"` + ReversePolicy *BoolPolicy `xml:"reversePolicy,omitempty"` + NotifySwitches *BoolPolicy `xml:"notifySwitches,omitempty"` + RollingOrder *BoolPolicy `xml:"rollingOrder,omitempty"` + FailureCriteria *DVSFailureCriteria `xml:"failureCriteria,omitempty"` + UplinkPortOrder *VMwareUplinkPortOrderPolicy `xml:"uplinkPortOrder,omitempty"` +} + +func init() { + t["VmwareUplinkPortTeamingPolicy"] = reflect.TypeOf((*VmwareUplinkPortTeamingPolicy)(nil)).Elem() +} + +type VnicPortArgument struct { + DynamicData + + Vnic string `xml:"vnic"` + Port DistributedVirtualSwitchPortConnection `xml:"port"` +} + +func init() { + t["VnicPortArgument"] = reflect.TypeOf((*VnicPortArgument)(nil)).Elem() +} + +type VolumeEditorError struct { + CustomizationFault +} + +func init() { + t["VolumeEditorError"] = reflect.TypeOf((*VolumeEditorError)(nil)).Elem() +} + +type VolumeEditorErrorFault VolumeEditorError + +func init() { + 
t["VolumeEditorErrorFault"] = reflect.TypeOf((*VolumeEditorErrorFault)(nil)).Elem() +} + +type VramLimitLicense struct { + NotEnoughLicenses + + Limit int32 `xml:"limit"` +} + +func init() { + t["VramLimitLicense"] = reflect.TypeOf((*VramLimitLicense)(nil)).Elem() +} + +type VramLimitLicenseFault VramLimitLicense + +func init() { + t["VramLimitLicenseFault"] = reflect.TypeOf((*VramLimitLicenseFault)(nil)).Elem() +} + +type VsanClusterConfigInfo struct { + DynamicData + + Enabled *bool `xml:"enabled"` + DefaultConfig *VsanClusterConfigInfoHostDefaultInfo `xml:"defaultConfig,omitempty"` +} + +func init() { + t["VsanClusterConfigInfo"] = reflect.TypeOf((*VsanClusterConfigInfo)(nil)).Elem() +} + +type VsanClusterConfigInfoHostDefaultInfo struct { + DynamicData + + Uuid string `xml:"uuid,omitempty"` + AutoClaimStorage *bool `xml:"autoClaimStorage"` + ChecksumEnabled *bool `xml:"checksumEnabled"` +} + +func init() { + t["VsanClusterConfigInfoHostDefaultInfo"] = reflect.TypeOf((*VsanClusterConfigInfoHostDefaultInfo)(nil)).Elem() +} + +type VsanClusterUuidMismatch struct { + CannotMoveVsanEnabledHost + + HostClusterUuid string `xml:"hostClusterUuid"` + DestinationClusterUuid string `xml:"destinationClusterUuid"` +} + +func init() { + t["VsanClusterUuidMismatch"] = reflect.TypeOf((*VsanClusterUuidMismatch)(nil)).Elem() +} + +type VsanClusterUuidMismatchFault VsanClusterUuidMismatch + +func init() { + t["VsanClusterUuidMismatchFault"] = reflect.TypeOf((*VsanClusterUuidMismatchFault)(nil)).Elem() +} + +type VsanDiskFault struct { + VsanFault + + Device string `xml:"device,omitempty"` +} + +func init() { + t["VsanDiskFault"] = reflect.TypeOf((*VsanDiskFault)(nil)).Elem() +} + +type VsanDiskFaultFault BaseVsanDiskFault + +func init() { + t["VsanDiskFaultFault"] = reflect.TypeOf((*VsanDiskFaultFault)(nil)).Elem() +} + +type VsanFault struct { + VimFault +} + +func init() { + t["VsanFault"] = reflect.TypeOf((*VsanFault)(nil)).Elem() +} + +type VsanFaultFault BaseVsanFault + +func init() { + t["VsanFaultFault"] = reflect.TypeOf((*VsanFaultFault)(nil)).Elem() +} + +type VsanHostClusterStatus struct { + DynamicData + + Uuid string `xml:"uuid,omitempty"` + NodeUuid string `xml:"nodeUuid,omitempty"` + Health string `xml:"health"` + NodeState VsanHostClusterStatusState `xml:"nodeState"` + MemberUuid []string `xml:"memberUuid,omitempty"` +} + +func init() { + t["VsanHostClusterStatus"] = reflect.TypeOf((*VsanHostClusterStatus)(nil)).Elem() +} + +type VsanHostClusterStatusState struct { + DynamicData + + State string `xml:"state"` + Completion *VsanHostClusterStatusStateCompletionEstimate `xml:"completion,omitempty"` +} + +func init() { + t["VsanHostClusterStatusState"] = reflect.TypeOf((*VsanHostClusterStatusState)(nil)).Elem() +} + +type VsanHostClusterStatusStateCompletionEstimate struct { + DynamicData + + CompleteTime *time.Time `xml:"completeTime"` + PercentComplete int32 `xml:"percentComplete,omitempty"` +} + +func init() { + t["VsanHostClusterStatusStateCompletionEstimate"] = reflect.TypeOf((*VsanHostClusterStatusStateCompletionEstimate)(nil)).Elem() +} + +type VsanHostConfigInfo struct { + DynamicData + + Enabled *bool `xml:"enabled"` + HostSystem *ManagedObjectReference `xml:"hostSystem,omitempty"` + ClusterInfo *VsanHostConfigInfoClusterInfo `xml:"clusterInfo,omitempty"` + StorageInfo *VsanHostConfigInfoStorageInfo `xml:"storageInfo,omitempty"` + NetworkInfo *VsanHostConfigInfoNetworkInfo `xml:"networkInfo,omitempty"` + FaultDomainInfo *VsanHostFaultDomainInfo `xml:"faultDomainInfo,omitempty"` +} + 
+func init() { + t["VsanHostConfigInfo"] = reflect.TypeOf((*VsanHostConfigInfo)(nil)).Elem() +} + +type VsanHostConfigInfoClusterInfo struct { + DynamicData + + Uuid string `xml:"uuid,omitempty"` + NodeUuid string `xml:"nodeUuid,omitempty"` +} + +func init() { + t["VsanHostConfigInfoClusterInfo"] = reflect.TypeOf((*VsanHostConfigInfoClusterInfo)(nil)).Elem() +} + +type VsanHostConfigInfoNetworkInfo struct { + DynamicData + + Port []VsanHostConfigInfoNetworkInfoPortConfig `xml:"port,omitempty"` +} + +func init() { + t["VsanHostConfigInfoNetworkInfo"] = reflect.TypeOf((*VsanHostConfigInfoNetworkInfo)(nil)).Elem() +} + +type VsanHostConfigInfoNetworkInfoPortConfig struct { + DynamicData + + IpConfig *VsanHostIpConfig `xml:"ipConfig,omitempty"` + Device string `xml:"device"` +} + +func init() { + t["VsanHostConfigInfoNetworkInfoPortConfig"] = reflect.TypeOf((*VsanHostConfigInfoNetworkInfoPortConfig)(nil)).Elem() +} + +type VsanHostConfigInfoStorageInfo struct { + DynamicData + + AutoClaimStorage *bool `xml:"autoClaimStorage"` + DiskMapping []VsanHostDiskMapping `xml:"diskMapping,omitempty"` + DiskMapInfo []VsanHostDiskMapInfo `xml:"diskMapInfo,omitempty"` + ChecksumEnabled *bool `xml:"checksumEnabled"` +} + +func init() { + t["VsanHostConfigInfoStorageInfo"] = reflect.TypeOf((*VsanHostConfigInfoStorageInfo)(nil)).Elem() +} + +type VsanHostDecommissionMode struct { + DynamicData + + ObjectAction string `xml:"objectAction"` +} + +func init() { + t["VsanHostDecommissionMode"] = reflect.TypeOf((*VsanHostDecommissionMode)(nil)).Elem() +} + +type VsanHostDiskMapInfo struct { + DynamicData + + Mapping VsanHostDiskMapping `xml:"mapping"` + Mounted bool `xml:"mounted"` +} + +func init() { + t["VsanHostDiskMapInfo"] = reflect.TypeOf((*VsanHostDiskMapInfo)(nil)).Elem() +} + +type VsanHostDiskMapResult struct { + DynamicData + + Mapping VsanHostDiskMapping `xml:"mapping"` + DiskResult []VsanHostDiskResult `xml:"diskResult,omitempty"` + Error *LocalizedMethodFault `xml:"error,omitempty"` +} + +func init() { + t["VsanHostDiskMapResult"] = reflect.TypeOf((*VsanHostDiskMapResult)(nil)).Elem() +} + +type VsanHostDiskMapping struct { + DynamicData + + Ssd HostScsiDisk `xml:"ssd"` + NonSsd []HostScsiDisk `xml:"nonSsd"` +} + +func init() { + t["VsanHostDiskMapping"] = reflect.TypeOf((*VsanHostDiskMapping)(nil)).Elem() +} + +type VsanHostDiskResult struct { + DynamicData + + Disk HostScsiDisk `xml:"disk"` + State string `xml:"state"` + VsanUuid string `xml:"vsanUuid,omitempty"` + Error *LocalizedMethodFault `xml:"error,omitempty"` + Degraded *bool `xml:"degraded"` +} + +func init() { + t["VsanHostDiskResult"] = reflect.TypeOf((*VsanHostDiskResult)(nil)).Elem() +} + +type VsanHostFaultDomainInfo struct { + DynamicData + + Name string `xml:"name"` +} + +func init() { + t["VsanHostFaultDomainInfo"] = reflect.TypeOf((*VsanHostFaultDomainInfo)(nil)).Elem() +} + +type VsanHostIpConfig struct { + DynamicData + + UpstreamIpAddress string `xml:"upstreamIpAddress"` + DownstreamIpAddress string `xml:"downstreamIpAddress"` +} + +func init() { + t["VsanHostIpConfig"] = reflect.TypeOf((*VsanHostIpConfig)(nil)).Elem() +} + +type VsanHostMembershipInfo struct { + DynamicData + + NodeUuid string `xml:"nodeUuid"` + Hostname string `xml:"hostname"` +} + +func init() { + t["VsanHostMembershipInfo"] = reflect.TypeOf((*VsanHostMembershipInfo)(nil)).Elem() +} + +type VsanHostRuntimeInfo struct { + DynamicData + + MembershipList []VsanHostMembershipInfo `xml:"membershipList,omitempty"` + DiskIssues []VsanHostRuntimeInfoDiskIssue 
`xml:"diskIssues,omitempty"` + AccessGenNo int32 `xml:"accessGenNo,omitempty"` +} + +func init() { + t["VsanHostRuntimeInfo"] = reflect.TypeOf((*VsanHostRuntimeInfo)(nil)).Elem() +} + +type VsanHostRuntimeInfoDiskIssue struct { + DynamicData + + DiskId string `xml:"diskId"` + Issue string `xml:"issue"` +} + +func init() { + t["VsanHostRuntimeInfoDiskIssue"] = reflect.TypeOf((*VsanHostRuntimeInfoDiskIssue)(nil)).Elem() +} + +type VsanHostVsanDiskInfo struct { + DynamicData + + VsanUuid string `xml:"vsanUuid"` + FormatVersion int32 `xml:"formatVersion"` +} + +func init() { + t["VsanHostVsanDiskInfo"] = reflect.TypeOf((*VsanHostVsanDiskInfo)(nil)).Elem() +} + +type VsanIncompatibleDiskMapping struct { + VsanDiskFault +} + +func init() { + t["VsanIncompatibleDiskMapping"] = reflect.TypeOf((*VsanIncompatibleDiskMapping)(nil)).Elem() +} + +type VsanIncompatibleDiskMappingFault VsanIncompatibleDiskMapping + +func init() { + t["VsanIncompatibleDiskMappingFault"] = reflect.TypeOf((*VsanIncompatibleDiskMappingFault)(nil)).Elem() +} + +type VsanNewPolicyBatch struct { + DynamicData + + Size []int64 `xml:"size,omitempty"` + Policy string `xml:"policy,omitempty"` +} + +func init() { + t["VsanNewPolicyBatch"] = reflect.TypeOf((*VsanNewPolicyBatch)(nil)).Elem() +} + +type VsanPolicyChangeBatch struct { + DynamicData + + Uuid []string `xml:"uuid,omitempty"` + Policy string `xml:"policy,omitempty"` +} + +func init() { + t["VsanPolicyChangeBatch"] = reflect.TypeOf((*VsanPolicyChangeBatch)(nil)).Elem() +} + +type VsanPolicyCost struct { + DynamicData + + ChangeDataSize int64 `xml:"changeDataSize,omitempty"` + CurrentDataSize int64 `xml:"currentDataSize,omitempty"` + TempDataSize int64 `xml:"tempDataSize,omitempty"` + CopyDataSize int64 `xml:"copyDataSize,omitempty"` + ChangeFlashReadCacheSize int64 `xml:"changeFlashReadCacheSize,omitempty"` + CurrentFlashReadCacheSize int64 `xml:"currentFlashReadCacheSize,omitempty"` + CurrentDiskSpaceToAddressSpaceRatio float32 `xml:"currentDiskSpaceToAddressSpaceRatio,omitempty"` + DiskSpaceToAddressSpaceRatio float32 `xml:"diskSpaceToAddressSpaceRatio,omitempty"` +} + +func init() { + t["VsanPolicyCost"] = reflect.TypeOf((*VsanPolicyCost)(nil)).Elem() +} + +type VsanPolicySatisfiability struct { + DynamicData + + Uuid string `xml:"uuid,omitempty"` + IsSatisfiable bool `xml:"isSatisfiable"` + Reason *LocalizableMessage `xml:"reason,omitempty"` + Cost *VsanPolicyCost `xml:"cost,omitempty"` +} + +func init() { + t["VsanPolicySatisfiability"] = reflect.TypeOf((*VsanPolicySatisfiability)(nil)).Elem() +} + +type VsanUpgradeSystemAPIBrokenIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + Hosts []ManagedObjectReference `xml:"hosts"` +} + +func init() { + t["VsanUpgradeSystemAPIBrokenIssue"] = reflect.TypeOf((*VsanUpgradeSystemAPIBrokenIssue)(nil)).Elem() +} + +type VsanUpgradeSystemAutoClaimEnabledOnHostsIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + Hosts []ManagedObjectReference `xml:"hosts"` +} + +func init() { + t["VsanUpgradeSystemAutoClaimEnabledOnHostsIssue"] = reflect.TypeOf((*VsanUpgradeSystemAutoClaimEnabledOnHostsIssue)(nil)).Elem() +} + +type VsanUpgradeSystemHostsDisconnectedIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + Hosts []ManagedObjectReference `xml:"hosts"` +} + +func init() { + t["VsanUpgradeSystemHostsDisconnectedIssue"] = reflect.TypeOf((*VsanUpgradeSystemHostsDisconnectedIssue)(nil)).Elem() +} + +type VsanUpgradeSystemMissingHostsInClusterIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + Hosts []ManagedObjectReference 
`xml:"hosts"` +} + +func init() { + t["VsanUpgradeSystemMissingHostsInClusterIssue"] = reflect.TypeOf((*VsanUpgradeSystemMissingHostsInClusterIssue)(nil)).Elem() +} + +type VsanUpgradeSystemNetworkPartitionInfo struct { + DynamicData + + Hosts []ManagedObjectReference `xml:"hosts"` +} + +func init() { + t["VsanUpgradeSystemNetworkPartitionInfo"] = reflect.TypeOf((*VsanUpgradeSystemNetworkPartitionInfo)(nil)).Elem() +} + +type VsanUpgradeSystemNetworkPartitionIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + Partitions []VsanUpgradeSystemNetworkPartitionInfo `xml:"partitions"` +} + +func init() { + t["VsanUpgradeSystemNetworkPartitionIssue"] = reflect.TypeOf((*VsanUpgradeSystemNetworkPartitionIssue)(nil)).Elem() +} + +type VsanUpgradeSystemNotEnoughFreeCapacityIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + ReducedRedundancyUpgradePossible bool `xml:"reducedRedundancyUpgradePossible"` +} + +func init() { + t["VsanUpgradeSystemNotEnoughFreeCapacityIssue"] = reflect.TypeOf((*VsanUpgradeSystemNotEnoughFreeCapacityIssue)(nil)).Elem() +} + +type VsanUpgradeSystemPreflightCheckIssue struct { + DynamicData + + Msg string `xml:"msg"` +} + +func init() { + t["VsanUpgradeSystemPreflightCheckIssue"] = reflect.TypeOf((*VsanUpgradeSystemPreflightCheckIssue)(nil)).Elem() +} + +type VsanUpgradeSystemPreflightCheckResult struct { + DynamicData + + Issues []BaseVsanUpgradeSystemPreflightCheckIssue `xml:"issues,omitempty,typeattr"` + DiskMappingToRestore *VsanHostDiskMapping `xml:"diskMappingToRestore,omitempty"` +} + +func init() { + t["VsanUpgradeSystemPreflightCheckResult"] = reflect.TypeOf((*VsanUpgradeSystemPreflightCheckResult)(nil)).Elem() +} + +type VsanUpgradeSystemRogueHostsInClusterIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + Uuids []string `xml:"uuids"` +} + +func init() { + t["VsanUpgradeSystemRogueHostsInClusterIssue"] = reflect.TypeOf((*VsanUpgradeSystemRogueHostsInClusterIssue)(nil)).Elem() +} + +type VsanUpgradeSystemUpgradeHistoryDiskGroupOp struct { + VsanUpgradeSystemUpgradeHistoryItem + + Operation string `xml:"operation"` + DiskMapping VsanHostDiskMapping `xml:"diskMapping"` +} + +func init() { + t["VsanUpgradeSystemUpgradeHistoryDiskGroupOp"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryDiskGroupOp)(nil)).Elem() +} + +type VsanUpgradeSystemUpgradeHistoryItem struct { + DynamicData + + Timestamp time.Time `xml:"timestamp"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Message string `xml:"message"` + Task *ManagedObjectReference `xml:"task,omitempty"` +} + +func init() { + t["VsanUpgradeSystemUpgradeHistoryItem"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryItem)(nil)).Elem() +} + +type VsanUpgradeSystemUpgradeHistoryPreflightFail struct { + VsanUpgradeSystemUpgradeHistoryItem + + PreflightResult VsanUpgradeSystemPreflightCheckResult `xml:"preflightResult"` +} + +func init() { + t["VsanUpgradeSystemUpgradeHistoryPreflightFail"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryPreflightFail)(nil)).Elem() +} + +type VsanUpgradeSystemUpgradeStatus struct { + DynamicData + + InProgress bool `xml:"inProgress"` + History []BaseVsanUpgradeSystemUpgradeHistoryItem `xml:"history,omitempty,typeattr"` + Aborted *bool `xml:"aborted"` + Completed *bool `xml:"completed"` + Progress int32 `xml:"progress,omitempty"` +} + +func init() { + t["VsanUpgradeSystemUpgradeStatus"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeStatus)(nil)).Elem() +} + +type VsanUpgradeSystemV2ObjectsPresentDuringDowngradeIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + 
Uuids []string `xml:"uuids"` +} + +func init() { + t["VsanUpgradeSystemV2ObjectsPresentDuringDowngradeIssue"] = reflect.TypeOf((*VsanUpgradeSystemV2ObjectsPresentDuringDowngradeIssue)(nil)).Elem() +} + +type VsanUpgradeSystemWrongEsxVersionIssue struct { + VsanUpgradeSystemPreflightCheckIssue + + Hosts []ManagedObjectReference `xml:"hosts"` +} + +func init() { + t["VsanUpgradeSystemWrongEsxVersionIssue"] = reflect.TypeOf((*VsanUpgradeSystemWrongEsxVersionIssue)(nil)).Elem() +} + +type VslmCloneSpec struct { + VslmMigrateSpec + + Name string `xml:"name"` + KeepAfterDeleteVm *bool `xml:"keepAfterDeleteVm"` +} + +func init() { + t["VslmCloneSpec"] = reflect.TypeOf((*VslmCloneSpec)(nil)).Elem() +} + +type VslmCreateSpec struct { + DynamicData + + Name string `xml:"name"` + KeepAfterDeleteVm *bool `xml:"keepAfterDeleteVm"` + BackingSpec BaseVslmCreateSpecBackingSpec `xml:"backingSpec,typeattr"` + CapacityInMB int64 `xml:"capacityInMB"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` +} + +func init() { + t["VslmCreateSpec"] = reflect.TypeOf((*VslmCreateSpec)(nil)).Elem() +} + +type VslmCreateSpecBackingSpec struct { + DynamicData + + Datastore ManagedObjectReference `xml:"datastore"` + Path string `xml:"path,omitempty"` +} + +func init() { + t["VslmCreateSpecBackingSpec"] = reflect.TypeOf((*VslmCreateSpecBackingSpec)(nil)).Elem() +} + +type VslmCreateSpecDiskFileBackingSpec struct { + VslmCreateSpecBackingSpec + + ProvisioningType string `xml:"provisioningType,omitempty"` +} + +func init() { + t["VslmCreateSpecDiskFileBackingSpec"] = reflect.TypeOf((*VslmCreateSpecDiskFileBackingSpec)(nil)).Elem() +} + +type VslmCreateSpecRawDiskMappingBackingSpec struct { + VslmCreateSpecBackingSpec + + LunUuid string `xml:"lunUuid"` + CompatibilityMode string `xml:"compatibilityMode"` +} + +func init() { + t["VslmCreateSpecRawDiskMappingBackingSpec"] = reflect.TypeOf((*VslmCreateSpecRawDiskMappingBackingSpec)(nil)).Elem() +} + +type VslmMigrateSpec struct { + DynamicData + + BackingSpec BaseVslmCreateSpecBackingSpec `xml:"backingSpec,typeattr"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + Consolidate *bool `xml:"consolidate"` +} + +func init() { + t["VslmMigrateSpec"] = reflect.TypeOf((*VslmMigrateSpec)(nil)).Elem() +} + +type VslmRelocateSpec struct { + VslmMigrateSpec +} + +func init() { + t["VslmRelocateSpec"] = reflect.TypeOf((*VslmRelocateSpec)(nil)).Elem() +} + +type VslmTagEntry struct { + DynamicData + + TagName string `xml:"tagName"` + ParentCategoryName string `xml:"parentCategoryName"` +} + +func init() { + t["VslmTagEntry"] = reflect.TypeOf((*VslmTagEntry)(nil)).Elem() +} + +type VspanDestPortConflict struct { + DvsFault + + VspanSessionKey1 string `xml:"vspanSessionKey1"` + VspanSessionKey2 string `xml:"vspanSessionKey2"` + PortKey string `xml:"portKey"` +} + +func init() { + t["VspanDestPortConflict"] = reflect.TypeOf((*VspanDestPortConflict)(nil)).Elem() +} + +type VspanDestPortConflictFault VspanDestPortConflict + +func init() { + t["VspanDestPortConflictFault"] = reflect.TypeOf((*VspanDestPortConflictFault)(nil)).Elem() +} + +type VspanPortConflict struct { + DvsFault + + VspanSessionKey1 string `xml:"vspanSessionKey1"` + VspanSessionKey2 string `xml:"vspanSessionKey2"` + PortKey string `xml:"portKey"` +} + +func init() { + t["VspanPortConflict"] = reflect.TypeOf((*VspanPortConflict)(nil)).Elem() +} + +type VspanPortConflictFault VspanPortConflict + +func init() { + t["VspanPortConflictFault"] = 
reflect.TypeOf((*VspanPortConflictFault)(nil)).Elem() +} + +type VspanPortMoveFault struct { + DvsFault + + SrcPortgroupName string `xml:"srcPortgroupName"` + DestPortgroupName string `xml:"destPortgroupName"` + PortKey string `xml:"portKey"` +} + +func init() { + t["VspanPortMoveFault"] = reflect.TypeOf((*VspanPortMoveFault)(nil)).Elem() +} + +type VspanPortMoveFaultFault VspanPortMoveFault + +func init() { + t["VspanPortMoveFaultFault"] = reflect.TypeOf((*VspanPortMoveFaultFault)(nil)).Elem() +} + +type VspanPortPromiscChangeFault struct { + DvsFault + + PortKey string `xml:"portKey"` +} + +func init() { + t["VspanPortPromiscChangeFault"] = reflect.TypeOf((*VspanPortPromiscChangeFault)(nil)).Elem() +} + +type VspanPortPromiscChangeFaultFault VspanPortPromiscChangeFault + +func init() { + t["VspanPortPromiscChangeFaultFault"] = reflect.TypeOf((*VspanPortPromiscChangeFaultFault)(nil)).Elem() +} + +type VspanPortgroupPromiscChangeFault struct { + DvsFault + + PortgroupName string `xml:"portgroupName"` +} + +func init() { + t["VspanPortgroupPromiscChangeFault"] = reflect.TypeOf((*VspanPortgroupPromiscChangeFault)(nil)).Elem() +} + +type VspanPortgroupPromiscChangeFaultFault VspanPortgroupPromiscChangeFault + +func init() { + t["VspanPortgroupPromiscChangeFaultFault"] = reflect.TypeOf((*VspanPortgroupPromiscChangeFaultFault)(nil)).Elem() +} + +type VspanPortgroupTypeChangeFault struct { + DvsFault + + PortgroupName string `xml:"portgroupName"` +} + +func init() { + t["VspanPortgroupTypeChangeFault"] = reflect.TypeOf((*VspanPortgroupTypeChangeFault)(nil)).Elem() +} + +type VspanPortgroupTypeChangeFaultFault VspanPortgroupTypeChangeFault + +func init() { + t["VspanPortgroupTypeChangeFaultFault"] = reflect.TypeOf((*VspanPortgroupTypeChangeFaultFault)(nil)).Elem() +} + +type VspanPromiscuousPortNotSupported struct { + DvsFault + + VspanSessionKey string `xml:"vspanSessionKey"` + PortKey string `xml:"portKey"` +} + +func init() { + t["VspanPromiscuousPortNotSupported"] = reflect.TypeOf((*VspanPromiscuousPortNotSupported)(nil)).Elem() +} + +type VspanPromiscuousPortNotSupportedFault VspanPromiscuousPortNotSupported + +func init() { + t["VspanPromiscuousPortNotSupportedFault"] = reflect.TypeOf((*VspanPromiscuousPortNotSupportedFault)(nil)).Elem() +} + +type VspanSameSessionPortConflict struct { + DvsFault + + VspanSessionKey string `xml:"vspanSessionKey"` + PortKey string `xml:"portKey"` +} + +func init() { + t["VspanSameSessionPortConflict"] = reflect.TypeOf((*VspanSameSessionPortConflict)(nil)).Elem() +} + +type VspanSameSessionPortConflictFault VspanSameSessionPortConflict + +func init() { + t["VspanSameSessionPortConflictFault"] = reflect.TypeOf((*VspanSameSessionPortConflictFault)(nil)).Elem() +} + +type VvolDatastoreInfo struct { + DatastoreInfo + + VvolDS *HostVvolVolume `xml:"vvolDS,omitempty"` +} + +func init() { + t["VvolDatastoreInfo"] = reflect.TypeOf((*VvolDatastoreInfo)(nil)).Elem() +} + +type WaitForUpdates WaitForUpdatesRequestType + +func init() { + t["WaitForUpdates"] = reflect.TypeOf((*WaitForUpdates)(nil)).Elem() +} + +type WaitForUpdatesEx WaitForUpdatesExRequestType + +func init() { + t["WaitForUpdatesEx"] = reflect.TypeOf((*WaitForUpdatesEx)(nil)).Elem() +} + +type WaitForUpdatesExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Version string `xml:"version,omitempty"` + Options *WaitOptions `xml:"options,omitempty"` +} + +func init() { + t["WaitForUpdatesExRequestType"] = reflect.TypeOf((*WaitForUpdatesExRequestType)(nil)).Elem() +} + +type 
WaitForUpdatesExResponse struct { + Returnval *UpdateSet `xml:"returnval,omitempty"` +} + +type WaitForUpdatesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Version string `xml:"version,omitempty"` +} + +func init() { + t["WaitForUpdatesRequestType"] = reflect.TypeOf((*WaitForUpdatesRequestType)(nil)).Elem() +} + +type WaitForUpdatesResponse struct { + Returnval UpdateSet `xml:"returnval"` +} + +type WaitOptions struct { + DynamicData + + MaxWaitSeconds *int32 `xml:"maxWaitSeconds"` + MaxObjectUpdates int32 `xml:"maxObjectUpdates,omitempty"` +} + +func init() { + t["WaitOptions"] = reflect.TypeOf((*WaitOptions)(nil)).Elem() +} + +type WakeOnLanNotSupported struct { + VirtualHardwareCompatibilityIssue +} + +func init() { + t["WakeOnLanNotSupported"] = reflect.TypeOf((*WakeOnLanNotSupported)(nil)).Elem() +} + +type WakeOnLanNotSupportedByVmotionNIC struct { + HostPowerOpFailed +} + +func init() { + t["WakeOnLanNotSupportedByVmotionNIC"] = reflect.TypeOf((*WakeOnLanNotSupportedByVmotionNIC)(nil)).Elem() +} + +type WakeOnLanNotSupportedByVmotionNICFault WakeOnLanNotSupportedByVmotionNIC + +func init() { + t["WakeOnLanNotSupportedByVmotionNICFault"] = reflect.TypeOf((*WakeOnLanNotSupportedByVmotionNICFault)(nil)).Elem() +} + +type WakeOnLanNotSupportedFault WakeOnLanNotSupported + +func init() { + t["WakeOnLanNotSupportedFault"] = reflect.TypeOf((*WakeOnLanNotSupportedFault)(nil)).Elem() +} + +type WarningUpgradeEvent struct { + UpgradeEvent +} + +func init() { + t["WarningUpgradeEvent"] = reflect.TypeOf((*WarningUpgradeEvent)(nil)).Elem() +} + +type WeeklyTaskScheduler struct { + DailyTaskScheduler + + Sunday bool `xml:"sunday"` + Monday bool `xml:"monday"` + Tuesday bool `xml:"tuesday"` + Wednesday bool `xml:"wednesday"` + Thursday bool `xml:"thursday"` + Friday bool `xml:"friday"` + Saturday bool `xml:"saturday"` +} + +func init() { + t["WeeklyTaskScheduler"] = reflect.TypeOf((*WeeklyTaskScheduler)(nil)).Elem() +} + +type WillLoseHAProtection struct { + MigrationFault + + Resolution string `xml:"resolution"` +} + +func init() { + t["WillLoseHAProtection"] = reflect.TypeOf((*WillLoseHAProtection)(nil)).Elem() +} + +type WillLoseHAProtectionFault WillLoseHAProtection + +func init() { + t["WillLoseHAProtectionFault"] = reflect.TypeOf((*WillLoseHAProtectionFault)(nil)).Elem() +} + +type WillModifyConfigCpuRequirements struct { + MigrationFault +} + +func init() { + t["WillModifyConfigCpuRequirements"] = reflect.TypeOf((*WillModifyConfigCpuRequirements)(nil)).Elem() +} + +type WillModifyConfigCpuRequirementsFault WillModifyConfigCpuRequirements + +func init() { + t["WillModifyConfigCpuRequirementsFault"] = reflect.TypeOf((*WillModifyConfigCpuRequirementsFault)(nil)).Elem() +} + +type WillResetSnapshotDirectory struct { + MigrationFault +} + +func init() { + t["WillResetSnapshotDirectory"] = reflect.TypeOf((*WillResetSnapshotDirectory)(nil)).Elem() +} + +type WillResetSnapshotDirectoryFault WillResetSnapshotDirectory + +func init() { + t["WillResetSnapshotDirectoryFault"] = reflect.TypeOf((*WillResetSnapshotDirectoryFault)(nil)).Elem() +} + +type WinNetBIOSConfigInfo struct { + NetBIOSConfigInfo + + PrimaryWINS string `xml:"primaryWINS"` + SecondaryWINS string `xml:"secondaryWINS,omitempty"` +} + +func init() { + t["WinNetBIOSConfigInfo"] = reflect.TypeOf((*WinNetBIOSConfigInfo)(nil)).Elem() +} + +type WipeDiskFault struct { + VimFault +} + +func init() { + t["WipeDiskFault"] = reflect.TypeOf((*WipeDiskFault)(nil)).Elem() +} + +type WipeDiskFaultFault WipeDiskFault + +func 
init() { + t["WipeDiskFaultFault"] = reflect.TypeOf((*WipeDiskFaultFault)(nil)).Elem() +} + +type WitnessNodeInfo struct { + DynamicData + + IpSettings CustomizationIPSettings `xml:"ipSettings"` + BiosUuid string `xml:"biosUuid,omitempty"` +} + +func init() { + t["WitnessNodeInfo"] = reflect.TypeOf((*WitnessNodeInfo)(nil)).Elem() +} + +type XmlToCustomizationSpecItem XmlToCustomizationSpecItemRequestType + +func init() { + t["XmlToCustomizationSpecItem"] = reflect.TypeOf((*XmlToCustomizationSpecItem)(nil)).Elem() +} + +type XmlToCustomizationSpecItemRequestType struct { + This ManagedObjectReference `xml:"_this"` + SpecItemXml string `xml:"specItemXml"` +} + +func init() { + t["XmlToCustomizationSpecItemRequestType"] = reflect.TypeOf((*XmlToCustomizationSpecItemRequestType)(nil)).Elem() +} + +type XmlToCustomizationSpecItemResponse struct { + Returnval CustomizationSpecItem `xml:"returnval"` +} + +type ZeroFillVirtualDiskRequestType struct { + This ManagedObjectReference `xml:"_this"` + Name string `xml:"name"` + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty"` +} + +func init() { + t["ZeroFillVirtualDiskRequestType"] = reflect.TypeOf((*ZeroFillVirtualDiskRequestType)(nil)).Elem() +} + +type ZeroFillVirtualDisk_Task ZeroFillVirtualDiskRequestType + +func init() { + t["ZeroFillVirtualDisk_Task"] = reflect.TypeOf((*ZeroFillVirtualDisk_Task)(nil)).Elem() +} + +type ZeroFillVirtualDisk_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ConfigureVchaRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConfigSpec VchaClusterConfigSpec `xml:"configSpec"` +} + +func init() { + t["configureVchaRequestType"] = reflect.TypeOf((*ConfigureVchaRequestType)(nil)).Elem() +} + +type ConfigureVcha_Task ConfigureVchaRequestType + +func init() { + t["configureVcha_Task"] = reflect.TypeOf((*ConfigureVcha_Task)(nil)).Elem() +} + +type ConfigureVcha_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreatePassiveNodeRequestType struct { + This ManagedObjectReference `xml:"_this"` + PassiveDeploymentSpec PassiveNodeDeploymentSpec `xml:"passiveDeploymentSpec"` + SourceVcSpec SourceNodeSpec `xml:"sourceVcSpec"` +} + +func init() { + t["createPassiveNodeRequestType"] = reflect.TypeOf((*CreatePassiveNodeRequestType)(nil)).Elem() +} + +type CreatePassiveNode_Task CreatePassiveNodeRequestType + +func init() { + t["createPassiveNode_Task"] = reflect.TypeOf((*CreatePassiveNode_Task)(nil)).Elem() +} + +type CreatePassiveNode_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateWitnessNodeRequestType struct { + This ManagedObjectReference `xml:"_this"` + WitnessDeploymentSpec BaseNodeDeploymentSpec `xml:"witnessDeploymentSpec,typeattr"` + SourceVcSpec SourceNodeSpec `xml:"sourceVcSpec"` +} + +func init() { + t["createWitnessNodeRequestType"] = reflect.TypeOf((*CreateWitnessNodeRequestType)(nil)).Elem() +} + +type CreateWitnessNode_Task CreateWitnessNodeRequestType + +func init() { + t["createWitnessNode_Task"] = reflect.TypeOf((*CreateWitnessNode_Task)(nil)).Elem() +} + +type CreateWitnessNode_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DeployVchaRequestType struct { + This ManagedObjectReference `xml:"_this"` + DeploymentSpec VchaClusterDeploymentSpec `xml:"deploymentSpec"` +} + +func init() { + t["deployVchaRequestType"] = reflect.TypeOf((*DeployVchaRequestType)(nil)).Elem() +} + +type DeployVcha_Task DeployVchaRequestType + +func init() { + 
t["deployVcha_Task"] = reflect.TypeOf((*DeployVcha_Task)(nil)).Elem() +} + +type DeployVcha_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DestroyVchaRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["destroyVchaRequestType"] = reflect.TypeOf((*DestroyVchaRequestType)(nil)).Elem() +} + +type DestroyVcha_Task DestroyVchaRequestType + +func init() { + t["destroyVcha_Task"] = reflect.TypeOf((*DestroyVcha_Task)(nil)).Elem() +} + +type DestroyVcha_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type FetchSoftwarePackages FetchSoftwarePackagesRequestType + +func init() { + t["fetchSoftwarePackages"] = reflect.TypeOf((*FetchSoftwarePackages)(nil)).Elem() +} + +type FetchSoftwarePackagesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["fetchSoftwarePackagesRequestType"] = reflect.TypeOf((*FetchSoftwarePackagesRequestType)(nil)).Elem() +} + +type FetchSoftwarePackagesResponse struct { + Returnval []SoftwarePackage `xml:"returnval,omitempty"` +} + +type GetClusterMode GetClusterModeRequestType + +func init() { + t["getClusterMode"] = reflect.TypeOf((*GetClusterMode)(nil)).Elem() +} + +type GetClusterModeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["getClusterModeRequestType"] = reflect.TypeOf((*GetClusterModeRequestType)(nil)).Elem() +} + +type GetClusterModeResponse struct { + Returnval string `xml:"returnval"` +} + +type GetVchaConfig GetVchaConfigRequestType + +func init() { + t["getVchaConfig"] = reflect.TypeOf((*GetVchaConfig)(nil)).Elem() +} + +type GetVchaConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["getVchaConfigRequestType"] = reflect.TypeOf((*GetVchaConfigRequestType)(nil)).Elem() +} + +type GetVchaConfigResponse struct { + Returnval VchaClusterConfigInfo `xml:"returnval"` +} + +type InitiateFailoverRequestType struct { + This ManagedObjectReference `xml:"_this"` + Planned bool `xml:"planned"` +} + +func init() { + t["initiateFailoverRequestType"] = reflect.TypeOf((*InitiateFailoverRequestType)(nil)).Elem() +} + +type InitiateFailover_Task InitiateFailoverRequestType + +func init() { + t["initiateFailover_Task"] = reflect.TypeOf((*InitiateFailover_Task)(nil)).Elem() +} + +type InitiateFailover_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type InstallDate InstallDateRequestType + +func init() { + t["installDate"] = reflect.TypeOf((*InstallDate)(nil)).Elem() +} + +type InstallDateRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["installDateRequestType"] = reflect.TypeOf((*InstallDateRequestType)(nil)).Elem() +} + +type InstallDateResponse struct { + Returnval time.Time `xml:"returnval"` +} + +type PrepareVchaRequestType struct { + This ManagedObjectReference `xml:"_this"` + NetworkSpec VchaClusterNetworkSpec `xml:"networkSpec"` +} + +func init() { + t["prepareVchaRequestType"] = reflect.TypeOf((*PrepareVchaRequestType)(nil)).Elem() +} + +type PrepareVcha_Task PrepareVchaRequestType + +func init() { + t["prepareVcha_Task"] = reflect.TypeOf((*PrepareVcha_Task)(nil)).Elem() +} + +type PrepareVcha_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type QueryDatacenterConfigOptionDescriptor QueryDatacenterConfigOptionDescriptorRequestType + +func init() { + t["queryDatacenterConfigOptionDescriptor"] = 
reflect.TypeOf((*QueryDatacenterConfigOptionDescriptor)(nil)).Elem() +} + +type QueryDatacenterConfigOptionDescriptorRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["queryDatacenterConfigOptionDescriptorRequestType"] = reflect.TypeOf((*QueryDatacenterConfigOptionDescriptorRequestType)(nil)).Elem() +} + +type QueryDatacenterConfigOptionDescriptorResponse struct { + Returnval []VirtualMachineConfigOptionDescriptor `xml:"returnval,omitempty"` +} + +type ReloadVirtualMachineFromPathRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConfigurationPath string `xml:"configurationPath"` +} + +func init() { + t["reloadVirtualMachineFromPathRequestType"] = reflect.TypeOf((*ReloadVirtualMachineFromPathRequestType)(nil)).Elem() +} + +type ReloadVirtualMachineFromPath_Task ReloadVirtualMachineFromPathRequestType + +func init() { + t["reloadVirtualMachineFromPath_Task"] = reflect.TypeOf((*ReloadVirtualMachineFromPath_Task)(nil)).Elem() +} + +type ReloadVirtualMachineFromPath_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type SetClusterModeRequestType struct { + This ManagedObjectReference `xml:"_this"` + Mode string `xml:"mode"` +} + +func init() { + t["setClusterModeRequestType"] = reflect.TypeOf((*SetClusterModeRequestType)(nil)).Elem() +} + +type SetClusterMode_Task SetClusterModeRequestType + +func init() { + t["setClusterMode_Task"] = reflect.TypeOf((*SetClusterMode_Task)(nil)).Elem() +} + +type SetClusterMode_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type SetCustomValue SetCustomValueRequestType + +func init() { + t["setCustomValue"] = reflect.TypeOf((*SetCustomValue)(nil)).Elem() +} + +type SetCustomValueRequestType struct { + This ManagedObjectReference `xml:"_this"` + Key string `xml:"key"` + Value string `xml:"value"` +} + +func init() { + t["setCustomValueRequestType"] = reflect.TypeOf((*SetCustomValueRequestType)(nil)).Elem() +} + +type SetCustomValueResponse struct { +} + +type UnregisterVAppRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["unregisterVAppRequestType"] = reflect.TypeOf((*UnregisterVAppRequestType)(nil)).Elem() +} + +type UnregisterVApp_Task UnregisterVAppRequestType + +func init() { + t["unregisterVApp_Task"] = reflect.TypeOf((*UnregisterVApp_Task)(nil)).Elem() +} + +type UnregisterVApp_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type VersionURI string + +func init() { + t["versionURI"] = reflect.TypeOf((*VersionURI)(nil)).Elem() +} + +type VslmInfrastructureObjectPolicy struct { + DynamicData + + Name string `xml:"name"` + BackingObjectId string `xml:"backingObjectId"` + ProfileId string `xml:"profileId"` + Error *LocalizedMethodFault `xml:"error,omitempty"` +} + +func init() { + t["vslmInfrastructureObjectPolicy"] = reflect.TypeOf((*VslmInfrastructureObjectPolicy)(nil)).Elem() +} + +type VslmInfrastructureObjectPolicySpec struct { + DynamicData + + Datastore ManagedObjectReference `xml:"datastore"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` +} + +func init() { + t["vslmInfrastructureObjectPolicySpec"] = reflect.TypeOf((*VslmInfrastructureObjectPolicySpec)(nil)).Elem() +} diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/LICENSE b/vendor/github.com/vmware/govmomi/vim25/xml/LICENSE new file mode 100644 index 000000000000..74487567632c --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/xml/LICENSE @@ -0,0 +1,27 @@ +Copyright 
(c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/extras.go b/vendor/github.com/vmware/govmomi/vim25/xml/extras.go new file mode 100644 index 000000000000..9a15b7c8eb6d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/xml/extras.go @@ -0,0 +1,99 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package xml + +import ( + "reflect" + "time" +) + +var xmlSchemaInstance = Name{Space: "http://www.w3.org/2001/XMLSchema-instance", Local: "type"} + +var xsiType = Name{Space: "xsi", Local: "type"} + +var stringToTypeMap = map[string]reflect.Type{ + "xsd:boolean": reflect.TypeOf((*bool)(nil)).Elem(), + "xsd:byte": reflect.TypeOf((*int8)(nil)).Elem(), + "xsd:short": reflect.TypeOf((*int16)(nil)).Elem(), + "xsd:int": reflect.TypeOf((*int32)(nil)).Elem(), + "xsd:long": reflect.TypeOf((*int64)(nil)).Elem(), + "xsd:unsignedByte": reflect.TypeOf((*uint8)(nil)).Elem(), + "xsd:unsignedShort": reflect.TypeOf((*uint16)(nil)).Elem(), + "xsd:unsignedInt": reflect.TypeOf((*uint32)(nil)).Elem(), + "xsd:unsignedLong": reflect.TypeOf((*uint64)(nil)).Elem(), + "xsd:float": reflect.TypeOf((*float32)(nil)).Elem(), + "xsd:double": reflect.TypeOf((*float64)(nil)).Elem(), + "xsd:string": reflect.TypeOf((*string)(nil)).Elem(), + "xsd:dateTime": reflect.TypeOf((*time.Time)(nil)).Elem(), + "xsd:base64Binary": reflect.TypeOf((*[]byte)(nil)).Elem(), +} + +// Return a reflect.Type for the specified type. Nil if unknown. 
+func stringToType(s string) reflect.Type { + return stringToTypeMap[s] +} + +// Return a string for the specified reflect.Type. Panic if unknown. +func typeToString(typ reflect.Type) string { + switch typ.Kind() { + case reflect.Bool: + return "xsd:boolean" + case reflect.Int8: + return "xsd:byte" + case reflect.Int16: + return "xsd:short" + case reflect.Int32: + return "xsd:int" + case reflect.Int, reflect.Int64: + return "xsd:long" + case reflect.Uint8: + return "xsd:unsignedByte" + case reflect.Uint16: + return "xsd:unsignedShort" + case reflect.Uint32: + return "xsd:unsignedInt" + case reflect.Uint, reflect.Uint64: + return "xsd:unsignedLong" + case reflect.Float32: + return "xsd:float" + case reflect.Float64: + return "xsd:double" + case reflect.String: + name := typ.Name() + if name == "string" { + return "xsd:string" + } + return name + case reflect.Struct: + if typ == stringToTypeMap["xsd:dateTime"] { + return "xsd:dateTime" + } + + // Expect any other struct to be handled... + return typ.Name() + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { + return "xsd:base64Binary" + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return "xsd:base64Binary" + } + } + + panic("don't know what to do for type: " + typ.String()) +} diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/marshal.go b/vendor/github.com/vmware/govmomi/vim25/xml/marshal.go new file mode 100644 index 000000000000..39bbac1d1719 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/xml/marshal.go @@ -0,0 +1,949 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bufio" + "bytes" + "encoding" + "fmt" + "io" + "reflect" + "strconv" + "strings" +) + +const ( + // A generic XML header suitable for use with the output of Marshal. + // This is not automatically added to any output of this package, + // it is provided as a convenience. + Header = `<?xml version="1.0" encoding="UTF-8"?>` + "\n" +) + +// Marshal returns the XML encoding of v. +// +// Marshal handles an array or slice by marshalling each of the elements. +// Marshal handles a pointer by marshalling the value it points at or, if the +// pointer is nil, by writing nothing. Marshal handles an interface value by +// marshalling the value it contains or, if the interface value is nil, by +// writing nothing. Marshal handles all other data by writing one or more XML +// elements containing the data. +// +// The name for the XML elements is taken from, in order of preference: +// - the tag on the XMLName field, if the data is a struct +// - the value of the XMLName field of type xml.Name +// - the tag of the struct field used to obtain the data +// - the name of the struct field used to obtain the data +// - the name of the marshalled type +// +// The XML element for a struct contains marshalled elements for each of the +// exported fields of the struct, with these exceptions: +// - the XMLName field, described above, is omitted. +// - a field with tag "-" is omitted. +// - a field with tag "name,attr" becomes an attribute with +// the given name in the XML element. +// - a field with tag ",attr" becomes an attribute with the +// field name in the XML element. +// - a field with tag ",chardata" is written as character data, +// not as an XML element. +// - a field with tag ",innerxml" is written verbatim, not subject +// to the usual marshalling procedure.
+// - a field with tag ",comment" is written as an XML comment, not +// subject to the usual marshalling procedure. It must not contain +// the "--" string within it. +// - a field with a tag including the "omitempty" option is omitted +// if the field value is empty. The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or +// string of length zero. +// - an anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// If a field uses a tag "a>b>c", then the element c will be nested inside +// parent elements a and b. Fields that appear next to each other that name +// the same parent will be enclosed in one XML element. +// +// See MarshalIndent for an example. +// +// Marshal will return an error if asked to marshal a channel, function, or map. +func Marshal(v interface{}) ([]byte, error) { + var b bytes.Buffer + if err := NewEncoder(&b).Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Marshaler is the interface implemented by objects that can marshal +// themselves into valid XML elements. +// +// MarshalXML encodes the receiver as zero or more XML elements. +// By convention, arrays or slices are typically encoded as a sequence +// of elements, one per entry. +// Using start as the element tag is not required, but doing so +// will enable Unmarshal to match the XML elements to the correct +// struct field. +// One common implementation strategy is to construct a separate +// value with a layout corresponding to the desired XML and then +// to encode it using e.EncodeElement. +// Another common strategy is to use repeated calls to e.EncodeToken +// to generate the XML output one token at a time. +// The sequence of encoded tokens must make up zero or more valid +// XML elements. +type Marshaler interface { + MarshalXML(e *Encoder, start StartElement) error +} + +// MarshalerAttr is the interface implemented by objects that can marshal +// themselves into valid XML attributes. +// +// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver. +// Using name as the attribute name is not required, but doing so +// will enable Unmarshal to match the attribute to the correct +// struct field. +// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute +// will be generated in the output. +// MarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type MarshalerAttr interface { + MarshalXMLAttr(name Name) (Attr, error) +} + +// MarshalIndent works like Marshal, but each XML element begins on a new +// indented line that starts with prefix and is followed by one or more +// copies of indent according to the nesting depth. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + var b bytes.Buffer + enc := NewEncoder(&b) + enc.Indent(prefix, indent) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// An Encoder writes XML data to an output stream. +type Encoder struct { + p printer +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{printer{Writer: bufio.NewWriter(w)}} + e.p.encoder = e + return e +} + +// Indent sets the encoder to generate XML in which each element +// begins on a new indented line that starts with prefix and is followed by +// one or more copies of indent according to the nesting depth. 
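For orientation, here is a minimal usage sketch of the Marshal/MarshalIndent API documented above. The Note type, its fields, and its field tags are hypothetical illustrations, not part of this patch, and the sketch assumes the vendored package behaves like the upstream encoding/xml package it is forked from:

    package main

    import (
        "fmt"
        "os"

        "github.com/vmware/govmomi/vim25/xml" // assumed vendored import path
    )

    // Note is a hypothetical type showing the tag forms described above:
    // an attribute, a nested a>b path, omitempty, and character data.
    type Note struct {
        XMLName xml.Name `xml:"note"`
        ID      int      `xml:"id,attr"`
        To      string   `xml:"header>to"`
        From    string   `xml:"header>from,omitempty"`
        Body    string   `xml:",chardata"`
    }

    func main() {
        n := Note{ID: 7, To: "ops", Body: "rotate the certs"}
        out, err := xml.MarshalIndent(n, "", "  ")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        // Header is the generic XML declaration defined in marshal.go above;
        // it is not added automatically, so prepend it by hand if wanted.
        os.Stdout.Write(append([]byte(xml.Header), out...))
    }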
+func (enc *Encoder) Indent(prefix, indent string) { + enc.p.prefix = prefix + enc.p.indent = indent +} + +// Encode writes the XML encoding of v to the stream. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// Encode calls Flush before returning. +func (enc *Encoder) Encode(v interface{}) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil) + if err != nil { + return err + } + return enc.p.Flush() +} + +// EncodeElement writes the XML encoding of v to the stream, +// using start as the outermost tag in the encoding. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// EncodeElement calls Flush before returning. +func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start) + if err != nil { + return err + } + return enc.p.Flush() +} + +var ( + endComment = []byte("-->") + endProcInst = []byte("?>") + endDirective = []byte(">") +) + +// EncodeToken writes the given XML token to the stream. +// It returns an error if StartElement and EndElement tokens are not properly matched. +// +// EncodeToken does not call Flush, because usually it is part of a larger operation +// such as Encode or EncodeElement (or a custom Marshaler's MarshalXML invoked +// during those), and those will call Flush when finished. +// Callers that create an Encoder and then invoke EncodeToken directly, without +// using Encode or EncodeElement, need to call Flush when finished to ensure +// that the XML is written to the underlying writer. +// +// EncodeToken allows writing a ProcInst with Target set to "xml" only as the first token +// in the stream. +func (enc *Encoder) EncodeToken(t Token) error { + p := &enc.p + switch t := t.(type) { + case StartElement: + if err := p.writeStart(&t); err != nil { + return err + } + case EndElement: + if err := p.writeEnd(t.Name); err != nil { + return err + } + case CharData: + EscapeText(p, t) + case Comment: + if bytes.Contains(t, endComment) { + return fmt.Errorf("xml: EncodeToken of Comment containing --> marker") + } + p.WriteString("<!--") + p.Write(t) + p.WriteString("-->") + return p.cachedWriteError() + case ProcInst: + // First token to be encoded which is also a ProcInst with target of xml + // is the xml declaration. The only ProcInst where target of xml is allowed. + if t.Target == "xml" && p.Buffered() != 0 { + return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded") + } + if !isNameString(t.Target) { + return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target") + } + if bytes.Contains(t.Inst, endProcInst) { + return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker") + } + p.WriteString("<?") + p.WriteString(t.Target) + if len(t.Inst) > 0 { + p.WriteByte(' ') + p.Write(t.Inst) + } + p.WriteString("?>") + case Directive: + if bytes.Contains(t, endDirective) { + return fmt.Errorf("xml: EncodeToken of Directive containing > marker") + } + p.WriteString("<!") + p.Write(t) + p.WriteString(">") + } + return p.cachedWriteError() +} + +// Flush flushes any buffered XML to the underlying writer. +// See the EncodeToken documentation for details about when it is necessary.
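A token-level sketch of the Encoder API above, again assuming the same behavior as upstream encoding/xml; the element and attribute names are invented for illustration. Because EncodeToken does not flush, Flush must be called explicitly once the tokens are written:

    package main

    import (
        "log"
        "os"

        "github.com/vmware/govmomi/vim25/xml" // assumed vendored import path
    )

    func main() {
        enc := xml.NewEncoder(os.Stdout)
        start := xml.StartElement{
            Name: xml.Name{Local: "host"},                                 // hypothetical element name
            Attr: []xml.Attr{{Name: xml.Name{Local: "id"}, Value: "h-1"}}, // hypothetical attribute
        }
        // Write a matched start / chardata / end sequence one token at a time.
        for _, tok := range []xml.Token{start, xml.CharData("esx01"), xml.EndElement{Name: start.Name}} {
            if err := enc.EncodeToken(tok); err != nil {
                log.Fatal(err)
            }
        }
        // EncodeToken does not call Flush; do it once when finished.
        if err := enc.Flush(); err != nil {
            log.Fatal(err)
        }
    }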
+func (enc *Encoder) Flush() error { + return enc.p.Flush() +} + +type printer struct { + *bufio.Writer + encoder *Encoder + seq int + indent string + prefix string + depth int + indentedIn bool + putNewline bool + attrNS map[string]string // map prefix -> name space + attrPrefix map[string]string // map name space -> prefix + prefixes []string + tags []Name +} + +// createAttrPrefix finds the name space prefix attribute to use for the given name space, +// defining a new prefix if necessary. It returns the prefix. +func (p *printer) createAttrPrefix(url string) string { + if prefix := p.attrPrefix[url]; prefix != "" { + return prefix + } + + // The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml" + // and must be referred to that way. + // (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns", + // but users should not be trying to use that one directly - that's our job.) + if url == xmlURL { + return "xml" + } + + // Need to define a new name space. + if p.attrPrefix == nil { + p.attrPrefix = make(map[string]string) + p.attrNS = make(map[string]string) + } + + // Pick a name. We try to use the final element of the path + // but fall back to _. + prefix := strings.TrimRight(url, "/") + if i := strings.LastIndex(prefix, "/"); i >= 0 { + prefix = prefix[i+1:] + } + if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") { + prefix = "_" + } + if strings.HasPrefix(prefix, "xml") { + // xmlanything is reserved. + prefix = "_" + prefix + } + if p.attrNS[prefix] != "" { + // Name is taken. Find a better one. + for p.seq++; ; p.seq++ { + if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" { + prefix = id + break + } + } + } + + p.attrPrefix[url] = prefix + p.attrNS[prefix] = url + + p.WriteString(`xmlns:`) + p.WriteString(prefix) + p.WriteString(`="`) + EscapeText(p, []byte(url)) + p.WriteString(`" `) + + p.prefixes = append(p.prefixes, prefix) + + return prefix +} + +// deleteAttrPrefix removes an attribute name space prefix. +func (p *printer) deleteAttrPrefix(prefix string) { + delete(p.attrPrefix, p.attrNS[prefix]) + delete(p.attrNS, prefix) +} + +func (p *printer) markPrefix() { + p.prefixes = append(p.prefixes, "") +} + +func (p *printer) popPrefix() { + for len(p.prefixes) > 0 { + prefix := p.prefixes[len(p.prefixes)-1] + p.prefixes = p.prefixes[:len(p.prefixes)-1] + if prefix == "" { + break + } + p.deleteAttrPrefix(prefix) + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +) + +// marshalValue writes one or more XML elements representing val. +// If val was obtained from a struct field, finfo must have its details. +func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error { + if startTemplate != nil && startTemplate.Name.Local == "" { + return fmt.Errorf("xml: EncodeElement of StartElement with missing name") + } + + if !val.IsValid() { + return nil + } + if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) { + return nil + } + + // Drill into interfaces and pointers. + // This can turn into an infinite loop given a cyclic chain, + // but it matches the Go 1 behavior. + for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + if val.IsNil() { + return nil + } + val = val.Elem() + } + + kind := val.Kind() + typ := val.Type() + + // Check for marshaler. 
+ if val.CanInterface() && typ.Implements(marshalerType) { + return p.marshalInterface(val.Interface().(Marshaler), defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerType) { + return p.marshalInterface(pv.Interface().(Marshaler), defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Check for text marshaler. + if val.CanInterface() && typ.Implements(textMarshalerType) { + return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Slices and arrays iterate over the elements. They do not have an enclosing tag. + if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 { + for i, n := 0, val.Len(); i < n; i++ { + if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil { + return err + } + } + return nil + } + + tinfo, err := getTypeInfo(typ) + if err != nil { + return err + } + + // Create start element. + // Precedence for the XML element name is: + // 0. startTemplate + // 1. XMLName field in underlying struct; + // 2. field name/tag in the struct field; and + // 3. type name + var start StartElement + + if startTemplate != nil { + start.Name = startTemplate.Name + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if tinfo.xmlname != nil { + xmlname := tinfo.xmlname + if xmlname.name != "" { + start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name + } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" { + start.Name = v + } + } + if start.Name.Local == "" && finfo != nil { + start.Name.Space, start.Name.Local = finfo.xmlns, finfo.name + } + if start.Name.Local == "" { + name := typ.Name() + if name == "" { + return &UnsupportedTypeError{typ} + } + start.Name.Local = name + } + + // Add type attribute if necessary + if finfo != nil && finfo.flags&fTypeAttr != 0 { + start.Attr = append(start.Attr, Attr{xmlSchemaInstance, typeToString(typ)}) + } + + // Attributes + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr == 0 { + continue + } + fv := finfo.value(val) + name := Name{Space: finfo.xmlns, Local: finfo.name} + + if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) { + continue + } + + if fv.Kind() == reflect.Interface && fv.IsNil() { + continue + } + + if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) { + attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + if err != nil { + return err + } + if attr.Name.Local != "" { + start.Attr = append(start.Attr, attr) + } + continue + } + + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { + attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + if err != nil { + return err + } + if attr.Name.Local != "" { + start.Attr = append(start.Attr, attr) + } + continue + } + } + + if fv.CanInterface() && fv.Type().Implements(textMarshalerType) { + text, err := fv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + start.Attr = append(start.Attr, Attr{name, string(text)}) + continue + } + + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + text, err := 
pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + start.Attr = append(start.Attr, Attr{name, string(text)}) + continue + } + } + + // Dereference or skip nil pointer, interface values. + switch fv.Kind() { + case reflect.Ptr, reflect.Interface: + if fv.IsNil() { + continue + } + fv = fv.Elem() + } + + s, b, err := p.marshalSimple(fv.Type(), fv) + if err != nil { + return err + } + if b != nil { + s = string(b) + } + start.Attr = append(start.Attr, Attr{name, s}) + } + + if err := p.writeStart(&start); err != nil { + return err + } + + if val.Kind() == reflect.Struct { + err = p.marshalStruct(tinfo, val) + } else { + s, b, err1 := p.marshalSimple(typ, val) + if err1 != nil { + err = err1 + } else if b != nil { + EscapeText(p, b) + } else { + p.EscapeString(s) + } + } + if err != nil { + return err + } + + if err := p.writeEnd(start.Name); err != nil { + return err + } + + return p.cachedWriteError() +} + +// defaultStart returns the default start element to use, +// given the reflect type, field info, and start template. +func defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement { + var start StartElement + // Precedence for the XML element name is as above, + // except that we do not look inside structs for the first field. + if startTemplate != nil { + start.Name = startTemplate.Name + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if finfo != nil && finfo.name != "" { + start.Name.Local = finfo.name + start.Name.Space = finfo.xmlns + } else if typ.Name() != "" { + start.Name.Local = typ.Name() + } else { + // Must be a pointer to a named type, + // since it has the Marshaler methods. + start.Name.Local = typ.Elem().Name() + } + + // Add type attribute if necessary + if finfo != nil && finfo.flags&fTypeAttr != 0 { + start.Attr = append(start.Attr, Attr{xmlSchemaInstance, typeToString(typ)}) + } + + return start +} + +// marshalInterface marshals a Marshaler interface value. +func (p *printer) marshalInterface(val Marshaler, start StartElement) error { + // Push a marker onto the tag stack so that MarshalXML + // cannot close the XML tags that it did not open. + p.tags = append(p.tags, Name{}) + n := len(p.tags) + + err := val.MarshalXML(p.encoder, start) + if err != nil { + return err + } + + // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark. + if len(p.tags) > n { + return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local) + } + p.tags = p.tags[:n-1] + return nil +} + +// marshalTextInterface marshals a TextMarshaler interface value. +func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error { + if err := p.writeStart(&start); err != nil { + return err + } + text, err := val.MarshalText() + if err != nil { + return err + } + EscapeText(p, text) + return p.writeEnd(start.Name) +} + +// writeStart writes the given start element. 
+func (p *printer) writeStart(start *StartElement) error { + if start.Name.Local == "" { + return fmt.Errorf("xml: start tag with no name") + } + + p.tags = append(p.tags, start.Name) + p.markPrefix() + + p.writeIndent(1) + p.WriteByte('<') + p.WriteString(start.Name.Local) + + if start.Name.Space != "" { + p.WriteString(` xmlns="`) + p.EscapeString(start.Name.Space) + p.WriteByte('"') + } + + // Attributes + for _, attr := range start.Attr { + name := attr.Name + if name.Local == "" { + continue + } + p.WriteByte(' ') + if name.Space != "" { + p.WriteString(p.createAttrPrefix(name.Space)) + p.WriteByte(':') + } + p.WriteString(name.Local) + p.WriteString(`="`) + p.EscapeString(attr.Value) + p.WriteByte('"') + } + p.WriteByte('>') + return nil +} + +func (p *printer) writeEnd(name Name) error { + if name.Local == "" { + return fmt.Errorf("xml: end tag with no name") + } + if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" { + return fmt.Errorf("xml: end tag </%s> without start tag", name.Local) + } + if top := p.tags[len(p.tags)-1]; top != name { + if top.Local != name.Local { + return fmt.Errorf("xml: end tag </%s> does not match start tag <%s>", name.Local, top.Local) + } + return fmt.Errorf("xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space) + } + p.tags = p.tags[:len(p.tags)-1] + + p.writeIndent(-1) + p.WriteByte('<') + p.WriteByte('/') + p.WriteString(name.Local) + p.WriteByte('>') + p.popPrefix() + return nil +} + +func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) { + switch val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(val.Int(), 10), nil, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(val.Uint(), 10), nil, nil + case reflect.Float32, reflect.Float64: + return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil + case reflect.String: + return val.String(), nil, nil + case reflect.Bool: + return strconv.FormatBool(val.Bool()), nil, nil + case reflect.Array: + if typ.Elem().Kind() != reflect.Uint8 { + break + } + // [...]byte + var bytes []byte + if val.CanAddr() { + bytes = val.Slice(0, val.Len()).Bytes() + } else { + bytes = make([]byte, val.Len()) + reflect.Copy(reflect.ValueOf(bytes), val) + } + return "", bytes, nil + case reflect.Slice: + if typ.Elem().Kind() != reflect.Uint8 { + break + } + // []byte + return "", val.Bytes(), nil + } + return "", nil, &UnsupportedTypeError{typ} +} + +var ddBytes = []byte("--") + +func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { + s := parentStack{p: p} + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr != 0 { + continue + } + vf := finfo.value(val) + + // Dereference or skip nil pointer, interface values.
+ switch vf.Kind() { + case reflect.Ptr, reflect.Interface: + if !vf.IsNil() { + vf = vf.Elem() + } + } + + switch finfo.flags & fMode { + case fCharData: + if vf.CanInterface() && vf.Type().Implements(textMarshalerType) { + data, err := vf.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + Escape(p, data) + continue + } + if vf.CanAddr() { + pv := vf.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + data, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + Escape(p, data) + continue + } + } + var scratch [64]byte + switch vf.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10)) + case reflect.Float32, reflect.Float64: + Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits())) + case reflect.Bool: + Escape(p, strconv.AppendBool(scratch[:0], vf.Bool())) + case reflect.String: + if err := EscapeText(p, []byte(vf.String())); err != nil { + return err + } + case reflect.Slice: + if elem, ok := vf.Interface().([]byte); ok { + if err := EscapeText(p, elem); err != nil { + return err + } + } + } + continue + + case fComment: + k := vf.Kind() + if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) { + return fmt.Errorf("xml: bad type for comment field of %s", val.Type()) + } + if vf.Len() == 0 { + continue + } + p.writeIndent(0) + p.WriteString("<!--") + dashDash := false + dashLast := false + switch k { + case reflect.String: + s := vf.String() + dashDash = strings.Index(s, "--") >= 0 + dashLast = s[len(s)-1] == '-' + if !dashDash { + p.WriteString(s) + } + case reflect.Slice: + b := vf.Bytes() + dashDash = bytes.Index(b, ddBytes) >= 0 + dashLast = b[len(b)-1] == '-' + if !dashDash { + p.Write(b) + } + default: + panic("can't happen") + } + if dashDash { + return fmt.Errorf(`xml: comments must not contain "--"`) + } + if dashLast { + // "--->" is invalid grammar. Make it "- -->" + p.WriteByte(' ') + } + p.WriteString("-->") + continue + + case fInnerXml: + iface := vf.Interface() + switch raw := iface.(type) { + case []byte: + p.Write(raw) + continue + case string: + p.WriteString(raw) + continue + } + + case fElement, fElement | fAny: + if err := s.trim(finfo.parents); err != nil { + return err + } + if len(finfo.parents) > len(s.stack) { + if vf.Kind() != reflect.Ptr && vf.Kind() != reflect.Interface || !vf.IsNil() { + if err := s.push(finfo.parents[len(s.stack):]); err != nil { + return err + } + } + } + } + if err := p.marshalValue(vf, finfo, nil); err != nil { + return err + } + } + s.trim(nil) + return p.cachedWriteError() +} + +// return the bufio Writer's cached write error +func (p *printer) cachedWriteError() error { + _, err := p.Write(nil) + return err +} + +func (p *printer) writeIndent(depthDelta int) { + if len(p.prefix) == 0 && len(p.indent) == 0 { + return + } + if depthDelta < 0 { + p.depth-- + if p.indentedIn { + p.indentedIn = false + return + } + p.indentedIn = false + } + if p.putNewline { + p.WriteByte('\n') + } else { + p.putNewline = true + } + if len(p.prefix) > 0 { + p.WriteString(p.prefix) + } + if len(p.indent) > 0 { + for i := 0; i < p.depth; i++ { + p.WriteString(p.indent) + } + } + if depthDelta > 0 { + p.depth++ + p.indentedIn = true + } +} + +type parentStack struct { + p *printer + stack []string +} + +// trim updates the XML context to match the longest common prefix of the stack +// and the given parents. A closing tag will be written for every parent +// popped. Passing a zero slice or nil will close all the elements.
+func (s *parentStack) trim(parents []string) error { + split := 0 + for ; split < len(parents) && split < len(s.stack); split++ { + if parents[split] != s.stack[split] { + break + } + } + for i := len(s.stack) - 1; i >= split; i-- { + if err := s.p.writeEnd(Name{Local: s.stack[i]}); err != nil { + return err + } + } + s.stack = parents[:split] + return nil +} + +// push adds parent elements to the stack and writes open tags. +func (s *parentStack) push(parents []string) error { + for i := 0; i < len(parents); i++ { + if err := s.p.writeStart(&StartElement{Name: Name{Local: parents[i]}}); err != nil { + return err + } + } + s.stack = append(s.stack, parents...) + return nil +} + +// A MarshalXMLError is returned when Marshal encounters a type +// that cannot be converted into XML. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "xml: unsupported type: " + e.Type.String() +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/read.go b/vendor/github.com/vmware/govmomi/vim25/xml/read.go new file mode 100644 index 000000000000..fe35fce6ca45 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/xml/read.go @@ -0,0 +1,781 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +// BUG(rsc): Mapping between XML elements and data structures is inherently flawed: +// an XML element is an order-dependent collection of anonymous +// values, while a data structure is an order-independent collection +// of named values. +// See package json for a textual representation more suitable +// to data structures. + +// Unmarshal parses the XML-encoded data and stores the result in +// the value pointed to by v, which must be an arbitrary struct, +// slice, or string. Well-formed data that does not fit into v is +// discarded. +// +// Because Unmarshal uses the reflect package, it can only assign +// to exported (upper case) fields. Unmarshal uses a case-sensitive +// comparison to match XML element names to tag values and struct +// field names. +// +// Unmarshal maps an XML element to a struct using the following rules. +// In the rules, the tag of a field refers to the value associated with the +// key 'xml' in the struct field's tag (see the example above). +// +// * If the struct has a field of type []byte or string with tag +// ",innerxml", Unmarshal accumulates the raw XML nested inside the +// element in that field. The rest of the rules still apply. +// +// * If the struct has a field named XMLName of type xml.Name, +// Unmarshal records the element name in that field. 
+// +// * If the XMLName field has an associated tag of the form +// "name" or "namespace-URL name", the XML element must have +// the given name (and, optionally, name space) or else Unmarshal +// returns an error. +// +// * If the XML element has an attribute whose name matches a +// struct field name with an associated tag containing ",attr" or +// the explicit name in a struct field tag of the form "name,attr", +// Unmarshal records the attribute value in that field. +// +// * If the XML element contains character data, that data is +// accumulated in the first struct field that has tag ",chardata". +// The struct field may have type []byte or string. +// If there is no such field, the character data is discarded. +// +// * If the XML element contains comments, they are accumulated in +// the first struct field that has tag ",comment". The struct +// field may have type []byte or string. If there is no such +// field, the comments are discarded. +// +// * If the XML element contains a sub-element whose name matches +// the prefix of a tag formatted as "a" or "a>b>c", unmarshal +// will descend into the XML structure looking for elements with the +// given names, and will map the innermost elements to that struct +// field. A tag starting with ">" is equivalent to one starting +// with the field name followed by ">". +// +// * If the XML element contains a sub-element whose name matches +// a struct field's XMLName tag and the struct field has no +// explicit name tag as per the previous rule, unmarshal maps +// the sub-element to that struct field. +// +// * If the XML element contains a sub-element whose name matches a +// field without any mode flags (",attr", ",chardata", etc), Unmarshal +// maps the sub-element to that struct field. +// +// * If the XML element contains a sub-element that hasn't matched any +// of the above rules and the struct has a field with tag ",any", +// unmarshal maps the sub-element to that struct field. +// +// * An anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// * A struct field with tag "-" is never unmarshalled into. +// +// Unmarshal maps an XML element to a string or []byte by saving the +// concatenation of that element's character data in the string or +// []byte. The saved []byte is never nil. +// +// Unmarshal maps an attribute value to a string or []byte by saving +// the value in the string or slice. +// +// Unmarshal maps an XML element to a slice by extending the length of +// the slice and mapping the element to the newly created value. +// +// Unmarshal maps an XML element or attribute value to a bool by +// setting it to the boolean value represented by the string. +// +// Unmarshal maps an XML element or attribute value to an integer or +// floating-point field by setting the field to the result of +// interpreting the string value in decimal. There is no check for +// overflow. +// +// Unmarshal maps an XML element to an xml.Name by recording the +// element name. +// +// Unmarshal maps an XML element to a pointer by setting the pointer +// to a freshly allocated value and then mapping the element to that value. +// +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +// Decode works like xml.Unmarshal, except it reads the decoder +// stream to find the start element. 
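To complement the encoding sketch earlier, here is a minimal decoding sketch against the Unmarshal rules listed above; the Server type and the XML snippet are hypothetical, and the sketch assumes the vendored fork matches upstream encoding/xml for these basics:

    package main

    import (
        "fmt"
        "log"

        "github.com/vmware/govmomi/vim25/xml" // assumed vendored import path
    )

    // Server is a hypothetical type mapping an attribute, a nested
    // element path, and a plain sub-element, per the rules above.
    type Server struct {
        XMLName xml.Name `xml:"server"`
        ID      string   `xml:"id,attr"`
        Name    string   `xml:"meta>name"`
        Addr    string   `xml:"addr"`
    }

    func main() {
        data := []byte(`<server id="s-1"><meta><name>esx01</name></meta><addr>10.0.0.5</addr></server>`)
        var s Server
        if err := xml.Unmarshal(data, &s); err != nil {
            log.Fatal(err)
        }
        fmt.Println(s.ID, s.Name, s.Addr) // s-1 esx01 10.0.0.5
    }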
+func (d *Decoder) Decode(v interface{}) error { + return d.DecodeElement(v, nil) +} + +// DecodeElement works like xml.Unmarshal except that it takes +// a pointer to the start XML element to decode into v. +// It is useful when a client reads some raw XML tokens itself +// but also wants to defer to Unmarshal for some elements. +func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr { + return errors.New("non-pointer passed to Unmarshal") + } + return d.unmarshal(val.Elem(), start) +} + +// An UnmarshalError represents an error in the unmarshalling process. +type UnmarshalError string + +func (e UnmarshalError) Error() string { return string(e) } + +// Unmarshaler is the interface implemented by objects that can unmarshal +// an XML element description of themselves. +// +// UnmarshalXML decodes a single XML element +// beginning with the given start element. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXML must consume exactly one XML element. +// One common implementation strategy is to unmarshal into +// a separate value with a layout matching the expected XML +// using d.DecodeElement, and then to copy the data from +// that value into the receiver. +// Another common strategy is to use d.Token to process the +// XML object one token at a time. +// UnmarshalXML may not use d.RawToken. +type Unmarshaler interface { + UnmarshalXML(d *Decoder, start StartElement) error +} + +// UnmarshalerAttr is the interface implemented by objects that can unmarshal +// an XML attribute description of themselves. +// +// UnmarshalXMLAttr decodes a single XML attribute. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type UnmarshalerAttr interface { + UnmarshalXMLAttr(attr Attr) error +} + +// receiverType returns the receiver type to use in an expression like "%s.MethodName". +func receiverType(val interface{}) string { + t := reflect.TypeOf(val) + if t.Name() != "" { + return t.String() + } + return "(" + t.String() + ")" +} + +// unmarshalInterface unmarshals a single XML element into val. +// start is the opening tag of the element. +func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { + // Record that decoder must stop at end tag corresponding to start. + p.pushEOF() + + p.unmarshalDepth++ + err := val.UnmarshalXML(p, *start) + p.unmarshalDepth-- + if err != nil { + p.popEOF() + return err + } + + if !p.popEOF() { + return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) + } + + return nil +} + +// unmarshalTextInterface unmarshals a single XML element into val. +// The chardata contained in the element (but not its children) +// is passed to the text unmarshaler. +func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error { + var buf []byte + depth := 1 + for depth > 0 { + t, err := p.Token() + if err != nil { + return err + } + switch t := t.(type) { + case CharData: + if depth == 1 { + buf = append(buf, t...) + } + case StartElement: + depth++ + case EndElement: + depth-- + } + } + return val.UnmarshalText(buf) +} + +// unmarshalAttr unmarshals a single XML attribute into val. 
+func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { + return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + } + + // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + } + + copyValue(val, []byte(attr.Value)) + return nil +} + +var ( + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// Find reflect.Type for an element's type attribute. +func (p *Decoder) typeForElement(val reflect.Value, start *StartElement) reflect.Type { + t := "" + for i, a := range start.Attr { + if a.Name == xmlSchemaInstance || a.Name == xsiType { + t = a.Value + // HACK: ensure xsi:type is last in the list to avoid using that value for + // a "type" attribute, such as ManagedObjectReference.Type for example. + // Note that xsi:type is already the last attribute in VC/ESX responses. + // This is only an issue with govmomi simulator generated responses. + // Proper fix will require finding a few needles in this xml package haystack. + // Note: govmomi uses xmlSchemaInstance, other clients (e.g. rbvmomi) use xsiType. + // They are the same thing to XML parsers, but not to this hack here. + x := len(start.Attr) - 1 + if i != x { + start.Attr[i] = start.Attr[x] + start.Attr[x] = a + } + break + } + } + + if t == "" { + // No type attribute; fall back to looking up type by interface name. + t = val.Type().Name() + } + + // Maybe the type is a basic xsd:* type. + typ := stringToType(t) + if typ != nil { + return typ + } + + // Maybe the type is a custom type. + if p.TypeFunc != nil { + if typ, ok := p.TypeFunc(t); ok { + return typ + } + } + + return nil +} + +// Unmarshal a single XML element into val. +func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { + // Find start element if we need it. + if start == nil { + for { + tok, err := p.Token() + if err != nil { + return err + } + if t, ok := tok.(StartElement); ok { + start = &t + break + } + } + } + + // Try to figure out type for empty interface values. 
+ if val.Kind() == reflect.Interface && val.IsNil() { + typ := p.typeForElement(val, start) + if typ != nil { + pval := reflect.New(typ).Elem() + err := p.unmarshal(pval, start) + if err != nil { + return err + } + + for i := 0; i < 2; i++ { + if typ.Implements(val.Type()) { + val.Set(pval) + return nil + } + + typ = reflect.PtrTo(typ) + pval = pval.Addr() + } + + val.Set(pval) + return nil + } + } + + // Load value from interface, but only if the result will be + // usefully addressable. + if val.Kind() == reflect.Interface && !val.IsNil() { + e := val.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() { + val = e + } + } + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return p.unmarshalInterface(val.Interface().(Unmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { + return p.unmarshalInterface(pv.Interface().(Unmarshaler), start) + } + } + + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start) + } + } + + var ( + data []byte + saveData reflect.Value + comment []byte + saveComment reflect.Value + saveXML reflect.Value + saveXMLIndex int + saveXMLData []byte + saveAny reflect.Value + sv reflect.Value + tinfo *typeInfo + err error + ) + + switch v := val; v.Kind() { + default: + return errors.New("unknown type " + v.Type().String()) + + case reflect.Interface: + // TODO: For now, simply ignore the field. In the near + // future we may choose to unmarshal the start + // element on it, if not nil. + return p.Skip() + + case reflect.Slice: + typ := v.Type() + if typ.Elem().Kind() == reflect.Uint8 { + // []byte + saveData = v + break + } + + // Slice of element values. + // Grow slice. + n := v.Len() + if n >= v.Cap() { + ncap := 2 * n + if ncap < 4 { + ncap = 4 + } + new := reflect.MakeSlice(typ, n, ncap) + reflect.Copy(new, v) + v.Set(new) + } + v.SetLen(n + 1) + + // Recur to read element into slice. + if err := p.unmarshal(v.Index(n), start); err != nil { + v.SetLen(n) + return err + } + return nil + + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String: + saveData = v + + case reflect.Struct: + typ := v.Type() + if typ == nameType { + v.Set(reflect.ValueOf(start.Name)) + break + } + + sv = v + tinfo, err = getTypeInfo(typ) + if err != nil { + return err + } + + // Validate and assign element name. 
+ if tinfo.xmlname != nil { + finfo := tinfo.xmlname + if finfo.name != "" && finfo.name != start.Name.Local { + return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">") + } + if finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have " + if start.Name.Space == "" { + e += "no name space" + } else { + e += start.Name.Space + } + return UnmarshalError(e) + } + fv := finfo.value(sv) + if _, ok := fv.Interface().(Name); ok { + fv.Set(reflect.ValueOf(start.Name)) + } + } + + // Assign attributes. + // Also, determine whether we need to save character data or comments. + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fAttr: + strv := finfo.value(sv) + // Look for attribute. + for _, a := range start.Attr { + if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { + if err := p.unmarshalAttr(strv, a); err != nil { + return err + } + break + } + } + + case fCharData: + if !saveData.IsValid() { + saveData = finfo.value(sv) + } + + case fComment: + if !saveComment.IsValid() { + saveComment = finfo.value(sv) + } + + case fAny, fAny | fElement: + if !saveAny.IsValid() { + saveAny = finfo.value(sv) + } + + case fInnerXml: + if !saveXML.IsValid() { + saveXML = finfo.value(sv) + if p.saved == nil { + saveXMLIndex = 0 + p.saved = new(bytes.Buffer) + } else { + saveXMLIndex = p.savedOffset() + } + } + } + } + } + + // Find end element. + // Process sub-elements along the way. +Loop: + for { + var savedOffset int + if saveXML.IsValid() { + savedOffset = p.savedOffset() + } + tok, err := p.Token() + if err != nil { + return err + } + switch t := tok.(type) { + case StartElement: + consumed := false + if sv.IsValid() { + consumed, err = p.unmarshalPath(tinfo, sv, nil, &t) + if err != nil { + return err + } + if !consumed && saveAny.IsValid() { + consumed = true + if err := p.unmarshal(saveAny, &t); err != nil { + return err + } + } + } + if !consumed { + if err := p.Skip(); err != nil { + return err + } + } + + case EndElement: + if saveXML.IsValid() { + saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset] + if saveXMLIndex == 0 { + p.saved = nil + } + } + break Loop + + case CharData: + if saveData.IsValid() { + data = append(data, t...) + } + + case Comment: + if saveComment.IsValid() { + comment = append(comment, t...) 
+ } + } + } + + if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { + if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + + if saveData.IsValid() && saveData.CanAddr() { + pv := saveData.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + } + + if err := copyValue(saveData, data); err != nil { + return err + } + + switch t := saveComment; t.Kind() { + case reflect.String: + t.SetString(string(comment)) + case reflect.Slice: + t.Set(reflect.ValueOf(comment)) + } + + switch t := saveXML; t.Kind() { + case reflect.String: + t.SetString(string(saveXMLData)) + case reflect.Slice: + t.Set(reflect.ValueOf(saveXMLData)) + } + + return nil +} + +func copyValue(dst reflect.Value, src []byte) (err error) { + dst0 := dst + + if dst.Kind() == reflect.Ptr { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + // Save accumulated data. + switch dst.Kind() { + case reflect.Invalid: + // Probably a comment. + default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetInt(itmp) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + var utmp uint64 + if len(src) > 0 && src[0] == '-' { + // Negative value for unsigned field. + // Assume it was serialized following two's complement. + itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + // Reinterpret value based on type width. + switch dst.Type().Bits() { + case 8: + utmp = uint64(uint8(itmp)) + case 16: + utmp = uint64(uint16(itmp)) + case 32: + utmp = uint64(uint32(itmp)) + case 64: + utmp = uint64(uint64(itmp)) + } + } else { + utmp, err = strconv.ParseUint(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + } + dst.SetUint(utmp) + case reflect.Float32, reflect.Float64: + ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits()) + if err != nil { + return err + } + dst.SetFloat(ftmp) + case reflect.Bool: + value, err := strconv.ParseBool(strings.TrimSpace(string(src))) + if err != nil { + return err + } + dst.SetBool(value) + case reflect.String: + dst.SetString(string(src)) + case reflect.Slice: + if len(src) == 0 { + // non-nil to flag presence + src = []byte{} + } + dst.SetBytes(src) + } + return nil +} + +// unmarshalPath walks down an XML structure looking for wanted +// paths, and calls unmarshal on them. +// The consumed result tells whether XML elements have been consumed +// from the Decoder until start's matching end element, or if it's +// still untouched because start is uninteresting for sv's fields. 
+func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { + recurse := false +Loop: + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + continue + } + for j := range parents { + if parents[j] != finfo.parents[j] { + continue Loop + } + } + if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { + // It's a perfect match, unmarshal the field. + return true, p.unmarshal(finfo.value(sv), start) + } + if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { + // It's a prefix for the field. Break and recurse + // since it's not ok for one field path to be itself + // the prefix for another field path. + recurse = true + + // We can reuse the same slice as long as we + // don't try to append to it. + parents = finfo.parents[:len(parents)+1] + break + } + } + if !recurse { + // We have no business with this element. + return false, nil + } + // The element is not a perfect match for any field, but one + // or more fields have the path to this element as a parent + // prefix. Recurse and attempt to match these. + for { + var tok Token + tok, err = p.Token() + if err != nil { + return true, err + } + switch t := tok.(type) { + case StartElement: + consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t) + if err != nil { + return true, err + } + if !consumed2 { + if err := p.Skip(); err != nil { + return true, err + } + } + case EndElement: + return true, nil + } + } +} + +// Skip reads tokens until it has consumed the end element +// matching the most recent start element already consumed. +// It recurs if it encounters a start element, so it can be used to +// skip nested structures. +// It returns nil if it finds an end element matching the start +// element; otherwise it returns an error describing the problem. +func (d *Decoder) Skip() error { + for { + tok, err := d.Token() + if err != nil { + return err + } + switch tok.(type) { + case StartElement: + if err := d.Skip(); err != nil { + return err + } + case EndElement: + return nil + } + } +} diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go b/vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go new file mode 100644 index 000000000000..086e83b69913 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go @@ -0,0 +1,366 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// typeInfo holds details for the xml representation of a type. +type typeInfo struct { + xmlname *fieldInfo + fields []fieldInfo +} + +// fieldInfo holds details for the xml representation of a single field. +type fieldInfo struct { + idx []int + name string + xmlns string + flags fieldFlags + parents []string +} + +type fieldFlags int + +const ( + fElement fieldFlags = 1 << iota + fAttr + fCharData + fInnerXml + fComment + fAny + + fOmitEmpty + fTypeAttr + + fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny +) + +var tinfoMap = make(map[reflect.Type]*typeInfo) +var tinfoLock sync.RWMutex + +var nameType = reflect.TypeOf(Name{}) + +// getTypeInfo returns the typeInfo structure with details necessary +// for marshalling and unmarshalling typ. 
+func getTypeInfo(typ reflect.Type) (*typeInfo, error) { + tinfoLock.RLock() + tinfo, ok := tinfoMap[typ] + tinfoLock.RUnlock() + if ok { + return tinfo, nil + } + tinfo = &typeInfo{} + if typ.Kind() == reflect.Struct && typ != nameType { + n := typ.NumField() + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.PkgPath != "" || f.Tag.Get("xml") == "-" { + continue // Private field + } + + // For embedded structs, embed its fields. + if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Struct { + inner, err := getTypeInfo(t) + if err != nil { + return nil, err + } + if tinfo.xmlname == nil { + tinfo.xmlname = inner.xmlname + } + for _, finfo := range inner.fields { + finfo.idx = append([]int{i}, finfo.idx...) + if err := addFieldInfo(typ, tinfo, &finfo); err != nil { + return nil, err + } + } + continue + } + } + + finfo, err := structFieldInfo(typ, &f) + if err != nil { + return nil, err + } + + if f.Name == "XMLName" { + tinfo.xmlname = finfo + continue + } + + // Add the field if it doesn't conflict with other fields. + if err := addFieldInfo(typ, tinfo, finfo); err != nil { + return nil, err + } + } + } + tinfoLock.Lock() + tinfoMap[typ] = tinfo + tinfoLock.Unlock() + return tinfo, nil +} + +// structFieldInfo builds and returns a fieldInfo for f. +func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) { + finfo := &fieldInfo{idx: f.Index} + + // Split the tag from the xml namespace if necessary. + tag := f.Tag.Get("xml") + if i := strings.Index(tag, " "); i >= 0 { + finfo.xmlns, tag = tag[:i], tag[i+1:] + } + + // Parse flags. + tokens := strings.Split(tag, ",") + if len(tokens) == 1 { + finfo.flags = fElement + } else { + tag = tokens[0] + for _, flag := range tokens[1:] { + switch flag { + case "attr": + finfo.flags |= fAttr + case "chardata": + finfo.flags |= fCharData + case "innerxml": + finfo.flags |= fInnerXml + case "comment": + finfo.flags |= fComment + case "any": + finfo.flags |= fAny + case "omitempty": + finfo.flags |= fOmitEmpty + case "typeattr": + finfo.flags |= fTypeAttr + } + } + + // Validate the flags used. + valid := true + switch mode := finfo.flags & fMode; mode { + case 0: + finfo.flags |= fElement + case fAttr, fCharData, fInnerXml, fComment, fAny: + if f.Name == "XMLName" || tag != "" && mode != fAttr { + valid = false + } + default: + // This will also catch multiple modes in a single field. + valid = false + } + if finfo.flags&fMode == fAny { + finfo.flags |= fElement + } + if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 { + valid = false + } + if !valid { + return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + } + + // Use of xmlns without a name is not allowed. + if finfo.xmlns != "" && tag == "" { + return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + + if f.Name == "XMLName" { + // The XMLName field records the XML element name. Don't + // process it as usual because its name should default to + // empty rather than to the field name. + finfo.name = tag + return finfo, nil + } + + if tag == "" { + // If the name part of the tag is completely empty, get + // default from XMLName of underlying struct if feasible, + // or field name otherwise. 
+ if xmlname := lookupXMLName(f.Type); xmlname != nil { + finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name + } else { + finfo.name = f.Name + } + return finfo, nil + } + + // Prepare field name and parents. + parents := strings.Split(tag, ">") + if parents[0] == "" { + parents[0] = f.Name + } + if parents[len(parents)-1] == "" { + return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ) + } + finfo.name = parents[len(parents)-1] + if len(parents) > 1 { + if (finfo.flags & fElement) == 0 { + return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ",")) + } + finfo.parents = parents[:len(parents)-1] + } + + // If the field type has an XMLName field, the names must match + // so that the behavior of both marshalling and unmarshalling + // is straightforward and unambiguous. + if finfo.flags&fElement != 0 { + ftyp := f.Type + xmlname := lookupXMLName(ftyp) + if xmlname != nil && xmlname.name != finfo.name { + return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName", + finfo.name, typ, f.Name, xmlname.name, ftyp) + } + } + return finfo, nil +} + +// lookupXMLName returns the fieldInfo for typ's XMLName field +// in case it exists and has a valid xml field tag, otherwise +// it returns nil. +func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + if typ.Kind() != reflect.Struct { + return nil + } + for i, n := 0, typ.NumField(); i < n; i++ { + f := typ.Field(i) + if f.Name != "XMLName" { + continue + } + finfo, err := structFieldInfo(typ, &f) + if finfo.name != "" && err == nil { + return finfo + } + // Also consider errors as a non-existent field tag + // and let getTypeInfo itself report the error. + break + } + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +// addFieldInfo adds finfo to tinfo.fields if there are no +// conflicts, or if conflicts arise from previous fields that were +// obtained from deeper embedded structures than finfo. In the latter +// case, the conflicting entries are dropped. +// A conflict occurs when the path (parent + name) to a field is +// itself a prefix of another path, or when two paths match exactly. +// It is okay for field paths to share a common, shorter prefix. +func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error { + var conflicts []int +Loop: + // First, figure all conflicts. Most working code will have none. + for i := range tinfo.fields { + oldf := &tinfo.fields[i] + if oldf.flags&fMode != newf.flags&fMode { + continue + } + if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns { + continue + } + minl := min(len(newf.parents), len(oldf.parents)) + for p := 0; p < minl; p++ { + if oldf.parents[p] != newf.parents[p] { + continue Loop + } + } + if len(oldf.parents) > len(newf.parents) { + if oldf.parents[len(newf.parents)] == newf.name { + conflicts = append(conflicts, i) + } + } else if len(oldf.parents) < len(newf.parents) { + if newf.parents[len(oldf.parents)] == oldf.name { + conflicts = append(conflicts, i) + } + } else { + if newf.name == oldf.name { + conflicts = append(conflicts, i) + } + } + } + // Without conflicts, add the new field and return. + if conflicts == nil { + tinfo.fields = append(tinfo.fields, *newf) + return nil + } + + // If any conflict is shallower, ignore the new field. + // This matches the Go field resolution on embedding. 
+ for _, i := range conflicts { + if len(tinfo.fields[i].idx) < len(newf.idx) { + return nil + } + } + + // Otherwise, if any of them is at the same depth level, it's an error. + for _, i := range conflicts { + oldf := &tinfo.fields[i] + if len(oldf.idx) == len(newf.idx) { + f1 := typ.FieldByIndex(oldf.idx) + f2 := typ.FieldByIndex(newf.idx) + return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")} + } + } + + // Otherwise, the new field is shallower, and thus takes precedence, + // so drop the conflicting fields from tinfo and append the new one. + for c := len(conflicts) - 1; c >= 0; c-- { + i := conflicts[c] + copy(tinfo.fields[i:], tinfo.fields[i+1:]) + tinfo.fields = tinfo.fields[:len(tinfo.fields)-1] + } + tinfo.fields = append(tinfo.fields, *newf) + return nil +} + +// A TagPathError represents an error in the unmarshalling process +// caused by the use of field tags with conflicting paths. +type TagPathError struct { + Struct reflect.Type + Field1, Tag1 string + Field2, Tag2 string +} + +func (e *TagPathError) Error() string { + return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) +} + +// value returns v's field value corresponding to finfo. +// It's equivalent to v.FieldByIndex(finfo.idx), but initializes +// and dereferences pointers as necessary. +func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { + for i, x := range finfo.idx { + if i > 0 { + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v +} diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/xml.go b/vendor/github.com/vmware/govmomi/vim25/xml/xml.go new file mode 100644 index 000000000000..6c6c5c8212b8 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/xml/xml.go @@ -0,0 +1,1939 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xml implements a simple XML 1.0 parser that +// understands XML name spaces. +package xml + +// References: +// Annotated XML spec: http://www.xml.com/axml/testaxml.htm +// XML name spaces: http://www.w3.org/TR/REC-xml-names/ + +// TODO(rsc): +// Test error handling. + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A SyntaxError represents a syntax error in the XML input stream. +type SyntaxError struct { + Msg string + Line int +} + +func (e *SyntaxError) Error() string { + return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg +} + +// A Name represents an XML name (Local) annotated +// with a name space identifier (Space). +// In tokens returned by Decoder.Token, the Space identifier +// is given as a canonical URL, not the short prefix used +// in the document being parsed. +type Name struct { + Space, Local string +} + +// An Attr represents an attribute in an XML element (Name=Value). +type Attr struct { + Name Name + Value string +} + +// A Token is an interface holding one of the token types: +// StartElement, EndElement, CharData, Comment, ProcInst, or Directive. +type Token interface{} + +// A StartElement represents an XML start element. 
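addFieldInfo's conflict rule surfaces to callers as a TagPathError: two fields at the same embedding depth whose tag paths overlap cannot coexist. A small sketch of that behavior, again using the standard encoding/xml (same lineage as this vendored package); the conflicting struct below is hypothetical.

package main

import (
	"encoding/xml"
	"fmt"
)

// conflicting declares two fields whose paths collide: "x" is a prefix
// of "x>y" at the same depth, which addFieldInfo rejects.
type conflicting struct {
	A string `xml:"x>y"`
	B string `xml:"x"`
}

func main() {
	var c conflicting
	err := xml.Unmarshal([]byte(`<conflicting/>`), &c)
	// Expect a tag-path conflict error naming fields A and B.
	fmt.Printf("%T: %v\n", err, err)
}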
+type StartElement struct { + Name Name + Attr []Attr +} + +func (e StartElement) Copy() StartElement { + attrs := make([]Attr, len(e.Attr)) + copy(attrs, e.Attr) + e.Attr = attrs + return e +} + +// End returns the corresponding XML end element. +func (e StartElement) End() EndElement { + return EndElement{e.Name} +} + +// An EndElement represents an XML end element. +type EndElement struct { + Name Name +} + +// A CharData represents XML character data (raw text), +// in which XML escape sequences have been replaced by +// the characters they represent. +type CharData []byte + +func makeCopy(b []byte) []byte { + b1 := make([]byte, len(b)) + copy(b1, b) + return b1 +} + +func (c CharData) Copy() CharData { return CharData(makeCopy(c)) } + +// A Comment represents an XML comment of the form . +// The bytes do not include the comment markers. +type Comment []byte + +func (c Comment) Copy() Comment { return Comment(makeCopy(c)) } + +// A ProcInst represents an XML processing instruction of the form +type ProcInst struct { + Target string + Inst []byte +} + +func (p ProcInst) Copy() ProcInst { + p.Inst = makeCopy(p.Inst) + return p +} + +// A Directive represents an XML directive of the form . +// The bytes do not include the markers. +type Directive []byte + +func (d Directive) Copy() Directive { return Directive(makeCopy(d)) } + +// CopyToken returns a copy of a Token. +func CopyToken(t Token) Token { + switch v := t.(type) { + case CharData: + return v.Copy() + case Comment: + return v.Copy() + case Directive: + return v.Copy() + case ProcInst: + return v.Copy() + case StartElement: + return v.Copy() + } + return t +} + +// A Decoder represents an XML parser reading a particular input stream. +// The parser assumes that its input is encoded in UTF-8. +type Decoder struct { + // Strict defaults to true, enforcing the requirements + // of the XML specification. + // If set to false, the parser allows input containing common + // mistakes: + // * If an element is missing an end tag, the parser invents + // end tags as necessary to keep the return values from Token + // properly balanced. + // * In attribute values and character data, unknown or malformed + // character entities (sequences beginning with &) are left alone. + // + // Setting: + // + // d.Strict = false; + // d.AutoClose = HTMLAutoClose; + // d.Entity = HTMLEntity + // + // creates a parser that can handle typical HTML. + // + // Strict mode does not enforce the requirements of the XML name spaces TR. + // In particular it does not reject name space tags using undefined prefixes. + // Such tags are recorded with the unknown prefix as the name space URL. + Strict bool + + // When Strict == false, AutoClose indicates a set of elements to + // consider closed immediately after they are opened, regardless + // of whether an end element is present. + AutoClose []string + + // Entity can be used to map non-standard entity names to string replacements. + // The parser behaves as if these standard mappings are present in the map, + // regardless of the actual map content: + // + // "lt": "<", + // "gt": ">", + // "amp": "&", + // "apos": "'", + // "quot": `"`, + Entity map[string]string + + // CharsetReader, if non-nil, defines a function to generate + // charset-conversion readers, converting from the provided + // non-UTF-8 charset into UTF-8. If CharsetReader is nil or + // returns an error, parsing stops with an error. One of the + // the CharsetReader's result values must be non-nil. 
+ CharsetReader func(charset string, input io.Reader) (io.Reader, error) + + // DefaultSpace sets the default name space used for unadorned tags, + // as if the entire XML stream were wrapped in an element containing + // the attribute xmlns="DefaultSpace". + DefaultSpace string + + // TypeFunc is used to map type names to actual types. + TypeFunc func(string) (reflect.Type, bool) + + r io.ByteReader + buf bytes.Buffer + saved *bytes.Buffer + stk *stack + free *stack + needClose bool + toClose Name + nextToken Token + nextByte int + ns map[string]string + err error + line int + unmarshalDepth int +} + +// NewDecoder creates a new XML parser reading from r. +// If r does not implement io.ByteReader, NewDecoder will +// do its own buffering. +func NewDecoder(r io.Reader) *Decoder { + d := &Decoder{ + ns: make(map[string]string), + nextByte: -1, + line: 1, + Strict: true, + } + d.switchToReader(r) + return d +} + +// Token returns the next XML token in the input stream. +// At the end of the input stream, Token returns nil, io.EOF. +// +// Slices of bytes in the returned token data refer to the +// parser's internal buffer and remain valid only until the next +// call to Token. To acquire a copy of the bytes, call CopyToken +// or the token's Copy method. +// +// Token expands self-closing elements such as
    +// into separate start and end elements returned by successive calls. +// +// Token guarantees that the StartElement and EndElement +// tokens it returns are properly nested and matched: +// if Token encounters an unexpected end element, +// it will return an error. +// +// Token implements XML name spaces as described by +// http://www.w3.org/TR/REC-xml-names/. Each of the +// Name structures contained in the Token has the Space +// set to the URL identifying its name space when known. +// If Token encounters an unrecognized name space prefix, +// it uses the prefix as the Space rather than report an error. +func (d *Decoder) Token() (t Token, err error) { + if d.stk != nil && d.stk.kind == stkEOF { + err = io.EOF + return + } + if d.nextToken != nil { + t = d.nextToken + d.nextToken = nil + } else if t, err = d.rawToken(); err != nil { + return + } + + if !d.Strict { + if t1, ok := d.autoClose(t); ok { + d.nextToken = t + t = t1 + } + } + switch t1 := t.(type) { + case StartElement: + // In XML name spaces, the translations listed in the + // attributes apply to the element name and + // to the other attribute names, so process + // the translations first. + for _, a := range t1.Attr { + if a.Name.Space == "xmlns" { + v, ok := d.ns[a.Name.Local] + d.pushNs(a.Name.Local, v, ok) + d.ns[a.Name.Local] = a.Value + } + if a.Name.Space == "" && a.Name.Local == "xmlns" { + // Default space for untagged names + v, ok := d.ns[""] + d.pushNs("", v, ok) + d.ns[""] = a.Value + } + } + + d.translate(&t1.Name, true) + for i := range t1.Attr { + d.translate(&t1.Attr[i].Name, false) + } + d.pushElement(t1.Name) + t = t1 + + case EndElement: + d.translate(&t1.Name, true) + if !d.popElement(&t1) { + return nil, d.err + } + t = t1 + } + return +} + +const xmlURL = "http://www.w3.org/XML/1998/namespace" + +// Apply name space translation to name n. +// The default name space (for Space=="") +// applies only to element names, not to attribute names. +func (d *Decoder) translate(n *Name, isElementName bool) { + switch { + case n.Space == "xmlns": + return + case n.Space == "" && !isElementName: + return + case n.Space == "xml": + n.Space = xmlURL + case n.Space == "" && n.Local == "xmlns": + return + } + if v, ok := d.ns[n.Space]; ok { + n.Space = v + } else if n.Space == "" { + n.Space = d.DefaultSpace + } +} + +func (d *Decoder) switchToReader(r io.Reader) { + // Get efficient byte at a time reader. + // Assume that if reader has its own + // ReadByte, it's efficient enough. + // Otherwise, use bufio. + if rb, ok := r.(io.ByteReader); ok { + d.r = rb + } else { + d.r = bufio.NewReader(r) + } +} + +// Parsing state - stack holds old name space translations +// and the current set of open elements. The translations to pop when +// ending a given tag are *below* it on the stack, which is +// more work but forced on us by XML. +type stack struct { + next *stack + kind int + name Name + ok bool +} + +const ( + stkStart = iota + stkNs + stkEOF +) + +func (d *Decoder) push(kind int) *stack { + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.next = d.stk + s.kind = kind + d.stk = s + return s +} + +func (d *Decoder) pop() *stack { + s := d.stk + if s != nil { + d.stk = s.next + s.next = d.free + d.free = s + } + return s +} + +// Record that after the current element is finished +// (that element is already pushed on the stack) +// Token should return EOF until popEOF is called. +func (d *Decoder) pushEOF() { + // Walk down stack to find Start. 
+ // It might not be the top, because there might be stkNs + // entries above it. + start := d.stk + for start.kind != stkStart { + start = start.next + } + // The stkNs entries below a start are associated with that + // element too; skip over them. + for start.next != nil && start.next.kind == stkNs { + start = start.next + } + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.kind = stkEOF + s.next = start.next + start.next = s +} + +// Undo a pushEOF. +// The element must have been finished, so the EOF should be at the top of the stack. +func (d *Decoder) popEOF() bool { + if d.stk == nil || d.stk.kind != stkEOF { + return false + } + d.pop() + return true +} + +// Record that we are starting an element with the given name. +func (d *Decoder) pushElement(name Name) { + s := d.push(stkStart) + s.name = name +} + +// Record that we are changing the value of ns[local]. +// The old value is url, ok. +func (d *Decoder) pushNs(local string, url string, ok bool) { + s := d.push(stkNs) + s.name.Local = local + s.name.Space = url + s.ok = ok +} + +// Creates a SyntaxError with the current line number. +func (d *Decoder) syntaxError(msg string) error { + return &SyntaxError{Msg: msg, Line: d.line} +} + +// Record that we are ending an element with the given name. +// The name must match the record at the top of the stack, +// which must be a pushElement record. +// After popping the element, apply any undo records from +// the stack to restore the name translations that existed +// before we saw this element. +func (d *Decoder) popElement(t *EndElement) bool { + s := d.pop() + name := t.Name + switch { + case s == nil || s.kind != stkStart: + d.err = d.syntaxError("unexpected end element ") + return false + case s.name.Local != name.Local: + if !d.Strict { + d.needClose = true + d.toClose = t.Name + t.Name = s.name + return true + } + d.err = d.syntaxError("element <" + s.name.Local + "> closed by ") + return false + case s.name.Space != name.Space: + d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space + + "closed by in space " + name.Space) + return false + } + + // Pop stack until a Start or EOF is on the top, undoing the + // translations that were associated with the element we just closed. + for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF { + s := d.pop() + if s.ok { + d.ns[s.name.Local] = s.name.Space + } else { + delete(d.ns, s.name.Local) + } + } + + return true +} + +// If the top element on the stack is autoclosing and +// t is not the end tag, invent the end tag. +func (d *Decoder) autoClose(t Token) (Token, bool) { + if d.stk == nil || d.stk.kind != stkStart { + return nil, false + } + name := strings.ToLower(d.stk.name.Local) + for _, s := range d.AutoClose { + if strings.ToLower(s) == name { + // This one should be auto closed if t doesn't close it. + et, ok := t.(EndElement) + if !ok || et.Name.Local != name { + return EndElement{d.stk.name}, true + } + break + } + } + return nil, false +} + +var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method") + +// RawToken is like Token but does not verify that +// start and end elements match and does not translate +// name space prefixes to their corresponding URLs. 
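The non-strict path above (Strict, AutoClose, popElement's invented end tags) is what lets the decoder cope with HTML-ish input. A short sketch with the standard encoding/xml showing the auto-close behavior for an unclosed <br>; the input string is illustrative only.

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

func main() {
	d := xml.NewDecoder(strings.NewReader(`<p>one<br>two</p>`))
	d.Strict = false
	d.AutoClose = xml.HTMLAutoClose // treat <br> as self-closing
	d.Entity = xml.HTMLEntity

	// Token invents an EndElement for <br>, so the stream stays balanced:
	// StartElement, CharData, StartElement, EndElement, CharData, EndElement.
	for {
		tok, err := d.Token()
		if err != nil {
			break // io.EOF once the input is exhausted
		}
		fmt.Printf("%T\n", tok)
	}
}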
+func (d *Decoder) RawToken() (Token, error) { + if d.unmarshalDepth > 0 { + return nil, errRawToken + } + return d.rawToken() +} + +func (d *Decoder) rawToken() (Token, error) { + if d.err != nil { + return nil, d.err + } + if d.needClose { + // The last element we read was self-closing and + // we returned just the StartElement half. + // Return the EndElement half now. + d.needClose = false + return EndElement{d.toClose}, nil + } + + b, ok := d.getc() + if !ok { + return nil, d.err + } + + if b != '<' { + // Text section. + d.ungetc(b) + data := d.text(-1, false) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + switch b { + case '/': + // ' { + d.err = d.syntaxError("invalid characters between ") + return nil, d.err + } + return EndElement{name}, nil + + case '?': + // ' { + break + } + b0 = b + } + data := d.buf.Bytes() + data = data[0 : len(data)-2] // chop ?> + + if target == "xml" { + enc := procInstEncoding(string(data)) + if enc != "" && enc != "utf-8" && enc != "UTF-8" { + if d.CharsetReader == nil { + d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc) + return nil, d.err + } + newr, err := d.CharsetReader(enc, d.r.(io.Reader)) + if err != nil { + d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err) + return nil, d.err + } + if newr == nil { + panic("CharsetReader returned a nil Reader for charset " + enc) + } + d.switchToReader(newr) + } + } + return ProcInst{target, data}, nil + + case '!': + // ' { + break + } + b0, b1 = b1, b + } + data := d.buf.Bytes() + data = data[0 : len(data)-3] // chop --> + return Comment(data), nil + + case '[': // . + data := d.text(-1, true) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + // Probably a directive: , , etc. + // We don't care, but accumulate for caller. Quoted angle + // brackets do not count for nesting. + d.buf.Reset() + d.buf.WriteByte(b) + inquote := uint8(0) + depth := 0 + for { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if inquote == 0 && b == '>' && depth == 0 { + break + } + HandleB: + d.buf.WriteByte(b) + switch { + case b == inquote: + inquote = 0 + + case inquote != 0: + // in quotes, no special action + + case b == '\'' || b == '"': + inquote = b + + case b == '>' && inquote == 0: + depth-- + + case b == '<' && inquote == 0: + // Look for | + // |<- controlMessageLen ---------->| | + // |<- controlHeaderLen ->| | | + // +---------------+------+---------+------+ + // | Header | PadH | Data | PadD | + // +---------------+------+---------+------+ + // + // On compatible message reception: + // + // | ... |<- controlMessageLen ----------->| + // | ... |<- controlHeaderLen ->| | + // +-----+---------------+------+----------+ + // | ... | Header | PadH | Data | + // +-----+---------------+------+----------+ + ms = append(ms, ControlMessage(m[:l])) + ll := l - controlHeaderLen() + if len(m) >= ControlMessageSpace(ll) { + m = m[ControlMessageSpace(ll):] + } else { + m = m[controlMessageLen(ll):] + } + } + return ms, nil +} + +// NewControlMessage returns a new stream of control messages. +func NewControlMessage(dataLen []int) ControlMessage { + var l int + for i := range dataLen { + l += ControlMessageSpace(dataLen[i]) + } + return make([]byte, l) +} + +// A Message represents an IO message. +type Message struct { + // When writing, the Buffers field must contain at least one + // byte to write. 
+ // When reading, the Buffers field will always contain a byte + // to read. + Buffers [][]byte + + // OOB contains protocol-specific control or miscellaneous + // ancillary data known as out-of-band data. + OOB []byte + + // Addr specifies a destination address when writing. + // It can be nil when the underlying protocol of the raw + // connection uses connection-oriented communication. + // After a successful read, it may contain the source address + // on the received packet. + Addr net.Addr + + N int // # of bytes read or written from/to Buffers + NN int // # of bytes read or written from/to OOB + Flags int // protocol-specific information on the received message +} + +// RecvMsg wraps recvmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +func (c *Conn) RecvMsg(m *Message, flags int) error { + return c.recvMsg(m, flags) +} + +// SendMsg wraps sendmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +func (c *Conn) SendMsg(m *Message, flags int) error { + return c.sendMsg(m, flags) +} + +// RecvMsgs wraps recvmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// Only Linux supports this. +func (c *Conn) RecvMsgs(ms []Message, flags int) (int, error) { + return c.recvMsgs(ms, flags) +} + +// SendMsgs wraps sendmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// Only Linux supports this. +func (c *Conn) SendMsgs(ms []Message, flags int) (int, error) { + return c.sendMsgs(ms, flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys.go b/vendor/golang.org/x/net/internal/socket/sys.go new file mode 100644 index 000000000000..ee492ba86b12 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "encoding/binary" + "unsafe" +) + +var ( + // NativeEndian is the machine native endian implementation of + // ByteOrder. + NativeEndian binary.ByteOrder + + kernelAlign int +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + NativeEndian = binary.LittleEndian + } else { + NativeEndian = binary.BigEndian + } + kernelAlign = probeProtocolStack() +} + +func roundup(l int) int { + return (l + kernelAlign - 1) &^ (kernelAlign - 1) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsd.go b/vendor/golang.org/x/net/internal/socket/sys_bsd.go new file mode 100644 index 000000000000..d432835b4195 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsd.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
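The two helpers in sys.go are small but load-bearing: the byte-order probe decides how sockaddr fields are encoded, and roundup pads control-message lengths to the kernel's alignment. A standalone sketch of both; the alignment value 8 is assumed here only for the example, since the real value comes from probeProtocolStack.

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// nativeEndian detects byte order by inspecting how a uint32 lands in memory,
// exactly as the package's init function does.
func nativeEndian() binary.ByteOrder {
	i := uint32(1)
	b := (*[4]byte)(unsafe.Pointer(&i))
	if b[0] == 1 {
		return binary.LittleEndian
	}
	return binary.BigEndian
}

// roundup rounds l up to the next multiple of align (align must be a power of two).
func roundup(l, align int) int {
	return (l + align - 1) &^ (align - 1)
}

func main() {
	fmt.Println(nativeEndian())                // LittleEndian on amd64/arm64
	fmt.Println(roundup(5, 8), roundup(16, 8)) // 8 16
}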
+ +// +build aix darwin dragonfly freebsd openbsd + +package socket + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go new file mode 100644 index 000000000000..b4f41b5522f9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix freebsd netbsd openbsd + +package socket + +import ( + "runtime" + "unsafe" +) + +func probeProtocolStack() int { + if (runtime.GOOS == "netbsd" || runtime.GOOS == "openbsd") && runtime.GOARCH == "arm" { + return 8 + } + if runtime.GOOS == "aix" { + return 1 + } + var p uintptr + return int(unsafe.Sizeof(p)) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_const_unix.go b/vendor/golang.org/x/net/internal/socket/sys_const_unix.go new file mode 100644 index 000000000000..43797d6e5353 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_const_unix.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +import "golang.org/x/sys/unix" + +const ( + sysAF_UNSPEC = unix.AF_UNSPEC + sysAF_INET = unix.AF_INET + sysAF_INET6 = unix.AF_INET6 + + sysSOCK_RAW = unix.SOCK_RAW +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_darwin.go b/vendor/golang.org/x/net/internal/socket/sys_darwin.go new file mode 100644 index 000000000000..b17d223bff20 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_darwin.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go new file mode 100644 index 000000000000..ed0448fe9851 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go @@ -0,0 +1,32 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "sync" + "syscall" + "unsafe" +) + +// See version list in https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/param.h +var ( + osreldateOnce sync.Once + osreldate uint32 +) + +// First __DragonFly_version after September 2019 ABI changes +// http://lists.dragonflybsd.org/pipermail/users/2019-September/358280.html +const _dragonflyABIChangeVersion = 500705 + +func probeProtocolStack() int { + osreldateOnce.Do(func() { osreldate, _ = syscall.SysctlUint32("kern.osreldate") }) + var p uintptr + if int(unsafe.Sizeof(p)) == 8 && osreldate >= _dragonflyABIChangeVersion { + return int(unsafe.Sizeof(p)) + } + // 64-bit Dragonfly before the September 2019 ABI changes still requires + // 32-bit aligned access to network subsystem. 
+ return 4 +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_go1_11_darwin.go b/vendor/golang.org/x/net/internal/socket/sys_go1_11_darwin.go new file mode 100644 index 000000000000..02d2b3cc8357 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_go1_11_darwin.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.12 + +package socket + +import ( + "syscall" + "unsafe" +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linkname.go b/vendor/golang.org/x/net/internal/socket/sys_linkname.go new file mode 100644 index 000000000000..61c3f38a51b3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linkname.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix go1.12,darwin + +package socket + +import ( + "syscall" + "unsafe" +) + +//go:linkname syscall_getsockopt syscall.getsockopt +func syscall_getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *uint32) error + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + err := syscall_getsockopt(int(s), level, name, unsafe.Pointer(&b[0]), &l) + return int(l), err +} + +//go:linkname syscall_setsockopt syscall.setsockopt +func syscall_setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) error + +func setsockopt(s uintptr, level, name int, b []byte) error { + return syscall_setsockopt(int(s), level, name, unsafe.Pointer(&b[0]), uintptr(len(b))) +} + +//go:linkname syscall_recvmsg syscall.recvmsg +func syscall_recvmsg(s int, msg *syscall.Msghdr, flags int) (n int, err error) + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return syscall_recvmsg(int(s), (*syscall.Msghdr)(unsafe.Pointer(h)), flags) +} + +//go:linkname syscall_sendmsg syscall.sendmsg +func syscall_sendmsg(s int, msg *syscall.Msghdr, flags int) (n int, err error) + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return syscall_sendmsg(int(s), (*syscall.Msghdr)(unsafe.Pointer(h)), flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux.go b/vendor/golang.org/x/net/internal/socket/sys_linux.go new file mode 100644 index 000000000000..1559521e0382 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
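The recvmmsg/sendmmsg wrappers above are what back the batch I/O that higher-level x/net packages expose. A sketch of that public surface, assuming golang.org/x/net/ipv4 is available alongside this internal package; names and behavior are the standard ipv4.PacketConn batch API, not something added by this patch.

package main

import (
	"fmt"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// Queue one datagram to ourselves so the batch read has something to return.
	if _, err := c.WriteTo([]byte("ping"), c.LocalAddr()); err != nil {
		panic(err)
	}

	p := ipv4.NewPacketConn(c)
	msgs := make([]ipv4.Message, 4)
	for i := range msgs {
		msgs[i].Buffers = [][]byte{make([]byte, 1500)}
	}
	// On Linux this is serviced by a single recvmmsg(2); other platforms
	// fall back to one recvmsg per message.
	n, err := p.ReadBatch(msgs, 0)
	fmt.Println(n, err)
}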
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!s390x,!386 + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.go b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go new file mode 100644 index 000000000000..235b2cc08a60 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 4 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.s b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s new file mode 100644 index 000000000000..93e7d75ec03b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-36 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-36 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go new file mode 100644 index 000000000000..9decee2e59a1 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x12b + sysSENDMMSG = 0x133 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go new file mode 100644 index 000000000000..d753b436dff5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x16d + sysSENDMMSG = 0x176 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go new file mode 100644 index 000000000000..b670894366d0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go new file mode 100644 index 000000000000..9c0d74014f39 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go new file mode 100644 index 000000000000..071a4aba8b24 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go new file mode 100644 index 000000000000..071a4aba8b24 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go new file mode 100644 index 000000000000..9c0d74014f39 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go new file mode 100644 index 000000000000..21c1e3f004a0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go new file mode 100644 index 000000000000..21c1e3f004a0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go new file mode 100644 index 000000000000..64f69f1dc559 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build riscv64 + +package socket + +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go new file mode 100644 index 000000000000..327979efbb49 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 8 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s new file mode 100644 index 000000000000..06d75628c9be --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-72 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-72 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_netbsd.go b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go new file mode 100644 index 000000000000..431851c12e5d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "syscall" + "unsafe" +) + +const ( + sysRECVMMSG = 0x1db + sysSENDMMSG = 0x1dc +) + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_posix.go b/vendor/golang.org/x/net/internal/socket/sys_posix.go new file mode 100644 index 000000000000..22eae809c9e1 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -0,0 +1,183 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "encoding/binary" + "errors" + "net" + "runtime" + "strconv" + "sync" + "time" +) + +func marshalInetAddr(a net.Addr) []byte { + switch a := a.(type) { + case *net.TCPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.UDPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.IPAddr: + return marshalSockaddr(a.IP, 0, a.Zone) + default: + return nil + } +} + +func marshalSockaddr(ip net.IP, port int, zone string) []byte { + if ip4 := ip.To4(); ip4 != nil { + b := make([]byte, sizeofSockaddrInet) + switch runtime.GOOS { + case "android", "illumos", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET)) + default: + b[0] = sizeofSockaddrInet + b[1] = sysAF_INET + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[4:8], ip4) + return b + } + if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil { + b := make([]byte, sizeofSockaddrInet6) + switch runtime.GOOS { + case "android", "illumos", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET6)) + default: + b[0] = sizeofSockaddrInet6 + b[1] = sysAF_INET6 + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[8:24], ip6) + if zone != "" { + NativeEndian.PutUint32(b[24:28], uint32(zoneCache.index(zone))) + } + return b + } + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + if len(b) < 2 { + return nil, errors.New("invalid address") + } + var af int + switch runtime.GOOS { + case "android", "illumos", "linux", "solaris", "windows": + af = int(NativeEndian.Uint16(b[:2])) + default: + af = int(b[1]) + } + var ip net.IP + var zone string + if af == sysAF_INET { + if len(b) < sizeofSockaddrInet { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv4len) + copy(ip, b[4:8]) + } + if af == sysAF_INET6 { + if len(b) < sizeofSockaddrInet6 { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv6len) + copy(ip, b[8:24]) + if id := int(NativeEndian.Uint32(b[24:28])); id > 0 { + zone = zoneCache.name(id) + } + } + switch network { + case "tcp", "tcp4", "tcp6": + return &net.TCPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + case "udp", "udp4", "udp6": + return &net.UDPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + default: + return &net.IPAddr{IP: ip, Zone: zone}, nil + } +} + +// An ipv6ZoneCache represents a cache holding partial network +// 
interface information. It is used for reducing the cost of IPv6 +// addressing scope zone resolution. +// +// Multiple names sharing the index are managed by first-come +// first-served basis for consistency. +type ipv6ZoneCache struct { + sync.RWMutex // guard the following + lastFetched time.Time // last time routing information was fetched + toIndex map[string]int // interface name to its index + toName map[int]string // interface index to its name +} + +var zoneCache = ipv6ZoneCache{ + toIndex: make(map[string]int), + toName: make(map[int]string), +} + +// update refreshes the network interface information if the cache was last +// updated more than 1 minute ago, or if force is set. It returns whether the +// cache was updated. +func (zc *ipv6ZoneCache) update(ift []net.Interface, force bool) (updated bool) { + zc.Lock() + defer zc.Unlock() + now := time.Now() + if !force && zc.lastFetched.After(now.Add(-60*time.Second)) { + return false + } + zc.lastFetched = now + if len(ift) == 0 { + var err error + if ift, err = net.Interfaces(); err != nil { + return false + } + } + zc.toIndex = make(map[string]int, len(ift)) + zc.toName = make(map[int]string, len(ift)) + for _, ifi := range ift { + zc.toIndex[ifi.Name] = ifi.Index + if _, ok := zc.toName[ifi.Index]; !ok { + zc.toName[ifi.Index] = ifi.Name + } + } + return true +} + +func (zc *ipv6ZoneCache) name(zone int) string { + updated := zoneCache.update(nil, false) + zoneCache.RLock() + name, ok := zoneCache.toName[zone] + zoneCache.RUnlock() + if !ok && !updated { + zoneCache.update(nil, true) + zoneCache.RLock() + name, ok = zoneCache.toName[zone] + zoneCache.RUnlock() + } + if !ok { // last resort + name = strconv.Itoa(zone) + } + return name +} + +func (zc *ipv6ZoneCache) index(zone string) int { + updated := zoneCache.update(nil, false) + zoneCache.RLock() + index, ok := zoneCache.toIndex[zone] + zoneCache.RUnlock() + if !ok && !updated { + zoneCache.update(nil, true) + zoneCache.RLock() + index, ok = zoneCache.toIndex[zone] + zoneCache.RUnlock() + } + if !ok { // last resort + index, _ = strconv.Atoi(zone) + } + return index +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris.go b/vendor/golang.org/x/net/internal/socket/sys_solaris.go new file mode 100644 index 000000000000..66b5547868cc --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris.go @@ -0,0 +1,70 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
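marshalSockaddr's byte layout is worth seeing concretely: on Linux-style platforms the first two bytes are the address family in native byte order, the port is big-endian at offsets 2:4, and the IPv4 address follows at 4:8. A standalone sketch of that encoding using only the standard library; AF_INET = 2 and the 16-byte sockaddr_in size are the conventional values, assumed here for illustration.

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

const (
	afINET             = 2
	sizeofSockaddrInet = 16
)

// sockaddrInet4 mirrors the Linux branch of marshalSockaddr for an IPv4 address.
func sockaddrInet4(ip net.IP, port int) []byte {
	b := make([]byte, sizeofSockaddrInet)
	binary.LittleEndian.PutUint16(b[:2], afINET) // NativeEndian on little-endian machines
	binary.BigEndian.PutUint16(b[2:4], uint16(port))
	copy(b[4:8], ip.To4())
	return b
}

func main() {
	fmt.Printf("% x\n", sockaddrInet4(net.ParseIP("192.0.2.1"), 8080))
	// 02 00 1f 90 c0 00 02 01 00 00 00 00 00 00 00 00
}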
+ +package socket + +import ( + "runtime" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +//go:cgo_import_dynamic libc___xnet_getsockopt __xnet_getsockopt "libsocket.so" +//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" +//go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg "libsocket.so" +//go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg "libsocket.so" + +//go:linkname procGetsockopt libc___xnet_getsockopt +//go:linkname procSetsockopt libc_setsockopt +//go:linkname procRecvmsg libc___xnet_recvmsg +//go:linkname procSendmsg libc___xnet_sendmsg + +var ( + procGetsockopt uintptr + procSetsockopt uintptr + procRecvmsg uintptr + procSendmsg uintptr +) + +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) +func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procGetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procRecvmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSendmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s new file mode 100644 index 000000000000..a18ac5ed7555 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) + +TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSysvicall6(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/golang.org/x/net/internal/socket/sys_stub.go new file mode 100644 index 000000000000..0f6174262816 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -0,0 +1,63 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
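Every wrapper in these files funnels the raw syscall.Errno through errnoErr, which is defined elsewhere in the package and not shown in this hunk. The sketch below is an assumption about its conventional shape (zero maps to nil, common errno values are mapped to shared error instances to avoid allocations), not the patch's exact code.

package main

import (
	"fmt"
	"syscall"
)

// Shared error values so hot paths don't allocate a new error per call.
var (
	errEAGAIN error = syscall.EAGAIN
	errEINVAL error = syscall.EINVAL
	errENOENT error = syscall.ENOENT
)

// errnoErr converts a raw errno into an error, treating zero as success.
func errnoErr(errno syscall.Errno) error {
	switch errno {
	case 0:
		return nil
	case syscall.EAGAIN:
		return errEAGAIN
	case syscall.EINVAL:
		return errEINVAL
	case syscall.ENOENT:
		return errENOENT
	}
	return errno
}

func main() {
	fmt.Println(errnoErr(0), errnoErr(syscall.EINVAL)) // <nil> invalid argument
}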
+ +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import ( + "net" + "runtime" + "unsafe" +) + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64p32", "mips64p32": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +func marshalInetAddr(ip net.IP, port int, zone string) []byte { + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + return nil, errNotImplemented +} + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + return 0, errNotImplemented +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return errNotImplemented +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_unix.go b/vendor/golang.org/x/net/internal/socket/sys_unix.go new file mode 100644 index 000000000000..0eb71283f52f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_unix.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly freebsd linux,!s390x,!386 netbsd openbsd + +package socket + +import ( + "syscall" + "unsafe" +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_windows.go b/vendor/golang.org/x/net/internal/socket/sys_windows.go new file mode 100644 index 000000000000..d556a4461570 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_windows.go @@ -0,0 +1,71 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
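The getsockopt wrapper above is ultimately used to pull option bytes off a real socket descriptor. This sketch shows the same operation through the public syscall surface on a Unix-like system (reading SO_RCVBUF from a UDP socket); it illustrates what the internal helper does rather than calling it directly.

package main

import (
	"fmt"
	"net"
	"syscall"
)

func main() {
	c, err := net.ListenPacket("udp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer c.Close()

	raw, err := c.(*net.UDPConn).SyscallConn()
	if err != nil {
		panic(err)
	}

	var bufSize int
	ctrlErr := raw.Control(func(fd uintptr) {
		// Equivalent, at the byte level, to the package's getsockopt(fd, level, name, b).
		bufSize, err = syscall.GetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUF)
	})
	if ctrlErr != nil || err != nil {
		panic(fmt.Sprint(ctrlErr, " ", err))
	}
	fmt.Println("SO_RCVBUF:", bufSize)
}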
+ +package socket + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +const ( + sysAF_UNSPEC = windows.AF_UNSPEC + sysAF_INET = windows.AF_INET + sysAF_INET6 = windows.AF_INET6 + + sysSOCK_RAW = windows.SOCK_RAW +) + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + err := syscall.Getsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), (*int32)(unsafe.Pointer(&l))) + return int(l), err +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), int32(len(b))) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errNotImplemented +} diff --git a/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go new file mode 100644 index 000000000000..e740c8f024c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go @@ -0,0 +1,60 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_aix.go + +// Added for go1.11 compatibility +// +build aix + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go new file mode 100644 index 000000000000..083bda51c3c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_darwin.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go new file mode 100644 index 000000000000..55c6c9f5770c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_darwin.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go new file mode 100644 index 000000000000..083bda51c3c0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_darwin.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go new file mode 100644 index 000000000000..55c6c9f5770c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_darwin.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go new file mode 100644 index 000000000000..8b7d161d7d52 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_dragonfly.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go new file mode 100644 index 000000000000..3e71ff57438e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_freebsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go new file mode 100644 index 000000000000..238d90de6266 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go new file mode 100644 index 000000000000..3e71ff57438e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_freebsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go new file mode 100644 index 000000000000..238d90de6266 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_freebsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go new file mode 100644 index 000000000000..d33025b70db0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go @@ -0,0 +1,54 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go new file mode 100644 index 000000000000..b20d21677490 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go @@ -0,0 +1,57 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go new file mode 100644 index 000000000000..1bb10a4289ff --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go @@ -0,0 +1,55 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go new file mode 100644 index 000000000000..7f6e8a7fa4de --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go new file mode 100644 index 000000000000..1bb10a4289ff --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go @@ -0,0 +1,55 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go new file mode 100644 index 000000000000..7f6e8a7fa4de --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go new file mode 100644 index 000000000000..7f6e8a7fa4de --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go new file mode 100644 index 000000000000..1bb10a4289ff --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go @@ -0,0 +1,55 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go new file mode 100644 index 000000000000..7f6e8a7fa4de --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go new file mode 100644 index 000000000000..7f6e8a7fa4de --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go new file mode 100644 index 000000000000..f12a1d768244 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go @@ -0,0 +1,59 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +// +build riscv64 + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_0 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go new file mode 100644 index 000000000000..7f6e8a7fa4de --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go @@ -0,0 +1,58 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go new file mode 100644 index 000000000000..7e258cec29d3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go @@ -0,0 +1,57 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_netbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go new file mode 100644 index 000000000000..b3f9c0d7e5ab --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go @@ -0,0 +1,60 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_netbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go new file mode 100644 index 000000000000..7e258cec29d3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go @@ -0,0 +1,57 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_netbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go new file mode 100644 index 000000000000..da26ef019cde --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go @@ -0,0 +1,59 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_netbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go new file mode 100644 index 000000000000..73655a14c4cf --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go new file mode 100644 index 000000000000..0a4de80f2357 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go new file mode 100644 index 000000000000..73655a14c4cf --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go @@ -0,0 +1,51 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go new file mode 100644 index 000000000000..0a4de80f2357 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go @@ -0,0 +1,53 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go new file mode 100644 index 000000000000..353cd5fb4ec3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go @@ -0,0 +1,52 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_solaris.go + +package socket + +type iovec struct { + Base *int8 + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Accrights *int8 + Accrightslen int32 + Pad_cgo_2 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x20 +) diff --git a/vendor/golang.org/x/net/ipv4/batch.go b/vendor/golang.org/x/net/ipv4/batch.go new file mode 100644 index 000000000000..1a3a4fc0c109 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/batch.go @@ -0,0 +1,194 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "runtime" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. 
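The batch I/O surface documented just below (the Message alias and the ReadBatch/WriteBatch methods) can be exercised with a small amount of glue. A minimal sketch, assuming a UDP listener on an arbitrary local port; the address, batch size, and buffer length are illustrative placeholders, not part of the vendored sources:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	// Bind a UDP socket and wrap it so the ipv4 batch methods are available.
	c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)

	// Prepare a batch of messages, one receive buffer per message.
	// On Linux a single ReadBatch call may fill several of them; on other
	// platforms it falls back to reading one message, as noted below.
	ms := make([]ipv4.Message, 8)
	for i := range ms {
		ms[i].Buffers = [][]byte{make([]byte, 1500)}
	}

	n, err := p.ReadBatch(ms, 0)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range ms[:n] {
		log.Printf("read %d bytes from %v", m.N, m.Addr)
	}
}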
+ +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// RawConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +// +// Unlike the ReadFrom method, it doesn't strip the IPv4 header +// followed by option headers from the received IPv4 datagram when the +// underlying transport is net.IPConn. Each Buffers field of Message +// must be large enough to accommodate an IPv4 header and option +// headers. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + if compatFreeBSD32 && ms[0].NN > 0 { + adjustFreeBSD32(&ms[0]) + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. 
+func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if compatFreeBSD32 && ms[0].NN > 0 { + adjustFreeBSD32(&ms[0]) + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *packetHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv4/control.go b/vendor/golang.org/x/net/ipv4/control.go new file mode 100644 index 000000000000..a2b02ca95b97 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +type ControlFlags uint + +const ( + FlagTTL ControlFlags = 1 << iota // pass the TTL on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet +) + +// A ControlMessage represents per packet basis IP-level socket options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn or RawConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn or RawConn allows to send the options + // to the protocol stack. + // + TTL int // time-to-live, receiving only + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("ttl=%d src=%v dst=%v ifindex=%d", cm.TTL, cm.Src, cm.Dst, cm.IfIndex) +} + +// Marshal returns the binary encoding of cm. +func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var m socket.ControlMessage + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) { + m = socket.NewControlMessage([]int{ctlOpts[ctlPacketInfo].length}) + } + if len(m) > 0 { + ctlOpts[ctlPacketInfo].marshal(m, cm) + } + return m +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIP { + continue + } + switch { + case typ == ctlOpts[ctlTTL].name && l >= ctlOpts[ctlTTL].length: + ctlOpts[ctlTTL].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlDst].name && l >= ctlOpts[ctlDst].length: + ctlOpts[ctlDst].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlInterface].name && l >= ctlOpts[ctlInterface].length: + ctlOpts[ctlInterface].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. 
+func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTTL].length) + } + if ctlOpts[ctlPacketInfo].name > 0 { + if opt.isset(FlagSrc | FlagDst | FlagInterface) { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + } else { + if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlDst].length) + } + if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlInterface].length) + } + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTTL = iota // header field + ctlSrc // header field + ctlDst // header field + ctlInterface // inbound or outbound interface + ctlPacketInfo // inbound or outbound packet path + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. +type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/golang.org/x/net/ipv4/control_bsd.go new file mode 100644 index 000000000000..69c4f553cda4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -0,0 +1,41 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalDst(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVDSTADDR, net.IPv4len) + return m.Next(net.IPv4len) +} + +func parseDst(cm *ControlMessage, b []byte) { + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, b[:net.IPv4len]) +} + +func marshalInterface(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVIF, syscall.SizeofSockaddrDatalink) + return m.Next(syscall.SizeofSockaddrDatalink) +} + +func parseInterface(cm *ControlMessage, b []byte) { + var sadl syscall.SockaddrDatalink + copy((*[unsafe.Sizeof(sadl)]byte)(unsafe.Pointer(&sadl))[:], b) + cm.IfIndex = int(sadl.Index) +} diff --git a/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/vendor/golang.org/x/net/ipv4/control_pktinfo.go new file mode 100644 index 000000000000..425338f35bf1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_pktinfo.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_PKTINFO, sizeofInetPktinfo) + if cm != nil { + pi := (*inetPktinfo)(unsafe.Pointer(&m.Data(sizeofInetPktinfo)[0])) + if ip := cm.Src.To4(); ip != nil { + copy(pi.Spec_dst[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInetPktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inetPktinfo)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(pi.Ifindex) + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, pi.Addr[:]) +} diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go new file mode 100644 index 000000000000..a0c049d683ad --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/golang.org/x/net/ipv4/control_unix.go new file mode 100644 index 000000000000..b27fa4903a99 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_unix.go @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTTL]; ok && cf&FlagTTL != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTTL) + } else { + opt.clear(FlagTTL) + } + } + if so, ok := sockOpts[ssoPacketInfo]; ok { + if cf&(FlagSrc|FlagDst|FlagInterface) != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & (FlagSrc | FlagDst | FlagInterface)) + } else { + opt.clear(cf & (FlagSrc | FlagDst | FlagInterface)) + } + } + } else { + if so, ok := sockOpts[ssoReceiveDst]; ok && cf&FlagDst != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagDst) + } else { + opt.clear(FlagDst) + } + } + if so, ok := sockOpts[ssoReceiveInterface]; ok && cf&FlagInterface != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagInterface) + } else { + opt.clear(FlagInterface) + } + } + } + return nil +} + +func marshalTTL(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVTTL, 1) + return m.Next(1) +} + +func parseTTL(cm *ControlMessage, b []byte) { + cm.TTL = int(*(*byte)(unsafe.Pointer(&b[:1][0]))) +} diff --git a/vendor/golang.org/x/net/ipv4/control_windows.go b/vendor/golang.org/x/net/ipv4/control_windows.go new file mode 100644 index 000000000000..82c6306421b9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_windows.go @@ -0,0 +1,12 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/dgramopt.go b/vendor/golang.org/x/net/ipv4/dgramopt.go new file mode 100644 index 000000000000..c191c22aba4a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/dgramopt.go @@ -0,0 +1,264 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + + "golang.org/x/net/bpf" +) + +// MulticastTTL returns the time-to-live field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastTTL() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetMulticastTTL sets the time-to-live field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastTTL(ttl int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, ttl) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. 
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, errInvalidConn + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errNotImplemented + } + return so.getMulticastInterface(c.Conn) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errNotImplemented + } + return so.setMulticastInterface(c.Conn, ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, errInvalidConn + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errNotImplemented + } + on, err := so.GetInt(c.Conn) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. 
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ICMPFilter returns an ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, errInvalidConn + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errNotImplemented + } + return so.getICMPFilter(c.Conn) +} + +// SetICMPFilter deploys the ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errNotImplemented + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errNotImplemented + } + return so.setBPF(c.Conn, filter) +} diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go new file mode 100644 index 000000000000..245834979938 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -0,0 +1,244 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv4 implements IP-level socket options for the Internet +// Protocol version 4. +// +// The package provides IP-level socket options that allow +// manipulation of IPv4 facilities. +// +// The IPv4 protocol and basic host requirements for IPv4 are defined +// in RFC 791 and RFC 1122. +// Host extensions for multicasting and socket interface extensions +// for multicast source filters are defined in RFC 1112 and RFC 3678. +// IGMPv1, IGMPv2 and IGMPv3 are defined in RFC 1112, RFC 2236 and RFC +// 3376. +// Source-specific multicast is defined in RFC 4607. 
+// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv4 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, Conn is used to set the type-of-service field on the +// IPv4 header for each packet. +// +// ln, err := net.Listen("tcp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv4.NewConn(c).SetTOS(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPConn which are created as network connections that use the +// IPv4 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. +// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.IPv4(224, 0, 0, 250) +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv4 and Ethernet. +// +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of PacketConn is used to enable control message +// transmissions. +// +// if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, cm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if cm.Dst.IsMulticast() { +// if cm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. 
+// +// p.SetTOS(0x0) +// p.SetTTL(16) +// if _, err := p.WriteTo(data, nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// for _, ifi := range []*net.Interface{en0, en1} { +// if err := p.SetMulticastInterface(ifi); err != nil { +// // error handling +// } +// p.SetMulticastTTL(2) +// if _, err := p.WriteTo(data, nil, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn or RawConn may join multiple +// multicast groups. For example, a UDP listener with port 1024 might +// join two different groups across over two different network +// interfaces by using: +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. +// +// c1, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv4.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// p2 := ipv4.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn or RawConn on IGMPv3 supported +// platform is able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)} +// ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)} +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. 
+// +// exclsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 254)} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on IGMPv3 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// IGMPv1 or IGMPv2 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv4 // import "golang.org/x/net/ipv4" + +// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv4/endpoint.go b/vendor/golang.org/x/net/ipv4/endpoint.go new file mode 100644 index 000000000000..4a6d7a85ee67 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/endpoint.go @@ -0,0 +1,186 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "time" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn and RawConn are +// not implemented. + +// A Conn represents a network endpoint that uses the IPv4 transport. +// It is used to control basic IP-level socket options such as TOS and +// TTL. +type Conn struct { + genericOpt +} + +type genericOpt struct { + *socket.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) + return &Conn{ + genericOpt: genericOpt{Conn: cc}, + } +} + +// A PacketConn represents a packet network endpoint that uses the +// IPv4 transport. It is used to control several IP-level socket +// options including multicasting. It also provides datagram based +// network I/O methods specific to the IPv4 and higher layer protocols +// such as UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + *socket.Conn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } + +// SetControlMessage sets the per packet IP-level socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.PacketConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.PacketConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. 
+func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.PacketConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.PacketConn.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + p := &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } + return p +} + +// A RawConn represents a packet network endpoint that uses the IPv4 +// transport. It is used to control several IP-level socket options +// including IPv4 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv4 and higher layer +// protocols that handle IPv4 datagram directly such as OSPF, GRE. +type RawConn struct { + genericOpt + dgramOpt + packetHandler +} + +// SetControlMessage sets the per packet IP-level socket options. +func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.packetHandler.ok() { + return errInvalidConn + } + return setControlMessage(c.dgramOpt.Conn, &c.packetHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *RawConn) SetDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return errInvalidConn + } + return c.packetHandler.IPConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *RawConn) SetReadDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return errInvalidConn + } + return c.packetHandler.IPConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *RawConn) SetWriteDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return errInvalidConn + } + return c.packetHandler.IPConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *RawConn) Close() error { + if !c.packetHandler.ok() { + return errInvalidConn + } + return c.packetHandler.IPConn.Close() +} + +// NewRawConn returns a new RawConn using c as its underlying +// transport. +func NewRawConn(c net.PacketConn) (*RawConn, error) { + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + return nil, err + } + r := &RawConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + packetHandler: packetHandler{IPConn: c.(*net.IPConn), Conn: cc}, + } + so, ok := sockOpts[ssoHeaderPrepend] + if !ok { + return nil, errNotImplemented + } + if err := so.SetInt(r.dgramOpt.Conn, boolint(true)); err != nil { + return nil, err + } + return r, nil +} diff --git a/vendor/golang.org/x/net/ipv4/genericopt.go b/vendor/golang.org/x/net/ipv4/genericopt.go new file mode 100644 index 000000000000..51c12371eb46 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/genericopt.go @@ -0,0 +1,55 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +// TOS returns the type-of-service field value for outgoing packets. 
+func (c *genericOpt) TOS() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoTOS] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetTOS sets the type-of-service field value for future outgoing +// packets. +func (c *genericOpt) SetTOS(tos int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoTOS] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, tos) +} + +// TTL returns the time-to-live field value for outgoing packets. +func (c *genericOpt) TTL() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoTTL] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetTTL sets the time-to-live field value for future outgoing +// packets. +func (c *genericOpt) SetTTL(ttl int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoTTL] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, ttl) +} diff --git a/vendor/golang.org/x/net/ipv4/header.go b/vendor/golang.org/x/net/ipv4/header.go new file mode 100644 index 000000000000..c271ca46cb98 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header.go @@ -0,0 +1,172 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "encoding/binary" + "fmt" + "net" + "runtime" + + "golang.org/x/net/internal/socket" +) + +const ( + Version = 4 // protocol version + HeaderLen = 20 // header length without extension headers +) + +type HeaderFlags int + +const ( + MoreFragments HeaderFlags = 1 << iota // more fragments flag + DontFragment // don't fragment flag +) + +// A Header represents an IPv4 header. +type Header struct { + Version int // protocol version + Len int // header length + TOS int // type-of-service + TotalLen int // packet total length + ID int // identification + Flags HeaderFlags // flags + FragOff int // fragment offset + TTL int // time-to-live + Protocol int // next protocol + Checksum int // checksum + Src net.IP // source address + Dst net.IP // destination address + Options []byte // options, extension headers +} + +func (h *Header) String() string { + if h == nil { + return "" + } + return fmt.Sprintf("ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v", h.Version, h.Len, h.TOS, h.TotalLen, h.ID, h.Flags, h.FragOff, h.TTL, h.Protocol, h.Checksum, h.Src, h.Dst) +} + +// Marshal returns the binary encoding of h. +// +// The returned slice is in the format used by a raw IP socket on the +// local system. +// This may differ from the wire format, depending on the system. 
+func (h *Header) Marshal() ([]byte, error) { + if h == nil { + return nil, errNilHeader + } + if h.Len < HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := HeaderLen + len(h.Options) + b := make([]byte, hdrlen) + b[0] = byte(Version<<4 | (hdrlen >> 2 & 0x0f)) + b[1] = byte(h.TOS) + flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13) + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + case "freebsd": + if freebsdVersion < 1100000 { + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } else { + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + default: + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + binary.BigEndian.PutUint16(b[4:6], uint16(h.ID)) + b[8] = byte(h.TTL) + b[9] = byte(h.Protocol) + binary.BigEndian.PutUint16(b[10:12], uint16(h.Checksum)) + if ip := h.Src.To4(); ip != nil { + copy(b[12:16], ip[:net.IPv4len]) + } + if ip := h.Dst.To4(); ip != nil { + copy(b[16:20], ip[:net.IPv4len]) + } else { + return nil, errMissingAddress + } + if len(h.Options) > 0 { + copy(b[HeaderLen:], h.Options) + } + return b, nil +} + +// Parse parses b as an IPv4 header and stores the result in h. +// +// The provided b must be in the format used by a raw IP socket on the +// local system. +// This may differ from the wire format, depending on the system. +func (h *Header) Parse(b []byte) error { + if h == nil || b == nil { + return errNilHeader + } + if len(b) < HeaderLen { + return errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if len(b) < hdrlen { + return errExtHeaderTooShort + } + h.Version = int(b[0] >> 4) + h.Len = hdrlen + h.TOS = int(b[1]) + h.ID = int(binary.BigEndian.Uint16(b[4:6])) + h.TTL = int(b[8]) + h.Protocol = int(b[9]) + h.Checksum = int(binary.BigEndian.Uint16(b[10:12])) + h.Src = net.IPv4(b[12], b[13], b[14], b[15]) + h.Dst = net.IPv4(b[16], b[17], b[18], b[19]) + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + hdrlen + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + case "freebsd": + if freebsdVersion < 1100000 { + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + if freebsdVersion < 1000000 { + h.TotalLen += hdrlen + } + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + } else { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + h.Flags = HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + optlen := hdrlen - HeaderLen + if optlen > 0 && len(b) >= hdrlen { + if cap(h.Options) < optlen { + h.Options = make([]byte, optlen) + } else { + h.Options = h.Options[:optlen] + } + copy(h.Options, b[HeaderLen:hdrlen]) + } + return nil +} + +// ParseHeader parses b as an IPv4 header. +// +// The provided b must be in the format used by a raw IP socket on the +// local system. +// This may differ from the wire format, depending on the system. 
+func ParseHeader(b []byte) (*Header, error) { + h := new(Header) + if err := h.Parse(b); err != nil { + return nil, err + } + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv4/helper.go b/vendor/golang.org/x/net/ipv4/helper.go new file mode 100644 index 000000000000..e845a7376ea3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "errors" + "net" + "runtime" + + "golang.org/x/net/internal/socket" +) + +var ( + errInvalidConn = errors.New("invalid connection") + errMissingAddress = errors.New("missing address") + errNilHeader = errors.New("nil header") + errHeaderTooShort = errors.New("header too short") + errExtHeaderTooShort = errors.New("extension header too short") + errInvalidConnType = errors.New("invalid conn type") + errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH) + + // See https://www.freebsd.org/doc/en/books/porters-handbook/versions.html. + freebsdVersion uint32 + compatFreeBSD32 bool // 386 emulation on amd64 +) + +// See golang.org/issue/30899. +func adjustFreeBSD32(m *socket.Message) { + // FreeBSD 12.0-RELEASE is affected by https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=236737 + if 1200086 <= freebsdVersion && freebsdVersion < 1201000 { + l := (m.NN + 4 - 1) &^ (4 - 1) + if m.NN < l && l <= len(m.OOB) { + m.NN = l + } + } +} + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP4(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv4/iana.go b/vendor/golang.org/x/net/ipv4/iana.go new file mode 100644 index 000000000000..4375b4099b80 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/iana.go @@ -0,0 +1,38 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. 
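Header.Marshal and Header.Parse above are symmetric apart from the per-platform byte-order handling, so a quick round trip is a useful sanity check. The following is a minimal sketch, not part of the vendored sources; the payload size, TTL, protocol number and the RFC 5737 documentation addresses are illustrative values.

package main

import (
	"fmt"
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	// Build a minimal header for a raw IP socket; Len must be at least
	// HeaderLen and Dst must be an IPv4 address, or Marshal fails.
	h := &ipv4.Header{
		Version:  ipv4.Version,
		Len:      ipv4.HeaderLen,
		TOS:      0x00,
		TotalLen: ipv4.HeaderLen + 8, // header plus an 8-byte payload (example value)
		TTL:      64,
		Protocol: 17, // UDP
		Src:      net.IPv4(192, 0, 2, 1), // documentation addresses (RFC 5737)
		Dst:      net.IPv4(198, 51, 100, 1),
	}
	b, err := h.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	// Parse the encoded bytes back; the result should mirror h, modulo the
	// platform-specific byte-order notes in the Marshal/Parse comments.
	got, err := ipv4.ParseHeader(b)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(got)
}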
+ +package ipv4 + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2018-02-26 +const ( + ICMPTypeEchoReply ICMPType = 0 // Echo Reply + ICMPTypeDestinationUnreachable ICMPType = 3 // Destination Unreachable + ICMPTypeRedirect ICMPType = 5 // Redirect + ICMPTypeEcho ICMPType = 8 // Echo + ICMPTypeRouterAdvertisement ICMPType = 9 // Router Advertisement + ICMPTypeRouterSolicitation ICMPType = 10 // Router Solicitation + ICMPTypeTimeExceeded ICMPType = 11 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 12 // Parameter Problem + ICMPTypeTimestamp ICMPType = 13 // Timestamp + ICMPTypeTimestampReply ICMPType = 14 // Timestamp Reply + ICMPTypePhoturis ICMPType = 40 // Photuris + ICMPTypeExtendedEchoRequest ICMPType = 42 // Extended Echo Request + ICMPTypeExtendedEchoReply ICMPType = 43 // Extended Echo Reply +) + +// Internet Control Message Protocol (ICMP) Parameters, Updated: 2018-02-26 +var icmpTypes = map[ICMPType]string{ + 0: "echo reply", + 3: "destination unreachable", + 5: "redirect", + 8: "echo", + 9: "router advertisement", + 10: "router solicitation", + 11: "time exceeded", + 12: "parameter problem", + 13: "timestamp", + 14: "timestamp reply", + 40: "photuris", + 42: "extended echo request", + 43: "extended echo reply", +} diff --git a/vendor/golang.org/x/net/ipv4/icmp.go b/vendor/golang.org/x/net/ipv4/icmp.go new file mode 100644 index 000000000000..9902bb3d2a55 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "golang.org/x/net/internal/iana" + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "" + } + return s +} + +// Protocol returns the ICMPv4 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 8200 defines a reasonable role model and it works not +// only for IPv6 but IPv4. A node means a device that implements IP. +// A router means a node that forwards IP packets not explicitly +// addressed to itself, and a host means a node that is not a router. +type ICMPFilter struct { + icmpFilter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. +func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_linux.go b/vendor/golang.org/x/net/ipv4/icmp_linux.go new file mode 100644 index 000000000000..6e1c5c80ad1d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_linux.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
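The ICMPFilter methods above (Accept, Block, SetAll, WillBlock) are normally applied to a raw ICMP PacketConn via SetICMPFilter which, per the dgramOpt comments, is only supported on Linux. A minimal sketch follows; the wildcard listen address is an assumption, and raw ICMP sockets usually require elevated privileges.

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("ip4:icmp", "0.0.0.0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv4.NewPacketConn(c)

	// Start from "block everything", then punch a hole for echo replies.
	var f ipv4.ICMPFilter
	f.SetAll(true)
	f.Accept(ipv4.ICMPTypeEchoReply)
	if err := p.SetICMPFilter(&f); err != nil {
		log.Fatal(err) // errNotImplemented on non-Linux platforms
	}
}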
+ +package ipv4 + +func (f *icmpFilter) accept(typ ICMPType) { + f.Data &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpFilter) block(typ ICMPType) { + f.Data |= 1 << (uint32(typ) & 31) +} + +func (f *icmpFilter) setAll(block bool) { + if block { + f.Data = 1<<32 - 1 + } else { + f.Data = 0 + } +} + +func (f *icmpFilter) willBlock(typ ICMPType) bool { + return f.Data&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_stub.go b/vendor/golang.org/x/net/ipv4/icmp_stub.go new file mode 100644 index 000000000000..21bb29ab3669 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_stub.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +const sizeofICMPFilter = 0x0 + +type icmpFilter struct { +} + +func (f *icmpFilter) accept(typ ICMPType) { +} + +func (f *icmpFilter) block(typ ICMPType) { +} + +func (f *icmpFilter) setAll(block bool) { +} + +func (f *icmpFilter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv4/packet.go b/vendor/golang.org/x/net/ipv4/packet.go new file mode 100644 index 000000000000..7d784e06dd08 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet.go @@ -0,0 +1,117 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadFrom and WriteTo methods of RawConn +// are not implemented. + +// A packetHandler represents the IPv4 datagram handler. +type packetHandler struct { + *net.IPConn + *socket.Conn + rawOpt +} + +func (c *packetHandler) ok() bool { return c != nil && c.IPConn != nil && c.Conn != nil } + +// ReadFrom reads an IPv4 datagram from the endpoint c, copying the +// datagram into b. It returns the received datagram as the IPv4 +// header h, the payload p and the control message cm. 
+func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + if !c.ok() { + return nil, nil, nil, errInvalidConn + } + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + if err := c.RecvMsg(&m, 0); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + var hs []byte + if hs, p, err = slicePacket(b[:m.N]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if m.NN > 0 { + if compatFreeBSD32 { + adjustFreeBSD32(&m) + } + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + } + if src, ok := m.Addr.(*net.IPAddr); ok && cm != nil { + cm.Src = src.IP + } + return +} + +func slicePacket(b []byte) (h, p []byte, err error) { + if len(b) < HeaderLen { + return nil, nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + return b[:hdrlen], b[hdrlen:], nil +} + +// WriteTo writes an IPv4 datagram through the endpoint c, copying the +// datagram from the IPv4 header h and the payload p. The control +// message cm allows the datagram path and the outgoing interface to be +// specified. Currently only Darwin and Linux support this. The cm +// may be nil if control of the outgoing datagram is not required. +// +// The IPv4 header h must contain appropriate fields that include: +// +// Version = +// Len = +// TOS = +// TotalLen = +// ID = platform sets an appropriate value if ID is zero +// FragOff = +// TTL = +// Protocol = +// Checksum = platform sets an appropriate value if Checksum is zero +// Src = platform sets an appropriate value if Src is nil +// Dst = +// Options = optional +func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error { + if !c.ok() { + return errInvalidConn + } + m := socket.Message{ + OOB: cm.Marshal(), + } + wh, err := h.Marshal() + if err != nil { + return err + } + m.Buffers = [][]byte{wh, p} + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + m.Addr = dst + if err := c.SendMsg(&m, 0); err != nil { + return &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/payload.go b/vendor/golang.org/x/net/ipv4/payload.go new file mode 100644 index 000000000000..f95f811acd2b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv4 datagram payload handler. 
+type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go new file mode 100644 index 000000000000..e7614661d7b7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -0,0 +1,84 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, errInvalidConn + } + c.rawOpt.RLock() + m := socket.Message{ + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + m.Buffers = [][]byte{b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + h := make([]byte, HeaderLen) + m.Buffers = [][]byte{h, b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + hdrlen := int(h[0]&0x0f) << 2 + if hdrlen > len(h) { + d := hdrlen - len(h) + copy(b, b[d:]) + m.N -= d + } else { + m.N -= hdrlen + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + if m.NN > 0 { + if compatFreeBSD32 { + adjustFreeBSD32(&m) + } + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP4(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, errInvalidConn + } + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err = c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go new file mode 100644 index 000000000000..1116256f2457 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -0,0 +1,39 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package ipv4 + +import "net" + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, errInvalidConn + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, errInvalidConn + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt.go b/vendor/golang.org/x/net/ipv4/sockopt.go new file mode 100644 index 000000000000..22e90c0392c5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt.go @@ -0,0 +1,44 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "golang.org/x/net/internal/socket" + +// Sticky socket options +const ( + ssoTOS = iota // header field for unicast packet + ssoTTL // header field for unicast packet + ssoMulticastTTL // header field for multicast packet + ssoMulticastInterface // outbound interface for multicast packet + ssoMulticastLoopback // loopback for multicast packet + ssoReceiveTTL // header field on received packet + ssoReceiveDst // header field on received packet + ssoReceiveInterface // inbound interface on received packet + ssoPacketInfo // incbound or outbound packet path + ssoHeaderPrepend // ipv4 header prepend + ssoStripHeader // strip ipv4 header + ssoICMPFilter // icmp filter + ssoJoinGroup // any-source multicast + ssoLeaveGroup // any-source multicast + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoAttachFilter // attach BPF for filtering inbound traffic +) + +// Sticky socket option value types +const ( + ssoTypeIPMreq = iota + 1 + ssoTypeIPMreqn + ssoTypeGroupReq + ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for sticky socket option. +type sockOpt struct { + socket.Option + typ int // hint for option value type; optional +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/vendor/golang.org/x/net/ipv4/sockopt_posix.go new file mode 100644 index 000000000000..dea64519d8e2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_posix.go @@ -0,0 +1,71 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
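The payload handlers above only populate a ControlMessage for flags that were enabled beforehand with SetControlMessage, and cm can be nil when nothing was delivered. A minimal sketch of the receive path follows; the port is arbitrary, and FlagTTL, FlagDst and FlagInterface come from the package's control-message support, which lives in files outside this hunk.

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "0.0.0.0:1024") // example port
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv4.NewPacketConn(c)
	// Ask the kernel for TTL, destination and interface metadata per packet.
	// Platforms without control-message support return an error here.
	if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagDst|ipv4.FlagInterface, true); err != nil {
		log.Fatal(err)
	}

	b := make([]byte, 1500)
	for {
		n, cm, src, err := p.ReadFrom(b)
		if err != nil {
			log.Fatal(err)
		}
		// cm may be nil when no control messages were delivered.
		if cm != nil {
			log.Printf("%d bytes from %v, ttl=%d, dst=%v", n, src, cm.TTL, cm.Dst)
		} else {
			log.Printf("%d bytes from %v", n, src)
		}
	}
}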
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + switch so.typ { + case ssoTypeIPMreqn: + return so.getIPMreqn(c) + default: + return so.getMulticastIf(c) + } +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + switch so.typ { + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, nil) + default: + return so.setMulticastIf(c, ifi) + } +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPFilter { + return nil, errNotImplemented + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPFilter]byte)(unsafe.Pointer(f))[:sizeofICMPFilter] + return so.Set(c, b) +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errNotImplemented + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go new file mode 100644 index 000000000000..37d4806b3429 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errNotImplemented +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errNotImplemented +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/sys_aix.go b/vendor/golang.org/x/net/ipv4/sys_aix.go new file mode 100644 index 000000000000..3d1201e6d7c7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_aix.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
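The sockOpt helpers above back the PacketConn-level multicast knobs; at the application level the usual sequence is to join the group, pin the outgoing interface, then tune TTL and loopback before sending. A sketch under assumed values (interface name en0, group 224.0.0.250, port 1024) follows.

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "0.0.0.0:1024") // example port
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)

	en0, err := net.InterfaceByName("en0") // assumed interface name
	if err != nil {
		log.Fatal(err)
	}
	group := &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250), Port: 1024}

	if err := p.JoinGroup(en0, group); err != nil {
		log.Fatal(err)
	}
	// Pin outgoing multicast traffic to en0, keep it on the local segment,
	// and suppress loopback copies of our own transmissions.
	if err := p.SetMulticastInterface(en0); err != nil {
		log.Fatal(err)
	}
	if err := p.SetMulticastTTL(1); err != nil {
		log.Fatal(err)
	}
	if err := p.SetMulticastLoopback(false); err != nil {
		log.Fatal(err)
	}
	if _, err := p.WriteTo([]byte("hello"), nil, group); err != nil {
		log.Fatal(err)
	}
}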
+ +// Added for go1.11 compatibility +// +build aix + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq.go b/vendor/golang.org/x/net/ipv4/sys_asmreq.go new file mode 100644 index 000000000000..76d670acaa9e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq.go @@ -0,0 +1,122 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd netbsd openbsd solaris windows + +package ipv4 + +import ( + "errors" + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var errNoSuchInterface = errors.New("no such interface") + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + mreq := ipMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} + if err := setIPMreqInterface(&mreq, ifi); err != nil { + return err + } + b := (*[sizeofIPMreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPMreq] + return so.Set(c, b) +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + var b [4]byte + if _, err := so.Get(c, b[:]); err != nil { + return nil, err + } + ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + ip, err := netInterfaceToIP4(ifi) + if err != nil { + return err + } + var b [4]byte + copy(b[:], ip) + return so.Set(c, b[:]) +} + +func setIPMreqInterface(mreq *ipMreq, ifi *net.Interface) error { + if ifi == nil { + return nil + } + ifat, err := ifi.Addrs() + if err != nil { + return err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + } + } + return errNoSuchInterface +} + +func netIP4ToInterface(ip net.IP) (*net.Interface, error) { + ift, err := net.Interfaces() + if err != nil { + return nil, err + } + for _, ifi := range ift { + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + case *net.IPNet: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + } + } + } + return nil, errNoSuchInterface +} + +func netInterfaceToIP4(ifi *net.Interface) (net.IP, error) { + if ifi == nil { + return net.IPv4zero.To4(), nil + } + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + } + } + return nil, errNoSuchInterface +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go new file mode 100644 index 000000000000..6dc339ce67a3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !aix,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go new file mode 100644 index 000000000000..1f24f69f3b08 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + b := make([]byte, so.Len) + if _, err := so.Get(c, b); err != nil { + return nil, err + } + mreqn := (*ipMreqn)(unsafe.Pointer(&b[0])) + if mreqn.Ifindex == 0 { + return nil, nil + } + ifi, err := net.InterfaceByIndex(int(mreqn.Ifindex)) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreqn ipMreqn + if ifi != nil { + mreqn.Ifindex = int32(ifi.Index) + } + if grp != nil { + mreqn.Multiaddr = [4]byte{grp[0], grp[1], grp[2], grp[3]} + } + b := (*[sizeofIPMreqn]byte)(unsafe.Pointer(&mreqn))[:sizeofIPMreqn] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go new file mode 100644 index 000000000000..48ef55624ec4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf.go b/vendor/golang.org/x/net/ipv4/sys_bpf.go new file mode 100644 index 000000000000..5c03dce3b774 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" + "golang.org/x/sys/unix" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := unix.SockFprog{ + Len: uint16(len(f)), + Filter: (*unix.SockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[unix.SizeofSockFprog]byte)(unsafe.Pointer(&prog))[:unix.SizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go new file mode 100644 index 000000000000..5c98642716b5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bsd.go b/vendor/golang.org/x/net/ipv4/sys_bsd.go new file mode 100644 index 000000000000..58256dd9d6fa --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_darwin.go b/vendor/golang.org/x/net/ipv4/sys_darwin.go new file mode 100644 index 000000000000..ac213c73509d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_darwin.go @@ -0,0 +1,65 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
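setAttachFilter above is the Linux-only backend for PacketConn.SetBPF; the program must be classic BPF assembled into raw instructions. The sketch below attaches a deliberately trivial accept-everything filter so it makes no assumptions about packet offsets; the port is arbitrary.

package main

import (
	"log"
	"net"

	"golang.org/x/net/bpf"
	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "0.0.0.0:1024") // example port
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)

	// A trivial classic BPF program: return a large byte count for every
	// packet, i.e. accept everything. Real filters would inspect offsets.
	prog, err := bpf.Assemble([]bpf.Instruction{
		bpf.RetConstant{Val: 0xffff},
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := p.SetBPF(prog); err != nil {
		log.Fatal(err) // errNotImplemented on non-Linux platforms
	}
}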
+ +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoStripHeader: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_STRIPHDR, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_dragonfly.go b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go new file mode 100644 index 000000000000..859764f33a55 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go @@ -0,0 +1,35 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/vendor/golang.org/x/net/ipv4/sys_freebsd.go new file mode 100644 index 000000000000..482873d9a15d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_freebsd.go @@ -0,0 +1,76 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv4 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") + if freebsdVersion >= 1000000 { + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} + } + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + compatFreeBSD32 = true + break + } + } + } +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_linux.go b/vendor/golang.org/x/net/ipv4/sys_linux.go new file mode 100644 index 000000000000..cf755c7fbaf7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_linux.go @@ -0,0 +1,60 @@ +// Copyright 2014 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" + "golang.org/x/sys/unix" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_TTL, 1, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_PKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysICMP_FILTER, Len: sizeofICMPFilter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: unix.SOL_SOCKET, Name: unix.SO_ATTACH_FILTER, Len: unix.SizeofSockFprog}}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_solaris.go b/vendor/golang.org/x/net/ipv4/sys_solaris.go new file mode 100644 index 000000000000..832fef1e2e25 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_solaris.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package ipv4
+
+import (
+ "net"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/net/internal/iana"
+ "golang.org/x/net/internal/socket"
+)
+
+var (
+ ctlOpts = [ctlMax]ctlOpt{
+ ctlTTL: {sysIP_RECVTTL, 4, marshalTTL, parseTTL},
+ ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo},
+ }
+
+ sockOpts = map[int]*sockOpt{
+ ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}},
+ ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}},
+ ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}},
+ ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}},
+ ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}},
+ ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}},
+ ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}},
+ ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}},
+ ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},
+ ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},
+ ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+ ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+ ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+ ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+ }
+)
+
+func (pi *inetPktinfo) setIfindex(i int) {
+ pi.Ifindex = uint32(i)
+}
+
+func (gr *groupReq) setGroup(grp net.IP) {
+ sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4))
+ sa.Family = syscall.AF_INET
+ copy(sa.Addr[:], grp)
+}
+
+func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {
+ sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4))
+ sa.Family = syscall.AF_INET
+ copy(sa.Addr[:], grp)
+ sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260))
+ sa.Family = syscall.AF_INET
+ copy(sa.Addr[:], src)
+} diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go new file mode 100644 index 000000000000..eeced7f3138b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go @@ -0,0 +1,52 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build darwin freebsd linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if compatFreeBSD32 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if compatFreeBSD32 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go new file mode 100644 index 000000000000..c0921674b050 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux,!solaris + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go new file mode 100644 index 000000000000..b9c85b334fcc --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv4/sys_windows.go b/vendor/golang.org/x/net/ipv4/sys_windows.go new file mode 100644 index 000000000000..b0913d539c30 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_windows.go @@ -0,0 +1,67 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. 
+ sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_DONTFRAGMENT = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0xf + sysIP_DROP_SOURCE_MEMBERSHIP = 0x10 + sysIP_PKTINFO = 0x13 + + sizeofInetPktinfo = 0x8 + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc +) + +type inetPktinfo struct { + Addr [4]byte + Ifindex int32 +} + +type ipMreq struct { + Multiaddr [4]byte + Interface [4]byte +} + +type ipMreqSource struct { + Multiaddr [4]byte + Sourceaddr [4]byte + Interface [4]byte +} + +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms738586(v=vs.85).aspx +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go new file mode 100644 index 000000000000..c741d5c8ea91 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go @@ -0,0 +1,33 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_aix.go + +// Added for go1.11 compatibility +// +build aix + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x20 + sysIP_RECVTTL = 0x22 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/vendor/golang.org/x/net/ipv4/zsys_darwin.go new file mode 100644 index 000000000000..e05a251ba076 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_darwin.go @@ -0,0 +1,99 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_darwin.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_STRIPHDR = 0x17 + sysIP_RECVTTL = 0x18 + sysIP_BOUND_IF = 0x19 + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_MULTICAST_IFINDEX = 0x42 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go new file mode 100644 index 000000000000..6d65e9fcb815 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go @@ -0,0 +1,31 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_dragonfly.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x41 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go new file mode 100644 index 000000000000..136e2b8f1d61 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go @@ -0,0 +1,93 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go new file mode 100644 index 000000000000..4f730f19ef34 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go @@ -0,0 +1,95 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go new file mode 100644 index 000000000000..4f730f19ef34 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go @@ -0,0 +1,95 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm64.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm64.go new file mode 100644 index 000000000000..ecebf3272392 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm64.go @@ -0,0 +1,93 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]uint8 + X__ss_align int64 + X__ss_pad2 [112]uint8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go new file mode 100644 index 000000000000..1c7fdfa13af9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go @@ -0,0 +1,130 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go new file mode 100644 index 000000000000..a04e785187af --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go @@ -0,0 +1,132 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go new file mode 100644 index 000000000000..1c7fdfa13af9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go @@ -0,0 +1,130 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go new file mode 100644 index 000000000000..a04e785187af --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go @@ -0,0 +1,132 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go new file mode 100644 index 000000000000..1c7fdfa13af9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go @@ -0,0 +1,130 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go new file mode 100644 index 000000000000..a04e785187af --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go @@ -0,0 +1,132 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go new file mode 100644 index 000000000000..a04e785187af --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go @@ -0,0 +1,132 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go new file mode 100644 index 000000000000..1c7fdfa13af9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go @@ -0,0 +1,130 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go new file mode 100644 index 000000000000..3c5ea54731ed --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go @@ -0,0 +1,130 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go new file mode 100644 index 000000000000..a04e785187af --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go @@ -0,0 +1,132 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go new file mode 100644 index 000000000000..a04e785187af --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go @@ -0,0 +1,132 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go new file mode 100644 index 000000000000..e626134a8b50 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go @@ -0,0 +1,134 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +// +build riscv64 + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go new file mode 100644 index 000000000000..a04e785187af --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go @@ -0,0 +1,132 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go new file mode 100644 index 000000000000..8cfc648ad7e8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go @@ -0,0 +1,30 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_netbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x17 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go new file mode 100644 index 000000000000..37629cb0ab5f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go @@ -0,0 +1,30 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x1e + sysIP_RECVTTL = 0x1f + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/vendor/golang.org/x/net/ipv4/zsys_solaris.go new file mode 100644 index 000000000000..cb80a308b0e6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_solaris.go @@ -0,0 +1,100 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_solaris.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x9 + sysIP_RECVSLLA = 0xa + sysIP_RECVTTL = 0xb + + sysIP_MULTICAST_IF = 0x10 + sysIP_MULTICAST_TTL = 0x11 + sysIP_MULTICAST_LOOP = 0x12 + sysIP_ADD_MEMBERSHIP = 0x13 + sysIP_DROP_MEMBERSHIP = 0x14 + sysIP_BLOCK_SOURCE = 0x15 + sysIP_UNBLOCK_SOURCE = 0x16 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x17 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x18 + sysIP_NEXTHOP = 0x19 + + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + sysIP_DONTFRAG = 0x1b + + sysIP_BOUND_IF = 0x41 + sysIP_UNSPEC_SRC = 0x42 + sysIP_BROADCAST_TTL = 0x43 + sysIP_DHCPINIT_IF = 0x45 + + sysIP_REUSEADDR = 0x104 + sysIP_DONTROUTE = 0x105 + sysIP_BROADCAST = 0x106 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface 
uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} diff --git a/vendor/golang.org/x/net/ipv6/batch.go b/vendor/golang.org/x/net/ipv6/batch.go new file mode 100644 index 000000000000..2ccb9849c785 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/batch.go @@ -0,0 +1,116 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "runtime" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. 
+func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv6/control.go b/vendor/golang.org/x/net/ipv6/control.go new file mode 100644 index 000000000000..2da644413b4a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control.go @@ -0,0 +1,187 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +// Note that RFC 3542 obsoletes RFC 2292 but OS X Snow Leopard and the +// former still support RFC 2292 only. Please be aware that almost +// all protocol implementations prohibit using a combination of RFC +// 2292 and RFC 3542 for some practical reasons. + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +// A ControlFlags represents per packet basis IP-level socket option +// control flags. +type ControlFlags uint + +const ( + FlagTrafficClass ControlFlags = 1 << iota // pass the traffic class on the received packet + FlagHopLimit // pass the hop limit on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet + FlagPathMTU // pass the path MTU on the received packet path +) + +const flagPacketInfo = FlagDst | FlagInterface + +// A ControlMessage represents per packet basis IP-level socket +// options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn allows to send the options to the + // protocol stack. + // + TrafficClass int // traffic class, must be 1 <= value <= 255 when specifying + HopLimit int // hop limit, must be 1 <= value <= 255 when specifying + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying + NextHop net.IP // next hop address, specifying only + MTU int // path MTU, receiving only +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("tclass=%#x hoplim=%d src=%v dst=%v ifindex=%d nexthop=%v mtu=%d", cm.TrafficClass, cm.HopLimit, cm.Src, cm.Dst, cm.IfIndex, cm.NextHop, cm.MTU) +} + +// Marshal returns the binary encoding of cm. 
+func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var l int + tclass := false + if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 { + tclass = true + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + hoplimit := false + if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 { + hoplimit = true + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + pktinfo := false + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) { + pktinfo = true + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + nexthop := false + if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil { + nexthop = true + l += socket.ControlMessageSpace(ctlOpts[ctlNextHop].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + bb := b + if tclass { + bb = ctlOpts[ctlTrafficClass].marshal(bb, cm) + } + if hoplimit { + bb = ctlOpts[ctlHopLimit].marshal(bb, cm) + } + if pktinfo { + bb = ctlOpts[ctlPacketInfo].marshal(bb, cm) + } + if nexthop { + bb = ctlOpts[ctlNextHop].marshal(bb, cm) + } + } + return b +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIPv6 { + continue + } + switch { + case typ == ctlOpts[ctlTrafficClass].name && l >= ctlOpts[ctlTrafficClass].length: + ctlOpts[ctlTrafficClass].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlHopLimit].name && l >= ctlOpts[ctlHopLimit].length: + ctlOpts[ctlHopLimit].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPathMTU].name && l >= ctlOpts[ctlPathMTU].length: + ctlOpts[ctlPathMTU].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. +func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPathMTU].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTrafficClass = iota // header field + ctlHopLimit // header field + ctlPacketInfo // inbound or outbound packet path + ctlNextHop // nexthop + ctlPathMTU // path mtu + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. 
+type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go new file mode 100644 index 000000000000..9fd9eb15e3bc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func marshal2292NextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go new file mode 100644 index 000000000000..8c221b598957 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -0,0 +1,94 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalTrafficClass(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_TCLASS, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.TrafficClass)) + } + return m.Next(4) +} + +func parseTrafficClass(cm *ControlMessage, b []byte) { + cm.TrafficClass = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalHopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func parseHopLimit(cm *ControlMessage, b []byte) { + cm.HopLimit = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inet6Pktinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, pi.Addr[:]) + cm.IfIndex = int(pi.Ifindex) +} + +func marshalNextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} + +func parseNextHop(cm *ControlMessage, b []byte) { +} + +func marshalPathMTU(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PATHMTU, sizeofIPv6Mtuinfo) + return m.Next(sizeofIPv6Mtuinfo) +} + +func parsePathMTU(cm *ControlMessage, b []byte) { + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, mi.Addr.Addr[:]) + cm.IfIndex = int(mi.Addr.Scope_id) + cm.MTU = int(mi.Mtu) +} diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go new file mode 100644 index 000000000000..1d773cbcc8e6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go new file mode 100644 index 000000000000..0971a008bf4f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_unix.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTrafficClass]; ok && cf&FlagTrafficClass != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTrafficClass) + } else { + opt.clear(FlagTrafficClass) + } + } + if so, ok := sockOpts[ssoReceiveHopLimit]; ok && cf&FlagHopLimit != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagHopLimit) + } else { + opt.clear(FlagHopLimit) + } + } + if so, ok := sockOpts[ssoReceivePacketInfo]; ok && cf&flagPacketInfo != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & flagPacketInfo) + } else { + opt.clear(cf & flagPacketInfo) + } + } + if so, ok := sockOpts[ssoReceivePathMTU]; ok && cf&FlagPathMTU != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagPathMTU) + } else { + opt.clear(FlagPathMTU) + } + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/control_windows.go b/vendor/golang.org/x/net/ipv6/control_windows.go new file mode 100644 index 000000000000..8882d81934dd --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_windows.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/dgramopt.go b/vendor/golang.org/x/net/ipv6/dgramopt.go new file mode 100644 index 000000000000..1f422e71dc35 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -0,0 +1,301 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + + "golang.org/x/net/bpf" +) + +// MulticastHopLimit returns the hop limit field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastHopLimit() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetMulticastHopLimit sets the hop limit field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, hoplim) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. +func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, errInvalidConn + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errNotImplemented + } + return so.getMulticastInterface(c.Conn) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. 
+func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errNotImplemented + } + return so.setMulticastInterface(c.Conn, ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, errInvalidConn + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errNotImplemented + } + on, err := so.GetInt(c.Conn) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and send back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. 
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errNotImplemented + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// Checksum reports whether the kernel will compute, store or verify a +// checksum for both incoming and outgoing packets. If on is true, it +// returns an offset in bytes into the data of where the checksum +// field is located. +func (c *dgramOpt) Checksum() (on bool, offset int, err error) { + if !c.ok() { + return false, 0, errInvalidConn + } + so, ok := sockOpts[ssoChecksum] + if !ok { + return false, 0, errNotImplemented + } + offset, err = so.GetInt(c.Conn) + if err != nil { + return false, 0, err + } + if offset < 0 { + return false, 0, nil + } + return true, offset, nil +} + +// SetChecksum enables the kernel checksum processing. If on is ture, +// the offset should be an offset in bytes into the data of where the +// checksum field is located. +func (c *dgramOpt) SetChecksum(on bool, offset int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoChecksum] + if !ok { + return errNotImplemented + } + if !on { + offset = -1 + } + return so.SetInt(c.Conn, offset) +} + +// ICMPFilter returns an ICMP filter. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, errInvalidConn + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errNotImplemented + } + return so.getICMPFilter(c.Conn) +} + +// SetICMPFilter deploys the ICMP filter. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errNotImplemented + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. 
+func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errNotImplemented + } + return so.setBPF(c.Conn, filter) +} diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go new file mode 100644 index 000000000000..e0be9d50d70d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/doc.go @@ -0,0 +1,243 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv6 implements IP-level socket options for the Internet +// Protocol version 6. +// +// The package provides IP-level socket options that allow +// manipulation of IPv6 facilities. +// +// The IPv6 protocol is defined in RFC 8200. +// Socket interface extensions are defined in RFC 3493, RFC 3542 and +// RFC 3678. +// MLDv1 and MLDv2 are defined in RFC 2710 and RFC 3810. +// Source-specific multicast is defined in RFC 4607. +// +// On Darwin, this package requires OS X Mavericks version 10.9 or +// above, or equivalent. +// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv6 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, Conn is used to set the traffic class field on the IPv6 +// header for each packet. +// +// ln, err := net.Listen("tcp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv6.NewConn(c).SetTrafficClass(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPConn which are created as network connections that use the +// IPv6 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. +// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.ParseIP("ff02::114") +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv6 and Ethernet. 
+// +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of PacketConn is used to enable control message +// transmissions. +// +// if err := p.SetControlMessage(ipv6.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, rcm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if rcm.Dst.IsMulticast() { +// if rcm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. +// +// p.SetTrafficClass(0x0) +// p.SetHopLimit(16) +// if _, err := p.WriteTo(data[:n], nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// wcm := ipv6.ControlMessage{TrafficClass: 0xe0, HopLimit: 1} +// for _, ifi := range []*net.Interface{en0, en1} { +// wcm.IfIndex = ifi.Index +// if _, err := p.WriteTo(data[:n], &wcm, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn may join multiple multicast +// groups. For example, a UDP listener with port 1024 might join two +// different groups across over two different network interfaces by +// using: +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. +// +// c1, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv6.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// p2 := ipv6.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. 
+// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff01::114")}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn on MLDv2 supported platform is +// able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.ParseIP("ff32::8000:9")} +// ssmsource := net.UDPAddr{IP: net.ParseIP("fe80::cafe")} +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. +// +// exclsource := net.UDPAddr{IP: net.ParseIP("fe80::dead")} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on MLDv2 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// MLDv1 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv6 // import "golang.org/x/net/ipv6" + +// BUG(mikio): This package is not implemented on JS, NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv6/endpoint.go b/vendor/golang.org/x/net/ipv6/endpoint.go new file mode 100644 index 000000000000..f534a0bf38d3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/endpoint.go @@ -0,0 +1,127 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "time" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn are not +// implemented. + +// A Conn represents a network endpoint that uses IPv6 transport. +// It allows to set basic IP-level socket options such as traffic +// class and hop limit. +type Conn struct { + genericOpt +} + +type genericOpt struct { + *socket.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// PathMTU returns a path MTU value for the destination associated +// with the endpoint. +func (c *Conn) PathMTU() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoPathMTU] + if !ok { + return 0, errNotImplemented + } + _, mtu, err := so.getMTUInfo(c.Conn) + if err != nil { + return 0, err + } + return mtu, nil +} + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) + return &Conn{ + genericOpt: genericOpt{Conn: cc}, + } +} + +// A PacketConn represents a packet network endpoint that uses IPv6 +// transport. 
It is used to control several IP-level socket options +// including IPv6 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv6 and higher layer +// protocols such as OSPF, GRE, and UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + *socket.Conn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } + +// SetControlMessage allows to receive the per packet basis IP-level +// socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return errInvalidConn + } + return c.payloadHandler.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + return &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } +} diff --git a/vendor/golang.org/x/net/ipv6/genericopt.go b/vendor/golang.org/x/net/ipv6/genericopt.go new file mode 100644 index 000000000000..0326aed6def8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/genericopt.go @@ -0,0 +1,56 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +// TrafficClass returns the traffic class field value for outgoing +// packets. +func (c *genericOpt) TrafficClass() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetTrafficClass sets the traffic class field value for future +// outgoing packets. +func (c *genericOpt) SetTrafficClass(tclass int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, tclass) +} + +// HopLimit returns the hop limit field value for outgoing packets. +func (c *genericOpt) HopLimit() (int, error) { + if !c.ok() { + return 0, errInvalidConn + } + so, ok := sockOpts[ssoHopLimit] + if !ok { + return 0, errNotImplemented + } + return so.GetInt(c.Conn) +} + +// SetHopLimit sets the hop limit field value for future outgoing +// packets. 
+func (c *genericOpt) SetHopLimit(hoplim int) error { + if !c.ok() { + return errInvalidConn + } + so, ok := sockOpts[ssoHopLimit] + if !ok { + return errNotImplemented + } + return so.SetInt(c.Conn, hoplim) +} diff --git a/vendor/golang.org/x/net/ipv6/header.go b/vendor/golang.org/x/net/ipv6/header.go new file mode 100644 index 000000000000..e05cb08b21ce --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/header.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "encoding/binary" + "fmt" + "net" +) + +const ( + Version = 6 // protocol version + HeaderLen = 40 // header length +) + +// A Header represents an IPv6 base header. +type Header struct { + Version int // protocol version + TrafficClass int // traffic class + FlowLabel int // flow label + PayloadLen int // payload length + NextHeader int // next header + HopLimit int // hop limit + Src net.IP // source address + Dst net.IP // destination address +} + +func (h *Header) String() string { + if h == nil { + return "" + } + return fmt.Sprintf("ver=%d tclass=%#x flowlbl=%#x payloadlen=%d nxthdr=%d hoplim=%d src=%v dst=%v", h.Version, h.TrafficClass, h.FlowLabel, h.PayloadLen, h.NextHeader, h.HopLimit, h.Src, h.Dst) +} + +// ParseHeader parses b as an IPv6 base header. +func ParseHeader(b []byte) (*Header, error) { + if len(b) < HeaderLen { + return nil, errHeaderTooShort + } + h := &Header{ + Version: int(b[0]) >> 4, + TrafficClass: int(b[0]&0x0f)<<4 | int(b[1])>>4, + FlowLabel: int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3]), + PayloadLen: int(binary.BigEndian.Uint16(b[4:6])), + NextHeader: int(b[6]), + HopLimit: int(b[7]), + } + h.Src = make(net.IP, net.IPv6len) + copy(h.Src, b[8:24]) + h.Dst = make(net.IP, net.IPv6len) + copy(h.Dst, b[24:40]) + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv6/helper.go b/vendor/golang.org/x/net/ipv6/helper.go new file mode 100644 index 000000000000..c2d508f9c303 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper.go @@ -0,0 +1,58 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "errors" + "net" + "runtime" +) + +var ( + errInvalidConn = errors.New("invalid connection") + errMissingAddress = errors.New("missing address") + errHeaderTooShort = errors.New("header too short") + errInvalidConnType = errors.New("invalid conn type") + errNotImplemented = errors.New("not implemented on " + runtime.GOOS + "/" + runtime.GOARCH) +) + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP16(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv6/iana.go b/vendor/golang.org/x/net/ipv6/iana.go new file mode 100644 index 000000000000..32db1aa94964 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/iana.go @@ -0,0 +1,86 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. 
+ +package ipv6 + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2018-03-09 +const ( + ICMPTypeDestinationUnreachable ICMPType = 1 // Destination Unreachable + ICMPTypePacketTooBig ICMPType = 2 // Packet Too Big + ICMPTypeTimeExceeded ICMPType = 3 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 4 // Parameter Problem + ICMPTypeEchoRequest ICMPType = 128 // Echo Request + ICMPTypeEchoReply ICMPType = 129 // Echo Reply + ICMPTypeMulticastListenerQuery ICMPType = 130 // Multicast Listener Query + ICMPTypeMulticastListenerReport ICMPType = 131 // Multicast Listener Report + ICMPTypeMulticastListenerDone ICMPType = 132 // Multicast Listener Done + ICMPTypeRouterSolicitation ICMPType = 133 // Router Solicitation + ICMPTypeRouterAdvertisement ICMPType = 134 // Router Advertisement + ICMPTypeNeighborSolicitation ICMPType = 135 // Neighbor Solicitation + ICMPTypeNeighborAdvertisement ICMPType = 136 // Neighbor Advertisement + ICMPTypeRedirect ICMPType = 137 // Redirect Message + ICMPTypeRouterRenumbering ICMPType = 138 // Router Renumbering + ICMPTypeNodeInformationQuery ICMPType = 139 // ICMP Node Information Query + ICMPTypeNodeInformationResponse ICMPType = 140 // ICMP Node Information Response + ICMPTypeInverseNeighborDiscoverySolicitation ICMPType = 141 // Inverse Neighbor Discovery Solicitation Message + ICMPTypeInverseNeighborDiscoveryAdvertisement ICMPType = 142 // Inverse Neighbor Discovery Advertisement Message + ICMPTypeVersion2MulticastListenerReport ICMPType = 143 // Version 2 Multicast Listener Report + ICMPTypeHomeAgentAddressDiscoveryRequest ICMPType = 144 // Home Agent Address Discovery Request Message + ICMPTypeHomeAgentAddressDiscoveryReply ICMPType = 145 // Home Agent Address Discovery Reply Message + ICMPTypeMobilePrefixSolicitation ICMPType = 146 // Mobile Prefix Solicitation + ICMPTypeMobilePrefixAdvertisement ICMPType = 147 // Mobile Prefix Advertisement + ICMPTypeCertificationPathSolicitation ICMPType = 148 // Certification Path Solicitation Message + ICMPTypeCertificationPathAdvertisement ICMPType = 149 // Certification Path Advertisement Message + ICMPTypeMulticastRouterAdvertisement ICMPType = 151 // Multicast Router Advertisement + ICMPTypeMulticastRouterSolicitation ICMPType = 152 // Multicast Router Solicitation + ICMPTypeMulticastRouterTermination ICMPType = 153 // Multicast Router Termination + ICMPTypeFMIPv6 ICMPType = 154 // FMIPv6 Messages + ICMPTypeRPLControl ICMPType = 155 // RPL Control Message + ICMPTypeILNPv6LocatorUpdate ICMPType = 156 // ILNPv6 Locator Update Message + ICMPTypeDuplicateAddressRequest ICMPType = 157 // Duplicate Address Request + ICMPTypeDuplicateAddressConfirmation ICMPType = 158 // Duplicate Address Confirmation + ICMPTypeMPLControl ICMPType = 159 // MPL Control Message + ICMPTypeExtendedEchoRequest ICMPType = 160 // Extended Echo Request + ICMPTypeExtendedEchoReply ICMPType = 161 // Extended Echo Reply +) + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2018-03-09 +var icmpTypes = map[ICMPType]string{ + 1: "destination unreachable", + 2: "packet too big", + 3: "time exceeded", + 4: "parameter problem", + 128: "echo request", + 129: "echo reply", + 130: "multicast listener query", + 131: "multicast listener report", + 132: "multicast listener done", + 133: "router solicitation", + 134: "router advertisement", + 135: "neighbor solicitation", + 136: "neighbor advertisement", + 137: "redirect message", + 138: "router renumbering", + 139: "icmp node information query", + 
140: "icmp node information response", + 141: "inverse neighbor discovery solicitation message", + 142: "inverse neighbor discovery advertisement message", + 143: "version 2 multicast listener report", + 144: "home agent address discovery request message", + 145: "home agent address discovery reply message", + 146: "mobile prefix solicitation", + 147: "mobile prefix advertisement", + 148: "certification path solicitation message", + 149: "certification path advertisement message", + 151: "multicast router advertisement", + 152: "multicast router solicitation", + 153: "multicast router termination", + 154: "fmipv6 messages", + 155: "rpl control message", + 156: "ilnpv6 locator update message", + 157: "duplicate address request", + 158: "duplicate address confirmation", + 159: "mpl control message", + 160: "extended echo request", + 161: "extended echo reply", +} diff --git a/vendor/golang.org/x/net/ipv6/icmp.go b/vendor/golang.org/x/net/ipv6/icmp.go new file mode 100644 index 000000000000..b7f48e27b837 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/iana" + +// BUG(mikio): On Windows, methods related to ICMPFilter are not +// implemented. + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "" + } + return s +} + +// Protocol returns the ICMPv6 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolIPv6ICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 8200 defines a reasonable role model. A node means a +// device that implements IP. A router means a node that forwards IP +// packets not explicitly addressed to itself, and a host means a node +// that is not a router. +type ICMPFilter struct { + icmpv6Filter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. +func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/golang.org/x/net/ipv6/icmp_bsd.go new file mode 100644 index 000000000000..b03025cdccfc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -0,0 +1,29 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
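// Editor's note: illustrative sketch only; not part of the vendored
// golang.org/x/net/ipv6 sources or of this patch's hunks. It exercises the
// ICMPFilter API defined in icmp.go above: block every ICMPv6 type, then
// re-allow echo replies. On platforms where the filter is only stubbed out
// (see icmp_stub.go and icmp_windows.go below) WillBlock always reports
// false; applying the filter to a live connection is omitted here.
package main

import (
	"fmt"

	"golang.org/x/net/ipv6"
)

func main() {
	var f ipv6.ICMPFilter
	f.SetAll(true)                   // start from "block everything"
	f.Accept(ipv6.ICMPTypeEchoReply) // then punch a hole for echo replies

	fmt.Println(f.WillBlock(ipv6.ICMPTypeEchoRequest)) // true where the filter is implemented
	fmt.Println(f.WillBlock(ipv6.ICMPTypeEchoReply))   // false
}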
+ +// +build aix darwin dragonfly freebsd netbsd openbsd + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Filt { + if block { + f.Filt[i] = 0 + } else { + f.Filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_linux.go b/vendor/golang.org/x/net/ipv6/icmp_linux.go new file mode 100644 index 000000000000..647f6b44fff1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_linux.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Data[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Data[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Data { + if block { + f.Data[i] = 1<<32 - 1 + } else { + f.Data[i] = 0 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Data[typ>>5]&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_solaris.go b/vendor/golang.org/x/net/ipv6/icmp_solaris.go new file mode 100644 index 000000000000..7c23bb1cf6fd --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_solaris.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.X__icmp6_filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.X__icmp6_filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.X__icmp6_filt { + if block { + f.X__icmp6_filt[i] = 0 + } else { + f.X__icmp6_filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.X__icmp6_filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go new file mode 100644 index 000000000000..370e51acd1fb --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +type icmpv6Filter struct { +} + +func (f *icmpv6Filter) accept(typ ICMPType) { +} + +func (f *icmpv6Filter) block(typ ICMPType) { +} + +func (f *icmpv6Filter) setAll(block bool) { +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_windows.go b/vendor/golang.org/x/net/ipv6/icmp_windows.go new file mode 100644 index 000000000000..443cd0736762 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_windows.go @@ -0,0 +1,22 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) block(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) setAll(block bool) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + // TODO(mikio): implement this + return false +} diff --git a/vendor/golang.org/x/net/ipv6/payload.go b/vendor/golang.org/x/net/ipv6/payload.go new file mode 100644 index 000000000000..a8197f16958a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv6 datagram payload handler. +type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go new file mode 100644 index 000000000000..284a04278ed0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -0,0 +1,70 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, errInvalidConn + } + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP16(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. 
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, errInvalidConn + } + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err = c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go new file mode 100644 index 000000000000..c5a4c967527d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -0,0 +1,38 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package ipv6 + +import "net" + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, errInvalidConn + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, errInvalidConn + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt.go b/vendor/golang.org/x/net/ipv6/sockopt.go new file mode 100644 index 000000000000..cc3907df385c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
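// Editor's note: illustrative sketch only; not part of the vendored
// golang.org/x/net/ipv6 sources or of this patch's hunks. It shows the
// ReadFrom/ControlMessage contract documented in payload_cmsg.go above,
// using the package's public PacketConn wrapper (defined elsewhere in this
// vendored tree). The listen address is arbitrary, and SetControlMessage
// may return a "not implemented" error on platforms without
// control-message support.
package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	c, err := net.ListenPacket("udp6", "[::1]:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv6.NewPacketConn(c)
	// Ask for the hop limit and destination address of each received
	// datagram to be delivered as ancillary data.
	if err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagDst, true); err != nil {
		log.Fatal(err)
	}

	b := make([]byte, 1500)
	n, cm, src, err := p.ReadFrom(b)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes from %v, control message: %v", n, src, cm)
}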
+
+package ipv6
+
+import "golang.org/x/net/internal/socket"
+
+// Sticky socket options
+const (
+	ssoTrafficClass        = iota // header field for unicast packet, RFC 3542
+	ssoHopLimit                   // header field for unicast packet, RFC 3493
+	ssoMulticastInterface         // outbound interface for multicast packet, RFC 3493
+	ssoMulticastHopLimit          // header field for multicast packet, RFC 3493
+	ssoMulticastLoopback          // loopback for multicast packet, RFC 3493
+	ssoReceiveTrafficClass        // header field on received packet, RFC 3542
+	ssoReceiveHopLimit            // header field on received packet, RFC 2292 or 3542
+	ssoReceivePacketInfo          // incoming or outgoing packet path, RFC 2292 or 3542
+	ssoReceivePathMTU             // path mtu, RFC 3542
+	ssoPathMTU                    // path mtu, RFC 3542
+	ssoChecksum                   // packet checksum, RFC 2292 or 3542
+	ssoICMPFilter                 // icmp filter, RFC 2292 or 3542
+	ssoJoinGroup                  // any-source multicast, RFC 3493
+	ssoLeaveGroup                 // any-source multicast, RFC 3493
+	ssoJoinSourceGroup            // source-specific multicast
+	ssoLeaveSourceGroup           // source-specific multicast
+	ssoBlockSourceGroup           // any-source or source-specific multicast
+	ssoUnblockSourceGroup         // any-source or source-specific multicast
+	ssoAttachFilter               // attach BPF for filtering inbound traffic
+)
+
+// Sticky socket option value types
+const (
+	ssoTypeIPMreq = iota + 1
+	ssoTypeGroupReq
+	ssoTypeGroupSourceReq
+)
+
+// A sockOpt represents a binding for sticky socket option.
+type sockOpt struct {
+	socket.Option
+	typ int // hint for option value type; optional
+}
diff --git a/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/vendor/golang.org/x/net/ipv6/sockopt_posix.go
new file mode 100644
index 000000000000..824c623ccefd
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sockopt_posix.go
@@ -0,0 +1,89 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package ipv6
+
+import (
+	"net"
+	"runtime"
+	"unsafe"
+
+	"golang.org/x/net/bpf"
+	"golang.org/x/net/internal/socket"
+)
+
+func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) {
+	n, err := so.GetInt(c)
+	if err != nil {
+		return nil, err
+	}
+	return net.InterfaceByIndex(n)
+}
+
+func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error {
+	var n int
+	if ifi != nil {
+		n = ifi.Index
+	}
+	return so.SetInt(c, n)
+}
+
+func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) {
+	b := make([]byte, so.Len)
+	n, err := so.Get(c, b)
+	if err != nil {
+		return nil, err
+	}
+	if n != sizeofICMPv6Filter {
+		return nil, errNotImplemented
+	}
+	return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil
+}
+
+func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error {
+	b := (*[sizeofICMPv6Filter]byte)(unsafe.Pointer(f))[:sizeofICMPv6Filter]
+	return so.Set(c, b)
+}
+
+func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) {
+	b := make([]byte, so.Len)
+	n, err := so.Get(c, b)
+	if err != nil {
+		return nil, 0, err
+	}
+	if n != sizeofIPv6Mtuinfo {
+		return nil, 0, errNotImplemented
+	}
+	mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0]))
+	if mi.Addr.Scope_id == 0 || runtime.GOOS == "aix" {
+		// AIX kernel might return a wrong address.
+ return nil, int(mi.Mtu), nil + } + ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) + if err != nil { + return nil, 0, err + } + return ifi, int(mi.Mtu), nil +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errNotImplemented + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go new file mode 100644 index 000000000000..0a87a93bbd4d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -0,0 +1,46 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errNotImplemented +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errNotImplemented +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errNotImplemented +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + return nil, 0, errNotImplemented +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/sys_aix.go b/vendor/golang.org/x/net/ipv6/sys_aix.go new file mode 100644 index 000000000000..bce7091fb0b3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_aix.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Added for go1.11 compatibility +// +build aix + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq.go b/vendor/golang.org/x/net/ipv6/sys_asmreq.go new file mode 100644 index 000000000000..8c3934c3eecb --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq.go @@ -0,0 +1,24 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreq ipv6Mreq + copy(mreq.Multiaddr[:], grp) + if ifi != nil { + mreq.setIfindex(ifi.Index) + } + b := (*[sizeofIPv6Mreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPv6Mreq] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go new file mode 100644 index 000000000000..87ae4818144f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go @@ -0,0 +1,17 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf.go b/vendor/golang.org/x/net/ipv6/sys_bpf.go new file mode 100644 index 000000000000..90ef4dfaf434 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bpf.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" + "golang.org/x/sys/unix" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := unix.SockFprog{ + Len: uint16(len(f)), + Filter: (*unix.SockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[unix.SizeofSockFprog]byte)(unsafe.Pointer(&prog))[:unix.SizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go new file mode 100644 index 000000000000..eb9f8316237a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv6 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bsd.go b/vendor/golang.org/x/net/ipv6/sys_bsd.go new file mode 100644 index 000000000000..e416eaa1fe4a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bsd.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build dragonfly netbsd openbsd + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_darwin.go b/vendor/golang.org/x/net/ipv6/sys_darwin.go new file mode 100644 index 000000000000..12cc5cb2c1e2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_darwin.go @@ -0,0 +1,78 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = 
sizeofSockaddrInet6
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], grp)
+	sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132))
+	sa.Len = sizeofSockaddrInet6
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], src)
+}
diff --git a/vendor/golang.org/x/net/ipv6/sys_freebsd.go b/vendor/golang.org/x/net/ipv6/sys_freebsd.go
new file mode 100644
index 000000000000..85a9f5d07de4
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv6/sys_freebsd.go
@@ -0,0 +1,92 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"net"
+	"runtime"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+	"golang.org/x/net/internal/socket"
+)
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{
+		ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass},
+		ctlHopLimit:     {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit},
+		ctlPacketInfo:   {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo},
+		ctlNextHop:      {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop},
+		ctlPathMTU:      {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU},
+	}
+
+	sockOpts = map[int]*sockOpt{
+		ssoTrafficClass:        {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}},
+		ssoHopLimit:            {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}},
+		ssoMulticastInterface:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}},
+		ssoMulticastHopLimit:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}},
+		ssoMulticastLoopback:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}},
+		ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}},
+		ssoReceiveHopLimit:     {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}},
+		ssoReceivePacketInfo:   {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}},
+		ssoReceivePathMTU:      {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}},
+		ssoPathMTU:             {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}},
+		ssoChecksum:            {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}},
+		ssoICMPFilter:          {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}},
+		ssoJoinGroup:           {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},
+		ssoLeaveGroup:          {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},
+		ssoJoinSourceGroup:     {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+		ssoLeaveSourceGroup:    {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+		ssoBlockSourceGroup:    {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+		ssoUnblockSourceGroup:  {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+	}
+)
+
+func init() {
+	if runtime.GOOS == 
"freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + compatFreeBSD32 = true + break + } + } + } +} + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_linux.go b/vendor/golang.org/x/net/ipv6/sys_linux.go new file mode 100644 index 000000000000..96e8093a307d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_linux.go @@ -0,0 +1,75 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" + "golang.org/x/sys/unix" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMPV6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: 
sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: unix.SOL_SOCKET, Name: unix.SO_ATTACH_FILTER, Len: unix.SizeofSockFprog}}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_solaris.go b/vendor/golang.org/x/net/ipv6/sys_solaris.go new file mode 100644 index 000000000000..d348b5f6e45a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_solaris.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = 
(*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go new file mode 100644 index 000000000000..9b52e978cbd9 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin freebsd linux solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var compatFreeBSD32 bool // 386 emulation on amd64 + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if compatFreeBSD32 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if compatFreeBSD32 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go new file mode 100644 index 000000000000..d5bc1108c5f2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!freebsd,!linux,!solaris + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errNotImplemented +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errNotImplemented +} diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go new file mode 100644 index 000000000000..4f252d09f6cb --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv6/sys_windows.go b/vendor/golang.org/x/net/ipv6/sys_windows.go new file mode 100644 index 000000000000..fc36b018bd2f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_windows.go @@ -0,0 +1,75 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
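// Editor's note: illustrative sketch only; not part of the vendored
// golang.org/x/net/ipv6 sources or of this patch's hunks. The group and
// source-group request marshaling above (sys_ssmreq.go and the per-platform
// setGroup helpers) ultimately backs the package's public JoinGroup and
// LeaveGroup methods; this shows a typical any-source multicast join. The
// interface name "eth0", the port, and the group address are assumptions
// chosen for the example.
package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	c, err := net.ListenPacket("udp6", "[::]:1024")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	ifi, err := net.InterfaceByName("eth0") // assumed interface name
	if err != nil {
		log.Fatal(err)
	}

	p := ipv6.NewPacketConn(c)
	group := &net.UDPAddr{IP: net.ParseIP("ff02::114")}
	if err := p.JoinGroup(ifi, group); err != nil {
		log.Fatal(err)
	}
	defer p.LeaveGroup(ifi, group)
}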
+ +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PKTINFO = 0x13 + + sizeofSockaddrInet6 = 0x1c + + sizeofIPv6Mreq = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofICMPv6Filter = 0 +) + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type icmpv6Filter struct { + // TODO(mikio): implement this +} + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go new file mode 100644 index 000000000000..bf44e338bd80 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go @@ -0,0 +1,103 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_aix.go + +// Added for go1.11 compatibility +// +build aix + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysICMP6_FILTER = 0x26 + + sysIPV6_CHECKSUM = 0x27 + sysIPV6_V6ONLY = 0x25 + + sysIPV6_RTHDRDSTOPTS = 0x37 + + sysIPV6_RECVPKTINFO = 0x23 + sysIPV6_RECVHOPLIMIT = 0x29 + sysIPV6_RECVRTHDR = 0x33 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_RECVDSTOPTS = 0x38 + + sysIPV6_USE_MIN_MTU = 0x2c + sysIPV6_RECVPATHMTU = 0x2f + sysIPV6_PATHMTU = 0x2e + + sysIPV6_PKTINFO = 0x21 + sysIPV6_HOPLIMIT = 0x28 + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x34 + sysIPV6_DSTOPTS = 0x36 + sysIPV6_RTHDR = 0x32 + + sysIPV6_RECVTCLASS = 0x2a + + sysIPV6_TCLASS = 0x2b + sysIPV6_DONTFRAG = 0x2d + + sizeofSockaddrStorage = 0x508 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x510 + sizeofGroupSourceReq = 0xa18 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + X__ss_len uint8 + Family uint8 + X__ss_pad1 [6]uint8 + X__ss_align int64 + X__ss_pad2 [1265]uint8 + Pad_cgo_0 [7]byte +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/vendor/golang.org/x/net/ipv6/zsys_darwin.go new file mode 100644 index 000000000000..555744afd71a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_darwin.go @@ -0,0 +1,131 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_darwin.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + sysIPV6_2292PKTINFO = 0x13 + sysIPV6_2292HOPLIMIT = 0x14 + sysIPV6_2292NEXTHOP = 0x15 + sysIPV6_2292HOPOPTS = 0x16 + sysIPV6_2292DSTOPTS = 0x17 + sysIPV6_2292RTHDR = 0x18 + + sysIPV6_2292PKTOPTIONS = 0x19 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RECVTCLASS = 0x23 + sysIPV6_TCLASS = 0x24 + + sysIPV6_RTHDRDSTOPTS = 0x39 + + sysIPV6_RECVPKTINFO = 0x3d + + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_MSFILTER = 0x4a + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_BOUND_IF = 0x7d + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go new file mode 100644 index 000000000000..cf3cc1024acb --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go @@ -0,0 +1,88 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_dragonfly.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go new file mode 100644 index 000000000000..73f31b260ea1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go @@ -0,0 +1,122 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go new file mode 100644 index 000000000000..490ce7cf1044 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go @@ -0,0 +1,124 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go new file mode 100644 index 000000000000..490ce7cf1044 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go @@ -0,0 +1,124 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm64.go new file mode 100644 index 000000000000..47e99ac9d304 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm64.go @@ -0,0 +1,122 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]uint8 + X__ss_align int64 + X__ss_pad2 [112]uint8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go new file mode 100644 index 000000000000..bde4a8f8f5d2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go @@ -0,0 +1,152 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go new file mode 100644 index 000000000000..992ac9ec5f2c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go @@ -0,0 +1,154 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go new file mode 100644 index 000000000000..bde4a8f8f5d2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go @@ -0,0 +1,152 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go new file mode 100644 index 000000000000..992ac9ec5f2c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go @@ -0,0 +1,154 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go new file mode 100644 index 000000000000..bde4a8f8f5d2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go @@ -0,0 +1,152 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go new file mode 100644 index 000000000000..992ac9ec5f2c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go @@ -0,0 +1,154 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go new file mode 100644 index 000000000000..992ac9ec5f2c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go @@ -0,0 +1,154 @@ +// Code generated by cmd/cgo -godefs; DO 
NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go new file mode 100644 index 000000000000..bde4a8f8f5d2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go @@ -0,0 +1,152 @@ +// Code generated by cmd/cgo -godefs; 
DO NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go new file mode 100644 index 000000000000..66fd23612104 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go @@ -0,0 +1,152 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go new file mode 100644 index 000000000000..992ac9ec5f2c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go @@ -0,0 +1,154 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go new file mode 100644 index 000000000000..992ac9ec5f2c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go @@ -0,0 +1,154 @@ +// Code generated by cmd/cgo -godefs; DO NOT 
EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go new file mode 100644 index 000000000000..6083ddcedcba --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go @@ -0,0 +1,156 @@ +// Code generated by cmd/cgo -godefs; 
DO NOT EDIT. +// cgo -godefs defs_linux.go + +// +build riscv64 + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go new file mode 100644 index 000000000000..992ac9ec5f2c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go @@ -0,0 +1,154 @@ +// Code 
generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_netbsd.go b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go new file mode 100644 index 000000000000..e39571e07250 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go @@ -0,0 +1,84 @@ +// Code generated by 
cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_netbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go new file mode 100644 index 000000000000..cc1899a630c2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go @@ -0,0 +1,93 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_openbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTH_LEVEL = 0x35 + sysIPV6_ESP_TRANS_LEVEL = 0x36 + sysIPV6_ESP_NETWORK_LEVEL = 0x37 + sysIPSEC6_OUTSA = 0x38 + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + sysIPV6_IPCOMP_LEVEL = 0x3c + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + sysIPV6_PIPEX = 0x3f + + sysIPV6_RTABLE = 0x1021 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/vendor/golang.org/x/net/ipv6/zsys_solaris.go new file mode 100644 index 000000000000..690eef9341ac --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_solaris.go @@ -0,0 +1,131 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs defs_solaris.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x5 + sysIPV6_MULTICAST_IF = 0x6 + sysIPV6_MULTICAST_HOPS = 0x7 + sysIPV6_MULTICAST_LOOP = 0x8 + sysIPV6_JOIN_GROUP = 0x9 + sysIPV6_LEAVE_GROUP = 0xa + + sysIPV6_PKTINFO = 0xb + + sysIPV6_HOPLIMIT = 0xc + sysIPV6_NEXTHOP = 0xd + sysIPV6_HOPOPTS = 0xe + sysIPV6_DSTOPTS = 0xf + + sysIPV6_RTHDR = 0x10 + sysIPV6_RTHDRDSTOPTS = 0x11 + + sysIPV6_RECVPKTINFO = 0x12 + sysIPV6_RECVHOPLIMIT = 0x13 + sysIPV6_RECVHOPOPTS = 0x14 + + sysIPV6_RECVRTHDR = 0x16 + + sysIPV6_RECVRTHDRDSTOPTS = 0x17 + + sysIPV6_CHECKSUM = 0x18 + sysIPV6_RECVTCLASS = 0x19 + sysIPV6_USE_MIN_MTU = 0x20 + sysIPV6_DONTFRAG = 0x21 + sysIPV6_SEC_OPT = 0x22 + sysIPV6_SRC_PREFERENCES = 0x23 + sysIPV6_RECVPATHMTU = 0x24 + sysIPV6_PATHMTU = 0x25 + sysIPV6_TCLASS = 0x26 + sysIPV6_V6ONLY = 0x27 + + sysIPV6_RECVDSTOPTS = 0x28 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sysIPV6_PREFER_SRC_HOME = 0x1 + sysIPV6_PREFER_SRC_COA = 0x2 + sysIPV6_PREFER_SRC_PUBLIC = 0x4 + sysIPV6_PREFER_SRC_TMP = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x10 + sysIPV6_PREFER_SRC_CGA = 0x20 + + sysIPV6_PREFER_SRC_MIPMASK = 0x3 + sysIPV6_PREFER_SRC_MIPDEFAULT = 0x1 + sysIPV6_PREFER_SRC_TMPMASK = 0xc + sysIPV6_PREFER_SRC_TMPDEFAULT = 0x4 + sysIPV6_PREFER_SRC_CGAMASK = 0x30 + sysIPV6_PREFER_SRC_CGADEFAULT = 0x10 + + sysIPV6_PREFER_SRC_MASK = 0x3f + + sysIPV6_PREFER_SRC_DEFAULT = 0x15 + + sysIPV6_BOUND_IF = 0x41 + sysIPV6_UNSPEC_SRC = 0x42 + + sysICMP6_FILTER = 0x1 + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet6 = 0x20 + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x24 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} + +type icmpv6Filter struct { + X__icmp6_filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 000000000000..200617ea8647 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,181 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package publicsuffix provides a public suffix list based on data from +// https://publicsuffix.org/ +// +// A public suffix is one under which Internet users can directly register +// names. It is related to, but different from, a TLD (top level domain). +// +// "com" is a TLD (top level domain). Top level means it has no dots. +// +// "com" is also a public suffix. Amazon and Google have registered different +// siblings under that domain: "amazon.com" and "google.com". 
+// +// "au" is another TLD, again because it has no dots. But it's not "amazon.au". +// Instead, it's "amazon.com.au". +// +// "com.au" isn't an actual TLD, because it's not at the top level (it has +// dots). But it is an eTLD (effective TLD), because that's the branching point +// for domain name registrars. +// +// Another name for "an eTLD" is "a public suffix". Often, what's more of +// interest is the eTLD+1, or one more label than the public suffix. For +// example, browsers partition read/write access to HTTP cookies according to +// the eTLD+1. Web pages served from "amazon.com.au" can't read cookies from +// "google.com.au", but web pages served from "maps.google.com" can share +// cookies from "www.google.com", so you don't have to sign into Google Maps +// separately from signing into Google Web Search. Note that all four of those +// domains have 3 labels and 2 dots. The first two domains are each an eTLD+1, +// the last two are not (but share the same eTLD+1: "google.com"). +// +// All of these domains have the same eTLD+1: +// - "www.books.amazon.co.uk" +// - "books.amazon.co.uk" +// - "amazon.co.uk" +// Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk". +// +// There is no closed form algorithm to calculate the eTLD of a domain. +// Instead, the calculation is data driven. This package provides a +// pre-compiled snapshot of Mozilla's PSL (Public Suffix List) data at +// https://publicsuffix.org/ +package publicsuffix // import "golang.org/x/net/publicsuffix" + +// TODO: specify case sensitivity and leading/trailing dot behavior for +// func PublicSuffix and func EffectiveTLDPlusOne. + +import ( + "fmt" + "net/http/cookiejar" + "strings" +) + +// List implements the cookiejar.PublicSuffixList interface by calling the +// PublicSuffix function. +var List cookiejar.PublicSuffixList = list{} + +type list struct{} + +func (list) PublicSuffix(domain string) string { + ps, _ := PublicSuffix(domain) + return ps +} + +func (list) String() string { + return version +} + +// PublicSuffix returns the public suffix of the domain using a copy of the +// publicsuffix.org database compiled into the library. +// +// icann is whether the public suffix is managed by the Internet Corporation +// for Assigned Names and Numbers. If not, the public suffix is either a +// privately managed domain (and in practice, not a top level domain) or an +// unmanaged top level domain (and not explicitly mentioned in the +// publicsuffix.org list). For example, "foo.org" and "foo.co.uk" are ICANN +// domains, "foo.dyndns.org" and "foo.blogspot.co.uk" are private domains and +// "cromulent" is an unmanaged top level domain. 
+// +// Use cases for distinguishing ICANN domains like "foo.com" from private +// domains like "foo.appspot.com" can be found at +// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases +func PublicSuffix(domain string) (publicSuffix string, icann bool) { + lo, hi := uint32(0), uint32(numTLD) + s, suffix, icannNode, wildcard := domain, len(domain), false, false +loop: + for { + dot := strings.LastIndex(s, ".") + if wildcard { + icann = icannNode + suffix = 1 + dot + } + if lo == hi { + break + } + f := find(s[1+dot:], lo, hi) + if f == notFound { + break + } + + u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength)
+ icannNode = u&(1<<nodesBitsICANN-1) != 0 + u >>= nodesBitsICANN + u = children[u&(1<<nodesBitsChildren-1)] + lo = u & (1<<childrenBitsLo - 1) + u >>= childrenBitsLo + hi = u & (1<<childrenBitsHi - 1) + u >>= childrenBitsHi + switch u & (1<<childrenBitsNodeType - 1) { + case nodeTypeNormal: + suffix = 1 + dot + case nodeTypeException: + suffix = 1 + len(s) + break loop + } + u >>= childrenBitsNodeType + wildcard = u&(1<>= nodesBitsTextLength + offset := x & (1<= len(dst) {
+ err = transform.ErrShortDst + break + } + dst[nDst] = c + nDst++ + nSrc = i + 1 + continue + } + + decode := &m.charmap.decode[c] + n := int(decode.len) + if nDst+n > len(dst) { + err = transform.ErrShortDst + break + } + // It's 15% faster to avoid calling copy for these tiny slices. + for j := 0; j < n; j++ { + dst[nDst] = decode.data[j] + nDst++ + } + nSrc = i + 1 + } + return nDst, nSrc, err +}
+ +// DecodeByte returns the Charmap's rune decoding of the byte b. +func (m *Charmap) DecodeByte(b byte) rune { + switch x := &m.decode[b]; x.len { + case 1: + return rune(x.data[0]) + case 2: + return rune(x.data[0]&0x1f)<<6 | rune(x.data[1]&0x3f) + default: + return rune(x.data[0]&0x0f)<<12 | rune(x.data[1]&0x3f)<<6 | rune(x.data[2]&0x3f) + } +}
+ +// charmapEncoder implements transform.Transformer by encoding from UTF-8. +type charmapEncoder struct { + transform.NopResetter + charmap *Charmap +} + +func (m charmapEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + r, size := rune(0), 0 +loop: + for nSrc < len(src) { + if nDst >= len(dst) { + err = transform.ErrShortDst + break + } + r = rune(src[nSrc]) + + // Decode a 1-byte rune. + if r < utf8.RuneSelf { + if m.charmap.asciiSuperset { + nSrc++ + dst[nDst] = uint8(r) + nDst++ + continue + } + size = 1 + + } else { + // Decode a multi-byte rune. + r, size = utf8.DecodeRune(src[nSrc:]) + if size == 1 { + // All valid runes of size 1 (those below utf8.RuneSelf) were + // handled above. We have invalid UTF-8 or we haven't seen the + // full character yet. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + } else { + err = internal.RepertoireError(m.charmap.replacement) + } + break + } + }
+ + // Binary search in [low, high) for that rune in the m.charmap.encode table. + for low, high := int(m.charmap.low), 0x100; ; { + if low >= high { + err = internal.RepertoireError(m.charmap.replacement) + break loop + } + mid := (low + high) / 2 + got := m.charmap.encode[mid] + gotRune := rune(got & (1<<24 - 1)) + if gotRune < r { + low = mid + 1 + } else if gotRune > r { + high = mid + } else { + dst[nDst] = byte(got >> 24) + nDst++ + break + } + } + nSrc += size + } + return nDst, nSrc, err +}
+ +// EncodeRune returns the Charmap's byte encoding of the rune r. ok is whether +// r is in the Charmap's repertoire. If not, b is set to the Charmap's +// replacement byte. This is often the ASCII substitute character '\x1a'.
+func (m *Charmap) EncodeRune(r rune) (b byte, ok bool) { + if r < utf8.RuneSelf && m.asciiSuperset { + return byte(r), true + } + for low, high := int(m.low), 0x100; ; { + if low >= high { + return m.replacement, false + } + mid := (low + high) / 2 + got := m.encode[mid] + gotRune := rune(got & (1<<24 - 1)) + if gotRune < r { + low = mid + 1 + } else if gotRune > r { + high = mid + } else { + return byte(got >> 24), true + } + } +} diff --git a/vendor/golang.org/x/text/encoding/charmap/tables.go b/vendor/golang.org/x/text/encoding/charmap/tables.go new file mode 100644 index 000000000000..cf7281e9e36e --- /dev/null +++ b/vendor/golang.org/x/text/encoding/charmap/tables.go @@ -0,0 +1,7410 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package charmap + +import ( + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/internal/identifier" +) + +// CodePage037 is the IBM Code Page 037 encoding. +var CodePage037 *Charmap = &codePage037 + +var codePage037 = Charmap{ + name: "IBM Code Page 037", + mib: identifier.IBM037, + asciiSuperset: false, + low: 0x00, + replacement: 0x3f, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9c, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x86, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x97, 0x00}}, {2, [3]byte{0xc2, 0x8d, 0x00}}, + {2, [3]byte{0xc2, 0x8e, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9d, 0x00}}, {2, [3]byte{0xc2, 0x85, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x87, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x92, 0x00}}, {2, [3]byte{0xc2, 0x8f, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x80, 0x00}}, {2, [3]byte{0xc2, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0x82, 0x00}}, {2, [3]byte{0xc2, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0x84, 0x00}}, {1, [3]byte{0x0a, 0x00, 0x00}}, + {1, [3]byte{0x17, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x88, 0x00}}, {2, [3]byte{0xc2, 0x89, 0x00}}, + {2, [3]byte{0xc2, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0x8b, 0x00}}, + {2, [3]byte{0xc2, 0x8c, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0x91, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0x94, 0x00}}, {2, [3]byte{0xc2, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0x96, 0x00}}, {1, [3]byte{0x04, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x98, 0x00}}, {2, [3]byte{0xc2, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9b, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9e, 0x00}}, {1, [3]byte{0x1a, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa4, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 
0xa7, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {1, [3]byte{0x2e, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x28, 0x00, 0x00}}, + {1, [3]byte{0x2b, 0x00, 0x00}}, {1, [3]byte{0x7c, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {1, [3]byte{0x21, 0x00, 0x00}}, {1, [3]byte{0x24, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x3b, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xac, 0x00}}, + {1, [3]byte{0x2d, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x84, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {1, [3]byte{0x2c, 0x00, 0x00}}, + {1, [3]byte{0x25, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {1, [3]byte{0x60, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x3d, 0x00, 0x00}}, {1, [3]byte{0x22, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {1, [3]byte{0x6a, 0x00, 0x00}}, + {1, [3]byte{0x6b, 0x00, 0x00}}, {1, [3]byte{0x6c, 0x00, 0x00}}, + {1, [3]byte{0x6d, 0x00, 0x00}}, {1, [3]byte{0x6e, 0x00, 0x00}}, + {1, [3]byte{0x6f, 0x00, 0x00}}, {1, [3]byte{0x70, 0x00, 0x00}}, + {1, [3]byte{0x71, 0x00, 0x00}}, {1, [3]byte{0x72, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {1, [3]byte{0x7e, 0x00, 0x00}}, + {1, [3]byte{0x73, 0x00, 0x00}}, {1, [3]byte{0x74, 0x00, 0x00}}, + {1, [3]byte{0x75, 0x00, 0x00}}, {1, [3]byte{0x76, 0x00, 0x00}}, + {1, [3]byte{0x77, 0x00, 0x00}}, {1, [3]byte{0x78, 0x00, 0x00}}, + {1, [3]byte{0x79, 0x00, 0x00}}, {1, [3]byte{0x7a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xa1, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0xae, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa5, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, 
[3]byte{0xc2, 0xa9, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xbc, 0x00}}, + {2, [3]byte{0xc2, 0xbd, 0x00}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {1, [3]byte{0x5b, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xaf, 0x00}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {1, [3]byte{0x7b, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {1, [3]byte{0x7d, 0x00, 0x00}}, {1, [3]byte{0x4a, 0x00, 0x00}}, + {1, [3]byte{0x4b, 0x00, 0x00}}, {1, [3]byte{0x4c, 0x00, 0x00}}, + {1, [3]byte{0x4d, 0x00, 0x00}}, {1, [3]byte{0x4e, 0x00, 0x00}}, + {1, [3]byte{0x4f, 0x00, 0x00}}, {1, [3]byte{0x50, 0x00, 0x00}}, + {1, [3]byte{0x51, 0x00, 0x00}}, {1, [3]byte{0x52, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb9, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {1, [3]byte{0x53, 0x00, 0x00}}, {1, [3]byte{0x54, 0x00, 0x00}}, + {1, [3]byte{0x55, 0x00, 0x00}}, {1, [3]byte{0x56, 0x00, 0x00}}, + {1, [3]byte{0x57, 0x00, 0x00}}, {1, [3]byte{0x58, 0x00, 0x00}}, + {1, [3]byte{0x59, 0x00, 0x00}}, {1, [3]byte{0x5a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0x94, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9f, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x37000004, 0x2d000005, 0x2e000006, 0x2f000007, + 0x16000008, 0x05000009, 0x2500000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x3c000014, 0x3d000015, 0x32000016, 0x26000017, + 0x18000018, 0x19000019, 0x3f00001a, 0x2700001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x40000020, 0x5a000021, 0x7f000022, 0x7b000023, 0x5b000024, 0x6c000025, 0x50000026, 0x7d000027, + 0x4d000028, 0x5d000029, 0x5c00002a, 0x4e00002b, 0x6b00002c, 0x6000002d, 0x4b00002e, 0x6100002f, + 0xf0000030, 0xf1000031, 0xf2000032, 0xf3000033, 0xf4000034, 0xf5000035, 0xf6000036, 0xf7000037, + 0xf8000038, 0xf9000039, 0x7a00003a, 0x5e00003b, 0x4c00003c, 0x7e00003d, 0x6e00003e, 0x6f00003f, + 0x7c000040, 0xc1000041, 0xc2000042, 0xc3000043, 0xc4000044, 0xc5000045, 0xc6000046, 0xc7000047, + 0xc8000048, 0xc9000049, 0xd100004a, 0xd200004b, 0xd300004c, 0xd400004d, 0xd500004e, 0xd600004f, + 0xd7000050, 0xd8000051, 0xd9000052, 
0xe2000053, 0xe3000054, 0xe4000055, 0xe5000056, 0xe6000057, + 0xe7000058, 0xe8000059, 0xe900005a, 0xba00005b, 0xe000005c, 0xbb00005d, 0xb000005e, 0x6d00005f, + 0x79000060, 0x81000061, 0x82000062, 0x83000063, 0x84000064, 0x85000065, 0x86000066, 0x87000067, + 0x88000068, 0x89000069, 0x9100006a, 0x9200006b, 0x9300006c, 0x9400006d, 0x9500006e, 0x9600006f, + 0x97000070, 0x98000071, 0x99000072, 0xa2000073, 0xa3000074, 0xa4000075, 0xa5000076, 0xa6000077, + 0xa7000078, 0xa8000079, 0xa900007a, 0xc000007b, 0x4f00007c, 0xd000007d, 0xa100007e, 0x0700007f, + 0x20000080, 0x21000081, 0x22000082, 0x23000083, 0x24000084, 0x15000085, 0x06000086, 0x17000087, + 0x28000088, 0x29000089, 0x2a00008a, 0x2b00008b, 0x2c00008c, 0x0900008d, 0x0a00008e, 0x1b00008f, + 0x30000090, 0x31000091, 0x1a000092, 0x33000093, 0x34000094, 0x35000095, 0x36000096, 0x08000097, + 0x38000098, 0x39000099, 0x3a00009a, 0x3b00009b, 0x0400009c, 0x1400009d, 0x3e00009e, 0xff00009f, + 0x410000a0, 0xaa0000a1, 0x4a0000a2, 0xb10000a3, 0x9f0000a4, 0xb20000a5, 0x6a0000a6, 0xb50000a7, + 0xbd0000a8, 0xb40000a9, 0x9a0000aa, 0x8a0000ab, 0x5f0000ac, 0xca0000ad, 0xaf0000ae, 0xbc0000af, + 0x900000b0, 0x8f0000b1, 0xea0000b2, 0xfa0000b3, 0xbe0000b4, 0xa00000b5, 0xb60000b6, 0xb30000b7, + 0x9d0000b8, 0xda0000b9, 0x9b0000ba, 0x8b0000bb, 0xb70000bc, 0xb80000bd, 0xb90000be, 0xab0000bf, + 0x640000c0, 0x650000c1, 0x620000c2, 0x660000c3, 0x630000c4, 0x670000c5, 0x9e0000c6, 0x680000c7, + 0x740000c8, 0x710000c9, 0x720000ca, 0x730000cb, 0x780000cc, 0x750000cd, 0x760000ce, 0x770000cf, + 0xac0000d0, 0x690000d1, 0xed0000d2, 0xee0000d3, 0xeb0000d4, 0xef0000d5, 0xec0000d6, 0xbf0000d7, + 0x800000d8, 0xfd0000d9, 0xfe0000da, 0xfb0000db, 0xfc0000dc, 0xad0000dd, 0xae0000de, 0x590000df, + 0x440000e0, 0x450000e1, 0x420000e2, 0x460000e3, 0x430000e4, 0x470000e5, 0x9c0000e6, 0x480000e7, + 0x540000e8, 0x510000e9, 0x520000ea, 0x530000eb, 0x580000ec, 0x550000ed, 0x560000ee, 0x570000ef, + 0x8c0000f0, 0x490000f1, 0xcd0000f2, 0xce0000f3, 0xcb0000f4, 0xcf0000f5, 0xcc0000f6, 0xe10000f7, + 0x700000f8, 0xdd0000f9, 0xde0000fa, 0xdb0000fb, 0xdc0000fc, 0x8d0000fd, 0x8e0000fe, 0xdf0000ff, + }, +} + +// CodePage437 is the IBM Code Page 437 encoding. 
+var CodePage437 *Charmap = &codePage437 + +var codePage437 = Charmap{ + name: "IBM Code Page 437", + mib: identifier.PC8CodePage437, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa5, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xac, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc3, 0xa6, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xbb, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xbf, 0x00}}, {2, [3]byte{0xc3, 0x96, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc2, 0xa2, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xa7}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {3, [3]byte{0xe2, 0x8c, 0x90}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {3, [3]byte{0xe2, 0x95, 0x9e}}, {3, [3]byte{0xe2, 0x95, 0x9f}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 
0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa4}}, + {3, [3]byte{0xe2, 0x95, 0xa5}}, {3, [3]byte{0xe2, 0x95, 0x99}}, + {3, [3]byte{0xe2, 0x95, 0x98}}, {3, [3]byte{0xe2, 0x95, 0x92}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, + {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xce, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0x93, 0x00}}, {2, [3]byte{0xcf, 0x80, 0x00}}, + {2, [3]byte{0xce, 0xa3, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xcf, 0x84, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0x98, 0x00}}, + {2, [3]byte{0xce, 0xa9, 0x00}}, {2, [3]byte{0xce, 0xb4, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xcf, 0x86, 0x00}}, + {2, [3]byte{0xce, 0xb5, 0x00}}, {3, [3]byte{0xe2, 0x88, 0xa9}}, + {3, [3]byte{0xe2, 0x89, 0xa1}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa5}}, {3, [3]byte{0xe2, 0x89, 0xa4}}, + {3, [3]byte{0xe2, 0x8c, 0xa0}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {3, [3]byte{0xe2, 0x81, 0xbf}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xad0000a1, 0x9b0000a2, 0x9c0000a3, 0x9d0000a5, 0xa60000aa, 0xae0000ab, 0xaa0000ac, + 0xf80000b0, 0xf10000b1, 0xfd0000b2, 0xe60000b5, 0xfa0000b7, 0xa70000ba, 0xaf0000bb, 0xac0000bc, + 0xab0000bd, 0xa80000bf, 0x8e0000c4, 0x8f0000c5, 0x920000c6, 0x800000c7, 0x900000c9, 0xa50000d1, + 0x990000d6, 0x9a0000dc, 0xe10000df, 0x850000e0, 0xa00000e1, 0x830000e2, 0x840000e4, 0x860000e5, + 
0x910000e6, 0x870000e7, 0x8a0000e8, 0x820000e9, 0x880000ea, 0x890000eb, 0x8d0000ec, 0xa10000ed, + 0x8c0000ee, 0x8b0000ef, 0xa40000f1, 0x950000f2, 0xa20000f3, 0x930000f4, 0x940000f6, 0xf60000f7, + 0x970000f9, 0xa30000fa, 0x960000fb, 0x810000fc, 0x980000ff, 0x9f000192, 0xe2000393, 0xe9000398, + 0xe40003a3, 0xe80003a6, 0xea0003a9, 0xe00003b1, 0xeb0003b4, 0xee0003b5, 0xe30003c0, 0xe50003c3, + 0xe70003c4, 0xed0003c6, 0xfc00207f, 0x9e0020a7, 0xf9002219, 0xfb00221a, 0xec00221e, 0xef002229, + 0xf7002248, 0xf0002261, 0xf3002264, 0xf2002265, 0xa9002310, 0xf4002320, 0xf5002321, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage850 is the IBM Code Page 850 encoding. +var CodePage850 *Charmap = &codePage850 + +var codePage850 = Charmap{ + name: "IBM Code Page 850", + mib: identifier.PC850Multilingual, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 
0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa5, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xac, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc3, 0xa6, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xbb, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xbf, 0x00}}, {2, [3]byte{0xc3, 0x96, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x98, 0x00}}, + {2, [3]byte{0xc3, 0x97, 0x00}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {2, [3]byte{0xc2, 0xae, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, 
[3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x80, 0x00}}, + {2, [3]byte{0xc2, 0xa9, 0x00}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {2, [3]byte{0xc2, 0xa2, 0x00}}, + {2, [3]byte{0xc2, 0xa5, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0x90, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc4, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0x8e, 0x00}}, + {2, [3]byte{0xc3, 0x8f, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {2, [3]byte{0xc2, 0xa6, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0xb5, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xc3, 0xbe, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9a, 0x00}}, + {2, [3]byte{0xc3, 0x9b, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0xbd, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc2, 0xaf, 0x00}}, {2, [3]byte{0xc2, 0xb4, 0x00}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x97}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 
0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xad0000a1, 0xbd0000a2, 0x9c0000a3, 0xcf0000a4, 0xbe0000a5, 0xdd0000a6, 0xf50000a7, + 0xf90000a8, 0xb80000a9, 0xa60000aa, 0xae0000ab, 0xaa0000ac, 0xf00000ad, 0xa90000ae, 0xee0000af, + 0xf80000b0, 0xf10000b1, 0xfd0000b2, 0xfc0000b3, 0xef0000b4, 0xe60000b5, 0xf40000b6, 0xfa0000b7, + 0xf70000b8, 0xfb0000b9, 0xa70000ba, 0xaf0000bb, 0xac0000bc, 0xab0000bd, 0xf30000be, 0xa80000bf, + 0xb70000c0, 0xb50000c1, 0xb60000c2, 0xc70000c3, 0x8e0000c4, 0x8f0000c5, 0x920000c6, 0x800000c7, + 0xd40000c8, 0x900000c9, 0xd20000ca, 0xd30000cb, 0xde0000cc, 0xd60000cd, 0xd70000ce, 0xd80000cf, + 0xd10000d0, 0xa50000d1, 0xe30000d2, 0xe00000d3, 0xe20000d4, 0xe50000d5, 0x990000d6, 0x9e0000d7, + 0x9d0000d8, 0xeb0000d9, 0xe90000da, 0xea0000db, 0x9a0000dc, 0xed0000dd, 0xe80000de, 0xe10000df, + 0x850000e0, 0xa00000e1, 0x830000e2, 0xc60000e3, 0x840000e4, 0x860000e5, 0x910000e6, 0x870000e7, + 0x8a0000e8, 0x820000e9, 0x880000ea, 0x890000eb, 0x8d0000ec, 0xa10000ed, 0x8c0000ee, 0x8b0000ef, + 0xd00000f0, 0xa40000f1, 0x950000f2, 0xa20000f3, 0x930000f4, 0xe40000f5, 0x940000f6, 0xf60000f7, + 0x9b0000f8, 0x970000f9, 0xa30000fa, 0x960000fb, 0x810000fc, 0xec0000fd, 0xe70000fe, 0x980000ff, + 0xd5000131, 0x9f000192, 0xf2002017, 0xc4002500, 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, + 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, + 0xc9002554, 0xbb002557, 0xc800255a, 0xbc00255d, 0xcc002560, 0xb9002563, 0xcb002566, 0xca002569, + 0xce00256c, 0xdf002580, 0xdc002584, 0xdb002588, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage852 is the IBM Code Page 852 encoding. 
+var CodePage852 *Charmap = &codePage852 + +var codePage852 = Charmap{ + name: "IBM Code Page 852", + mib: identifier.PCp852, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc5, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc5, 0x82, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc5, 0x90, 0x00}}, {2, [3]byte{0xc5, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc5, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc4, 0x86, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc4, 0xb9, 0x00}}, + {2, [3]byte{0xc4, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc4, 0xbd, 0x00}}, + {2, [3]byte{0xc4, 0xbe, 0x00}}, {2, [3]byte{0xc5, 0x9a, 0x00}}, + {2, [3]byte{0xc5, 0x9b, 0x00}}, {2, [3]byte{0xc3, 0x96, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc5, 0xa4, 0x00}}, + {2, [3]byte{0xc5, 0xa5, 0x00}}, {2, [3]byte{0xc5, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x97, 0x00}}, {2, [3]byte{0xc4, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc4, 0x84, 0x00}}, {2, [3]byte{0xc4, 0x85, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc5, 0xbe, 0x00}}, + {2, [3]byte{0xc4, 0x98, 0x00}}, {2, [3]byte{0xc4, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc5, 0xba, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc5, 0x9f, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc4, 0x9a, 0x00}}, + {2, [3]byte{0xc5, 0x9e, 0x00}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {2, [3]byte{0xc5, 0xbb, 0x00}}, + {2, [3]byte{0xc5, 0xbc, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {2, [3]byte{0xc4, 0x82, 0x00}}, {2, [3]byte{0xc4, 0x83, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {2, [3]byte{0xc4, 0x91, 
0x00}}, {2, [3]byte{0xc4, 0x90, 0x00}}, + {2, [3]byte{0xc4, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x8f, 0x00}}, {2, [3]byte{0xc5, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0x8e, 0x00}}, + {2, [3]byte{0xc4, 0x9b, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {2, [3]byte{0xc5, 0xa2, 0x00}}, + {2, [3]byte{0xc5, 0xae, 0x00}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc5, 0x83, 0x00}}, + {2, [3]byte{0xc5, 0x84, 0x00}}, {2, [3]byte{0xc5, 0x88, 0x00}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {2, [3]byte{0xc5, 0xa1, 0x00}}, + {2, [3]byte{0xc5, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x9a, 0x00}}, + {2, [3]byte{0xc5, 0x95, 0x00}}, {2, [3]byte{0xc5, 0xb0, 0x00}}, + {2, [3]byte{0xc3, 0xbd, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc5, 0xa3, 0x00}}, {2, [3]byte{0xc2, 0xb4, 0x00}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xcb, 0x9d, 0x00}}, + {2, [3]byte{0xcb, 0x9b, 0x00}}, {2, [3]byte{0xcb, 0x87, 0x00}}, + {2, [3]byte{0xcb, 0x98, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xcb, 0x99, 0x00}}, {2, [3]byte{0xc5, 0xb1, 0x00}}, + {2, [3]byte{0xc5, 0x98, 0x00}}, {2, [3]byte{0xc5, 0x99, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xcf0000a4, 0xf50000a7, 0xf90000a8, 0xae0000ab, 0xaa0000ac, 0xf00000ad, 0xf80000b0, + 0xef0000b4, 0xf70000b8, 0xaf0000bb, 0xb50000c1, 0xb60000c2, 0x8e0000c4, 0x800000c7, 0x900000c9, + 0xd30000cb, 0xd60000cd, 0xd70000ce, 0xe00000d3, 0xe20000d4, 0x990000d6, 0x9e0000d7, 0xe90000da, + 0x9a0000dc, 0xed0000dd, 0xe10000df, 0xa00000e1, 0x830000e2, 0x840000e4, 0x870000e7, 0x820000e9, + 
0x890000eb, 0xa10000ed, 0x8c0000ee, 0xa20000f3, 0x930000f4, 0x940000f6, 0xf60000f7, 0xa30000fa, + 0x810000fc, 0xec0000fd, 0xc6000102, 0xc7000103, 0xa4000104, 0xa5000105, 0x8f000106, 0x86000107, + 0xac00010c, 0x9f00010d, 0xd200010e, 0xd400010f, 0xd1000110, 0xd0000111, 0xa8000118, 0xa9000119, + 0xb700011a, 0xd800011b, 0x91000139, 0x9200013a, 0x9500013d, 0x9600013e, 0x9d000141, 0x88000142, + 0xe3000143, 0xe4000144, 0xd5000147, 0xe5000148, 0x8a000150, 0x8b000151, 0xe8000154, 0xea000155, + 0xfc000158, 0xfd000159, 0x9700015a, 0x9800015b, 0xb800015e, 0xad00015f, 0xe6000160, 0xe7000161, + 0xdd000162, 0xee000163, 0x9b000164, 0x9c000165, 0xde00016e, 0x8500016f, 0xeb000170, 0xfb000171, + 0x8d000179, 0xab00017a, 0xbd00017b, 0xbe00017c, 0xa600017d, 0xa700017e, 0xf30002c7, 0xf40002d8, + 0xfa0002d9, 0xf20002db, 0xf10002dd, 0xc4002500, 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, + 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, + 0xc9002554, 0xbb002557, 0xc800255a, 0xbc00255d, 0xcc002560, 0xb9002563, 0xcb002566, 0xca002569, + 0xce00256c, 0xdf002580, 0xdc002584, 0xdb002588, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage855 is the IBM Code Page 855 encoding. +var CodePage855 *Charmap = &codePage855 + +var codePage855 = Charmap{ + name: "IBM Code Page 855", + mib: identifier.IBM855, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, 
+ {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xd1, 0x92, 0x00}}, {2, [3]byte{0xd0, 0x82, 0x00}}, + {2, [3]byte{0xd1, 0x93, 0x00}}, {2, [3]byte{0xd0, 0x83, 0x00}}, + {2, [3]byte{0xd1, 0x91, 0x00}}, {2, [3]byte{0xd0, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x84, 0x00}}, + {2, [3]byte{0xd1, 0x95, 0x00}}, {2, [3]byte{0xd0, 0x85, 0x00}}, + {2, [3]byte{0xd1, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x86, 0x00}}, + {2, [3]byte{0xd1, 0x97, 0x00}}, {2, [3]byte{0xd0, 0x87, 0x00}}, + {2, [3]byte{0xd1, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x88, 0x00}}, + {2, [3]byte{0xd1, 0x99, 0x00}}, {2, [3]byte{0xd0, 0x89, 0x00}}, + {2, [3]byte{0xd1, 0x9a, 0x00}}, {2, [3]byte{0xd0, 0x8a, 0x00}}, + {2, [3]byte{0xd1, 0x9b, 0x00}}, {2, [3]byte{0xd0, 0x8b, 0x00}}, + {2, [3]byte{0xd1, 0x9c, 0x00}}, {2, [3]byte{0xd0, 0x8c, 0x00}}, + {2, [3]byte{0xd1, 0x9e, 0x00}}, {2, [3]byte{0xd0, 0x8e, 0x00}}, + {2, [3]byte{0xd1, 0x9f, 0x00}}, {2, [3]byte{0xd0, 0x8f, 0x00}}, + {2, [3]byte{0xd1, 0x8e, 0x00}}, {2, [3]byte{0xd0, 0xae, 0x00}}, + {2, [3]byte{0xd1, 0x8a, 0x00}}, {2, [3]byte{0xd0, 0xaa, 0x00}}, + {2, [3]byte{0xd0, 0xb0, 0x00}}, {2, [3]byte{0xd0, 0x90, 0x00}}, + {2, [3]byte{0xd0, 0xb1, 0x00}}, {2, [3]byte{0xd0, 0x91, 0x00}}, + {2, [3]byte{0xd1, 0x86, 0x00}}, {2, [3]byte{0xd0, 0xa6, 0x00}}, + {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0x94, 0x00}}, + {2, [3]byte{0xd0, 0xb5, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, + {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd0, 
0xa4, 0x00}}, + {2, [3]byte{0xd0, 0xb3, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {2, [3]byte{0xd1, 0x85, 0x00}}, + {2, [3]byte{0xd0, 0xa5, 0x00}}, {2, [3]byte{0xd0, 0xb8, 0x00}}, + {2, [3]byte{0xd0, 0x98, 0x00}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {2, [3]byte{0xd0, 0xb9, 0x00}}, + {2, [3]byte{0xd0, 0x99, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {2, [3]byte{0xd0, 0xba, 0x00}}, {2, [3]byte{0xd0, 0x9a, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {2, [3]byte{0xd0, 0xbb, 0x00}}, {2, [3]byte{0xd0, 0x9b, 0x00}}, + {2, [3]byte{0xd0, 0xbc, 0x00}}, {2, [3]byte{0xd0, 0x9c, 0x00}}, + {2, [3]byte{0xd0, 0xbd, 0x00}}, {2, [3]byte{0xd0, 0x9d, 0x00}}, + {2, [3]byte{0xd0, 0xbe, 0x00}}, {2, [3]byte{0xd0, 0x9e, 0x00}}, + {2, [3]byte{0xd0, 0xbf, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {2, [3]byte{0xd0, 0x9f, 0x00}}, + {2, [3]byte{0xd1, 0x8f, 0x00}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xd0, 0xaf, 0x00}}, {2, [3]byte{0xd1, 0x80, 0x00}}, + {2, [3]byte{0xd0, 0xa0, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, + {2, [3]byte{0xd0, 0xa1, 0x00}}, {2, [3]byte{0xd1, 0x82, 0x00}}, + {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, + {2, [3]byte{0xd0, 0xa3, 0x00}}, {2, [3]byte{0xd0, 0xb6, 0x00}}, + {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0xb2, 0x00}}, + {2, [3]byte{0xd0, 0x92, 0x00}}, {2, [3]byte{0xd1, 0x8c, 0x00}}, + {2, [3]byte{0xd0, 0xac, 0x00}}, {3, [3]byte{0xe2, 0x84, 0x96}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, + {2, [3]byte{0xd0, 0xab, 0x00}}, {2, [3]byte{0xd0, 0xb7, 0x00}}, + {2, [3]byte{0xd0, 0x97, 0x00}}, {2, [3]byte{0xd1, 0x88, 0x00}}, + {2, [3]byte{0xd0, 0xa8, 0x00}}, {2, [3]byte{0xd1, 0x8d, 0x00}}, + {2, [3]byte{0xd0, 0xad, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, + {2, [3]byte{0xd0, 0xa9, 0x00}}, {2, [3]byte{0xd1, 0x87, 0x00}}, + {2, [3]byte{0xd0, 0xa7, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xcf0000a4, 0xfd0000a7, 0xae0000ab, 0xf00000ad, 0xaf0000bb, 0x85000401, 0x81000402, + 0x83000403, 0x87000404, 0x89000405, 0x8b000406, 0x8d000407, 0x8f000408, 0x91000409, 0x9300040a, + 0x9500040b, 0x9700040c, 0x9900040e, 0x9b00040f, 0xa1000410, 0xa3000411, 0xec000412, 0xad000413, + 0xa7000414, 0xa9000415, 0xea000416, 0xf4000417, 0xb8000418, 0xbe000419, 0xc700041a, 0xd100041b, + 0xd300041c, 0xd500041d, 0xd700041e, 0xdd00041f, 0xe2000420, 0xe4000421, 0xe6000422, 0xe8000423, + 0xab000424, 0xb6000425, 0xa5000426, 0xfc000427, 0xf6000428, 0xfa000429, 0x9f00042a, 0xf200042b, + 0xee00042c, 0xf800042d, 0x9d00042e, 0xe000042f, 0xa0000430, 0xa2000431, 0xeb000432, 0xac000433, + 0xa6000434, 0xa8000435, 0xe9000436, 0xf3000437, 0xb7000438, 0xbd000439, 0xc600043a, 0xd000043b, + 0xd200043c, 0xd400043d, 0xd600043e, 0xd800043f, 0xe1000440, 0xe3000441, 0xe5000442, 0xe7000443, + 0xaa000444, 0xb5000445, 0xa4000446, 0xfb000447, 0xf5000448, 0xf9000449, 0x9e00044a, 0xf100044b, + 0xed00044c, 0xf700044d, 0x9c00044e, 0xde00044f, 0x84000451, 0x80000452, 0x82000453, 0x86000454, + 0x88000455, 0x8a000456, 0x8c000457, 0x8e000458, 0x90000459, 0x9200045a, 0x9400045b, 0x9600045c, + 0x9800045e, 0x9a00045f, 0xef002116, 0xc4002500, 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, + 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, + 0xc9002554, 0xbb002557, 0xc800255a, 0xbc00255d, 0xcc002560, 0xb9002563, 0xcb002566, 0xca002569, + 0xce00256c, 0xdf002580, 0xdc002584, 0xdb002588, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage858 is the Windows Code Page 858 encoding. 
+var CodePage858 *Charmap = &codePage858 + +var codePage858 = Charmap{ + name: "Windows Code Page 858", + mib: identifier.IBM00858, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa5, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xac, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc3, 0xa6, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xbb, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xbf, 0x00}}, {2, [3]byte{0xc3, 0x96, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x98, 0x00}}, + {2, [3]byte{0xc3, 0x97, 0x00}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {2, [3]byte{0xc2, 0xae, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x80, 0x00}}, + {2, [3]byte{0xc2, 0xa9, 0x00}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {2, [3]byte{0xc2, 0xa2, 0x00}}, + {2, [3]byte{0xc2, 0xa5, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 
0x00}}, {2, [3]byte{0xc3, 0x90, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {3, [3]byte{0xe2, 0x82, 0xac}}, + {2, [3]byte{0xc3, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0x8e, 0x00}}, + {2, [3]byte{0xc3, 0x8f, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {2, [3]byte{0xc2, 0xa6, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0xb5, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xc3, 0xbe, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9a, 0x00}}, + {2, [3]byte{0xc3, 0x9b, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0xbd, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc2, 0xaf, 0x00}}, {2, [3]byte{0xc2, 0xb4, 0x00}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x97}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xad0000a1, 0xbd0000a2, 0x9c0000a3, 0xcf0000a4, 0xbe0000a5, 0xdd0000a6, 0xf50000a7, + 0xf90000a8, 0xb80000a9, 0xa60000aa, 0xae0000ab, 0xaa0000ac, 0xf00000ad, 0xa90000ae, 0xee0000af, + 0xf80000b0, 0xf10000b1, 0xfd0000b2, 0xfc0000b3, 0xef0000b4, 0xe60000b5, 0xf40000b6, 0xfa0000b7, + 0xf70000b8, 0xfb0000b9, 0xa70000ba, 0xaf0000bb, 0xac0000bc, 0xab0000bd, 0xf30000be, 0xa80000bf, + 
0xb70000c0, 0xb50000c1, 0xb60000c2, 0xc70000c3, 0x8e0000c4, 0x8f0000c5, 0x920000c6, 0x800000c7, + 0xd40000c8, 0x900000c9, 0xd20000ca, 0xd30000cb, 0xde0000cc, 0xd60000cd, 0xd70000ce, 0xd80000cf, + 0xd10000d0, 0xa50000d1, 0xe30000d2, 0xe00000d3, 0xe20000d4, 0xe50000d5, 0x990000d6, 0x9e0000d7, + 0x9d0000d8, 0xeb0000d9, 0xe90000da, 0xea0000db, 0x9a0000dc, 0xed0000dd, 0xe80000de, 0xe10000df, + 0x850000e0, 0xa00000e1, 0x830000e2, 0xc60000e3, 0x840000e4, 0x860000e5, 0x910000e6, 0x870000e7, + 0x8a0000e8, 0x820000e9, 0x880000ea, 0x890000eb, 0x8d0000ec, 0xa10000ed, 0x8c0000ee, 0x8b0000ef, + 0xd00000f0, 0xa40000f1, 0x950000f2, 0xa20000f3, 0x930000f4, 0xe40000f5, 0x940000f6, 0xf60000f7, + 0x9b0000f8, 0x970000f9, 0xa30000fa, 0x960000fb, 0x810000fc, 0xec0000fd, 0xe70000fe, 0x980000ff, + 0x9f000192, 0xf2002017, 0xd50020ac, 0xc4002500, 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, + 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, + 0xc9002554, 0xbb002557, 0xc800255a, 0xbc00255d, 0xcc002560, 0xb9002563, 0xcb002566, 0xca002569, + 0xce00256c, 0xdf002580, 0xdc002584, 0xdb002588, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage860 is the IBM Code Page 860 encoding. +var CodePage860 *Charmap = &codePage860 + +var codePage860 = Charmap{ + name: "IBM Code Page 860", + mib: identifier.IBM860, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, 
+ {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0x81, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0x8a, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0xac, 0x00}}, + {2, [3]byte{0xc3, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x82, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc3, 0x80, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb5, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc2, 0xa2, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xa7}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 
0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {3, [3]byte{0xe2, 0x95, 0x9e}}, {3, [3]byte{0xe2, 0x95, 0x9f}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa4}}, + {3, [3]byte{0xe2, 0x95, 0xa5}}, {3, [3]byte{0xe2, 0x95, 0x99}}, + {3, [3]byte{0xe2, 0x95, 0x98}}, {3, [3]byte{0xe2, 0x95, 0x92}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, + {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xce, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0x93, 0x00}}, {2, [3]byte{0xcf, 0x80, 0x00}}, + {2, [3]byte{0xce, 0xa3, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xcf, 0x84, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0x98, 0x00}}, + {2, [3]byte{0xce, 0xa9, 0x00}}, {2, [3]byte{0xce, 0xb4, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xcf, 0x86, 0x00}}, + {2, [3]byte{0xce, 0xb5, 0x00}}, {3, [3]byte{0xe2, 0x88, 0xa9}}, + {3, [3]byte{0xe2, 0x89, 0xa1}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa5}}, {3, [3]byte{0xe2, 0x89, 0xa4}}, + {3, [3]byte{0xe2, 0x8c, 0xa0}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {3, [3]byte{0xe2, 0x81, 0xbf}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xad0000a1, 0x9b0000a2, 0x9c0000a3, 0xa60000aa, 0xae0000ab, 0xaa0000ac, 0xf80000b0, + 0xf10000b1, 0xfd0000b2, 0xe60000b5, 0xfa0000b7, 0xa70000ba, 0xaf0000bb, 0xac0000bc, 0xab0000bd, + 0xa80000bf, 0x910000c0, 0x860000c1, 0x8f0000c2, 0x8e0000c3, 0x800000c7, 0x920000c8, 0x900000c9, + 0x890000ca, 0x980000cc, 0x8b0000cd, 0xa50000d1, 0xa90000d2, 0x9f0000d3, 0x8c0000d4, 0x990000d5, + 0x9d0000d9, 0x960000da, 0x9a0000dc, 0xe10000df, 0x850000e0, 0xa00000e1, 0x830000e2, 0x840000e3, + 0x870000e7, 0x8a0000e8, 0x820000e9, 0x880000ea, 0x8d0000ec, 0xa10000ed, 0xa40000f1, 0x950000f2, + 0xa20000f3, 0x930000f4, 0x940000f5, 0xf60000f7, 0x970000f9, 0xa30000fa, 0x810000fc, 0xe2000393, + 0xe9000398, 0xe40003a3, 0xe80003a6, 0xea0003a9, 0xe00003b1, 0xeb0003b4, 0xee0003b5, 0xe30003c0, + 0xe50003c3, 0xe70003c4, 0xed0003c6, 0xfc00207f, 0x9e0020a7, 0xf9002219, 0xfb00221a, 0xec00221e, + 0xef002229, 0xf7002248, 0xf0002261, 0xf3002264, 0xf2002265, 0xf4002320, 0xf5002321, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage862 is the IBM Code Page 862 encoding. 
+var CodePage862 *Charmap = &codePage862 + +var codePage862 = Charmap{ + name: "IBM Code Page 862", + mib: identifier.PC862LatinHebrew, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xd7, 0x90, 0x00}}, {2, [3]byte{0xd7, 0x91, 0x00}}, + {2, [3]byte{0xd7, 0x92, 0x00}}, {2, [3]byte{0xd7, 0x93, 0x00}}, + {2, [3]byte{0xd7, 0x94, 0x00}}, {2, [3]byte{0xd7, 0x95, 0x00}}, + {2, [3]byte{0xd7, 0x96, 0x00}}, {2, [3]byte{0xd7, 0x97, 0x00}}, + {2, [3]byte{0xd7, 0x98, 0x00}}, {2, [3]byte{0xd7, 0x99, 0x00}}, + {2, [3]byte{0xd7, 0x9a, 0x00}}, {2, [3]byte{0xd7, 0x9b, 0x00}}, + {2, [3]byte{0xd7, 0x9c, 0x00}}, {2, [3]byte{0xd7, 0x9d, 0x00}}, + {2, [3]byte{0xd7, 0x9e, 0x00}}, {2, [3]byte{0xd7, 0x9f, 0x00}}, + {2, [3]byte{0xd7, 0xa0, 0x00}}, {2, [3]byte{0xd7, 0xa1, 0x00}}, + {2, [3]byte{0xd7, 0xa2, 0x00}}, {2, [3]byte{0xd7, 0xa3, 0x00}}, + {2, [3]byte{0xd7, 0xa4, 0x00}}, {2, [3]byte{0xd7, 0xa5, 0x00}}, + {2, [3]byte{0xd7, 0xa6, 0x00}}, {2, [3]byte{0xd7, 0xa7, 0x00}}, + {2, [3]byte{0xd7, 0xa8, 0x00}}, {2, [3]byte{0xd7, 0xa9, 0x00}}, + {2, [3]byte{0xd7, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xa2, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xa7}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {3, [3]byte{0xe2, 0x8c, 0x90}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {3, [3]byte{0xe2, 0x95, 0x9e}}, {3, [3]byte{0xe2, 0x95, 0x9f}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 
0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa4}}, + {3, [3]byte{0xe2, 0x95, 0xa5}}, {3, [3]byte{0xe2, 0x95, 0x99}}, + {3, [3]byte{0xe2, 0x95, 0x98}}, {3, [3]byte{0xe2, 0x95, 0x92}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, + {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xce, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0x93, 0x00}}, {2, [3]byte{0xcf, 0x80, 0x00}}, + {2, [3]byte{0xce, 0xa3, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xcf, 0x84, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0x98, 0x00}}, + {2, [3]byte{0xce, 0xa9, 0x00}}, {2, [3]byte{0xce, 0xb4, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xcf, 0x86, 0x00}}, + {2, [3]byte{0xce, 0xb5, 0x00}}, {3, [3]byte{0xe2, 0x88, 0xa9}}, + {3, [3]byte{0xe2, 0x89, 0xa1}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa5}}, {3, [3]byte{0xe2, 0x89, 0xa4}}, + {3, [3]byte{0xe2, 0x8c, 0xa0}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {3, [3]byte{0xe2, 0x81, 0xbf}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xad0000a1, 0x9b0000a2, 0x9c0000a3, 0x9d0000a5, 0xa60000aa, 0xae0000ab, 0xaa0000ac, + 0xf80000b0, 0xf10000b1, 0xfd0000b2, 0xe60000b5, 0xfa0000b7, 0xa70000ba, 0xaf0000bb, 0xac0000bc, + 0xab0000bd, 0xa80000bf, 0xa50000d1, 0xe10000df, 0xa00000e1, 0xa10000ed, 0xa40000f1, 0xa20000f3, + 0xf60000f7, 0xa30000fa, 0x9f000192, 0xe2000393, 0xe9000398, 0xe40003a3, 0xe80003a6, 0xea0003a9, + 
0xe00003b1, 0xeb0003b4, 0xee0003b5, 0xe30003c0, 0xe50003c3, 0xe70003c4, 0xed0003c6, 0x800005d0, + 0x810005d1, 0x820005d2, 0x830005d3, 0x840005d4, 0x850005d5, 0x860005d6, 0x870005d7, 0x880005d8, + 0x890005d9, 0x8a0005da, 0x8b0005db, 0x8c0005dc, 0x8d0005dd, 0x8e0005de, 0x8f0005df, 0x900005e0, + 0x910005e1, 0x920005e2, 0x930005e3, 0x940005e4, 0x950005e5, 0x960005e6, 0x970005e7, 0x980005e8, + 0x990005e9, 0x9a0005ea, 0xfc00207f, 0x9e0020a7, 0xf9002219, 0xfb00221a, 0xec00221e, 0xef002229, + 0xf7002248, 0xf0002261, 0xf3002264, 0xf2002265, 0xa9002310, 0xf4002320, 0xf5002321, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage863 is the IBM Code Page 863 encoding. +var CodePage863 *Charmap = &codePage863 + +var codePage863 = Charmap{ + name: "IBM Code Page 863", + mib: identifier.IBM863, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, 
+ {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0xa0, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x97}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc3, 0x88, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0x8b, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0xbb, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0x94, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc2, 0xa2, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9b, 0x00}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {3, [3]byte{0xe2, 0x8c, 0x90}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 
0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {3, [3]byte{0xe2, 0x95, 0x9e}}, {3, [3]byte{0xe2, 0x95, 0x9f}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa4}}, + {3, [3]byte{0xe2, 0x95, 0xa5}}, {3, [3]byte{0xe2, 0x95, 0x99}}, + {3, [3]byte{0xe2, 0x95, 0x98}}, {3, [3]byte{0xe2, 0x95, 0x92}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, + {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xce, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0x93, 0x00}}, {2, [3]byte{0xcf, 0x80, 0x00}}, + {2, [3]byte{0xce, 0xa3, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xcf, 0x84, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0x98, 0x00}}, + {2, [3]byte{0xce, 0xa9, 0x00}}, {2, [3]byte{0xce, 0xb4, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xcf, 0x86, 0x00}}, + {2, [3]byte{0xce, 0xb5, 0x00}}, {3, [3]byte{0xe2, 0x88, 0xa9}}, + {3, [3]byte{0xe2, 0x89, 0xa1}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa5}}, {3, [3]byte{0xe2, 0x89, 0xa4}}, + {3, [3]byte{0xe2, 0x8c, 0xa0}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {3, [3]byte{0xe2, 0x81, 0xbf}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0x9b0000a2, 0x9c0000a3, 0x980000a4, 0xa00000a6, 0x8f0000a7, 0xa40000a8, 0xae0000ab, + 0xaa0000ac, 0xa70000af, 0xf80000b0, 0xf10000b1, 0xfd0000b2, 0xa60000b3, 0xa10000b4, 0xe60000b5, + 0x860000b6, 0xfa0000b7, 0xa50000b8, 0xaf0000bb, 0xac0000bc, 0xab0000bd, 0xad0000be, 0x8e0000c0, + 0x840000c2, 0x800000c7, 0x910000c8, 0x900000c9, 0x920000ca, 0x940000cb, 0xa80000ce, 0x950000cf, + 0x990000d4, 0x9d0000d9, 0x9e0000db, 0x9a0000dc, 0xe10000df, 0x850000e0, 0x830000e2, 0x870000e7, + 0x8a0000e8, 0x820000e9, 0x880000ea, 0x890000eb, 0x8c0000ee, 0x8b0000ef, 0xa20000f3, 0x930000f4, + 0xf60000f7, 0x970000f9, 0xa30000fa, 0x960000fb, 0x810000fc, 0x9f000192, 0xe2000393, 0xe9000398, + 0xe40003a3, 0xe80003a6, 0xea0003a9, 0xe00003b1, 0xeb0003b4, 0xee0003b5, 0xe30003c0, 0xe50003c3, + 0xe70003c4, 0xed0003c6, 0x8d002017, 0xfc00207f, 0xf9002219, 0xfb00221a, 0xec00221e, 0xef002229, + 0xf7002248, 0xf0002261, 0xf3002264, 0xf2002265, 0xa9002310, 0xf4002320, 0xf5002321, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage865 is the IBM Code Page 865 encoding. 
+var CodePage865 *Charmap = &codePage865 + +var codePage865 = Charmap{ + name: "IBM Code Page 865", + mib: identifier.IBM865, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa5, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xac, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc3, 0xa6, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xbb, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xbf, 0x00}}, {2, [3]byte{0xc3, 0x96, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x98, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xa7}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {3, [3]byte{0xe2, 0x8c, 0x90}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {3, [3]byte{0xe2, 0x95, 0x9e}}, {3, [3]byte{0xe2, 0x95, 0x9f}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 
0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa4}}, + {3, [3]byte{0xe2, 0x95, 0xa5}}, {3, [3]byte{0xe2, 0x95, 0x99}}, + {3, [3]byte{0xe2, 0x95, 0x98}}, {3, [3]byte{0xe2, 0x95, 0x92}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, + {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xce, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0x93, 0x00}}, {2, [3]byte{0xcf, 0x80, 0x00}}, + {2, [3]byte{0xce, 0xa3, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xcf, 0x84, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0x98, 0x00}}, + {2, [3]byte{0xce, 0xa9, 0x00}}, {2, [3]byte{0xce, 0xb4, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xcf, 0x86, 0x00}}, + {2, [3]byte{0xce, 0xb5, 0x00}}, {3, [3]byte{0xe2, 0x88, 0xa9}}, + {3, [3]byte{0xe2, 0x89, 0xa1}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa5}}, {3, [3]byte{0xe2, 0x89, 0xa4}}, + {3, [3]byte{0xe2, 0x8c, 0xa0}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {3, [3]byte{0xe2, 0x81, 0xbf}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xad0000a1, 0x9c0000a3, 0xaf0000a4, 0xa60000aa, 0xae0000ab, 0xaa0000ac, 0xf80000b0, + 0xf10000b1, 0xfd0000b2, 0xe60000b5, 0xfa0000b7, 0xa70000ba, 0xac0000bc, 0xab0000bd, 0xa80000bf, + 0x8e0000c4, 0x8f0000c5, 0x920000c6, 0x800000c7, 0x900000c9, 0xa50000d1, 0x990000d6, 0x9d0000d8, + 0x9a0000dc, 0xe10000df, 0x850000e0, 0xa00000e1, 0x830000e2, 0x840000e4, 0x860000e5, 0x910000e6, + 
0x870000e7, 0x8a0000e8, 0x820000e9, 0x880000ea, 0x890000eb, 0x8d0000ec, 0xa10000ed, 0x8c0000ee, + 0x8b0000ef, 0xa40000f1, 0x950000f2, 0xa20000f3, 0x930000f4, 0x940000f6, 0xf60000f7, 0x9b0000f8, + 0x970000f9, 0xa30000fa, 0x960000fb, 0x810000fc, 0x980000ff, 0x9f000192, 0xe2000393, 0xe9000398, + 0xe40003a3, 0xe80003a6, 0xea0003a9, 0xe00003b1, 0xeb0003b4, 0xee0003b5, 0xe30003c0, 0xe50003c3, + 0xe70003c4, 0xed0003c6, 0xfc00207f, 0x9e0020a7, 0xf9002219, 0xfb00221a, 0xec00221e, 0xef002229, + 0xf7002248, 0xf0002261, 0xf3002264, 0xf2002265, 0xa9002310, 0xf4002320, 0xf5002321, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage866 is the IBM Code Page 866 encoding. +var CodePage866 *Charmap = &codePage866 + +var codePage866 = Charmap{ + name: "IBM Code Page 866", + mib: identifier.IBM866, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, 
+ {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xd0, 0x90, 0x00}}, {2, [3]byte{0xd0, 0x91, 0x00}}, + {2, [3]byte{0xd0, 0x92, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, + {2, [3]byte{0xd0, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, + {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x99, 0x00}}, + {2, [3]byte{0xd0, 0x9a, 0x00}}, {2, [3]byte{0xd0, 0x9b, 0x00}}, + {2, [3]byte{0xd0, 0x9c, 0x00}}, {2, [3]byte{0xd0, 0x9d, 0x00}}, + {2, [3]byte{0xd0, 0x9e, 0x00}}, {2, [3]byte{0xd0, 0x9f, 0x00}}, + {2, [3]byte{0xd0, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0xa1, 0x00}}, + {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd0, 0xa3, 0x00}}, + {2, [3]byte{0xd0, 0xa4, 0x00}}, {2, [3]byte{0xd0, 0xa5, 0x00}}, + {2, [3]byte{0xd0, 0xa6, 0x00}}, {2, [3]byte{0xd0, 0xa7, 0x00}}, + {2, [3]byte{0xd0, 0xa8, 0x00}}, {2, [3]byte{0xd0, 0xa9, 0x00}}, + {2, [3]byte{0xd0, 0xaa, 0x00}}, {2, [3]byte{0xd0, 0xab, 0x00}}, + {2, [3]byte{0xd0, 0xac, 0x00}}, {2, [3]byte{0xd0, 0xad, 0x00}}, + {2, [3]byte{0xd0, 0xae, 0x00}}, {2, [3]byte{0xd0, 0xaf, 0x00}}, + {2, [3]byte{0xd0, 0xb0, 0x00}}, {2, [3]byte{0xd0, 0xb1, 0x00}}, + {2, [3]byte{0xd0, 0xb2, 0x00}}, {2, [3]byte{0xd0, 0xb3, 0x00}}, + {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0xb5, 0x00}}, + {2, [3]byte{0xd0, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0xb7, 0x00}}, + {2, [3]byte{0xd0, 0xb8, 0x00}}, {2, [3]byte{0xd0, 0xb9, 0x00}}, + {2, [3]byte{0xd0, 0xba, 0x00}}, {2, [3]byte{0xd0, 
0xbb, 0x00}}, + {2, [3]byte{0xd0, 0xbc, 0x00}}, {2, [3]byte{0xd0, 0xbd, 0x00}}, + {2, [3]byte{0xd0, 0xbe, 0x00}}, {2, [3]byte{0xd0, 0xbf, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {3, [3]byte{0xe2, 0x95, 0x9e}}, {3, [3]byte{0xe2, 0x95, 0x9f}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa4}}, + {3, [3]byte{0xe2, 0x95, 0xa5}}, {3, [3]byte{0xe2, 0x95, 0x99}}, + {3, [3]byte{0xe2, 0x95, 0x98}}, {3, [3]byte{0xe2, 0x95, 0x92}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, + {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xd1, 0x80, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x82, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, + {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x85, 0x00}}, + {2, [3]byte{0xd1, 0x86, 0x00}}, {2, [3]byte{0xd1, 0x87, 0x00}}, + {2, [3]byte{0xd1, 0x88, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, + {2, [3]byte{0xd1, 0x8a, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, + {2, [3]byte{0xd1, 0x8c, 0x00}}, {2, [3]byte{0xd1, 0x8d, 0x00}}, + {2, [3]byte{0xd1, 0x8e, 0x00}}, {2, [3]byte{0xd1, 0x8f, 0x00}}, + {2, [3]byte{0xd0, 0x81, 0x00}}, {2, [3]byte{0xd1, 0x91, 0x00}}, + {2, [3]byte{0xd0, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x94, 0x00}}, + {2, [3]byte{0xd0, 0x87, 0x00}}, {2, [3]byte{0xd1, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x8e, 0x00}}, {2, [3]byte{0xd1, 0x9e, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {3, [3]byte{0xe2, 0x84, 0x96}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xfd0000a4, 0xf80000b0, 0xfa0000b7, 0xf0000401, 0xf2000404, 0xf4000407, 0xf600040e, + 0x80000410, 0x81000411, 0x82000412, 0x83000413, 0x84000414, 0x85000415, 0x86000416, 0x87000417, + 0x88000418, 0x89000419, 0x8a00041a, 0x8b00041b, 0x8c00041c, 0x8d00041d, 0x8e00041e, 0x8f00041f, + 0x90000420, 0x91000421, 0x92000422, 0x93000423, 0x94000424, 0x95000425, 0x96000426, 0x97000427, + 0x98000428, 0x99000429, 0x9a00042a, 0x9b00042b, 0x9c00042c, 0x9d00042d, 0x9e00042e, 0x9f00042f, + 0xa0000430, 0xa1000431, 0xa2000432, 0xa3000433, 0xa4000434, 0xa5000435, 0xa6000436, 0xa7000437, + 0xa8000438, 0xa9000439, 0xaa00043a, 0xab00043b, 0xac00043c, 0xad00043d, 0xae00043e, 0xaf00043f, + 0xe0000440, 0xe1000441, 0xe2000442, 0xe3000443, 0xe4000444, 0xe5000445, 0xe6000446, 0xe7000447, + 0xe8000448, 0xe9000449, 0xea00044a, 0xeb00044b, 0xec00044c, 0xed00044d, 0xee00044e, 0xef00044f, + 0xf1000451, 0xf3000454, 0xf5000457, 0xf700045e, 0xfc002116, 0xf9002219, 0xfb00221a, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage1047 is the IBM Code Page 1047 encoding. 
+var CodePage1047 *Charmap = &codePage1047 + +var codePage1047 = Charmap{ + name: "IBM Code Page 1047", + mib: identifier.IBM1047, + asciiSuperset: false, + low: 0x00, + replacement: 0x3f, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9c, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x86, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x97, 0x00}}, {2, [3]byte{0xc2, 0x8d, 0x00}}, + {2, [3]byte{0xc2, 0x8e, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9d, 0x00}}, {2, [3]byte{0xc2, 0x85, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x87, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x92, 0x00}}, {2, [3]byte{0xc2, 0x8f, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x80, 0x00}}, {2, [3]byte{0xc2, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0x82, 0x00}}, {2, [3]byte{0xc2, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0x84, 0x00}}, {1, [3]byte{0x0a, 0x00, 0x00}}, + {1, [3]byte{0x17, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x88, 0x00}}, {2, [3]byte{0xc2, 0x89, 0x00}}, + {2, [3]byte{0xc2, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0x8b, 0x00}}, + {2, [3]byte{0xc2, 0x8c, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0x91, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0x94, 0x00}}, {2, [3]byte{0xc2, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0x96, 0x00}}, {1, [3]byte{0x04, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x98, 0x00}}, {2, [3]byte{0xc2, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9b, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9e, 0x00}}, {1, [3]byte{0x1a, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa4, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa7, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {1, [3]byte{0x2e, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x28, 0x00, 0x00}}, + {1, [3]byte{0x2b, 0x00, 0x00}}, {1, [3]byte{0x7c, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {1, [3]byte{0x21, 0x00, 0x00}}, {1, [3]byte{0x24, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x3b, 0x00, 0x00}}, {1, [3]byte{0x5e, 0x00, 0x00}}, + {1, [3]byte{0x2d, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x84, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, 
[3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {1, [3]byte{0x2c, 0x00, 0x00}}, + {1, [3]byte{0x25, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {1, [3]byte{0x60, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x3d, 0x00, 0x00}}, {1, [3]byte{0x22, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {1, [3]byte{0x6a, 0x00, 0x00}}, + {1, [3]byte{0x6b, 0x00, 0x00}}, {1, [3]byte{0x6c, 0x00, 0x00}}, + {1, [3]byte{0x6d, 0x00, 0x00}}, {1, [3]byte{0x6e, 0x00, 0x00}}, + {1, [3]byte{0x6f, 0x00, 0x00}}, {1, [3]byte{0x70, 0x00, 0x00}}, + {1, [3]byte{0x71, 0x00, 0x00}}, {1, [3]byte{0x72, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {1, [3]byte{0x7e, 0x00, 0x00}}, + {1, [3]byte{0x73, 0x00, 0x00}}, {1, [3]byte{0x74, 0x00, 0x00}}, + {1, [3]byte{0x75, 0x00, 0x00}}, {1, [3]byte{0x76, 0x00, 0x00}}, + {1, [3]byte{0x77, 0x00, 0x00}}, {1, [3]byte{0x78, 0x00, 0x00}}, + {1, [3]byte{0x79, 0x00, 0x00}}, {1, [3]byte{0x7a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xa1, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0xae, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa5, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xa9, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xbc, 0x00}}, + {2, [3]byte{0xc2, 0xbd, 0x00}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {2, [3]byte{0xc3, 0x9d, 0x00}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xc2, 0xaf, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {1, [3]byte{0x7b, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {1, [3]byte{0x7d, 0x00, 
0x00}}, {1, [3]byte{0x4a, 0x00, 0x00}}, + {1, [3]byte{0x4b, 0x00, 0x00}}, {1, [3]byte{0x4c, 0x00, 0x00}}, + {1, [3]byte{0x4d, 0x00, 0x00}}, {1, [3]byte{0x4e, 0x00, 0x00}}, + {1, [3]byte{0x4f, 0x00, 0x00}}, {1, [3]byte{0x50, 0x00, 0x00}}, + {1, [3]byte{0x51, 0x00, 0x00}}, {1, [3]byte{0x52, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb9, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {1, [3]byte{0x53, 0x00, 0x00}}, {1, [3]byte{0x54, 0x00, 0x00}}, + {1, [3]byte{0x55, 0x00, 0x00}}, {1, [3]byte{0x56, 0x00, 0x00}}, + {1, [3]byte{0x57, 0x00, 0x00}}, {1, [3]byte{0x58, 0x00, 0x00}}, + {1, [3]byte{0x59, 0x00, 0x00}}, {1, [3]byte{0x5a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0x94, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9f, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x37000004, 0x2d000005, 0x2e000006, 0x2f000007, + 0x16000008, 0x05000009, 0x2500000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x3c000014, 0x3d000015, 0x32000016, 0x26000017, + 0x18000018, 0x19000019, 0x3f00001a, 0x2700001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x40000020, 0x5a000021, 0x7f000022, 0x7b000023, 0x5b000024, 0x6c000025, 0x50000026, 0x7d000027, + 0x4d000028, 0x5d000029, 0x5c00002a, 0x4e00002b, 0x6b00002c, 0x6000002d, 0x4b00002e, 0x6100002f, + 0xf0000030, 0xf1000031, 0xf2000032, 0xf3000033, 0xf4000034, 0xf5000035, 0xf6000036, 0xf7000037, + 0xf8000038, 0xf9000039, 0x7a00003a, 0x5e00003b, 0x4c00003c, 0x7e00003d, 0x6e00003e, 0x6f00003f, + 0x7c000040, 0xc1000041, 0xc2000042, 0xc3000043, 0xc4000044, 0xc5000045, 0xc6000046, 0xc7000047, + 0xc8000048, 0xc9000049, 0xd100004a, 0xd200004b, 0xd300004c, 0xd400004d, 0xd500004e, 0xd600004f, + 0xd7000050, 0xd8000051, 0xd9000052, 0xe2000053, 0xe3000054, 0xe4000055, 0xe5000056, 0xe6000057, + 0xe7000058, 0xe8000059, 0xe900005a, 0xad00005b, 0xe000005c, 0xbd00005d, 0x5f00005e, 0x6d00005f, + 0x79000060, 0x81000061, 0x82000062, 0x83000063, 0x84000064, 0x85000065, 0x86000066, 0x87000067, + 0x88000068, 0x89000069, 0x9100006a, 0x9200006b, 0x9300006c, 0x9400006d, 0x9500006e, 0x9600006f, + 0x97000070, 0x98000071, 0x99000072, 0xa2000073, 0xa3000074, 0xa4000075, 0xa5000076, 0xa6000077, + 0xa7000078, 0xa8000079, 0xa900007a, 0xc000007b, 0x4f00007c, 0xd000007d, 0xa100007e, 0x0700007f, + 0x20000080, 0x21000081, 0x22000082, 0x23000083, 0x24000084, 0x15000085, 0x06000086, 0x17000087, + 0x28000088, 0x29000089, 0x2a00008a, 0x2b00008b, 0x2c00008c, 0x0900008d, 0x0a00008e, 0x1b00008f, + 0x30000090, 0x31000091, 0x1a000092, 0x33000093, 0x34000094, 0x35000095, 0x36000096, 0x08000097, + 0x38000098, 0x39000099, 0x3a00009a, 0x3b00009b, 0x0400009c, 0x1400009d, 0x3e00009e, 0xff00009f, + 
0x410000a0, 0xaa0000a1, 0x4a0000a2, 0xb10000a3, 0x9f0000a4, 0xb20000a5, 0x6a0000a6, 0xb50000a7, + 0xbb0000a8, 0xb40000a9, 0x9a0000aa, 0x8a0000ab, 0xb00000ac, 0xca0000ad, 0xaf0000ae, 0xbc0000af, + 0x900000b0, 0x8f0000b1, 0xea0000b2, 0xfa0000b3, 0xbe0000b4, 0xa00000b5, 0xb60000b6, 0xb30000b7, + 0x9d0000b8, 0xda0000b9, 0x9b0000ba, 0x8b0000bb, 0xb70000bc, 0xb80000bd, 0xb90000be, 0xab0000bf, + 0x640000c0, 0x650000c1, 0x620000c2, 0x660000c3, 0x630000c4, 0x670000c5, 0x9e0000c6, 0x680000c7, + 0x740000c8, 0x710000c9, 0x720000ca, 0x730000cb, 0x780000cc, 0x750000cd, 0x760000ce, 0x770000cf, + 0xac0000d0, 0x690000d1, 0xed0000d2, 0xee0000d3, 0xeb0000d4, 0xef0000d5, 0xec0000d6, 0xbf0000d7, + 0x800000d8, 0xfd0000d9, 0xfe0000da, 0xfb0000db, 0xfc0000dc, 0xba0000dd, 0xae0000de, 0x590000df, + 0x440000e0, 0x450000e1, 0x420000e2, 0x460000e3, 0x430000e4, 0x470000e5, 0x9c0000e6, 0x480000e7, + 0x540000e8, 0x510000e9, 0x520000ea, 0x530000eb, 0x580000ec, 0x550000ed, 0x560000ee, 0x570000ef, + 0x8c0000f0, 0x490000f1, 0xcd0000f2, 0xce0000f3, 0xcb0000f4, 0xcf0000f5, 0xcc0000f6, 0xe10000f7, + 0x700000f8, 0xdd0000f9, 0xde0000fa, 0xdb0000fb, 0xdc0000fc, 0x8d0000fd, 0x8e0000fe, 0xdf0000ff, + }, +} + +// CodePage1140 is the IBM Code Page 1140 encoding. +var CodePage1140 *Charmap = &codePage1140 + +var codePage1140 = Charmap{ + name: "IBM Code Page 1140", + mib: identifier.IBM01140, + asciiSuperset: false, + low: 0x00, + replacement: 0x3f, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9c, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x86, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x97, 0x00}}, {2, [3]byte{0xc2, 0x8d, 0x00}}, + {2, [3]byte{0xc2, 0x8e, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9d, 0x00}}, {2, [3]byte{0xc2, 0x85, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x87, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x92, 0x00}}, {2, [3]byte{0xc2, 0x8f, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x80, 0x00}}, {2, [3]byte{0xc2, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0x82, 0x00}}, {2, [3]byte{0xc2, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0x84, 0x00}}, {1, [3]byte{0x0a, 0x00, 0x00}}, + {1, [3]byte{0x17, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x88, 0x00}}, {2, [3]byte{0xc2, 0x89, 0x00}}, + {2, [3]byte{0xc2, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0x8b, 0x00}}, + {2, [3]byte{0xc2, 0x8c, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0x91, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0x94, 0x00}}, {2, [3]byte{0xc2, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0x96, 0x00}}, {1, [3]byte{0x04, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x98, 0x00}}, {2, [3]byte{0xc2, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9b, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9e, 0x00}}, {1, [3]byte{0x1a, 0x00, 
0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa4, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa7, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {1, [3]byte{0x2e, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x28, 0x00, 0x00}}, + {1, [3]byte{0x2b, 0x00, 0x00}}, {1, [3]byte{0x7c, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {1, [3]byte{0x21, 0x00, 0x00}}, {1, [3]byte{0x24, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x3b, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xac, 0x00}}, + {1, [3]byte{0x2d, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x84, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {1, [3]byte{0x2c, 0x00, 0x00}}, + {1, [3]byte{0x25, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {1, [3]byte{0x60, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x3d, 0x00, 0x00}}, {1, [3]byte{0x22, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {1, [3]byte{0x6a, 0x00, 0x00}}, + {1, [3]byte{0x6b, 0x00, 0x00}}, {1, [3]byte{0x6c, 0x00, 0x00}}, + {1, [3]byte{0x6d, 0x00, 0x00}}, {1, [3]byte{0x6e, 0x00, 0x00}}, + {1, [3]byte{0x6f, 0x00, 0x00}}, {1, [3]byte{0x70, 0x00, 0x00}}, + {1, [3]byte{0x71, 0x00, 0x00}}, {1, [3]byte{0x72, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {3, [3]byte{0xe2, 0x82, 0xac}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {1, [3]byte{0x7e, 0x00, 0x00}}, + {1, [3]byte{0x73, 0x00, 0x00}}, {1, [3]byte{0x74, 0x00, 0x00}}, + {1, [3]byte{0x75, 0x00, 0x00}}, {1, [3]byte{0x76, 0x00, 0x00}}, + {1, [3]byte{0x77, 0x00, 0x00}}, {1, [3]byte{0x78, 0x00, 0x00}}, + {1, [3]byte{0x79, 0x00, 0x00}}, {1, [3]byte{0x7a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xa1, 0x00}}, {2, 
[3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0xae, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa5, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xa9, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xbc, 0x00}}, + {2, [3]byte{0xc2, 0xbd, 0x00}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {1, [3]byte{0x5b, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xaf, 0x00}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {1, [3]byte{0x7b, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {1, [3]byte{0x7d, 0x00, 0x00}}, {1, [3]byte{0x4a, 0x00, 0x00}}, + {1, [3]byte{0x4b, 0x00, 0x00}}, {1, [3]byte{0x4c, 0x00, 0x00}}, + {1, [3]byte{0x4d, 0x00, 0x00}}, {1, [3]byte{0x4e, 0x00, 0x00}}, + {1, [3]byte{0x4f, 0x00, 0x00}}, {1, [3]byte{0x50, 0x00, 0x00}}, + {1, [3]byte{0x51, 0x00, 0x00}}, {1, [3]byte{0x52, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb9, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {1, [3]byte{0x53, 0x00, 0x00}}, {1, [3]byte{0x54, 0x00, 0x00}}, + {1, [3]byte{0x55, 0x00, 0x00}}, {1, [3]byte{0x56, 0x00, 0x00}}, + {1, [3]byte{0x57, 0x00, 0x00}}, {1, [3]byte{0x58, 0x00, 0x00}}, + {1, [3]byte{0x59, 0x00, 0x00}}, {1, [3]byte{0x5a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0x94, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9f, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x37000004, 0x2d000005, 0x2e000006, 0x2f000007, + 0x16000008, 0x05000009, 0x2500000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x3c000014, 0x3d000015, 0x32000016, 0x26000017, + 0x18000018, 0x19000019, 0x3f00001a, 0x2700001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x40000020, 0x5a000021, 0x7f000022, 0x7b000023, 0x5b000024, 0x6c000025, 0x50000026, 0x7d000027, + 0x4d000028, 0x5d000029, 0x5c00002a, 0x4e00002b, 0x6b00002c, 0x6000002d, 0x4b00002e, 0x6100002f, + 0xf0000030, 0xf1000031, 0xf2000032, 0xf3000033, 0xf4000034, 0xf5000035, 0xf6000036, 0xf7000037, + 0xf8000038, 0xf9000039, 0x7a00003a, 
0x5e00003b, 0x4c00003c, 0x7e00003d, 0x6e00003e, 0x6f00003f, + 0x7c000040, 0xc1000041, 0xc2000042, 0xc3000043, 0xc4000044, 0xc5000045, 0xc6000046, 0xc7000047, + 0xc8000048, 0xc9000049, 0xd100004a, 0xd200004b, 0xd300004c, 0xd400004d, 0xd500004e, 0xd600004f, + 0xd7000050, 0xd8000051, 0xd9000052, 0xe2000053, 0xe3000054, 0xe4000055, 0xe5000056, 0xe6000057, + 0xe7000058, 0xe8000059, 0xe900005a, 0xba00005b, 0xe000005c, 0xbb00005d, 0xb000005e, 0x6d00005f, + 0x79000060, 0x81000061, 0x82000062, 0x83000063, 0x84000064, 0x85000065, 0x86000066, 0x87000067, + 0x88000068, 0x89000069, 0x9100006a, 0x9200006b, 0x9300006c, 0x9400006d, 0x9500006e, 0x9600006f, + 0x97000070, 0x98000071, 0x99000072, 0xa2000073, 0xa3000074, 0xa4000075, 0xa5000076, 0xa6000077, + 0xa7000078, 0xa8000079, 0xa900007a, 0xc000007b, 0x4f00007c, 0xd000007d, 0xa100007e, 0x0700007f, + 0x20000080, 0x21000081, 0x22000082, 0x23000083, 0x24000084, 0x15000085, 0x06000086, 0x17000087, + 0x28000088, 0x29000089, 0x2a00008a, 0x2b00008b, 0x2c00008c, 0x0900008d, 0x0a00008e, 0x1b00008f, + 0x30000090, 0x31000091, 0x1a000092, 0x33000093, 0x34000094, 0x35000095, 0x36000096, 0x08000097, + 0x38000098, 0x39000099, 0x3a00009a, 0x3b00009b, 0x0400009c, 0x1400009d, 0x3e00009e, 0xff00009f, + 0x410000a0, 0xaa0000a1, 0x4a0000a2, 0xb10000a3, 0xb20000a5, 0x6a0000a6, 0xb50000a7, 0xbd0000a8, + 0xb40000a9, 0x9a0000aa, 0x8a0000ab, 0x5f0000ac, 0xca0000ad, 0xaf0000ae, 0xbc0000af, 0x900000b0, + 0x8f0000b1, 0xea0000b2, 0xfa0000b3, 0xbe0000b4, 0xa00000b5, 0xb60000b6, 0xb30000b7, 0x9d0000b8, + 0xda0000b9, 0x9b0000ba, 0x8b0000bb, 0xb70000bc, 0xb80000bd, 0xb90000be, 0xab0000bf, 0x640000c0, + 0x650000c1, 0x620000c2, 0x660000c3, 0x630000c4, 0x670000c5, 0x9e0000c6, 0x680000c7, 0x740000c8, + 0x710000c9, 0x720000ca, 0x730000cb, 0x780000cc, 0x750000cd, 0x760000ce, 0x770000cf, 0xac0000d0, + 0x690000d1, 0xed0000d2, 0xee0000d3, 0xeb0000d4, 0xef0000d5, 0xec0000d6, 0xbf0000d7, 0x800000d8, + 0xfd0000d9, 0xfe0000da, 0xfb0000db, 0xfc0000dc, 0xad0000dd, 0xae0000de, 0x590000df, 0x440000e0, + 0x450000e1, 0x420000e2, 0x460000e3, 0x430000e4, 0x470000e5, 0x9c0000e6, 0x480000e7, 0x540000e8, + 0x510000e9, 0x520000ea, 0x530000eb, 0x580000ec, 0x550000ed, 0x560000ee, 0x570000ef, 0x8c0000f0, + 0x490000f1, 0xcd0000f2, 0xce0000f3, 0xcb0000f4, 0xcf0000f5, 0xcc0000f6, 0xe10000f7, 0x700000f8, + 0xdd0000f9, 0xde0000fa, 0xdb0000fb, 0xdc0000fc, 0x8d0000fd, 0x8e0000fe, 0xdf0000ff, 0x9f0020ac, + }, +} + +// ISO8859_1 is the ISO 8859-1 encoding. 
+var ISO8859_1 *Charmap = &iso8859_1 + +var iso8859_1 = Charmap{ + name: "ISO 8859-1", + mib: identifier.ISOLatin1, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 
0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x80, 0x00}}, {2, [3]byte{0xc2, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0x82, 0x00}}, {2, [3]byte{0xc2, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0x84, 0x00}}, {2, [3]byte{0xc2, 0x85, 0x00}}, + {2, [3]byte{0xc2, 0x86, 0x00}}, {2, [3]byte{0xc2, 0x87, 0x00}}, + {2, [3]byte{0xc2, 0x88, 0x00}}, {2, [3]byte{0xc2, 0x89, 0x00}}, + {2, [3]byte{0xc2, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0x8b, 0x00}}, + {2, [3]byte{0xc2, 0x8c, 0x00}}, {2, [3]byte{0xc2, 0x8d, 0x00}}, + {2, [3]byte{0xc2, 0x8e, 0x00}}, {2, [3]byte{0xc2, 0x8f, 0x00}}, + {2, [3]byte{0xc2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0x92, 0x00}}, {2, [3]byte{0xc2, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0x94, 0x00}}, {2, [3]byte{0xc2, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0x96, 0x00}}, {2, [3]byte{0xc2, 0x97, 0x00}}, + {2, [3]byte{0xc2, 0x98, 0x00}}, {2, [3]byte{0xc2, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9b, 0x00}}, + {2, [3]byte{0xc2, 0x9c, 0x00}}, {2, [3]byte{0xc2, 0x9d, 0x00}}, + {2, [3]byte{0xc2, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0x9f, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xba, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {2, 
[3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0x80000080, 0x81000081, 0x82000082, 0x83000083, 0x84000084, 0x85000085, 0x86000086, 0x87000087, + 0x88000088, 0x89000089, 0x8a00008a, 0x8b00008b, 0x8c00008c, 0x8d00008d, 0x8e00008e, 0x8f00008f, + 0x90000090, 0x91000091, 0x92000092, 0x93000093, 0x94000094, 0x95000095, 0x96000096, 0x97000097, + 0x98000098, 0x99000099, 0x9a00009a, 0x9b00009b, 0x9c00009c, 0x9d00009d, 0x9e00009e, 0x9f00009f, + 0xa00000a0, 
0xa10000a1, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, + 0xa80000a8, 0xa90000a9, 0xaa0000aa, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, + 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, + 0xb80000b8, 0xb90000b9, 0xba0000ba, 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xbf0000bf, + 0xc00000c0, 0xc10000c1, 0xc20000c2, 0xc30000c3, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc70000c7, + 0xc80000c8, 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, + 0xd00000d0, 0xd10000d1, 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd70000d7, + 0xd80000d8, 0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, 0xdd0000dd, 0xde0000de, 0xdf0000df, + 0xe00000e0, 0xe10000e1, 0xe20000e2, 0xe30000e3, 0xe40000e4, 0xe50000e5, 0xe60000e6, 0xe70000e7, + 0xe80000e8, 0xe90000e9, 0xea0000ea, 0xeb0000eb, 0xec0000ec, 0xed0000ed, 0xee0000ee, 0xef0000ef, + 0xf00000f0, 0xf10000f1, 0xf20000f2, 0xf30000f3, 0xf40000f4, 0xf50000f5, 0xf60000f6, 0xf70000f7, + 0xf80000f8, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, 0xfd0000fd, 0xfe0000fe, 0xff0000ff, + }, +} + +// ISO8859_2 is the ISO 8859-2 encoding. +var ISO8859_2 *Charmap = &iso8859_2 + +var iso8859_2 = Charmap{ + name: "ISO 8859-2", + mib: identifier.ISOLatin2, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 
0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc4, 0x84, 0x00}}, + {2, [3]byte{0xcb, 0x98, 0x00}}, {2, [3]byte{0xc5, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc4, 0xbd, 0x00}}, + {2, [3]byte{0xc5, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc5, 0xa0, 0x00}}, + {2, [3]byte{0xc5, 0x9e, 0x00}}, {2, [3]byte{0xc5, 0xa4, 0x00}}, + {2, 
[3]byte{0xc5, 0xb9, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc5, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc4, 0x85, 0x00}}, + {2, [3]byte{0xcb, 0x9b, 0x00}}, {2, [3]byte{0xc5, 0x82, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc4, 0xbe, 0x00}}, + {2, [3]byte{0xc5, 0x9b, 0x00}}, {2, [3]byte{0xcb, 0x87, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc5, 0xa1, 0x00}}, + {2, [3]byte{0xc5, 0x9f, 0x00}}, {2, [3]byte{0xc5, 0xa5, 0x00}}, + {2, [3]byte{0xc5, 0xba, 0x00}}, {2, [3]byte{0xcb, 0x9d, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {2, [3]byte{0xc5, 0xbc, 0x00}}, + {2, [3]byte{0xc5, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc4, 0x82, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc4, 0xb9, 0x00}}, + {2, [3]byte{0xc4, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc4, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc4, 0x8e, 0x00}}, + {2, [3]byte{0xc4, 0x90, 0x00}}, {2, [3]byte{0xc5, 0x83, 0x00}}, + {2, [3]byte{0xc5, 0x87, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc5, 0x90, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc5, 0x98, 0x00}}, {2, [3]byte{0xc5, 0xae, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc5, 0xb0, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc5, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc5, 0x95, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc4, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc4, 0xba, 0x00}}, + {2, [3]byte{0xc4, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc4, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc4, 0x99, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc4, 0x9b, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc4, 0x8f, 0x00}}, + {2, [3]byte{0xc4, 0x91, 0x00}}, {2, [3]byte{0xc5, 0x84, 0x00}}, + {2, [3]byte{0xc5, 0x88, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc5, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc5, 0x99, 0x00}}, {2, [3]byte{0xc5, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc5, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc5, 0xa3, 0x00}}, {2, [3]byte{0xcb, 0x99, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 
0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa40000a4, 0xa70000a7, 0xa80000a8, 0xad0000ad, 0xb00000b0, 0xb40000b4, 0xb80000b8, + 0xc10000c1, 0xc20000c2, 0xc40000c4, 0xc70000c7, 0xc90000c9, 0xcb0000cb, 0xcd0000cd, 0xce0000ce, + 0xd30000d3, 0xd40000d4, 0xd60000d6, 0xd70000d7, 0xda0000da, 0xdc0000dc, 0xdd0000dd, 0xdf0000df, + 0xe10000e1, 0xe20000e2, 0xe40000e4, 0xe70000e7, 0xe90000e9, 0xeb0000eb, 0xed0000ed, 0xee0000ee, + 0xf30000f3, 0xf40000f4, 0xf60000f6, 0xf70000f7, 0xfa0000fa, 0xfc0000fc, 0xfd0000fd, 0xc3000102, + 0xe3000103, 0xa1000104, 0xb1000105, 0xc6000106, 0xe6000107, 0xc800010c, 0xe800010d, 0xcf00010e, + 0xef00010f, 0xd0000110, 0xf0000111, 0xca000118, 0xea000119, 0xcc00011a, 0xec00011b, 0xc5000139, + 0xe500013a, 0xa500013d, 0xb500013e, 0xa3000141, 0xb3000142, 0xd1000143, 0xf1000144, 0xd2000147, + 0xf2000148, 0xd5000150, 0xf5000151, 0xc0000154, 0xe0000155, 0xd8000158, 0xf8000159, 0xa600015a, + 0xb600015b, 0xaa00015e, 0xba00015f, 0xa9000160, 0xb9000161, 0xde000162, 0xfe000163, 0xab000164, + 0xbb000165, 0xd900016e, 0xf900016f, 0xdb000170, 0xfb000171, 0xac000179, 0xbc00017a, 0xaf00017b, + 0xbf00017c, 0xae00017d, 0xbe00017e, 0xb70002c7, 0xa20002d8, 0xff0002d9, 0xb20002db, 0xbd0002dd, + 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, + 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, + 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, + 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, + }, +} + +// ISO8859_3 is the ISO 8859-3 encoding. 
+var ISO8859_3 *Charmap = &iso8859_3 + +var iso8859_3 = Charmap{ + name: "ISO 8859-3", + mib: identifier.ISOLatin3, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 
0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc4, 0xa6, 0x00}}, + {2, [3]byte{0xcb, 0x98, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc4, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc4, 0xb0, 0x00}}, + {2, [3]byte{0xc5, 0x9e, 0x00}}, {2, [3]byte{0xc4, 0x9e, 0x00}}, + {2, [3]byte{0xc4, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc5, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc4, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc4, 0xa5, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc4, 0xb1, 0x00}}, + {2, [3]byte{0xc5, 0x9f, 0x00}}, {2, [3]byte{0xc4, 0x9f, 0x00}}, + {2, [3]byte{0xc4, 0xb5, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc5, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc4, 0x8a, 0x00}}, + {2, [3]byte{0xc4, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, 
[3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc4, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc4, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc5, 0xac, 0x00}}, + {2, [3]byte{0xc5, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc4, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x89, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc4, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc4, 0x9d, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc5, 0xad, 0x00}}, + {2, [3]byte{0xc5, 0x9d, 0x00}}, {2, [3]byte{0xcb, 0x99, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa30000a3, 0xa40000a4, 0xa70000a7, 0xa80000a8, 0xad0000ad, 0xb00000b0, 0xb20000b2, + 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb70000b7, 0xb80000b8, 0xbd0000bd, 0xc00000c0, 0xc10000c1, + 0xc20000c2, 0xc40000c4, 0xc70000c7, 0xc80000c8, 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, + 0xcd0000cd, 0xce0000ce, 0xcf0000cf, 0xd10000d1, 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd60000d6, + 0xd70000d7, 
0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, 0xdf0000df, 0xe00000e0, 0xe10000e1, + 0xe20000e2, 0xe40000e4, 0xe70000e7, 0xe80000e8, 0xe90000e9, 0xea0000ea, 0xeb0000eb, 0xec0000ec, + 0xed0000ed, 0xee0000ee, 0xef0000ef, 0xf10000f1, 0xf20000f2, 0xf30000f3, 0xf40000f4, 0xf60000f6, + 0xf70000f7, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, 0xc6000108, 0xe6000109, 0xc500010a, + 0xe500010b, 0xd800011c, 0xf800011d, 0xab00011e, 0xbb00011f, 0xd5000120, 0xf5000121, 0xa6000124, + 0xb6000125, 0xa1000126, 0xb1000127, 0xa9000130, 0xb9000131, 0xac000134, 0xbc000135, 0xde00015c, + 0xfe00015d, 0xaa00015e, 0xba00015f, 0xdd00016c, 0xfd00016d, 0xaf00017b, 0xbf00017c, 0xa20002d8, + 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, + 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, + 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, + 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, + 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, + }, +} + +// ISO8859_4 is the ISO 8859-4 encoding. +var ISO8859_4 *Charmap = &iso8859_4 + +var iso8859_4 = Charmap{ + name: "ISO 8859-4", + mib: identifier.ISOLatin4, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 
0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc4, 0x84, 0x00}}, + {2, [3]byte{0xc4, 0xb8, 0x00}}, {2, [3]byte{0xc5, 0x96, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc4, 0xa8, 0x00}}, + {2, [3]byte{0xc4, 0xbb, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc5, 0xa0, 0x00}}, + {2, [3]byte{0xc4, 0x92, 0x00}}, {2, [3]byte{0xc4, 0xa2, 0x00}}, + {2, 
[3]byte{0xc5, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc4, 0x85, 0x00}}, + {2, [3]byte{0xcb, 0x9b, 0x00}}, {2, [3]byte{0xc5, 0x97, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc4, 0xa9, 0x00}}, + {2, [3]byte{0xc4, 0xbc, 0x00}}, {2, [3]byte{0xcb, 0x87, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc5, 0xa1, 0x00}}, + {2, [3]byte{0xc4, 0x93, 0x00}}, {2, [3]byte{0xc4, 0xa3, 0x00}}, + {2, [3]byte{0xc5, 0xa7, 0x00}}, {2, [3]byte{0xc5, 0x8a, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {2, [3]byte{0xc5, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc4, 0xae, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc4, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc4, 0xaa, 0x00}}, + {2, [3]byte{0xc4, 0x90, 0x00}}, {2, [3]byte{0xc5, 0x85, 0x00}}, + {2, [3]byte{0xc5, 0x8c, 0x00}}, {2, [3]byte{0xc4, 0xb6, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc5, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc5, 0xa8, 0x00}}, + {2, [3]byte{0xc5, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc4, 0x81, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc4, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc4, 0x99, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc4, 0x97, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc4, 0xab, 0x00}}, + {2, [3]byte{0xc4, 0x91, 0x00}}, {2, [3]byte{0xc5, 0x86, 0x00}}, + {2, [3]byte{0xc5, 0x8d, 0x00}}, {2, [3]byte{0xc4, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc5, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc5, 0xa9, 0x00}}, + {2, [3]byte{0xc5, 0xab, 0x00}}, {2, [3]byte{0xcb, 0x99, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 
0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa40000a4, 0xa70000a7, 0xa80000a8, 0xad0000ad, 0xaf0000af, 0xb00000b0, 0xb40000b4, + 0xb80000b8, 0xc10000c1, 0xc20000c2, 0xc30000c3, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc90000c9, + 0xcb0000cb, 0xcd0000cd, 0xce0000ce, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd70000d7, 0xd80000d8, + 0xda0000da, 0xdb0000db, 0xdc0000dc, 0xdf0000df, 0xe10000e1, 0xe20000e2, 0xe30000e3, 0xe40000e4, + 0xe50000e5, 0xe60000e6, 0xe90000e9, 0xeb0000eb, 0xed0000ed, 0xee0000ee, 0xf40000f4, 0xf50000f5, + 0xf60000f6, 0xf70000f7, 0xf80000f8, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, 0xc0000100, 0xe0000101, + 0xa1000104, 0xb1000105, 0xc800010c, 0xe800010d, 0xd0000110, 0xf0000111, 0xaa000112, 0xba000113, + 0xcc000116, 0xec000117, 0xca000118, 0xea000119, 0xab000122, 0xbb000123, 0xa5000128, 0xb5000129, + 0xcf00012a, 0xef00012b, 0xc700012e, 0xe700012f, 0xd3000136, 0xf3000137, 0xa2000138, 0xa600013b, + 0xb600013c, 0xd1000145, 0xf1000146, 0xbd00014a, 0xbf00014b, 0xd200014c, 0xf200014d, 0xa3000156, + 0xb3000157, 0xa9000160, 0xb9000161, 0xac000166, 0xbc000167, 0xdd000168, 0xfd000169, 0xde00016a, + 0xfe00016b, 0xd9000172, 0xf9000173, 0xae00017d, 0xbe00017e, 0xb70002c7, 0xff0002d9, 0xb20002db, + 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, + 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, + 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, + 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, + }, +} + +// ISO8859_5 is the ISO 8859-5 encoding. 
+var ISO8859_5 *Charmap = &iso8859_5 + +var iso8859_5 = Charmap{ + name: "ISO 8859-5", + mib: identifier.ISOLatinCyrillic, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0x81, 0x00}}, + {2, [3]byte{0xd0, 0x82, 0x00}}, {2, [3]byte{0xd0, 0x83, 0x00}}, + {2, [3]byte{0xd0, 0x84, 0x00}}, {2, [3]byte{0xd0, 0x85, 0x00}}, + {2, [3]byte{0xd0, 0x86, 0x00}}, {2, [3]byte{0xd0, 0x87, 0x00}}, + {2, [3]byte{0xd0, 0x88, 0x00}}, {2, [3]byte{0xd0, 0x89, 0x00}}, + {2, [3]byte{0xd0, 0x8a, 0x00}}, {2, [3]byte{0xd0, 0x8b, 0x00}}, + {2, [3]byte{0xd0, 0x8c, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xd0, 0x8e, 0x00}}, {2, [3]byte{0xd0, 0x8f, 0x00}}, + {2, [3]byte{0xd0, 0x90, 0x00}}, {2, [3]byte{0xd0, 0x91, 0x00}}, + {2, [3]byte{0xd0, 0x92, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, + {2, [3]byte{0xd0, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, + {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x99, 0x00}}, + {2, [3]byte{0xd0, 0x9a, 0x00}}, {2, [3]byte{0xd0, 0x9b, 0x00}}, + {2, [3]byte{0xd0, 0x9c, 0x00}}, {2, [3]byte{0xd0, 0x9d, 0x00}}, + {2, [3]byte{0xd0, 0x9e, 0x00}}, {2, [3]byte{0xd0, 0x9f, 0x00}}, + {2, [3]byte{0xd0, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0xa1, 0x00}}, + {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd0, 0xa3, 0x00}}, + {2, [3]byte{0xd0, 0xa4, 0x00}}, {2, [3]byte{0xd0, 0xa5, 0x00}}, + {2, [3]byte{0xd0, 0xa6, 0x00}}, {2, [3]byte{0xd0, 0xa7, 0x00}}, + {2, [3]byte{0xd0, 0xa8, 0x00}}, {2, [3]byte{0xd0, 0xa9, 0x00}}, + {2, [3]byte{0xd0, 0xaa, 0x00}}, {2, [3]byte{0xd0, 0xab, 0x00}}, + {2, [3]byte{0xd0, 0xac, 0x00}}, {2, [3]byte{0xd0, 0xad, 0x00}}, + {2, [3]byte{0xd0, 0xae, 0x00}}, {2, [3]byte{0xd0, 0xaf, 0x00}}, + {2, [3]byte{0xd0, 0xb0, 
0x00}}, {2, [3]byte{0xd0, 0xb1, 0x00}}, + {2, [3]byte{0xd0, 0xb2, 0x00}}, {2, [3]byte{0xd0, 0xb3, 0x00}}, + {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0xb5, 0x00}}, + {2, [3]byte{0xd0, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0xb7, 0x00}}, + {2, [3]byte{0xd0, 0xb8, 0x00}}, {2, [3]byte{0xd0, 0xb9, 0x00}}, + {2, [3]byte{0xd0, 0xba, 0x00}}, {2, [3]byte{0xd0, 0xbb, 0x00}}, + {2, [3]byte{0xd0, 0xbc, 0x00}}, {2, [3]byte{0xd0, 0xbd, 0x00}}, + {2, [3]byte{0xd0, 0xbe, 0x00}}, {2, [3]byte{0xd0, 0xbf, 0x00}}, + {2, [3]byte{0xd1, 0x80, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x82, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, + {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x85, 0x00}}, + {2, [3]byte{0xd1, 0x86, 0x00}}, {2, [3]byte{0xd1, 0x87, 0x00}}, + {2, [3]byte{0xd1, 0x88, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, + {2, [3]byte{0xd1, 0x8a, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, + {2, [3]byte{0xd1, 0x8c, 0x00}}, {2, [3]byte{0xd1, 0x8d, 0x00}}, + {2, [3]byte{0xd1, 0x8e, 0x00}}, {2, [3]byte{0xd1, 0x8f, 0x00}}, + {3, [3]byte{0xe2, 0x84, 0x96}}, {2, [3]byte{0xd1, 0x91, 0x00}}, + {2, [3]byte{0xd1, 0x92, 0x00}}, {2, [3]byte{0xd1, 0x93, 0x00}}, + {2, [3]byte{0xd1, 0x94, 0x00}}, {2, [3]byte{0xd1, 0x95, 0x00}}, + {2, [3]byte{0xd1, 0x96, 0x00}}, {2, [3]byte{0xd1, 0x97, 0x00}}, + {2, [3]byte{0xd1, 0x98, 0x00}}, {2, [3]byte{0xd1, 0x99, 0x00}}, + {2, [3]byte{0xd1, 0x9a, 0x00}}, {2, [3]byte{0xd1, 0x9b, 0x00}}, + {2, [3]byte{0xd1, 0x9c, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xd1, 0x9e, 0x00}}, {2, [3]byte{0xd1, 0x9f, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xfd0000a7, 0xad0000ad, 0xa1000401, 0xa2000402, 0xa3000403, 0xa4000404, 0xa5000405, + 0xa6000406, 0xa7000407, 0xa8000408, 0xa9000409, 0xaa00040a, 0xab00040b, 0xac00040c, 0xae00040e, + 0xaf00040f, 0xb0000410, 0xb1000411, 0xb2000412, 0xb3000413, 0xb4000414, 0xb5000415, 0xb6000416, + 0xb7000417, 0xb8000418, 0xb9000419, 0xba00041a, 0xbb00041b, 0xbc00041c, 0xbd00041d, 0xbe00041e, + 
0xbf00041f, 0xc0000420, 0xc1000421, 0xc2000422, 0xc3000423, 0xc4000424, 0xc5000425, 0xc6000426, + 0xc7000427, 0xc8000428, 0xc9000429, 0xca00042a, 0xcb00042b, 0xcc00042c, 0xcd00042d, 0xce00042e, + 0xcf00042f, 0xd0000430, 0xd1000431, 0xd2000432, 0xd3000433, 0xd4000434, 0xd5000435, 0xd6000436, + 0xd7000437, 0xd8000438, 0xd9000439, 0xda00043a, 0xdb00043b, 0xdc00043c, 0xdd00043d, 0xde00043e, + 0xdf00043f, 0xe0000440, 0xe1000441, 0xe2000442, 0xe3000443, 0xe4000444, 0xe5000445, 0xe6000446, + 0xe7000447, 0xe8000448, 0xe9000449, 0xea00044a, 0xeb00044b, 0xec00044c, 0xed00044d, 0xee00044e, + 0xef00044f, 0xf1000451, 0xf2000452, 0xf3000453, 0xf4000454, 0xf5000455, 0xf6000456, 0xf7000457, + 0xf8000458, 0xf9000459, 0xfa00045a, 0xfb00045b, 0xfc00045c, 0xfe00045e, 0xff00045f, 0xf0002116, + 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, + 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, + 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, + 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, + }, +} + +// ISO8859_6 is the ISO 8859-6 encoding. +var ISO8859_6 *Charmap = &iso8859_6 + +var iso8859_6 = Charmap{ + name: "ISO 8859-6", + mib: identifier.ISOLatinArabic, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, 
[3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 
0xbd}}, + {2, [3]byte{0xd8, 0x8c, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xd8, 0x9b, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xd8, 0x9f, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xd8, 0xa1, 0x00}}, + {2, [3]byte{0xd8, 0xa2, 0x00}}, {2, [3]byte{0xd8, 0xa3, 0x00}}, + {2, [3]byte{0xd8, 0xa4, 0x00}}, {2, [3]byte{0xd8, 0xa5, 0x00}}, + {2, [3]byte{0xd8, 0xa6, 0x00}}, {2, [3]byte{0xd8, 0xa7, 0x00}}, + {2, [3]byte{0xd8, 0xa8, 0x00}}, {2, [3]byte{0xd8, 0xa9, 0x00}}, + {2, [3]byte{0xd8, 0xaa, 0x00}}, {2, [3]byte{0xd8, 0xab, 0x00}}, + {2, [3]byte{0xd8, 0xac, 0x00}}, {2, [3]byte{0xd8, 0xad, 0x00}}, + {2, [3]byte{0xd8, 0xae, 0x00}}, {2, [3]byte{0xd8, 0xaf, 0x00}}, + {2, [3]byte{0xd8, 0xb0, 0x00}}, {2, [3]byte{0xd8, 0xb1, 0x00}}, + {2, [3]byte{0xd8, 0xb2, 0x00}}, {2, [3]byte{0xd8, 0xb3, 0x00}}, + {2, [3]byte{0xd8, 0xb4, 0x00}}, {2, [3]byte{0xd8, 0xb5, 0x00}}, + {2, [3]byte{0xd8, 0xb6, 0x00}}, {2, [3]byte{0xd8, 0xb7, 0x00}}, + {2, [3]byte{0xd8, 0xb8, 0x00}}, {2, [3]byte{0xd8, 0xb9, 0x00}}, + {2, [3]byte{0xd8, 0xba, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xd9, 0x80, 0x00}}, {2, [3]byte{0xd9, 0x81, 0x00}}, + {2, [3]byte{0xd9, 0x82, 0x00}}, {2, [3]byte{0xd9, 0x83, 0x00}}, + {2, [3]byte{0xd9, 0x84, 0x00}}, {2, [3]byte{0xd9, 0x85, 0x00}}, + {2, [3]byte{0xd9, 0x86, 0x00}}, {2, [3]byte{0xd9, 0x87, 0x00}}, + {2, [3]byte{0xd9, 0x88, 0x00}}, {2, [3]byte{0xd9, 0x89, 0x00}}, + {2, [3]byte{0xd9, 0x8a, 0x00}}, {2, [3]byte{0xd9, 0x8b, 0x00}}, + {2, [3]byte{0xd9, 0x8c, 0x00}}, {2, [3]byte{0xd9, 0x8d, 0x00}}, + {2, [3]byte{0xd9, 0x8e, 0x00}}, {2, [3]byte{0xd9, 0x8f, 0x00}}, + {2, [3]byte{0xd9, 0x90, 0x00}}, {2, [3]byte{0xd9, 0x91, 0x00}}, + {2, [3]byte{0xd9, 0x92, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa40000a4, 0xad0000ad, 0xac00060c, 0xbb00061b, 0xbf00061f, 0xc1000621, 0xc2000622, + 0xc3000623, 0xc4000624, 0xc5000625, 0xc6000626, 0xc7000627, 0xc8000628, 0xc9000629, 0xca00062a, + 0xcb00062b, 0xcc00062c, 0xcd00062d, 0xce00062e, 0xcf00062f, 0xd0000630, 0xd1000631, 0xd2000632, + 0xd3000633, 0xd4000634, 0xd5000635, 0xd6000636, 0xd7000637, 0xd8000638, 0xd9000639, 0xda00063a, + 0xe0000640, 0xe1000641, 0xe2000642, 0xe3000643, 0xe4000644, 0xe5000645, 0xe6000646, 0xe7000647, + 0xe8000648, 0xe9000649, 0xea00064a, 0xeb00064b, 0xec00064c, 0xed00064d, 0xee00064e, 0xef00064f, + 0xf0000650, 0xf1000651, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + }, +} + +// ISO8859_7 is the ISO 8859-7 encoding. 
+var ISO8859_7 *Charmap = &iso8859_7 + +var iso8859_7 = Charmap{ + name: "ISO 8859-7", + mib: identifier.ISOLatinGreek, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 
0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xe2, 0x82, 0xaf}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xcd, 0xba, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x95}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xce, 0x84, 0x00}}, {2, [3]byte{0xce, 0x85, 0x00}}, + {2, [3]byte{0xce, 0x86, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xce, 0x88, 0x00}}, {2, [3]byte{0xce, 0x89, 0x00}}, + {2, [3]byte{0xce, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xce, 0x8c, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xce, 0x8e, 0x00}}, {2, [3]byte{0xce, 0x8f, 0x00}}, + {2, [3]byte{0xce, 0x90, 0x00}}, {2, [3]byte{0xce, 0x91, 0x00}}, + {2, [3]byte{0xce, 0x92, 0x00}}, {2, [3]byte{0xce, 0x93, 0x00}}, + {2, [3]byte{0xce, 0x94, 0x00}}, {2, [3]byte{0xce, 0x95, 0x00}}, + {2, [3]byte{0xce, 0x96, 0x00}}, {2, [3]byte{0xce, 0x97, 0x00}}, + {2, [3]byte{0xce, 0x98, 0x00}}, {2, [3]byte{0xce, 0x99, 0x00}}, + {2, [3]byte{0xce, 0x9a, 0x00}}, {2, [3]byte{0xce, 0x9b, 0x00}}, + {2, [3]byte{0xce, 0x9c, 0x00}}, {2, [3]byte{0xce, 0x9d, 0x00}}, + {2, [3]byte{0xce, 0x9e, 0x00}}, {2, [3]byte{0xce, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0xa0, 0x00}}, {2, 
[3]byte{0xce, 0xa1, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xce, 0xa3, 0x00}}, + {2, [3]byte{0xce, 0xa4, 0x00}}, {2, [3]byte{0xce, 0xa5, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0xa7, 0x00}}, + {2, [3]byte{0xce, 0xa8, 0x00}}, {2, [3]byte{0xce, 0xa9, 0x00}}, + {2, [3]byte{0xce, 0xaa, 0x00}}, {2, [3]byte{0xce, 0xab, 0x00}}, + {2, [3]byte{0xce, 0xac, 0x00}}, {2, [3]byte{0xce, 0xad, 0x00}}, + {2, [3]byte{0xce, 0xae, 0x00}}, {2, [3]byte{0xce, 0xaf, 0x00}}, + {2, [3]byte{0xce, 0xb0, 0x00}}, {2, [3]byte{0xce, 0xb1, 0x00}}, + {2, [3]byte{0xce, 0xb2, 0x00}}, {2, [3]byte{0xce, 0xb3, 0x00}}, + {2, [3]byte{0xce, 0xb4, 0x00}}, {2, [3]byte{0xce, 0xb5, 0x00}}, + {2, [3]byte{0xce, 0xb6, 0x00}}, {2, [3]byte{0xce, 0xb7, 0x00}}, + {2, [3]byte{0xce, 0xb8, 0x00}}, {2, [3]byte{0xce, 0xb9, 0x00}}, + {2, [3]byte{0xce, 0xba, 0x00}}, {2, [3]byte{0xce, 0xbb, 0x00}}, + {2, [3]byte{0xce, 0xbc, 0x00}}, {2, [3]byte{0xce, 0xbd, 0x00}}, + {2, [3]byte{0xce, 0xbe, 0x00}}, {2, [3]byte{0xce, 0xbf, 0x00}}, + {2, [3]byte{0xcf, 0x80, 0x00}}, {2, [3]byte{0xcf, 0x81, 0x00}}, + {2, [3]byte{0xcf, 0x82, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xcf, 0x84, 0x00}}, {2, [3]byte{0xcf, 0x85, 0x00}}, + {2, [3]byte{0xcf, 0x86, 0x00}}, {2, [3]byte{0xcf, 0x87, 0x00}}, + {2, [3]byte{0xcf, 0x88, 0x00}}, {2, [3]byte{0xcf, 0x89, 0x00}}, + {2, [3]byte{0xcf, 0x8a, 0x00}}, {2, [3]byte{0xcf, 0x8b, 0x00}}, + {2, [3]byte{0xcf, 0x8c, 0x00}}, {2, [3]byte{0xcf, 0x8d, 0x00}}, + {2, [3]byte{0xcf, 0x8e, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa30000a3, 0xa60000a6, 0xa70000a7, 0xa80000a8, 0xa90000a9, 0xab0000ab, 0xac0000ac, + 0xad0000ad, 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, 0xb70000b7, 0xbb0000bb, 0xbd0000bd, + 0xaa00037a, 0xb4000384, 0xb5000385, 0xb6000386, 0xb8000388, 0xb9000389, 0xba00038a, 0xbc00038c, + 0xbe00038e, 0xbf00038f, 0xc0000390, 0xc1000391, 0xc2000392, 0xc3000393, 0xc4000394, 0xc5000395, + 0xc6000396, 
0xc7000397, 0xc8000398, 0xc9000399, 0xca00039a, 0xcb00039b, 0xcc00039c, 0xcd00039d, + 0xce00039e, 0xcf00039f, 0xd00003a0, 0xd10003a1, 0xd30003a3, 0xd40003a4, 0xd50003a5, 0xd60003a6, + 0xd70003a7, 0xd80003a8, 0xd90003a9, 0xda0003aa, 0xdb0003ab, 0xdc0003ac, 0xdd0003ad, 0xde0003ae, + 0xdf0003af, 0xe00003b0, 0xe10003b1, 0xe20003b2, 0xe30003b3, 0xe40003b4, 0xe50003b5, 0xe60003b6, + 0xe70003b7, 0xe80003b8, 0xe90003b9, 0xea0003ba, 0xeb0003bb, 0xec0003bc, 0xed0003bd, 0xee0003be, + 0xef0003bf, 0xf00003c0, 0xf10003c1, 0xf20003c2, 0xf30003c3, 0xf40003c4, 0xf50003c5, 0xf60003c6, + 0xf70003c7, 0xf80003c8, 0xf90003c9, 0xfa0003ca, 0xfb0003cb, 0xfc0003cc, 0xfd0003cd, 0xfe0003ce, + 0xaf002015, 0xa1002018, 0xa2002019, 0xa40020ac, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, + 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, + 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, + 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, + 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, + }, +} + +// ISO8859_8 is the ISO 8859-8 encoding. +var ISO8859_8 *Charmap = &iso8859_8 + +var iso8859_8 = Charmap{ + name: "ISO 8859-8", + mib: identifier.ISOLatinHebrew, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 
0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0x97, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, 
[3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x97}}, + {2, [3]byte{0xd7, 0x90, 0x00}}, {2, [3]byte{0xd7, 0x91, 0x00}}, + {2, [3]byte{0xd7, 0x92, 0x00}}, {2, [3]byte{0xd7, 0x93, 0x00}}, + {2, [3]byte{0xd7, 0x94, 0x00}}, {2, [3]byte{0xd7, 0x95, 0x00}}, + {2, [3]byte{0xd7, 0x96, 0x00}}, {2, [3]byte{0xd7, 0x97, 0x00}}, + {2, [3]byte{0xd7, 0x98, 0x00}}, {2, [3]byte{0xd7, 0x99, 0x00}}, + {2, [3]byte{0xd7, 0x9a, 0x00}}, {2, [3]byte{0xd7, 0x9b, 0x00}}, + {2, [3]byte{0xd7, 0x9c, 0x00}}, {2, [3]byte{0xd7, 0x9d, 0x00}}, + {2, [3]byte{0xd7, 0x9e, 0x00}}, {2, [3]byte{0xd7, 0x9f, 0x00}}, + {2, [3]byte{0xd7, 0xa0, 0x00}}, {2, [3]byte{0xd7, 0xa1, 0x00}}, + {2, [3]byte{0xd7, 0xa2, 0x00}}, {2, [3]byte{0xd7, 0xa3, 0x00}}, + {2, [3]byte{0xd7, 0xa4, 0x00}}, {2, [3]byte{0xd7, 0xa5, 0x00}}, + {2, [3]byte{0xd7, 0xa6, 0x00}}, {2, [3]byte{0xd7, 0xa7, 0x00}}, + {2, [3]byte{0xd7, 0xa8, 0x00}}, {2, [3]byte{0xd7, 0xa9, 0x00}}, + {2, [3]byte{0xd7, 0xaa, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x8e}}, + {3, [3]byte{0xe2, 0x80, 0x8f}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 
0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, 0xa80000a8, + 0xa90000a9, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, 0xb00000b0, 0xb10000b1, + 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, 0xb80000b8, 0xb90000b9, + 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xaa0000d7, 0xba0000f7, 0xe00005d0, 0xe10005d1, + 0xe20005d2, 0xe30005d3, 0xe40005d4, 0xe50005d5, 0xe60005d6, 0xe70005d7, 0xe80005d8, 0xe90005d9, + 0xea0005da, 0xeb0005db, 0xec0005dc, 0xed0005dd, 0xee0005de, 0xef0005df, 0xf00005e0, 0xf10005e1, + 0xf20005e2, 0xf30005e3, 0xf40005e4, 0xf50005e5, 0xf60005e6, 0xf70005e7, 0xf80005e8, 0xf90005e9, + 0xfa0005ea, 0xfd00200e, 0xfe00200f, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + }, +} + +// ISO8859_9 is the ISO 8859-9 encoding. 
+var ISO8859_9 *Charmap = &iso8859_9 + +var iso8859_9 = Charmap{ + name: "ISO 8859-9", + mib: identifier.ISOLatin5, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 
0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x80, 0x00}}, {2, [3]byte{0xc2, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0x82, 0x00}}, {2, [3]byte{0xc2, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0x84, 0x00}}, {2, [3]byte{0xc2, 0x85, 0x00}}, + {2, [3]byte{0xc2, 0x86, 0x00}}, {2, [3]byte{0xc2, 0x87, 0x00}}, + {2, [3]byte{0xc2, 0x88, 0x00}}, {2, [3]byte{0xc2, 0x89, 0x00}}, + {2, [3]byte{0xc2, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0x8b, 0x00}}, + {2, [3]byte{0xc2, 0x8c, 0x00}}, {2, [3]byte{0xc2, 0x8d, 0x00}}, + {2, [3]byte{0xc2, 0x8e, 0x00}}, {2, [3]byte{0xc2, 0x8f, 0x00}}, + {2, [3]byte{0xc2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0x92, 0x00}}, {2, [3]byte{0xc2, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0x94, 0x00}}, {2, [3]byte{0xc2, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0x96, 0x00}}, {2, [3]byte{0xc2, 0x97, 0x00}}, + {2, [3]byte{0xc2, 0x98, 0x00}}, {2, [3]byte{0xc2, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9b, 0x00}}, + {2, [3]byte{0xc2, 0x9c, 0x00}}, {2, [3]byte{0xc2, 0x9d, 0x00}}, + {2, [3]byte{0xc2, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0x9f, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xba, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc4, 0x9e, 0x00}}, {2, 
[3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc4, 0xb0, 0x00}}, + {2, [3]byte{0xc5, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x9f, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc4, 0xb1, 0x00}}, + {2, [3]byte{0xc5, 0x9f, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0x80000080, 0x81000081, 0x82000082, 0x83000083, 0x84000084, 0x85000085, 0x86000086, 0x87000087, + 0x88000088, 0x89000089, 0x8a00008a, 0x8b00008b, 0x8c00008c, 0x8d00008d, 0x8e00008e, 0x8f00008f, + 0x90000090, 0x91000091, 0x92000092, 0x93000093, 0x94000094, 0x95000095, 0x96000096, 0x97000097, + 0x98000098, 0x99000099, 0x9a00009a, 0x9b00009b, 0x9c00009c, 0x9d00009d, 0x9e00009e, 0x9f00009f, + 0xa00000a0, 
0xa10000a1, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, + 0xa80000a8, 0xa90000a9, 0xaa0000aa, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, + 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, + 0xb80000b8, 0xb90000b9, 0xba0000ba, 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xbf0000bf, + 0xc00000c0, 0xc10000c1, 0xc20000c2, 0xc30000c3, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc70000c7, + 0xc80000c8, 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, + 0xd10000d1, 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd70000d7, 0xd80000d8, + 0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, 0xdf0000df, 0xe00000e0, 0xe10000e1, 0xe20000e2, + 0xe30000e3, 0xe40000e4, 0xe50000e5, 0xe60000e6, 0xe70000e7, 0xe80000e8, 0xe90000e9, 0xea0000ea, + 0xeb0000eb, 0xec0000ec, 0xed0000ed, 0xee0000ee, 0xef0000ef, 0xf10000f1, 0xf20000f2, 0xf30000f3, + 0xf40000f4, 0xf50000f5, 0xf60000f6, 0xf70000f7, 0xf80000f8, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, + 0xfc0000fc, 0xff0000ff, 0xd000011e, 0xf000011f, 0xdd000130, 0xfd000131, 0xde00015e, 0xfe00015f, + }, +} + +// ISO8859_10 is the ISO 8859-10 encoding. +var ISO8859_10 *Charmap = &iso8859_10 + +var iso8859_10 = Charmap{ + name: "ISO 8859-10", + mib: identifier.ISOLatin6, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 
0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc4, 0x84, 0x00}}, + {2, [3]byte{0xc4, 0x92, 0x00}}, {2, [3]byte{0xc4, 0xa2, 0x00}}, + {2, [3]byte{0xc4, 0xaa, 0x00}}, {2, [3]byte{0xc4, 0xa8, 0x00}}, + {2, [3]byte{0xc4, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc4, 0xbb, 0x00}}, {2, [3]byte{0xc4, 0x90, 0x00}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {2, [3]byte{0xc5, 0xa6, 0x00}}, + {2, 
[3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc5, 0xaa, 0x00}}, {2, [3]byte{0xc5, 0x8a, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc4, 0x85, 0x00}}, + {2, [3]byte{0xc4, 0x93, 0x00}}, {2, [3]byte{0xc4, 0xa3, 0x00}}, + {2, [3]byte{0xc4, 0xab, 0x00}}, {2, [3]byte{0xc4, 0xa9, 0x00}}, + {2, [3]byte{0xc4, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc4, 0xbc, 0x00}}, {2, [3]byte{0xc4, 0x91, 0x00}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {2, [3]byte{0xc5, 0xa7, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x95}}, + {2, [3]byte{0xc5, 0xab, 0x00}}, {2, [3]byte{0xc5, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc4, 0xae, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc4, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {2, [3]byte{0xc5, 0x85, 0x00}}, + {2, [3]byte{0xc5, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc5, 0xa8, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc5, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc4, 0x81, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc4, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc4, 0x99, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc4, 0x97, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc5, 0x86, 0x00}}, + {2, [3]byte{0xc5, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc5, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc5, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc4, 0xb8, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 
0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa70000a7, 0xad0000ad, 0xb00000b0, 0xb70000b7, 0xc10000c1, 0xc20000c2, 0xc30000c3, + 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc90000c9, 0xcb0000cb, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, + 0xd00000d0, 0xd30000d3, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd80000d8, 0xda0000da, 0xdb0000db, + 0xdc0000dc, 0xdd0000dd, 0xde0000de, 0xdf0000df, 0xe10000e1, 0xe20000e2, 0xe30000e3, 0xe40000e4, + 0xe50000e5, 0xe60000e6, 0xe90000e9, 0xeb0000eb, 0xed0000ed, 0xee0000ee, 0xef0000ef, 0xf00000f0, + 0xf30000f3, 0xf40000f4, 0xf50000f5, 0xf60000f6, 0xf80000f8, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, + 0xfd0000fd, 0xfe0000fe, 0xc0000100, 0xe0000101, 0xa1000104, 0xb1000105, 0xc800010c, 0xe800010d, + 0xa9000110, 0xb9000111, 0xa2000112, 0xb2000113, 0xcc000116, 0xec000117, 0xca000118, 0xea000119, + 0xa3000122, 0xb3000123, 0xa5000128, 0xb5000129, 0xa400012a, 0xb400012b, 0xc700012e, 0xe700012f, + 0xa6000136, 0xb6000137, 0xff000138, 0xa800013b, 0xb800013c, 0xd1000145, 0xf1000146, 0xaf00014a, + 0xbf00014b, 0xd200014c, 0xf200014d, 0xaa000160, 0xba000161, 0xab000166, 0xbb000167, 0xd7000168, + 0xf7000169, 0xae00016a, 0xbe00016b, 0xd9000172, 0xf9000173, 0xac00017d, 0xbc00017e, 0xbd002015, + 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, + 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, + 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, + 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, + }, +} + +// ISO8859_13 is the ISO 8859-13 encoding. 
+var ISO8859_13 *Charmap = &iso8859_13 + +var iso8859_13 = Charmap{ + name: "ISO 8859-13", + mib: identifier.ISO885913, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 
0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x9d}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x9e}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc5, 0x96, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc3, 0x86, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9c}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc5, 0x97, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc3, 0xa6, 0x00}}, + {2, [3]byte{0xc4, 0x84, 0x00}}, {2, [3]byte{0xc4, 0xae, 0x00}}, + {2, [3]byte{0xc4, 0x80, 0x00}}, {2, [3]byte{0xc4, 0x86, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc4, 0x98, 0x00}}, {2, [3]byte{0xc4, 0x92, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc5, 0xb9, 0x00}}, {2, [3]byte{0xc4, 0x96, 0x00}}, + {2, [3]byte{0xc4, 0xa2, 0x00}}, {2, [3]byte{0xc4, 0xb6, 0x00}}, + {2, [3]byte{0xc4, 0xaa, 0x00}}, {2, [3]byte{0xc4, 0xbb, 0x00}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {2, 
[3]byte{0xc5, 0x83, 0x00}}, + {2, [3]byte{0xc5, 0x85, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc5, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc5, 0xb2, 0x00}}, {2, [3]byte{0xc5, 0x81, 0x00}}, + {2, [3]byte{0xc5, 0x9a, 0x00}}, {2, [3]byte{0xc5, 0xaa, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc5, 0xbb, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc4, 0x85, 0x00}}, {2, [3]byte{0xc4, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x81, 0x00}}, {2, [3]byte{0xc4, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc4, 0x99, 0x00}}, {2, [3]byte{0xc4, 0x93, 0x00}}, + {2, [3]byte{0xc4, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc5, 0xba, 0x00}}, {2, [3]byte{0xc4, 0x97, 0x00}}, + {2, [3]byte{0xc4, 0xa3, 0x00}}, {2, [3]byte{0xc4, 0xb7, 0x00}}, + {2, [3]byte{0xc4, 0xab, 0x00}}, {2, [3]byte{0xc4, 0xbc, 0x00}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {2, [3]byte{0xc5, 0x84, 0x00}}, + {2, [3]byte{0xc5, 0x86, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc5, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc5, 0xb3, 0x00}}, {2, [3]byte{0xc5, 0x82, 0x00}}, + {2, [3]byte{0xc5, 0x9b, 0x00}}, {2, [3]byte{0xc5, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc5, 0xbc, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x99}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa60000a6, 0xa70000a7, 0xa90000a9, 0xab0000ab, + 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, 0xb50000b5, + 0xb60000b6, 0xb70000b7, 0xb90000b9, 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xc40000c4, + 0xc50000c5, 0xaf0000c6, 0xc90000c9, 0xd30000d3, 0xd50000d5, 0xd60000d6, 0xd70000d7, 0xa80000d8, + 0xdc0000dc, 
0xdf0000df, 0xe40000e4, 0xe50000e5, 0xbf0000e6, 0xe90000e9, 0xf30000f3, 0xf50000f5, + 0xf60000f6, 0xf70000f7, 0xb80000f8, 0xfc0000fc, 0xc2000100, 0xe2000101, 0xc0000104, 0xe0000105, + 0xc3000106, 0xe3000107, 0xc800010c, 0xe800010d, 0xc7000112, 0xe7000113, 0xcb000116, 0xeb000117, + 0xc6000118, 0xe6000119, 0xcc000122, 0xec000123, 0xce00012a, 0xee00012b, 0xc100012e, 0xe100012f, + 0xcd000136, 0xed000137, 0xcf00013b, 0xef00013c, 0xd9000141, 0xf9000142, 0xd1000143, 0xf1000144, + 0xd2000145, 0xf2000146, 0xd400014c, 0xf400014d, 0xaa000156, 0xba000157, 0xda00015a, 0xfa00015b, + 0xd0000160, 0xf0000161, 0xdb00016a, 0xfb00016b, 0xd8000172, 0xf8000173, 0xca000179, 0xea00017a, + 0xdd00017b, 0xfd00017c, 0xde00017d, 0xfe00017e, 0xff002019, 0xb400201c, 0xa100201d, 0xa500201e, + 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, + 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, + 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, + 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, + }, +} + +// ISO8859_14 is the ISO 8859-14 encoding. +var ISO8859_14 *Charmap = &iso8859_14 + +var iso8859_14 = Charmap{ + name: "ISO 8859-14", + mib: identifier.ISO885914, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 
0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xe1, 0xb8, 0x82}}, + {3, [3]byte{0xe1, 0xb8, 0x83}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc4, 0x8a, 0x00}}, {2, [3]byte{0xc4, 0x8b, 0x00}}, + {3, [3]byte{0xe1, 0xb8, 0x8a}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {3, [3]byte{0xe1, 0xba, 0x80}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {3, [3]byte{0xe1, 0xba, 0x82}}, {3, [3]byte{0xe1, 0xb8, 0x8b}}, + {3, 
[3]byte{0xe1, 0xbb, 0xb2}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc5, 0xb8, 0x00}}, + {3, [3]byte{0xe1, 0xb8, 0x9e}}, {3, [3]byte{0xe1, 0xb8, 0x9f}}, + {2, [3]byte{0xc4, 0xa0, 0x00}}, {2, [3]byte{0xc4, 0xa1, 0x00}}, + {3, [3]byte{0xe1, 0xb9, 0x80}}, {3, [3]byte{0xe1, 0xb9, 0x81}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {3, [3]byte{0xe1, 0xb9, 0x96}}, + {3, [3]byte{0xe1, 0xba, 0x81}}, {3, [3]byte{0xe1, 0xb9, 0x97}}, + {3, [3]byte{0xe1, 0xba, 0x83}}, {3, [3]byte{0xe1, 0xb9, 0xa0}}, + {3, [3]byte{0xe1, 0xbb, 0xb3}}, {3, [3]byte{0xe1, 0xba, 0x84}}, + {3, [3]byte{0xe1, 0xba, 0x85}}, {3, [3]byte{0xe1, 0xb9, 0xa1}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc5, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {3, [3]byte{0xe1, 0xb9, 0xaa}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc5, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc5, 0xb5, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {3, [3]byte{0xe1, 0xb9, 0xab}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc5, 0xb7, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 
0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa30000a3, 0xa70000a7, 0xa90000a9, 0xad0000ad, 0xae0000ae, 0xb60000b6, 0xc00000c0, + 0xc10000c1, 0xc20000c2, 0xc30000c3, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc70000c7, 0xc80000c8, + 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, 0xd10000d1, + 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd80000d8, 0xd90000d9, 0xda0000da, + 0xdb0000db, 0xdc0000dc, 0xdd0000dd, 0xdf0000df, 0xe00000e0, 0xe10000e1, 0xe20000e2, 0xe30000e3, + 0xe40000e4, 0xe50000e5, 0xe60000e6, 0xe70000e7, 0xe80000e8, 0xe90000e9, 0xea0000ea, 0xeb0000eb, + 0xec0000ec, 0xed0000ed, 0xee0000ee, 0xef0000ef, 0xf10000f1, 0xf20000f2, 0xf30000f3, 0xf40000f4, + 0xf50000f5, 0xf60000f6, 0xf80000f8, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, 0xfd0000fd, + 0xff0000ff, 0xa400010a, 0xa500010b, 0xb2000120, 0xb3000121, 0xd0000174, 0xf0000175, 0xde000176, + 0xfe000177, 0xaf000178, 0xa1001e02, 0xa2001e03, 0xa6001e0a, 0xab001e0b, 0xb0001e1e, 0xb1001e1f, + 0xb4001e40, 0xb5001e41, 0xb7001e56, 0xb9001e57, 0xbb001e60, 0xbf001e61, 0xd7001e6a, 0xf7001e6b, + 0xa8001e80, 0xb8001e81, 0xaa001e82, 0xba001e83, 0xbd001e84, 0xbe001e85, 0xac001ef2, 0xbc001ef3, + 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, + 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, + 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, + 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, + }, +} + +// ISO8859_15 is the ISO 8859-15 encoding. 
+var ISO8859_15 *Charmap = &iso8859_15 + +var iso8859_15 = Charmap{ + name: "ISO 8859-15", + mib: identifier.ISO885915, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 
0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xba, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc5, 0x92, 0x00}}, {2, [3]byte{0xc5, 0x93, 0x00}}, + {2, [3]byte{0xc5, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {2, 
[3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa10000a1, 0xa20000a2, 0xa30000a3, 0xa50000a5, 0xa70000a7, 0xa90000a9, 0xaa0000aa, + 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, 0xb00000b0, 0xb10000b1, 0xb20000b2, + 0xb30000b3, 0xb50000b5, 0xb60000b6, 0xb70000b7, 0xb90000b9, 0xba0000ba, 0xbb0000bb, 0xbf0000bf, + 0xc00000c0, 0xc10000c1, 0xc20000c2, 0xc30000c3, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc70000c7, + 0xc80000c8, 
0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, + 0xd00000d0, 0xd10000d1, 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd70000d7, + 0xd80000d8, 0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, 0xdd0000dd, 0xde0000de, 0xdf0000df, + 0xe00000e0, 0xe10000e1, 0xe20000e2, 0xe30000e3, 0xe40000e4, 0xe50000e5, 0xe60000e6, 0xe70000e7, + 0xe80000e8, 0xe90000e9, 0xea0000ea, 0xeb0000eb, 0xec0000ec, 0xed0000ed, 0xee0000ee, 0xef0000ef, + 0xf00000f0, 0xf10000f1, 0xf20000f2, 0xf30000f3, 0xf40000f4, 0xf50000f5, 0xf60000f6, 0xf70000f7, + 0xf80000f8, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, 0xfd0000fd, 0xfe0000fe, 0xff0000ff, + 0xbc000152, 0xbd000153, 0xa6000160, 0xa8000161, 0xbe000178, 0xb400017d, 0xb800017e, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + }, +} + +// ISO8859_16 is the ISO 8859-16 encoding. +var ISO8859_16 *Charmap = &iso8859_16 + +var iso8859_16 = Charmap{ + name: "ISO 8859-16", + mib: identifier.ISO885916, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 
0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc4, 0x84, 0x00}}, + {2, [3]byte{0xc4, 0x85, 0x00}}, {2, [3]byte{0xc5, 0x81, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xe2, 0x80, 0x9e}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc8, 0x98, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, 
[3]byte{0xc5, 0xb9, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc5, 0xba, 0x00}}, {2, [3]byte{0xc5, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc5, 0x82, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x9d}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {2, [3]byte{0xc4, 0x8d, 0x00}}, + {2, [3]byte{0xc8, 0x99, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc5, 0x92, 0x00}}, {2, [3]byte{0xc5, 0x93, 0x00}}, + {2, [3]byte{0xc5, 0xb8, 0x00}}, {2, [3]byte{0xc5, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc4, 0x82, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc4, 0x86, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc4, 0x90, 0x00}}, {2, [3]byte{0xc5, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc5, 0x90, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc5, 0x9a, 0x00}}, + {2, [3]byte{0xc5, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc4, 0x98, 0x00}}, + {2, [3]byte{0xc8, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc4, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc4, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x91, 0x00}}, {2, [3]byte{0xc5, 0x84, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc5, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc5, 0x9b, 0x00}}, + {2, [3]byte{0xc5, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc4, 0x99, 0x00}}, + {2, [3]byte{0xc8, 0x9b, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 
0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa70000a7, 0xa90000a9, 0xab0000ab, 0xad0000ad, 0xb00000b0, 0xb10000b1, 0xb60000b6, + 0xb70000b7, 0xbb0000bb, 0xc00000c0, 0xc10000c1, 0xc20000c2, 0xc40000c4, 0xc60000c6, 0xc70000c7, + 0xc80000c8, 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, + 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd60000d6, 0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, + 0xdf0000df, 0xe00000e0, 0xe10000e1, 0xe20000e2, 0xe40000e4, 0xe60000e6, 0xe70000e7, 0xe80000e8, + 0xe90000e9, 0xea0000ea, 0xeb0000eb, 0xec0000ec, 0xed0000ed, 0xee0000ee, 0xef0000ef, 0xf20000f2, + 0xf30000f3, 0xf40000f4, 0xf60000f6, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, 0xff0000ff, + 0xc3000102, 0xe3000103, 0xa1000104, 0xa2000105, 0xc5000106, 0xe5000107, 0xb200010c, 0xb900010d, + 0xd0000110, 0xf0000111, 0xdd000118, 0xfd000119, 0xa3000141, 0xb3000142, 0xd1000143, 0xf1000144, + 0xd5000150, 0xf5000151, 0xbc000152, 0xbd000153, 0xd700015a, 0xf700015b, 0xa6000160, 0xa8000161, + 0xd8000170, 0xf8000171, 0xbe000178, 0xac000179, 0xae00017a, 0xaf00017b, 0xbf00017c, 0xb400017d, + 0xb800017e, 0xaa000218, 0xba000219, 0xde00021a, 0xfe00021b, 0xb500201d, 0xa500201e, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + }, +} + +// KOI8R is the KOI8-R encoding. 
+var KOI8R *Charmap = &koi8R + +var koi8R = Charmap{ + name: "KOI8-R", + mib: identifier.KOI8R, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, 
[3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x9c}}, {3, [3]byte{0xe2, 0x94, 0xa4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xbc}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x90}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x8c, 0xa0}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {3, [3]byte{0xe2, 0x88, 0x9a}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {3, [3]byte{0xe2, 0x89, 0xa4}}, {3, [3]byte{0xe2, 0x89, 0xa5}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x90}}, {3, [3]byte{0xe2, 0x95, 0x91}}, + {3, [3]byte{0xe2, 0x95, 0x92}}, {2, [3]byte{0xd1, 0x91, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x97}}, {3, [3]byte{0xe2, 0x95, 0x98}}, + {3, [3]byte{0xe2, 0x95, 0x99}}, {3, [3]byte{0xe2, 0x95, 0x9a}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9e}}, + {3, [3]byte{0xe2, 0x95, 0x9f}}, {3, [3]byte{0xe2, 0x95, 0xa0}}, + {3, [3]byte{0xe2, 0x95, 0xa1}}, {2, [3]byte{0xd0, 0x81, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa5}}, + {3, [3]byte{0xe2, 0x95, 0xa6}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa9}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xd1, 0x8e, 0x00}}, {2, [3]byte{0xd0, 0xb0, 0x00}}, + {2, [3]byte{0xd0, 0xb1, 0x00}}, {2, [3]byte{0xd1, 0x86, 0x00}}, + {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0xb5, 0x00}}, + {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd0, 0xb3, 0x00}}, + {2, [3]byte{0xd1, 0x85, 0x00}}, {2, [3]byte{0xd0, 0xb8, 0x00}}, + {2, [3]byte{0xd0, 0xb9, 0x00}}, {2, [3]byte{0xd0, 0xba, 0x00}}, + {2, [3]byte{0xd0, 0xbb, 0x00}}, {2, [3]byte{0xd0, 0xbc, 0x00}}, + {2, [3]byte{0xd0, 0xbd, 0x00}}, {2, [3]byte{0xd0, 0xbe, 0x00}}, + {2, [3]byte{0xd0, 0xbf, 0x00}}, {2, [3]byte{0xd1, 0x8f, 
0x00}}, + {2, [3]byte{0xd1, 0x80, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x82, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, + {2, [3]byte{0xd0, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0xb2, 0x00}}, + {2, [3]byte{0xd1, 0x8c, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, + {2, [3]byte{0xd0, 0xb7, 0x00}}, {2, [3]byte{0xd1, 0x88, 0x00}}, + {2, [3]byte{0xd1, 0x8d, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, + {2, [3]byte{0xd1, 0x87, 0x00}}, {2, [3]byte{0xd1, 0x8a, 0x00}}, + {2, [3]byte{0xd0, 0xae, 0x00}}, {2, [3]byte{0xd0, 0x90, 0x00}}, + {2, [3]byte{0xd0, 0x91, 0x00}}, {2, [3]byte{0xd0, 0xa6, 0x00}}, + {2, [3]byte{0xd0, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, + {2, [3]byte{0xd0, 0xa4, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, + {2, [3]byte{0xd0, 0xa5, 0x00}}, {2, [3]byte{0xd0, 0x98, 0x00}}, + {2, [3]byte{0xd0, 0x99, 0x00}}, {2, [3]byte{0xd0, 0x9a, 0x00}}, + {2, [3]byte{0xd0, 0x9b, 0x00}}, {2, [3]byte{0xd0, 0x9c, 0x00}}, + {2, [3]byte{0xd0, 0x9d, 0x00}}, {2, [3]byte{0xd0, 0x9e, 0x00}}, + {2, [3]byte{0xd0, 0x9f, 0x00}}, {2, [3]byte{0xd0, 0xaf, 0x00}}, + {2, [3]byte{0xd0, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0xa1, 0x00}}, + {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd0, 0xa3, 0x00}}, + {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x92, 0x00}}, + {2, [3]byte{0xd0, 0xac, 0x00}}, {2, [3]byte{0xd0, 0xab, 0x00}}, + {2, [3]byte{0xd0, 0x97, 0x00}}, {2, [3]byte{0xd0, 0xa8, 0x00}}, + {2, [3]byte{0xd0, 0xad, 0x00}}, {2, [3]byte{0xd0, 0xa9, 0x00}}, + {2, [3]byte{0xd0, 0xa7, 0x00}}, {2, [3]byte{0xd0, 0xaa, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0x9a0000a0, 0xbf0000a9, 0x9c0000b0, 0x9d0000b2, 0x9e0000b7, 0x9f0000f7, 0xb3000401, 0xe1000410, + 0xe2000411, 0xf7000412, 0xe7000413, 0xe4000414, 0xe5000415, 0xf6000416, 0xfa000417, 0xe9000418, + 0xea000419, 0xeb00041a, 0xec00041b, 0xed00041c, 0xee00041d, 0xef00041e, 0xf000041f, 0xf2000420, + 0xf3000421, 0xf4000422, 0xf5000423, 0xe6000424, 0xe8000425, 0xe3000426, 0xfe000427, 0xfb000428, + 0xfd000429, 0xff00042a, 0xf900042b, 
0xf800042c, 0xfc00042d, 0xe000042e, 0xf100042f, 0xc1000430, + 0xc2000431, 0xd7000432, 0xc7000433, 0xc4000434, 0xc5000435, 0xd6000436, 0xda000437, 0xc9000438, + 0xca000439, 0xcb00043a, 0xcc00043b, 0xcd00043c, 0xce00043d, 0xcf00043e, 0xd000043f, 0xd2000440, + 0xd3000441, 0xd4000442, 0xd5000443, 0xc6000444, 0xc8000445, 0xc3000446, 0xde000447, 0xdb000448, + 0xdd000449, 0xdf00044a, 0xd900044b, 0xd800044c, 0xdc00044d, 0xc000044e, 0xd100044f, 0xa3000451, + 0x95002219, 0x9600221a, 0x97002248, 0x98002264, 0x99002265, 0x93002320, 0x9b002321, 0x80002500, + 0x81002502, 0x8200250c, 0x83002510, 0x84002514, 0x85002518, 0x8600251c, 0x87002524, 0x8800252c, + 0x89002534, 0x8a00253c, 0xa0002550, 0xa1002551, 0xa2002552, 0xa4002553, 0xa5002554, 0xa6002555, + 0xa7002556, 0xa8002557, 0xa9002558, 0xaa002559, 0xab00255a, 0xac00255b, 0xad00255c, 0xae00255d, + 0xaf00255e, 0xb000255f, 0xb1002560, 0xb2002561, 0xb4002562, 0xb5002563, 0xb6002564, 0xb7002565, + 0xb8002566, 0xb9002567, 0xba002568, 0xbb002569, 0xbc00256a, 0xbd00256b, 0xbe00256c, 0x8b002580, + 0x8c002584, 0x8d002588, 0x8e00258c, 0x8f002590, 0x90002591, 0x91002592, 0x92002593, 0x940025a0, + }, +} + +// KOI8U is the KOI8-U encoding. +var KOI8U *Charmap = &koi8U + +var koi8U = Charmap{ + name: "KOI8-U", + mib: identifier.KOI8U, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, 
[3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x9c}}, {3, [3]byte{0xe2, 0x94, 0xa4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xbc}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x90}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x8c, 0xa0}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {3, [3]byte{0xe2, 0x88, 0x9a}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {3, [3]byte{0xe2, 0x89, 0xa4}}, {3, [3]byte{0xe2, 0x89, 0xa5}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x90}}, {3, [3]byte{0xe2, 0x95, 0x91}}, + {3, [3]byte{0xe2, 0x95, 0x92}}, {2, [3]byte{0xd1, 0x91, 0x00}}, + {2, [3]byte{0xd1, 0x94, 0x00}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {2, [3]byte{0xd1, 0x96, 0x00}}, {2, [3]byte{0xd1, 0x97, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x97}}, {3, [3]byte{0xe2, 0x95, 0x98}}, + {3, [3]byte{0xe2, 0x95, 0x99}}, {3, [3]byte{0xe2, 0x95, 0x9a}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {2, [3]byte{0xd2, 0x91, 
0x00}}, + {2, [3]byte{0xd1, 0x9e, 0x00}}, {3, [3]byte{0xe2, 0x95, 0x9e}}, + {3, [3]byte{0xe2, 0x95, 0x9f}}, {3, [3]byte{0xe2, 0x95, 0xa0}}, + {3, [3]byte{0xe2, 0x95, 0xa1}}, {2, [3]byte{0xd0, 0x81, 0x00}}, + {2, [3]byte{0xd0, 0x84, 0x00}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {2, [3]byte{0xd0, 0x86, 0x00}}, {2, [3]byte{0xd0, 0x87, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0xa6}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa9}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {2, [3]byte{0xd2, 0x90, 0x00}}, + {2, [3]byte{0xd0, 0x8e, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xd1, 0x8e, 0x00}}, {2, [3]byte{0xd0, 0xb0, 0x00}}, + {2, [3]byte{0xd0, 0xb1, 0x00}}, {2, [3]byte{0xd1, 0x86, 0x00}}, + {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0xb5, 0x00}}, + {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd0, 0xb3, 0x00}}, + {2, [3]byte{0xd1, 0x85, 0x00}}, {2, [3]byte{0xd0, 0xb8, 0x00}}, + {2, [3]byte{0xd0, 0xb9, 0x00}}, {2, [3]byte{0xd0, 0xba, 0x00}}, + {2, [3]byte{0xd0, 0xbb, 0x00}}, {2, [3]byte{0xd0, 0xbc, 0x00}}, + {2, [3]byte{0xd0, 0xbd, 0x00}}, {2, [3]byte{0xd0, 0xbe, 0x00}}, + {2, [3]byte{0xd0, 0xbf, 0x00}}, {2, [3]byte{0xd1, 0x8f, 0x00}}, + {2, [3]byte{0xd1, 0x80, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x82, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, + {2, [3]byte{0xd0, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0xb2, 0x00}}, + {2, [3]byte{0xd1, 0x8c, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, + {2, [3]byte{0xd0, 0xb7, 0x00}}, {2, [3]byte{0xd1, 0x88, 0x00}}, + {2, [3]byte{0xd1, 0x8d, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, + {2, [3]byte{0xd1, 0x87, 0x00}}, {2, [3]byte{0xd1, 0x8a, 0x00}}, + {2, [3]byte{0xd0, 0xae, 0x00}}, {2, [3]byte{0xd0, 0x90, 0x00}}, + {2, [3]byte{0xd0, 0x91, 0x00}}, {2, [3]byte{0xd0, 0xa6, 0x00}}, + {2, [3]byte{0xd0, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, + {2, [3]byte{0xd0, 0xa4, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, + {2, [3]byte{0xd0, 0xa5, 0x00}}, {2, [3]byte{0xd0, 0x98, 0x00}}, + {2, [3]byte{0xd0, 0x99, 0x00}}, {2, [3]byte{0xd0, 0x9a, 0x00}}, + {2, [3]byte{0xd0, 0x9b, 0x00}}, {2, [3]byte{0xd0, 0x9c, 0x00}}, + {2, [3]byte{0xd0, 0x9d, 0x00}}, {2, [3]byte{0xd0, 0x9e, 0x00}}, + {2, [3]byte{0xd0, 0x9f, 0x00}}, {2, [3]byte{0xd0, 0xaf, 0x00}}, + {2, [3]byte{0xd0, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0xa1, 0x00}}, + {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd0, 0xa3, 0x00}}, + {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x92, 0x00}}, + {2, [3]byte{0xd0, 0xac, 0x00}}, {2, [3]byte{0xd0, 0xab, 0x00}}, + {2, [3]byte{0xd0, 0x97, 0x00}}, {2, [3]byte{0xd0, 0xa8, 0x00}}, + {2, [3]byte{0xd0, 0xad, 0x00}}, {2, [3]byte{0xd0, 0xa9, 0x00}}, + {2, [3]byte{0xd0, 0xa7, 0x00}}, {2, [3]byte{0xd0, 0xaa, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 
0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0x9a0000a0, 0xbf0000a9, 0x9c0000b0, 0x9d0000b2, 0x9e0000b7, 0x9f0000f7, 0xb3000401, 0xb4000404, + 0xb6000406, 0xb7000407, 0xbe00040e, 0xe1000410, 0xe2000411, 0xf7000412, 0xe7000413, 0xe4000414, + 0xe5000415, 0xf6000416, 0xfa000417, 0xe9000418, 0xea000419, 0xeb00041a, 0xec00041b, 0xed00041c, + 0xee00041d, 0xef00041e, 0xf000041f, 0xf2000420, 0xf3000421, 0xf4000422, 0xf5000423, 0xe6000424, + 0xe8000425, 0xe3000426, 0xfe000427, 0xfb000428, 0xfd000429, 0xff00042a, 0xf900042b, 0xf800042c, + 0xfc00042d, 0xe000042e, 0xf100042f, 0xc1000430, 0xc2000431, 0xd7000432, 0xc7000433, 0xc4000434, + 0xc5000435, 0xd6000436, 0xda000437, 0xc9000438, 0xca000439, 0xcb00043a, 0xcc00043b, 0xcd00043c, + 0xce00043d, 0xcf00043e, 0xd000043f, 0xd2000440, 0xd3000441, 0xd4000442, 0xd5000443, 0xc6000444, + 0xc8000445, 0xc3000446, 0xde000447, 0xdb000448, 0xdd000449, 0xdf00044a, 0xd900044b, 0xd800044c, + 0xdc00044d, 0xc000044e, 0xd100044f, 0xa3000451, 0xa4000454, 0xa6000456, 0xa7000457, 0xae00045e, + 0xbd000490, 0xad000491, 0x95002219, 0x9600221a, 0x97002248, 0x98002264, 0x99002265, 0x93002320, + 0x9b002321, 0x80002500, 0x81002502, 0x8200250c, 0x83002510, 0x84002514, 0x85002518, 0x8600251c, + 0x87002524, 0x8800252c, 0x89002534, 0x8a00253c, 0xa0002550, 0xa1002551, 0xa2002552, 0xa5002554, + 0xa8002557, 0xa9002558, 0xaa002559, 0xab00255a, 0xac00255b, 0xaf00255e, 0xb000255f, 0xb1002560, + 0xb2002561, 0xb5002563, 0xb8002566, 0xb9002567, 0xba002568, 0xbb002569, 0xbc00256a, 0x8b002580, + 0x8c002584, 0x8d002588, 0x8e00258c, 0x8f002590, 0x90002591, 0x91002592, 0x92002593, 0x940025a0, + }, +} + +// Macintosh is the Macintosh encoding. 
+var Macintosh *Charmap = &macintosh + +var macintosh = Charmap{ + name: "Macintosh", + mib: identifier.Macintosh, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 
0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x91, 0x00}}, {2, [3]byte{0xc3, 0x96, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa5, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa8, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xad, 0x00}}, {2, [3]byte{0xc3, 0xac, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xbb, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {2, [3]byte{0xc2, 0xb0, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa7, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {3, [3]byte{0xe2, 0x84, 0xa2}}, {2, [3]byte{0xc2, 0xb4, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {3, [3]byte{0xe2, 0x89, 0xa0}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x98, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa4}}, {3, [3]byte{0xe2, 0x89, 0xa5}}, + {2, [3]byte{0xc2, 0xa5, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x82}}, {3, [3]byte{0xe2, 0x88, 0x91}}, + {3, [3]byte{0xe2, 0x88, 0x8f}}, {2, [3]byte{0xcf, 0x80, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0xab}}, {2, [3]byte{0xc2, 0xaa, 0x00}}, + {2, [3]byte{0xc2, 0xba, 0x00}}, {2, [3]byte{0xce, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {2, [3]byte{0xc6, 0x92, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {3, [3]byte{0xe2, 0x88, 0x86}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xbb, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0x80, 0x00}}, + {2, [3]byte{0xc3, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc5, 0x92, 0x00}}, {2, [3]byte{0xc5, 0x93, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, 
[3]byte{0xe2, 0x80, 0x94}}, + {3, [3]byte{0xe2, 0x80, 0x9c}}, {3, [3]byte{0xe2, 0x80, 0x9d}}, + {3, [3]byte{0xe2, 0x80, 0x98}}, {3, [3]byte{0xe2, 0x80, 0x99}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x97, 0x8a}}, + {2, [3]byte{0xc3, 0xbf, 0x00}}, {2, [3]byte{0xc5, 0xb8, 0x00}}, + {3, [3]byte{0xe2, 0x81, 0x84}}, {3, [3]byte{0xe2, 0x82, 0xac}}, + {3, [3]byte{0xe2, 0x80, 0xb9}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {3, [3]byte{0xef, 0xac, 0x81}}, {3, [3]byte{0xef, 0xac, 0x82}}, + {3, [3]byte{0xe2, 0x80, 0xa1}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {3, [3]byte{0xe2, 0x80, 0x9e}}, + {3, [3]byte{0xe2, 0x80, 0xb0}}, {2, [3]byte{0xc3, 0x82, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x8b, 0x00}}, {2, [3]byte{0xc3, 0x88, 0x00}}, + {2, [3]byte{0xc3, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0x8e, 0x00}}, + {2, [3]byte{0xc3, 0x8f, 0x00}}, {2, [3]byte{0xc3, 0x8c, 0x00}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x94, 0x00}}, + {3, [3]byte{0xef, 0xa3, 0xbf}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x99, 0x00}}, {2, [3]byte{0xc4, 0xb1, 0x00}}, + {2, [3]byte{0xcb, 0x86, 0x00}}, {2, [3]byte{0xcb, 0x9c, 0x00}}, + {2, [3]byte{0xc2, 0xaf, 0x00}}, {2, [3]byte{0xcb, 0x98, 0x00}}, + {2, [3]byte{0xcb, 0x99, 0x00}}, {2, [3]byte{0xcb, 0x9a, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xcb, 0x9d, 0x00}}, + {2, [3]byte{0xcb, 0x9b, 0x00}}, {2, [3]byte{0xcb, 0x87, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xca0000a0, 0xc10000a1, 0xa20000a2, 0xa30000a3, 0xb40000a5, 0xa40000a7, 0xac0000a8, 0xa90000a9, + 0xbb0000aa, 0xc70000ab, 0xc20000ac, 0xa80000ae, 0xf80000af, 0xa10000b0, 0xb10000b1, 0xab0000b4, + 0xb50000b5, 0xa60000b6, 0xe10000b7, 0xfc0000b8, 0xbc0000ba, 0xc80000bb, 0xc00000bf, 0xcb0000c0, + 0xe70000c1, 0xe50000c2, 0xcc0000c3, 0x800000c4, 0x810000c5, 0xae0000c6, 0x820000c7, 0xe90000c8, + 0x830000c9, 
0xe60000ca, 0xe80000cb, 0xed0000cc, 0xea0000cd, 0xeb0000ce, 0xec0000cf, 0x840000d1, + 0xf10000d2, 0xee0000d3, 0xef0000d4, 0xcd0000d5, 0x850000d6, 0xaf0000d8, 0xf40000d9, 0xf20000da, + 0xf30000db, 0x860000dc, 0xa70000df, 0x880000e0, 0x870000e1, 0x890000e2, 0x8b0000e3, 0x8a0000e4, + 0x8c0000e5, 0xbe0000e6, 0x8d0000e7, 0x8f0000e8, 0x8e0000e9, 0x900000ea, 0x910000eb, 0x930000ec, + 0x920000ed, 0x940000ee, 0x950000ef, 0x960000f1, 0x980000f2, 0x970000f3, 0x990000f4, 0x9b0000f5, + 0x9a0000f6, 0xd60000f7, 0xbf0000f8, 0x9d0000f9, 0x9c0000fa, 0x9e0000fb, 0x9f0000fc, 0xd80000ff, + 0xf5000131, 0xce000152, 0xcf000153, 0xd9000178, 0xc4000192, 0xf60002c6, 0xff0002c7, 0xf90002d8, + 0xfa0002d9, 0xfb0002da, 0xfe0002db, 0xf70002dc, 0xfd0002dd, 0xbd0003a9, 0xb90003c0, 0xd0002013, + 0xd1002014, 0xd4002018, 0xd5002019, 0xe200201a, 0xd200201c, 0xd300201d, 0xe300201e, 0xa0002020, + 0xe0002021, 0xa5002022, 0xc9002026, 0xe4002030, 0xdc002039, 0xdd00203a, 0xda002044, 0xdb0020ac, + 0xaa002122, 0xb6002202, 0xc6002206, 0xb800220f, 0xb7002211, 0xc300221a, 0xb000221e, 0xba00222b, + 0xc5002248, 0xad002260, 0xb2002264, 0xb3002265, 0xd70025ca, 0xf000f8ff, 0xde00fb01, 0xdf00fb02, + }, +} + +// MacintoshCyrillic is the Macintosh Cyrillic encoding. +var MacintoshCyrillic *Charmap = &macintoshCyrillic + +var macintoshCyrillic = Charmap{ + name: "Macintosh Cyrillic", + mib: identifier.MacintoshCyrillic, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, 
[3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xd0, 0x90, 0x00}}, {2, [3]byte{0xd0, 0x91, 0x00}}, + {2, [3]byte{0xd0, 0x92, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, + {2, [3]byte{0xd0, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, + {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x99, 0x00}}, + {2, [3]byte{0xd0, 0x9a, 0x00}}, {2, [3]byte{0xd0, 0x9b, 0x00}}, + {2, [3]byte{0xd0, 0x9c, 0x00}}, {2, [3]byte{0xd0, 0x9d, 0x00}}, + {2, [3]byte{0xd0, 0x9e, 0x00}}, {2, [3]byte{0xd0, 0x9f, 0x00}}, + {2, [3]byte{0xd0, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0xa1, 0x00}}, + {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd0, 0xa3, 0x00}}, + {2, [3]byte{0xd0, 0xa4, 0x00}}, {2, [3]byte{0xd0, 0xa5, 0x00}}, + {2, [3]byte{0xd0, 0xa6, 0x00}}, {2, [3]byte{0xd0, 0xa7, 0x00}}, + {2, [3]byte{0xd0, 0xa8, 0x00}}, {2, [3]byte{0xd0, 0xa9, 0x00}}, + {2, [3]byte{0xd0, 0xaa, 0x00}}, {2, [3]byte{0xd0, 0xab, 0x00}}, + {2, [3]byte{0xd0, 0xac, 0x00}}, {2, [3]byte{0xd0, 0xad, 0x00}}, + {2, [3]byte{0xd0, 0xae, 0x00}}, {2, [3]byte{0xd0, 0xaf, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {2, [3]byte{0xc2, 0xb0, 0x00}}, + {2, [3]byte{0xd2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa7, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0x86, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {3, [3]byte{0xe2, 0x84, 
0xa2}}, {2, [3]byte{0xd0, 0x82, 0x00}}, + {2, [3]byte{0xd1, 0x92, 0x00}}, {3, [3]byte{0xe2, 0x89, 0xa0}}, + {2, [3]byte{0xd0, 0x83, 0x00}}, {2, [3]byte{0xd1, 0x93, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa4}}, {3, [3]byte{0xe2, 0x89, 0xa5}}, + {2, [3]byte{0xd1, 0x96, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xd2, 0x91, 0x00}}, {2, [3]byte{0xd0, 0x88, 0x00}}, + {2, [3]byte{0xd0, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x94, 0x00}}, + {2, [3]byte{0xd0, 0x87, 0x00}}, {2, [3]byte{0xd1, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x89, 0x00}}, {2, [3]byte{0xd1, 0x99, 0x00}}, + {2, [3]byte{0xd0, 0x8a, 0x00}}, {2, [3]byte{0xd1, 0x9a, 0x00}}, + {2, [3]byte{0xd1, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x85, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {2, [3]byte{0xc6, 0x92, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {3, [3]byte{0xe2, 0x88, 0x86}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xbb, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0x8b, 0x00}}, + {2, [3]byte{0xd1, 0x9b, 0x00}}, {2, [3]byte{0xd0, 0x8c, 0x00}}, + {2, [3]byte{0xd1, 0x9c, 0x00}}, {2, [3]byte{0xd1, 0x95, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {3, [3]byte{0xe2, 0x80, 0x9c}}, {3, [3]byte{0xe2, 0x80, 0x9d}}, + {3, [3]byte{0xe2, 0x80, 0x98}}, {3, [3]byte{0xe2, 0x80, 0x99}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x9e}}, + {2, [3]byte{0xd0, 0x8e, 0x00}}, {2, [3]byte{0xd1, 0x9e, 0x00}}, + {2, [3]byte{0xd0, 0x8f, 0x00}}, {2, [3]byte{0xd1, 0x9f, 0x00}}, + {3, [3]byte{0xe2, 0x84, 0x96}}, {2, [3]byte{0xd0, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x91, 0x00}}, {2, [3]byte{0xd1, 0x8f, 0x00}}, + {2, [3]byte{0xd0, 0xb0, 0x00}}, {2, [3]byte{0xd0, 0xb1, 0x00}}, + {2, [3]byte{0xd0, 0xb2, 0x00}}, {2, [3]byte{0xd0, 0xb3, 0x00}}, + {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0xb5, 0x00}}, + {2, [3]byte{0xd0, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0xb7, 0x00}}, + {2, [3]byte{0xd0, 0xb8, 0x00}}, {2, [3]byte{0xd0, 0xb9, 0x00}}, + {2, [3]byte{0xd0, 0xba, 0x00}}, {2, [3]byte{0xd0, 0xbb, 0x00}}, + {2, [3]byte{0xd0, 0xbc, 0x00}}, {2, [3]byte{0xd0, 0xbd, 0x00}}, + {2, [3]byte{0xd0, 0xbe, 0x00}}, {2, [3]byte{0xd0, 0xbf, 0x00}}, + {2, [3]byte{0xd1, 0x80, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x82, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, + {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x85, 0x00}}, + {2, [3]byte{0xd1, 0x86, 0x00}}, {2, [3]byte{0xd1, 0x87, 0x00}}, + {2, [3]byte{0xd1, 0x88, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, + {2, [3]byte{0xd1, 0x8a, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, + {2, [3]byte{0xd1, 0x8c, 0x00}}, {2, [3]byte{0xd1, 0x8d, 0x00}}, + {2, [3]byte{0xd1, 0x8e, 0x00}}, {3, [3]byte{0xe2, 0x82, 0xac}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 
0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xca0000a0, 0xa30000a3, 0xa40000a7, 0xa90000a9, 0xc70000ab, 0xc20000ac, 0xa80000ae, 0xa10000b0, + 0xb10000b1, 0xb50000b5, 0xa60000b6, 0xc80000bb, 0xd60000f7, 0xc4000192, 0xdd000401, 0xab000402, + 0xae000403, 0xb8000404, 0xc1000405, 0xa7000406, 0xba000407, 0xb7000408, 0xbc000409, 0xbe00040a, + 0xcb00040b, 0xcd00040c, 0xd800040e, 0xda00040f, 0x80000410, 0x81000411, 0x82000412, 0x83000413, + 0x84000414, 0x85000415, 0x86000416, 0x87000417, 0x88000418, 0x89000419, 0x8a00041a, 0x8b00041b, + 0x8c00041c, 0x8d00041d, 0x8e00041e, 0x8f00041f, 0x90000420, 0x91000421, 0x92000422, 0x93000423, + 0x94000424, 0x95000425, 0x96000426, 0x97000427, 0x98000428, 0x99000429, 0x9a00042a, 0x9b00042b, + 0x9c00042c, 0x9d00042d, 0x9e00042e, 0x9f00042f, 0xe0000430, 0xe1000431, 0xe2000432, 0xe3000433, + 0xe4000434, 0xe5000435, 0xe6000436, 0xe7000437, 0xe8000438, 0xe9000439, 0xea00043a, 0xeb00043b, + 0xec00043c, 0xed00043d, 0xee00043e, 0xef00043f, 0xf0000440, 0xf1000441, 0xf2000442, 0xf3000443, + 0xf4000444, 0xf5000445, 0xf6000446, 0xf7000447, 0xf8000448, 0xf9000449, 0xfa00044a, 0xfb00044b, + 0xfc00044c, 0xfd00044d, 0xfe00044e, 0xdf00044f, 0xde000451, 0xac000452, 0xaf000453, 0xb9000454, + 0xcf000455, 0xb4000456, 0xbb000457, 0xc0000458, 0xbd000459, 0xbf00045a, 0xcc00045b, 0xce00045c, + 0xd900045e, 0xdb00045f, 0xa2000490, 0xb6000491, 0xd0002013, 0xd1002014, 0xd4002018, 0xd5002019, + 0xd200201c, 0xd300201d, 0xd700201e, 0xa0002020, 0xa5002022, 0xc9002026, 0xff0020ac, 0xdc002116, + 0xaa002122, 0xc6002206, 0xc300221a, 0xb000221e, 0xc5002248, 0xad002260, 0xb2002264, 0xb3002265, + }, +} + +// Windows874 is the Windows 874 encoding. 
+var Windows874 *Charmap = &windows874 + +var windows874 = Charmap{ + name: "Windows 874", + mib: identifier.Windows874, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 
0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xe0, 0xb8, 0x81}}, + {3, [3]byte{0xe0, 0xb8, 0x82}}, {3, [3]byte{0xe0, 0xb8, 0x83}}, + {3, [3]byte{0xe0, 0xb8, 0x84}}, {3, [3]byte{0xe0, 0xb8, 0x85}}, + {3, [3]byte{0xe0, 0xb8, 0x86}}, {3, [3]byte{0xe0, 0xb8, 0x87}}, + {3, [3]byte{0xe0, 0xb8, 0x88}}, {3, [3]byte{0xe0, 0xb8, 0x89}}, + {3, [3]byte{0xe0, 0xb8, 0x8a}}, {3, [3]byte{0xe0, 0xb8, 0x8b}}, + {3, [3]byte{0xe0, 0xb8, 0x8c}}, {3, [3]byte{0xe0, 0xb8, 0x8d}}, + {3, [3]byte{0xe0, 0xb8, 0x8e}}, {3, [3]byte{0xe0, 0xb8, 0x8f}}, + {3, [3]byte{0xe0, 0xb8, 0x90}}, {3, [3]byte{0xe0, 0xb8, 0x91}}, + {3, [3]byte{0xe0, 0xb8, 0x92}}, {3, [3]byte{0xe0, 0xb8, 0x93}}, + {3, [3]byte{0xe0, 0xb8, 0x94}}, {3, [3]byte{0xe0, 0xb8, 0x95}}, + {3, [3]byte{0xe0, 0xb8, 0x96}}, {3, [3]byte{0xe0, 0xb8, 0x97}}, + {3, [3]byte{0xe0, 0xb8, 0x98}}, {3, [3]byte{0xe0, 0xb8, 0x99}}, + {3, [3]byte{0xe0, 0xb8, 0x9a}}, {3, [3]byte{0xe0, 0xb8, 0x9b}}, + {3, [3]byte{0xe0, 0xb8, 0x9c}}, {3, [3]byte{0xe0, 0xb8, 0x9d}}, + {3, [3]byte{0xe0, 0xb8, 0x9e}}, {3, [3]byte{0xe0, 0xb8, 0x9f}}, + {3, [3]byte{0xe0, 0xb8, 0xa0}}, {3, [3]byte{0xe0, 0xb8, 0xa1}}, + {3, [3]byte{0xe0, 0xb8, 0xa2}}, {3, [3]byte{0xe0, 0xb8, 0xa3}}, + {3, [3]byte{0xe0, 0xb8, 0xa4}}, {3, [3]byte{0xe0, 0xb8, 0xa5}}, + {3, [3]byte{0xe0, 0xb8, 0xa6}}, {3, [3]byte{0xe0, 0xb8, 0xa7}}, + {3, [3]byte{0xe0, 0xb8, 0xa8}}, {3, [3]byte{0xe0, 0xb8, 0xa9}}, + {3, [3]byte{0xe0, 0xb8, 0xaa}}, {3, [3]byte{0xe0, 0xb8, 0xab}}, + {3, [3]byte{0xe0, 0xb8, 0xac}}, {3, [3]byte{0xe0, 0xb8, 0xad}}, + {3, [3]byte{0xe0, 0xb8, 0xae}}, {3, [3]byte{0xe0, 0xb8, 0xaf}}, + {3, [3]byte{0xe0, 0xb8, 0xb0}}, {3, 
[3]byte{0xe0, 0xb8, 0xb1}}, + {3, [3]byte{0xe0, 0xb8, 0xb2}}, {3, [3]byte{0xe0, 0xb8, 0xb3}}, + {3, [3]byte{0xe0, 0xb8, 0xb4}}, {3, [3]byte{0xe0, 0xb8, 0xb5}}, + {3, [3]byte{0xe0, 0xb8, 0xb6}}, {3, [3]byte{0xe0, 0xb8, 0xb7}}, + {3, [3]byte{0xe0, 0xb8, 0xb8}}, {3, [3]byte{0xe0, 0xb8, 0xb9}}, + {3, [3]byte{0xe0, 0xb8, 0xba}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe0, 0xb8, 0xbf}}, + {3, [3]byte{0xe0, 0xb9, 0x80}}, {3, [3]byte{0xe0, 0xb9, 0x81}}, + {3, [3]byte{0xe0, 0xb9, 0x82}}, {3, [3]byte{0xe0, 0xb9, 0x83}}, + {3, [3]byte{0xe0, 0xb9, 0x84}}, {3, [3]byte{0xe0, 0xb9, 0x85}}, + {3, [3]byte{0xe0, 0xb9, 0x86}}, {3, [3]byte{0xe0, 0xb9, 0x87}}, + {3, [3]byte{0xe0, 0xb9, 0x88}}, {3, [3]byte{0xe0, 0xb9, 0x89}}, + {3, [3]byte{0xe0, 0xb9, 0x8a}}, {3, [3]byte{0xe0, 0xb9, 0x8b}}, + {3, [3]byte{0xe0, 0xb9, 0x8c}}, {3, [3]byte{0xe0, 0xb9, 0x8d}}, + {3, [3]byte{0xe0, 0xb9, 0x8e}}, {3, [3]byte{0xe0, 0xb9, 0x8f}}, + {3, [3]byte{0xe0, 0xb9, 0x90}}, {3, [3]byte{0xe0, 0xb9, 0x91}}, + {3, [3]byte{0xe0, 0xb9, 0x92}}, {3, [3]byte{0xe0, 0xb9, 0x93}}, + {3, [3]byte{0xe0, 0xb9, 0x94}}, {3, [3]byte{0xe0, 0xb9, 0x95}}, + {3, [3]byte{0xe0, 0xb9, 0x96}}, {3, [3]byte{0xe0, 0xb9, 0x97}}, + {3, [3]byte{0xe0, 0xb9, 0x98}}, {3, [3]byte{0xe0, 0xb9, 0x99}}, + {3, [3]byte{0xe0, 0xb9, 0x9a}}, {3, [3]byte{0xe0, 0xb9, 0x9b}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa1000e01, 0xa2000e02, 0xa3000e03, 0xa4000e04, 0xa5000e05, 0xa6000e06, 0xa7000e07, + 0xa8000e08, 0xa9000e09, 0xaa000e0a, 0xab000e0b, 0xac000e0c, 0xad000e0d, 0xae000e0e, 0xaf000e0f, + 0xb0000e10, 0xb1000e11, 0xb2000e12, 0xb3000e13, 0xb4000e14, 0xb5000e15, 0xb6000e16, 0xb7000e17, + 0xb8000e18, 0xb9000e19, 0xba000e1a, 0xbb000e1b, 0xbc000e1c, 0xbd000e1d, 0xbe000e1e, 0xbf000e1f, + 0xc0000e20, 
0xc1000e21, 0xc2000e22, 0xc3000e23, 0xc4000e24, 0xc5000e25, 0xc6000e26, 0xc7000e27, + 0xc8000e28, 0xc9000e29, 0xca000e2a, 0xcb000e2b, 0xcc000e2c, 0xcd000e2d, 0xce000e2e, 0xcf000e2f, + 0xd0000e30, 0xd1000e31, 0xd2000e32, 0xd3000e33, 0xd4000e34, 0xd5000e35, 0xd6000e36, 0xd7000e37, + 0xd8000e38, 0xd9000e39, 0xda000e3a, 0xdf000e3f, 0xe0000e40, 0xe1000e41, 0xe2000e42, 0xe3000e43, + 0xe4000e44, 0xe5000e45, 0xe6000e46, 0xe7000e47, 0xe8000e48, 0xe9000e49, 0xea000e4a, 0xeb000e4b, + 0xec000e4c, 0xed000e4d, 0xee000e4e, 0xef000e4f, 0xf0000e50, 0xf1000e51, 0xf2000e52, 0xf3000e53, + 0xf4000e54, 0xf5000e55, 0xf6000e56, 0xf7000e57, 0xf8000e58, 0xf9000e59, 0xfa000e5a, 0xfb000e5b, + 0x96002013, 0x97002014, 0x91002018, 0x92002019, 0x9300201c, 0x9400201d, 0x95002022, 0x85002026, + 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, + 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, + 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, + 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, + }, +} + +// Windows1250 is the Windows 1250 encoding. +var Windows1250 *Charmap = &windows1250 + +var windows1250 = Charmap{ + name: "Windows 1250", + mib: identifier.Windows1250, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, 
[3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {2, [3]byte{0xc5, 0x9a, 0x00}}, {2, [3]byte{0xc5, 0xa4, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc5, 0xb9, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {2, [3]byte{0xc5, 0x9b, 0x00}}, {2, [3]byte{0xc5, 0xa5, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {2, [3]byte{0xc5, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xcb, 0x87, 0x00}}, + {2, [3]byte{0xcb, 0x98, 0x00}}, {2, [3]byte{0xc5, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc4, 0x84, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc5, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0xab, 
0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc5, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xcb, 0x9b, 0x00}}, {2, [3]byte{0xc5, 0x82, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc4, 0x85, 0x00}}, + {2, [3]byte{0xc5, 0x9f, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc4, 0xbd, 0x00}}, {2, [3]byte{0xcb, 0x9d, 0x00}}, + {2, [3]byte{0xc4, 0xbe, 0x00}}, {2, [3]byte{0xc5, 0xbc, 0x00}}, + {2, [3]byte{0xc5, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc4, 0x82, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc4, 0xb9, 0x00}}, + {2, [3]byte{0xc4, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc4, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc4, 0x8e, 0x00}}, + {2, [3]byte{0xc4, 0x90, 0x00}}, {2, [3]byte{0xc5, 0x83, 0x00}}, + {2, [3]byte{0xc5, 0x87, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc5, 0x90, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc5, 0x98, 0x00}}, {2, [3]byte{0xc5, 0xae, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc5, 0xb0, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc5, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc5, 0x95, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc4, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc4, 0xba, 0x00}}, + {2, [3]byte{0xc4, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc4, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc4, 0x99, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc4, 0x9b, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc4, 0x8f, 0x00}}, + {2, [3]byte{0xc4, 0x91, 0x00}}, {2, [3]byte{0xc5, 0x84, 0x00}}, + {2, [3]byte{0xc5, 0x88, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc5, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc5, 0x99, 0x00}}, {2, [3]byte{0xc5, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc5, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc5, 0xa3, 0x00}}, {2, [3]byte{0xcb, 0x99, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa40000a4, 0xa60000a6, 0xa70000a7, 0xa80000a8, 0xa90000a9, 0xab0000ab, 0xac0000ac, + 0xad0000ad, 0xae0000ae, 0xb00000b0, 0xb10000b1, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, + 0xb80000b8, 0xbb0000bb, 0xc10000c1, 0xc20000c2, 0xc40000c4, 0xc70000c7, 0xc90000c9, 0xcb0000cb, + 0xcd0000cd, 0xce0000ce, 0xd30000d3, 0xd40000d4, 0xd60000d6, 0xd70000d7, 0xda0000da, 0xdc0000dc, + 0xdd0000dd, 0xdf0000df, 0xe10000e1, 0xe20000e2, 0xe40000e4, 0xe70000e7, 0xe90000e9, 0xeb0000eb, + 0xed0000ed, 0xee0000ee, 0xf30000f3, 0xf40000f4, 0xf60000f6, 0xf70000f7, 0xfa0000fa, 0xfc0000fc, + 0xfd0000fd, 0xc3000102, 0xe3000103, 0xa5000104, 0xb9000105, 0xc6000106, 0xe6000107, 0xc800010c, + 0xe800010d, 0xcf00010e, 0xef00010f, 0xd0000110, 0xf0000111, 0xca000118, 0xea000119, 0xcc00011a, + 0xec00011b, 0xc5000139, 0xe500013a, 0xbc00013d, 0xbe00013e, 0xa3000141, 0xb3000142, 0xd1000143, + 0xf1000144, 0xd2000147, 0xf2000148, 0xd5000150, 0xf5000151, 0xc0000154, 0xe0000155, 0xd8000158, + 0xf8000159, 0x8c00015a, 0x9c00015b, 0xaa00015e, 0xba00015f, 0x8a000160, 0x9a000161, 0xde000162, + 0xfe000163, 0x8d000164, 0x9d000165, 0xd900016e, 0xf900016f, 0xdb000170, 0xfb000171, 0x8f000179, + 0x9f00017a, 0xaf00017b, 0xbf00017c, 0x8e00017d, 0x9e00017e, 0xa10002c7, 0xa20002d8, 0xff0002d9, + 0xb20002db, 0xbd0002dd, 0x96002013, 0x97002014, 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, + 0x9400201d, 0x8400201e, 0x86002020, 0x87002021, 0x95002022, 0x85002026, 0x89002030, 0x8b002039, + 0x9b00203a, 0x800020ac, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + }, +} + +// Windows1251 is the Windows 1251 encoding. 
+var Windows1251 *Charmap = &windows1251 + +var windows1251 = Charmap{ + name: "Windows 1251", + mib: identifier.Windows1251, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xd0, 0x82, 0x00}}, {2, [3]byte{0xd0, 0x83, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {2, [3]byte{0xd1, 0x93, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {2, [3]byte{0xd0, 0x89, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {2, [3]byte{0xd0, 0x8a, 0x00}}, {2, [3]byte{0xd0, 0x8c, 0x00}}, + {2, [3]byte{0xd0, 0x8b, 0x00}}, {2, [3]byte{0xd0, 0x8f, 0x00}}, + {2, [3]byte{0xd1, 0x92, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {2, [3]byte{0xd1, 0x99, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {2, [3]byte{0xd1, 0x9a, 0x00}}, {2, [3]byte{0xd1, 0x9c, 0x00}}, + {2, [3]byte{0xd1, 0x9b, 0x00}}, {2, [3]byte{0xd1, 0x9f, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0x8e, 0x00}}, + {2, [3]byte{0xd1, 0x9e, 0x00}}, {2, [3]byte{0xd0, 0x88, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xd2, 0x90, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xd0, 0x81, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xd0, 0x84, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xd0, 0x87, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xd0, 0x86, 0x00}}, {2, [3]byte{0xd1, 0x96, 0x00}}, + {2, [3]byte{0xd2, 0x91, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xd1, 0x91, 0x00}}, {3, [3]byte{0xe2, 0x84, 0x96}}, + {2, [3]byte{0xd1, 0x94, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xd1, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x85, 0x00}}, + {2, [3]byte{0xd1, 0x95, 0x00}}, {2, [3]byte{0xd1, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x90, 0x00}}, {2, [3]byte{0xd0, 0x91, 0x00}}, + {2, [3]byte{0xd0, 0x92, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, + {2, [3]byte{0xd0, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, + {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x99, 0x00}}, + {2, [3]byte{0xd0, 0x9a, 0x00}}, {2, [3]byte{0xd0, 0x9b, 0x00}}, + {2, [3]byte{0xd0, 0x9c, 0x00}}, {2, [3]byte{0xd0, 0x9d, 0x00}}, + {2, [3]byte{0xd0, 0x9e, 0x00}}, {2, [3]byte{0xd0, 0x9f, 0x00}}, + {2, [3]byte{0xd0, 0xa0, 
0x00}}, {2, [3]byte{0xd0, 0xa1, 0x00}}, + {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd0, 0xa3, 0x00}}, + {2, [3]byte{0xd0, 0xa4, 0x00}}, {2, [3]byte{0xd0, 0xa5, 0x00}}, + {2, [3]byte{0xd0, 0xa6, 0x00}}, {2, [3]byte{0xd0, 0xa7, 0x00}}, + {2, [3]byte{0xd0, 0xa8, 0x00}}, {2, [3]byte{0xd0, 0xa9, 0x00}}, + {2, [3]byte{0xd0, 0xaa, 0x00}}, {2, [3]byte{0xd0, 0xab, 0x00}}, + {2, [3]byte{0xd0, 0xac, 0x00}}, {2, [3]byte{0xd0, 0xad, 0x00}}, + {2, [3]byte{0xd0, 0xae, 0x00}}, {2, [3]byte{0xd0, 0xaf, 0x00}}, + {2, [3]byte{0xd0, 0xb0, 0x00}}, {2, [3]byte{0xd0, 0xb1, 0x00}}, + {2, [3]byte{0xd0, 0xb2, 0x00}}, {2, [3]byte{0xd0, 0xb3, 0x00}}, + {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0xb5, 0x00}}, + {2, [3]byte{0xd0, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0xb7, 0x00}}, + {2, [3]byte{0xd0, 0xb8, 0x00}}, {2, [3]byte{0xd0, 0xb9, 0x00}}, + {2, [3]byte{0xd0, 0xba, 0x00}}, {2, [3]byte{0xd0, 0xbb, 0x00}}, + {2, [3]byte{0xd0, 0xbc, 0x00}}, {2, [3]byte{0xd0, 0xbd, 0x00}}, + {2, [3]byte{0xd0, 0xbe, 0x00}}, {2, [3]byte{0xd0, 0xbf, 0x00}}, + {2, [3]byte{0xd1, 0x80, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x82, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, + {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x85, 0x00}}, + {2, [3]byte{0xd1, 0x86, 0x00}}, {2, [3]byte{0xd1, 0x87, 0x00}}, + {2, [3]byte{0xd1, 0x88, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, + {2, [3]byte{0xd1, 0x8a, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, + {2, [3]byte{0xd1, 0x8c, 0x00}}, {2, [3]byte{0xd1, 0x8d, 0x00}}, + {2, [3]byte{0xd1, 0x8e, 0x00}}, {2, [3]byte{0xd1, 0x8f, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa40000a4, 0xa60000a6, 0xa70000a7, 0xa90000a9, 0xab0000ab, 0xac0000ac, 0xad0000ad, + 0xae0000ae, 0xb00000b0, 0xb10000b1, 0xb50000b5, 0xb60000b6, 0xb70000b7, 0xbb0000bb, 0xa8000401, + 0x80000402, 0x81000403, 0xaa000404, 0xbd000405, 0xb2000406, 0xaf000407, 0xa3000408, 0x8a000409, + 0x8c00040a, 0x8e00040b, 0x8d00040c, 0xa100040e, 0x8f00040f, 0xc0000410, 0xc1000411, 0xc2000412, + 
0xc3000413, 0xc4000414, 0xc5000415, 0xc6000416, 0xc7000417, 0xc8000418, 0xc9000419, 0xca00041a, + 0xcb00041b, 0xcc00041c, 0xcd00041d, 0xce00041e, 0xcf00041f, 0xd0000420, 0xd1000421, 0xd2000422, + 0xd3000423, 0xd4000424, 0xd5000425, 0xd6000426, 0xd7000427, 0xd8000428, 0xd9000429, 0xda00042a, + 0xdb00042b, 0xdc00042c, 0xdd00042d, 0xde00042e, 0xdf00042f, 0xe0000430, 0xe1000431, 0xe2000432, + 0xe3000433, 0xe4000434, 0xe5000435, 0xe6000436, 0xe7000437, 0xe8000438, 0xe9000439, 0xea00043a, + 0xeb00043b, 0xec00043c, 0xed00043d, 0xee00043e, 0xef00043f, 0xf0000440, 0xf1000441, 0xf2000442, + 0xf3000443, 0xf4000444, 0xf5000445, 0xf6000446, 0xf7000447, 0xf8000448, 0xf9000449, 0xfa00044a, + 0xfb00044b, 0xfc00044c, 0xfd00044d, 0xfe00044e, 0xff00044f, 0xb8000451, 0x90000452, 0x83000453, + 0xba000454, 0xbe000455, 0xb3000456, 0xbf000457, 0xbc000458, 0x9a000459, 0x9c00045a, 0x9e00045b, + 0x9d00045c, 0xa200045e, 0x9f00045f, 0xa5000490, 0xb4000491, 0x96002013, 0x97002014, 0x91002018, + 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e, 0x86002020, 0x87002021, 0x95002022, + 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0x880020ac, 0xb9002116, 0x99002122, 0x99002122, + }, +} + +// Windows1252 is the Windows 1252 encoding. +var Windows1252 *Charmap = &windows1252 + +var windows1252 = Charmap{ + name: "Windows 1252", + mib: identifier.Windows1252, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, 
[3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {2, [3]byte{0xcb, 0x86, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {2, [3]byte{0xc5, 0x92, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {2, [3]byte{0xcb, 0x9c, 0x00}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {2, [3]byte{0xc5, 0x93, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {2, [3]byte{0xc5, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xab, 
0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xba, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa10000a1, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, + 0xa80000a8, 0xa90000a9, 0xaa0000aa, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, + 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, + 0xb80000b8, 0xb90000b9, 0xba0000ba, 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xbf0000bf, + 0xc00000c0, 0xc10000c1, 0xc20000c2, 0xc30000c3, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc70000c7, + 0xc80000c8, 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, + 0xd00000d0, 0xd10000d1, 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd70000d7, + 0xd80000d8, 0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, 0xdd0000dd, 0xde0000de, 0xdf0000df, + 0xe00000e0, 0xe10000e1, 0xe20000e2, 0xe30000e3, 0xe40000e4, 0xe50000e5, 0xe60000e6, 0xe70000e7, + 0xe80000e8, 0xe90000e9, 0xea0000ea, 0xeb0000eb, 0xec0000ec, 0xed0000ed, 0xee0000ee, 0xef0000ef, + 0xf00000f0, 0xf10000f1, 0xf20000f2, 0xf30000f3, 0xf40000f4, 0xf50000f5, 0xf60000f6, 0xf70000f7, + 0xf80000f8, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, 0xfd0000fd, 0xfe0000fe, 0xff0000ff, + 0x8c000152, 0x9c000153, 0x8a000160, 0x9a000161, 0x9f000178, 0x8e00017d, 0x9e00017e, 0x83000192, + 0x880002c6, 0x980002dc, 0x96002013, 0x97002014, 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, + 0x9400201d, 0x8400201e, 0x86002020, 0x87002021, 0x95002022, 0x85002026, 0x89002030, 0x8b002039, + 0x9b00203a, 0x800020ac, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + }, +} + +// Windows1253 is the Windows 1253 encoding. 
+var Windows1253 *Charmap = &windows1253 + +var windows1253 = Charmap{ + name: "Windows 1253", + mib: identifier.Windows1253, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xce, 0x85, 0x00}}, + {2, [3]byte{0xce, 0x86, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x95}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xce, 0x84, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xce, 0x88, 0x00}}, {2, [3]byte{0xce, 0x89, 0x00}}, + {2, [3]byte{0xce, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xce, 0x8c, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xce, 0x8e, 0x00}}, {2, [3]byte{0xce, 0x8f, 0x00}}, + {2, [3]byte{0xce, 0x90, 0x00}}, {2, [3]byte{0xce, 0x91, 0x00}}, + {2, [3]byte{0xce, 0x92, 0x00}}, {2, [3]byte{0xce, 0x93, 0x00}}, + {2, [3]byte{0xce, 0x94, 0x00}}, {2, [3]byte{0xce, 0x95, 0x00}}, + {2, [3]byte{0xce, 0x96, 0x00}}, {2, [3]byte{0xce, 0x97, 0x00}}, + {2, [3]byte{0xce, 0x98, 0x00}}, {2, [3]byte{0xce, 0x99, 0x00}}, + {2, [3]byte{0xce, 0x9a, 0x00}}, {2, [3]byte{0xce, 0x9b, 0x00}}, + {2, [3]byte{0xce, 0x9c, 0x00}}, {2, [3]byte{0xce, 0x9d, 0x00}}, + {2, [3]byte{0xce, 0x9e, 0x00}}, {2, [3]byte{0xce, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0xa0, 
0x00}}, {2, [3]byte{0xce, 0xa1, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xce, 0xa3, 0x00}}, + {2, [3]byte{0xce, 0xa4, 0x00}}, {2, [3]byte{0xce, 0xa5, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0xa7, 0x00}}, + {2, [3]byte{0xce, 0xa8, 0x00}}, {2, [3]byte{0xce, 0xa9, 0x00}}, + {2, [3]byte{0xce, 0xaa, 0x00}}, {2, [3]byte{0xce, 0xab, 0x00}}, + {2, [3]byte{0xce, 0xac, 0x00}}, {2, [3]byte{0xce, 0xad, 0x00}}, + {2, [3]byte{0xce, 0xae, 0x00}}, {2, [3]byte{0xce, 0xaf, 0x00}}, + {2, [3]byte{0xce, 0xb0, 0x00}}, {2, [3]byte{0xce, 0xb1, 0x00}}, + {2, [3]byte{0xce, 0xb2, 0x00}}, {2, [3]byte{0xce, 0xb3, 0x00}}, + {2, [3]byte{0xce, 0xb4, 0x00}}, {2, [3]byte{0xce, 0xb5, 0x00}}, + {2, [3]byte{0xce, 0xb6, 0x00}}, {2, [3]byte{0xce, 0xb7, 0x00}}, + {2, [3]byte{0xce, 0xb8, 0x00}}, {2, [3]byte{0xce, 0xb9, 0x00}}, + {2, [3]byte{0xce, 0xba, 0x00}}, {2, [3]byte{0xce, 0xbb, 0x00}}, + {2, [3]byte{0xce, 0xbc, 0x00}}, {2, [3]byte{0xce, 0xbd, 0x00}}, + {2, [3]byte{0xce, 0xbe, 0x00}}, {2, [3]byte{0xce, 0xbf, 0x00}}, + {2, [3]byte{0xcf, 0x80, 0x00}}, {2, [3]byte{0xcf, 0x81, 0x00}}, + {2, [3]byte{0xcf, 0x82, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xcf, 0x84, 0x00}}, {2, [3]byte{0xcf, 0x85, 0x00}}, + {2, [3]byte{0xcf, 0x86, 0x00}}, {2, [3]byte{0xcf, 0x87, 0x00}}, + {2, [3]byte{0xcf, 0x88, 0x00}}, {2, [3]byte{0xcf, 0x89, 0x00}}, + {2, [3]byte{0xcf, 0x8a, 0x00}}, {2, [3]byte{0xcf, 0x8b, 0x00}}, + {2, [3]byte{0xcf, 0x8c, 0x00}}, {2, [3]byte{0xcf, 0x8d, 0x00}}, + {2, [3]byte{0xcf, 0x8e, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, 0xa80000a8, 0xa90000a9, + 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, + 0xb50000b5, 0xb60000b6, 0xb70000b7, 0xbb0000bb, 0xbd0000bd, 0x83000192, 0xb4000384, 0xa1000385, + 0xa2000386, 0xb8000388, 0xb9000389, 0xba00038a, 0xbc00038c, 0xbe00038e, 0xbf00038f, 0xc0000390, + 
0xc1000391, 0xc2000392, 0xc3000393, 0xc4000394, 0xc5000395, 0xc6000396, 0xc7000397, 0xc8000398, + 0xc9000399, 0xca00039a, 0xcb00039b, 0xcc00039c, 0xcd00039d, 0xce00039e, 0xcf00039f, 0xd00003a0, + 0xd10003a1, 0xd30003a3, 0xd40003a4, 0xd50003a5, 0xd60003a6, 0xd70003a7, 0xd80003a8, 0xd90003a9, + 0xda0003aa, 0xdb0003ab, 0xdc0003ac, 0xdd0003ad, 0xde0003ae, 0xdf0003af, 0xe00003b0, 0xe10003b1, + 0xe20003b2, 0xe30003b3, 0xe40003b4, 0xe50003b5, 0xe60003b6, 0xe70003b7, 0xe80003b8, 0xe90003b9, + 0xea0003ba, 0xeb0003bb, 0xec0003bc, 0xed0003bd, 0xee0003be, 0xef0003bf, 0xf00003c0, 0xf10003c1, + 0xf20003c2, 0xf30003c3, 0xf40003c4, 0xf50003c5, 0xf60003c6, 0xf70003c7, 0xf80003c8, 0xf90003c9, + 0xfa0003ca, 0xfb0003cb, 0xfc0003cc, 0xfd0003cd, 0xfe0003ce, 0x96002013, 0x97002014, 0xaf002015, + 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e, 0x86002020, 0x87002021, + 0x95002022, 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0x800020ac, 0x99002122, 0x99002122, + 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + }, +} + +// Windows1254 is the Windows 1254 encoding. +var Windows1254 *Charmap = &windows1254 + +var windows1254 = Charmap{ + name: "Windows 1254", + mib: identifier.Windows1254, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, 
[3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {2, [3]byte{0xcb, 0x86, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {2, [3]byte{0xc5, 0x92, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {2, [3]byte{0xcb, 0x9c, 0x00}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {2, [3]byte{0xc5, 0x93, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc5, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xab, 
0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xba, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc4, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc4, 0xb0, 0x00}}, + {2, [3]byte{0xc5, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x9f, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc4, 0xb1, 0x00}}, + {2, [3]byte{0xc5, 0x9f, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa10000a1, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, + 0xa80000a8, 0xa90000a9, 0xaa0000aa, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, + 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, + 0xb80000b8, 0xb90000b9, 0xba0000ba, 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xbf0000bf, + 0xc00000c0, 0xc10000c1, 0xc20000c2, 0xc30000c3, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc70000c7, + 0xc80000c8, 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, + 0xd10000d1, 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd70000d7, 0xd80000d8, + 0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, 0xdf0000df, 0xe00000e0, 0xe10000e1, 0xe20000e2, + 0xe30000e3, 0xe40000e4, 0xe50000e5, 0xe60000e6, 0xe70000e7, 0xe80000e8, 0xe90000e9, 0xea0000ea, + 0xeb0000eb, 0xec0000ec, 0xed0000ed, 0xee0000ee, 0xef0000ef, 0xf10000f1, 0xf20000f2, 0xf30000f3, + 0xf40000f4, 0xf50000f5, 0xf60000f6, 0xf70000f7, 0xf80000f8, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, + 0xfc0000fc, 0xff0000ff, 0xd000011e, 0xf000011f, 0xdd000130, 0xfd000131, 0x8c000152, 0x9c000153, + 0xde00015e, 0xfe00015f, 0x8a000160, 0x9a000161, 0x9f000178, 0x83000192, 0x880002c6, 0x980002dc, + 0x96002013, 0x97002014, 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e, + 0x86002020, 0x87002021, 0x95002022, 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0x800020ac, + 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + }, +} + +// Windows1255 is the Windows 1255 encoding. 
+var Windows1255 *Charmap = &windows1255 + +var windows1255 = Charmap{ + name: "Windows 1255", + mib: identifier.Windows1255, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {2, [3]byte{0xcb, 0x86, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {2, [3]byte{0xcb, 0x9c, 0x00}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xaa}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0x97, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xd6, 0xb0, 0x00}}, {2, [3]byte{0xd6, 0xb1, 0x00}}, + {2, [3]byte{0xd6, 0xb2, 0x00}}, {2, [3]byte{0xd6, 0xb3, 0x00}}, + {2, [3]byte{0xd6, 0xb4, 0x00}}, {2, [3]byte{0xd6, 0xb5, 0x00}}, + {2, [3]byte{0xd6, 0xb6, 0x00}}, {2, [3]byte{0xd6, 0xb7, 0x00}}, + {2, [3]byte{0xd6, 0xb8, 0x00}}, {2, [3]byte{0xd6, 0xb9, 0x00}}, + {2, [3]byte{0xd6, 0xba, 0x00}}, {2, [3]byte{0xd6, 0xbb, 0x00}}, + {2, [3]byte{0xd6, 0xbc, 0x00}}, {2, [3]byte{0xd6, 0xbd, 0x00}}, + {2, [3]byte{0xd6, 0xbe, 0x00}}, {2, [3]byte{0xd6, 0xbf, 0x00}}, + {2, [3]byte{0xd7, 0x80, 
0x00}}, {2, [3]byte{0xd7, 0x81, 0x00}}, + {2, [3]byte{0xd7, 0x82, 0x00}}, {2, [3]byte{0xd7, 0x83, 0x00}}, + {2, [3]byte{0xd7, 0xb0, 0x00}}, {2, [3]byte{0xd7, 0xb1, 0x00}}, + {2, [3]byte{0xd7, 0xb2, 0x00}}, {2, [3]byte{0xd7, 0xb3, 0x00}}, + {2, [3]byte{0xd7, 0xb4, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xd7, 0x90, 0x00}}, {2, [3]byte{0xd7, 0x91, 0x00}}, + {2, [3]byte{0xd7, 0x92, 0x00}}, {2, [3]byte{0xd7, 0x93, 0x00}}, + {2, [3]byte{0xd7, 0x94, 0x00}}, {2, [3]byte{0xd7, 0x95, 0x00}}, + {2, [3]byte{0xd7, 0x96, 0x00}}, {2, [3]byte{0xd7, 0x97, 0x00}}, + {2, [3]byte{0xd7, 0x98, 0x00}}, {2, [3]byte{0xd7, 0x99, 0x00}}, + {2, [3]byte{0xd7, 0x9a, 0x00}}, {2, [3]byte{0xd7, 0x9b, 0x00}}, + {2, [3]byte{0xd7, 0x9c, 0x00}}, {2, [3]byte{0xd7, 0x9d, 0x00}}, + {2, [3]byte{0xd7, 0x9e, 0x00}}, {2, [3]byte{0xd7, 0x9f, 0x00}}, + {2, [3]byte{0xd7, 0xa0, 0x00}}, {2, [3]byte{0xd7, 0xa1, 0x00}}, + {2, [3]byte{0xd7, 0xa2, 0x00}}, {2, [3]byte{0xd7, 0xa3, 0x00}}, + {2, [3]byte{0xd7, 0xa4, 0x00}}, {2, [3]byte{0xd7, 0xa5, 0x00}}, + {2, [3]byte{0xd7, 0xa6, 0x00}}, {2, [3]byte{0xd7, 0xa7, 0x00}}, + {2, [3]byte{0xd7, 0xa8, 0x00}}, {2, [3]byte{0xd7, 0xa9, 0x00}}, + {2, [3]byte{0xd7, 0xaa, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x8e}}, + {3, [3]byte{0xe2, 0x80, 0x8f}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa10000a1, 0xa20000a2, 0xa30000a3, 0xa50000a5, 0xa60000a6, 0xa70000a7, 0xa80000a8, + 0xa90000a9, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, 0xb00000b0, 0xb10000b1, + 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, 0xb80000b8, 0xb90000b9, + 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xbf0000bf, 0xaa0000d7, 0xba0000f7, 0x83000192, + 
0x880002c6, 0x980002dc, 0xc00005b0, 0xc10005b1, 0xc20005b2, 0xc30005b3, 0xc40005b4, 0xc50005b5, + 0xc60005b6, 0xc70005b7, 0xc80005b8, 0xc90005b9, 0xca0005ba, 0xcb0005bb, 0xcc0005bc, 0xcd0005bd, + 0xce0005be, 0xcf0005bf, 0xd00005c0, 0xd10005c1, 0xd20005c2, 0xd30005c3, 0xe00005d0, 0xe10005d1, + 0xe20005d2, 0xe30005d3, 0xe40005d4, 0xe50005d5, 0xe60005d6, 0xe70005d7, 0xe80005d8, 0xe90005d9, + 0xea0005da, 0xeb0005db, 0xec0005dc, 0xed0005dd, 0xee0005de, 0xef0005df, 0xf00005e0, 0xf10005e1, + 0xf20005e2, 0xf30005e3, 0xf40005e4, 0xf50005e5, 0xf60005e6, 0xf70005e7, 0xf80005e8, 0xf90005e9, + 0xfa0005ea, 0xd40005f0, 0xd50005f1, 0xd60005f2, 0xd70005f3, 0xd80005f4, 0xfd00200e, 0xfe00200f, + 0x96002013, 0x97002014, 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e, + 0x86002020, 0x87002021, 0x95002022, 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0xa40020aa, + 0x800020ac, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + }, +} + +// Windows1256 is the Windows 1256 encoding. +var Windows1256 *Charmap = &windows1256 + +var windows1256 = Charmap{ + name: "Windows 1256", + mib: identifier.Windows1256, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, 
[3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {2, [3]byte{0xd9, 0xbe, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {2, [3]byte{0xcb, 0x86, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {2, [3]byte{0xd9, 0xb9, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {2, [3]byte{0xc5, 0x92, 0x00}}, {2, [3]byte{0xda, 0x86, 0x00}}, + {2, [3]byte{0xda, 0x98, 0x00}}, {2, [3]byte{0xda, 0x88, 0x00}}, + {2, [3]byte{0xda, 0xaf, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {2, [3]byte{0xda, 0xa9, 0x00}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {2, [3]byte{0xda, 0x91, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {2, [3]byte{0xc5, 0x93, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x8c}}, + {3, [3]byte{0xe2, 0x80, 0x8d}}, {2, [3]byte{0xda, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xd8, 0x8c, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xda, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xab, 
0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xd8, 0x9b, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xd8, 0x9f, 0x00}}, + {2, [3]byte{0xdb, 0x81, 0x00}}, {2, [3]byte{0xd8, 0xa1, 0x00}}, + {2, [3]byte{0xd8, 0xa2, 0x00}}, {2, [3]byte{0xd8, 0xa3, 0x00}}, + {2, [3]byte{0xd8, 0xa4, 0x00}}, {2, [3]byte{0xd8, 0xa5, 0x00}}, + {2, [3]byte{0xd8, 0xa6, 0x00}}, {2, [3]byte{0xd8, 0xa7, 0x00}}, + {2, [3]byte{0xd8, 0xa8, 0x00}}, {2, [3]byte{0xd8, 0xa9, 0x00}}, + {2, [3]byte{0xd8, 0xaa, 0x00}}, {2, [3]byte{0xd8, 0xab, 0x00}}, + {2, [3]byte{0xd8, 0xac, 0x00}}, {2, [3]byte{0xd8, 0xad, 0x00}}, + {2, [3]byte{0xd8, 0xae, 0x00}}, {2, [3]byte{0xd8, 0xaf, 0x00}}, + {2, [3]byte{0xd8, 0xb0, 0x00}}, {2, [3]byte{0xd8, 0xb1, 0x00}}, + {2, [3]byte{0xd8, 0xb2, 0x00}}, {2, [3]byte{0xd8, 0xb3, 0x00}}, + {2, [3]byte{0xd8, 0xb4, 0x00}}, {2, [3]byte{0xd8, 0xb5, 0x00}}, + {2, [3]byte{0xd8, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xd8, 0xb7, 0x00}}, {2, [3]byte{0xd8, 0xb8, 0x00}}, + {2, [3]byte{0xd8, 0xb9, 0x00}}, {2, [3]byte{0xd8, 0xba, 0x00}}, + {2, [3]byte{0xd9, 0x80, 0x00}}, {2, [3]byte{0xd9, 0x81, 0x00}}, + {2, [3]byte{0xd9, 0x82, 0x00}}, {2, [3]byte{0xd9, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xd9, 0x84, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xd9, 0x85, 0x00}}, + {2, [3]byte{0xd9, 0x86, 0x00}}, {2, [3]byte{0xd9, 0x87, 0x00}}, + {2, [3]byte{0xd9, 0x88, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xd9, 0x89, 0x00}}, {2, [3]byte{0xd9, 0x8a, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xd9, 0x8b, 0x00}}, {2, [3]byte{0xd9, 0x8c, 0x00}}, + {2, [3]byte{0xd9, 0x8d, 0x00}}, {2, [3]byte{0xd9, 0x8e, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xd9, 0x8f, 0x00}}, + {2, [3]byte{0xd9, 0x90, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xd9, 0x91, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xd9, 0x92, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x8e}}, + {3, [3]byte{0xe2, 0x80, 0x8f}}, {2, [3]byte{0xdb, 0x92, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, 0xa80000a8, + 0xa90000a9, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, 0xb00000b0, 0xb10000b1, + 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, 0xb80000b8, 0xb90000b9, + 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xd70000d7, 0xe00000e0, 0xe20000e2, 0xe70000e7, + 0xe80000e8, 0xe90000e9, 0xea0000ea, 0xeb0000eb, 0xee0000ee, 0xef0000ef, 0xf40000f4, 0xf70000f7, + 0xf90000f9, 0xfb0000fb, 0xfc0000fc, 0x8c000152, 0x9c000153, 0x83000192, 0x880002c6, 0xa100060c, + 0xba00061b, 0xbf00061f, 0xc1000621, 0xc2000622, 0xc3000623, 0xc4000624, 0xc5000625, 0xc6000626, + 0xc7000627, 0xc8000628, 0xc9000629, 0xca00062a, 0xcb00062b, 0xcc00062c, 0xcd00062d, 0xce00062e, + 0xcf00062f, 0xd0000630, 0xd1000631, 0xd2000632, 0xd3000633, 0xd4000634, 0xd5000635, 0xd6000636, + 0xd8000637, 0xd9000638, 0xda000639, 0xdb00063a, 0xdc000640, 0xdd000641, 0xde000642, 0xdf000643, + 0xe1000644, 0xe3000645, 0xe4000646, 0xe5000647, 0xe6000648, 0xec000649, 0xed00064a, 0xf000064b, + 0xf100064c, 0xf200064d, 0xf300064e, 0xf500064f, 0xf6000650, 0xf8000651, 0xfa000652, 0x8a000679, + 0x8100067e, 0x8d000686, 0x8f000688, 0x9a000691, 0x8e000698, 0x980006a9, 0x900006af, 0x9f0006ba, + 0xaa0006be, 0xc00006c1, 0xff0006d2, 0x9d00200c, 0x9e00200d, 0xfd00200e, 0xfe00200f, 0x96002013, + 0x97002014, 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e, 0x86002020, + 0x87002021, 0x95002022, 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0x800020ac, 0x99002122, + }, +} + +// Windows1257 is the Windows 1257 encoding. 
+var Windows1257 *Charmap = &windows1257 + +var windows1257 = Charmap{ + name: "Windows 1257", + mib: identifier.Windows1257, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xcb, 0x87, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xcb, 0x9b, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc5, 0x96, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc3, 0x86, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc5, 0x97, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc3, 0xa6, 0x00}}, + {2, [3]byte{0xc4, 0x84, 0x00}}, {2, [3]byte{0xc4, 0xae, 0x00}}, + {2, [3]byte{0xc4, 0x80, 0x00}}, {2, [3]byte{0xc4, 0x86, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc4, 0x98, 0x00}}, {2, [3]byte{0xc4, 0x92, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc5, 0xb9, 0x00}}, {2, [3]byte{0xc4, 0x96, 0x00}}, + {2, [3]byte{0xc4, 0xa2, 0x00}}, {2, [3]byte{0xc4, 0xb6, 0x00}}, + {2, [3]byte{0xc4, 0xaa, 0x00}}, {2, [3]byte{0xc4, 0xbb, 0x00}}, + {2, [3]byte{0xc5, 0xa0, 
0x00}}, {2, [3]byte{0xc5, 0x83, 0x00}}, + {2, [3]byte{0xc5, 0x85, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc5, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc5, 0xb2, 0x00}}, {2, [3]byte{0xc5, 0x81, 0x00}}, + {2, [3]byte{0xc5, 0x9a, 0x00}}, {2, [3]byte{0xc5, 0xaa, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc5, 0xbb, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc4, 0x85, 0x00}}, {2, [3]byte{0xc4, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x81, 0x00}}, {2, [3]byte{0xc4, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc4, 0x99, 0x00}}, {2, [3]byte{0xc4, 0x93, 0x00}}, + {2, [3]byte{0xc4, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc5, 0xba, 0x00}}, {2, [3]byte{0xc4, 0x97, 0x00}}, + {2, [3]byte{0xc4, 0xa3, 0x00}}, {2, [3]byte{0xc4, 0xb7, 0x00}}, + {2, [3]byte{0xc4, 0xab, 0x00}}, {2, [3]byte{0xc4, 0xbc, 0x00}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {2, [3]byte{0xc5, 0x84, 0x00}}, + {2, [3]byte{0xc5, 0x86, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc5, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc5, 0xb3, 0x00}}, {2, [3]byte{0xc5, 0x82, 0x00}}, + {2, [3]byte{0xc5, 0x9b, 0x00}}, {2, [3]byte{0xc5, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc5, 0xbc, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {2, [3]byte{0xcb, 0x99, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa60000a6, 0xa70000a7, 0x8d0000a8, 0xa90000a9, + 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0x9d0000af, 0xb00000b0, 0xb10000b1, 0xb20000b2, + 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, 0x8f0000b8, 0xb90000b9, 0xbb0000bb, + 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xc40000c4, 0xc50000c5, 0xaf0000c6, 0xc90000c9, 0xd30000d3, + 
0xd50000d5, 0xd60000d6, 0xd70000d7, 0xa80000d8, 0xdc0000dc, 0xdf0000df, 0xe40000e4, 0xe50000e5, + 0xbf0000e6, 0xe90000e9, 0xf30000f3, 0xf50000f5, 0xf60000f6, 0xf70000f7, 0xb80000f8, 0xfc0000fc, + 0xc2000100, 0xe2000101, 0xc0000104, 0xe0000105, 0xc3000106, 0xe3000107, 0xc800010c, 0xe800010d, + 0xc7000112, 0xe7000113, 0xcb000116, 0xeb000117, 0xc6000118, 0xe6000119, 0xcc000122, 0xec000123, + 0xce00012a, 0xee00012b, 0xc100012e, 0xe100012f, 0xcd000136, 0xed000137, 0xcf00013b, 0xef00013c, + 0xd9000141, 0xf9000142, 0xd1000143, 0xf1000144, 0xd2000145, 0xf2000146, 0xd400014c, 0xf400014d, + 0xaa000156, 0xba000157, 0xda00015a, 0xfa00015b, 0xd0000160, 0xf0000161, 0xdb00016a, 0xfb00016b, + 0xd8000172, 0xf8000173, 0xca000179, 0xea00017a, 0xdd00017b, 0xfd00017c, 0xde00017d, 0xfe00017e, + 0x8e0002c7, 0xff0002d9, 0x9e0002db, 0x96002013, 0x97002014, 0x91002018, 0x92002019, 0x8200201a, + 0x9300201c, 0x9400201d, 0x8400201e, 0x86002020, 0x87002021, 0x95002022, 0x85002026, 0x89002030, + 0x8b002039, 0x9b00203a, 0x800020ac, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + }, +} + +// Windows1258 is the Windows 1258 encoding. +var Windows1258 *Charmap = &windows1258 + +var windows1258 = Charmap{ + name: "Windows 1258", + mib: identifier.Windows1258, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, 
[3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {2, [3]byte{0xcb, 0x86, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {2, [3]byte{0xc5, 0x92, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {2, [3]byte{0xcb, 0x9c, 0x00}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {2, [3]byte{0xc5, 0x93, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc5, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xab, 
0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xba, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc4, 0x82, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xcc, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc4, 0x90, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xcc, 0x89, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc6, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc6, 0xaf, 0x00}}, + {2, [3]byte{0xcc, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc4, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xcc, 0x81, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x91, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xcc, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc6, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc6, 0xb0, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xab}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa10000a1, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, + 0xa80000a8, 0xa90000a9, 0xaa0000aa, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, + 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, + 0xb80000b8, 0xb90000b9, 0xba0000ba, 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xbf0000bf, + 0xc00000c0, 0xc10000c1, 0xc20000c2, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc70000c7, 0xc80000c8, + 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, 0xd10000d1, 0xd30000d3, + 0xd40000d4, 0xd60000d6, 0xd70000d7, 0xd80000d8, 0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, + 0xdf0000df, 0xe00000e0, 0xe10000e1, 0xe20000e2, 0xe40000e4, 0xe50000e5, 0xe60000e6, 0xe70000e7, + 0xe80000e8, 0xe90000e9, 0xea0000ea, 0xeb0000eb, 0xed0000ed, 0xee0000ee, 0xef0000ef, 0xf10000f1, + 0xf30000f3, 0xf40000f4, 0xf60000f6, 0xf70000f7, 0xf80000f8, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, + 0xfc0000fc, 0xff0000ff, 0xc3000102, 0xe3000103, 0xd0000110, 0xf0000111, 0x8c000152, 0x9c000153, + 0x9f000178, 0x83000192, 0xd50001a0, 0xf50001a1, 0xdd0001af, 0xfd0001b0, 0x880002c6, 0x980002dc, + 0xcc000300, 0xec000301, 0xde000303, 0xd2000309, 0xf2000323, 0x96002013, 0x97002014, 0x91002018, + 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e, 0x86002020, 0x87002021, 0x95002022, + 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0xfe0020ab, 0x800020ac, 0x99002122, 0x99002122, + 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + }, +} + +// XUserDefined is the X-User-Defined encoding. 
+// +// It is defined at http://encoding.spec.whatwg.org/#x-user-defined +var XUserDefined *Charmap = &xUserDefined + +var xUserDefined = Charmap{ + name: "X-User-Defined", + mib: identifier.XUserDefined, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, 
{1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0x9e, 0x80}}, {3, [3]byte{0xef, 0x9e, 0x81}}, + {3, [3]byte{0xef, 0x9e, 0x82}}, {3, [3]byte{0xef, 0x9e, 0x83}}, + {3, [3]byte{0xef, 0x9e, 0x84}}, {3, [3]byte{0xef, 0x9e, 0x85}}, + {3, [3]byte{0xef, 0x9e, 0x86}}, {3, [3]byte{0xef, 0x9e, 0x87}}, + {3, [3]byte{0xef, 0x9e, 0x88}}, {3, [3]byte{0xef, 0x9e, 0x89}}, + {3, [3]byte{0xef, 0x9e, 0x8a}}, {3, [3]byte{0xef, 0x9e, 0x8b}}, + {3, [3]byte{0xef, 0x9e, 0x8c}}, {3, [3]byte{0xef, 0x9e, 0x8d}}, + {3, [3]byte{0xef, 0x9e, 0x8e}}, {3, [3]byte{0xef, 0x9e, 0x8f}}, + {3, [3]byte{0xef, 0x9e, 0x90}}, {3, [3]byte{0xef, 0x9e, 0x91}}, + {3, [3]byte{0xef, 0x9e, 0x92}}, {3, [3]byte{0xef, 0x9e, 0x93}}, + {3, [3]byte{0xef, 0x9e, 0x94}}, {3, [3]byte{0xef, 0x9e, 0x95}}, + {3, [3]byte{0xef, 0x9e, 0x96}}, {3, [3]byte{0xef, 0x9e, 0x97}}, + {3, [3]byte{0xef, 0x9e, 0x98}}, {3, [3]byte{0xef, 0x9e, 0x99}}, + {3, [3]byte{0xef, 0x9e, 0x9a}}, {3, [3]byte{0xef, 0x9e, 0x9b}}, + {3, [3]byte{0xef, 0x9e, 0x9c}}, {3, [3]byte{0xef, 0x9e, 0x9d}}, + {3, [3]byte{0xef, 0x9e, 0x9e}}, {3, [3]byte{0xef, 0x9e, 0x9f}}, + {3, [3]byte{0xef, 0x9e, 0xa0}}, {3, [3]byte{0xef, 0x9e, 0xa1}}, + {3, [3]byte{0xef, 0x9e, 0xa2}}, {3, [3]byte{0xef, 0x9e, 0xa3}}, + {3, [3]byte{0xef, 0x9e, 0xa4}}, {3, [3]byte{0xef, 0x9e, 0xa5}}, + {3, [3]byte{0xef, 0x9e, 0xa6}}, {3, [3]byte{0xef, 0x9e, 0xa7}}, + {3, [3]byte{0xef, 0x9e, 0xa8}}, {3, [3]byte{0xef, 0x9e, 0xa9}}, + {3, [3]byte{0xef, 0x9e, 0xaa}}, {3, [3]byte{0xef, 0x9e, 0xab}}, + {3, [3]byte{0xef, 0x9e, 0xac}}, {3, [3]byte{0xef, 0x9e, 0xad}}, + {3, [3]byte{0xef, 0x9e, 0xae}}, {3, [3]byte{0xef, 0x9e, 0xaf}}, + {3, [3]byte{0xef, 0x9e, 0xb0}}, {3, [3]byte{0xef, 0x9e, 0xb1}}, + {3, [3]byte{0xef, 0x9e, 0xb2}}, {3, [3]byte{0xef, 0x9e, 0xb3}}, + {3, [3]byte{0xef, 0x9e, 0xb4}}, {3, [3]byte{0xef, 0x9e, 0xb5}}, + {3, [3]byte{0xef, 0x9e, 0xb6}}, {3, [3]byte{0xef, 0x9e, 0xb7}}, + {3, [3]byte{0xef, 0x9e, 0xb8}}, {3, [3]byte{0xef, 0x9e, 0xb9}}, + {3, [3]byte{0xef, 0x9e, 0xba}}, {3, [3]byte{0xef, 0x9e, 0xbb}}, + {3, [3]byte{0xef, 0x9e, 0xbc}}, {3, [3]byte{0xef, 0x9e, 0xbd}}, + {3, [3]byte{0xef, 0x9e, 0xbe}}, {3, [3]byte{0xef, 0x9e, 0xbf}}, + {3, [3]byte{0xef, 0x9f, 0x80}}, {3, [3]byte{0xef, 0x9f, 0x81}}, + {3, [3]byte{0xef, 0x9f, 0x82}}, {3, [3]byte{0xef, 0x9f, 0x83}}, + {3, [3]byte{0xef, 0x9f, 0x84}}, {3, [3]byte{0xef, 0x9f, 0x85}}, + {3, [3]byte{0xef, 0x9f, 0x86}}, {3, [3]byte{0xef, 0x9f, 0x87}}, + {3, [3]byte{0xef, 0x9f, 0x88}}, {3, [3]byte{0xef, 0x9f, 0x89}}, + {3, [3]byte{0xef, 0x9f, 0x8a}}, {3, [3]byte{0xef, 0x9f, 0x8b}}, + {3, [3]byte{0xef, 0x9f, 0x8c}}, {3, [3]byte{0xef, 0x9f, 0x8d}}, + {3, [3]byte{0xef, 
0x9f, 0x8e}}, {3, [3]byte{0xef, 0x9f, 0x8f}}, + {3, [3]byte{0xef, 0x9f, 0x90}}, {3, [3]byte{0xef, 0x9f, 0x91}}, + {3, [3]byte{0xef, 0x9f, 0x92}}, {3, [3]byte{0xef, 0x9f, 0x93}}, + {3, [3]byte{0xef, 0x9f, 0x94}}, {3, [3]byte{0xef, 0x9f, 0x95}}, + {3, [3]byte{0xef, 0x9f, 0x96}}, {3, [3]byte{0xef, 0x9f, 0x97}}, + {3, [3]byte{0xef, 0x9f, 0x98}}, {3, [3]byte{0xef, 0x9f, 0x99}}, + {3, [3]byte{0xef, 0x9f, 0x9a}}, {3, [3]byte{0xef, 0x9f, 0x9b}}, + {3, [3]byte{0xef, 0x9f, 0x9c}}, {3, [3]byte{0xef, 0x9f, 0x9d}}, + {3, [3]byte{0xef, 0x9f, 0x9e}}, {3, [3]byte{0xef, 0x9f, 0x9f}}, + {3, [3]byte{0xef, 0x9f, 0xa0}}, {3, [3]byte{0xef, 0x9f, 0xa1}}, + {3, [3]byte{0xef, 0x9f, 0xa2}}, {3, [3]byte{0xef, 0x9f, 0xa3}}, + {3, [3]byte{0xef, 0x9f, 0xa4}}, {3, [3]byte{0xef, 0x9f, 0xa5}}, + {3, [3]byte{0xef, 0x9f, 0xa6}}, {3, [3]byte{0xef, 0x9f, 0xa7}}, + {3, [3]byte{0xef, 0x9f, 0xa8}}, {3, [3]byte{0xef, 0x9f, 0xa9}}, + {3, [3]byte{0xef, 0x9f, 0xaa}}, {3, [3]byte{0xef, 0x9f, 0xab}}, + {3, [3]byte{0xef, 0x9f, 0xac}}, {3, [3]byte{0xef, 0x9f, 0xad}}, + {3, [3]byte{0xef, 0x9f, 0xae}}, {3, [3]byte{0xef, 0x9f, 0xaf}}, + {3, [3]byte{0xef, 0x9f, 0xb0}}, {3, [3]byte{0xef, 0x9f, 0xb1}}, + {3, [3]byte{0xef, 0x9f, 0xb2}}, {3, [3]byte{0xef, 0x9f, 0xb3}}, + {3, [3]byte{0xef, 0x9f, 0xb4}}, {3, [3]byte{0xef, 0x9f, 0xb5}}, + {3, [3]byte{0xef, 0x9f, 0xb6}}, {3, [3]byte{0xef, 0x9f, 0xb7}}, + {3, [3]byte{0xef, 0x9f, 0xb8}}, {3, [3]byte{0xef, 0x9f, 0xb9}}, + {3, [3]byte{0xef, 0x9f, 0xba}}, {3, [3]byte{0xef, 0x9f, 0xbb}}, + {3, [3]byte{0xef, 0x9f, 0xbc}}, {3, [3]byte{0xef, 0x9f, 0xbd}}, + {3, [3]byte{0xef, 0x9f, 0xbe}}, {3, [3]byte{0xef, 0x9f, 0xbf}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0x8000f780, 0x8100f781, 0x8200f782, 0x8300f783, 0x8400f784, 0x8500f785, 0x8600f786, 0x8700f787, + 0x8800f788, 0x8900f789, 0x8a00f78a, 0x8b00f78b, 0x8c00f78c, 0x8d00f78d, 0x8e00f78e, 0x8f00f78f, + 0x9000f790, 0x9100f791, 0x9200f792, 0x9300f793, 0x9400f794, 0x9500f795, 0x9600f796, 0x9700f797, + 0x9800f798, 0x9900f799, 
0x9a00f79a, 0x9b00f79b, 0x9c00f79c, 0x9d00f79d, 0x9e00f79e, 0x9f00f79f, + 0xa000f7a0, 0xa100f7a1, 0xa200f7a2, 0xa300f7a3, 0xa400f7a4, 0xa500f7a5, 0xa600f7a6, 0xa700f7a7, + 0xa800f7a8, 0xa900f7a9, 0xaa00f7aa, 0xab00f7ab, 0xac00f7ac, 0xad00f7ad, 0xae00f7ae, 0xaf00f7af, + 0xb000f7b0, 0xb100f7b1, 0xb200f7b2, 0xb300f7b3, 0xb400f7b4, 0xb500f7b5, 0xb600f7b6, 0xb700f7b7, + 0xb800f7b8, 0xb900f7b9, 0xba00f7ba, 0xbb00f7bb, 0xbc00f7bc, 0xbd00f7bd, 0xbe00f7be, 0xbf00f7bf, + 0xc000f7c0, 0xc100f7c1, 0xc200f7c2, 0xc300f7c3, 0xc400f7c4, 0xc500f7c5, 0xc600f7c6, 0xc700f7c7, + 0xc800f7c8, 0xc900f7c9, 0xca00f7ca, 0xcb00f7cb, 0xcc00f7cc, 0xcd00f7cd, 0xce00f7ce, 0xcf00f7cf, + 0xd000f7d0, 0xd100f7d1, 0xd200f7d2, 0xd300f7d3, 0xd400f7d4, 0xd500f7d5, 0xd600f7d6, 0xd700f7d7, + 0xd800f7d8, 0xd900f7d9, 0xda00f7da, 0xdb00f7db, 0xdc00f7dc, 0xdd00f7dd, 0xde00f7de, 0xdf00f7df, + 0xe000f7e0, 0xe100f7e1, 0xe200f7e2, 0xe300f7e3, 0xe400f7e4, 0xe500f7e5, 0xe600f7e6, 0xe700f7e7, + 0xe800f7e8, 0xe900f7e9, 0xea00f7ea, 0xeb00f7eb, 0xec00f7ec, 0xed00f7ed, 0xee00f7ee, 0xef00f7ef, + 0xf000f7f0, 0xf100f7f1, 0xf200f7f2, 0xf300f7f3, 0xf400f7f4, 0xf500f7f5, 0xf600f7f6, 0xf700f7f7, + 0xf800f7f8, 0xf900f7f9, 0xfa00f7fa, 0xfb00f7fb, 0xfc00f7fc, 0xfd00f7fd, 0xfe00f7fe, 0xff00f7ff, + }, +} +var listAll = []encoding.Encoding{ + CodePage037, + CodePage437, + CodePage850, + CodePage852, + CodePage855, + CodePage858, + CodePage860, + CodePage862, + CodePage863, + CodePage865, + CodePage866, + CodePage1047, + CodePage1140, + ISO8859_1, + ISO8859_2, + ISO8859_3, + ISO8859_4, + ISO8859_5, + ISO8859_6, + ISO8859_6E, + ISO8859_6I, + ISO8859_7, + ISO8859_8, + ISO8859_8E, + ISO8859_8I, + ISO8859_9, + ISO8859_10, + ISO8859_13, + ISO8859_14, + ISO8859_15, + ISO8859_16, + KOI8R, + KOI8U, + Macintosh, + MacintoshCyrillic, + Windows874, + Windows1250, + Windows1251, + Windows1252, + Windows1253, + Windows1254, + Windows1255, + Windows1256, + Windows1257, + Windows1258, + XUserDefined, +} + +// Total table size 87024 bytes (84KiB); checksum: 811C9DC5 diff --git a/vendor/gopkg.in/resty.v1/.gitignore b/vendor/gopkg.in/resty.v1/.gitignore new file mode 100644 index 000000000000..8aa2df43d357 --- /dev/null +++ b/vendor/gopkg.in/resty.v1/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +coverage.out +coverage.txt +go.sum diff --git a/vendor/gopkg.in/resty.v1/.travis.yml b/vendor/gopkg.in/resty.v1/.travis.yml new file mode 100644 index 000000000000..d1100290056b --- /dev/null +++ b/vendor/gopkg.in/resty.v1/.travis.yml @@ -0,0 +1,28 @@ +language: go + +sudo: false + +go: +# - 1.3 +# - 1.4 +# - 1.5 +# - 1.6 +# - 1.7 +# - 1.8.x +# - 1.9.x + - 1.10.x + - 1.11.x + - tip + +install: + - go get -v -t ./... + +script: + - go test ./... 
-coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) + +matrix: + allow_failures: + - go: tip diff --git a/vendor/gopkg.in/resty.v1/BUILD.bazel b/vendor/gopkg.in/resty.v1/BUILD.bazel new file mode 100644 index 000000000000..698c326c5669 --- /dev/null +++ b/vendor/gopkg.in/resty.v1/BUILD.bazel @@ -0,0 +1,36 @@ +package(default_visibility = ["//visibility:private"]) + +load("@bazel_gazelle//:def.bzl", "gazelle") +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +gazelle( + name = "gazelle", + command = "fix", + prefix = "gopkg.in/resty.v1", +) + +go_library( + name = "go_default_library", + srcs = glob( + ["*.go"], + exclude = ["*_test.go"], + ), + importpath = "gopkg.in/resty.v1", + visibility = ["//visibility:public"], + deps = ["@org_golang_x_net//publicsuffix:go_default_library"], +) + +go_test( + name = "go_default_test", + srcs = + glob( + ["*_test.go"], + exclude = ["example_test.go"], + ), + data = glob([".testdata/*"]), + embed = [":go_default_library"], + importpath = "gopkg.in/resty.v1", + deps = [ + "@org_golang_x_net//proxy:go_default_library", + ], +) diff --git a/vendor/gopkg.in/resty.v1/LICENSE b/vendor/gopkg.in/resty.v1/LICENSE new file mode 100644 index 000000000000..5cf0a2f999d9 --- /dev/null +++ b/vendor/gopkg.in/resty.v1/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015-2019 Jeevanandam M., https://myjeeva.com + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/resty.v1/README.md b/vendor/gopkg.in/resty.v1/README.md new file mode 100644 index 000000000000..8305734db876 --- /dev/null +++ b/vendor/gopkg.in/resty.v1/README.md @@ -0,0 +1,741 @@ +

+# Resty
+
+Simple HTTP and REST client library for Go (inspired by Ruby rest-client)
+
+Features section describes in detail about Resty capabilities
+
+Badges: Build Status, Code Coverage, Go Report Card, Release Version, GoDoc, License
+
+Resty Communication Channels: Chat on Gitter - Resty Community, Twitter @go_resty
    + +## News + + * Resty `v2` development is in-progress :smile: + * v1.12.0 [released](https://github.com/go-resty/resty/releases/tag/v1.12.0) and tagged on Feb 27, 2019. + * v1.11.0 [released](https://github.com/go-resty/resty/releases/tag/v1.11.0) and tagged on Jan 06, 2019. + * v1.10.3 [released](https://github.com/go-resty/resty/releases/tag/v1.10.3) and tagged on Dec 04, 2018. + * v1.0 released and tagged on Sep 25, 2017. - Resty's first version was released on Sep 15, 2015 then it grew gradually as a very handy and helpful library. Its been a two years since first release. I'm very thankful to Resty users and its [contributors](https://github.com/go-resty/resty/graphs/contributors). + +## Features + + * GET, POST, PUT, DELETE, HEAD, PATCH, OPTIONS, etc. + * Simple and chainable methods for settings and request + * Request Body can be `string`, `[]byte`, `struct`, `map`, `slice` and `io.Reader` too + * Auto detects `Content-Type` + * Buffer less processing for `io.Reader` + * [Response](https://godoc.org/gopkg.in/resty.v1#Response) object gives you more possibility + * Access as `[]byte` array - `response.Body()` OR Access as `string` - `response.String()` + * Know your `response.Time()` and when we `response.ReceivedAt()` + * Automatic marshal and unmarshal for `JSON` and `XML` content type + * Default is `JSON`, if you supply `struct/map` without header `Content-Type` + * For auto-unmarshal, refer to - + - Success scenario [Request.SetResult()](https://godoc.org/gopkg.in/resty.v1#Request.SetResult) and [Response.Result()](https://godoc.org/gopkg.in/resty.v1#Response.Result). + - Error scenario [Request.SetError()](https://godoc.org/gopkg.in/resty.v1#Request.SetError) and [Response.Error()](https://godoc.org/gopkg.in/resty.v1#Response.Error). + - Supports [RFC7807](https://tools.ietf.org/html/rfc7807) - `application/problem+json` & `application/problem+xml` + * Easy to upload one or more file(s) via `multipart/form-data` + * Auto detects file content type + * Request URL [Path Params (aka URI Params)](https://godoc.org/gopkg.in/resty.v1#Request.SetPathParams) + * Backoff Retry Mechanism with retry condition function [reference](retry_test.go) + * resty client HTTP & REST [Request](https://godoc.org/gopkg.in/resty.v1#Client.OnBeforeRequest) and [Response](https://godoc.org/gopkg.in/resty.v1#Client.OnAfterResponse) middlewares + * `Request.SetContext` supported `go1.7` and above + * Authorization option of `BasicAuth` and `Bearer` token + * Set request `ContentLength` value for all request or particular request + * Choose between HTTP and REST mode. Default is `REST` + * `HTTP` - default up to 10 redirects and no automatic response unmarshal + * `REST` - defaults to no redirects and automatic response marshal/unmarshal for `JSON` & `XML` + * Custom [Root Certificates](https://godoc.org/gopkg.in/resty.v1#Client.SetRootCertificate) and Client [Certificates](https://godoc.org/gopkg.in/resty.v1#Client.SetCertificates) + * Download/Save HTTP response directly into File, like `curl -o` flag. See [SetOutputDirectory](https://godoc.org/gopkg.in/resty.v1#Client.SetOutputDirectory) & [SetOutput](https://godoc.org/gopkg.in/resty.v1#Request.SetOutput). + * Cookies for your request and CookieJar support + * SRV Record based request instead of Host URL + * Client settings like `Timeout`, `RedirectPolicy`, `Proxy`, `TLSClientConfig`, `Transport`, etc. 
+ * Optionally allows GET request with payload, see [SetAllowGetMethodPayload](https://godoc.org/gopkg.in/resty.v1#Client.SetAllowGetMethodPayload) + * Supports registering external JSON library into resty, see [how to use](https://github.com/go-resty/resty/issues/76#issuecomment-314015250) + * Exposes Response reader without reading response (no auto-unmarshaling) if need be, see [how to use](https://github.com/go-resty/resty/issues/87#issuecomment-322100604) + * Option to specify expected `Content-Type` when response `Content-Type` header missing. Refer to [#92](https://github.com/go-resty/resty/issues/92) + * Resty design + * Have client level settings & options and also override at Request level if you want to + * Request and Response middlewares + * Create Multiple clients if you want to `resty.New()` + * Supports `http.RoundTripper` implementation, see [SetTransport](https://godoc.org/gopkg.in/resty.v1#Client.SetTransport) + * goroutine concurrent safe + * REST and HTTP modes + * Debug mode - clean and informative logging presentation + * Gzip - Go does it automatically also resty has fallback handling too + * Works fine with `HTTP/2` and `HTTP/1.1` + * [Bazel support](#bazel-support) + * Easily mock resty for testing, [for e.g.](#mocking-http-requests-using-httpmock-library) + * Well tested client library + +Resty works with `go1.3` and above. + +### Included Batteries + + * Redirect Policies - see [how to use](#redirect-policy) + * NoRedirectPolicy + * FlexibleRedirectPolicy + * DomainCheckRedirectPolicy + * etc. [more info](redirect.go) + * Retry Mechanism [how to use](#retries) + * Backoff Retry + * Conditional Retry + * SRV Record based request instead of Host URL [how to use](resty_test.go#L1412) + * etc (upcoming - throw your idea's [here](https://github.com/go-resty/resty/issues)). + +## Installation + +#### Stable Version - Production Ready + +Please refer section [Versioning](#versioning) for detailed info. + +##### go.mod + +```bash +require gopkg.in/resty.v1 v1.12.0 +``` + +##### go get +```bash +go get -u gopkg.in/resty.v1 +``` + +#### Heads up for upcoming Resty v2 + +Resty v2 release will be moving away from `gopkg.in` proxy versioning. It will completely follow and adpating Go Mod versioning recommendation. For e.g.: module definition would be `module github.com/go-resty/resty/v2`. + + +## It might be beneficial for your project :smile: + +Resty author also published following projects for Go Community. + + * [aah framework](https://aahframework.org) - A secure, flexible, rapid Go web framework. + * [THUMBAI](https://thumbai.app), [Source Code](https://github.com/thumbai/thumbai) - Go Mod Repository, Go Vanity Service and Simple Proxy Server. + * [go-model](https://github.com/jeevatkm/go-model) - Robust & Easy to use model mapper and utility methods for Go `struct`. + +## Usage + +The following samples will assist you to become as comfortable as possible with resty library. Resty comes with ready to use DefaultClient. + +Import resty into your code and refer it as `resty`. 
+ +```go +import "gopkg.in/resty.v1" +``` + +#### Simple GET + +```go +// GET request +resp, err := resty.R().Get("http://httpbin.org/get") + +// explore response object +fmt.Printf("\nError: %v", err) +fmt.Printf("\nResponse Status Code: %v", resp.StatusCode()) +fmt.Printf("\nResponse Status: %v", resp.Status()) +fmt.Printf("\nResponse Time: %v", resp.Time()) +fmt.Printf("\nResponse Received At: %v", resp.ReceivedAt()) +fmt.Printf("\nResponse Body: %v", resp) // or resp.String() or string(resp.Body()) +// more... + +/* Output +Error: +Response Status Code: 200 +Response Status: 200 OK +Response Time: 160.1151ms +Response Received At: 2018-10-16 16:28:34.8595663 -0700 PDT m=+0.166119401 +Response Body: { + "args": {}, + "headers": { + "Accept-Encoding": "gzip", + "Connection": "close", + "Host": "httpbin.org", + "User-Agent": "go-resty/1.10.0 (https://github.com/go-resty/resty)" + }, + "origin": "0.0.0.0", + "url": "http://httpbin.org/get" +} +*/ +``` + +#### Enhanced GET + +```go +resp, err := resty.R(). + SetQueryParams(map[string]string{ + "page_no": "1", + "limit": "20", + "sort":"name", + "order": "asc", + "random":strconv.FormatInt(time.Now().Unix(), 10), + }). + SetHeader("Accept", "application/json"). + SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F"). + Get("/search_result") + + +// Sample of using Request.SetQueryString method +resp, err := resty.R(). + SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more"). + SetHeader("Accept", "application/json"). + SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F"). + Get("/show_product") +``` + +#### Various POST method combinations + +```go +// POST JSON string +// No need to set content type, if you have client level setting +resp, err := resty.R(). + SetHeader("Content-Type", "application/json"). + SetBody(`{"username":"testuser", "password":"testpass"}`). + SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}). + Post("https://myapp.com/login") + +// POST []byte array +// No need to set content type, if you have client level setting +resp, err := resty.R(). + SetHeader("Content-Type", "application/json"). + SetBody([]byte(`{"username":"testuser", "password":"testpass"}`)). + SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}). + Post("https://myapp.com/login") + +// POST Struct, default is JSON content type. No need to set one +resp, err := resty.R(). + SetBody(User{Username: "testuser", Password: "testpass"}). + SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}). + SetError(&AuthError{}). // or SetError(AuthError{}). + Post("https://myapp.com/login") + +// POST Map, default is JSON content type. No need to set one +resp, err := resty.R(). + SetBody(map[string]interface{}{"username": "testuser", "password": "testpass"}). + SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}). + SetError(&AuthError{}). // or SetError(AuthError{}). + Post("https://myapp.com/login") + +// POST of raw bytes for file upload. For example: upload file to Dropbox +fileBytes, _ := ioutil.ReadFile("/Users/jeeva/mydocument.pdf") + +// See we are not setting content-type header, since go-resty automatically detects Content-Type for you +resp, err := resty.R(). + SetBody(fileBytes). + SetContentLength(true). // Dropbox expects this value + SetAuthToken(""). + SetError(&DropboxError{}). // or SetError(DropboxError{}). 
+ Post("https://content.dropboxapi.com/1/files_put/auto/resty/mydocument.pdf") // for upload Dropbox supports PUT too + +// Note: resty detects Content-Type for request body/payload if content type header is not set. +// * For struct and map data type defaults to 'application/json' +// * Fallback is plain text content type +``` + +#### Sample PUT + +You can use various combinations of `PUT` method call like demonstrated for `POST`. + +```go +// Note: This is one sample of PUT method usage, refer POST for more combination + +// Request goes as JSON content type +// No need to set auth token, error, if you have client level settings +resp, err := resty.R(). + SetBody(Article{ + Title: "go-resty", + Content: "This is my article content, oh ya!", + Author: "Jeevanandam M", + Tags: []string{"article", "sample", "resty"}, + }). + SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD"). + SetError(&Error{}). // or SetError(Error{}). + Put("https://myapp.com/article/1234") +``` + +#### Sample PATCH + +You can use various combinations of `PATCH` method call like demonstrated for `POST`. + +```go +// Note: This is one sample of PUT method usage, refer POST for more combination + +// Request goes as JSON content type +// No need to set auth token, error, if you have client level settings +resp, err := resty.R(). + SetBody(Article{ + Tags: []string{"new tag1", "new tag2"}, + }). + SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD"). + SetError(&Error{}). // or SetError(Error{}). + Patch("https://myapp.com/articles/1234") +``` + +#### Sample DELETE, HEAD, OPTIONS + +```go +// DELETE a article +// No need to set auth token, error, if you have client level settings +resp, err := resty.R(). + SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD"). + SetError(&Error{}). // or SetError(Error{}). + Delete("https://myapp.com/articles/1234") + +// DELETE a articles with payload/body as a JSON string +// No need to set auth token, error, if you have client level settings +resp, err := resty.R(). + SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD"). + SetError(&Error{}). // or SetError(Error{}). + SetHeader("Content-Type", "application/json"). + SetBody(`{article_ids: [1002, 1006, 1007, 87683, 45432] }`). + Delete("https://myapp.com/articles") + +// HEAD of resource +// No need to set auth token, if you have client level settings +resp, err := resty.R(). + SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD"). + Head("https://myapp.com/videos/hi-res-video") + +// OPTIONS of resource +// No need to set auth token, if you have client level settings +resp, err := resty.R(). + SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD"). + Options("https://myapp.com/servers/nyc-dc-01") +``` + +### Multipart File(s) upload + +#### Using io.Reader + +```go +profileImgBytes, _ := ioutil.ReadFile("/Users/jeeva/test-img.png") +notesBytes, _ := ioutil.ReadFile("/Users/jeeva/text-file.txt") + +resp, err := resty.R(). + SetFileReader("profile_img", "test-img.png", bytes.NewReader(profileImgBytes)). + SetFileReader("notes", "text-file.txt", bytes.NewReader(notesBytes)). + SetFormData(map[string]string{ + "first_name": "Jeevanandam", + "last_name": "M", + }). + Post("http://myapp.com/upload") +``` + +#### Using File directly from Path + +```go +// Single file scenario +resp, err := resty.R(). + SetFile("profile_img", "/Users/jeeva/test-img.png"). + Post("http://myapp.com/upload") + +// Multiple files scenario +resp, err := resty.R(). 
+ SetFiles(map[string]string{ + "profile_img": "/Users/jeeva/test-img.png", + "notes": "/Users/jeeva/text-file.txt", + }). + Post("http://myapp.com/upload") + +// Multipart of form fields and files +resp, err := resty.R(). + SetFiles(map[string]string{ + "profile_img": "/Users/jeeva/test-img.png", + "notes": "/Users/jeeva/text-file.txt", + }). + SetFormData(map[string]string{ + "first_name": "Jeevanandam", + "last_name": "M", + "zip_code": "00001", + "city": "my city", + "access_token": "C6A79608-782F-4ED0-A11D-BD82FAD829CD", + }). + Post("http://myapp.com/profile") +``` + +#### Sample Form submission + +```go +// just mentioning about POST as an example with simple flow +// User Login +resp, err := resty.R(). + SetFormData(map[string]string{ + "username": "jeeva", + "password": "mypass", + }). + Post("http://myapp.com/login") + +// Followed by profile update +resp, err := resty.R(). + SetFormData(map[string]string{ + "first_name": "Jeevanandam", + "last_name": "M", + "zip_code": "00001", + "city": "new city update", + }). + Post("http://myapp.com/profile") + +// Multi value form data +criteria := url.Values{ + "search_criteria": []string{"book", "glass", "pencil"}, +} +resp, err := resty.R(). + SetMultiValueFormData(criteria). + Post("http://myapp.com/search") +``` + +#### Save HTTP Response into File + +```go +// Setting output directory path, If directory not exists then resty creates one! +// This is optional one, if you're planning using absoule path in +// `Request.SetOutput` and can used together. +resty.SetOutputDirectory("/Users/jeeva/Downloads") + +// HTTP response gets saved into file, similar to curl -o flag +_, err := resty.R(). + SetOutput("plugin/ReplyWithHeader-v5.1-beta.zip"). + Get("http://bit.ly/1LouEKr") + +// OR using absolute path +// Note: output directory path is not used for absoulte path +_, err := resty.R(). + SetOutput("/MyDownloads/plugin/ReplyWithHeader-v5.1-beta.zip"). + Get("http://bit.ly/1LouEKr") +``` + +#### Request URL Path Params + +Resty provides easy to use dynamic request URL path params. Params can be set at client and request level. Client level params value can be overridden at request level. + +```go +resty.R().SetPathParams(map[string]string{ + "userId": "sample@sample.com", + "subAccountId": "100002", +}). +Get("/v1/users/{userId}/{subAccountId}/details") + +// Result: +// Composed URL - /v1/users/sample@sample.com/100002/details +``` + +#### Request and Response Middleware + +Resty provides middleware ability to manipulate for Request and Response. It is more flexible than callback approach. + +```go +// Registering Request Middleware +resty.OnBeforeRequest(func(c *resty.Client, req *resty.Request) error { + // Now you have access to Client and current Request object + // manipulate it as per your need + + return nil // if its success otherwise return error + }) + +// Registering Response Middleware +resty.OnAfterResponse(func(c *resty.Client, resp *resty.Response) error { + // Now you have access to Client and current Response object + // manipulate it as per your need + + return nil // if its success otherwise return error + }) +``` + +#### Redirect Policy + +Resty provides few ready to use redirect policy(s) also it supports multiple policies together. + +```go +// Assign Client Redirect Policy. 
Create one as per you need +resty.SetRedirectPolicy(resty.FlexibleRedirectPolicy(15)) + +// Wanna multiple policies such as redirect count, domain name check, etc +resty.SetRedirectPolicy(resty.FlexibleRedirectPolicy(20), + resty.DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net")) +``` + +##### Custom Redirect Policy + +Implement [RedirectPolicy](redirect.go#L20) interface and register it with resty client. Have a look [redirect.go](redirect.go) for more information. + +```go +// Using raw func into resty.SetRedirectPolicy +resty.SetRedirectPolicy(resty.RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error { + // Implement your logic here + + // return nil for continue redirect otherwise return error to stop/prevent redirect + return nil +})) + +//--------------------------------------------------- + +// Using struct create more flexible redirect policy +type CustomRedirectPolicy struct { + // variables goes here +} + +func (c *CustomRedirectPolicy) Apply(req *http.Request, via []*http.Request) error { + // Implement your logic here + + // return nil for continue redirect otherwise return error to stop/prevent redirect + return nil +} + +// Registering in resty +resty.SetRedirectPolicy(CustomRedirectPolicy{/* initialize variables */}) +``` + +#### Custom Root Certificates and Client Certificates + +```go +// Custom Root certificates, just supply .pem file. +// you can add one or more root certificates, its get appended +resty.SetRootCertificate("/path/to/root/pemFile1.pem") +resty.SetRootCertificate("/path/to/root/pemFile2.pem") +// ... and so on! + +// Adding Client Certificates, you add one or more certificates +// Sample for creating certificate object +// Parsing public/private key pair from a pair of files. The files must contain PEM encoded data. +cert1, err := tls.LoadX509KeyPair("certs/client.pem", "certs/client.key") +if err != nil { + log.Fatalf("ERROR client certificate: %s", err) +} +// ... + +// You add one or more certificates +resty.SetCertificates(cert1, cert2, cert3) +``` + +#### Proxy Settings - Client as well as at Request Level + +Default `Go` supports Proxy via environment variable `HTTP_PROXY`. Resty provides support via `SetProxy` & `RemoveProxy`. +Choose as per your need. + +**Client Level Proxy** settings applied to all the request + +```go +// Setting a Proxy URL and Port +resty.SetProxy("http://proxyserver:8888") + +// Want to remove proxy setting +resty.RemoveProxy() +``` + +#### Retries + +Resty uses [backoff](http://www.awsarchitectureblog.com/2015/03/backoff.html) +to increase retry intervals after each attempt. + +Usage example: + +```go +// Retries are configured per client +resty. + // Set retry count to non zero to enable retries + SetRetryCount(3). + // You can override initial retry wait time. + // Default is 100 milliseconds. + SetRetryWaitTime(5 * time.Second). + // MaxWaitTime can be overridden as well. + // Default is 2 seconds. + SetRetryMaxWaitTime(20 * time.Second) +``` + +Above setup will result in resty retrying requests returned non nil error up to +3 times with delay increased after each attempt. + +You can optionally provide client with custom retry conditions: + +```go +resty.AddRetryCondition( + // Condition function will be provided with *resty.Response as a + // parameter. It is expected to return (bool, error) pair. Resty will retry + // in case condition returns true or non nil error. 
+ func(r *resty.Response) (bool, error) { + return r.StatusCode() == http.StatusTooManyRequests, nil + }, +) +``` + +Above example will make resty retry requests ended with `429 Too Many Requests` +status code. + +Multiple retry conditions can be added. + +It is also possible to use `resty.Backoff(...)` to get arbitrary retry scenarios +implemented. [Reference](retry_test.go). + +#### Choose REST or HTTP mode + +```go +// REST mode. This is Default. +resty.SetRESTMode() + +// HTTP mode +resty.SetHTTPMode() +``` + +#### Allow GET request with Payload + +```go +// Allow GET request with Payload. This is disabled by default. +resty.SetAllowGetMethodPayload(true) +``` + +#### Wanna Multiple Clients + +```go +// Here you go! +// Client 1 +client1 := resty.New() +client1.R().Get("http://httpbin.org") +// ... + +// Client 2 +client2 := resty.New() +client2.R().Head("http://httpbin.org") +// ... + +// Bend it as per your need!!! +``` + +#### Remaining Client Settings & its Options + +```go +// Unique settings at Client level +//-------------------------------- +// Enable debug mode +resty.SetDebug(true) + +// Using you custom log writer +logFile, _ := os.OpenFile("/Users/jeeva/go-resty.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) +resty.SetLogger(logFile) + +// Assign Client TLSClientConfig +// One can set custom root-certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial +resty.SetTLSClientConfig(&tls.Config{ RootCAs: roots }) + +// or One can disable security check (https) +resty.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true }) + +// Set client timeout as per your need +resty.SetTimeout(1 * time.Minute) + + +// You can override all below settings and options at request level if you want to +//-------------------------------------------------------------------------------- +// Host URL for all request. So you can use relative URL in the request +resty.SetHostURL("http://httpbin.org") + +// Headers for all request +resty.SetHeader("Accept", "application/json") +resty.SetHeaders(map[string]string{ + "Content-Type": "application/json", + "User-Agent": "My custom User Agent String", + }) + +// Cookies for all request +resty.SetCookie(&http.Cookie{ + Name:"go-resty", + Value:"This is cookie value", + Path: "/", + Domain: "sample.com", + MaxAge: 36000, + HttpOnly: true, + Secure: false, + }) +resty.SetCookies(cookies) + +// URL query parameters for all request +resty.SetQueryParam("user_id", "00001") +resty.SetQueryParams(map[string]string{ // sample of those who use this manner + "api_key": "api-key-here", + "api_secert": "api-secert", + }) +resty.R().SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more") + +// Form data for all request. Typically used with POST and PUT +resty.SetFormData(map[string]string{ + "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F", + }) + +// Basic Auth for all request +resty.SetBasicAuth("myuser", "mypass") + +// Bearer Auth Token for all request +resty.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F") + +// Enabling Content length value for all request +resty.SetContentLength(true) + +// Registering global Error object structure for JSON/XML request +resty.SetError(&Error{}) // or resty.SetError(Error{}) +``` + +#### Unix Socket + +```go +unixSocket := "/var/run/my_socket.sock" + +// Create a Go's http.Transport so we can set it in resty. 
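+// Note: the Dial func below ignores the network and address derived from the request URL and always connects to unixSocket.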
+transport := http.Transport{ + Dial: func(_, _ string) (net.Conn, error) { + return net.Dial("unix", unixSocket) + }, +} + +// Set the previous transport that we created, set the scheme of the communication to the +// socket and set the unixSocket as the HostURL. +r := resty.New().SetTransport(&transport).SetScheme("http").SetHostURL(unixSocket) + +// No need to write the host's URL on the request, just the path. +r.R().Get("/index.html") +``` + +#### Bazel support + +Resty can be built, tested and depended upon via [Bazel](https://bazel.build). +For example, to run all tests: + +```shell +bazel test :go_default_test +``` + +#### Mocking http requests using [httpmock](https://github.com/jarcoal/httpmock) library + +In order to mock the http requests when testing your application you +could use the `httpmock` library. + +When using the default resty client, you should pass the client to the library as follow: + +```go +httpmock.ActivateNonDefault(resty.DefaultClient.GetClient()) +``` + +More detailed example of mocking resty http requests using ginko could be found [here](https://github.com/jarcoal/httpmock#ginkgo--resty-example). + +## Versioning + +resty releases versions according to [Semantic Versioning](http://semver.org) + + * `gopkg.in/resty.vX` points to appropriate tagged versions; `X` denotes version series number and it's a stable release for production use. For e.g. `gopkg.in/resty.v0`. + * Development takes place at the master branch. Although the code in master should always compile and test successfully, it might break API's. I aim to maintain backwards compatibility, but sometimes API's and behavior might be changed to fix a bug. + +## Contribution + +I would welcome your contribution! If you find any improvement or issue you want to fix, feel free to send a pull request, I like pull requests that include test cases for fix/enhancement. I have done my best to bring pretty good code coverage. Feel free to write tests. + +BTW, I'd like to know what you think about `Resty`. Kindly open an issue or send me an email; it'd mean a lot to me. + +## Creator + +[Jeevanandam M.](https://github.com/jeevatkm) (jeeva@myjeeva.com) + +## Contributors + +Have a look on [Contributors](https://github.com/go-resty/resty/graphs/contributors) page. + +## License + +Resty released under MIT license, refer [LICENSE](LICENSE) file. diff --git a/vendor/gopkg.in/resty.v1/WORKSPACE b/vendor/gopkg.in/resty.v1/WORKSPACE new file mode 100644 index 000000000000..5459d63218fb --- /dev/null +++ b/vendor/gopkg.in/resty.v1/WORKSPACE @@ -0,0 +1,27 @@ +workspace(name = "resty") + +git_repository( + name = "io_bazel_rules_go", + remote = "https://github.com/bazelbuild/rules_go.git", + tag = "0.13.0", +) + +git_repository( + name = "bazel_gazelle", + remote = "https://github.com/bazelbuild/bazel-gazelle.git", + tag = "0.13.0", +) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_rules_dependencies", + "go_register_toolchains", +) + +go_rules_dependencies() + +go_register_toolchains() + +load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies") + +gazelle_dependencies() diff --git a/vendor/gopkg.in/resty.v1/client.go b/vendor/gopkg.in/resty.v1/client.go new file mode 100644 index 000000000000..68d447fa69c2 --- /dev/null +++ b/vendor/gopkg.in/resty.v1/client.go @@ -0,0 +1,926 @@ +// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package resty + +import ( + "bytes" + "compress/gzip" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + "reflect" + "regexp" + "strings" + "sync" + "time" +) + +const ( + // MethodGet HTTP method + MethodGet = "GET" + + // MethodPost HTTP method + MethodPost = "POST" + + // MethodPut HTTP method + MethodPut = "PUT" + + // MethodDelete HTTP method + MethodDelete = "DELETE" + + // MethodPatch HTTP method + MethodPatch = "PATCH" + + // MethodHead HTTP method + MethodHead = "HEAD" + + // MethodOptions HTTP method + MethodOptions = "OPTIONS" +) + +var ( + hdrUserAgentKey = http.CanonicalHeaderKey("User-Agent") + hdrAcceptKey = http.CanonicalHeaderKey("Accept") + hdrContentTypeKey = http.CanonicalHeaderKey("Content-Type") + hdrContentLengthKey = http.CanonicalHeaderKey("Content-Length") + hdrContentEncodingKey = http.CanonicalHeaderKey("Content-Encoding") + hdrAuthorizationKey = http.CanonicalHeaderKey("Authorization") + + plainTextType = "text/plain; charset=utf-8" + jsonContentType = "application/json; charset=utf-8" + formContentType = "application/x-www-form-urlencoded" + + jsonCheck = regexp.MustCompile(`(?i:(application|text)/(json|.*\+json|json\-.*)(;|$))`) + xmlCheck = regexp.MustCompile(`(?i:(application|text)/(xml|.*\+xml)(;|$))`) + + hdrUserAgentValue = "go-resty/%s (https://github.com/go-resty/resty)" + bufPool = &sync.Pool{New: func() interface{} { return &bytes.Buffer{} }} +) + +// Client type is used for HTTP/RESTful global values +// for all request raised from the client +type Client struct { + HostURL string + QueryParam url.Values + FormData url.Values + Header http.Header + UserInfo *User + Token string + Cookies []*http.Cookie + Error reflect.Type + Debug bool + DisableWarn bool + AllowGetMethodPayload bool + Log *log.Logger + RetryCount int + RetryWaitTime time.Duration + RetryMaxWaitTime time.Duration + RetryConditions []RetryConditionFunc + JSONMarshal func(v interface{}) ([]byte, error) + JSONUnmarshal func(data []byte, v interface{}) error + + jsonEscapeHTML bool + httpClient *http.Client + setContentLength bool + isHTTPMode bool + outputDirectory string + scheme string + proxyURL *url.URL + closeConnection bool + notParseResponse bool + debugBodySizeLimit int64 + logPrefix string + pathParams map[string]string + beforeRequest []func(*Client, *Request) error + udBeforeRequest []func(*Client, *Request) error + preReqHook func(*Client, *Request) error + afterResponse []func(*Client, *Response) error + requestLog func(*RequestLog) error + responseLog func(*ResponseLog) error +} + +// User type is to hold an username and password information +type User struct { + Username, Password string +} + +//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾ +// Client methods +//___________________________________ + +// SetHostURL method is to set Host URL in the client instance. It will be used with request +// raised from this client with relative URL +// // Setting HTTP address +// resty.SetHostURL("http://myjeeva.com") +// +// // Setting HTTPS address +// resty.SetHostURL("https://myjeeva.com") +// +func (c *Client) SetHostURL(url string) *Client { + c.HostURL = strings.TrimRight(url, "/") + return c +} + +// SetHeader method sets a single header field and its value in the client instance. +// These headers will be applied to all requests raised from this client instance. +// Also it can be overridden at request level header options, see `resty.R().SetHeader` +// or `resty.R().SetHeaders`. 
+// +// Example: To set `Content-Type` and `Accept` as `application/json` +// +// resty. +// SetHeader("Content-Type", "application/json"). +// SetHeader("Accept", "application/json") +// +func (c *Client) SetHeader(header, value string) *Client { + c.Header.Set(header, value) + return c +} + +// SetHeaders method sets multiple headers field and its values at one go in the client instance. +// These headers will be applied to all requests raised from this client instance. Also it can be +// overridden at request level headers options, see `resty.R().SetHeaders` or `resty.R().SetHeader`. +// +// Example: To set `Content-Type` and `Accept` as `application/json` +// +// resty.SetHeaders(map[string]string{ +// "Content-Type": "application/json", +// "Accept": "application/json", +// }) +// +func (c *Client) SetHeaders(headers map[string]string) *Client { + for h, v := range headers { + c.Header.Set(h, v) + } + + return c +} + +// SetCookieJar method sets custom http.CookieJar in the resty client. Its way to override default. +// Example: sometimes we don't want to save cookies in api contacting, we can remove the default +// CookieJar in resty client. +// +// resty.SetCookieJar(nil) +// +func (c *Client) SetCookieJar(jar http.CookieJar) *Client { + c.httpClient.Jar = jar + return c +} + +// SetCookie method appends a single cookie in the client instance. +// These cookies will be added to all the request raised from this client instance. +// resty.SetCookie(&http.Cookie{ +// Name:"go-resty", +// Value:"This is cookie value", +// Path: "/", +// Domain: "sample.com", +// MaxAge: 36000, +// HttpOnly: true, +// Secure: false, +// }) +// +func (c *Client) SetCookie(hc *http.Cookie) *Client { + c.Cookies = append(c.Cookies, hc) + return c +} + +// SetCookies method sets an array of cookies in the client instance. +// These cookies will be added to all the request raised from this client instance. +// cookies := make([]*http.Cookie, 0) +// +// cookies = append(cookies, &http.Cookie{ +// Name:"go-resty-1", +// Value:"This is cookie 1 value", +// Path: "/", +// Domain: "sample.com", +// MaxAge: 36000, +// HttpOnly: true, +// Secure: false, +// }) +// +// cookies = append(cookies, &http.Cookie{ +// Name:"go-resty-2", +// Value:"This is cookie 2 value", +// Path: "/", +// Domain: "sample.com", +// MaxAge: 36000, +// HttpOnly: true, +// Secure: false, +// }) +// +// // Setting a cookies into resty +// resty.SetCookies(cookies) +// +func (c *Client) SetCookies(cs []*http.Cookie) *Client { + c.Cookies = append(c.Cookies, cs...) + return c +} + +// SetQueryParam method sets single parameter and its value in the client instance. +// It will be formed as query string for the request. For example: `search=kitchen%20papers&size=large` +// in the URL after `?` mark. These query params will be added to all the request raised from +// this client instance. Also it can be overridden at request level Query Param options, +// see `resty.R().SetQueryParam` or `resty.R().SetQueryParams`. +// resty. +// SetQueryParam("search", "kitchen papers"). +// SetQueryParam("size", "large") +// +func (c *Client) SetQueryParam(param, value string) *Client { + c.QueryParam.Set(param, value) + return c +} + +// SetQueryParams method sets multiple parameters and their values at one go in the client instance. +// It will be formed as query string for the request. For example: `search=kitchen%20papers&size=large` +// in the URL after `?` mark. These query params will be added to all the request raised from this +// client instance. 
Also it can be overridden at request level Query Param options, +// see `resty.R().SetQueryParams` or `resty.R().SetQueryParam`. +// resty.SetQueryParams(map[string]string{ +// "search": "kitchen papers", +// "size": "large", +// }) +// +func (c *Client) SetQueryParams(params map[string]string) *Client { + for p, v := range params { + c.SetQueryParam(p, v) + } + + return c +} + +// SetFormData method sets Form parameters and their values in the client instance. +// It's applicable only HTTP method `POST` and `PUT` and requets content type would be set as +// `application/x-www-form-urlencoded`. These form data will be added to all the request raised from +// this client instance. Also it can be overridden at request level form data, see `resty.R().SetFormData`. +// resty.SetFormData(map[string]string{ +// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F", +// "user_id": "3455454545", +// }) +// +func (c *Client) SetFormData(data map[string]string) *Client { + for k, v := range data { + c.FormData.Set(k, v) + } + + return c +} + +// SetBasicAuth method sets the basic authentication header in the HTTP request. Example: +// Authorization: Basic +// +// Example: To set the header for username "go-resty" and password "welcome" +// resty.SetBasicAuth("go-resty", "welcome") +// +// This basic auth information gets added to all the request rasied from this client instance. +// Also it can be overridden or set one at the request level is supported, see `resty.R().SetBasicAuth`. +// +func (c *Client) SetBasicAuth(username, password string) *Client { + c.UserInfo = &User{Username: username, Password: password} + return c +} + +// SetAuthToken method sets bearer auth token header in the HTTP request. Example: +// Authorization: Bearer +// +// Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F +// +// resty.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F") +// +// This bearer auth token gets added to all the request rasied from this client instance. +// Also it can be overridden or set one at the request level is supported, see `resty.R().SetAuthToken`. +// +func (c *Client) SetAuthToken(token string) *Client { + c.Token = token + return c +} + +// R method creates a request instance, its used for Get, Post, Put, Delete, Patch, Head and Options. +func (c *Client) R() *Request { + r := &Request{ + QueryParam: url.Values{}, + FormData: url.Values{}, + Header: http.Header{}, + + client: c, + multipartFiles: []*File{}, + multipartFields: []*MultipartField{}, + pathParams: map[string]string{}, + jsonEscapeHTML: true, + } + + return r +} + +// NewRequest is an alias for R(). Creates a request instance, its used for +// Get, Post, Put, Delete, Patch, Head and Options. +func (c *Client) NewRequest() *Request { + return c.R() +} + +// OnBeforeRequest method appends request middleware into the before request chain. +// Its gets applied after default `go-resty` request middlewares and before request +// been sent from `go-resty` to host server. +// resty.OnBeforeRequest(func(c *resty.Client, r *resty.Request) error { +// // Now you have access to Client and Request instance +// // manipulate it as per your need +// +// return nil // if its success otherwise return error +// }) +// +func (c *Client) OnBeforeRequest(m func(*Client, *Request) error) *Client { + c.udBeforeRequest = append(c.udBeforeRequest, m) + return c +} + +// OnAfterResponse method appends response middleware into the after response chain. 
+// Once we receive response from host server, default `go-resty` response middleware +// gets applied and then user assigened response middlewares applied. +// resty.OnAfterResponse(func(c *resty.Client, r *resty.Response) error { +// // Now you have access to Client and Response instance +// // manipulate it as per your need +// +// return nil // if its success otherwise return error +// }) +// +func (c *Client) OnAfterResponse(m func(*Client, *Response) error) *Client { + c.afterResponse = append(c.afterResponse, m) + return c +} + +// SetPreRequestHook method sets the given pre-request function into resty client. +// It is called right before the request is fired. +// +// Note: Only one pre-request hook can be registered. Use `resty.OnBeforeRequest` for mutilple. +func (c *Client) SetPreRequestHook(h func(*Client, *Request) error) *Client { + if c.preReqHook != nil { + c.Log.Printf("Overwriting an existing pre-request hook: %s", functionName(h)) + } + c.preReqHook = h + return c +} + +// SetDebug method enables the debug mode on `go-resty` client. Client logs details of every request and response. +// For `Request` it logs information such as HTTP verb, Relative URL path, Host, Headers, Body if it has one. +// For `Response` it logs information such as Status, Response Time, Headers, Body if it has one. +// resty.SetDebug(true) +// +func (c *Client) SetDebug(d bool) *Client { + c.Debug = d + return c +} + +// SetDebugBodyLimit sets the maximum size for which the response body will be logged in debug mode. +// resty.SetDebugBodyLimit(1000000) +// +func (c *Client) SetDebugBodyLimit(sl int64) *Client { + c.debugBodySizeLimit = sl + return c +} + +// OnRequestLog method used to set request log callback into resty. Registered callback gets +// called before the resty actually logs the information. +func (c *Client) OnRequestLog(rl func(*RequestLog) error) *Client { + if c.requestLog != nil { + c.Log.Printf("Overwriting an existing on-request-log callback from=%s to=%s", functionName(c.requestLog), functionName(rl)) + } + c.requestLog = rl + return c +} + +// OnResponseLog method used to set response log callback into resty. Registered callback gets +// called before the resty actually logs the information. +func (c *Client) OnResponseLog(rl func(*ResponseLog) error) *Client { + if c.responseLog != nil { + c.Log.Printf("Overwriting an existing on-response-log callback from=%s to=%s", functionName(c.responseLog), functionName(rl)) + } + c.responseLog = rl + return c +} + +// SetDisableWarn method disables the warning message on `go-resty` client. +// For example: go-resty warns the user when BasicAuth used on HTTP mode. +// resty.SetDisableWarn(true) +// +func (c *Client) SetDisableWarn(d bool) *Client { + c.DisableWarn = d + return c +} + +// SetAllowGetMethodPayload method allows the GET method with payload on `go-resty` client. +// For example: go-resty allows the user sends request with a payload on HTTP GET method. +// resty.SetAllowGetMethodPayload(true) +// +func (c *Client) SetAllowGetMethodPayload(a bool) *Client { + c.AllowGetMethodPayload = a + return c +} + +// SetLogger method sets given writer for logging go-resty request and response details. 
+// Default is os.Stderr +// file, _ := os.OpenFile("/Users/jeeva/go-resty.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) +// +// resty.SetLogger(file) +// +func (c *Client) SetLogger(w io.Writer) *Client { + c.Log = getLogger(w) + return c +} + +// SetContentLength method enables the HTTP header `Content-Length` value for every request. +// By default go-resty won't set `Content-Length`. +// resty.SetContentLength(true) +// +// Also you have an option to enable for particular request. See `resty.R().SetContentLength` +// +func (c *Client) SetContentLength(l bool) *Client { + c.setContentLength = l + return c +} + +// SetTimeout method sets timeout for request raised from client. +// resty.SetTimeout(time.Duration(1 * time.Minute)) +// +func (c *Client) SetTimeout(timeout time.Duration) *Client { + c.httpClient.Timeout = timeout + return c +} + +// SetError method is to register the global or client common `Error` object into go-resty. +// It is used for automatic unmarshalling if response status code is greater than 399 and +// content type either JSON or XML. Can be pointer or non-pointer. +// resty.SetError(&Error{}) +// // OR +// resty.SetError(Error{}) +// +func (c *Client) SetError(err interface{}) *Client { + c.Error = typeOf(err) + return c +} + +// SetRedirectPolicy method sets the client redirect poilicy. go-resty provides ready to use +// redirect policies. Wanna create one for yourself refer `redirect.go`. +// +// resty.SetRedirectPolicy(FlexibleRedirectPolicy(20)) +// +// // Need multiple redirect policies together +// resty.SetRedirectPolicy(FlexibleRedirectPolicy(20), DomainCheckRedirectPolicy("host1.com", "host2.net")) +// +func (c *Client) SetRedirectPolicy(policies ...interface{}) *Client { + for _, p := range policies { + if _, ok := p.(RedirectPolicy); !ok { + c.Log.Printf("ERORR: %v does not implement resty.RedirectPolicy (missing Apply method)", + functionName(p)) + } + } + + c.httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + for _, p := range policies { + if err := p.(RedirectPolicy).Apply(req, via); err != nil { + return err + } + } + return nil // looks good, go ahead + } + + return c +} + +// SetRetryCount method enables retry on `go-resty` client and allows you +// to set no. of retry count. Resty uses a Backoff mechanism. +func (c *Client) SetRetryCount(count int) *Client { + c.RetryCount = count + return c +} + +// SetRetryWaitTime method sets default wait time to sleep before retrying +// request. +// Default is 100 milliseconds. +func (c *Client) SetRetryWaitTime(waitTime time.Duration) *Client { + c.RetryWaitTime = waitTime + return c +} + +// SetRetryMaxWaitTime method sets max wait time to sleep before retrying +// request. +// Default is 2 seconds. +func (c *Client) SetRetryMaxWaitTime(maxWaitTime time.Duration) *Client { + c.RetryMaxWaitTime = maxWaitTime + return c +} + +// AddRetryCondition method adds a retry condition function to array of functions +// that are checked to determine if the request is retried. The request will +// retry if any of the functions return true and error is nil. 
+func (c *Client) AddRetryCondition(condition RetryConditionFunc) *Client { + c.RetryConditions = append(c.RetryConditions, condition) + return c +} + +// SetHTTPMode method sets go-resty mode to 'http' +func (c *Client) SetHTTPMode() *Client { + return c.SetMode("http") +} + +// SetRESTMode method sets go-resty mode to 'rest' +func (c *Client) SetRESTMode() *Client { + return c.SetMode("rest") +} + +// SetMode method sets go-resty client mode to given value such as 'http' & 'rest'. +// 'rest': +// - No Redirect +// - Automatic response unmarshal if it is JSON or XML +// 'http': +// - Up to 10 Redirects +// - No automatic unmarshall. Response will be treated as `response.String()` +// +// If you want more redirects, use FlexibleRedirectPolicy +// resty.SetRedirectPolicy(FlexibleRedirectPolicy(20)) +// +func (c *Client) SetMode(mode string) *Client { + // HTTP + if mode == "http" { + c.isHTTPMode = true + c.SetRedirectPolicy(FlexibleRedirectPolicy(10)) + c.afterResponse = []func(*Client, *Response) error{ + responseLogger, + saveResponseIntoFile, + } + return c + } + + // RESTful + c.isHTTPMode = false + c.SetRedirectPolicy(NoRedirectPolicy()) + c.afterResponse = []func(*Client, *Response) error{ + responseLogger, + parseResponseBody, + saveResponseIntoFile, + } + return c +} + +// Mode method returns the current client mode. Typically its a "http" or "rest". +// Default is "rest" +func (c *Client) Mode() string { + if c.isHTTPMode { + return "http" + } + return "rest" +} + +// SetTLSClientConfig method sets TLSClientConfig for underling client Transport. +// +// Example: +// // One can set custom root-certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial +// resty.SetTLSClientConfig(&tls.Config{ RootCAs: roots }) +// +// // or One can disable security check (https) +// resty.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true }) +// Note: This method overwrites existing `TLSClientConfig`. +// +func (c *Client) SetTLSClientConfig(config *tls.Config) *Client { + transport, err := c.getTransport() + if err != nil { + c.Log.Printf("ERROR %v", err) + return c + } + transport.TLSClientConfig = config + return c +} + +// SetProxy method sets the Proxy URL and Port for resty client. +// resty.SetProxy("http://proxyserver:8888") +// +// Alternatives: At request level proxy, see `Request.SetProxy`. OR Without this `SetProxy` method, +// you can also set Proxy via environment variable. By default `Go` uses setting from `HTTP_PROXY`. +// +func (c *Client) SetProxy(proxyURL string) *Client { + transport, err := c.getTransport() + if err != nil { + c.Log.Printf("ERROR %v", err) + return c + } + + if pURL, err := url.Parse(proxyURL); err == nil { + c.proxyURL = pURL + transport.Proxy = http.ProxyURL(c.proxyURL) + } else { + c.Log.Printf("ERROR %v", err) + c.RemoveProxy() + } + return c +} + +// RemoveProxy method removes the proxy configuration from resty client +// resty.RemoveProxy() +// +func (c *Client) RemoveProxy() *Client { + transport, err := c.getTransport() + if err != nil { + c.Log.Printf("ERROR %v", err) + return c + } + c.proxyURL = nil + transport.Proxy = nil + return c +} + +// SetCertificates method helps to set client certificates into resty conveniently. +// +func (c *Client) SetCertificates(certs ...tls.Certificate) *Client { + config, err := c.getTLSConfig() + if err != nil { + c.Log.Printf("ERROR %v", err) + return c + } + config.Certificates = append(config.Certificates, certs...) 
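+	// certificates appended to the TLS config are presented by the transport for client (mutual TLS) authentication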
+ return c +} + +// SetRootCertificate method helps to add one or more root certificates into resty client +// resty.SetRootCertificate("/path/to/root/pemFile.pem") +// +func (c *Client) SetRootCertificate(pemFilePath string) *Client { + rootPemData, err := ioutil.ReadFile(pemFilePath) + if err != nil { + c.Log.Printf("ERROR %v", err) + return c + } + + config, err := c.getTLSConfig() + if err != nil { + c.Log.Printf("ERROR %v", err) + return c + } + if config.RootCAs == nil { + config.RootCAs = x509.NewCertPool() + } + + config.RootCAs.AppendCertsFromPEM(rootPemData) + + return c +} + +// SetOutputDirectory method sets output directory for saving HTTP response into file. +// If the output directory not exists then resty creates one. This setting is optional one, +// if you're planning using absoule path in `Request.SetOutput` and can used together. +// resty.SetOutputDirectory("/save/http/response/here") +// +func (c *Client) SetOutputDirectory(dirPath string) *Client { + c.outputDirectory = dirPath + return c +} + +// SetTransport method sets custom `*http.Transport` or any `http.RoundTripper` +// compatible interface implementation in the resty client. +// +// NOTE: +// +// - If transport is not type of `*http.Transport` then you may not be able to +// take advantage of some of the `resty` client settings. +// +// - It overwrites the resty client transport instance and it's configurations. +// +// transport := &http.Transport{ +// // somthing like Proxying to httptest.Server, etc... +// Proxy: func(req *http.Request) (*url.URL, error) { +// return url.Parse(server.URL) +// }, +// } +// +// resty.SetTransport(transport) +// +func (c *Client) SetTransport(transport http.RoundTripper) *Client { + if transport != nil { + c.httpClient.Transport = transport + } + return c +} + +// SetScheme method sets custom scheme in the resty client. It's way to override default. +// resty.SetScheme("http") +// +func (c *Client) SetScheme(scheme string) *Client { + if !IsStringEmpty(scheme) { + c.scheme = scheme + } + + return c +} + +// SetCloseConnection method sets variable `Close` in http request struct with the given +// value. More info: https://golang.org/src/net/http/request.go +func (c *Client) SetCloseConnection(close bool) *Client { + c.closeConnection = close + return c +} + +// SetDoNotParseResponse method instructs `Resty` not to parse the response body automatically. +// Resty exposes the raw response body as `io.ReadCloser`. Also do not forget to close the body, +// otherwise you might get into connection leaks, no connection reuse. +// +// Please Note: Response middlewares are not applicable, if you use this option. Basically you have +// taken over the control of response parsing from `Resty`. +func (c *Client) SetDoNotParseResponse(parse bool) *Client { + c.notParseResponse = parse + return c +} + +// SetLogPrefix method sets the Resty logger prefix value. +func (c *Client) SetLogPrefix(prefix string) *Client { + c.logPrefix = prefix + c.Log.SetPrefix(prefix) + return c +} + +// SetPathParams method sets multiple URL path key-value pairs at one go in the +// resty client instance. +// resty.SetPathParams(map[string]string{ +// "userId": "sample@sample.com", +// "subAccountId": "100002", +// }) +// +// Result: +// URL - /v1/users/{userId}/{subAccountId}/details +// Composed URL - /v1/users/sample@sample.com/100002/details +// It replace the value of the key while composing request URL. Also it can be +// overridden at request level Path Params options, see `Request.SetPathParams`. 
+func (c *Client) SetPathParams(params map[string]string) *Client { + for p, v := range params { + c.pathParams[p] = v + } + return c +} + +// SetJSONEscapeHTML method is to enable/disable the HTML escape on JSON marshal. +// +// NOTE: This option only applicable to standard JSON Marshaller. +func (c *Client) SetJSONEscapeHTML(b bool) *Client { + c.jsonEscapeHTML = b + return c +} + +// IsProxySet method returns the true if proxy is set on client otherwise false. +func (c *Client) IsProxySet() bool { + return c.proxyURL != nil +} + +// GetClient method returns the current http.Client used by the resty client. +func (c *Client) GetClient() *http.Client { + return c.httpClient +} + +//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾ +// Client Unexported methods +//___________________________________ + +// executes the given `Request` object and returns response +func (c *Client) execute(req *Request) (*Response, error) { + defer releaseBuffer(req.bodyBuf) + // Apply Request middleware + var err error + + // user defined on before request methods + // to modify the *resty.Request object + for _, f := range c.udBeforeRequest { + if err = f(c, req); err != nil { + return nil, err + } + } + + // resty middlewares + for _, f := range c.beforeRequest { + if err = f(c, req); err != nil { + return nil, err + } + } + + // call pre-request if defined + if c.preReqHook != nil { + if err = c.preReqHook(c, req); err != nil { + return nil, err + } + } + + if hostHeader := req.Header.Get("Host"); hostHeader != "" { + req.RawRequest.Host = hostHeader + } + + if err = requestLogger(c, req); err != nil { + return nil, err + } + + req.Time = time.Now() + resp, err := c.httpClient.Do(req.RawRequest) + + response := &Response{ + Request: req, + RawResponse: resp, + receivedAt: time.Now(), + } + + if err != nil || req.notParseResponse || c.notParseResponse { + return response, err + } + + if !req.isSaveResponse { + defer closeq(resp.Body) + body := resp.Body + + // GitHub #142 & #187 + if strings.EqualFold(resp.Header.Get(hdrContentEncodingKey), "gzip") && resp.ContentLength != 0 { + if _, ok := body.(*gzip.Reader); !ok { + body, err = gzip.NewReader(body) + if err != nil { + return response, err + } + defer closeq(body) + } + } + + if response.body, err = ioutil.ReadAll(body); err != nil { + return response, err + } + + response.size = int64(len(response.body)) + } + + // Apply Response middleware + for _, f := range c.afterResponse { + if err = f(c, response); err != nil { + break + } + } + + return response, err +} + +// enables a log prefix +func (c *Client) enableLogPrefix() { + c.Log.SetFlags(log.LstdFlags) + c.Log.SetPrefix(c.logPrefix) +} + +// disables a log prefix +func (c *Client) disableLogPrefix() { + c.Log.SetFlags(0) + c.Log.SetPrefix("") +} + +// getting TLS client config if not exists then create one +func (c *Client) getTLSConfig() (*tls.Config, error) { + transport, err := c.getTransport() + if err != nil { + return nil, err + } + if transport.TLSClientConfig == nil { + transport.TLSClientConfig = &tls.Config{} + } + return transport.TLSClientConfig, nil +} + +// returns `*http.Transport` currently in use or error +// in case currently used `transport` is not an `*http.Transport` +func (c *Client) getTransport() (*http.Transport, error) { + if c.httpClient.Transport == nil { + c.SetTransport(new(http.Transport)) + } + + if transport, ok := c.httpClient.Transport.(*http.Transport); ok { + return transport, nil + } + return nil, errors.New("current transport is not an *http.Transport instance") +} + +// +// 
File +// + +// File represent file information for multipart request +type File struct { + Name string + ParamName string + io.Reader +} + +// String returns string value of current file details +func (f *File) String() string { + return fmt.Sprintf("ParamName: %v; FileName: %v", f.ParamName, f.Name) +} + +// MultipartField represent custom data part for multipart request +type MultipartField struct { + Param string + FileName string + ContentType string + io.Reader +} diff --git a/vendor/gopkg.in/resty.v1/default.go b/vendor/gopkg.in/resty.v1/default.go new file mode 100644 index 000000000000..cc6ae478cfa6 --- /dev/null +++ b/vendor/gopkg.in/resty.v1/default.go @@ -0,0 +1,327 @@ +// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "crypto/tls" + "encoding/json" + "io" + "math" + "net/http" + "net/http/cookiejar" + "net/url" + "os" + "time" + + "golang.org/x/net/publicsuffix" +) + +// DefaultClient of resty +var DefaultClient *Client + +// New method creates a new go-resty client. +func New() *Client { + cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) + return createClient(&http.Client{Jar: cookieJar}) +} + +// NewWithClient method create a new go-resty client with given `http.Client`. +func NewWithClient(hc *http.Client) *Client { + return createClient(hc) +} + +// R creates a new resty request object, it is used form a HTTP/RESTful request +// such as GET, POST, PUT, DELETE, HEAD, PATCH and OPTIONS. +func R() *Request { + return DefaultClient.R() +} + +// NewRequest is an alias for R(). Creates a new resty request object, it is used form a HTTP/RESTful request +// such as GET, POST, PUT, DELETE, HEAD, PATCH and OPTIONS. +func NewRequest() *Request { + return R() +} + +// SetHostURL sets Host URL. See `Client.SetHostURL for more information. +func SetHostURL(url string) *Client { + return DefaultClient.SetHostURL(url) +} + +// SetHeader sets single header. See `Client.SetHeader` for more information. +func SetHeader(header, value string) *Client { + return DefaultClient.SetHeader(header, value) +} + +// SetHeaders sets multiple headers. See `Client.SetHeaders` for more information. +func SetHeaders(headers map[string]string) *Client { + return DefaultClient.SetHeaders(headers) +} + +// SetCookieJar sets custom http.CookieJar. See `Client.SetCookieJar` for more information. +func SetCookieJar(jar http.CookieJar) *Client { + return DefaultClient.SetCookieJar(jar) +} + +// SetCookie sets single cookie object. See `Client.SetCookie` for more information. +func SetCookie(hc *http.Cookie) *Client { + return DefaultClient.SetCookie(hc) +} + +// SetCookies sets multiple cookie object. See `Client.SetCookies` for more information. +func SetCookies(cs []*http.Cookie) *Client { + return DefaultClient.SetCookies(cs) +} + +// SetQueryParam method sets single parameter and its value. See `Client.SetQueryParam` for more information. +func SetQueryParam(param, value string) *Client { + return DefaultClient.SetQueryParam(param, value) +} + +// SetQueryParams method sets multiple parameters and its value. See `Client.SetQueryParams` for more information. +func SetQueryParams(params map[string]string) *Client { + return DefaultClient.SetQueryParams(params) +} + +// SetFormData method sets Form parameters and its values. See `Client.SetFormData` for more information. 
+func SetFormData(data map[string]string) *Client { + return DefaultClient.SetFormData(data) +} + +// SetBasicAuth method sets the basic authentication header. See `Client.SetBasicAuth` for more information. +func SetBasicAuth(username, password string) *Client { + return DefaultClient.SetBasicAuth(username, password) +} + +// SetAuthToken method sets bearer auth token header. See `Client.SetAuthToken` for more information. +func SetAuthToken(token string) *Client { + return DefaultClient.SetAuthToken(token) +} + +// OnBeforeRequest method sets request middleware. See `Client.OnBeforeRequest` for more information. +func OnBeforeRequest(m func(*Client, *Request) error) *Client { + return DefaultClient.OnBeforeRequest(m) +} + +// OnAfterResponse method sets response middleware. See `Client.OnAfterResponse` for more information. +func OnAfterResponse(m func(*Client, *Response) error) *Client { + return DefaultClient.OnAfterResponse(m) +} + +// SetPreRequestHook method sets the pre-request hook. See `Client.SetPreRequestHook` for more information. +func SetPreRequestHook(h func(*Client, *Request) error) *Client { + return DefaultClient.SetPreRequestHook(h) +} + +// SetDebug method enables the debug mode. See `Client.SetDebug` for more information. +func SetDebug(d bool) *Client { + return DefaultClient.SetDebug(d) +} + +// SetDebugBodyLimit method sets the response body limit for debug mode. See `Client.SetDebugBodyLimit` for more information. +func SetDebugBodyLimit(sl int64) *Client { + return DefaultClient.SetDebugBodyLimit(sl) +} + +// SetAllowGetMethodPayload method allows the GET method with payload. See `Client.SetAllowGetMethodPayload` for more information. +func SetAllowGetMethodPayload(a bool) *Client { + return DefaultClient.SetAllowGetMethodPayload(a) +} + +// SetRetryCount method sets the retry count. See `Client.SetRetryCount` for more information. +func SetRetryCount(count int) *Client { + return DefaultClient.SetRetryCount(count) +} + +// SetRetryWaitTime method sets the retry wait time. See `Client.SetRetryWaitTime` for more information. +func SetRetryWaitTime(waitTime time.Duration) *Client { + return DefaultClient.SetRetryWaitTime(waitTime) +} + +// SetRetryMaxWaitTime method sets the retry max wait time. See `Client.SetRetryMaxWaitTime` for more information. +func SetRetryMaxWaitTime(maxWaitTime time.Duration) *Client { + return DefaultClient.SetRetryMaxWaitTime(maxWaitTime) +} + +// AddRetryCondition method appends check function for retry. See `Client.AddRetryCondition` for more information. +func AddRetryCondition(condition RetryConditionFunc) *Client { + return DefaultClient.AddRetryCondition(condition) +} + +// SetDisableWarn method disables warning comes from `go-resty` client. See `Client.SetDisableWarn` for more information. +func SetDisableWarn(d bool) *Client { + return DefaultClient.SetDisableWarn(d) +} + +// SetLogger method sets given writer for logging. See `Client.SetLogger` for more information. +func SetLogger(w io.Writer) *Client { + return DefaultClient.SetLogger(w) +} + +// SetContentLength method enables `Content-Length` value. See `Client.SetContentLength` for more information. +func SetContentLength(l bool) *Client { + return DefaultClient.SetContentLength(l) +} + +// SetError method is to register the global or client common `Error` object. See `Client.SetError` for more information. +func SetError(err interface{}) *Client { + return DefaultClient.SetError(err) +} + +// SetRedirectPolicy method sets the client redirect poilicy. 
See `Client.SetRedirectPolicy` for more information. +func SetRedirectPolicy(policies ...interface{}) *Client { + return DefaultClient.SetRedirectPolicy(policies...) +} + +// SetHTTPMode method sets go-resty mode into HTTP. See `Client.SetMode` for more information. +func SetHTTPMode() *Client { + return DefaultClient.SetHTTPMode() +} + +// SetRESTMode method sets go-resty mode into RESTful. See `Client.SetMode` for more information. +func SetRESTMode() *Client { + return DefaultClient.SetRESTMode() +} + +// Mode method returns the current client mode. See `Client.Mode` for more information. +func Mode() string { + return DefaultClient.Mode() +} + +// SetTLSClientConfig method sets TLSClientConfig for underling client Transport. See `Client.SetTLSClientConfig` for more information. +func SetTLSClientConfig(config *tls.Config) *Client { + return DefaultClient.SetTLSClientConfig(config) +} + +// SetTimeout method sets timeout for request. See `Client.SetTimeout` for more information. +func SetTimeout(timeout time.Duration) *Client { + return DefaultClient.SetTimeout(timeout) +} + +// SetProxy method sets Proxy for request. See `Client.SetProxy` for more information. +func SetProxy(proxyURL string) *Client { + return DefaultClient.SetProxy(proxyURL) +} + +// RemoveProxy method removes the proxy configuration. See `Client.RemoveProxy` for more information. +func RemoveProxy() *Client { + return DefaultClient.RemoveProxy() +} + +// SetCertificates method helps to set client certificates into resty conveniently. +// See `Client.SetCertificates` for more information and example. +func SetCertificates(certs ...tls.Certificate) *Client { + return DefaultClient.SetCertificates(certs...) +} + +// SetRootCertificate method helps to add one or more root certificates into resty client. +// See `Client.SetRootCertificate` for more information. +func SetRootCertificate(pemFilePath string) *Client { + return DefaultClient.SetRootCertificate(pemFilePath) +} + +// SetOutputDirectory method sets output directory. See `Client.SetOutputDirectory` for more information. +func SetOutputDirectory(dirPath string) *Client { + return DefaultClient.SetOutputDirectory(dirPath) +} + +// SetTransport method sets custom `*http.Transport` or any `http.RoundTripper` +// compatible interface implementation in the resty client. +// See `Client.SetTransport` for more information. +func SetTransport(transport http.RoundTripper) *Client { + return DefaultClient.SetTransport(transport) +} + +// SetScheme method sets custom scheme in the resty client. +// See `Client.SetScheme` for more information. +func SetScheme(scheme string) *Client { + return DefaultClient.SetScheme(scheme) +} + +// SetCloseConnection method sets close connection value in the resty client. +// See `Client.SetCloseConnection` for more information. +func SetCloseConnection(close bool) *Client { + return DefaultClient.SetCloseConnection(close) +} + +// SetDoNotParseResponse method instructs `Resty` not to parse the response body automatically. +// See `Client.SetDoNotParseResponse` for more information. +func SetDoNotParseResponse(parse bool) *Client { + return DefaultClient.SetDoNotParseResponse(parse) +} + +// SetPathParams method sets the Request path parameter key-value pairs. See +// `Client.SetPathParams` for more information. +func SetPathParams(params map[string]string) *Client { + return DefaultClient.SetPathParams(params) +} + +// IsProxySet method returns the true if proxy is set on client otherwise false. 
+// See `Client.IsProxySet` for more information. +func IsProxySet() bool { + return DefaultClient.IsProxySet() +} + +// GetClient method returns the current `http.Client` used by the default resty client. +func GetClient() *http.Client { + return DefaultClient.httpClient +} + +//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾ +// Unexported methods +//___________________________________ + +func createClient(hc *http.Client) *Client { + c := &Client{ + HostURL: "", + QueryParam: url.Values{}, + FormData: url.Values{}, + Header: http.Header{}, + UserInfo: nil, + Token: "", + Cookies: make([]*http.Cookie, 0), + Debug: false, + Log: getLogger(os.Stderr), + RetryCount: 0, + RetryWaitTime: defaultWaitTime, + RetryMaxWaitTime: defaultMaxWaitTime, + JSONMarshal: json.Marshal, + JSONUnmarshal: json.Unmarshal, + jsonEscapeHTML: true, + httpClient: hc, + debugBodySizeLimit: math.MaxInt32, + pathParams: make(map[string]string), + } + + // Log Prefix + c.SetLogPrefix("RESTY ") + + // Default redirect policy + c.SetRedirectPolicy(NoRedirectPolicy()) + + // default before request middlewares + c.beforeRequest = []func(*Client, *Request) error{ + parseRequestURL, + parseRequestHeader, + parseRequestBody, + createHTTPRequest, + addCredentials, + } + + // user defined request middlewares + c.udBeforeRequest = []func(*Client, *Request) error{} + + // default after response middlewares + c.afterResponse = []func(*Client, *Response) error{ + responseLogger, + parseResponseBody, + saveResponseIntoFile, + } + + return c +} + +func init() { + DefaultClient = New() +} diff --git a/vendor/gopkg.in/resty.v1/go.mod b/vendor/gopkg.in/resty.v1/go.mod new file mode 100644 index 000000000000..61341be87bce --- /dev/null +++ b/vendor/gopkg.in/resty.v1/go.mod @@ -0,0 +1,3 @@ +module gopkg.in/resty.v1 + +require golang.org/x/net v0.0.0-20181220203305-927f97764cc3 diff --git a/vendor/gopkg.in/resty.v1/middleware.go b/vendor/gopkg.in/resty.v1/middleware.go new file mode 100644 index 000000000000..9b6f102ed6fb --- /dev/null +++ b/vendor/gopkg.in/resty.v1/middleware.go @@ -0,0 +1,469 @@ +// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package resty + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/url" + "os" + "path/filepath" + "reflect" + "strings" + "time" +) + +//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾ +// Request Middleware(s) +//___________________________________ + +func parseRequestURL(c *Client, r *Request) error { + // GitHub #103 Path Params + if len(r.pathParams) > 0 { + for p, v := range r.pathParams { + r.URL = strings.Replace(r.URL, "{"+p+"}", url.PathEscape(v), -1) + } + } + if len(c.pathParams) > 0 { + for p, v := range c.pathParams { + r.URL = strings.Replace(r.URL, "{"+p+"}", url.PathEscape(v), -1) + } + } + + // Parsing request URL + reqURL, err := url.Parse(r.URL) + if err != nil { + return err + } + + // If Request.URL is relative path then added c.HostURL into + // the request URL otherwise Request.URL will be used as-is + if !reqURL.IsAbs() { + r.URL = reqURL.String() + if len(r.URL) > 0 && r.URL[0] != '/' { + r.URL = "/" + r.URL + } + + reqURL, err = url.Parse(c.HostURL + r.URL) + if err != nil { + return err + } + } + + // Adding Query Param + query := make(url.Values) + for k, v := range c.QueryParam { + for _, iv := range v { + query.Add(k, iv) + } + } + + for k, v := range r.QueryParam { + // remove query param from client level by key + // since overrides happens for that key in the request + query.Del(k) + + for _, iv := range v { + query.Add(k, iv) + } + } + + // GitHub #123 Preserve query string order partially. + // Since not feasible in `SetQuery*` resty methods, because + // standard package `url.Encode(...)` sorts the query params + // alphabetically + if len(query) > 0 { + if IsStringEmpty(reqURL.RawQuery) { + reqURL.RawQuery = query.Encode() + } else { + reqURL.RawQuery = reqURL.RawQuery + "&" + query.Encode() + } + } + + r.URL = reqURL.String() + + return nil +} + +func parseRequestHeader(c *Client, r *Request) error { + hdr := make(http.Header) + for k := range c.Header { + hdr[k] = append(hdr[k], c.Header[k]...) + } + + for k := range r.Header { + hdr.Del(k) + hdr[k] = append(hdr[k], r.Header[k]...) 
+ } + + if IsStringEmpty(hdr.Get(hdrUserAgentKey)) { + hdr.Set(hdrUserAgentKey, fmt.Sprintf(hdrUserAgentValue, Version)) + } + + ct := hdr.Get(hdrContentTypeKey) + if IsStringEmpty(hdr.Get(hdrAcceptKey)) && !IsStringEmpty(ct) && + (IsJSONType(ct) || IsXMLType(ct)) { + hdr.Set(hdrAcceptKey, hdr.Get(hdrContentTypeKey)) + } + + r.Header = hdr + + return nil +} + +func parseRequestBody(c *Client, r *Request) (err error) { + if isPayloadSupported(r.Method, c.AllowGetMethodPayload) { + // Handling Multipart + if r.isMultiPart && !(r.Method == MethodPatch) { + if err = handleMultipart(c, r); err != nil { + return + } + + goto CL + } + + // Handling Form Data + if len(c.FormData) > 0 || len(r.FormData) > 0 { + handleFormData(c, r) + + goto CL + } + + // Handling Request body + if r.Body != nil { + handleContentType(c, r) + + if err = handleRequestBody(c, r); err != nil { + return + } + } + } + +CL: + // by default resty won't set content length, you can if you want to :) + if (c.setContentLength || r.setContentLength) && r.bodyBuf != nil { + r.Header.Set(hdrContentLengthKey, fmt.Sprintf("%d", r.bodyBuf.Len())) + } + + return +} + +func createHTTPRequest(c *Client, r *Request) (err error) { + if r.bodyBuf == nil { + if reader, ok := r.Body.(io.Reader); ok { + r.RawRequest, err = http.NewRequest(r.Method, r.URL, reader) + } else { + r.RawRequest, err = http.NewRequest(r.Method, r.URL, nil) + } + } else { + r.RawRequest, err = http.NewRequest(r.Method, r.URL, r.bodyBuf) + } + + if err != nil { + return + } + + // Assign close connection option + r.RawRequest.Close = c.closeConnection + + // Add headers into http request + r.RawRequest.Header = r.Header + + // Add cookies into http request + for _, cookie := range c.Cookies { + r.RawRequest.AddCookie(cookie) + } + + // it's for non-http scheme option + if r.RawRequest.URL != nil && r.RawRequest.URL.Scheme == "" { + r.RawRequest.URL.Scheme = c.scheme + r.RawRequest.URL.Host = r.URL + } + + // Use context if it was specified + r.addContextIfAvailable() + + return +} + +func addCredentials(c *Client, r *Request) error { + var isBasicAuth bool + // Basic Auth + if r.UserInfo != nil { // takes precedence + r.RawRequest.SetBasicAuth(r.UserInfo.Username, r.UserInfo.Password) + isBasicAuth = true + } else if c.UserInfo != nil { + r.RawRequest.SetBasicAuth(c.UserInfo.Username, c.UserInfo.Password) + isBasicAuth = true + } + + if !c.DisableWarn { + if isBasicAuth && !strings.HasPrefix(r.URL, "https") { + c.Log.Println("WARNING - Using Basic Auth in HTTP mode is not secure.") + } + } + + // Token Auth + if !IsStringEmpty(r.Token) { // takes precedence + r.RawRequest.Header.Set(hdrAuthorizationKey, "Bearer "+r.Token) + } else if !IsStringEmpty(c.Token) { + r.RawRequest.Header.Set(hdrAuthorizationKey, "Bearer "+c.Token) + } + + return nil +} + +func requestLogger(c *Client, r *Request) error { + if c.Debug { + rr := r.RawRequest + rl := &RequestLog{Header: copyHeaders(rr.Header), Body: r.fmtBodyString()} + if c.requestLog != nil { + if err := c.requestLog(rl); err != nil { + return err + } + } + + reqLog := "\n---------------------- REQUEST LOG -----------------------\n" + + fmt.Sprintf("%s %s %s\n", r.Method, rr.URL.RequestURI(), rr.Proto) + + fmt.Sprintf("HOST : %s\n", rr.URL.Host) + + fmt.Sprintf("HEADERS:\n") + + composeHeaders(rl.Header) + "\n" + + fmt.Sprintf("BODY :\n%v\n", rl.Body) + + "----------------------------------------------------------\n" + + c.Log.Print(reqLog) + } + + return nil +} + +//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾ +// Response 
Middleware(s) +//___________________________________ + +func responseLogger(c *Client, res *Response) error { + if c.Debug { + rl := &ResponseLog{Header: copyHeaders(res.Header()), Body: res.fmtBodyString(c.debugBodySizeLimit)} + if c.responseLog != nil { + if err := c.responseLog(rl); err != nil { + return err + } + } + + resLog := "\n---------------------- RESPONSE LOG -----------------------\n" + + fmt.Sprintf("STATUS : %s\n", res.Status()) + + fmt.Sprintf("RECEIVED AT : %v\n", res.ReceivedAt().Format(time.RFC3339Nano)) + + fmt.Sprintf("RESPONSE TIME : %v\n", res.Time()) + + "HEADERS:\n" + + composeHeaders(rl.Header) + "\n" + if res.Request.isSaveResponse { + resLog += fmt.Sprintf("BODY :\n***** RESPONSE WRITTEN INTO FILE *****\n") + } else { + resLog += fmt.Sprintf("BODY :\n%v\n", rl.Body) + } + resLog += "----------------------------------------------------------\n" + + c.Log.Print(resLog) + } + + return nil +} + +func parseResponseBody(c *Client, res *Response) (err error) { + if res.StatusCode() == http.StatusNoContent { + return + } + // Handles only JSON or XML content type + ct := firstNonEmpty(res.Header().Get(hdrContentTypeKey), res.Request.fallbackContentType) + if IsJSONType(ct) || IsXMLType(ct) { + // HTTP status code > 199 and < 300, considered as Result + if res.IsSuccess() { + if res.Request.Result != nil { + err = Unmarshalc(c, ct, res.body, res.Request.Result) + return + } + } + + // HTTP status code > 399, considered as Error + if res.IsError() { + // global error interface + if res.Request.Error == nil && c.Error != nil { + res.Request.Error = reflect.New(c.Error).Interface() + } + + if res.Request.Error != nil { + err = Unmarshalc(c, ct, res.body, res.Request.Error) + } + } + } + + return +} + +func handleMultipart(c *Client, r *Request) (err error) { + r.bodyBuf = acquireBuffer() + w := multipart.NewWriter(r.bodyBuf) + + for k, v := range c.FormData { + for _, iv := range v { + if err = w.WriteField(k, iv); err != nil { + return err + } + } + } + + for k, v := range r.FormData { + for _, iv := range v { + if strings.HasPrefix(k, "@") { // file + err = addFile(w, k[1:], iv) + if err != nil { + return + } + } else { // form value + if err = w.WriteField(k, iv); err != nil { + return err + } + } + } + } + + // #21 - adding io.Reader support + if len(r.multipartFiles) > 0 { + for _, f := range r.multipartFiles { + err = addFileReader(w, f) + if err != nil { + return + } + } + } + + // GitHub #130 adding multipart field support with content type + if len(r.multipartFields) > 0 { + for _, mf := range r.multipartFields { + if err = addMultipartFormField(w, mf); err != nil { + return + } + } + } + + r.Header.Set(hdrContentTypeKey, w.FormDataContentType()) + err = w.Close() + + return +} + +func handleFormData(c *Client, r *Request) { + formData := url.Values{} + + for k, v := range c.FormData { + for _, iv := range v { + formData.Add(k, iv) + } + } + + for k, v := range r.FormData { + // remove form data field from client level by key + // since overrides happens for that key in the request + formData.Del(k) + + for _, iv := range v { + formData.Add(k, iv) + } + } + + r.bodyBuf = bytes.NewBuffer([]byte(formData.Encode())) + r.Header.Set(hdrContentTypeKey, formContentType) + r.isFormData = true +} + +func handleContentType(c *Client, r *Request) { + contentType := r.Header.Get(hdrContentTypeKey) + if IsStringEmpty(contentType) { + contentType = DetectContentType(r.Body) + r.Header.Set(hdrContentTypeKey, contentType) + } +} + +func handleRequestBody(c *Client, r *Request) 
(err error) { + var bodyBytes []byte + contentType := r.Header.Get(hdrContentTypeKey) + kind := kindOf(r.Body) + r.bodyBuf = nil + + if reader, ok := r.Body.(io.Reader); ok { + if c.setContentLength || r.setContentLength { // keep backward compability + r.bodyBuf = acquireBuffer() + _, err = r.bodyBuf.ReadFrom(reader) + r.Body = nil + } else { + // Otherwise buffer less processing for `io.Reader`, sounds good. + return + } + } else if b, ok := r.Body.([]byte); ok { + bodyBytes = b + } else if s, ok := r.Body.(string); ok { + bodyBytes = []byte(s) + } else if IsJSONType(contentType) && + (kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice) { + bodyBytes, err = jsonMarshal(c, r, r.Body) + } else if IsXMLType(contentType) && (kind == reflect.Struct) { + bodyBytes, err = xml.Marshal(r.Body) + } + + if bodyBytes == nil && r.bodyBuf == nil { + err = errors.New("unsupported 'Body' type/value") + } + + // if any errors during body bytes handling, return it + if err != nil { + return + } + + // []byte into Buffer + if bodyBytes != nil && r.bodyBuf == nil { + r.bodyBuf = acquireBuffer() + _, _ = r.bodyBuf.Write(bodyBytes) + } + + return +} + +func saveResponseIntoFile(c *Client, res *Response) error { + if res.Request.isSaveResponse { + file := "" + + if len(c.outputDirectory) > 0 && !filepath.IsAbs(res.Request.outputFile) { + file += c.outputDirectory + string(filepath.Separator) + } + + file = filepath.Clean(file + res.Request.outputFile) + if err := createDirectory(filepath.Dir(file)); err != nil { + return err + } + + outFile, err := os.Create(file) + if err != nil { + return err + } + defer closeq(outFile) + + // io.Copy reads maximum 32kb size, it is perfect for large file download too + defer closeq(res.RawResponse.Body) + + written, err := io.Copy(outFile, res.RawResponse.Body) + if err != nil { + return err + } + + res.size = written + } + + return nil +} diff --git a/vendor/gopkg.in/resty.v1/redirect.go b/vendor/gopkg.in/resty.v1/redirect.go new file mode 100644 index 000000000000..b426134ad433 --- /dev/null +++ b/vendor/gopkg.in/resty.v1/redirect.go @@ -0,0 +1,99 @@ +// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "errors" + "fmt" + "net" + "net/http" + "strings" +) + +type ( + // RedirectPolicy to regulate the redirects in the resty client. + // Objects implementing the RedirectPolicy interface can be registered as + // + // Apply function should return nil to continue the redirect jounery, otherwise + // return error to stop the redirect. + RedirectPolicy interface { + Apply(req *http.Request, via []*http.Request) error + } + + // The RedirectPolicyFunc type is an adapter to allow the use of ordinary functions as RedirectPolicy. + // If f is a function with the appropriate signature, RedirectPolicyFunc(f) is a RedirectPolicy object that calls f. + RedirectPolicyFunc func(*http.Request, []*http.Request) error +) + +// Apply calls f(req, via). 
+func (f RedirectPolicyFunc) Apply(req *http.Request, via []*http.Request) error { + return f(req, via) +} + +// NoRedirectPolicy is used to disable redirects in the HTTP client +// resty.SetRedirectPolicy(NoRedirectPolicy()) +func NoRedirectPolicy() RedirectPolicy { + return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error { + return errors.New("auto redirect is disabled") + }) +} + +// FlexibleRedirectPolicy is convenient method to create No of redirect policy for HTTP client. +// resty.SetRedirectPolicy(FlexibleRedirectPolicy(20)) +func FlexibleRedirectPolicy(noOfRedirect int) RedirectPolicy { + return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error { + if len(via) >= noOfRedirect { + return fmt.Errorf("stopped after %d redirects", noOfRedirect) + } + + checkHostAndAddHeaders(req, via[0]) + + return nil + }) +} + +// DomainCheckRedirectPolicy is convenient method to define domain name redirect rule in resty client. +// Redirect is allowed for only mentioned host in the policy. +// resty.SetRedirectPolicy(DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net")) +func DomainCheckRedirectPolicy(hostnames ...string) RedirectPolicy { + hosts := make(map[string]bool) + for _, h := range hostnames { + hosts[strings.ToLower(h)] = true + } + + fn := RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error { + if ok := hosts[getHostname(req.URL.Host)]; !ok { + return errors.New("redirect is not allowed as per DomainCheckRedirectPolicy") + } + + return nil + }) + + return fn +} + +func getHostname(host string) (hostname string) { + if strings.Index(host, ":") > 0 { + host, _, _ = net.SplitHostPort(host) + } + hostname = strings.ToLower(host) + return +} + +// By default Golang will not redirect request headers +// after go throughing various discussion comments from thread +// https://github.com/golang/go/issues/4800 +// go-resty will add all the headers during a redirect for the same host +func checkHostAndAddHeaders(cur *http.Request, pre *http.Request) { + curHostname := getHostname(cur.URL.Host) + preHostname := getHostname(pre.URL.Host) + if strings.EqualFold(curHostname, preHostname) { + for key, val := range pre.Header { + cur.Header[key] = val + } + } else { // only library User-Agent header is added + cur.Header.Set(hdrUserAgentKey, fmt.Sprintf(hdrUserAgentValue, Version)) + } +} diff --git a/vendor/gopkg.in/resty.v1/request.go b/vendor/gopkg.in/resty.v1/request.go new file mode 100644 index 000000000000..c6adff3dd802 --- /dev/null +++ b/vendor/gopkg.in/resty.v1/request.go @@ -0,0 +1,586 @@ +// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net" + "net/url" + "reflect" + "strings" +) + +// SRVRecord holds the data to query the SRV record for the following service +type SRVRecord struct { + Service string + Domain string +} + +// SetHeader method is to set a single header field and its value in the current request. +// Example: To set `Content-Type` and `Accept` as `application/json`. +// resty.R(). +// SetHeader("Content-Type", "application/json"). +// SetHeader("Accept", "application/json") +// +// Also you can override header value, which was set at client instance level. 
+// +func (r *Request) SetHeader(header, value string) *Request { + r.Header.Set(header, value) + return r +} + +// SetHeaders method sets multiple headers field and its values at one go in the current request. +// Example: To set `Content-Type` and `Accept` as `application/json` +// +// resty.R(). +// SetHeaders(map[string]string{ +// "Content-Type": "application/json", +// "Accept": "application/json", +// }) +// Also you can override header value, which was set at client instance level. +// +func (r *Request) SetHeaders(headers map[string]string) *Request { + for h, v := range headers { + r.SetHeader(h, v) + } + + return r +} + +// SetQueryParam method sets single parameter and its value in the current request. +// It will be formed as query string for the request. +// Example: `search=kitchen%20papers&size=large` in the URL after `?` mark. +// resty.R(). +// SetQueryParam("search", "kitchen papers"). +// SetQueryParam("size", "large") +// Also you can override query params value, which was set at client instance level +// +func (r *Request) SetQueryParam(param, value string) *Request { + r.QueryParam.Set(param, value) + return r +} + +// SetQueryParams method sets multiple parameters and its values at one go in the current request. +// It will be formed as query string for the request. +// Example: `search=kitchen%20papers&size=large` in the URL after `?` mark. +// resty.R(). +// SetQueryParams(map[string]string{ +// "search": "kitchen papers", +// "size": "large", +// }) +// Also you can override query params value, which was set at client instance level +// +func (r *Request) SetQueryParams(params map[string]string) *Request { + for p, v := range params { + r.SetQueryParam(p, v) + } + + return r +} + +// SetMultiValueQueryParams method appends multiple parameters with multi-value +// at one go in the current request. It will be formed as query string for the request. +// Example: `status=pending&status=approved&status=open` in the URL after `?` mark. +// resty.R(). +// SetMultiValueQueryParams(url.Values{ +// "status": []string{"pending", "approved", "open"}, +// }) +// Also you can override query params value, which was set at client instance level +// +func (r *Request) SetMultiValueQueryParams(params url.Values) *Request { + for p, v := range params { + for _, pv := range v { + r.QueryParam.Add(p, pv) + } + } + + return r +} + +// SetQueryString method provides ability to use string as an input to set URL query string for the request. +// +// Using String as an input +// resty.R(). +// SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more") +// +func (r *Request) SetQueryString(query string) *Request { + params, err := url.ParseQuery(strings.TrimSpace(query)) + if err == nil { + for p, v := range params { + for _, pv := range v { + r.QueryParam.Add(p, pv) + } + } + } else { + r.client.Log.Printf("ERROR %v", err) + } + return r +} + +// SetFormData method sets Form parameters and their values in the current request. +// It's applicable only HTTP method `POST` and `PUT` and requests content type would be set as +// `application/x-www-form-urlencoded`. +// resty.R(). 
+// SetFormData(map[string]string{ +// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F", +// "user_id": "3455454545", +// }) +// Also you can override form data value, which was set at client instance level +// +func (r *Request) SetFormData(data map[string]string) *Request { + for k, v := range data { + r.FormData.Set(k, v) + } + + return r +} + +// SetMultiValueFormData method appends multiple form parameters with multi-value +// at one go in the current request. +// resty.R(). +// SetMultiValueFormData(url.Values{ +// "search_criteria": []string{"book", "glass", "pencil"}, +// }) +// Also you can override form data value, which was set at client instance level +// +func (r *Request) SetMultiValueFormData(params url.Values) *Request { + for k, v := range params { + for _, kv := range v { + r.FormData.Add(k, kv) + } + } + + return r +} + +// SetBody method sets the request body for the request. It supports various realtime needs as easy. +// We can say its quite handy or powerful. Supported request body data types is `string`, +// `[]byte`, `struct`, `map`, `slice` and `io.Reader`. Body value can be pointer or non-pointer. +// Automatic marshalling for JSON and XML content type, if it is `struct`, `map`, or `slice`. +// +// Note: `io.Reader` is processed as bufferless mode while sending request. +// +// Example: +// +// Struct as a body input, based on content type, it will be marshalled. +// resty.R(). +// SetBody(User{ +// Username: "jeeva@myjeeva.com", +// Password: "welcome2resty", +// }) +// +// Map as a body input, based on content type, it will be marshalled. +// resty.R(). +// SetBody(map[string]interface{}{ +// "username": "jeeva@myjeeva.com", +// "password": "welcome2resty", +// "address": &Address{ +// Address1: "1111 This is my street", +// Address2: "Apt 201", +// City: "My City", +// State: "My State", +// ZipCode: 00000, +// }, +// }) +// +// String as a body input. Suitable for any need as a string input. +// resty.R(). +// SetBody(`{ +// "username": "jeeva@getrightcare.com", +// "password": "admin" +// }`) +// +// []byte as a body input. Suitable for raw request such as file upload, serialize & deserialize, etc. +// resty.R(). +// SetBody([]byte("This is my raw request, sent as-is")) +// +func (r *Request) SetBody(body interface{}) *Request { + r.Body = body + return r +} + +// SetResult method is to register the response `Result` object for automatic unmarshalling in the RESTful mode +// if response status code is between 200 and 299 and content type either JSON or XML. +// +// Note: Result object can be pointer or non-pointer. +// resty.R().SetResult(&AuthToken{}) +// // OR +// resty.R().SetResult(AuthToken{}) +// +// Accessing a result value +// response.Result().(*AuthToken) +// +func (r *Request) SetResult(res interface{}) *Request { + r.Result = getPointer(res) + return r +} + +// SetError method is to register the request `Error` object for automatic unmarshalling in the RESTful mode +// if response status code is greater than 399 and content type either JSON or XML. +// +// Note: Error object can be pointer or non-pointer. +// resty.R().SetError(&AuthError{}) +// // OR +// resty.R().SetError(AuthError{}) +// +// Accessing a error value +// response.Error().(*AuthError) +// +func (r *Request) SetError(err interface{}) *Request { + r.Error = getPointer(err) + return r +} + +// SetFile method is to set single file field name and its path for multipart upload. +// resty.R(). 
+// SetFile("my_file", "/Users/jeeva/Gas Bill - Sep.pdf") +// +func (r *Request) SetFile(param, filePath string) *Request { + r.isMultiPart = true + r.FormData.Set("@"+param, filePath) + return r +} + +// SetFiles method is to set multiple file field name and its path for multipart upload. +// resty.R(). +// SetFiles(map[string]string{ +// "my_file1": "/Users/jeeva/Gas Bill - Sep.pdf", +// "my_file2": "/Users/jeeva/Electricity Bill - Sep.pdf", +// "my_file3": "/Users/jeeva/Water Bill - Sep.pdf", +// }) +// +func (r *Request) SetFiles(files map[string]string) *Request { + r.isMultiPart = true + + for f, fp := range files { + r.FormData.Set("@"+f, fp) + } + + return r +} + +// SetFileReader method is to set single file using io.Reader for multipart upload. +// resty.R(). +// SetFileReader("profile_img", "my-profile-img.png", bytes.NewReader(profileImgBytes)). +// SetFileReader("notes", "user-notes.txt", bytes.NewReader(notesBytes)) +// +func (r *Request) SetFileReader(param, fileName string, reader io.Reader) *Request { + r.isMultiPart = true + r.multipartFiles = append(r.multipartFiles, &File{ + Name: fileName, + ParamName: param, + Reader: reader, + }) + return r +} + +// SetMultipartField method is to set custom data using io.Reader for multipart upload. +func (r *Request) SetMultipartField(param, fileName, contentType string, reader io.Reader) *Request { + r.isMultiPart = true + r.multipartFields = append(r.multipartFields, &MultipartField{ + Param: param, + FileName: fileName, + ContentType: contentType, + Reader: reader, + }) + return r +} + +// SetMultipartFields method is to set multiple data fields using io.Reader for multipart upload. +// Example: +// resty.R().SetMultipartFields( +// &resty.MultipartField{ +// Param: "uploadManifest1", +// FileName: "upload-file-1.json", +// ContentType: "application/json", +// Reader: strings.NewReader(`{"input": {"name": "Uploaded document 1", "_filename" : ["file1.txt"]}}`), +// }, +// &resty.MultipartField{ +// Param: "uploadManifest2", +// FileName: "upload-file-2.json", +// ContentType: "application/json", +// Reader: strings.NewReader(`{"input": {"name": "Uploaded document 2", "_filename" : ["file2.txt"]}}`), +// }) +// +// If you have slice already, then simply call- +// resty.R().SetMultipartFields(fields...) +func (r *Request) SetMultipartFields(fields ...*MultipartField) *Request { + r.isMultiPart = true + r.multipartFields = append(r.multipartFields, fields...) + return r +} + +// SetContentLength method sets the HTTP header `Content-Length` value for current request. +// By default go-resty won't set `Content-Length`. Also you have an option to enable for every +// request. See `resty.SetContentLength` +// resty.R().SetContentLength(true) +// +func (r *Request) SetContentLength(l bool) *Request { + r.setContentLength = true + return r +} + +// SetBasicAuth method sets the basic authentication header in the current HTTP request. +// For Header example: +// Authorization: Basic +// +// To set the header for username "go-resty" and password "welcome" +// resty.R().SetBasicAuth("go-resty", "welcome") +// +// This method overrides the credentials set by method `resty.SetBasicAuth`. +// +func (r *Request) SetBasicAuth(username, password string) *Request { + r.UserInfo = &User{Username: username, Password: password} + return r +} + +// SetAuthToken method sets bearer auth token header in the current HTTP request. 
Header example: +// Authorization: Bearer +// +// Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F +// +// resty.R().SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F") +// +// This method overrides the Auth token set by method `resty.SetAuthToken`. +// +func (r *Request) SetAuthToken(token string) *Request { + r.Token = token + return r +} + +// SetOutput method sets the output file for current HTTP request. Current HTTP response will be +// saved into given file. It is similar to `curl -o` flag. Absolute path or relative path can be used. +// If is it relative path then output file goes under the output directory, as mentioned +// in the `Client.SetOutputDirectory`. +// resty.R(). +// SetOutput("/Users/jeeva/Downloads/ReplyWithHeader-v5.1-beta.zip"). +// Get("http://bit.ly/1LouEKr") +// +// Note: In this scenario `Response.Body` might be nil. +func (r *Request) SetOutput(file string) *Request { + r.outputFile = file + r.isSaveResponse = true + return r +} + +// SetSRV method sets the details to query the service SRV record and execute the +// request. +// resty.R(). +// SetSRV(SRVRecord{"web", "testservice.com"}). +// Get("/get") +func (r *Request) SetSRV(srv *SRVRecord) *Request { + r.SRV = srv + return r +} + +// SetDoNotParseResponse method instructs `Resty` not to parse the response body automatically. +// Resty exposes the raw response body as `io.ReadCloser`. Also do not forget to close the body, +// otherwise you might get into connection leaks, no connection reuse. +// +// Please Note: Response middlewares are not applicable, if you use this option. Basically you have +// taken over the control of response parsing from `Resty`. +func (r *Request) SetDoNotParseResponse(parse bool) *Request { + r.notParseResponse = parse + return r +} + +// SetPathParams method sets multiple URL path key-value pairs at one go in the +// resty current request instance. +// resty.R().SetPathParams(map[string]string{ +// "userId": "sample@sample.com", +// "subAccountId": "100002", +// }) +// +// Result: +// URL - /v1/users/{userId}/{subAccountId}/details +// Composed URL - /v1/users/sample@sample.com/100002/details +// It replace the value of the key while composing request URL. Also you can +// override Path Params value, which was set at client instance level. +func (r *Request) SetPathParams(params map[string]string) *Request { + for p, v := range params { + r.pathParams[p] = v + } + return r +} + +// ExpectContentType method allows to provide fallback `Content-Type` for automatic unmarshalling +// when `Content-Type` response header is unavailable. +func (r *Request) ExpectContentType(contentType string) *Request { + r.fallbackContentType = contentType + return r +} + +// SetJSONEscapeHTML method is to enable/disable the HTML escape on JSON marshal. +// +// NOTE: This option only applicable to standard JSON Marshaller. +func (r *Request) SetJSONEscapeHTML(b bool) *Request { + r.jsonEscapeHTML = b + return r +} + +//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾ +// HTTP verb method starts here +//___________________________________ + +// Get method does GET HTTP request. It's defined in section 4.3.1 of RFC7231. +func (r *Request) Get(url string) (*Response, error) { + return r.Execute(MethodGet, url) +} + +// Head method does HEAD HTTP request. It's defined in section 4.3.2 of RFC7231. +func (r *Request) Head(url string) (*Response, error) { + return r.Execute(MethodHead, url) +} + +// Post method does POST HTTP request. 
It's defined in section 4.3.3 of RFC7231. +func (r *Request) Post(url string) (*Response, error) { + return r.Execute(MethodPost, url) +} + +// Put method does PUT HTTP request. It's defined in section 4.3.4 of RFC7231. +func (r *Request) Put(url string) (*Response, error) { + return r.Execute(MethodPut, url) +} + +// Delete method does DELETE HTTP request. It's defined in section 4.3.5 of RFC7231. +func (r *Request) Delete(url string) (*Response, error) { + return r.Execute(MethodDelete, url) +} + +// Options method does OPTIONS HTTP request. It's defined in section 4.3.7 of RFC7231. +func (r *Request) Options(url string) (*Response, error) { + return r.Execute(MethodOptions, url) +} + +// Patch method does PATCH HTTP request. It's defined in section 2 of RFC5789. +func (r *Request) Patch(url string) (*Response, error) { + return r.Execute(MethodPatch, url) +} + +// Execute method performs the HTTP request with given HTTP method and URL +// for current `Request`. +// resp, err := resty.R().Execute(resty.GET, "http://httpbin.org/get") +// +func (r *Request) Execute(method, url string) (*Response, error) { + var addrs []*net.SRV + var err error + + if r.isMultiPart && !(method == MethodPost || method == MethodPut) { + return nil, fmt.Errorf("multipart content is not allowed in HTTP verb [%v]", method) + } + + if r.SRV != nil { + _, addrs, err = net.LookupSRV(r.SRV.Service, "tcp", r.SRV.Domain) + if err != nil { + return nil, err + } + } + + r.Method = method + r.URL = r.selectAddr(addrs, url, 0) + + if r.client.RetryCount == 0 { + return r.client.execute(r) + } + + var resp *Response + attempt := 0 + _ = Backoff( + func() (*Response, error) { + attempt++ + + r.URL = r.selectAddr(addrs, url, attempt) + + resp, err = r.client.execute(r) + if err != nil { + r.client.Log.Printf("ERROR %v, Attempt %v", err, attempt) + if r.isContextCancelledIfAvailable() { + // stop Backoff from retrying request if request has been + // canceled by context + return resp, nil + } + } + + return resp, err + }, + Retries(r.client.RetryCount), + WaitTime(r.client.RetryWaitTime), + MaxWaitTime(r.client.RetryMaxWaitTime), + RetryConditions(r.client.RetryConditions), + ) + + return resp, err +} + +//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾ +// Request Unexported methods +//___________________________________ + +func (r *Request) fmtBodyString() (body string) { + body = "***** NO CONTENT *****" + if isPayloadSupported(r.Method, r.client.AllowGetMethodPayload) { + if _, ok := r.Body.(io.Reader); ok { + body = "***** BODY IS io.Reader *****" + return + } + + // multipart or form-data + if r.isMultiPart || r.isFormData { + body = r.bodyBuf.String() + return + } + + // request body data + if r.Body == nil { + return + } + var prtBodyBytes []byte + var err error + + contentType := r.Header.Get(hdrContentTypeKey) + kind := kindOf(r.Body) + if canJSONMarshal(contentType, kind) { + prtBodyBytes, err = json.MarshalIndent(&r.Body, "", " ") + } else if IsXMLType(contentType) && (kind == reflect.Struct) { + prtBodyBytes, err = xml.MarshalIndent(&r.Body, "", " ") + } else if b, ok := r.Body.(string); ok { + if IsJSONType(contentType) { + bodyBytes := []byte(b) + out := acquireBuffer() + defer releaseBuffer(out) + if err = json.Indent(out, bodyBytes, "", " "); err == nil { + prtBodyBytes = out.Bytes() + } + } else { + body = b + return + } + } else if b, ok := r.Body.([]byte); ok { + body = base64.StdEncoding.EncodeToString(b) + } + + if prtBodyBytes != nil && err == nil { + body = string(prtBodyBytes) + } + } + + return +} + +func (r 
*Request) selectAddr(addrs []*net.SRV, path string, attempt int) string { + if addrs == nil { + return path + } + + idx := attempt % len(addrs) + domain := strings.TrimRight(addrs[idx].Target, ".") + path = strings.TrimLeft(path, "/") + + return fmt.Sprintf("%s://%s:%d/%s", r.client.scheme, domain, addrs[idx].Port, path) +} diff --git a/vendor/gopkg.in/resty.v1/request16.go b/vendor/gopkg.in/resty.v1/request16.go new file mode 100644 index 000000000000..079ecfca7492 --- /dev/null +++ b/vendor/gopkg.in/resty.v1/request16.go @@ -0,0 +1,63 @@ +// +build !go1.7 + +// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com) +// 2016 Andrew Grigorev (https://github.com/ei-grad) +// All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "bytes" + "encoding/json" + "net/http" + "net/url" + "time" +) + +// Request type is used to compose and send individual request from client +// go-resty is provide option override client level settings such as +// Auth Token, Basic Auth credentials, Header, Query Param, Form Data, Error object +// and also you can add more options for that particular request +type Request struct { + URL string + Method string + Token string + QueryParam url.Values + FormData url.Values + Header http.Header + Time time.Time + Body interface{} + Result interface{} + Error interface{} + RawRequest *http.Request + SRV *SRVRecord + UserInfo *User + + isMultiPart bool + isFormData bool + setContentLength bool + isSaveResponse bool + notParseResponse bool + jsonEscapeHTML bool + outputFile string + fallbackContentType string + pathParams map[string]string + client *Client + bodyBuf *bytes.Buffer + multipartFiles []*File + multipartFields []*MultipartField +} + +func (r *Request) addContextIfAvailable() { + // nothing to do for golang<1.7 +} + +func (r *Request) isContextCancelledIfAvailable() bool { + // just always return false golang<1.7 + return false +} + +// for !go1.7 +var noescapeJSONMarshal = json.Marshal diff --git a/vendor/gopkg.in/resty.v1/request17.go b/vendor/gopkg.in/resty.v1/request17.go new file mode 100644 index 000000000000..0629a114c029 --- /dev/null +++ b/vendor/gopkg.in/resty.v1/request17.go @@ -0,0 +1,96 @@ +// +build go1.7 go1.8 + +// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com) +// 2016 Andrew Grigorev (https://github.com/ei-grad) +// All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package resty + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/url" + "time" +) + +// Request type is used to compose and send individual request from client +// go-resty is provide option override client level settings such as +// Auth Token, Basic Auth credentials, Header, Query Param, Form Data, Error object +// and also you can add more options for that particular request +type Request struct { + URL string + Method string + Token string + QueryParam url.Values + FormData url.Values + Header http.Header + Time time.Time + Body interface{} + Result interface{} + Error interface{} + RawRequest *http.Request + SRV *SRVRecord + UserInfo *User + + isMultiPart bool + isFormData bool + setContentLength bool + isSaveResponse bool + notParseResponse bool + jsonEscapeHTML bool + outputFile string + fallbackContentType string + ctx context.Context + pathParams map[string]string + client *Client + bodyBuf *bytes.Buffer + multipartFiles []*File + multipartFields []*MultipartField +} + +// Context method returns the Context if its already set in request +// otherwise it creates new one using `context.Background()`. +func (r *Request) Context() context.Context { + if r.ctx == nil { + return context.Background() + } + return r.ctx +} + +// SetContext method sets the context.Context for current Request. It allows +// to interrupt the request execution if ctx.Done() channel is closed. +// See https://blog.golang.org/context article and the "context" package +// documentation. +func (r *Request) SetContext(ctx context.Context) *Request { + r.ctx = ctx + return r +} + +func (r *Request) addContextIfAvailable() { + if r.ctx != nil { + r.RawRequest = r.RawRequest.WithContext(r.ctx) + } +} + +func (r *Request) isContextCancelledIfAvailable() bool { + if r.ctx != nil { + if r.ctx.Err() != nil { + return true + } + } + return false +} + +// for go1.7+ +var noescapeJSONMarshal = func(v interface{}) ([]byte, error) { + buf := acquireBuffer() + defer releaseBuffer(buf) + encoder := json.NewEncoder(buf) + encoder.SetEscapeHTML(false) + err := encoder.Encode(v) + return buf.Bytes(), err +} diff --git a/vendor/gopkg.in/resty.v1/response.go b/vendor/gopkg.in/resty.v1/response.go new file mode 100644 index 000000000000..ea2a027a538c --- /dev/null +++ b/vendor/gopkg.in/resty.v1/response.go @@ -0,0 +1,150 @@ +// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "time" +) + +// Response is an object represents executed request and its values. +type Response struct { + Request *Request + RawResponse *http.Response + + body []byte + size int64 + receivedAt time.Time +} + +// Body method returns HTTP response as []byte array for the executed request. +// Note: `Response.Body` might be nil, if `Request.SetOutput` is used. +func (r *Response) Body() []byte { + if r.RawResponse == nil { + return []byte{} + } + return r.body +} + +// Status method returns the HTTP status string for the executed request. +// Example: 200 OK +func (r *Response) Status() string { + if r.RawResponse == nil { + return "" + } + + return r.RawResponse.Status +} + +// StatusCode method returns the HTTP status code for the executed request. 
+// Example: 200 +func (r *Response) StatusCode() int { + if r.RawResponse == nil { + return 0 + } + + return r.RawResponse.StatusCode +} + +// Result method returns the response value as an object if it has one +func (r *Response) Result() interface{} { + return r.Request.Result +} + +// Error method returns the error object if it has one +func (r *Response) Error() interface{} { + return r.Request.Error +} + +// Header method returns the response headers +func (r *Response) Header() http.Header { + if r.RawResponse == nil { + return http.Header{} + } + + return r.RawResponse.Header +} + +// Cookies method to access all the response cookies +func (r *Response) Cookies() []*http.Cookie { + if r.RawResponse == nil { + return make([]*http.Cookie, 0) + } + + return r.RawResponse.Cookies() +} + +// String method returns the body of the server response as String. +func (r *Response) String() string { + if r.body == nil { + return "" + } + + return strings.TrimSpace(string(r.body)) +} + +// Time method returns the time of HTTP response time that from request we sent and received a request. +// See `response.ReceivedAt` to know when client recevied response and see `response.Request.Time` to know +// when client sent a request. +func (r *Response) Time() time.Duration { + return r.receivedAt.Sub(r.Request.Time) +} + +// ReceivedAt method returns when response got recevied from server for the request. +func (r *Response) ReceivedAt() time.Time { + return r.receivedAt +} + +// Size method returns the HTTP response size in bytes. Ya, you can relay on HTTP `Content-Length` header, +// however it won't be good for chucked transfer/compressed response. Since Resty calculates response size +// at the client end. You will get actual size of the http response. +func (r *Response) Size() int64 { + return r.size +} + +// RawBody method exposes the HTTP raw response body. Use this method in-conjunction with `SetDoNotParseResponse` +// option otherwise you get an error as `read err: http: read on closed response body`. +// +// Do not forget to close the body, otherwise you might get into connection leaks, no connection reuse. +// Basically you have taken over the control of response parsing from `Resty`. +func (r *Response) RawBody() io.ReadCloser { + if r.RawResponse == nil { + return nil + } + return r.RawResponse.Body +} + +// IsSuccess method returns true if HTTP status code >= 200 and <= 299 otherwise false. +func (r *Response) IsSuccess() bool { + return r.StatusCode() > 199 && r.StatusCode() < 300 +} + +// IsError method returns true if HTTP status code >= 400 otherwise false. +func (r *Response) IsError() bool { + return r.StatusCode() > 399 +} + +func (r *Response) fmtBodyString(sl int64) string { + if r.body != nil { + if int64(len(r.body)) > sl { + return fmt.Sprintf("***** RESPONSE TOO LARGE (size - %d) *****", len(r.body)) + } + ct := r.Header().Get(hdrContentTypeKey) + if IsJSONType(ct) { + out := acquireBuffer() + defer releaseBuffer(out) + if err := json.Indent(out, r.body, "", " "); err == nil { + return out.String() + } + } + return r.String() + } + + return "***** NO CONTENT *****" +} diff --git a/vendor/gopkg.in/resty.v1/resty.go b/vendor/gopkg.in/resty.v1/resty.go new file mode 100644 index 000000000000..9a32463d73ba --- /dev/null +++ b/vendor/gopkg.in/resty.v1/resty.go @@ -0,0 +1,9 @@ +// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +// Package resty provides Simple HTTP and REST client library for Go. +package resty + +// Version # of resty +const Version = "1.12.0" diff --git a/vendor/gopkg.in/resty.v1/retry.go b/vendor/gopkg.in/resty.v1/retry.go new file mode 100644 index 000000000000..4ed9b6d6067e --- /dev/null +++ b/vendor/gopkg.in/resty.v1/retry.go @@ -0,0 +1,118 @@ +// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "math" + "math/rand" + "time" +) + +const ( + defaultMaxRetries = 3 + defaultWaitTime = time.Duration(100) * time.Millisecond + defaultMaxWaitTime = time.Duration(2000) * time.Millisecond +) + +type ( + // Option is to create convenient retry options like wait time, max retries, etc. + Option func(*Options) + + // RetryConditionFunc type is for retry condition function + RetryConditionFunc func(*Response) (bool, error) + + // Options to hold go-resty retry values + Options struct { + maxRetries int + waitTime time.Duration + maxWaitTime time.Duration + retryConditions []RetryConditionFunc + } +) + +// Retries sets the max number of retries +func Retries(value int) Option { + return func(o *Options) { + o.maxRetries = value + } +} + +// WaitTime sets the default wait time to sleep between requests +func WaitTime(value time.Duration) Option { + return func(o *Options) { + o.waitTime = value + } +} + +// MaxWaitTime sets the max wait time to sleep between requests +func MaxWaitTime(value time.Duration) Option { + return func(o *Options) { + o.maxWaitTime = value + } +} + +// RetryConditions sets the conditions that will be checked for retry. +func RetryConditions(conditions []RetryConditionFunc) Option { + return func(o *Options) { + o.retryConditions = conditions + } +} + +// Backoff retries with increasing timeout duration up until X amount of retries +// (Default is 3 attempts, Override with option Retries(n)) +func Backoff(operation func() (*Response, error), options ...Option) error { + // Defaults + opts := Options{ + maxRetries: defaultMaxRetries, + waitTime: defaultWaitTime, + maxWaitTime: defaultMaxWaitTime, + retryConditions: []RetryConditionFunc{}, + } + + for _, o := range options { + o(&opts) + } + + var ( + resp *Response + err error + ) + base := float64(opts.waitTime) // Time to wait between each attempt + capLevel := float64(opts.maxWaitTime) // Maximum amount of wait time for the retry + for attempt := 0; attempt < opts.maxRetries; attempt++ { + resp, err = operation() + + var needsRetry bool + var conditionErr error + for _, condition := range opts.retryConditions { + needsRetry, conditionErr = condition(resp) + if needsRetry || conditionErr != nil { + break + } + } + + // If the operation returned no error, there was no condition satisfied and + // there was no error caused by the conditional functions. + if err == nil && !needsRetry && conditionErr == nil { + return nil + } + // Adding capped exponential backup with jitter + // See the following article... 
+ // http://www.awsarchitectureblog.com/2015/03/backoff.html + temp := math.Min(capLevel, base*math.Exp2(float64(attempt))) + ri := int(temp / 2) + if ri <= 0 { + ri = 1<<31 - 1 // max int for arch 386 + } + sleepDuration := time.Duration(math.Abs(float64(ri + rand.Intn(ri)))) + + if sleepDuration < opts.waitTime { + sleepDuration = opts.waitTime + } + time.Sleep(sleepDuration) + } + + return err +} diff --git a/vendor/gopkg.in/resty.v1/util.go b/vendor/gopkg.in/resty.v1/util.go new file mode 100644 index 000000000000..997cd1009efd --- /dev/null +++ b/vendor/gopkg.in/resty.v1/util.go @@ -0,0 +1,281 @@ +// Copyright (c) 2015-2019 Jeevanandam M (jeeva@myjeeva.com), All rights reserved. +// resty source code and usage is governed by a MIT style +// license that can be found in the LICENSE file. + +package resty + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "log" + "mime/multipart" + "net/http" + "net/textproto" + "os" + "path/filepath" + "reflect" + "runtime" + "sort" + "strings" +) + +//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾ +// Package Helper methods +//___________________________________ + +// IsStringEmpty method tells whether given string is empty or not +func IsStringEmpty(str string) bool { + return len(strings.TrimSpace(str)) == 0 +} + +// DetectContentType method is used to figure out `Request.Body` content type for request header +func DetectContentType(body interface{}) string { + contentType := plainTextType + kind := kindOf(body) + switch kind { + case reflect.Struct, reflect.Map: + contentType = jsonContentType + case reflect.String: + contentType = plainTextType + default: + if b, ok := body.([]byte); ok { + contentType = http.DetectContentType(b) + } else if kind == reflect.Slice { + contentType = jsonContentType + } + } + + return contentType +} + +// IsJSONType method is to check JSON content type or not +func IsJSONType(ct string) bool { + return jsonCheck.MatchString(ct) +} + +// IsXMLType method is to check XML content type or not +func IsXMLType(ct string) bool { + return xmlCheck.MatchString(ct) +} + +// Unmarshal content into object from JSON or XML +// Deprecated: kept for backward compatibility +func Unmarshal(ct string, b []byte, d interface{}) (err error) { + if IsJSONType(ct) { + err = json.Unmarshal(b, d) + } else if IsXMLType(ct) { + err = xml.Unmarshal(b, d) + } + + return +} + +// Unmarshalc content into object from JSON or XML +func Unmarshalc(c *Client, ct string, b []byte, d interface{}) (err error) { + if IsJSONType(ct) { + err = c.JSONUnmarshal(b, d) + } else if IsXMLType(ct) { + err = xml.Unmarshal(b, d) + } + + return +} + +//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾ +// RequestLog and ResponseLog type +//___________________________________ + +// RequestLog struct is used to collected information from resty request +// instance for debug logging. It sent to request log callback before resty +// actually logs the information. +type RequestLog struct { + Header http.Header + Body string +} + +// ResponseLog struct is used to collected information from resty response +// instance for debug logging. It sent to response log callback before resty +// actually logs the information. 
+type ResponseLog struct { + Header http.Header + Body string +} + +//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾ +// Package Unexported methods +//___________________________________ + +// way to disable the HTML escape as opt-in +func jsonMarshal(c *Client, r *Request, d interface{}) ([]byte, error) { + if !r.jsonEscapeHTML { + return noescapeJSONMarshal(d) + } else if !c.jsonEscapeHTML { + return noescapeJSONMarshal(d) + } + return c.JSONMarshal(d) +} + +func firstNonEmpty(v ...string) string { + for _, s := range v { + if !IsStringEmpty(s) { + return s + } + } + return "" +} + +func getLogger(w io.Writer) *log.Logger { + return log.New(w, "RESTY ", log.LstdFlags) +} + +var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"") + +func escapeQuotes(s string) string { + return quoteEscaper.Replace(s) +} + +func createMultipartHeader(param, fileName, contentType string) textproto.MIMEHeader { + hdr := make(textproto.MIMEHeader) + hdr.Set("Content-Disposition", fmt.Sprintf(`form-data; name="%s"; filename="%s"`, + escapeQuotes(param), escapeQuotes(fileName))) + hdr.Set("Content-Type", contentType) + return hdr +} + +func addMultipartFormField(w *multipart.Writer, mf *MultipartField) error { + partWriter, err := w.CreatePart(createMultipartHeader(mf.Param, mf.FileName, mf.ContentType)) + if err != nil { + return err + } + + _, err = io.Copy(partWriter, mf.Reader) + return err +} + +func writeMultipartFormFile(w *multipart.Writer, fieldName, fileName string, r io.Reader) error { + // Auto detect actual multipart content type + cbuf := make([]byte, 512) + size, err := r.Read(cbuf) + if err != nil { + return err + } + + partWriter, err := w.CreatePart(createMultipartHeader(fieldName, fileName, http.DetectContentType(cbuf))) + if err != nil { + return err + } + + if _, err = partWriter.Write(cbuf[:size]); err != nil { + return err + } + + _, err = io.Copy(partWriter, r) + return err +} + +func addFile(w *multipart.Writer, fieldName, path string) error { + file, err := os.Open(path) + if err != nil { + return err + } + defer closeq(file) + return writeMultipartFormFile(w, fieldName, filepath.Base(path), file) +} + +func addFileReader(w *multipart.Writer, f *File) error { + return writeMultipartFormFile(w, f.ParamName, f.Name, f.Reader) +} + +func getPointer(v interface{}) interface{} { + vv := valueOf(v) + if vv.Kind() == reflect.Ptr { + return v + } + return reflect.New(vv.Type()).Interface() +} + +func isPayloadSupported(m string, allowMethodGet bool) bool { + return !(m == MethodHead || m == MethodOptions || (m == MethodGet && !allowMethodGet)) +} + +func typeOf(i interface{}) reflect.Type { + return indirect(valueOf(i)).Type() +} + +func valueOf(i interface{}) reflect.Value { + return reflect.ValueOf(i) +} + +func indirect(v reflect.Value) reflect.Value { + return reflect.Indirect(v) +} + +func kindOf(v interface{}) reflect.Kind { + return typeOf(v).Kind() +} + +func createDirectory(dir string) (err error) { + if _, err = os.Stat(dir); err != nil { + if os.IsNotExist(err) { + if err = os.MkdirAll(dir, 0755); err != nil { + return + } + } + } + return +} + +func canJSONMarshal(contentType string, kind reflect.Kind) bool { + return IsJSONType(contentType) && (kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice) +} + +func functionName(i interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() +} + +func acquireBuffer() *bytes.Buffer { + return bufPool.Get().(*bytes.Buffer) +} + +func releaseBuffer(buf *bytes.Buffer) { + if buf != nil { + 
buf.Reset() + bufPool.Put(buf) + } +} + +func closeq(v interface{}) { + if c, ok := v.(io.Closer); ok { + sliently(c.Close()) + } +} + +func sliently(_ ...interface{}) {} + +func composeHeaders(hdrs http.Header) string { + var str []string + for _, k := range sortHeaderKeys(hdrs) { + str = append(str, fmt.Sprintf("%25s: %s", k, strings.Join(hdrs[k], ", "))) + } + return strings.Join(str, "\n") +} + +func sortHeaderKeys(hdrs http.Header) []string { + var keys []string + for key := range hdrs { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +func copyHeaders(hdrs http.Header) http.Header { + nh := http.Header{} + for k, v := range hdrs { + nh[k] = v + } + return nh +} diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go index 4e7f28d8c974..6524f8ca96e3 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -17,37 +17,23 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto - - It has these top-level messages: - BoundObjectReference - ExtraValue - TokenRequest - TokenRequestSpec - TokenRequestStatus - TokenReview - TokenReviewSpec - TokenReviewStatus - UserInfo -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + io "io" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import strings "strings" -import reflect "reflect" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -58,43 +44,259 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } -func (*BoundObjectReference) ProtoMessage() {} -func (*BoundObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } +func (*BoundObjectReference) ProtoMessage() {} +func (*BoundObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{0} +} +func (m *BoundObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BoundObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BoundObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoundObjectReference.Merge(m, src) +} +func (m *BoundObjectReference) XXX_Size() int { + return m.Size() +} +func (m *BoundObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_BoundObjectReference.DiscardUnknown(m) +} -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_BoundObjectReference proto.InternalMessageInfo -func (m *TokenRequest) Reset() { *m = TokenRequest{} } -func (*TokenRequest) ProtoMessage() {} -func (*TokenRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{1} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo -func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } -func (*TokenRequestSpec) ProtoMessage() {} -func (*TokenRequestSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (m *TokenRequest) Reset() { *m = TokenRequest{} } +func (*TokenRequest) ProtoMessage() {} +func (*TokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{2} +} +func (m *TokenRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequest.Merge(m, src) +} +func (m *TokenRequest) XXX_Size() int { + return m.Size() +} +func (m *TokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequest.DiscardUnknown(m) +} -func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } -func (*TokenRequestStatus) ProtoMessage() {} -func 
(*TokenRequestStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +var xxx_messageInfo_TokenRequest proto.InternalMessageInfo -func (m *TokenReview) Reset() { *m = TokenReview{} } -func (*TokenReview) ProtoMessage() {} -func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } +func (*TokenRequestSpec) ProtoMessage() {} +func (*TokenRequestSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{3} +} +func (m *TokenRequestSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequestSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequestSpec.Merge(m, src) +} +func (m *TokenRequestSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenRequestSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequestSpec.DiscardUnknown(m) +} -func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } -func (*TokenReviewSpec) ProtoMessage() {} -func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +var xxx_messageInfo_TokenRequestSpec proto.InternalMessageInfo -func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } -func (*TokenReviewStatus) ProtoMessage() {} -func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } +func (*TokenRequestStatus) ProtoMessage() {} +func (*TokenRequestStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{4} +} +func (m *TokenRequestStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequestStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequestStatus.Merge(m, src) +} +func (m *TokenRequestStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenRequestStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequestStatus.DiscardUnknown(m) +} -func (m *UserInfo) Reset() { *m = UserInfo{} } -func (*UserInfo) ProtoMessage() {} -func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_TokenRequestStatus proto.InternalMessageInfo + +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{5} +} +func (m *TokenReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReview.Merge(m, src) +} +func (m *TokenReview) XXX_Size() int { + return m.Size() +} +func (m *TokenReview) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReview.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReview proto.InternalMessageInfo + +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func 
(*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{6} +} +func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewSpec.Merge(m, src) +} +func (m *TokenReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{7} +} +func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewStatus.Merge(m, src) +} +func (m *TokenReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{8} +} +func (m *UserInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserInfo.Merge(m, src) +} +func (m *UserInfo) XXX_Size() int { + return m.Size() +} +func (m *UserInfo) XXX_DiscardUnknown() { + xxx_messageInfo_UserInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_UserInfo proto.InternalMessageInfo func init() { proto.RegisterType((*BoundObjectReference)(nil), "k8s.io.api.authentication.v1.BoundObjectReference") @@ -106,11 +308,78 @@ func init() { proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1.TokenReviewStatus") proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1.UserInfo") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1.UserInfo.ExtraEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptor_2953ea822e7ffe1e) +} + +var fileDescriptor_2953ea822e7ffe1e = []byte{ + // 903 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0x20, 0x2e, 0x5e, 0x09, + 0x55, 0xc0, 0xda, 0x9b, 0x08, 0xc1, 0x6a, 0x91, 0x90, 0x6a, 0x1a, 0x41, 0x84, 0x60, 0x57, 0xb3, + 0xdb, 0x82, 0x38, 0x31, 0xb1, 0x5f, 0x53, 0x13, 0x3c, 0x36, 
0xf6, 0x38, 0x6c, 0x6e, 0xfb, 0x27, + 0x70, 0x04, 0x89, 0x03, 0x7f, 0x04, 0x12, 0xff, 0x42, 0x8f, 0x2b, 0x4e, 0x3d, 0xa0, 0x88, 0x9a, + 0x2b, 0x47, 0x4e, 0x9c, 0xd0, 0x8c, 0xa7, 0x71, 0x9c, 0xb4, 0x69, 0x4e, 0x7b, 0x8b, 0xdf, 0xfb, + 0xde, 0xf7, 0xde, 0xfb, 0xe6, 0xcb, 0x0c, 0xea, 0x8d, 0x1e, 0xc4, 0xa6, 0x17, 0x58, 0xa3, 0x64, + 0x00, 0x11, 0x03, 0x0e, 0xb1, 0x35, 0x06, 0xe6, 0x06, 0x91, 0xa5, 0x12, 0x34, 0xf4, 0x2c, 0x9a, + 0xf0, 0x53, 0x60, 0xdc, 0x73, 0x28, 0xf7, 0x02, 0x66, 0x8d, 0x3b, 0xd6, 0x10, 0x18, 0x44, 0x94, + 0x83, 0x6b, 0x86, 0x51, 0xc0, 0x03, 0xfc, 0x7a, 0x86, 0x36, 0x69, 0xe8, 0x99, 0x45, 0xb4, 0x39, + 0xee, 0xec, 0xde, 0x1b, 0x7a, 0xfc, 0x34, 0x19, 0x98, 0x4e, 0xe0, 0x5b, 0xc3, 0x60, 0x18, 0x58, + 0xb2, 0x68, 0x90, 0x9c, 0xc8, 0x2f, 0xf9, 0x21, 0x7f, 0x65, 0x64, 0xbb, 0xef, 0xe5, 0xad, 0x7d, + 0xea, 0x9c, 0x7a, 0x0c, 0xa2, 0x89, 0x15, 0x8e, 0x86, 0x22, 0x10, 0x5b, 0x3e, 0x70, 0x7a, 0xc5, + 0x08, 0xbb, 0xd6, 0x75, 0x55, 0x51, 0xc2, 0xb8, 0xe7, 0xc3, 0x52, 0xc1, 0xfb, 0x37, 0x15, 0xc4, + 0xce, 0x29, 0xf8, 0x74, 0xb1, 0xce, 0xf8, 0x43, 0x43, 0xaf, 0xda, 0x41, 0xc2, 0xdc, 0x47, 0x83, + 0x6f, 0xc1, 0xe1, 0x04, 0x4e, 0x20, 0x02, 0xe6, 0x00, 0xde, 0x43, 0xd5, 0x91, 0xc7, 0xdc, 0x96, + 0xb6, 0xa7, 0xed, 0x37, 0xec, 0x5b, 0x67, 0x53, 0xbd, 0x94, 0x4e, 0xf5, 0xea, 0x67, 0x1e, 0x73, + 0x89, 0xcc, 0xe0, 0x2e, 0x42, 0xf4, 0x71, 0xff, 0x18, 0xa2, 0xd8, 0x0b, 0x58, 0xab, 0x2c, 0x71, + 0x58, 0xe1, 0xd0, 0xc1, 0x2c, 0x43, 0xe6, 0x50, 0x82, 0x95, 0x51, 0x1f, 0x5a, 0x95, 0x22, 0xeb, + 0x17, 0xd4, 0x07, 0x22, 0x33, 0xd8, 0x46, 0x95, 0xa4, 0x7f, 0xd8, 0xaa, 0x4a, 0xc0, 0x7d, 0x05, + 0xa8, 0x1c, 0xf5, 0x0f, 0xff, 0x9b, 0xea, 0x6f, 0x5e, 0xb7, 0x24, 0x9f, 0x84, 0x10, 0x9b, 0x47, + 0xfd, 0x43, 0x22, 0x8a, 0x8d, 0x0f, 0x10, 0xea, 0x3d, 0xe3, 0x11, 0x3d, 0xa6, 0xdf, 0x25, 0x80, + 0x75, 0x54, 0xf3, 0x38, 0xf8, 0x71, 0x4b, 0xdb, 0xab, 0xec, 0x37, 0xec, 0x46, 0x3a, 0xd5, 0x6b, + 0x7d, 0x11, 0x20, 0x59, 0xfc, 0x61, 0xfd, 0xa7, 0x5f, 0xf5, 0xd2, 0xf3, 0x3f, 0xf7, 0x4a, 0xc6, + 0x2f, 0x65, 0x74, 0xeb, 0x69, 0x30, 0x02, 0x46, 0xe0, 0xfb, 0x04, 0x62, 0x8e, 0xbf, 0x41, 0x75, + 0x71, 0x44, 0x2e, 0xe5, 0x54, 0x2a, 0xd1, 0xec, 0xde, 0x37, 0x73, 0x77, 0xcc, 0x86, 0x30, 0xc3, + 0xd1, 0x50, 0x04, 0x62, 0x53, 0xa0, 0xcd, 0x71, 0xc7, 0xcc, 0xe4, 0xfc, 0x1c, 0x38, 0xcd, 0x35, + 0xc9, 0x63, 0x64, 0xc6, 0x8a, 0x1f, 0xa3, 0x6a, 0x1c, 0x82, 0x23, 0xf5, 0x6b, 0x76, 0x4d, 0x73, + 0x95, 0xf7, 0xcc, 0xf9, 0xd9, 0x9e, 0x84, 0xe0, 0xe4, 0x0a, 0x8a, 0x2f, 0x22, 0x99, 0xf0, 0x57, + 0x68, 0x23, 0xe6, 0x94, 0x27, 0xb1, 0x54, 0xb9, 0x38, 0xf1, 0x4d, 0x9c, 0xb2, 0xce, 0xde, 0x52, + 0xac, 0x1b, 0xd9, 0x37, 0x51, 0x7c, 0xc6, 0xbf, 0x1a, 0xda, 0x5e, 0x1c, 0x01, 0xbf, 0x83, 0x1a, + 0x34, 0x71, 0x3d, 0x61, 0x9a, 0x4b, 0x89, 0x37, 0xd3, 0xa9, 0xde, 0x38, 0xb8, 0x0c, 0x92, 0x3c, + 0x8f, 0x3f, 0x46, 0x3b, 0xf0, 0x2c, 0xf4, 0x22, 0xd9, 0xfd, 0x09, 0x38, 0x01, 0x73, 0x63, 0x79, + 0xd6, 0x15, 0xfb, 0x4e, 0x3a, 0xd5, 0x77, 0x7a, 0x8b, 0x49, 0xb2, 0x8c, 0xc7, 0x0c, 0x6d, 0x0d, + 0x0a, 0x96, 0x55, 0x8b, 0x76, 0x57, 0x2f, 0x7a, 0x95, 0xcd, 0x6d, 0x9c, 0x4e, 0xf5, 0xad, 0x62, + 0x86, 0x2c, 0xb0, 0x1b, 0xbf, 0x69, 0x08, 0x2f, 0xab, 0x84, 0xef, 0xa2, 0x1a, 0x17, 0x51, 0xf5, + 0x17, 0xd9, 0x54, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x13, 0x74, 0x3b, 0x5f, 0xe0, 0xa9, 0xe7, + 0x43, 0xcc, 0xa9, 0x1f, 0xaa, 0xd3, 0x7e, 0x7b, 0x3d, 0x2f, 0x89, 0x32, 0xfb, 0x35, 0x45, 0x7f, + 0xbb, 0xb7, 0x4c, 0x47, 0xae, 0xea, 0x61, 0xfc, 0x5c, 0x46, 0x4d, 0x35, 0xf6, 0xd8, 0x83, 0x1f, + 0x5e, 0x82, 0x97, 0x1f, 0x15, 0xbc, 0x7c, 0x6f, 0x2d, 0xdf, 0x89, 0xd1, 0xae, 0xb5, 
0xf2, 0x97, + 0x0b, 0x56, 0xb6, 0xd6, 0xa7, 0x5c, 0xed, 0x64, 0x07, 0xbd, 0xb2, 0xd0, 0x7f, 0xbd, 0xe3, 0x2c, + 0x98, 0xbd, 0xbc, 0xda, 0xec, 0xc6, 0x3f, 0x1a, 0xda, 0x59, 0x1a, 0x09, 0x7f, 0x88, 0x36, 0xe7, + 0x26, 0x87, 0xec, 0x86, 0xad, 0xdb, 0x77, 0x54, 0xbf, 0xcd, 0x83, 0xf9, 0x24, 0x29, 0x62, 0xf1, + 0xa7, 0xa8, 0x9a, 0xc4, 0x10, 0x29, 0x85, 0xdf, 0x5a, 0x2d, 0xc7, 0x51, 0x0c, 0x51, 0x9f, 0x9d, + 0x04, 0xb9, 0xb4, 0x22, 0x42, 0x24, 0x43, 0x71, 0x93, 0xea, 0x0d, 0x7f, 0xdb, 0xbb, 0xa8, 0x06, + 0x51, 0x14, 0x44, 0xea, 0xde, 0x9e, 0x69, 0xd3, 0x13, 0x41, 0x92, 0xe5, 0x8c, 0xdf, 0xcb, 0xa8, + 0x7e, 0xd9, 0x12, 0xbf, 0x8b, 0xea, 0xa2, 0x8d, 0xbc, 0xec, 0x33, 0x41, 0xb7, 0x55, 0x91, 0xc4, + 0x88, 0x38, 0x99, 0x21, 0xf0, 0x1b, 0xa8, 0x92, 0x78, 0xae, 0x7a, 0x43, 0x9a, 0x73, 0x97, 0x3e, + 0x11, 0x71, 0x6c, 0xa0, 0x8d, 0x61, 0x14, 0x24, 0xa1, 0xb0, 0x81, 0x18, 0x14, 0x89, 0x13, 0xfd, + 0x44, 0x46, 0x88, 0xca, 0xe0, 0x63, 0x54, 0x03, 0x71, 0xe7, 0xcb, 0x5d, 0x9a, 0xdd, 0xce, 0x7a, + 0xd2, 0x98, 0xf2, 0x9d, 0xe8, 0x31, 0x1e, 0x4d, 0xe6, 0xb6, 0x12, 0x31, 0x92, 0xd1, 0xed, 0x0e, + 0xd4, 0x5b, 0x22, 0x31, 0x78, 0x1b, 0x55, 0x46, 0x30, 0xc9, 0x36, 0x22, 0xe2, 0x27, 0xfe, 0x08, + 0xd5, 0xc6, 0xe2, 0x99, 0x51, 0x47, 0xb2, 0xbf, 0xba, 0x6f, 0xfe, 0x2c, 0x91, 0xac, 0xec, 0x61, + 0xf9, 0x81, 0x66, 0xef, 0x9f, 0x5d, 0xb4, 0x4b, 0x2f, 0x2e, 0xda, 0xa5, 0xf3, 0x8b, 0x76, 0xe9, + 0x79, 0xda, 0xd6, 0xce, 0xd2, 0xb6, 0xf6, 0x22, 0x6d, 0x6b, 0xe7, 0x69, 0x5b, 0xfb, 0x2b, 0x6d, + 0x6b, 0x3f, 0xfe, 0xdd, 0x2e, 0x7d, 0x5d, 0x1e, 0x77, 0xfe, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x8c, + 0x44, 0x87, 0xd0, 0xe2, 0x08, 0x00, 0x00, } + func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -118,33 +387,42 @@ func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { } func (m *BoundObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BoundObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ + i -= len(m.UID) + copy(dAtA[i:], m.UID) i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -152,32 +430,31 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *TokenRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -185,41 +462,52 @@ func (m *TokenRequest) Marshal() (dAtA []byte, err error) { } func (m *TokenRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -227,47 +515,48 @@ func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenRequestSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x20 } if m.BoundObjectRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.BoundObjectRef.Size())) - n4, err := m.BoundObjectRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.BoundObjectRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 + i-- + 
dAtA[i] = 0x1a } - if m.ExpirationSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - return i, nil + return len(dAtA) - i, nil } func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -275,29 +564,37 @@ func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenRequestStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExpirationTimestamp.Size())) - n5, err := m.ExpirationTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ExpirationTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -305,41 +602,52 @@ func (m *TokenReview) Marshal() (dAtA []byte, err error) { } func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n6, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n7, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n8, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { 
return nil, err } @@ -347,36 +655,36 @@ func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) if len(m.Audiences) > 0 { - for _, s := range m.Audiences { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -384,52 +692,54 @@ func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x1a + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i-- if m.Authenticated { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) - n9, err := m.User.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *UserInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -437,77 +747,81 @@ func (m *UserInfo) Marshal() (dAtA []byte, err error) { } func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], 
m.Username) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x22 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n10, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n10 + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - return i, nil + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *BoundObjectReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -522,6 +836,9 @@ func (m *BoundObjectReference) Size() (n int) { } func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -534,6 +851,9 @@ func (m ExtraValue) Size() (n int) { } func (m *TokenRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -546,6 +866,9 @@ func (m *TokenRequest) Size() (n int) { } func (m *TokenRequestSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Audiences) > 0 { @@ -565,6 +888,9 @@ func (m *TokenRequestSpec) Size() (n int) { } func (m *TokenRequestStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Token) @@ -575,6 +901,9 @@ func (m *TokenRequestStatus) Size() (n int) { } func (m *TokenReview) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -587,6 +916,9 @@ func (m *TokenReview) Size() (n int) { } func (m *TokenReviewSpec) Size() (n int) { + if m 
== nil { + return 0 + } var l int _ = l l = len(m.Token) @@ -601,6 +933,9 @@ func (m *TokenReviewSpec) Size() (n int) { } func (m *TokenReviewStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -618,6 +953,9 @@ func (m *TokenReviewStatus) Size() (n int) { } func (m *UserInfo) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Username) @@ -643,14 +981,7 @@ func (m *UserInfo) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -673,7 +1004,7 @@ func (this *TokenRequest) String() string { return "nil" } s := strings.Join([]string{`&TokenRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenRequestSpec", "TokenRequestSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenRequestStatus", "TokenRequestStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -686,7 +1017,7 @@ func (this *TokenRequestSpec) String() string { } s := strings.Join([]string{`&TokenRequestSpec{`, `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, - `BoundObjectRef:` + strings.Replace(fmt.Sprintf("%v", this.BoundObjectRef), "BoundObjectReference", "BoundObjectReference", 1) + `,`, + `BoundObjectRef:` + strings.Replace(this.BoundObjectRef.String(), "BoundObjectReference", "BoundObjectReference", 1) + `,`, `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, `}`, }, "") @@ -698,7 +1029,7 @@ func (this *TokenRequestStatus) String() string { } s := strings.Join([]string{`&TokenRequestStatus{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `ExpirationTimestamp:` + strings.Replace(strings.Replace(this.ExpirationTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `ExpirationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExpirationTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -708,7 +1039,7 @@ func (this *TokenReview) String() string { return "nil" } s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -785,7 +1116,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -813,7 +1144,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -823,6 +1154,9 @@ 
func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -842,7 +1176,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -852,6 +1186,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -871,7 +1208,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -881,6 +1218,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -900,7 +1240,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -910,6 +1250,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -924,6 +1267,9 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -951,7 +1297,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -979,7 +1325,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -989,6 +1335,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1003,6 +1352,9 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1030,7 +1382,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1058,7 +1410,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1067,6 +1419,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1088,7 +1443,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ 
- msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1097,6 +1452,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1118,7 +1476,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1127,6 +1485,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1143,6 +1504,9 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1170,7 +1534,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1198,7 +1562,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1208,6 +1572,9 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1227,7 +1594,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1236,6 +1603,9 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1260,7 +1630,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -1275,6 +1645,9 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1302,7 +1675,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1330,7 +1703,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1340,6 +1713,9 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1359,7 +1735,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1368,6 +1744,9 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1384,6 +1763,9 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1411,7 +1793,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1439,7 +1821,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1448,6 +1830,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1469,7 +1854,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1478,6 +1863,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1499,7 +1887,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1508,6 +1896,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1524,6 +1915,9 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1551,7 +1945,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1579,7 +1973,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1589,6 +1983,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1608,7 +2005,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1618,6 +2015,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1632,6 +2032,9 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -1659,7 +2062,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1687,7 +2090,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1707,7 +2110,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1716,6 +2119,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1737,7 +2143,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1747,6 +2153,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1766,7 +2175,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1776,6 +2185,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1790,6 +2202,9 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1817,7 +2232,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1845,7 +2260,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1855,6 +2270,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1874,7 +2292,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1884,6 +2302,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1903,7 +2324,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1913,6 +2334,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1932,7 +2356,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -1941,6 +2365,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1961,7 +2388,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1978,7 +2405,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -1988,6 +2415,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -2004,7 +2434,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2013,7 +2443,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } if postmsgIndex > l { @@ -2050,6 +2480,9 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2065,6 +2498,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -2096,10 +2530,8 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -2116,118 +2548,34 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - return iNdEx, nil + iNdEx += length case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - 
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 900 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0xc0, 0x2e, 0x41, 0x42, - 0x15, 0xb0, 0xf6, 0x26, 0x42, 0xb0, 0x5a, 0x24, 0xa4, 0x9a, 0x46, 0x10, 0x21, 0xd8, 0xd5, 0xec, - 0xb6, 0x20, 0x4e, 0x4c, 0xec, 0xd7, 0xc4, 0x04, 0x8f, 0x8d, 0x3d, 0x0e, 0x9b, 0xdb, 0xfe, 0x09, - 0x1c, 0x41, 0xe2, 0xc0, 0x1f, 0x81, 0xc4, 0xbf, 0xd0, 0xe3, 0x8a, 0xd3, 0x1e, 0x50, 0x44, 0xcd, - 0x95, 0x23, 0x27, 0x4e, 0x68, 0xc6, 0xd3, 0x38, 0x4e, 0xda, 0x34, 0x27, 0x6e, 0x9e, 0xf7, 0xbe, - 0xf7, 0xbd, 0x37, 0xdf, 0x7c, 0x9e, 0x41, 0xbd, 0xf1, 0xfd, 0xd8, 0xf4, 0x02, 0x6b, 0x9c, 0x0c, - 0x20, 0x62, 0xc0, 0x21, 0xb6, 0x26, 0xc0, 0xdc, 0x20, 0xb2, 0x54, 0x82, 0x86, 0x9e, 0x45, 0x13, - 0x3e, 0x02, 0xc6, 0x3d, 0x87, 0x72, 0x2f, 0x60, 0xd6, 0xa4, 0x63, 0x0d, 0x81, 0x41, 0x44, 0x39, - 0xb8, 0x66, 0x18, 0x05, 0x3c, 0xc0, 0xaf, 0x66, 0x68, 0x93, 0x86, 0x9e, 0x59, 0x44, 0x9b, 0x93, - 0xce, 0xfe, 0xdd, 0xa1, 0xc7, 0x47, 0xc9, 0xc0, 0x74, 0x02, 0xdf, 0x1a, 0x06, 0xc3, 0xc0, 0x92, - 0x45, 0x83, 0xe4, 0x4c, 0xae, 0xe4, 0x42, 0x7e, 0x65, 0x64, 0xfb, 0xef, 0xe6, 0xad, 0x7d, 0xea, - 0x8c, 0x3c, 0x06, 0xd1, 0xd4, 0x0a, 0xc7, 0x43, 0x11, 0x88, 0x2d, 0x1f, 0x38, 0xbd, 0x62, 0x84, - 0x7d, 0xeb, 0xba, 0xaa, 0x28, 0x61, 0xdc, 0xf3, 0x61, 0xa5, 0xe0, 0xbd, 0x9b, 0x0a, 0x62, 0x67, - 0x04, 0x3e, 0x5d, 0xae, 0x6b, 0xff, 0xae, 0xa1, 0x97, 0xed, 0x20, 0x61, 0xee, 0xc3, 0xc1, 0x37, - 0xe0, 0x70, 0x02, 0x67, 0x10, 0x01, 0x73, 0x00, 0x1f, 0xa0, 0xea, 0xd8, 0x63, 0x6e, 0x4b, 0x3b, - 0xd0, 0x0e, 0x1b, 0xf6, 0xad, 0xf3, 0x99, 0x51, 0x4a, 0x67, 0x46, 0xf5, 0x53, 0x8f, 0xb9, 0x44, - 0x66, 0x70, 0x17, 0x21, 0xfa, 0xa8, 0x7f, 0x0a, 0x51, 0xec, 0x05, 0xac, 0x55, 0x96, 0x38, 0xac, - 0x70, 0xe8, 0x68, 0x9e, 0x21, 0x0b, 0x28, 0xc1, 0xca, 0xa8, 0x0f, 0xad, 0x4a, 0x91, 0xf5, 0x73, - 0xea, 0x03, 0x91, 0x19, 0x6c, 0xa3, 0x4a, 0xd2, 0x3f, 0x6e, 0x55, 0x25, 0xe0, 0x9e, 0x02, 0x54, - 0x4e, 0xfa, 0xc7, 0xff, 0xce, 0x8c, 0xd7, 0xaf, 0xdb, 0x24, 0x9f, 0x86, 0x10, 0x9b, 0x27, 0xfd, - 0x63, 0x22, 0x8a, 0xdb, 0xef, 0x23, 0xd4, 0x7b, 0xca, 0x23, 0x7a, 0x4a, 0xbf, 0x4d, 0x00, 0x1b, - 0xa8, 0xe6, 0x71, 0xf0, 0xe3, 0x96, 0x76, 0x50, 0x39, 0x6c, 0xd8, 0x8d, 0x74, 0x66, 0xd4, 0xfa, - 0x22, 0x40, 0xb2, 0xf8, 0x83, 0xfa, 0x8f, 0xbf, 0x18, 0xa5, 0x67, 0x7f, 0x1c, 0x94, 0xda, 0x3f, - 0x97, 0xd1, 0xad, 0x27, 0xc1, 0x18, 0x18, 0x81, 0xef, 0x12, 0x88, 0x39, 0xfe, 0x1a, 0xd5, 0xc5, - 0x11, 0xb9, 0x94, 0x53, 0xa9, 0x44, 0xb3, 0x7b, 0xcf, 0xcc, 0xdd, 0x31, 0x1f, 0xc2, 0x0c, 0xc7, - 0x43, 0x11, 0x88, 0x4d, 0x81, 0x36, 0x27, 0x1d, 0x33, 0x93, 0xf3, 0x33, 0xe0, 0x34, 0xd7, 0x24, - 0x8f, 0x91, 0x39, 0x2b, 0x7e, 0x84, 0xaa, 0x71, 0x08, 0x8e, 0xd4, 0xaf, 0xd9, 0x35, 0xcd, 0x75, - 0xde, 0x33, 0x17, 0x67, 0x7b, 0x1c, 0x82, 0x93, 0x2b, 0x28, 0x56, 0x44, 0x32, 0xe1, 0x2f, 0xd1, - 0x56, 0xcc, 0x29, 0x4f, 0x62, 0xa9, 0x72, 0x71, 0xe2, 0x9b, 0x38, 0x65, 0x9d, 0xbd, 0xa3, 0x58, - 0xb7, 0xb2, 0x35, 0x51, 0x7c, 0xed, 0x7f, 0x34, 0xb4, 0xbb, 0x3c, 0x02, 0x7e, 
0x1b, 0x35, 0x68, - 0xe2, 0x7a, 0xc2, 0x34, 0x97, 0x12, 0x6f, 0xa7, 0x33, 0xa3, 0x71, 0x74, 0x19, 0x24, 0x79, 0x1e, - 0x33, 0xb4, 0x33, 0x28, 0xb8, 0x4d, 0xcd, 0xd8, 0x5d, 0x3f, 0xe3, 0x55, 0x0e, 0xb5, 0x71, 0x3a, - 0x33, 0x76, 0x8a, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x42, 0x7b, 0xf0, 0x34, 0xf4, 0x22, 0xc9, 0xf4, - 0x18, 0x9c, 0x80, 0xb9, 0xb1, 0xf4, 0x56, 0xc5, 0xbe, 0x93, 0xce, 0x8c, 0xbd, 0xde, 0x72, 0x92, - 0xac, 0xe2, 0xdb, 0xbf, 0x6a, 0x08, 0xaf, 0xaa, 0x84, 0xdf, 0x40, 0x35, 0x2e, 0xa2, 0xea, 0x17, - 0xd9, 0x56, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x53, 0x74, 0x3b, 0x27, 0x7c, 0xe2, 0xf9, 0x10, - 0x73, 0xea, 0x87, 0xea, 0xb4, 0xdf, 0xda, 0xcc, 0x4b, 0xa2, 0xcc, 0x7e, 0x45, 0xd1, 0xdf, 0xee, - 0xad, 0xd2, 0x91, 0xab, 0x7a, 0xb4, 0x7f, 0x2a, 0xa3, 0xa6, 0x1a, 0x7b, 0xe2, 0xc1, 0xf7, 0xff, - 0x83, 0x97, 0x1f, 0x16, 0xbc, 0x7c, 0x77, 0x23, 0xdf, 0x89, 0xd1, 0xae, 0xb5, 0xf2, 0x17, 0x4b, - 0x56, 0xb6, 0x36, 0xa7, 0x5c, 0xef, 0x64, 0x07, 0xbd, 0xb4, 0xd4, 0x7f, 0xb3, 0xe3, 0x2c, 0x98, - 0xbd, 0xbc, 0xde, 0xec, 0xed, 0xbf, 0x35, 0xb4, 0xb7, 0x32, 0x12, 0xfe, 0x00, 0x6d, 0x2f, 0x4c, - 0x0e, 0xd9, 0x0d, 0x5b, 0xb7, 0xef, 0xa8, 0x7e, 0xdb, 0x47, 0x8b, 0x49, 0x52, 0xc4, 0xe2, 0x4f, - 0x50, 0x35, 0x89, 0x21, 0x52, 0x0a, 0xbf, 0xb9, 0x5e, 0x8e, 0x93, 0x18, 0xa2, 0x3e, 0x3b, 0x0b, - 0x72, 0x69, 0x45, 0x84, 0x48, 0x06, 0xb1, 0x5d, 0x88, 0xa2, 0x20, 0x52, 0x57, 0xf1, 0x7c, 0xbb, - 0x3d, 0x11, 0x24, 0x59, 0xae, 0xb8, 0xdd, 0xea, 0x0d, 0xdb, 0xfd, 0xad, 0x8c, 0xea, 0x97, 0x2d, - 0xf1, 0x3b, 0xa8, 0x2e, 0xda, 0xc8, 0xcb, 0x3e, 0x13, 0x74, 0x57, 0x75, 0x90, 0x18, 0x11, 0x27, - 0x73, 0x04, 0x7e, 0x0d, 0x55, 0x12, 0xcf, 0x55, 0x6f, 0x48, 0x73, 0xe1, 0xd2, 0x27, 0x22, 0x8e, - 0xdb, 0x68, 0x6b, 0x18, 0x05, 0x49, 0x28, 0x6c, 0x20, 0x66, 0x40, 0xe2, 0x44, 0x3f, 0x96, 0x11, - 0xa2, 0x32, 0xf8, 0x14, 0xd5, 0x40, 0xdc, 0xf9, 0x72, 0xcc, 0x66, 0xb7, 0xb3, 0x99, 0x34, 0xa6, - 0x7c, 0x27, 0x7a, 0x8c, 0x47, 0xd3, 0x05, 0x09, 0x44, 0x8c, 0x64, 0x74, 0xfb, 0x03, 0xf5, 0x96, - 0x48, 0x0c, 0xde, 0x45, 0x95, 0x31, 0x4c, 0xb3, 0x1d, 0x11, 0xf1, 0x89, 0x3f, 0x44, 0xb5, 0x89, - 0x78, 0x66, 0xd4, 0x91, 0x1c, 0xae, 0xef, 0x9b, 0x3f, 0x4b, 0x24, 0x2b, 0x7b, 0x50, 0xbe, 0xaf, - 0xd9, 0x87, 0xe7, 0x17, 0x7a, 0xe9, 0xf9, 0x85, 0x5e, 0x7a, 0x71, 0xa1, 0x97, 0x9e, 0xa5, 0xba, - 0x76, 0x9e, 0xea, 0xda, 0xf3, 0x54, 0xd7, 0x5e, 0xa4, 0xba, 0xf6, 0x67, 0xaa, 0x6b, 0x3f, 0xfc, - 0xa5, 0x97, 0xbe, 0x2a, 0x4f, 0x3a, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x04, 0x81, 0x6f, - 0xe2, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go index c48b03691e42..668b720380f3 100644 --- a/vendor/k8s.io/api/authentication/v1/types.go +++ b/vendor/k8s.io/api/authentication/v1/types.go @@ -40,7 +40,7 @@ const ( // +genclient // +genclient:nonNamespaced -// +genclient:noVerbs +// +genclient:onlyVerbs=create // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // TokenReview attempts to authenticate a token to a known user. 
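The regenerated authentication/v1 code above keeps the public Marshal/MarshalTo/Unmarshal surface intact, but Marshal now allocates a buffer of exactly Size() bytes and fills it back to front through the new MarshalToSizedBuffer helpers. A minimal round-trip sketch, assuming a module that vendors k8s.io/api at this revision; the token and audience strings are placeholders and not taken from this patch:

package main

import (
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
)

func main() {
	in := authv1.TokenReview{
		Spec: authv1.TokenReviewSpec{
			Token:     "opaque-bearer-token",             // placeholder
			Audiences: []string{"https://example.invalid"}, // placeholder
		},
	}

	// Marshal sizes the buffer up front and writes it back to front via
	// MarshalToSizedBuffer; call sites are unchanged from the old generator.
	raw, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	var out authv1.TokenReview
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.Spec.Token == in.Spec.Token) // true
}

Writing fields in reverse order lets each embedded message report its encoded length directly from MarshalToSizedBuffer, so the generated code no longer needs a separate Size() call to emit the length prefix for every nested field.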
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS index 63434030ca1c..435297a8d513 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -23,4 +23,3 @@ reviewers: - krousey - cjcullen - david-mcmahon -- goltermann diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index 958273ea7240..e53c3e61fd14 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -32,7 +32,9 @@ import ( const ( // StatusTooManyRequests means the server experienced too many requests within a // given window and that the client must wait to perform the action again. - StatusTooManyRequests = 429 + // DEPRECATED: please use http.StatusTooManyRequests, this will be removed in + // the future version. + StatusTooManyRequests = http.StatusTooManyRequests ) // StatusError is an error intended for consumption by a REST API server; it can also be @@ -68,6 +70,28 @@ func (e *StatusError) DebugError() (string, []interface{}) { return "server response object: %#v", []interface{}{e.ErrStatus} } +// HasStatusCause returns true if the provided error has a details cause +// with the provided type name. +func HasStatusCause(err error, name metav1.CauseType) bool { + _, ok := StatusCause(err, name) + return ok +} + +// StatusCause returns the named cause from the provided error if it exists and +// the error is of the type APIStatus. Otherwise it returns false. +func StatusCause(err error, name metav1.CauseType) (metav1.StatusCause, bool) { + apierr, ok := err.(APIStatus) + if !ok || apierr == nil || apierr.Status().Details == nil { + return metav1.StatusCause{}, false + } + for _, cause := range apierr.Status().Details.Causes { + if cause.Type == name { + return cause, true + } + } + return metav1.StatusCause{}, false +} + // UnexpectedObjectError can be returned by FromObject if it's passed a non-status object. type UnexpectedObjectError struct { Object runtime.Object @@ -199,6 +223,7 @@ func NewApplyConflict(causes []metav1.StatusCause, message string) *StatusError } // NewGone returns an error indicating the item no longer available at the server and no forwarding address is known. +// DEPRECATED: Please use NewResourceExpired instead. 
func NewGone(message string) *StatusError { return &StatusError{metav1.Status{ Status: metav1.StatusFailure, @@ -349,7 +374,7 @@ func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { func NewTooManyRequestsError(message string) *StatusError { return &StatusError{metav1.Status{ Status: metav1.StatusFailure, - Code: StatusTooManyRequests, + Code: http.StatusTooManyRequests, Reason: metav1.StatusReasonTooManyRequests, Message: fmt.Sprintf("Too many requests: %s", message), }} @@ -394,7 +419,11 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr case http.StatusNotAcceptable: reason = metav1.StatusReasonNotAcceptable // the server message has details about what types are acceptable - message = serverMessage + if len(serverMessage) == 0 || serverMessage == "unknown" { + message = "the server was unable to respond with a content type that the client supports" + } else { + message = serverMessage + } case http.StatusUnsupportedMediaType: reason = metav1.StatusReasonUnsupportedMediaType // the server message has details about what types are acceptable diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS index 8454be55ed01..dc7740190ae9 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -11,8 +11,6 @@ reviewers: - janetkuo - tallclair - eparis -- jbeda - xiang90 - mbohlool - david-mcmahon -- goltermann diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 9d7835bc23f9..2e09f4face77 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -17,20 +17,15 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto -/* -Package resource is a generated protocol buffer package. +package resource -It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +import ( + fmt "fmt" -It has these top-level messages: - Quantity -*/ -package resource + math "math" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -41,21 +36,40 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Quantity) Reset() { *m = Quantity{} } +func (*Quantity) ProtoMessage() {} +func (*Quantity) Descriptor() ([]byte, []int) { + return fileDescriptor_612bba87bd70906c, []int{0} +} +func (m *Quantity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantity.Unmarshal(m, b) +} +func (m *Quantity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantity.Marshal(b, m, deterministic) +} +func (m *Quantity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantity.Merge(m, src) +} +func (m *Quantity) XXX_Size() int { + return xxx_messageInfo_Quantity.Size(m) +} +func (m *Quantity) XXX_DiscardUnknown() { + xxx_messageInfo_Quantity.DiscardUnknown(m) +} -func (m *Quantity) Reset() { *m = Quantity{} } -func (*Quantity) ProtoMessage() {} -func (*Quantity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +var xxx_messageInfo_Quantity proto.InternalMessageInfo func init() { proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_612bba87bd70906c) } -var fileDescriptorGenerated = []byte{ +var fileDescriptor_612bba87bd70906c = []byte{ // 237 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0xb1, 0x4e, 0xc3, 0x30, 0x10, 0x40, 0xcf, 0x0b, 0x2a, 0x19, 0x2b, 0x84, 0x10, 0xc3, 0xa5, 0x42, 0x0c, 0x2c, 0xd8, 0x6b, diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index acc904445228..18a6c7cd6812 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -26,7 +26,7 @@ option go_package = "resource"; // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and Int64() accessors. +// in addition to String() and AsInt64() accessors. // // The serialization format is: // diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go index 72d3880c0281..8ffcb9f09ac5 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go @@ -37,12 +37,8 @@ var ( big1024 = big.NewInt(1024) // Commonly needed inf.Dec values-- treat as read only! - decZero = inf.NewDec(0, 0) - decOne = inf.NewDec(1, 0) - decMinusOne = inf.NewDec(-1, 0) - decThousand = inf.NewDec(1000, 0) - dec1024 = inf.NewDec(1024, 0) - decMinus1024 = inf.NewDec(-1024, 0) + decZero = inf.NewDec(0, 0) + decOne = inf.NewDec(1, 0) // Largest (in magnitude) number allowed. 
maxAllowed = infDecAmount{inf.NewDec((1<<63)-1, 0)} // == max int64 @@ -194,9 +190,9 @@ func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) { } if fraction { if base > 0 { - value += 1 + value++ } else { - value += -1 + value-- } } return value, !fraction diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 54fda58064d0..d95e03aa92da 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -29,7 +29,7 @@ import ( // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and Int64() accessors. +// in addition to String() and AsInt64() accessors. // // The serialization format is: // @@ -584,6 +584,12 @@ func (q *Quantity) Neg() { q.d.Dec.Neg(q.d.Dec) } +// Equal checks equality of two Quantities. This is useful for testing with +// cmp.Equal. +func (q Quantity) Equal(v Quantity) bool { + return q.Cmp(v) == 0 +} + // int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation // of most Quantity values. const int64QuantityExpectedBytes = 18 @@ -628,6 +634,11 @@ func (q Quantity) MarshalJSON() ([]byte, error) { return result, nil } +// ToUnstructured implements the value.UnstructuredConverter interface. +func (q Quantity) ToUnstructured() interface{} { + return q.String() +} + // UnmarshalJSON implements the json.Unmarshaller interface. // TODO: Remove support for leading/trailing whitespace func (q *Quantity) UnmarshalJSON(value []byte) error { @@ -691,7 +702,9 @@ func (q *Quantity) MilliValue() int64 { return q.ScaledValue(Milli) } -// ScaledValue returns the value of ceil(q * 10^scale); this could overflow an int64. +// ScaledValue returns the value of ceil(q / 10^scale). +// For example, NewQuantity(1, DecimalSI).ScaledValue(Milli) returns 1000. +// This could overflow an int64. // To detect overflow, call Value() first and verify the expected magnitude. func (q *Quantity) ScaledValue(scale Scale) int64 { if q.d.Dec == nil { @@ -718,21 +731,3 @@ func (q *Quantity) SetScaled(value int64, scale Scale) { q.d.Dec = nil q.i = int64Amount{value: value, scale: scale} } - -// Copy is a convenience function that makes a deep copy for you. Non-deep -// copies of quantities share pointers and you will regret that. 
-func (q *Quantity) Copy() *Quantity { - if q.d.Dec == nil { - return &Quantity{ - s: q.s, - i: q.i, - Format: q.Format, - } - } - tmp := &inf.Dec{} - return &Quantity{ - s: q.s, - d: infDecAmount{tmp.Set(q.d.Dec)}, - Format: q.Format, - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go index 74dfb4e4b7c9..f89ca163cd39 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go @@ -19,6 +19,7 @@ package resource import ( "fmt" "io" + "math/bits" "github.com/gogo/protobuf/proto" ) @@ -28,7 +29,7 @@ var _ proto.Sizer = &Quantity{} func (m *Quantity) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) - n, err := m.MarshalTo(data) + n, err := m.MarshalToSizedBuffer(data[:size]) if err != nil { return nil, err } @@ -38,30 +39,40 @@ func (m *Quantity) Marshal() (data []byte, err error) { // MarshalTo is a customized version of the generated Protobuf unmarshaler for a struct // with a single string field. func (m *Quantity) MarshalTo(data []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(data[:size]) +} + +// MarshalToSizedBuffer is a customized version of the generated +// Protobuf unmarshaler for a struct with a single string field. +func (m *Quantity) MarshalToSizedBuffer(data []byte) (int, error) { + i := len(data) _ = i var l int _ = l - data[i] = 0xa - i++ // BEGIN CUSTOM MARSHAL out := m.String() + i -= len(out) + copy(data[i:], out) i = encodeVarintGenerated(data, i, uint64(len(out))) - i += copy(data[i:], out) // END CUSTOM MARSHAL + i-- + data[i] = 0xa - return i, nil + return len(data) - i, nil } func encodeVarintGenerated(data []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { data[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } data[offset] = uint8(v) - return offset + 1 + return base } func (m *Quantity) Size() (n int) { @@ -77,14 +88,7 @@ func (m *Quantity) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (bits.Len64(x|1) + 6) / 7 } // Unmarshal is a customized version of the generated Protobuf unmarshaler for a struct diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS index 44929b1c002f..77cfb0c1aa36 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -30,4 +30,3 @@ reviewers: - mqliang - kevin-wangzefeng - jianhuiz -- feihujiang diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go index 042cd5b9c558..15b45ffa84b8 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go @@ -22,7 +22,7 @@ import ( // IsControlledBy checks if the object has a controllerRef set to the given owner func IsControlledBy(obj Object, owner Object) bool { - ref := GetControllerOf(obj) + ref := GetControllerOfNoCopy(obj) if ref == nil { return false } @@ -31,9 +31,20 @@ func IsControlledBy(obj Object, owner Object) bool { // GetControllerOf returns a pointer to a copy of the controllerRef if controllee has a controller func GetControllerOf(controllee Object) *OwnerReference { - for _, ref := range controllee.GetOwnerReferences() { - if 
ref.Controller != nil && *ref.Controller { - return &ref + ref := GetControllerOfNoCopy(controllee) + if ref == nil { + return nil + } + cp := *ref + return &cp +} + +// GetControllerOf returns a pointer to the controllerRef if controllee has a controller +func GetControllerOfNoCopy(controllee Object) *OwnerReference { + refs := controllee.GetOwnerReferences() + for i := range refs { + if refs[i].Controller != nil && *refs[i].Controller { + return &refs[i] } } return nil diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index 5c36f82c1277..b937398cd347 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -18,6 +18,7 @@ package v1 import ( "fmt" + "net/url" "strconv" "strings" @@ -25,61 +26,10 @@ import ( "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" ) -func AddConversionFuncs(scheme *runtime.Scheme) error { - return scheme.AddConversionFuncs( - Convert_v1_TypeMeta_To_v1_TypeMeta, - - Convert_v1_ListMeta_To_v1_ListMeta, - - Convert_intstr_IntOrString_To_intstr_IntOrString, - - Convert_Pointer_v1_Duration_To_v1_Duration, - Convert_v1_Duration_To_Pointer_v1_Duration, - - Convert_Slice_string_To_v1_Time, - - Convert_v1_Time_To_v1_Time, - Convert_v1_MicroTime_To_v1_MicroTime, - - Convert_resource_Quantity_To_resource_Quantity, - - Convert_string_To_labels_Selector, - Convert_labels_Selector_To_string, - - Convert_string_To_fields_Selector, - Convert_fields_Selector_To_string, - - Convert_Pointer_bool_To_bool, - Convert_bool_To_Pointer_bool, - - Convert_Pointer_string_To_string, - Convert_string_To_Pointer_string, - - Convert_Pointer_int64_To_int, - Convert_int_To_Pointer_int64, - - Convert_Pointer_int32_To_int32, - Convert_int32_To_Pointer_int32, - - Convert_Pointer_int64_To_int64, - Convert_int64_To_Pointer_int64, - - Convert_Pointer_float64_To_float64, - Convert_float64_To_Pointer_float64, - - Convert_Map_string_To_string_To_v1_LabelSelector, - Convert_v1_LabelSelector_To_Map_string_To_string, - - Convert_Slice_string_To_Slice_int32, - - Convert_Slice_string_To_v1_DeletionPropagation, - ) -} - func Convert_Pointer_float64_To_float64(in **float64, out *float64, s conversion.Scope) error { if *in == nil { *out = 0 @@ -192,12 +142,33 @@ func Convert_v1_ListMeta_To_v1_ListMeta(in, out *ListMeta, s conversion.Scope) e return nil } +// +k8s:conversion-fn=copy-only +func Convert_v1_DeleteOptions_To_v1_DeleteOptions(in, out *DeleteOptions, s conversion.Scope) error { + *out = *in + return nil +} + // +k8s:conversion-fn=copy-only func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error { *out = *in return nil } +func Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(in **intstr.IntOrString, out *intstr.IntOrString, s conversion.Scope) error { + if *in == nil { + *out = intstr.IntOrString{} // zero value + return nil + } + *out = **in // copy + return nil +} + +func Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(in *intstr.IntOrString, out **intstr.IntOrString, s conversion.Scope) error { + temp := *in // copy + *out = &temp + return nil +} + // +k8s:conversion-fn=copy-only func Convert_v1_Time_To_v1_Time(in *Time, out *Time, s conversion.Scope) error { // Cannot deep copy these, because time.Time has 
unexported fields. @@ -228,14 +199,30 @@ func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s } // Convert_Slice_string_To_v1_Time allows converting a URL query parameter value -func Convert_Slice_string_To_v1_Time(input *[]string, out *Time, s conversion.Scope) error { +func Convert_Slice_string_To_v1_Time(in *[]string, out *Time, s conversion.Scope) error { str := "" - if len(*input) > 0 { - str = (*input)[0] + if len(*in) > 0 { + str = (*in)[0] } return out.UnmarshalQueryParameter(str) } +func Convert_Slice_string_To_Pointer_v1_Time(in *[]string, out **Time, s conversion.Scope) error { + if in == nil { + return nil + } + str := "" + if len(*in) > 0 { + str = (*in)[0] + } + temp := Time{} + if err := temp.UnmarshalQueryParameter(str); err != nil { + return err + } + *out = &temp + return nil +} + func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error { selector, err := labels.Parse(*in) if err != nil { @@ -308,12 +295,53 @@ func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversio return nil } -// Convert_Slice_string_To_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy -func Convert_Slice_string_To_v1_DeletionPropagation(input *[]string, out *DeletionPropagation, s conversion.Scope) error { - if len(*input) > 0 { - *out = DeletionPropagation((*input)[0]) +// Convert_Slice_string_To_Pointer_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy +func Convert_Slice_string_To_Pointer_v1_DeletionPropagation(in *[]string, out **DeletionPropagation, s conversion.Scope) error { + var str string + if len(*in) > 0 { + str = (*in)[0] } else { - *out = "" + str = "" + } + temp := DeletionPropagation(str) + *out = &temp + return nil +} + +// Convert_Slice_string_To_v1_IncludeObjectPolicy allows converting a URL query parameter value +func Convert_Slice_string_To_v1_IncludeObjectPolicy(in *[]string, out *IncludeObjectPolicy, s conversion.Scope) error { + if len(*in) > 0 { + *out = IncludeObjectPolicy((*in)[0]) + } + return nil +} + +// Convert_url_Values_To_v1_DeleteOptions allows converting a URL to DeleteOptions. +func Convert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error { + if err := autoConvert_url_Values_To_v1_DeleteOptions(in, out, s); err != nil { + return err + } + + uid := types.UID("") + if values, ok := (*in)["uid"]; ok && len(values) > 0 { + uid = types.UID(values[0]) + } + + resourceVersion := "" + if values, ok := (*in)["resourceVersion"]; ok && len(values) > 0 { + resourceVersion = values[0] + } + + if len(uid) > 0 || len(resourceVersion) > 0 { + if out.Preconditions == nil { + out.Preconditions = &Preconditions{} + } + if len(uid) > 0 { + out.Preconditions.UID = &uid + } + if len(resourceVersion) > 0 { + out.Preconditions.ResourceVersion = &resourceVersion + } } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go new file mode 100644 index 000000000000..8751d0524f46 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go @@ -0,0 +1,46 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func (in *TableRow) DeepCopy() *TableRow { + if in == nil { + return nil + } + + out := new(TableRow) + + if in.Cells != nil { + out.Cells = make([]interface{}, len(in.Cells)) + for i := range in.Cells { + out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i]) + } + } + + if in.Conditions != nil { + out.Conditions = make([]TableRowCondition, len(in.Conditions)) + for i := range in.Conditions { + in.Conditions[i].DeepCopyInto(&out.Conditions[i]) + } + } + + in.Object.DeepCopyInto(&out.Object) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go index dbaa87c879fc..7736753d6682 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:conversion-gen=false // +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +k8s:defaulter-gen=TypeMeta diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go index babe8a8b53b7..a22b07878f62 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go @@ -49,6 +49,11 @@ func (d Duration) MarshalJSON() ([]byte, error) { return json.Marshal(d.Duration.String()) } +// ToUnstructured implements the value.UnstructuredConverter interface. +func (d Duration) ToUnstructured() interface{} { + return d.Duration.String() +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. // diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 69e650993215..3288c564918c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -17,73 +17,25 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto - - It has these top-level messages: - APIGroup - APIGroupList - APIResource - APIResourceList - APIVersions - CreateOptions - DeleteOptions - Duration - ExportOptions - Fields - GetOptions - GroupKind - GroupResource - GroupVersion - GroupVersionForDiscovery - GroupVersionKind - GroupVersionResource - Initializer - Initializers - LabelSelector - LabelSelectorRequirement - List - ListMeta - ListOptions - ManagedFieldsEntry - MicroTime - ObjectMeta - OwnerReference - Patch - PatchOptions - Preconditions - RootPaths - ServerAddressByClientCIDR - Status - StatusCause - StatusDetails - Time - Timestamp - TypeMeta - UpdateOptions - Verbs - WatchEvent -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + io "io" -import time "time" -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + runtime "k8s.io/apimachinery/pkg/runtime" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + time "time" -import strings "strings" -import reflect "reflect" - -import io "io" + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -95,181 +47,1201 @@ var _ = time.Kitchen // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *APIGroup) Reset() { *m = APIGroup{} } +func (*APIGroup) ProtoMessage() {} +func (*APIGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{0} +} +func (m *APIGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroup.Merge(m, src) +} +func (m *APIGroup) XXX_Size() int { + return m.Size() +} +func (m *APIGroup) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroup proto.InternalMessageInfo + +func (m *APIGroupList) Reset() { *m = APIGroupList{} } +func (*APIGroupList) ProtoMessage() {} +func (*APIGroupList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{1} +} +func (m *APIGroupList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroupList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroupList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroupList.Merge(m, src) +} +func (m *APIGroupList) XXX_Size() int { + return m.Size() +} +func (m *APIGroupList) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroupList.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroupList proto.InternalMessageInfo + +func (m *APIResource) Reset() { *m = APIResource{} } +func (*APIResource) ProtoMessage() {} +func (*APIResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{2} +} +func (m *APIResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResource.Merge(m, src) +} +func (m *APIResource) XXX_Size() int { + return m.Size() +} +func (m *APIResource) XXX_DiscardUnknown() { + xxx_messageInfo_APIResource.DiscardUnknown(m) +} + +var xxx_messageInfo_APIResource proto.InternalMessageInfo + +func (m *APIResourceList) Reset() { *m = APIResourceList{} } +func (*APIResourceList) ProtoMessage() {} +func (*APIResourceList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{3} +} +func (m *APIResourceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResourceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResourceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResourceList.Merge(m, src) +} +func (m *APIResourceList) XXX_Size() int { + return m.Size() +} +func (m *APIResourceList) XXX_DiscardUnknown() { + xxx_messageInfo_APIResourceList.DiscardUnknown(m) +} + +var xxx_messageInfo_APIResourceList proto.InternalMessageInfo + +func (m *APIVersions) Reset() { *m = APIVersions{} } +func (*APIVersions) ProtoMessage() {} +func (*APIVersions) Descriptor() ([]byte, []int) { + 
return fileDescriptor_cf52fa777ced5367, []int{4} +} +func (m *APIVersions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIVersions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIVersions) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIVersions.Merge(m, src) +} +func (m *APIVersions) XXX_Size() int { + return m.Size() +} +func (m *APIVersions) XXX_DiscardUnknown() { + xxx_messageInfo_APIVersions.DiscardUnknown(m) +} + +var xxx_messageInfo_APIVersions proto.InternalMessageInfo + +func (m *CreateOptions) Reset() { *m = CreateOptions{} } +func (*CreateOptions) ProtoMessage() {} +func (*CreateOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{5} +} +func (m *CreateOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CreateOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateOptions.Merge(m, src) +} +func (m *CreateOptions) XXX_Size() int { + return m.Size() +} +func (m *CreateOptions) XXX_DiscardUnknown() { + xxx_messageInfo_CreateOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateOptions proto.InternalMessageInfo -func (m *APIGroup) Reset() { *m = APIGroup{} } -func (*APIGroup) ProtoMessage() {} -func (*APIGroup) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } +func (*DeleteOptions) ProtoMessage() {} +func (*DeleteOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{6} +} +func (m *DeleteOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeleteOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteOptions.Merge(m, src) +} +func (m *DeleteOptions) XXX_Size() int { + return m.Size() +} +func (m *DeleteOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteOptions proto.InternalMessageInfo + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{7} +} +func (m *Duration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return m.Size() +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} -func (m *APIGroupList) Reset() { *m = APIGroupList{} } -func (*APIGroupList) ProtoMessage() {} -func (*APIGroupList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_Duration proto.InternalMessageInfo -func (m *APIResource) Reset() { *m = APIResource{} } -func (*APIResource) ProtoMessage() {} -func 
(*APIResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ExportOptions) Reset() { *m = ExportOptions{} } +func (*ExportOptions) ProtoMessage() {} +func (*ExportOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{8} +} +func (m *ExportOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExportOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportOptions.Merge(m, src) +} +func (m *ExportOptions) XXX_Size() int { + return m.Size() +} +func (m *ExportOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExportOptions.DiscardUnknown(m) +} -func (m *APIResourceList) Reset() { *m = APIResourceList{} } -func (*APIResourceList) ProtoMessage() {} -func (*APIResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ExportOptions proto.InternalMessageInfo -func (m *APIVersions) Reset() { *m = APIVersions{} } -func (*APIVersions) ProtoMessage() {} -func (*APIVersions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *FieldsV1) Reset() { *m = FieldsV1{} } +func (*FieldsV1) ProtoMessage() {} +func (*FieldsV1) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{9} +} +func (m *FieldsV1) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldsV1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldsV1) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldsV1.Merge(m, src) +} +func (m *FieldsV1) XXX_Size() int { + return m.Size() +} +func (m *FieldsV1) XXX_DiscardUnknown() { + xxx_messageInfo_FieldsV1.DiscardUnknown(m) +} -func (m *CreateOptions) Reset() { *m = CreateOptions{} } -func (*CreateOptions) ProtoMessage() {} -func (*CreateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo -func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } -func (*DeleteOptions) ProtoMessage() {} -func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *GetOptions) Reset() { *m = GetOptions{} } +func (*GetOptions) ProtoMessage() {} +func (*GetOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{10} +} +func (m *GetOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GetOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetOptions.Merge(m, src) +} +func (m *GetOptions) XXX_Size() int { + return m.Size() +} +func (m *GetOptions) XXX_DiscardUnknown() { + xxx_messageInfo_GetOptions.DiscardUnknown(m) +} -func (m *Duration) Reset() { *m = Duration{} } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +var xxx_messageInfo_GetOptions proto.InternalMessageInfo -func (m *ExportOptions) Reset() { *m = ExportOptions{} } -func (*ExportOptions) ProtoMessage() {} -func (*ExportOptions) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (m *GroupKind) Reset() { *m = GroupKind{} } +func (*GroupKind) ProtoMessage() {} +func (*GroupKind) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{11} +} +func (m *GroupKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupKind.Merge(m, src) +} +func (m *GroupKind) XXX_Size() int { + return m.Size() +} +func (m *GroupKind) XXX_DiscardUnknown() { + xxx_messageInfo_GroupKind.DiscardUnknown(m) +} -func (m *Fields) Reset() { *m = Fields{} } -func (*Fields) ProtoMessage() {} -func (*Fields) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +var xxx_messageInfo_GroupKind proto.InternalMessageInfo -func (m *GetOptions) Reset() { *m = GetOptions{} } -func (*GetOptions) ProtoMessage() {} -func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (m *GroupResource) Reset() { *m = GroupResource{} } +func (*GroupResource) ProtoMessage() {} +func (*GroupResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{12} +} +func (m *GroupResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupResource.Merge(m, src) +} +func (m *GroupResource) XXX_Size() int { + return m.Size() +} +func (m *GroupResource) XXX_DiscardUnknown() { + xxx_messageInfo_GroupResource.DiscardUnknown(m) +} -func (m *GroupKind) Reset() { *m = GroupKind{} } -func (*GroupKind) ProtoMessage() {} -func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +var xxx_messageInfo_GroupResource proto.InternalMessageInfo -func (m *GroupResource) Reset() { *m = GroupResource{} } -func (*GroupResource) ProtoMessage() {} -func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (m *GroupVersion) Reset() { *m = GroupVersion{} } +func (*GroupVersion) ProtoMessage() {} +func (*GroupVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{13} +} +func (m *GroupVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersion.Merge(m, src) +} +func (m *GroupVersion) XXX_Size() int { + return m.Size() +} +func (m *GroupVersion) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersion.DiscardUnknown(m) +} -func (m *GroupVersion) Reset() { *m = GroupVersion{} } -func (*GroupVersion) ProtoMessage() {} -func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +var xxx_messageInfo_GroupVersion proto.InternalMessageInfo func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } func (*GroupVersionForDiscovery) ProtoMessage() {} func 
(*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} + return fileDescriptor_cf52fa777ced5367, []int{14} +} +func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionForDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionForDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionForDiscovery.Merge(m, src) +} +func (m *GroupVersionForDiscovery) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionForDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionForDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo + +func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } +func (*GroupVersionKind) ProtoMessage() {} +func (*GroupVersionKind) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{15} +} +func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionKind.Merge(m, src) +} +func (m *GroupVersionKind) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionKind) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionKind.DiscardUnknown(m) } -func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } -func (*GroupVersionKind) ProtoMessage() {} -func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo -func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } -func (*GroupVersionResource) ProtoMessage() {} -func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } +func (*GroupVersionResource) ProtoMessage() {} +func (*GroupVersionResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{16} +} +func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionResource.Merge(m, src) +} +func (m *GroupVersionResource) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionResource) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionResource.DiscardUnknown(m) +} -func (m *Initializer) Reset() { *m = Initializer{} } -func (*Initializer) ProtoMessage() {} -func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo -func (m *Initializers) Reset() { *m = Initializers{} } -func (*Initializers) ProtoMessage() {} -func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (*LabelSelector) ProtoMessage() {} 
+func (*LabelSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{17} +} +func (m *LabelSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelector.Merge(m, src) +} +func (m *LabelSelector) XXX_Size() int { + return m.Size() +} +func (m *LabelSelector) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelector.DiscardUnknown(m) +} -func (m *LabelSelector) Reset() { *m = LabelSelector{} } -func (*LabelSelector) ProtoMessage() {} -func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +var xxx_messageInfo_LabelSelector proto.InternalMessageInfo func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } func (*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptor_cf52fa777ced5367, []int{18} +} +func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelectorRequirement.Merge(m, src) +} +func (m *LabelSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *LabelSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelectorRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo + +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{19} +} +func (m *List) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *List) XXX_Merge(src proto.Message) { + xxx_messageInfo_List.Merge(m, src) +} +func (m *List) XXX_Size() int { + return m.Size() +} +func (m *List) XXX_DiscardUnknown() { + xxx_messageInfo_List.DiscardUnknown(m) +} + +var xxx_messageInfo_List proto.InternalMessageInfo + +func (m *ListMeta) Reset() { *m = ListMeta{} } +func (*ListMeta) ProtoMessage() {} +func (*ListMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{20} +} +func (m *ListMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ListMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMeta.Merge(m, src) +} +func (m *ListMeta) XXX_Size() int { + return m.Size() +} +func (m *ListMeta) XXX_DiscardUnknown() { + xxx_messageInfo_ListMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMeta proto.InternalMessageInfo + +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (*ListOptions) ProtoMessage() {} +func (*ListOptions) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_cf52fa777ced5367, []int{21} +} +func (m *ListOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ListOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListOptions.Merge(m, src) +} +func (m *ListOptions) XXX_Size() int { + return m.Size() +} +func (m *ListOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ListOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ListOptions proto.InternalMessageInfo + +func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } +func (*ManagedFieldsEntry) ProtoMessage() {} +func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{22} +} +func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ManagedFieldsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ManagedFieldsEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedFieldsEntry.Merge(m, src) +} +func (m *ManagedFieldsEntry) XXX_Size() int { + return m.Size() +} +func (m *ManagedFieldsEntry) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedFieldsEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo + +func (m *MicroTime) Reset() { *m = MicroTime{} } +func (*MicroTime) ProtoMessage() {} +func (*MicroTime) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{23} +} +func (m *MicroTime) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MicroTime.Unmarshal(m, b) +} +func (m *MicroTime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MicroTime.Marshal(b, m, deterministic) +} +func (m *MicroTime) XXX_Merge(src proto.Message) { + xxx_messageInfo_MicroTime.Merge(m, src) +} +func (m *MicroTime) XXX_Size() int { + return xxx_messageInfo_MicroTime.Size(m) +} +func (m *MicroTime) XXX_DiscardUnknown() { + xxx_messageInfo_MicroTime.DiscardUnknown(m) +} + +var xxx_messageInfo_MicroTime proto.InternalMessageInfo + +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (*ObjectMeta) ProtoMessage() {} +func (*ObjectMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{24} +} +func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMeta.Merge(m, src) +} +func (m *ObjectMeta) XXX_Size() int { + return m.Size() +} +func (m *ObjectMeta) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMeta.DiscardUnknown(m) } -func (m *List) Reset() { *m = List{} } -func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo -func (m *ListMeta) Reset() { *m = ListMeta{} } -func (*ListMeta) ProtoMessage() {} -func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (m *OwnerReference) Reset() { *m = OwnerReference{} } +func (*OwnerReference) 
ProtoMessage() {} +func (*OwnerReference) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{25} +} +func (m *OwnerReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OwnerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OwnerReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_OwnerReference.Merge(m, src) +} +func (m *OwnerReference) XXX_Size() int { + return m.Size() +} +func (m *OwnerReference) XXX_DiscardUnknown() { + xxx_messageInfo_OwnerReference.DiscardUnknown(m) +} + +var xxx_messageInfo_OwnerReference proto.InternalMessageInfo + +func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } +func (*PartialObjectMetadata) ProtoMessage() {} +func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{26} +} +func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadata.Merge(m, src) +} +func (m *PartialObjectMetadata) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo + +func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } +func (*PartialObjectMetadataList) ProtoMessage() {} +func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{27} +} +func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadataList.Merge(m, src) +} +func (m *PartialObjectMetadataList) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadataList) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m) +} -func (m *ListOptions) Reset() { *m = ListOptions{} } -func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo -func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } -func (*ManagedFieldsEntry) ProtoMessage() {} -func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (m *Patch) Reset() { *m = Patch{} } +func (*Patch) ProtoMessage() {} +func (*Patch) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{28} +} +func (m *Patch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Patch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Patch) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Patch.Merge(m, src) +} +func (m *Patch) XXX_Size() int { + return m.Size() +} +func (m *Patch) XXX_DiscardUnknown() { + xxx_messageInfo_Patch.DiscardUnknown(m) +} -func (m *MicroTime) Reset() { *m = MicroTime{} } -func (*MicroTime) ProtoMessage() {} -func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +var xxx_messageInfo_Patch proto.InternalMessageInfo -func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } -func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (m *PatchOptions) Reset() { *m = PatchOptions{} } +func (*PatchOptions) ProtoMessage() {} +func (*PatchOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{29} +} +func (m *PatchOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PatchOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PatchOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PatchOptions.Merge(m, src) +} +func (m *PatchOptions) XXX_Size() int { + return m.Size() +} +func (m *PatchOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PatchOptions.DiscardUnknown(m) +} -func (m *OwnerReference) Reset() { *m = OwnerReference{} } -func (*OwnerReference) ProtoMessage() {} -func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +var xxx_messageInfo_PatchOptions proto.InternalMessageInfo -func (m *Patch) Reset() { *m = Patch{} } -func (*Patch) ProtoMessage() {} -func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{30} +} +func (m *Preconditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Preconditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Preconditions.Merge(m, src) +} +func (m *Preconditions) XXX_Size() int { + return m.Size() +} +func (m *Preconditions) XXX_DiscardUnknown() { + xxx_messageInfo_Preconditions.DiscardUnknown(m) +} -func (m *PatchOptions) Reset() { *m = PatchOptions{} } -func (*PatchOptions) ProtoMessage() {} -func (*PatchOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +var xxx_messageInfo_Preconditions proto.InternalMessageInfo -func (m *Preconditions) Reset() { *m = Preconditions{} } -func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (m *RootPaths) Reset() { *m = RootPaths{} } +func (*RootPaths) ProtoMessage() {} +func (*RootPaths) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{31} +} +func (m *RootPaths) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RootPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RootPaths) XXX_Merge(src proto.Message) { + xxx_messageInfo_RootPaths.Merge(m, 
src) +} +func (m *RootPaths) XXX_Size() int { + return m.Size() +} +func (m *RootPaths) XXX_DiscardUnknown() { + xxx_messageInfo_RootPaths.DiscardUnknown(m) +} -func (m *RootPaths) Reset() { *m = RootPaths{} } -func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +var xxx_messageInfo_RootPaths proto.InternalMessageInfo func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{32} + return fileDescriptor_cf52fa777ced5367, []int{32} +} +func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServerAddressByClientCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServerAddressByClientCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerAddressByClientCIDR.Merge(m, src) +} +func (m *ServerAddressByClientCIDR) XXX_Size() int { + return m.Size() +} +func (m *ServerAddressByClientCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ServerAddressByClientCIDR.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo + +func (m *Status) Reset() { *m = Status{} } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{33} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return m.Size() +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *StatusCause) Reset() { *m = StatusCause{} } +func (*StatusCause) ProtoMessage() {} +func (*StatusCause) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{34} +} +func (m *StatusCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatusCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusCause.Merge(m, src) +} +func (m *StatusCause) XXX_Size() int { + return m.Size() +} +func (m *StatusCause) XXX_DiscardUnknown() { + xxx_messageInfo_StatusCause.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusCause proto.InternalMessageInfo + +func (m *StatusDetails) Reset() { *m = StatusDetails{} } +func (*StatusDetails) ProtoMessage() {} +func (*StatusDetails) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{35} +} +func (m *StatusDetails) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatusDetails) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusDetails.Merge(m, 
src) +} +func (m *StatusDetails) XXX_Size() int { + return m.Size() +} +func (m *StatusDetails) XXX_DiscardUnknown() { + xxx_messageInfo_StatusDetails.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusDetails proto.InternalMessageInfo + +func (m *TableOptions) Reset() { *m = TableOptions{} } +func (*TableOptions) ProtoMessage() {} +func (*TableOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{36} +} +func (m *TableOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TableOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TableOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_TableOptions.Merge(m, src) +} +func (m *TableOptions) XXX_Size() int { + return m.Size() +} +func (m *TableOptions) XXX_DiscardUnknown() { + xxx_messageInfo_TableOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_TableOptions proto.InternalMessageInfo + +func (m *Time) Reset() { *m = Time{} } +func (*Time) ProtoMessage() {} +func (*Time) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{37} +} +func (m *Time) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Time.Unmarshal(m, b) +} +func (m *Time) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Time.Marshal(b, m, deterministic) +} +func (m *Time) XXX_Merge(src proto.Message) { + xxx_messageInfo_Time.Merge(m, src) +} +func (m *Time) XXX_Size() int { + return xxx_messageInfo_Time.Size(m) +} +func (m *Time) XXX_DiscardUnknown() { + xxx_messageInfo_Time.DiscardUnknown(m) +} + +var xxx_messageInfo_Time proto.InternalMessageInfo + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{38} +} +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) } -func (m *Status) Reset() { *m = Status{} } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +var xxx_messageInfo_Timestamp proto.InternalMessageInfo -func (m *StatusCause) Reset() { *m = StatusCause{} } -func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{39} +} +func (m *TypeMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeMeta.Merge(m, src) +} +func (m *TypeMeta) XXX_Size() int { + return m.Size() +} +func (m *TypeMeta) XXX_DiscardUnknown() 
{ + xxx_messageInfo_TypeMeta.DiscardUnknown(m) +} -func (m *StatusDetails) Reset() { *m = StatusDetails{} } -func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +var xxx_messageInfo_TypeMeta proto.InternalMessageInfo -func (m *Time) Reset() { *m = Time{} } -func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } +func (*UpdateOptions) ProtoMessage() {} +func (*UpdateOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{40} +} +func (m *UpdateOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UpdateOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateOptions.Merge(m, src) +} +func (m *UpdateOptions) XXX_Size() int { + return m.Size() +} +func (m *UpdateOptions) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateOptions.DiscardUnknown(m) +} -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (m *Verbs) Reset() { *m = Verbs{} } +func (*Verbs) ProtoMessage() {} +func (*Verbs) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{41} +} +func (m *Verbs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Verbs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Verbs) XXX_Merge(src proto.Message) { + xxx_messageInfo_Verbs.Merge(m, src) +} +func (m *Verbs) XXX_Size() int { + return m.Size() +} +func (m *Verbs) XXX_DiscardUnknown() { + xxx_messageInfo_Verbs.DiscardUnknown(m) +} -func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } -func (*UpdateOptions) ProtoMessage() {} -func (*UpdateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } +var xxx_messageInfo_Verbs proto.InternalMessageInfo -func (m *Verbs) Reset() { *m = Verbs{} } -func (*Verbs) ProtoMessage() {} -func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (m *WatchEvent) Reset() { *m = WatchEvent{} } +func (*WatchEvent) ProtoMessage() {} +func (*WatchEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{42} +} +func (m *WatchEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WatchEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WatchEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchEvent.Merge(m, src) +} +func (m *WatchEvent) XXX_Size() int { + return m.Size() +} +func (m *WatchEvent) XXX_DiscardUnknown() { + xxx_messageInfo_WatchEvent.DiscardUnknown(m) +} -func (m *WatchEvent) Reset() { *m = WatchEvent{} } -func (*WatchEvent) 
ProtoMessage() {} -func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +var xxx_messageInfo_WatchEvent proto.InternalMessageInfo func init() { proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") @@ -281,7 +1253,7 @@ func init() { proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions") - proto.RegisterType((*Fields)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Fields") + proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1") proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource") @@ -289,9 +1261,8 @@ func init() { proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery") proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind") proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource") - proto.RegisterType((*Initializer)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializer") - proto.RegisterType((*Initializers)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializers") proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector.MatchLabelsEntry") proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement") proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List") proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") @@ -299,7 +1270,11 @@ func init() { proto.RegisterType((*ManagedFieldsEntry)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry") proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.LabelsEntry") proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") + proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata") + proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadataList") proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch") proto.RegisterType((*PatchOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PatchOptions") proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") @@ -308,6 +1283,7 @@ func init() { proto.RegisterType((*Status)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Status") proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause") proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails") + 
proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TableOptions") proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time") proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp") proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta") @@ -315,10 +1291,189 @@ func init() { proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs") proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_cf52fa777ced5367) +} + +var fileDescriptor_cf52fa777ced5367 = []byte{ + // 2713 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x19, 0xcd, 0x6f, 0x1b, 0x59, + 0x3d, 0x63, 0xc7, 0x8e, 0xfd, 0x73, 0x9c, 0x8f, 0x97, 0x16, 0xdc, 0x00, 0x71, 0x76, 0x16, 0xad, + 0x52, 0xe8, 0x3a, 0x9b, 0x02, 0xab, 0xd2, 0x65, 0x0b, 0x71, 0x9c, 0x74, 0xc3, 0x36, 0x4d, 0xf4, + 0xd2, 0x16, 0x28, 0x15, 0xea, 0x64, 0xe6, 0xc5, 0x19, 0x32, 0x9e, 0xf1, 0xbe, 0x19, 0x27, 0x35, + 0x1c, 0xd8, 0x03, 0x08, 0x90, 0x60, 0xd5, 0x23, 0xe2, 0x80, 0xb6, 0x82, 0xbf, 0x80, 0x13, 0x7f, + 0x00, 0x12, 0xbd, 0x20, 0xad, 0xc4, 0x65, 0x25, 0x90, 0xb5, 0x0d, 0x07, 0x8e, 0x88, 0x6b, 0x4e, + 0xe8, 0x7d, 0xcd, 0x87, 0x1d, 0x37, 0x63, 0xba, 0xac, 0xf6, 0xe6, 0xf9, 0x7d, 0xff, 0xde, 0xfb, + 0xbd, 0xdf, 0x97, 0x61, 0xeb, 0xf0, 0x9a, 0x5f, 0xb3, 0xbd, 0xe5, 0xc3, 0xce, 0x1e, 0xa1, 0x2e, + 0x09, 0x88, 0xbf, 0x7c, 0x44, 0x5c, 0xcb, 0xa3, 0xcb, 0x12, 0x61, 0xb4, 0xed, 0x96, 0x61, 0x1e, + 0xd8, 0x2e, 0xa1, 0xdd, 0xe5, 0xf6, 0x61, 0x93, 0x01, 0xfc, 0xe5, 0x16, 0x09, 0x8c, 0xe5, 0xa3, + 0x95, 0xe5, 0x26, 0x71, 0x09, 0x35, 0x02, 0x62, 0xd5, 0xda, 0xd4, 0x0b, 0x3c, 0xf4, 0x45, 0xc1, + 0x55, 0x8b, 0x73, 0xd5, 0xda, 0x87, 0x4d, 0x06, 0xf0, 0x6b, 0x8c, 0xab, 0x76, 0xb4, 0x32, 0xff, + 0x6a, 0xd3, 0x0e, 0x0e, 0x3a, 0x7b, 0x35, 0xd3, 0x6b, 0x2d, 0x37, 0xbd, 0xa6, 0xb7, 0xcc, 0x99, + 0xf7, 0x3a, 0xfb, 0xfc, 0x8b, 0x7f, 0xf0, 0x5f, 0x42, 0xe8, 0xfc, 0x50, 0x53, 0x68, 0xc7, 0x0d, + 0xec, 0x16, 0xe9, 0xb7, 0x62, 0xfe, 0xf5, 0xf3, 0x18, 0x7c, 0xf3, 0x80, 0xb4, 0x8c, 0x7e, 0x3e, + 0xfd, 0x2f, 0x59, 0x28, 0xac, 0xee, 0x6c, 0xde, 0xa4, 0x5e, 0xa7, 0x8d, 0x16, 0x61, 0xdc, 0x35, + 0x5a, 0xa4, 0xa2, 0x2d, 0x6a, 0x4b, 0xc5, 0xfa, 0xe4, 0xd3, 0x5e, 0x75, 0xec, 0xa4, 0x57, 0x1d, + 0xbf, 0x6d, 0xb4, 0x08, 0xe6, 0x18, 0xe4, 0x40, 0xe1, 0x88, 0x50, 0xdf, 0xf6, 0x5c, 0xbf, 0x92, + 0x59, 0xcc, 0x2e, 0x95, 0xae, 0xde, 0xa8, 0xa5, 0xf1, 0xbf, 0xc6, 0x15, 0xdc, 0x13, 0xac, 0x1b, + 0x1e, 0x6d, 0xd8, 0xbe, 0xe9, 0x1d, 0x11, 0xda, 0xad, 0xcf, 0x48, 0x2d, 0x05, 0x89, 0xf4, 0x71, + 0xa8, 0x01, 0xfd, 0x54, 0x83, 0x99, 0x36, 0x25, 0xfb, 0x84, 0x52, 0x62, 0x49, 0x7c, 0x25, 0xbb, + 0xa8, 0x7d, 0x0c, 0x6a, 0x2b, 0x52, 0xed, 0xcc, 0x4e, 0x9f, 0x7c, 0x3c, 0xa0, 0x11, 0xfd, 0x5e, + 0x83, 0x79, 0x9f, 0xd0, 0x23, 0x42, 0x57, 0x2d, 0x8b, 0x12, 0xdf, 0xaf, 0x77, 0xd7, 0x1c, 0x9b, + 0xb8, 0xc1, 0xda, 0x66, 0x03, 0xfb, 0x95, 0x71, 0x7e, 0x0e, 0xdf, 0x4c, 0x67, 0xd0, 0xee, 0x30, + 0x39, 0x75, 0x5d, 0x5a, 0x34, 0x3f, 0x94, 0xc4, 0xc7, 0xcf, 0x31, 0x43, 0xdf, 0x87, 0x49, 0x75, + 0x91, 0xb7, 0x6c, 0x3f, 0x40, 0xf7, 0x20, 0xdf, 0x64, 0x1f, 0x7e, 0x45, 0xe3, 0x06, 0xd6, 0xd2, + 0x19, 0xa8, 0x64, 0xd4, 0xa7, 0xa4, 0x3d, 0x79, 0xfe, 0xe9, 0x63, 0x29, 0x4d, 0xff, 0xe5, 0x38, + 0x94, 0x56, 0x77, 0x36, 0x31, 0xf1, 0xbd, 0x0e, 0x35, 0x49, 0x8a, 0xa0, 0xb9, 0x06, 0x93, 0xbe, + 
0xed, 0x36, 0x3b, 0x8e, 0x41, 0x19, 0xb4, 0x92, 0xe7, 0x94, 0x17, 0x24, 0xe5, 0xe4, 0x6e, 0x0c, + 0x87, 0x13, 0x94, 0xe8, 0x2a, 0x00, 0x93, 0xe0, 0xb7, 0x0d, 0x93, 0x58, 0x95, 0xcc, 0xa2, 0xb6, + 0x54, 0xa8, 0x23, 0xc9, 0x07, 0xb7, 0x43, 0x0c, 0x8e, 0x51, 0xa1, 0x97, 0x21, 0xc7, 0x2d, 0xad, + 0x14, 0xb8, 0x9a, 0xb2, 0x24, 0xcf, 0x71, 0x37, 0xb0, 0xc0, 0xa1, 0xcb, 0x30, 0x21, 0xa3, 0xac, + 0x52, 0xe4, 0x64, 0xd3, 0x92, 0x6c, 0x42, 0x85, 0x81, 0xc2, 0x33, 0xff, 0x0e, 0x6d, 0xd7, 0xe2, + 0x71, 0x17, 0xf3, 0xef, 0x6d, 0xdb, 0xb5, 0x30, 0xc7, 0xa0, 0x5b, 0x90, 0x3b, 0x22, 0x74, 0x8f, + 0x45, 0x02, 0x0b, 0xcd, 0x2f, 0xa7, 0x3b, 0xe8, 0x7b, 0x8c, 0xa5, 0x5e, 0x64, 0xa6, 0xf1, 0x9f, + 0x58, 0x08, 0x41, 0x35, 0x00, 0xff, 0xc0, 0xa3, 0x01, 0x77, 0xaf, 0x92, 0x5b, 0xcc, 0x2e, 0x15, + 0xeb, 0x53, 0xcc, 0xdf, 0xdd, 0x10, 0x8a, 0x63, 0x14, 0x8c, 0xde, 0x34, 0x02, 0xd2, 0xf4, 0xa8, + 0x4d, 0xfc, 0xca, 0x44, 0x44, 0xbf, 0x16, 0x42, 0x71, 0x8c, 0x02, 0x7d, 0x1b, 0x90, 0x1f, 0x78, + 0xd4, 0x68, 0x12, 0xe9, 0xea, 0x5b, 0x86, 0x7f, 0x50, 0x01, 0xee, 0xdd, 0xbc, 0xf4, 0x0e, 0xed, + 0x0e, 0x50, 0xe0, 0x33, 0xb8, 0xf4, 0x3f, 0x6a, 0x30, 0x1d, 0x8b, 0x05, 0x1e, 0x77, 0xd7, 0x60, + 0xb2, 0x19, 0x7b, 0x75, 0x32, 0x2e, 0xc2, 0xdb, 0x8e, 0xbf, 0x48, 0x9c, 0xa0, 0x44, 0x04, 0x8a, + 0x54, 0x4a, 0x52, 0xd9, 0x65, 0x25, 0x75, 0xd0, 0x2a, 0x1b, 0x22, 0x4d, 0x31, 0xa0, 0x8f, 0x23, + 0xc9, 0xfa, 0xbf, 0x34, 0x1e, 0xc0, 0x2a, 0xdf, 0xa0, 0xa5, 0x58, 0x4e, 0xd3, 0xf8, 0xf1, 0x4d, + 0x0e, 0xc9, 0x47, 0xe7, 0x24, 0x82, 0xcc, 0xa7, 0x22, 0x11, 0x5c, 0x2f, 0xfc, 0xe6, 0xfd, 0xea, + 0xd8, 0xbb, 0xff, 0x58, 0x1c, 0xd3, 0x5b, 0x50, 0x5e, 0xa3, 0xc4, 0x08, 0xc8, 0x76, 0x3b, 0xe0, + 0x0e, 0xe8, 0x90, 0xb7, 0x68, 0x17, 0x77, 0x5c, 0xe9, 0x28, 0xb0, 0xf7, 0xdd, 0xe0, 0x10, 0x2c, + 0x31, 0xec, 0xfe, 0xf6, 0x6d, 0xe2, 0x58, 0x5b, 0x86, 0x6b, 0x34, 0x09, 0x95, 0x71, 0x1f, 0x9e, + 0xea, 0x46, 0x0c, 0x87, 0x13, 0x94, 0xfa, 0xcf, 0xb3, 0x50, 0x6e, 0x10, 0x87, 0x44, 0xfa, 0x36, + 0x00, 0x35, 0xa9, 0x61, 0x92, 0x1d, 0x42, 0x6d, 0xcf, 0xda, 0x25, 0xa6, 0xe7, 0x5a, 0x3e, 0x8f, + 0x88, 0x6c, 0xfd, 0x33, 0x2c, 0xce, 0x6e, 0x0e, 0x60, 0xf1, 0x19, 0x1c, 0xc8, 0x81, 0x72, 0x9b, + 0xf2, 0xdf, 0x76, 0x20, 0x6b, 0x0f, 0x7b, 0x69, 0x5f, 0x49, 0x77, 0xd4, 0x3b, 0x71, 0xd6, 0xfa, + 0xec, 0x49, 0xaf, 0x5a, 0x4e, 0x80, 0x70, 0x52, 0x38, 0xfa, 0x16, 0xcc, 0x78, 0xb4, 0x7d, 0x60, + 0xb8, 0x0d, 0xd2, 0x26, 0xae, 0x45, 0xdc, 0xc0, 0xe7, 0xa7, 0x50, 0xa8, 0x5f, 0x60, 0x15, 0x63, + 0xbb, 0x0f, 0x87, 0x07, 0xa8, 0xd1, 0x7d, 0x98, 0x6d, 0x53, 0xaf, 0x6d, 0x34, 0x0d, 0x26, 0x71, + 0xc7, 0x73, 0x6c, 0xb3, 0xcb, 0xb3, 0x43, 0xb1, 0x7e, 0xe5, 0xa4, 0x57, 0x9d, 0xdd, 0xe9, 0x47, + 0x9e, 0xf6, 0xaa, 0x73, 0xfc, 0xe8, 0x18, 0x24, 0x42, 0xe2, 0x41, 0x31, 0xb1, 0x3b, 0xcc, 0x0d, + 0xbb, 0x43, 0x7d, 0x13, 0x0a, 0x8d, 0x0e, 0xe5, 0x5c, 0xe8, 0x4d, 0x28, 0x58, 0xf2, 0xb7, 0x3c, + 0xf9, 0x97, 0x54, 0xc9, 0x55, 0x34, 0xa7, 0xbd, 0x6a, 0x99, 0x35, 0x09, 0x35, 0x05, 0xc0, 0x21, + 0x8b, 0xfe, 0x00, 0xca, 0xeb, 0x8f, 0xda, 0x1e, 0x0d, 0xd4, 0x9d, 0xbe, 0x02, 0x79, 0xc2, 0x01, + 0x5c, 0x5a, 0x21, 0xaa, 0x13, 0x82, 0x0c, 0x4b, 0x2c, 0xcb, 0xc3, 0xe4, 0x91, 0x61, 0x06, 0x32, + 0x6d, 0x87, 0x79, 0x78, 0x9d, 0x01, 0xb1, 0xc0, 0xe9, 0x9f, 0x87, 0x02, 0x0f, 0x28, 0xff, 0xde, + 0x0a, 0x9a, 0x81, 0x2c, 0x36, 0x8e, 0xb9, 0xd4, 0x49, 0x9c, 0xa5, 0xc6, 0xb1, 0xbe, 0x0d, 0x70, + 0x93, 0x84, 0x8a, 0x57, 0x61, 0x5a, 0x3d, 0xe2, 0x64, 0x6e, 0xf9, 0xac, 0x14, 0x3d, 0x8d, 0x93, + 0x68, 0xdc, 0x4f, 0xaf, 0x3f, 0x80, 0x22, 0xcf, 0x3f, 0x2c, 0x79, 0x47, 0x85, 0x42, 0x7b, 0x4e, + 0xa1, 0x50, 0xd9, 0x3f, 
0x33, 0x2c, 0xfb, 0xc7, 0x9e, 0x9b, 0x03, 0x65, 0xc1, 0xab, 0x4a, 0x63, + 0x2a, 0x0d, 0x57, 0xa0, 0xa0, 0xcc, 0x94, 0x5a, 0xc2, 0x96, 0x48, 0x09, 0xc2, 0x21, 0x45, 0x4c, + 0xdb, 0x01, 0x24, 0x72, 0x69, 0x3a, 0x65, 0xb1, 0xba, 0x97, 0x79, 0x7e, 0xdd, 0x8b, 0x69, 0xfa, + 0x09, 0x54, 0x86, 0xf5, 0x51, 0x2f, 0x90, 0xed, 0xd3, 0x9b, 0xa2, 0xbf, 0xa7, 0xc1, 0x4c, 0x5c, + 0x52, 0xfa, 0xeb, 0x4b, 0xaf, 0xe4, 0xfc, 0x3a, 0x1f, 0x3b, 0x91, 0xdf, 0x69, 0x70, 0x21, 0xe1, + 0xda, 0x48, 0x37, 0x3e, 0x82, 0x51, 0xf1, 0xe0, 0xc8, 0x8e, 0x10, 0x1c, 0x7f, 0xcb, 0x40, 0xf9, + 0x96, 0xb1, 0x47, 0x9c, 0x5d, 0xe2, 0x10, 0x33, 0xf0, 0x28, 0xfa, 0x31, 0x94, 0x5a, 0x46, 0x60, + 0x1e, 0x70, 0xa8, 0xea, 0x09, 0x1b, 0xe9, 0x12, 0x68, 0x42, 0x52, 0x6d, 0x2b, 0x12, 0xb3, 0xee, + 0x06, 0xb4, 0x5b, 0x9f, 0x93, 0x26, 0x95, 0x62, 0x18, 0x1c, 0xd7, 0xc6, 0x1b, 0x79, 0xfe, 0xbd, + 0xfe, 0xa8, 0xcd, 0x0a, 0xd6, 0xe8, 0xf3, 0x43, 0xc2, 0x04, 0x4c, 0xde, 0xe9, 0xd8, 0x94, 0xb4, + 0x88, 0x1b, 0x44, 0x8d, 0xfc, 0x56, 0x9f, 0x7c, 0x3c, 0xa0, 0x71, 0xfe, 0x06, 0xcc, 0xf4, 0x1b, + 0xcf, 0xb2, 0xce, 0x21, 0xe9, 0x8a, 0xfb, 0xc2, 0xec, 0x27, 0xba, 0x00, 0xb9, 0x23, 0xc3, 0xe9, + 0xc8, 0xd7, 0x88, 0xc5, 0xc7, 0xf5, 0xcc, 0x35, 0x4d, 0xff, 0x83, 0x06, 0x95, 0x61, 0x86, 0xa0, + 0x2f, 0xc4, 0x04, 0xd5, 0x4b, 0xd2, 0xaa, 0xec, 0xdb, 0xa4, 0x2b, 0xa4, 0xae, 0x43, 0xc1, 0x6b, + 0xb3, 0xd1, 0xcb, 0xa3, 0xf2, 0xd6, 0x2f, 0xab, 0x9b, 0xdc, 0x96, 0xf0, 0xd3, 0x5e, 0xf5, 0x62, + 0x42, 0xbc, 0x42, 0xe0, 0x90, 0x95, 0x65, 0x7f, 0x6e, 0x0f, 0xab, 0x48, 0x61, 0xf6, 0xbf, 0xc7, + 0x21, 0x58, 0x62, 0xf4, 0x3f, 0x69, 0x30, 0xce, 0x5b, 0xb1, 0x07, 0x50, 0x60, 0xe7, 0x67, 0x19, + 0x81, 0xc1, 0xed, 0x4a, 0x3d, 0x04, 0x30, 0xee, 0x2d, 0x12, 0x18, 0x51, 0xb4, 0x29, 0x08, 0x0e, + 0x25, 0x22, 0x0c, 0x39, 0x3b, 0x20, 0x2d, 0x75, 0x91, 0xaf, 0x0e, 0x15, 0x2d, 0x47, 0xd0, 0x1a, + 0x36, 0x8e, 0xd7, 0x1f, 0x05, 0xc4, 0x65, 0x97, 0x11, 0x3d, 0x8d, 0x4d, 0x26, 0x03, 0x0b, 0x51, + 0xfa, 0x7f, 0x34, 0x08, 0x55, 0xb1, 0xe0, 0xf7, 0x89, 0xb3, 0x7f, 0xcb, 0x76, 0x0f, 0xe5, 0xb1, + 0x86, 0xe6, 0xec, 0x4a, 0x38, 0x0e, 0x29, 0xce, 0x2a, 0x0f, 0x99, 0xd1, 0xca, 0x03, 0x53, 0x68, + 0x7a, 0x6e, 0x60, 0xbb, 0x9d, 0x81, 0xd7, 0xb6, 0x26, 0xe1, 0x38, 0xa4, 0x60, 0xcd, 0x0d, 0x25, + 0x2d, 0xc3, 0x76, 0x6d, 0xb7, 0xc9, 0x9c, 0x58, 0xf3, 0x3a, 0x6e, 0xc0, 0xab, 0xbc, 0x6c, 0x6e, + 0xf0, 0x00, 0x16, 0x9f, 0xc1, 0xa1, 0xff, 0x35, 0x0b, 0x25, 0xe6, 0xb3, 0xaa, 0x73, 0x6f, 0x40, + 0xd9, 0x89, 0x47, 0x81, 0xf4, 0xfd, 0xa2, 0x34, 0x25, 0xf9, 0xae, 0x71, 0x92, 0x96, 0x31, 0xf3, + 0x9e, 0x2c, 0x64, 0xce, 0x24, 0x99, 0x37, 0xe2, 0x48, 0x9c, 0xa4, 0x65, 0xd9, 0xeb, 0x98, 0xbd, + 0x0f, 0xd9, 0xed, 0x84, 0x57, 0xf4, 0x1d, 0x06, 0xc4, 0x02, 0x87, 0xb6, 0x60, 0xce, 0x70, 0x1c, + 0xef, 0x98, 0x03, 0xeb, 0x9e, 0x77, 0xd8, 0x32, 0xe8, 0xa1, 0xcf, 0xc7, 0xa8, 0x42, 0xfd, 0x73, + 0x92, 0x65, 0x6e, 0x75, 0x90, 0x04, 0x9f, 0xc5, 0x77, 0xd6, 0xb5, 0x8d, 0x8f, 0x78, 0x6d, 0xd7, + 0x61, 0x8a, 0xc5, 0x97, 0xd7, 0x09, 0x54, 0x87, 0x99, 0xe3, 0x97, 0x80, 0x4e, 0x7a, 0xd5, 0xa9, + 0x3b, 0x09, 0x0c, 0xee, 0xa3, 0x64, 0x2e, 0x3b, 0x76, 0xcb, 0x0e, 0x2a, 0x13, 0x9c, 0x25, 0x74, + 0xf9, 0x16, 0x03, 0x62, 0x81, 0x4b, 0xc4, 0x45, 0xe1, 0xbc, 0xb8, 0xd0, 0x7f, 0x9b, 0x05, 0x24, + 0x5a, 0x62, 0x4b, 0xf4, 0x36, 0x22, 0xd1, 0x5c, 0x86, 0x89, 0x96, 0x6c, 0xa9, 0xb5, 0x64, 0xd6, + 0x57, 0xdd, 0xb4, 0xc2, 0xa3, 0x2d, 0x28, 0x8a, 0x07, 0x1f, 0x05, 0xf1, 0xb2, 0x24, 0x2e, 0x6e, + 0x2b, 0xc4, 0x69, 0xaf, 0x3a, 0x9f, 0x50, 0x13, 0x62, 0xee, 0x74, 0xdb, 0x04, 0x47, 0x12, 0xd8, + 0x14, 0x6d, 0xb4, 0xed, 0xf8, 0xfe, 0xa4, 0x18, 
0x4d, 0xd1, 0xd1, 0x24, 0x84, 0x63, 0x54, 0xe8, + 0x2d, 0x18, 0x67, 0x27, 0x25, 0x47, 0xda, 0x2f, 0xa5, 0x4b, 0x1b, 0xec, 0xac, 0xeb, 0x05, 0x56, + 0x35, 0xd9, 0x2f, 0xcc, 0x25, 0x30, 0xed, 0x3c, 0xca, 0x7c, 0x66, 0x96, 0x9c, 0xfd, 0x43, 0xed, + 0x1b, 0x21, 0x06, 0xc7, 0xa8, 0xd0, 0x77, 0xa1, 0xb0, 0x2f, 0xdb, 0x42, 0x7e, 0x31, 0xa9, 0x13, + 0x97, 0x6a, 0x26, 0xc5, 0x08, 0xa7, 0xbe, 0x70, 0x28, 0x4d, 0x7f, 0x07, 0x8a, 0x5b, 0xb6, 0x49, + 0x3d, 0x66, 0x20, 0xbb, 0x12, 0x3f, 0x31, 0x93, 0x84, 0x57, 0xa2, 0xc2, 0x45, 0xe1, 0x59, 0x9c, + 0xb8, 0x86, 0xeb, 0x89, 0xc9, 0x23, 0x17, 0xc5, 0xc9, 0x6d, 0x06, 0xc4, 0x02, 0x77, 0xfd, 0x02, + 0xab, 0xbf, 0xbf, 0x78, 0x52, 0x1d, 0x7b, 0xfc, 0xa4, 0x3a, 0xf6, 0xfe, 0x13, 0x59, 0x8b, 0x4f, + 0x01, 0x60, 0x7b, 0xef, 0x87, 0xc4, 0x14, 0x59, 0x2d, 0xd5, 0xbe, 0x44, 0xad, 0xe9, 0xf8, 0xbe, + 0x24, 0xd3, 0xd7, 0x53, 0xc5, 0x70, 0x38, 0x41, 0x89, 0x96, 0xa1, 0x18, 0x6e, 0x42, 0xe4, 0x45, + 0xcf, 0xaa, 0xc0, 0x09, 0xd7, 0x25, 0x38, 0xa2, 0x49, 0xa4, 0xd8, 0xf1, 0x73, 0x53, 0x6c, 0x1d, + 0xb2, 0x1d, 0xdb, 0xe2, 0xaf, 0xab, 0x58, 0x7f, 0x4d, 0x95, 0xb8, 0xbb, 0x9b, 0x8d, 0xd3, 0x5e, + 0xf5, 0xa5, 0x61, 0x0b, 0xc8, 0xa0, 0xdb, 0x26, 0x7e, 0xed, 0xee, 0x66, 0x03, 0x33, 0xe6, 0xb3, + 0xde, 0x7b, 0x7e, 0xc4, 0xf7, 0x7e, 0x15, 0x40, 0x7a, 0xcd, 0xb8, 0xc5, 0xc3, 0x0d, 0x23, 0xea, + 0x66, 0x88, 0xc1, 0x31, 0x2a, 0xe4, 0xc3, 0xac, 0xc9, 0x46, 0x61, 0xf6, 0x3c, 0xec, 0x16, 0xf1, + 0x03, 0xa3, 0x25, 0x36, 0x44, 0xa3, 0x05, 0xf7, 0x25, 0xa9, 0x66, 0x76, 0xad, 0x5f, 0x18, 0x1e, + 0x94, 0x8f, 0x3c, 0x98, 0xb5, 0xe4, 0x50, 0x17, 0x29, 0x2d, 0x8e, 0xac, 0xf4, 0x22, 0x53, 0xd8, + 0xe8, 0x17, 0x84, 0x07, 0x65, 0xa3, 0x1f, 0xc0, 0xbc, 0x02, 0x0e, 0x4e, 0xd6, 0x7c, 0xc7, 0x93, + 0xad, 0x2f, 0x9c, 0xf4, 0xaa, 0xf3, 0x8d, 0xa1, 0x54, 0xf8, 0x39, 0x12, 0x90, 0x05, 0x79, 0x47, + 0xf4, 0x8f, 0x25, 0x5e, 0xf3, 0xbf, 0x91, 0xce, 0x8b, 0x28, 0xfa, 0x6b, 0xf1, 0xbe, 0x31, 0x9c, + 0x1c, 0x65, 0xcb, 0x28, 0x65, 0xa3, 0x47, 0x50, 0x32, 0x5c, 0xd7, 0x0b, 0x0c, 0x31, 0xeb, 0x4f, + 0x72, 0x55, 0xab, 0x23, 0xab, 0x5a, 0x8d, 0x64, 0xf4, 0xf5, 0xa9, 0x31, 0x0c, 0x8e, 0xab, 0x42, + 0xc7, 0x30, 0xed, 0x1d, 0xbb, 0x84, 0x62, 0xb2, 0x4f, 0x28, 0x71, 0x4d, 0xe2, 0x57, 0xca, 0x5c, + 0xfb, 0x57, 0x53, 0x6a, 0x4f, 0x30, 0x47, 0x21, 0x9d, 0x84, 0xfb, 0xb8, 0x5f, 0x0b, 0xaa, 0xb1, + 0x24, 0xe9, 0x1a, 0x8e, 0xfd, 0x23, 0x42, 0xfd, 0xca, 0x54, 0xb4, 0xc4, 0xdb, 0x08, 0xa1, 0x38, + 0x46, 0x81, 0xbe, 0x06, 0x25, 0xd3, 0xe9, 0xf8, 0x01, 0x11, 0x1b, 0xd5, 0x69, 0xfe, 0x82, 0x42, + 0xff, 0xd6, 0x22, 0x14, 0x8e, 0xd3, 0xa1, 0x0e, 0x94, 0x5b, 0xf1, 0x92, 0x51, 0x99, 0xe5, 0xde, + 0x5d, 0x4b, 0xe7, 0xdd, 0x60, 0x51, 0x8b, 0xfa, 0x8a, 0x04, 0x0e, 0x27, 0xb5, 0xcc, 0x7f, 0x1d, + 0x4a, 0xff, 0x63, 0xcb, 0xcd, 0x5a, 0xf6, 0xfe, 0x7b, 0x1c, 0xa9, 0x65, 0xff, 0x73, 0x06, 0xa6, + 0x92, 0xa7, 0xdf, 0x57, 0x0e, 0x73, 0xa9, 0xca, 0xa1, 0x1a, 0x0e, 0xb5, 0xa1, 0x4b, 0x60, 0x95, + 0xd6, 0xb3, 0x43, 0xd3, 0xba, 0xcc, 0x9e, 0xe3, 0x2f, 0x92, 0x3d, 0x6b, 0x00, 0xac, 0xcf, 0xa0, + 0x9e, 0xe3, 0x10, 0xca, 0x13, 0x67, 0x41, 0x2e, 0x7b, 0x43, 0x28, 0x8e, 0x51, 0xb0, 0x1e, 0x75, + 0xcf, 0xf1, 0xcc, 0x43, 0x7e, 0x04, 0xea, 0xd1, 0xf3, 0x94, 0x59, 0x10, 0x3d, 0x6a, 0x7d, 0x00, + 0x8b, 0xcf, 0xe0, 0xd0, 0xbb, 0x70, 0x71, 0xc7, 0xa0, 0x81, 0x6d, 0x38, 0xd1, 0x03, 0xe3, 0x43, + 0xc0, 0xc3, 0x81, 0x11, 0xe3, 0xb5, 0x51, 0x1f, 0x6a, 0x74, 0xf8, 0x11, 0x2c, 0x1a, 0x33, 0xf4, + 0xbf, 0x6b, 0x70, 0xe9, 0x4c, 0xdd, 0x9f, 0xc0, 0x88, 0xf3, 0x30, 0x39, 0xe2, 0xbc, 0x91, 0x72, + 0xdf, 0x78, 0x96, 0xb5, 0x43, 0x06, 0x9e, 0x09, 0xc8, 0xed, 0xb0, 0x86, 
0x58, 0xff, 0xb5, 0x06, + 0x93, 0xfc, 0xd7, 0x28, 0xbb, 0xda, 0x2a, 0xe4, 0xf6, 0x3d, 0xb5, 0x38, 0x2a, 0x88, 0x3f, 0x13, + 0x36, 0x18, 0x00, 0x0b, 0xf8, 0x0b, 0x2c, 0x73, 0xdf, 0xd3, 0x20, 0xb9, 0x25, 0x45, 0x37, 0x44, + 0xfc, 0x6a, 0xe1, 0x1a, 0x73, 0xc4, 0xd8, 0x7d, 0x73, 0xd8, 0x80, 0x36, 0x97, 0x6a, 0x77, 0x77, + 0x05, 0x8a, 0xd8, 0xf3, 0x82, 0x1d, 0x23, 0x38, 0xf0, 0x99, 0xe3, 0x6d, 0xf6, 0x43, 0x9e, 0x0d, + 0x77, 0x9c, 0x63, 0xb0, 0x80, 0xeb, 0xbf, 0xd2, 0xe0, 0xd2, 0xd0, 0xfd, 0x39, 0x4b, 0x01, 0x66, + 0xf8, 0x25, 0x3d, 0x0a, 0xa3, 0x30, 0xa2, 0xc3, 0x31, 0x2a, 0x36, 0x59, 0x25, 0x96, 0xee, 0xfd, + 0x93, 0x55, 0x42, 0x1b, 0x4e, 0xd2, 0xea, 0xff, 0xce, 0x40, 0x7e, 0x37, 0x30, 0x82, 0x8e, 0xff, + 0x7f, 0x8e, 0xd8, 0x57, 0x20, 0xef, 0x73, 0x3d, 0xd2, 0xbc, 0xb0, 0xc6, 0x0a, 0xed, 0x58, 0x62, + 0xf9, 0x34, 0x42, 0x7c, 0xdf, 0x68, 0xaa, 0x8c, 0x15, 0x4d, 0x23, 0x02, 0x8c, 0x15, 0x1e, 0xbd, + 0x0e, 0x79, 0x4a, 0x0c, 0x3f, 0x1c, 0xcc, 0x16, 0x94, 0x48, 0xcc, 0xa1, 0xa7, 0xbd, 0xea, 0xa4, + 0x14, 0xce, 0xbf, 0xb1, 0xa4, 0x46, 0xf7, 0x61, 0xc2, 0x22, 0x81, 0x61, 0x3b, 0x62, 0x1e, 0x4b, + 0xbd, 0xae, 0x17, 0xc2, 0x1a, 0x82, 0xb5, 0x5e, 0x62, 0x36, 0xc9, 0x0f, 0xac, 0x04, 0xb2, 0x6c, + 0x6b, 0x7a, 0x96, 0x18, 0x27, 0x72, 0x51, 0xb6, 0x5d, 0xf3, 0x2c, 0x82, 0x39, 0x46, 0x7f, 0xac, + 0x41, 0x49, 0x48, 0x5a, 0x33, 0x3a, 0x3e, 0x41, 0x2b, 0xa1, 0x17, 0xe2, 0xba, 0x55, 0x27, 0x37, + 0xce, 0x06, 0x8e, 0xd3, 0x5e, 0xb5, 0xc8, 0xc9, 0xf8, 0x24, 0xa2, 0x1c, 0x88, 0x9d, 0x51, 0xe6, + 0x9c, 0x33, 0x7a, 0x19, 0x72, 0xfc, 0xf5, 0xc8, 0xc3, 0x0c, 0xdf, 0x3a, 0x7f, 0x60, 0x58, 0xe0, + 0xf4, 0x8f, 0x32, 0x50, 0x4e, 0x38, 0x97, 0x62, 0x16, 0x08, 0x17, 0x8a, 0x99, 0x14, 0x4b, 0xea, + 0xe1, 0x7f, 0x51, 0xca, 0xda, 0x93, 0x7f, 0x91, 0xda, 0xf3, 0x3d, 0xc8, 0x9b, 0xec, 0x8c, 0xd4, + 0x3f, 0xde, 0x2b, 0xa3, 0x5c, 0x27, 0x3f, 0xdd, 0x28, 0x1a, 0xf9, 0xa7, 0x8f, 0xa5, 0x40, 0x74, + 0x13, 0x66, 0x29, 0x09, 0x68, 0x77, 0x75, 0x3f, 0x20, 0x34, 0x3e, 0xc4, 0xe7, 0xa2, 0x8e, 0x1b, + 0xf7, 0x13, 0xe0, 0x41, 0x1e, 0x7d, 0x0f, 0x26, 0xef, 0x18, 0x7b, 0x4e, 0xf8, 0x07, 0x14, 0x86, + 0xb2, 0xed, 0x9a, 0x4e, 0xc7, 0x22, 0x22, 0x1b, 0xab, 0xec, 0xa5, 0x1e, 0xed, 0x66, 0x1c, 0x79, + 0xda, 0xab, 0xce, 0x25, 0x00, 0xe2, 0x1f, 0x17, 0x9c, 0x14, 0xa1, 0x3b, 0x30, 0xfe, 0x09, 0x4e, + 0x8f, 0xdf, 0x87, 0x62, 0xd4, 0xdf, 0x7f, 0xcc, 0x2a, 0xf5, 0x87, 0x50, 0x60, 0x11, 0xaf, 0xe6, + 0xd2, 0x73, 0x5a, 0x9c, 0x64, 0xe3, 0x94, 0x49, 0xd3, 0x38, 0xe9, 0x2d, 0x28, 0xdf, 0x6d, 0x5b, + 0x2f, 0xf8, 0x17, 0x64, 0x26, 0x75, 0xd5, 0xba, 0x0a, 0xe2, 0xcf, 0x74, 0x56, 0x20, 0x44, 0xe5, + 0x8e, 0x15, 0x88, 0x78, 0xe1, 0x8d, 0xed, 0xca, 0x7f, 0xa6, 0x01, 0xf0, 0xa5, 0xd4, 0xfa, 0x11, + 0x71, 0x03, 0x76, 0x0e, 0x2c, 0xf0, 0xfb, 0xcf, 0x81, 0x67, 0x06, 0x8e, 0x41, 0x77, 0x21, 0xef, + 0x89, 0x68, 0x12, 0x7f, 0x43, 0x8e, 0xb8, 0xf9, 0x0c, 0x1f, 0x81, 0x88, 0x27, 0x2c, 0x85, 0xd5, + 0x97, 0x9e, 0x3e, 0x5b, 0x18, 0xfb, 0xe0, 0xd9, 0xc2, 0xd8, 0x87, 0xcf, 0x16, 0xc6, 0xde, 0x3d, + 0x59, 0xd0, 0x9e, 0x9e, 0x2c, 0x68, 0x1f, 0x9c, 0x2c, 0x68, 0x1f, 0x9e, 0x2c, 0x68, 0x1f, 0x9d, + 0x2c, 0x68, 0x8f, 0xff, 0xb9, 0x30, 0x76, 0x3f, 0x73, 0xb4, 0xf2, 0xdf, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x61, 0xb7, 0xc5, 0x7c, 0xc2, 0x24, 0x00, 0x00, +} + func (m *APIGroup) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -326,53 +1481,65 @@ func (m *APIGroup) Marshal() (dAtA []byte, err error) { } func (m *APIGroup) MarshalTo(dAtA []byte) 
(int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.ServerAddressByClientCIDRs) > 0 { + for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.PreferredVersion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PreferredVersion.Size())) - n1, err := m.PreferredVersion.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *APIGroupList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -380,29 +1547,36 @@ func (m *APIGroupList) Marshal() (dAtA []byte, err error) { } func (m *APIGroupList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroupList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Groups) > 0 { - for _, msg := range m.Groups { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *APIResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -410,89 +1584,90 @@ func (m *APIResource) Marshal() (dAtA []byte, err error) { } func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i 
var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - if m.Namespaced { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - if m.Verbs != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Verbs.Size())) - n2, err := m.Verbs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.StorageVersionHash) + copy(dAtA[i:], m.StorageVersionHash) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash))) + i-- + dAtA[i] = 0x52 + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x4a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x42 + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x3a } - i += n2 } + i -= len(m.SingularName) + copy(dAtA[i:], m.SingularName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) + i-- + dAtA[i] = 0x32 if len(m.ShortNames) > 0 { - for _, s := range m.ShortNames { + for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) - i += copy(dAtA[i:], m.SingularName) - if len(m.Categories) > 0 { - for _, s := range m.Categories { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.Verbs != nil { + { + size, err := m.Verbs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash))) - i += copy(dAtA[i:], m.StorageVersionHash) - return i, nil + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i-- + if m.Namespaced { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *APIResourceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -500,33 +1675,41 @@ func (m *APIResourceList) Marshal() (dAtA []byte, err error) { } func (m *APIResourceList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
+} + +func (m *APIResourceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) - i += copy(dAtA[i:], m.GroupVersion) if len(m.APIResources) > 0 { - for _, msg := range m.APIResources { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.APIResources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.APIResources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.GroupVersion) + copy(dAtA[i:], m.GroupVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *APIVersions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -534,44 +1717,45 @@ func (m *APIVersions) Marshal() (dAtA []byte, err error) { } func (m *APIVersions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIVersions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Versions) > 0 { - for _, s := range m.Versions { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if len(m.ServerAddressByClientCIDRs) > 0 { + for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i-- + dAtA[i] = 0x12 } } - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Versions[iNdEx]) + copy(dAtA[i:], m.Versions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Versions[iNdEx]))) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *CreateOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -579,36 +1763,36 @@ func (m *CreateOptions) Marshal() (dAtA []byte, err error) { } func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CreateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x1a if len(m.DryRun) > 0 { - for _, s := range m.DryRun { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- dAtA[i] = 
0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i += copy(dAtA[i:], m.FieldManager) - return i, nil + return len(dAtA) - i, nil } func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -616,63 +1800,65 @@ func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { } func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.GracePeriodSeconds != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) - } - if m.Preconditions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Preconditions.Size())) - n3, err := m.Preconditions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- + dAtA[i] = 0x2a } - i += n3 + } + if m.PropagationPolicy != nil { + i -= len(*m.PropagationPolicy) + copy(dAtA[i:], *m.PropagationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) + i-- + dAtA[i] = 0x22 } if m.OrphanDependents != nil { - dAtA[i] = 0x18 - i++ + i-- if *m.OrphanDependents { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if m.PropagationPolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) - i += copy(dAtA[i:], *m.PropagationPolicy) + i-- + dAtA[i] = 0x18 } - if len(m.DryRun) > 0 { - for _, s := range m.DryRun { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ + if m.Preconditions != nil { + { + size, err := m.Preconditions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 + } + if m.GracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *Duration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -680,20 +1866,25 @@ func (m *Duration) Marshal() (dAtA []byte, err error) { } func (m *Duration) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Duration)) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *ExportOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -701,82 +1892,68 @@ func (m 
*ExportOptions) Marshal() (dAtA []byte, err error) { } func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - if m.Export { + i-- + if m.Exact { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- dAtA[i] = 0x10 - i++ - if m.Exact { + i-- + if m.Export { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *Fields) Marshal() (dAtA []byte, err error) { +func (m *FieldsV1) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Fields) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *FieldsV1) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldsV1) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Map) > 0 { - keysForMap := make([]string, 0, len(m.Map)) - for k := range m.Map { - keysForMap = append(keysForMap, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMap) - for _, k := range keysForMap { - dAtA[i] = 0xa - i++ - v := m.Map[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n4, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } + if m.Raw != nil { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *GetOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -784,21 +1961,27 @@ func (m *GetOptions) Marshal() (dAtA []byte, err error) { } func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *GroupKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -806,25 +1989,32 @@ func (m *GroupKind) Marshal() (dAtA []byte, err error) { } func (m *GroupKind) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *GroupResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -832,25 +2022,32 @@ func (m *GroupResource) Marshal() (dAtA []byte, err error) { } func (m *GroupResource) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *GroupVersion) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -858,173 +2055,141 @@ func (m *GroupVersion) Marshal() (dAtA []byte, err error) { } func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - return i, nil -} - -func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) - i += copy(dAtA[i:], m.GroupVersion) - dAtA[i] = 0x12 - i++ + i -= len(m.Version) + copy(dAtA[i:], m.Version) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += 
copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - return i, nil -} - -func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersionForDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - return i, nil + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.GroupVersion) + copy(dAtA[i:], m.GroupVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Initializer) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil + return len(dAtA) - i, nil } -func (m *Initializers) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Initializers) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Pending) > 0 { - for _, msg := range m.Pending { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Result != nil { - dAtA[i] = 0x12 - i++ - i 
= encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) - n5, err := m.Result.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - return i, nil + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *LabelSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1032,51 +2197,60 @@ func (m *LabelSelector) Marshal() (dAtA []byte, err error) { } func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + if len(m.MatchExpressions) > 0 { + for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } if len(m.MatchLabels) > 0 { keysForMatchLabels := make([]string, 0, len(m.MatchLabels)) for k := range m.MatchLabels { keysForMatchLabels = append(keysForMatchLabels, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) - for _, k := range keysForMatchLabels { - dAtA[i] = 0xa - i++ - v := m.MatchLabels[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ + for iNdEx := len(keysForMatchLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.MatchLabels[string(keysForMatchLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i -= len(keysForMatchLabels[iNdEx]) + copy(dAtA[i:], keysForMatchLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMatchLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1084,40 +2258,41 @@ func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { } func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) if len(m.Values) > 0 { - for _, s := range m.Values { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1125,37 +2300,46 @@ func (m *List) Marshal() (dAtA []byte, err error) { } func (m *List) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ListMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1163,29 +2347,42 @@ func (m *ListMeta) Marshal() (dAtA []byte, err error) { } func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) - i += copy(dAtA[i:], m.SelfLink) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x1a - i++ + if m.RemainingItemCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RemainingItemCount)) + i-- + dAtA[i] = 0x20 + } + i -= len(m.Continue) + copy(dAtA[i:], m.Continue) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) - i += copy(dAtA[i:], m.Continue) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + 
dAtA[i] = 0x12 + i -= len(m.SelfLink) + copy(dAtA[i:], m.SelfLink) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ListOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1193,49 +2390,66 @@ func (m *ListOptions) Marshal() (dAtA []byte, err error) { } func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) - i += copy(dAtA[i:], m.LabelSelector) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) - i += copy(dAtA[i:], m.FieldSelector) - dAtA[i] = 0x18 - i++ - if m.Watch { + i-- + if m.AllowWatchBookmarks { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) + i-- + dAtA[i] = 0x48 + i -= len(m.Continue) + copy(dAtA[i:], m.Continue) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i-- + dAtA[i] = 0x42 + i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x38 if m.TimeoutSeconds != nil { - dAtA[i] = 0x28 - i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x28 } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) - i += copy(dAtA[i:], m.Continue) - return i, nil + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x22 + i-- + if m.Watch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.FieldSelector) + copy(dAtA[i:], m.FieldSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) + i-- + dAtA[i] = 0x12 + i -= len(m.LabelSelector) + copy(dAtA[i:], m.LabelSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ManagedFieldsEntry) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1243,49 +2457,66 @@ func (m *ManagedFieldsEntry) Marshal() (dAtA []byte, err error) { } func (m *ManagedFieldsEntry) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ManagedFieldsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manager))) - i += copy(dAtA[i:], m.Manager) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) - i += copy(dAtA[i:], m.Operation) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - if m.Time != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n7, err := m.Time.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.FieldsV1 != nil { + { + 
size, err := m.FieldsV1.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 + i-- + dAtA[i] = 0x3a } - if m.Fields != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Fields.Size())) - n8, err := m.Fields.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.FieldsType) + copy(dAtA[i:], m.FieldsType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldsType))) + i-- + dAtA[i] = 0x32 + if m.Time != nil { + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n8 + i-- + dAtA[i] = 0x22 } - return i, nil + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x1a + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x12 + i -= len(m.Manager) + copy(dAtA[i:], m.Manager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manager))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1293,80 +2524,57 @@ func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { } func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) - i += copy(dAtA[i:], m.GenerateName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) - i += copy(dAtA[i:], m.SelfLink) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i += copy(dAtA[i:], m.ResourceVersion) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) - n9, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - if m.DeletionTimestamp != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) - n10, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.ManagedFields) > 0 { + for iNdEx := len(m.ManagedFields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ManagedFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a } - i += n10 } - if m.DeletionGracePeriodSeconds != nil { - dAtA[i] = 0x50 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) - } - if len(m.Labels) > 0 { - keysForLabels := make([]string, 0, len(m.Labels)) - for k := range m.Labels { - 
keysForLabels = append(keysForLabels, string(k)) + i -= len(m.ClusterName) + copy(dAtA[i:], m.ClusterName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) + i-- + dAtA[i] = 0x7a + if len(m.Finalizers) > 0 { + for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Finalizers[iNdEx]) + copy(dAtA[i:], m.Finalizers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) + i-- + dAtA[i] = 0x72 } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - for _, k := range keysForLabels { - dAtA[i] = 0x5a - i++ - v := m.Labels[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + } + if len(m.OwnerReferences) > 0 { + for iNdEx := len(m.OwnerReferences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OwnerReferences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a } } if len(m.Annotations) > 0 { @@ -1375,86 +2583,115 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { keysForAnnotations = append(keysForAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - for _, k := range keysForAnnotations { - dAtA[i] = 0x62 - i++ - v := m.Annotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.OwnerReferences) > 0 { - for _, msg := range m.OwnerReferences { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 } } - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - dAtA[i] = 0x72 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) } - } - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) - i += copy(dAtA[i:], m.ClusterName) - if m.Initializers != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) - n11, err := m.Initializers.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.Labels[string(keysForLabels[iNdEx])] + 
baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForLabels[iNdEx]) + copy(dAtA[i:], keysForLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a } - i += n11 } - if len(m.ManagedFields) > 0 { - for _, msg := range m.ManagedFields { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.DeletionGracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) + i-- + dAtA[i] = 0x50 + } + if m.DeletionTimestamp != nil { + { + size, err := m.DeletionTimestamp.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x4a + } + { + size, err := m.CreationTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return i, nil + i-- + dAtA[i] = 0x42 + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x38 + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.SelfLink) + copy(dAtA[i:], m.SelfLink) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.GenerateName) + copy(dAtA[i:], m.GenerateName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *OwnerReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1462,53 +2699,142 @@ func (m *OwnerReference) Marshal() (dAtA []byte, err error) { } func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OwnerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - if m.Controller != nil { - dAtA[i] = 0x30 - i++ - if *m.Controller { + if m.BlockOwnerDeletion != nil { + i-- + if *m.BlockOwnerDeletion { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if m.BlockOwnerDeletion != nil { + i-- dAtA[i] = 0x38 - i++ - if *m.BlockOwnerDeletion { + } + if m.Controller != nil { + i-- + if *m.Controller { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x30 + } + i -= 
len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Patch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1516,17 +2842,22 @@ func (m *Patch) Marshal() (dAtA []byte, err error) { } func (m *Patch) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Patch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *PatchOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1534,46 +2865,46 @@ func (m *PatchOptions) Marshal() (dAtA []byte, err error) { } func (m *PatchOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PatchOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.DryRun) > 0 { - for _, s := range m.DryRun { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = 
uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x1a if m.Force != nil { - dAtA[i] = 0x10 - i++ + i-- if *m.Force { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x10 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i += copy(dAtA[i:], m.FieldManager) - return i, nil + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *Preconditions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1581,29 +2912,36 @@ func (m *Preconditions) Marshal() (dAtA []byte, err error) { } func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Preconditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.UID != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) - i += copy(dAtA[i:], *m.UID) - } if m.ResourceVersion != nil { - dAtA[i] = 0x12 - i++ + i -= len(*m.ResourceVersion) + copy(dAtA[i:], *m.ResourceVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) - i += copy(dAtA[i:], *m.ResourceVersion) + i-- + dAtA[i] = 0x12 + } + if m.UID != nil { + i -= len(*m.UID) + copy(dAtA[i:], *m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *RootPaths) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1611,32 +2949,31 @@ func (m *RootPaths) Marshal() (dAtA []byte, err error) { } func (m *RootPaths) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RootPaths) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Paths) > 0 { - for _, s := range m.Paths { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1644,25 +2981,32 @@ func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { } func (m *ServerAddressByClientCIDR) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServerAddressByClientCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - 
dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) - i += copy(dAtA[i:], m.ClientCIDR) - dAtA[i] = 0x12 - i++ + i -= len(m.ServerAddress) + copy(dAtA[i:], m.ServerAddress) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServerAddress))) - i += copy(dAtA[i:], m.ServerAddress) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.ClientCIDR) + copy(dAtA[i:], m.ClientCIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Status) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1670,50 +3014,62 @@ func (m *Status) Marshal() (dAtA []byte, err error) { } func (m *Status) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n12, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x30 if m.Details != nil { + { + size, err := m.Details.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) - n13, err := m.Details.MarshalTo(dAtA[i:]) + } + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n13 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Code)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatusCause) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1721,29 +3077,37 @@ func (m *StatusCause) Marshal() (dAtA []byte, err error) { } func (m *StatusCause) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x1a - i++ + i -= len(m.Field) + copy(dAtA[i:], m.Field) i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Field))) - i += copy(dAtA[i:], m.Field) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *StatusDetails) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1751,72 +3115,116 @@ func (m *StatusDetails) Marshal() (dAtA []byte, err error) { } func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) + i-- + dAtA[i] = 0x28 if len(m.Causes) > 0 { - for _, msg := range m.Causes { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Causes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Causes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x22 } } - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *Timestamp) Marshal() (dAtA []byte, err error) { +func (m *TableOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TableOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.IncludeObject) + copy(dAtA[i:], m.IncludeObject) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Timestamp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) - dAtA[i] = 0x10 - i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Nanos)) - return i, nil + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *TypeMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1824,25 +3232,32 @@ func (m *TypeMeta) Marshal() (dAtA []byte, err error) { } func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *UpdateOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1850,36 +3265,36 @@ func (m *UpdateOptions) Marshal() (dAtA []byte, err error) { } func (m *UpdateOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x12 if len(m.DryRun) > 0 { - for _, s := range m.DryRun { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i += copy(dAtA[i:], m.FieldManager) - return i, nil + return len(dAtA) - i, nil } func (m Verbs) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1887,32 +3302,31 @@ func (m Verbs) Marshal() (dAtA []byte, err error) { } func (m Verbs) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m Verbs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, s := range m { + for iNdEx := len(m) - 1; iNdEx >= 
0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *WatchEvent) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1920,35 +3334,48 @@ func (m *WatchEvent) Marshal() (dAtA []byte, err error) { } func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n14, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n14 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *APIGroup) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1971,6 +3398,9 @@ func (m *APIGroup) Size() (n int) { } func (m *APIGroupList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Groups) > 0 { @@ -1983,6 +3413,9 @@ func (m *APIGroupList) Size() (n int) { } func (m *APIResource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2018,6 +3451,9 @@ func (m *APIResource) Size() (n int) { } func (m *APIResourceList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.GroupVersion) @@ -2032,6 +3468,9 @@ func (m *APIResourceList) Size() (n int) { } func (m *APIVersions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Versions) > 0 { @@ -2050,6 +3489,9 @@ func (m *APIVersions) Size() (n int) { } func (m *CreateOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.DryRun) > 0 { @@ -2064,6 +3506,9 @@ func (m *CreateOptions) Size() (n int) { } func (m *DeleteOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.GracePeriodSeconds != nil { @@ -2090,6 +3535,9 @@ func (m *DeleteOptions) Size() (n int) { } func (m *Duration) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Duration)) @@ -2097,6 +3545,9 @@ func (m *Duration) Size() (n int) { } func (m *ExportOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -2104,22 +3555,23 @@ func (m *ExportOptions) Size() (n int) { return n } -func (m *Fields) Size() (n int) { +func (m *FieldsV1) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l - if len(m.Map) > 0 { - for k, v := range m.Map { - _ = k - _ = v - l = v.Size() - 
mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) } return n } func (m *GetOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ResourceVersion) @@ -2128,6 +3580,9 @@ func (m *GetOptions) Size() (n int) { } func (m *GroupKind) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2138,6 +3593,9 @@ func (m *GroupKind) Size() (n int) { } func (m *GroupResource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2148,6 +3606,9 @@ func (m *GroupResource) Size() (n int) { } func (m *GroupVersion) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2158,6 +3619,9 @@ func (m *GroupVersion) Size() (n int) { } func (m *GroupVersionForDiscovery) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.GroupVersion) @@ -2168,6 +3632,9 @@ func (m *GroupVersionForDiscovery) Size() (n int) { } func (m *GroupVersionKind) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2180,6 +3647,9 @@ func (m *GroupVersionKind) Size() (n int) { } func (m *GroupVersionResource) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Group) @@ -2191,31 +3661,10 @@ func (m *GroupVersionResource) Size() (n int) { return n } -func (m *Initializer) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Initializers) Size() (n int) { - var l int - _ = l - if len(m.Pending) > 0 { - for _, e := range m.Pending { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *LabelSelector) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.MatchLabels) > 0 { @@ -2236,6 +3685,9 @@ func (m *LabelSelector) Size() (n int) { } func (m *LabelSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Key) @@ -2252,6 +3704,9 @@ func (m *LabelSelectorRequirement) Size() (n int) { } func (m *List) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2266,6 +3721,9 @@ func (m *List) Size() (n int) { } func (m *ListMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.SelfLink) @@ -2274,10 +3732,16 @@ func (m *ListMeta) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Continue) n += 1 + l + sovGenerated(uint64(l)) + if m.RemainingItemCount != nil { + n += 1 + sovGenerated(uint64(*m.RemainingItemCount)) + } return n } func (m *ListOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.LabelSelector) @@ -2293,10 +3757,14 @@ func (m *ListOptions) Size() (n int) { n += 1 + sovGenerated(uint64(m.Limit)) l = len(m.Continue) n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } func (m *ManagedFieldsEntry) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Manager) @@ -2309,14 +3777,19 @@ func (m *ManagedFieldsEntry) Size() (n int) { l = m.Time.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Fields != nil { - l = m.Fields.Size() + l = len(m.FieldsType) + n += 1 + l + sovGenerated(uint64(l)) + if m.FieldsV1 != nil { + l = m.FieldsV1.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } func (m *ObjectMeta) 
Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2371,10 +3844,6 @@ func (m *ObjectMeta) Size() (n int) { } l = len(m.ClusterName) n += 1 + l + sovGenerated(uint64(l)) - if m.Initializers != nil { - l = m.Initializers.Size() - n += 2 + l + sovGenerated(uint64(l)) - } if len(m.ManagedFields) > 0 { for _, e := range m.ManagedFields { l = e.Size() @@ -2385,6 +3854,9 @@ func (m *ObjectMeta) Size() (n int) { } func (m *OwnerReference) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -2404,13 +3876,47 @@ func (m *OwnerReference) Size() (n int) { return n } +func (m *PartialObjectMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PartialObjectMetadataList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *Patch) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l return n } func (m *PatchOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.DryRun) > 0 { @@ -2428,6 +3934,9 @@ func (m *PatchOptions) Size() (n int) { } func (m *Preconditions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.UID != nil { @@ -2442,6 +3951,9 @@ func (m *Preconditions) Size() (n int) { } func (m *RootPaths) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Paths) > 0 { @@ -2454,6 +3966,9 @@ func (m *RootPaths) Size() (n int) { } func (m *ServerAddressByClientCIDR) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ClientCIDR) @@ -2464,6 +3979,9 @@ func (m *ServerAddressByClientCIDR) Size() (n int) { } func (m *Status) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -2483,6 +4001,9 @@ func (m *Status) Size() (n int) { } func (m *StatusCause) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2495,6 +4016,9 @@ func (m *StatusCause) Size() (n int) { } func (m *StatusDetails) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -2515,7 +4039,21 @@ func (m *StatusDetails) Size() (n int) { return n } +func (m *TableOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IncludeObject) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *Timestamp) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Seconds)) @@ -2524,6 +4062,9 @@ func (m *Timestamp) Size() (n int) { } func (m *TypeMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Kind) @@ -2534,6 +4075,9 @@ func (m *TypeMeta) Size() (n int) { } func (m *UpdateOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.DryRun) > 0 { @@ -2548,6 +4092,9 @@ func (m *UpdateOptions) Size() (n int) { } func (m Verbs) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -2560,6 +4107,9 @@ func (m Verbs) Size() (n int) { } func (m *WatchEvent) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -2570,14 +4120,7 @@ func (m *WatchEvent) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return 
(math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2586,11 +4129,21 @@ func (this *APIGroup) String() string { if this == nil { return "nil" } + repeatedStringForVersions := "[]GroupVersionForDiscovery{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + repeatedStringForServerAddressByClientCIDRs := "[]ServerAddressByClientCIDR{" + for _, f := range this.ServerAddressByClientCIDRs { + repeatedStringForServerAddressByClientCIDRs += strings.Replace(strings.Replace(f.String(), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForServerAddressByClientCIDRs += "}" s := strings.Join([]string{`&APIGroup{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, + `Versions:` + repeatedStringForVersions + `,`, `PreferredVersion:` + strings.Replace(strings.Replace(this.PreferredVersion.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, - `ServerAddressByClientCIDRs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServerAddressByClientCIDRs), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + `,`, + `ServerAddressByClientCIDRs:` + repeatedStringForServerAddressByClientCIDRs + `,`, `}`, }, "") return s @@ -2599,8 +4152,13 @@ func (this *APIGroupList) String() string { if this == nil { return "nil" } + repeatedStringForGroups := "[]APIGroup{" + for _, f := range this.Groups { + repeatedStringForGroups += strings.Replace(strings.Replace(f.String(), "APIGroup", "APIGroup", 1), `&`, ``, 1) + "," + } + repeatedStringForGroups += "}" s := strings.Join([]string{`&APIGroupList{`, - `Groups:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Groups), "APIGroup", "APIGroup", 1), `&`, ``, 1) + `,`, + `Groups:` + repeatedStringForGroups + `,`, `}`, }, "") return s @@ -2628,9 +4186,14 @@ func (this *APIResourceList) String() string { if this == nil { return "nil" } + repeatedStringForAPIResources := "[]APIResource{" + for _, f := range this.APIResources { + repeatedStringForAPIResources += strings.Replace(strings.Replace(f.String(), "APIResource", "APIResource", 1), `&`, ``, 1) + "," + } + repeatedStringForAPIResources += "}" s := strings.Join([]string{`&APIResourceList{`, `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, - `APIResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.APIResources), "APIResource", "APIResource", 1), `&`, ``, 1) + `,`, + `APIResources:` + repeatedStringForAPIResources + `,`, `}`, }, "") return s @@ -2652,7 +4215,7 @@ func (this *DeleteOptions) String() string { } s := strings.Join([]string{`&DeleteOptions{`, `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, - `Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`, + `Preconditions:` + strings.Replace(this.Preconditions.String(), "Preconditions", "Preconditions", 1) + `,`, `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, @@ 
-2681,22 +4244,12 @@ func (this *ExportOptions) String() string { }, "") return s } -func (this *Fields) String() string { +func (this *FieldsV1) String() string { if this == nil { return "nil" } - keysForMap := make([]string, 0, len(this.Map)) - for k := range this.Map { - keysForMap = append(keysForMap, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMap) - mapStringForMap := "map[string]Fields{" - for _, k := range keysForMap { - mapStringForMap += fmt.Sprintf("%v: %v,", k, this.Map[k]) - } - mapStringForMap += "}" - s := strings.Join([]string{`&Fields{`, - `Map:` + mapStringForMap + `,`, + s := strings.Join([]string{`&FieldsV1{`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, `}`, }, "") return s @@ -2722,31 +4275,15 @@ func (this *GroupVersionForDiscovery) String() string { }, "") return s } -func (this *Initializer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Initializer{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *Initializers) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Initializers{`, - `Pending:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Pending), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, - `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "Status", 1) + `,`, - `}`, - }, "") - return s -} func (this *LabelSelector) String() string { if this == nil { return "nil" } + repeatedStringForMatchExpressions := "[]LabelSelectorRequirement{" + for _, f := range this.MatchExpressions { + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" keysForMatchLabels := make([]string, 0, len(this.MatchLabels)) for k := range this.MatchLabels { keysForMatchLabels = append(keysForMatchLabels, k) @@ -2759,7 +4296,7 @@ func (this *LabelSelector) String() string { mapStringForMatchLabels += "}" s := strings.Join([]string{`&LabelSelector{`, `MatchLabels:` + mapStringForMatchLabels + `,`, - `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + `,`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, `}`, }, "") return s @@ -2780,9 +4317,14 @@ func (this *List) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]RawExtension{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&List{`, `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -2795,6 +4337,7 @@ func (this *ListMeta) String() string { `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, + `RemainingItemCount:` + valueToStringGenerated(this.RemainingItemCount) + `,`, `}`, }, "") return s @@ -2811,6 +4354,7 @@ func (this *ListOptions) String() string { `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, `Limit:` + 
fmt.Sprintf("%v", this.Limit) + `,`, `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, + `AllowWatchBookmarks:` + fmt.Sprintf("%v", this.AllowWatchBookmarks) + `,`, `}`, }, "") return s @@ -2824,7 +4368,8 @@ func (this *ManagedFieldsEntry) String() string { `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, `Time:` + strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "Time", 1) + `,`, - `Fields:` + strings.Replace(fmt.Sprintf("%v", this.Fields), "Fields", "Fields", 1) + `,`, + `FieldsType:` + fmt.Sprintf("%v", this.FieldsType) + `,`, + `FieldsV1:` + strings.Replace(this.FieldsV1.String(), "FieldsV1", "FieldsV1", 1) + `,`, `}`, }, "") return s @@ -2833,6 +4378,16 @@ func (this *ObjectMeta) String() string { if this == nil { return "nil" } + repeatedStringForOwnerReferences := "[]OwnerReference{" + for _, f := range this.OwnerReferences { + repeatedStringForOwnerReferences += strings.Replace(strings.Replace(f.String(), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + "," + } + repeatedStringForOwnerReferences += "}" + repeatedStringForManagedFields := "[]ManagedFieldsEntry{" + for _, f := range this.ManagedFields { + repeatedStringForManagedFields += strings.Replace(strings.Replace(f.String(), "ManagedFieldsEntry", "ManagedFieldsEntry", 1), `&`, ``, 1) + "," + } + repeatedStringForManagedFields += "}" keysForLabels := make([]string, 0, len(this.Labels)) for k := range this.Labels { keysForLabels = append(keysForLabels, k) @@ -2861,16 +4416,15 @@ func (this *ObjectMeta) String() string { `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, - `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "Time", 1), `&`, ``, 1) + `,`, + `CreationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreationTimestamp), "Time", "Time", 1), `&`, ``, 1) + `,`, `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "Time", 1) + `,`, `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, `Labels:` + mapStringForLabels + `,`, `Annotations:` + mapStringForAnnotations + `,`, - `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + `,`, + `OwnerReferences:` + repeatedStringForOwnerReferences + `,`, `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, - `Initializers:` + strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializers", "Initializers", 1) + `,`, - `ManagedFields:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ManagedFields), "ManagedFieldsEntry", "ManagedFieldsEntry", 1), `&`, ``, 1) + `,`, + `ManagedFields:` + repeatedStringForManagedFields + `,`, `}`, }, "") return s @@ -2890,6 +4444,32 @@ func (this *OwnerReference) String() string { }, "") return s } +func (this *PartialObjectMetadata) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PartialObjectMetadata{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PartialObjectMetadataList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PartialObjectMetadata{" + for _, 
f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PartialObjectMetadata", "PartialObjectMetadata", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PartialObjectMetadataList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} func (this *Patch) String() string { if this == nil { return "nil" @@ -2952,7 +4532,7 @@ func (this *Status) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "StatusDetails", "StatusDetails", 1) + `,`, + `Details:` + strings.Replace(this.Details.String(), "StatusDetails", "StatusDetails", 1) + `,`, `Code:` + fmt.Sprintf("%v", this.Code) + `,`, `}`, }, "") @@ -2974,17 +4554,32 @@ func (this *StatusDetails) String() string { if this == nil { return "nil" } + repeatedStringForCauses := "[]StatusCause{" + for _, f := range this.Causes { + repeatedStringForCauses += strings.Replace(strings.Replace(f.String(), "StatusCause", "StatusCause", 1), `&`, ``, 1) + "," + } + repeatedStringForCauses += "}" s := strings.Join([]string{`&StatusDetails{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Causes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Causes), "StatusCause", "StatusCause", 1), `&`, ``, 1) + `,`, + `Causes:` + repeatedStringForCauses + `,`, `RetryAfterSeconds:` + fmt.Sprintf("%v", this.RetryAfterSeconds) + `,`, `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `}`, }, "") return s } +func (this *TableOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TableOptions{`, + `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`, + `}`, + }, "") + return s +} func (this *Timestamp) String() string { if this == nil { return "nil" @@ -3024,7 +4619,7 @@ func (this *WatchEvent) String() string { } s := strings.Join([]string{`&WatchEvent{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3052,7 +4647,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3080,7 +4675,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3090,6 +4685,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3109,7 +4707,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3118,6 +4716,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3140,7 +4741,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3149,6 +4750,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3170,7 +4774,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3179,6 +4783,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3196,6 +4803,9 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3223,7 +4833,7 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3251,7 +4861,7 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3260,6 +4870,9 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3277,6 +4890,9 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3304,7 +4920,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3332,7 +4948,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3342,6 +4958,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3361,7 +4980,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3381,7 +5000,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3391,6 +5010,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3410,7 +5032,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) 
& 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3419,6 +5041,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3443,7 +5068,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3453,6 +5078,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3472,7 +5100,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3482,6 +5110,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3501,7 +5132,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3511,6 +5142,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3530,7 +5164,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3540,6 +5174,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3559,7 +5196,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3569,6 +5206,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3588,7 +5228,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3598,6 +5238,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3612,6 +5255,9 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3639,7 +5285,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3667,7 +5313,7 @@ func (m 
*APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3677,6 +5323,9 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3696,7 +5345,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3705,6 +5354,9 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3722,6 +5374,9 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3749,7 +5404,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3777,7 +5432,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3787,6 +5442,9 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3806,7 +5464,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3815,6 +5473,9 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3832,6 +5493,9 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3859,7 +5523,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3887,7 +5551,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3897,6 +5561,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3916,7 +5583,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3926,6 +5593,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -3940,6 +5610,9 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3967,7 +5640,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3995,7 +5668,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4015,7 +5688,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4024,6 +5697,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4048,7 +5724,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4069,7 +5745,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4079,6 +5755,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4099,7 +5778,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4109,6 +5788,9 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4120,7 +5802,10 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -4150,7 +5835,7 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4178,7 +5863,7 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Duration |= (time.Duration(b) & 0x7F) << shift + m.Duration |= time.Duration(b&0x7F) << shift if b < 0x80 { break } @@ -4192,6 +5877,9 @@ func (m *Duration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4219,7 +5907,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4247,7 +5935,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-4267,7 +5955,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4282,6 +5970,9 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4294,7 +5985,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *Fields) Unmarshal(dAtA []byte) error { +func (m *FieldsV1) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4309,7 +6000,7 @@ func (m *Fields) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4317,17 +6008,17 @@ func (m *Fields) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Fields: wiretype end group for non-group") + return fmt.Errorf("proto: FieldsV1: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Fields: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FieldsV1: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Map", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4337,114 +6028,25 @@ func (m *Fields) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Map == nil { - m.Map = make(map[string]Fields) - } - var mapkey string - mapvalue := &Fields{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + 
mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &Fields{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} } - m.Map[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -4455,6 +6057,9 @@ func (m *Fields) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4482,7 +6087,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4510,7 +6115,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4520,6 +6125,9 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4534,6 +6142,9 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4561,7 +6172,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4589,7 +6200,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4599,6 +6210,9 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4618,7 +6232,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4628,6 +6242,9 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4642,6 +6259,9 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4669,7 +6289,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4697,7 +6317,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) 
<< shift if b < 0x80 { break } @@ -4707,6 +6327,9 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4726,7 +6349,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4736,6 +6359,9 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4750,6 +6376,9 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4777,7 +6406,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4805,7 +6434,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4815,6 +6444,9 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4834,7 +6466,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4844,6 +6476,9 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4858,6 +6493,9 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4885,7 +6523,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4913,144 +6551,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GroupVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > 
l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5060,14 +6561,17 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + m.GroupVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5079,7 +6583,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5089,10 +6593,13 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5103,6 +6610,9 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5115,7 +6625,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { +func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5130,7 +6640,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5138,10 +6648,10 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") + return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -5158,7 +6668,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5168,6 +6678,9 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5187,7 +6700,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5197,6 +6710,9 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5204,7 +6720,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5216,7 +6732,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5226,10 +6742,13 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Resource = string(dAtA[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5240,6 +6759,9 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5252,7 +6774,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Initializer) Unmarshal(dAtA []byte) error { +func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5267,7 +6789,7 
@@ func (m *Initializer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5275,15 +6797,15 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Initializer: wiretype end group for non-group") + return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5295,7 +6817,7 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5305,66 +6827,19 @@ func (m *Initializer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Initializers) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Initializers: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Initializers: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pending", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5374,28 +6849,29 @@ func (m *Initializers) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Pending = append(m.Pending, Initializer{}) - if err := m.Pending[len(m.Pending)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - 
case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5405,24 +6881,23 @@ func (m *Initializers) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Result == nil { - m.Result = &Status{} - } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Resource = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5433,6 +6908,9 @@ func (m *Initializers) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5460,7 +6938,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5488,7 +6966,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5497,6 +6975,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5517,7 +6998,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5534,7 +7015,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5544,6 +7025,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -5560,7 +7044,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5570,6 +7054,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -5606,7 +7093,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5615,6 +7102,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5632,6 +7122,9 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5659,7 +7152,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5687,7 +7180,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5697,6 +7190,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5716,7 +7212,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5726,6 +7222,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5745,7 +7244,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5755,6 +7254,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5769,6 +7271,9 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5796,7 +7301,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5824,7 +7329,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5833,6 +7338,9 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5854,7 +7362,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5863,10 +7371,13 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + m.Items = append(m.Items, runtime.RawExtension{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -5880,6 +7391,9 @@ func (m *List) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5907,7 +7421,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5935,7 +7449,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5945,6 +7459,9 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5964,7 +7481,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5974,6 +7491,9 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5993,7 +7513,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6003,11 +7523,34 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Continue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemainingItemCount", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RemainingItemCount = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6017,6 +7560,9 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6044,7 +7590,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6072,7 +7618,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6082,6 +7628,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6101,7 +7650,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6111,6 +7660,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6130,7 +7682,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6150,7 +7702,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6160,6 +7712,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6179,7 +7734,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6199,7 +7754,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Limit |= (int64(b) & 0x7F) << shift + m.Limit |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6218,7 +7773,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6228,11 +7783,34 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Continue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowWatchBookmarks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowWatchBookmarks = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6242,6 +7820,9 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6269,7 +7850,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6297,7 +7878,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6307,6 +7888,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6326,7 +7910,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6336,6 +7920,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6355,7 +7942,7 @@ func (m 
*ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6365,6 +7952,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6384,7 +7974,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6393,6 +7983,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6403,9 +7996,41 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 5: + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldsType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldsType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldsV1", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6417,7 +8042,7 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6426,13 +8051,16 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Fields == nil { - m.Fields = &Fields{} + if m.FieldsV1 == nil { + m.FieldsV1 = &FieldsV1{} } - if err := m.Fields.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FieldsV1.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6445,6 +8073,9 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6472,7 +8103,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6500,7 +8131,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6510,6 +8141,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > 
l { return io.ErrUnexpectedEOF } @@ -6529,7 +8163,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6539,6 +8173,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6558,7 +8195,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6568,6 +8205,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6587,7 +8227,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6597,6 +8237,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6616,7 +8259,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6626,6 +8269,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6645,7 +8291,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6655,6 +8301,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6674,7 +8323,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Generation |= (int64(b) & 0x7F) << shift + m.Generation |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -6693,7 +8342,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6702,6 +8351,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6723,7 +8375,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6732,6 +8384,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6756,7 +8411,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) 
<< shift if b < 0x80 { break } @@ -6776,7 +8431,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6785,6 +8440,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6805,7 +8463,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6822,7 +8480,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6832,6 +8490,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6848,7 +8509,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6858,6 +8519,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -6894,7 +8558,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6903,6 +8567,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6923,7 +8590,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6940,7 +8607,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift + stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6950,6 +8617,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } @@ -6966,7 +8636,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6976,6 +8646,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } @@ -7012,7 +8685,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } 
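The hunks above and below repeat two mechanical changes across every generated Unmarshal method in this vendored file: the varint masking expression drops its redundant parentheses (msglen |= int(b&0x7F) << shift instead of msglen |= (int(b) & 0x7F) << shift), and every postIndex := iNdEx + <length> computation gains an explicit "if postIndex < 0" guard so a hostile length value cannot wrap the addition and slip past the end-of-buffer check. The standalone Go sketch that follows illustrates that pattern in isolation; it is not the generated code itself, and the helper and error names (decodeLengthPrefixed, errInvalidLength, errIntOverflow) are illustrative assumptions only.

    // Minimal sketch of the length-prefix decoding pattern touched by these hunks.
    package main

    import (
        "errors"
        "fmt"
        "io"
    )

    var (
        errInvalidLength = errors.New("proto: negative length found during unmarshaling")
        errIntOverflow   = errors.New("proto: integer overflow")
    )

    // decodeLengthPrefixed reads a varint length at dAtA[iNdEx:] and returns the
    // payload bytes plus the index just past them.
    func decodeLengthPrefixed(dAtA []byte, iNdEx int) ([]byte, int, error) {
        l := len(dAtA)
        var msglen int
        for shift := uint(0); ; shift += 7 {
            if shift >= 64 {
                return nil, 0, errIntOverflow
            }
            if iNdEx >= l {
                return nil, 0, io.ErrUnexpectedEOF
            }
            b := dAtA[iNdEx]
            iNdEx++
            // Same value as the old (int(b) & 0x7F) << shift; masking the byte
            // before the conversion just removes the redundant parentheses.
            msglen |= int(b&0x7F) << shift
            if b < 0x80 {
                break
            }
        }
        if msglen < 0 {
            return nil, 0, errInvalidLength
        }
        postIndex := iNdEx + msglen
        // Guard added by these hunks: a huge decoded length can wrap the
        // addition above, so a negative postIndex is rejected explicitly
        // instead of sliding past the postIndex > l bounds check.
        if postIndex < 0 {
            return nil, 0, errInvalidLength
        }
        if postIndex > l {
            return nil, 0, io.ErrUnexpectedEOF
        }
        return dAtA[iNdEx:postIndex], postIndex, nil
    }

    func main() {
        // 0x03 = payload length 3, followed by "abc".
        buf := []byte{0x03, 'a', 'b', 'c'}
        payload, next, err := decodeLengthPrefixed(buf, 0)
        fmt.Println(string(payload), next, err) // abc 4 <nil>
    }

The same negative-index guard is what the hunks also add after each iNdEx + skippy computation in the default (skip unknown field) branches.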
@@ -7021,6 +8694,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7043,7 +8719,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7053,6 +8729,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7072,7 +8751,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7082,43 +8761,13 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClusterName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Initializers == nil { - m.Initializers = &Initializers{} - } - if err := m.Initializers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ClusterName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 17: if wireType != 2 { @@ -7134,7 +8783,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7143,6 +8792,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7160,6 +8812,9 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7187,7 +8842,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7215,7 +8870,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7225,6 +8880,9 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7244,7 +8902,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7254,6 +8912,9 @@ func (m 
*OwnerReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7273,7 +8934,7 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7283,6 +8944,9 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7292,7 +8956,220 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) } - var stringLen uint64 + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Controller = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BlockOwnerDeletion = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7302,26 +9179,30 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7331,34 +9212,26 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Controller = &b - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated } - b := bool(v != 0) - m.BlockOwnerDeletion = &b + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PartialObjectMetadata{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7368,6 +9241,9 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7395,7 +9271,7 @@ func (m *Patch) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7418,6 +9294,9 @@ func (m *Patch) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7445,7 +9324,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7473,7 +9352,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7483,6 +9362,9 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7502,7 +9384,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7523,7 +9405,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7533,6 +9415,9 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7547,6 +9432,9 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7574,7 +9462,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7602,7 +9490,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7612,6 +9500,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7632,7 +9523,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7642,6 +9533,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7657,6 +9551,9 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7684,7 +9581,7 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7712,7 +9609,7 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7722,6 +9619,9 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7736,6 +9636,9 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7763,7 +9666,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7791,7 +9694,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7801,6 +9704,9 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7820,7 +9726,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7830,6 +9736,9 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7844,6 +9753,9 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7871,7 +9783,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7899,7 +9811,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -7908,6 +9820,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7929,7 +9844,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7939,6 +9854,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7958,7 +9876,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7968,6 +9886,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7987,7 +9908,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -7997,6 +9918,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8016,7 +9940,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8025,6 +9949,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8049,7 +9976,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Code |= (int32(b) & 0x7F) << shift + m.Code |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -8063,6 +9990,9 @@ func (m *Status) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8090,7 +10020,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8118,7 +10048,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8128,6 +10058,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8147,7 +10080,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8157,6 +10090,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8176,7 +10112,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8186,6 +10122,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8200,6 +10139,9 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8227,7 +10169,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8255,7 +10197,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8265,6 +10207,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8284,7 +10229,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8294,6 +10239,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8313,7 +10261,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8323,6 +10271,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8342,7 +10293,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8351,6 +10302,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8373,7 +10327,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RetryAfterSeconds |= (int32(b) & 0x7F) << shift + m.RetryAfterSeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -8392,7 +10346,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8402,6 +10356,9 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8416,6 +10373,94 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TableOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8443,7 +10488,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8471,7 +10516,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Seconds |= (int64(b) & 0x7F) << shift + m.Seconds |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -8490,7 +10535,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Nanos |= (int32(b) & 0x7F) << shift + m.Nanos |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -8504,6 +10549,9 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8531,7 +10579,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8559,7 +10607,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8569,6 +10617,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8588,7 +10639,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8598,6 +10649,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8612,6 +10666,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) 
error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8639,7 +10696,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8667,7 +10724,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8677,6 +10734,9 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8696,7 +10756,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8706,6 +10766,9 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8720,6 +10783,9 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8747,7 +10813,7 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8775,7 +10841,7 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8785,6 +10851,9 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8799,6 +10868,9 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8826,7 +10898,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8854,7 +10926,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -8864,6 +10936,9 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8883,7 +10958,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -8892,6 +10967,9 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8908,6 +10986,9 
@@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8923,6 +11004,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -8954,10 +11036,8 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -8974,229 +11054,34 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - return iNdEx, nil + iNdEx += length case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2674 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x19, 0x4d, 0x6c, 0x23, 0x57, - 0x39, 0x63, 0xc7, 0x8e, 0xfd, 0x39, 0xce, 0xcf, 0xeb, 0x16, 0x5c, 0x4b, 0xc4, 0xe9, 0x14, 0x55, - 0x29, 0x6c, 0x6d, 0x92, 0xd2, 0x6a, 0x59, 0x68, 0x21, 0x8e, 0x93, 0x6d, 0xe8, 0xa6, 0x89, 0x5e, - 0xba, 0x8b, 0x58, 0x56, 0x88, 0x89, 0xe7, 0xc5, 0x19, 0x62, 0xcf, 0x4c, 0xdf, 0x1b, 0x67, 0x37, - 0x70, 0xa0, 0x07, 0x10, 0x20, 0x41, 0xb5, 0x47, 0x4e, 0xa8, 0x2b, 0xb8, 0x70, 0xe5, 0xc4, 0x89, - 0x53, 0x25, 0xf6, 0x58, 0x89, 0x4b, 0x0f, 0xc8, 0xea, 0x06, 0x24, 0xb8, 0x71, 0xcf, 0x01, 0xa1, - 0xf7, 0x33, 0x33, 0x6f, 0xec, 0x78, 0x33, 0x66, 0x0b, 0xe2, 0x14, 0xcf, 0xf7, 0xff, 0xbe, 0xef, - 0x7b, 0xdf, 0xf7, 0xbd, 0x2f, 0xb0, 0x73, 0x7c, 0x8d, 0xd5, 0x1d, 0xaf, 0x71, 0xdc, 0x3f, 0x20, - 0xd4, 0x25, 0x01, 0x61, 0x8d, 0x13, 0xe2, 0xda, 0x1e, 0x6d, 0x28, 0x84, 0xe5, 0x3b, 0x3d, 0xab, - 0x7d, 0xe4, 0xb8, 0x84, 0x9e, 0x36, 0xfc, 0xe3, 0x0e, 0x07, 0xb0, 0x46, 0x8f, 0x04, 0x56, 0xe3, - 0x64, 0xb5, 0xd1, 0x21, 0x2e, 0xa1, 0x56, 0x40, 0xec, 0xba, 0x4f, 0xbd, 0xc0, 0x43, 0x9f, 0x97, - 0x5c, 0x75, 0x9d, 0xab, 0xee, 0x1f, 0x77, 0x38, 
0x80, 0xd5, 0x39, 0x57, 0xfd, 0x64, 0xb5, 0xfa, - 0x72, 0xc7, 0x09, 0x8e, 0xfa, 0x07, 0xf5, 0xb6, 0xd7, 0x6b, 0x74, 0xbc, 0x8e, 0xd7, 0x10, 0xcc, - 0x07, 0xfd, 0x43, 0xf1, 0x25, 0x3e, 0xc4, 0x2f, 0x29, 0xb4, 0x3a, 0xd6, 0x14, 0xda, 0x77, 0x03, - 0xa7, 0x47, 0x86, 0xad, 0xa8, 0xbe, 0x76, 0x19, 0x03, 0x6b, 0x1f, 0x91, 0x9e, 0x35, 0xcc, 0x67, - 0xfe, 0x29, 0x0b, 0x85, 0xf5, 0xbd, 0xed, 0x1b, 0xd4, 0xeb, 0xfb, 0x68, 0x19, 0xa6, 0x5d, 0xab, - 0x47, 0x2a, 0xc6, 0xb2, 0xb1, 0x52, 0x6c, 0xce, 0x3e, 0x1a, 0xd4, 0xa6, 0xce, 0x06, 0xb5, 0xe9, - 0xb7, 0xad, 0x1e, 0xc1, 0x02, 0x83, 0xba, 0x50, 0x38, 0x21, 0x94, 0x39, 0x9e, 0xcb, 0x2a, 0x99, - 0xe5, 0xec, 0x4a, 0x69, 0xed, 0x8d, 0x7a, 0x9a, 0xf3, 0xd7, 0x85, 0x82, 0xdb, 0x92, 0x75, 0xcb, - 0xa3, 0x2d, 0x87, 0xb5, 0xbd, 0x13, 0x42, 0x4f, 0x9b, 0x0b, 0x4a, 0x4b, 0x41, 0x21, 0x19, 0x8e, - 0x34, 0xa0, 0x1f, 0x1b, 0xb0, 0xe0, 0x53, 0x72, 0x48, 0x28, 0x25, 0xb6, 0xc2, 0x57, 0xb2, 0xcb, - 0xc6, 0xa7, 0xa0, 0xb6, 0xa2, 0xd4, 0x2e, 0xec, 0x0d, 0xc9, 0xc7, 0x23, 0x1a, 0xd1, 0x6f, 0x0c, - 0xa8, 0x32, 0x42, 0x4f, 0x08, 0x5d, 0xb7, 0x6d, 0x4a, 0x18, 0x6b, 0x9e, 0x6e, 0x74, 0x1d, 0xe2, - 0x06, 0x1b, 0xdb, 0x2d, 0xcc, 0x2a, 0xd3, 0xc2, 0x0f, 0x5f, 0x4f, 0x67, 0xd0, 0xfe, 0x38, 0x39, - 0x4d, 0x53, 0x59, 0x54, 0x1d, 0x4b, 0xc2, 0xf0, 0x13, 0xcc, 0x30, 0x0f, 0x61, 0x36, 0x0c, 0xe4, - 0x4d, 0x87, 0x05, 0xe8, 0x36, 0xe4, 0x3b, 0xfc, 0x83, 0x55, 0x0c, 0x61, 0x60, 0x3d, 0x9d, 0x81, - 0xa1, 0x8c, 0xe6, 0x9c, 0xb2, 0x27, 0x2f, 0x3e, 0x19, 0x56, 0xd2, 0xcc, 0x9f, 0x4f, 0x43, 0x69, - 0x7d, 0x6f, 0x1b, 0x13, 0xe6, 0xf5, 0x69, 0x9b, 0xa4, 0x48, 0x9a, 0x35, 0x00, 0xfe, 0x97, 0xf9, - 0x56, 0x9b, 0xd8, 0x95, 0xcc, 0xb2, 0xb1, 0x52, 0x68, 0x22, 0x45, 0x07, 0x6f, 0x47, 0x18, 0xac, - 0x51, 0x71, 0xa9, 0xc7, 0x8e, 0x6b, 0x8b, 0x68, 0x6b, 0x52, 0xdf, 0x72, 0x5c, 0x1b, 0x0b, 0x0c, - 0xba, 0x09, 0xb9, 0x13, 0x42, 0x0f, 0xb8, 0xff, 0x79, 0x42, 0x7c, 0x31, 0xdd, 0xf1, 0x6e, 0x73, - 0x96, 0x66, 0xf1, 0x6c, 0x50, 0xcb, 0x89, 0x9f, 0x58, 0x0a, 0x41, 0x75, 0x00, 0x76, 0xe4, 0xd1, - 0x40, 0x98, 0x53, 0xc9, 0x2d, 0x67, 0x57, 0x8a, 0xcd, 0x39, 0x6e, 0xdf, 0x7e, 0x04, 0xc5, 0x1a, - 0x05, 0xba, 0x06, 0xb3, 0xcc, 0x71, 0x3b, 0xfd, 0xae, 0x45, 0x39, 0xa0, 0x92, 0x17, 0x76, 0x5e, - 0x51, 0x76, 0xce, 0xee, 0x6b, 0x38, 0x9c, 0xa0, 0xe4, 0x9a, 0xda, 0x56, 0x40, 0x3a, 0x1e, 0x75, - 0x08, 0xab, 0xcc, 0xc4, 0x9a, 0x36, 0x22, 0x28, 0xd6, 0x28, 0xd0, 0x0b, 0x90, 0x13, 0x9e, 0xaf, - 0x14, 0x84, 0x8a, 0xb2, 0x52, 0x91, 0x13, 0x61, 0xc1, 0x12, 0x87, 0x5e, 0x82, 0x19, 0x75, 0x6b, - 0x2a, 0x45, 0x41, 0x36, 0xaf, 0xc8, 0x66, 0xc2, 0xb4, 0x0e, 0xf1, 0xe8, 0x9b, 0x80, 0x58, 0xe0, - 0x51, 0xab, 0x43, 0x14, 0xea, 0x4d, 0x8b, 0x1d, 0x55, 0x40, 0x70, 0x55, 0x15, 0x17, 0xda, 0x1f, - 0xa1, 0xc0, 0x17, 0x70, 0x99, 0xbf, 0x37, 0x60, 0x5e, 0xcb, 0x05, 0x91, 0x77, 0xd7, 0x60, 0xb6, - 0xa3, 0xdd, 0x3a, 0x95, 0x17, 0x91, 0x67, 0xf4, 0x1b, 0x89, 0x13, 0x94, 0x88, 0x40, 0x91, 0x2a, - 0x49, 0x61, 0x75, 0x59, 0x4d, 0x9d, 0xb4, 0xa1, 0x0d, 0xb1, 0x26, 0x0d, 0xc8, 0x70, 0x2c, 0xd9, - 0xfc, 0xbb, 0x21, 0x12, 0x38, 0xac, 0x37, 0x68, 0x45, 0xab, 0x69, 0x86, 0x08, 0xc7, 0xec, 0x98, - 0x7a, 0x74, 0x49, 0x21, 0xc8, 0xfc, 0x5f, 0x14, 0x82, 0xeb, 0x85, 0x5f, 0x7d, 0x50, 0x9b, 0x7a, - 0xef, 0x2f, 0xcb, 0x53, 0x66, 0x0f, 0xca, 0x1b, 0x94, 0x58, 0x01, 0xd9, 0xf5, 0x03, 0x71, 0x00, - 0x13, 0xf2, 0x36, 0x3d, 0xc5, 0x7d, 0x57, 0x1d, 0x14, 0xf8, 0xfd, 0x6e, 0x09, 0x08, 0x56, 0x18, - 0x1e, 0xbf, 0x43, 0x87, 0x74, 0xed, 0x1d, 0xcb, 0xb5, 0x3a, 0x84, 0xaa, 0x1b, 0x18, 0x79, 0x75, - 0x4b, 0xc3, 0xe1, 0x04, 0xa5, 0xf9, 0xd3, 0x2c, 0x94, 0x5b, 0xa4, 0x4b, 
0x62, 0x7d, 0x5b, 0x80, - 0x3a, 0xd4, 0x6a, 0x93, 0x3d, 0x42, 0x1d, 0xcf, 0xde, 0x27, 0x6d, 0xcf, 0xb5, 0x99, 0xc8, 0x88, - 0x6c, 0xf3, 0x33, 0x3c, 0xcf, 0x6e, 0x8c, 0x60, 0xf1, 0x05, 0x1c, 0xa8, 0x0b, 0x65, 0x9f, 0x8a, - 0xdf, 0x4e, 0xa0, 0x7a, 0x0f, 0xbf, 0xf3, 0xaf, 0xa4, 0x73, 0xf5, 0x9e, 0xce, 0xda, 0x5c, 0x3c, - 0x1b, 0xd4, 0xca, 0x09, 0x10, 0x4e, 0x0a, 0x47, 0xdf, 0x80, 0x05, 0x8f, 0xfa, 0x47, 0x96, 0xdb, - 0x22, 0x3e, 0x71, 0x6d, 0xe2, 0x06, 0x4c, 0x78, 0xa1, 0xd0, 0xbc, 0xc2, 0x3b, 0xc6, 0xee, 0x10, - 0x0e, 0x8f, 0x50, 0xa3, 0x3b, 0xb0, 0xe8, 0x53, 0xcf, 0xb7, 0x3a, 0x16, 0x97, 0xb8, 0xe7, 0x75, - 0x9d, 0xf6, 0xa9, 0xa8, 0x53, 0xc5, 0xe6, 0xd5, 0xb3, 0x41, 0x6d, 0x71, 0x6f, 0x18, 0x79, 0x3e, - 0xa8, 0x3d, 0x23, 0x5c, 0xc7, 0x21, 0x31, 0x12, 0x8f, 0x8a, 0xd1, 0x62, 0x98, 0x1b, 0x17, 0x43, - 0x73, 0x1b, 0x0a, 0xad, 0x3e, 0x15, 0x5c, 0xe8, 0x75, 0x28, 0xd8, 0xea, 0xb7, 0xf2, 0xfc, 0xf3, - 0x61, 0xcb, 0x0d, 0x69, 0xce, 0x07, 0xb5, 0x32, 0x1f, 0x12, 0xea, 0x21, 0x00, 0x47, 0x2c, 0xe6, - 0x5d, 0x28, 0x6f, 0xde, 0xf7, 0x3d, 0x1a, 0x84, 0x31, 0x7d, 0x11, 0xf2, 0x44, 0x00, 0x84, 0xb4, - 0x42, 0xdc, 0x27, 0x24, 0x19, 0x56, 0x58, 0x5e, 0xb7, 0xc8, 0x7d, 0xab, 0x1d, 0xa8, 0x82, 0x1f, - 0xd5, 0xad, 0x4d, 0x0e, 0xc4, 0x12, 0x67, 0x7e, 0x68, 0x40, 0x5e, 0x64, 0x14, 0x43, 0xef, 0x40, - 0xb6, 0x67, 0xf9, 0xaa, 0x59, 0xbd, 0x9a, 0x2e, 0xb2, 0x92, 0xb5, 0xbe, 0x63, 0xf9, 0x9b, 0x6e, - 0x40, 0x4f, 0x9b, 0x25, 0xa5, 0x24, 0xbb, 0x63, 0xf9, 0x98, 0x8b, 0xab, 0xda, 0x50, 0x08, 0xb1, - 0x68, 0x01, 0xb2, 0xc7, 0xe4, 0x54, 0x16, 0x24, 0xcc, 0x7f, 0xa2, 0x26, 0xe4, 0x4e, 0xac, 0x6e, - 0x9f, 0xa8, 0x7c, 0xba, 0x3a, 0x89, 0x56, 0x2c, 0x59, 0xaf, 0x67, 0xae, 0x19, 0xe6, 0x2e, 0xc0, - 0x0d, 0x12, 0x79, 0x68, 0x1d, 0xe6, 0xc3, 0x6a, 0x93, 0x2c, 0x82, 0x9f, 0x55, 0xe6, 0xcd, 0xe3, - 0x24, 0x1a, 0x0f, 0xd3, 0x9b, 0x77, 0xa1, 0x28, 0x0a, 0x25, 0xef, 0x77, 0x71, 0x07, 0x30, 0x9e, - 0xd0, 0x01, 0xc2, 0x86, 0x99, 0x19, 0xd7, 0x30, 0xb5, 0xba, 0xd0, 0x85, 0xb2, 0xe4, 0x0d, 0x7b, - 0x78, 0x2a, 0x0d, 0x57, 0xa1, 0x10, 0x9a, 0xa9, 0xb4, 0x44, 0xb3, 0x5b, 0x28, 0x08, 0x47, 0x14, - 0x9a, 0xb6, 0x23, 0x48, 0x14, 0xfd, 0x74, 0xca, 0xb4, 0x86, 0x96, 0x79, 0x72, 0x43, 0xd3, 0x34, - 0xfd, 0x08, 0x2a, 0xe3, 0x06, 0xbe, 0xa7, 0x68, 0x4b, 0xe9, 0x4d, 0x31, 0xdf, 0x37, 0x60, 0x41, - 0x97, 0x94, 0x3e, 0x7c, 0xe9, 0x95, 0x5c, 0x3e, 0x1a, 0x69, 0x1e, 0xf9, 0xb5, 0x01, 0x57, 0x12, - 0x47, 0x9b, 0x28, 0xe2, 0x13, 0x18, 0xa5, 0x27, 0x47, 0x76, 0x82, 0xe4, 0x68, 0x40, 0x69, 0xdb, - 0x75, 0x02, 0xc7, 0xea, 0x3a, 0x3f, 0x20, 0xf4, 0xf2, 0x61, 0xd2, 0xfc, 0xa3, 0x01, 0xb3, 0x1a, - 0x07, 0x43, 0x77, 0x61, 0x86, 0xd7, 0x5d, 0xc7, 0xed, 0xa8, 0xda, 0x91, 0x72, 0x66, 0xd0, 0x84, - 0xc4, 0xe7, 0xda, 0x93, 0x92, 0x70, 0x28, 0x12, 0xed, 0x41, 0x9e, 0x12, 0xd6, 0xef, 0x06, 0x93, - 0x95, 0x88, 0xfd, 0xc0, 0x0a, 0xfa, 0x4c, 0xd6, 0x66, 0x2c, 0xf8, 0xb1, 0x92, 0x63, 0xfe, 0x39, - 0x03, 0xe5, 0x9b, 0xd6, 0x01, 0xe9, 0xee, 0x93, 0x2e, 0x69, 0x07, 0x1e, 0x45, 0x3f, 0x84, 0x52, - 0xcf, 0x0a, 0xda, 0x47, 0x02, 0x1a, 0x8e, 0xeb, 0xad, 0x74, 0x8a, 0x12, 0x92, 0xea, 0x3b, 0xb1, - 0x18, 0x59, 0x10, 0x9f, 0x51, 0x07, 0x2b, 0x69, 0x18, 0xac, 0x6b, 0x13, 0x6f, 0x2c, 0xf1, 0xbd, - 0x79, 0xdf, 0xe7, 0xb3, 0xc4, 0xe4, 0x4f, 0xbb, 0x84, 0x09, 0x98, 0xbc, 0xdb, 0x77, 0x28, 0xe9, - 0x11, 0x37, 0x88, 0xdf, 0x58, 0x3b, 0x43, 0xf2, 0xf1, 0x88, 0xc6, 0xea, 0x1b, 0xb0, 0x30, 0x6c, - 0xfc, 0x05, 0xf5, 0xfa, 0x8a, 0x5e, 0xaf, 0x8b, 0x7a, 0x05, 0xfe, 0xad, 0x01, 0x95, 0x71, 0x86, - 0xa0, 0xcf, 0x69, 0x82, 0xe2, 0x1e, 0xf1, 0x16, 0x39, 0x95, 0x52, 0x37, 0xa1, 0xe0, 0xf9, 0xfc, - 
0x55, 0xec, 0x51, 0x95, 0xe7, 0x2f, 0x85, 0xb9, 0xbb, 0xab, 0xe0, 0xe7, 0x83, 0xda, 0xb3, 0x09, - 0xf1, 0x21, 0x02, 0x47, 0xac, 0xbc, 0x31, 0x0b, 0x7b, 0xf8, 0xb0, 0x10, 0x35, 0xe6, 0xdb, 0x02, - 0x82, 0x15, 0xc6, 0xfc, 0x83, 0x01, 0xd3, 0x62, 0x4a, 0xbe, 0x0b, 0x05, 0xee, 0x3f, 0xdb, 0x0a, - 0x2c, 0x61, 0x57, 0xea, 0xf7, 0x19, 0xe7, 0xde, 0x21, 0x81, 0x15, 0xdf, 0xaf, 0x10, 0x82, 0x23, - 0x89, 0x08, 0x43, 0xce, 0x09, 0x48, 0x2f, 0x0c, 0xe4, 0xcb, 0x63, 0x45, 0xab, 0xed, 0x40, 0x1d, - 0x5b, 0xf7, 0x36, 0xef, 0x07, 0xc4, 0xe5, 0xc1, 0x88, 0x8b, 0xc1, 0x36, 0x97, 0x81, 0xa5, 0x28, - 0xf3, 0x77, 0x06, 0x44, 0xaa, 0xf8, 0x75, 0x67, 0xa4, 0x7b, 0x78, 0xd3, 0x71, 0x8f, 0x95, 0x5b, - 0x23, 0x73, 0xf6, 0x15, 0x1c, 0x47, 0x14, 0x17, 0x35, 0xc4, 0xcc, 0x64, 0x0d, 0x91, 0x2b, 0x6c, - 0x7b, 0x6e, 0xe0, 0xb8, 0xfd, 0x91, 0xfa, 0xb2, 0xa1, 0xe0, 0x38, 0xa2, 0x30, 0xff, 0x95, 0x81, - 0x12, 0xb7, 0x35, 0xec, 0xc8, 0x5f, 0x85, 0x72, 0x57, 0x8f, 0x9e, 0xb2, 0xf9, 0x59, 0x25, 0x22, - 0x79, 0x1f, 0x71, 0x92, 0x96, 0x33, 0x8b, 0x31, 0x37, 0x62, 0xce, 0x24, 0x99, 0xb7, 0x74, 0x24, - 0x4e, 0xd2, 0xf2, 0x3a, 0x7b, 0x8f, 0xe7, 0xb5, 0x1a, 0x20, 0x23, 0xd7, 0x7e, 0x8b, 0x03, 0xb1, - 0xc4, 0x5d, 0xe4, 0x9f, 0xe9, 0x09, 0xfd, 0x73, 0x1d, 0xe6, 0x78, 0x20, 0xbd, 0x7e, 0x10, 0x4e, - 0xd9, 0x39, 0x31, 0xeb, 0xa1, 0xb3, 0x41, 0x6d, 0xee, 0x9d, 0x04, 0x06, 0x0f, 0x51, 0x72, 0x1b, - 0xbb, 0x4e, 0xcf, 0x09, 0x2a, 0x33, 0x82, 0x25, 0xb2, 0xf1, 0x26, 0x07, 0x62, 0x89, 0x4b, 0x04, - 0xa0, 0x70, 0x69, 0x00, 0xfe, 0x91, 0x01, 0x24, 0x9f, 0x05, 0xb6, 0x9c, 0x96, 0xe4, 0x8d, 0x7e, - 0x09, 0x66, 0x7a, 0xea, 0x59, 0x61, 0x24, 0x1b, 0x4a, 0xf8, 0xa2, 0x08, 0xf1, 0x68, 0x07, 0x8a, - 0xf2, 0x66, 0xc5, 0xd9, 0xd2, 0x50, 0xc4, 0xc5, 0xdd, 0x10, 0x71, 0x3e, 0xa8, 0x55, 0x13, 0x6a, - 0x22, 0xcc, 0x3b, 0xa7, 0x3e, 0xc1, 0xb1, 0x04, 0xb4, 0x06, 0x60, 0xf9, 0x8e, 0xbe, 0x43, 0x2a, - 0xc6, 0x3b, 0x88, 0xf8, 0x35, 0x88, 0x35, 0x2a, 0xf4, 0x26, 0x4c, 0x73, 0x4f, 0xa9, 0x05, 0xc3, - 0x17, 0xd2, 0xdd, 0x4f, 0xee, 0xeb, 0x66, 0x81, 0x37, 0x2d, 0xfe, 0x0b, 0x0b, 0x09, 0xe8, 0x0e, - 0xe4, 0x45, 0x5a, 0xc8, 0xa8, 0x4c, 0x38, 0x68, 0x8a, 0x57, 0x87, 0x9a, 0x92, 0xcf, 0xa3, 0x5f, - 0x58, 0x49, 0x34, 0xdf, 0x85, 0xe2, 0x8e, 0xd3, 0xa6, 0x1e, 0x57, 0xc7, 0x1d, 0xcc, 0x12, 0xaf, - 0xac, 0xc8, 0xc1, 0x61, 0xf0, 0x43, 0x3c, 0x8f, 0xba, 0x6b, 0xb9, 0x9e, 0x7c, 0x4b, 0xe5, 0xe2, - 0xa8, 0xbf, 0xcd, 0x81, 0x58, 0xe2, 0xae, 0x5f, 0xe1, 0x8d, 0xfa, 0x67, 0x0f, 0x6b, 0x53, 0x0f, - 0x1e, 0xd6, 0xa6, 0x3e, 0x78, 0xa8, 0x9a, 0xf6, 0xdf, 0x4a, 0x00, 0xbb, 0x07, 0xdf, 0x27, 0x6d, - 0x59, 0x0c, 0x2e, 0xdf, 0x00, 0xf1, 0xe1, 0x4b, 0x2d, 0x1e, 0xc5, 0xb6, 0x24, 0x33, 0x34, 0x7c, - 0x69, 0x38, 0x9c, 0xa0, 0x44, 0x0d, 0x28, 0x46, 0x5b, 0x21, 0x15, 0xb6, 0xc5, 0x30, 0x0d, 0xa2, - 0xd5, 0x11, 0x8e, 0x69, 0x12, 0x95, 0x69, 0xfa, 0xd2, 0xca, 0xd4, 0x84, 0x6c, 0xdf, 0xb1, 0x45, - 0x54, 0x8a, 0xcd, 0x2f, 0x85, 0x9d, 0xe1, 0xd6, 0x76, 0xeb, 0x7c, 0x50, 0x7b, 0x7e, 0xdc, 0x4a, - 0x35, 0x38, 0xf5, 0x09, 0xab, 0xdf, 0xda, 0x6e, 0x61, 0xce, 0x7c, 0xd1, 0xed, 0xcd, 0x4f, 0x78, - 0x7b, 0xd7, 0x00, 0xd4, 0xa9, 0x39, 0xb7, 0xbc, 0x86, 0x51, 0x76, 0xde, 0x88, 0x30, 0x58, 0xa3, - 0x42, 0x0c, 0x16, 0xdb, 0xfc, 0x71, 0xcf, 0x93, 0xdd, 0xe9, 0x11, 0x16, 0x58, 0x3d, 0xb9, 0x23, - 0x9a, 0x2c, 0x55, 0x9f, 0x53, 0x6a, 0x16, 0x37, 0x86, 0x85, 0xe1, 0x51, 0xf9, 0xc8, 0x83, 0x45, - 0x5b, 0x3d, 0x53, 0x63, 0xa5, 0xc5, 0x89, 0x95, 0x3e, 0xcb, 0x15, 0xb6, 0x86, 0x05, 0xe1, 0x51, - 0xd9, 0xe8, 0xbb, 0x50, 0x0d, 0x81, 0xa3, 0xbb, 0x02, 0xb1, 0xb5, 0xca, 0x36, 0x97, 0xce, 0x06, - 0xb5, 0x6a, 0x6b, 0x2c, 
0x15, 0x7e, 0x82, 0x04, 0x64, 0x43, 0xbe, 0x2b, 0xc7, 0xae, 0x92, 0x68, - 0x95, 0x5f, 0x4b, 0x77, 0x8a, 0x38, 0xfb, 0xeb, 0xfa, 0xb8, 0x15, 0xbd, 0x85, 0xd5, 0xa4, 0xa5, - 0x64, 0xa3, 0xfb, 0x50, 0xb2, 0x5c, 0xd7, 0x0b, 0x2c, 0xb9, 0xbd, 0x98, 0x15, 0xaa, 0xd6, 0x27, - 0x56, 0xb5, 0x1e, 0xcb, 0x18, 0x1a, 0xef, 0x34, 0x0c, 0xd6, 0x55, 0xa1, 0x7b, 0x30, 0xef, 0xdd, - 0x73, 0x09, 0xc5, 0xe4, 0x90, 0x50, 0xe2, 0xb6, 0x09, 0xab, 0x94, 0x85, 0xf6, 0x2f, 0xa7, 0xd4, - 0x9e, 0x60, 0x8e, 0x53, 0x3a, 0x09, 0x67, 0x78, 0x58, 0x0b, 0xaa, 0x03, 0x1c, 0x3a, 0xae, 0x1a, - 0xd2, 0x2b, 0x73, 0xf1, 0x9a, 0x73, 0x2b, 0x82, 0x62, 0x8d, 0x02, 0xbd, 0x0a, 0xa5, 0x76, 0xb7, - 0xcf, 0x02, 0x22, 0xf7, 0xa9, 0xf3, 0xe2, 0x06, 0x45, 0xe7, 0xdb, 0x88, 0x51, 0x58, 0xa7, 0x43, - 0x47, 0x30, 0xeb, 0x68, 0xaf, 0x81, 0xca, 0x82, 0xc8, 0xc5, 0xb5, 0x89, 0x9f, 0x00, 0xac, 0xb9, - 0xc0, 0x2b, 0x91, 0x0e, 0xc1, 0x09, 0xc9, 0xa8, 0x0f, 0xe5, 0x9e, 0xde, 0x6a, 0x2a, 0x8b, 0xc2, - 0x8f, 0xd7, 0xd2, 0xa9, 0x1a, 0x6d, 0x86, 0xf1, 0x00, 0x91, 0xc0, 0xe1, 0xa4, 0x96, 0xea, 0x57, - 0xa0, 0xf4, 0x1f, 0xce, 0xc4, 0x7c, 0xa6, 0x1e, 0xce, 0x98, 0x89, 0x66, 0xea, 0x0f, 0x33, 0x30, - 0x97, 0x8c, 0x73, 0xf4, 0xf6, 0x34, 0xc6, 0xae, 0xe5, 0xc3, 0x66, 0x90, 0x1d, 0xdb, 0x0c, 0x54, - 0xcd, 0x9d, 0x7e, 0x9a, 0x9a, 0x9b, 0x6c, 0xe7, 0xb9, 0x54, 0xed, 0xbc, 0x0e, 0xc0, 0xe7, 0x13, - 0xea, 0x75, 0xbb, 0x84, 0x8a, 0x12, 0x5d, 0x50, 0x8b, 0xf7, 0x08, 0x8a, 0x35, 0x0a, 0xb4, 0x05, - 0xe8, 0xa0, 0xeb, 0xb5, 0x8f, 0x85, 0x0b, 0xc2, 0xf2, 0x22, 0x8a, 0x73, 0x41, 0x2e, 0x2f, 0x9b, - 0x23, 0x58, 0x7c, 0x01, 0x87, 0x39, 0x03, 0xb9, 0x3d, 0x3e, 0xe6, 0x99, 0xbf, 0x34, 0x60, 0x56, - 0xfc, 0x9a, 0x64, 0x1d, 0x5b, 0x83, 0xdc, 0xa1, 0x17, 0xae, 0x5c, 0x0a, 0xf2, 0x3f, 0x17, 0x5b, - 0x1c, 0x80, 0x25, 0xfc, 0x29, 0xf6, 0xb5, 0xef, 0x1b, 0x90, 0x5c, 0x84, 0xa2, 0x37, 0x64, 0x68, - 0x8c, 0x68, 0x53, 0x39, 0x61, 0x58, 0x5e, 0x1f, 0x37, 0xe8, 0x3f, 0x93, 0x6a, 0xeb, 0x75, 0x15, - 0x8a, 0xd8, 0xf3, 0x82, 0x3d, 0x2b, 0x38, 0x62, 0xfc, 0xe0, 0x3e, 0xff, 0xa1, 0x7c, 0x23, 0x0e, - 0x2e, 0x30, 0x58, 0xc2, 0xcd, 0x5f, 0x18, 0xf0, 0xdc, 0xd8, 0x15, 0x39, 0xcf, 0x90, 0x76, 0xf4, - 0xa5, 0x4e, 0x14, 0x65, 0x48, 0x4c, 0x87, 0x35, 0x2a, 0x3e, 0xe9, 0x27, 0xf6, 0xea, 0xc3, 0x93, - 0x7e, 0x42, 0x1b, 0x4e, 0xd2, 0x9a, 0xff, 0xcc, 0x40, 0x5e, 0x3e, 0xfb, 0xff, 0xcb, 0x8f, 0xbb, - 0x17, 0x21, 0xcf, 0x84, 0x1e, 0x65, 0x5e, 0xd4, 0x74, 0xa4, 0x76, 0xac, 0xb0, 0x62, 0xd8, 0x26, - 0x8c, 0x59, 0x9d, 0xf0, 0x32, 0xc6, 0xc3, 0xb6, 0x04, 0xe3, 0x10, 0x8f, 0x5e, 0x83, 0x3c, 0x25, - 0x16, 0x8b, 0xde, 0x1d, 0x4b, 0xa1, 0x48, 0x2c, 0xa0, 0xe7, 0x83, 0xda, 0xac, 0x12, 0x2e, 0xbe, - 0xb1, 0xa2, 0x46, 0x77, 0x60, 0xc6, 0x26, 0x81, 0xe5, 0x74, 0xc3, 0xc1, 0xf6, 0x95, 0x49, 0xd6, - 0x23, 0x2d, 0xc9, 0xda, 0x2c, 0x71, 0x9b, 0xd4, 0x07, 0x0e, 0x05, 0xf2, 0x42, 0xd2, 0xf6, 0x6c, - 0xf9, 0x9f, 0xb5, 0x5c, 0x5c, 0x48, 0x36, 0x3c, 0x9b, 0x60, 0x81, 0x31, 0x1f, 0x18, 0x50, 0x92, - 0x92, 0x36, 0xac, 0x3e, 0x23, 0x68, 0x35, 0x3a, 0x85, 0x0c, 0x77, 0x38, 0xda, 0x4c, 0xf3, 0xc7, - 0xc0, 0xf9, 0xa0, 0x56, 0x14, 0x64, 0xe2, 0x65, 0x10, 0x1e, 0x40, 0xf3, 0x51, 0xe6, 0x12, 0x1f, - 0xbd, 0x00, 0x39, 0x71, 0x7b, 0x94, 0x33, 0xa3, 0x79, 0x59, 0x5c, 0x30, 0x2c, 0x71, 0xe6, 0x27, - 0x19, 0x28, 0x27, 0x0e, 0x97, 0x62, 0x38, 0x8e, 0x56, 0x71, 0x99, 0x14, 0xeb, 0xdd, 0xf1, 0xff, - 0x0f, 0xfd, 0x36, 0xe4, 0xdb, 0xfc, 0x7c, 0xe1, 0x3f, 0xa4, 0x57, 0x27, 0x09, 0x85, 0xf0, 0x4c, - 0x9c, 0x49, 0xe2, 0x93, 0x61, 0x25, 0x10, 0xdd, 0x80, 0x45, 0x4a, 0x02, 0x7a, 0xba, 0x7e, 0x18, - 0x10, 0xaa, 0xbf, 0x2f, 0x73, 0xf1, 0xf8, 0x88, 
0x87, 0x09, 0xf0, 0x28, 0x4f, 0x58, 0xfa, 0xf3, - 0x4f, 0x51, 0xfa, 0xcd, 0x2e, 0x4c, 0xff, 0x0f, 0x9f, 0x3a, 0xdf, 0x81, 0x62, 0x3c, 0x8c, 0x7e, - 0xca, 0x2a, 0xcd, 0xef, 0x41, 0x81, 0x67, 0x63, 0xf8, 0x88, 0xba, 0xa4, 0xb3, 0x26, 0x7b, 0x5e, - 0x26, 0x4d, 0xcf, 0x33, 0x7b, 0x50, 0xbe, 0xe5, 0xdb, 0x4f, 0xf9, 0x1f, 0xc0, 0x4c, 0xea, 0x8e, - 0xb2, 0x06, 0xf2, 0xbf, 0xea, 0xbc, 0x78, 0xcb, 0x05, 0x94, 0x56, 0xbc, 0xf5, 0x6d, 0x92, 0xb6, - 0x01, 0xfe, 0x89, 0x01, 0x20, 0xb6, 0x21, 0x9b, 0x27, 0xc4, 0x0d, 0xb8, 0x1f, 0x78, 0xc0, 0x87, - 0xfd, 0x20, 0x6e, 0xad, 0xc0, 0xa0, 0x5b, 0x90, 0xf7, 0xc4, 0x4c, 0xac, 0x56, 0xb2, 0x13, 0x6e, - 0xb7, 0xa2, 0x24, 0x97, 0x83, 0x35, 0x56, 0xc2, 0x9a, 0x2b, 0x8f, 0x1e, 0x2f, 0x4d, 0x7d, 0xf4, - 0x78, 0x69, 0xea, 0xe3, 0xc7, 0x4b, 0x53, 0xef, 0x9d, 0x2d, 0x19, 0x8f, 0xce, 0x96, 0x8c, 0x8f, - 0xce, 0x96, 0x8c, 0x8f, 0xcf, 0x96, 0x8c, 0x4f, 0xce, 0x96, 0x8c, 0x07, 0x7f, 0x5d, 0x9a, 0xba, - 0x93, 0x39, 0x59, 0xfd, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x66, 0x55, 0x2c, 0x41, 0x24, - 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index 36bda1fe5918..ba1194dcc56c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -163,6 +163,7 @@ message DeleteOptions { // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be // returned. + // +k8s:conversion-gen=false // +optional optional Preconditions preconditions = 2; @@ -212,21 +213,20 @@ message ExportOptions { optional bool exact = 2; } -// Fields stores a set of fields in a data structure like a Trie. -// To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff -message Fields { - // Map stores a set of fields in a data structure like a Trie. - // - // Each key is either a '.' representing the field itself, and will always map to an empty set, - // or a string representing a sub-field or item. The string will follow one of these four formats: - // 'f:', where is the name of a field in a struct, or key in a map - // 'v:', where is the exact json formatted value of a list item - // 'i:', where is position of a item in a list - // 'k:', where is a map of a list item's key fields to their unique values - // If a key maps to an empty Fields value, the field that key represents is part of the set. - // - // The exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal - map map = 1; +// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. +// +// Each key is either a '.' representing the field itself, and will always map to an empty set, +// or a string representing a sub-field or item. The string will follow one of these four formats: +// 'f:', where is the name of a field in a struct, or key in a map +// 'v:', where is the exact json formatted value of a list item +// 'i:', where is position of a item in a list +// 'k:', where is a map of a list item's key fields to their unique values +// If a key maps to an empty Fields value, the field that key represents is part of the set. +// +// The exact format is defined in sigs.k8s.io/structured-merge-diff +message FieldsV1 { + // Raw is the underlying serialization of this object. + optional bytes Raw = 1; } // GetOptions is the standard query options to the standard REST get call. 
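To make the FieldsV1 key format described above concrete: an ownership set covering metadata.labels["app"] and spec.replicas is stored verbatim as raw JSON whose keys carry the `f:` prefix for struct fields and whose empty-object values mark owned leaves. The example value below is constructed for illustration only; the `metav1.FieldsV1` type and its `Raw` field are the ones introduced by this vendored package.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// This set claims ownership of metadata.labels["app"] and
	// spec.replicas; an empty object value ({}) marks the leaf as owned.
	fields := metav1.FieldsV1{
		Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}},"f:spec":{"f:replicas":{}}}`),
	}
	fmt.Println(string(fields.Raw))
}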
@@ -302,27 +302,6 @@ message GroupVersionResource { optional string resource = 3; } -// Initializer is information about an initializer that has not yet completed. -message Initializer { - // name of the process that is responsible for initializing this object. - optional string name = 1; -} - -// Initializers tracks the progress of initialization. -message Initializers { - // Pending is a list of initializers that must execute in order before this object is visible. - // When the last pending initializer is removed, and no failing result is set, the initializers - // struct will be set to nil and the object is considered as initialized and visible to all - // clients. - // +patchMergeKey=name - // +patchStrategy=merge - repeated Initializer pending = 1; - - // If result is set with the Failure field, the object will be persisted to storage and then deleted, - // ensuring that other clients can observe the deletion. - optional Status result = 2; -} - // A label selector is a label query over a set of resources. The result of matchLabels and // matchExpressions are ANDed. An empty label selector matches all objects. A null // label selector matches no objects. @@ -361,7 +340,7 @@ message LabelSelectorRequirement { // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional ListMeta metadata = 1; @@ -375,6 +354,10 @@ message ListMeta { // selfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional optional string selfLink = 1; @@ -383,7 +366,7 @@ message ListMeta { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 2; @@ -395,6 +378,18 @@ message ListMeta { // identical to the value in the first response, unless you have received this token from an error // message. optional string continue = 3; + + // remainingItemCount is the number of subsequent items in the list which are not included in this + // list response. If the list request contained label or field selectors, then the number of + // remaining items is unknown and the field will be left unset and omitted during serialization. + // If the list is complete (either because it is not chunking or because this is the last chunk), + // then there are no more remaining items and this field will be left unset and omitted during + // serialization. + // Servers older than v1.15 do not set this field. + // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients + // should not rely on the remainingItemCount to be set or to be exact. + // +optional + optional int64 remainingItemCount = 4; } // ListOptions is the query options to a standard REST list call. 
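The new remainingItemCount field complements the existing continue token: a chunking client keeps requesting pages until continue comes back empty, and treats remainingItemCount, when set, purely as a size hint. The sketch below is illustrative only; `listPage` is a hypothetical stand-in for a real typed client call, and the `Limit` and `Continue` fields of `metav1.ListOptions` are assumed from the wider package rather than shown in this hunk.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// listPage stands in for a typed client's List call; it would issue one
// request with opts.Limit and opts.Continue and return the chunk's
// ListMeta plus the number of items received.
func listPage(opts metav1.ListOptions) (metav1.ListMeta, int) {
	return metav1.ListMeta{}, 0
}

func main() {
	opts := metav1.ListOptions{Limit: 500}
	total := 0
	for {
		meta, n := listPage(opts)
		total += n
		if rc := meta.RemainingItemCount; rc != nil {
			// Size hint only; servers may omit it or return an estimate.
			fmt.Printf("~%d items still to come\n", *rc)
		}
		if meta.Continue == "" {
			break // last chunk
		}
		opts.Continue = meta.Continue
	}
	fmt.Println("listed", total, "items")
}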
@@ -414,6 +409,17 @@ message ListOptions { // +optional optional bool watch = 3; + // allowWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // If this is not a watch, this field is ignored. + // If the feature gate WatchBookmarks is not enabled in apiserver, + // this field is ignored. + // +optional + optional bool allowWatchBookmarks = 9; + // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. // When specified for list: @@ -483,9 +489,13 @@ message ManagedFieldsEntry { // +optional optional Time time = 4; - // Fields identifies a set of fields. + // FieldsType is the discriminator for the different fields format and version. + // There is currently only one possible value: "FieldsV1" + optional string fieldsType = 6; + + // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. // +optional - optional Fields fields = 5; + optional FieldsV1 fieldsV1 = 7; } // MicroTime is version of Time with microsecond level precision. @@ -532,7 +542,7 @@ message ObjectMeta { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency // +optional optional string generateName = 2; @@ -550,6 +560,10 @@ message ObjectMeta { // SelfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional optional string selfLink = 4; @@ -572,7 +586,7 @@ message ObjectMeta { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 6; @@ -588,7 +602,7 @@ message ObjectMeta { // Populated by the system. // Read-only. // Null for lists. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional Time creationTimestamp = 8; @@ -609,7 +623,7 @@ message ObjectMeta { // // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional optional Time deletionTimestamp = 9; @@ -643,23 +657,19 @@ message ObjectMeta { // +patchStrategy=merge repeated OwnerReference ownerReferences = 13; - // An initializer is a controller which enforces some system invariant at object creation time. 
- // This field is a list of initializers that have not yet acted on this object. If nil or empty, - // this object has been completely initialized. Otherwise, the object is considered uninitialized - // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to - // observe uninitialized objects. - // - // When an object is created, the system will populate this list with the current set of initializers. - // Only privileged users may set or modify this list. Once it is empty, it may not be modified further - // by any user. - // - // DEPRECATED - initializers are an alpha field and will be removed in v1.15. - optional Initializers initializers = 16; - // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries // in this list can only be removed. + // Finalizers may be processed and removed in any order. Order is NOT enforced + // because it introduces significant risk of stuck finalizers. + // finalizers is a shared field, any actor with permission can reorder it. + // If the finalizer list is processed in order, then this can lead to a situation + // in which the component responsible for the first finalizer in the list is + // waiting for a signal (field value, external system, or other) produced by a + // component responsible for a finalizer later in the list, resulting in a deadlock. + // Without enforced ordering finalizers are free to order amongst themselves and + // are not vulnerable to ordering changes in the list. // +optional // +patchStrategy=merge repeated string finalizers = 14; @@ -678,8 +688,6 @@ message ObjectMeta { // "ci-cd". The set of fields is always in the version that the // workflow used when modifying the object. // - // This field is alpha and can be changed or removed without notice. - // // +optional repeated ManagedFieldsEntry managedFields = 17; } @@ -692,7 +700,7 @@ message OwnerReference { optional string apiVersion = 5; // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; // Name of the referent. @@ -717,6 +725,28 @@ message OwnerReference { optional bool blockOwnerDeletion = 7; } +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadata { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional ObjectMeta metadata = 1; +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadataList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional ListMeta metadata = 1; + + // items contains each of the included items. + repeated PartialObjectMetadata items = 2; +} + // Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. 
message Patch { } @@ -780,13 +810,13 @@ message ServerAddressByClientCIDR { // Status is a return value for calls that don't return other objects. message Status { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional ListMeta metadata = 1; // Status of the operation. // One of: "Success" or "Failure". - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional optional string status = 2; @@ -857,7 +887,7 @@ message StatusDetails { // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional string kind = 3; @@ -879,6 +909,16 @@ message StatusDetails { optional int32 retryAfterSeconds = 5; } +// TableOptions are used when a Table is requested by the caller. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message TableOptions { + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1beta1 of the meta.k8s.io API group. + optional string includeObject = 1; +} + // Time is a wrapper around time.Time which supports correct // marshaling to YAML and JSON. Wrappers are provided for many // of the factory methods that the time package offers. @@ -925,14 +965,14 @@ message TypeMeta { // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional optional string kind = 1; // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources // +optional optional string apiVersion = 2; } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index b4dc78b3eaa6..ad989ad75cac 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -17,7 +17,9 @@ limitations under the License. 
package v1 import ( + "bytes" "encoding/json" + "errors" "fmt" "k8s.io/apimachinery/pkg/fields" @@ -250,18 +252,31 @@ func ResetObjectMetaForStatus(meta, existingMeta Object) { meta.SetAnnotations(existingMeta.GetAnnotations()) meta.SetFinalizers(existingMeta.GetFinalizers()) meta.SetOwnerReferences(existingMeta.GetOwnerReferences()) - meta.SetManagedFields(existingMeta.GetManagedFields()) + // managedFields must be preserved since it's been modified to + // track changed fields in the status update. + //meta.SetManagedFields(existingMeta.GetManagedFields()) } // MarshalJSON implements json.Marshaler -func (f Fields) MarshalJSON() ([]byte, error) { - return json.Marshal(&f.Map) +// MarshalJSON may get called on pointers or values, so implement MarshalJSON on value. +// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go +func (f FieldsV1) MarshalJSON() ([]byte, error) { + if f.Raw == nil { + return []byte("null"), nil + } + return f.Raw, nil } // UnmarshalJSON implements json.Unmarshaler -func (f *Fields) UnmarshalJSON(b []byte) error { - return json.Unmarshal(b, &f.Map) +func (f *FieldsV1) UnmarshalJSON(b []byte) error { + if f == nil { + return errors.New("metav1.Fields: UnmarshalJSON on nil pointer") + } + if !bytes.Equal(b, []byte("null")) { + f.Raw = append(f.Raw[0:0], b...) + } + return nil } -var _ json.Marshaler = Fields{} -var _ json.Unmarshaler = &Fields{} +var _ json.Marshaler = FieldsV1{} +var _ json.Unmarshaler = &FieldsV1{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go index 05f07adf1b81..912cf2036b0a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -55,8 +55,6 @@ type Object interface { SetLabels(labels map[string]string) GetAnnotations() map[string]string SetAnnotations(annotations map[string]string) - GetInitializers() *Initializers - SetInitializers(initializers *Initializers) GetFinalizers() []string SetFinalizers(finalizers []string) GetOwnerReferences() []OwnerReference @@ -94,6 +92,8 @@ type ListInterface interface { SetSelfLink(selfLink string) GetContinue() string SetContinue(c string) + GetRemainingItemCount() *int64 + SetRemainingItemCount(c *int64) } // Type exposes the type and APIVersion of versioned or internal API objects. 
@@ -105,12 +105,16 @@ type Type interface { SetKind(kind string) } +var _ ListInterface = &ListMeta{} + func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion } func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink } func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } func (meta *ListMeta) GetContinue() string { return meta.Continue } func (meta *ListMeta) SetContinue(c string) { meta.Continue = c } +func (meta *ListMeta) GetRemainingItemCount() *int64 { return meta.RemainingItemCount } +func (meta *ListMeta) SetRemainingItemCount(c *int64) { meta.RemainingItemCount = c } func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } @@ -160,8 +164,6 @@ func (meta *ObjectMeta) GetLabels() map[string]string { return m func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } -func (meta *ObjectMeta) GetInitializers() *Initializers { return meta.Initializers } -func (meta *ObjectMeta) SetInitializers(initializers *Initializers) { meta.Initializers = initializers } func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { return meta.OwnerReferences } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index 6f6c5111bc82..cdd9a6a7a0cc 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -41,11 +41,6 @@ func (t *MicroTime) DeepCopyInto(out *MicroTime) { *out = *t } -// String returns the representation of the time. -func (t MicroTime) String() string { - return t.Time.String() -} - // NewMicroTime returns a wrapped instance of the provided time func NewMicroTime(time time.Time) MicroTime { return MicroTime{time} @@ -72,22 +67,40 @@ func (t *MicroTime) IsZero() bool { // Before reports whether the time instant t is before u. func (t *MicroTime) Before(u *MicroTime) bool { - return t.Time.Before(u.Time) + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false } // Equal reports whether the time instant t is equal to u. func (t *MicroTime) Equal(u *MicroTime) bool { - return t.Time.Equal(u.Time) + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false } // BeforeTime reports whether the time instant t is before second-lever precision u. func (t *MicroTime) BeforeTime(u *Time) bool { - return t.Time.Before(u.Time) + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false } // EqualTime reports whether the time instant t is equal to second-lever precision u. 
func (t *MicroTime) EqualTime(u *Time) bool { - return t.Time.Equal(u.Time) + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false } // UnixMicro returns the local time corresponding to the given Unix time diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go index 14841be512ae..6dd6d8999f74 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go @@ -70,3 +70,11 @@ func (m *MicroTime) MarshalTo(data []byte) (int, error) { } return m.ProtoMicroTime().MarshalTo(data) } + +// MarshalToSizedBuffer implements the protobuf marshalling interface. +func (m *MicroTime) MarshalToSizedBuffer(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoMicroTime().MarshalToSizedBuffer(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go index 76d042a96613..c1a077178bfb 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -25,6 +25,13 @@ import ( // GroupName is the group name for this API. const GroupName = "meta.k8s.io" +var ( + // localSchemeBuilder is used to make compiler happy for autogenerated + // conversions. However, it's not used. + schemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &schemeBuilder +) + // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} @@ -40,6 +47,22 @@ func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } +// scheme is the registry for the common types that adhere to the meta v1 API spec. +var scheme = runtime.NewScheme() + +// ParameterCodec knows about query parameters used with the meta v1 API spec. +var ParameterCodec = runtime.NewParameterCodec(scheme) + +var optionsTypes = []runtime.Object{ + &ListOptions{}, + &ExportOptions{}, + &GetOptions{}, + &DeleteOptions{}, + &CreateOptions{}, + &UpdateOptions{}, + &PatchOptions{}, +} + // AddToGroupVersion registers common meta types into schemas. func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) { scheme.AddKnownTypeWithName(groupVersion.WithKind(WatchEventKind), &WatchEvent{}) @@ -48,21 +71,7 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) &InternalEvent{}, ) // Supports legacy code paths, most callers should use metav1.ParameterCodec for now - scheme.AddKnownTypes(groupVersion, - &ListOptions{}, - &ExportOptions{}, - &GetOptions{}, - &DeleteOptions{}, - &CreateOptions{}, - &UpdateOptions{}, - &PatchOptions{}, - ) - utilruntime.Must(scheme.AddConversionFuncs( - Convert_v1_WatchEvent_To_watch_Event, - Convert_v1_InternalEvent_To_v1_WatchEvent, - Convert_watch_Event_To_v1_WatchEvent, - Convert_v1_WatchEvent_To_v1_InternalEvent, - )) + scheme.AddKnownTypes(groupVersion, optionsTypes...) // Register Unversioned types under their own special group scheme.AddUnversionedTypes(Unversioned, &Status{}, @@ -73,26 +82,26 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) ) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. 
- utilruntime.Must(AddConversionFuncs(scheme)) + utilruntime.Must(RegisterConversions(scheme)) utilruntime.Must(RegisterDefaults(scheme)) } -// scheme is the registry for the common types that adhere to the meta v1 API spec. -var scheme = runtime.NewScheme() +// AddMetaToScheme registers base meta types into schemas. +func AddMetaToScheme(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Table{}, + &TableOptions{}, + &PartialObjectMetadata{}, + &PartialObjectMetadataList{}, + ) -// ParameterCodec knows about query parameters used with the meta v1 API spec. -var ParameterCodec = runtime.NewParameterCodec(scheme) + return nil +} func init() { - scheme.AddUnversionedTypes(SchemeGroupVersion, - &ListOptions{}, - &ExportOptions{}, - &GetOptions{}, - &DeleteOptions{}, - &CreateOptions{}, - &UpdateOptions{}, - &PatchOptions{}, - ) + scheme.AddUnversionedTypes(SchemeGroupVersion, optionsTypes...) + + utilruntime.Must(AddMetaToScheme(scheme)) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. utilruntime.Must(RegisterDefaults(scheme)) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 79904fd79435..4a1d89cfce85 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -41,11 +41,6 @@ func (t *Time) DeepCopyInto(out *Time) { *out = *t } -// String returns the representation of the time. -func (t Time) String() string { - return t.Time.String() -} - // NewTime returns a wrapped instance of the provided time func NewTime(time time.Time) Time { return Time{time} @@ -72,7 +67,10 @@ func (t *Time) IsZero() bool { // Before reports whether the time instant t is before u. func (t *Time) Before(u *Time) bool { - return t.Time.Before(u.Time) + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false } // Equal reports whether the time instant t is equal to u. @@ -155,6 +153,16 @@ func (t Time) MarshalJSON() ([]byte, error) { return buf, nil } +// ToUnstructured implements the value.UnstructuredConverter interface. +func (t Time) ToUnstructured() interface{} { + if t.IsZero() { + return nil + } + buf := make([]byte, 0, len(time.RFC3339)) + buf = t.UTC().AppendFormat(buf, time.RFC3339) + return string(buf) +} + // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. // diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go index ed72186b49fe..eac8d965890a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go @@ -75,7 +75,7 @@ func (m *Time) Unmarshal(data []byte) error { return nil } -// Marshal implements the protobuf marshalling interface. +// Marshal implements the protobuf marshaling interface. func (m *Time) Marshal() (data []byte, err error) { if m == nil || m.Time.IsZero() { return nil, nil @@ -83,10 +83,18 @@ func (m *Time) Marshal() (data []byte, err error) { return m.ProtoTime().Marshal() } -// MarshalTo implements the protobuf marshalling interface. +// MarshalTo implements the protobuf marshaling interface. func (m *Time) MarshalTo(data []byte) (int, error) { if m == nil || m.Time.IsZero() { return 0, nil } return m.ProtoTime().MarshalTo(data) } + +// MarshalToSizedBuffer implements the protobuf reverse marshaling interface. 
+func (m *Time) MarshalToSizedBuffer(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoTime().MarshalToSizedBuffer(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index fd6395256a5a..bf125b62a73f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -43,14 +43,14 @@ type TypeMeta struct { // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` } @@ -61,6 +61,10 @@ type ListMeta struct { // selfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` @@ -69,7 +73,7 @@ type ListMeta struct { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` @@ -81,6 +85,18 @@ type ListMeta struct { // identical to the value in the first response, unless you have received this token from an error // message. Continue string `json:"continue,omitempty" protobuf:"bytes,3,opt,name=continue"` + + // remainingItemCount is the number of subsequent items in the list which are not included in this + // list response. If the list request contained label or field selectors, then the number of + // remaining items is unknown and the field will be left unset and omitted during serialization. + // If the list is complete (either because it is not chunking or because this is the last chunk), + // then there are no more remaining items and this field will be left unset and omitted during + // serialization. + // Servers older than v1.15 do not set this field. + // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients + // should not rely on the remainingItemCount to be set or to be exact. 
+ // +optional + RemainingItemCount *int64 `json:"remainingItemCount,omitempty" protobuf:"bytes,4,opt,name=remainingItemCount"` } // These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here @@ -115,7 +131,7 @@ type ObjectMeta struct { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency // +optional GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` @@ -133,6 +149,10 @@ type ObjectMeta struct { // SelfLink is a URL representing this object. // Populated by the system. // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. // +optional SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` @@ -155,7 +175,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` @@ -171,7 +191,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Null for lists. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` @@ -192,7 +212,7 @@ type ObjectMeta struct { // // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` @@ -226,23 +246,19 @@ type ObjectMeta struct { // +patchStrategy=merge OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` - // An initializer is a controller which enforces some system invariant at object creation time. - // This field is a list of initializers that have not yet acted on this object. If nil or empty, - // this object has been completely initialized. Otherwise, the object is considered uninitialized - // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to - // observe uninitialized objects. - // - // When an object is created, the system will populate this list with the current set of initializers. - // Only privileged users may set or modify this list. Once it is empty, it may not be modified further - // by any user. - // - // DEPRECATED - initializers are an alpha field and will be removed in v1.15. 
- Initializers *Initializers `json:"initializers,omitempty" protobuf:"bytes,16,opt,name=initializers"` - // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries // in this list can only be removed. + // Finalizers may be processed and removed in any order. Order is NOT enforced + // because it introduces significant risk of stuck finalizers. + // finalizers is a shared field, any actor with permission can reorder it. + // If the finalizer list is processed in order, then this can lead to a situation + // in which the component responsible for the first finalizer in the list is + // waiting for a signal (field value, external system, or other) produced by a + // component responsible for a finalizer later in the list, resulting in a deadlock. + // Without enforced ordering finalizers are free to order amongst themselves and + // are not vulnerable to ordering changes in the list. // +optional // +patchStrategy=merge Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` @@ -261,32 +277,10 @@ type ObjectMeta struct { // "ci-cd". The set of fields is always in the version that the // workflow used when modifying the object. // - // This field is alpha and can be changed or removed without notice. - // // +optional ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"` } -// Initializers tracks the progress of initialization. -type Initializers struct { - // Pending is a list of initializers that must execute in order before this object is visible. - // When the last pending initializer is removed, and no failing result is set, the initializers - // struct will be set to nil and the object is considered as initialized and visible to all - // clients. - // +patchMergeKey=name - // +patchStrategy=merge - Pending []Initializer `json:"pending" protobuf:"bytes,1,rep,name=pending" patchStrategy:"merge" patchMergeKey:"name"` - // If result is set with the Failure field, the object will be persisted to storage and then deleted, - // ensuring that other clients can observe the deletion. - Result *Status `json:"result,omitempty" protobuf:"bytes,2,opt,name=result"` -} - -// Initializer is information about an initializer that has not yet completed. -type Initializer struct { - // name of the process that is responsible for initializing this object. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - const ( // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients NamespaceDefault string = "default" @@ -307,7 +301,7 @@ type OwnerReference struct { // API version of the referent. APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent. 
// More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -328,6 +322,7 @@ type OwnerReference struct { BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty" protobuf:"varint,7,opt,name=blockOwnerDeletion"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ListOptions is the query options to a standard REST list call. @@ -349,6 +344,17 @@ type ListOptions struct { // add, update, and remove notifications. Specify resourceVersion. // +optional Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` + // allowWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // If this is not a watch, this field is ignored. + // If the feature gate WatchBookmarks is not enabled in apiserver, + // this field is ignored. + // +optional + AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty" protobuf:"varint,9,opt,name=allowWatchBookmarks"` + // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. // When specified for list: @@ -396,6 +402,7 @@ type ListOptions struct { Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ExportOptions is the query options to the standard REST get call. @@ -410,6 +417,7 @@ type ExportOptions struct { Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // GetOptions is the standard query options to the standard REST get call. @@ -447,6 +455,7 @@ const ( DryRunAll = "All" ) +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // DeleteOptions may be provided when deleting an API object. @@ -462,6 +471,7 @@ type DeleteOptions struct { // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be // returned. + // +k8s:conversion-gen=false // +optional Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` @@ -492,6 +502,7 @@ type DeleteOptions struct { DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // CreateOptions may be provided when creating an API object. @@ -515,6 +526,7 @@ type CreateOptions struct { FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PatchOptions may be provided when patching an API object. 
@@ -547,6 +559,7 @@ type PatchOptions struct { FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` } +// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // UpdateOptions may be provided when updating an API object. @@ -586,13 +599,13 @@ type Preconditions struct { type Status struct { TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Status of the operation. // One of: "Success" or "Failure". - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` // A human-readable description of the status of this operation. @@ -631,7 +644,7 @@ type StatusDetails struct { Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` // UID of the resource. @@ -763,11 +776,13 @@ const ( // doesn't make any sense, for example deleting a read-only object. This is different than // StatusReasonInvalid above which indicates that the API call could possibly succeed, but the // data was invalid. API calls that return BadRequest can never succeed. + // Status code 400 StatusReasonBadRequest StatusReason = "BadRequest" // StatusReasonMethodNotAllowed means that the action the client attempted to perform on the // resource was not supported by the code - for instance, attempting to delete a resource that // can only be created. API calls that return MethodNotAllowed can never succeed. + // Status code 405 StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" // StatusReasonNotAcceptable means that the accept types indicated by the client were not acceptable @@ -866,7 +881,7 @@ const ( type List struct { TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -1099,9 +1114,16 @@ type ManagedFieldsEntry struct { // Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' // +optional Time *Time `json:"time,omitempty" protobuf:"bytes,4,opt,name=time"` - // Fields identifies a set of fields. + + // Fields is tombstoned to show why 5 is a reserved protobuf tag. + //Fields *Fields `json:"fields,omitempty" protobuf:"bytes,5,opt,name=fields,casttype=Fields"` + + // FieldsType is the discriminator for the different fields format and version. 
+ // There is currently only one possible value: "FieldsV1" + FieldsType string `json:"fieldsType,omitempty" protobuf:"bytes,6,opt,name=fieldsType"` + // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. // +optional - Fields *Fields `json:"fields,omitempty" protobuf:"bytes,5,opt,name=fields,casttype=Fields"` + FieldsV1 *FieldsV1 `json:"fieldsV1,omitempty" protobuf:"bytes,7,opt,name=fieldsV1"` } // ManagedFieldsOperationType is the type of operation which lead to a ManagedFieldsEntry being created. @@ -1112,19 +1134,180 @@ const ( ManagedFieldsOperationUpdate ManagedFieldsOperationType = "Update" ) -// Fields stores a set of fields in a data structure like a Trie. -// To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff -type Fields struct { - // Map stores a set of fields in a data structure like a Trie. - // - // Each key is either a '.' representing the field itself, and will always map to an empty set, - // or a string representing a sub-field or item. The string will follow one of these four formats: - // 'f:<name>', where <name> is the name of a field in a struct, or key in a map - // 'v:<value>', where <value> is the exact json formatted value of a list item - // 'i:<index>', where <index> is position of a item in a list - // 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values - // If a key maps to an empty Fields value, the field that key represents is part of the set. - // - // The exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal - Map map[string]Fields `json:",inline" protobuf:"bytes,1,rep,name=map"` +// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. +// +// Each key is either a '.' representing the field itself, and will always map to an empty set, +// or a string representing a sub-field or item. The string will follow one of these four formats: +// 'f:<name>', where <name> is the name of a field in a struct, or key in a map +// 'v:<value>', where <value> is the exact json formatted value of a list item +// 'i:<index>', where <index> is position of a item in a list +// 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values +// If a key maps to an empty Fields value, the field that key represents is part of the set. +// +// The exact format is defined in sigs.k8s.io/structured-merge-diff +type FieldsV1 struct { + // Raw is the underlying serialization of this object. + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=Raw"` +} + +// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf +// generation to support a meta type that can accept any valid JSON. This can be introduced +// in a v1 because clients a) receive an error if they try to access proto today, and b) +// once introduced they would be able to gracefully switch over to using it. + +// Table is a tabular representation of a set of API resources. The server transforms the +// object into a set of preferred columns for quickly reviewing the objects. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +protobuf=false +type Table struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty"` + + // columnDefinitions describes each column in the returned items array. The number of cells per row + // will always match the number of column definitions.
+ ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` + // rows is the list of items in the table. + Rows []TableRow `json:"rows"` +} + +// TableColumnDefinition contains information about a column returned in the Table. +// +protobuf=false +type TableColumnDefinition struct { + // name is a human readable name for the column. + Name string `json:"name"` + // type is an OpenAPI type definition for this column, such as number, integer, string, or + // array. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Type string `json:"type"` + // format is an optional OpenAPI type modifier for this column. A format modifies the type and + // imposes additional rules, like date or time formatting for a string. The 'name' format is applied + // to the primary identifier column which has type 'string' to assist in clients identifying column + // is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Format string `json:"format"` + // description is a human readable description of this column. + Description string `json:"description"` + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a higher priority. + Priority int32 `json:"priority"` +} + +// TableRow is an individual row in a table. +// +protobuf=false +type TableRow struct { + // cells will be as wide as the column definitions array and may contain strings, numbers (float64 or + // int64), booleans, simple maps, lists, or null. See the type field of the column definition for a + // more detailed description. + Cells []interface{} `json:"cells"` + // conditions describe additional status of a row that are relevant for a human user. These conditions + // apply to the row, not to the object, and will be specific to table output. The only defined + // condition type is 'Completed', for a row that indicates a resource that has run to completion and + // can be given less visual priority. + // +optional + Conditions []TableRowCondition `json:"conditions,omitempty"` + // This field contains the requested additional information about each object based on the includeObject + // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the + // default serialization of the object for the current API version, and if "Metadata" (the default) will + // contain the object metadata. Check the returned kind and apiVersion of the object before parsing. + // The media type of the object will always match the enclosing list - if this as a JSON table, these + // will be JSON encoded objects. + // +optional + Object runtime.RawExtension `json:"object,omitempty"` +} + +// TableRowCondition allows a row to be marked with additional information. +// +protobuf=false +type TableRowCondition struct { + // Type of row condition. The only defined value is 'Completed' indicating that the + // object this row represents has reached a completed state and may be given less visual + // priority than other rows. Clients are not required to honor any conditions but should + // be consistent where possible about handling the conditions. + Type RowConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. 
+ Status ConditionStatus `json:"status"` + // (brief) machine readable reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +type RowConditionType string + +// These are valid conditions of a row. This list is not exhaustive and new conditions may be +// included by other resources. +const ( + // RowCompleted means the underlying resource has reached completion and may be given less + // visual priority than other resources. + RowCompleted RowConditionType = "Completed" +) + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// IncludeObjectPolicy controls which portion of the object is returned with a Table. +type IncludeObjectPolicy string + +const ( + // IncludeNone returns no object. + IncludeNone IncludeObjectPolicy = "None" + // IncludeMetadata serializes the object containing only its metadata field. + IncludeMetadata IncludeObjectPolicy = "Metadata" + // IncludeObject contains the full object. + IncludeObject IncludeObjectPolicy = "Object" +) + +// TableOptions are used when a Table is requested by the caller. +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type TableOptions struct { + TypeMeta `json:",inline"` + + // NoHeaders is only exposed for internal callers. It is not included in our OpenAPI definitions + // and may be removed as a field in a future release. + NoHeaders bool `json:"-"` + + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1beta1 of the meta.k8s.io API group. + IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` +} + +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadata struct { + TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadataList struct { + TypeMeta `json:",inline"` + // Standard list metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items contains each of the included items. + Items []PartialObjectMetadata `json:"items" protobuf:"bytes,2,rep,name=items"` } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 3b1a09e57f85..b62e591ee877 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -119,12 +119,12 @@ func (ExportOptions) SwaggerDoc() map[string]string { return map_ExportOptions } -var map_Fields = map[string]string{ - "": "Fields stores a set of fields in a data structure like a Trie. To understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff", +var map_FieldsV1 = map[string]string{ + "": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:<name>', where <name> is the name of a field in a struct, or key in a map 'v:<value>', where <value> is the exact json formatted value of a list item 'i:<index>', where <index> is position of a item in a list 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", } -func (Fields) SwaggerDoc() map[string]string { - return map_Fields +func (FieldsV1) SwaggerDoc() map[string]string { + return map_FieldsV1 } var map_GetOptions = map[string]string{ @@ -146,25 +146,6 @@ func (GroupVersionForDiscovery) SwaggerDoc() map[string]string { return map_GroupVersionForDiscovery } -var map_Initializer = map[string]string{ - "": "Initializer is information about an initializer that has not yet completed.", - "name": "name of the process that is responsible for initializing this object.", -} - -func (Initializer) SwaggerDoc() map[string]string { - return map_Initializer -} - -var map_Initializers = map[string]string{ - "": "Initializers tracks the progress of initialization.", - "pending": "Pending is a list of initializers that must execute in order before this object is visible. When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients.", - "result": "If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion.", -} - -func (Initializers) SwaggerDoc() map[string]string { - return map_Initializers -} - var map_LabelSelector = map[string]string{ "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\".
The requirements are ANDed.", @@ -188,7 +169,7 @@ func (LabelSelectorRequirement) SwaggerDoc() map[string]string { var map_List = map[string]string{ "": "List holds a list of objects, which may not be known by the server.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "items": "List of objects", } @@ -197,10 +178,11 @@ func (List) SwaggerDoc() map[string]string { } var map_ListMeta = map[string]string{ - "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", - "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.", - "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", - "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", + "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", + "remainingItemCount": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. 
If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.", } func (ListMeta) SwaggerDoc() map[string]string { @@ -208,14 +190,15 @@ func (ListMeta) SwaggerDoc() map[string]string { } var map_ListOptions = map[string]string{ - "": "ListOptions is the query options to a standard REST list call.", - "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "": "ListOptions is the query options to a standard REST list call.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", } func (ListOptions) SwaggerDoc() map[string]string { @@ -228,7 +211,8 @@ var map_ManagedFieldsEntry = map[string]string{ "operation": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", "apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", "time": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'", - "fields": "Fields identifies a set of fields.", + "fieldsType": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", + "fieldsV1": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.", } func (ManagedFieldsEntry) SwaggerDoc() map[string]string { @@ -238,22 +222,21 @@ func (ManagedFieldsEntry) SwaggerDoc() map[string]string { var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", - "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. 
Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", - "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. 
After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", - "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.\n\nDEPRECATED - initializers are an alpha field and will be removed in v1.15.", - "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", + "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. 
Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", - "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.\n\nThis field is alpha and can be changed or removed without notice.", + "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", } func (ObjectMeta) SwaggerDoc() map[string]string { @@ -263,7 +246,7 @@ func (ObjectMeta) SwaggerDoc() map[string]string { var map_OwnerReference = map[string]string{ "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", "apiVersion": "API version of the referent.", - "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "controller": "If true, this reference points to the managing controller.", @@ -274,6 +257,25 @@ func (OwnerReference) SwaggerDoc() map[string]string { return map_OwnerReference } +var map_PartialObjectMetadata = map[string]string{ + "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (PartialObjectMetadata) SwaggerDoc() map[string]string { + return map_PartialObjectMetadata +} + +var map_PartialObjectMetadataList = map[string]string{ + "": "PartialObjectMetadataList contains a list of objects containing only their metadata", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "items contains each of the included items.", +} + +func (PartialObjectMetadataList) SwaggerDoc() map[string]string { + return map_PartialObjectMetadataList +} + var map_Patch = map[string]string{ "": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", } @@ -324,8 +326,8 @@ func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { var map_Status = map[string]string{ "": "Status is a return value for calls that don't return other objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", "message": "A human-readable description of the status of this operation.", "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", "details": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", @@ -351,7 +353,7 @@ var map_StatusDetails = map[string]string{ "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", "group": "The group attribute of the resource associated with the status StatusReason.", - "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "uid": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. 
Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", @@ -361,10 +363,66 @@ func (StatusDetails) SwaggerDoc() map[string]string { return map_StatusDetails } +var map_Table = map[string]string{ + "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", + "rows": "rows is the list of items in the table.", +} + +func (Table) SwaggerDoc() map[string]string { + return map_Table +} + +var map_TableColumnDefinition = map[string]string{ + "": "TableColumnDefinition contains information about a column returned in the Table.", + "name": "name is a human readable name for the column.", + "type": "type is an OpenAPI type definition for this column, such as number, integer, string, or array. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "format": "format is an optional OpenAPI type modifier for this column. A format modifies the type and imposes additional rules, like date or time formatting for a string. The 'name' format is applied to the primary identifier column which has type 'string' to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "description": "description is a human readable description of this column.", + "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.", +} + +func (TableColumnDefinition) SwaggerDoc() map[string]string { + return map_TableColumnDefinition +} + +var map_TableOptions = map[string]string{ + "": "TableOptions are used when a Table is requested by the caller.", + "includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", +} + +func (TableOptions) SwaggerDoc() map[string]string { + return map_TableOptions +} + +var map_TableRow = map[string]string{ + "": "TableRow is an individual row in a table.", + "cells": "cells will be as wide as the column definitions array and may contain strings, numbers (float64 or int64), booleans, simple maps, lists, or null. See the type field of the column definition for a more detailed description.", + "conditions": "conditions describe additional status of a row that are relevant for a human user. These conditions apply to the row, not to the object, and will be specific to table output. 
The only defined condition type is 'Completed', for a row that indicates a resource that has run to completion and can be given less visual priority.", + "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.", +} + +func (TableRow) SwaggerDoc() map[string]string { + return map_TableRow +} + +var map_TableRowCondition = map[string]string{ + "": "TableRowCondition allows a row to be marked with additional information.", + "type": "Type of row condition. The only defined value is 'Completed' indicating that the object this row represents has reached a completed state and may be given less visual priority than other rows. Clients are not required to honor any conditions but should be consistent where possible about handling the conditions.", + "status": "Status of the condition, one of True, False, Unknown.", + "reason": "(brief) machine readable reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (TableRowCondition) SwaggerDoc() map[string]string { + return map_TableRowCondition +} + var map_TypeMeta = map[string]string{ "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", - "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", } func (TypeMeta) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go new file mode 100644 index 000000000000..2ade69dd9eb2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go @@ -0,0 +1,523 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + url "net/url" + unsafe "unsafe" + + resource "k8s.io/apimachinery/pkg/api/resource" + conversion "k8s.io/apimachinery/pkg/conversion" + fields "k8s.io/apimachinery/pkg/fields" + labels "k8s.io/apimachinery/pkg/labels" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" + watch "k8s.io/apimachinery/pkg/watch" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*CreateOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_CreateOptions(a.(*url.Values), b.(*CreateOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ExportOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_ExportOptions(a.(*url.Values), b.(*ExportOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*GetOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_GetOptions(a.(*url.Values), b.(*GetOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_ListOptions(a.(*url.Values), b.(*ListOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*PatchOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_PatchOptions(a.(*url.Values), b.(*PatchOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*TableOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_TableOptions(a.(*url.Values), b.(*TableOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*UpdateOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_UpdateOptions(a.(*url.Values), b.(*UpdateOptions), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*map[string]string)(nil), (*LabelSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Map_string_To_string_To_v1_LabelSelector(a.(*map[string]string), 
b.(*LabelSelector), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**bool)(nil), (*bool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_bool_To_bool(a.(**bool), b.(*bool), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**float64)(nil), (*float64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_float64_To_float64(a.(**float64), b.(*float64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**int32)(nil), (*int32)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_int32_To_int32(a.(**int32), b.(*int32), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**int64)(nil), (*int)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_int64_To_int(a.(**int64), b.(*int), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**int64)(nil), (*int64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_int64_To_int64(a.(**int64), b.(*int64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**intstr.IntOrString)(nil), (*intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(a.(**intstr.IntOrString), b.(*intstr.IntOrString), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**string)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_string_To_string(a.(**string), b.(*string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**Duration)(nil), (*Duration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_v1_Duration_To_v1_Duration(a.(**Duration), b.(*Duration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (**DeletionPropagation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_Pointer_v1_DeletionPropagation(a.(*[]string), b.(**DeletionPropagation), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (**Time)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_Pointer_v1_Time(a.(*[]string), b.(**Time), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*[]int32)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_Slice_int32(a.(*[]string), b.(*[]int32), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*IncludeObjectPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_v1_IncludeObjectPolicy(a.(*[]string), b.(*IncludeObjectPolicy), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*Time)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_v1_Time(a.(*[]string), b.(*Time), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*bool)(nil), (**bool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_bool_To_Pointer_bool(a.(*bool), b.(**bool), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*fields.Selector)(nil), (*string)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_fields_Selector_To_string(a.(*fields.Selector), b.(*string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*float64)(nil), (**float64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_float64_To_Pointer_float64(a.(*float64), b.(**float64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*int32)(nil), (**int32)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_int32_To_Pointer_int32(a.(*int32), b.(**int32), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*int64)(nil), (**int64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_int64_To_Pointer_int64(a.(*int64), b.(**int64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*int)(nil), (**int64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_int_To_Pointer_int64(a.(*int), b.(**int64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*intstr.IntOrString)(nil), (**intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(a.(*intstr.IntOrString), b.(**intstr.IntOrString), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*intstr.IntOrString)(nil), (*intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_intstr_IntOrString_To_intstr_IntOrString(a.(*intstr.IntOrString), b.(*intstr.IntOrString), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*labels.Selector)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_labels_Selector_To_string(a.(*labels.Selector), b.(*string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*resource.Quantity)(nil), (*resource.Quantity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_Quantity_To_resource_Quantity(a.(*resource.Quantity), b.(*resource.Quantity), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*string)(nil), (**string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_string_To_Pointer_string(a.(*string), b.(**string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*string)(nil), (*fields.Selector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_string_To_fields_Selector(a.(*string), b.(*fields.Selector), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*string)(nil), (*labels.Selector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_string_To_labels_Selector(a.(*string), b.(*labels.Selector), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*url.Values)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*DeleteOptions)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeleteOptions_To_v1_DeleteOptions(a.(*DeleteOptions), b.(*DeleteOptions), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*Duration)(nil), (**Duration)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_v1_Duration_To_Pointer_v1_Duration(a.(*Duration), b.(**Duration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*InternalEvent)(nil), (*WatchEvent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_InternalEvent_To_v1_WatchEvent(a.(*InternalEvent), b.(*WatchEvent), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*LabelSelector)(nil), (*map[string]string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_LabelSelector_To_Map_string_To_string(a.(*LabelSelector), b.(*map[string]string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*ListMeta)(nil), (*ListMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ListMeta_To_v1_ListMeta(a.(*ListMeta), b.(*ListMeta), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*MicroTime)(nil), (*MicroTime)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_MicroTime_To_v1_MicroTime(a.(*MicroTime), b.(*MicroTime), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*Time)(nil), (*Time)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Time_To_v1_Time(a.(*Time), b.(*Time), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*TypeMeta)(nil), (*TypeMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_TypeMeta_To_v1_TypeMeta(a.(*TypeMeta), b.(*TypeMeta), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*WatchEvent)(nil), (*InternalEvent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WatchEvent_To_v1_InternalEvent(a.(*WatchEvent), b.(*InternalEvent), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*WatchEvent)(nil), (*watch.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WatchEvent_To_watch_Event(a.(*WatchEvent), b.(*watch.Event), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*watch.Event)(nil), (*WatchEvent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_watch_Event_To_v1_WatchEvent(a.(*watch.Event), b.(*WatchEvent), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil { + return err + } + } else { + out.FieldManager = "" + } + return nil +} + +// Convert_url_Values_To_v1_CreateOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_CreateOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. 
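// [Editor's sketch -- not part of the upstream patch.] The generated conversion
// that follows maps delete-request query parameters onto metav1.DeleteOptions.
// Decoding a request such as
//   ?gracePeriodSeconds=0&propagationPolicy=Foreground&dryRun=All
// is roughly equivalent to building the value by hand as below; pointer fields
// stay nil when the corresponding parameter is absent. The function name is
// hypothetical (assumed import: metav1 "k8s.io/apimachinery/pkg/apis/meta/v1").
func exampleDeleteOptionsFromQuery() metav1.DeleteOptions {
	grace := int64(0)
	policy := metav1.DeletePropagationForeground // propagationPolicy=Foreground
	return metav1.DeleteOptions{
		GracePeriodSeconds: &grace,             // gracePeriodSeconds=0
		PropagationPolicy:  &policy,
		DryRun:             []string{"All"},    // dryRun=All
	}
}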
+ + if values, ok := map[string][]string(*in)["gracePeriodSeconds"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.GracePeriodSeconds, s); err != nil { + return err + } + } else { + out.GracePeriodSeconds = nil + } + // INFO: in.Preconditions opted out of conversion generation + if values, ok := map[string][]string(*in)["orphanDependents"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.OrphanDependents, s); err != nil { + return err + } + } else { + out.OrphanDependents = nil + } + if values, ok := map[string][]string(*in)["propagationPolicy"]; ok && len(values) > 0 { + if err := Convert_Slice_string_To_Pointer_v1_DeletionPropagation(&values, &out.PropagationPolicy, s); err != nil { + return err + } + } else { + out.PropagationPolicy = nil + } + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + return nil +} + +func autoConvert_url_Values_To_v1_ExportOptions(in *url.Values, out *ExportOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["export"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Export, s); err != nil { + return err + } + } else { + out.Export = false + } + if values, ok := map[string][]string(*in)["exact"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Exact, s); err != nil { + return err + } + } else { + out.Exact = false + } + return nil +} + +// Convert_url_Values_To_v1_ExportOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_ExportOptions(in *url.Values, out *ExportOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_ExportOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_GetOptions(in *url.Values, out *GetOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["resourceVersion"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.ResourceVersion, s); err != nil { + return err + } + } else { + out.ResourceVersion = "" + } + return nil +} + +// Convert_url_Values_To_v1_GetOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_GetOptions(in *url.Values, out *GetOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_GetOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. 
+ + if values, ok := map[string][]string(*in)["labelSelector"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.LabelSelector, s); err != nil { + return err + } + } else { + out.LabelSelector = "" + } + if values, ok := map[string][]string(*in)["fieldSelector"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldSelector, s); err != nil { + return err + } + } else { + out.FieldSelector = "" + } + if values, ok := map[string][]string(*in)["watch"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Watch, s); err != nil { + return err + } + } else { + out.Watch = false + } + if values, ok := map[string][]string(*in)["allowWatchBookmarks"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.AllowWatchBookmarks, s); err != nil { + return err + } + } else { + out.AllowWatchBookmarks = false + } + if values, ok := map[string][]string(*in)["resourceVersion"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.ResourceVersion, s); err != nil { + return err + } + } else { + out.ResourceVersion = "" + } + if values, ok := map[string][]string(*in)["timeoutSeconds"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.TimeoutSeconds, s); err != nil { + return err + } + } else { + out.TimeoutSeconds = nil + } + if values, ok := map[string][]string(*in)["limit"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_int64(&values, &out.Limit, s); err != nil { + return err + } + } else { + out.Limit = 0 + } + if values, ok := map[string][]string(*in)["continue"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Continue, s); err != nil { + return err + } + } else { + out.Continue = "" + } + return nil +} + +// Convert_url_Values_To_v1_ListOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_ListOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + if values, ok := map[string][]string(*in)["force"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.Force, s); err != nil { + return err + } + } else { + out.Force = nil + } + if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil { + return err + } + } else { + out.FieldManager = "" + } + return nil +} + +// Convert_url_Values_To_v1_PatchOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_PatchOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_TableOptions(in *url.Values, out *TableOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. 
+ + if values, ok := map[string][]string(*in)["-"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.NoHeaders, s); err != nil { + return err + } + } else { + out.NoHeaders = false + } + if values, ok := map[string][]string(*in)["includeObject"]; ok && len(values) > 0 { + if err := Convert_Slice_string_To_v1_IncludeObjectPolicy(&values, &out.IncludeObject, s); err != nil { + return err + } + } else { + out.IncludeObject = "" + } + return nil +} + +// Convert_url_Values_To_v1_TableOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_TableOptions(in *url.Values, out *TableOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_TableOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil { + return err + } + } else { + out.FieldManager = "" + } + return nil +} + +// Convert_url_Values_To_v1_UpdateOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_UpdateOptions(in, out, s) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index 68498b8d2097..b82fdf202f2c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -313,24 +313,22 @@ func (in *ExportOptions) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Fields) DeepCopyInto(out *Fields) { +func (in *FieldsV1) DeepCopyInto(out *FieldsV1) { *out = *in - if in.Map != nil { - in, out := &in.Map, &out.Map - *out = make(map[string]Fields, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fields. -func (in *Fields) DeepCopy() *Fields { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldsV1. +func (in *FieldsV1) DeepCopy() *FieldsV1 { if in == nil { return nil } - out := new(Fields) + out := new(FieldsV1) in.DeepCopyInto(out) return out } @@ -456,48 +454,6 @@ func (in *GroupVersionResource) DeepCopy() *GroupVersionResource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Initializer) DeepCopyInto(out *Initializer) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. 
-func (in *Initializer) DeepCopy() *Initializer { - if in == nil { - return nil - } - out := new(Initializer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Initializers) DeepCopyInto(out *Initializers) { - *out = *in - if in.Pending != nil { - in, out := &in.Pending, &out.Pending - *out = make([]Initializer, len(*in)) - copy(*out, *in) - } - if in.Result != nil { - in, out := &in.Result, &out.Result - *out = new(Status) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializers. -func (in *Initializers) DeepCopy() *Initializers { - if in == nil { - return nil - } - out := new(Initializers) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InternalEvent) DeepCopyInto(out *InternalEvent) { *out = *in @@ -572,7 +528,7 @@ func (in *LabelSelectorRequirement) DeepCopy() *LabelSelectorRequirement { func (in *List) DeepCopyInto(out *List) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.RawExtension, len(*in)) @@ -604,6 +560,11 @@ func (in *List) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ListMeta) DeepCopyInto(out *ListMeta) { *out = *in + if in.RemainingItemCount != nil { + in, out := &in.RemainingItemCount, &out.RemainingItemCount + *out = new(int64) + **out = **in + } return } @@ -654,9 +615,9 @@ func (in *ManagedFieldsEntry) DeepCopyInto(out *ManagedFieldsEntry) { in, out := &in.Time, &out.Time *out = (*in).DeepCopy() } - if in.Fields != nil { - in, out := &in.Fields, &out.Fields - *out = new(Fields) + if in.FieldsV1 != nil { + in, out := &in.FieldsV1, &out.FieldsV1 + *out = new(FieldsV1) (*in).DeepCopyInto(*out) } return @@ -716,11 +677,6 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.Initializers != nil { - in, out := &in.Initializers, &out.Initializers - *out = new(Initializers) - (*in).DeepCopyInto(*out) - } if in.Finalizers != nil { in, out := &in.Finalizers, &out.Finalizers *out = make([]string, len(*in)) @@ -772,6 +728,65 @@ func (in *OwnerReference) DeepCopy() *OwnerReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata. +func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata { + if in == nil { + return nil + } + out := new(PartialObjectMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PartialObjectMetadata, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList. +func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList { + if in == nil { + return nil + } + out := new(PartialObjectMetadataList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Patch) DeepCopyInto(out *Patch) { *out = *in @@ -890,7 +905,7 @@ func (in *ServerAddressByClientCIDR) DeepCopy() *ServerAddressByClientCIDR { func (in *Status) DeepCopyInto(out *Status) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Details != nil { in, out := &in.Details, &out.Details *out = new(StatusDetails) @@ -954,6 +969,108 @@ func (in *StatusDetails) DeepCopy() *StatusDetails { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Table) DeepCopyInto(out *Table) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.ColumnDefinitions != nil { + in, out := &in.ColumnDefinitions, &out.ColumnDefinitions + *out = make([]TableColumnDefinition, len(*in)) + copy(*out, *in) + } + if in.Rows != nil { + in, out := &in.Rows, &out.Rows + *out = make([]TableRow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. +func (in *Table) DeepCopy() *Table { + if in == nil { + return nil + } + out := new(Table) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Table) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition. +func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition { + if in == nil { + return nil + } + out := new(TableColumnDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableOptions) DeepCopyInto(out *TableOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions. 
+func (in *TableOptions) DeepCopy() *TableOptions { + if in == nil { + return nil + } + out := new(TableOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TableOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableRow) DeepCopyInto(out *TableRow) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition. +func (in *TableRowCondition) DeepCopy() *TableRowCondition { + if in == nil { + return nil + } + out := new(TableRowCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. func (in *Time) DeepCopy() *Time { if in == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go index bc615dc3ace0..2d7c8bd1e29c 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go @@ -54,7 +54,8 @@ type Converter struct { generatedConversionFuncs ConversionFuncs // Set of conversions that should be treated as a no-op - ignoredConversions map[typePair]struct{} + ignoredConversions map[typePair]struct{} + ignoredUntypedConversions map[typePair]struct{} // This is a map from a source field type and name, to a list of destination // field type and name. @@ -83,17 +84,23 @@ type Converter struct { // NewConverter creates a new Converter object. func NewConverter(nameFn NameFunc) *Converter { c := &Converter{ - conversionFuncs: NewConversionFuncs(), - generatedConversionFuncs: NewConversionFuncs(), - ignoredConversions: make(map[typePair]struct{}), - nameFunc: nameFn, - structFieldDests: make(map[typeNamePair][]typeNamePair), - structFieldSources: make(map[typeNamePair][]typeNamePair), + conversionFuncs: NewConversionFuncs(), + generatedConversionFuncs: NewConversionFuncs(), + ignoredConversions: make(map[typePair]struct{}), + ignoredUntypedConversions: make(map[typePair]struct{}), + nameFunc: nameFn, + structFieldDests: make(map[typeNamePair][]typeNamePair), + structFieldSources: make(map[typeNamePair][]typeNamePair), inputFieldMappingFuncs: make(map[reflect.Type]FieldMappingFunc), inputDefaultFlags: make(map[reflect.Type]FieldMatchingFlags), } - c.RegisterConversionFunc(Convert_Slice_byte_To_Slice_byte) + c.RegisterUntypedConversionFunc( + (*[]byte)(nil), (*[]byte)(nil), + func(a, b interface{}, s Scope) error { + return Convert_Slice_byte_To_Slice_byte(a.(*[]byte), b.(*[]byte), s) + }, + ) return c } @@ -131,10 +138,6 @@ type Scope interface { // parameters, you'll run out of stack space before anything useful happens. Convert(src, dest interface{}, flags FieldMatchingFlags) error - // DefaultConvert performs the default conversion, without calling a conversion func - // on the current stack frame. This makes it safe to call from a conversion func. 
- DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error - // SrcTags and DestTags contain the struct tags that src and dest had, respectively. // If the enclosing object was not a struct, then these will contain no tags, of course. SrcTag() reflect.StructTag @@ -153,31 +156,14 @@ type FieldMappingFunc func(key string, sourceTag, destTag reflect.StructTag) (so func NewConversionFuncs() ConversionFuncs { return ConversionFuncs{ - fns: make(map[typePair]reflect.Value), untyped: make(map[typePair]ConversionFunc), } } type ConversionFuncs struct { - fns map[typePair]reflect.Value untyped map[typePair]ConversionFunc } -// Add adds the provided conversion functions to the lookup table - they must have the signature -// `func(type1, type2, Scope) error`. Functions are added in the order passed and will override -// previously registered pairs. -func (c ConversionFuncs) Add(fns ...interface{}) error { - for _, fn := range fns { - fv := reflect.ValueOf(fn) - ft := fv.Type() - if err := verifyConversionFunctionSignature(ft); err != nil { - return err - } - c.fns[typePair{ft.In(0).Elem(), ft.In(1).Elem()}] = fv - } - return nil -} - // AddUntyped adds the provided conversion function to the lookup table for the types that are // supplied as a and b. a and b must be pointers or an error is returned. This method overwrites // previously defined functions. @@ -197,12 +183,6 @@ func (c ConversionFuncs) AddUntyped(a, b interface{}, fn ConversionFunc) error { // both other and c, with other conversions taking precedence. func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs { merged := NewConversionFuncs() - for k, v := range c.fns { - merged.fns[k] = v - } - for k, v := range other.fns { - merged.fns[k] = v - } for k, v := range c.untyped { merged.untyped[k] = v } @@ -290,12 +270,6 @@ func (s *scope) Convert(src, dest interface{}, flags FieldMatchingFlags) error { return s.converter.Convert(src, dest, flags, s.meta) } -// DefaultConvert continues a conversion, performing a default conversion (no conversion func) -// for the current stack frame. -func (s *scope) DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error { - return s.converter.DefaultConvert(src, dest, flags, s.meta) -} - // SrcTag returns the tag of the struct containing the current source item, if any. func (s *scope) SrcTag() reflect.StructTag { return s.srcStack.top().tag @@ -360,29 +334,6 @@ func verifyConversionFunctionSignature(ft reflect.Type) error { return nil } -// RegisterConversionFunc registers a conversion func with the -// Converter. conversionFunc must take three parameters: a pointer to the input -// type, a pointer to the output type, and a conversion.Scope (which should be -// used if recursive conversion calls are desired). It must return an error. -// -// Example: -// c.RegisterConversionFunc( -// func(in *Pod, out *v1.Pod, s Scope) error { -// // conversion logic... -// return nil -// }) -// DEPRECATED: Will be removed in favor of RegisterUntypedConversionFunc -func (c *Converter) RegisterConversionFunc(conversionFunc interface{}) error { - return c.conversionFuncs.Add(conversionFunc) -} - -// Similar to RegisterConversionFunc, but registers conversion function that were -// automatically generated. 
-// DEPRECATED: Will be removed in favor of RegisterGeneratedUntypedConversionFunc -func (c *Converter) RegisterGeneratedConversionFunc(conversionFunc interface{}) error { - return c.generatedConversionFuncs.Add(conversionFunc) -} - // RegisterUntypedConversionFunc registers a function that converts between a and b by passing objects of those // types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce // any other guarantee. @@ -409,6 +360,7 @@ func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error { return fmt.Errorf("expected pointer arg for 'to' param 1, got: %v", typeTo) } c.ignoredConversions[typePair{typeFrom.Elem(), typeTo.Elem()}] = struct{}{} + c.ignoredUntypedConversions[typePair{typeFrom, typeTo}] = struct{}{} return nil } @@ -470,18 +422,6 @@ func (c *Converter) Convert(src, dest interface{}, flags FieldMatchingFlags, met return c.doConversion(src, dest, flags, meta, c.convert) } -// DefaultConvert will translate src to dest if it knows how. Both must be pointers. -// No conversion func is used. If the default copying mechanism -// doesn't work on this type pair, an error will be returned. -// Read the comments on the various FieldMatchingFlags constants to understand -// what the 'flags' parameter does. -// 'meta' is given to allow you to pass information to conversion functions, -// it is not used by DefaultConvert() other than storing it in the scope. -// Not safe for objects with cyclic references! -func (c *Converter) DefaultConvert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error { - return c.doConversion(src, dest, flags, meta, c.defaultConvert) -} - type conversionFunc func(sv, dv reflect.Value, scope *scope) error func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags, meta *Meta, f conversionFunc) error { @@ -491,6 +431,11 @@ func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags flags: flags, meta: meta, } + + // ignore conversions of this type + if _, ok := c.ignoredUntypedConversions[pair]; ok { + return nil + } if fn, ok := c.conversionFuncs.untyped[pair]; ok { return fn(src, dest, scope) } @@ -517,33 +462,20 @@ func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags return f(sv, dv, scope) } -// callCustom calls 'custom' with sv & dv. custom must be a conversion function. -func (c *Converter) callCustom(sv, dv, custom reflect.Value, scope *scope) error { - if !sv.CanAddr() { - sv2 := reflect.New(sv.Type()) - sv2.Elem().Set(sv) - sv = sv2 - } else { - sv = sv.Addr() - } +// callUntyped calls predefined conversion func. +func (c *Converter) callUntyped(sv, dv reflect.Value, f ConversionFunc, scope *scope) error { if !dv.CanAddr() { - if !dv.CanSet() { - return scope.errorf("can't addr or set dest.") - } - dvOrig := dv - dv := reflect.New(dvOrig.Type()) - defer func() { dvOrig.Set(dv) }() - } else { - dv = dv.Addr() + return scope.errorf("cant addr dest") } - args := []reflect.Value{sv, dv, reflect.ValueOf(scope)} - ret := custom.Call(args)[0].Interface() - // This convolution is necessary because nil interfaces won't convert - // to errors. 
- if ret == nil { - return nil + var svPointer reflect.Value + if sv.CanAddr() { + svPointer = sv.Addr() + } else { + svPointer = reflect.New(sv.Type()) + svPointer.Elem().Set(sv) } - return ret.(error) + dvPointer := dv.Addr() + return f(svPointer.Interface(), dvPointer.Interface(), scope) } // convert recursively copies sv into dv, calling an appropriate conversion function if @@ -561,27 +493,14 @@ func (c *Converter) convert(sv, dv reflect.Value, scope *scope) error { } // Convert sv to dv. - if fv, ok := c.conversionFuncs.fns[pair]; ok { - if c.Debug != nil { - c.Debug.Logf("Calling custom conversion of '%v' to '%v'", st, dt) - } - return c.callCustom(sv, dv, fv, scope) + pair = typePair{reflect.PtrTo(sv.Type()), reflect.PtrTo(dv.Type())} + if f, ok := c.conversionFuncs.untyped[pair]; ok { + return c.callUntyped(sv, dv, f, scope) } - if fv, ok := c.generatedConversionFuncs.fns[pair]; ok { - if c.Debug != nil { - c.Debug.Logf("Calling generated conversion of '%v' to '%v'", st, dt) - } - return c.callCustom(sv, dv, fv, scope) + if f, ok := c.generatedConversionFuncs.untyped[pair]; ok { + return c.callUntyped(sv, dv, f, scope) } - return c.defaultConvert(sv, dv, scope) -} - -// defaultConvert recursively copies sv into dv. no conversion function is called -// for the current stack frame (but conversion functions may be called for nested objects) -func (c *Converter) defaultConvert(sv, dv reflect.Value, scope *scope) error { - dt, st := dv.Type(), sv.Type() - if !dv.CanSet() { return scope.errorf("Cannot set dest. (Tried to deep copy something with unexported fields?)") } diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go index b3804aa42b26..2f0dd0074af9 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -54,10 +54,6 @@ func jsonTag(field reflect.StructField) (string, bool) { return tag, omitempty } -func formatValue(value interface{}) string { - return fmt.Sprintf("%v", value) -} - func isPointerKind(kind reflect.Kind) bool { return kind == reflect.Ptr } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go index 32db4d96f69f..abf3ace6f1cc 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -172,7 +172,7 @@ func ConvertSelectorToLabelsMap(selector string) (Set, error) { return labelsMap, err } value := strings.TrimSpace(l[1]) - if err := validateLabelValue(value); err != nil { + if err := validateLabelValue(key, value); err != nil { return labelsMap, err } labelsMap[key] = value diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index f5a0888932f2..2f8e1e2b0c4f 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -54,6 +54,11 @@ type Selector interface { // Make a deep copy of the selector. DeepCopySelector() Selector + + // RequiresExactMatch allows a caller to introspect whether a given selector + // requires a single specific label to be set, and if so returns the value it + // requires. + RequiresExactMatch(label string) (value string, found bool) } // Everything returns a selector that matches all labels. 
@@ -63,12 +68,13 @@ func Everything() Selector { type nothingSelector struct{} -func (n nothingSelector) Matches(_ Labels) bool { return false } -func (n nothingSelector) Empty() bool { return false } -func (n nothingSelector) String() string { return "" } -func (n nothingSelector) Add(_ ...Requirement) Selector { return n } -func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } -func (n nothingSelector) DeepCopySelector() Selector { return n } +func (n nothingSelector) Matches(_ Labels) bool { return false } +func (n nothingSelector) Empty() bool { return false } +func (n nothingSelector) String() string { return "" } +func (n nothingSelector) Add(_ ...Requirement) Selector { return n } +func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } +func (n nothingSelector) DeepCopySelector() Selector { return n } +func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) { return "", false } // Nothing returns a selector that matches no labels func Nothing() Selector { @@ -162,7 +168,7 @@ func NewRequirement(key string, op selection.Operator, vals []string) (*Requirem } for i := range vals { - if err := validateLabelValue(vals[i]); err != nil { + if err := validateLabelValue(key, vals[i]); err != nil { return nil, err } } @@ -358,6 +364,23 @@ func (lsel internalSelector) String() string { return strings.Join(reqs, ",") } +// RequiresExactMatch introspect whether a given selector requires a single specific field +// to be set, and if so returns the value it requires. +func (lsel internalSelector) RequiresExactMatch(label string) (value string, found bool) { + for ix := range lsel { + if lsel[ix].key == label { + switch lsel[ix].operator { + case selection.Equals, selection.DoubleEquals, selection.In: + if len(lsel[ix].strValues) == 1 { + return lsel[ix].strValues[0], true + } + } + return "", false + } + } + return "", false +} + // Token represents constant definition for lexer token type Token int @@ -837,9 +860,9 @@ func validateLabelKey(k string) error { return nil } -func validateLabelValue(v string) error { +func validateLabelValue(k, v string) error { if errs := validation.IsValidLabelValue(v); len(errs) != 0 { - return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) + return fmt.Errorf("invalid label value: %q: at key: %q: %s", v, k, strings.Join(errs, "; ")) } return nil } @@ -850,7 +873,7 @@ func SelectorFromSet(ls Set) Selector { if ls == nil || len(ls) == 0 { return internalSelector{} } - var requirements internalSelector + requirements := make([]Requirement, 0, len(ls)) for label, value := range ls { r, err := NewRequirement(label, selection.Equals, []string{value}) if err == nil { @@ -862,7 +885,7 @@ func SelectorFromSet(ls Set) Selector { } // sort to have deterministic string representation sort.Sort(ByKey(requirements)) - return requirements + return internalSelector(requirements) } // SelectorFromValidatedSet returns a Selector which will match exactly the given Set. 
@@ -872,13 +895,13 @@ func SelectorFromValidatedSet(ls Set) Selector { if ls == nil || len(ls) == 0 { return internalSelector{} } - var requirements internalSelector + requirements := make([]Requirement, 0, len(ls)) for label, value := range ls { requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}}) } // sort to have deterministic string representation sort.Sort(ByKey(requirements)) - return requirements + return internalSelector(requirements) } // ParseToRequirements takes a string representing a selector and returns a list of diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go index 284e32bc3cb8..0bccf9dd95bd 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -19,13 +19,17 @@ package runtime import ( "bytes" "encoding/base64" + "encoding/json" "fmt" "io" "net/url" "reflect" + "strconv" + "strings" "k8s.io/apimachinery/pkg/conversion/queryparams" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog" ) // codec binds an encoder and decoder. @@ -100,10 +104,19 @@ type NoopEncoder struct { var _ Serializer = NoopEncoder{} +const noopEncoderIdentifier Identifier = "noop" + func (n NoopEncoder) Encode(obj Object, w io.Writer) error { + // There is no need to handle runtime.CacheableObject, as we don't + // process the obj at all. return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder)) } +// Identifier implements runtime.Encoder interface. +func (n NoopEncoder) Identifier() Identifier { + return noopEncoderIdentifier +} + // NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding. type NoopDecoder struct { Encoder @@ -193,19 +206,51 @@ func (c *parameterCodec) EncodeParameters(obj Object, to schema.GroupVersion) (u type base64Serializer struct { Encoder Decoder + + identifier Identifier } func NewBase64Serializer(e Encoder, d Decoder) Serializer { - return &base64Serializer{e, d} + return &base64Serializer{ + Encoder: e, + Decoder: d, + identifier: identifier(e), + } +} + +func identifier(e Encoder) Identifier { + result := map[string]string{ + "name": "base64", + } + if e != nil { + result["encoder"] = string(e.Identifier()) + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for base64Serializer: %v", err) + } + return Identifier(identifier) } func (s base64Serializer) Encode(obj Object, stream io.Writer) error { + if co, ok := obj.(CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, stream) + } + return s.doEncode(obj, stream) +} + +func (s base64Serializer) doEncode(obj Object, stream io.Writer) error { e := base64.NewEncoder(base64.StdEncoding, stream) err := s.Encoder.Encode(obj, e) e.Close() return err } +// Identifier implements runtime.Encoder interface. 
+func (s base64Serializer) Identifier() Identifier { + return s.identifier +} + func (s base64Serializer) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { out := make([]byte, base64.StdEncoding.DecodedLen(len(data))) n, err := base64.StdEncoding.Decode(out, data) @@ -238,6 +283,11 @@ var ( DisabledGroupVersioner GroupVersioner = disabledGroupVersioner{} ) +const ( + internalGroupVersionerIdentifier = "internal" + disabledGroupVersionerIdentifier = "disabled" +) + type internalGroupVersioner struct{} // KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version. @@ -253,6 +303,11 @@ func (internalGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersi return schema.GroupVersionKind{}, false } +// Identifier implements GroupVersioner interface. +func (internalGroupVersioner) Identifier() string { + return internalGroupVersionerIdentifier +} + type disabledGroupVersioner struct{} // KindForGroupVersionKinds returns false for any input. @@ -260,19 +315,9 @@ func (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersi return schema.GroupVersionKind{}, false } -// GroupVersioners implements GroupVersioner and resolves to the first exact match for any kind. -type GroupVersioners []GroupVersioner - -// KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occurred. -func (gvs GroupVersioners) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { - for _, gv := range gvs { - target, ok := gv.KindForGroupVersionKinds(kinds) - if !ok { - continue - } - return target, true - } - return schema.GroupVersionKind{}, false +// Identifier implements GroupVersioner interface. +func (disabledGroupVersioner) Identifier() string { + return disabledGroupVersionerIdentifier } // Assert that schema.GroupVersion and GroupVersions implement GroupVersioner @@ -330,3 +375,22 @@ func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersio } return schema.GroupVersionKind{}, false } + +// Identifier implements GroupVersioner interface. +func (v multiGroupVersioner) Identifier() string { + groupKinds := make([]string, 0, len(v.acceptedGroupKinds)) + for _, gk := range v.acceptedGroupKinds { + groupKinds = append(groupKinds, gk.String()) + } + result := map[string]string{ + "name": "multi", + "target": v.target.String(), + "accepted": strings.Join(groupKinds, ","), + "coerce": strconv.FormatBool(v.coerce), + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling Identifier for %#v: %v", v, err) + } + return string(identifier) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go index 08d2abfe687d..d04d701f376c 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go @@ -53,27 +53,21 @@ func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, st return key, key } -// DefaultStringConversions are helpers for converting []string and string to real values. 
-var DefaultStringConversions = []interface{}{
-	Convert_Slice_string_To_string,
-	Convert_Slice_string_To_int,
-	Convert_Slice_string_To_bool,
-	Convert_Slice_string_To_int64,
-}
-
-func Convert_Slice_string_To_string(input *[]string, out *string, s conversion.Scope) error {
-	if len(*input) == 0 {
+func Convert_Slice_string_To_string(in *[]string, out *string, s conversion.Scope) error {
+	if len(*in) == 0 {
 		*out = ""
+		return nil
 	}
-	*out = (*input)[0]
+	*out = (*in)[0]
 	return nil
 }
 
-func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) error {
-	if len(*input) == 0 {
+func Convert_Slice_string_To_int(in *[]string, out *int, s conversion.Scope) error {
+	if len(*in) == 0 {
 		*out = 0
+		return nil
 	}
-	str := (*input)[0]
+	str := (*in)[0]
 	i, err := strconv.Atoi(str)
 	if err != nil {
 		return err
@@ -83,15 +77,16 @@ func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope)
 }
 
 // Convert_Slice_string_To_bool will convert a string parameter to boolean.
-// Only the absence of a value, a value of "false", or a value of "0" resolve to false.
+// Only the absence of a value (i.e. zero-length slice), a value of "false", or a
+// value of "0" resolve to false.
 // Any other value (including empty string) resolves to true.
-func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error {
-	if len(*input) == 0 {
+func Convert_Slice_string_To_bool(in *[]string, out *bool, s conversion.Scope) error {
+	if len(*in) == 0 {
 		*out = false
 		return nil
 	}
-	switch strings.ToLower((*input)[0]) {
-	case "false", "0":
+	switch {
+	case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"):
 		*out = false
 	default:
 		*out = true
@@ -99,15 +94,103 @@ func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope
 	return nil
 }
 
-func Convert_Slice_string_To_int64(input *[]string, out *int64, s conversion.Scope) error {
-	if len(*input) == 0 {
+// Convert_Slice_string_To_Pointer_bool will convert a string parameter to a boolean pointer.
+// Only the absence of a value (i.e. zero-length slice), a value of "false", or a
+// value of "0" resolve to false.
+// Any other value (including empty string) resolves to true.
+func Convert_Slice_string_To_Pointer_bool(in *[]string, out **bool, s conversion.Scope) error { + if len(*in) == 0 { + boolVar := false + *out = &boolVar + return nil + } + switch { + case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"): + boolVar := false + *out = &boolVar + default: + boolVar := true + *out = &boolVar + } + return nil +} + +func string_to_int64(in string) (int64, error) { + return strconv.ParseInt(in, 10, 64) +} + +func Convert_string_To_int64(in *string, out *int64, s conversion.Scope) error { + if in == nil { *out = 0 + return nil } - str := (*input)[0] - i, err := strconv.ParseInt(str, 10, 64) + i, err := string_to_int64(*in) if err != nil { return err } *out = i return nil } + +func Convert_Slice_string_To_int64(in *[]string, out *int64, s conversion.Scope) error { + if len(*in) == 0 { + *out = 0 + return nil + } + i, err := string_to_int64((*in)[0]) + if err != nil { + return err + } + *out = i + return nil +} + +func Convert_string_To_Pointer_int64(in *string, out **int64, s conversion.Scope) error { + if in == nil { + *out = nil + return nil + } + i, err := string_to_int64(*in) + if err != nil { + return err + } + *out = &i + return nil +} + +func Convert_Slice_string_To_Pointer_int64(in *[]string, out **int64, s conversion.Scope) error { + if len(*in) == 0 { + *out = nil + return nil + } + i, err := string_to_int64((*in)[0]) + if err != nil { + return err + } + *out = &i + return nil +} + +func RegisterStringConversions(s *Scheme) error { + if err := s.AddConversionFunc((*[]string)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_string(a.(*[]string), b.(*string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*int)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_int(a.(*[]string), b.(*int), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*bool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_bool(a.(*[]string), b.(*bool), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*int64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_int64(a.(*[]string), b.(*int64), scope) + }); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 80343081f52d..918d0831d9a2 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -17,7 +17,6 @@ limitations under the License. 
package runtime import ( - "bytes" encodingjson "encoding/json" "fmt" "math" @@ -32,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sigs.k8s.io/structured-merge-diff/v3/value" "k8s.io/klog" ) @@ -68,13 +68,8 @@ func newFieldsCache() *fieldsCache { } var ( - marshalerType = reflect.TypeOf(new(encodingjson.Marshaler)).Elem() - unmarshalerType = reflect.TypeOf(new(encodingjson.Unmarshaler)).Elem() mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) stringType = reflect.TypeOf(string("")) - int64Type = reflect.TypeOf(int64(0)) - float64Type = reflect.TypeOf(float64(0)) - boolType = reflect.TypeOf(bool(false)) fieldCache = newFieldsCache() // DefaultUnstructuredConverter performs unstructured to Go typed object conversions. @@ -94,7 +89,7 @@ func parseBool(key string) bool { } value, err := strconv.ParseBool(key) if err != nil { - utilruntime.HandleError(fmt.Errorf("Couldn't parse '%s' as bool for unstructured mismatch detection", key)) + utilruntime.HandleError(fmt.Errorf("couldn't parse '%s' as bool for unstructured mismatch detection", key)) } return value } @@ -208,13 +203,9 @@ func fromUnstructured(sv, dv reflect.Value) error { } // Check if the object has a custom JSON marshaller/unmarshaller. - if reflect.PtrTo(dt).Implements(unmarshalerType) { - data, err := json.Marshal(sv.Interface()) - if err != nil { - return fmt.Errorf("error encoding %s to json: %v", st.String(), err) - } - unmarshaler := dv.Addr().Interface().(encodingjson.Unmarshaler) - return unmarshaler.UnmarshalJSON(data) + entry := value.TypeReflectEntryOf(dv.Type()) + if entry.CanConvertFromUnstructured() { + return entry.FromUnstructured(sv, dv) } switch dt.Kind() { @@ -256,6 +247,7 @@ func fieldInfoFromField(structType reflect.Type, field int) *fieldInfo { for i := range items { if items[i] == "omitempty" { info.omitempty = true + break } } } @@ -483,112 +475,28 @@ func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error { return json.Unmarshal(data, u) } -var ( - nullBytes = []byte("null") - trueBytes = []byte("true") - falseBytes = []byte("false") -) - -func getMarshaler(v reflect.Value) (encodingjson.Marshaler, bool) { - // Check value receivers if v is not a pointer and pointer receivers if v is a pointer - if v.Type().Implements(marshalerType) { - return v.Interface().(encodingjson.Marshaler), true - } - // Check pointer receivers if v is not a pointer - if v.Kind() != reflect.Ptr && v.CanAddr() { - v = v.Addr() - if v.Type().Implements(marshalerType) { - return v.Interface().(encodingjson.Marshaler), true - } - } - return nil, false -} - func toUnstructured(sv, dv reflect.Value) error { - // Check if the object has a custom JSON marshaller/unmarshaller. - if marshaler, ok := getMarshaler(sv); ok { - if sv.Kind() == reflect.Ptr && sv.IsNil() { - // We're done - we don't need to store anything. - return nil - } - - data, err := marshaler.MarshalJSON() + // Check if the object has a custom string converter. + entry := value.TypeReflectEntryOf(sv.Type()) + if entry.CanConvertToUnstructured() { + v, err := entry.ToUnstructured(sv) if err != nil { return err } - switch { - case len(data) == 0: - return fmt.Errorf("error decoding from json: empty value") - - case bytes.Equal(data, nullBytes): - // We're done - we don't need to store anything. 
- - case bytes.Equal(data, trueBytes): - dv.Set(reflect.ValueOf(true)) - - case bytes.Equal(data, falseBytes): - dv.Set(reflect.ValueOf(false)) - - case data[0] == '"': - var result string - err := json.Unmarshal(data, &result) - if err != nil { - return fmt.Errorf("error decoding string from json: %v", err) - } - dv.Set(reflect.ValueOf(result)) - - case data[0] == '{': - result := make(map[string]interface{}) - err := json.Unmarshal(data, &result) - if err != nil { - return fmt.Errorf("error decoding object from json: %v", err) - } - dv.Set(reflect.ValueOf(result)) - - case data[0] == '[': - result := make([]interface{}, 0) - err := json.Unmarshal(data, &result) - if err != nil { - return fmt.Errorf("error decoding array from json: %v", err) - } - dv.Set(reflect.ValueOf(result)) - - default: - var ( - resultInt int64 - resultFloat float64 - err error - ) - if err = json.Unmarshal(data, &resultInt); err == nil { - dv.Set(reflect.ValueOf(resultInt)) - } else if err = json.Unmarshal(data, &resultFloat); err == nil { - dv.Set(reflect.ValueOf(resultFloat)) - } else { - return fmt.Errorf("error decoding number from json: %v", err) - } + if v != nil { + dv.Set(reflect.ValueOf(v)) } - return nil } - - st, dt := sv.Type(), dv.Type() + st := sv.Type() switch st.Kind() { case reflect.String: - if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { - dv.Set(reflect.New(stringType)) - } dv.Set(reflect.ValueOf(sv.String())) return nil case reflect.Bool: - if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { - dv.Set(reflect.New(boolType)) - } dv.Set(reflect.ValueOf(sv.Bool())) return nil case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { - dv.Set(reflect.New(int64Type)) - } dv.Set(reflect.ValueOf(sv.Int())) return nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -596,15 +504,9 @@ func toUnstructured(sv, dv reflect.Value) error { if uVal > math.MaxInt64 { return fmt.Errorf("unsigned value %d does not fit into int64 (overflow)", uVal) } - if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { - dv.Set(reflect.New(int64Type)) - } dv.Set(reflect.ValueOf(int64(uVal))) return nil case reflect.Float32, reflect.Float64: - if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { - dv.Set(reflect.New(float64Type)) - } dv.Set(reflect.ValueOf(sv.Float())) return nil case reflect.Map: diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go index db11eb8bcf64..7251e65f6e07 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go @@ -134,9 +134,16 @@ func Convert_runtime_RawExtension_To_runtime_Object(in *RawExtension, out *Objec return nil } -func DefaultEmbeddedConversions() []interface{} { - return []interface{}{ - Convert_runtime_Object_To_runtime_RawExtension, - Convert_runtime_RawExtension_To_runtime_Object, +func RegisterEmbeddedConversions(s *Scheme) error { + if err := s.AddConversionFunc((*Object)(nil), (*RawExtension)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_runtime_Object_To_runtime_RawExtension(a.(*Object), b.(*RawExtension), scope) + }); err != nil { + return err } + if err := s.AddConversionFunc((*RawExtension)(nil), (*Object)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_runtime_RawExtension_To_runtime_Object(a.(*RawExtension), b.(*Object), scope) + }); err != nil { + 
return err + } + return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go index 322b0313df55..be0c5edc8554 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/error.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -120,3 +120,32 @@ func IsMissingVersion(err error) bool { _, ok := err.(*missingVersionErr) return ok } + +// strictDecodingError is a base error type that is returned by a strict Decoder such +// as UniversalStrictDecoder. +type strictDecodingError struct { + message string + data string +} + +// NewStrictDecodingError creates a new strictDecodingError object. +func NewStrictDecodingError(message string, data string) error { + return &strictDecodingError{ + message: message, + data: data, + } +} + +func (e *strictDecodingError) Error() string { + return fmt.Sprintf("strict decoder error for %s: %s", e.data, e.message) +} + +// IsStrictDecodingError returns true if the error indicates that the provided object +// strictness violations. +func IsStrictDecodingError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*strictDecodingError) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index 9b15989c8270..071971817317 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -17,27 +17,19 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto - - It has these top-level messages: - RawExtension - TypeMeta - Unknown -*/ package runtime -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import strings "strings" -import reflect "reflect" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import io "io" + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -48,29 +40,134 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *RawExtension) Reset() { *m = RawExtension{} } +func (*RawExtension) ProtoMessage() {} +func (*RawExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{0} +} +func (m *RawExtension) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RawExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawExtension.Merge(m, src) +} +func (m *RawExtension) XXX_Size() int { + return m.Size() +} +func (m *RawExtension) XXX_DiscardUnknown() { + xxx_messageInfo_RawExtension.DiscardUnknown(m) +} + +var xxx_messageInfo_RawExtension proto.InternalMessageInfo + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{1} +} +func (m *TypeMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeMeta.Merge(m, src) +} +func (m *TypeMeta) XXX_Size() int { + return m.Size() +} +func (m *TypeMeta) XXX_DiscardUnknown() { + xxx_messageInfo_TypeMeta.DiscardUnknown(m) +} -func (m *RawExtension) Reset() { *m = RawExtension{} } -func (*RawExtension) ProtoMessage() {} -func (*RawExtension) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +var xxx_messageInfo_TypeMeta proto.InternalMessageInfo -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (m *Unknown) Reset() { *m = Unknown{} } +func (*Unknown) ProtoMessage() {} +func (*Unknown) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{2} +} +func (m *Unknown) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Unknown) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Unknown) XXX_Merge(src proto.Message) { + xxx_messageInfo_Unknown.Merge(m, src) +} +func (m *Unknown) XXX_Size() int { + return m.Size() +} +func (m *Unknown) XXX_DiscardUnknown() { + xxx_messageInfo_Unknown.DiscardUnknown(m) +} -func (m *Unknown) Reset() { *m = Unknown{} } -func (*Unknown) ProtoMessage() {} -func (*Unknown) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +var xxx_messageInfo_Unknown proto.InternalMessageInfo func init() { proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension") proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta") proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_9d3c45d7f546725c) +} + +var fileDescriptor_9d3c45d7f546725c = []byte{ + // 378 bytes of a gzipped FileDescriptorProto + 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31, + 0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5, + 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74, + 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65, + 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b, + 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05, + 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc, + 0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b, + 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd, + 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff, + 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20, + 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64, + 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1, + 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1, + 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd, + 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98, + 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e, + 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19, + 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f, + 0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3, + 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68, + 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6, + 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00, + 0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00, +} + func (m *RawExtension) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -78,23 +175,29 @@ func (m *RawExtension) Marshal() (dAtA []byte, err error) { } func (m *RawExtension) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RawExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if m.Raw != nil { - dAtA[i] = 0xa - i++ + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i += copy(dAtA[i:], m.Raw) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func (m *TypeMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -102,25 +205,32 @@ func (m *TypeMeta) Marshal() (dAtA []byte, err error) { } func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeMeta) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x12 - i++ + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *Unknown) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -128,45 +238,60 @@ func (m *Unknown) Marshal() (dAtA []byte, err error) { } func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Unknown) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TypeMeta.Size())) - n1, err := m.TypeMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 + i -= len(m.ContentType) + copy(dAtA[i:], m.ContentType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) + i-- + dAtA[i] = 0x22 + i -= len(m.ContentEncoding) + copy(dAtA[i:], m.ContentEncoding) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) + i-- + dAtA[i] = 0x1a if m.Raw != nil { - dAtA[i] = 0x12 - i++ + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i += copy(dAtA[i:], m.Raw) + i-- + dAtA[i] = 0x12 } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) - i += copy(dAtA[i:], m.ContentEncoding) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) - i += copy(dAtA[i:], m.ContentType) - return i, nil + { + size, err := m.TypeMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *RawExtension) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Raw != nil { @@ -177,6 +302,9 @@ func (m *RawExtension) Size() (n int) { } func (m *TypeMeta) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.APIVersion) @@ -187,6 +315,9 @@ func (m *TypeMeta) Size() (n int) { } func (m *Unknown) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.TypeMeta.Size() @@ -203,14 +334,7 @@ func (m *Unknown) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -272,7 +396,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -300,7 +424,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -309,6 +433,9 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -326,6 +453,9 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -353,7 +483,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -381,7 +511,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -391,6 +521,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -410,7 +543,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -420,6 +553,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -434,6 +570,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -461,7 +600,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -489,7 +628,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -498,6 +637,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -519,7 +661,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -528,6 +670,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -550,7 +695,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -560,6 +705,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -579,7 +727,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + 
stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -589,6 +737,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -603,6 +754,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -618,6 +772,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -649,10 +804,8 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -669,85 +822,34 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - return iNdEx, nil + iNdEx += length case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 378 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31, - 0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5, - 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74, - 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65, - 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b, - 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05, - 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc, - 0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b, - 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 
0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd, - 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff, - 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20, - 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64, - 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1, - 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1, - 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd, - 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98, - 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e, - 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19, - 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f, - 0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3, - 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68, - 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6, - 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00, - 0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index 00bffe3081cc..f44693c0c6f8 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -37,13 +37,36 @@ type GroupVersioner interface { // Scheme.New(target) and then perform a conversion between the current Go type and the destination Go type. // Sophisticated implementations may use additional information about the input kinds to pick a destination kind. KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (target schema.GroupVersionKind, ok bool) + // Identifier returns string representation of the object. + // Identifiers of two different encoders should be equal only if for every input + // kinds they return the same result. + Identifier() string } +// Identifier represents an identifier. +// Identitier of two different objects should be equal if and only if for every +// input the output they produce is exactly the same. +type Identifier string + // Encoder writes objects to a serialized form type Encoder interface { // Encode writes an object to a stream. Implementations may return errors if the versions are // incompatible, or if no conversion is defined. Encode(obj Object, w io.Writer) error + // Identifier returns an identifier of the encoder. + // Identifiers of two different encoders should be equal if and only if for every input + // object it will be encoded to the same representation by both of them. + // + // Identifier is inteted for use with CacheableObject#CacheEncode method. 
In order to + // correctly handle CacheableObject, Encode() method should look similar to below, where + // doEncode() is the encoding logic of implemented encoder: + // func (e *MyEncoder) Encode(obj Object, w io.Writer) error { + // if co, ok := obj.(CacheableObject); ok { + // return co.CacheEncode(e.Identifier(), e.doEncode, w) + // } + // return e.doEncode(obj, w) + // } + Identifier() Identifier } // Decoder attempts to load an object from data. @@ -132,6 +155,28 @@ type NegotiatedSerializer interface { DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder } +// ClientNegotiator handles turning an HTTP content type into the appropriate encoder. +// Use NewClientNegotiator or NewVersionedClientNegotiator to create this interface from +// a NegotiatedSerializer. +type ClientNegotiator interface { + // Encoder returns the appropriate encoder for the provided contentType (e.g. application/json) + // and any optional mediaType parameters (e.g. pretty=1), or an error. If no serializer is found + // a NegotiateError will be returned. The current client implementations consider params to be + // optional modifiers to the contentType and will ignore unrecognized parameters. + Encoder(contentType string, params map[string]string) (Encoder, error) + // Decoder returns the appropriate decoder for the provided contentType (e.g. application/json) + // and any optional mediaType parameters (e.g. pretty=1), or an error. If no serializer is found + // a NegotiateError will be returned. The current client implementations consider params to be + // optional modifiers to the contentType and will ignore unrecognized parameters. + Decoder(contentType string, params map[string]string) (Decoder, error) + // StreamDecoder returns the appropriate stream decoder for the provided contentType (e.g. + // application/json) and any optional mediaType parameters (e.g. pretty=1), or an error. If no + // serializer is found a NegotiateError will be returned. The Serializer and Framer will always + // be returned if a Decoder is returned. The current client implementations consider params to be + // optional modifiers to the contentType and will ignore unrecognized parameters. + StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error) +} + // StorageSerializer is an interface used for obtaining encoders, decoders, and serializers // that can read and write data at rest. This would commonly be used by client tools that must // read files, or server side storage interfaces that persist restful objects. @@ -210,6 +255,25 @@ type ObjectCreater interface { New(kind schema.GroupVersionKind) (out Object, err error) } +// EquivalentResourceMapper provides information about resources that address the same underlying data as a specified resource +type EquivalentResourceMapper interface { + // EquivalentResourcesFor returns a list of resources that address the same underlying data as resource. + // If subresource is specified, only equivalent resources which also have the same subresource are included. + // The specified resource can be included in the returned list. + EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource + // KindFor returns the kind expected by the specified resource[/subresource]. + // A zero value is returned if the kind is unknown. 
+ KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind +} + +// EquivalentResourceRegistry provides an EquivalentResourceMapper interface, +// and allows registering known resource[/subresource] -> kind +type EquivalentResourceRegistry interface { + EquivalentResourceMapper + // RegisterKindFor registers the existence of the specified resource[/subresource] along with its expected kind. + RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) +} + // ResourceVersioner provides methods for setting and retrieving // the resource version from an API object. type ResourceVersioner interface { @@ -237,10 +301,34 @@ type Object interface { DeepCopyObject() Object } +// CacheableObject allows an object to cache its different serializations +// to avoid performing the same serialization multiple times. +type CacheableObject interface { + // CacheEncode writes an object to a stream. The function will + // be used in case of cache miss. The function takes ownership + // of the object. + // If CacheableObject is a wrapper, then deep-copy of the wrapped object + // should be passed to function. + // CacheEncode assumes that for two different calls with the same , + // function will also be the same. + CacheEncode(id Identifier, encode func(Object, io.Writer) error, w io.Writer) error + // GetObject returns a deep-copy of an object to be encoded - the caller of + // GetObject() is the owner of returned object. The reason for making a copy + // is to avoid bugs, where caller modifies the object and forgets to copy it, + // thus modifying the object for everyone. + // The object returned by GetObject should be the same as the one that is supposed + // to be passed to function in CacheEncode method. + // If CacheableObject is a wrapper, the copy of wrapped object should be returned. + GetObject() Object +} + // Unstructured objects store values as map[string]interface{}, with only values that can be serialized // to JSON allowed. type Unstructured interface { Object + // NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. + // This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. + NewEmptyInstance() Unstructured // UnstructuredContent returns a non-nil map with this object's contents. Values may be // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to // and from JSON. SetUnstructuredContent should be used to mutate the contents. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go new file mode 100644 index 000000000000..3ff84611ab55 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type equivalentResourceRegistry struct { + // keyFunc computes a key for the specified resource (this allows honoring colocated resources across API groups). + // if null, or if "" is returned, resource.String() is used as the key + keyFunc func(resource schema.GroupResource) string + // resources maps key -> subresource -> equivalent resources (subresource is not included in the returned resources). + // main resources are stored with subresource="". + resources map[string]map[string][]schema.GroupVersionResource + // kinds maps resource -> subresource -> kind + kinds map[schema.GroupVersionResource]map[string]schema.GroupVersionKind + // keys caches the computed key for each GroupResource + keys map[schema.GroupResource]string + + mutex sync.RWMutex +} + +var _ EquivalentResourceMapper = (*equivalentResourceRegistry)(nil) +var _ EquivalentResourceRegistry = (*equivalentResourceRegistry)(nil) + +// NewEquivalentResourceRegistry creates a resource registry that considers all versions of a GroupResource to be equivalent. +func NewEquivalentResourceRegistry() EquivalentResourceRegistry { + return &equivalentResourceRegistry{} +} + +// NewEquivalentResourceRegistryWithIdentity creates a resource mapper with a custom identity function. +// If "" is returned by the function, GroupResource#String is used as the identity. +// GroupResources with the same identity string are considered equivalent. +func NewEquivalentResourceRegistryWithIdentity(keyFunc func(schema.GroupResource) string) EquivalentResourceRegistry { + return &equivalentResourceRegistry{keyFunc: keyFunc} +} + +func (r *equivalentResourceRegistry) EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.resources[r.keys[resource.GroupResource()]][subresource] +} +func (r *equivalentResourceRegistry) KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.kinds[resource][subresource] +} +func (r *equivalentResourceRegistry) RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) { + r.mutex.Lock() + defer r.mutex.Unlock() + if r.kinds == nil { + r.kinds = map[schema.GroupVersionResource]map[string]schema.GroupVersionKind{} + } + if r.kinds[resource] == nil { + r.kinds[resource] = map[string]schema.GroupVersionKind{} + } + r.kinds[resource][subresource] = kind + + // get the shared key of the parent resource + key := "" + gr := resource.GroupResource() + if r.keyFunc != nil { + key = r.keyFunc(gr) + } + if key == "" { + key = gr.String() + } + + if r.keys == nil { + r.keys = map[schema.GroupResource]string{} + } + r.keys[gr] = key + + if r.resources == nil { + r.resources = map[string]map[string][]schema.GroupVersionResource{} + } + if r.resources[key] == nil { + r.resources[key] = map[string][]schema.GroupVersionResource{} + } + r.resources[key][subresource] = append(r.resources[key][subresource], resource) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go b/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go new file mode 100644 index 000000000000..159b301206a7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go @@ -0,0 +1,146 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// NegotiateError is returned when a ClientNegotiator is unable to locate +// a serializer for the requested operation. +type NegotiateError struct { + ContentType string + Stream bool +} + +func (e NegotiateError) Error() string { + if e.Stream { + return fmt.Sprintf("no stream serializers registered for %s", e.ContentType) + } + return fmt.Sprintf("no serializers registered for %s", e.ContentType) +} + +type clientNegotiator struct { + serializer NegotiatedSerializer + encode, decode GroupVersioner +} + +func (n *clientNegotiator) Encoder(contentType string, params map[string]string) (Encoder, error) { + // TODO: `pretty=1` is handled in NegotiateOutputMediaType, consider moving it to this method + // if client negotiators truly need to use it + mediaTypes := n.serializer.SupportedMediaTypes() + info, ok := SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, NegotiateError{ContentType: contentType} + } + info = mediaTypes[0] + } + return n.serializer.EncoderForVersion(info.Serializer, n.encode), nil +} + +func (n *clientNegotiator) Decoder(contentType string, params map[string]string) (Decoder, error) { + mediaTypes := n.serializer.SupportedMediaTypes() + info, ok := SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, NegotiateError{ContentType: contentType} + } + info = mediaTypes[0] + } + return n.serializer.DecoderToVersion(info.Serializer, n.decode), nil +} + +func (n *clientNegotiator) StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error) { + mediaTypes := n.serializer.SupportedMediaTypes() + info, ok := SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, nil, nil, NegotiateError{ContentType: contentType, Stream: true} + } + info = mediaTypes[0] + } + if info.StreamSerializer == nil { + return nil, nil, nil, NegotiateError{ContentType: info.MediaType, Stream: true} + } + return n.serializer.DecoderToVersion(info.Serializer, n.decode), info.StreamSerializer.Serializer, info.StreamSerializer.Framer, nil +} + +// NewClientNegotiator will attempt to retrieve the appropriate encoder, decoder, or +// stream decoder for a given content type. Does not perform any conversion, but will +// encode the object to the desired group, version, and kind. Use when creating a client. +func NewClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator { + return &clientNegotiator{ + serializer: serializer, + encode: gv, + } +} + +// NewInternalClientNegotiator applies the default client rules for connecting to a Kubernetes apiserver +// where objects are converted to gv prior to sending and decoded to their internal representation prior +// to retrieval. 
+// +// DEPRECATED: Internal clients are deprecated and will be removed in a future Kubernetes release. +func NewInternalClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator { + decode := schema.GroupVersions{ + { + Group: gv.Group, + Version: APIVersionInternal, + }, + // always include the legacy group as a decoding target to handle non-error `Status` return types + { + Group: "", + Version: APIVersionInternal, + }, + } + return &clientNegotiator{ + encode: gv, + decode: decode, + serializer: serializer, + } +} + +// NewSimpleClientNegotiator will negotiate for a single serializer. This should only be used +// for testing or when the caller is taking responsibility for setting the GVK on encoded objects. +func NewSimpleClientNegotiator(info SerializerInfo, gv schema.GroupVersion) ClientNegotiator { + return &clientNegotiator{ + serializer: &simpleNegotiatedSerializer{info: info}, + encode: gv, + } +} + +type simpleNegotiatedSerializer struct { + info SerializerInfo +} + +func NewSimpleNegotiatedSerializer(info SerializerInfo) NegotiatedSerializer { + return &simpleNegotiatedSerializer{info: info} +} + +func (n *simpleNegotiatedSerializer) SupportedMediaTypes() []SerializerInfo { + return []SerializerInfo{n.info} +} + +func (n *simpleNegotiatedSerializer) EncoderForVersion(e Encoder, _ GroupVersioner) Encoder { + return e +} + +func (n *simpleNegotiatedSerializer) DecoderToVersion(d Decoder, _gv GroupVersioner) Decoder { + return d +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/register.go b/vendor/k8s.io/apimachinery/pkg/runtime/register.go index eeb380c3dc39..1cd2e4c3871b 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/register.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/register.go @@ -29,33 +29,3 @@ func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { } func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } - -// GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind -// interface if no objects are provided, or the ObjectKind interface of the object in the -// highest array position. -func (obj *VersionedObjects) GetObjectKind() schema.ObjectKind { - last := obj.Last() - if last == nil { - return schema.EmptyObjectKind - } - return last.GetObjectKind() -} - -// First returns the leftmost object in the VersionedObjects array, which is usually the -// object as serialized on the wire. -func (obj *VersionedObjects) First() Object { - if len(obj.Objects) == 0 { - return nil - } - return obj.Objects[0] -} - -// Last is the rightmost object in the VersionedObjects array, which is the object after -// all transformations have been applied. This is the same object that would be returned -// by Decode in a normal invocation (without VersionedObjects in the into argument). -func (obj *VersionedObjects) Last() Object { - if len(obj.Objects) == 0 { - return nil - } - return obj.Objects[len(obj.Objects)-1] -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index 28a61d5fb574..29d3ac45bea9 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -17,19 +17,15 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto -/* -Package schema is a generated protocol buffer package. 
+package schema -It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +import ( + fmt "fmt" -It has these top-level messages: -*/ -package schema + math "math" -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -40,13 +36,13 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptorGenerated) + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_0462724132518e0d) } -var fileDescriptorGenerated = []byte{ +var fileDescriptor_0462724132518e0d = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0xcc, 0xaf, 0x6e, 0xc3, 0x30, 0x10, 0xc7, 0x71, 0x9b, 0x0c, 0x0c, 0x0e, 0x0e, 0x1c, 0x1c, 0xda, 0x7c, 0x74, 0xb8, 0x2f, 0x50, diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index 4c67ed59801b..636103312f0f 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -191,6 +191,11 @@ func (gv GroupVersion) String() string { return gv.Version } +// Identifier implements runtime.GroupVersioner interface. +func (gv GroupVersion) Identifier() string { + return gv.String() +} + // KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false // if none of the options match the group. It prefers a match to group and version over just group. // TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. @@ -246,6 +251,15 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource { // in fewer places. type GroupVersions []GroupVersion +// Identifier implements runtime.GroupVersioner interface. +func (gv GroupVersions) Identifier() string { + groupVersions := make([]string, 0, len(gv)) + for i := range gv { + groupVersions = append(groupVersions, gv[i].String()) + } + return fmt.Sprintf("[%s]", strings.Join(groupVersions, ",")) +} + // KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false // if none of the options match the group. func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index fd37e293ab1a..4b739ec38fe6 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -102,10 +102,10 @@ func NewScheme() *Scheme { } s.converter = conversion.NewConverter(s.nameFunc) - utilruntime.Must(s.AddConversionFuncs(DefaultEmbeddedConversions()...)) + // Enable couple default conversions by default. 
+ utilruntime.Must(RegisterEmbeddedConversions(s)) + utilruntime.Must(RegisterStringConversions(s)) - // Enable map[string][]string conversions by default - utilruntime.Must(s.AddConversionFuncs(DefaultStringConversions...)) utilruntime.Must(s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields)) utilruntime.Must(s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields)) return s @@ -308,45 +308,6 @@ func (s *Scheme) AddIgnoredConversionType(from, to interface{}) error { return s.converter.RegisterIgnoredConversion(from, to) } -// AddConversionFuncs adds functions to the list of conversion functions. The given -// functions should know how to convert between two of your API objects, or their -// sub-objects. We deduce how to call these functions from the types of their two -// parameters; see the comment for Converter.Register. -// -// Note that, if you need to copy sub-objects that didn't change, you can use the -// conversion.Scope object that will be passed to your conversion function. -// Additionally, all conversions started by Scheme will set the SrcVersion and -// DestVersion fields on the Meta object. Example: -// -// s.AddConversionFuncs( -// func(in *InternalObject, out *ExternalObject, scope conversion.Scope) error { -// // You can depend on Meta() being non-nil, and this being set to -// // the source version, e.g., "" -// s.Meta().SrcVersion -// // You can depend on this being set to the destination version, -// // e.g., "v1". -// s.Meta().DestVersion -// // Call scope.Convert to copy sub-fields. -// s.Convert(&in.SubFieldThatMoved, &out.NewLocation.NewName, 0) -// return nil -// }, -// ) -// -// (For more detail about conversion functions, see Converter.Register's comment.) -// -// Also note that the default behavior, if you don't add a conversion function, is to -// sanely copy fields that have the same names and same type names. It's OK if the -// destination type has extra fields, but it must not remove any. So you only need to -// add conversion functions for things with changed/removed fields. -func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error { - for _, f := range conversionFuncs { - if err := s.converter.RegisterConversionFunc(f); err != nil { - return err - } - } - return nil -} - // AddConversionFunc registers a function that converts between a and b by passing objects of those // types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce // any other guarantee. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index 3d3ebe5f9d1b..31359f35f451 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -95,7 +95,7 @@ type RawExtension struct { // Raw is the underlying serialization of this object. // // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. - Raw []byte `protobuf:"bytes,1,opt,name=raw"` + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"` // Object can hold a representation of this extension - useful for working with versioned // structs. Object Object `json:"-"` @@ -124,16 +124,3 @@ type Unknown struct { // Unspecified means ContentTypeJSON. 
ContentType string `protobuf:"bytes,4,opt,name=contentType"` } - -// VersionedObjects is used by Decoders to give callers a way to access all versions -// of an object during the decoding process. -// -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:deepcopy-gen=true -type VersionedObjects struct { - // Objects is the set of objects retrieved during decoding, in order of conversion. - // The 0 index is the object as serialized on the wire. If conversion has occurred, - // other objects may be present. The right most object is the same as would be returned - // by a normal Decode call. - Objects []Object -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go index ead96ee05543..a82227b239af 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go @@ -24,46 +24,66 @@ type ProtobufMarshaller interface { MarshalTo(data []byte) (int, error) } +type ProtobufReverseMarshaller interface { + MarshalToSizedBuffer(data []byte) (int, error) +} + // NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown -// that will contain an object that implements ProtobufMarshaller. +// that will contain an object that implements ProtobufMarshaller or ProtobufReverseMarshaller. func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size())) - n1, err := m.TypeMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err + // Calculate the full size of the message. + msgSize := m.Size() + if b != nil { + msgSize += int(size) + sovGenerated(size) + 1 } - i += n1 + // Reverse marshal the fields of m. + i := msgSize + i -= len(m.ContentType) + copy(data[i:], m.ContentType) + i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) + i-- + data[i] = 0x22 + i -= len(m.ContentEncoding) + copy(data[i:], m.ContentEncoding) + i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) + i-- + data[i] = 0x1a if b != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, size) - n2, err := b.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - if uint64(n2) != size { - // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto - // struct returned would be wrong. - return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n2) + if r, ok := b.(ProtobufReverseMarshaller); ok { + n1, err := r.MarshalToSizedBuffer(data[:i]) + if err != nil { + return 0, err + } + i -= int(size) + if uint64(n1) != size { + // programmer error: the Size() method for protobuf does not match the results of LashramOt, which means the proto + // struct returned would be wrong. + return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1) + } + } else { + i -= int(size) + n1, err := b.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + if uint64(n1) != size { + // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto + // struct returned would be wrong. 
+ return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1) + } } - i += n2 + i = encodeVarintGenerated(data, i, size) + i-- + data[i] = 0x12 } - - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) - i += copy(data[i:], m.ContentEncoding) - - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) - i += copy(data[i:], m.ContentType) - return i, nil + n2, err := m.TypeMeta.MarshalToSizedBuffer(data[:i]) + if err != nil { + return 0, err + } + i -= n2 + i = encodeVarintGenerated(data, i, uint64(n2)) + i-- + data[i] = 0xa + return msgSize - i, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go index 8b9182f359d4..b0393839e1f4 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -73,36 +73,3 @@ func (in *Unknown) DeepCopyObject() Object { } return nil } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VersionedObjects) DeepCopyInto(out *VersionedObjects) { - *out = *in - if in.Objects != nil { - in, out := &in.Objects, &out.Objects - *out = make([]Object, len(*in)) - for i := range *in { - if (*in)[i] != nil { - (*out)[i] = (*in)[i].DeepCopyObject() - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionedObjects. -func (in *VersionedObjects) DeepCopy() *VersionedObjects { - if in == nil { - return nil - } - out := new(VersionedObjects) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object. -func (in *VersionedObjects) DeepCopyObject() Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go index 62a73f34ebe7..5bafc218e2f4 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -28,9 +28,14 @@ type MessageCountMap map[string]int // Aggregate represents an object that contains multiple errors, but does not // necessarily have singular semantic meaning. +// The aggregate can be used with `errors.Is()` to check for the occurrence of +// a specific error type. +// Errors.As() is not supported, because the caller presumably cares about a +// specific error of potentially multiple that match the given type. 
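Illustrative sketch (not part of the vendored change): the Is(error) bool method added to Aggregate above is what lets callers probe an aggregate with the standard errors.Is, including errors wrapped inside it. The errQuotaExceeded sentinel and the wrapping are invented for illustration; only NewAggregate and errors.Is come from the code above and the standard library.

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// Hypothetical sentinel error, for illustration only.
var errQuotaExceeded = errors.New("quota exceeded")

func main() {
	agg := utilerrors.NewAggregate([]error{
		errors.New("node not ready"),
		fmt.Errorf("creating pod: %w", errQuotaExceeded),
	})

	// The Is method added above lets errors.Is walk every (possibly wrapped)
	// error contained in the aggregate.
	fmt.Println(errors.Is(agg, errQuotaExceeded)) // true
}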
type Aggregate interface { error Errors() []error + Is(error) bool } // NewAggregate converts a slice of errors into an Aggregate interface, which @@ -71,16 +76,17 @@ func (agg aggregate) Error() string { } seenerrs := sets.NewString() result := "" - agg.visit(func(err error) { + agg.visit(func(err error) bool { msg := err.Error() if seenerrs.Has(msg) { - return + return false } seenerrs.Insert(msg) if len(seenerrs) > 1 { result += ", " } result += msg + return false }) if len(seenerrs) == 1 { return result @@ -88,19 +94,33 @@ func (agg aggregate) Error() string { return "[" + result + "]" } -func (agg aggregate) visit(f func(err error)) { +func (agg aggregate) Is(target error) bool { + return agg.visit(func(err error) bool { + return errors.Is(err, target) + }) +} + +func (agg aggregate) visit(f func(err error) bool) bool { for _, err := range agg { switch err := err.(type) { case aggregate: - err.visit(f) + if match := err.visit(f); match { + return match + } case Aggregate: for _, nestedErr := range err.Errors() { - f(nestedErr) + if match := f(nestedErr); match { + return match + } } default: - f(err) + if match := f(err); match { + return match + } } } + + return false } // Errors is part of the Aggregate interface. diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index 48dd7d9c551d..ec1cb70f29bd 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -17,22 +17,17 @@ limitations under the License. // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto -/* - Package intstr is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto - - It has these top-level messages: - IntOrString -*/ package intstr -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import io "io" + io "io" + math "math" + math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -43,19 +38,71 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *IntOrString) Reset() { *m = IntOrString{} } -func (*IntOrString) ProtoMessage() {} -func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *IntOrString) Reset() { *m = IntOrString{} } +func (*IntOrString) ProtoMessage() {} +func (*IntOrString) Descriptor() ([]byte, []int) { + return fileDescriptor_94e046ae3ce6121c, []int{0} +} +func (m *IntOrString) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IntOrString) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntOrString.Merge(m, src) +} +func (m *IntOrString) XXX_Size() int { + return m.Size() +} +func (m *IntOrString) XXX_DiscardUnknown() { + xxx_messageInfo_IntOrString.DiscardUnknown(m) +} + +var xxx_messageInfo_IntOrString proto.InternalMessageInfo func init() { proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString") } + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_94e046ae3ce6121c) +} + +var fileDescriptor_94e046ae3ce6121c = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, + 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, + 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, + 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, + 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, + 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, + 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, + 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, + 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, + 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, + 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, + 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, + 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, + 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, + 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, + 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, + 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, + 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, + 0x64, 0x01, 0x00, 0x00, +} + func (m *IntOrString) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -63,33 +110,44 @@ func (m 
*IntOrString) Marshal() (dAtA []byte, err error) { } func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IntOrString) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) - dAtA[i] = 0x1a - i++ + i -= len(m.StrVal) + copy(dAtA[i:], m.StrVal) i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) - i += copy(dAtA[i:], m.StrVal) - return i, nil + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *IntOrString) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.Type)) @@ -100,14 +158,7 @@ func (m *IntOrString) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -127,7 +178,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -155,7 +206,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= (Type(b) & 0x7F) << shift + m.Type |= Type(b&0x7F) << shift if b < 0x80 { break } @@ -174,7 +225,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.IntVal |= (int32(b) & 0x7F) << shift + m.IntVal |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -193,7 +244,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -203,6 +254,9 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -217,6 +271,9 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -232,6 +289,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -263,10 +321,8 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -283,80 +339,34 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - return iNdEx, nil + iNdEx += length case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") ) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, - 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, - 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, - 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, - 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, - 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, - 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, - 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, - 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, - 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, - 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, - 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, - 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, - 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, - 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, - 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, - 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, - 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, - 0x64, 0x01, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 5b26ed262631..cb974dcf7c90 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -45,7 +45,7 @@ type IntOrString struct { 
} // Type represents the stored type of IntOrString. -type Type int +type Type int64 const ( Int Type = iota // The IntOrString holds an int. @@ -97,7 +97,8 @@ func (intstr *IntOrString) String() string { } // IntValue returns the IntVal if type Int, or if -// it is a String, will attempt a conversion to int. +// it is a String, will attempt a conversion to int, +// returning 0 if a parsing error occurs. func (intstr *IntOrString) IntValue() int { if intstr.Type == String { i, _ := strconv.Atoi(intstr.StrVal) @@ -122,11 +123,11 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { // the OpenAPI spec of this type. // // See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (_ IntOrString) OpenAPISchemaType() []string { return []string{"string"} } +func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} } // OpenAPISchemaFormat is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. -func (_ IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } +func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } func (intstr *IntOrString) Fuzz(c fuzz.Continue) { if intstr == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go index 10c8cb837ed5..0e2e30175476 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -19,6 +19,7 @@ package json import ( "bytes" "encoding/json" + "fmt" "io" ) @@ -34,6 +35,9 @@ func Marshal(v interface{}) ([]byte, error) { return json.Marshal(v) } +// limit recursive depth to prevent stack overflow errors +const maxDepth = 10000 + // Unmarshal unmarshals the given data // If v is a *map[string]interface{}, numbers are converted to int64 or float64 func Unmarshal(data []byte, v interface{}) error { @@ -48,7 +52,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertMapNumbers(*v) + return convertMapNumbers(*v, 0) case *[]interface{}: // Build a decoder from the given data @@ -60,7 +64,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertSliceNumbers(*v) + return convertSliceNumbers(*v, 0) default: return json.Unmarshal(data, v) @@ -69,16 +73,20 @@ func Unmarshal(data []byte, v interface{}) error { // convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. // values which are map[string]interface{} or []interface{} are recursively visited -func convertMapNumbers(m map[string]interface{}) error { +func convertMapNumbers(m map[string]interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + var err error for k, v := range m { switch v := v.(type) { case json.Number: m[k], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v) + err = convertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v) + err = convertSliceNumbers(v, depth+1) } if err != nil { return err @@ -89,16 +97,20 @@ func convertMapNumbers(m map[string]interface{}) error { // convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. 
// values which are map[string]interface{} or []interface{} are recursively visited -func convertSliceNumbers(s []interface{}) error { +func convertSliceNumbers(s []interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + var err error for i, v := range s { switch v := v.(type) { case json.Number: s[i], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v) + err = convertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v) + err = convertSliceNumbers(v, depth+1) } if err != nil { return err diff --git a/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go index 2965d5a8bc52..d69bf32caa8b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go +++ b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go @@ -82,7 +82,7 @@ var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[ func extractStackCreator() (string, int, bool) { stack := debug.Stack() matches := stackCreator.FindStringSubmatch(string(stack)) - if matches == nil || len(matches) != 4 { + if len(matches) != 4 { return "", 0, false } line, err := strconv.Atoi(matches[3]) diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 078f00d9b979..7449cbb0a01a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -55,6 +55,12 @@ func JoinPreservingTrailingSlash(elem ...string) string { return result } +// IsTimeout returns true if the given error is a network timeout error +func IsTimeout(err error) bool { + neterr, ok := err.(net.Error) + return ok && neterr != nil && neterr.Timeout() +} + // IsProbableEOF returns true if the given error resembles a connection termination // scenario that would justify assuming that the watch is empty. // These errors are what the Go http stack returns back to us which are general @@ -101,6 +107,9 @@ func SetOldTransportDefaults(t *http.Transport) *http.Transport { if t.TLSHandshakeTimeout == 0 { t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout } + if t.IdleConnTimeout == 0 { + t.IdleConnTimeout = defaultTransport.IdleConnTimeout + } return t } @@ -111,7 +120,7 @@ func SetTransportDefaults(t *http.Transport) *http.Transport { // Allow clients to disable http2 if needed. 
if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { klog.Infof("HTTP2 has been explicitly disabled") - } else { + } else if allowsHTTP2(t) { if err := http2.ConfigureTransport(t); err != nil { klog.Warningf("Transport failed http2 configuration: %v", err) } @@ -119,6 +128,21 @@ func SetTransportDefaults(t *http.Transport) *http.Transport { return t } +func allowsHTTP2(t *http.Transport) bool { + if t.TLSClientConfig == nil || len(t.TLSClientConfig.NextProtos) == 0 { + // the transport expressed no NextProto preference, allow + return true + } + for _, p := range t.TLSClientConfig.NextProtos { + if p == http2.NextProtoTLS { + // the transport explicitly allowed http/2 + return true + } + } + // the transport explicitly set NextProtos and excluded http/2 + return false +} + type RoundTripperWrapper interface { http.RoundTripper WrappedRoundTripper() http.RoundTripper @@ -188,13 +212,17 @@ func GetHTTPClient(req *http.Request) string { return "unknown" } -// SourceIPs splits the comma separated X-Forwarded-For header or returns the X-Real-Ip header or req.RemoteAddr, -// in that order, ignoring invalid IPs. It returns nil if all of these are empty or invalid. +// SourceIPs splits the comma separated X-Forwarded-For header and joins it with +// the X-Real-Ip header and/or req.RemoteAddr, ignoring invalid IPs. +// The X-Real-Ip is omitted if it's already present in the X-Forwarded-For chain. +// The req.RemoteAddr is always the last IP in the returned list. +// It returns nil if all of these are empty or invalid. func SourceIPs(req *http.Request) []net.IP { + var srcIPs []net.IP + hdr := req.Header // First check the X-Forwarded-For header for requests via proxy. hdrForwardedFor := hdr.Get("X-Forwarded-For") - forwardedForIPs := []net.IP{} if hdrForwardedFor != "" { // X-Forwarded-For can be a csv of IPs in case of multiple proxies. // Use the first valid one. @@ -202,38 +230,49 @@ func SourceIPs(req *http.Request) []net.IP { for _, part := range parts { ip := net.ParseIP(strings.TrimSpace(part)) if ip != nil { - forwardedForIPs = append(forwardedForIPs, ip) + srcIPs = append(srcIPs, ip) } } } - if len(forwardedForIPs) > 0 { - return forwardedForIPs - } // Try the X-Real-Ip header. hdrRealIp := hdr.Get("X-Real-Ip") if hdrRealIp != "" { ip := net.ParseIP(hdrRealIp) - if ip != nil { - return []net.IP{ip} + // Only append the X-Real-Ip if it's not already contained in the X-Forwarded-For chain. + if ip != nil && !containsIP(srcIPs, ip) { + srcIPs = append(srcIPs, ip) } } - // Fallback to Remote Address in request, which will give the correct client IP when there is no proxy. + // Always include the request Remote Address as it cannot be easily spoofed. + var remoteIP net.IP // Remote Address in Go's HTTP server is in the form host:port so we need to split that first. host, _, err := net.SplitHostPort(req.RemoteAddr) if err == nil { - if remoteIP := net.ParseIP(host); remoteIP != nil { - return []net.IP{remoteIP} - } + remoteIP = net.ParseIP(host) } - // Fallback if Remote Address was just IP. - if remoteIP := net.ParseIP(req.RemoteAddr); remoteIP != nil { - return []net.IP{remoteIP} + if remoteIP == nil { + remoteIP = net.ParseIP(req.RemoteAddr) + } + + // Don't duplicate remote IP if it's already the last address in the chain. + if remoteIP != nil && (len(srcIPs) == 0 || !remoteIP.Equal(srcIPs[len(srcIPs)-1])) { + srcIPs = append(srcIPs, remoteIP) } - return nil + return srcIPs +} + +// Checks whether the given IP address is contained in the list of IPs. 
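Illustrative sketch (not part of the vendored change): with the rework above, SourceIPs no longer returns only the first usable source; it returns the whole chain — valid X-Forwarded-For entries, then X-Real-Ip if not already present, then the request's RemoteAddr last. The addresses and the hand-built request below are made up for illustration.

package main

import (
	"fmt"
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	req, _ := http.NewRequest("GET", "http://example.com", nil)
	req.Header.Set("X-Forwarded-For", "10.0.0.1, 10.0.0.2, not-an-ip")
	req.Header.Set("X-Real-Ip", "10.0.0.2") // already in the chain, so omitted
	req.RemoteAddr = "192.0.2.10:54321"     // always appended last

	// Invalid entries are skipped; expected: [10.0.0.1 10.0.0.2 192.0.2.10]
	fmt.Println(utilnet.SourceIPs(req))
}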
+func containsIP(ips []net.IP, ip net.IP) bool { + for _, v := range ips { + if v.Equal(ip) { + return true + } + } + return false } // Extracts and returns the clients IP from the given request. diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go index daf5d2496455..836494d579aa 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -36,6 +36,18 @@ const ( familyIPv6 AddressFamily = 6 ) +type AddressFamilyPreference []AddressFamily + +var ( + preferIPv4 = AddressFamilyPreference{familyIPv4, familyIPv6} + preferIPv6 = AddressFamilyPreference{familyIPv6, familyIPv4} +) + +const ( + // LoopbackInterfaceName is the default name of the loopback interface + LoopbackInterfaceName = "lo" +) + const ( ipv4RouteFile = "/proc/net/route" ipv6RouteFile = "/proc/net/ipv6_route" @@ -53,7 +65,7 @@ type RouteFile struct { parse func(input io.Reader) ([]Route, error) } -// noRoutesError can be returned by ChooseBindAddress() in case of no routes +// noRoutesError can be returned in case of no routes type noRoutesError struct { message string } @@ -254,7 +266,7 @@ func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInte return nil, nil } -// memberOF tells if the IP is of the desired family. Used for checking interface addresses. +// memberOf tells if the IP is of the desired family. Used for checking interface addresses. func memberOf(ip net.IP, family AddressFamily) bool { if ip.To4() != nil { return family == familyIPv4 @@ -265,8 +277,8 @@ func memberOf(ip net.IP, family AddressFamily) bool { // chooseIPFromHostInterfaces looks at all system interfaces, trying to find one that is up that // has a global unicast address (non-loopback, non-link local, non-point2point), and returns the IP. -// Searches for IPv4 addresses, and then IPv6 addresses. -func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { +// addressFamilies determines whether it prefers IPv4 or IPv6 +func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) { intfs, err := nw.Interfaces() if err != nil { return nil, err @@ -274,7 +286,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { if len(intfs) == 0 { return nil, fmt.Errorf("no interfaces found on host.") } - for _, family := range []AddressFamily{familyIPv4, familyIPv6} { + for _, family := range addressFamilies { klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) for _, intf := range intfs { if !isInterfaceUp(&intf) { @@ -321,15 +333,19 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { // IP of the interface with a gateway on it (with priority given to IPv4). For a node // with no internet connection, it returns error. func ChooseHostInterface() (net.IP, error) { + return chooseHostInterface(preferIPv4) +} + +func chooseHostInterface(addressFamilies AddressFamilyPreference) (net.IP, error) { var nw networkInterfacer = networkInterface{} if _, err := os.Stat(ipv4RouteFile); os.IsNotExist(err) { - return chooseIPFromHostInterfaces(nw) + return chooseIPFromHostInterfaces(nw, addressFamilies) } routes, err := getAllDefaultRoutes() if err != nil { return nil, err } - return chooseHostInterfaceFromRoute(routes, nw) + return chooseHostInterfaceFromRoute(routes, nw, addressFamilies) } // networkInterfacer defines an interface for several net library functions. 
Production @@ -377,10 +393,10 @@ func getAllDefaultRoutes() ([]Route, error) { } // chooseHostInterfaceFromRoute cycles through each default route provided, looking for a -// global IP address from the interface for the route. Will first look all each IPv4 route for -// an IPv4 IP, and then will look at each IPv6 route for an IPv6 IP. -func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) { - for _, family := range []AddressFamily{familyIPv4, familyIPv6} { +// global IP address from the interface for the route. addressFamilies determines whether it +// prefers IPv4 or IPv6 +func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) { + for _, family := range addressFamilies { klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) for _, route := range routes { if route.Family != family { @@ -401,12 +417,19 @@ func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, return nil, fmt.Errorf("unable to select an IP from default routes.") } -// If bind-address is usable, return it directly -// If bind-address is not usable (unset, 0.0.0.0, or loopback), we will use the host's default -// interface. -func ChooseBindAddress(bindAddress net.IP) (net.IP, error) { +// ResolveBindAddress returns the IP address of a daemon, based on the given bindAddress: +// If bindAddress is unset, it returns the host's default IP, as with ChooseHostInterface(). +// If bindAddress is unspecified or loopback, it returns the default IP of the same +// address family as bindAddress. +// Otherwise, it just returns bindAddress. +func ResolveBindAddress(bindAddress net.IP) (net.IP, error) { + addressFamilies := preferIPv4 + if bindAddress != nil && memberOf(bindAddress, familyIPv6) { + addressFamilies = preferIPv6 + } + if bindAddress == nil || bindAddress.IsUnspecified() || bindAddress.IsLoopback() { - hostIP, err := ChooseHostInterface() + hostIP, err := chooseHostInterface(addressFamilies) if err != nil { return nil, err } @@ -414,3 +437,21 @@ func ChooseBindAddress(bindAddress net.IP) (net.IP, error) { } return bindAddress, nil } + +// ChooseBindAddressForInterface choose a global IP for a specific interface, with priority given to IPv4. +// This is required in case of network setups where default routes are present, but network +// interfaces use only link-local addresses (e.g. as described in RFC5549). +// e.g when using BGP to announce a host IP over link-local ip addresses and this ip address is attached to the lo interface. 
+func ChooseBindAddressForInterface(intfName string) (net.IP, error) { + var nw networkInterfacer = networkInterface{} + for _, family := range preferIPv4 { + ip, err := getIPFromInterface(intfName, family, nw) + if err != nil { + return nil, err + } + if ip != nil { + return ip, nil + } + } + return nil, fmt.Errorf("unable to select an IP from %s network interface", intfName) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go index 8344d10c83ae..2e7cb9499465 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -54,3 +54,20 @@ func IsConnectionReset(err error) bool { } return false } + +// Returns if the given err is "connection refused" error +func IsConnectionRefused(err error) bool { + if urlErr, ok := err.(*url.Error); ok { + err = urlErr.Err + } + if opErr, ok := err.(*net.OpError); ok { + err = opErr.Err + } + if osErr, ok := err.(*os.SyscallError); ok { + err = osErr.Err + } + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED { + return true + } + return false +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 3c886f46c3f1..1428443f5449 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -18,6 +18,7 @@ package runtime import ( "fmt" + "net/http" "runtime" "sync" "time" @@ -40,11 +41,7 @@ var PanicHandlers = []func(interface{}){logPanic} // called in case of panic. HandleCrash actually crashes, after calling the // handlers and logging the panic message. // -// TODO: remove this function. We are switching to a world where it's safe for -// apiserver to panic, since it will be restarted by kubelet. At the beginning -// of the Kubernetes project, nothing was going to restart apiserver and so -// catching panics was important. But it's actually much simpler for monitoring -// software if we just exit when an unexpected panic happens. +// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { for _, fn := range PanicHandlers { @@ -60,8 +57,16 @@ func HandleCrash(additionalHandlers ...func(interface{})) { } } -// logPanic logs the caller tree when a panic occurs. +// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). func logPanic(r interface{}) { + if r == http.ErrAbortHandler { + // honor the http.ErrAbortHandler sentinel panic value: + // ErrAbortHandler is a sentinel panic value to abort a handler. + // While any panic from ServeHTTP aborts the response to the client, + // panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log. + return + } + // Same as stdlib http server code. Manually allocate stack trace buffer size // to prevent excessively large logs const size = 64 << 10 diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go index 766f4501e0f2..9bfa85d43d41 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -46,17 +46,19 @@ func ByteKeySet(theMap interface{}) Byte { } // Insert adds items to the set. 
-func (s Byte) Insert(items ...byte) { +func (s Byte) Insert(items ...byte) Byte { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Byte) Delete(items ...byte) { +func (s Byte) Delete(items ...byte) Byte { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go index a0a513cd9b51..88bd7096791e 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -46,17 +46,19 @@ func IntKeySet(theMap interface{}) Int { } // Insert adds items to the set. -func (s Int) Insert(items ...int) { +func (s Int) Insert(items ...int) Int { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s Int) Delete(items ...int) { +func (s Int) Delete(items ...int) Int { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go new file mode 100644 index 000000000000..96a48555426f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. +type Int32 map[int32]Empty + +// NewInt32 creates a Int32 from a list of values. +func NewInt32(items ...int32) Int32 { + ss := Int32{} + ss.Insert(items...) + return ss +} + +// Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func Int32KeySet(theMap interface{}) Int32 { + v := reflect.ValueOf(theMap) + ret := Int32{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int32)) + } + return ret +} + +// Insert adds items to the set. +func (s Int32) Insert(items ...int32) Int32 { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Int32) Delete(items ...int32) Int32 { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Int32) Has(item int32) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int32) HasAll(items ...int32) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. 
+func (s Int32) HasAny(items ...int32) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int32) Difference(s2 Int32) Int32 { + result := NewInt32() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int32) Union(s2 Int32) Int32 { + result := NewInt32() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int32) Intersection(s2 Int32) Int32 { + var walk, other Int32 + result := NewInt32() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int32) IsSuperset(s2 Int32) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int32) Equal(s2 Int32) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt32 []int32 + +func (s sortableSliceOfInt32) Len() int { return len(s) } +func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) } +func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int32 slice. +func (s Int32) List() []int32 { + res := make(sortableSliceOfInt32, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int32(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int32) UnsortedList() []int32 { + res := make([]int32, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Int32) PopAny() (int32, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int32 + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int32) Len() int { + return len(s) +} + +func lessInt32(lhs, rhs int32) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go index 9ca9af0c5918..b375a1b065c9 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -46,17 +46,19 @@ func Int64KeySet(theMap interface{}) Int64 { } // Insert adds items to the set. -func (s Int64) Insert(items ...int64) { +func (s Int64) Insert(items ...int64) Int64 { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. 
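The sets changes above (Byte, Int, the new Int32, Int64, String) make Insert and Delete return the receiver, so construction and mutation can be chained. A short sketch of the resulting style, assuming the package is imported from k8s.io/apimachinery/pkg/util/sets; the values are arbitrary:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/sets"
    )

    func main() {
        // Insert and Delete now return the set, so it can be built fluently.
        ports := sets.NewInt32(80, 443).Insert(8080, 8443).Delete(80)

        allowed := sets.NewString("GET", "POST", "PUT")
        if allowed.Has("POST") {
            fmt.Println("POST is allowed")
        }

        // List returns the contents as a sorted slice.
        fmt.Println(ports.List()) // [443 8080 8443]
    }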
-func (s Int64) Delete(items ...int64) { +func (s Int64) Delete(items ...int64) Int64 { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go index ba00ad7df4e7..e6f37db8874b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -46,17 +46,19 @@ func StringKeySet(theMap interface{}) String { } // Insert adds items to the set. -func (s String) Insert(items ...string) { +func (s String) Insert(items ...string) String { for _, item := range items { s[item] = Empty{} } + return s } // Delete removes all items from the set. -func (s String) Delete(items ...string) { +func (s String) Delete(items ...string) String { for _, item := range items { delete(s, item) } + return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index 4767fd1dda10..0cd5d65775a7 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -116,6 +116,10 @@ const ( // This is similar to ErrorTypeInvalid, but the error will not include the // too-long value. See TooLong(). ErrorTypeTooLong ErrorType = "FieldValueTooLong" + // ErrorTypeTooMany is used to report "too many". This is used to + // report that a given list has too many items. This is similar to FieldValueTooLong, + // but the error indicates quantity instead of length. + ErrorTypeTooMany ErrorType = "FieldValueTooMany" // ErrorTypeInternal is used to report other errors that are not related // to user input. See InternalError(). ErrorTypeInternal ErrorType = "InternalError" @@ -138,6 +142,8 @@ func (t ErrorType) String() string { return "Forbidden" case ErrorTypeTooLong: return "Too long" + case ErrorTypeTooMany: + return "Too many" case ErrorTypeInternal: return "Internal error" default: @@ -198,7 +204,14 @@ func Forbidden(field *Path, detail string) *Error { // Invalid, but the returned error will not include the too-long // value. func TooLong(field *Path, value interface{}, maxLength int) *Error { - return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)} + return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)} +} + +// TooMany returns a *Error indicating "too many". This is used to +// report that a given list has too many items. This is similar to TooLong, +// but the returned error indicates quantity instead of length. +func TooMany(field *Path, actualQuantity, maxQuantity int) *Error { + return &Error{ErrorTypeTooMany, field.String(), actualQuantity, fmt.Sprintf("must have at most %d items", maxQuantity)} } // InternalError returns a *Error indicating "internal error". This is used diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index 2dd99992dcad..915231f2e757 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -70,7 +70,11 @@ func IsQualifiedName(value string) []string { return errs } -// IsFullyQualifiedName checks if the name is fully qualified. 
+// IsFullyQualifiedName checks if the name is fully qualified. This is similar +// to IsFullyQualifiedDomainName but requires a minimum of 3 segments instead of +// 2 and does not accept a trailing . as valid. +// TODO: This function is deprecated and preserved until all callers migrate to +// IsFullyQualifiedDomainName; please don't add new callers. func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { var allErrors field.ErrorList if len(name) == 0 { @@ -85,6 +89,64 @@ func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { return allErrors } +// IsFullyQualifiedDomainName checks if the domain name is fully qualified. This +// is similar to IsFullyQualifiedName but only requires a minimum of 2 segments +// instead of 3 and accepts a trailing . as valid. +func IsFullyQualifiedDomainName(fldPath *field.Path, name string) field.ErrorList { + var allErrors field.ErrorList + if len(name) == 0 { + return append(allErrors, field.Required(fldPath, "")) + } + if strings.HasSuffix(name, ".") { + name = name[:len(name)-1] + } + if errs := IsDNS1123Subdomain(name); len(errs) > 0 { + return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) + } + if len(strings.Split(name, ".")) < 2 { + return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least two segments separated by dots")) + } + return allErrors +} + +// Allowed characters in an HTTP Path as defined by RFC 3986. A HTTP path may +// contain: +// * unreserved characters (alphanumeric, '-', '.', '_', '~') +// * percent-encoded octets +// * sub-delims ("!", "$", "&", "'", "(", ")", "*", "+", ",", ";", "=") +// * a colon character (":") +const httpPathFmt string = `[A-Za-z0-9/\-._~%!$&'()*+,;=:]+` + +var httpPathRegexp = regexp.MustCompile("^" + httpPathFmt + "$") + +// IsDomainPrefixedPath checks if the given string is a domain-prefixed path +// (e.g. acme.io/foo). All characters before the first "/" must be a valid +// subdomain as defined by RFC 1123. All characters trailing the first "/" must +// be valid HTTP Path characters as defined by RFC 3986. +func IsDomainPrefixedPath(fldPath *field.Path, dpPath string) field.ErrorList { + var allErrs field.ErrorList + if len(dpPath) == 0 { + return append(allErrs, field.Required(fldPath, "")) + } + + segments := strings.SplitN(dpPath, "/", 2) + if len(segments) != 2 || len(segments[0]) == 0 || len(segments[1]) == 0 { + return append(allErrs, field.Invalid(fldPath, dpPath, "must be a domain-prefixed path (such as \"acme.io/foo\")")) + } + + host := segments[0] + for _, err := range IsDNS1123Subdomain(host) { + allErrs = append(allErrs, field.Invalid(fldPath, host, err)) + } + + path := segments[1] + if !httpPathRegexp.MatchString(path) { + return append(allErrs, field.Invalid(fldPath, path, RegexError("Invalid path", httpPathFmt))) + } + + return allErrs +} + const labelValueFmt string = "(" + qualifiedNameFmt + ")?" const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" @@ -285,6 +347,26 @@ func IsValidIP(value string) []string { return nil } +// IsValidIPv4Address tests that the argument is a valid IPv4 address. 
+func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + ip := net.ParseIP(value) + if ip == nil || ip.To4() == nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address")) + } + return allErrors +} + +// IsValidIPv6Address tests that the argument is a valid IPv6 address. +func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + ip := net.ParseIP(value) + if ip == nil || ip.To4() != nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address")) + } + return allErrors +} + const percentFmt string = "[0-9]+%" const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index 8af256eb12a0..4269a836a874 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -113,7 +113,7 @@ func (sw *StreamWatcher) receive() { case io.ErrUnexpectedEOF: klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) default: - if net.IsProbableEOF(err) { + if net.IsProbableEOF(err) || net.IsTimeout(err) { klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err) } else { sw.result <- Event{ diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index be9c90c03d10..988aba3ed62a 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -44,6 +44,7 @@ const ( Added EventType = "ADDED" Modified EventType = "MODIFIED" Deleted EventType = "DELETED" + Bookmark EventType = "BOOKMARK" Error EventType = "ERROR" DefaultChanSize int32 = 100 @@ -57,6 +58,10 @@ type Event struct { // Object is: // * If Type is Added or Modified: the new state of the object. // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Bookmark: the object (instance of a type being watched) where + // only ResourceVersion field is set. On successful restart of watch from a + // bookmark resourceVersion, client is guaranteed to not get repeat event + // nor miss any events. // * If Type is Error: *api.Status is recommended; other types may make sense // depending on context. Object runtime.Object @@ -85,7 +90,7 @@ func (w emptyWatch) ResultChan() <-chan Event { // FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. type FakeWatcher struct { result chan Event - Stopped bool + stopped bool sync.Mutex } @@ -105,24 +110,24 @@ func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher { func (f *FakeWatcher) Stop() { f.Lock() defer f.Unlock() - if !f.Stopped { + if !f.stopped { klog.V(4).Infof("Stopping fake watcher.") close(f.result) - f.Stopped = true + f.stopped = true } } func (f *FakeWatcher) IsStopped() bool { f.Lock() defer f.Unlock() - return f.Stopped + return f.stopped } // Reset prepares the watcher to be reused. func (f *FakeWatcher) Reset() { f.Lock() defer f.Unlock() - f.Stopped = false + f.stopped = false f.result = make(chan Event) } diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml index 0f508dae66ed..5677664c2183 100644 --- a/vendor/k8s.io/klog/.travis.yml +++ b/vendor/k8s.io/klog/.travis.yml @@ -5,11 +5,12 @@ go: - 1.9.x - 1.10.x - 1.11.x + - 1.12.x script: - go get -t -v ./... 
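A hedged sketch of how the new validation helpers behave; the field paths and sample values are made up for illustration, and each call returns a field.ErrorList (empty on success):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/validation"
        "k8s.io/apimachinery/pkg/util/validation/field"
    )

    func main() {
        fldPath := field.NewPath("spec", "hostname")

        // At least two dot-separated segments; a trailing dot is accepted.
        fmt.Println(validation.IsFullyQualifiedDomainName(fldPath, "vault.example.com.")) // []
        fmt.Println(validation.IsFullyQualifiedDomainName(fldPath, "localhost"))          // one Invalid error

        // Domain-prefixed paths such as "acme.io/foo" split on the first "/".
        fmt.Println(validation.IsDomainPrefixedPath(field.NewPath("spec", "class"), "acme.io/foo")) // []

        // IsValidIPv6Address rejects anything that parses as IPv4.
        fmt.Println(validation.IsValidIPv6Address(field.NewPath("spec", "ip"), "10.0.0.1")) // one Invalid error
    }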
- diff -u <(echo -n) <(gofmt -d .) - diff -u <(echo -n) <(golint $(go list -e ./...)) - - go tool vet . + - go tool vet . || go vet . - go test -v -race ./... install: - go get golang.org/x/lint/golint diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md index 6cb6d1683752..841468b4b65c 100644 --- a/vendor/k8s.io/klog/README.md +++ b/vendor/k8s.io/klog/README.md @@ -1,7 +1,27 @@ klog ==== -klog is a permanant fork of https://github.com/golang/glog. original README from glog is below +klog is a permanent fork of https://github.com/golang/glog. + +## Why was klog created? + +The decision to create klog was one that wasn't made lightly, but it was necessary due to some +drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README: + +> The code in this repo [...] is not itself under development + +This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below: + + * `glog` [presents a lot "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented. + * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it + * A long term goal is to implement a logging interface that allows us to add context, change output format, etc. + +Historical context is available here: + + * https://github.com/kubernetes/kubernetes/issues/61006 + * https://github.com/kubernetes/kubernetes/issues/70264 + * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ + * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ ---- @@ -11,7 +31,7 @@ How to use klog - Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags - You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) - If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) -- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md)) +- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) ### Coexisting with glog This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and syncronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. diff --git a/vendor/k8s.io/klog/SECURITY_CONTACTS b/vendor/k8s.io/klog/SECURITY_CONTACTS index 520ddb525754..6128a586995b 100644 --- a/vendor/k8s.io/klog/SECURITY_CONTACTS +++ b/vendor/k8s.io/klog/SECURITY_CONTACTS @@ -1,10 +1,10 @@ # Defined below are the security contacts for this repo. # -# They are the contact point for the Product Security Team to reach out +# They are the contact point for the Product Security Committee to reach out # to for triaging and handling of incoming issues. 
# # The below names agree to abide by the -# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) # and will be removed and replaced if they violate that agreement. # # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE diff --git a/vendor/k8s.io/klog/go.mod b/vendor/k8s.io/klog/go.mod new file mode 100644 index 000000000000..3877d8546ab9 --- /dev/null +++ b/vendor/k8s.io/klog/go.mod @@ -0,0 +1,5 @@ +module k8s.io/klog + +go 1.12 + +require github.com/go-logr/logr v0.1.0 diff --git a/vendor/k8s.io/klog/go.sum b/vendor/k8s.io/klog/go.sum new file mode 100644 index 000000000000..fb64d277a7af --- /dev/null +++ b/vendor/k8s.io/klog/go.sum @@ -0,0 +1,2 @@ +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go index 398cd1c5dbf0..2712ce0afc11 100644 --- a/vendor/k8s.io/klog/klog.go +++ b/vendor/k8s.io/klog/klog.go @@ -20,26 +20,26 @@ // // Basic examples: // -// glog.Info("Prepare to repel boarders") +// klog.Info("Prepare to repel boarders") // -// glog.Fatalf("Initialization failed: %s", err) +// klog.Fatalf("Initialization failed: %s", err) // // See the documentation for the V function for an explanation of these examples: // -// if glog.V(2) { -// glog.Info("Starting transaction...") +// if klog.V(2) { +// klog.Info("Starting transaction...") // } // -// glog.V(2).Infoln("Processed", nItems, "elements") +// klog.V(2).Infoln("Processed", nItems, "elements") // // Log output is buffered and written periodically using Flush. Programs // should call Flush before exiting to guarantee all log output is written. // -// By default, all log statements write to files in a temporary directory. +// By default, all log statements write to standard error. // This package provides several flags that modify this behavior. // As a result, flag.Parse must be called before any logging is done. // -// -logtostderr=false +// -logtostderr=true // Logs are written to standard error instead of to files. // -alsologtostderr=false // Logs are written to standard error as well as to files. @@ -78,6 +78,7 @@ import ( "fmt" "io" stdLog "log" + "math" "os" "path/filepath" "runtime" @@ -141,7 +142,7 @@ func (s *severity) Set(value string) error { if v, ok := severityByName(value); ok { threshold = v } else { - v, err := strconv.Atoi(value) + v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } @@ -225,7 +226,7 @@ func (l *Level) Get() interface{} { // Set is part of the flag.Value interface. func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) + v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } @@ -293,7 +294,7 @@ func (m *moduleSpec) Set(value string) error { return errVmoduleSyntax } pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) + v, err := strconv.ParseInt(patLev[1], 10, 32) if err != nil { return errors.New("syntax error: expect comma-separated list of filename=N") } @@ -395,25 +396,38 @@ type flushSyncWriter interface { io.Writer } +// init sets up the defaults and runs flushDaemon. func init() { - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - + logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. 
logging.setVState(0, nil, false) + logging.logDir = "" + logging.logFile = "" + logging.logFileMaxSizeMB = 1800 + logging.toStderr = true + logging.alsoToStderr = false + logging.skipHeaders = false + logging.addDirHeader = false + logging.skipLogHeaders = false go logging.flushDaemon() } -// InitFlags is for explicitly initializing the flags +// InitFlags is for explicitly initializing the flags. func InitFlags(flagset *flag.FlagSet) { if flagset == nil { flagset = flag.CommandLine } - flagset.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory") - flagset.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file") - flagset.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files") - flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") + + flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory") + flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file") + flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB, + "Defines the maximum size a log file can grow to. Unit is megabytes. "+ + "If the value is 0, the maximum file size is unlimited.") + flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") + flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") - flagset.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages") + flagset.BoolVar(&logging.skipHeaders, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header") + flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") + flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") @@ -471,8 +485,18 @@ type loggingT struct { // with the log-dir option. logFile string + // When logFile is specified, this limiter makes sure the logFile won't exceeds a certain size. When exceeds, the + // logFile will be cleaned up. If this value is 0, no size limitation will be applied to logFile. + logFileMaxSizeMB uint64 + // If true, do not add the prefix headers, useful when used with SetOutput skipHeaders bool + + // If true, do not add the headers to log files + skipLogHeaders bool + + // If true, add the file directory to the header + addDirHeader bool } // buffer holds a byte Buffer for reuse. The zero value is ready for use. @@ -558,9 +582,14 @@ func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { file = "???" 
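The klog changes above register new file-logging flags (log_file_max_size, add_dir_header, skip_log_headers) in InitFlags and default logtostderr to true. A minimal sketch of wiring this up in a program; the flag values and file path are illustrative assumptions:

    package main

    import (
        "flag"

        "k8s.io/klog"
    )

    func main() {
        // Register klog's flags on the default FlagSet; registration no longer
        // happens in an init() function.
        klog.InitFlags(nil)

        // Illustrative values: log to a single file capped at 100 MB and keep
        // writing to stderr as well.
        flag.Set("logtostderr", "false")
        flag.Set("alsologtostderr", "true")
        flag.Set("log_file", "/tmp/vault-autojoin.log")
        flag.Set("log_file_max_size", "100")
        flag.Parse()

        defer klog.Flush() // flush buffered output before exiting

        klog.V(2).Infoln("cloud auto-join example starting")
        klog.Info("Prepare to repel boarders")
    }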
line = 1 } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] + if slash := strings.LastIndex(file, "/"); slash >= 0 { + path := file + file = path[slash+1:] + if l.addDirHeader { + if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 { + file = path[dirsep+1:] + } + } } } return l.formatHeader(s, file, line), file, line @@ -709,6 +738,8 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() for s := fatalLog; s >= infoLog; s-- { rb := &redirectBuffer{ w: w, @@ -719,6 +750,8 @@ func SetOutput(w io.Writer) { // SetOutputBySeverity sets the output destination for specific severity func SetOutputBySeverity(name string, w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() sev, ok := severityByName(name) if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) @@ -744,24 +777,38 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { os.Stderr.Write(data) } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) + + if logging.logFile != "" { + // Since we are using a single log file, all of the items in l.file array + // will point to the same file, so just use one of them to write data. + if l.file[infoLog] == nil { + if err := l.createFiles(infoLog); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: l.file[infoLog].Write(data) + } else { + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } } } if s == fatalLog { @@ -800,7 +847,7 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo // timeoutFlush calls Flush and returns when it completes or after timeout // elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds +// by Flush may deadlock when klog.Fatal is called from a hook that holds // a lock. func timeoutFlush(timeout time.Duration) { done := make(chan bool, 1) @@ -811,7 +858,7 @@ func timeoutFlush(timeout time.Duration) { select { case <-done: case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) } } @@ -861,17 +908,32 @@ func (l *loggingT) exit(err error) { type syncBuffer struct { logger *loggingT *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file + maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. 
} func (sb *syncBuffer) Sync() error { return sb.file.Sync() } +// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. +func CalculateMaxSize() uint64 { + if logging.logFile != "" { + if logging.logFileMaxSizeMB == 0 { + // If logFileMaxSizeMB is zero, we don't have limitations on the log size. + return math.MaxUint64 + } + // Flag logFileMaxSizeMB is in MB for user convenience. + return logging.logFileMaxSizeMB * 1024 * 1024 + } + // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size. + return MaxSize +} + func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { + if sb.nbytes+uint64(len(p)) >= sb.maxbytes { if err := sb.rotateFile(time.Now(), false); err != nil { sb.logger.exit(err) } @@ -886,7 +948,7 @@ func (sb *syncBuffer) Write(p []byte) (n int, err error) { // rotateFile closes the syncBuffer's file and starts a new one. // The startup argument indicates whether this is the initial startup of klog. -// If startup is true, existing files are opened for apending instead of truncated. +// If startup is true, existing files are opened for appending instead of truncated. func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { if sb.file != nil { sb.Flush() @@ -901,6 +963,10 @@ func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + if sb.logger.skipLogHeaders { + return nil + } + // Write header. var buf bytes.Buffer fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) @@ -925,8 +991,9 @@ func (l *loggingT) createFiles(sev severity) error { // has already been created, we can stop. for s := sev; s >= infoLog && l.file[s] == nil; s-- { sb := &syncBuffer{ - logger: l, - sev: s, + logger: l, + sev: s, + maxbytes: CalculateMaxSize(), } if err := sb.rotateFile(now, true); err != nil { return err @@ -1047,9 +1114,9 @@ type Verbose bool // The returned value is a boolean of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either -// if glog.V(2) { glog.Info("log this") } +// if klog.V(2) { klog.Info("log this") } // or -// glog.V(2).Info("log this") +// klog.V(2).Info("log this") // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // @@ -1123,7 +1190,7 @@ func InfoDepth(depth int, args ...interface{}) { } // Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { logging.println(infoLog, args...) } @@ -1147,7 +1214,7 @@ func WarningDepth(depth int, args ...interface{}) { } // Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { logging.println(warningLog, args...) } @@ -1171,7 +1238,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. 
func Errorln(args ...interface{}) { logging.println(errorLog, args...) } @@ -1197,7 +1264,7 @@ func FatalDepth(depth int, args ...interface{}) { // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { logging.println(fatalLog, args...) } diff --git a/vendor/k8s.io/klog/klog_file.go b/vendor/k8s.io/klog/klog_file.go index 1c4897f4fdc8..e4010ad4df06 100644 --- a/vendor/k8s.io/klog/klog_file.go +++ b/vendor/k8s.io/klog/klog_file.go @@ -98,7 +98,7 @@ var onceLogDirs sync.Once // successfully, create also attempts to update the symlink for that tag, ignoring // errors. // The startup argument indicates whether this is the initial startup of klog. -// If startup is true, existing files are opened for apending instead of truncated. +// If startup is true, existing files are opened for appending instead of truncated. func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) { if logging.logFile != "" { f, err := openOrCreate(logging.logFile, startup) diff --git a/vendor/modules.txt b/vendor/modules.txt index 116d5ed922ab..9bcfea13a6c1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -23,10 +23,11 @@ cloud.google.com/go/storage code.cloudfoundry.org/gofileutils/fileutils # github.com/Azure/azure-pipeline-go v0.2.2 github.com/Azure/azure-pipeline-go/pipeline -# github.com/Azure/azure-sdk-for-go v36.2.0+incompatible +# github.com/Azure/azure-sdk-for-go v44.0.0+incompatible github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault +github.com/Azure/azure-sdk-for-go/services/network/mgmt/2015-06-15/network github.com/Azure/azure-sdk-for-go/services/preview/authorization/mgmt/2018-01-01-preview/authorization github.com/Azure/azure-sdk-for-go/version # github.com/Azure/azure-storage-blob-go v0.10.0 @@ -34,24 +35,26 @@ github.com/Azure/azure-storage-blob-go/azblob # github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm -# github.com/Azure/go-autorest/autorest v0.10.1 +# github.com/Azure/go-autorest v14.2.0+incompatible +github.com/Azure/go-autorest +# github.com/Azure/go-autorest/autorest v0.11.0 github.com/Azure/go-autorest/autorest github.com/Azure/go-autorest/autorest/azure -# github.com/Azure/go-autorest/autorest/adal v0.8.3 +# github.com/Azure/go-autorest/autorest/adal v0.9.0 github.com/Azure/go-autorest/autorest/adal -# github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 +# github.com/Azure/go-autorest/autorest/azure/auth v0.5.0 github.com/Azure/go-autorest/autorest/azure/auth -# github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 +# github.com/Azure/go-autorest/autorest/azure/cli v0.4.0 github.com/Azure/go-autorest/autorest/azure/cli -# github.com/Azure/go-autorest/autorest/date v0.2.0 +# github.com/Azure/go-autorest/autorest/date v0.3.0 github.com/Azure/go-autorest/autorest/date -# github.com/Azure/go-autorest/autorest/to v0.3.0 +# github.com/Azure/go-autorest/autorest/to v0.4.0 github.com/Azure/go-autorest/autorest/to -# github.com/Azure/go-autorest/autorest/validation v0.2.0 +# github.com/Azure/go-autorest/autorest/validation v0.3.0 
github.com/Azure/go-autorest/autorest/validation -# github.com/Azure/go-autorest/logger v0.1.0 +# github.com/Azure/go-autorest/logger v0.2.0 github.com/Azure/go-autorest/logger -# github.com/Azure/go-autorest/tracing v0.5.0 +# github.com/Azure/go-autorest/tracing v0.6.0 github.com/Azure/go-autorest/tracing # github.com/BurntSushi/toml v0.3.1 github.com/BurntSushi/toml @@ -234,8 +237,14 @@ github.com/denisenkom/go-mssqldb github.com/denisenkom/go-mssqldb/internal/cp github.com/denisenkom/go-mssqldb/internal/decimal github.com/denisenkom/go-mssqldb/internal/querytext +# github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661 +github.com/denverdino/aliyungo/common +github.com/denverdino/aliyungo/ecs +github.com/denverdino/aliyungo/util # github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/dgrijalva/jwt-go +# github.com/digitalocean/godo v1.7.5 +github.com/digitalocean/godo # github.com/dimchansky/utfbom v1.1.0 github.com/dimchansky/utfbom # github.com/docker/distribution v2.7.1+incompatible @@ -357,12 +366,23 @@ github.com/google/go-metrics-stackdriver github.com/google/go-metrics-stackdriver/vault # github.com/google/go-querystring v1.0.0 github.com/google/go-querystring/query -# github.com/google/gofuzz v1.0.0 +# github.com/google/gofuzz v1.1.0 github.com/google/gofuzz # github.com/google/uuid v1.1.1 github.com/google/uuid # github.com/googleapis/gax-go/v2 v2.0.5 github.com/googleapis/gax-go/v2 +# github.com/gophercloud/gophercloud v0.1.0 +github.com/gophercloud/gophercloud +github.com/gophercloud/gophercloud/openstack +github.com/gophercloud/gophercloud/openstack/compute/v2/flavors +github.com/gophercloud/gophercloud/openstack/compute/v2/images +github.com/gophercloud/gophercloud/openstack/compute/v2/servers +github.com/gophercloud/gophercloud/openstack/identity/v2/tenants +github.com/gophercloud/gophercloud/openstack/identity/v2/tokens +github.com/gophercloud/gophercloud/openstack/identity/v3/tokens +github.com/gophercloud/gophercloud/openstack/utils +github.com/gophercloud/gophercloud/pagination # github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 github.com/gorhill/cronexpr # github.com/gorilla/websocket v1.4.1 @@ -388,6 +408,22 @@ github.com/hashicorp/errwrap github.com/hashicorp/go-bindata # github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-cleanhttp +# github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f +github.com/hashicorp/go-discover +github.com/hashicorp/go-discover/provider/aliyun +github.com/hashicorp/go-discover/provider/aws +github.com/hashicorp/go-discover/provider/azure +github.com/hashicorp/go-discover/provider/digitalocean +github.com/hashicorp/go-discover/provider/gce +github.com/hashicorp/go-discover/provider/linode +github.com/hashicorp/go-discover/provider/mdns +github.com/hashicorp/go-discover/provider/os +github.com/hashicorp/go-discover/provider/packet +github.com/hashicorp/go-discover/provider/scaleway +github.com/hashicorp/go-discover/provider/softlayer +github.com/hashicorp/go-discover/provider/tencentcloud +github.com/hashicorp/go-discover/provider/triton +github.com/hashicorp/go-discover/provider/vsphere # github.com/hashicorp/go-gcp-common v0.6.0 github.com/hashicorp/go-gcp-common/gcputil # github.com/hashicorp/go-hclog v0.14.1 @@ -447,6 +483,8 @@ github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token # github.com/hashicorp/logutils v1.0.0 github.com/hashicorp/logutils +# github.com/hashicorp/mdns v1.0.1 +github.com/hashicorp/mdns # github.com/hashicorp/nomad/api 
v0.0.0-20191220223628-edc62acd919d github.com/hashicorp/nomad/api github.com/hashicorp/nomad/api/contexts @@ -572,6 +610,8 @@ github.com/hashicorp/vault/sdk/queue github.com/hashicorp/vault/sdk/testing/stepwise github.com/hashicorp/vault/sdk/testing/stepwise/environments/docker github.com/hashicorp/vault/sdk/version +# github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 +github.com/hashicorp/vic/pkg/vsphere/tags # github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d github.com/hashicorp/yamux # github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4 @@ -642,6 +682,7 @@ github.com/jmespath/go-jmespath github.com/joyent/triton-go github.com/joyent/triton-go/authentication github.com/joyent/triton-go/client +github.com/joyent/triton-go/compute github.com/joyent/triton-go/errors github.com/joyent/triton-go/storage # github.com/json-iterator/go v1.1.10 @@ -676,6 +717,8 @@ github.com/kr/text github.com/lib/pq github.com/lib/pq/oid github.com/lib/pq/scram +# github.com/linode/linodego v0.7.1 +github.com/linode/linodego # github.com/mattn/go-colorable v0.1.6 github.com/mattn/go-colorable # github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d @@ -690,6 +733,8 @@ github.com/matttproud/golang_protobuf_extensions/pbutil github.com/mholt/archiver # github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5 github.com/michaelklishin/rabbit-hole +# github.com/miekg/dns v1.1.15 +github.com/miekg/dns # github.com/mitchellh/cli v1.1.1 github.com/mitchellh/cli # github.com/mitchellh/copystructure v1.0.0 @@ -722,6 +767,8 @@ github.com/mwielbut/pointy github.com/natefinch/atomic # github.com/ncw/swift v1.0.47 github.com/ncw/swift +# github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 +github.com/nicolai86/scaleway-sdk # github.com/nwaples/rardecode v1.1.0 github.com/nwaples/rardecode # github.com/oklog/run v1.0.0 @@ -771,6 +818,8 @@ github.com/ory/dockertest/docker/types/network github.com/ory/dockertest/docker/types/registry github.com/ory/dockertest/docker/types/strslice github.com/ory/dockertest/docker/types/versions +# github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c +github.com/packethost/packngo # github.com/patrickmn/go-cache v2.1.0+incompatible github.com/patrickmn/go-cache # github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 @@ -810,6 +859,8 @@ github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util # github.com/rboyer/safeio v0.2.1 github.com/rboyer/safeio +# github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 +github.com/renier/xmlrpc # github.com/ryanuber/columnize v2.1.0+incompatible github.com/ryanuber/columnize # github.com/ryanuber/go-glob v1.0.0 @@ -833,12 +884,25 @@ github.com/shirou/gopsutil/net github.com/shirou/gopsutil/process # github.com/sirupsen/logrus v1.6.0 github.com/sirupsen/logrus +# github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d +github.com/softlayer/softlayer-go/config +github.com/softlayer/softlayer-go/datatypes +github.com/softlayer/softlayer-go/filter +github.com/softlayer/softlayer-go/services +github.com/softlayer/softlayer-go/session +github.com/softlayer/softlayer-go/sl # github.com/stretchr/objx v0.2.0 github.com/stretchr/objx # github.com/stretchr/testify v1.6.1 github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require +# github.com/tencentcloud/tencentcloud-sdk-go v3.0.171+incompatible +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common 
+github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm/v20170312 # github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 github.com/tv42/httpunix # github.com/ulikunitz/xz v0.5.7 @@ -846,6 +910,23 @@ github.com/ulikunitz/xz github.com/ulikunitz/xz/internal/hash github.com/ulikunitz/xz/internal/xlog github.com/ulikunitz/xz/lzma +# github.com/vmware/govmomi v0.18.0 +github.com/vmware/govmomi +github.com/vmware/govmomi/find +github.com/vmware/govmomi/list +github.com/vmware/govmomi/nfc +github.com/vmware/govmomi/object +github.com/vmware/govmomi/property +github.com/vmware/govmomi/session +github.com/vmware/govmomi/task +github.com/vmware/govmomi/vim25 +github.com/vmware/govmomi/vim25/debug +github.com/vmware/govmomi/vim25/methods +github.com/vmware/govmomi/vim25/mo +github.com/vmware/govmomi/vim25/progress +github.com/vmware/govmomi/vim25/soap +github.com/vmware/govmomi/vim25/types +github.com/vmware/govmomi/vim25/xml # github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c github.com/xdg/scram # github.com/xdg/stringprep v1.0.0 @@ -972,6 +1053,7 @@ golang.org/x/lint/golint golang.org/x/mod/module golang.org/x/mod/semver # golang.org/x/net v0.0.0-20200625001655-4c5254603344 +golang.org/x/net/bpf golang.org/x/net/context golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts @@ -979,9 +1061,14 @@ golang.org/x/net/http/httpproxy golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna +golang.org/x/net/internal/iana +golang.org/x/net/internal/socket golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries +golang.org/x/net/ipv4 +golang.org/x/net/ipv6 golang.org/x/net/proxy +golang.org/x/net/publicsuffix golang.org/x/net/trace # golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/oauth2 @@ -1001,6 +1088,7 @@ golang.org/x/sys/windows golang.org/x/sys/windows/registry # golang.org/x/text v0.3.2 golang.org/x/text/encoding +golang.org/x/text/encoding/charmap golang.org/x/text/encoding/internal golang.org/x/text/encoding/internal/identifier golang.org/x/text/encoding/unicode @@ -1200,6 +1288,8 @@ gopkg.in/mgo.v2/internal/sasl gopkg.in/mgo.v2/internal/scram # gopkg.in/ory-am/dockertest.v3 v3.3.4 gopkg.in/ory-am/dockertest.v3 +# gopkg.in/resty.v1 v1.12.0 +gopkg.in/resty.v1 # gopkg.in/square/go-jose.v2 v2.5.1 gopkg.in/square/go-jose.v2 gopkg.in/square/go-jose.v2/cipher @@ -1239,9 +1329,9 @@ honnef.co/go/tools/staticcheck honnef.co/go/tools/stylecheck honnef.co/go/tools/unused honnef.co/go/tools/version -# k8s.io/api v0.0.0-20190409092523-d687e77c8ae9 +# k8s.io/api v0.18.2 k8s.io/api/authentication/v1 -# k8s.io/apimachinery v0.0.0-20190409092423-760d1845f48b +# k8s.io/apimachinery v0.18.2 k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/apis/meta/v1 @@ -1264,8 +1354,10 @@ k8s.io/apimachinery/pkg/util/validation k8s.io/apimachinery/pkg/util/validation/field k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/klog v0.0.0-20190306015804-8e90cee79f82 +# k8s.io/klog v1.0.0 k8s.io/klog # layeh.com/radius v0.0.0-20190322222518-890bc1058917 layeh.com/radius layeh.com/radius/rfc2865 +# sigs.k8s.io/structured-merge-diff/v3 v3.0.0 +sigs.k8s.io/structured-merge-diff/v3/value diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE 
b/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE new file mode 100644 index 000000000000..8dada3edaf50 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go new file mode 100644 index 000000000000..f70cd41674bb --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go @@ -0,0 +1,203 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +// Allocator provides a value object allocation strategy. +// Value objects can be allocated by passing an allocator to the "Using" +// receiver functions on the value interfaces, e.g. Map.ZipUsing(allocator, ...). +// Value objects returned from "Using" functions should be given back to the allocator +// once longer needed by calling Allocator.Free(Value). +type Allocator interface { + // Free gives the allocator back any value objects returned by the "Using" + // receiver functions on the value interfaces. + // interface{} may be any of: Value, Map, List or Range. + Free(interface{}) + + // The unexported functions are for "Using" receiver functions of the value types + // to request what they need from the allocator. + allocValueUnstructured() *valueUnstructured + allocListUnstructuredRange() *listUnstructuredRange + allocValueReflect() *valueReflect + allocMapReflect() *mapReflect + allocStructReflect() *structReflect + allocListReflect() *listReflect + allocListReflectRange() *listReflectRange +} + +// HeapAllocator simply allocates objects to the heap. It is the default +// allocator used receiver functions on the value interfaces that do not accept +// an allocator and should be used whenever allocating objects that will not +// be given back to an allocator by calling Allocator.Free(Value). +var HeapAllocator = &heapAllocator{} + +type heapAllocator struct{} + +func (p *heapAllocator) allocValueUnstructured() *valueUnstructured { + return &valueUnstructured{} +} + +func (p *heapAllocator) allocListUnstructuredRange() *listUnstructuredRange { + return &listUnstructuredRange{vv: &valueUnstructured{}} +} + +func (p *heapAllocator) allocValueReflect() *valueReflect { + return &valueReflect{} +} + +func (p *heapAllocator) allocStructReflect() *structReflect { + return &structReflect{} +} + +func (p *heapAllocator) allocMapReflect() *mapReflect { + return &mapReflect{} +} + +func (p *heapAllocator) allocListReflect() *listReflect { + return &listReflect{} +} + +func (p *heapAllocator) allocListReflectRange() *listReflectRange { + return &listReflectRange{vr: &valueReflect{}} +} + +func (p *heapAllocator) Free(_ interface{}) {} + +// NewFreelistAllocator creates freelist based allocator. 
+// This allocator provides fast allocation and freeing of short lived value objects. +// +// The freelists are bounded in size by freelistMaxSize. If more than this amount of value objects is +// allocated at once, the excess will be returned to the heap for garbage collection when freed. +// +// This allocator is unsafe and must not be accessed concurrently by goroutines. +// +// This allocator works well for traversal of value data trees. Typical usage is to acquire +// a freelist at the beginning of the traversal and use it through out +// for all temporary value access. +func NewFreelistAllocator() Allocator { + return &freelistAllocator{ + valueUnstructured: &freelist{new: func() interface{} { + return &valueUnstructured{} + }}, + listUnstructuredRange: &freelist{new: func() interface{} { + return &listUnstructuredRange{vv: &valueUnstructured{}} + }}, + valueReflect: &freelist{new: func() interface{} { + return &valueReflect{} + }}, + mapReflect: &freelist{new: func() interface{} { + return &mapReflect{} + }}, + structReflect: &freelist{new: func() interface{} { + return &structReflect{} + }}, + listReflect: &freelist{new: func() interface{} { + return &listReflect{} + }}, + listReflectRange: &freelist{new: func() interface{} { + return &listReflectRange{vr: &valueReflect{}} + }}, + } +} + +// Bound memory usage of freelists. This prevents the processing of very large lists from leaking memory. +// This limit is large enough for endpoints objects containing 1000 IP address entries. Freed objects +// that don't fit into the freelist are orphaned on the heap to be garbage collected. +const freelistMaxSize = 1000 + +type freelistAllocator struct { + valueUnstructured *freelist + listUnstructuredRange *freelist + valueReflect *freelist + mapReflect *freelist + structReflect *freelist + listReflect *freelist + listReflectRange *freelist +} + +type freelist struct { + list []interface{} + new func() interface{} +} + +func (f *freelist) allocate() interface{} { + var w2 interface{} + if n := len(f.list); n > 0 { + w2, f.list = f.list[n-1], f.list[:n-1] + } else { + w2 = f.new() + } + return w2 +} + +func (f *freelist) free(v interface{}) { + if len(f.list) < freelistMaxSize { + f.list = append(f.list, v) + } +} + +func (w *freelistAllocator) Free(value interface{}) { + switch v := value.(type) { + case *valueUnstructured: + v.Value = nil // don't hold references to unstructured objects + w.valueUnstructured.free(v) + case *listUnstructuredRange: + v.vv.Value = nil // don't hold references to unstructured objects + w.listUnstructuredRange.free(v) + case *valueReflect: + v.ParentMapKey = nil + v.ParentMap = nil + w.valueReflect.free(v) + case *mapReflect: + w.mapReflect.free(v) + case *structReflect: + w.structReflect.free(v) + case *listReflect: + w.listReflect.free(v) + case *listReflectRange: + v.vr.ParentMapKey = nil + v.vr.ParentMap = nil + w.listReflectRange.free(v) + } +} + +func (w *freelistAllocator) allocValueUnstructured() *valueUnstructured { + return w.valueUnstructured.allocate().(*valueUnstructured) +} + +func (w *freelistAllocator) allocListUnstructuredRange() *listUnstructuredRange { + return w.listUnstructuredRange.allocate().(*listUnstructuredRange) +} + +func (w *freelistAllocator) allocValueReflect() *valueReflect { + return w.valueReflect.allocate().(*valueReflect) +} + +func (w *freelistAllocator) allocStructReflect() *structReflect { + return w.structReflect.allocate().(*structReflect) +} + +func (w *freelistAllocator) allocMapReflect() *mapReflect { + return 
w.mapReflect.allocate().(*mapReflect) +} + +func (w *freelistAllocator) allocListReflect() *listReflect { + return w.listReflect.allocate().(*listReflect) +} + +func (w *freelistAllocator) allocListReflectRange() *listReflectRange { + return w.listReflectRange.allocate().(*listReflectRange) +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go new file mode 100644 index 000000000000..84d7f0f3fc23 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package value defines types for an in-memory representation of yaml or json +// objects, organized for convenient comparison with a schema (as defined by +// the sibling schema package). Functions for reading and writing the objects +// are also provided. +package value diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go new file mode 100644 index 000000000000..be3c672494ee --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go @@ -0,0 +1,97 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "sort" + "strings" +) + +// Field is an individual key-value pair. +type Field struct { + Name string + Value Value +} + +// FieldList is a list of key-value pairs. Each field is expected to +// have a different name. +type FieldList []Field + +// Sort sorts the field list by Name. +func (f FieldList) Sort() { + if len(f) < 2 { + return + } + if len(f) == 2 { + if f[1].Name < f[0].Name { + f[0], f[1] = f[1], f[0] + } + return + } + sort.SliceStable(f, func(i, j int) bool { + return f[i].Name < f[j].Name + }) +} + +// Less compares two lists lexically. +func (f FieldList) Less(rhs FieldList) bool { + return f.Compare(rhs) == -1 +} + +// Compare compares two lists lexically. The result will be 0 if f==rhs, -1 +// if f < rhs, and +1 if f > rhs. +func (f FieldList) Compare(rhs FieldList) int { + i := 0 + for { + if i >= len(f) && i >= len(rhs) { + // Maps are the same length and all items are equal. + return 0 + } + if i >= len(f) { + // F is shorter. + return -1 + } + if i >= len(rhs) { + // RHS is shorter. + return 1 + } + if c := strings.Compare(f[i].Name, rhs[i].Name); c != 0 { + return c + } + if c := Compare(f[i].Value, rhs[i].Value); c != 0 { + return c + } + // The items are equal; continue. 
+ i++ + } +} + +// Equals returns true if the two fieldslist are equals, false otherwise. +func (f FieldList) Equals(rhs FieldList) bool { + if len(f) != len(rhs) { + return false + } + for i := range f { + if f[i].Name != rhs[i].Name { + return false + } + if !Equals(f[i].Value, rhs[i].Value) { + return false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go new file mode 100644 index 000000000000..d4adb8fc9d25 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go @@ -0,0 +1,91 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "fmt" + "reflect" + "strings" +) + +// TODO: This implements the same functionality as https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L236 +// but is based on the highly efficient approach from https://golang.org/src/encoding/json/encode.go + +func lookupJsonTags(f reflect.StructField) (name string, omit bool, inline bool, omitempty bool) { + tag := f.Tag.Get("json") + if tag == "-" { + return "", true, false, false + } + name, opts := parseTag(tag) + if name == "" { + name = f.Name + } + return name, false, opts.Contains("inline"), opts.Contains("omitempty") +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Chan, reflect.Func: + panic(fmt.Sprintf("unsupported type: %v", v.Type())) + } + return false +} + +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. 
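Editor's note: a hedged, illustrative sketch of how the tag helpers above behave; it is not part of the vendored file. Because lookupJsonTags, parseTag and tagOptions (whose Contains method is implemented just below) are unexported, such a sketch would have to live inside package value itself, for example in a _test.go file; the demo struct and its tags are invented for the example.

// Illustrative sketch (editor's addition), not part of the vendored file.
package value

import (
	"reflect"
	"testing"
)

func TestJsonTagLookup(t *testing.T) {
	type demo struct {
		Name     string            `json:"name,omitempty"`
		Skipped  string            `json:"-"`
		Embedded map[string]string `json:",inline"`
	}
	typ := reflect.TypeOf(demo{})

	// "name,omitempty" -> JSON name "name" with omitempty set.
	name, omit, inline, omitempty := lookupJsonTags(typ.Field(0))
	if name != "name" || omit || inline || !omitempty {
		t.Fatalf("unexpected parse: %q %v %v %v", name, omit, inline, omitempty)
	}

	// "-" -> the field is skipped entirely.
	if _, omit, _, _ := lookupJsonTags(typ.Field(1)); !omit {
		t.Fatal(`expected json:"-" to omit the field`)
	}

	// ",inline" -> the name falls back to the Go field name and inline is set.
	if name, _, inline, _ := lookupJsonTags(typ.Field(2)); name != "Embedded" || !inline {
		t.Fatalf("unexpected inline parse: %q %v", name, inline)
	}

	// tagOptions.Contains does a plain comma-separated scan of the options.
	if !tagOptions("omitempty,inline").Contains("inline") {
		t.Fatal("expected Contains to report inline")
	}
}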
+func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go new file mode 100644 index 000000000000..0748f18e82e7 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go @@ -0,0 +1,139 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +// List represents a list object. +type List interface { + // Length returns how many items can be found in the map. + Length() int + // At returns the item at the given position in the map. It will + // panic if the index is out of range. + At(int) Value + // AtUsing uses the provided allocator and returns the item at the given + // position in the map. It will panic if the index is out of range. + // The returned Value should be given back to the Allocator when no longer needed + // by calling Allocator.Free(Value). + AtUsing(Allocator, int) Value + // Range returns a ListRange for iterating over the items in the list. + Range() ListRange + // RangeUsing uses the provided allocator and returns a ListRange for + // iterating over the items in the list. + // The returned Range should be given back to the Allocator when no longer needed + // by calling Allocator.Free(Value). + RangeUsing(Allocator) ListRange + // Equals compares the two lists, and return true if they are the same, false otherwise. + // Implementations can use ListEquals as a general implementation for this methods. + Equals(List) bool + // EqualsUsing uses the provided allocator and compares the two lists, and return true if + // they are the same, false otherwise. Implementations can use ListEqualsUsing as a general + // implementation for this methods. + EqualsUsing(Allocator, List) bool +} + +// ListRange represents a single iteration across the items of a list. +type ListRange interface { + // Next increments to the next item in the range, if there is one, and returns true, or returns false if there are no more items. + Next() bool + // Item returns the index and value of the current item in the range. or panics if there is no current item. + // For efficiency, Item may reuse the values returned by previous Item calls. Callers should be careful avoid holding + // pointers to the value returned by Item() that escape the iteration loop since they become invalid once either + // Item() or Allocator.Free() is called. + Item() (index int, value Value) +} + +var EmptyRange = &emptyRange{} + +type emptyRange struct{} + +func (_ *emptyRange) Next() bool { + return false +} + +func (_ *emptyRange) Item() (index int, value Value) { + panic("Item called on empty ListRange") +} + +// ListEquals compares two lists lexically. 
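Editor's note: a minimal usage sketch (not part of the vendored file) for the List, ListRange and allocator APIs above; it assumes the exported NewValueInterface constructor and the Value.AsList accessor defined elsewhere in this vendored package.

// Illustrative sketch (editor's addition), not part of the vendored file.
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	lhs := value.NewValueInterface([]interface{}{"a", "b", int64(1)}).AsList()
	rhs := value.NewValueInterface([]interface{}{"a", "b", int64(2)}).AsList()

	// ListEquals and ListCompare walk both lists item by item via the HeapAllocator.
	fmt.Println(value.ListEquals(lhs, rhs))  // false
	fmt.Println(value.ListCompare(lhs, rhs)) // -1: the third items differ and 1 < 2

	// On hot paths a freelist allocator lets the range and its reusable Value be recycled.
	a := value.NewFreelistAllocator()
	r := lhs.RangeUsing(a)
	for r.Next() {
		_, item := r.Item()
		fmt.Println(item.Unstructured())
	}
	a.Free(r) // return the range (and its reusable Value) to the freelist
}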
+// WARN: This is a naive implementation, calling lhs.Equals(rhs) is typically the most efficient. +func ListEquals(lhs, rhs List) bool { + return ListEqualsUsing(HeapAllocator, lhs, rhs) +} + +// ListEqualsUsing uses the provided allocator and compares two lists lexically. +// WARN: This is a naive implementation, calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient. +func ListEqualsUsing(a Allocator, lhs, rhs List) bool { + if lhs.Length() != rhs.Length() { + return false + } + + lhsRange := lhs.RangeUsing(a) + defer a.Free(lhsRange) + rhsRange := rhs.RangeUsing(a) + defer a.Free(rhsRange) + + for lhsRange.Next() && rhsRange.Next() { + _, lv := lhsRange.Item() + _, rv := rhsRange.Item() + if !EqualsUsing(a, lv, rv) { + return false + } + } + return true +} + +// ListLess compares two lists lexically. +func ListLess(lhs, rhs List) bool { + return ListCompare(lhs, rhs) == -1 +} + +// ListCompare compares two lists lexically. The result will be 0 if l==rhs, -1 +// if l < rhs, and +1 if l > rhs. +func ListCompare(lhs, rhs List) int { + return ListCompareUsing(HeapAllocator, lhs, rhs) +} + +// ListCompareUsing uses the provided allocator and compares two lists lexically. The result will be 0 if l==rhs, -1 +// if l < rhs, and +1 if l > rhs. +func ListCompareUsing(a Allocator, lhs, rhs List) int { + lhsRange := lhs.RangeUsing(a) + defer a.Free(lhsRange) + rhsRange := rhs.RangeUsing(a) + defer a.Free(rhsRange) + + for { + lhsOk := lhsRange.Next() + rhsOk := rhsRange.Next() + if !lhsOk && !rhsOk { + // Lists are the same length and all items are equal. + return 0 + } + if !lhsOk { + // LHS is shorter. + return -1 + } + if !rhsOk { + // RHS is shorter. + return 1 + } + _, lv := lhsRange.Item() + _, rv := rhsRange.Item() + if c := CompareUsing(a, lv, rv); c != 0 { + return c + } + // The items are equal; continue. + } +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go new file mode 100644 index 000000000000..197d4c921d4a --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package value + +import ( + "reflect" +) + +type listReflect struct { + Value reflect.Value +} + +func (r listReflect) Length() int { + val := r.Value + return val.Len() +} + +func (r listReflect) At(i int) Value { + val := r.Value + return mustWrapValueReflect(val.Index(i), nil, nil) +} + +func (r listReflect) AtUsing(a Allocator, i int) Value { + val := r.Value + return a.allocValueReflect().mustReuse(val.Index(i), nil, nil, nil) +} + +func (r listReflect) Unstructured() interface{} { + l := r.Length() + result := make([]interface{}, l) + for i := 0; i < l; i++ { + result[i] = r.At(i).Unstructured() + } + return result +} + +func (r listReflect) Range() ListRange { + return r.RangeUsing(HeapAllocator) +} + +func (r listReflect) RangeUsing(a Allocator) ListRange { + length := r.Value.Len() + if length == 0 { + return EmptyRange + } + rr := a.allocListReflectRange() + rr.list = r.Value + rr.i = -1 + rr.entry = TypeReflectEntryOf(r.Value.Type().Elem()) + return rr +} + +func (r listReflect) Equals(other List) bool { + return r.EqualsUsing(HeapAllocator, other) +} +func (r listReflect) EqualsUsing(a Allocator, other List) bool { + if otherReflectList, ok := other.(*listReflect); ok { + return reflect.DeepEqual(r.Value.Interface(), otherReflectList.Value.Interface()) + } + return ListEqualsUsing(a, &r, other) +} + +type listReflectRange struct { + list reflect.Value + vr *valueReflect + i int + entry *TypeReflectCacheEntry +} + +func (r *listReflectRange) Next() bool { + r.i += 1 + return r.i < r.list.Len() +} + +func (r *listReflectRange) Item() (index int, value Value) { + if r.i < 0 { + panic("Item() called before first calling Next()") + } + if r.i >= r.list.Len() { + panic("Item() called on ListRange with no more items") + } + v := r.list.Index(r.i) + return r.i, r.vr.mustReuse(v, r.entry, nil, nil) +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go new file mode 100644 index 000000000000..64cd8e7c0c4f --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package value + +type listUnstructured []interface{} + +func (l listUnstructured) Length() int { + return len(l) +} + +func (l listUnstructured) At(i int) Value { + return NewValueInterface(l[i]) +} + +func (l listUnstructured) AtUsing(a Allocator, i int) Value { + return a.allocValueUnstructured().reuse(l[i]) +} + +func (l listUnstructured) Equals(other List) bool { + return l.EqualsUsing(HeapAllocator, other) +} + +func (l listUnstructured) EqualsUsing(a Allocator, other List) bool { + return ListEqualsUsing(a, &l, other) +} + +func (l listUnstructured) Range() ListRange { + return l.RangeUsing(HeapAllocator) +} + +func (l listUnstructured) RangeUsing(a Allocator) ListRange { + if len(l) == 0 { + return EmptyRange + } + r := a.allocListUnstructuredRange() + r.list = l + r.i = -1 + return r +} + +type listUnstructuredRange struct { + list listUnstructured + vv *valueUnstructured + i int +} + +func (r *listUnstructuredRange) Next() bool { + r.i += 1 + return r.i < len(r.list) +} + +func (r *listUnstructuredRange) Item() (index int, value Value) { + if r.i < 0 { + panic("Item() called before first calling Next()") + } + if r.i >= len(r.list) { + panic("Item() called on ListRange with no more items") + } + return r.i, r.vv.reuse(r.list[r.i]) +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go new file mode 100644 index 000000000000..168b9fa082b3 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go @@ -0,0 +1,270 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "sort" +) + +// Map represents a Map or go structure. +type Map interface { + // Set changes or set the value of the given key. + Set(key string, val Value) + // Get returns the value for the given key, if present, or (nil, false) otherwise. + Get(key string) (Value, bool) + // GetUsing uses the provided allocator and returns the value for the given key, + // if present, or (nil, false) otherwise. + // The returned Value should be given back to the Allocator when no longer needed + // by calling Allocator.Free(Value). + GetUsing(a Allocator, key string) (Value, bool) + // Has returns true if the key is present, or false otherwise. + Has(key string) bool + // Delete removes the key from the map. + Delete(key string) + // Equals compares the two maps, and return true if they are the same, false otherwise. + // Implementations can use MapEquals as a general implementation for this methods. + Equals(other Map) bool + // EqualsUsing uses the provided allocator and compares the two maps, and return true if + // they are the same, false otherwise. Implementations can use MapEqualsUsing as a general + // implementation for this methods. + EqualsUsing(a Allocator, other Map) bool + // Iterate runs the given function for each key/value in the + // map. Returning false in the closure prematurely stops the + // iteration. 
+ Iterate(func(key string, value Value) bool) bool + // IterateUsing uses the provided allocator and runs the given function for each key/value + // in the map. Returning false in the closure prematurely stops the iteration. + IterateUsing(Allocator, func(key string, value Value) bool) bool + // Length returns the number of items in the map. + Length() int + // Empty returns true if the map is empty. + Empty() bool + // Zip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called + // with the values from both maps, otherwise it is called with the value of the map that contains the key and nil + // for the map that does not contain the key. Returning false in the closure prematurely stops the iteration. + Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool + // ZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps + // contain a value for a given key, fn is called with the values from both maps, otherwise it is called with + // the value of the map that contains the key and nil for the map that does not contain the key. Returning + // false in the closure prematurely stops the iteration. + ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool +} + +// MapTraverseOrder defines the map traversal ordering available. +type MapTraverseOrder int + +const ( + // Unordered indicates that the map traversal has no ordering requirement. + Unordered = iota + // LexicalKeyOrder indicates that the map traversal is ordered by key, lexically. + LexicalKeyOrder +) + +// MapZip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called +// with the values from both maps, otherwise it is called with the value of the map that contains the key and nil +// for the other map. Returning false in the closure prematurely stops the iteration. +func MapZip(lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return MapZipUsing(HeapAllocator, lhs, rhs, order, fn) +} + +// MapZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps +// contain a value for a given key, fn is called with the values from both maps, otherwise it is called with +// the value of the map that contains the key and nil for the other map. Returning false in the closure +// prematurely stops the iteration. +func MapZipUsing(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + if lhs != nil { + return lhs.ZipUsing(a, rhs, order, fn) + } + if rhs != nil { + return rhs.ZipUsing(a, lhs, order, func(key string, rhs, lhs Value) bool { // arg positions of lhs and rhs deliberately swapped + return fn(key, lhs, rhs) + }) + } + return true +} + +// defaultMapZip provides a default implementation of Zip for implementations that do not need to provide +// their own optimized implementation. 
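Editor's note: a hedged sketch (not part of the vendored file) of the Zip/MapZip contract described above; it assumes the exported NewValueInterface constructor and the Value.AsMap accessor defined elsewhere in this vendored package.

// Illustrative sketch (editor's addition), not part of the vendored file.
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func unwrap(v value.Value) interface{} {
	if v == nil {
		return nil
	}
	return v.Unstructured()
}

func main() {
	lhs := value.NewValueInterface(map[string]interface{}{"a": int64(1), "b": int64(2)}).AsMap()
	rhs := value.NewValueInterface(map[string]interface{}{"b": int64(2), "c": int64(3)}).AsMap()

	// Keys present on only one side are reported with a nil Value for the other side.
	value.MapZip(lhs, rhs, value.LexicalKeyOrder, func(key string, l, r value.Value) bool {
		fmt.Printf("%s: lhs=%v rhs=%v\n", key, unwrap(l), unwrap(r))
		return true // returning false would stop the iteration early
	})
	// Output:
	// a: lhs=1 rhs=<nil>
	// b: lhs=2 rhs=2
	// c: lhs=<nil> rhs=3
}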
+func defaultMapZip(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + switch order { + case Unordered: + return unorderedMapZip(a, lhs, rhs, fn) + case LexicalKeyOrder: + return lexicalKeyOrderedMapZip(a, lhs, rhs, fn) + default: + panic("Unsupported map order") + } +} + +func unorderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool { + if (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) { + return true + } + + if lhs != nil { + ok := lhs.IterateUsing(a, func(key string, lhsValue Value) bool { + var rhsValue Value + if rhs != nil { + if item, ok := rhs.GetUsing(a, key); ok { + rhsValue = item + defer a.Free(rhsValue) + } + } + return fn(key, lhsValue, rhsValue) + }) + if !ok { + return false + } + } + if rhs != nil { + return rhs.IterateUsing(a, func(key string, rhsValue Value) bool { + if lhs == nil || !lhs.Has(key) { + return fn(key, nil, rhsValue) + } + return true + }) + } + return true +} + +func lexicalKeyOrderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool { + var lhsLength, rhsLength int + var orderedLength int // rough estimate of length of union of map keys + if lhs != nil { + lhsLength = lhs.Length() + orderedLength = lhsLength + } + if rhs != nil { + rhsLength = rhs.Length() + if rhsLength > orderedLength { + orderedLength = rhsLength + } + } + if lhsLength == 0 && rhsLength == 0 { + return true + } + + ordered := make([]string, 0, orderedLength) + if lhs != nil { + lhs.IterateUsing(a, func(key string, _ Value) bool { + ordered = append(ordered, key) + return true + }) + } + if rhs != nil { + rhs.IterateUsing(a, func(key string, _ Value) bool { + if lhs == nil || !lhs.Has(key) { + ordered = append(ordered, key) + } + return true + }) + } + sort.Strings(ordered) + for _, key := range ordered { + var litem, ritem Value + if lhs != nil { + litem, _ = lhs.GetUsing(a, key) + } + if rhs != nil { + ritem, _ = rhs.GetUsing(a, key) + } + ok := fn(key, litem, ritem) + if litem != nil { + a.Free(litem) + } + if ritem != nil { + a.Free(ritem) + } + if !ok { + return false + } + } + return true +} + +// MapLess compares two maps lexically. +func MapLess(lhs, rhs Map) bool { + return MapCompare(lhs, rhs) == -1 +} + +// MapCompare compares two maps lexically. +func MapCompare(lhs, rhs Map) int { + return MapCompareUsing(HeapAllocator, lhs, rhs) +} + +// MapCompareUsing uses the provided allocator and compares two maps lexically. +func MapCompareUsing(a Allocator, lhs, rhs Map) int { + c := 0 + var llength, rlength int + if lhs != nil { + llength = lhs.Length() + } + if rhs != nil { + rlength = rhs.Length() + } + if llength == 0 && rlength == 0 { + return 0 + } + i := 0 + MapZipUsing(a, lhs, rhs, LexicalKeyOrder, func(key string, lhs, rhs Value) bool { + switch { + case i == llength: + c = -1 + case i == rlength: + c = 1 + case lhs == nil: + c = 1 + case rhs == nil: + c = -1 + default: + c = CompareUsing(a, lhs, rhs) + } + i++ + return c == 0 + }) + return c +} + +// MapEquals returns true if lhs == rhs, false otherwise. This function +// acts on generic types and should not be used by callers, but can help +// implement Map.Equals. +// WARN: This is a naive implementation, calling lhs.Equals(rhs) is typically the most efficient. +func MapEquals(lhs, rhs Map) bool { + return MapEqualsUsing(HeapAllocator, lhs, rhs) +} + +// MapEqualsUsing uses the provided allocator and returns true if lhs == rhs, +// false otherwise. 
This function acts on generic types and should not be used +// by callers, but can help implement Map.Equals. +// WARN: This is a naive implementation, calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient. +func MapEqualsUsing(a Allocator, lhs, rhs Map) bool { + if lhs == nil && rhs == nil { + return true + } + if lhs == nil || rhs == nil { + return false + } + if lhs.Length() != rhs.Length() { + return false + } + return MapZipUsing(a, lhs, rhs, Unordered, func(key string, lhs, rhs Value) bool { + if lhs == nil || rhs == nil { + return false + } + return EqualsUsing(a, lhs, rhs) + }) +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go new file mode 100644 index 000000000000..dc8b8c72006c --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go @@ -0,0 +1,209 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "reflect" +) + +type mapReflect struct { + valueReflect +} + +func (r mapReflect) Length() int { + val := r.Value + return val.Len() +} + +func (r mapReflect) Empty() bool { + val := r.Value + return val.Len() == 0 +} + +func (r mapReflect) Get(key string) (Value, bool) { + return r.GetUsing(HeapAllocator, key) +} + +func (r mapReflect) GetUsing(a Allocator, key string) (Value, bool) { + k, v, ok := r.get(key) + if !ok { + return nil, false + } + return a.allocValueReflect().mustReuse(v, nil, &r.Value, &k), true +} + +func (r mapReflect) get(k string) (key, value reflect.Value, ok bool) { + mapKey := r.toMapKey(k) + val := r.Value.MapIndex(mapKey) + return mapKey, val, val.IsValid() && val != reflect.Value{} +} + +func (r mapReflect) Has(key string) bool { + var val reflect.Value + val = r.Value.MapIndex(r.toMapKey(key)) + if !val.IsValid() { + return false + } + return val != reflect.Value{} +} + +func (r mapReflect) Set(key string, val Value) { + r.Value.SetMapIndex(r.toMapKey(key), reflect.ValueOf(val.Unstructured())) +} + +func (r mapReflect) Delete(key string) { + val := r.Value + val.SetMapIndex(r.toMapKey(key), reflect.Value{}) +} + +// TODO: Do we need to support types that implement json.Marshaler and are used as string keys? 
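Editor's note: a hedged sketch (not part of the vendored file) of driving the mapReflect implementation above through the exported API; it assumes NewValueReflect (defined elsewhere in this vendored package) accepts a pointer to the backing object and that the returned Value exposes AsMap.

// Illustrative sketch (editor's addition), not part of the vendored file.
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	backing := map[string]string{"region": "us-east-1"}

	// NewValueReflect needs a pointer so the underlying object can be modified.
	v, err := value.NewValueReflect(&backing)
	if err != nil {
		panic(err)
	}
	m := v.AsMap()

	// Get/Has/Set/Delete act on the backing Go map through reflection.
	if item, ok := m.Get("region"); ok {
		fmt.Println(item.Unstructured()) // us-east-1
	}
	m.Set("zone", value.NewValueInterface("us-east-1a"))
	m.Delete("region")
	fmt.Println(m.Length(), backing) // 1 map[zone:us-east-1a]
}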
+func (r mapReflect) toMapKey(key string) reflect.Value { + val := r.Value + return reflect.ValueOf(key).Convert(val.Type().Key()) +} + +func (r mapReflect) Iterate(fn func(string, Value) bool) bool { + return r.IterateUsing(HeapAllocator, fn) +} + +func (r mapReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool { + if r.Value.Len() == 0 { + return true + } + v := a.allocValueReflect() + defer a.Free(v) + return eachMapEntry(r.Value, func(e *TypeReflectCacheEntry, key reflect.Value, value reflect.Value) bool { + return fn(key.String(), v.mustReuse(value, e, &r.Value, &key)) + }) +} + +func eachMapEntry(val reflect.Value, fn func(*TypeReflectCacheEntry, reflect.Value, reflect.Value) bool) bool { + iter := val.MapRange() + entry := TypeReflectEntryOf(val.Type().Elem()) + for iter.Next() { + next := iter.Value() + if !next.IsValid() { + continue + } + if !fn(entry, iter.Key(), next) { + return false + } + } + return true +} + +func (r mapReflect) Unstructured() interface{} { + result := make(map[string]interface{}, r.Length()) + r.Iterate(func(s string, value Value) bool { + result[s] = value.Unstructured() + return true + }) + return result +} + +func (r mapReflect) Equals(m Map) bool { + return r.EqualsUsing(HeapAllocator, m) +} + +func (r mapReflect) EqualsUsing(a Allocator, m Map) bool { + lhsLength := r.Length() + rhsLength := m.Length() + if lhsLength != rhsLength { + return false + } + if lhsLength == 0 { + return true + } + vr := a.allocValueReflect() + defer a.Free(vr) + entry := TypeReflectEntryOf(r.Value.Type().Elem()) + return m.Iterate(func(key string, value Value) bool { + _, lhsVal, ok := r.get(key) + if !ok { + return false + } + return Equals(vr.mustReuse(lhsVal, entry, nil, nil), value) + }) +} + +func (r mapReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return r.ZipUsing(HeapAllocator, other, order, fn) +} + +func (r mapReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + if otherMapReflect, ok := other.(*mapReflect); ok && order == Unordered { + return r.unorderedReflectZip(a, otherMapReflect, fn) + } + return defaultMapZip(a, &r, other, order, fn) +} + +// unorderedReflectZip provides an optimized unordered zip for mapReflect types. 
+func (r mapReflect) unorderedReflectZip(a Allocator, other *mapReflect, fn func(key string, lhs, rhs Value) bool) bool { + if r.Empty() && (other == nil || other.Empty()) { + return true + } + + lhs := r.Value + lhsEntry := TypeReflectEntryOf(lhs.Type().Elem()) + + // map lookup via reflection is expensive enough that it is better to keep track of visited keys + visited := map[string]struct{}{} + + vlhs, vrhs := a.allocValueReflect(), a.allocValueReflect() + defer a.Free(vlhs) + defer a.Free(vrhs) + + if other != nil { + rhs := other.Value + rhsEntry := TypeReflectEntryOf(rhs.Type().Elem()) + iter := rhs.MapRange() + + for iter.Next() { + key := iter.Key() + keyString := key.String() + next := iter.Value() + if !next.IsValid() { + continue + } + rhsVal := vrhs.mustReuse(next, rhsEntry, &rhs, &key) + visited[keyString] = struct{}{} + var lhsVal Value + if _, v, ok := r.get(keyString); ok { + lhsVal = vlhs.mustReuse(v, lhsEntry, &lhs, &key) + } + if !fn(keyString, lhsVal, rhsVal) { + return false + } + } + } + + iter := lhs.MapRange() + for iter.Next() { + key := iter.Key() + if _, ok := visited[key.String()]; ok { + continue + } + next := iter.Value() + if !next.IsValid() { + continue + } + if !fn(key.String(), vlhs.mustReuse(next, lhsEntry, &lhs, &key), nil) { + return false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go new file mode 100644 index 000000000000..d8e208628def --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go @@ -0,0 +1,190 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package value + +type mapUnstructuredInterface map[interface{}]interface{} + +func (m mapUnstructuredInterface) Set(key string, val Value) { + m[key] = val.Unstructured() +} + +func (m mapUnstructuredInterface) Get(key string) (Value, bool) { + return m.GetUsing(HeapAllocator, key) +} + +func (m mapUnstructuredInterface) GetUsing(a Allocator, key string) (Value, bool) { + if v, ok := m[key]; !ok { + return nil, false + } else { + return a.allocValueUnstructured().reuse(v), true + } +} + +func (m mapUnstructuredInterface) Has(key string) bool { + _, ok := m[key] + return ok +} + +func (m mapUnstructuredInterface) Delete(key string) { + delete(m, key) +} + +func (m mapUnstructuredInterface) Iterate(fn func(key string, value Value) bool) bool { + return m.IterateUsing(HeapAllocator, fn) +} + +func (m mapUnstructuredInterface) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool { + if len(m) == 0 { + return true + } + vv := a.allocValueUnstructured() + defer a.Free(vv) + for k, v := range m { + if ks, ok := k.(string); !ok { + continue + } else { + if !fn(ks, vv.reuse(v)) { + return false + } + } + } + return true +} + +func (m mapUnstructuredInterface) Length() int { + return len(m) +} + +func (m mapUnstructuredInterface) Empty() bool { + return len(m) == 0 +} + +func (m mapUnstructuredInterface) Equals(other Map) bool { + return m.EqualsUsing(HeapAllocator, other) +} + +func (m mapUnstructuredInterface) EqualsUsing(a Allocator, other Map) bool { + lhsLength := m.Length() + rhsLength := other.Length() + if lhsLength != rhsLength { + return false + } + if lhsLength == 0 { + return true + } + vv := a.allocValueUnstructured() + defer a.Free(vv) + return other.Iterate(func(key string, value Value) bool { + lhsVal, ok := m[key] + if !ok { + return false + } + return Equals(vv.reuse(lhsVal), value) + }) +} + +func (m mapUnstructuredInterface) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return m.ZipUsing(HeapAllocator, other, order, fn) +} + +func (m mapUnstructuredInterface) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return defaultMapZip(a, m, other, order, fn) +} + +type mapUnstructuredString map[string]interface{} + +func (m mapUnstructuredString) Set(key string, val Value) { + m[key] = val.Unstructured() +} + +func (m mapUnstructuredString) Get(key string) (Value, bool) { + return m.GetUsing(HeapAllocator, key) +} +func (m mapUnstructuredString) GetUsing(a Allocator, key string) (Value, bool) { + if v, ok := m[key]; !ok { + return nil, false + } else { + return a.allocValueUnstructured().reuse(v), true + } +} + +func (m mapUnstructuredString) Has(key string) bool { + _, ok := m[key] + return ok +} + +func (m mapUnstructuredString) Delete(key string) { + delete(m, key) +} + +func (m mapUnstructuredString) Iterate(fn func(key string, value Value) bool) bool { + return m.IterateUsing(HeapAllocator, fn) +} + +func (m mapUnstructuredString) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool { + if len(m) == 0 { + return true + } + vv := a.allocValueUnstructured() + defer a.Free(vv) + for k, v := range m { + if !fn(k, vv.reuse(v)) { + return false + } + } + return true +} + +func (m mapUnstructuredString) Length() int { + return len(m) +} + +func (m mapUnstructuredString) Equals(other Map) bool { + return m.EqualsUsing(HeapAllocator, other) +} + +func (m mapUnstructuredString) EqualsUsing(a Allocator, other Map) bool { + lhsLength := m.Length() + 
rhsLength := other.Length() + if lhsLength != rhsLength { + return false + } + if lhsLength == 0 { + return true + } + vv := a.allocValueUnstructured() + defer a.Free(vv) + return other.Iterate(func(key string, value Value) bool { + lhsVal, ok := m[key] + if !ok { + return false + } + return Equals(vv.reuse(lhsVal), value) + }) +} + +func (m mapUnstructuredString) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return m.ZipUsing(HeapAllocator, other, order, fn) +} + +func (m mapUnstructuredString) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return defaultMapZip(a, m, other, order, fn) +} + +func (m mapUnstructuredString) Empty() bool { + return len(m) == 0 +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go new file mode 100644 index 000000000000..49e6dd1690e0 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go @@ -0,0 +1,463 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "sort" + "sync" + "sync/atomic" +) + +// UnstructuredConverter defines how a type can be converted directly to unstructured. +// Types that implement json.Marshaler may also optionally implement this interface to provide a more +// direct and more efficient conversion. All types that choose to implement this interface must still +// implement this same conversion via json.Marshaler. +type UnstructuredConverter interface { + json.Marshaler // require that json.Marshaler is implemented + + // ToUnstructured returns the unstructured representation. + ToUnstructured() interface{} +} + +// TypeReflectCacheEntry keeps data gathered using reflection about how a type is converted to/from unstructured. +type TypeReflectCacheEntry struct { + isJsonMarshaler bool + ptrIsJsonMarshaler bool + isJsonUnmarshaler bool + ptrIsJsonUnmarshaler bool + isStringConvertable bool + ptrIsStringConvertable bool + + structFields map[string]*FieldCacheEntry + orderedStructFields []*FieldCacheEntry +} + +// FieldCacheEntry keeps data gathered using reflection about how the field of a struct is converted to/from +// unstructured. +type FieldCacheEntry struct { + // JsonName returns the name of the field according to the json tags on the struct field. + JsonName string + // isOmitEmpty is true if the field has the json 'omitempty' tag. + isOmitEmpty bool + // fieldPath is a list of field indices (see FieldByIndex) to lookup the value of + // a field in a reflect.Value struct. The field indices in the list form a path used + // to traverse through intermediary 'inline' fields. 
+ fieldPath [][]int + + fieldType reflect.Type + TypeEntry *TypeReflectCacheEntry +} + +func (f *FieldCacheEntry) CanOmit(fieldVal reflect.Value) bool { + return f.isOmitEmpty && (safeIsNil(fieldVal) || isZero(fieldVal)) +} + +// GetUsing returns the field identified by this FieldCacheEntry from the provided struct. +func (f *FieldCacheEntry) GetFrom(structVal reflect.Value) reflect.Value { + // field might be nested within 'inline' structs + for _, elem := range f.fieldPath { + structVal = structVal.FieldByIndex(elem) + } + return structVal +} + +var marshalerType = reflect.TypeOf(new(json.Marshaler)).Elem() +var unmarshalerType = reflect.TypeOf(new(json.Unmarshaler)).Elem() +var unstructuredConvertableType = reflect.TypeOf(new(UnstructuredConverter)).Elem() +var defaultReflectCache = newReflectCache() + +// TypeReflectEntryOf returns the TypeReflectCacheEntry of the provided reflect.Type. +func TypeReflectEntryOf(t reflect.Type) *TypeReflectCacheEntry { + cm := defaultReflectCache.get() + if record, ok := cm[t]; ok { + return record + } + updates := reflectCacheMap{} + result := typeReflectEntryOf(cm, t, updates) + if len(updates) > 0 { + defaultReflectCache.update(updates) + } + return result +} + +// TypeReflectEntryOf returns all updates needed to add provided reflect.Type, and the types its fields transitively +// depend on, to the cache. +func typeReflectEntryOf(cm reflectCacheMap, t reflect.Type, updates reflectCacheMap) *TypeReflectCacheEntry { + if record, ok := cm[t]; ok { + return record + } + if record, ok := updates[t]; ok { + return record + } + typeEntry := &TypeReflectCacheEntry{ + isJsonMarshaler: t.Implements(marshalerType), + ptrIsJsonMarshaler: reflect.PtrTo(t).Implements(marshalerType), + isJsonUnmarshaler: reflect.PtrTo(t).Implements(unmarshalerType), + isStringConvertable: t.Implements(unstructuredConvertableType), + ptrIsStringConvertable: reflect.PtrTo(t).Implements(unstructuredConvertableType), + } + if t.Kind() == reflect.Struct { + fieldEntries := map[string]*FieldCacheEntry{} + buildStructCacheEntry(t, fieldEntries, nil) + typeEntry.structFields = fieldEntries + sortedByJsonName := make([]*FieldCacheEntry, len(fieldEntries)) + i := 0 + for _, entry := range fieldEntries { + sortedByJsonName[i] = entry + i++ + } + sort.Slice(sortedByJsonName, func(i, j int) bool { + return sortedByJsonName[i].JsonName < sortedByJsonName[j].JsonName + }) + typeEntry.orderedStructFields = sortedByJsonName + } + + // cyclic type references are allowed, so we must add the typeEntry to the updates map before resolving + // the field.typeEntry references, or creating them if they are not already in the cache + updates[t] = typeEntry + + for _, field := range typeEntry.structFields { + if field.TypeEntry == nil { + field.TypeEntry = typeReflectEntryOf(cm, field.fieldType, updates) + } + } + return typeEntry +} + +func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fieldPath [][]int) { + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + jsonName, omit, isInline, isOmitempty := lookupJsonTags(field) + if omit { + continue + } + if isInline { + buildStructCacheEntry(field.Type, infos, append(fieldPath, field.Index)) + continue + } + info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type} + infos[jsonName] = info + } +} + +// Fields returns a map of JSON field name to FieldCacheEntry for structs, or nil for non-structs. 
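Editor's note: an illustrative sketch (not part of the vendored file) of the exported reflect-cache entry points defined in this file (TypeReflectEntryOf, Fields, OrderedFields, CanOmit); the widget type and its tags are invented for the example.

// Illustrative sketch (editor's addition), not part of the vendored file.
package main

import (
	"fmt"
	"reflect"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

type widget struct {
	Name  string `json:"name"`
	Count int    `json:"count,omitempty"`
}

func main() {
	entry := value.TypeReflectEntryOf(reflect.TypeOf(widget{}))

	// Fields is keyed by JSON name; OrderedFields is the same set sorted by JSON name.
	for _, f := range entry.OrderedFields() {
		fmt.Println(f.JsonName) // count, then name
	}

	// CanOmit reports whether an omitempty field currently holds its zero value.
	w := widget{Name: "spanner"}
	fmt.Println(entry.Fields()["count"].CanOmit(reflect.ValueOf(w).Field(1))) // true
}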
+func (e TypeReflectCacheEntry) Fields() map[string]*FieldCacheEntry { + return e.structFields +} + +// Fields returns a map of JSON field name to FieldCacheEntry for structs, or nil for non-structs. +func (e TypeReflectCacheEntry) OrderedFields() []*FieldCacheEntry { + return e.orderedStructFields +} + +// CanConvertToUnstructured returns true if this TypeReflectCacheEntry can convert values of its type to unstructured. +func (e TypeReflectCacheEntry) CanConvertToUnstructured() bool { + return e.isJsonMarshaler || e.ptrIsJsonMarshaler || e.isStringConvertable || e.ptrIsStringConvertable +} + +// ToUnstructured converts the provided value to unstructured and returns it. +func (e TypeReflectCacheEntry) ToUnstructured(sv reflect.Value) (interface{}, error) { + // This is based on https://github.com/kubernetes/kubernetes/blob/82c9e5c814eb7acc6cc0a090c057294d0667ad66/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L505 + // and is intended to replace it. + + // Check if the object has a custom string converter and use it if available, since it is much more efficient + // than round tripping through json. + if converter, ok := e.getUnstructuredConverter(sv); ok { + return converter.ToUnstructured(), nil + } + // Check if the object has a custom JSON marshaller/unmarshaller. + if marshaler, ok := e.getJsonMarshaler(sv); ok { + if sv.Kind() == reflect.Ptr && sv.IsNil() { + // We're done - we don't need to store anything. + return nil, nil + } + + data, err := marshaler.MarshalJSON() + if err != nil { + return nil, err + } + switch { + case len(data) == 0: + return nil, fmt.Errorf("error decoding from json: empty value") + + case bytes.Equal(data, nullBytes): + // We're done - we don't need to store anything. + return nil, nil + + case bytes.Equal(data, trueBytes): + return true, nil + + case bytes.Equal(data, falseBytes): + return false, nil + + case data[0] == '"': + var result string + err := unmarshal(data, &result) + if err != nil { + return nil, fmt.Errorf("error decoding string from json: %v", err) + } + return result, nil + + case data[0] == '{': + result := make(map[string]interface{}) + err := unmarshal(data, &result) + if err != nil { + return nil, fmt.Errorf("error decoding object from json: %v", err) + } + return result, nil + + case data[0] == '[': + result := make([]interface{}, 0) + err := unmarshal(data, &result) + if err != nil { + return nil, fmt.Errorf("error decoding array from json: %v", err) + } + return result, nil + + default: + var ( + resultInt int64 + resultFloat float64 + err error + ) + if err = unmarshal(data, &resultInt); err == nil { + return resultInt, nil + } else if err = unmarshal(data, &resultFloat); err == nil { + return resultFloat, nil + } else { + return nil, fmt.Errorf("error decoding number from json: %v", err) + } + } + } + + return nil, fmt.Errorf("provided type cannot be converted: %v", sv.Type()) +} + +// CanConvertFromUnstructured returns true if this TypeReflectCacheEntry can convert objects of the type from unstructured. +func (e TypeReflectCacheEntry) CanConvertFromUnstructured() bool { + return e.isJsonUnmarshaler +} + +// FromUnstructured converts the provided source value from unstructured into the provided destination value. +func (e TypeReflectCacheEntry) FromUnstructured(sv, dv reflect.Value) error { + // TODO: this could be made much more efficient using direct conversions like + // UnstructuredConverter.ToUnstructured provides. 
+ st := dv.Type() + data, err := json.Marshal(sv.Interface()) + if err != nil { + return fmt.Errorf("error encoding %s to json: %v", st.String(), err) + } + if unmarshaler, ok := e.getJsonUnmarshaler(dv); ok { + return unmarshaler.UnmarshalJSON(data) + } + return fmt.Errorf("unable to unmarshal %v into %v", sv.Type(), dv.Type()) +} + +var ( + nullBytes = []byte("null") + trueBytes = []byte("true") + falseBytes = []byte("false") +) + +func (e TypeReflectCacheEntry) getJsonMarshaler(v reflect.Value) (json.Marshaler, bool) { + if e.isJsonMarshaler { + return v.Interface().(json.Marshaler), true + } + if e.ptrIsJsonMarshaler { + // Check pointer receivers if v is not a pointer + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + return v.Interface().(json.Marshaler), true + } + } + return nil, false +} + +func (e TypeReflectCacheEntry) getJsonUnmarshaler(v reflect.Value) (json.Unmarshaler, bool) { + if !e.isJsonUnmarshaler { + return nil, false + } + return v.Addr().Interface().(json.Unmarshaler), true +} + +func (e TypeReflectCacheEntry) getUnstructuredConverter(v reflect.Value) (UnstructuredConverter, bool) { + if e.isStringConvertable { + return v.Interface().(UnstructuredConverter), true + } + if e.ptrIsStringConvertable { + // Check pointer receivers if v is not a pointer + if v.CanAddr() { + v = v.Addr() + return v.Interface().(UnstructuredConverter), true + } + } + return nil, false +} + +type typeReflectCache struct { + // use an atomic and copy-on-write since there are a fixed (typically very small) number of structs compiled into any + // go program using this cache + value atomic.Value + // mu is held by writers when performing load/modify/store operations on the cache, readers do not need to hold a + // read-lock since the atomic value is always read-only + mu sync.Mutex +} + +func newReflectCache() *typeReflectCache { + cache := &typeReflectCache{} + cache.value.Store(make(reflectCacheMap)) + return cache +} + +type reflectCacheMap map[reflect.Type]*TypeReflectCacheEntry + +// get returns the reflectCacheMap. +func (c *typeReflectCache) get() reflectCacheMap { + return c.value.Load().(reflectCacheMap) +} + +// update merges the provided updates into the cache. +func (c *typeReflectCache) update(updates reflectCacheMap) { + c.mu.Lock() + defer c.mu.Unlock() + + currentCacheMap := c.value.Load().(reflectCacheMap) + + hasNewEntries := false + for t := range updates { + if _, ok := currentCacheMap[t]; !ok { + hasNewEntries = true + break + } + } + if !hasNewEntries { + // Bail if the updates have been set while waiting for lock acquisition. + // This is safe since setting entries is idempotent. 
+ return + } + + newCacheMap := make(reflectCacheMap, len(currentCacheMap)+len(updates)) + for k, v := range currentCacheMap { + newCacheMap[k] = v + } + for t, update := range updates { + newCacheMap[t] = update + } + c.value.Store(newCacheMap) +} + +// Below json Unmarshal is fromk8s.io/apimachinery/pkg/util/json +// to handle number conversions as expected by Kubernetes + +// limit recursive depth to prevent stack overflow errors +const maxDepth = 10000 + +// unmarshal unmarshals the given data +// If v is a *map[string]interface{}, numbers are converted to int64 or float64 +func unmarshal(data []byte, v interface{}) error { + switch v := v.(type) { + case *map[string]interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertMapNumbers(*v, 0) + + case *[]interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertSliceNumbers(*v, 0) + + default: + return json.Unmarshal(data, v) + } +} + +// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. +// values which are map[string]interface{} or []interface{} are recursively visited +func convertMapNumbers(m map[string]interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + + var err error + for k, v := range m { + switch v := v.(type) { + case json.Number: + m[k], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v, depth+1) + case []interface{}: + err = convertSliceNumbers(v, depth+1) + } + if err != nil { + return err + } + } + return nil +} + +// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. +// values which are map[string]interface{} or []interface{} are recursively visited +func convertSliceNumbers(s []interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + + var err error + for i, v := range s { + switch v := v.(type) { + case json.Number: + s[i], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v, depth+1) + case []interface{}: + err = convertSliceNumbers(v, depth+1) + } + if err != nil { + return err + } + } + return nil +} + +// convertNumber converts a json.Number to an int64 or float64, or returns an error +func convertNumber(n json.Number) (interface{}, error) { + // Attempt to convert to an int64 first + if i, err := n.Int64(); err == nil { + return i, nil + } + // Return a float64 (default json.Decode() behavior) + // An overflow will return an error + return n.Float64() +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go new file mode 100644 index 000000000000..c78a4c18d122 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +// Compare compares floats. The result will be 0 if lhs==rhs, -1 if f < +// rhs, and +1 if f > rhs. +func FloatCompare(lhs, rhs float64) int { + if lhs > rhs { + return 1 + } else if lhs < rhs { + return -1 + } + return 0 +} + +// IntCompare compares integers. The result will be 0 if i==rhs, -1 if i < +// rhs, and +1 if i > rhs. +func IntCompare(lhs, rhs int64) int { + if lhs > rhs { + return 1 + } else if lhs < rhs { + return -1 + } + return 0 +} + +// Compare compares booleans. The result will be 0 if b==rhs, -1 if b < +// rhs, and +1 if b > rhs. +func BoolCompare(lhs, rhs bool) int { + if lhs == rhs { + return 0 + } else if lhs == false { + return -1 + } + return 1 +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go new file mode 100644 index 000000000000..4a7bb5c6ebd4 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go @@ -0,0 +1,208 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package value + +import ( + "fmt" + "reflect" +) + +type structReflect struct { + valueReflect +} + +func (r structReflect) Length() int { + i := 0 + eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool { + i++ + return true + }) + return i +} + +func (r structReflect) Empty() bool { + return eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool { + return false // exit early if the struct is non-empty + }) +} + +func (r structReflect) Get(key string) (Value, bool) { + return r.GetUsing(HeapAllocator, key) +} + +func (r structReflect) GetUsing(a Allocator, key string) (Value, bool) { + if val, ok := r.findJsonNameField(key); ok { + return a.allocValueReflect().mustReuse(val, nil, nil, nil), true + } + return nil, false +} + +func (r structReflect) Has(key string) bool { + _, ok := r.findJsonNameField(key) + return ok +} + +func (r structReflect) Set(key string, val Value) { + fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key] + if !ok { + panic(fmt.Sprintf("key %s may not be set on struct %T: field does not exist", key, r.Value.Interface())) + } + oldVal := fieldEntry.GetFrom(r.Value) + newVal := reflect.ValueOf(val.Unstructured()) + r.update(fieldEntry, key, oldVal, newVal) +} + +func (r structReflect) Delete(key string) { + fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key] + if !ok { + panic(fmt.Sprintf("key %s may not be deleted on struct %T: field does not exist", key, r.Value.Interface())) + } + oldVal := fieldEntry.GetFrom(r.Value) + if oldVal.Kind() != reflect.Ptr && !fieldEntry.isOmitEmpty { + panic(fmt.Sprintf("key %s may not be deleted on struct: %T: value is neither a pointer nor an omitempty field", key, r.Value.Interface())) + } + r.update(fieldEntry, key, oldVal, reflect.Zero(oldVal.Type())) +} + +func (r structReflect) update(fieldEntry *FieldCacheEntry, key string, oldVal, newVal reflect.Value) { + if oldVal.CanSet() { + oldVal.Set(newVal) + return + } + + // map items are not addressable, so if a struct is contained in a map, the only way to modify it is + // to write a replacement fieldEntry into the map. + if r.ParentMap != nil { + if r.ParentMapKey == nil { + panic("ParentMapKey must not be nil if ParentMap is not nil") + } + replacement := reflect.New(r.Value.Type()).Elem() + fieldEntry.GetFrom(replacement).Set(newVal) + r.ParentMap.SetMapIndex(*r.ParentMapKey, replacement) + return + } + + // This should never happen since NewValueReflect ensures that the root object reflected on is a pointer and map + // item replacement is handled above. 
+ panic(fmt.Sprintf("key %s may not be modified on struct: %T: struct is not settable", key, r.Value.Interface())) +} + +func (r structReflect) Iterate(fn func(string, Value) bool) bool { + return r.IterateUsing(HeapAllocator, fn) +} + +func (r structReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool { + vr := a.allocValueReflect() + defer a.Free(vr) + return eachStructField(r.Value, func(e *TypeReflectCacheEntry, s string, value reflect.Value) bool { + return fn(s, vr.mustReuse(value, e, nil, nil)) + }) +} + +func eachStructField(structVal reflect.Value, fn func(*TypeReflectCacheEntry, string, reflect.Value) bool) bool { + for _, fieldCacheEntry := range TypeReflectEntryOf(structVal.Type()).OrderedFields() { + fieldVal := fieldCacheEntry.GetFrom(structVal) + if fieldCacheEntry.CanOmit(fieldVal) { + // omit it + continue + } + ok := fn(fieldCacheEntry.TypeEntry, fieldCacheEntry.JsonName, fieldVal) + if !ok { + return false + } + } + return true +} + +func (r structReflect) Unstructured() interface{} { + // Use number of struct fields as a cheap way to rough estimate map size + result := make(map[string]interface{}, r.Value.NumField()) + r.Iterate(func(s string, value Value) bool { + result[s] = value.Unstructured() + return true + }) + return result +} + +func (r structReflect) Equals(m Map) bool { + return r.EqualsUsing(HeapAllocator, m) +} + +func (r structReflect) EqualsUsing(a Allocator, m Map) bool { + // MapEquals uses zip and is fairly efficient for structReflect + return MapEqualsUsing(a, &r, m) +} + +func (r structReflect) findJsonNameFieldAndNotEmpty(jsonName string) (reflect.Value, bool) { + structCacheEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[jsonName] + if !ok { + return reflect.Value{}, false + } + fieldVal := structCacheEntry.GetFrom(r.Value) + return fieldVal, !structCacheEntry.CanOmit(fieldVal) +} + +func (r structReflect) findJsonNameField(jsonName string) (val reflect.Value, ok bool) { + structCacheEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[jsonName] + if !ok { + return reflect.Value{}, false + } + fieldVal := structCacheEntry.GetFrom(r.Value) + return fieldVal, !structCacheEntry.CanOmit(fieldVal) +} + +func (r structReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return r.ZipUsing(HeapAllocator, other, order, fn) +} + +func (r structReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + if otherStruct, ok := other.(*structReflect); ok && r.Value.Type() == otherStruct.Value.Type() { + lhsvr, rhsvr := a.allocValueReflect(), a.allocValueReflect() + defer a.Free(lhsvr) + defer a.Free(rhsvr) + return r.structZip(otherStruct, lhsvr, rhsvr, fn) + } + return defaultMapZip(a, &r, other, order, fn) +} + +// structZip provides an optimized zip for structReflect types. The zip is always lexical key ordered since there is +// no additional cost to ordering the zip for structured types. 
+func (r structReflect) structZip(other *structReflect, lhsvr, rhsvr *valueReflect, fn func(key string, lhs, rhs Value) bool) bool { + lhsVal := r.Value + rhsVal := other.Value + + for _, fieldCacheEntry := range TypeReflectEntryOf(lhsVal.Type()).OrderedFields() { + lhsFieldVal := fieldCacheEntry.GetFrom(lhsVal) + rhsFieldVal := fieldCacheEntry.GetFrom(rhsVal) + lhsOmit := fieldCacheEntry.CanOmit(lhsFieldVal) + rhsOmit := fieldCacheEntry.CanOmit(rhsFieldVal) + if lhsOmit && rhsOmit { + continue + } + var lhsVal, rhsVal Value + if !lhsOmit { + lhsVal = lhsvr.mustReuse(lhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil) + } + if !rhsOmit { + rhsVal = rhsvr.mustReuse(rhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil) + } + if !fn(fieldCacheEntry.JsonName, lhsVal, rhsVal) { + return false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go new file mode 100644 index 000000000000..ea79e3a000e7 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go @@ -0,0 +1,347 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "bytes" + "fmt" + "io" + "strings" + + jsoniter "github.com/json-iterator/go" + "gopkg.in/yaml.v2" +) + +var ( + readPool = jsoniter.NewIterator(jsoniter.ConfigCompatibleWithStandardLibrary).Pool() + writePool = jsoniter.NewStream(jsoniter.ConfigCompatibleWithStandardLibrary, nil, 1024).Pool() +) + +// A Value corresponds to an 'atom' in the schema. It should return true +// for at least one of the IsXXX methods below, or the value is +// considered "invalid" +type Value interface { + // IsMap returns true if the Value is a Map, false otherwise. + IsMap() bool + // IsList returns true if the Value is a List, false otherwise. + IsList() bool + // IsBool returns true if the Value is a bool, false otherwise. + IsBool() bool + // IsInt returns true if the Value is a int64, false otherwise. + IsInt() bool + // IsFloat returns true if the Value is a float64, false + // otherwise. + IsFloat() bool + // IsString returns true if the Value is a string, false + // otherwise. + IsString() bool + // IsMap returns true if the Value is null, false otherwise. + IsNull() bool + + // AsMap converts the Value into a Map (or panic if the type + // doesn't allow it). + AsMap() Map + // AsMapUsing uses the provided allocator and converts the Value + // into a Map (or panic if the type doesn't allow it). + AsMapUsing(Allocator) Map + // AsList converts the Value into a List (or panic if the type + // doesn't allow it). + AsList() List + // AsListUsing uses the provided allocator and converts the Value + // into a List (or panic if the type doesn't allow it). + AsListUsing(Allocator) List + // AsBool converts the Value into a bool (or panic if the type + // doesn't allow it). + AsBool() bool + // AsInt converts the Value into an int64 (or panic if the type + // doesn't allow it). 
+ AsInt() int64 + // AsFloat converts the Value into a float64 (or panic if the type + // doesn't allow it). + AsFloat() float64 + // AsString converts the Value into a string (or panic if the type + // doesn't allow it). + AsString() string + + // Unstructured converts the Value into an Unstructured interface{}. + Unstructured() interface{} +} + +// FromJSON is a helper function for reading a JSON document. +func FromJSON(input []byte) (Value, error) { + return FromJSONFast(input) +} + +// FromJSONFast is a helper function for reading a JSON document. +func FromJSONFast(input []byte) (Value, error) { + iter := readPool.BorrowIterator(input) + defer readPool.ReturnIterator(iter) + return ReadJSONIter(iter) +} + +// ToJSON is a helper function for producing a JSon document. +func ToJSON(v Value) ([]byte, error) { + buf := bytes.Buffer{} + stream := writePool.BorrowStream(&buf) + defer writePool.ReturnStream(stream) + WriteJSONStream(v, stream) + b := stream.Buffer() + err := stream.Flush() + // Help jsoniter manage its buffers--without this, the next + // use of the stream is likely to require an allocation. Look + // at the jsoniter stream code to understand why. They were probably + // optimizing for folks using the buffer directly. + stream.SetBuffer(b[:0]) + return buf.Bytes(), err +} + +// ReadJSONIter reads a Value from a JSON iterator. +func ReadJSONIter(iter *jsoniter.Iterator) (Value, error) { + v := iter.Read() + if iter.Error != nil && iter.Error != io.EOF { + return nil, iter.Error + } + return NewValueInterface(v), nil +} + +// WriteJSONStream writes a value into a JSON stream. +func WriteJSONStream(v Value, stream *jsoniter.Stream) { + stream.WriteVal(v.Unstructured()) +} + +// ToYAML marshals a value as YAML. +func ToYAML(v Value) ([]byte, error) { + return yaml.Marshal(v.Unstructured()) +} + +// Equals returns true iff the two values are equal. +func Equals(lhs, rhs Value) bool { + return EqualsUsing(HeapAllocator, lhs, rhs) +} + +// EqualsUsing uses the provided allocator and returns true iff the two values are equal. 
+func EqualsUsing(a Allocator, lhs, rhs Value) bool { + if lhs.IsFloat() || rhs.IsFloat() { + var lf float64 + if lhs.IsFloat() { + lf = lhs.AsFloat() + } else if lhs.IsInt() { + lf = float64(lhs.AsInt()) + } else { + return false + } + var rf float64 + if rhs.IsFloat() { + rf = rhs.AsFloat() + } else if rhs.IsInt() { + rf = float64(rhs.AsInt()) + } else { + return false + } + return lf == rf + } + if lhs.IsInt() { + if rhs.IsInt() { + return lhs.AsInt() == rhs.AsInt() + } + return false + } else if rhs.IsInt() { + return false + } + if lhs.IsString() { + if rhs.IsString() { + return lhs.AsString() == rhs.AsString() + } + return false + } else if rhs.IsString() { + return false + } + if lhs.IsBool() { + if rhs.IsBool() { + return lhs.AsBool() == rhs.AsBool() + } + return false + } else if rhs.IsBool() { + return false + } + if lhs.IsList() { + if rhs.IsList() { + lhsList := lhs.AsListUsing(a) + defer a.Free(lhsList) + rhsList := rhs.AsListUsing(a) + defer a.Free(rhsList) + return lhsList.EqualsUsing(a, rhsList) + } + return false + } else if rhs.IsList() { + return false + } + if lhs.IsMap() { + if rhs.IsMap() { + lhsList := lhs.AsMapUsing(a) + defer a.Free(lhsList) + rhsList := rhs.AsMapUsing(a) + defer a.Free(rhsList) + return lhsList.EqualsUsing(a, rhsList) + } + return false + } else if rhs.IsMap() { + return false + } + if lhs.IsNull() { + if rhs.IsNull() { + return true + } + return false + } else if rhs.IsNull() { + return false + } + // No field is set, on either objects. + return true +} + +// ToString returns a human-readable representation of the value. +func ToString(v Value) string { + if v.IsNull() { + return "null" + } + switch { + case v.IsFloat(): + return fmt.Sprintf("%v", v.AsFloat()) + case v.IsInt(): + return fmt.Sprintf("%v", v.AsInt()) + case v.IsString(): + return fmt.Sprintf("%q", v.AsString()) + case v.IsBool(): + return fmt.Sprintf("%v", v.AsBool()) + case v.IsList(): + strs := []string{} + list := v.AsList() + for i := 0; i < list.Length(); i++ { + strs = append(strs, ToString(list.At(i))) + } + return "[" + strings.Join(strs, ",") + "]" + case v.IsMap(): + strs := []string{} + v.AsMap().Iterate(func(k string, v Value) bool { + strs = append(strs, fmt.Sprintf("%v=%v", k, ToString(v))) + return true + }) + return strings.Join(strs, "") + } + // No field is set, on either objects. + return "{{undefined}}" +} + +// Less provides a total ordering for Value (so that they can be sorted, even +// if they are of different types). +func Less(lhs, rhs Value) bool { + return Compare(lhs, rhs) == -1 +} + +// Compare provides a total ordering for Value (so that they can be +// sorted, even if they are of different types). The result will be 0 if +// v==rhs, -1 if v < rhs, and +1 if v > rhs. +func Compare(lhs, rhs Value) int { + return CompareUsing(HeapAllocator, lhs, rhs) +} + +// CompareUsing uses the provided allocator and provides a total +// ordering for Value (so that they can be sorted, even if they +// are of different types). The result will be 0 if v==rhs, -1 +// if v < rhs, and +1 if v > rhs. +func CompareUsing(a Allocator, lhs, rhs Value) int { + if lhs.IsFloat() { + if !rhs.IsFloat() { + // Extra: compare floats and ints numerically. + if rhs.IsInt() { + return FloatCompare(lhs.AsFloat(), float64(rhs.AsInt())) + } + return -1 + } + return FloatCompare(lhs.AsFloat(), rhs.AsFloat()) + } else if rhs.IsFloat() { + // Extra: compare floats and ints numerically. 
+ if lhs.IsInt() { + return FloatCompare(float64(lhs.AsInt()), rhs.AsFloat()) + } + return 1 + } + + if lhs.IsInt() { + if !rhs.IsInt() { + return -1 + } + return IntCompare(lhs.AsInt(), rhs.AsInt()) + } else if rhs.IsInt() { + return 1 + } + + if lhs.IsString() { + if !rhs.IsString() { + return -1 + } + return strings.Compare(lhs.AsString(), rhs.AsString()) + } else if rhs.IsString() { + return 1 + } + + if lhs.IsBool() { + if !rhs.IsBool() { + return -1 + } + return BoolCompare(lhs.AsBool(), rhs.AsBool()) + } else if rhs.IsBool() { + return 1 + } + + if lhs.IsList() { + if !rhs.IsList() { + return -1 + } + lhsList := lhs.AsListUsing(a) + defer a.Free(lhsList) + rhsList := rhs.AsListUsing(a) + defer a.Free(rhsList) + return ListCompareUsing(a, lhsList, rhsList) + } else if rhs.IsList() { + return 1 + } + if lhs.IsMap() { + if !rhs.IsMap() { + return -1 + } + lhsMap := lhs.AsMapUsing(a) + defer a.Free(lhsMap) + rhsMap := rhs.AsMapUsing(a) + defer a.Free(rhsMap) + return MapCompareUsing(a, lhsMap, rhsMap) + } else if rhs.IsMap() { + return 1 + } + if lhs.IsNull() { + if !rhs.IsNull() { + return -1 + } + return 0 + } else if rhs.IsNull() { + return 1 + } + + // Invalid Value-- nothing is set. + return 0 +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go new file mode 100644 index 000000000000..05e70debaef3 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go @@ -0,0 +1,294 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "encoding/base64" + "fmt" + "reflect" +) + +// NewValueReflect creates a Value backed by an "interface{}" type, +// typically an structured object in Kubernetes world that is uses reflection to expose. +// The provided "interface{}" value must be a pointer so that the value can be modified via reflection. +// The provided "interface{}" may contain structs and types that are converted to Values +// by the jsonMarshaler interface. +func NewValueReflect(value interface{}) (Value, error) { + if value == nil { + return NewValueInterface(nil), nil + } + v := reflect.ValueOf(value) + if v.Kind() != reflect.Ptr { + // The root value to reflect on must be a pointer so that map.Set() and map.Delete() operations are possible. + return nil, fmt.Errorf("value provided to NewValueReflect must be a pointer") + } + return wrapValueReflect(v, nil, nil) +} + +// wrapValueReflect wraps the provide reflect.Value as a value. If parent in the data tree is a map, parentMap +// and parentMapKey must be provided so that the returned value may be set and deleted. +func wrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) (Value, error) { + val := HeapAllocator.allocValueReflect() + return val.reuse(value, nil, parentMap, parentMapKey) +} + +// wrapValueReflect wraps the provide reflect.Value as a value, and panics if there is an error. 
If parent in the data +// tree is a map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted. +func mustWrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) Value { + v, err := wrapValueReflect(value, parentMap, parentMapKey) + if err != nil { + panic(err) + } + return v +} + +// the value interface doesn't care about the type for value.IsNull, so we can use a constant +var nilType = reflect.TypeOf(&struct{}{}) + +// reuse replaces the value of the valueReflect. If parent in the data tree is a map, parentMap and parentMapKey +// must be provided so that the returned value may be set and deleted. +func (r *valueReflect) reuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) (Value, error) { + if cacheEntry == nil { + cacheEntry = TypeReflectEntryOf(value.Type()) + } + if cacheEntry.CanConvertToUnstructured() { + u, err := cacheEntry.ToUnstructured(value) + if err != nil { + return nil, err + } + if u == nil { + value = reflect.Zero(nilType) + } else { + value = reflect.ValueOf(u) + } + } + r.Value = dereference(value) + r.ParentMap = parentMap + r.ParentMapKey = parentMapKey + r.kind = kind(r.Value) + return r, nil +} + +// mustReuse replaces the value of the valueReflect and panics if there is an error. If parent in the data tree is a +// map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted. +func (r *valueReflect) mustReuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) Value { + v, err := r.reuse(value, cacheEntry, parentMap, parentMapKey) + if err != nil { + panic(err) + } + return v +} + +func dereference(val reflect.Value) reflect.Value { + kind := val.Kind() + if (kind == reflect.Interface || kind == reflect.Ptr) && !safeIsNil(val) { + return val.Elem() + } + return val +} + +type valueReflect struct { + ParentMap *reflect.Value + ParentMapKey *reflect.Value + Value reflect.Value + kind reflectType +} + +func (r valueReflect) IsMap() bool { + return r.kind == mapType || r.kind == structMapType +} + +func (r valueReflect) IsList() bool { + return r.kind == listType +} + +func (r valueReflect) IsBool() bool { + return r.kind == boolType +} + +func (r valueReflect) IsInt() bool { + return r.kind == intType || r.kind == uintType +} + +func (r valueReflect) IsFloat() bool { + return r.kind == floatType +} + +func (r valueReflect) IsString() bool { + return r.kind == stringType || r.kind == byteStringType +} + +func (r valueReflect) IsNull() bool { + return r.kind == nullType +} + +type reflectType = int + +const ( + mapType = iota + structMapType + listType + intType + uintType + floatType + stringType + byteStringType + boolType + nullType +) + +func kind(v reflect.Value) reflectType { + typ := v.Type() + rk := typ.Kind() + switch rk { + case reflect.Map: + if v.IsNil() { + return nullType + } + return mapType + case reflect.Struct: + return structMapType + case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: + return intType + case reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uint8: + // Uint64 deliberately excluded, see valueUnstructured.Int. 
+ return uintType + case reflect.Float64, reflect.Float32: + return floatType + case reflect.String: + return stringType + case reflect.Bool: + return boolType + case reflect.Slice: + if v.IsNil() { + return nullType + } + elemKind := typ.Elem().Kind() + if elemKind == reflect.Uint8 { + return byteStringType + } + return listType + case reflect.Chan, reflect.Func, reflect.Ptr, reflect.UnsafePointer, reflect.Interface: + if v.IsNil() { + return nullType + } + panic(fmt.Sprintf("unsupported type: %v", v.Type())) + default: + panic(fmt.Sprintf("unsupported type: %v", v.Type())) + } +} + +// TODO find a cleaner way to avoid panics from reflect.IsNil() +func safeIsNil(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice: + return v.IsNil() + } + return false +} + +func (r valueReflect) AsMap() Map { + return r.AsMapUsing(HeapAllocator) +} + +func (r valueReflect) AsMapUsing(a Allocator) Map { + switch r.kind { + case structMapType: + v := a.allocStructReflect() + v.valueReflect = r + return v + case mapType: + v := a.allocMapReflect() + v.valueReflect = r + return v + default: + panic("value is not a map or struct") + } +} + +func (r valueReflect) AsList() List { + return r.AsListUsing(HeapAllocator) +} + +func (r valueReflect) AsListUsing(a Allocator) List { + if r.IsList() { + v := a.allocListReflect() + v.Value = r.Value + return v + } + panic("value is not a list") +} + +func (r valueReflect) AsBool() bool { + if r.IsBool() { + return r.Value.Bool() + } + panic("value is not a bool") +} + +func (r valueReflect) AsInt() int64 { + if r.kind == intType { + return r.Value.Int() + } + if r.kind == uintType { + return int64(r.Value.Uint()) + } + + panic("value is not an int") +} + +func (r valueReflect) AsFloat() float64 { + if r.IsFloat() { + return r.Value.Float() + } + panic("value is not a float") +} + +func (r valueReflect) AsString() string { + switch r.kind { + case stringType: + return r.Value.String() + case byteStringType: + return base64.StdEncoding.EncodeToString(r.Value.Bytes()) + } + panic("value is not a string") +} + +func (r valueReflect) Unstructured() interface{} { + val := r.Value + switch { + case r.IsNull(): + return nil + case val.Kind() == reflect.Struct: + return structReflect{r}.Unstructured() + case val.Kind() == reflect.Map: + return mapReflect{valueReflect: r}.Unstructured() + case r.IsList(): + return listReflect{r.Value}.Unstructured() + case r.IsString(): + return r.AsString() + case r.IsInt(): + return r.AsInt() + case r.IsBool(): + return r.AsBool() + case r.IsFloat(): + return r.AsFloat() + default: + panic(fmt.Sprintf("value of type %s is not a supported by value reflector", val.Type())) + } +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go new file mode 100644 index 000000000000..ac5a926285d3 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go @@ -0,0 +1,178 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "fmt" +) + +// NewValueInterface creates a Value backed by an "interface{}" type, +// typically an unstructured object in Kubernetes world. +// interface{} must be one of: map[string]interface{}, map[interface{}]interface{}, []interface{}, int types, float types, +// string or boolean. Nested interface{} must also be one of these types. +func NewValueInterface(v interface{}) Value { + return Value(HeapAllocator.allocValueUnstructured().reuse(v)) +} + +type valueUnstructured struct { + Value interface{} +} + +// reuse replaces the value of the valueUnstructured. +func (vi *valueUnstructured) reuse(value interface{}) Value { + vi.Value = value + return vi +} + +func (v valueUnstructured) IsMap() bool { + if _, ok := v.Value.(map[string]interface{}); ok { + return true + } + if _, ok := v.Value.(map[interface{}]interface{}); ok { + return true + } + return false +} + +func (v valueUnstructured) AsMap() Map { + return v.AsMapUsing(HeapAllocator) +} + +func (v valueUnstructured) AsMapUsing(_ Allocator) Map { + if v.Value == nil { + panic("invalid nil") + } + switch t := v.Value.(type) { + case map[string]interface{}: + return mapUnstructuredString(t) + case map[interface{}]interface{}: + return mapUnstructuredInterface(t) + } + panic(fmt.Errorf("not a map: %#v", v)) +} + +func (v valueUnstructured) IsList() bool { + if v.Value == nil { + return false + } + _, ok := v.Value.([]interface{}) + return ok +} + +func (v valueUnstructured) AsList() List { + return v.AsListUsing(HeapAllocator) +} + +func (v valueUnstructured) AsListUsing(_ Allocator) List { + return listUnstructured(v.Value.([]interface{})) +} + +func (v valueUnstructured) IsFloat() bool { + if v.Value == nil { + return false + } else if _, ok := v.Value.(float64); ok { + return true + } else if _, ok := v.Value.(float32); ok { + return true + } + return false +} + +func (v valueUnstructured) AsFloat() float64 { + if f, ok := v.Value.(float32); ok { + return float64(f) + } + return v.Value.(float64) +} + +func (v valueUnstructured) IsInt() bool { + if v.Value == nil { + return false + } else if _, ok := v.Value.(int); ok { + return true + } else if _, ok := v.Value.(int8); ok { + return true + } else if _, ok := v.Value.(int16); ok { + return true + } else if _, ok := v.Value.(int32); ok { + return true + } else if _, ok := v.Value.(int64); ok { + return true + } else if _, ok := v.Value.(uint); ok { + return true + } else if _, ok := v.Value.(uint8); ok { + return true + } else if _, ok := v.Value.(uint16); ok { + return true + } else if _, ok := v.Value.(uint32); ok { + return true + } + return false +} + +func (v valueUnstructured) AsInt() int64 { + if i, ok := v.Value.(int); ok { + return int64(i) + } else if i, ok := v.Value.(int8); ok { + return int64(i) + } else if i, ok := v.Value.(int16); ok { + return int64(i) + } else if i, ok := v.Value.(int32); ok { + return int64(i) + } else if i, ok := v.Value.(uint); ok { + return int64(i) + } else if i, ok := v.Value.(uint8); ok { + return int64(i) + } else if i, ok := v.Value.(uint16); ok { + return int64(i) + } else if i, ok := v.Value.(uint32); ok { + 
return int64(i) + } + return v.Value.(int64) +} + +func (v valueUnstructured) IsString() bool { + if v.Value == nil { + return false + } + _, ok := v.Value.(string) + return ok +} + +func (v valueUnstructured) AsString() string { + return v.Value.(string) +} + +func (v valueUnstructured) IsBool() bool { + if v.Value == nil { + return false + } + _, ok := v.Value.(bool) + return ok +} + +func (v valueUnstructured) AsBool() bool { + return v.Value.(bool) +} + +func (v valueUnstructured) IsNull() bool { + return v.Value == nil +} + +func (v valueUnstructured) Unstructured() interface{} { + return v.Value +} From f5be0716dbe41ea5b909d3c8a81ebac37fd124e0 Mon Sep 17 00:00:00 2001 From: Brian Kassouf Date: Tue, 13 Oct 2020 16:38:21 -0700 Subject: [PATCH 2/7] Revert "Migrate internalshared out (#9727)" (#10141) This reverts commit ee6391b691ac12ab6ca13c3912404f1d3a842bd6. --- command/agent.go | 2 +- command/agent/cache/listener.go | 4 +- command/agent/config/config.go | 2 +- command/agent/config/config_test.go | 2 +- command/base_helpers.go | 2 +- command/debug.go | 2 +- command/server.go | 10 +- command/server/config.go | 2 +- command/server/config_test_helpers.go | 2 +- command/server/listener.go | 4 +- command/server/listener_tcp.go | 6 +- command/server/listener_tcp_test.go | 2 +- .../server/server_seal_transit_acc_test.go | 2 +- go.mod | 13 +- go.sum | 179 +--- .../metricsutil/bucket.go | 0 helper/metricsutil/bucket_test.go | 28 + .../metricsutil/gauge_process.go | 0 helper/metricsutil/gauge_process_test.go | 551 ++++++++++++ .../metricsutil/metricsutil.go | 0 helper/metricsutil/metricsutil_test.go | 46 + helper/metricsutil/wrapped_metrics.go | 83 ++ helper/metricsutil/wrapped_metrics_test.go | 111 +++ helper/testhelpers/seal/sealhelper.go | 2 +- http/forwarded_for_test.go | 2 +- http/handler.go | 2 +- http/sys_metrics.go | 2 +- http/sys_metrics_test.go | 4 +- internalshared/README.md | 14 + .../configutil/config.go | 0 .../configutil/config_util.go | 0 .../configutil/encrypt_decrypt.go | 0 .../configutil/encrypt_decrypt_test.go | 111 +++ .../configutil/kms.go | 0 .../configutil/listener.go | 0 .../configutil/merge.go | 0 .../configutil/telemetry.go | 2 +- .../gatedwriter/writer.go | 0 internalshared/gatedwriter/writer_test.go | 34 + .../kv-builder/builder.go | 0 internalshared/kv-builder/builder_test.go | 139 +++ .../listenerutil/listener.go | 4 +- internalshared/listenerutil/listener_test.go | 88 ++ .../reloadutil/reload.go | 0 internalshared/reloadutil/reload_test.go | 74 ++ vault/core.go | 4 +- vault/core_metrics.go | 9 +- vault/identity_store.go | 4 +- vault/logical_system.go | 2 +- vault/quotas/quotas.go | 2 +- vault/quotas/quotas_rate_limit.go | 2 +- vault/quotas/quotas_rate_limit_test.go | 2 +- vault/quotas/quotas_test.go | 2 +- vault/quotas/quotas_util.go | 2 +- vault/request_handling.go | 9 +- vault/testing.go | 6 +- vault/token_store.go | 13 +- vault/token_store_test.go | 2 +- vault/wrapping.go | 5 +- .../hashicorp/shared-secure-libs/LICENSE | 363 -------- .../metricsutil/wrapped_metrics.go | 89 -- vendor/github.com/json-iterator/go/README.md | 36 +- vendor/github.com/json-iterator/go/any_str.go | 4 +- vendor/github.com/json-iterator/go/config.go | 4 +- .../json-iterator/go/iter_object.go | 4 +- .../json-iterator/go/reflect_extension.go | 2 +- .../json-iterator/go/reflect_map.go | 80 +- .../json-iterator/go/reflect_optional.go | 4 + .../go/reflect_struct_decoder.go | 22 +- vendor/github.com/json-iterator/go/stream.go | 5 +- vendor/github.com/mitchellh/cli/.travis.yml | 10 +- 
vendor/github.com/mitchellh/cli/Makefile | 5 +- vendor/github.com/mitchellh/cli/cli.go | 57 +- vendor/github.com/mitchellh/cli/go.mod | 2 - vendor/github.com/mitchellh/cli/ui_mock.go | 7 +- .../client_golang/prometheus/counter.go | 4 +- .../client_golang/prometheus/desc.go | 1 - .../client_golang/prometheus/doc.go | 37 +- .../client_golang/prometheus/histogram.go | 3 +- .../client_golang/prometheus/metric.go | 1 - .../prometheus/process_collector_windows.go | 24 +- .../client_golang/prometheus/push/push.go | 29 +- .../client_golang/prometheus/registry.go | 1 - .../client_golang/prometheus/summary.go | 1 - .../client_golang/prometheus/value.go | 4 +- .../client_golang/prometheus/vec.go | 12 - .../client_golang/prometheus/wrap.go | 14 +- .../prometheus/common/expfmt/decode.go | 2 +- .../github.com/prometheus/common/model/fnv.go | 2 +- .../prometheus/common/model/time.go | 100 ++- .../prometheus/procfs/.golangci.yml | 4 +- .../prometheus/procfs/Makefile.common | 43 +- .../github.com/prometheus/procfs/cpuinfo.go | 265 +----- .../prometheus/procfs/cpuinfo_arm.go | 18 - .../prometheus/procfs/cpuinfo_arm64.go | 19 - .../prometheus/procfs/cpuinfo_default.go | 19 - .../prometheus/procfs/cpuinfo_mips.go | 18 - .../prometheus/procfs/cpuinfo_mips64.go | 18 - .../prometheus/procfs/cpuinfo_mips64le.go | 18 - .../prometheus/procfs/cpuinfo_mipsle.go | 18 - .../prometheus/procfs/cpuinfo_ppc64.go | 18 - .../prometheus/procfs/cpuinfo_ppc64le.go | 18 - .../prometheus/procfs/cpuinfo_s390x.go | 18 - vendor/github.com/prometheus/procfs/crypto.go | 160 ++-- .../prometheus/procfs/fixtures.ttar | 806 +----------------- .../github.com/prometheus/procfs/fscache.go | 422 --------- vendor/github.com/prometheus/procfs/go.mod | 1 - vendor/github.com/prometheus/procfs/go.sum | 2 - .../prometheus/procfs/internal/util/parse.go | 9 - .../prometheus/procfs/kernel_random.go | 62 -- .../github.com/prometheus/procfs/loadavg.go | 62 -- vendor/github.com/prometheus/procfs/mdstat.go | 2 +- .../github.com/prometheus/procfs/mountinfo.go | 20 +- .../prometheus/procfs/mountstats.go | 20 +- .../prometheus/procfs/net_conntrackstat.go | 153 ---- .../prometheus/procfs/net_softnet.go | 107 ++- .../github.com/prometheus/procfs/net_udp.go | 229 ----- .../github.com/prometheus/procfs/net_unix.go | 222 ++--- vendor/github.com/prometheus/procfs/proc.go | 21 - .../prometheus/procfs/proc_cgroup.go | 98 --- .../prometheus/procfs/proc_fdinfo.go | 34 +- .../github.com/prometheus/procfs/proc_maps.go | 209 ----- .../prometheus/procfs/proc_smaps.go | 165 ---- .../prometheus/procfs/proc_status.go | 37 +- vendor/github.com/prometheus/procfs/swaps.go | 89 -- .../github.com/samuel/go-zookeeper/zk/conn.go | 98 +-- .../samuel/go-zookeeper/zk/constants.go | 25 +- .../github.com/samuel/go-zookeeper/zk/flw.go | 8 +- .../samuel/go-zookeeper/zk/server_help.go | 216 +++++ .../samuel/go-zookeeper/zk/server_java.go | 136 +++ .../samuel/go-zookeeper/zk/structs.go | 19 +- vendor/gopkg.in/yaml.v2/apic.go | 1 - vendor/modules.txt | 23 +- 133 files changed, 2258 insertions(+), 4216 deletions(-) rename {vendor/github.com/hashicorp/shared-secure-libs => helper}/metricsutil/bucket.go (100%) create mode 100644 helper/metricsutil/bucket_test.go rename {vendor/github.com/hashicorp/shared-secure-libs => helper}/metricsutil/gauge_process.go (100%) create mode 100644 helper/metricsutil/gauge_process_test.go rename {vendor/github.com/hashicorp/shared-secure-libs => helper}/metricsutil/metricsutil.go (100%) create mode 100644 helper/metricsutil/metricsutil_test.go create mode 100644 
helper/metricsutil/wrapped_metrics_test.go create mode 100644 internalshared/README.md rename {vendor/github.com/hashicorp/shared-secure-libs => internalshared}/configutil/config.go (100%) rename {vendor/github.com/hashicorp/shared-secure-libs => internalshared}/configutil/config_util.go (100%) rename {vendor/github.com/hashicorp/shared-secure-libs => internalshared}/configutil/encrypt_decrypt.go (100%) create mode 100644 internalshared/configutil/encrypt_decrypt_test.go rename {vendor/github.com/hashicorp/shared-secure-libs => internalshared}/configutil/kms.go (100%) rename {vendor/github.com/hashicorp/shared-secure-libs => internalshared}/configutil/listener.go (100%) rename {vendor/github.com/hashicorp/shared-secure-libs => internalshared}/configutil/merge.go (100%) rename {vendor/github.com/hashicorp/shared-secure-libs => internalshared}/configutil/telemetry.go (99%) rename {vendor/github.com/hashicorp/shared-secure-libs => internalshared}/gatedwriter/writer.go (100%) create mode 100644 internalshared/gatedwriter/writer_test.go rename {vendor/github.com/hashicorp/shared-secure-libs => internalshared}/kv-builder/builder.go (100%) create mode 100644 internalshared/kv-builder/builder_test.go rename {vendor/github.com/hashicorp/shared-secure-libs => internalshared}/listenerutil/listener.go (98%) create mode 100644 internalshared/listenerutil/listener_test.go rename {vendor/github.com/hashicorp/shared-secure-libs => internalshared}/reloadutil/reload.go (100%) create mode 100644 internalshared/reloadutil/reload_test.go delete mode 100644 vendor/github.com/hashicorp/shared-secure-libs/LICENSE delete mode 100644 vendor/github.com/hashicorp/shared-secure-libs/metricsutil/wrapped_metrics.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_arm.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_arm64.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_default.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mips.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mips64.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go delete mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_s390x.go delete mode 100644 vendor/github.com/prometheus/procfs/fscache.go delete mode 100644 vendor/github.com/prometheus/procfs/kernel_random.go delete mode 100644 vendor/github.com/prometheus/procfs/loadavg.go delete mode 100644 vendor/github.com/prometheus/procfs/net_conntrackstat.go delete mode 100644 vendor/github.com/prometheus/procfs/net_udp.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_cgroup.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_maps.go delete mode 100644 vendor/github.com/prometheus/procfs/proc_smaps.go delete mode 100644 vendor/github.com/prometheus/procfs/swaps.go create mode 100644 vendor/github.com/samuel/go-zookeeper/zk/server_help.go create mode 100644 vendor/github.com/samuel/go-zookeeper/zk/server_java.go diff --git a/command/agent.go b/command/agent.go index 8d5fe71591af..36d19ea0632e 100644 --- a/command/agent.go +++ b/command/agent.go @@ -17,7 +17,6 @@ import ( "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/shared-secure-libs/gatedwriter" "github.com/hashicorp/vault/api" 
"github.com/hashicorp/vault/command/agent/auth" "github.com/hashicorp/vault/command/agent/auth/alicloud" @@ -36,6 +35,7 @@ import ( "github.com/hashicorp/vault/command/agent/sink/file" "github.com/hashicorp/vault/command/agent/sink/inmem" "github.com/hashicorp/vault/command/agent/template" + "github.com/hashicorp/vault/internalshared/gatedwriter" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" diff --git a/command/agent/cache/listener.go b/command/agent/cache/listener.go index 414efd6d83b2..4febcd83ce76 100644 --- a/command/agent/cache/listener.go +++ b/command/agent/cache/listener.go @@ -7,9 +7,9 @@ import ( "strings" - "github.com/hashicorp/shared-secure-libs/configutil" - "github.com/hashicorp/shared-secure-libs/listenerutil" "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/listenerutil" ) func StartListener(lnConfig *configutil.Listener) (net.Listener, *tls.Config, error) { diff --git a/command/agent/config/config.go b/command/agent/config/config.go index 5831f3fc6358..67eca00b3052 100644 --- a/command/agent/config/config.go +++ b/command/agent/config/config.go @@ -13,8 +13,8 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/shared-secure-libs/configutil" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/helper/parseutil" "github.com/mitchellh/mapstructure" ) diff --git a/command/agent/config/config_test.go b/command/agent/config/config_test.go index 789d56692649..dda9fc452e64 100644 --- a/command/agent/config/config_test.go +++ b/command/agent/config/config_test.go @@ -7,7 +7,7 @@ import ( "github.com/go-test/deep" ctconfig "github.com/hashicorp/consul-template/config" - "github.com/hashicorp/shared-secure-libs/configutil" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/helper/pointerutil" ) diff --git a/command/base_helpers.go b/command/base_helpers.go index 866f7cbc6ffc..a65822f418dc 100644 --- a/command/base_helpers.go +++ b/command/base_helpers.go @@ -9,8 +9,8 @@ import ( "time" "github.com/hashicorp/errwrap" - kvbuilder "github.com/hashicorp/shared-secure-libs/kv-builder" "github.com/hashicorp/vault/api" + kvbuilder "github.com/hashicorp/vault/internalshared/kv-builder" "github.com/kr/text" homedir "github.com/mitchellh/go-homedir" "github.com/mitchellh/mapstructure" diff --git a/command/debug.go b/command/debug.go index 466fa22ca92d..655583957f46 100644 --- a/command/debug.go +++ b/command/debug.go @@ -13,8 +13,8 @@ import ( "time" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/shared-secure-libs/gatedwriter" "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/internalshared/gatedwriter" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/helper/strutil" "github.com/hashicorp/vault/sdk/version" diff --git a/command/server.go b/command/server.go index 4cc67427134b..3d170e978bae 100644 --- a/command/server.go +++ b/command/server.go @@ -26,16 +26,16 @@ import ( wrapping "github.com/hashicorp/go-kms-wrapping" aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" "github.com/hashicorp/go-multierror" - "github.com/hashicorp/shared-secure-libs/configutil" - "github.com/hashicorp/shared-secure-libs/gatedwriter" - 
"github.com/hashicorp/shared-secure-libs/listenerutil" - "github.com/hashicorp/shared-secure-libs/metricsutil" - "github.com/hashicorp/shared-secure-libs/reloadutil" "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/helper/builtinplugins" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/gatedwriter" + "github.com/hashicorp/vault/internalshared/listenerutil" + "github.com/hashicorp/vault/internalshared/reloadutil" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/helper/mlock" diff --git a/command/server/config.go b/command/server/config.go index 631dc89081e2..bde97ff7a3eb 100644 --- a/command/server/config.go +++ b/command/server/config.go @@ -16,7 +16,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/shared-secure-libs/configutil" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/helper/parseutil" ) diff --git a/command/server/config_test_helpers.go b/command/server/config_test_helpers.go index 7a1b054aa42d..6bc61212bc79 100644 --- a/command/server/config_test_helpers.go +++ b/command/server/config_test_helpers.go @@ -11,7 +11,7 @@ import ( "github.com/go-test/deep" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/shared-secure-libs/configutil" + "github.com/hashicorp/vault/internalshared/configutil" ) func testConfigRaftRetryJoin(t *testing.T) { diff --git a/command/server/listener.go b/command/server/listener.go index 9f1b9195620a..f376ba9b20d1 100644 --- a/command/server/listener.go +++ b/command/server/listener.go @@ -9,9 +9,9 @@ import ( "io" "net" - "github.com/hashicorp/shared-secure-libs/configutil" - "github.com/hashicorp/shared-secure-libs/reloadutil" "github.com/hashicorp/vault/helper/proxyutil" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/reloadutil" "github.com/mitchellh/cli" ) diff --git a/command/server/listener_tcp.go b/command/server/listener_tcp.go index b93c39b4858a..08234017ee05 100644 --- a/command/server/listener_tcp.go +++ b/command/server/listener_tcp.go @@ -9,9 +9,9 @@ import ( "strings" "time" - "github.com/hashicorp/shared-secure-libs/configutil" - "github.com/hashicorp/shared-secure-libs/listenerutil" - "github.com/hashicorp/shared-secure-libs/reloadutil" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/listenerutil" + "github.com/hashicorp/vault/internalshared/reloadutil" "github.com/mitchellh/cli" ) diff --git a/command/server/listener_tcp_test.go b/command/server/listener_tcp_test.go index da1f7a6bda35..c503d427e801 100644 --- a/command/server/listener_tcp_test.go +++ b/command/server/listener_tcp_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/hashicorp/shared-secure-libs/configutil" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/mitchellh/cli" ) diff --git a/command/server/server_seal_transit_acc_test.go b/command/server/server_seal_transit_acc_test.go index 7ba99b1155ad..e6c56c725a1e 100644 --- a/command/server/server_seal_transit_acc_test.go +++ b/command/server/server_seal_transit_acc_test.go @@ -10,9 +10,9 @@ import ( 
"time" "github.com/hashicorp/go-uuid" - "github.com/hashicorp/shared-secure-libs/configutil" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/testhelpers/docker" + "github.com/hashicorp/vault/internalshared/configutil" ) func TestTransitWrapper_Lifecycle(t *testing.T) { diff --git a/go.mod b/go.mod index c3da27493726..370881bdc11a 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ replace github.com/hashicorp/vault/api => ./api replace github.com/hashicorp/vault/sdk => ./sdk require ( + cloud.google.com/go v0.56.0 cloud.google.com/go/spanner v1.5.1 cloud.google.com/go/storage v1.6.0 github.com/Azure/azure-storage-blob-go v0.10.0 @@ -48,6 +49,7 @@ require ( github.com/gocql/gocql v0.0.0-20200624222514-34081eda590e github.com/golang/protobuf v1.4.2 github.com/google/go-github v17.0.0+incompatible + github.com/google/go-metrics-stackdriver v0.2.0 github.com/hashicorp/consul-template v0.25.1 github.com/hashicorp/consul/api v1.4.0 github.com/hashicorp/errwrap v1.0.0 @@ -71,7 +73,6 @@ require ( github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d github.com/hashicorp/raft v1.1.3-0.20201002073007-f367681f9c48 github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab - github.com/hashicorp/shared-secure-libs v0.0.2 github.com/hashicorp/vault-plugin-auth-alicloud v0.5.5 github.com/hashicorp/vault-plugin-auth-azure v0.5.6 github.com/hashicorp/vault-plugin-auth-centrify v0.5.5 @@ -97,6 +98,7 @@ require ( github.com/hashicorp/vault/sdk v0.1.14-0.20201007132131-6a41edbf89f5 github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4 github.com/jcmturner/gokrb5/v8 v8.0.0 + github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f github.com/jefferai/jsonx v1.0.0 github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f @@ -106,7 +108,7 @@ require ( github.com/mattn/go-colorable v0.1.6 github.com/mholt/archiver v3.1.1+incompatible github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5 - github.com/mitchellh/cli v1.1.1 + github.com/mitchellh/cli v1.0.0 github.com/mitchellh/copystructure v1.0.0 github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-testing-interface v1.0.0 @@ -127,10 +129,12 @@ require ( github.com/pkg/errors v0.9.1 github.com/posener/complete v1.2.1 github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d + github.com/prometheus/client_golang v1.4.0 + github.com/prometheus/common v0.9.1 github.com/rboyer/safeio v0.2.1 github.com/ryanuber/columnize v2.1.0+incompatible github.com/ryanuber/go-glob v1.0.0 - github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da + github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec github.com/sasha-s/go-deadlock v0.2.0 github.com/sethvargo/go-limiter v0.3.0 github.com/shirou/gopsutil v2.20.6-0.20200630091542-01afd763e6c0+incompatible @@ -145,8 +149,9 @@ require ( go.mongodb.org/mongo-driver v1.2.1 go.uber.org/atomic v1.6.0 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 - golang.org/x/net v0.0.0-20200625001655-4c5254603344 + golang.org/x/net v0.0.0-20200602114024-627f9648deb9 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d + golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae // indirect golang.org/x/tools v0.0.0-20200521155704-91d71f6c2f04 google.golang.org/api v0.29.0 google.golang.org/grpc v1.29.1 diff --git a/go.sum b/go.sum index ad49c98b7865..4d1b2ee5e311 100644 --- a/go.sum +++ b/go.sum @@ -17,15 +17,12 @@ cloud.google.com/go/bigquery v1.0.1/go.mod 
h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbf cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.6.0 h1:ajp/DjpiCHO71SyIhwb83YsUGAyWuzVvMko+9xCsJLw= cloud.google.com/go/bigquery v1.6.0/go.mod h1:hyFDG0qSGdHNz8Q6nDN8rYIkld0q/+5uBZaelxiDLfE= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/spanner v1.5.1 h1:dWyj10TLlaxH2No6+tXsSCaq9oWgrRbXy1N3x/bhMGU= cloud.google.com/go/spanner v1.5.1/go.mod h1:e1+8M6PF3ntV9Xr57X2Gf+UhylXXYF6gI4WRZ1kfu2A= @@ -78,7 +75,6 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= @@ -102,7 +98,6 @@ github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E= github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Microsoft/go-winio v0.4.13/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -126,22 +121,17 @@ github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWX github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af 
h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14= github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190412020505-60e2075261b6/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f h1:oRD16bhpKNAanfcDDVU+J0NXqsgHIvGbbe/sy+r6Rs0= github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 h1:nWDRPCyCltiTsANwC/n3QZH7Vww33Npq9MKqlwRzI/c= github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 h1:VoHKYIXEQU5LWoambPBOvYxyLqZYHuj+rj5DVnMUc3k= github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -157,16 +147,11 @@ github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e/go.mod h1:QmP9 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf h1:eg0MeVzsP1G42dRafH3vf+al2vQIJU0YHX+1Tw87oco= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.30.27 h1:9gPjZWVDSoQrBO2AvqrWObS6KAZByfEJxQoCYo4ZfK0= github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod 
h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -175,9 +160,7 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= @@ -185,7 +168,6 @@ github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBW github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f h1:ZMEzE7R0WNqgbHplzSBaYJhJi5AZWTCK9baU0ebzG6g= github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f/go.mod h1:HQhVmdUf7dBNwIIdBTivnCDxcf6IZY3/zrb+uKSJz6Y= github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= @@ -206,20 +188,15 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyY github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381 h1:rdRS5BT13Iae9ssvcslol66gfOOXjaLYwqerEn/cl9s= github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod 
h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= @@ -242,19 +219,16 @@ github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/couchbase/gocb/v2 v2.1.4 h1:HRuVhqZpVNIck3FwzTxWh5TnmGXeTmSfjhxkjeradLg= github.com/couchbase/gocb/v2 v2.1.4/go.mod h1:lESKM6wCEajrFVSZUewYuRzNtuNtnRey5wOfcZZsH90= github.com/couchbase/gocbcore/v9 v9.0.4 h1:VM7IiKoK25mq9CdFLLchJMzmHa5Grkn+94pQNaG3oc8= github.com/couchbase/gocbcore/v9 v9.0.4/go.mod h1:jOSQeBSECyNvD7aS4lfuaw+pD5t6ciTOf8hrDP/4Nus= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -271,7 +245,6 @@ github.com/digitalocean/godo v1.7.5 h1:JOQbAO6QT1GGjor0doT0mXefX2FgUDPOpYh2RaXA+ github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -290,18 +263,15 @@ github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdf github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M= github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elazarl/go-bindata-assetfs v1.0.1-0.20200509193318-234c15e7648f h1:AwZUiMWfYSmIiHdFJIubTSs8BFIFoMmUFbeuwBzHIPs= github.com/elazarl/go-bindata-assetfs v1.0.1-0.20200509193318-234c15e7648f/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -313,13 +283,9 @@ github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= -github.com/frankban/quicktest v1.10.0 h1:Gfh+GAJZOAoKZsIZeZbdn2JF10kN1XHNvjsvQK8gVkE= github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= @@ -340,15 +306,12 @@ github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= github.com/go-ldap/ldap/v3 v3.1.10 h1:7WsKqasmPThNvdl0Q5GPpbTDD/ZD98CfuawrMIuh7qQ= github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= @@ -356,7 +319,6 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+ github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= @@ -372,7 +334,6 @@ github.com/gocql/gocql v0.0.0-20200624222514-34081eda590e h1:SroDcndcOU9BVAduPf/ github.com/gocql/gocql v0.0.0-20200624222514-34081eda590e/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= @@ -394,7 +355,6 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= 
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -416,7 +376,6 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8l github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -435,7 +394,6 @@ github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -455,45 +413,33 @@ github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTV github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY= github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= -github.com/gorilla/sessions v1.2.0 h1:S7P+1Hm5V/AT9cjEcUD5uDaQSX0OE577aCXgoaKpYbQ= github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul-template v0.25.1 h1:+D2s8eyRqWyX7GPNxeUi8tsyh8pRn3J6k8giEchPfKQ= github.com/hashicorp/consul-template v0.25.1/go.mod h1:/vUsrJvDuuQHcxEw0zik+YXTS7ZKWZjQeaQhshBmfH0= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.4.0 h1:jfESivXnO5uLdH650JU/6AnjRoHrLhULq0FnC3Kp9EY= github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.4.0 h1:zBtCfKJZcJDBvSCkQJch4ulp59m1rATFLKwNo/LYY30= github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -504,7 +450,6 @@ github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVo github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f h1:7WFMVeuJQp6BkzuTv9O52pzwtEFVUJubKYN+zez8eTI= github.com/hashicorp/go-discover v0.0.0-20200812215701-c4b85f6ed31f/go.mod h1:D4eo8/CN92vm9/9UDG+ldX1/fMFa4kpl8qzyTolus8o= -github.com/hashicorp/go-gatedio v0.5.0 h1:Jm1X5yP4yCqqWj5L1TgW7iZwCVPGtVc+mro5r/XX7Tg= github.com/hashicorp/go-gatedio v0.5.0/go.mod h1:Lr3t8L6IyxD3DAeaUxGcgl2JnRUpWMCsmBl4Omu/2t4= github.com/hashicorp/go-gcp-common v0.5.0/go.mod h1:IDGUI2N/OS3PiU4qZcXJeWKPI6O/9Y8hOrbSiMcqyYw= github.com/hashicorp/go-gcp-common 
v0.6.0 h1:m1X+DK003bj4WEjqOnv+Csepb3zpfN/bidboUeUSj68= @@ -514,13 +459,11 @@ github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrj github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.13.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc= github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping v0.5.12/go.mod h1:yVIWtGOTh/cdGc++/NOlXLus0hJ19Lz4iFrpF6WsZh4= github.com/hashicorp/go-kms-wrapping v0.5.16 h1:7qvB7JYLFART/bt1wafobMU5dDeyseE3ZBKB6UiyxWs= github.com/hashicorp/go-kms-wrapping v0.5.16/go.mod h1:lxD7e9q7ZyCtDEP+tnMevsEvw3M0gmZnneAgv8BaO1Q= github.com/hashicorp/go-kms-wrapping/entropy v0.1.0 h1:xuTi5ZwjimfpvpL09jDE71smCBRpnF5xfo871BSX4gs= @@ -542,7 +485,6 @@ github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6 github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-retryablehttp v0.6.7 h1:8/CAEZt/+F7kR7GevNHulKkUjLht3CPmn7egmhieNKo= github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= @@ -572,7 +514,6 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/mdns v1.0.1 h1:XFSOubp8KWB+Jd2PDyaX5xUd5bhSP/+pTDZVDMzZJM8= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.1.4 h1:gkyML/r71w3FL8gUi74Vk76avkj/9lYAY9lvg0OcoGs= github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d h1:BXqsASWhyiAiEVm6FcltF0dg8XvoookQwmpHn8lstu8= github.com/hashicorp/nomad/api v0.0.0-20191220223628-edc62acd919d/go.mod h1:WKCL+tLVhN1D+APwH3JiTRZoxcdwRk86bWu1LVCUPaE= @@ -586,8 +527,6 @@ github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab/go.mod h github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.3 h1:MWYcmct5EtKz0efYooPcL0yNkem+7kWxqXDi/UIh+8k= github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= -github.com/hashicorp/shared-secure-libs v0.0.2 h1:+izKlB8vxpBK+K6zNmB4DZtDEl1a7fB8JmqdBNhnhjs= -github.com/hashicorp/shared-secure-libs v0.0.2/go.mod h1:xhtA0FH6AYYFOy0sir7u0O0zzdLi7ofU6oWxy+gjnuc= 
github.com/hashicorp/vault-plugin-auth-alicloud v0.5.5 h1:JYf3VYpKs7mOdtcwZWi73S82oXrC/JR7uoPVUd8c4Hk= github.com/hashicorp/vault-plugin-auth-alicloud v0.5.5/go.mod h1:sQ+VNwPQlemgXHXikYH6onfH9gPwDZ1GUVRLz0ZvHx8= github.com/hashicorp/vault-plugin-auth-azure v0.5.6 h1:yg1zVb0zf5dkCz68sVwO9Y258NkF4kDKFCZHvwidKpo= @@ -636,24 +575,19 @@ github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35n github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huaweicloud/golangsdk v0.0.0-20200304081349-45ec0797f2a4/go.mod h1:WQBcHRNX9shz3928lWEvstQJtAtYI7ks6XlgtRT9Tcw= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4 h1:3K3KcD4S6/Y2hevi70EzUTNKOS3cryQyhUnkjE6Tz0w= github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.3.0+incompatible h1:Wa90/+qsITBAPkAZjiByeIGHFcj3Ztu+VzrrIpHjL90= github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= github.com/jarcoal/httpmock v1.0.4/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= -github.com/jarcoal/httpmock v1.0.5 h1:cHtVEcTxRSX4J0je7mWPfc9BpDpqzXSJ5HbymZmyHck= github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jcmturner/aescts v1.0.1 h1:5jhUSHbHSZjQeWFY//Lv8dpP/O3sMDOxrGV/IfCqh44= github.com/jcmturner/aescts v1.0.1/go.mod h1:k9gJoDUf1GH5r2IBtBjwjDCoLELYxOcEhitdP8RL7qQ= @@ -676,28 +610,23 @@ github.com/jefferai/jsonx v1.0.0/go.mod h1:OGmqmi2tTeI/PS+qQfBDToLHHJIy/RMp24fPo github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= -github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod 
h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f h1:ENpDacvnr8faw5ugQmEF1QYk+f/Y9lXFvuYmRxykago= github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f/go.mod h1:KDSfL7qe5ZfQqvlDMkVjCztbmcpp/c8M77vhQP8ZPvk= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kelseyhightower/envconfig v1.3.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= @@ -724,14 +653,10 @@ github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/linode/linodego v0.7.1 h1:4WZmMpSA2NRwlPZcc0+4Gyn7rr99Evk9bnr0B3gXRKE= github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod 
h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -759,12 +684,10 @@ github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5/go.mod github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.1 h1:J64v/xD7Clql+JVKSvkYojLOXu1ibnY9ZjGLwSt/89w= -github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -796,30 +719,20 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mongodb/go-client-mongodb-atlas v0.1.2 h1:qmUme1TlQBPZupmXMnpD8DxnfGXLVGs3w+0Z17HBiSA= github.com/mongodb/go-client-mongodb-atlas v0.1.2/go.mod h1:LS8O0YLkA+sbtOb3fZLF10yY3tJM+1xATXMJ3oU35LU= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwielbut/pointy v1.1.0 h1:U5/YEfoIkaGCHv0St3CgjduqXID4FNRoyZgLM1kY9vg= github.com/mwielbut/pointy v1.1.0/go.mod h1:MvvO+uMFj9T5DMda33HlvogsFBX7pWWKAkFIn4teYwY= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc h1:7xGrl4tTpBQu5Zjll08WupHyq+Sp0Z/adtyf1cfk3Q8= github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc/go.mod h1:1rLVY/DWf3U6vSZgH16S7pymfrhK2lcUlXjgGglw/lY= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod 
h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s= github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= github.com/nwaples/rardecode v1.1.0 h1:vSxaY8vQhOcVr4mm5e8XllHWTiM4JF507A0Katqw7MQ= github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= @@ -834,15 +747,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -854,39 +764,26 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= 
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= github.com/oracle/oci-go-sdk v12.5.0+incompatible h1:pr08ECoaDKHWO9tnzJB1YqClEs7ZK1CFOez2DQocH14= github.com/oracle/oci-go-sdk v12.5.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= -github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v0.0.0-20180815053127-5633e0862627/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.2.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -897,7 +794,6 @@ github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= 
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= @@ -912,15 +808,12 @@ github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0 h1:YVIb/fVcOTMSqtqZWSKnHpSLBxu8DKgxq8z6RuBZwqI= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= @@ -928,20 +821,16 @@ github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7q github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.11.1 h1:0ZISXCMRuCZcxF77aT1BXY5m74mX2vrGYl1dSwBI0Jo= -github.com/prometheus/common v0.11.1/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rboyer/safeio v0.2.1 h1:05xhhdRNAdS3apYm7JRjOqngf4xruaW959jmRxGDuSU= github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy7Fmig= @@ -952,29 +841,24 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY= +github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y= github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= -github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06qF79/FKAJpBvFx3P8Ww4UTIMAe+lpNXDHziac= github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sethvargo/go-limiter v0.3.0 h1:yRMc+Qs2yqw6YJp6UxrO2iUs6DOSq4zcnljbB7/rMns= github.com/sethvargo/go-limiter v0.3.0/go.mod h1:C0kbSFbiriE5k2FFOe18M1YZbAR2Fiwf72uGu0CXCcU= github.com/shirou/gopsutil v2.20.6-0.20200630091542-01afd763e6c0+incompatible h1:IYOqH6sML3rQGNVEQ5foLtpDt4TeW8PIUBuI9f8itkI= github.com/shirou/gopsutil v2.20.6-0.20200630091542-01afd763e6c0+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE= 
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -983,17 +867,12 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180725160413-e900ae048470/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic= github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= -github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -1007,16 +886,12 @@ github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzu github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/square/go-jose v2.4.1+incompatible/go.mod h1:7MxpAF/1WTVUu8Am+T5kNy+t0902CaLWM4Z745MkOa8= github.com/square/go-jose/v3 v3.0.0-20200225220504-708a9fe87ddc/go.mod h1:JbpHhNyeVc538vtj/ECJ3gPYm1VEitNjsLhm4eJQQbg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 
h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= @@ -1030,10 +905,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/tencentcloud/tencentcloud-sdk-go v3.0.83+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4= github.com/tencentcloud/tencentcloud-sdk-go v3.0.171+incompatible h1:K3fcS92NS8cRntIdu8Uqy2ZSePvX73nNhOkKuPGJLXQ= github.com/tencentcloud/tencentcloud-sdk-go v3.0.171+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4= -github.com/tidwall/pretty v1.0.1 h1:WE4RBSZ1x6McVVC8S/Md+Qse8YUv6HRObAx6ke00NY8= github.com/tidwall/pretty v1.0.1/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= @@ -1044,7 +917,6 @@ github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4= github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo= github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= @@ -1053,7 +925,6 @@ github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yandex-cloud/go-genproto v0.0.0-20200722140432-762fe965ce77/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE= @@ -1061,11 +932,9 @@ github.com/yandex-cloud/go-sdk v0.0.0-20200722140627-2194e5077f13/go.mod h1:LEdA github.com/yuin/goldmark v1.1.25/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547 h1:s71VGheLtWmCYsnNjf+s7XE8HsrZnd3EYGrLGWVm7nY= go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547/go.mod h1:YoUyTScD3Vcv2RBm3eGVOq7i1ULiz3OuXoQFWOirmAM= go.mongodb.org/mongo-driver v1.2.1 h1:ANAlYXXM5XmOdW/Nc38jOr+wS5nlk7YihT24U1imiWM= @@ -1073,25 +942,19 @@ go.mongodb.org/mongo-driver v1.2.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1113,7 +976,6 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
-golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1183,11 +1045,9 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200320220750-118fecf932d8/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1234,16 +1094,13 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1258,7 +1115,6 @@ golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1306,7 +1162,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1348,7 +1203,6 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1365,7 +1219,6 @@ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190513181449-d00d292a067c/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1385,7 +1238,6 @@ google.golang.org/genproto v0.0.0-20200323114720-3f67cca34472/go.mod h1:55QSHmfG 
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200416231807-8751e049a2a0/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200519141106-08726f379972/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= @@ -1393,12 +1245,10 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -1422,17 +1272,13 @@ google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.0/go.mod 
h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -1441,7 +1287,6 @@ gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/ldap.v3 v3.0.3 h1:YKRHW/2sIl05JsCtx/5ZuUueFuJyoj/6+DGXe3wp6ro= gopkg.in/ldap.v3 v3.0.3/go.mod h1:oxD7NyBuxchC+SgJDE1Q5Od05eGt29SDQVBmV+HYbzw= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= @@ -1454,20 +1299,16 @@ gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1504,6 +1345,4 @@ sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/vendor/github.com/hashicorp/shared-secure-libs/metricsutil/bucket.go b/helper/metricsutil/bucket.go 
similarity index 100% rename from vendor/github.com/hashicorp/shared-secure-libs/metricsutil/bucket.go rename to helper/metricsutil/bucket.go diff --git a/helper/metricsutil/bucket_test.go b/helper/metricsutil/bucket_test.go new file mode 100644 index 000000000000..f37584781627 --- /dev/null +++ b/helper/metricsutil/bucket_test.go @@ -0,0 +1,28 @@ +package metricsutil + +import ( + "testing" + "time" +) + +func TestTTLBucket_Lookup(t *testing.T) { + testCases := []struct { + Input time.Duration + Expected string + }{ + {30 * time.Second, "1m"}, + {0 * time.Second, "1m"}, + {2 * time.Hour, "2h"}, + {2*time.Hour - time.Second, "2h"}, + {2*time.Hour + time.Second, "1d"}, + {30 * 24 * time.Hour, "30d"}, + {31 * 24 * time.Hour, "+Inf"}, + } + + for _, tc := range testCases { + bucket := TTLBucket(tc.Input) + if bucket != tc.Expected { + t.Errorf("Expected %q, got %q for duration %v.", tc.Expected, bucket, tc.Input) + } + } +} diff --git a/vendor/github.com/hashicorp/shared-secure-libs/metricsutil/gauge_process.go b/helper/metricsutil/gauge_process.go similarity index 100% rename from vendor/github.com/hashicorp/shared-secure-libs/metricsutil/gauge_process.go rename to helper/metricsutil/gauge_process.go diff --git a/helper/metricsutil/gauge_process_test.go b/helper/metricsutil/gauge_process_test.go new file mode 100644 index 000000000000..7a12e90c90a1 --- /dev/null +++ b/helper/metricsutil/gauge_process_test.go @@ -0,0 +1,551 @@ +package metricsutil + +import ( + "context" + "errors" + "fmt" + "math/rand" + "reflect" + "sync/atomic" + "testing" + "time" + + "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" +) + +// SimulatedTime maintains a virtual clock so the test isn't +// dependent upon real time. +// Unfortunately there is no way to run these tests in parallel +// since they rely on the same global timeNow function. +type SimulatedTime struct { + now time.Time + tickerBarrier chan *SimulatedTicker +} + +var _ clock = &SimulatedTime{} + +type SimulatedTicker struct { + ticker *time.Ticker + duration time.Duration + sender chan time.Time +} + +func (s *SimulatedTime) Now() time.Time { + return s.now +} + +func (s *SimulatedTime) NewTicker(d time.Duration) *time.Ticker { + // Create a real ticker, but set its duration to an amount that will never fire for real. + // We'll inject times into the channel directly. + replacementChannel := make(chan time.Time) + t := time.NewTicker(1000 * time.Hour) + t.C = replacementChannel + s.tickerBarrier <- &SimulatedTicker{t, d, replacementChannel} + return t +} + +func (s *SimulatedTime) waitForTicker(t *testing.T) *SimulatedTicker { + t.Helper() + // System under test should create a ticker within 100ms, + // wait for it to show up or else fail the test. 
+ timeout := time.After(100 * time.Millisecond) + select { + case <-timeout: + t.Fatal("Timeout waiting for ticker creation.") + return nil + case t := <-s.tickerBarrier: + return t + } +} + +func (s *SimulatedTime) allowTickers(n int) { + s.tickerBarrier = make(chan *SimulatedTicker, n) +} + +func startSimulatedTime() *SimulatedTime { + s := &SimulatedTime{ + now: time.Now(), + tickerBarrier: make(chan *SimulatedTicker, 1), + } + return s +} + +type SimulatedCollector struct { + numCalls uint32 + callBarrier chan uint32 +} + +func newSimulatedCollector() *SimulatedCollector { + return &SimulatedCollector{ + numCalls: 0, + callBarrier: make(chan uint32, 1), + } +} + +func (s *SimulatedCollector) waitForCall(t *testing.T) { + timeout := time.After(100 * time.Millisecond) + select { + case <-timeout: + t.Fatal("Timeout waiting for call to collection function.") + return + case <-s.callBarrier: + return + } +} + +func (s *SimulatedCollector) EmptyCollectionFunction(ctx context.Context) ([]GaugeLabelValues, error) { + atomic.AddUint32(&s.numCalls, 1) + s.callBarrier <- s.numCalls + return []GaugeLabelValues{}, nil +} + +func TestGauge_Creation(t *testing.T) { + c := newSimulatedCollector() + sink := BlackholeSink() + sink.GaugeInterval = 33 * time.Minute + + key := []string{"example", "count"} + labels := []Label{{"gauge", "test"}} + + p, err := sink.NewGaugeCollectionProcess( + key, + labels, + c.EmptyCollectionFunction, + log.Default(), + ) + if err != nil { + t.Fatalf("Error creating collection process: %v", err) + } + + if _, ok := p.clock.(defaultClock); !ok { + t.Error("Default clock not installed.") + } + + if !reflect.DeepEqual(p.key, key) { + t.Errorf("Key not initialized, got %v but expected %v", + p.key, key) + } + + if !reflect.DeepEqual(p.labels, labels) { + t.Errorf("Labels not initialized, got %v but expected %v", + p.key, key) + } + + if p.originalInterval != sink.GaugeInterval || p.currentInterval != sink.GaugeInterval { + t.Errorf("Intervals not initialized, got %v and %v, expected %v", + p.originalInterval, p.currentInterval, sink.GaugeInterval) + } +} + +func TestGauge_StartDelay(t *testing.T) { + // Work through an entire startup sequence, up to collecting + // the first batch of gauges. + s := startSimulatedTime() + c := newSimulatedCollector() + + sink := BlackholeSink() + sink.GaugeInterval = 2 * time.Hour + + p, err := sink.newGaugeCollectionProcessWithClock( + []string{"example", "count"}, + []Label{{"gauge", "test"}}, + c.EmptyCollectionFunction, + log.Default(), + s, + ) + if err != nil { + t.Fatalf("Error creating collection process: %v", err) + } + go p.Run() + + delayTicker := s.waitForTicker(t) + if delayTicker.duration > sink.GaugeInterval { + t.Errorf("Delayed start %v is more than interval %v.", + delayTicker.duration, sink.GaugeInterval) + } + if c.numCalls > 0 { + t.Error("Collection function has been called") + } + + // Signal the end of delay, then another ticker should start + delayTicker.sender <- time.Now() + + intervalTicker := s.waitForTicker(t) + if intervalTicker.duration != sink.GaugeInterval { + t.Errorf("Ticker duration is %v, expected %v", + intervalTicker.duration, sink.GaugeInterval) + } + if c.numCalls > 0 { + t.Error("Collection function has been called") + } + + // Time's up, ensure the collection function is executed. 
+ intervalTicker.sender <- time.Now() + c.waitForCall(t) + if c.numCalls != 1 { + t.Errorf("Collection function called %v times, expected %v.", c.numCalls, 1) + } + + p.Stop() +} + +func waitForStopped(t *testing.T, p *GaugeCollectionProcess) { + t.Helper() + timeout := time.After(100 * time.Millisecond) + select { + case <-timeout: + t.Fatal("Timeout waiting for process to stop.") + case <-p.stopped: + return + } +} + +func TestGauge_StoppedDuringInitialDelay(t *testing.T) { + // Stop the process before it gets into its main loop + s := startSimulatedTime() + c := newSimulatedCollector() + + sink := BlackholeSink() + sink.GaugeInterval = 2 * time.Hour + + p, err := sink.newGaugeCollectionProcessWithClock( + []string{"example", "count"}, + []Label{{"gauge", "test"}}, + c.EmptyCollectionFunction, + log.Default(), + s, + ) + if err != nil { + t.Fatalf("Error creating collection process: %v", err) + } + go p.Run() + + // Stop during the initial delay, check that goroutine exits + s.waitForTicker(t) + p.Stop() + waitForStopped(t, p) +} + +func TestGauge_StoppedAfterInitialDelay(t *testing.T) { + // Stop the process during its main loop + s := startSimulatedTime() + c := newSimulatedCollector() + + sink := BlackholeSink() + sink.GaugeInterval = 2 * time.Hour + + p, err := sink.newGaugeCollectionProcessWithClock( + []string{"example", "count"}, + []Label{{"gauge", "test"}}, + c.EmptyCollectionFunction, + log.Default(), + s, + ) + if err != nil { + t.Fatalf("Error creating collection process: %v", err) + } + go p.Run() + + // Get through initial delay, wait for interval ticker + delayTicker := s.waitForTicker(t) + delayTicker.sender <- time.Now() + + s.waitForTicker(t) + p.Stop() + waitForStopped(t, p) +} + +func TestGauge_Backoff(t *testing.T) { + s := startSimulatedTime() + s.allowTickers(100) + + c := newSimulatedCollector() + + sink := BlackholeSink() + sink.GaugeInterval = 2 * time.Hour + + threshold := time.Duration(int(sink.GaugeInterval) / 100) + f := func(ctx context.Context) ([]GaugeLabelValues, error) { + atomic.AddUint32(&c.numCalls, 1) + // Move time forward by more than 1% of the gauge interval + s.now = s.now.Add(threshold).Add(time.Second) + c.callBarrier <- c.numCalls + return []GaugeLabelValues{}, nil + } + + p, err := sink.newGaugeCollectionProcessWithClock( + []string{"example", "count"}, + []Label{{"gauge", "test"}}, + f, + log.Default(), + s, + ) + if err != nil { + t.Fatalf("Error creating collection process: %v", err) + } + // Do not run, we'll just going to call an internal function. 
+ p.collectAndFilterGauges() + + if p.currentInterval != 2*p.originalInterval { + t.Errorf("Current interval is %v, should be 2x%v.", + p.currentInterval, + p.originalInterval) + } +} + +func TestGauge_RestartTimer(t *testing.T) { + s := startSimulatedTime() + c := newSimulatedCollector() + sink := BlackholeSink() + sink.GaugeInterval = 2 * time.Hour + + p, err := sink.newGaugeCollectionProcessWithClock( + []string{"example", "count"}, + []Label{{"gauge", "test"}}, + c.EmptyCollectionFunction, + log.Default(), + s, + ) + if err != nil { + t.Fatalf("Error creating collection process: %v", err) + } + + p.resetTicker() + t1 := s.waitForTicker(t) + if t1.duration != p.currentInterval { + t.Fatalf("Bad ticker interval, got %v expected %v", + t1.duration, p.currentInterval) + } + + p.currentInterval = 4 * p.originalInterval + p.resetTicker() + t2 := s.waitForTicker(t) + if t2.duration != p.currentInterval { + t.Fatalf("Bad ticker interval, got %v expected %v", + t1.duration, p.currentInterval) + } +} + +func waitForDone(t *testing.T, + tick chan<- time.Time, + done <-chan struct{}, +) int { + t.Helper() + timeout := time.After(100 * time.Millisecond) + + numTicks := 0 + for { + select { + case <-timeout: + t.Fatal("Timeout waiting for metrics to be sent.") + case tick <- time.Now(): + numTicks += 1 + case <-done: + return numTicks + } + } +} + +func makeLabels(numLabels int) []GaugeLabelValues { + values := make([]GaugeLabelValues, numLabels) + for i := range values { + values[i].Labels = []Label{ + {"test", "true"}, + {"which", fmt.Sprintf("%v", i)}, + } + values[i].Value = float32(i + 1) + } + return values +} + +func TestGauge_InterruptedStreaming(t *testing.T) { + s := startSimulatedTime() + // Long bucket time == low chance of crossing interval + inmemSink := metrics.NewInmemSink( + 1000000*time.Hour, + 2000000*time.Hour) + + sink := NewClusterMetricSink("test", inmemSink) + sink.MaxGaugeCardinality = 500 + sink.GaugeInterval = 2 * time.Hour + + p, err := sink.newGaugeCollectionProcessWithClock( + []string{"example", "count"}, + []Label{{"gauge", "test"}}, + nil, // shouldn't be called + log.Default(), + s, + ) + if err != nil { + t.Fatalf("Error creating collection process: %v", err) + } + + // We'll queue up at least two batches; only one will be sent + // unless we give a ticker. + values := makeLabels(75) + done := make(chan struct{}) + go func() { + p.streamGaugesToSink(values) + close(done) + }() + + p.Stop() + // a nil channel is never writeable + waitForDone(t, nil, done) + + // If we start close to the end of an interval, metrics will + // be split across two buckets. + intervals := inmemSink.Data() + if len(intervals) > 1 { + t.Skip("Detected interval crossing.") + } + + if len(intervals[0].Gauges) == len(values) { + t.Errorf("Found %v gauges, expected fewer.", + len(intervals[0].Gauges)) + } + +} + +// helper function to create a closure that's a GaugeCollector. +func (c *SimulatedCollector) makeFunctionForValues( + values []GaugeLabelValues, + s *SimulatedTime, + advanceTime time.Duration, +) GaugeCollector { + // A function that returns a static list + return func(ctx context.Context) ([]GaugeLabelValues, error) { + atomic.AddUint32(&c.numCalls, 1) + // TODO: this seems like a data race? 
+ s.now = s.now.Add(advanceTime) + c.callBarrier <- c.numCalls + return values, nil + } +} + +func TestGauge_MaximumMeasurements(t *testing.T) { + s := startSimulatedTime() + c := newSimulatedCollector() + + // Long bucket time == low chance of crossing interval + inmemSink := metrics.NewInmemSink( + 1000000*time.Hour, + 2000000*time.Hour) + + sink := NewClusterMetricSink("test", inmemSink) + sink.MaxGaugeCardinality = 500 + sink.GaugeInterval = 2 * time.Hour + + // Create a report larger than the default limit + excessGauges := 100 + values := makeLabels(sink.MaxGaugeCardinality + excessGauges) + rand.Shuffle(len(values), func(i, j int) { + values[i], values[j] = values[j], values[i] + }) + + // Advance time by 0.5% of duration + advance := time.Duration(int(0.005 * float32(sink.GaugeInterval))) + p, err := sink.newGaugeCollectionProcessWithClock( + []string{"example", "count"}, + []Label{{"gauge", "test"}}, + c.makeFunctionForValues(values, s, advance), + log.Default(), + s, + ) + if err != nil { + t.Fatalf("Error creating collection process: %v", err) + } + + // This needs a ticker in order to do its thing, + // so run it in the background and we'll send the ticks + // from here. + done := make(chan struct{}, 1) + go func() { + p.collectAndFilterGauges() + close(done) + }() + + sendTicker := s.waitForTicker(t) + numTicksSent := waitForDone(t, sendTicker.sender, done) + + // 500 items, one delay after after each 25, means that + // 19 ticks are consumed, so 19 or 20 must be sent. + expectedTicks := sink.MaxGaugeCardinality/25 - 1 + if numTicksSent < expectedTicks || numTicksSent > expectedTicks+1 { + t.Errorf("Number of ticks = %v, expected %v.", numTicksSent, expectedTicks) + } + + // If we start close to the end of an interval, metrics will + // be split across two buckets. + intervals := inmemSink.Data() + if len(intervals) > 1 { + t.Skip("Detected interval crossing.") + } + + if len(intervals[0].Gauges) != sink.MaxGaugeCardinality { + t.Errorf("Found %v gauges, expected %v.", + len(intervals[0].Gauges), + sink.MaxGaugeCardinality) + } + + minVal := float32(excessGauges) + for _, v := range intervals[0].Gauges { + if v.Value < minVal { + t.Errorf("Gauge %v with value %v should not have been included.", v.Labels, v.Value) + break + } + } +} + +func TestGauge_MeasurementError(t *testing.T) { + s := startSimulatedTime() + c := newSimulatedCollector() + inmemSink := metrics.NewInmemSink( + 1000000*time.Hour, + 2000000*time.Hour) + sink := NewClusterMetricSink("test", inmemSink) + sink.MaxGaugeCardinality = 500 + sink.GaugeInterval = 2 * time.Hour + + // Create a small report so we don't have to deal with batching. 
+ numGauges := 10 + values := make([]GaugeLabelValues, numGauges) + for i := range values { + values[i].Labels = []Label{ + {"test", "true"}, + {"which", fmt.Sprintf("%v", i)}, + } + values[i].Value = float32(i + 1) + } + + f := func(ctx context.Context) ([]GaugeLabelValues, error) { + atomic.AddUint32(&c.numCalls, 1) + c.callBarrier <- c.numCalls + return values, errors.New("test error") + } + + p, err := sink.newGaugeCollectionProcessWithClock( + []string{"example", "count"}, + []Label{{"gauge", "test"}}, + f, + log.Default(), + s, + ) + if err != nil { + t.Fatalf("Error creating collection process: %v", err) + } + + p.collectAndFilterGauges() + + // We should see no data in the sink + intervals := inmemSink.Data() + if len(intervals) > 1 { + t.Skip("Detected interval crossing.") + } + + if len(intervals[0].Gauges) != 0 { + t.Errorf("Found %v gauges, expected %v.", + len(intervals[0].Gauges), 0) + } +} diff --git a/vendor/github.com/hashicorp/shared-secure-libs/metricsutil/metricsutil.go b/helper/metricsutil/metricsutil.go similarity index 100% rename from vendor/github.com/hashicorp/shared-secure-libs/metricsutil/metricsutil.go rename to helper/metricsutil/metricsutil.go diff --git a/helper/metricsutil/metricsutil_test.go b/helper/metricsutil/metricsutil_test.go new file mode 100644 index 000000000000..1b817ddad192 --- /dev/null +++ b/helper/metricsutil/metricsutil_test.go @@ -0,0 +1,46 @@ +package metricsutil + +import ( + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestFormatFromRequest(t *testing.T) { + testCases := []struct { + original *logical.Request + expected string + }{ + { + original: &logical.Request{Headers: map[string][]string{ + "Accept": { + "application/vnd.google.protobuf", + "schema=\"prometheus/telemetry\"", + }, + }}, + expected: "prometheus", + }, + { + original: &logical.Request{Headers: map[string][]string{ + "Accept": { + "schema=\"prometheus\"", + }, + }}, + expected: "", + }, + { + original: &logical.Request{Headers: map[string][]string{ + "Accept": { + "application/openmetrics-text", + }, + }}, + expected: "prometheus", + }, + } + + for _, tCase := range testCases { + if metricsType := FormatFromRequest(tCase.original); metricsType != tCase.expected { + t.Fatalf("expected %s but got %s", tCase.expected, metricsType) + } + } +} diff --git a/helper/metricsutil/wrapped_metrics.go b/helper/metricsutil/wrapped_metrics.go index af392e44d249..45628d69948a 100644 --- a/helper/metricsutil/wrapped_metrics.go +++ b/helper/metricsutil/wrapped_metrics.go @@ -2,11 +2,94 @@ package metricsutil import ( "strings" + "sync/atomic" + "time" metrics "github.com/armon/go-metrics" "github.com/hashicorp/vault/helper/namespace" ) +// ClusterMetricSink serves as a shim around go-metrics +// and inserts a "cluster" label. +// +// It also provides a mechanism to limit the cardinality of the labels on a gauge +// (at each reporting interval, which isn't sufficient if there is variability in which +// labels are the top N) and a backoff mechanism for gauge computation. +type ClusterMetricSink struct { + // ClusterName is either the cluster ID, or a name provided + // in the telemetry configuration stanza. + // + // Because it may be set after the Core is initialized, we need + // to protect against concurrent access. + ClusterName atomic.Value + + MaxGaugeCardinality int + GaugeInterval time.Duration + + // Sink is the go-metrics instance to send to. 
+ Sink metrics.MetricSink +} + +// Convenience alias +type Label = metrics.Label + +func (m *ClusterMetricSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + m.Sink.SetGaugeWithLabels(key, val, + append(labels, Label{"cluster", m.ClusterName.Load().(string)})) +} + +func (m *ClusterMetricSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + m.Sink.IncrCounterWithLabels(key, val, + append(labels, Label{"cluster", m.ClusterName.Load().(string)})) +} + +func (m *ClusterMetricSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + m.Sink.AddSampleWithLabels(key, val, + append(labels, Label{"cluster", m.ClusterName.Load().(string)})) +} + +func (m *ClusterMetricSink) AddDurationWithLabels(key []string, d time.Duration, labels []Label) { + val := float32(d) / float32(time.Millisecond) + m.AddSampleWithLabels(key, val, labels) +} + +func (m *ClusterMetricSink) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + elapsed := time.Now().Sub(start) + val := float32(elapsed) / float32(time.Millisecond) + m.AddSampleWithLabels(key, val, labels) +} + +// BlackholeSink is a default suitable for use in unit tests. +func BlackholeSink() *ClusterMetricSink { + sink, _ := metrics.New(metrics.DefaultConfig(""), + &metrics.BlackholeSink{}) + cms := &ClusterMetricSink{ + ClusterName: atomic.Value{}, + Sink: sink, + } + cms.ClusterName.Store("") + return cms +} + +func NewClusterMetricSink(clusterName string, sink metrics.MetricSink) *ClusterMetricSink { + cms := &ClusterMetricSink{ + ClusterName: atomic.Value{}, + Sink: sink, + } + cms.ClusterName.Store(clusterName) + return cms +} + +// SetDefaultClusterName changes the cluster name from its default value, +// if it has not previously been configured. +func (m *ClusterMetricSink) SetDefaultClusterName(clusterName string) { + // This is not a true compare-and-swap, but it should be + // consistent enough for normal uses + if m.ClusterName.Load().(string) == "" { + m.ClusterName.Store(clusterName) + } +} + // NamespaceLabel creates a metrics label for the given // Namespace: root is "root"; others are path with the // final '/' removed. diff --git a/helper/metricsutil/wrapped_metrics_test.go b/helper/metricsutil/wrapped_metrics_test.go new file mode 100644 index 000000000000..8ee82af587a9 --- /dev/null +++ b/helper/metricsutil/wrapped_metrics_test.go @@ -0,0 +1,111 @@ +package metricsutil + +import ( + "testing" + "time" + + "github.com/armon/go-metrics" +) + +func isLabelPresent(toFind Label, ls []Label) bool { + for _, l := range ls { + if l == toFind { + return true + } + } + return false +} + +// We can use a sink directly, or wrap the top-level +// go-metrics implementation for testing purposes. +func defaultMetrics(sink metrics.MetricSink) *metrics.Metrics { + // No service name + config := metrics.DefaultConfig("") + + // No host name + config.EnableHostname = false + m, _ := metrics.New(config, sink) + return m +} + +func TestClusterLabelPresent(t *testing.T) { + testClusterName := "test-cluster" + + // Use a ridiculously long time to minimize the chance + // that we have to deal with more than one interval. + // InMemSink rounds down to an interval boundary rather than + // starting one at the time of initialization. 
+ inmemSink := metrics.NewInmemSink( + 1000000*time.Hour, + 2000000*time.Hour) + clusterSink := NewClusterMetricSink(testClusterName, defaultMetrics(inmemSink)) + + key1 := []string{"aaa", "bbb"} + key2 := []string{"ccc", "ddd"} + key3 := []string{"eee", "fff"} + labels1 := []Label{{"dim1", "val1"}} + labels2 := []Label{{"dim2", "val2"}} + labels3 := []Label{{"dim3", "val3"}} + clusterLabel := Label{"cluster", testClusterName} + expectedKey1 := "aaa.bbb;dim1=val1;cluster=" + testClusterName + expectedKey2 := "ccc.ddd;dim2=val2;cluster=" + testClusterName + expectedKey3 := "eee.fff;dim3=val3;cluster=" + testClusterName + + clusterSink.SetGaugeWithLabels(key1, 1.0, labels1) + clusterSink.IncrCounterWithLabels(key2, 2.0, labels2) + clusterSink.AddSampleWithLabels(key3, 3.0, labels3) + + intervals := inmemSink.Data() + // If we start very close to the end of an interval, then our metrics might be + // split across two different buckets. We won't write the code to try to handle that. + // 100000-hours = at most once every 4167 days + if len(intervals) > 1 { + t.Skip("Detected interval crossing.") + } + + // Check Gauge + g, ok := intervals[0].Gauges[expectedKey1] + if !ok { + t.Fatal("Key", expectedKey1, "not found in map", intervals[0].Gauges) + } + if g.Value != 1.0 { + t.Error("Gauge value", g.Value, "does not match", 1.0) + } + if !isLabelPresent(labels1[0], g.Labels) { + t.Error("Gauge label", g.Labels, "does not include", labels1) + } + if !isLabelPresent(clusterLabel, g.Labels) { + t.Error("Gauge label", g.Labels, "does not include", clusterLabel) + } + + // Check Counter + c, ok := intervals[0].Counters[expectedKey2] + if !ok { + t.Fatal("Key", expectedKey2, "not found in map", intervals[0].Counters) + } + if c.Sum != 2.0 { + t.Error("Counter value", c.Sum, "does not match", 2.0) + } + if !isLabelPresent(labels2[0], c.Labels) { + t.Error("Counter label", c.Labels, "does not include", labels2) + } + if !isLabelPresent(clusterLabel, c.Labels) { + t.Error("Counter label", c.Labels, "does not include", clusterLabel) + } + + // Check Sample + s, ok := intervals[0].Samples[expectedKey3] + if !ok { + t.Fatal("Key", expectedKey3, "not found in map", intervals[0].Samples) + } + if s.Sum != 3.0 { + t.Error("Sample value", s.Sum, "does not match", 3.0) + } + if !isLabelPresent(labels3[0], s.Labels) { + t.Error("Sample label", s.Labels, "does not include", labels3) + } + if !isLabelPresent(clusterLabel, s.Labels) { + t.Error("Sample label", s.Labels, "does not include", clusterLabel) + } + +} diff --git a/helper/testhelpers/seal/sealhelper.go b/helper/testhelpers/seal/sealhelper.go index 3a19f7574e82..3fa449e5a051 100644 --- a/helper/testhelpers/seal/sealhelper.go +++ b/helper/testhelpers/seal/sealhelper.go @@ -4,11 +4,11 @@ import ( "path" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/shared-secure-libs/configutil" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/builtin/logical/transit" "github.com/hashicorp/vault/helper/testhelpers/teststorage" "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" diff --git a/http/forwarded_for_test.go b/http/forwarded_for_test.go index 1a9b7b4d22f2..9323f5bf1c72 100644 --- a/http/forwarded_for_test.go +++ b/http/forwarded_for_test.go @@ -7,7 +7,7 @@ import ( "testing" sockaddr "github.com/hashicorp/go-sockaddr" - "github.com/hashicorp/shared-secure-libs/configutil" + 
"github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/vault" ) diff --git a/http/handler.go b/http/handler.go index ac3fe1b1098e..5ab7d74e5a18 100644 --- a/http/handler.go +++ b/http/handler.go @@ -22,8 +22,8 @@ import ( "github.com/hashicorp/errwrap" cleanhttp "github.com/hashicorp/go-cleanhttp" sockaddr "github.com/hashicorp/go-sockaddr" - "github.com/hashicorp/shared-secure-libs/configutil" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/jsonutil" "github.com/hashicorp/vault/sdk/helper/parseutil" diff --git a/http/sys_metrics.go b/http/sys_metrics.go index 70d664b02b43..0e58be3ea262 100644 --- a/http/sys_metrics.go +++ b/http/sys_metrics.go @@ -4,7 +4,7 @@ import ( "fmt" "net/http" - "github.com/hashicorp/shared-secure-libs/metricsutil" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" ) diff --git a/http/sys_metrics_test.go b/http/sys_metrics_test.go index 8f4c4f12a083..492e6514baa4 100644 --- a/http/sys_metrics_test.go +++ b/http/sys_metrics_test.go @@ -5,8 +5,8 @@ import ( "time" "github.com/armon/go-metrics" - "github.com/hashicorp/shared-secure-libs/configutil" - "github.com/hashicorp/shared-secure-libs/metricsutil" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/vault" ) diff --git a/internalshared/README.md b/internalshared/README.md new file mode 100644 index 000000000000..dd2ee8a0b22c --- /dev/null +++ b/internalshared/README.md @@ -0,0 +1,14 @@ +HashiCorp-internal libs +================= + +Do not use these unless you know what you're doing. + +These libraries are used by other HashiCorp software to reduce code duplication +and increase consistency. They are not libraries needed by Vault plugins -- +those are in the sdk/ module. + +There are no compatibility guarantees. Things in here may change or move or +disappear at any time. + +If you are a Vault plugin author and think you need a library in here in your +plugin, please open an issue for discussion. 
diff --git a/vendor/github.com/hashicorp/shared-secure-libs/configutil/config.go b/internalshared/configutil/config.go similarity index 100% rename from vendor/github.com/hashicorp/shared-secure-libs/configutil/config.go rename to internalshared/configutil/config.go diff --git a/vendor/github.com/hashicorp/shared-secure-libs/configutil/config_util.go b/internalshared/configutil/config_util.go similarity index 100% rename from vendor/github.com/hashicorp/shared-secure-libs/configutil/config_util.go rename to internalshared/configutil/config_util.go diff --git a/vendor/github.com/hashicorp/shared-secure-libs/configutil/encrypt_decrypt.go b/internalshared/configutil/encrypt_decrypt.go similarity index 100% rename from vendor/github.com/hashicorp/shared-secure-libs/configutil/encrypt_decrypt.go rename to internalshared/configutil/encrypt_decrypt.go diff --git a/internalshared/configutil/encrypt_decrypt_test.go b/internalshared/configutil/encrypt_decrypt_test.go new file mode 100644 index 000000000000..4d14ba23b994 --- /dev/null +++ b/internalshared/configutil/encrypt_decrypt_test.go @@ -0,0 +1,111 @@ +package configutil + +import ( + "bytes" + "context" + "encoding/base64" + "testing" + + wrapping "github.com/hashicorp/go-kms-wrapping" + "google.golang.org/protobuf/proto" +) + +func getAEADTestKMS(t *testing.T) { +} + +func TestEncryptParams(t *testing.T) { + rawStr := ` +storage "consul" { + api_key = "{{encrypt(foobar)}}" +} + +telemetry { + some_param = "something" + circonus_api_key = "{{encrypt(barfoo)}}" +} +` + + finalStr := ` +storage "consul" { + api_key = "foobar" +} + +telemetry { + some_param = "something" + circonus_api_key = "barfoo" +} +` + + reverser := new(reversingWrapper) + out, err := EncryptDecrypt(rawStr, false, false, reverser) + if err != nil { + t.Fatal(err) + } + + first := true + locs := decryptRegex.FindAllIndex([]byte(out), -1) + for _, match := range locs { + matchBytes := []byte(out)[match[0]:match[1]] + matchBytes = bytes.TrimSuffix(bytes.TrimPrefix(matchBytes, []byte("{{decrypt(")), []byte(")}}")) + inMsg, err := base64.RawURLEncoding.DecodeString(string(matchBytes)) + if err != nil { + t.Fatal(err) + } + inBlob := new(wrapping.EncryptedBlobInfo) + if err := proto.Unmarshal(inMsg, inBlob); err != nil { + t.Fatal(err) + } + ct := string(inBlob.Ciphertext) + if first { + if ct != "raboof" { + t.Fatal(ct) + } + first = false + } else { + if ct != "oofrab" { + t.Fatal(ct) + } + } + } + + decOut, err := EncryptDecrypt(out, true, false, reverser) + if err != nil { + t.Fatal(err) + } + + if decOut != rawStr { + t.Fatal(decOut) + } + + decOut, err = EncryptDecrypt(out, true, true, reverser) + if err != nil { + t.Fatal(err) + } + + if decOut != finalStr { + t.Fatal(decOut) + } +} + +type reversingWrapper struct{} + +func (r *reversingWrapper) Type() string { return "reversing" } +func (r *reversingWrapper) KeyID() string { return "reverser" } +func (r *reversingWrapper) HMACKeyID() string { return "" } +func (r *reversingWrapper) Init(_ context.Context) error { return nil } +func (r *reversingWrapper) Finalize(_ context.Context) error { return nil } +func (r *reversingWrapper) Encrypt(_ context.Context, input []byte, _ []byte) (*wrapping.EncryptedBlobInfo, error) { + return &wrapping.EncryptedBlobInfo{ + Ciphertext: r.reverse(input), + }, nil +} +func (r *reversingWrapper) Decrypt(_ context.Context, input *wrapping.EncryptedBlobInfo, _ []byte) ([]byte, error) { + return r.reverse(input.Ciphertext), nil +} +func (r *reversingWrapper) reverse(input []byte) []byte { + 
output := make([]byte, len(input)) + for i, j := 0, len(input)-1; i < j; i, j = i+1, j-1 { + output[i], output[j] = input[j], input[i] + } + return output +} diff --git a/vendor/github.com/hashicorp/shared-secure-libs/configutil/kms.go b/internalshared/configutil/kms.go similarity index 100% rename from vendor/github.com/hashicorp/shared-secure-libs/configutil/kms.go rename to internalshared/configutil/kms.go diff --git a/vendor/github.com/hashicorp/shared-secure-libs/configutil/listener.go b/internalshared/configutil/listener.go similarity index 100% rename from vendor/github.com/hashicorp/shared-secure-libs/configutil/listener.go rename to internalshared/configutil/listener.go diff --git a/vendor/github.com/hashicorp/shared-secure-libs/configutil/merge.go b/internalshared/configutil/merge.go similarity index 100% rename from vendor/github.com/hashicorp/shared-secure-libs/configutil/merge.go rename to internalshared/configutil/merge.go diff --git a/vendor/github.com/hashicorp/shared-secure-libs/configutil/telemetry.go b/internalshared/configutil/telemetry.go similarity index 99% rename from vendor/github.com/hashicorp/shared-secure-libs/configutil/telemetry.go rename to internalshared/configutil/telemetry.go index b0069c161463..a7d49a70cccb 100644 --- a/vendor/github.com/hashicorp/shared-secure-libs/configutil/telemetry.go +++ b/internalshared/configutil/telemetry.go @@ -17,7 +17,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/shared-secure-libs/metricsutil" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/sdk/helper/parseutil" "github.com/mitchellh/cli" "google.golang.org/api/option" diff --git a/vendor/github.com/hashicorp/shared-secure-libs/gatedwriter/writer.go b/internalshared/gatedwriter/writer.go similarity index 100% rename from vendor/github.com/hashicorp/shared-secure-libs/gatedwriter/writer.go rename to internalshared/gatedwriter/writer.go diff --git a/internalshared/gatedwriter/writer_test.go b/internalshared/gatedwriter/writer_test.go new file mode 100644 index 000000000000..31659a8adda6 --- /dev/null +++ b/internalshared/gatedwriter/writer_test.go @@ -0,0 +1,34 @@ +package gatedwriter + +import ( + "bytes" + "io" + "testing" +) + +func TestWriter_impl(t *testing.T) { + var _ io.Writer = new(Writer) +} + +func TestWriter(t *testing.T) { + buf := new(bytes.Buffer) + w := NewWriter(buf) + w.Write([]byte("foo\n")) + w.Write([]byte("bar\n")) + + if buf.String() != "" { + t.Fatalf("bad: %s", buf.String()) + } + + w.Flush() + + if buf.String() != "foo\nbar\n" { + t.Fatalf("bad: %s", buf.String()) + } + + w.Write([]byte("baz\n")) + + if buf.String() != "foo\nbar\nbaz\n" { + t.Fatalf("bad: %s", buf.String()) + } +} diff --git a/vendor/github.com/hashicorp/shared-secure-libs/kv-builder/builder.go b/internalshared/kv-builder/builder.go similarity index 100% rename from vendor/github.com/hashicorp/shared-secure-libs/kv-builder/builder.go rename to internalshared/kv-builder/builder.go diff --git a/internalshared/kv-builder/builder_test.go b/internalshared/kv-builder/builder_test.go new file mode 100644 index 000000000000..46b4d05b057d --- /dev/null +++ b/internalshared/kv-builder/builder_test.go @@ -0,0 +1,139 @@ +package kvbuilder + +import ( + "bytes" + "reflect" + "testing" +) + +func TestBuilder_basic(t *testing.T) { + var b Builder + err := b.Add("foo=bar", "bar=baz", "baz=") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := map[string]interface{}{ + 
"foo": "bar", + "bar": "baz", + "baz": "", + } + actual := b.Map() + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestBuilder_escapedAt(t *testing.T) { + var b Builder + err := b.Add("foo=bar", "bar=\\@baz") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := map[string]interface{}{ + "foo": "bar", + "bar": "@baz", + } + actual := b.Map() + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestBuilder_stdin(t *testing.T) { + var b Builder + b.Stdin = bytes.NewBufferString("baz") + err := b.Add("foo=bar", "bar=-") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := map[string]interface{}{ + "foo": "bar", + "bar": "baz", + } + actual := b.Map() + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestBuilder_stdinMap(t *testing.T) { + var b Builder + b.Stdin = bytes.NewBufferString(`{"foo": "bar"}`) + err := b.Add("-", "bar=baz") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := map[string]interface{}{ + "foo": "bar", + "bar": "baz", + } + actual := b.Map() + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestBuilder_stdinTwice(t *testing.T) { + var b Builder + b.Stdin = bytes.NewBufferString(`{"foo": "bar"}`) + err := b.Add("-", "-") + if err == nil { + t.Fatal("should error") + } +} + +func TestBuilder_sameKeyTwice(t *testing.T) { + var b Builder + err := b.Add("foo=bar", "foo=baz") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := map[string]interface{}{ + "foo": []interface{}{"bar", "baz"}, + } + actual := b.Map() + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestBuilder_sameKeyMultipleTimes(t *testing.T) { + var b Builder + err := b.Add("foo=bar", "foo=baz", "foo=bay", "foo=bax", "bar=baz") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := map[string]interface{}{ + "foo": []interface{}{"bar", "baz", "bay", "bax"}, + "bar": "baz", + } + actual := b.Map() + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} + +func TestBuilder_specialCharactersInKey(t *testing.T) { + var b Builder + b.Stdin = bytes.NewBufferString("{\"foo\": \"bay\"}") + err := b.Add("@foo=bar", "-foo=baz", "-") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := map[string]interface{}{ + "@foo": "bar", + "-foo": "baz", + "foo": "bay", + } + actual := b.Map() + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} diff --git a/vendor/github.com/hashicorp/shared-secure-libs/listenerutil/listener.go b/internalshared/listenerutil/listener.go similarity index 98% rename from vendor/github.com/hashicorp/shared-secure-libs/listenerutil/listener.go rename to internalshared/listenerutil/listener.go index 7ce855d01daa..c72342c22bc2 100644 --- a/vendor/github.com/hashicorp/shared-secure-libs/listenerutil/listener.go +++ b/internalshared/listenerutil/listener.go @@ -11,8 +11,8 @@ import ( "strconv" "github.com/hashicorp/errwrap" - "github.com/hashicorp/shared-secure-libs/configutil" - "github.com/hashicorp/shared-secure-libs/reloadutil" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/reloadutil" "github.com/hashicorp/vault/sdk/helper/tlsutil" "github.com/jefferai/isbadcipher" "github.com/mitchellh/cli" diff --git a/internalshared/listenerutil/listener_test.go b/internalshared/listenerutil/listener_test.go new file mode 100644 index 
000000000000..c6bc5afad743 --- /dev/null +++ b/internalshared/listenerutil/listener_test.go @@ -0,0 +1,88 @@ +package listenerutil + +import ( + "io/ioutil" + "os" + osuser "os/user" + "strconv" + "testing" +) + +func TestUnixSocketListener(t *testing.T) { + t.Run("ids", func(t *testing.T) { + socket, err := ioutil.TempFile("", "socket") + if err != nil { + t.Fatal(err) + } + defer os.Remove(socket.Name()) + + uid, gid := os.Getuid(), os.Getgid() + + u, err := osuser.LookupId(strconv.Itoa(uid)) + if err != nil { + t.Fatal(err) + } + user := u.Username + + g, err := osuser.LookupGroupId(strconv.Itoa(gid)) + if err != nil { + t.Fatal(err) + } + group := g.Name + + l, err := UnixSocketListener(socket.Name(), &UnixSocketsConfig{ + User: user, + Group: group, + Mode: "644", + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + fi, err := os.Stat(socket.Name()) + if err != nil { + t.Fatal(err) + } + + mode, err := strconv.ParseUint("644", 8, 32) + if err != nil { + t.Fatal(err) + } + if fi.Mode().Perm() != os.FileMode(mode) { + t.Fatalf("failed to set permissions on the socket file") + } + }) + t.Run("names", func(t *testing.T) { + socket, err := ioutil.TempFile("", "socket") + if err != nil { + t.Fatal(err) + } + defer os.Remove(socket.Name()) + + uid, gid := os.Getuid(), os.Getgid() + l, err := UnixSocketListener(socket.Name(), &UnixSocketsConfig{ + User: strconv.Itoa(uid), + Group: strconv.Itoa(gid), + Mode: "644", + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + fi, err := os.Stat(socket.Name()) + if err != nil { + t.Fatal(err) + } + + mode, err := strconv.ParseUint("644", 8, 32) + if err != nil { + t.Fatal(err) + } + if fi.Mode().Perm() != os.FileMode(mode) { + t.Fatalf("failed to set permissions on the socket file") + } + }) + +} diff --git a/vendor/github.com/hashicorp/shared-secure-libs/reloadutil/reload.go b/internalshared/reloadutil/reload.go similarity index 100% rename from vendor/github.com/hashicorp/shared-secure-libs/reloadutil/reload.go rename to internalshared/reloadutil/reload.go diff --git a/internalshared/reloadutil/reload_test.go b/internalshared/reloadutil/reload_test.go new file mode 100644 index 000000000000..872ab37168c2 --- /dev/null +++ b/internalshared/reloadutil/reload_test.go @@ -0,0 +1,74 @@ +package reloadutil + +import ( + "crypto/x509" + "io/ioutil" + "testing" + + "github.com/hashicorp/errwrap" +) + +func TestReload_KeyWithPassphrase(t *testing.T) { + password := "password" + cert := []byte(`-----BEGIN CERTIFICATE----- +MIICLzCCAZgCCQCq27CeP4WhlDANBgkqhkiG9w0BAQUFADBcMQswCQYDVQQGEwJV +UzELMAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xEjAQBgNVBAoM +CUhhc2hpQ29ycDEUMBIGA1UEAwwLbXl2YXVsdC5jb20wHhcNMTcxMjEzMjEzNTM3 +WhcNMTgxMjEzMjEzNTM3WjBcMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExFjAU +BgNVBAcMDVNhbiBGcmFuY2lzY28xEjAQBgNVBAoMCUhhc2hpQ29ycDEUMBIGA1UE +AwwLbXl2YXVsdC5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMvsz/9l +EJIlRG6DOw4fXdB/aJgJk2rR8cU0D8+vECIzb+MdDK0cBHtLiVpZC/RnZMdMzjGn +Z++Fp3dEnT6CD0IjKdJcD+qSyZSjHIuYpHjnjrVlM/Le0xST7egoG+fXkSt4myzG +ec2WK1jcZefRRGPycvMqx1yUWU76jDdFZSL5AgMBAAEwDQYJKoZIhvcNAQEFBQAD +gYEAQfYE26FLZ9SPPU8bHNDxoxDmGrn8yJ78C490Qpix/w6gdLaBtILenrZbhpnB +3L3okraM8mplaN2KdAcpnsr4wPv9hbYkam0coxCQEKs8ltHSBaXT6uKRWb00nkGu +yAXDRpuPdFRqbXW3ZFC5broUrz4ujxTDKfVeIn0zpPZkv24= +-----END CERTIFICATE-----`) + key := []byte(`-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: DES-EDE3-CBC,64B032D83BD6A6DC + +qVJ+mXEBKMkUPrQ8odHunMpPgChQUny4CX73/dAcm7O9iXIv9eXQSxj2qfgCOloj 
+vthg7jYNwtRb0ydzCEnEud35zWw38K/l19/pe4ULfNXlOddlsk4XIHarBiz+KUaX +WTbNk0H+DwdcEwhprPgpTk8gp88lZBiHCnTG/s8v/JNt+wkdqjfAp0Xbm9m+OZ7s +hlNxZin1OuBdprBqfKWBltUALZYiIBhspMTmh+jGQSyEKNTAIBejIiRH5+xYWuOy +xKencq8UpQMOMPR2ZiSw42dU9j8HHMgldI7KszU2FDIEFXG7aSjcxNyyybeBT+Uz +YPoxGxSdUYWqaz50UszvHg/QWR8NlPlQc3nFAUVpGKUF9MEQCIAK8HjcpMP+IAVO +ertp4cTa2Rpm9YeoFrY6tabvmXApXlQPw6rBn6o5KpceWG3ceOsDOsT+e3edHu9g +SGO4hjggbRpO+dBOuwfw4rMn9X1BbqXKJcREAmrgVVSf9/s942E4YOQ+IGJPdtmY +WHAFk8hiJepsVCA2NpwVlAD+QbPPaR2RtvYOtq3IKlWRuVQ+6dpxDsz5FlJhs2L+ +HsX6XqtwuQM8kk1hO8Gm3VeV7+b64r9kfbO8jCM18GexCYiCtig51mJW6IO42d1K +bS1axMx/KeDc/sy7LKEbHnjnYanpGz2Wa2EWhnWAeNXD1nUfUNFPp2SsIGbCMnat +mC4O4cO7YRl3+iJg3kHtTPGtgtCjrZcjlyBtxT2VC7SsTcTXZBWovczMIstyr4Ka +opM24uvQT3Bc0UM0WNh3tdRFuboxDeBDh7PX/2RIoiaMuCCiRZ3O0A== +-----END RSA PRIVATE KEY-----`) + tempDir, err := ioutil.TempDir("", "vault-test") + if err != nil { + t.Fatalf("Error creating temporary directory: %s", err) + } + keyFile := tempDir + "/server.key" + certFile := tempDir + "/server.crt" + + err = ioutil.WriteFile(certFile, cert, 0755) + if err != nil { + t.Fatalf("Error writing to temp file: %s", err) + } + err = ioutil.WriteFile(keyFile, key, 0755) + if err != nil { + t.Fatalf("Error writing to temp file: %s", err) + } + + cg := NewCertificateGetter(certFile, keyFile, "") + err = cg.Reload() + if err == nil { + t.Fatal("error expected") + } + if !errwrap.Contains(err, x509.IncorrectPasswordError.Error()) { + t.Fatalf("expected incorrect password error, got %v", err) + } + + cg = NewCertificateGetter(certFile, keyFile, password) + if err := cg.Reload(); err != nil { + t.Fatalf("err: %v", err) + } +} diff --git a/vault/core.go b/vault/core.go index efadb824f175..7a35a8a6d0b1 100644 --- a/vault/core.go +++ b/vault/core.go @@ -26,12 +26,12 @@ import ( aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-uuid" - "github.com/hashicorp/shared-secure-libs/metricsutil" - "github.com/hashicorp/shared-secure-libs/reloadutil" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internalshared/reloadutil" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/jsonutil" diff --git a/vault/core_metrics.go b/vault/core_metrics.go index c2674804b628..499c8bcce639 100644 --- a/vault/core_metrics.go +++ b/vault/core_metrics.go @@ -8,8 +8,7 @@ import ( "time" "github.com/armon/go-metrics" - "github.com/hashicorp/shared-secure-libs/metricsutil" - vaultmetrics "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/logical" ) @@ -339,7 +338,7 @@ func (c *Core) kvSecretGaugeCollector(ctx context.Context) ([]metricsutil.GaugeL } results[i].Labels = []metrics.Label{ - vaultmetrics.NamespaceLabel(m.Namespace), + metricsutil.NamespaceLabel(m.Namespace), {"mount_point", m.MountPoint}, } @@ -370,7 +369,7 @@ func (c *Core) entityGaugeCollector(ctx context.Context) ([]metricsutil.GaugeLab values := make([]metricsutil.GaugeLabelValues, len(allNamespaces)) for i := range values { values[i].Labels = []metrics.Label{ - vaultmetrics.NamespaceLabel(allNamespaces[i]), + metricsutil.NamespaceLabel(allNamespaces[i]), } values[i].Value = 
float32(byNamespace[allNamespaces[i].ID]) } @@ -409,7 +408,7 @@ func (c *Core) entityGaugeCollectorByMount(ctx context.Context) ([]metricsutil.G } values = append(values, metricsutil.GaugeLabelValues{ Labels: []metrics.Label{ - vaultmetrics.NamespaceLabel(mountEntry.namespace), + metricsutil.NamespaceLabel(mountEntry.namespace), {"auth_method", mountEntry.Type}, {"mount_point", "auth/" + mountEntry.Path}, }, diff --git a/vault/identity_store.go b/vault/identity_store.go index 5d03d9bee899..4b844d58b7ed 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -12,7 +12,7 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" "github.com/hashicorp/vault/helper/identity" - vaultmetrics "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/storagepacker" "github.com/hashicorp/vault/sdk/framework" @@ -576,7 +576,7 @@ func (i *IdentityStore) CreateOrFetchEntity(ctx context.Context, alias *logical. if err != nil { nsLabel = metrics.Label{"namespace", "unknown"} } else { - nsLabel = vaultmetrics.NamespaceLabel(ns) + nsLabel = metricsutil.NamespaceLabel(ns) } i.core.MetricSink().IncrCounterWithLabels( []string{"identity", "entity", "creation"}, diff --git a/vault/logical_system.go b/vault/logical_system.go index 6419f2b57ad3..c8841a369439 100644 --- a/vault/logical_system.go +++ b/vault/logical_system.go @@ -23,9 +23,9 @@ import ( memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-multierror" uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/shared-secure-libs/metricsutil" "github.com/hashicorp/vault/helper/hostutil" "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/monitor" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/helper/random" diff --git a/vault/quotas/quotas.go b/vault/quotas/quotas.go index 5b91407a4f26..afac32a18f58 100644 --- a/vault/quotas/quotas.go +++ b/vault/quotas/quotas.go @@ -10,7 +10,7 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - "github.com/hashicorp/shared-secure-libs/metricsutil" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/sdk/logical" ) diff --git a/vault/quotas/quotas_rate_limit.go b/vault/quotas/quotas_rate_limit.go index bd2e541bcb1c..4c85f20483be 100644 --- a/vault/quotas/quotas_rate_limit.go +++ b/vault/quotas/quotas_rate_limit.go @@ -10,7 +10,7 @@ import ( "github.com/armon/go-metrics" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" - "github.com/hashicorp/shared-secure-libs/metricsutil" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/sdk/helper/pathmanager" "github.com/sethvargo/go-limiter" "github.com/sethvargo/go-limiter/httplimit" diff --git a/vault/quotas/quotas_rate_limit_test.go b/vault/quotas/quotas_rate_limit_test.go index 061858a2f428..270254ca0dad 100644 --- a/vault/quotas/quotas_rate_limit_test.go +++ b/vault/quotas/quotas_rate_limit_test.go @@ -8,7 +8,7 @@ import ( "time" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/shared-secure-libs/metricsutil" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/stretchr/testify/require" "go.uber.org/atomic" diff --git a/vault/quotas/quotas_test.go b/vault/quotas/quotas_test.go index bd9a5001de7d..3f867ced719d 100644 --- 
a/vault/quotas/quotas_test.go +++ b/vault/quotas/quotas_test.go @@ -7,7 +7,7 @@ import ( "github.com/go-test/deep" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/shared-secure-libs/metricsutil" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/stretchr/testify/require" ) diff --git a/vault/quotas/quotas_util.go b/vault/quotas/quotas_util.go index edd14f0e6542..983417476cfb 100644 --- a/vault/quotas/quotas_util.go +++ b/vault/quotas/quotas_util.go @@ -6,7 +6,7 @@ import ( "context" log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/shared-secure-libs/metricsutil" + "github.com/hashicorp/vault/helper/metricsutil" memdb "github.com/hashicorp/go-memdb" ) diff --git a/vault/request_handling.go b/vault/request_handling.go index b8880bbefb79..634135ca3902 100644 --- a/vault/request_handling.go +++ b/vault/request_handling.go @@ -12,11 +12,10 @@ import ( "github.com/hashicorp/errwrap" multierror "github.com/hashicorp/go-multierror" sockaddr "github.com/hashicorp/go-sockaddr" - "github.com/hashicorp/shared-secure-libs/configutil" - "github.com/hashicorp/shared-secure-libs/metricsutil" "github.com/hashicorp/vault/helper/identity" - vaultmetrics "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/errutil" @@ -870,7 +869,7 @@ func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp []string{"secret", "lease", "creation"}, 1, []metrics.Label{ - vaultmetrics.NamespaceLabel(ns), + metricsutil.NamespaceLabel(ns), {"secret_engine", req.MountType}, {"mount_point", mountPointWithoutNs}, {"creation_ttl", ttl_label}, @@ -1267,7 +1266,7 @@ func (c *Core) handleLoginRequest(ctx context.Context, req *logical.Request) (re []string{"token", "creation"}, 1, []metrics.Label{ - vaultmetrics.NamespaceLabel(ns), + metricsutil.NamespaceLabel(ns), {"auth_method", req.MountType}, {"mount_point", mountPointWithoutNs}, {"creation_ttl", ttl_label}, diff --git a/vault/testing.go b/vault/testing.go index 09bc114f09b2..3af6488c1184 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -30,13 +30,13 @@ import ( cleanhttp "github.com/hashicorp/go-cleanhttp" log "github.com/hashicorp/go-hclog" raftlib "github.com/hashicorp/raft" - "github.com/hashicorp/shared-secure-libs/configutil" - "github.com/hashicorp/shared-secure-libs/metricsutil" - "github.com/hashicorp/shared-secure-libs/reloadutil" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/reloadutil" dbMysql "github.com/hashicorp/vault/plugins/database/mysql" dbPostgres "github.com/hashicorp/vault/plugins/database/postgresql" "github.com/hashicorp/vault/sdk/framework" diff --git a/vault/token_store.go b/vault/token_store.go index 1229ea7fec3f..a12f19bb9c8b 100644 --- a/vault/token_store.go +++ b/vault/token_store.go @@ -20,9 +20,8 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-sockaddr" - "github.com/hashicorp/shared-secure-libs/metricsutil" 
"github.com/hashicorp/vault/helper/identity" - vaultmetrics "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/base62" @@ -2729,7 +2728,7 @@ func (ts *TokenStore) handleCreateCommon(ctx context.Context, req *logical.Reque []string{"token", "creation"}, 1, []metrics.Label{ - vaultmetrics.NamespaceLabel(ns), + metricsutil.NamespaceLabel(ns), {"auth_method", "token"}, {"mount_point", mountPointWithoutNs}, // path, not accessor {"creation_ttl", ttl_label}, @@ -3430,7 +3429,7 @@ func (ts *TokenStore) gaugeCollector(ctx context.Context) ([]metricsutil.GaugeLa // to potentially handle a larger number of tokens. intValues := make([]int, len(allNamespaces)) for i, ns := range allNamespaces { - values[i].Labels = []metrics.Label{vaultmetrics.NamespaceLabel(ns)} + values[i].Labels = []metrics.Label{metricsutil.NamespaceLabel(ns)} namespacePosition[ns.ID] = i } @@ -3522,7 +3521,7 @@ func (ts *TokenStore) gaugeCollectorByPolicy(ctx context.Context) ([]metricsutil flattenedResults = append(flattenedResults, metricsutil.GaugeLabelValues{ Labels: []metrics.Label{ - vaultmetrics.NamespaceLabel(ns), + metricsutil.NamespaceLabel(ns), {"policy", policy}, }, Value: float32(count), @@ -3590,7 +3589,7 @@ func (ts *TokenStore) gaugeCollectorByTtl(ctx context.Context) ([]metricsutil.Ga flattenedResults = append(flattenedResults, metricsutil.GaugeLabelValues{ Labels: []metrics.Label{ - vaultmetrics.NamespaceLabel(ns), + metricsutil.NamespaceLabel(ns), {"creation_ttl", bucket}, }, Value: float32(count), @@ -3690,7 +3689,7 @@ func (ts *TokenStore) gaugeCollectorByMethod(ctx context.Context) ([]metricsutil flattenedResults = append(flattenedResults, metricsutil.GaugeLabelValues{ Labels: []metrics.Label{ - vaultmetrics.NamespaceLabel(ns), + metricsutil.NamespaceLabel(ns), {"auth_method", method}, }, Value: float32(count), diff --git a/vault/token_store_test.go b/vault/token_store_test.go index 082e55be80e8..393294615780 100644 --- a/vault/token_store_test.go +++ b/vault/token_store_test.go @@ -18,8 +18,8 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/go-uuid" - "github.com/hashicorp/shared-secure-libs/metricsutil" "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/helper/locksutil" "github.com/hashicorp/vault/sdk/helper/parseutil" diff --git a/vault/wrapping.go b/vault/wrapping.go index bd55d1cc3dc9..73e33c5951eb 100644 --- a/vault/wrapping.go +++ b/vault/wrapping.go @@ -12,8 +12,7 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/errwrap" - "github.com/hashicorp/shared-secure-libs/metricsutil" - vaultmetrics "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/consts" @@ -153,7 +152,7 @@ DONELISTHANDLING: []string{"token", "creation"}, 1, []metrics.Label{ - vaultmetrics.NamespaceLabel(ns), + metricsutil.NamespaceLabel(ns), // The type of the secret engine is not all that useful; // we could use "token" but let's be more descriptive, // even if it's not a real auth method. 
diff --git a/vendor/github.com/hashicorp/shared-secure-libs/LICENSE b/vendor/github.com/hashicorp/shared-secure-libs/LICENSE deleted file mode 100644 index e87a115e462e..000000000000 --- a/vendor/github.com/hashicorp/shared-secure-libs/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. 
under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. 
Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. 
Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/shared-secure-libs/metricsutil/wrapped_metrics.go b/vendor/github.com/hashicorp/shared-secure-libs/metricsutil/wrapped_metrics.go deleted file mode 100644 index 27b4f570433c..000000000000 --- a/vendor/github.com/hashicorp/shared-secure-libs/metricsutil/wrapped_metrics.go +++ /dev/null @@ -1,89 +0,0 @@ -package metricsutil - -import ( - "sync/atomic" - "time" - - metrics "github.com/armon/go-metrics" -) - -// ClusterMetricSink serves as a shim around go-metrics -// and inserts a "cluster" label. -// -// It also provides a mechanism to limit the cardinality of the labels on a gauge -// (at each reporting interval, which isn't sufficient if there is variability in which -// labels are the top N) and a backoff mechanism for gauge computation. -type ClusterMetricSink struct { - // ClusterName is either the cluster ID, or a name provided - // in the telemetry configuration stanza. - // - // Because it may be set after the Core is initialized, we need - // to protect against concurrent access. - ClusterName atomic.Value - - MaxGaugeCardinality int - GaugeInterval time.Duration - - // Sink is the go-metrics instance to send to. 
- Sink metrics.MetricSink -} - -// Convenience alias -type Label = metrics.Label - -func (m *ClusterMetricSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { - m.Sink.SetGaugeWithLabels(key, val, - append(labels, Label{"cluster", m.ClusterName.Load().(string)})) -} - -func (m *ClusterMetricSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { - m.Sink.IncrCounterWithLabels(key, val, - append(labels, Label{"cluster", m.ClusterName.Load().(string)})) -} - -func (m *ClusterMetricSink) AddSampleWithLabels(key []string, val float32, labels []Label) { - m.Sink.AddSampleWithLabels(key, val, - append(labels, Label{"cluster", m.ClusterName.Load().(string)})) -} - -func (m *ClusterMetricSink) AddDurationWithLabels(key []string, d time.Duration, labels []Label) { - val := float32(d) / float32(time.Millisecond) - m.AddSampleWithLabels(key, val, labels) -} - -func (m *ClusterMetricSink) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { - elapsed := time.Now().Sub(start) - val := float32(elapsed) / float32(time.Millisecond) - m.AddSampleWithLabels(key, val, labels) -} - -// BlackholeSink is a default suitable for use in unit tests. -func BlackholeSink() *ClusterMetricSink { - sink, _ := metrics.New(metrics.DefaultConfig(""), - &metrics.BlackholeSink{}) - cms := &ClusterMetricSink{ - ClusterName: atomic.Value{}, - Sink: sink, - } - cms.ClusterName.Store("") - return cms -} - -func NewClusterMetricSink(clusterName string, sink metrics.MetricSink) *ClusterMetricSink { - cms := &ClusterMetricSink{ - ClusterName: atomic.Value{}, - Sink: sink, - } - cms.ClusterName.Store(clusterName) - return cms -} - -// SetDefaultClusterName changes the cluster name from its default value, -// if it has not previously been configured. 
-func (m *ClusterMetricSink) SetDefaultClusterName(clusterName string) { - // This is not a true compare-and-swap, but it should be - // consistent enough for normal uses - if m.ClusterName.Load().(string) == "" { - m.ClusterName.Store(clusterName) - } -} diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md index 52b111d5f36e..50d56ffbf07e 100644 --- a/vendor/github.com/json-iterator/go/README.md +++ b/vendor/github.com/json-iterator/go/README.md @@ -1,5 +1,5 @@ [![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) -[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go) [![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) [![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) [![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) @@ -18,16 +18,16 @@ Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/githu Raw Result (easyjson requires static code generation) -| | ns/op | allocation bytes | allocation times | -| --------------- | ----------- | ---------------- | ---------------- | -| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | -| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | -| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | -| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | -| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | -| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | +| | ns/op | allocation bytes | allocation times | +| --- | --- | --- | --- | +| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | +| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | +| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | +| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | +| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | +| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | -Always benchmark with your own workload. +Always benchmark with your own workload. The result depends heavily on the data input. 
# Usage @@ -41,10 +41,10 @@ import "encoding/json" json.Marshal(&data) ``` -with +with ```go -import jsoniter "github.com/json-iterator/go" +import "github.com/json-iterator/go" var json = jsoniter.ConfigCompatibleWithStandardLibrary json.Marshal(&data) @@ -60,7 +60,7 @@ json.Unmarshal(input, &data) with ```go -import jsoniter "github.com/json-iterator/go" +import "github.com/json-iterator/go" var json = jsoniter.ConfigCompatibleWithStandardLibrary json.Unmarshal(input, &data) @@ -78,10 +78,10 @@ go get github.com/json-iterator/go Contributors -- [thockin](https://github.com/thockin) -- [mattn](https://github.com/mattn) -- [cch123](https://github.com/cch123) -- [Oleg Shaldybin](https://github.com/olegshaldybin) -- [Jason Toffaletti](https://github.com/toffaletti) +* [thockin](https://github.com/thockin) +* [mattn](https://github.com/mattn) +* [cch123](https://github.com/cch123) +* [Oleg Shaldybin](https://github.com/olegshaldybin) +* [Jason Toffaletti](https://github.com/toffaletti) Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go index 1f12f6612de9..a4b93c78c822 100644 --- a/vendor/github.com/json-iterator/go/any_str.go +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -64,6 +64,7 @@ func (any *stringAny) ToInt64() int64 { flag := 1 startPos := 0 + endPos := 0 if any.val[0] == '+' || any.val[0] == '-' { startPos = 1 } @@ -72,7 +73,6 @@ func (any *stringAny) ToInt64() int64 { flag = -1 } - endPos := startPos for i := startPos; i < len(any.val); i++ { if any.val[i] >= '0' && any.val[i] <= '9' { endPos = i + 1 @@ -98,6 +98,7 @@ func (any *stringAny) ToUint64() uint64 { } startPos := 0 + endPos := 0 if any.val[0] == '-' { return 0 @@ -106,7 +107,6 @@ func (any *stringAny) ToUint64() uint64 { startPos = 1 } - endPos := startPos for i := startPos; i < len(any.val); i++ { if any.val[i] >= '0' && any.val[i] <= '9' { endPos = i + 1 diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go index 2adcdc3b790e..8c58fcba5922 100644 --- a/vendor/github.com/json-iterator/go/config.go +++ b/vendor/github.com/json-iterator/go/config.go @@ -183,11 +183,11 @@ func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { rawMessage := *(*json.RawMessage)(ptr) iter := cfg.BorrowIterator([]byte(rawMessage)) - defer cfg.ReturnIterator(iter) iter.Read() - if iter.Error != nil && iter.Error != io.EOF { + if iter.Error != nil { stream.WriteRaw("null") } else { + cfg.ReturnIterator(iter) stream.WriteRaw(string(rawMessage)) } }, func(ptr unsafe.Pointer) bool { diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go index 58ee89c849e7..b65137114f65 100644 --- a/vendor/github.com/json-iterator/go/iter_object.go +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -150,7 +150,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { if c == '}' { return iter.decrementDepth() } - iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c})) + iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) iter.decrementDepth() return false } @@ -206,7 +206,7 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { if 
c == '}' { return iter.decrementDepth() } - iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) + iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) iter.decrementDepth() return false } diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go index 74a97bfe5abf..80320cd64341 100644 --- a/vendor/github.com/json-iterator/go/reflect_extension.go +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -475,7 +475,7 @@ func calcFieldNames(originalFieldName string, tagProvidedFieldName string, whole fieldNames = []string{tagProvidedFieldName} } // private? - isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_' + isNotExported := unicode.IsLower(rune(originalFieldName[0])) if isNotExported { fieldNames = []string{} } diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go index 582967130135..9e2b623fe8d6 100644 --- a/vendor/github.com/json-iterator/go/reflect_map.go +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -49,33 +49,6 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { return decoder } } - - ptrType := reflect2.PtrTo(typ) - if ptrType.Implements(unmarshalerType) { - return &referenceDecoder{ - &unmarshalerDecoder{ - valType: ptrType, - }, - } - } - if typ.Implements(unmarshalerType) { - return &unmarshalerDecoder{ - valType: typ, - } - } - if ptrType.Implements(textUnmarshalerType) { - return &referenceDecoder{ - &textUnmarshalerDecoder{ - valType: ptrType, - }, - } - } - if typ.Implements(textUnmarshalerType) { - return &textUnmarshalerDecoder{ - valType: typ, - } - } - switch typ.Kind() { case reflect.String: return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) @@ -90,6 +63,31 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { typ = reflect2.DefaultTypeOfKind(typ.Kind()) return &numericMapKeyDecoder{decoderOfType(ctx, typ)} default: + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(unmarshalerType) { + return &unmarshalerDecoder{ + valType: typ, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textUnmarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} } } @@ -105,19 +103,6 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { return encoder } } - - if typ == textMarshalerType { - return &directTextMarshalerEncoder{ - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } - if typ.Implements(textMarshalerType) { - return &textMarshalerEncoder{ - valType: typ, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } - switch typ.Kind() { case reflect.String: return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) @@ -132,6 +117,17 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { typ = reflect2.DefaultTypeOfKind(typ.Kind()) return &numericMapKeyEncoder{encoderOfType(ctx, typ)} default: + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + 
stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } if typ.Kind() == reflect.Interface { return &dynamicMapKeyEncoder{ctx, typ} } @@ -167,6 +163,10 @@ func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { if c == '}' { return } + if c != '"' { + iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + return + } iter.unreadByte() key := decoder.keyType.UnsafeNew() decoder.keyDecoder.Decode(key, iter) diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go index fa71f4748912..43ec71d6dadf 100644 --- a/vendor/github.com/json-iterator/go/reflect_optional.go +++ b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -2,6 +2,7 @@ package jsoniter import ( "github.com/modern-go/reflect2" + "reflect" "unsafe" ) @@ -9,6 +10,9 @@ func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { ptrType := typ.(*reflect2.UnsafePtrType) elemType := ptrType.Elem() decoder := decoderOfType(ctx, elemType) + if ctx.prefix == "" && elemType.Kind() == reflect.Ptr { + return &dereferenceDecoder{elemType, decoder} + } return &OptionalDecoder{elemType, decoder} } diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go index d7eb0eb5caa8..5ad5cc561af2 100644 --- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -507,7 +507,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) for c = ','; c == ','; c = iter.nextToken() { decoder.decodeOneField(ptr, iter) } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } if c != '}' { @@ -588,7 +588,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) break } } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -622,7 +622,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator break } } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -660,7 +660,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat break } } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -702,7 +702,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato break } } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -748,7 +748,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato break } } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, 
iter.Error.Error()) } iter.decrementDepth() @@ -798,7 +798,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator break } } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -852,7 +852,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat break } } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -910,7 +910,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat break } } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -972,7 +972,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato break } } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() @@ -1038,7 +1038,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator break } } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } iter.decrementDepth() diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go index 23d8a3ad6b12..17662fdedcb5 100644 --- a/vendor/github.com/json-iterator/go/stream.go +++ b/vendor/github.com/json-iterator/go/stream.go @@ -103,14 +103,14 @@ func (stream *Stream) Flush() error { if stream.Error != nil { return stream.Error } - _, err := stream.out.Write(stream.buf) + n, err := stream.out.Write(stream.buf) if err != nil { if stream.Error == nil { stream.Error = err } return err } - stream.buf = stream.buf[:0] + stream.buf = stream.buf[n:] return nil } @@ -177,6 +177,7 @@ func (stream *Stream) WriteEmptyObject() { func (stream *Stream) WriteMore() { stream.writeByte(',') stream.writeIndention(0) + stream.Flush() } // WriteArrayStart write [ with possible indention diff --git a/vendor/github.com/mitchellh/cli/.travis.yml b/vendor/github.com/mitchellh/cli/.travis.yml index 5c01b3a0ecad..b8599b3ac662 100644 --- a/vendor/github.com/mitchellh/cli/.travis.yml +++ b/vendor/github.com/mitchellh/cli/.travis.yml @@ -2,14 +2,10 @@ sudo: false language: go -env: - - GO111MODULE=on - go: - - "1.11" - - "1.12" - - "1.13" - - "1.14" + - "1.8" + - "1.9" + - "1.10" branches: only: diff --git a/vendor/github.com/mitchellh/cli/Makefile b/vendor/github.com/mitchellh/cli/Makefile index 89c0a1209725..4874b0082a34 100644 --- a/vendor/github.com/mitchellh/cli/Makefile +++ b/vendor/github.com/mitchellh/cli/Makefile @@ -12,6 +12,9 @@ testrace: # updatedeps installs all the dependencies to run and build updatedeps: - go mod download + go list ./... 
\ + | xargs go list -f '{{ join .Deps "\n" }}{{ printf "\n" }}{{ join .TestImports "\n" }}' \ + | grep -v github.com/mitchellh/cli \ + | xargs go get -f -u -v .PHONY: test testrace updatedeps diff --git a/vendor/github.com/mitchellh/cli/cli.go b/vendor/github.com/mitchellh/cli/cli.go index 45f427c648fa..c2dbe55aa07f 100644 --- a/vendor/github.com/mitchellh/cli/cli.go +++ b/vendor/github.com/mitchellh/cli/cli.go @@ -3,7 +3,6 @@ package cli import ( "fmt" "io" - "io/ioutil" "os" "regexp" "sort" @@ -110,23 +109,18 @@ type CLI struct { AutocompleteGlobalFlags complete.Flags autocompleteInstaller autocompleteInstaller // For tests + // HelpFunc and HelpWriter are used to output help information, if + // requested. + // // HelpFunc is the function called to generate the generic help // text that is shown if help must be shown for the CLI that doesn't // pertain to a specific command. - HelpFunc HelpFunc - - // HelpWriter is used to print help text and version when requested. - // Defaults to os.Stderr for backwards compatibility. - // It is recommended that you set HelpWriter to os.Stdout, and - // ErrorWriter to os.Stderr. + // + // HelpWriter is the Writer where the help text is outputted to. If + // not specified, it will default to Stderr. + HelpFunc HelpFunc HelpWriter io.Writer - // ErrorWriter used to output errors when a command can not be run. - // Defaults to the value of HelpWriter for backwards compatibility. - // It is recommended that you set HelpWriter to os.Stdout, and - // ErrorWriter to os.Stderr. - ErrorWriter io.Writer - //--------------------------------------------------------------- // Internal fields set automatically @@ -234,7 +228,7 @@ func (c *CLI) Run() (int, error) { // implementation. If the command is invalid or blank, it is an error. raw, ok := c.commandTree.Get(c.Subcommand()) if !ok { - c.ErrorWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.subcommandParent())) + "\n")) + c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.subcommandParent())) + "\n")) return 127, nil } @@ -245,23 +239,23 @@ func (c *CLI) Run() (int, error) { // If we've been instructed to just print the help, then print it if c.IsHelp() { - c.commandHelp(c.HelpWriter, command) + c.commandHelp(command) return 0, nil } // If there is an invalid flag, then error if len(c.topFlags) > 0 { - c.ErrorWriter.Write([]byte( + c.HelpWriter.Write([]byte( "Invalid flags before the subcommand. If these flags are for\n" + "the subcommand, please put them after the subcommand.\n\n")) - c.commandHelp(c.ErrorWriter, command) + c.commandHelp(command) return 1, nil } code := command.Run(c.SubcommandArgs()) if code == RunResultHelp { // Requesting help - c.commandHelp(c.ErrorWriter, command) + c.commandHelp(command) return 1, nil } @@ -316,9 +310,6 @@ func (c *CLI) init() { if c.HelpWriter == nil { c.HelpWriter = os.Stderr } - if c.ErrorWriter == nil { - c.ErrorWriter = c.HelpWriter - } // Build our hidden commands if len(c.HiddenCommands) > 0 { @@ -404,16 +395,6 @@ func (c *CLI) initAutocomplete() { c.autocompleteInstaller = &realAutocompleteInstaller{} } - // We first set c.autocomplete to a noop autocompleter that outputs - // to nul so that we can detect if we're autocompleting or not. If we're - // not, then we do nothing. This saves a LOT of compute cycles since - // initAutoCompleteSub has to walk every command. 
- c.autocomplete = complete.New(c.Name, complete.Command{}) - c.autocomplete.Out = ioutil.Discard - if !c.autocomplete.Complete() { - return - } - // Build the root command cmd := c.initAutocompleteSub("") @@ -423,8 +404,8 @@ func (c *CLI) initAutocomplete() { cmd.Flags = map[string]complete.Predictor{ "-" + c.AutocompleteInstall: complete.PredictNothing, "-" + c.AutocompleteUninstall: complete.PredictNothing, - "-help": complete.PredictNothing, - "-version": complete.PredictNothing, + "-help": complete.PredictNothing, + "-version": complete.PredictNothing, } } cmd.GlobalFlags = c.AutocompleteGlobalFlags @@ -502,7 +483,7 @@ func (c *CLI) initAutocompleteSub(prefix string) complete.Command { return cmd } -func (c *CLI) commandHelp(out io.Writer, command Command) { +func (c *CLI) commandHelp(command Command) { // Get the template to use tpl := strings.TrimSpace(defaultHelpTemplate) if t, ok := command.(CommandHelpTemplate); ok { @@ -552,12 +533,12 @@ func (c *CLI) commandHelp(out io.Writer, command Command) { // Get the command raw, ok := subcommands[k] if !ok { - c.ErrorWriter.Write([]byte(fmt.Sprintf( + c.HelpWriter.Write([]byte(fmt.Sprintf( "Error getting subcommand %q", k))) } sub, err := raw() if err != nil { - c.ErrorWriter.Write([]byte(fmt.Sprintf( + c.HelpWriter.Write([]byte(fmt.Sprintf( "Error instantiating %q: %s", k, err))) } @@ -578,13 +559,13 @@ func (c *CLI) commandHelp(out io.Writer, command Command) { data["Subcommands"] = subcommandsTpl // Write - err = t.Execute(out, data) + err = t.Execute(c.HelpWriter, data) if err == nil { return } // An error, just output... - c.ErrorWriter.Write([]byte(fmt.Sprintf( + c.HelpWriter.Write([]byte(fmt.Sprintf( "Internal error rendering help: %s", err))) } diff --git a/vendor/github.com/mitchellh/cli/go.mod b/vendor/github.com/mitchellh/cli/go.mod index bd6c53d355b1..675325ffa047 100644 --- a/vendor/github.com/mitchellh/cli/go.mod +++ b/vendor/github.com/mitchellh/cli/go.mod @@ -1,7 +1,5 @@ module github.com/mitchellh/cli -go 1.11 - require ( github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 github.com/bgentry/speakeasy v0.1.0 diff --git a/vendor/github.com/mitchellh/cli/ui_mock.go b/vendor/github.com/mitchellh/cli/ui_mock.go index 935f28a4a6bc..0bfe0a19121c 100644 --- a/vendor/github.com/mitchellh/cli/ui_mock.go +++ b/vendor/github.com/mitchellh/cli/ui_mock.go @@ -1,11 +1,9 @@ package cli import ( - "bufio" "bytes" "fmt" "io" - "strings" "sync" ) @@ -37,12 +35,9 @@ func (u *MockUi) Ask(query string) (string, error) { var result string fmt.Fprint(u.OutputWriter, query) - r := bufio.NewReader(u.InputReader) - line, err := r.ReadString('\n') - if err != nil { + if _, err := fmt.Fscanln(u.InputReader, &result); err != nil { return "", err } - result = strings.TrimRight(line, "\r\n") return result, nil } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 0e1b48c03f14..a1877badbd49 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -180,7 +180,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { if len(lvs) != len(desc.variableLabels) { panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) } - result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now} + result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} result.init(result) // Init 
self-collection. return result }), @@ -309,8 +309,6 @@ type CounterFunc interface { // provided function must be concurrency-safe. The function should also honor // the contract for a Counter (values only go up, not down), but compliance will // not be checked. -// -// Check out the ExampleGaugeFunc examples for the similar GaugeFunc. func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { return newValueFunc(NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 2f19f5e1e7ea..e3232d79f44b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -20,7 +20,6 @@ import ( "strings" "github.com/cespare/xxhash/v2" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/model" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 98450125d6a3..01977de66140 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -84,21 +84,25 @@ // of those four metric types can be found in the Prometheus docs: // https://prometheus.io/docs/concepts/metric_types/ // -// In addition to the fundamental metric types Gauge, Counter, Summary, and -// Histogram, a very important part of the Prometheus data model is the -// partitioning of samples along dimensions called labels, which results in +// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the +// Prometheus server not to assume anything about its type. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, +// Histogram, and Untyped, a very important part of the Prometheus data model is +// the partitioning of samples along dimensions called labels, which results in // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, -// and HistogramVec. +// HistogramVec, and UntypedVec. // // While only the fundamental metric types implement the Metric interface, both // the metrics and their vector versions implement the Collector interface. A // Collector manages the collection of a number of Metrics, but for convenience, -// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and -// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec, -// and HistogramVec are not. +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, +// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, +// SummaryVec, HistogramVec, and UntypedVec are not. // // To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or +// UntypedOpts. // // Custom Collectors and constant Metrics // @@ -114,16 +118,13 @@ // existing numbers into Prometheus Metrics during collection. An own // implementation of the Collector interface is perfect for that. You can create // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and -// NewConstSummary (and their respective Must… versions). 
NewConstMetric is used -// for all metric types with just a float64 as their value: Counter, Gauge, and -// a special “type” called Untyped. Use the latter if you are not sure if the -// mirrored metric is a Counter or a Gauge. Creation of the Metric instance -// happens in the Collect method. The Describe method has to return separate -// Desc instances, representative of the “throw-away” metrics to be created -// later. NewDesc comes in handy to create those Desc instances. Alternatively, -// you could return no Desc at all, which will mark the Collector “unchecked”. -// No checks are performed at registration time, but metric consistency will -// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape +// NewConstSummary (and their respective Must… versions). That will happen in +// the Collect method. The Describe method has to return separate Desc +// instances, representative of the “throw-away” metrics to be created later. +// NewDesc comes in handy to create those Desc instances. Alternatively, you +// could return no Desc at all, which will mark the Collector “unchecked”. No +// checks are performed at registration time, but metric consistency will still +// be ensured at scrape time, i.e. any inconsistencies will lead to scrape // errors. Thus, with unchecked Collectors, the responsibility to not collect // metrics that lead to inconsistencies in the total scrape result lies with the // implementer of the Collector. While this is not a desirable state, it is diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index d4ea301a33ce..4271f438aeaa 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -22,7 +22,6 @@ import ( "sync/atomic" "time" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" @@ -607,7 +606,7 @@ func NewConstHistogram( } // MustNewConstHistogram is a version of NewConstHistogram that panics where -// NewConstHistogram would have returned an error. +// NewConstMetric would have returned an error. func MustNewConstHistogram( desc *Desc, count uint64, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 35bd8bde34c7..0df1eff8814f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -17,7 +17,6 @@ import ( "strings" "time" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. 
"github.com/golang/protobuf/proto" "github.com/prometheus/common/model" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go index f973398df2d4..e0b935d1fef7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -33,22 +33,18 @@ var ( ) type processMemoryCounters struct { - // System interface description - // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex - - // Refer to the Golang internal implementation - // https://golang.org/src/internal/syscall/windows/psapi_windows.go + // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex _ uint32 PageFaultCount uint32 - PeakWorkingSetSize uintptr - WorkingSetSize uintptr - QuotaPeakPagedPoolUsage uintptr - QuotaPagedPoolUsage uintptr - QuotaPeakNonPagedPoolUsage uintptr - QuotaNonPagedPoolUsage uintptr - PagefileUsage uintptr - PeakPagefileUsage uintptr - PrivateUsage uintptr + PeakWorkingSetSize uint64 + WorkingSetSize uint64 + QuotaPeakPagedPoolUsage uint64 + QuotaPagedPoolUsage uint64 + QuotaPeakNonPagedPoolUsage uint64 + QuotaNonPagedPoolUsage uint64 + PagefileUsage uint64 + PeakPagefileUsage uint64 + PrivateUsage uint64 } func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go index c1a6cb99fcbb..77ce9c83734f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go @@ -37,7 +37,6 @@ package push import ( "bytes" "encoding/base64" - "errors" "fmt" "io/ioutil" "net/http" @@ -57,8 +56,6 @@ const ( base64Suffix = "@base64" ) -var errJobEmpty = errors.New("job name is empty") - // HTTPDoer is an interface for the one method of http.Client that is used by Pusher type HTTPDoer interface { Do(*http.Request) (*http.Response, error) @@ -83,17 +80,14 @@ type Pusher struct { } // New creates a new Pusher to push to the provided URL with the provided job -// name (which must not be empty). You can use just host:port or ip:port as url, -// in which case “http://” is added automatically. Alternatively, include the -// schema in the URL. However, do not include the “/metrics/jobs/…” part. +// name. You can use just host:port or ip:port as url, in which case “http://” +// is added automatically. Alternatively, include the schema in the +// URL. However, do not include the “/metrics/jobs/…” part. func New(url, job string) *Pusher { var ( reg = prometheus.NewRegistry() err error ) - if job == "" { - err = errJobEmpty - } if !strings.Contains(url, "://") { url = "http://" + url } @@ -273,7 +267,7 @@ func (p *Pusher) push(method string) error { return err } defer resp.Body.Close() - // Depending on version and configuration of the PGW, StatusOK or StatusAccepted may be returned. + // Pushgateway 0.10+ responds with StatusOK, earlier versions with StatusAccepted. if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted { body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. 
return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, p.fullURL(), body) @@ -284,11 +278,9 @@ func (p *Pusher) push(method string) error { // fullURL assembles the URL used to push/delete metrics and returns it as a // string. The job name and any grouping label values containing a '/' will // trigger a base64 encoding of the affected component and proper suffixing of -// the preceding component. Similarly, an empty grouping label value will be -// encoded as base64 just with a single `=` padding character (to avoid an empty -// path component). If the component does not contain a '/' but other special -// characters, the usual url.QueryEscape is used for compatibility with older -// versions of the Pushgateway and for better readability. +// the preceding component. If the component does not contain a '/' but other +// special character, the usual url.QueryEscape is used for compatibility with +// older versions of the Pushgateway and for better readability. func (p *Pusher) fullURL() string { urlComponents := []string{} if encodedJob, base64 := encodeComponent(p.job); base64 { @@ -307,12 +299,9 @@ func (p *Pusher) fullURL() string { } // encodeComponent encodes the provided string with base64.RawURLEncoding in -// case it contains '/' and as "=" in case it is empty. If neither is the case, -// it uses url.QueryEscape instead. It returns true in the former two cases. +// case it contains '/'. If not, it uses url.QueryEscape instead. It returns +// true in the former case. func encodeComponent(s string) (string, bool) { - if s == "" { - return "=", true - } if strings.Contains(s, "/") { return base64.RawURLEncoding.EncodeToString([]byte(s)), true } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index ba94405af4ca..c05d6ee1b385 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -26,7 +26,6 @@ import ( "unicode/utf8" "github.com/cespare/xxhash/v2" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/expfmt" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index f3c1440d1c65..ae42e761a19a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -23,7 +23,6 @@ import ( "time" "github.com/beorn7/perks/quantile" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index 6206928cc67c..3dcb4fde2633 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -19,7 +19,6 @@ import ( "time" "unicode/utf8" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" @@ -29,8 +28,7 @@ import ( // ValueType is an enumeration of metric types that represent a simple value. type ValueType int -// Possible values for the ValueType enum. Use UntypedValue to mark a metric -// with an unknown type. 
+// Possible values for the ValueType enum. const ( _ ValueType = iota CounterValue diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index d53848dc4810..19df3fe6b12c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -91,18 +91,6 @@ func (m *metricVec) Delete(labels Labels) bool { return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) } -// Without explicit forwarding of Describe, Collect, Reset, those methods won't -// show up in GoDoc. - -// Describe implements Collector. -func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } - -// Collect implements Collector. -func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } - -// Reset deletes all metrics in this vector. -func (m *metricVec) Reset() { m.metricMap.Reset() } - func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { var ( newCurry []curriedLabelValue diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 438aa5e9247d..e303eef6d33b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -17,7 +17,6 @@ import ( "fmt" "sort" - //lint:ignore SA1019 Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" @@ -28,8 +27,7 @@ import ( // registered with the wrapped Registerer in a modified way. The modified // Collector adds the provided Labels to all Metrics it collects (as // ConstLabels). The Metrics collected by the unmodified Collector must not -// duplicate any of those labels. Wrapping a nil value is valid, resulting -// in a no-op Registerer. +// duplicate any of those labels. // // WrapRegistererWith provides a way to add fixed labels to a subset of // Collectors. It should not be used to add fixed labels to all metrics exposed. @@ -52,7 +50,6 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer { // Registerer. Collectors registered with the returned Registerer will be // registered with the wrapped Registerer in a modified way. The modified // Collector adds the provided prefix to the name of all Metrics it collects. -// Wrapping a nil value is valid, resulting in a no-op Registerer. // // WrapRegistererWithPrefix is useful to have one place to prefix all metrics of // a sub-system. 
To make this work, register metrics of the sub-system with the @@ -83,9 +80,6 @@ type wrappingRegisterer struct { } func (r *wrappingRegisterer) Register(c Collector) error { - if r.wrappedRegisterer == nil { - return nil - } return r.wrappedRegisterer.Register(&wrappingCollector{ wrappedCollector: c, prefix: r.prefix, @@ -94,9 +88,6 @@ func (r *wrappingRegisterer) Register(c Collector) error { } func (r *wrappingRegisterer) MustRegister(cs ...Collector) { - if r.wrappedRegisterer == nil { - return - } for _, c := range cs { if err := r.Register(c); err != nil { panic(err) @@ -105,9 +96,6 @@ func (r *wrappingRegisterer) MustRegister(cs ...Collector) { } func (r *wrappingRegisterer) Unregister(c Collector) bool { - if r.wrappedRegisterer == nil { - return false - } return r.wrappedRegisterer.Unregister(&wrappingCollector{ wrappedCollector: c, prefix: r.prefix, diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 7657f841d632..c092723e84a4 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -164,7 +164,7 @@ func (sd *SampleDecoder) Decode(s *model.Vector) error { } // ExtractSamples builds a slice of samples from the provided metric -// families. If an error occurs during sample extraction, it continues to +// families. If an error occurrs during sample extraction, it continues to // extract from the remaining metric families. The returned error is the last // error that has occurred. func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go index 367afecd30e6..038fc1c9003c 100644 --- a/vendor/github.com/prometheus/common/model/fnv.go +++ b/vendor/github.com/prometheus/common/model/fnv.go @@ -20,7 +20,7 @@ const ( prime64 = 1099511628211 ) -// hashNew initializes a new fnv64a hash value. +// hashNew initializies a new fnv64a hash value. func hashNew() uint64 { return offset64 } diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index c40e6403ca80..7b0064fdb232 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -181,77 +181,73 @@ func (d *Duration) Type() string { return "duration" } -var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$") +var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. func ParseDuration(durationStr string) (Duration, error) { - switch durationStr { - case "0": - // Allow 0 without a unit. - return 0, nil - case "": - return 0, fmt.Errorf("empty duration string") - } matches := durationRE.FindStringSubmatch(durationStr) - if matches == nil { + if len(matches) != 3 { return 0, fmt.Errorf("not a valid duration string: %q", durationStr) } - var dur time.Duration - - // Parse the match at pos `pos` in the regex and use `mult` to turn that - // into ms, then add that value to the total parsed duration. 
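As a reviewer aid, a minimal sketch of the wrapping pattern documented in the wrap.go hunk above. It wraps DefaultRegisterer rather than nil, since the nil no-op path is removed by this downgrade; the prefix, label, and metric name are hypothetical.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Metrics registered through wrapped get both the const label and the name prefix.
	wrapped := prometheus.WrapRegistererWithPrefix("demo_",
		prometheus.WrapRegistererWith(prometheus.Labels{"subsystem": "raft"}, prometheus.DefaultRegisterer))

	applied := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "log_entries_applied_total",
		Help: "Log entries applied by the demo subsystem.",
	})
	// Exposed as demo_log_entries_applied_total{subsystem="raft"}.
	wrapped.MustRegister(applied)
	applied.Inc()
}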
- m := func(pos int, mult time.Duration) { - if matches[pos] == "" { - return - } - n, _ := strconv.Atoi(matches[pos]) - d := time.Duration(n) * time.Millisecond - dur += d * mult + var ( + n, _ = strconv.Atoi(matches[1]) + dur = time.Duration(n) * time.Millisecond + ) + switch unit := matches[2]; unit { + case "y": + dur *= 1000 * 60 * 60 * 24 * 365 + case "w": + dur *= 1000 * 60 * 60 * 24 * 7 + case "d": + dur *= 1000 * 60 * 60 * 24 + case "h": + dur *= 1000 * 60 * 60 + case "m": + dur *= 1000 * 60 + case "s": + dur *= 1000 + case "ms": + // Value already correct + default: + return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) } - - m(2, 1000*60*60*24*365) // y - m(4, 1000*60*60*24*7) // w - m(6, 1000*60*60*24) // d - m(8, 1000*60*60) // h - m(10, 1000*60) // m - m(12, 1000) // s - m(14, 1) // ms - return Duration(dur), nil } func (d Duration) String() string { var ( - ms = int64(time.Duration(d) / time.Millisecond) - r = "" + ms = int64(time.Duration(d) / time.Millisecond) + unit = "ms" ) if ms == 0 { return "0s" } - - f := func(unit string, mult int64, exact bool) { - if exact && ms%mult != 0 { - return - } - if v := ms / mult; v > 0 { - r += fmt.Sprintf("%d%s", v, unit) - ms -= v * mult - } + factors := map[string]int64{ + "y": 1000 * 60 * 60 * 24 * 365, + "w": 1000 * 60 * 60 * 24 * 7, + "d": 1000 * 60 * 60 * 24, + "h": 1000 * 60 * 60, + "m": 1000 * 60, + "s": 1000, + "ms": 1, } - // Only format years and weeks if the remainder is zero, as it is often - // easier to read 90d than 12w6d. - f("y", 1000*60*60*24*365, true) - f("w", 1000*60*60*24*7, true) - - f("d", 1000*60*60*24, false) - f("h", 1000*60*60, false) - f("m", 1000*60, false) - f("s", 1000, false) - f("ms", 1, false) - - return r + switch int64(0) { + case ms % factors["y"]: + unit = "y" + case ms % factors["w"]: + unit = "w" + case ms % factors["d"]: + unit = "d" + case ms % factors["h"]: + unit = "h" + case ms % factors["m"]: + unit = "m" + case ms % factors["s"]: + unit = "s" + } + return fmt.Sprintf("%v%v", ms/factors[unit], unit) } // MarshalYAML implements the yaml.Marshaler interface. diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index 0aa09edacb38..7c4ce1fa848a 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,4 +1,4 @@ ---- linters: enable: - - golint + - staticcheck + - govet diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 9320176ca24f..d7aea1b86f23 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -69,21 +69,12 @@ else GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) endif -GOTEST := $(GO) test -GOTEST_DIR := -ifneq ($(CIRCLE_JOB),) -ifneq ($(shell which gotestsum),) - GOTEST_DIR := test-results - GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- -endif -endif - -PROMU_VERSION ?= 0.5.0 +PROMU_VERSION ?= 0.4.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.18.0 +GOLANGCI_LINT_VERSION ?= v1.16.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. 
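As a reviewer aid, a small sketch of what the downgraded model.ParseDuration above accepts: a single value with one unit matching ^([0-9]+)(y|w|d|h|m|s|ms)$, so combined strings such as "1h30m" (and a bare "0"), which the removed implementation handled, now return an error.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Single-unit form: accepted by the older regexp restored in this hunk.
	d, err := model.ParseDuration("90d")
	fmt.Println(d, err) // 90d <nil>

	// Multi-unit form: only the removed, newer implementation parses this.
	_, err = model.ParseDuration("1h30m")
	fmt.Println(err) // not a valid duration string: "1h30m"
}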
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -95,8 +86,7 @@ endif PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) -DOCKERFILE_PATH ?= ./Dockerfile -DOCKERBUILD_CONTEXT ?= ./ +DOCKERFILE_PATH ?= ./ DOCKER_REPO ?= prom DOCKER_ARCHS ?= amd64 @@ -150,29 +140,15 @@ else $(GO) get $(GOOPTS) -t ./... endif -.PHONY: update-go-deps -update-go-deps: - @echo ">> updating Go dependencies" - @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get $$m; \ - done - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifneq (,$(wildcard vendor)) - GO111MODULE=$(GO111MODULE) $(GO) mod vendor -endif - .PHONY: common-test-short -common-test-short: $(GOTEST_DIR) +common-test-short: @echo ">> running short tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) + GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs) .PHONY: common-test -common-test: $(GOTEST_DIR) +common-test: @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) - -$(GOTEST_DIR): - @mkdir -p $@ + GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs) .PHONY: common-format common-format: @@ -224,7 +200,7 @@ endif .PHONY: common-build common-build: promu @echo ">> building binaries" - GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) + GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) .PHONY: common-tarball common-tarball: promu @@ -235,10 +211,9 @@ common-tarball: promu common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ - -f $(DOCKERFILE_PATH) \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ - $(DOCKERBUILD_CONTEXT) + $(DOCKERFILE_PATH) .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index 31d42f7124c3..2e02215528f0 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -11,15 +11,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux - package procfs import ( "bufio" "bytes" - "errors" - "regexp" "strconv" "strings" @@ -56,11 +52,6 @@ type CPUInfo struct { PowerManagement string } -var ( - cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`) - cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`) -) - // CPUInfo returns information about current system CPUs. 
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt func (fs FS) CPUInfo() ([]CPUInfo, error) { @@ -71,26 +62,14 @@ func (fs FS) CPUInfo() ([]CPUInfo, error) { return parseCPUInfo(data) } -func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { +// parseCPUInfo parses data from /proc/cpuinfo +func parseCPUInfo(info []byte) ([]CPUInfo, error) { + cpuinfo := []CPUInfo{} + i := -1 scanner := bufio.NewScanner(bytes.NewReader(info)) - - // find the first "processor" line - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo := []CPUInfo{firstcpu} - i := 0 - for scanner.Scan() { line := scanner.Text() - if !strings.Contains(line, ":") { + if strings.TrimSpace(line) == "" { continue } field := strings.SplitN(line, ": ", 2) @@ -103,7 +82,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { return nil, err } cpuinfo[i].Processor = uint(v) - case "vendor", "vendor_id": + case "vendor_id": cpuinfo[i].VendorID = field[1] case "cpu family": cpuinfo[i].CPUFamily = field[1] @@ -184,237 +163,5 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { } } return cpuinfo, nil -} - -func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) - if !match || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - featuresLine := "" - commonCPUInfo := CPUInfo{} - i := 0 - if strings.TrimSpace(field[0]) == "Processor" { - commonCPUInfo = CPUInfo{ModelName: field[1]} - i = -1 - } else { - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo = []CPUInfo{firstcpu} - } - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor - i++ - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Processor = uint(v) - case "BogoMIPS": - if i == -1 { - cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor - i++ - cpuinfo[i].Processor = 0 - } - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].BogoMips = v - case "Features": - featuresLine = line - case "model name": - cpuinfo[i].ModelName = field[1] - } - } - fields := strings.SplitN(featuresLine, ": ", 2) - for i := range cpuinfo { - cpuinfo[i].Flags = strings.Fields(fields[1]) - } - return cpuinfo, nil - -} - -func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - commonCPUInfo := CPUInfo{VendorID: field[1]} - - for scanner.Scan() { - line := 
scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "bogomips per cpu": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - commonCPUInfo.BogoMips = v - case "features": - commonCPUInfo.Flags = strings.Fields(field[1]) - } - if strings.HasPrefix(line, "processor") { - match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) - if len(match) < 2 { - return nil, errors.New("Invalid line found in cpuinfo: " + line) - } - cpu := commonCPUInfo - v, err := strconv.ParseUint(match[1], 0, 32) - if err != nil { - return nil, err - } - cpu.Processor = uint(v) - cpuinfo = append(cpuinfo, cpu) - } - if strings.HasPrefix(line, "cpu number") { - break - } - } - - i := 0 - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "cpu number": - i++ - case "cpu MHz dynamic": - clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) - v, err := strconv.ParseFloat(clock, 64) - if err != nil { - return nil, err - } - cpuinfo[i].CPUMHz = v - } - } - - return cpuinfo, nil -} - -func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - // find the first "processor" line - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - systemType := field[1] - - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - i = int(v) - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - cpuinfo[i].Processor = uint(v) - cpuinfo[i].VendorID = systemType - case "cpu model": - cpuinfo[i].ModelName = field[1] - case "BogoMIPS": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].BogoMips = v - } - } - return cpuinfo, nil -} - -func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo := []CPUInfo{firstcpu} - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - i++ - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Processor = uint(v) - case "cpu": - cpuinfo[i].VendorID = field[1] - case "clock": - clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) - v, err := strconv.ParseFloat(clock, 64) - if err != nil { - return nil, err - } - cpuinfo[i].CPUMHz = v - } - } - return cpuinfo, nil -} 
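As a reviewer aid, a minimal sketch of the public entry point whose implementation the cpuinfo.go hunk above collapses back to a single x86-style parser (the removed arch-specific parsers handled ARM, s390x, MIPS, and PPC layouts); the fields printed are those shown in the diff.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint) // "/proc"
	if err != nil {
		log.Fatal(err)
	}
	cpus, err := fs.CPUInfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range cpus {
		fmt.Printf("cpu%d: %s @ %.0f MHz\n", c.Processor, c.ModelName, c.CPUMHz)
	}
}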
-// firstNonEmptyLine advances the scanner to the first non-empty line -// and returns the contents of that line -func firstNonEmptyLine(scanner *bufio.Scanner) string { - for scanner.Scan() { - line := scanner.Text() - if strings.TrimSpace(line) != "" { - return line - } - } - return "" } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_arm.go b/vendor/github.com/prometheus/procfs/cpuinfo_arm.go deleted file mode 100644 index 83555077069a..000000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_arm.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoARM diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go b/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go deleted file mode 100644 index 4f5d172a3565..000000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_arm64.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux -// +build arm64 - -package procfs - -var parseCPUInfo = parseCPUInfoARM diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_default.go b/vendor/github.com/prometheus/procfs/cpuinfo_default.go deleted file mode 100644 index d5bedf97f31c..000000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_default.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build linux -// +build 386 amd64 - -package procfs - -var parseCPUInfo = parseCPUInfoX86 diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips.go b/vendor/github.com/prometheus/procfs/cpuinfo_mips.go deleted file mode 100644 index 22d93f8ef0c2..000000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mips.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go b/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go deleted file mode 100644 index 22d93f8ef0c2..000000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mips64.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go b/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go deleted file mode 100644 index 22d93f8ef0c2..000000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go deleted file mode 100644 index 22d93f8ef0c2..000000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go deleted file mode 100644 index 64aee9c63c07..000000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go deleted file mode 100644 index 64aee9c63c07..000000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go deleted file mode 100644 index 26814eebaaf3..000000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoS390X diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go index a9589337577b..19d4041b29a9 100644 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -14,10 +14,10 @@ package procfs import ( - "bufio" "bytes" "fmt" - "io" + "io/ioutil" + "strconv" "strings" "github.com/prometheus/procfs/internal/util" @@ -52,102 +52,80 @@ type Crypto struct { // structs containing the relevant info. More information available here: // https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html func (fs FS) Crypto() ([]Crypto, error) { - path := fs.proc.Path("crypto") - b, err := util.ReadFileNoStat(path) + data, err := ioutil.ReadFile(fs.proc.Path("crypto")) if err != nil { - return nil, fmt.Errorf("error reading crypto %s: %s", path, err) + return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err) } - - crypto, err := parseCrypto(bytes.NewReader(b)) + crypto, err := parseCrypto(data) if err != nil { - return nil, fmt.Errorf("error parsing crypto %s: %s", path, err) + return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err) } - return crypto, nil } -// parseCrypto parses a /proc/crypto stream into Crypto elements. -func parseCrypto(r io.Reader) ([]Crypto, error) { - var out []Crypto - - s := bufio.NewScanner(r) - for s.Scan() { - text := s.Text() - switch { - case strings.HasPrefix(text, "name"): - // Each crypto element begins with its name. - out = append(out, Crypto{}) - case text == "": - continue - } - - kv := strings.Split(text, ":") - if len(kv) != 2 { - return nil, fmt.Errorf("malformed crypto line: %q", text) - } - - k := strings.TrimSpace(kv[0]) - v := strings.TrimSpace(kv[1]) - - // Parse the key/value pair into the currently focused element. 
- c := &out[len(out)-1] - if err := c.parseKV(k, v); err != nil { - return nil, err +func parseCrypto(cryptoData []byte) ([]Crypto, error) { + crypto := []Crypto{} + + cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n")) + + for _, block := range cryptoBlocks { + var newCryptoElem Crypto + + lines := strings.Split(string(block), "\n") + for _, line := range lines { + if strings.TrimSpace(line) == "" || line[0] == ' ' { + continue + } + fields := strings.Split(line, ":") + key := strings.TrimSpace(fields[0]) + value := strings.TrimSpace(fields[1]) + vp := util.NewValueParser(value) + + switch strings.TrimSpace(key) { + case "async": + b, err := strconv.ParseBool(value) + if err == nil { + newCryptoElem.Async = b + } + case "blocksize": + newCryptoElem.Blocksize = vp.PUInt64() + case "chunksize": + newCryptoElem.Chunksize = vp.PUInt64() + case "digestsize": + newCryptoElem.Digestsize = vp.PUInt64() + case "driver": + newCryptoElem.Driver = value + case "geniv": + newCryptoElem.Geniv = value + case "internal": + newCryptoElem.Internal = value + case "ivsize": + newCryptoElem.Ivsize = vp.PUInt64() + case "maxauthsize": + newCryptoElem.Maxauthsize = vp.PUInt64() + case "max keysize": + newCryptoElem.MaxKeysize = vp.PUInt64() + case "min keysize": + newCryptoElem.MinKeysize = vp.PUInt64() + case "module": + newCryptoElem.Module = value + case "name": + newCryptoElem.Name = value + case "priority": + newCryptoElem.Priority = vp.PInt64() + case "refcnt": + newCryptoElem.Refcnt = vp.PInt64() + case "seedsize": + newCryptoElem.Seedsize = vp.PUInt64() + case "selftest": + newCryptoElem.Selftest = value + case "type": + newCryptoElem.Type = value + case "walksize": + newCryptoElem.Walksize = vp.PUInt64() + } } + crypto = append(crypto, newCryptoElem) } - - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} - -// parseKV parses a key/value pair into the appropriate field of c. -func (c *Crypto) parseKV(k, v string) error { - vp := util.NewValueParser(v) - - switch k { - case "async": - // Interpret literal yes as true. 
- c.Async = v == "yes" - case "blocksize": - c.Blocksize = vp.PUInt64() - case "chunksize": - c.Chunksize = vp.PUInt64() - case "digestsize": - c.Digestsize = vp.PUInt64() - case "driver": - c.Driver = v - case "geniv": - c.Geniv = v - case "internal": - c.Internal = v - case "ivsize": - c.Ivsize = vp.PUInt64() - case "maxauthsize": - c.Maxauthsize = vp.PUInt64() - case "max keysize": - c.MaxKeysize = vp.PUInt64() - case "min keysize": - c.MinKeysize = vp.PUInt64() - case "module": - c.Module = v - case "name": - c.Name = v - case "priority": - c.Priority = vp.PInt64() - case "refcnt": - c.Refcnt = vp.PInt64() - case "seedsize": - c.Seedsize = vp.PUInt64() - case "selftest": - c.Selftest = v - case "type": - c.Type = v - case "walksize": - c.Walksize = vp.PUInt64() - } - - return vp.Err() + return crypto, nil } diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar index 868c8573d925..c50a18ace4d6 100644 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -173,283 +173,6 @@ Lines: 1 411605849 93680043 79 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/smaps -Lines: 252 -00400000-00cb1000 r-xp 00000000 fd:01 952273 /bin/alertmanager -Size: 8900 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 2952 kB -Pss: 2952 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 2952 kB -Private_Dirty: 0 kB -Referenced: 2864 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex mr mw me dw sd -00cb1000-016b0000 r--p 008b1000 fd:01 952273 /bin/alertmanager -Size: 10236 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 6152 kB -Pss: 6152 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 6152 kB -Private_Dirty: 0 kB -Referenced: 5308 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd mr mw me dw sd -016b0000-0171a000 rw-p 012b0000 fd:01 952273 /bin/alertmanager -Size: 424 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 176 kB -Pss: 176 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 84 kB -Private_Dirty: 92 kB -Referenced: 176 kB -Anonymous: 92 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 12 kB -SwapPss: 12 kB -Locked: 0 kB -VmFlags: rd wr mr mw me dw ac sd -0171a000-0173f000 rw-p 00000000 00:00 0 -Size: 148 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 76 kB -Pss: 76 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 76 kB -Referenced: 76 kB -Anonymous: 76 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -c000000000-c000400000 rw-p 00000000 00:00 0 -Size: 4096 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 2564 kB -Pss: 2564 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 20 kB -Private_Dirty: 2544 kB -Referenced: 2544 kB -Anonymous: 2564 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 1100 kB -SwapPss: 1100 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -c000400000-c001600000 rw-p 00000000 00:00 0 -Size: 18432 kB -KernelPageSize: 4 kB 
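As a reviewer aid, a minimal sketch of the fs.Crypto entry point affected by the crypto.go hunk above; the downgraded parser splits /proc/crypto on blank-line-separated blocks instead of keying new records on the name field, but the call site is unchanged.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.Crypto()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range entries {
		fmt.Printf("%s (driver %s, type %s)\n", c.Name, c.Driver, c.Type)
	}
}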
-MMUPageSize: 4 kB -Rss: 16024 kB -Pss: 16024 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 5864 kB -Private_Dirty: 10160 kB -Referenced: 11944 kB -Anonymous: 16024 kB -LazyFree: 5848 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 440 kB -SwapPss: 440 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd nh -c001600000-c004000000 rw-p 00000000 00:00 0 -Size: 43008 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -7f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0 -Size: 38596 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 1992 kB -Pss: 1992 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 476 kB -Private_Dirty: 1516 kB -Referenced: 1828 kB -Anonymous: 1992 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 384 kB -SwapPss: 384 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -7ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0 [stack] -Size: 132 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 8 kB -Pss: 8 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 8 kB -Referenced: 8 kB -Anonymous: 8 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 4 kB -SwapPss: 4 kB -Locked: 0 kB -VmFlags: rd wr mr mw me gd ac -7ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0 [vvar] -Size: 12 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd mr pf io de dd sd -7ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0 [vdso] -Size: 8 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 4 kB -Pss: 0 kB -Shared_Clean: 4 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 4 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex mr mw me de sd -ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall] -Size: 4 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/smaps_rollup -Lines: 17 -00400000-ffffffffff601000 ---p 00000000 00:00 0 [rollup] -Rss: 29948 kB -Pss: 29944 kB -Shared_Clean: 4 kB -Shared_Dirty: 0 kB -Private_Clean: 15548 kB -Private_Dirty: 14396 kB -Referenced: 24752 kB -Anonymous: 20756 kB -LazyFree: 5848 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 1940 kB -SwapPss: 1940 kB -Locked: 0 kB -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: 
fixtures/proc/26231/stat Lines: 1 26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 @@ -466,7 +189,7 @@ Ngid: 0 Pid: 26231 PPid: 1 TracerPid: 0 -Uid: 1000 1000 1000 0 +Uid: 0 0 0 0 Gid: 0 0 0 0 FDSize: 128 Groups: @@ -512,11 +235,6 @@ voluntary_ctxt_switches: 4742839 nonvoluntary_ctxt_switches: 1727500 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/wchan -Lines: 1 -poll_schedule_timeoutEOF -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/26232 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -571,19 +289,6 @@ Max realtime priority 0 0 Max realtime timeout unlimited unlimited us Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/maps -Lines: 9 -55680ae1e000-55680ae20000 r--p 00000000 fd:01 47316994 /bin/cat -55680ae29000-55680ae2a000 rwxs 0000a000 fd:01 47316994 /bin/cat -55680bed6000-55680bef7000 rw-p 00000000 00:00 0 [heap] -7fdf964fc000-7fdf973f2000 r--p 00000000 fd:01 17432624 /usr/lib/locale/locale-archive -7fdf973f2000-7fdf97417000 r--p 00000000 fd:01 60571062 /lib/x86_64-linux-gnu/libc-2.29.so -7ffe9215c000-7ffe9217f000 rw-p 00000000 00:00 0 [stack] -7ffe921da000-7ffe921dd000 r--p 00000000 00:00 0 [vvar] -7ffe921dd000-7ffe921de000 r-xp 00000000 00:00 0 [vdso] -ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/26232/root SymlinkTo: /does/not/exist # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -592,11 +297,6 @@ Lines: 1 33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/wchan -Lines: 1 -0EOF -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/26233 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -617,17 +317,6 @@ Lines: 8 || || Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26234 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26234/maps -Lines: 4 -08048000-08089000 r-xp 00000000 03:01 104219 /bin/tcsh -08089000-0808c000 rw-p 00041000 03:01 104219 /bin/tcsh -0808c000-08146000 rwxp 00000000 00:00 0 -40000000-40015000 r-xp 00000000 03:01 61874 /lib/ld-2.3.2.so -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/584 Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -865,7 +554,7 @@ power management: Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/crypto -Lines: 972 +Lines: 971 name : ccm(aes) driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni)) module : ccm @@ -899,7 +588,6 @@ refcnt 
: 1 selftest : passed internal : no type : kpp -async : yes name : ecb(arc4) driver : ecb(arc4)-generic @@ -1841,7 +1529,7 @@ max keysize : 32 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/diskstats -Lines: 52 +Lines: 49 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 @@ -1891,45 +1579,11 @@ Lines: 52 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 - 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182 - 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0 - 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/fs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/fscache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/fscache/stats -Lines: 24 -FS-Cache statistics -Cookies: idx=3 dat=67877 spc=0 -Objects: alc=67473 nal=0 avl=67473 ded=388 -ChkAux : non=12 ok=33 upd=44 obs=55 -Pages : mrk=547164 unc=364577 -Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26 -Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85 -Invals : n=14 run=13 -Updates: n=7 nul=3 run=8 -Relinqs: n=394 nul=1 wcr=2 rtr=3 -AttrChg: n=6 ok=5 nbf=4 oom=3 run=2 -Allocs : n=20 ok=19 wt=18 nbf=17 int=16 -Allocs : ops=15 owt=14 abt=13 -Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43 -Retrvls: ops=151959 owt=42747 abt=44 -Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14 -Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43 -VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66 -Ops : pend=42753 run=221129 enq=628798 can=11 rej=88 -Ops : ini=377538 dfr=27 rel=377538 gc=37 -CacheOp: alo=1 luo=2 luc=3 gro=4 -CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10 -CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17 -CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/fs/xfs Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -1960,11 +1614,6 @@ xpc 399724544 92823103 86219234 debug 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/loadavg -Lines: 1 -0.02 0.04 0.05 1/497 11947 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/mdstat Lines: 56 Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] @@ -2172,35 +1821,8 @@ FRAG6: inuse 0 memory 0 Mode: 444 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/net/softnet_stat -Lines: 2 -00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 -01663fb2 00000000 000109a4 00000000 00000000 00000000 00000000 00000000 00000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/softnet_stat.broken Lines: 1 -00015c73 00020e76 F0000769 00000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/proc/net/udp -Lines: 4 - sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 0A000005:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp6 -Lines: 3 - sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops - 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 - 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp_broken -Lines: 2 - sl local_address rem_address st - 1: 00000000:0016 00000000:0000 0A +00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/proc/net/unix @@ -2308,12 +1930,6 @@ procs_blocked 1 softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/swaps -Lines: 2 -Filename Type Size Used Priority -/dev/dm-2 partition 131068 176 -2 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/symlinktargets Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -2346,32 +1962,6 @@ Mode: 644 Directory: fixtures/proc/sys Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/sys/kernel -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/sys/kernel/random -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/entropy_avail -Lines: 1 -3943 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/poolsize -Lines: 1 -4096 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/urandom_min_reseed_secs -Lines: 1 -60 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/write_wakeup_threshold -Lines: 1 -3072 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/proc/sys/vm Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -2873,237 +2463,6 @@ Mode: 664 Directory: fixtures/sys/block/sda Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/sda/queue -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/add_random -Lines: 1 -1 -Mode: 644 -# ttar - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/chunk_sectors -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/dax -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_granularity -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_max_bytes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_max_hw_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_zeroes_data -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/fua -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/hw_sector_size -Lines: 1 -512 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/io_poll -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/io_poll_delay -Lines: 1 --1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/io_timeout -Lines: 1 -30000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/sda/queue/iosched -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/back_seek_max -Lines: 1 -16384 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/back_seek_penalty -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_async -Lines: 1 -250 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_sync -Lines: 1 -125 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/low_latency -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/max_budget -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/slice_idle -Lines: 1 -8 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/slice_idle_us -Lines: 1 -8000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/strict_guarantees -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/timeout_sync -Lines: 1 -125 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/block/sda/queue/iostats -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/logical_block_size -Lines: 1 -512 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_discard_segments -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_hw_sectors_kb -Lines: 1 -32767 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_integrity_segments -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_sectors_kb -Lines: 1 -1280 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_segment_size -Lines: 1 -65536 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/max_segments -Lines: 1 -168 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/minimum_io_size -Lines: 1 -512 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/nomerges -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/nr_requests -Lines: 1 -64 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/nr_zones -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/optimal_io_size -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/physical_block_size -Lines: 1 -512 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/read_ahead_kb -Lines: 1 -128 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/rotational -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/rq_affinity -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/scheduler -Lines: 1 -mq-deadline kyber [bfq] none -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/wbt_lat_usec -Lines: 1 -75000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/write_cache -Lines: 1 -write back -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/write_same_max_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/write_zeroes_max_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/zoned -Lines: 1 -none -Mode: 444 -# ttar - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/block/sda/stat Lines: 1 9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12 @@ -3112,140 +2471,6 @@ Mode: 664 Directory: fixtures/sys/class Mode: 775 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/fc_host -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/fc_host/host0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/dev_loss_tmo -Lines: 1 -30 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/fabric_name -Lines: 1 -0x0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/node_name -Lines: 1 -0x2000e0071bce95f2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/port_id -Lines: 1 -0x000002 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/port_name -Lines: 1 -0x1000e0071bce95f2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/port_state -Lines: 1 -Online -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/port_type -Lines: 1 -Point-To-Point (direct nport connection) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/speed -Lines: 1 -16 Gbit -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/fc_host/host0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/dumped_frames -Lines: 1 -0xffffffffffffffff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/error_frames -Lines: 1 -0x0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/fcp_packet_aborts -Lines: 1 -0x13 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/invalid_crc_count -Lines: 1 -0x2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/invalid_tx_word_count -Lines: 1 -0x8 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/link_failure_count -Lines: 1 -0x9 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_signal_count -Lines: 1 -0x11 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_sync_count -Lines: 1 -0x10 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/class/fc_host/host0/statistics/nos_count -Lines: 1 -0x12 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/rx_frames -Lines: 1 -0x3 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/rx_words -Lines: 1 -0x4 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/seconds_since_last_reset -Lines: 1 -0x7 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/tx_frames -Lines: 1 -0x5 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/statistics/tx_words -Lines: 1 -0x6 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/supported_classes -Lines: 1 -Class 3 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/supported_speeds -Lines: 1 -4 Gbit, 8 Gbit, 16 Gbit -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/fc_host/host0/symbolic_name -Lines: 1 -Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Directory: fixtures/sys/class/infiniband Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -3276,11 +2501,6 @@ Mode: 755 Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors Lines: 1 0 @@ -3382,11 +2602,6 @@ Mode: 755 Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters Mode: 755 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors Lines: 1 0 @@ -3831,7 +3046,7 @@ Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/class/thermal/thermal_zone1/temp Lines: 1 --44000 +44000 Mode: 664 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/class/thermal/thermal_zone1/type @@ -5009,17 +4224,6 @@ Lines: 1 0 Mode: 644 # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/writeback_rate_debug -Lines: 7 -rate: 1.1M/sec -dirty: 20.4G -target: 20.4G -proportional: 427.5k -integral: 790.0k -change: 321.5k/sec -next io: 17ms -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size Lines: 1 0 diff --git 
a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go deleted file mode 100644 index 8783cf3cc18c..000000000000 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ /dev/null @@ -1,422 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Fscacheinfo represents fscache statistics. -type Fscacheinfo struct { - // Number of index cookies allocated - IndexCookiesAllocated uint64 - // data storage cookies allocated - DataStorageCookiesAllocated uint64 - // Number of special cookies allocated - SpecialCookiesAllocated uint64 - // Number of objects allocated - ObjectsAllocated uint64 - // Number of object allocation failures - ObjectAllocationsFailure uint64 - // Number of objects that reached the available state - ObjectsAvailable uint64 - // Number of objects that reached the dead state - ObjectsDead uint64 - // Number of objects that didn't have a coherency check - ObjectsWithoutCoherencyCheck uint64 - // Number of objects that passed a coherency check - ObjectsWithCoherencyCheck uint64 - // Number of objects that needed a coherency data update - ObjectsNeedCoherencyCheckUpdate uint64 - // Number of objects that were declared obsolete - ObjectsDeclaredObsolete uint64 - // Number of pages marked as being cached - PagesMarkedAsBeingCached uint64 - // Number of uncache page requests seen - UncachePagesRequestSeen uint64 - // Number of acquire cookie requests seen - AcquireCookiesRequestSeen uint64 - // Number of acq reqs given a NULL parent - AcquireRequestsWithNullParent uint64 - // Number of acq reqs rejected due to no cache available - AcquireRequestsRejectedNoCacheAvailable uint64 - // Number of acq reqs succeeded - AcquireRequestsSucceeded uint64 - // Number of acq reqs rejected due to error - AcquireRequestsRejectedDueToError uint64 - // Number of acq reqs failed on ENOMEM - AcquireRequestsFailedDueToEnomem uint64 - // Number of lookup calls made on cache backends - LookupsNumber uint64 - // Number of negative lookups made - LookupsNegative uint64 - // Number of positive lookups made - LookupsPositive uint64 - // Number of objects created by lookup - ObjectsCreatedByLookup uint64 - // Number of lookups timed out and requeued - LookupsTimedOutAndRequed uint64 - InvalidationsNumber uint64 - InvalidationsRunning uint64 - // Number of update cookie requests seen - UpdateCookieRequestSeen uint64 - // Number of upd reqs given a NULL parent - UpdateRequestsWithNullParent uint64 - // Number of upd reqs granted CPU time - UpdateRequestsRunning uint64 - // Number of relinquish cookie requests seen - RelinquishCookiesRequestSeen uint64 - // Number of rlq reqs given a NULL parent - RelinquishCookiesWithNullParent uint64 - // Number of rlq reqs waited on completion of creation - RelinquishRequestsWaitingCompleteCreation uint64 - // Relinqs rtr - RelinquishRetries 
uint64 - // Number of attribute changed requests seen - AttributeChangedRequestsSeen uint64 - // Number of attr changed requests queued - AttributeChangedRequestsQueued uint64 - // Number of attr changed rejected -ENOBUFS - AttributeChangedRejectDueToEnobufs uint64 - // Number of attr changed failed -ENOMEM - AttributeChangedFailedDueToEnomem uint64 - // Number of attr changed ops given CPU time - AttributeChangedOps uint64 - // Number of allocation requests seen - AllocationRequestsSeen uint64 - // Number of successful alloc reqs - AllocationOkRequests uint64 - // Number of alloc reqs that waited on lookup completion - AllocationWaitingOnLookup uint64 - // Number of alloc reqs rejected -ENOBUFS - AllocationsRejectedDueToEnobufs uint64 - // Number of alloc reqs aborted -ERESTARTSYS - AllocationsAbortedDueToErestartsys uint64 - // Number of alloc reqs submitted - AllocationOperationsSubmitted uint64 - // Number of alloc reqs waited for CPU time - AllocationsWaitedForCPU uint64 - // Number of alloc reqs aborted due to object death - AllocationsAbortedDueToObjectDeath uint64 - // Number of retrieval (read) requests seen - RetrievalsReadRequests uint64 - // Number of successful retr reqs - RetrievalsOk uint64 - // Number of retr reqs that waited on lookup completion - RetrievalsWaitingLookupCompletion uint64 - // Number of retr reqs returned -ENODATA - RetrievalsReturnedEnodata uint64 - // Number of retr reqs rejected -ENOBUFS - RetrievalsRejectedDueToEnobufs uint64 - // Number of retr reqs aborted -ERESTARTSYS - RetrievalsAbortedDueToErestartsys uint64 - // Number of retr reqs failed -ENOMEM - RetrievalsFailedDueToEnomem uint64 - // Number of retr reqs submitted - RetrievalsRequests uint64 - // Number of retr reqs waited for CPU time - RetrievalsWaitingCPU uint64 - // Number of retr reqs aborted due to object death - RetrievalsAbortedDueToObjectDeath uint64 - // Number of storage (write) requests seen - StoreWriteRequests uint64 - // Number of successful store reqs - StoreSuccessfulRequests uint64 - // Number of store reqs on a page already pending storage - StoreRequestsOnPendingStorage uint64 - // Number of store reqs rejected -ENOBUFS - StoreRequestsRejectedDueToEnobufs uint64 - // Number of store reqs failed -ENOMEM - StoreRequestsFailedDueToEnomem uint64 - // Number of store reqs submitted - StoreRequestsSubmitted uint64 - // Number of store reqs granted CPU time - StoreRequestsRunning uint64 - // Number of pages given store req processing time - StorePagesWithRequestsProcessing uint64 - // Number of store reqs deleted from tracking tree - StoreRequestsDeleted uint64 - // Number of store reqs over store limit - StoreRequestsOverStoreLimit uint64 - // Number of release reqs against pages with no pending store - ReleaseRequestsAgainstPagesWithNoPendingStorage uint64 - // Number of release reqs against pages stored by time lock granted - ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 - // Number of release reqs ignored due to in-progress store - ReleaseRequestsIgnoredDueToInProgressStore uint64 - // Number of page stores cancelled due to release req - PageStoresCancelledByReleaseRequests uint64 - VmscanWaiting uint64 - // Number of times async ops added to pending queues - OpsPending uint64 - // Number of times async ops given CPU time - OpsRunning uint64 - // Number of times async ops queued for processing - OpsEnqueued uint64 - // Number of async ops cancelled - OpsCancelled uint64 - // Number of async ops rejected due to object lookup/create failure - OpsRejected uint64 - // 
Number of async ops initialised - OpsInitialised uint64 - // Number of async ops queued for deferred release - OpsDeferred uint64 - // Number of async ops released (should equal ini=N when idle) - OpsReleased uint64 - // Number of deferred-release async ops garbage collected - OpsGarbageCollected uint64 - // Number of in-progress alloc_object() cache ops - CacheopAllocationsinProgress uint64 - // Number of in-progress lookup_object() cache ops - CacheopLookupObjectInProgress uint64 - // Number of in-progress lookup_complete() cache ops - CacheopLookupCompleteInPorgress uint64 - // Number of in-progress grab_object() cache ops - CacheopGrabObjectInProgress uint64 - CacheopInvalidations uint64 - // Number of in-progress update_object() cache ops - CacheopUpdateObjectInProgress uint64 - // Number of in-progress drop_object() cache ops - CacheopDropObjectInProgress uint64 - // Number of in-progress put_object() cache ops - CacheopPutObjectInProgress uint64 - // Number of in-progress attr_changed() cache ops - CacheopAttributeChangeInProgress uint64 - // Number of in-progress sync_cache() cache ops - CacheopSyncCacheInProgress uint64 - // Number of in-progress read_or_alloc_page() cache ops - CacheopReadOrAllocPageInProgress uint64 - // Number of in-progress read_or_alloc_pages() cache ops - CacheopReadOrAllocPagesInProgress uint64 - // Number of in-progress allocate_page() cache ops - CacheopAllocatePageInProgress uint64 - // Number of in-progress allocate_pages() cache ops - CacheopAllocatePagesInProgress uint64 - // Number of in-progress write_page() cache ops - CacheopWritePagesInProgress uint64 - // Number of in-progress uncache_page() cache ops - CacheopUncachePagesInProgress uint64 - // Number of in-progress dissociate_pages() cache ops - CacheopDissociatePagesInProgress uint64 - // Number of object lookups/creations rejected due to lack of space - CacheevLookupsAndCreationsRejectedLackSpace uint64 - // Number of stale objects deleted - CacheevStaleObjectsDeleted uint64 - // Number of objects retired when relinquished - CacheevRetiredWhenReliquished uint64 - // Number of objects culled - CacheevObjectsCulled uint64 -} - -// Fscacheinfo returns information about current fscache statistics. 
-// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt -func (fs FS) Fscacheinfo() (Fscacheinfo, error) { - b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats")) - if err != nil { - return Fscacheinfo{}, err - } - - m, err := parseFscacheinfo(bytes.NewReader(b)) - if err != nil { - return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %v", err) - } - - return *m, nil -} - -func setFSCacheFields(fields []string, setFields ...*uint64) error { - var err error - if len(fields) < len(setFields) { - return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) - } - - for i := range setFields { - *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64) - if err != nil { - return err - } - } - return nil -} - -func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { - var m Fscacheinfo - s := bufio.NewScanner(r) - for s.Scan() { - fields := strings.Fields(s.Text()) - if len(fields) < 2 { - return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) - } - - switch fields[0] { - case "Cookies:": - err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated, - &m.SpecialCookiesAllocated) - if err != nil { - return &m, err - } - case "Objects:": - err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure, - &m.ObjectsAvailable, &m.ObjectsDead) - if err != nil { - return &m, err - } - case "ChkAux": - err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck, - &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete) - if err != nil { - return &m, err - } - case "Pages": - err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen) - if err != nil { - return &m, err - } - case "Acquire:": - err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent, - &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError, - &m.AcquireRequestsFailedDueToEnomem) - if err != nil { - return &m, err - } - case "Lookups:": - err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive, - &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed) - if err != nil { - return &m, err - } - case "Invals": - err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning) - if err != nil { - return &m, err - } - case "Updates:": - err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent, - &m.UpdateRequestsRunning) - if err != nil { - return &m, err - } - case "Relinqs:": - err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent, - &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries) - if err != nil { - return &m, err - } - case "AttrChg:": - err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued, - &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps) - if err != nil { - return &m, err - } - case "Allocs": - if strings.Split(fields[2], "=")[0] == "n" { - err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests, - &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[2:], 
&m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU, - &m.AllocationsAbortedDueToObjectDeath) - if err != nil { - return &m, err - } - } - case "Retrvls:": - if strings.Split(fields[1], "=")[0] == "n" { - err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion, - &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys, - &m.RetrievalsFailedDueToEnomem) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath) - if err != nil { - return &m, err - } - } - case "Stores": - if strings.Split(fields[2], "=")[0] == "n" { - err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests, - &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning, - &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit) - if err != nil { - return &m, err - } - } - case "VmScan": - err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage, - &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore, - &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting) - if err != nil { - return &m, err - } - case "Ops": - if strings.Split(fields[2], "=")[0] == "pend" { - err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected) - if err != nil { - return &m, err - } - } - case "CacheOp:": - if strings.Split(fields[1], "=")[0] == "alo" { - err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress, - &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress) - if err != nil { - return &m, err - } - } else if strings.Split(fields[1], "=")[0] == "inv" { - err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress, - &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress, - &m.CacheopSyncCacheInProgress) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress, - &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress, - &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress) - if err != nil { - return &m, err - } - } - case "CacheEv:": - err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted, - &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled) - if err != nil { - return &m, err - } - } - } - - return &m, nil -} diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod index ded48253cd66..0e04e5d1fdac 100644 --- a/vendor/github.com/prometheus/procfs/go.mod +++ b/vendor/github.com/prometheus/procfs/go.mod @@ -5,5 +5,4 @@ go 1.12 require ( github.com/google/go-cmp v0.3.1 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e - golang.org/x/sys 
v0.0.0-20200106162015-b016eb3dc98e ) diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum index 54b5f330339f..33b824b01bcf 100644 --- a/vendor/github.com/prometheus/procfs/go.sum +++ b/vendor/github.com/prometheus/procfs/go.sum @@ -2,5 +2,3 @@ github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e h1:LwyF2AFISC9nVbS6MgzsaQNSUsRXI49GS+YQ5KX/QH0= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 22cb07a6bbb5..755591d9a5e9 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -73,15 +73,6 @@ func ReadUintFromFile(path string) (uint64, error) { return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) } -// ReadIntFromFile reads a file and attempts to parse a int64 from it. -func ReadIntFromFile(path string) (int64, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) -} - // ParseBool parses a string into a boolean pointer. func ParseBool(b string) *bool { var truth bool diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go deleted file mode 100644 index beefdf02e934..000000000000 --- a/vendor/github.com/prometheus/procfs/kernel_random.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package procfs - -import ( - "os" - - "github.com/prometheus/procfs/internal/util" -) - -// KernelRandom contains information about to the kernel's random number generator. -type KernelRandom struct { - // EntropyAvaliable gives the available entropy, in bits. - EntropyAvaliable *uint64 - // PoolSize gives the size of the entropy pool, in bytes. - PoolSize *uint64 - // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded. - URandomMinReseedSeconds *uint64 - // WriteWakeupThreshold the number of bits of entropy below which we wake up processes - // that do a select(2) or poll(2) for write access to /dev/random. - WriteWakeupThreshold *uint64 - // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep - // waiting for entropy from /dev/random. - ReadWakeupThreshold *uint64 -} - -// KernelRandom returns values from /proc/sys/kernel/random. 
-func (fs FS) KernelRandom() (KernelRandom, error) { - random := KernelRandom{} - - for file, p := range map[string]**uint64{ - "entropy_avail": &random.EntropyAvaliable, - "poolsize": &random.PoolSize, - "urandom_min_reseed_secs": &random.URandomMinReseedSeconds, - "write_wakeup_threshold": &random.WriteWakeupThreshold, - "read_wakeup_threshold": &random.ReadWakeupThreshold, - } { - val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file)) - if os.IsNotExist(err) { - continue - } - if err != nil { - return random, err - } - *p = &val - } - - return random, nil -} diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go deleted file mode 100644 index 00bbe1441727..000000000000 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// LoadAvg represents an entry in /proc/loadavg -type LoadAvg struct { - Load1 float64 - Load5 float64 - Load15 float64 -} - -// LoadAvg returns loadavg from /proc. -func (fs FS) LoadAvg() (*LoadAvg, error) { - path := fs.proc.Path("loadavg") - - data, err := util.ReadFileNoStat(path) - if err != nil { - return nil, err - } - return parseLoad(data) -} - -// Parse /proc loadavg and return 1m, 5m and 15m. -func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { - loads := make([]float64, 3) - parts := strings.Fields(string(loadavgBytes)) - if len(parts) < 3 { - return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes)) - } - - var err error - for i, load := range parts[0:3] { - loads[i], err = strconv.ParseFloat(load, 64) - if err != nil { - return nil, fmt.Errorf("could not parse load '%s': %s", load, err) - } - } - return &LoadAvg{ - Load1: loads[0], - Load5: loads[1], - Load15: loads[2], - }, nil -} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 3e9362a94d93..2af3ada18045 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -52,7 +52,7 @@ type MDStat struct { func (fs FS) MDStat() ([]MDStat, error) { data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) if err != nil { - return nil, err + return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) } mdstat, err := parseMDStat(data) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go index 59f4d5055836..bb01bb5a2a9f 100644 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -29,10 +29,10 @@ import ( // is described in the following man page. 
// http://man7.org/linux/man-pages/man5/proc.5.html type MountInfo struct { - // Unique ID for the mount - MountID int - // The ID of the parent mount - ParentID int + // Unique Id for the mount + MountId int + // The Id of the parent mount + ParentId int // The value of `st_dev` for the files on this FS MajorMinorVer string // The pathname of the directory in the FS that forms @@ -77,7 +77,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { mountInfo := strings.Split(mountString, " ") mountInfoLength := len(mountInfo) - if mountInfoLength < 10 { + if mountInfoLength < 11 { return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) } @@ -96,11 +96,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]), } - mount.MountID, err = strconv.Atoi(mountInfo[0]) + mount.MountId, err = strconv.Atoi(mountInfo[0]) if err != nil { return nil, fmt.Errorf("failed to parse mount ID") } - mount.ParentID, err = strconv.Atoi(mountInfo[1]) + mount.ParentId, err = strconv.Atoi(mountInfo[1]) if err != nil { return nil, fmt.Errorf("failed to parse parent ID") } @@ -144,7 +144,7 @@ func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { return optionalFields, nil } -// mountOptionsParser parses the mount options, superblock options. +// Parses the mount options, superblock options. func mountOptionsParser(mountOptions string) map[string]string { opts := make(map[string]string) options := strings.Split(mountOptions, ",") @@ -161,7 +161,7 @@ func mountOptionsParser(mountOptions string) map[string]string { return opts } -// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`. +// Retrieves mountinfo information from `/proc/self/mountinfo`. func GetMounts() ([]*MountInfo, error) { data, err := util.ReadFileNoStat("/proc/self/mountinfo") if err != nil { @@ -170,7 +170,7 @@ func GetMounts() ([]*MountInfo, error) { return parseMountInfo(data) } -// GetProcMounts retrieves mountinfo information from a processes' `/proc//mountinfo`. +// Retrieves mountinfo information from a processes' `/proc//mountinfo`. func GetProcMounts(pid int) ([]*MountInfo, error) { data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid)) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 861ced9da030..35b2ef3513f9 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -186,8 +186,6 @@ type NFSOperationStats struct { CumulativeTotalResponseMilliseconds uint64 // Duration from when a request was enqueued to when it was completely handled. CumulativeTotalRequestMilliseconds uint64 - // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. - Errors uint64 } // A NFSTransportStats contains statistics for the NFS mount RPC requests and @@ -496,8 +494,8 @@ func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { // line is reached. 
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { const ( - // Minimum number of expected fields in each per-operation statistics set - minFields = 9 + // Number of expected fields in each per-operation statistics set + numFields = 9 ) var ops []NFSOperationStats @@ -510,12 +508,12 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { break } - if len(ss) < minFields { + if len(ss) != numFields { return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) } // Skip string operation name for integers - ns := make([]uint64, 0, minFields-1) + ns := make([]uint64, 0, numFields-1) for _, st := range ss[1:] { n, err := strconv.ParseUint(st, 10, 64) if err != nil { @@ -525,7 +523,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { ns = append(ns, n) } - opStats := NFSOperationStats{ + ops = append(ops, NFSOperationStats{ Operation: strings.TrimSuffix(ss[0], ":"), Requests: ns[0], Transmissions: ns[1], @@ -535,13 +533,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { CumulativeQueueMilliseconds: ns[5], CumulativeTotalResponseMilliseconds: ns[6], CumulativeTotalRequestMilliseconds: ns[7], - } - - if len(ns) > 8 { - opStats.Errors = ns[8] - } - - ops = append(ops, opStats) + }) } return ops, s.Err() diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go deleted file mode 100644 index b637be98458f..000000000000 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// A ConntrackStatEntry represents one line from net/stat/nf_conntrack -// and contains netfilter conntrack statistics at one CPU core -type ConntrackStatEntry struct { - Entries uint64 - Found uint64 - Invalid uint64 - Ignore uint64 - Insert uint64 - InsertFailed uint64 - Drop uint64 - EarlyDrop uint64 - SearchRestart uint64 -} - -// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores -func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { - return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) -} - -// Parses a slice of ConntrackStatEntries from the given filepath -func readConntrackStat(path string) ([]ConntrackStatEntry, error) { - // This file is small and can be read with one syscall. - b, err := util.ReadFileNoStat(path) - if err != nil { - // Do not wrap this error so the caller can detect os.IsNotExist and - // similar conditions. 
- return nil, err - } - - stat, err := parseConntrackStat(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err) - } - - return stat, nil -} - -// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries -func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { - var entries []ConntrackStatEntry - - scanner := bufio.NewScanner(r) - scanner.Scan() - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - conntrackEntry, err := parseConntrackStatEntry(fields) - if err != nil { - return nil, err - } - entries = append(entries, *conntrackEntry) - } - - return entries, nil -} - -// Parses a ConntrackStatEntry from given array of fields -func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { - if len(fields) != 17 { - return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") - } - entry := &ConntrackStatEntry{} - - entries, err := parseConntrackStatField(fields[0]) - if err != nil { - return nil, err - } - entry.Entries = entries - - found, err := parseConntrackStatField(fields[2]) - if err != nil { - return nil, err - } - entry.Found = found - - invalid, err := parseConntrackStatField(fields[4]) - if err != nil { - return nil, err - } - entry.Invalid = invalid - - ignore, err := parseConntrackStatField(fields[5]) - if err != nil { - return nil, err - } - entry.Ignore = ignore - - insert, err := parseConntrackStatField(fields[8]) - if err != nil { - return nil, err - } - entry.Insert = insert - - insertFailed, err := parseConntrackStatField(fields[9]) - if err != nil { - return nil, err - } - entry.InsertFailed = insertFailed - - drop, err := parseConntrackStatField(fields[10]) - if err != nil { - return nil, err - } - entry.Drop = drop - - earlyDrop, err := parseConntrackStatField(fields[11]) - if err != nil { - return nil, err - } - entry.EarlyDrop = earlyDrop - - searchRestart, err := parseConntrackStatField(fields[16]) - if err != nil { - return nil, err - } - entry.SearchRestart = searchRestart - - return entry, nil -} - -// Parses a uint64 from given hex in string -func parseConntrackStatField(field string) (uint64, error) { - val, err := strconv.ParseUint(field, 16, 64) - if err != nil { - return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err) - } - return val, err -} diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index db5debdf4a1f..6fcad20afc75 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -14,89 +14,78 @@ package procfs import ( - "bufio" - "bytes" "fmt" - "io" + "io/ioutil" "strconv" "strings" - - "github.com/prometheus/procfs/internal/util" ) // For the proc file format details, -// See: -// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 -// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 +// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 // and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. 
-// SoftnetStat contains a single row of data from /proc/net/softnet_stat -type SoftnetStat struct { +// SoftnetEntry contains a single row of data from /proc/net/softnet_stat +type SoftnetEntry struct { // Number of processed packets - Processed uint32 + Processed uint // Number of dropped packets - Dropped uint32 + Dropped uint // Number of times processing packets ran out of quota - TimeSqueezed uint32 + TimeSqueezed uint } -var softNetProcFile = "net/softnet_stat" - -// NetSoftnetStat reads data from /proc/net/softnet_stat. -func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { - b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile)) - if err != nil { - return nil, err - } - - entries, err := parseSoftnet(bytes.NewReader(b)) +// GatherSoftnetStats reads /proc/net/softnet_stat, parse the relevant columns, +// and then return a slice of SoftnetEntry's. +func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) { + data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat")) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err) + return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err) } - return entries, nil + return parseSoftnetEntries(data) } -func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { - const minColumns = 9 - - s := bufio.NewScanner(r) - - var stats []SoftnetStat - for s.Scan() { - columns := strings.Fields(s.Text()) +func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) { + lines := strings.Split(string(data), "\n") + entries := make([]SoftnetEntry, 0) + var err error + const ( + expectedColumns = 11 + ) + for _, line := range lines { + columns := strings.Fields(line) width := len(columns) - - if width < minColumns { - return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) + if width == 0 { + continue } - - // We only parse the first three columns at the moment. 
- us, err := parseHexUint32s(columns[0:3]) - if err != nil { - return nil, err + if width != expectedColumns { + return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns) } - - stats = append(stats, SoftnetStat{ - Processed: us[0], - Dropped: us[1], - TimeSqueezed: us[2], - }) + var entry SoftnetEntry + if entry, err = parseSoftnetEntry(columns); err != nil { + return []SoftnetEntry{}, err + } + entries = append(entries, entry) } - return stats, nil + return entries, nil } -func parseHexUint32s(ss []string) ([]uint32, error) { - us := make([]uint32, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 16, 32) - if err != nil { - return nil, err - } - - us = append(us, uint32(u)) +func parseSoftnetEntry(columns []string) (SoftnetEntry, error) { + var err error + var processed, dropped, timeSqueezed uint64 + if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil { + return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err) } - - return us, nil + if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil { + return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err) + } + if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil { + return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err) + } + return SoftnetEntry{ + Processed: uint(processed), + Dropped: uint(dropped), + TimeSqueezed: uint(timeSqueezed), + }, nil } diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go deleted file mode 100644 index d017e3f18da0..000000000000 --- a/vendor/github.com/prometheus/procfs/net_udp.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "encoding/hex" - "fmt" - "io" - "net" - "os" - "strconv" - "strings" -) - -const ( - // readLimit is used by io.LimitReader while reading the content of the - // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic - // as each line represents a single used socket. - // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. - // With e.g. 150 Byte per line and the maximum number of 65535, - // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. - readLimit = 4294967296 // Byte -> 4 GiB -) - -type ( - // NetUDP represents the contents of /proc/net/udp{,6} file without the header. - NetUDP []*netUDPLine - - // NetUDPSummary provides already computed values like the total queue lengths or - // the total number of used sockets. In contrast to NetUDP it does not collect - // the parsed lines into a slice. - NetUDPSummary struct { - // TxQueueLength shows the total queue length of all parsed tx_queue lengths. - TxQueueLength uint64 - // RxQueueLength shows the total queue length of all parsed rx_queue lengths. 
- RxQueueLength uint64 - // UsedSockets shows the total number of parsed lines representing the - // number of used sockets. - UsedSockets uint64 - } - - // netUDPLine represents the fields parsed from a single line - // in /proc/net/udp{,6}. Fields which are not used by UDP are skipped. - // For the proc file format details, see https://linux.die.net/man/5/proc. - netUDPLine struct { - Sl uint64 - LocalAddr net.IP - LocalPort uint64 - RemAddr net.IP - RemPort uint64 - St uint64 - TxQueue uint64 - RxQueue uint64 - UID uint64 - } -) - -// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams -// read from /proc/net/udp. -func (fs FS) NetUDP() (NetUDP, error) { - return newNetUDP(fs.proc.Path("net/udp")) -} - -// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams -// read from /proc/net/udp6. -func (fs FS) NetUDP6() (NetUDP, error) { - return newNetUDP(fs.proc.Path("net/udp6")) -} - -// NetUDPSummary returns already computed statistics like the total queue lengths -// for UDP datagrams read from /proc/net/udp. -func (fs FS) NetUDPSummary() (*NetUDPSummary, error) { - return newNetUDPSummary(fs.proc.Path("net/udp")) -} - -// NetUDP6Summary returns already computed statistics like the total queue lengths -// for UDP datagrams read from /proc/net/udp6. -func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) { - return newNetUDPSummary(fs.proc.Path("net/udp6")) -} - -// newNetUDP creates a new NetUDP{,6} from the contents of the given file. -func newNetUDP(file string) (NetUDP, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - netUDP := NetUDP{} - - lr := io.LimitReader(f, readLimit) - s := bufio.NewScanner(lr) - s.Scan() // skip first line with headers - for s.Scan() { - fields := strings.Fields(s.Text()) - line, err := parseNetUDPLine(fields) - if err != nil { - return nil, err - } - netUDP = append(netUDP, line) - } - if err := s.Err(); err != nil { - return nil, err - } - return netUDP, nil -} - -// newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file. -func newNetUDPSummary(file string) (*NetUDPSummary, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - netUDPSummary := &NetUDPSummary{} - - lr := io.LimitReader(f, readLimit) - s := bufio.NewScanner(lr) - s.Scan() // skip first line with headers - for s.Scan() { - fields := strings.Fields(s.Text()) - line, err := parseNetUDPLine(fields) - if err != nil { - return nil, err - } - netUDPSummary.TxQueueLength += line.TxQueue - netUDPSummary.RxQueueLength += line.RxQueue - netUDPSummary.UsedSockets++ - } - if err := s.Err(); err != nil { - return nil, err - } - return netUDPSummary, nil -} - -// parseNetUDPLine parses a single line, represented by a list of fields. 
-func parseNetUDPLine(fields []string) (*netUDPLine, error) { - line := &netUDPLine{} - if len(fields) < 8 { - return nil, fmt.Errorf( - "cannot parse net udp socket line as it has less then 8 columns: %s", - strings.Join(fields, " "), - ) - } - var err error // parse error - - // sl - s := strings.Split(fields[0], ":") - if len(s) != 2 { - return nil, fmt.Errorf( - "cannot parse sl field in udp socket line: %s", fields[0]) - } - - if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err) - } - // local_address - l := strings.Split(fields[1], ":") - if len(l) != 2 { - return nil, fmt.Errorf( - "cannot parse local_address field in udp socket line: %s", fields[1]) - } - if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil { - return nil, fmt.Errorf( - "cannot parse local_address value in udp socket line: %s", err) - } - if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { - return nil, fmt.Errorf( - "cannot parse local_address port value in udp socket line: %s", err) - } - - // remote_address - r := strings.Split(fields[2], ":") - if len(r) != 2 { - return nil, fmt.Errorf( - "cannot parse rem_address field in udp socket line: %s", fields[1]) - } - if line.RemAddr, err = hex.DecodeString(r[0]); err != nil { - return nil, fmt.Errorf( - "cannot parse rem_address value in udp socket line: %s", err) - } - if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf( - "cannot parse rem_address port value in udp socket line: %s", err) - } - - // st - if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf( - "cannot parse st value in udp socket line: %s", err) - } - - // tx_queue and rx_queue - q := strings.Split(fields[4], ":") - if len(q) != 2 { - return nil, fmt.Errorf( - "cannot parse tx/rx queues in udp socket line as it has a missing colon: %s", - fields[4], - ) - } - if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err) - } - if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err) - } - - // uid - if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf( - "cannot parse uid value in udp socket line: %s", err) - } - - return line, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index c55b4b18e4f3..93bd58f8095a 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -15,6 +15,7 @@ package procfs import ( "bufio" + "errors" "fmt" "io" "os" @@ -26,15 +27,25 @@ import ( // see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 // and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. -// Constants for the various /proc/net/unix enumerations. -// TODO: match against x/sys/unix or similar? +const ( + netUnixKernelPtrIdx = iota + netUnixRefCountIdx + _ + netUnixFlagsIdx + netUnixTypeIdx + netUnixStateIdx + netUnixInodeIdx + + // Inode and Path are optional. 
+ netUnixStaticFieldsCnt = 6 +) + const ( netUnixTypeStream = 1 netUnixTypeDgram = 2 netUnixTypeSeqpacket = 5 - netUnixFlagDefault = 0 - netUnixFlagListen = 1 << 16 + netUnixFlagListen = 1 << 16 netUnixStateUnconnected = 1 netUnixStateConnecting = 2 @@ -42,127 +53,129 @@ const ( netUnixStateDisconnected = 4 ) -// NetUNIXType is the type of the type field. -type NetUNIXType uint64 +var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format") -// NetUNIXFlags is the type of the flags field. -type NetUNIXFlags uint64 +// NetUnixType is the type of the type field. +type NetUnixType uint64 -// NetUNIXState is the type of the state field. -type NetUNIXState uint64 +// NetUnixFlags is the type of the flags field. +type NetUnixFlags uint64 -// NetUNIXLine represents a line of /proc/net/unix. -type NetUNIXLine struct { +// NetUnixState is the type of the state field. +type NetUnixState uint64 + +// NetUnixLine represents a line of /proc/net/unix. +type NetUnixLine struct { KernelPtr string RefCount uint64 Protocol uint64 - Flags NetUNIXFlags - Type NetUNIXType - State NetUNIXState + Flags NetUnixFlags + Type NetUnixType + State NetUnixState Inode uint64 Path string } -// NetUNIX holds the data read from /proc/net/unix. -type NetUNIX struct { - Rows []*NetUNIXLine +// NetUnix holds the data read from /proc/net/unix. +type NetUnix struct { + Rows []*NetUnixLine } -// NetUNIX returns data read from /proc/net/unix. -func (fs FS) NetUNIX() (*NetUNIX, error) { - return readNetUNIX(fs.proc.Path("net/unix")) +// NewNetUnix returns data read from /proc/net/unix. +func NewNetUnix() (*NetUnix, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewNetUnix() } -// readNetUNIX reads data in /proc/net/unix format from the specified file. -func readNetUNIX(file string) (*NetUNIX, error) { - // This file could be quite large and a streaming read is desirable versus - // reading the entire contents at once. - f, err := os.Open(file) +// NewNetUnix returns data read from /proc/net/unix. +func (fs FS) NewNetUnix() (*NetUnix, error) { + return NewNetUnixByPath(fs.proc.Path("net/unix")) +} + +// NewNetUnixByPath returns data read from /proc/net/unix by file path. +// It might returns an error with partial parsed data, if an error occur after some data parsed. +func NewNetUnixByPath(path string) (*NetUnix, error) { + f, err := os.Open(path) if err != nil { return nil, err } defer f.Close() - - return parseNetUNIX(f) + return NewNetUnixByReader(f) } -// parseNetUNIX creates a NetUnix structure from the incoming stream. -func parseNetUNIX(r io.Reader) (*NetUNIX, error) { - // Begin scanning by checking for the existence of Inode. - s := bufio.NewScanner(r) - s.Scan() - +// NewNetUnixByReader returns data read from /proc/net/unix by a reader. +// It might returns an error with partial parsed data, if an error occur after some data parsed. +func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) { + nu := &NetUnix{ + Rows: make([]*NetUnixLine, 0, 32), + } + scanner := bufio.NewScanner(reader) + // Omit the header line. + scanner.Scan() + header := scanner.Text() // From the man page of proc(5), it does not contain an Inode field, - // but in actually it exists. This code works for both cases. - hasInode := strings.Contains(s.Text(), "Inode") + // but in actually it exists. + // This code works for both cases. 
+ hasInode := strings.Contains(header, "Inode") - // Expect a minimum number of fields, but Inode and Path are optional: - // Num RefCount Protocol Flags Type St Inode Path - minFields := 6 + minFieldsCnt := netUnixStaticFieldsCnt if hasInode { - minFields++ + minFieldsCnt++ } - - var nu NetUNIX - for s.Scan() { - line := s.Text() - item, err := nu.parseLine(line, hasInode, minFields) + for scanner.Scan() { + line := scanner.Text() + item, err := nu.parseLine(line, hasInode, minFieldsCnt) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err) + return nu, err } - nu.Rows = append(nu.Rows, item) } - if err := s.Err(); err != nil { - return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err) - } - - return &nu, nil + return nu, scanner.Err() } -func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { +func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) { fields := strings.Fields(line) - - l := len(fields) - if l < min { - return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) + fieldsLen := len(fields) + if fieldsLen < minFieldsCnt { + return nil, fmt.Errorf( + "Parse Unix domain failed: expect at least %d fields but got %d", + minFieldsCnt, fieldsLen) } - - // Field offsets are as follows: - // Num RefCount Protocol Flags Type St Inode Path - - kernelPtr := strings.TrimSuffix(fields[0], ":") - - users, err := u.parseUsers(fields[1]) + kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx]) if err != nil { - return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err) + return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err) } - - flags, err := u.parseFlags(fields[3]) + users, err := u.parseUsers(fields[netUnixRefCountIdx]) if err != nil { - return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err) + return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err) } - - typ, err := u.parseType(fields[4]) + flags, err := u.parseFlags(fields[netUnixFlagsIdx]) if err != nil { - return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err) + return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err) } - - state, err := u.parseState(fields[5]) + typ, err := u.parseType(fields[netUnixTypeIdx]) if err != nil { - return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err) + return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err) + } + state, err := u.parseState(fields[netUnixStateIdx]) + if err != nil { + return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err) } - var inode uint64 if hasInode { - inode, err = u.parseInode(fields[6]) + inodeStr := fields[netUnixInodeIdx] + inode, err = u.parseInode(inodeStr) if err != nil { - return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err) + return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err) } } - n := &NetUNIXLine{ + nuLine := &NetUnixLine{ KernelPtr: kernelPtr, RefCount: users, Type: typ, @@ -172,56 +185,57 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, } // Path field is optional. - if l > min { - // Path occurs at either index 6 or 7 depending on whether inode is - // already present. 
- pathIdx := 7 + if fieldsLen > minFieldsCnt { + pathIdx := netUnixInodeIdx + 1 if !hasInode { pathIdx-- } - - n.Path = fields[pathIdx] + nuLine.Path = fields[pathIdx] } - return n, nil + return nuLine, nil +} + +func (u NetUnix) parseKernelPtr(str string) (string, error) { + if !strings.HasSuffix(str, ":") { + return "", errInvalidKernelPtrFmt + } + return str[:len(str)-1], nil } -func (u NetUNIX) parseUsers(s string) (uint64, error) { - return strconv.ParseUint(s, 16, 32) +func (u NetUnix) parseUsers(hexStr string) (uint64, error) { + return strconv.ParseUint(hexStr, 16, 32) } -func (u NetUNIX) parseType(s string) (NetUNIXType, error) { - typ, err := strconv.ParseUint(s, 16, 16) +func (u NetUnix) parseType(hexStr string) (NetUnixType, error) { + typ, err := strconv.ParseUint(hexStr, 16, 16) if err != nil { return 0, err } - - return NetUNIXType(typ), nil + return NetUnixType(typ), nil } -func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) { - flags, err := strconv.ParseUint(s, 16, 32) +func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) { + flags, err := strconv.ParseUint(hexStr, 16, 32) if err != nil { return 0, err } - - return NetUNIXFlags(flags), nil + return NetUnixFlags(flags), nil } -func (u NetUNIX) parseState(s string) (NetUNIXState, error) { - st, err := strconv.ParseInt(s, 16, 8) +func (u NetUnix) parseState(hexStr string) (NetUnixState, error) { + st, err := strconv.ParseInt(hexStr, 16, 8) if err != nil { return 0, err } - - return NetUNIXState(st), nil + return NetUnixState(st), nil } -func (u NetUNIX) parseInode(s string) (uint64, error) { - return strconv.ParseUint(s, 10, 64) +func (u NetUnix) parseInode(inodeStr string) (uint64, error) { + return strconv.ParseUint(inodeStr, 10, 64) } -func (t NetUNIXType) String() string { +func (t NetUnixType) String() string { switch t { case netUnixTypeStream: return "stream" @@ -233,7 +247,7 @@ func (t NetUNIXType) String() string { return "unknown" } -func (f NetUNIXFlags) String() string { +func (f NetUnixFlags) String() string { switch f { case netUnixFlagListen: return "listen" @@ -242,7 +256,7 @@ func (f NetUNIXFlags) String() string { } } -func (s NetUNIXState) String() string { +func (s NetUnixState) String() string { switch s { case netUnixStateUnconnected: return "unconnected" diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 9f97b6e5236a..330e472c70fd 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -134,27 +134,6 @@ func (p Proc) CmdLine() ([]string, error) { return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil } -// Wchan returns the wchan (wait channel) of a process. -func (p Proc) Wchan() (string, error) { - f, err := os.Open(p.path("wchan")) - if err != nil { - return "", err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return "", err - } - - wchan := string(data) - if wchan == "" || wchan == "0" { - return "", nil - } - - return wchan, nil -} - // Comm returns the command name of a process. 
func (p Proc) Comm() (string, error) { data, err := util.ReadFileNoStat(p.path("comm")) diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go deleted file mode 100644 index 4abd46451c69..000000000000 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a -// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource -// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies -// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in -// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of -// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID -// in this hierarchy -// -// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html -type Cgroup struct { - // HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one - // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number - HierarchyID int - // Controllers using this hierarchy of processes. Controllers are also known as subsystems. For - // Cgroups V2 this may be empty, as all active controllers use the same hierarchy - Controllers []string - // Path of this control group, relative to the mount point of the cgroupfs representing this specific - // hierarchy - Path string -} - -// parseCgroupString parses each line of the /proc/[pid]/cgroup file -// Line format is hierarchyID:[controller1,controller2]:path -func parseCgroupString(cgroupStr string) (*Cgroup, error) { - var err error - - fields := strings.Split(cgroupStr, ":") - if len(fields) < 3 { - return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) - } - - cgroup := &Cgroup{ - Path: fields[2], - Controllers: nil, - } - cgroup.HierarchyID, err = strconv.Atoi(fields[0]) - if err != nil { - return nil, fmt.Errorf("failed to parse hierarchy ID") - } - if fields[1] != "" { - ssNames := strings.Split(fields[1], ",") - cgroup.Controllers = append(cgroup.Controllers, ssNames...) 
- } - return cgroup, nil -} - -// parseCgroups reads each line of the /proc/[pid]/cgroup file -func parseCgroups(data []byte) ([]Cgroup, error) { - var cgroups []Cgroup - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - mountString := scanner.Text() - parsedMounts, err := parseCgroupString(mountString) - if err != nil { - return nil, err - } - cgroups = append(cgroups, *parsedMounts) - } - - err := scanner.Err() - return cgroups, err -} - -// Cgroups reads from /proc//cgroups and returns a []*Cgroup struct locating this PID in each process -// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes, -// so the len of the returned struct is equal to the number of active hierarchies on this system -func (p Proc) Cgroups() ([]Cgroup, error) { - data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID)) - if err != nil { - return nil, err - } - return parseCgroups(data) -} diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index a76ca7079194..4e7597f86b6e 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -16,7 +16,6 @@ package procfs import ( "bufio" "bytes" - "errors" "regexp" "github.com/prometheus/procfs/internal/util" @@ -24,11 +23,10 @@ import ( // Regexp variables var ( - rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) - rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) - rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) - rInotify = regexp.MustCompile(`^inotify`) - rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) + rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) + rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) + rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rInotify = regexp.MustCompile(`^inotify`) ) // ProcFDInfo contains represents file descriptor information. @@ -41,7 +39,7 @@ type ProcFDInfo struct { Flags string // Mount point ID MntID string - // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) + // List of inotify lines (structed) in the fdinfo file (kernel 3.8+ only) InotifyInfos []InotifyInfo } @@ -98,21 +96,15 @@ type InotifyInfo struct { // InotifyInfo constructor. Only available on kernel 3.8+. func parseInotifyInfo(line string) (*InotifyInfo, error) { - m := rInotifyParts.FindStringSubmatch(line) - if len(m) >= 4 { - var mask string - if len(m) == 5 { - mask = m[4] - } - i := &InotifyInfo{ - WD: m[1], - Ino: m[2], - Sdev: m[3], - Mask: mask, - } - return i, nil + r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`) + m := r.FindStringSubmatch(line) + i := &InotifyInfo{ + WD: m[1], + Ino: m[2], + Sdev: m[3], + Mask: m[4], } - return nil, errors.New("invalid inode entry: " + line) + return i, nil } // ProcFDInfos represents a list of ProcFDInfo structs. diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go deleted file mode 100644 index 1d7772d516a4..000000000000 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris - -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" - - "golang.org/x/sys/unix" -) - -// ProcMapPermissions contains permission settings read from /proc/[pid]/maps -type ProcMapPermissions struct { - // mapping has the [R]ead flag set - Read bool - // mapping has the [W]rite flag set - Write bool - // mapping has the [X]ecutable flag set - Execute bool - // mapping has the [S]hared flag set - Shared bool - // mapping is marked as [P]rivate (copy on write) - Private bool -} - -// ProcMap contains the process memory-mappings of the process, -// read from /proc/[pid]/maps -type ProcMap struct { - // The start address of current mapping. - StartAddr uintptr - // The end address of the current mapping - EndAddr uintptr - // The permissions for this mapping - Perms *ProcMapPermissions - // The current offset into the file/fd (e.g., shared libs) - Offset int64 - // Device owner of this mapping (major:minor) in Mkdev format. - Dev uint64 - // The inode of the device above - Inode uint64 - // The file or psuedofile (or empty==anonymous) - Pathname string -} - -// parseDevice parses the device token of a line and converts it to a dev_t -// (mkdev) like structure. -func parseDevice(s string) (uint64, error) { - toks := strings.Split(s, ":") - if len(toks) < 2 { - return 0, fmt.Errorf("unexpected number of fields") - } - - major, err := strconv.ParseUint(toks[0], 16, 0) - if err != nil { - return 0, err - } - - minor, err := strconv.ParseUint(toks[1], 16, 0) - if err != nil { - return 0, err - } - - return unix.Mkdev(uint32(major), uint32(minor)), nil -} - -// parseAddress just converts a hex-string to a uintptr -func parseAddress(s string) (uintptr, error) { - a, err := strconv.ParseUint(s, 16, 0) - if err != nil { - return 0, err - } - - return uintptr(a), nil -} - -// parseAddresses parses the start-end address -func parseAddresses(s string) (uintptr, uintptr, error) { - toks := strings.Split(s, "-") - if len(toks) < 2 { - return 0, 0, fmt.Errorf("invalid address") - } - - saddr, err := parseAddress(toks[0]) - if err != nil { - return 0, 0, err - } - - eaddr, err := parseAddress(toks[1]) - if err != nil { - return 0, 0, err - } - - return saddr, eaddr, nil -} - -// parsePermissions parses a token and returns any that are set. -func parsePermissions(s string) (*ProcMapPermissions, error) { - if len(s) < 4 { - return nil, fmt.Errorf("invalid permissions token") - } - - perms := ProcMapPermissions{} - for _, ch := range s { - switch ch { - case 'r': - perms.Read = true - case 'w': - perms.Write = true - case 'x': - perms.Execute = true - case 'p': - perms.Private = true - case 's': - perms.Shared = true - } - } - - return &perms, nil -} - -// parseProcMap will attempt to parse a single line within a proc/[pid]/maps -// buffer. 
-func parseProcMap(text string) (*ProcMap, error) { - fields := strings.Fields(text) - if len(fields) < 5 { - return nil, fmt.Errorf("truncated procmap entry") - } - - saddr, eaddr, err := parseAddresses(fields[0]) - if err != nil { - return nil, err - } - - perms, err := parsePermissions(fields[1]) - if err != nil { - return nil, err - } - - offset, err := strconv.ParseInt(fields[2], 16, 0) - if err != nil { - return nil, err - } - - device, err := parseDevice(fields[3]) - if err != nil { - return nil, err - } - - inode, err := strconv.ParseUint(fields[4], 10, 0) - if err != nil { - return nil, err - } - - pathname := "" - - if len(fields) >= 5 { - pathname = strings.Join(fields[5:], " ") - } - - return &ProcMap{ - StartAddr: saddr, - EndAddr: eaddr, - Perms: perms, - Offset: offset, - Dev: device, - Inode: inode, - Pathname: pathname, - }, nil -} - -// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the -// process. -func (p Proc) ProcMaps() ([]*ProcMap, error) { - file, err := os.Open(p.path("maps")) - if err != nil { - return nil, err - } - defer file.Close() - - maps := []*ProcMap{} - scan := bufio.NewScanner(file) - - for scan.Scan() { - m, err := parseProcMap(scan.Text()) - if err != nil { - return nil, err - } - - maps = append(maps, m) - } - - return maps, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go deleted file mode 100644 index a576a720a442..000000000000 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package procfs - -import ( - "bufio" - "errors" - "fmt" - "os" - "regexp" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -var ( - // match the header line before each mapped zone in /proc/pid/smaps - procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) -) - -type ProcSMapsRollup struct { - // Amount of the mapping that is currently resident in RAM - Rss uint64 - // Process's proportional share of this mapping - Pss uint64 - // Size in bytes of clean shared pages - SharedClean uint64 - // Size in bytes of dirty shared pages - SharedDirty uint64 - // Size in bytes of clean private pages - PrivateClean uint64 - // Size in bytes of dirty private pages - PrivateDirty uint64 - // Amount of memory currently marked as referenced or accessed - Referenced uint64 - // Amount of memory that does not belong to any file - Anonymous uint64 - // Amount would-be-anonymous memory currently on swap - Swap uint64 - // Process's proportional memory on swap - SwapPss uint64 -} - -// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the -// process. -// -// If smaps_rollup does not exists (require kernel >= 4.15), the content of /proc/pid/smaps will -// we read and summed. 
-func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) { - data, err := util.ReadFileNoStat(p.path("smaps_rollup")) - if err != nil && os.IsNotExist(err) { - return p.procSMapsRollupManual() - } - if err != nil { - return ProcSMapsRollup{}, err - } - - lines := strings.Split(string(data), "\n") - smaps := ProcSMapsRollup{} - - // skip first line which don't contains information we need - lines = lines[1:] - for _, line := range lines { - if line == "" { - continue - } - - if err := smaps.parseLine(line); err != nil { - return ProcSMapsRollup{}, err - } - } - - return smaps, nil -} - -// Read /proc/pid/smaps and do the roll-up in Go code. -func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { - file, err := os.Open(p.path("smaps")) - if err != nil { - return ProcSMapsRollup{}, err - } - defer file.Close() - - smaps := ProcSMapsRollup{} - scan := bufio.NewScanner(file) - - for scan.Scan() { - line := scan.Text() - - if procSMapsHeaderLine.MatchString(line) { - continue - } - - if err := smaps.parseLine(line); err != nil { - return ProcSMapsRollup{}, err - } - } - - return smaps, nil -} - -func (s *ProcSMapsRollup) parseLine(line string) error { - kv := strings.SplitN(line, ":", 2) - if len(kv) != 2 { - fmt.Println(line) - return errors.New("invalid net/dev line, missing colon") - } - - k := kv[0] - if k == "VmFlags" { - return nil - } - - v := strings.TrimSpace(kv[1]) - v = strings.TrimRight(v, " kB") - - vKBytes, err := strconv.ParseUint(v, 10, 64) - if err != nil { - return err - } - vBytes := vKBytes * 1024 - - s.addValue(k, v, vKBytes, vBytes) - - return nil -} - -func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) { - switch k { - case "Rss": - s.Rss += vUintBytes - case "Pss": - s.Pss += vUintBytes - case "Shared_Clean": - s.SharedClean += vUintBytes - case "Shared_Dirty": - s.SharedDirty += vUintBytes - case "Private_Clean": - s.PrivateClean += vUintBytes - case "Private_Dirty": - s.PrivateDirty += vUintBytes - case "Referenced": - s.Referenced += vUintBytes - case "Anonymous": - s.Anonymous += vUintBytes - case "Swap": - s.Swap += vUintBytes - case "SwapPss": - s.SwapPss += vUintBytes - } -} diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index c58346d910f7..e30c2b88f473 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -33,37 +33,37 @@ type ProcStatus struct { TGID int // Peak virtual memory size. - VmPeak uint64 // nolint:golint + VmPeak uint64 // Virtual memory size. - VmSize uint64 // nolint:golint + VmSize uint64 // Locked memory size. - VmLck uint64 // nolint:golint + VmLck uint64 // Pinned memory size. - VmPin uint64 // nolint:golint + VmPin uint64 // Peak resident set size. - VmHWM uint64 // nolint:golint + VmHWM uint64 // Resident set size (sum of RssAnnon RssFile and RssShmem). - VmRSS uint64 // nolint:golint + VmRSS uint64 // Size of resident anonymous memory. - RssAnon uint64 // nolint:golint + RssAnon uint64 // Size of resident file mappings. - RssFile uint64 // nolint:golint + RssFile uint64 // Size of resident shared memory. - RssShmem uint64 // nolint:golint + RssShmem uint64 // Size of data segments. - VmData uint64 // nolint:golint + VmData uint64 // Size of stack segments. - VmStk uint64 // nolint:golint + VmStk uint64 // Size of text segments. - VmExe uint64 // nolint:golint + VmExe uint64 // Shared library code size. 
- VmLib uint64 // nolint:golint + VmLib uint64 // Page table entries size. - VmPTE uint64 // nolint:golint + VmPTE uint64 // Size of second-level page tables. - VmPMD uint64 // nolint:golint + VmPMD uint64 // Swapped-out virtual memory size by anonymous private. - VmSwap uint64 // nolint:golint + VmSwap uint64 // Size of hugetlb memory portions HugetlbPages uint64 @@ -71,9 +71,6 @@ type ProcStatus struct { VoluntaryCtxtSwitches uint64 // Number of involuntary context switches. NonVoluntaryCtxtSwitches uint64 - - // UIDs of the process (Real, effective, saved set, and filesystem UIDs (GIDs)) - UIDs [4]string } // NewStatus returns the current status information of the process. @@ -117,8 +114,6 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt s.TGID = int(vUint) case "Name": s.Name = vString - case "Uid": - copy(s.UIDs[:], strings.Split(vString, "\t")) case "VmPeak": s.VmPeak = vUintBytes case "VmSize": diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go deleted file mode 100644 index 15edc2212b67..000000000000 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Swap represents an entry in /proc/swaps. -type Swap struct { - Filename string - Type string - Size int - Used int - Priority int -} - -// Swaps returns a slice of all configured swap devices on the system. 
-func (fs FS) Swaps() ([]*Swap, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("swaps")) - if err != nil { - return nil, err - } - return parseSwaps(data) -} - -func parseSwaps(info []byte) ([]*Swap, error) { - swaps := []*Swap{} - scanner := bufio.NewScanner(bytes.NewReader(info)) - scanner.Scan() // ignore header line - for scanner.Scan() { - swapString := scanner.Text() - parsedSwap, err := parseSwapString(swapString) - if err != nil { - return nil, err - } - swaps = append(swaps, parsedSwap) - } - - err := scanner.Err() - return swaps, err -} - -func parseSwapString(swapString string) (*Swap, error) { - var err error - - swapFields := strings.Fields(swapString) - swapLength := len(swapFields) - if swapLength < 5 { - return nil, fmt.Errorf("too few fields in swap string: %s", swapString) - } - - swap := &Swap{ - Filename: swapFields[0], - Type: swapFields[1], - } - - swap.Size, err = strconv.Atoi(swapFields[2]) - if err != nil { - return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) - } - swap.Used, err = strconv.Atoi(swapFields[3]) - if err != nil { - return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) - } - swap.Priority, err = strconv.Atoi(swapFields[4]) - if err != nil { - return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) - } - - return swap, nil -} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/conn.go b/vendor/github.com/samuel/go-zookeeper/zk/conn.go index da9503a27161..f79a51b35516 100644 --- a/vendor/github.com/samuel/go-zookeeper/zk/conn.go +++ b/vendor/github.com/samuel/go-zookeeper/zk/conn.go @@ -409,11 +409,13 @@ func (c *Conn) resendZkAuth(reauthReadyChan chan struct{}) { defer close(reauthReadyChan) if c.logInfo { - c.logger.Printf("re-submitting `%d` credentials after reconnect", len(c.creds)) + c.logger.Printf("Re-submitting `%d` credentials after reconnect", + len(c.creds)) } for _, cred := range c.creds { if shouldCancel() { + c.logger.Printf("Cancel rer-submitting credentials") return } resChan, err := c.sendRequest( @@ -426,7 +428,7 @@ func (c *Conn) resendZkAuth(reauthReadyChan chan struct{}) { nil) if err != nil { - c.logger.Printf("call to sendRequest failed during credential resubmit: %s", err) + c.logger.Printf("Call to sendRequest failed during credential resubmit: %s", err) // FIXME(prozlach): lets ignore errors for now continue } @@ -435,14 +437,14 @@ func (c *Conn) resendZkAuth(reauthReadyChan chan struct{}) { select { case res = <-resChan: case <-c.closeChan: - c.logger.Printf("recv closed, cancel re-submitting credentials") + c.logger.Printf("Recv closed, cancel re-submitting credentials") return case <-c.shouldQuit: - c.logger.Printf("should quit, cancel re-submitting credentials") + c.logger.Printf("Should quit, cancel re-submitting credentials") return } if res.err != nil { - c.logger.Printf("credential re-submit failed: %s", res.err) + c.logger.Printf("Credential re-submit failed: %s", res.err) // FIXME(prozlach): lets ignore errors for now continue } @@ -484,14 +486,14 @@ func (c *Conn) loop() { err := c.authenticate() switch { case err == ErrSessionExpired: - c.logger.Printf("authentication failed: %s", err) + c.logger.Printf("Authentication failed: %s", err) c.invalidateWatches(err) case err != nil && c.conn != nil: - c.logger.Printf("authentication failed: %s", err) + c.logger.Printf("Authentication failed: %s", err) c.conn.Close() case err == nil: if c.logInfo { - c.logger.Printf("authenticated: id=%d, timeout=%d", c.SessionID(), c.sessionTimeoutMs) + c.logger.Printf("Authenticated: id=%d, 
timeout=%d", c.SessionID(), c.sessionTimeoutMs) } c.hostProvider.Connected() // mark success c.closeChan = make(chan struct{}) // channel to tell send loop stop @@ -506,7 +508,7 @@ func (c *Conn) loop() { } err := c.sendLoop() if err != nil || c.logInfo { - c.logger.Printf("send loop terminated: err=%v", err) + c.logger.Printf("Send loop terminated: err=%v", err) } c.conn.Close() // causes recv loop to EOF/exit wg.Done() @@ -521,7 +523,7 @@ func (c *Conn) loop() { err = c.recvLoop(c.conn) } if err != io.EOF || c.logInfo { - c.logger.Printf("recv loop terminated: err=%v", err) + c.logger.Printf("Recv loop terminated: err=%v", err) } if err == nil { panic("zk: recvLoop should never return nil error") @@ -695,28 +697,20 @@ func (c *Conn) authenticate() error { binary.BigEndian.PutUint32(buf[:4], uint32(n)) - if err := c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout * 10)); err != nil { - return err - } + c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout * 10)) _, err = c.conn.Write(buf[:n+4]) + c.conn.SetWriteDeadline(time.Time{}) if err != nil { return err } - if err := c.conn.SetWriteDeadline(time.Time{}); err != nil { - return err - } // Receive and decode a connect response. - if err := c.conn.SetReadDeadline(time.Now().Add(c.recvTimeout * 10)); err != nil { - return err - } + c.conn.SetReadDeadline(time.Now().Add(c.recvTimeout * 10)) _, err = io.ReadFull(c.conn, buf[:4]) + c.conn.SetReadDeadline(time.Time{}) if err != nil { return err } - if err := c.conn.SetReadDeadline(time.Time{}); err != nil { - return err - } blen := int(binary.BigEndian.Uint32(buf[:4])) if cap(buf) < blen { @@ -778,18 +772,14 @@ func (c *Conn) sendData(req *request) error { c.requests[req.xid] = req c.requestsLock.Unlock() - if err := c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)); err != nil { - return err - } + c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)) _, err = c.conn.Write(c.buf[:n+4]) + c.conn.SetWriteDeadline(time.Time{}) if err != nil { req.recvChan <- response{-1, err} c.conn.Close() return err } - if err := c.conn.SetWriteDeadline(time.Time{}); err != nil { - return err - } return nil } @@ -812,17 +802,13 @@ func (c *Conn) sendLoop() error { binary.BigEndian.PutUint32(c.buf[:4], uint32(n)) - if err := c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)); err != nil { - return err - } + c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)) _, err = c.conn.Write(c.buf[:n+4]) + c.conn.SetWriteDeadline(time.Time{}) if err != nil { c.conn.Close() return err } - if err := c.conn.SetWriteDeadline(time.Time{}); err != nil { - return err - } case <-c.closeChan: return nil } @@ -837,12 +823,10 @@ func (c *Conn) recvLoop(conn net.Conn) error { buf := make([]byte, sz) for { // package length - if err := conn.SetReadDeadline(time.Now().Add(c.recvTimeout)); err != nil { - c.logger.Printf("failed to set connection deadline: %v", err) - } + conn.SetReadDeadline(time.Now().Add(c.recvTimeout)) _, err := io.ReadFull(conn, buf[:4]) if err != nil { - return fmt.Errorf("failed to read from connection: %v", err) + return err } blen := int(binary.BigEndian.Uint32(buf[:4])) @@ -854,12 +838,10 @@ func (c *Conn) recvLoop(conn net.Conn) error { } _, err = io.ReadFull(conn, buf[:blen]) + conn.SetReadDeadline(time.Time{}) if err != nil { return err } - if err := conn.SetReadDeadline(time.Time{}); err != nil { - return err - } res := responseHeader{} _, err = decodePacket(buf[:16], &res) @@ -892,7 +874,7 @@ func (c *Conn) recvLoop(conn net.Conn) error { c.watchersLock.Lock() for _, t := range wTypes { wpt 
:= watchPathType{res.Path, t} - if watchers, ok := c.watchers[wpt]; ok { + if watchers := c.watchers[wpt]; watchers != nil && len(watchers) > 0 { for _, ch := range watchers { ch <- ev close(ch) @@ -1238,38 +1220,6 @@ func (c *Conn) Multi(ops ...interface{}) ([]MultiResponse, error) { return mr, err } -// IncrementalReconfig is the zookeeper reconfiguration api that allows adding and removing servers -// by lists of members. -// Return the new configuration stats. -func (c *Conn) IncrementalReconfig(joining, leaving []string, version int64) (*Stat, error) { - // TODO: validate the shape of the member string to give early feedback. - request := &reconfigRequest{ - JoiningServers: []byte(strings.Join(joining, ",")), - LeavingServers: []byte(strings.Join(leaving, ",")), - CurConfigId: version, - } - - return c.internalReconfig(request) -} - -// Reconfig is the non-incremental update functionality for Zookeeper where the list preovided -// is the entire new member list. -// the optional version allows for conditional reconfigurations, -1 ignores the condition. -func (c *Conn) Reconfig(members []string, version int64) (*Stat, error) { - request := &reconfigRequest{ - NewMembers: []byte(strings.Join(members, ",")), - CurConfigId: version, - } - - return c.internalReconfig(request) -} - -func (c *Conn) internalReconfig(request *reconfigRequest) (*Stat, error) { - response := &reconfigReponse{} - _, err := c.request(opReconfig, request, response, nil) - return &response.Stat, err -} - // Server returns the current or last-connected server name. func (c *Conn) Server() string { c.serverMu.Lock() diff --git a/vendor/github.com/samuel/go-zookeeper/zk/constants.go b/vendor/github.com/samuel/go-zookeeper/zk/constants.go index ccafcfc977ab..33b5563b9f33 100644 --- a/vendor/github.com/samuel/go-zookeeper/zk/constants.go +++ b/vendor/github.com/samuel/go-zookeeper/zk/constants.go @@ -2,7 +2,6 @@ package zk import ( "errors" - "fmt" ) const ( @@ -26,7 +25,6 @@ const ( opGetChildren2 = 12 opCheck = 13 opMulti = 14 - opReconfig = 16 opClose = -11 opSetAuth = 100 opSetWatches = 101 @@ -94,7 +92,7 @@ func (s State) String() string { if name := stateNames[s]; name != "" { return name } - return "unknown state" + return "Unknown" } type ErrCode int32 @@ -115,10 +113,8 @@ var ( ErrClosing = errors.New("zk: zookeeper is closing") ErrNothing = errors.New("zk: no server responsees to process") ErrSessionMoved = errors.New("zk: session moved to another server, so operation is ignored") - ErrReconfigDisabled = errors.New("attempts to perform a reconfiguration operation when reconfiguration feature is disabled") - ErrBadArguments = errors.New("invalid arguments") - // ErrInvalidCallback = errors.New("zk: invalid callback specified") + // ErrInvalidCallback = errors.New("zk: invalid callback specified") errCodeToError = map[ErrCode]error{ 0: nil, errAPIError: ErrAPIError, @@ -130,13 +126,11 @@ var ( errNotEmpty: ErrNotEmpty, errSessionExpired: ErrSessionExpired, // errInvalidCallback: ErrInvalidCallback, - errInvalidAcl: ErrInvalidACL, - errAuthFailed: ErrAuthFailed, - errClosing: ErrClosing, - errNothing: ErrNothing, - errSessionMoved: ErrSessionMoved, - errZReconfigDisabled: ErrReconfigDisabled, - errBadArguments: ErrBadArguments, + errInvalidAcl: ErrInvalidACL, + errAuthFailed: ErrAuthFailed, + errClosing: ErrClosing, + errNothing: ErrNothing, + errSessionMoved: ErrSessionMoved, } ) @@ -144,7 +138,7 @@ func (e ErrCode) toError() error { if err, ok := errCodeToError[e]; ok { return err } - return fmt.Errorf("unknown 
error: %v", e) + return ErrUnknown } const ( @@ -174,8 +168,6 @@ const ( errClosing ErrCode = -116 errNothing ErrCode = -117 errSessionMoved ErrCode = -118 - // Attempts to perform a reconfiguration operation when reconfiguration feature is disabled - errZReconfigDisabled ErrCode = -123 ) // Constants for ACL permissions @@ -205,7 +197,6 @@ var ( opGetChildren2: "getChildren2", opCheck: "check", opMulti: "multi", - opReconfig: "reconfig", opClose: "close", opSetAuth: "setAuth", opSetWatches: "setWatches", diff --git a/vendor/github.com/samuel/go-zookeeper/zk/flw.go b/vendor/github.com/samuel/go-zookeeper/zk/flw.go index 1fb8b2aed020..3e97f96876c5 100644 --- a/vendor/github.com/samuel/go-zookeeper/zk/flw.go +++ b/vendor/github.com/samuel/go-zookeeper/zk/flw.go @@ -255,16 +255,12 @@ func fourLetterWord(server, command string, timeout time.Duration) ([]byte, erro // once the command has been processed, but better safe than sorry defer conn.Close() - if err := conn.SetWriteDeadline(time.Now().Add(timeout)); err != nil { - return nil, err - } + conn.SetWriteDeadline(time.Now().Add(timeout)) _, err = conn.Write([]byte(command)) if err != nil { return nil, err } - if err := conn.SetReadDeadline(time.Now().Add(timeout)); err != nil { - return nil, err - } + conn.SetReadDeadline(time.Now().Add(timeout)) return ioutil.ReadAll(conn) } diff --git a/vendor/github.com/samuel/go-zookeeper/zk/server_help.go b/vendor/github.com/samuel/go-zookeeper/zk/server_help.go new file mode 100644 index 000000000000..3663064caed7 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/server_help.go @@ -0,0 +1,216 @@ +package zk + +import ( + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +type TestServer struct { + Port int + Path string + Srv *Server +} + +type TestCluster struct { + Path string + Servers []TestServer +} + +func StartTestCluster(size int, stdout, stderr io.Writer) (*TestCluster, error) { + tmpPath, err := ioutil.TempDir("", "gozk") + if err != nil { + return nil, err + } + success := false + startPort := int(rand.Int31n(6000) + 10000) + cluster := &TestCluster{Path: tmpPath} + defer func() { + if !success { + cluster.Stop() + } + }() + for serverN := 0; serverN < size; serverN++ { + srvPath := filepath.Join(tmpPath, fmt.Sprintf("srv%d", serverN)) + if err := os.Mkdir(srvPath, 0700); err != nil { + return nil, err + } + port := startPort + serverN*3 + cfg := ServerConfig{ + ClientPort: port, + DataDir: srvPath, + } + for i := 0; i < size; i++ { + cfg.Servers = append(cfg.Servers, ServerConfigServer{ + ID: i + 1, + Host: "127.0.0.1", + PeerPort: startPort + i*3 + 1, + LeaderElectionPort: startPort + i*3 + 2, + }) + } + cfgPath := filepath.Join(srvPath, "zoo.cfg") + fi, err := os.Create(cfgPath) + if err != nil { + return nil, err + } + err = cfg.Marshall(fi) + fi.Close() + if err != nil { + return nil, err + } + + fi, err = os.Create(filepath.Join(srvPath, "myid")) + if err != nil { + return nil, err + } + _, err = fmt.Fprintf(fi, "%d\n", serverN+1) + fi.Close() + if err != nil { + return nil, err + } + + srv := &Server{ + ConfigPath: cfgPath, + Stdout: stdout, + Stderr: stderr, + } + if err := srv.Start(); err != nil { + return nil, err + } + cluster.Servers = append(cluster.Servers, TestServer{ + Path: srvPath, + Port: cfg.ClientPort, + Srv: srv, + }) + } + if err := cluster.waitForStart(10, time.Second); err != nil { + return nil, err + } + success = true + return cluster, nil +} + +func (tc 
*TestCluster) Connect(idx int) (*Conn, error) { + zk, _, err := Connect([]string{fmt.Sprintf("127.0.0.1:%d", tc.Servers[idx].Port)}, time.Second*15) + return zk, err +} + +func (tc *TestCluster) ConnectAll() (*Conn, <-chan Event, error) { + return tc.ConnectAllTimeout(time.Second * 15) +} + +func (tc *TestCluster) ConnectAllTimeout(sessionTimeout time.Duration) (*Conn, <-chan Event, error) { + return tc.ConnectWithOptions(sessionTimeout) +} + +func (tc *TestCluster) ConnectWithOptions(sessionTimeout time.Duration, options ...connOption) (*Conn, <-chan Event, error) { + hosts := make([]string, len(tc.Servers)) + for i, srv := range tc.Servers { + hosts[i] = fmt.Sprintf("127.0.0.1:%d", srv.Port) + } + zk, ch, err := Connect(hosts, sessionTimeout, options...) + return zk, ch, err +} + +func (tc *TestCluster) Stop() error { + for _, srv := range tc.Servers { + srv.Srv.Stop() + } + defer os.RemoveAll(tc.Path) + return tc.waitForStop(5, time.Second) +} + +// waitForStart blocks until the cluster is up +func (tc *TestCluster) waitForStart(maxRetry int, interval time.Duration) error { + // verify that the servers are up with SRVR + serverAddrs := make([]string, len(tc.Servers)) + for i, s := range tc.Servers { + serverAddrs[i] = fmt.Sprintf("127.0.0.1:%d", s.Port) + } + + for i := 0; i < maxRetry; i++ { + _, ok := FLWSrvr(serverAddrs, time.Second) + if ok { + return nil + } + time.Sleep(interval) + } + return fmt.Errorf("unable to verify health of servers") +} + +// waitForStop blocks until the cluster is down +func (tc *TestCluster) waitForStop(maxRetry int, interval time.Duration) error { + // verify that the servers are up with RUOK + serverAddrs := make([]string, len(tc.Servers)) + for i, s := range tc.Servers { + serverAddrs[i] = fmt.Sprintf("127.0.0.1:%d", s.Port) + } + + var success bool + for i := 0; i < maxRetry && !success; i++ { + success = true + for _, ok := range FLWRuok(serverAddrs, time.Second) { + if ok { + success = false + } + } + if !success { + time.Sleep(interval) + } + } + if !success { + return fmt.Errorf("unable to verify servers are down") + } + return nil +} + +func (tc *TestCluster) StartServer(server string) { + for _, s := range tc.Servers { + if strings.HasSuffix(server, fmt.Sprintf(":%d", s.Port)) { + s.Srv.Start() + return + } + } + panic(fmt.Sprintf("Unknown server: %s", server)) +} + +func (tc *TestCluster) StopServer(server string) { + for _, s := range tc.Servers { + if strings.HasSuffix(server, fmt.Sprintf(":%d", s.Port)) { + s.Srv.Stop() + return + } + } + panic(fmt.Sprintf("Unknown server: %s", server)) +} + +func (tc *TestCluster) StartAllServers() error { + for _, s := range tc.Servers { + if err := s.Srv.Start(); err != nil { + return fmt.Errorf( + "Failed to start server listening on port `%d` : %+v", s.Port, err) + } + } + + return nil +} + +func (tc *TestCluster) StopAllServers() error { + for _, s := range tc.Servers { + if err := s.Srv.Stop(); err != nil { + return fmt.Errorf( + "Failed to stop server listening on port `%d` : %+v", s.Port, err) + } + } + + return nil +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/server_java.go b/vendor/github.com/samuel/go-zookeeper/zk/server_java.go new file mode 100644 index 000000000000..e553ec1d9fa6 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/server_java.go @@ -0,0 +1,136 @@ +package zk + +import ( + "fmt" + "io" + "os" + "os/exec" + "path/filepath" +) + +type ErrMissingServerConfigField string + +func (e ErrMissingServerConfigField) Error() string { + return fmt.Sprintf("zk: missing 
server config field '%s'", string(e)) +} + +const ( + DefaultServerTickTime = 2000 + DefaultServerInitLimit = 10 + DefaultServerSyncLimit = 5 + DefaultServerAutoPurgeSnapRetainCount = 3 + DefaultPeerPort = 2888 + DefaultLeaderElectionPort = 3888 +) + +type ServerConfigServer struct { + ID int + Host string + PeerPort int + LeaderElectionPort int +} + +type ServerConfig struct { + TickTime int // Number of milliseconds of each tick + InitLimit int // Number of ticks that the initial synchronization phase can take + SyncLimit int // Number of ticks that can pass between sending a request and getting an acknowledgement + DataDir string // Direcrory where the snapshot is stored + ClientPort int // Port at which clients will connect + AutoPurgeSnapRetainCount int // Number of snapshots to retain in dataDir + AutoPurgePurgeInterval int // Purge task internal in hours (0 to disable auto purge) + Servers []ServerConfigServer +} + +func (sc ServerConfig) Marshall(w io.Writer) error { + if sc.DataDir == "" { + return ErrMissingServerConfigField("dataDir") + } + fmt.Fprintf(w, "dataDir=%s\n", sc.DataDir) + if sc.TickTime <= 0 { + sc.TickTime = DefaultServerTickTime + } + fmt.Fprintf(w, "tickTime=%d\n", sc.TickTime) + if sc.InitLimit <= 0 { + sc.InitLimit = DefaultServerInitLimit + } + fmt.Fprintf(w, "initLimit=%d\n", sc.InitLimit) + if sc.SyncLimit <= 0 { + sc.SyncLimit = DefaultServerSyncLimit + } + fmt.Fprintf(w, "syncLimit=%d\n", sc.SyncLimit) + if sc.ClientPort <= 0 { + sc.ClientPort = DefaultPort + } + fmt.Fprintf(w, "clientPort=%d\n", sc.ClientPort) + if sc.AutoPurgePurgeInterval > 0 { + if sc.AutoPurgeSnapRetainCount <= 0 { + sc.AutoPurgeSnapRetainCount = DefaultServerAutoPurgeSnapRetainCount + } + fmt.Fprintf(w, "autopurge.snapRetainCount=%d\n", sc.AutoPurgeSnapRetainCount) + fmt.Fprintf(w, "autopurge.purgeInterval=%d\n", sc.AutoPurgePurgeInterval) + } + if len(sc.Servers) > 0 { + for _, srv := range sc.Servers { + if srv.PeerPort <= 0 { + srv.PeerPort = DefaultPeerPort + } + if srv.LeaderElectionPort <= 0 { + srv.LeaderElectionPort = DefaultLeaderElectionPort + } + fmt.Fprintf(w, "server.%d=%s:%d:%d\n", srv.ID, srv.Host, srv.PeerPort, srv.LeaderElectionPort) + } + } + return nil +} + +var jarSearchPaths = []string{ + "zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar", + "../zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar", + "/usr/share/java/zookeeper-*.jar", + "/usr/local/zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar", + "/usr/local/Cellar/zookeeper/*/libexec/contrib/fatjar/zookeeper-*-fatjar.jar", +} + +func findZookeeperFatJar() string { + var paths []string + zkPath := os.Getenv("ZOOKEEPER_PATH") + if zkPath == "" { + paths = jarSearchPaths + } else { + paths = []string{filepath.Join(zkPath, "contrib/fatjar/zookeeper-*-fatjar.jar")} + } + for _, path := range paths { + matches, _ := filepath.Glob(path) + // TODO: could sort by version and pick latest + if len(matches) > 0 { + return matches[0] + } + } + return "" +} + +type Server struct { + JarPath string + ConfigPath string + Stdout, Stderr io.Writer + + cmd *exec.Cmd +} + +func (srv *Server) Start() error { + if srv.JarPath == "" { + srv.JarPath = findZookeeperFatJar() + if srv.JarPath == "" { + return fmt.Errorf("zk: unable to find server jar") + } + } + srv.cmd = exec.Command("java", "-jar", srv.JarPath, "server", srv.ConfigPath) + srv.cmd.Stdout = srv.Stdout + srv.cmd.Stderr = srv.Stderr + return srv.cmd.Start() +} + +func (srv *Server) Stop() error { + srv.cmd.Process.Signal(os.Kill) + return srv.cmd.Wait() +} diff --git 
a/vendor/github.com/samuel/go-zookeeper/zk/structs.go b/vendor/github.com/samuel/go-zookeeper/zk/structs.go index 9400c3c0b708..d4af27deaad3 100644 --- a/vendor/github.com/samuel/go-zookeeper/zk/structs.go +++ b/vendor/github.com/samuel/go-zookeeper/zk/structs.go @@ -6,7 +6,6 @@ import ( "log" "reflect" "runtime" - "strings" "time" ) @@ -278,18 +277,6 @@ type multiResponse struct { DoneHeader multiHeader } -// zk version 3.5 reconfig API -type reconfigRequest struct { - JoiningServers []byte - LeavingServers []byte - NewMembers []byte - // curConfigId version of the current configuration - // optional - causes reconfiguration to return an error if configuration is no longer current - CurConfigId int64 -} - -type reconfigReponse getDataResponse - func (r *multiRequest) Encode(buf []byte) (int, error) { total := 0 for _, op := range r.Ops { @@ -405,7 +392,7 @@ type encoder interface { func decodePacket(buf []byte, st interface{}) (n int, err error) { defer func() { if r := recover(); r != nil { - if e, ok := r.(runtime.Error); ok && strings.HasPrefix(e.Error(), "runtime error: slice bounds out of range") { + if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" { err = ErrShortBuffer } else { panic(r) @@ -496,7 +483,7 @@ func decodePacketValue(buf []byte, v reflect.Value) (int, error) { func encodePacket(buf []byte, st interface{}) (n int, err error) { defer func() { if r := recover(); r != nil { - if e, ok := r.(runtime.Error); ok && strings.HasPrefix(e.Error(), "runtime error: slice bounds out of range") { + if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" { err = ErrShortBuffer } else { panic(r) @@ -617,8 +604,6 @@ func requestStructForOp(op int32) interface{} { return &CheckVersionRequest{} case opMulti: return &multiRequest{} - case opReconfig: - return &reconfigRequest{} } return nil } diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go index d2c2308f1f4f..1f7e87e67275 100644 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -86,7 +86,6 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) { raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), - best_width: -1, } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 9bcfea13a6c1..aa672ac08d46 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -494,13 +494,6 @@ github.com/hashicorp/raft github.com/hashicorp/raft-snapshot # github.com/hashicorp/serf v0.8.3 github.com/hashicorp/serf/coordinate -# github.com/hashicorp/shared-secure-libs v0.0.2 -github.com/hashicorp/shared-secure-libs/configutil -github.com/hashicorp/shared-secure-libs/gatedwriter -github.com/hashicorp/shared-secure-libs/kv-builder -github.com/hashicorp/shared-secure-libs/listenerutil -github.com/hashicorp/shared-secure-libs/metricsutil -github.com/hashicorp/shared-secure-libs/reloadutil # github.com/hashicorp/vault-plugin-auth-alicloud v0.5.5 github.com/hashicorp/vault-plugin-auth-alicloud github.com/hashicorp/vault-plugin-auth-alicloud/tools @@ -685,7 +678,7 @@ github.com/joyent/triton-go/client github.com/joyent/triton-go/compute github.com/joyent/triton-go/errors github.com/joyent/triton-go/storage -# github.com/json-iterator/go v1.1.10 +# github.com/json-iterator/go v1.1.9 github.com/json-iterator/go # github.com/jstemmer/go-junit-report v0.9.1 github.com/jstemmer/go-junit-report @@ -735,7 +728,7 @@ 
github.com/mholt/archiver github.com/michaelklishin/rabbit-hole # github.com/miekg/dns v1.1.15 github.com/miekg/dns -# github.com/mitchellh/cli v1.1.1 +# github.com/mitchellh/cli v1.0.0 github.com/mitchellh/cli # github.com/mitchellh/copystructure v1.0.0 github.com/mitchellh/copystructure @@ -843,17 +836,17 @@ github.com/pquerna/cachecontrol/cacheobject github.com/pquerna/otp github.com/pquerna/otp/hotp github.com/pquerna/otp/totp -# github.com/prometheus/client_golang v1.7.1 +# github.com/prometheus/client_golang v1.4.0 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/push # github.com/prometheus/client_model v0.2.0 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.11.1 +# github.com/prometheus/common v0.9.1 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.1.3 +# github.com/prometheus/procfs v0.0.8 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util @@ -865,7 +858,7 @@ github.com/renier/xmlrpc github.com/ryanuber/columnize # github.com/ryanuber/go-glob v1.0.0 github.com/ryanuber/go-glob -# github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da +# github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec github.com/samuel/go-zookeeper/zk # github.com/sasha-s/go-deadlock v0.2.0 github.com/sasha-s/go-deadlock @@ -1052,7 +1045,7 @@ golang.org/x/lint/golint # golang.org/x/mod v0.2.0 golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.0.0-20200625001655-4c5254603344 +# golang.org/x/net v0.0.0-20200602114024-627f9648deb9 golang.org/x/net/bpf golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -1295,7 +1288,7 @@ gopkg.in/square/go-jose.v2 gopkg.in/square/go-jose.v2/cipher gopkg.in/square/go-jose.v2/json gopkg.in/square/go-jose.v2/jwt -# gopkg.in/yaml.v2 v2.3.0 +# gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c gopkg.in/yaml.v3 From 65988085d071efa60d917082bfe2e752063363d2 Mon Sep 17 00:00:00 2001 From: Hridoy Roy Date: Wed, 14 Oct 2020 06:52:23 -0700 Subject: [PATCH 3/7] upgrade docs for new telemetry [VAULT-672] (#10137) * upgrade docs for new telemetry * Update telemetry.mdx Co-authored-by: HridoyRoy --- website/pages/docs/internals/telemetry.mdx | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/website/pages/docs/internals/telemetry.mdx b/website/pages/docs/internals/telemetry.mdx index bbe072c09e18..124eafd283aa 100644 --- a/website/pages/docs/internals/telemetry.mdx +++ b/website/pages/docs/internals/telemetry.mdx @@ -81,10 +81,15 @@ These metrics represent operational aspects of the running Vault instance. | `vault.barrier.get` | Duration of time taken by GET operations at the barrier | ms | summary | | `vault.barrier.put` | Duration of time taken by PUT operations at the barrier | ms | summary | | `vault.barrier.list` | Duration of time taken by LIST operations at the barrier | ms | summary | +| `vault.cache.hit` | Number of times a value was retrieved from the LRU cache. | cache hit | counter | +| `vault.cache.miss` | Number of times a value was not in the LRU cache. The results in a read from the configured storage. | cache miss | counter | +| `vault.cache.write` | Number of times a value was written to the LRU cache. 
| cache write | counter | +| `vault.cache.delete` | Number of times a value was deleted from the LRU cache. This does not count cache expirations. | cache delete | counter | | `vault.core.check_token` | Duration of time taken by token checks handled by Vault core | ms | summary | | `vault.core.fetch_acl_and_token` | Duration of time taken by ACL and corresponding token entry fetches handled by Vault core | ms | summary | | `vault.core.handle_request` | Duration of time taken by requests handled by Vault core | ms | summary | | `vault.core.handle_login_request` | Duration of time taken by login requests handled by Vault core | ms | summary | +| `vault.core.leader` | Has value 1 when the vault node is leader, and 0 when node is in standby. | bool | gauge | | `vault.core.leadership_setup_failed` | Duration of time taken by cluster leadership setup failures which have occurred in a highly available Vault cluster. This should be monitored and alerted on for overall cluster leadership status. | ms | summary | | `vault.core.leadership_lost` | Duration of time taken by cluster leadership losses which have occurred in a highly available Vault cluster. This should be monitored and alerted on for overall cluster leadership status. | ms | summary | | `vault.core.post_unseal` | Duration of time taken by post-unseal operations handled by Vault core | ms | summary | @@ -104,10 +109,6 @@ These metrics represent operational aspects of the running Vault instance. | `vault.route.list.` | Time taken to dispatch a list operation to a backend, and for that backend to process it. | ms | summary | | `vault.route.read.` | Time taken to dispatch a read operation to a backend, and for that backend to process it. | ms | summary | | `vault.route.rollback.` | Time taken to dispatch a rollback operation to a backend, and for that backend to process it. Rollback operations are automatically scheduled to clean up partial errors. | ms | summary | -| `vault.cache.hit` | Number of times a value was retrieved from the LRU cache. | cache hit | counter | -| `vault.cache.miss` | Number of times a value was not in the LRU cache. The results in a read from the configured storage. | cache miss | counter | -| `vault.cache.write` | Number of times a value was written to the LRU cache. | cache write | counter | -| `vault.cache.delete` | Number of times a value was deleted from the LRU cache. This does not count cache expirations. | cache delete | counter | ## Runtime Metrics From a83727f0cc2e05d3bae3ad381fec6c2a0a9a84cc Mon Sep 17 00:00:00 2001 From: Jim Kalafut Date: Wed, 14 Oct 2020 07:34:47 -0700 Subject: [PATCH 4/7] Add GCS storage change to 1.5.0 upgrade guide (#10139) --- website/pages/docs/upgrading/upgrade-to-1.5.0.mdx | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/website/pages/docs/upgrading/upgrade-to-1.5.0.mdx b/website/pages/docs/upgrading/upgrade-to-1.5.0.mdx index a29cecac063b..2946154738f5 100644 --- a/website/pages/docs/upgrading/upgrade-to-1.5.0.mdx +++ b/website/pages/docs/upgrading/upgrade-to-1.5.0.mdx @@ -12,6 +12,13 @@ description: |- This page contains the list of deprecations and important or breaking changes for Vault 1.5.0 compared to 1.4.1. Please read it carefully. +## Google Cloud Storage `credentials_file` removed + +The deprecated `credentials_file` config option has been removed. The `GOOGLE_APPLICATION_CREDENTIALS` +environment variable or default credentials may be used instead. 
See +[GCS Authentication](https://www.vaultproject.io/docs/configuration/storage/google-cloud-storage#gcs-authentication) +for details on supported options. + ## Raft Configuration A new Raft configuration value, `max_entry_size`, has been introduced. This value From 863f95c7d5d5ed603cd74861d3ab642491a8ec5e Mon Sep 17 00:00:00 2001 From: Jimmy Merritello <7191639+jmfury@users.noreply.github.com> Date: Wed, 14 Oct 2020 13:32:48 -0500 Subject: [PATCH 5/7] [Website] Bump HSM version (#10149) * Bump HSM version * Updated pkg --- website/package-lock.json | 6 +++--- website/package.json | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/website/package-lock.json b/website/package-lock.json index c5c2dc2a8824..449a838609c8 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -1748,9 +1748,9 @@ "integrity": "sha512-lv6XR2plm2m3+qO6VE+RYquTzOODIt3mQ/1fBT1bn7wsR0qxFiuryW4JfsF94oCGk++LkDkRt/8V742HiT+fHw==" }, "@hashicorp/react-hashi-stack-menu": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@hashicorp/react-hashi-stack-menu/-/react-hashi-stack-menu-1.0.3.tgz", - "integrity": "sha512-bzO6fBodh61OrunYIMQKfGSJQUKIq70TQ74dt2trBPjBsgURHMCIkiMmKffjuSdJjhCxx2nb+y0/B/PwxxHxmg==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@hashicorp/react-hashi-stack-menu/-/react-hashi-stack-menu-1.0.6.tgz", + "integrity": "sha512-GKUmF6hoPal9mi/y1lVFeQcizQ9Kc1iYVcYwgqEDMi40C84na6Wd/AFYRckC4MNruCFXtuS3Fs8Sv7l5qqW4wA==", "requires": { "@hashicorp/react-inline-svg": "^1.0.2", "slugify": "1.3.4" diff --git a/website/package.json b/website/package.json index 4d5d2de17e10..3eb07b255b34 100644 --- a/website/package.json +++ b/website/package.json @@ -12,7 +12,7 @@ "@hashicorp/react-docs-sidenav": "^3.2.5", "@hashicorp/react-enterprise-alert": "^2.1.0", "@hashicorp/react-global-styles": "^4.4.0", - "@hashicorp/react-hashi-stack-menu": "^1.0.3", + "@hashicorp/react-hashi-stack-menu": "^1.0.6", "@hashicorp/react-head": "^1.1.1", "@hashicorp/react-hero": "3.1.2", "@hashicorp/react-image": "^2.0.1", From da6f9f6fc01af7b62be3f02f980d1b9dd1bdc609 Mon Sep 17 00:00:00 2001 From: Lucas de los Santos Date: Wed, 14 Oct 2020 16:55:49 -0400 Subject: [PATCH 6/7] Fix robots.txt to disallow indexing of all paths (#10150) Following suit of recent change logged in Consul https://github.com/hashicorp/consul/pull/8958 The path pattern specified in allow / deny lines should start with a forward slash to designate the root. 
[1] https://tools.ietf.org/html/draft-koster-rep-00#section-2.2 [2] https://developers.google.com/search/reference/robots_txt --- ui/public/robots.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/public/robots.txt b/ui/public/robots.txt index 207b726856dd..217b42f0e090 100644 --- a/ui/public/robots.txt +++ b/ui/public/robots.txt @@ -1,3 +1,3 @@ # http://www.robotstxt.org User-agent: * -Disallow: * +Disallow: / From 3b5c6914d8c0ca8eb0011120382f60822fb060b5 Mon Sep 17 00:00:00 2001 From: Peter Souter Date: Thu, 15 Oct 2020 00:43:07 +0100 Subject: [PATCH 7/7] Adds note that it requires a PEM-encoded file (#10145) --- website/pages/docs/configuration/listener/tcp.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/pages/docs/configuration/listener/tcp.mdx b/website/pages/docs/configuration/listener/tcp.mdx index cb29ce802745..10c79f84191b 100644 --- a/website/pages/docs/configuration/listener/tcp.mdx +++ b/website/pages/docs/configuration/listener/tcp.mdx @@ -83,16 +83,16 @@ advertise the correct address to other nodes. insecure communication. - `tls_cert_file` `(string: , reloads-on-SIGHUP)` – - Specifies the path to the certificate for TLS. To configure the listener to - use a CA certificate, concatenate the primary certificate and the CA + Specifies the path to the certificate for TLS. It requires a PEM-encoded file. + To configure the listener to use a CA certificate, concatenate the primary certificate and the CA certificate together. The primary certificate should appear first in the combined file. On `SIGHUP`, the path set here _at Vault startup_ will be used for reloading the certificate; modifying this value while Vault is running will have no effect for `SIGHUP`s. - `tls_key_file` `(string: , reloads-on-SIGHUP)` – - Specifies the path to the private key for the certificate. If the key file - is encrypted, you will be prompted to enter the passphrase on server startup. + Specifies the path to the private key for the certificate. It requires a PEM-encoded file. + If the key file is encrypted, you will be prompted to enter the passphrase on server startup. The passphrase must stay the same between key files when reloading your configuration using `SIGHUP`. On `SIGHUP`, the path set here _at Vault startup_ will be used for reloading the certificate; modifying this value